mirror of
https://github.com/tendermint/tendermint.git
synced 2026-04-22 00:30:31 +00:00
feat: v0.34.x Prioritized Mempool (#8695)
* Updated mocks * add reactor tests * add v1 reactor tests * Fix fuzz test for priority mempool * e2e adapted to mempool v1; prio pool is default now * Reverted default mempool to be fifo * Changed buf version * Added priority mempool to ci testnet * Fixed linter * Updated makefile * Aligned makefile changes to v0.34.x * Added go install for proto * Add log message to warn about prioritized mempool bug Signed-off-by: Thane Thomson <connect@thanethomson.com> * Changelog message Co-authored-by: Jasmina Malicevic <jasmina.dustinac@gmail.com> Co-authored-by: Callum Waters <cmwaters19@gmail.com> Co-authored-by: Sam Kleinman <garen@tychoish.com> Co-authored-by: Thane Thomson <connect@thanethomson.com>
This commit is contained in:
committed by
GitHub
parent
25101d1116
commit
6b7d30cf37
@@ -21,6 +21,7 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi
|
||||
### FEATURES
|
||||
|
||||
- [cli] [\#8674] Add command to force compact goleveldb databases (@cmwaters)
|
||||
- [mempool] [\#8695] Introduction a priority mempool. (@alexanderbez, @jmalicevic, @cmwaters)
|
||||
|
||||
### IMPROVEMENTS
|
||||
|
||||
|
||||
4
Makefile
4
Makefile
@@ -84,7 +84,7 @@ mockery:
|
||||
|
||||
check-proto-deps:
|
||||
ifeq (,$(shell which protoc-gen-gogofaster))
|
||||
$(error "gogofaster plugin for protoc is required. Run 'go install github.com/gogo/protobuf/protoc-gen-gogofaster@latest' to install")
|
||||
@go install github.com/gogo/protobuf/protoc-gen-gogofaster@latest
|
||||
endif
|
||||
.PHONY: check-proto-deps
|
||||
|
||||
@@ -121,7 +121,7 @@ proto-check-breaking: check-proto-deps
|
||||
.PHONY: proto-check-breaking
|
||||
|
||||
proto-check-breaking-ci:
|
||||
@go run github.com/bufbuild/buf/cmd/buf breaking --against ".git"#branch=v0.34.x
|
||||
@go run github.com/bufbuild/buf/cmd/buf breaking --against $(HTTPS_GIT)#branch=v0.34.x
|
||||
.PHONY: proto-check-breaking-ci
|
||||
|
||||
###############################################################################
|
||||
|
||||
@@ -1967,6 +1967,11 @@ type ResponseCheckTx struct {
|
||||
GasUsed int64 `protobuf:"varint,6,opt,name=gas_used,proto3" json:"gas_used,omitempty"`
|
||||
Events []Event `protobuf:"bytes,7,rep,name=events,proto3" json:"events,omitempty"`
|
||||
Codespace string `protobuf:"bytes,8,opt,name=codespace,proto3" json:"codespace,omitempty"`
|
||||
Sender string `protobuf:"bytes,9,opt,name=sender,proto3" json:"sender,omitempty"`
|
||||
Priority int64 `protobuf:"varint,10,opt,name=priority,proto3" json:"priority,omitempty"`
|
||||
// mempool_error is set by Tendermint.
|
||||
// ABCI applictions creating a ResponseCheckTX should not set mempool_error.
|
||||
MempoolError string `protobuf:"bytes,11,opt,name=mempool_error,json=mempoolError,proto3" json:"mempool_error,omitempty"`
|
||||
}
|
||||
|
||||
func (m *ResponseCheckTx) Reset() { *m = ResponseCheckTx{} }
|
||||
@@ -2058,6 +2063,27 @@ func (m *ResponseCheckTx) GetCodespace() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *ResponseCheckTx) GetSender() string {
|
||||
if m != nil {
|
||||
return m.Sender
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *ResponseCheckTx) GetPriority() int64 {
|
||||
if m != nil {
|
||||
return m.Priority
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *ResponseCheckTx) GetMempoolError() string {
|
||||
if m != nil {
|
||||
return m.MempoolError
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type ResponseDeliverTx struct {
|
||||
Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
|
||||
Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
|
||||
@@ -3199,179 +3225,181 @@ func init() {
|
||||
func init() { proto.RegisterFile("tendermint/abci/types.proto", fileDescriptor_252557cfdd89a31a) }
|
||||
|
||||
var fileDescriptor_252557cfdd89a31a = []byte{
|
||||
// 2741 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x5a, 0x4b, 0x73, 0x1b, 0xc7,
|
||||
0x11, 0xc6, 0xfb, 0xd1, 0x24, 0x1e, 0x1c, 0xd1, 0x12, 0xb4, 0x92, 0x48, 0x79, 0x55, 0x72, 0x2c,
|
||||
0xd9, 0x26, 0x63, 0xaa, 0xa4, 0x48, 0xb1, 0x13, 0x9b, 0x80, 0x20, 0x83, 0x26, 0x4d, 0x30, 0x4b,
|
||||
0x48, 0xce, 0xcb, 0x5a, 0x2f, 0xb0, 0x43, 0x60, 0x2d, 0x60, 0x77, 0x8d, 0x1d, 0x50, 0xa4, 0x8f,
|
||||
0x71, 0x72, 0x51, 0x2e, 0xce, 0x2d, 0x17, 0xff, 0x8f, 0x9c, 0x72, 0xc9, 0xc5, 0x55, 0xb9, 0xf8,
|
||||
0x98, 0x93, 0x93, 0x92, 0x2a, 0x97, 0xfc, 0x81, 0x9c, 0x52, 0x49, 0xcd, 0x63, 0x5f, 0x00, 0x16,
|
||||
0x00, 0xed, 0xdc, 0x7c, 0x9b, 0x99, 0xed, 0xee, 0xc5, 0xf4, 0x4e, 0x7f, 0xfd, 0x75, 0x0f, 0xe0,
|
||||
0x12, 0xc1, 0xa6, 0x8e, 0x87, 0x03, 0xc3, 0x24, 0x9b, 0x5a, 0xbb, 0x63, 0x6c, 0x92, 0x53, 0x1b,
|
||||
0x3b, 0x1b, 0xf6, 0xd0, 0x22, 0x16, 0x2a, 0xf9, 0x0f, 0x37, 0xe8, 0x43, 0xe9, 0x4a, 0x40, 0xba,
|
||||
0x33, 0x3c, 0xb5, 0x89, 0xb5, 0x69, 0x0f, 0x2d, 0xeb, 0x88, 0xcb, 0x4b, 0x97, 0x03, 0x8f, 0x99,
|
||||
0x9d, 0xa0, 0xb5, 0xd0, 0x53, 0xa1, 0xfc, 0x04, 0x9f, 0xba, 0x4f, 0xaf, 0x4c, 0xe8, 0xda, 0xda,
|
||||
0x50, 0x1b, 0xb8, 0x8f, 0xd7, 0xbb, 0x96, 0xd5, 0xed, 0xe3, 0x4d, 0x36, 0x6b, 0x8f, 0x8e, 0x36,
|
||||
0x89, 0x31, 0xc0, 0x0e, 0xd1, 0x06, 0xb6, 0x10, 0x58, 0xed, 0x5a, 0x5d, 0x8b, 0x0d, 0x37, 0xe9,
|
||||
0x88, 0xaf, 0xca, 0x7f, 0xc8, 0x41, 0x56, 0xc1, 0x9f, 0x8e, 0xb0, 0x43, 0xd0, 0x16, 0xa4, 0x70,
|
||||
0xa7, 0x67, 0x55, 0xe2, 0x57, 0xe3, 0xaf, 0x2e, 0x6d, 0x5d, 0xde, 0x18, 0xdb, 0xdc, 0x86, 0x90,
|
||||
0xab, 0x77, 0x7a, 0x56, 0x23, 0xa6, 0x30, 0x59, 0x74, 0x1b, 0xd2, 0x47, 0xfd, 0x91, 0xd3, 0xab,
|
||||
0x24, 0x98, 0xd2, 0x95, 0x28, 0xa5, 0x07, 0x54, 0xa8, 0x11, 0x53, 0xb8, 0x34, 0x7d, 0x95, 0x61,
|
||||
0x1e, 0x59, 0x95, 0xe4, 0xec, 0x57, 0xed, 0x98, 0x47, 0xec, 0x55, 0x54, 0x16, 0x55, 0x01, 0x1c,
|
||||
0x4c, 0x54, 0xcb, 0x26, 0x86, 0x65, 0x56, 0x52, 0x4c, 0xf3, 0xe5, 0x28, 0xcd, 0x43, 0x4c, 0x9a,
|
||||
0x4c, 0xb0, 0x11, 0x53, 0xf2, 0x8e, 0x3b, 0xa1, 0x36, 0x0c, 0xd3, 0x20, 0x6a, 0xa7, 0xa7, 0x19,
|
||||
0x66, 0x25, 0x3d, 0xdb, 0xc6, 0x8e, 0x69, 0x90, 0x1a, 0x15, 0xa4, 0x36, 0x0c, 0x77, 0x42, 0xb7,
|
||||
0xfc, 0xe9, 0x08, 0x0f, 0x4f, 0x2b, 0x99, 0xd9, 0x5b, 0xfe, 0x19, 0x15, 0xa2, 0x5b, 0x66, 0xd2,
|
||||
0xa8, 0x0e, 0x4b, 0x6d, 0xdc, 0x35, 0x4c, 0xb5, 0xdd, 0xb7, 0x3a, 0x4f, 0x2a, 0x59, 0xa6, 0x2c,
|
||||
0x47, 0x29, 0x57, 0xa9, 0x68, 0x95, 0x4a, 0x36, 0x62, 0x0a, 0xb4, 0xbd, 0x19, 0x7a, 0x1b, 0x72,
|
||||
0x9d, 0x1e, 0xee, 0x3c, 0x51, 0xc9, 0x49, 0x25, 0xc7, 0x6c, 0xac, 0x47, 0xd9, 0xa8, 0x51, 0xb9,
|
||||
0xd6, 0x49, 0x23, 0xa6, 0x64, 0x3b, 0x7c, 0x48, 0xf7, 0xaf, 0xe3, 0xbe, 0x71, 0x8c, 0x87, 0x54,
|
||||
0x3f, 0x3f, 0x7b, 0xff, 0xf7, 0xb9, 0x24, 0xb3, 0x90, 0xd7, 0xdd, 0x09, 0x7a, 0x07, 0xf2, 0xd8,
|
||||
0xd4, 0xc5, 0x36, 0x80, 0x99, 0xb8, 0x1a, 0x79, 0x56, 0x4c, 0xdd, 0xdd, 0x44, 0x0e, 0x8b, 0x31,
|
||||
0xba, 0x0b, 0x99, 0x8e, 0x35, 0x18, 0x18, 0xa4, 0xb2, 0xc4, 0xb4, 0xd7, 0x22, 0x37, 0xc0, 0xa4,
|
||||
0x1a, 0x31, 0x45, 0xc8, 0xa3, 0x7d, 0x28, 0xf6, 0x0d, 0x87, 0xa8, 0x8e, 0xa9, 0xd9, 0x4e, 0xcf,
|
||||
0x22, 0x4e, 0x65, 0x99, 0x59, 0xb8, 0x1e, 0x65, 0x61, 0xcf, 0x70, 0xc8, 0xa1, 0x2b, 0xdc, 0x88,
|
||||
0x29, 0x85, 0x7e, 0x70, 0x81, 0xda, 0xb3, 0x8e, 0x8e, 0xf0, 0xd0, 0x33, 0x58, 0x29, 0xcc, 0xb6,
|
||||
0xd7, 0xa4, 0xd2, 0xae, 0x3e, 0xb5, 0x67, 0x05, 0x17, 0xd0, 0xaf, 0xe0, 0x5c, 0xdf, 0xd2, 0x74,
|
||||
0xcf, 0x9c, 0xda, 0xe9, 0x8d, 0xcc, 0x27, 0x95, 0x22, 0x33, 0x7a, 0x23, 0xf2, 0x47, 0x5a, 0x9a,
|
||||
0xee, 0x9a, 0xa8, 0x51, 0x85, 0x46, 0x4c, 0x59, 0xe9, 0x8f, 0x2f, 0xa2, 0xc7, 0xb0, 0xaa, 0xd9,
|
||||
0x76, 0xff, 0x74, 0xdc, 0x7a, 0x89, 0x59, 0xbf, 0x19, 0x65, 0x7d, 0x9b, 0xea, 0x8c, 0x9b, 0x47,
|
||||
0xda, 0xc4, 0x6a, 0x35, 0x0b, 0xe9, 0x63, 0xad, 0x3f, 0xc2, 0xf2, 0x0f, 0x60, 0x29, 0x10, 0xea,
|
||||
0xa8, 0x02, 0xd9, 0x01, 0x76, 0x1c, 0xad, 0x8b, 0x19, 0x32, 0xe4, 0x15, 0x77, 0x2a, 0x17, 0x61,
|
||||
0x39, 0x18, 0xde, 0xf2, 0xc0, 0x53, 0xa4, 0x81, 0x4b, 0x15, 0x8f, 0xf1, 0xd0, 0xa1, 0xd1, 0x2a,
|
||||
0x14, 0xc5, 0x14, 0x5d, 0x83, 0x02, 0x3b, 0x3e, 0xaa, 0xfb, 0x9c, 0xa2, 0x47, 0x4a, 0x59, 0x66,
|
||||
0x8b, 0x8f, 0x84, 0xd0, 0x3a, 0x2c, 0xd9, 0x5b, 0xb6, 0x27, 0x92, 0x64, 0x22, 0x60, 0x6f, 0xd9,
|
||||
0x42, 0x40, 0xfe, 0x31, 0x94, 0xc7, 0xa3, 0x1d, 0x95, 0x21, 0xf9, 0x04, 0x9f, 0x8a, 0xf7, 0xd1,
|
||||
0x21, 0x5a, 0x15, 0xdb, 0x62, 0xef, 0xc8, 0x2b, 0x62, 0x8f, 0x7f, 0x4d, 0x78, 0xca, 0x5e, 0x98,
|
||||
0xa3, 0xbb, 0x90, 0xa2, 0xa8, 0x29, 0x00, 0x50, 0xda, 0xe0, 0x90, 0xba, 0xe1, 0x42, 0xea, 0x46,
|
||||
0xcb, 0x85, 0xd4, 0x6a, 0xee, 0xab, 0x6f, 0xd6, 0x63, 0x5f, 0xfc, 0x7d, 0x3d, 0xae, 0x30, 0x0d,
|
||||
0x74, 0x91, 0x46, 0xa5, 0x66, 0x98, 0xaa, 0xa1, 0x8b, 0xf7, 0x64, 0xd9, 0x7c, 0x47, 0x47, 0xbb,
|
||||
0x50, 0xee, 0x58, 0xa6, 0x83, 0x4d, 0x67, 0xe4, 0xa8, 0x1c, 0xb2, 0x05, 0xec, 0x4d, 0x46, 0x4d,
|
||||
0xcd, 0x15, 0x3c, 0x60, 0x72, 0x4a, 0xa9, 0x13, 0x5e, 0x40, 0x0f, 0x00, 0x8e, 0xb5, 0xbe, 0xa1,
|
||||
0x6b, 0xc4, 0x1a, 0x3a, 0x95, 0xd4, 0xd5, 0xe4, 0x54, 0x33, 0x8f, 0x5c, 0x91, 0x87, 0xb6, 0xae,
|
||||
0x11, 0x5c, 0x4d, 0xd1, 0x5f, 0xab, 0x04, 0x34, 0xd1, 0x2b, 0x50, 0xd2, 0x6c, 0x5b, 0x75, 0x88,
|
||||
0x46, 0xb0, 0xda, 0x3e, 0x25, 0xd8, 0x61, 0x60, 0xb8, 0xac, 0x14, 0x34, 0xdb, 0x3e, 0xa4, 0xab,
|
||||
0x55, 0xba, 0x88, 0xae, 0x43, 0x91, 0x02, 0x9f, 0xa1, 0xf5, 0xd5, 0x1e, 0x36, 0xba, 0x3d, 0xc2,
|
||||
0x40, 0x2f, 0xa9, 0x14, 0xc4, 0x6a, 0x83, 0x2d, 0xca, 0xba, 0x77, 0x10, 0x18, 0xe8, 0x21, 0x04,
|
||||
0x29, 0x5d, 0x23, 0x1a, 0x73, 0xe4, 0xb2, 0xc2, 0xc6, 0x74, 0xcd, 0xd6, 0x48, 0x4f, 0xb8, 0x87,
|
||||
0x8d, 0xd1, 0x79, 0xc8, 0x08, 0xb3, 0x49, 0x66, 0x56, 0xcc, 0xe8, 0x37, 0xb3, 0x87, 0xd6, 0x31,
|
||||
0x66, 0x28, 0x9f, 0x53, 0xf8, 0x44, 0xfe, 0x6d, 0x02, 0x56, 0x26, 0xe0, 0x91, 0xda, 0xed, 0x69,
|
||||
0x4e, 0xcf, 0x7d, 0x17, 0x1d, 0xa3, 0x3b, 0xd4, 0xae, 0xa6, 0xe3, 0xa1, 0x48, 0x4b, 0x95, 0xa0,
|
||||
0x8b, 0x78, 0xca, 0x6d, 0xb0, 0xe7, 0xc2, 0x35, 0x42, 0x1a, 0x35, 0xa1, 0xdc, 0xd7, 0x1c, 0xa2,
|
||||
0x72, 0xb8, 0x51, 0x03, 0x29, 0x6a, 0x12, 0x64, 0xf7, 0x34, 0x17, 0xa0, 0xe8, 0x61, 0x17, 0x86,
|
||||
0x8a, 0xfd, 0xd0, 0x2a, 0x52, 0x60, 0xb5, 0x7d, 0xfa, 0x99, 0x66, 0x12, 0xc3, 0xc4, 0xea, 0xc4,
|
||||
0x97, 0xbb, 0x38, 0x61, 0xb4, 0x7e, 0x6c, 0xe8, 0xd8, 0xec, 0xb8, 0x9f, 0xec, 0x9c, 0xa7, 0xec,
|
||||
0x7d, 0x52, 0x47, 0x56, 0xa0, 0x18, 0x06, 0x78, 0x54, 0x84, 0x04, 0x39, 0x11, 0x0e, 0x48, 0x90,
|
||||
0x13, 0xf4, 0x43, 0x48, 0xd1, 0x4d, 0xb2, 0xcd, 0x17, 0xa7, 0x64, 0x57, 0xa1, 0xd7, 0x3a, 0xb5,
|
||||
0xb1, 0xc2, 0x24, 0x65, 0xd9, 0x8b, 0x06, 0x0f, 0xf4, 0xc7, 0xad, 0xca, 0x37, 0xa0, 0x34, 0x86,
|
||||
0xea, 0x81, 0xef, 0x17, 0x0f, 0x7e, 0x3f, 0xb9, 0x04, 0x85, 0x10, 0x84, 0xcb, 0xe7, 0x61, 0x75,
|
||||
0x1a, 0x22, 0xcb, 0x3d, 0x6f, 0x3d, 0x84, 0xac, 0xe8, 0x36, 0xe4, 0x3c, 0x48, 0xe6, 0xd1, 0x38,
|
||||
0xe9, 0x2b, 0x57, 0x58, 0xf1, 0x44, 0x69, 0x18, 0xd2, 0x63, 0xcd, 0xce, 0x43, 0x82, 0xfd, 0xf0,
|
||||
0xac, 0x66, 0xdb, 0x0d, 0xcd, 0xe9, 0xc9, 0x1f, 0x43, 0x25, 0x0a, 0x6e, 0xc7, 0xb6, 0x91, 0xf2,
|
||||
0x8e, 0xe1, 0x79, 0xc8, 0x1c, 0x59, 0xc3, 0x81, 0x46, 0x98, 0xb1, 0x82, 0x22, 0x66, 0xf4, 0x78,
|
||||
0x72, 0xe8, 0x4d, 0xb2, 0x65, 0x3e, 0x91, 0x55, 0xb8, 0x18, 0x09, 0xb9, 0x54, 0xc5, 0x30, 0x75,
|
||||
0xcc, 0xfd, 0x59, 0x50, 0xf8, 0xc4, 0x37, 0xc4, 0x7f, 0x2c, 0x9f, 0xd0, 0xd7, 0x3a, 0x6c, 0xaf,
|
||||
0xcc, 0x7e, 0x5e, 0x11, 0x33, 0xf9, 0x9f, 0x39, 0xc8, 0x29, 0xd8, 0xb1, 0x29, 0x26, 0xa0, 0x2a,
|
||||
0xe4, 0xf1, 0x49, 0x07, 0x73, 0x32, 0x14, 0x8f, 0x24, 0x13, 0x5c, 0xba, 0xee, 0x4a, 0xd2, 0x4c,
|
||||
0xee, 0xa9, 0xa1, 0x5b, 0x82, 0xf0, 0x45, 0x73, 0x37, 0xa1, 0x1e, 0x64, 0x7c, 0x77, 0x5c, 0xc6,
|
||||
0x97, 0x8c, 0x4c, 0xde, 0x5c, 0x6b, 0x8c, 0xf2, 0xdd, 0x12, 0x94, 0x2f, 0x35, 0xe7, 0x65, 0x21,
|
||||
0xce, 0x57, 0x0b, 0x71, 0xbe, 0xf4, 0x9c, 0x6d, 0x46, 0x90, 0xbe, 0x5a, 0x88, 0xf4, 0x65, 0xe6,
|
||||
0x18, 0x89, 0x60, 0x7d, 0x77, 0x5c, 0xd6, 0x97, 0x9d, 0xb3, 0xed, 0x31, 0xda, 0xf7, 0x20, 0x4c,
|
||||
0xfb, 0x38, 0x65, 0xbb, 0x16, 0xa9, 0x1d, 0xc9, 0xfb, 0x7e, 0x12, 0xe0, 0x7d, 0xf9, 0x48, 0xd2,
|
||||
0xc5, 0x8d, 0x4c, 0x21, 0x7e, 0xb5, 0x10, 0xf1, 0x83, 0x39, 0x3e, 0x88, 0x60, 0x7e, 0xef, 0x06,
|
||||
0x99, 0xdf, 0x52, 0x24, 0x79, 0x14, 0x87, 0x66, 0x1a, 0xf5, 0xbb, 0xe7, 0x51, 0xbf, 0xe5, 0x48,
|
||||
0xee, 0x2a, 0xf6, 0x30, 0xce, 0xfd, 0x9a, 0x13, 0xdc, 0x8f, 0x73, 0xb5, 0x57, 0x22, 0x4d, 0xcc,
|
||||
0x21, 0x7f, 0xcd, 0x09, 0xf2, 0x57, 0x9c, 0x63, 0x70, 0x0e, 0xfb, 0xfb, 0xf5, 0x74, 0xf6, 0x17,
|
||||
0xcd, 0xcf, 0xc4, 0xcf, 0x5c, 0x8c, 0xfe, 0xa9, 0x11, 0xf4, 0xaf, 0xcc, 0xcc, 0xbf, 0x16, 0x69,
|
||||
0xfe, 0xec, 0xfc, 0xef, 0x06, 0x4d, 0xb3, 0x63, 0xc0, 0x41, 0xa1, 0x0a, 0x0f, 0x87, 0xd6, 0x50,
|
||||
0x50, 0x2b, 0x3e, 0x91, 0x5f, 0xa5, 0x89, 0xdf, 0x07, 0x89, 0x19, 0x5c, 0x91, 0xa5, 0x84, 0x00,
|
||||
0x30, 0xc8, 0x7f, 0x8a, 0xfb, 0xba, 0x2c, 0x57, 0x06, 0x49, 0x43, 0x5e, 0x90, 0x86, 0x00, 0x85,
|
||||
0x4c, 0x84, 0x29, 0xe4, 0x3a, 0x2c, 0x51, 0xa8, 0x1f, 0x63, 0x87, 0x9a, 0xed, 0xb2, 0x43, 0x74,
|
||||
0x13, 0x56, 0x58, 0x2e, 0xe7, 0x44, 0x53, 0xe0, 0x7b, 0x8a, 0xa5, 0xa9, 0x12, 0x7d, 0xc0, 0x0f,
|
||||
0x27, 0x07, 0xfa, 0x37, 0xe0, 0x5c, 0x40, 0xd6, 0x4b, 0x21, 0x9c, 0x12, 0x95, 0x3d, 0xe9, 0x6d,
|
||||
0x91, 0x4b, 0x3e, 0xf0, 0x1d, 0xe4, 0x33, 0x4f, 0x04, 0xa9, 0x8e, 0xa5, 0x63, 0x01, 0xf0, 0x6c,
|
||||
0x4c, 0xd9, 0x68, 0xdf, 0xea, 0x0a, 0x18, 0xa7, 0x43, 0x2a, 0xe5, 0xa1, 0x60, 0x9e, 0x83, 0x9c,
|
||||
0xfc, 0x97, 0xb8, 0x6f, 0xcf, 0x27, 0xa3, 0xd3, 0x78, 0x63, 0xfc, 0xff, 0xc3, 0x1b, 0x13, 0xdf,
|
||||
0x9a, 0x37, 0x06, 0x13, 0x6c, 0x32, 0x9c, 0x60, 0xff, 0x1d, 0xf7, 0xbf, 0xb0, 0xc7, 0x02, 0xbf,
|
||||
0x9d, 0x47, 0xfc, 0x6c, 0x99, 0x66, 0xdf, 0x4b, 0x64, 0x4b, 0xc1, 0xed, 0x33, 0xec, 0xbd, 0x61,
|
||||
0x6e, 0x9f, 0xe5, 0xf9, 0x93, 0x4d, 0xd0, 0x5d, 0xc8, 0xb3, 0xa6, 0x8b, 0x6a, 0xd9, 0x8e, 0x00,
|
||||
0xdc, 0x4b, 0xc1, 0xbd, 0xf2, 0xde, 0xca, 0xc6, 0x01, 0x95, 0x69, 0xda, 0x8e, 0x92, 0xb3, 0xc5,
|
||||
0x28, 0x40, 0x04, 0xf2, 0x21, 0x3e, 0x7a, 0x19, 0xf2, 0xf4, 0xd7, 0x3b, 0xb6, 0xd6, 0xc1, 0x0c,
|
||||
0x3c, 0xf3, 0x8a, 0xbf, 0x20, 0x3f, 0x06, 0x34, 0x09, 0xdf, 0xa8, 0x01, 0x19, 0x7c, 0x8c, 0x4d,
|
||||
0x42, 0xbf, 0x1a, 0x75, 0xf7, 0xf9, 0x29, 0x64, 0x0f, 0x9b, 0xa4, 0x5a, 0xa1, 0x4e, 0xfe, 0xd7,
|
||||
0x37, 0xeb, 0x65, 0x2e, 0xfd, 0xba, 0x35, 0x30, 0x08, 0x1e, 0xd8, 0xe4, 0x54, 0x11, 0xfa, 0xf2,
|
||||
0xe7, 0x09, 0xca, 0xbc, 0x42, 0xd0, 0x3e, 0xd5, 0xb7, 0x6e, 0x00, 0x25, 0x02, 0xac, 0x7b, 0x31,
|
||||
0x7f, 0xaf, 0x01, 0x74, 0x35, 0x47, 0x7d, 0xaa, 0x99, 0x04, 0xeb, 0xc2, 0xe9, 0x81, 0x15, 0x24,
|
||||
0x41, 0x8e, 0xce, 0x46, 0x0e, 0xd6, 0x45, 0x01, 0xe0, 0xcd, 0x03, 0xfb, 0xcc, 0x7e, 0xb7, 0x7d,
|
||||
0x86, 0xbd, 0x9c, 0x1b, 0xf7, 0xf2, 0xef, 0x12, 0x7e, 0x94, 0xf8, 0x24, 0xf5, 0xfb, 0xe7, 0x87,
|
||||
0xdf, 0xb3, 0xca, 0x35, 0x9c, 0x63, 0xd1, 0x21, 0xac, 0x78, 0x51, 0xaa, 0x8e, 0x58, 0xf4, 0xba,
|
||||
0xe7, 0x6e, 0xd1, 0x30, 0x2f, 0x1f, 0x87, 0x97, 0x1d, 0xf4, 0x73, 0xb8, 0x30, 0x86, 0x40, 0x9e,
|
||||
0xe9, 0xc4, 0x82, 0x40, 0xf4, 0x52, 0x18, 0x88, 0x5c, 0xcb, 0xbe, 0xaf, 0x92, 0xdf, 0x31, 0x36,
|
||||
0x76, 0x68, 0x31, 0x14, 0x64, 0x0c, 0x53, 0xbf, 0xfe, 0x35, 0x28, 0x0c, 0x31, 0xa1, 0xf5, 0x79,
|
||||
0xa8, 0xdc, 0x5c, 0xe6, 0x8b, 0xa2, 0x88, 0x3d, 0x80, 0x97, 0xa6, 0x32, 0x07, 0xf4, 0x23, 0xc8,
|
||||
0xfb, 0xa4, 0x23, 0x1e, 0x51, 0xb9, 0x79, 0xd5, 0x88, 0x2f, 0x2b, 0xff, 0x39, 0xee, 0x9b, 0x0c,
|
||||
0xd7, 0x37, 0x75, 0xc8, 0x0c, 0xb1, 0x33, 0xea, 0xf3, 0x8a, 0xa3, 0xb8, 0xf5, 0xc6, 0x62, 0x9c,
|
||||
0x83, 0xae, 0x8e, 0xfa, 0x44, 0x11, 0xca, 0xf2, 0x63, 0xc8, 0xf0, 0x15, 0xb4, 0x04, 0xd9, 0x87,
|
||||
0xfb, 0xbb, 0xfb, 0xcd, 0x0f, 0xf7, 0xcb, 0x31, 0x04, 0x90, 0xd9, 0xae, 0xd5, 0xea, 0x07, 0xad,
|
||||
0x72, 0x1c, 0xe5, 0x21, 0xbd, 0x5d, 0x6d, 0x2a, 0xad, 0x72, 0x82, 0x2e, 0x2b, 0xf5, 0xf7, 0xeb,
|
||||
0xb5, 0x56, 0x39, 0x89, 0x56, 0xa0, 0xc0, 0xc7, 0xea, 0x83, 0xa6, 0xf2, 0xc1, 0x76, 0xab, 0x9c,
|
||||
0x0a, 0x2c, 0x1d, 0xd6, 0xf7, 0xef, 0xd7, 0x95, 0x72, 0x5a, 0x7e, 0x93, 0x96, 0x34, 0x11, 0x2c,
|
||||
0xc5, 0x2f, 0x5e, 0xe2, 0x81, 0xe2, 0x45, 0xfe, 0x63, 0x02, 0xa4, 0x68, 0xea, 0x81, 0xde, 0x1f,
|
||||
0xdb, 0xf8, 0xd6, 0x19, 0x78, 0xcb, 0xd8, 0xee, 0xd1, 0x75, 0x28, 0x0e, 0xf1, 0x11, 0x26, 0x9d,
|
||||
0x1e, 0xa7, 0x42, 0x3c, 0xb1, 0x15, 0x94, 0x82, 0x58, 0x65, 0x4a, 0x0e, 0x17, 0xfb, 0x04, 0x77,
|
||||
0x88, 0xca, 0xeb, 0x28, 0x7e, 0xe8, 0xf2, 0x54, 0x8c, 0xae, 0x1e, 0xf2, 0x45, 0xf9, 0xe3, 0x33,
|
||||
0xf9, 0x32, 0x0f, 0x69, 0xa5, 0xde, 0x52, 0x7e, 0x51, 0x4e, 0x22, 0x04, 0x45, 0x36, 0x54, 0x0f,
|
||||
0xf7, 0xb7, 0x0f, 0x0e, 0x1b, 0x4d, 0xea, 0xcb, 0x73, 0x50, 0x72, 0x7d, 0xe9, 0x2e, 0xa6, 0xe5,
|
||||
0xff, 0xc6, 0xa1, 0x34, 0x16, 0x20, 0x68, 0x0b, 0xd2, 0x9c, 0x4e, 0x47, 0x35, 0xdd, 0x59, 0x7c,
|
||||
0x8b, 0x68, 0xe2, 0xa2, 0xe8, 0x6d, 0xc8, 0x61, 0xd1, 0x27, 0x98, 0x16, 0x88, 0xbc, 0xbf, 0xe1,
|
||||
0x76, 0x12, 0x84, 0xaa, 0xa7, 0x81, 0xde, 0x81, 0xbc, 0x17, 0xe9, 0xa2, 0x86, 0x7b, 0x79, 0x52,
|
||||
0xdd, 0xc3, 0x08, 0xa1, 0xef, 0xeb, 0xa0, 0x7b, 0x3e, 0x27, 0x4b, 0x4d, 0x92, 0x78, 0xa1, 0xce,
|
||||
0x05, 0x84, 0xb2, 0x2b, 0x2f, 0xd7, 0x60, 0x29, 0xb0, 0x1f, 0x74, 0x09, 0xf2, 0x03, 0xed, 0x44,
|
||||
0xf4, 0x9f, 0x78, 0x07, 0x21, 0x37, 0xd0, 0x4e, 0x78, 0xeb, 0xe9, 0x02, 0x64, 0xe9, 0xc3, 0xae,
|
||||
0xc6, 0xd1, 0x26, 0xa9, 0x64, 0x06, 0xda, 0xc9, 0x7b, 0x9a, 0x23, 0x7f, 0x04, 0xc5, 0x70, 0xef,
|
||||
0x85, 0x9e, 0xc4, 0xa1, 0x35, 0x32, 0x75, 0x66, 0x23, 0xad, 0xf0, 0x09, 0xba, 0x0d, 0xe9, 0x63,
|
||||
0x8b, 0x83, 0xd5, 0xf4, 0x90, 0x7d, 0x64, 0x11, 0x1c, 0xe8, 0xdd, 0x70, 0x69, 0xf9, 0x33, 0x48,
|
||||
0x33, 0xf0, 0xa1, 0x40, 0xc2, 0xba, 0x28, 0x82, 0x8f, 0xd2, 0x31, 0xfa, 0x08, 0x40, 0x23, 0x64,
|
||||
0x68, 0xb4, 0x47, 0xbe, 0xe1, 0xf5, 0xe9, 0xe0, 0xb5, 0xed, 0xca, 0x55, 0x2f, 0x0b, 0x14, 0x5b,
|
||||
0xf5, 0x55, 0x03, 0x48, 0x16, 0x30, 0x28, 0xef, 0x43, 0x31, 0xac, 0x1b, 0xec, 0x67, 0x2e, 0x4f,
|
||||
0xe9, 0x67, 0x7a, 0x9c, 0xc7, 0x63, 0x4c, 0x49, 0xde, 0x31, 0x63, 0x13, 0xf9, 0x59, 0x1c, 0x72,
|
||||
0xad, 0x13, 0x71, 0xac, 0x23, 0x9a, 0x35, 0xbe, 0x6a, 0x22, 0xd8, 0x9a, 0xe0, 0xdd, 0x9f, 0xa4,
|
||||
0xd7, 0x53, 0x7a, 0xd7, 0x0b, 0xdc, 0xd4, 0xa2, 0xc5, 0xa3, 0xdb, 0x5c, 0x13, 0x60, 0xf5, 0x16,
|
||||
0xe4, 0xbd, 0x53, 0x45, 0x89, 0xbd, 0xa6, 0xeb, 0x43, 0xec, 0x38, 0x62, 0x6f, 0xee, 0x94, 0xf5,
|
||||
0xfe, 0xac, 0xa7, 0xa2, 0xf9, 0x91, 0x54, 0xf8, 0x44, 0xd6, 0xa1, 0x34, 0x96, 0xb6, 0xd0, 0x5b,
|
||||
0x90, 0xb5, 0x47, 0x6d, 0xd5, 0x75, 0xcf, 0x58, 0xf0, 0xb8, 0x24, 0x6f, 0xd4, 0xee, 0x1b, 0x9d,
|
||||
0x5d, 0x7c, 0xea, 0xfe, 0x18, 0x7b, 0xd4, 0xde, 0xe5, 0x5e, 0xe4, 0x6f, 0x49, 0x04, 0xdf, 0x72,
|
||||
0x0c, 0x39, 0xf7, 0x50, 0xa0, 0x9f, 0x06, 0xe3, 0xc4, 0xed, 0x08, 0x47, 0xa6, 0x52, 0x61, 0x3e,
|
||||
0x10, 0x26, 0x37, 0x61, 0xc5, 0x31, 0xba, 0x26, 0xd6, 0x55, 0xbf, 0xb4, 0x60, 0x6f, 0xcb, 0x29,
|
||||
0x25, 0xfe, 0x60, 0xcf, 0xad, 0x2b, 0xe4, 0xff, 0xc4, 0x21, 0xe7, 0x06, 0x2c, 0x7a, 0x33, 0x70,
|
||||
0xee, 0x8a, 0x53, 0x1a, 0x25, 0xae, 0xa0, 0xdf, 0xbe, 0x0b, 0xff, 0xd6, 0xc4, 0xd9, 0x7f, 0x6b,
|
||||
0x54, 0x1f, 0xd6, 0x6d, 0x88, 0xa7, 0xce, 0xdc, 0x10, 0x7f, 0x1d, 0x10, 0xb1, 0x88, 0xd6, 0x57,
|
||||
0x8f, 0x2d, 0x62, 0x98, 0x5d, 0x95, 0x3b, 0x9b, 0x33, 0xaa, 0x32, 0x7b, 0xf2, 0x88, 0x3d, 0x38,
|
||||
0x60, 0x7e, 0xff, 0x4d, 0x1c, 0x72, 0x5e, 0x6e, 0x3c, 0x6b, 0x37, 0xee, 0x3c, 0x64, 0x04, 0xfc,
|
||||
0xf3, 0x76, 0x9c, 0x98, 0x79, 0x8d, 0xe1, 0x54, 0xa0, 0x31, 0x2c, 0x41, 0x6e, 0x80, 0x89, 0xc6,
|
||||
0x08, 0x02, 0xaf, 0xee, 0xbc, 0xf9, 0xcd, 0x7b, 0xb0, 0x14, 0x68, 0x8c, 0xd2, 0xc8, 0xdb, 0xaf,
|
||||
0x7f, 0x58, 0x8e, 0x49, 0xd9, 0x67, 0x5f, 0x5e, 0x4d, 0xee, 0xe3, 0xa7, 0xf4, 0xcc, 0x2a, 0xf5,
|
||||
0x5a, 0xa3, 0x5e, 0xdb, 0x2d, 0xc7, 0xa5, 0xa5, 0x67, 0x5f, 0x5e, 0xcd, 0x2a, 0x98, 0xf5, 0x57,
|
||||
0x6e, 0x36, 0x60, 0x39, 0xf8, 0x55, 0xc2, 0x19, 0x04, 0x41, 0xf1, 0xfe, 0xc3, 0x83, 0xbd, 0x9d,
|
||||
0xda, 0x76, 0xab, 0xae, 0x3e, 0x6a, 0xb6, 0xea, 0xe5, 0x38, 0xba, 0x00, 0xe7, 0xf6, 0x76, 0xde,
|
||||
0x6b, 0xb4, 0xd4, 0xda, 0xde, 0x4e, 0x7d, 0xbf, 0xa5, 0x6e, 0xb7, 0x5a, 0xdb, 0xb5, 0xdd, 0x72,
|
||||
0x62, 0xeb, 0x73, 0x80, 0xd2, 0x76, 0xb5, 0xb6, 0x43, 0xb3, 0x9f, 0xd1, 0xd1, 0x44, 0xff, 0x2a,
|
||||
0xc5, 0x8a, 0xeb, 0x99, 0x37, 0xb2, 0xd2, 0xec, 0xf6, 0x1d, 0x7a, 0x00, 0x69, 0x56, 0x77, 0xa3,
|
||||
0xd9, 0x57, 0xb4, 0xd2, 0x9c, 0x7e, 0x1e, 0xfd, 0x31, 0x2c, 0x3c, 0x66, 0xde, 0xd9, 0x4a, 0xb3,
|
||||
0xdb, 0x7b, 0x48, 0x81, 0xbc, 0x5f, 0x38, 0xcf, 0xbf, 0xc3, 0x95, 0x16, 0x68, 0xf9, 0x51, 0x9b,
|
||||
0x7e, 0x59, 0x30, 0xff, 0x4e, 0x53, 0x5a, 0x00, 0xc0, 0xd0, 0x1e, 0x64, 0xdd, 0x82, 0x6b, 0xde,
|
||||
0x2d, 0xab, 0x34, 0xb7, 0x1d, 0x47, 0x3f, 0x01, 0x2f, 0x8c, 0x67, 0x5f, 0x19, 0x4b, 0x73, 0x7a,
|
||||
0x8b, 0x68, 0x07, 0x32, 0x82, 0xeb, 0xce, 0xb9, 0x39, 0x95, 0xe6, 0xb5, 0xd7, 0xa8, 0xd3, 0xfc,
|
||||
0x8e, 0xc3, 0xfc, 0x8b, 0x70, 0x69, 0x81, 0xb6, 0x29, 0x7a, 0x08, 0x10, 0x28, 0x83, 0x17, 0xb8,
|
||||
0xe1, 0x96, 0x16, 0x69, 0x87, 0xa2, 0x26, 0xe4, 0xbc, 0x72, 0x67, 0xee, 0x7d, 0xb3, 0x34, 0xbf,
|
||||
0x2f, 0x89, 0x1e, 0x43, 0x21, 0xcc, 0xf3, 0x17, 0xbb, 0x45, 0x96, 0x16, 0x6c, 0x38, 0x52, 0xfb,
|
||||
0x61, 0xd2, 0xbf, 0xd8, 0xad, 0xb2, 0xb4, 0x60, 0xff, 0x11, 0x7d, 0x02, 0x2b, 0x93, 0xa4, 0x7c,
|
||||
0xf1, 0x4b, 0x66, 0xe9, 0x0c, 0x1d, 0x49, 0x34, 0x00, 0x34, 0x85, 0xcc, 0x9f, 0xe1, 0xce, 0x59,
|
||||
0x3a, 0x4b, 0x83, 0xb2, 0x5a, 0xff, 0xea, 0xf9, 0x5a, 0xfc, 0xeb, 0xe7, 0x6b, 0xf1, 0x7f, 0x3c,
|
||||
0x5f, 0x8b, 0x7f, 0xf1, 0x62, 0x2d, 0xf6, 0xf5, 0x8b, 0xb5, 0xd8, 0xdf, 0x5e, 0xac, 0xc5, 0x7e,
|
||||
0xf9, 0x5a, 0xd7, 0x20, 0xbd, 0x51, 0x7b, 0xa3, 0x63, 0x0d, 0x36, 0x83, 0x7f, 0x88, 0x99, 0xf6,
|
||||
0x27, 0x9d, 0x76, 0x86, 0x25, 0xaa, 0x5b, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x3b, 0xae, 0x48,
|
||||
0xb4, 0xc4, 0x23, 0x00, 0x00,
|
||||
// 2782 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x5a, 0x4b, 0x77, 0x23, 0xc5,
|
||||
0xf5, 0xd7, 0x5b, 0xea, 0x2b, 0xeb, 0xe1, 0x1a, 0x33, 0x08, 0x31, 0xd8, 0x43, 0x73, 0xe0, 0x0f,
|
||||
0x03, 0xd8, 0x7f, 0xcc, 0x81, 0x40, 0x20, 0x01, 0x4b, 0x68, 0x90, 0xb1, 0xb1, 0x9c, 0xb6, 0x66,
|
||||
0xc8, 0x8b, 0x69, 0x5a, 0xea, 0xb2, 0xd4, 0x8c, 0xd4, 0xdd, 0x74, 0x97, 0x8c, 0xc5, 0x32, 0x8f,
|
||||
0x0d, 0xd9, 0x90, 0x5d, 0x36, 0x7c, 0x8f, 0xac, 0xb2, 0xc9, 0x86, 0x73, 0xb2, 0x61, 0x99, 0x45,
|
||||
0x0e, 0xc9, 0x99, 0x39, 0xd9, 0xe4, 0x0b, 0x64, 0x95, 0x93, 0x9c, 0x7a, 0xf4, 0x4b, 0x52, 0x4b,
|
||||
0x32, 0x64, 0x97, 0x5d, 0xd5, 0xed, 0x7b, 0x6f, 0xab, 0xaa, 0xeb, 0xfe, 0xee, 0xef, 0xde, 0x12,
|
||||
0x3c, 0x4e, 0xb0, 0xa9, 0x63, 0x67, 0x6c, 0x98, 0x64, 0x4f, 0xeb, 0xf5, 0x8d, 0x3d, 0x32, 0xb5,
|
||||
0xb1, 0xbb, 0x6b, 0x3b, 0x16, 0xb1, 0x50, 0x25, 0x78, 0xb8, 0x4b, 0x1f, 0xd6, 0x9f, 0x08, 0x69,
|
||||
0xf7, 0x9d, 0xa9, 0x4d, 0xac, 0x3d, 0xdb, 0xb1, 0xac, 0x73, 0xae, 0x5f, 0xbf, 0x11, 0x7a, 0xcc,
|
||||
0xfc, 0x84, 0xbd, 0x45, 0x9e, 0x0a, 0xe3, 0xfb, 0x78, 0xea, 0x3d, 0x7d, 0x62, 0xce, 0xd6, 0xd6,
|
||||
0x1c, 0x6d, 0xec, 0x3d, 0xde, 0x19, 0x58, 0xd6, 0x60, 0x84, 0xf7, 0xd8, 0xac, 0x37, 0x39, 0xdf,
|
||||
0x23, 0xc6, 0x18, 0xbb, 0x44, 0x1b, 0xdb, 0x42, 0x61, 0x6b, 0x60, 0x0d, 0x2c, 0x36, 0xdc, 0xa3,
|
||||
0x23, 0x2e, 0x95, 0x7f, 0x5b, 0x80, 0xbc, 0x82, 0x3f, 0x99, 0x60, 0x97, 0xa0, 0x7d, 0xc8, 0xe0,
|
||||
0xfe, 0xd0, 0xaa, 0x25, 0x6f, 0x26, 0x9f, 0x2d, 0xee, 0xdf, 0xd8, 0x9d, 0x59, 0xdc, 0xae, 0xd0,
|
||||
0x6b, 0xf5, 0x87, 0x56, 0x3b, 0xa1, 0x30, 0x5d, 0xf4, 0x0a, 0x64, 0xcf, 0x47, 0x13, 0x77, 0x58,
|
||||
0x4b, 0x31, 0xa3, 0x27, 0xe2, 0x8c, 0x6e, 0x53, 0xa5, 0x76, 0x42, 0xe1, 0xda, 0xf4, 0x55, 0x86,
|
||||
0x79, 0x6e, 0xd5, 0xd2, 0xcb, 0x5f, 0x75, 0x68, 0x9e, 0xb3, 0x57, 0x51, 0x5d, 0xd4, 0x00, 0x70,
|
||||
0x31, 0x51, 0x2d, 0x9b, 0x18, 0x96, 0x59, 0xcb, 0x30, 0xcb, 0x27, 0xe3, 0x2c, 0xcf, 0x30, 0xe9,
|
||||
0x30, 0xc5, 0x76, 0x42, 0x91, 0x5c, 0x6f, 0x42, 0x7d, 0x18, 0xa6, 0x41, 0xd4, 0xfe, 0x50, 0x33,
|
||||
0xcc, 0x5a, 0x76, 0xb9, 0x8f, 0x43, 0xd3, 0x20, 0x4d, 0xaa, 0x48, 0x7d, 0x18, 0xde, 0x84, 0x2e,
|
||||
0xf9, 0x93, 0x09, 0x76, 0xa6, 0xb5, 0xdc, 0xf2, 0x25, 0xff, 0x88, 0x2a, 0xd1, 0x25, 0x33, 0x6d,
|
||||
0xd4, 0x82, 0x62, 0x0f, 0x0f, 0x0c, 0x53, 0xed, 0x8d, 0xac, 0xfe, 0xfd, 0x5a, 0x9e, 0x19, 0xcb,
|
||||
0x71, 0xc6, 0x0d, 0xaa, 0xda, 0xa0, 0x9a, 0xed, 0x84, 0x02, 0x3d, 0x7f, 0x86, 0xde, 0x84, 0x42,
|
||||
0x7f, 0x88, 0xfb, 0xf7, 0x55, 0x72, 0x59, 0x2b, 0x30, 0x1f, 0x3b, 0x71, 0x3e, 0x9a, 0x54, 0xaf,
|
||||
0x7b, 0xd9, 0x4e, 0x28, 0xf9, 0x3e, 0x1f, 0xd2, 0xf5, 0xeb, 0x78, 0x64, 0x5c, 0x60, 0x87, 0xda,
|
||||
0x4b, 0xcb, 0xd7, 0xff, 0x0e, 0xd7, 0x64, 0x1e, 0x24, 0xdd, 0x9b, 0xa0, 0xb7, 0x40, 0xc2, 0xa6,
|
||||
0x2e, 0x96, 0x01, 0xcc, 0xc5, 0xcd, 0xd8, 0xb3, 0x62, 0xea, 0xde, 0x22, 0x0a, 0x58, 0x8c, 0xd1,
|
||||
0x6b, 0x90, 0xeb, 0x5b, 0xe3, 0xb1, 0x41, 0x6a, 0x45, 0x66, 0xbd, 0x1d, 0xbb, 0x00, 0xa6, 0xd5,
|
||||
0x4e, 0x28, 0x42, 0x1f, 0x9d, 0x40, 0x79, 0x64, 0xb8, 0x44, 0x75, 0x4d, 0xcd, 0x76, 0x87, 0x16,
|
||||
0x71, 0x6b, 0x1b, 0xcc, 0xc3, 0xd3, 0x71, 0x1e, 0x8e, 0x0d, 0x97, 0x9c, 0x79, 0xca, 0xed, 0x84,
|
||||
0x52, 0x1a, 0x85, 0x05, 0xd4, 0x9f, 0x75, 0x7e, 0x8e, 0x1d, 0xdf, 0x61, 0xad, 0xb4, 0xdc, 0x5f,
|
||||
0x87, 0x6a, 0x7b, 0xf6, 0xd4, 0x9f, 0x15, 0x16, 0xa0, 0x9f, 0xc1, 0xb5, 0x91, 0xa5, 0xe9, 0xbe,
|
||||
0x3b, 0xb5, 0x3f, 0x9c, 0x98, 0xf7, 0x6b, 0x65, 0xe6, 0xf4, 0xb9, 0xd8, 0x1f, 0x69, 0x69, 0xba,
|
||||
0xe7, 0xa2, 0x49, 0x0d, 0xda, 0x09, 0x65, 0x73, 0x34, 0x2b, 0x44, 0xf7, 0x60, 0x4b, 0xb3, 0xed,
|
||||
0xd1, 0x74, 0xd6, 0x7b, 0x85, 0x79, 0xbf, 0x15, 0xe7, 0xfd, 0x80, 0xda, 0xcc, 0xba, 0x47, 0xda,
|
||||
0x9c, 0xb4, 0x91, 0x87, 0xec, 0x85, 0x36, 0x9a, 0x60, 0xf9, 0xff, 0xa0, 0x18, 0x0a, 0x75, 0x54,
|
||||
0x83, 0xfc, 0x18, 0xbb, 0xae, 0x36, 0xc0, 0x0c, 0x19, 0x24, 0xc5, 0x9b, 0xca, 0x65, 0xd8, 0x08,
|
||||
0x87, 0xb7, 0x3c, 0xf6, 0x0d, 0x69, 0xe0, 0x52, 0xc3, 0x0b, 0xec, 0xb8, 0x34, 0x5a, 0x85, 0xa1,
|
||||
0x98, 0xa2, 0xa7, 0xa0, 0xc4, 0x8e, 0x8f, 0xea, 0x3d, 0xa7, 0xe8, 0x91, 0x51, 0x36, 0x98, 0xf0,
|
||||
0xae, 0x50, 0xda, 0x81, 0xa2, 0xbd, 0x6f, 0xfb, 0x2a, 0x69, 0xa6, 0x02, 0xf6, 0xbe, 0x2d, 0x14,
|
||||
0xe4, 0xef, 0x43, 0x75, 0x36, 0xda, 0x51, 0x15, 0xd2, 0xf7, 0xf1, 0x54, 0xbc, 0x8f, 0x0e, 0xd1,
|
||||
0x96, 0x58, 0x16, 0x7b, 0x87, 0xa4, 0x88, 0x35, 0xfe, 0x29, 0xe5, 0x1b, 0xfb, 0x61, 0x8e, 0x5e,
|
||||
0x83, 0x0c, 0x45, 0x4d, 0x01, 0x80, 0xf5, 0x5d, 0x0e, 0xa9, 0xbb, 0x1e, 0xa4, 0xee, 0x76, 0x3d,
|
||||
0x48, 0x6d, 0x14, 0xbe, 0xfa, 0x66, 0x27, 0xf1, 0xc5, 0x5f, 0x77, 0x92, 0x0a, 0xb3, 0x40, 0x8f,
|
||||
0xd1, 0xa8, 0xd4, 0x0c, 0x53, 0x35, 0x74, 0xf1, 0x9e, 0x3c, 0x9b, 0x1f, 0xea, 0xe8, 0x08, 0xaa,
|
||||
0x7d, 0xcb, 0x74, 0xb1, 0xe9, 0x4e, 0x5c, 0x95, 0x43, 0xb6, 0x80, 0xbd, 0xf9, 0xa8, 0x69, 0x7a,
|
||||
0x8a, 0xa7, 0x4c, 0x4f, 0xa9, 0xf4, 0xa3, 0x02, 0x74, 0x1b, 0xe0, 0x42, 0x1b, 0x19, 0xba, 0x46,
|
||||
0x2c, 0xc7, 0xad, 0x65, 0x6e, 0xa6, 0x17, 0xba, 0xb9, 0xeb, 0xa9, 0xdc, 0xb1, 0x75, 0x8d, 0xe0,
|
||||
0x46, 0x86, 0xfe, 0x5a, 0x25, 0x64, 0x89, 0x9e, 0x81, 0x8a, 0x66, 0xdb, 0xaa, 0x4b, 0x34, 0x82,
|
||||
0xd5, 0xde, 0x94, 0x60, 0x97, 0x81, 0xe1, 0x86, 0x52, 0xd2, 0x6c, 0xfb, 0x8c, 0x4a, 0x1b, 0x54,
|
||||
0x88, 0x9e, 0x86, 0x32, 0x05, 0x3e, 0x43, 0x1b, 0xa9, 0x43, 0x6c, 0x0c, 0x86, 0x84, 0x81, 0x5e,
|
||||
0x5a, 0x29, 0x09, 0x69, 0x9b, 0x09, 0x65, 0xdd, 0x3f, 0x08, 0x0c, 0xf4, 0x10, 0x82, 0x8c, 0xae,
|
||||
0x11, 0x8d, 0x6d, 0xe4, 0x86, 0xc2, 0xc6, 0x54, 0x66, 0x6b, 0x64, 0x28, 0xb6, 0x87, 0x8d, 0xd1,
|
||||
0x75, 0xc8, 0x09, 0xb7, 0x69, 0xe6, 0x56, 0xcc, 0xe8, 0x37, 0xb3, 0x1d, 0xeb, 0x02, 0x33, 0x94,
|
||||
0x2f, 0x28, 0x7c, 0x22, 0xff, 0x2a, 0x05, 0x9b, 0x73, 0xf0, 0x48, 0xfd, 0x0e, 0x35, 0x77, 0xe8,
|
||||
0xbd, 0x8b, 0x8e, 0xd1, 0xab, 0xd4, 0xaf, 0xa6, 0x63, 0x47, 0xa4, 0xa5, 0x5a, 0x78, 0x8b, 0x78,
|
||||
0xca, 0x6d, 0xb3, 0xe7, 0x62, 0x6b, 0x84, 0x36, 0xea, 0x40, 0x75, 0xa4, 0xb9, 0x44, 0xe5, 0x70,
|
||||
0xa3, 0x86, 0x52, 0xd4, 0x3c, 0xc8, 0x1e, 0x6b, 0x1e, 0x40, 0xd1, 0xc3, 0x2e, 0x1c, 0x95, 0x47,
|
||||
0x11, 0x29, 0x52, 0x60, 0xab, 0x37, 0xfd, 0x4c, 0x33, 0x89, 0x61, 0x62, 0x75, 0xee, 0xcb, 0x3d,
|
||||
0x36, 0xe7, 0xb4, 0x75, 0x61, 0xe8, 0xd8, 0xec, 0x7b, 0x9f, 0xec, 0x9a, 0x6f, 0xec, 0x7f, 0x52,
|
||||
0x57, 0x56, 0xa0, 0x1c, 0x05, 0x78, 0x54, 0x86, 0x14, 0xb9, 0x14, 0x1b, 0x90, 0x22, 0x97, 0xe8,
|
||||
0xff, 0x21, 0x43, 0x17, 0xc9, 0x16, 0x5f, 0x5e, 0x90, 0x5d, 0x85, 0x5d, 0x77, 0x6a, 0x63, 0x85,
|
||||
0x69, 0xca, 0xb2, 0x1f, 0x0d, 0x3e, 0xe8, 0xcf, 0x7a, 0x95, 0x9f, 0x83, 0xca, 0x0c, 0xaa, 0x87,
|
||||
0xbe, 0x5f, 0x32, 0xfc, 0xfd, 0xe4, 0x0a, 0x94, 0x22, 0x10, 0x2e, 0x5f, 0x87, 0xad, 0x45, 0x88,
|
||||
0x2c, 0x0f, 0x7d, 0x79, 0x04, 0x59, 0xd1, 0x2b, 0x50, 0xf0, 0x21, 0x99, 0x47, 0xe3, 0xfc, 0x5e,
|
||||
0x79, 0xca, 0x8a, 0xaf, 0x4a, 0xc3, 0x90, 0x1e, 0x6b, 0x76, 0x1e, 0x52, 0xec, 0x87, 0xe7, 0x35,
|
||||
0xdb, 0x6e, 0x6b, 0xee, 0x50, 0xfe, 0x08, 0x6a, 0x71, 0x70, 0x3b, 0xb3, 0x8c, 0x8c, 0x7f, 0x0c,
|
||||
0xaf, 0x43, 0xee, 0xdc, 0x72, 0xc6, 0x1a, 0x61, 0xce, 0x4a, 0x8a, 0x98, 0xd1, 0xe3, 0xc9, 0xa1,
|
||||
0x37, 0xcd, 0xc4, 0x7c, 0x22, 0xab, 0xf0, 0x58, 0x2c, 0xe4, 0x52, 0x13, 0xc3, 0xd4, 0x31, 0xdf,
|
||||
0xcf, 0x92, 0xc2, 0x27, 0x81, 0x23, 0xfe, 0x63, 0xf9, 0x84, 0xbe, 0xd6, 0x65, 0x6b, 0x65, 0xfe,
|
||||
0x25, 0x45, 0xcc, 0xe4, 0xbf, 0x17, 0xa0, 0xa0, 0x60, 0xd7, 0xa6, 0x98, 0x80, 0x1a, 0x20, 0xe1,
|
||||
0xcb, 0x3e, 0xe6, 0x64, 0x28, 0x19, 0x4b, 0x26, 0xb8, 0x76, 0xcb, 0xd3, 0xa4, 0x99, 0xdc, 0x37,
|
||||
0x43, 0x2f, 0x0b, 0xc2, 0x17, 0xcf, 0xdd, 0x84, 0x79, 0x98, 0xf1, 0xbd, 0xea, 0x31, 0xbe, 0x74,
|
||||
0x6c, 0xf2, 0xe6, 0x56, 0x33, 0x94, 0xef, 0x65, 0x41, 0xf9, 0x32, 0x2b, 0x5e, 0x16, 0xe1, 0x7c,
|
||||
0xcd, 0x08, 0xe7, 0xcb, 0xae, 0x58, 0x66, 0x0c, 0xe9, 0x6b, 0x46, 0x48, 0x5f, 0x6e, 0x85, 0x93,
|
||||
0x18, 0xd6, 0xf7, 0xaa, 0xc7, 0xfa, 0xf2, 0x2b, 0x96, 0x3d, 0x43, 0xfb, 0x6e, 0x47, 0x69, 0x1f,
|
||||
0xa7, 0x6c, 0x4f, 0xc5, 0x5a, 0xc7, 0xf2, 0xbe, 0x1f, 0x84, 0x78, 0x9f, 0x14, 0x4b, 0xba, 0xb8,
|
||||
0x93, 0x05, 0xc4, 0xaf, 0x19, 0x21, 0x7e, 0xb0, 0x62, 0x0f, 0x62, 0x98, 0xdf, 0xdb, 0x61, 0xe6,
|
||||
0x57, 0x8c, 0x25, 0x8f, 0xe2, 0xd0, 0x2c, 0xa2, 0x7e, 0xaf, 0xfb, 0xd4, 0x6f, 0x23, 0x96, 0xbb,
|
||||
0x8a, 0x35, 0xcc, 0x72, 0xbf, 0xce, 0x1c, 0xf7, 0xe3, 0x5c, 0xed, 0x99, 0x58, 0x17, 0x2b, 0xc8,
|
||||
0x5f, 0x67, 0x8e, 0xfc, 0x95, 0x57, 0x38, 0x5c, 0xc1, 0xfe, 0x7e, 0xbe, 0x98, 0xfd, 0xc5, 0xf3,
|
||||
0x33, 0xf1, 0x33, 0xd7, 0xa3, 0x7f, 0x6a, 0x0c, 0xfd, 0xab, 0x32, 0xf7, 0xcf, 0xc7, 0xba, 0xbf,
|
||||
0x3a, 0xff, 0x7b, 0x8e, 0xa6, 0xd9, 0x19, 0xe0, 0xa0, 0x50, 0x85, 0x1d, 0xc7, 0x72, 0x04, 0xb5,
|
||||
0xe2, 0x13, 0xf9, 0x59, 0x9a, 0xf8, 0x03, 0x90, 0x58, 0xc2, 0x15, 0x59, 0x4a, 0x08, 0x01, 0x83,
|
||||
0xfc, 0xfb, 0x64, 0x60, 0xcb, 0x72, 0x65, 0x98, 0x34, 0x48, 0x82, 0x34, 0x84, 0x28, 0x64, 0x2a,
|
||||
0x4a, 0x21, 0x77, 0xa0, 0x48, 0xa1, 0x7e, 0x86, 0x1d, 0x6a, 0xb6, 0xc7, 0x0e, 0xd1, 0x2d, 0xd8,
|
||||
0x64, 0xb9, 0x9c, 0x13, 0x4d, 0x81, 0xef, 0x19, 0x96, 0xa6, 0x2a, 0xf4, 0x01, 0x3f, 0x9c, 0x1c,
|
||||
0xe8, 0x5f, 0x84, 0x6b, 0x21, 0x5d, 0x3f, 0x85, 0x70, 0x4a, 0x54, 0xf5, 0xb5, 0x0f, 0x44, 0x2e,
|
||||
0x79, 0x3f, 0xd8, 0xa0, 0x80, 0x79, 0x22, 0xc8, 0xf4, 0x2d, 0x1d, 0x0b, 0x80, 0x67, 0x63, 0xca,
|
||||
0x46, 0x47, 0xd6, 0x40, 0xc0, 0x38, 0x1d, 0x52, 0x2d, 0x1f, 0x05, 0x25, 0x0e, 0x72, 0xf2, 0x1f,
|
||||
0x93, 0x81, 0xbf, 0x80, 0x8c, 0x2e, 0xe2, 0x8d, 0xc9, 0xff, 0x0e, 0x6f, 0x4c, 0x7d, 0x6b, 0xde,
|
||||
0x18, 0x4e, 0xb0, 0xe9, 0x68, 0x82, 0xfd, 0x67, 0x32, 0xf8, 0xc2, 0x3e, 0x0b, 0xfc, 0x76, 0x3b,
|
||||
0x12, 0x64, 0xcb, 0x2c, 0xfb, 0x5e, 0x22, 0x5b, 0x0a, 0x6e, 0x9f, 0x63, 0xef, 0x8d, 0x72, 0xfb,
|
||||
0x3c, 0xcf, 0x9f, 0x6c, 0x82, 0x5e, 0x03, 0x89, 0x35, 0x5d, 0x54, 0xcb, 0x76, 0x05, 0xe0, 0x3e,
|
||||
0x1e, 0x5e, 0x2b, 0xef, 0xad, 0xec, 0x9e, 0x52, 0x9d, 0x8e, 0xed, 0x2a, 0x05, 0x5b, 0x8c, 0x42,
|
||||
0x44, 0x40, 0x8a, 0xf0, 0xd1, 0x1b, 0x20, 0xd1, 0x5f, 0xef, 0xda, 0x5a, 0x1f, 0x33, 0xf0, 0x94,
|
||||
0x94, 0x40, 0x20, 0xdf, 0x03, 0x34, 0x0f, 0xdf, 0xa8, 0x0d, 0x39, 0x7c, 0x81, 0x4d, 0x42, 0xbf,
|
||||
0x1a, 0xdd, 0xee, 0xeb, 0x0b, 0xc8, 0x1e, 0x36, 0x49, 0xa3, 0x46, 0x37, 0xf9, 0x1f, 0xdf, 0xec,
|
||||
0x54, 0xb9, 0xf6, 0x0b, 0xd6, 0xd8, 0x20, 0x78, 0x6c, 0x93, 0xa9, 0x22, 0xec, 0xe5, 0xbf, 0xa4,
|
||||
0x28, 0xf3, 0x8a, 0x40, 0xfb, 0xc2, 0xbd, 0xf5, 0x02, 0x28, 0x15, 0x62, 0xdd, 0xeb, 0xed, 0xf7,
|
||||
0x36, 0xc0, 0x40, 0x73, 0xd5, 0x4f, 0x35, 0x93, 0x60, 0x5d, 0x6c, 0x7a, 0x48, 0x82, 0xea, 0x50,
|
||||
0xa0, 0xb3, 0x89, 0x8b, 0x75, 0x51, 0x00, 0xf8, 0xf3, 0xd0, 0x3a, 0xf3, 0xdf, 0x6d, 0x9d, 0xd1,
|
||||
0x5d, 0x2e, 0xcc, 0xec, 0x72, 0x88, 0x15, 0x49, 0x61, 0x56, 0x44, 0x7f, 0x9b, 0xed, 0x18, 0x96,
|
||||
0x63, 0x90, 0x29, 0xfb, 0x34, 0x69, 0xc5, 0x9f, 0xd3, 0x3a, 0x73, 0x8c, 0xc7, 0xb6, 0x65, 0x8d,
|
||||
0x54, 0x0e, 0x5e, 0x45, 0x66, 0xba, 0x21, 0x84, 0x2d, 0x86, 0x61, 0xbf, 0x4e, 0x05, 0xe1, 0x17,
|
||||
0xb0, 0xdf, 0xff, 0xb9, 0x0d, 0x96, 0x7f, 0xc3, 0x4a, 0xe2, 0x68, 0xf2, 0x46, 0x67, 0xb0, 0xe9,
|
||||
0x87, 0xbf, 0x3a, 0x61, 0xb0, 0xe0, 0x1d, 0xe8, 0x75, 0xf1, 0xa3, 0x7a, 0x11, 0x15, 0xbb, 0xe8,
|
||||
0xc7, 0xf0, 0xe8, 0x0c, 0xb4, 0xf9, 0xae, 0x53, 0x6b, 0x22, 0xdc, 0x23, 0x51, 0x84, 0xf3, 0x3c,
|
||||
0x07, 0x7b, 0x95, 0xfe, 0x8e, 0x41, 0x77, 0x48, 0xab, 0xac, 0x30, 0x15, 0x59, 0xf8, 0xf5, 0x9f,
|
||||
0x82, 0x92, 0x83, 0x09, 0x2d, 0xfc, 0x23, 0x75, 0xec, 0x06, 0x17, 0x8a, 0xea, 0xf8, 0x14, 0x1e,
|
||||
0x59, 0x48, 0x49, 0xd0, 0xf7, 0x40, 0x0a, 0xd8, 0x4c, 0x32, 0xa6, 0x24, 0xf4, 0xcb, 0x9c, 0x40,
|
||||
0x57, 0xfe, 0x43, 0x32, 0x70, 0x19, 0x2d, 0x9c, 0x5a, 0x90, 0x73, 0xb0, 0x3b, 0x19, 0xf1, 0x52,
|
||||
0xa6, 0xbc, 0xff, 0xe2, 0x7a, 0x64, 0x86, 0x4a, 0x27, 0x23, 0xa2, 0x08, 0x63, 0xf9, 0x1e, 0xe4,
|
||||
0xb8, 0x04, 0x15, 0x21, 0x7f, 0xe7, 0xe4, 0xe8, 0xa4, 0xf3, 0xc1, 0x49, 0x35, 0x81, 0x00, 0x72,
|
||||
0x07, 0xcd, 0x66, 0xeb, 0xb4, 0x5b, 0x4d, 0x22, 0x09, 0xb2, 0x07, 0x8d, 0x8e, 0xd2, 0xad, 0xa6,
|
||||
0xa8, 0x58, 0x69, 0xbd, 0xd7, 0x6a, 0x76, 0xab, 0x69, 0xb4, 0x09, 0x25, 0x3e, 0x56, 0x6f, 0x77,
|
||||
0x94, 0xf7, 0x0f, 0xba, 0xd5, 0x4c, 0x48, 0x74, 0xd6, 0x3a, 0x79, 0xa7, 0xa5, 0x54, 0xb3, 0xf2,
|
||||
0x4b, 0xb4, 0x56, 0x8a, 0xa1, 0x3f, 0x41, 0x55, 0x94, 0x0c, 0x55, 0x45, 0xf2, 0xef, 0x52, 0x50,
|
||||
0x8f, 0xe7, 0x34, 0xe8, 0xbd, 0x99, 0x85, 0xef, 0x5f, 0x81, 0x10, 0xcd, 0xac, 0x1e, 0x3d, 0x0d,
|
||||
0x65, 0x07, 0x9f, 0x63, 0xd2, 0x1f, 0x72, 0x8e, 0xc5, 0x33, 0x66, 0x49, 0x29, 0x09, 0x29, 0x33,
|
||||
0x72, 0xb9, 0xda, 0xc7, 0xb8, 0x4f, 0x54, 0x0e, 0x45, 0xfc, 0xd0, 0x49, 0x54, 0x8d, 0x4a, 0xcf,
|
||||
0xb8, 0x50, 0xfe, 0xe8, 0x4a, 0x7b, 0x29, 0x41, 0x56, 0x69, 0x75, 0x95, 0x9f, 0x54, 0xd3, 0x08,
|
||||
0x41, 0x99, 0x0d, 0xd5, 0xb3, 0x93, 0x83, 0xd3, 0xb3, 0x76, 0x87, 0xee, 0xe5, 0x35, 0xa8, 0x78,
|
||||
0x7b, 0xe9, 0x09, 0xb3, 0xf2, 0xbf, 0x93, 0x50, 0x99, 0x09, 0x10, 0xb4, 0x0f, 0x59, 0xce, 0xd3,
|
||||
0xe3, 0xba, 0xf9, 0x2c, 0xbe, 0x45, 0x34, 0x71, 0x55, 0xf4, 0x26, 0x14, 0xb0, 0x68, 0x40, 0x2c,
|
||||
0x0a, 0x44, 0xde, 0x38, 0xf1, 0x5a, 0x14, 0xc2, 0xd4, 0xb7, 0x40, 0x6f, 0x81, 0xe4, 0x47, 0xba,
|
||||
0x28, 0x0e, 0x9f, 0x9c, 0x37, 0xf7, 0x31, 0x42, 0xd8, 0x07, 0x36, 0xe8, 0xf5, 0x80, 0xec, 0x65,
|
||||
0xe6, 0xab, 0x03, 0x61, 0xce, 0x15, 0x84, 0xb1, 0xa7, 0x2f, 0x37, 0xa1, 0x18, 0x5a, 0x0f, 0x7a,
|
||||
0x1c, 0xa4, 0xb1, 0x76, 0x29, 0x1a, 0x5b, 0xbc, 0x35, 0x51, 0x18, 0x6b, 0x97, 0xbc, 0xa7, 0xf5,
|
||||
0x28, 0xe4, 0xe9, 0xc3, 0x81, 0xc6, 0xd1, 0x26, 0xad, 0xe4, 0xc6, 0xda, 0xe5, 0xbb, 0x9a, 0x2b,
|
||||
0x7f, 0x08, 0xe5, 0x68, 0x53, 0x87, 0x9e, 0x44, 0xc7, 0x9a, 0x98, 0x3a, 0xf3, 0x91, 0x55, 0xf8,
|
||||
0x04, 0xbd, 0x02, 0xd9, 0x0b, 0x8b, 0x83, 0xd5, 0xe2, 0x90, 0xbd, 0x6b, 0x11, 0x1c, 0x6a, 0x0a,
|
||||
0x71, 0x6d, 0xf9, 0x33, 0xc8, 0x32, 0xf0, 0xa1, 0x40, 0xc2, 0xda, 0x33, 0x82, 0xe8, 0xd2, 0x31,
|
||||
0xfa, 0x10, 0x40, 0x23, 0xc4, 0x31, 0x7a, 0x93, 0xc0, 0xf1, 0xce, 0x62, 0xf0, 0x3a, 0xf0, 0xf4,
|
||||
0x1a, 0x37, 0x04, 0x8a, 0x6d, 0x05, 0xa6, 0x21, 0x24, 0x0b, 0x39, 0x94, 0x4f, 0xa0, 0x1c, 0xb5,
|
||||
0x0d, 0x37, 0x4a, 0x37, 0x16, 0x34, 0x4a, 0x7d, 0x32, 0xe5, 0x53, 0xb1, 0x34, 0x6f, 0xc5, 0xb1,
|
||||
0x89, 0xfc, 0x79, 0x12, 0x0a, 0xdd, 0x4b, 0x71, 0xac, 0x63, 0xba, 0x40, 0x81, 0x69, 0x2a, 0xdc,
|
||||
0xf3, 0xe0, 0x6d, 0xa5, 0xb4, 0xdf, 0xac, 0x7a, 0xdb, 0x0f, 0xdc, 0xcc, 0xba, 0x55, 0xa9, 0xd7,
|
||||
0xb5, 0x13, 0x60, 0xf5, 0x06, 0x48, 0xfe, 0xa9, 0xa2, 0x15, 0x83, 0xa6, 0xeb, 0x0e, 0x76, 0x5d,
|
||||
0xb1, 0x36, 0x6f, 0xca, 0x9a, 0x8a, 0xd6, 0xa7, 0xa2, 0xab, 0x92, 0x56, 0xf8, 0x44, 0xd6, 0xa1,
|
||||
0x32, 0x93, 0xb6, 0xd0, 0x1b, 0x90, 0xb7, 0x27, 0x3d, 0xd5, 0xdb, 0x9e, 0x99, 0xe0, 0xf1, 0xd8,
|
||||
0xe3, 0xa4, 0x37, 0x32, 0xfa, 0x47, 0x78, 0xea, 0xfd, 0x18, 0x7b, 0xd2, 0x3b, 0xe2, 0xbb, 0xc8,
|
||||
0xdf, 0x92, 0x0a, 0xbf, 0xe5, 0x02, 0x0a, 0xde, 0xa1, 0x40, 0x3f, 0x0c, 0xc7, 0x89, 0xd7, 0x6a,
|
||||
0x8e, 0x4d, 0xa5, 0xc2, 0x7d, 0x28, 0x4c, 0x6e, 0xc1, 0xa6, 0x6b, 0x0c, 0x4c, 0xac, 0xab, 0x41,
|
||||
0xcd, 0xc2, 0xde, 0x56, 0x50, 0x2a, 0xfc, 0xc1, 0xb1, 0x57, 0xb0, 0xc8, 0xff, 0x4a, 0x42, 0xc1,
|
||||
0x0b, 0x58, 0xf4, 0x52, 0xe8, 0xdc, 0x95, 0x17, 0x74, 0x60, 0x3c, 0xc5, 0xa0, 0x2f, 0x18, 0xfd,
|
||||
0xad, 0xa9, 0xab, 0xff, 0xd6, 0xb8, 0x06, 0xaf, 0xd7, 0x69, 0xcf, 0x5c, 0xb9, 0xd3, 0xfe, 0x02,
|
||||
0x20, 0x62, 0x11, 0x6d, 0xa4, 0x5e, 0x58, 0xc4, 0x30, 0x07, 0x2a, 0xdf, 0x6c, 0xce, 0xa8, 0xaa,
|
||||
0xec, 0xc9, 0x5d, 0xf6, 0xe0, 0x94, 0xed, 0xfb, 0x2f, 0x92, 0x50, 0xf0, 0x73, 0xe3, 0x55, 0xdb,
|
||||
0x7c, 0xd7, 0x21, 0x27, 0xe0, 0x9f, 0xf7, 0xf9, 0xc4, 0xcc, 0xef, 0x38, 0x67, 0x42, 0x1d, 0xe7,
|
||||
0x3a, 0x14, 0xc6, 0x98, 0x68, 0x8c, 0x20, 0xf0, 0xb2, 0xd1, 0x9f, 0xdf, 0x7a, 0x1d, 0x8a, 0xa1,
|
||||
0x8e, 0x2b, 0x8d, 0xbc, 0x93, 0xd6, 0x07, 0xd5, 0x44, 0x3d, 0xff, 0xf9, 0x97, 0x37, 0xd3, 0x27,
|
||||
0xf8, 0x53, 0x7a, 0x66, 0x95, 0x56, 0xb3, 0xdd, 0x6a, 0x1e, 0x55, 0x93, 0xf5, 0xe2, 0xe7, 0x5f,
|
||||
0xde, 0xcc, 0x2b, 0x98, 0x35, 0x6e, 0x6e, 0xb5, 0x61, 0x23, 0xfc, 0x55, 0xa2, 0x19, 0x04, 0x41,
|
||||
0xf9, 0x9d, 0x3b, 0xa7, 0xc7, 0x87, 0xcd, 0x83, 0x6e, 0x4b, 0xbd, 0xdb, 0xe9, 0xb6, 0xaa, 0x49,
|
||||
0xf4, 0x28, 0x5c, 0x3b, 0x3e, 0x7c, 0xb7, 0xdd, 0x55, 0x9b, 0xc7, 0x87, 0xad, 0x93, 0xae, 0x7a,
|
||||
0xd0, 0xed, 0x1e, 0x34, 0x8f, 0xaa, 0xa9, 0xfd, 0x5f, 0x02, 0x54, 0x0e, 0x1a, 0xcd, 0x43, 0x9a,
|
||||
0xfd, 0x8c, 0xbe, 0x26, 0x1a, 0x63, 0x19, 0x56, 0xb5, 0x2f, 0xbd, 0xea, 0xad, 0x2f, 0xef, 0x0b,
|
||||
0xa2, 0xdb, 0x90, 0x65, 0x05, 0x3d, 0x5a, 0x7e, 0xf7, 0x5b, 0x5f, 0xd1, 0x28, 0xa4, 0x3f, 0x86,
|
||||
0x85, 0xc7, 0xd2, 0xcb, 0xe0, 0xfa, 0xf2, 0xbe, 0x21, 0x52, 0x40, 0x0a, 0x2a, 0xf2, 0xd5, 0x97,
|
||||
0xc3, 0xf5, 0x35, 0x7a, 0x89, 0xd4, 0x67, 0x50, 0x16, 0xac, 0xbe, 0x2c, 0xad, 0xaf, 0x01, 0x60,
|
||||
0xe8, 0x18, 0xf2, 0x5e, 0x25, 0xb7, 0xea, 0xfa, 0xb6, 0xbe, 0xb2, 0xcf, 0x47, 0x3f, 0x01, 0xaf,
|
||||
0xb8, 0x97, 0xdf, 0x45, 0xd7, 0x57, 0x34, 0x2d, 0xd1, 0x21, 0xe4, 0x04, 0xd7, 0x5d, 0x71, 0x25,
|
||||
0x5b, 0x5f, 0xd5, 0xb7, 0xa3, 0x9b, 0x16, 0xb4, 0x32, 0x56, 0xdf, 0xb0, 0xd7, 0xd7, 0xe8, 0xc7,
|
||||
0xa2, 0x3b, 0x00, 0xa1, 0xfa, 0x7a, 0x8d, 0xab, 0xf3, 0xfa, 0x3a, 0x7d, 0x56, 0xd4, 0x81, 0x82,
|
||||
0x5f, 0xee, 0xac, 0xbc, 0xc8, 0xae, 0xaf, 0x6e, 0x78, 0xa2, 0x7b, 0x50, 0x8a, 0xf2, 0xfc, 0xf5,
|
||||
0xae, 0xa7, 0xeb, 0x6b, 0x76, 0x32, 0xa9, 0xff, 0x28, 0xe9, 0x5f, 0xef, 0xba, 0xba, 0xbe, 0x66,
|
||||
0x63, 0x13, 0x7d, 0x0c, 0x9b, 0xf3, 0xa4, 0x7c, 0xfd, 0xdb, 0xeb, 0xfa, 0x15, 0x5a, 0x9d, 0x68,
|
||||
0x0c, 0x68, 0x01, 0x99, 0xbf, 0xc2, 0x65, 0x76, 0xfd, 0x2a, 0x9d, 0xcf, 0x46, 0xeb, 0xab, 0x07,
|
||||
0xdb, 0xc9, 0xaf, 0x1f, 0x6c, 0x27, 0xff, 0xf6, 0x60, 0x3b, 0xf9, 0xc5, 0xc3, 0xed, 0xc4, 0xd7,
|
||||
0x0f, 0xb7, 0x13, 0x7f, 0x7e, 0xb8, 0x9d, 0xf8, 0xe9, 0xf3, 0x03, 0x83, 0x0c, 0x27, 0xbd, 0xdd,
|
||||
0xbe, 0x35, 0xde, 0x0b, 0xff, 0xd3, 0x66, 0xd1, 0xbf, 0x7f, 0x7a, 0x39, 0x96, 0xa8, 0x5e, 0xfe,
|
||||
0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x4e, 0x2d, 0x07, 0xd8, 0x1d, 0x24, 0x00, 0x00,
|
||||
}
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
@@ -5635,6 +5663,25 @@ func (m *ResponseCheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.MempoolError) > 0 {
|
||||
i -= len(m.MempoolError)
|
||||
copy(dAtA[i:], m.MempoolError)
|
||||
i = encodeVarintTypes(dAtA, i, uint64(len(m.MempoolError)))
|
||||
i--
|
||||
dAtA[i] = 0x5a
|
||||
}
|
||||
if m.Priority != 0 {
|
||||
i = encodeVarintTypes(dAtA, i, uint64(m.Priority))
|
||||
i--
|
||||
dAtA[i] = 0x50
|
||||
}
|
||||
if len(m.Sender) > 0 {
|
||||
i -= len(m.Sender)
|
||||
copy(dAtA[i:], m.Sender)
|
||||
i = encodeVarintTypes(dAtA, i, uint64(len(m.Sender)))
|
||||
i--
|
||||
dAtA[i] = 0x4a
|
||||
}
|
||||
if len(m.Codespace) > 0 {
|
||||
i -= len(m.Codespace)
|
||||
copy(dAtA[i:], m.Codespace)
|
||||
@@ -7390,6 +7437,17 @@ func (m *ResponseCheckTx) Size() (n int) {
|
||||
if l > 0 {
|
||||
n += 1 + l + sovTypes(uint64(l))
|
||||
}
|
||||
l = len(m.Sender)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovTypes(uint64(l))
|
||||
}
|
||||
if m.Priority != 0 {
|
||||
n += 1 + sovTypes(uint64(m.Priority))
|
||||
}
|
||||
l = len(m.MempoolError)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovTypes(uint64(l))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
@@ -11929,6 +11987,89 @@ func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
m.Codespace = string(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
case 9:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Sender", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowTypes
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthTypes
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthTypes
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Sender = string(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
case 10:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Priority", wireType)
|
||||
}
|
||||
m.Priority = 0
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowTypes
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.Priority |= int64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
case 11:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field MempoolError", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowTypes
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthTypes
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthTypes
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.MempoolError = string(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipTypes(dAtA[iNdEx:])
|
||||
|
||||
@@ -23,6 +23,11 @@ const (
|
||||
|
||||
// DefaultLogLevel defines a default log level as INFO.
|
||||
DefaultLogLevel = "info"
|
||||
|
||||
// Mempool versions. V1 is prioritized mempool, v0 is regular mempool.
|
||||
// Default is v0.
|
||||
MempoolV0 = "v0"
|
||||
MempoolV1 = "v1"
|
||||
)
|
||||
|
||||
// NOTE: Most of the structs & relevant comments + the
|
||||
@@ -676,6 +681,7 @@ func DefaultFuzzConnConfig() *FuzzConnConfig {
|
||||
|
||||
// MempoolConfig defines the configuration options for the Tendermint mempool
|
||||
type MempoolConfig struct {
|
||||
Version string `mapstructure:"version"`
|
||||
RootDir string `mapstructure:"home"`
|
||||
Recheck bool `mapstructure:"recheck"`
|
||||
Broadcast bool `mapstructure:"broadcast"`
|
||||
@@ -699,20 +705,39 @@ type MempoolConfig struct {
|
||||
// Including space needed by encoding (one varint per transaction).
|
||||
// XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796
|
||||
MaxBatchBytes int `mapstructure:"max_batch_bytes"`
|
||||
|
||||
// TTLDuration, if non-zero, defines the maximum amount of time a transaction
|
||||
// can exist for in the mempool.
|
||||
//
|
||||
// Note, if TTLNumBlocks is also defined, a transaction will be removed if it
|
||||
// has existed in the mempool at least TTLNumBlocks number of blocks or if it's
|
||||
// insertion time into the mempool is beyond TTLDuration.
|
||||
TTLDuration time.Duration `mapstructure:"ttl-duration"`
|
||||
|
||||
// TTLNumBlocks, if non-zero, defines the maximum number of blocks a transaction
|
||||
// can exist for in the mempool.
|
||||
//
|
||||
// Note, if TTLDuration is also defined, a transaction will be removed if it
|
||||
// has existed in the mempool at least TTLNumBlocks number of blocks or if
|
||||
// it's insertion time into the mempool is beyond TTLDuration.
|
||||
TTLNumBlocks int64 `mapstructure:"ttl-num-blocks"`
|
||||
}
|
||||
|
||||
// DefaultMempoolConfig returns a default configuration for the Tendermint mempool
|
||||
func DefaultMempoolConfig() *MempoolConfig {
|
||||
return &MempoolConfig{
|
||||
Version: MempoolV0,
|
||||
Recheck: true,
|
||||
Broadcast: true,
|
||||
WalPath: "",
|
||||
// Each signature verification takes .5ms, Size reduced until we implement
|
||||
// ABCI Recheck
|
||||
Size: 5000,
|
||||
MaxTxsBytes: 1024 * 1024 * 1024, // 1GB
|
||||
CacheSize: 10000,
|
||||
MaxTxBytes: 1024 * 1024, // 1MB
|
||||
Size: 5000,
|
||||
MaxTxsBytes: 1024 * 1024 * 1024, // 1GB
|
||||
CacheSize: 10000,
|
||||
MaxTxBytes: 1024 * 1024, // 1MB
|
||||
TTLDuration: 0 * time.Second,
|
||||
TTLNumBlocks: 0,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -371,6 +371,22 @@ max_tx_bytes = {{ .Mempool.MaxTxBytes }}
|
||||
# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796
|
||||
max_batch_bytes = {{ .Mempool.MaxBatchBytes }}
|
||||
|
||||
# ttl-duration, if non-zero, defines the maximum amount of time a transaction
|
||||
# can exist for in the mempool.
|
||||
#
|
||||
# Note, if ttl-num-blocks is also defined, a transaction will be removed if it
|
||||
# has existed in the mempool at least ttl-num-blocks number of blocks or if it's
|
||||
# insertion time into the mempool is beyond ttl-duration.
|
||||
ttl-duration = "{{ .Mempool.TTLDuration }}"
|
||||
|
||||
# ttl-num-blocks, if non-zero, defines the maximum number of blocks a transaction
|
||||
# can exist for in the mempool.
|
||||
#
|
||||
# Note, if ttl-duration is also defined, a transaction will be removed if it
|
||||
# has existed in the mempool at least ttl-num-blocks number of blocks or if
|
||||
# it's insertion time into the mempool is beyond ttl-duration.
|
||||
ttl-num-blocks = {{ .Mempool.TTLNumBlocks }}
|
||||
|
||||
#######################################################
|
||||
### State Sync Configuration Options ###
|
||||
#######################################################
|
||||
|
||||
@@ -21,6 +21,10 @@ import (
|
||||
"github.com/tendermint/tendermint/libs/service"
|
||||
tmsync "github.com/tendermint/tendermint/libs/sync"
|
||||
mempl "github.com/tendermint/tendermint/mempool"
|
||||
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
mempoolv0 "github.com/tendermint/tendermint/mempool/v0"
|
||||
mempoolv1 "github.com/tendermint/tendermint/mempool/v1"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
@@ -58,14 +62,31 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
|
||||
blockDB := dbm.NewMemDB()
|
||||
blockStore := store.NewBlockStore(blockDB)
|
||||
|
||||
// one for mempool, one for consensus
|
||||
mtx := new(tmsync.Mutex)
|
||||
proxyAppConnMem := abcicli.NewLocalClient(mtx, app)
|
||||
// one for mempool, one for consensus
|
||||
proxyAppConnCon := abcicli.NewLocalClient(mtx, app)
|
||||
proxyAppConnConMem := abcicli.NewLocalClient(mtx, app)
|
||||
|
||||
// Make Mempool
|
||||
mempool := mempl.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0)
|
||||
mempool.SetLogger(log.TestingLogger().With("module", "mempool"))
|
||||
var mempool mempl.Mempool
|
||||
|
||||
switch thisConfig.Mempool.Version {
|
||||
case cfg.MempoolV0:
|
||||
mempool = mempoolv0.NewCListMempool(config.Mempool,
|
||||
proxyAppConnConMem,
|
||||
state.LastBlockHeight,
|
||||
mempoolv0.WithPreCheck(sm.TxPreCheck(state)),
|
||||
mempoolv0.WithPostCheck(sm.TxPostCheck(state)))
|
||||
case cfg.MempoolV1:
|
||||
mempool = mempoolv1.NewTxMempool(logger,
|
||||
config.Mempool,
|
||||
proxyAppConnConMem,
|
||||
state.LastBlockHeight,
|
||||
mempoolv1.WithPreCheck(sm.TxPreCheck(state)),
|
||||
mempoolv1.WithPostCheck(sm.TxPostCheck(state)),
|
||||
)
|
||||
}
|
||||
|
||||
if thisConfig.Consensus.WaitForTxs() {
|
||||
mempool.EnableTxsAvailable()
|
||||
}
|
||||
|
||||
@@ -31,6 +31,8 @@ import (
|
||||
tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
|
||||
tmsync "github.com/tendermint/tendermint/libs/sync"
|
||||
mempl "github.com/tendermint/tendermint/mempool"
|
||||
mempoolv0 "github.com/tendermint/tendermint/mempool/v0"
|
||||
mempoolv1 "github.com/tendermint/tendermint/mempool/v1"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
"github.com/tendermint/tendermint/privval"
|
||||
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
|
||||
@@ -390,12 +392,34 @@ func newStateWithConfigAndBlockStore(
|
||||
|
||||
// one for mempool, one for consensus
|
||||
mtx := new(tmsync.Mutex)
|
||||
proxyAppConnMem := abcicli.NewLocalClient(mtx, app)
|
||||
|
||||
proxyAppConnCon := abcicli.NewLocalClient(mtx, app)
|
||||
proxyAppConnConMem := abcicli.NewLocalClient(mtx, app)
|
||||
// Make Mempool
|
||||
memplMetrics := mempl.NopMetrics()
|
||||
|
||||
// Make Mempool
|
||||
mempool := mempl.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0)
|
||||
mempool.SetLogger(log.TestingLogger().With("module", "mempool"))
|
||||
var mempool mempl.Mempool
|
||||
|
||||
switch config.Mempool.Version {
|
||||
case cfg.MempoolV0:
|
||||
mempool = mempoolv0.NewCListMempool(config.Mempool,
|
||||
proxyAppConnConMem,
|
||||
state.LastBlockHeight,
|
||||
mempoolv0.WithMetrics(memplMetrics),
|
||||
mempoolv0.WithPreCheck(sm.TxPreCheck(state)),
|
||||
mempoolv0.WithPostCheck(sm.TxPostCheck(state)))
|
||||
case cfg.MempoolV1:
|
||||
logger := consensusLogger()
|
||||
mempool = mempoolv1.NewTxMempool(logger,
|
||||
config.Mempool,
|
||||
proxyAppConnConMem,
|
||||
state.LastBlockHeight,
|
||||
mempoolv1.WithMetrics(memplMetrics),
|
||||
mempoolv1.WithPreCheck(sm.TxPreCheck(state)),
|
||||
mempoolv1.WithPostCheck(sm.TxPostCheck(state)),
|
||||
)
|
||||
}
|
||||
if thisConfig.Consensus.WaitForTxs() {
|
||||
mempool.EnableTxsAvailable()
|
||||
}
|
||||
|
||||
@@ -29,6 +29,8 @@ import (
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmsync "github.com/tendermint/tendermint/libs/sync"
|
||||
mempl "github.com/tendermint/tendermint/mempool"
|
||||
mempoolv0 "github.com/tendermint/tendermint/mempool/v0"
|
||||
mempoolv1 "github.com/tendermint/tendermint/mempool/v1"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
p2pmock "github.com/tendermint/tendermint/p2p/mock"
|
||||
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
|
||||
@@ -152,14 +154,33 @@ func TestReactorWithEvidence(t *testing.T) {
|
||||
blockDB := dbm.NewMemDB()
|
||||
blockStore := store.NewBlockStore(blockDB)
|
||||
|
||||
// one for mempool, one for consensus
|
||||
mtx := new(tmsync.Mutex)
|
||||
proxyAppConnMem := abcicli.NewLocalClient(mtx, app)
|
||||
memplMetrics := mempl.NopMetrics()
|
||||
// one for mempool, one for consensus
|
||||
proxyAppConnCon := abcicli.NewLocalClient(mtx, app)
|
||||
proxyAppConnConMem := abcicli.NewLocalClient(mtx, app)
|
||||
|
||||
// Make Mempool
|
||||
mempool := mempl.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0)
|
||||
mempool.SetLogger(log.TestingLogger().With("module", "mempool"))
|
||||
var mempool mempl.Mempool
|
||||
|
||||
switch config.Mempool.Version {
|
||||
case cfg.MempoolV0:
|
||||
mempool = mempoolv0.NewCListMempool(config.Mempool,
|
||||
proxyAppConnConMem,
|
||||
state.LastBlockHeight,
|
||||
mempoolv0.WithMetrics(memplMetrics),
|
||||
mempoolv0.WithPreCheck(sm.TxPreCheck(state)),
|
||||
mempoolv0.WithPostCheck(sm.TxPostCheck(state)))
|
||||
case cfg.MempoolV1:
|
||||
mempool = mempoolv1.NewTxMempool(logger,
|
||||
config.Mempool,
|
||||
proxyAppConnConMem,
|
||||
state.LastBlockHeight,
|
||||
mempoolv1.WithMetrics(memplMetrics),
|
||||
mempoolv1.WithPreCheck(sm.TxPreCheck(state)),
|
||||
mempoolv1.WithPostCheck(sm.TxPostCheck(state)),
|
||||
)
|
||||
}
|
||||
if thisConfig.Consensus.WaitForTxs() {
|
||||
mempool.EnableTxsAvailable()
|
||||
}
|
||||
|
||||
@@ -15,12 +15,18 @@ type emptyMempool struct{}
|
||||
|
||||
var _ mempl.Mempool = emptyMempool{}
|
||||
|
||||
func (emptyMempool) Lock() {}
|
||||
func (emptyMempool) Unlock() {}
|
||||
func (emptyMempool) Size() int { return 0 }
|
||||
func (emptyMempool) Lock() {}
|
||||
func (emptyMempool) Unlock() {}
|
||||
func (emptyMempool) Size() int { return 0 }
|
||||
func (emptyMempool) SizeBytes() int64 { return 0 }
|
||||
func (emptyMempool) CheckTx(_ types.Tx, _ func(*abci.Response), _ mempl.TxInfo) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (txmp emptyMempool) RemoveTxByKey(txKey types.TxKey) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (emptyMempool) ReapMaxBytesMaxGas(_, _ int64) types.Txs { return types.Txs{} }
|
||||
func (emptyMempool) ReapMaxTxs(n int) types.Txs { return types.Txs{} }
|
||||
func (emptyMempool) Update(
|
||||
|
||||
@@ -1,73 +0,0 @@
|
||||
package mempool
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"testing"
|
||||
|
||||
"github.com/tendermint/tendermint/abci/example/kvstore"
|
||||
"github.com/tendermint/tendermint/proxy"
|
||||
)
|
||||
|
||||
func BenchmarkReap(b *testing.B) {
|
||||
app := kvstore.NewApplication()
|
||||
cc := proxy.NewLocalClientCreator(app)
|
||||
mempool, cleanup := newMempoolWithApp(cc)
|
||||
defer cleanup()
|
||||
|
||||
size := 10000
|
||||
for i := 0; i < size; i++ {
|
||||
tx := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(tx, uint64(i))
|
||||
if err := mempool.CheckTx(tx, nil, TxInfo{}); err != nil {
|
||||
b.Error(err)
|
||||
}
|
||||
}
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
mempool.ReapMaxBytesMaxGas(100000000, 10000000)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkCheckTx(b *testing.B) {
|
||||
app := kvstore.NewApplication()
|
||||
cc := proxy.NewLocalClientCreator(app)
|
||||
mempool, cleanup := newMempoolWithApp(cc)
|
||||
defer cleanup()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
tx := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(tx, uint64(i))
|
||||
if err := mempool.CheckTx(tx, nil, TxInfo{}); err != nil {
|
||||
b.Error(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkCacheInsertTime(b *testing.B) {
|
||||
cache := newMapTxCache(b.N)
|
||||
txs := make([][]byte, b.N)
|
||||
for i := 0; i < b.N; i++ {
|
||||
txs[i] = make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(txs[i], uint64(i))
|
||||
}
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
cache.Push(txs[i])
|
||||
}
|
||||
}
|
||||
|
||||
// This benchmark is probably skewed, since we actually will be removing
|
||||
// txs in parallel, which may cause some overhead due to mutex locking.
|
||||
func BenchmarkCacheRemoveTime(b *testing.B) {
|
||||
cache := newMapTxCache(b.N)
|
||||
txs := make([][]byte, b.N)
|
||||
for i := 0; i < b.N; i++ {
|
||||
txs[i] = make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(txs[i], uint64(i))
|
||||
cache.Push(txs[i])
|
||||
}
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
cache.Remove(txs[i])
|
||||
}
|
||||
}
|
||||
107
mempool/cache.go
Normal file
107
mempool/cache.go
Normal file
@@ -0,0 +1,107 @@
|
||||
package mempool
|
||||
|
||||
import (
|
||||
"container/list"
|
||||
|
||||
tmsync "github.com/tendermint/tendermint/libs/sync"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
// TxCache defines an interface for raw transaction caching in a mempool.
|
||||
// Currently, a TxCache does not allow direct reading or getting of transaction
|
||||
// values. A TxCache is used primarily to push transactions and removing
|
||||
// transactions. Pushing via Push returns a boolean telling the caller if the
|
||||
// transaction already exists in the cache or not.
|
||||
type TxCache interface {
|
||||
// Reset resets the cache to an empty state.
|
||||
Reset()
|
||||
|
||||
// Push adds the given raw transaction to the cache and returns true if it was
|
||||
// newly added. Otherwise, it returns false.
|
||||
Push(tx types.Tx) bool
|
||||
|
||||
// Remove removes the given raw transaction from the cache.
|
||||
Remove(tx types.Tx)
|
||||
}
|
||||
|
||||
var _ TxCache = (*LRUTxCache)(nil)
|
||||
|
||||
// LRUTxCache maintains a thread-safe LRU cache of raw transactions. The cache
|
||||
// only stores the hash of the raw transaction.
|
||||
type LRUTxCache struct {
|
||||
mtx tmsync.Mutex
|
||||
size int
|
||||
cacheMap map[types.TxKey]*list.Element
|
||||
list *list.List
|
||||
}
|
||||
|
||||
func NewLRUTxCache(cacheSize int) *LRUTxCache {
|
||||
return &LRUTxCache{
|
||||
size: cacheSize,
|
||||
cacheMap: make(map[types.TxKey]*list.Element, cacheSize),
|
||||
list: list.New(),
|
||||
}
|
||||
}
|
||||
|
||||
// GetList returns the underlying linked-list that backs the LRU cache. Note,
|
||||
// this should be used for testing purposes only!
|
||||
func (c *LRUTxCache) GetList() *list.List {
|
||||
return c.list
|
||||
}
|
||||
|
||||
func (c *LRUTxCache) Reset() {
|
||||
c.mtx.Lock()
|
||||
defer c.mtx.Unlock()
|
||||
|
||||
c.cacheMap = make(map[types.TxKey]*list.Element, c.size)
|
||||
c.list.Init()
|
||||
}
|
||||
|
||||
func (c *LRUTxCache) Push(tx types.Tx) bool {
|
||||
c.mtx.Lock()
|
||||
defer c.mtx.Unlock()
|
||||
|
||||
key := tx.Key()
|
||||
|
||||
moved, ok := c.cacheMap[key]
|
||||
if ok {
|
||||
c.list.MoveToBack(moved)
|
||||
return false
|
||||
}
|
||||
|
||||
if c.list.Len() >= c.size {
|
||||
front := c.list.Front()
|
||||
if front != nil {
|
||||
frontKey := front.Value.(types.TxKey)
|
||||
delete(c.cacheMap, frontKey)
|
||||
c.list.Remove(front)
|
||||
}
|
||||
}
|
||||
|
||||
e := c.list.PushBack(key)
|
||||
c.cacheMap[key] = e
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (c *LRUTxCache) Remove(tx types.Tx) {
|
||||
c.mtx.Lock()
|
||||
defer c.mtx.Unlock()
|
||||
|
||||
key := tx.Key()
|
||||
e := c.cacheMap[key]
|
||||
delete(c.cacheMap, key)
|
||||
|
||||
if e != nil {
|
||||
c.list.Remove(e)
|
||||
}
|
||||
}
|
||||
|
||||
// NopTxCache defines a no-op raw transaction cache.
|
||||
type NopTxCache struct{}
|
||||
|
||||
var _ TxCache = (*NopTxCache)(nil)
|
||||
|
||||
func (NopTxCache) Reset() {}
|
||||
func (NopTxCache) Push(types.Tx) bool { return true }
|
||||
func (NopTxCache) Remove(types.Tx) {}
|
||||
41
mempool/cache_bench_test.go
Normal file
41
mempool/cache_bench_test.go
Normal file
@@ -0,0 +1,41 @@
|
||||
package mempool
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func BenchmarkCacheInsertTime(b *testing.B) {
|
||||
cache := NewLRUTxCache(b.N)
|
||||
|
||||
txs := make([][]byte, b.N)
|
||||
for i := 0; i < b.N; i++ {
|
||||
txs[i] = make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(txs[i], uint64(i))
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
cache.Push(txs[i])
|
||||
}
|
||||
}
|
||||
|
||||
// This benchmark is probably skewed, since we actually will be removing
|
||||
// txs in parallel, which may cause some overhead due to mutex locking.
|
||||
func BenchmarkCacheRemoveTime(b *testing.B) {
|
||||
cache := NewLRUTxCache(b.N)
|
||||
|
||||
txs := make([][]byte, b.N)
|
||||
for i := 0; i < b.N; i++ {
|
||||
txs[i] = make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(txs[i], uint64(i))
|
||||
cache.Push(txs[i])
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
cache.Remove(txs[i])
|
||||
}
|
||||
}
|
||||
@@ -2,32 +2,30 @@ package mempool
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/tendermint/tendermint/abci/example/kvstore"
|
||||
abci "github.com/tendermint/tendermint/abci/types"
|
||||
"github.com/tendermint/tendermint/proxy"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
func TestCacheRemove(t *testing.T) {
|
||||
cache := newMapTxCache(100)
|
||||
cache := NewLRUTxCache(100)
|
||||
numTxs := 10
|
||||
|
||||
txs := make([][]byte, numTxs)
|
||||
for i := 0; i < numTxs; i++ {
|
||||
// probability of collision is 2**-256
|
||||
txBytes := make([]byte, 32)
|
||||
_, err := rand.Read(txBytes)
|
||||
require.NoError(t, err)
|
||||
|
||||
txs[i] = txBytes
|
||||
cache.Push(txBytes)
|
||||
|
||||
// make sure its added to both the linked list and the map
|
||||
require.Equal(t, i+1, len(cache.cacheMap))
|
||||
require.Equal(t, i+1, cache.list.Len())
|
||||
}
|
||||
|
||||
for i := 0; i < numTxs; i++ {
|
||||
cache.Remove(txs[i])
|
||||
// make sure its removed from both the map and the linked list
|
||||
@@ -35,70 +33,3 @@ func TestCacheRemove(t *testing.T) {
|
||||
require.Equal(t, numTxs-(i+1), cache.list.Len())
|
||||
}
|
||||
}
|
||||
|
||||
func TestCacheAfterUpdate(t *testing.T) {
|
||||
app := kvstore.NewApplication()
|
||||
cc := proxy.NewLocalClientCreator(app)
|
||||
mempool, cleanup := newMempoolWithApp(cc)
|
||||
defer cleanup()
|
||||
|
||||
// reAddIndices & txsInCache can have elements > numTxsToCreate
|
||||
// also assumes max index is 255 for convenience
|
||||
// txs in cache also checks order of elements
|
||||
tests := []struct {
|
||||
numTxsToCreate int
|
||||
updateIndices []int
|
||||
reAddIndices []int
|
||||
txsInCache []int
|
||||
}{
|
||||
{1, []int{}, []int{1}, []int{1, 0}}, // adding new txs works
|
||||
{2, []int{1}, []int{}, []int{1, 0}}, // update doesn't remove tx from cache
|
||||
{2, []int{2}, []int{}, []int{2, 1, 0}}, // update adds new tx to cache
|
||||
{2, []int{1}, []int{1}, []int{1, 0}}, // re-adding after update doesn't make dupe
|
||||
}
|
||||
for tcIndex, tc := range tests {
|
||||
for i := 0; i < tc.numTxsToCreate; i++ {
|
||||
tx := types.Tx{byte(i)}
|
||||
err := mempool.CheckTx(tx, nil, TxInfo{})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
updateTxs := []types.Tx{}
|
||||
for _, v := range tc.updateIndices {
|
||||
tx := types.Tx{byte(v)}
|
||||
updateTxs = append(updateTxs, tx)
|
||||
}
|
||||
err := mempool.Update(int64(tcIndex), updateTxs, abciResponses(len(updateTxs), abci.CodeTypeOK), nil, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, v := range tc.reAddIndices {
|
||||
tx := types.Tx{byte(v)}
|
||||
_ = mempool.CheckTx(tx, nil, TxInfo{})
|
||||
}
|
||||
|
||||
cache := mempool.cache.(*mapTxCache)
|
||||
node := cache.list.Front()
|
||||
counter := 0
|
||||
for node != nil {
|
||||
require.NotEqual(t, len(tc.txsInCache), counter,
|
||||
"cache larger than expected on testcase %d", tcIndex)
|
||||
|
||||
nodeVal := node.Value.([sha256.Size]byte)
|
||||
expectedBz := sha256.Sum256([]byte{byte(tc.txsInCache[len(tc.txsInCache)-counter-1])})
|
||||
// Reference for reading the errors:
|
||||
// >>> sha256('\x00').hexdigest()
|
||||
// '6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d'
|
||||
// >>> sha256('\x01').hexdigest()
|
||||
// '4bf5122f344554c53bde2ebb8cd2b7e3d1600ad631c385a5d7cce23c7785459a'
|
||||
// >>> sha256('\x02').hexdigest()
|
||||
// 'dbc1b4c900ffe48d575b5da5c638040125f65db0fe3e24494b76ea986457d986'
|
||||
|
||||
require.Equal(t, expectedBz, nodeVal, "Equality failed on index %d, tc %d", counter, tcIndex)
|
||||
counter++
|
||||
node = node.Next()
|
||||
}
|
||||
require.Equal(t, len(tc.txsInCache), counter,
|
||||
"cache smaller than expected on testcase %d", tcIndex)
|
||||
mempool.Flush()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,52 +0,0 @@
|
||||
package mempool
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrTxInCache is returned to the client if we saw tx earlier
|
||||
ErrTxInCache = errors.New("tx already exists in cache")
|
||||
)
|
||||
|
||||
// ErrTxTooLarge means the tx is too big to be sent in a message to other peers
|
||||
type ErrTxTooLarge struct {
|
||||
max int
|
||||
actual int
|
||||
}
|
||||
|
||||
func (e ErrTxTooLarge) Error() string {
|
||||
return fmt.Sprintf("Tx too large. Max size is %d, but got %d", e.max, e.actual)
|
||||
}
|
||||
|
||||
// ErrMempoolIsFull means Tendermint & an application can't handle that much load
|
||||
type ErrMempoolIsFull struct {
|
||||
numTxs int
|
||||
maxTxs int
|
||||
|
||||
txsBytes int64
|
||||
maxTxsBytes int64
|
||||
}
|
||||
|
||||
func (e ErrMempoolIsFull) Error() string {
|
||||
return fmt.Sprintf(
|
||||
"mempool is full: number of txs %d (max: %d), total txs bytes %d (max: %d)",
|
||||
e.numTxs, e.maxTxs,
|
||||
e.txsBytes, e.maxTxsBytes)
|
||||
}
|
||||
|
||||
// ErrPreCheck is returned when tx is too big
|
||||
type ErrPreCheck struct {
|
||||
Reason error
|
||||
}
|
||||
|
||||
func (e ErrPreCheck) Error() string {
|
||||
return e.Reason.Error()
|
||||
}
|
||||
|
||||
// IsPreCheckError returns true if err is due to pre check failure.
|
||||
func IsPreCheckError(err error) bool {
|
||||
_, ok := err.(ErrPreCheck)
|
||||
return ok
|
||||
}
|
||||
3
mempool/ids.go
Normal file
3
mempool/ids.go
Normal file
@@ -0,0 +1,3 @@
|
||||
package mempool
|
||||
|
||||
// These functions were moved into v0/reactor.go and v1/reactor.go
|
||||
23
mempool/ids_test.go
Normal file
23
mempool/ids_test.go
Normal file
@@ -0,0 +1,23 @@
|
||||
package mempool
|
||||
|
||||
// import (
|
||||
// "testing"
|
||||
|
||||
// "github.com/stretchr/testify/require"
|
||||
// "github.com/tendermint/tendermint/types"
|
||||
// )
|
||||
|
||||
// func TestMempoolIDsBasic(t *testing.T) {
|
||||
// ids := NewMempoolIDs()
|
||||
|
||||
// peerID, err := types.NewNodeID("0011223344556677889900112233445566778899")
|
||||
// require.NoError(t, err)
|
||||
|
||||
// ids.ReserveForPeer(peerID)
|
||||
// require.EqualValues(t, 1, ids.GetForPeer(peerID))
|
||||
// ids.Reclaim(peerID)
|
||||
|
||||
// ids.ReserveForPeer(peerID)
|
||||
// require.EqualValues(t, 2, ids.GetForPeer(peerID))
|
||||
// ids.Reclaim(peerID)
|
||||
// }
|
||||
@@ -1,43 +1,67 @@
|
||||
package mempool
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
|
||||
abci "github.com/tendermint/tendermint/abci/types"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
const (
|
||||
MempoolChannel = byte(0x30)
|
||||
|
||||
// PeerCatchupSleepIntervalMS defines how much time to sleep if a peer is behind
|
||||
PeerCatchupSleepIntervalMS = 100
|
||||
|
||||
// UnknownPeerID is the peer ID to use when running CheckTx when there is
|
||||
// no peer (e.g. RPC)
|
||||
UnknownPeerID uint16 = 0
|
||||
|
||||
MaxActiveIDs = math.MaxUint16
|
||||
)
|
||||
|
||||
// Mempool defines the mempool interface.
|
||||
//
|
||||
// Updates to the mempool need to be synchronized with committing a block so
|
||||
// apps can reset their transient state on Commit.
|
||||
// applications can reset their transient state on Commit.
|
||||
type Mempool interface {
|
||||
// CheckTx executes a new transaction against the application to determine
|
||||
// its validity and whether it should be added to the mempool.
|
||||
CheckTx(tx types.Tx, callback func(*abci.Response), txInfo TxInfo) error
|
||||
|
||||
// RemoveTxByKey removes a transaction, identified by its key,
|
||||
// from the mempool.
|
||||
RemoveTxByKey(txKey types.TxKey) error
|
||||
|
||||
// ReapMaxBytesMaxGas reaps transactions from the mempool up to maxBytes
|
||||
// bytes total with the condition that the total gasWanted must be less than
|
||||
// maxGas.
|
||||
//
|
||||
// If both maxes are negative, there is no cap on the size of all returned
|
||||
// transactions (~ all available transactions).
|
||||
ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs
|
||||
|
||||
// ReapMaxTxs reaps up to max transactions from the mempool.
|
||||
// If max is negative, there is no cap on the size of all returned
|
||||
// transactions (~ all available transactions).
|
||||
// ReapMaxTxs reaps up to max transactions from the mempool. If max is
|
||||
// negative, there is no cap on the size of all returned transactions
|
||||
// (~ all available transactions).
|
||||
ReapMaxTxs(max int) types.Txs
|
||||
|
||||
// Lock locks the mempool. The consensus must be able to hold lock to safely update.
|
||||
// Lock locks the mempool. The consensus must be able to hold lock to safely
|
||||
// update.
|
||||
Lock()
|
||||
|
||||
// Unlock unlocks the mempool.
|
||||
Unlock()
|
||||
|
||||
// Update informs the mempool that the given txs were committed and can be discarded.
|
||||
// NOTE: this should be called *after* block is committed by consensus.
|
||||
// NOTE: Lock/Unlock must be managed by caller
|
||||
// Update informs the mempool that the given txs were committed and can be
|
||||
// discarded.
|
||||
//
|
||||
// NOTE:
|
||||
// 1. This should be called *after* block is committed by consensus.
|
||||
// 2. Lock/Unlock must be managed by the caller.
|
||||
Update(
|
||||
blockHeight int64,
|
||||
blockTxs types.Txs,
|
||||
@@ -46,17 +70,21 @@ type Mempool interface {
|
||||
newPostFn PostCheckFunc,
|
||||
) error
|
||||
|
||||
// FlushAppConn flushes the mempool connection to ensure async reqResCb calls are
|
||||
// done. E.g. from CheckTx.
|
||||
// NOTE: Lock/Unlock must be managed by caller
|
||||
// FlushAppConn flushes the mempool connection to ensure async callback calls
|
||||
// are done, e.g. from CheckTx.
|
||||
//
|
||||
// NOTE:
|
||||
// 1. Lock/Unlock must be managed by caller.
|
||||
FlushAppConn() error
|
||||
|
||||
// Flush removes all transactions from the mempool and cache
|
||||
// Flush removes all transactions from the mempool and caches.
|
||||
Flush()
|
||||
|
||||
// TxsAvailable returns a channel which fires once for every height,
|
||||
// and only when transactions are available in the mempool.
|
||||
// NOTE: the returned channel may be nil if EnableTxsAvailable was not called.
|
||||
// TxsAvailable returns a channel which fires once for every height, and only
|
||||
// when transactions are available in the mempool.
|
||||
//
|
||||
// NOTE:
|
||||
// 1. The returned channel may be nil if EnableTxsAvailable was not called.
|
||||
TxsAvailable() <-chan struct{}
|
||||
|
||||
// EnableTxsAvailable initializes the TxsAvailable channel, ensuring it will
|
||||
@@ -66,20 +94,10 @@ type Mempool interface {
|
||||
// Size returns the number of transactions in the mempool.
|
||||
Size() int
|
||||
|
||||
// TxsBytes returns the total size of all txs in the mempool.
|
||||
TxsBytes() int64
|
||||
|
||||
// InitWAL creates a directory for the WAL file and opens a file itself. If
|
||||
// there is an error, it will be of type *PathError.
|
||||
InitWAL() error
|
||||
|
||||
// CloseWAL closes and discards the underlying WAL file.
|
||||
// Any further writes will not be relayed to disk.
|
||||
CloseWAL()
|
||||
// SizeBytes returns the total size of all txs in the mempool.
|
||||
SizeBytes() int64
|
||||
}
|
||||
|
||||
//--------------------------------------------------------------------------------
|
||||
|
||||
// PreCheckFunc is an optional filter executed before CheckTx and rejects
|
||||
// transaction if false is returned. An example would be to ensure that a
|
||||
// transaction doesn't exceeded the block size.
|
||||
@@ -90,27 +108,16 @@ type PreCheckFunc func(types.Tx) error
|
||||
// transaction doesn't require more gas than available for the block.
|
||||
type PostCheckFunc func(types.Tx, *abci.ResponseCheckTx) error
|
||||
|
||||
// TxInfo are parameters that get passed when attempting to add a tx to the
|
||||
// mempool.
|
||||
type TxInfo struct {
|
||||
// SenderID is the internal peer ID used in the mempool to identify the
|
||||
// sender, storing 2 bytes with each tx instead of 20 bytes for the p2p.ID.
|
||||
SenderID uint16
|
||||
// SenderP2PID is the actual p2p.ID of the sender, used e.g. for logging.
|
||||
SenderP2PID p2p.ID
|
||||
}
|
||||
|
||||
//--------------------------------------------------------------------------------
|
||||
|
||||
// PreCheckMaxBytes checks that the size of the transaction is smaller or equal to the expected maxBytes.
|
||||
// PreCheckMaxBytes checks that the size of the transaction is smaller or equal
|
||||
// to the expected maxBytes.
|
||||
func PreCheckMaxBytes(maxBytes int64) PreCheckFunc {
|
||||
return func(tx types.Tx) error {
|
||||
txSize := types.ComputeProtoSizeForTxs([]types.Tx{tx})
|
||||
|
||||
if txSize > maxBytes {
|
||||
return fmt.Errorf("tx size is too big: %d, max: %d",
|
||||
txSize, maxBytes)
|
||||
return fmt.Errorf("tx size is too big: %d, max: %d", txSize, maxBytes)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
@@ -130,6 +137,57 @@ func PostCheckMaxGas(maxGas int64) PostCheckFunc {
|
||||
return fmt.Errorf("gas wanted %d is greater than max gas %d",
|
||||
res.GasWanted, maxGas)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// ErrTxInCache is returned to the client if we saw tx earlier
|
||||
var ErrTxInCache = errors.New("tx already exists in cache")
|
||||
|
||||
// TxKey is the fixed length array key used as an index.
|
||||
type TxKey [sha256.Size]byte
|
||||
|
||||
// ErrTxTooLarge defines an error when a transaction is too big to be sent in a
|
||||
// message to other peers.
|
||||
type ErrTxTooLarge struct {
|
||||
Max int
|
||||
Actual int
|
||||
}
|
||||
|
||||
func (e ErrTxTooLarge) Error() string {
|
||||
return fmt.Sprintf("Tx too large. Max size is %d, but got %d", e.Max, e.Actual)
|
||||
}
|
||||
|
||||
// ErrMempoolIsFull defines an error where Tendermint and the application cannot
|
||||
// handle that much load.
|
||||
type ErrMempoolIsFull struct {
|
||||
NumTxs int
|
||||
MaxTxs int
|
||||
TxsBytes int64
|
||||
MaxTxsBytes int64
|
||||
}
|
||||
|
||||
func (e ErrMempoolIsFull) Error() string {
|
||||
return fmt.Sprintf(
|
||||
"mempool is full: number of txs %d (max: %d), total txs bytes %d (max: %d)",
|
||||
e.NumTxs,
|
||||
e.MaxTxs,
|
||||
e.TxsBytes,
|
||||
e.MaxTxsBytes,
|
||||
)
|
||||
}
|
||||
|
||||
// ErrPreCheck defines an error where a transaction fails a pre-check.
|
||||
type ErrPreCheck struct {
|
||||
Reason error
|
||||
}
|
||||
|
||||
func (e ErrPreCheck) Error() string {
|
||||
return e.Reason.Error()
|
||||
}
|
||||
|
||||
// IsPreCheckError returns true if err is due to pre check failure.
|
||||
func IsPreCheckError(err error) bool {
|
||||
return errors.As(err, &ErrPreCheck{})
|
||||
}
|
||||
|
||||
@@ -18,10 +18,25 @@ const (
|
||||
type Metrics struct {
|
||||
// Size of the mempool.
|
||||
Size metrics.Gauge
|
||||
|
||||
// Histogram of transaction sizes, in bytes.
|
||||
TxSizeBytes metrics.Histogram
|
||||
|
||||
// Number of failed transactions.
|
||||
FailedTxs metrics.Counter
|
||||
|
||||
// RejectedTxs defines the number of rejected transactions. These are
|
||||
// transactions that passed CheckTx but failed to make it into the mempool
|
||||
// due to resource limits, e.g. mempool is full and no lower priority
|
||||
// transactions exist in the mempool.
|
||||
RejectedTxs metrics.Counter
|
||||
|
||||
// EvictedTxs defines the number of evicted transactions. These are valid
|
||||
// transactions that passed CheckTx and existed in the mempool but were later
|
||||
// evicted to make room for higher priority valid transactions that passed
|
||||
// CheckTx.
|
||||
EvictedTxs metrics.Counter
|
||||
|
||||
// Number of times transactions are rechecked in the mempool.
|
||||
RecheckTimes metrics.Counter
|
||||
}
|
||||
@@ -41,6 +56,7 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
|
||||
Name: "size",
|
||||
Help: "Size of the mempool (number of uncommitted transactions).",
|
||||
}, labels).With(labelsAndValues...),
|
||||
|
||||
TxSizeBytes: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
@@ -48,12 +64,28 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
|
||||
Help: "Transaction sizes in bytes.",
|
||||
Buckets: stdprometheus.ExponentialBuckets(1, 3, 17),
|
||||
}, labels).With(labelsAndValues...),
|
||||
|
||||
FailedTxs: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "failed_txs",
|
||||
Help: "Number of failed transactions.",
|
||||
}, labels).With(labelsAndValues...),
|
||||
|
||||
RejectedTxs: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "rejected_txs",
|
||||
Help: "Number of rejected transactions.",
|
||||
}, labels).With(labelsAndValues...),
|
||||
|
||||
EvictedTxs: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "evicted_txs",
|
||||
Help: "Number of evicted transactions.",
|
||||
}, labels).With(labelsAndValues...),
|
||||
|
||||
RecheckTimes: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
@@ -69,6 +101,8 @@ func NopMetrics() *Metrics {
|
||||
Size: discard.NewGauge(),
|
||||
TxSizeBytes: discard.NewHistogram(),
|
||||
FailedTxs: discard.NewCounter(),
|
||||
RejectedTxs: discard.NewCounter(),
|
||||
EvictedTxs: discard.NewCounter(),
|
||||
RecheckTimes: discard.NewCounter(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,29 +3,30 @@ package mock
|
||||
import (
|
||||
abci "github.com/tendermint/tendermint/abci/types"
|
||||
"github.com/tendermint/tendermint/libs/clist"
|
||||
mempl "github.com/tendermint/tendermint/mempool"
|
||||
"github.com/tendermint/tendermint/mempool"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
// Mempool is an empty implementation of a Mempool, useful for testing.
|
||||
type Mempool struct{}
|
||||
|
||||
var _ mempl.Mempool = Mempool{}
|
||||
var _ mempool.Mempool = Mempool{}
|
||||
|
||||
func (Mempool) Lock() {}
|
||||
func (Mempool) Unlock() {}
|
||||
func (Mempool) Size() int { return 0 }
|
||||
func (Mempool) CheckTx(_ types.Tx, _ func(*abci.Response), _ mempl.TxInfo) error {
|
||||
func (Mempool) CheckTx(_ types.Tx, _ func(*abci.Response), _ mempool.TxInfo) error {
|
||||
return nil
|
||||
}
|
||||
func (Mempool) RemoveTxByKey(txKey types.TxKey) error { return nil }
|
||||
func (Mempool) ReapMaxBytesMaxGas(_, _ int64) types.Txs { return types.Txs{} }
|
||||
func (Mempool) ReapMaxTxs(n int) types.Txs { return types.Txs{} }
|
||||
func (Mempool) Update(
|
||||
_ int64,
|
||||
_ types.Txs,
|
||||
_ []*abci.ResponseDeliverTx,
|
||||
_ mempl.PreCheckFunc,
|
||||
_ mempl.PostCheckFunc,
|
||||
_ mempool.PreCheckFunc,
|
||||
_ mempool.PostCheckFunc,
|
||||
) error {
|
||||
return nil
|
||||
}
|
||||
@@ -33,7 +34,7 @@ func (Mempool) Flush() {}
|
||||
func (Mempool) FlushAppConn() error { return nil }
|
||||
func (Mempool) TxsAvailable() <-chan struct{} { return make(chan struct{}) }
|
||||
func (Mempool) EnableTxsAvailable() {}
|
||||
func (Mempool) TxsBytes() int64 { return 0 }
|
||||
func (Mempool) SizeBytes() int64 { return 0 }
|
||||
|
||||
func (Mempool) TxsFront() *clist.CElement { return nil }
|
||||
func (Mempool) TxsWaitChan() <-chan struct{} { return nil }
|
||||
|
||||
17
mempool/tx.go
Normal file
17
mempool/tx.go
Normal file
@@ -0,0 +1,17 @@
|
||||
package mempool
|
||||
|
||||
import (
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
)
|
||||
|
||||
// TxInfo are parameters that get passed when attempting to add a tx to the
|
||||
// mempool.
|
||||
type TxInfo struct {
|
||||
// SenderID is the internal peer ID used in the mempool to identify the
|
||||
// sender, storing two bytes with each transaction instead of 20 bytes for
|
||||
// the types.NodeID.
|
||||
SenderID uint16
|
||||
|
||||
// SenderP2PID is the actual p2p.ID of the sender, used e.g. for logging.
|
||||
SenderP2PID p2p.ID
|
||||
}
|
||||
101
mempool/v0/bench_test.go
Normal file
101
mempool/v0/bench_test.go
Normal file
@@ -0,0 +1,101 @@
|
||||
package v0
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
|
||||
"github.com/tendermint/tendermint/abci/example/kvstore"
|
||||
"github.com/tendermint/tendermint/mempool"
|
||||
"github.com/tendermint/tendermint/proxy"
|
||||
)
|
||||
|
||||
func BenchmarkReap(b *testing.B) {
|
||||
app := kvstore.NewApplication()
|
||||
cc := proxy.NewLocalClientCreator(app)
|
||||
mp, cleanup := newMempoolWithApp(cc)
|
||||
defer cleanup()
|
||||
|
||||
mp.config.Size = 100000
|
||||
|
||||
size := 10000
|
||||
for i := 0; i < size; i++ {
|
||||
tx := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(tx, uint64(i))
|
||||
if err := mp.CheckTx(tx, nil, mempool.TxInfo{}); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
mp.ReapMaxBytesMaxGas(100000000, 10000000)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkCheckTx(b *testing.B) {
|
||||
app := kvstore.NewApplication()
|
||||
cc := proxy.NewLocalClientCreator(app)
|
||||
mp, cleanup := newMempoolWithApp(cc)
|
||||
defer cleanup()
|
||||
|
||||
mp.config.Size = 1000000
|
||||
|
||||
b.ResetTimer()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
b.StopTimer()
|
||||
tx := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(tx, uint64(i))
|
||||
b.StartTimer()
|
||||
|
||||
if err := mp.CheckTx(tx, nil, mempool.TxInfo{}); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkParallelCheckTx(b *testing.B) {
|
||||
app := kvstore.NewApplication()
|
||||
cc := proxy.NewLocalClientCreator(app)
|
||||
mp, cleanup := newMempoolWithApp(cc)
|
||||
defer cleanup()
|
||||
|
||||
mp.config.Size = 100000000
|
||||
|
||||
var txcnt uint64
|
||||
next := func() uint64 {
|
||||
return atomic.AddUint64(&txcnt, 1) - 1
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
for pb.Next() {
|
||||
tx := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(tx, next())
|
||||
if err := mp.CheckTx(tx, nil, mempool.TxInfo{}); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkCheckDuplicateTx(b *testing.B) {
|
||||
app := kvstore.NewApplication()
|
||||
cc := proxy.NewLocalClientCreator(app)
|
||||
mp, cleanup := newMempoolWithApp(cc)
|
||||
defer cleanup()
|
||||
|
||||
mp.config.Size = 1000000
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
tx := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(tx, uint64(i))
|
||||
if err := mp.CheckTx(tx, nil, mempool.TxInfo{}); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
if err := mp.CheckTx(tx, nil, mempool.TxInfo{}); err == nil {
|
||||
b.Fatal("tx should be duplicate")
|
||||
}
|
||||
}
|
||||
}
|
||||
81
mempool/v0/cache_test.go
Normal file
81
mempool/v0/cache_test.go
Normal file
@@ -0,0 +1,81 @@
|
||||
package v0
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/tendermint/tendermint/abci/example/kvstore"
|
||||
abci "github.com/tendermint/tendermint/abci/types"
|
||||
"github.com/tendermint/tendermint/mempool"
|
||||
"github.com/tendermint/tendermint/proxy"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
func TestCacheAfterUpdate(t *testing.T) {
|
||||
app := kvstore.NewApplication()
|
||||
cc := proxy.NewLocalClientCreator(app)
|
||||
mp, cleanup := newMempoolWithApp(cc)
|
||||
defer cleanup()
|
||||
|
||||
// reAddIndices & txsInCache can have elements > numTxsToCreate
|
||||
// also assumes max index is 255 for convenience
|
||||
// txs in cache also checks order of elements
|
||||
tests := []struct {
|
||||
numTxsToCreate int
|
||||
updateIndices []int
|
||||
reAddIndices []int
|
||||
txsInCache []int
|
||||
}{
|
||||
{1, []int{}, []int{1}, []int{1, 0}}, // adding new txs works
|
||||
{2, []int{1}, []int{}, []int{1, 0}}, // update doesn't remove tx from cache
|
||||
{2, []int{2}, []int{}, []int{2, 1, 0}}, // update adds new tx to cache
|
||||
{2, []int{1}, []int{1}, []int{1, 0}}, // re-adding after update doesn't make dupe
|
||||
}
|
||||
for tcIndex, tc := range tests {
|
||||
for i := 0; i < tc.numTxsToCreate; i++ {
|
||||
tx := types.Tx{byte(i)}
|
||||
err := mp.CheckTx(tx, nil, mempool.TxInfo{})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
updateTxs := []types.Tx{}
|
||||
for _, v := range tc.updateIndices {
|
||||
tx := types.Tx{byte(v)}
|
||||
updateTxs = append(updateTxs, tx)
|
||||
}
|
||||
err := mp.Update(int64(tcIndex), updateTxs, abciResponses(len(updateTxs), abci.CodeTypeOK), nil, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, v := range tc.reAddIndices {
|
||||
tx := types.Tx{byte(v)}
|
||||
_ = mp.CheckTx(tx, nil, mempool.TxInfo{})
|
||||
}
|
||||
|
||||
cache := mp.cache.(*mempool.LRUTxCache)
|
||||
node := cache.GetList().Front()
|
||||
counter := 0
|
||||
for node != nil {
|
||||
require.NotEqual(t, len(tc.txsInCache), counter,
|
||||
"cache larger than expected on testcase %d", tcIndex)
|
||||
|
||||
nodeVal := node.Value.(types.TxKey)
|
||||
expectedBz := sha256.Sum256([]byte{byte(tc.txsInCache[len(tc.txsInCache)-counter-1])})
|
||||
// Reference for reading the errors:
|
||||
// >>> sha256('\x00').hexdigest()
|
||||
// '6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d'
|
||||
// >>> sha256('\x01').hexdigest()
|
||||
// '4bf5122f344554c53bde2ebb8cd2b7e3d1600ad631c385a5d7cce23c7785459a'
|
||||
// >>> sha256('\x02').hexdigest()
|
||||
// 'dbc1b4c900ffe48d575b5da5c638040125f65db0fe3e24494b76ea986457d986'
|
||||
|
||||
require.EqualValues(t, expectedBz, nodeVal, "Equality failed on index %d, tc %d", counter, tcIndex)
|
||||
counter++
|
||||
node = node.Next()
|
||||
}
|
||||
require.Equal(t, len(tc.txsInCache), counter,
|
||||
"cache smaller than expected on testcase %d", tcIndex)
|
||||
mp.Flush()
|
||||
}
|
||||
}
|
||||
@@ -1,33 +1,23 @@
|
||||
package mempool
|
||||
package v0
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"container/list"
|
||||
"crypto/sha256"
|
||||
"fmt"
|
||||
"errors"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
abci "github.com/tendermint/tendermint/abci/types"
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
auto "github.com/tendermint/tendermint/libs/autofile"
|
||||
"github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/libs/clist"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmmath "github.com/tendermint/tendermint/libs/math"
|
||||
tmos "github.com/tendermint/tendermint/libs/os"
|
||||
tmsync "github.com/tendermint/tendermint/libs/sync"
|
||||
"github.com/tendermint/tendermint/mempool"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
"github.com/tendermint/tendermint/proxy"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
// TxKeySize is the size of the transaction key index
|
||||
const TxKeySize = sha256.Size
|
||||
|
||||
var newline = []byte("\n")
|
||||
|
||||
//--------------------------------------------------------------------------------
|
||||
|
||||
// CListMempool is an ordered in-memory pool for transactions before they are
|
||||
// proposed in a consensus round. Transaction validity is checked using the
|
||||
// CheckTx abci message before the transaction is added to the pool. The
|
||||
@@ -42,16 +32,15 @@ type CListMempool struct {
|
||||
notifiedTxsAvailable bool
|
||||
txsAvailable chan struct{} // fires once for each height, when the mempool is not empty
|
||||
|
||||
config *cfg.MempoolConfig
|
||||
config *config.MempoolConfig
|
||||
|
||||
// Exclusive mutex for Update method to prevent concurrent execution of
|
||||
// CheckTx or ReapMaxBytesMaxGas(ReapMaxTxs) methods.
|
||||
updateMtx tmsync.RWMutex
|
||||
preCheck PreCheckFunc
|
||||
postCheck PostCheckFunc
|
||||
preCheck mempool.PreCheckFunc
|
||||
postCheck mempool.PostCheckFunc
|
||||
|
||||
wal *auto.AutoFile // a log of mempool txs
|
||||
txs *clist.CList // concurrent linked-list of good txs
|
||||
txs *clist.CList // concurrent linked-list of good txs
|
||||
proxyAppConn proxy.AppConnMempool
|
||||
|
||||
// Track whether we're rechecking txs.
|
||||
@@ -66,45 +55,50 @@ type CListMempool struct {
|
||||
|
||||
// Keep a cache of already-seen txs.
|
||||
// This reduces the pressure on the proxyApp.
|
||||
cache txCache
|
||||
cache mempool.TxCache
|
||||
|
||||
logger log.Logger
|
||||
|
||||
metrics *Metrics
|
||||
logger log.Logger
|
||||
metrics *mempool.Metrics
|
||||
}
|
||||
|
||||
var _ Mempool = &CListMempool{}
|
||||
var _ mempool.Mempool = &CListMempool{}
|
||||
|
||||
// CListMempoolOption sets an optional parameter on the mempool.
|
||||
type CListMempoolOption func(*CListMempool)
|
||||
|
||||
// NewCListMempool returns a new mempool with the given configuration and connection to an application.
|
||||
// NewCListMempool returns a new mempool with the given configuration and
|
||||
// connection to an application.
|
||||
func NewCListMempool(
|
||||
config *cfg.MempoolConfig,
|
||||
cfg *config.MempoolConfig,
|
||||
proxyAppConn proxy.AppConnMempool,
|
||||
height int64,
|
||||
options ...CListMempoolOption,
|
||||
) *CListMempool {
|
||||
mempool := &CListMempool{
|
||||
config: config,
|
||||
|
||||
mp := &CListMempool{
|
||||
config: cfg,
|
||||
proxyAppConn: proxyAppConn,
|
||||
txs: clist.New(),
|
||||
height: height,
|
||||
recheckCursor: nil,
|
||||
recheckEnd: nil,
|
||||
logger: log.NewNopLogger(),
|
||||
metrics: NopMetrics(),
|
||||
metrics: mempool.NopMetrics(),
|
||||
}
|
||||
if config.CacheSize > 0 {
|
||||
mempool.cache = newMapTxCache(config.CacheSize)
|
||||
|
||||
if cfg.CacheSize > 0 {
|
||||
mp.cache = mempool.NewLRUTxCache(cfg.CacheSize)
|
||||
} else {
|
||||
mempool.cache = nopTxCache{}
|
||||
mp.cache = mempool.NopTxCache{}
|
||||
}
|
||||
proxyAppConn.SetResponseCallback(mempool.globalCb)
|
||||
|
||||
proxyAppConn.SetResponseCallback(mp.globalCb)
|
||||
|
||||
for _, option := range options {
|
||||
option(mempool)
|
||||
option(mp)
|
||||
}
|
||||
return mempool
|
||||
|
||||
return mp
|
||||
}
|
||||
|
||||
// NOTE: not thread safe - should only be called once, on startup
|
||||
@@ -120,49 +114,22 @@ func (mem *CListMempool) SetLogger(l log.Logger) {
|
||||
// WithPreCheck sets a filter for the mempool to reject a tx if f(tx) returns
|
||||
// false. This is ran before CheckTx. Only applies to the first created block.
|
||||
// After that, Update overwrites the existing value.
|
||||
func WithPreCheck(f PreCheckFunc) CListMempoolOption {
|
||||
func WithPreCheck(f mempool.PreCheckFunc) CListMempoolOption {
|
||||
return func(mem *CListMempool) { mem.preCheck = f }
|
||||
}
|
||||
|
||||
// WithPostCheck sets a filter for the mempool to reject a tx if f(tx) returns
|
||||
// false. This is ran after CheckTx. Only applies to the first created block.
|
||||
// After that, Update overwrites the existing value.
|
||||
func WithPostCheck(f PostCheckFunc) CListMempoolOption {
|
||||
func WithPostCheck(f mempool.PostCheckFunc) CListMempoolOption {
|
||||
return func(mem *CListMempool) { mem.postCheck = f }
|
||||
}
|
||||
|
||||
// WithMetrics sets the metrics.
|
||||
func WithMetrics(metrics *Metrics) CListMempoolOption {
|
||||
func WithMetrics(metrics *mempool.Metrics) CListMempoolOption {
|
||||
return func(mem *CListMempool) { mem.metrics = metrics }
|
||||
}
|
||||
|
||||
func (mem *CListMempool) InitWAL() error {
|
||||
var (
|
||||
walDir = mem.config.WalDir()
|
||||
walFile = walDir + "/wal"
|
||||
)
|
||||
|
||||
const perm = 0700
|
||||
if err := tmos.EnsureDir(walDir, perm); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
af, err := auto.OpenAutoFile(walFile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't open autofile %s: %w", walFile, err)
|
||||
}
|
||||
|
||||
mem.wal = af
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mem *CListMempool) CloseWAL() {
|
||||
if err := mem.wal.Close(); err != nil {
|
||||
mem.logger.Error("Error closing WAL", "err", err)
|
||||
}
|
||||
mem.wal = nil
|
||||
}
|
||||
|
||||
// Safe for concurrent use by multiple goroutines.
|
||||
func (mem *CListMempool) Lock() {
|
||||
mem.updateMtx.Lock()
|
||||
@@ -179,7 +146,7 @@ func (mem *CListMempool) Size() int {
|
||||
}
|
||||
|
||||
// Safe for concurrent use by multiple goroutines.
|
||||
func (mem *CListMempool) TxsBytes() int64 {
|
||||
func (mem *CListMempool) SizeBytes() int64 {
|
||||
return atomic.LoadInt64(&mem.txsBytes)
|
||||
}
|
||||
|
||||
@@ -231,7 +198,12 @@ func (mem *CListMempool) TxsWaitChan() <-chan struct{} {
|
||||
// CONTRACT: Either cb will get called, or err returned.
|
||||
//
|
||||
// Safe for concurrent use by multiple goroutines.
|
||||
func (mem *CListMempool) CheckTx(tx types.Tx, cb func(*abci.Response), txInfo TxInfo) error {
|
||||
func (mem *CListMempool) CheckTx(
|
||||
tx types.Tx,
|
||||
cb func(*abci.Response),
|
||||
txInfo mempool.TxInfo,
|
||||
) error {
|
||||
|
||||
mem.updateMtx.RLock()
|
||||
// use defer to unlock mutex because application (*local client*) might panic
|
||||
defer mem.updateMtx.RUnlock()
|
||||
@@ -243,24 +215,17 @@ func (mem *CListMempool) CheckTx(tx types.Tx, cb func(*abci.Response), txInfo Tx
|
||||
}
|
||||
|
||||
if txSize > mem.config.MaxTxBytes {
|
||||
return ErrTxTooLarge{mem.config.MaxTxBytes, txSize}
|
||||
return mempool.ErrTxTooLarge{
|
||||
Max: mem.config.MaxTxBytes,
|
||||
Actual: txSize,
|
||||
}
|
||||
}
|
||||
|
||||
if mem.preCheck != nil {
|
||||
if err := mem.preCheck(tx); err != nil {
|
||||
return ErrPreCheck{err}
|
||||
}
|
||||
}
|
||||
|
||||
// NOTE: writing to the WAL and calling proxy must be done before adding tx
|
||||
// to the cache. otherwise, if either of them fails, next time CheckTx is
|
||||
// called with tx, ErrTxInCache will be returned without tx being checked at
|
||||
// all even once.
|
||||
if mem.wal != nil {
|
||||
// TODO: Notify administrators when WAL fails
|
||||
_, err := mem.wal.Write(append([]byte(tx), newline...))
|
||||
if err != nil {
|
||||
return fmt.Errorf("wal.Write: %w", err)
|
||||
return mempool.ErrPreCheck{
|
||||
Reason: err,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -269,20 +234,19 @@ func (mem *CListMempool) CheckTx(tx types.Tx, cb func(*abci.Response), txInfo Tx
|
||||
return err
|
||||
}
|
||||
|
||||
if !mem.cache.Push(tx) {
|
||||
if !mem.cache.Push(tx) { // if the transaction already exists in the cache
|
||||
// Record a new sender for a tx we've already seen.
|
||||
// Note it's possible a tx is still in the cache but no longer in the mempool
|
||||
// (eg. after committing a block, txs are removed from mempool but not cache),
|
||||
// so we only record the sender for txs still in the mempool.
|
||||
if e, ok := mem.txsMap.Load(TxKey(tx)); ok {
|
||||
if e, ok := mem.txsMap.Load(tx.Key()); ok {
|
||||
memTx := e.(*clist.CElement).Value.(*mempoolTx)
|
||||
memTx.senders.LoadOrStore(txInfo.SenderID, true)
|
||||
// TODO: consider punishing peer for dups,
|
||||
// its non-trivial since invalid txs can become valid,
|
||||
// but they can spam the same tx with little cost to them atm.
|
||||
}
|
||||
|
||||
return ErrTxInCache
|
||||
return mempool.ErrTxInCache
|
||||
}
|
||||
|
||||
reqRes := mem.proxyAppConn.CheckTxAsync(abci.RequestCheckTx{Tx: tx})
|
||||
@@ -349,7 +313,7 @@ func (mem *CListMempool) reqResCb(
|
||||
// - resCbFirstTime (lock not held) if tx is valid
|
||||
func (mem *CListMempool) addTx(memTx *mempoolTx) {
|
||||
e := mem.txs.PushBack(memTx)
|
||||
mem.txsMap.Store(TxKey(memTx.tx), e)
|
||||
mem.txsMap.Store(memTx.tx.Key(), e)
|
||||
atomic.AddInt64(&mem.txsBytes, int64(len(memTx.tx)))
|
||||
mem.metrics.TxSizeBytes.Observe(float64(len(memTx.tx)))
|
||||
}
|
||||
@@ -360,7 +324,7 @@ func (mem *CListMempool) addTx(memTx *mempoolTx) {
|
||||
func (mem *CListMempool) removeTx(tx types.Tx, elem *clist.CElement, removeFromCache bool) {
|
||||
mem.txs.Remove(elem)
|
||||
elem.DetachPrev()
|
||||
mem.txsMap.Delete(TxKey(tx))
|
||||
mem.txsMap.Delete(tx.Key())
|
||||
atomic.AddInt64(&mem.txsBytes, int64(-len(tx)))
|
||||
|
||||
if removeFromCache {
|
||||
@@ -369,25 +333,30 @@ func (mem *CListMempool) removeTx(tx types.Tx, elem *clist.CElement, removeFromC
|
||||
}
|
||||
|
||||
// RemoveTxByKey removes a transaction from the mempool by its TxKey index.
|
||||
func (mem *CListMempool) RemoveTxByKey(txKey [TxKeySize]byte, removeFromCache bool) {
|
||||
func (mem *CListMempool) RemoveTxByKey(txKey types.TxKey) error {
|
||||
if e, ok := mem.txsMap.Load(txKey); ok {
|
||||
memTx := e.(*clist.CElement).Value.(*mempoolTx)
|
||||
if memTx != nil {
|
||||
mem.removeTx(memTx.tx, e.(*clist.CElement), removeFromCache)
|
||||
mem.removeTx(memTx.tx, e.(*clist.CElement), false)
|
||||
return nil
|
||||
}
|
||||
return errors.New("transaction not found")
|
||||
}
|
||||
return errors.New("invalid transaction found")
|
||||
}
|
||||
|
||||
func (mem *CListMempool) isFull(txSize int) error {
|
||||
var (
|
||||
memSize = mem.Size()
|
||||
txsBytes = mem.TxsBytes()
|
||||
txsBytes = mem.SizeBytes()
|
||||
)
|
||||
|
||||
if memSize >= mem.config.Size || int64(txSize)+txsBytes > mem.config.MaxTxsBytes {
|
||||
return ErrMempoolIsFull{
|
||||
memSize, mem.config.Size,
|
||||
txsBytes, mem.config.MaxTxsBytes,
|
||||
return mempool.ErrMempoolIsFull{
|
||||
NumTxs: memSize,
|
||||
MaxTxs: mem.config.Size,
|
||||
TxsBytes: txsBytes,
|
||||
MaxTxsBytes: mem.config.MaxTxsBytes,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -427,8 +396,9 @@ func (mem *CListMempool) resCbFirstTime(
|
||||
}
|
||||
memTx.senders.Store(peerID, true)
|
||||
mem.addTx(memTx)
|
||||
mem.logger.Debug("added good transaction",
|
||||
"tx", txID(tx),
|
||||
mem.logger.Debug(
|
||||
"added good transaction",
|
||||
"tx", types.Tx(tx).Hash(),
|
||||
"res", r,
|
||||
"height", memTx.height,
|
||||
"total", mem.Size(),
|
||||
@@ -436,14 +406,21 @@ func (mem *CListMempool) resCbFirstTime(
|
||||
mem.notifyTxsAvailable()
|
||||
} else {
|
||||
// ignore bad transaction
|
||||
mem.logger.Debug("rejected bad transaction",
|
||||
"tx", txID(tx), "peerID", peerP2PID, "res", r, "err", postCheckErr)
|
||||
mem.logger.Debug(
|
||||
"rejected bad transaction",
|
||||
"tx", types.Tx(tx).Hash(),
|
||||
"peerID", peerP2PID,
|
||||
"res", r,
|
||||
"err", postCheckErr,
|
||||
)
|
||||
mem.metrics.FailedTxs.Add(1)
|
||||
|
||||
if !mem.config.KeepInvalidTxsInCache {
|
||||
// remove from cache (it might be good later)
|
||||
mem.cache.Remove(tx)
|
||||
}
|
||||
}
|
||||
|
||||
default:
|
||||
// ignore other messages
|
||||
}
|
||||
@@ -458,21 +435,45 @@ func (mem *CListMempool) resCbRecheck(req *abci.Request, res *abci.Response) {
|
||||
case *abci.Response_CheckTx:
|
||||
tx := req.GetCheckTx().Tx
|
||||
memTx := mem.recheckCursor.Value.(*mempoolTx)
|
||||
if !bytes.Equal(tx, memTx.tx) {
|
||||
panic(fmt.Sprintf(
|
||||
"Unexpected tx response from proxy during recheck\nExpected %X, got %X",
|
||||
memTx.tx,
|
||||
tx))
|
||||
|
||||
// Search through the remaining list of tx to recheck for a transaction that matches
|
||||
// the one we received from the ABCI application.
|
||||
for {
|
||||
if bytes.Equal(tx, memTx.tx) {
|
||||
// We've found a tx in the recheck list that matches the tx that we
|
||||
// received from the ABCI application.
|
||||
// Break, and use this transaction for further checks.
|
||||
break
|
||||
}
|
||||
|
||||
mem.logger.Error(
|
||||
"re-CheckTx transaction mismatch",
|
||||
"got", types.Tx(tx),
|
||||
"expected", memTx.tx,
|
||||
)
|
||||
|
||||
if mem.recheckCursor == mem.recheckEnd {
|
||||
// we reached the end of the recheckTx list without finding a tx
|
||||
// matching the one we received from the ABCI application.
|
||||
// Return without processing any tx.
|
||||
mem.recheckCursor = nil
|
||||
return
|
||||
}
|
||||
|
||||
mem.recheckCursor = mem.recheckCursor.Next()
|
||||
memTx = mem.recheckCursor.Value.(*mempoolTx)
|
||||
}
|
||||
|
||||
var postCheckErr error
|
||||
if mem.postCheck != nil {
|
||||
postCheckErr = mem.postCheck(tx, r.CheckTx)
|
||||
}
|
||||
|
||||
if (r.CheckTx.Code == abci.CodeTypeOK) && postCheckErr == nil {
|
||||
// Good, nothing to do.
|
||||
} else {
|
||||
// Tx became invalidated due to newly committed block.
|
||||
mem.logger.Debug("tx is no longer valid", "tx", txID(tx), "res", r, "err", postCheckErr)
|
||||
mem.logger.Debug("tx is no longer valid", "tx", types.Tx(tx).Hash(), "res", r, "err", postCheckErr)
|
||||
// NOTE: we remove tx from the cache because it might be good later
|
||||
mem.removeTx(tx, mem.recheckCursor, !mem.config.KeepInvalidTxsInCache)
|
||||
}
|
||||
@@ -519,7 +520,10 @@ func (mem *CListMempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs {
|
||||
mem.updateMtx.RLock()
|
||||
defer mem.updateMtx.RUnlock()
|
||||
|
||||
var totalGas int64
|
||||
var (
|
||||
totalGas int64
|
||||
runningSize int64
|
||||
)
|
||||
|
||||
// TODO: we will get a performance boost if we have a good estimate of avg
|
||||
// size per tx, and set the initial capacity based off of that.
|
||||
@@ -528,22 +532,26 @@ func (mem *CListMempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs {
|
||||
for e := mem.txs.Front(); e != nil; e = e.Next() {
|
||||
memTx := e.Value.(*mempoolTx)
|
||||
|
||||
dataSize := types.ComputeProtoSizeForTxs(append(txs, memTx.tx))
|
||||
txs = append(txs, memTx.tx)
|
||||
|
||||
dataSize := types.ComputeProtoSizeForTxs([]types.Tx{memTx.tx})
|
||||
|
||||
// Check total size requirement
|
||||
if maxBytes > -1 && dataSize > maxBytes {
|
||||
return txs
|
||||
if maxBytes > -1 && runningSize+dataSize > maxBytes {
|
||||
return txs[:len(txs)-1]
|
||||
}
|
||||
|
||||
runningSize += dataSize
|
||||
|
||||
// Check total gas requirement.
|
||||
// If maxGas is negative, skip this check.
|
||||
// Since newTotalGas < masGas, which
|
||||
// must be non-negative, it follows that this won't overflow.
|
||||
newTotalGas := totalGas + memTx.gasWanted
|
||||
if maxGas > -1 && newTotalGas > maxGas {
|
||||
return txs
|
||||
return txs[:len(txs)-1]
|
||||
}
|
||||
totalGas = newTotalGas
|
||||
txs = append(txs, memTx.tx)
|
||||
}
|
||||
return txs
|
||||
}
|
||||
@@ -570,8 +578,8 @@ func (mem *CListMempool) Update(
|
||||
height int64,
|
||||
txs types.Txs,
|
||||
deliverTxResponses []*abci.ResponseDeliverTx,
|
||||
preCheck PreCheckFunc,
|
||||
postCheck PostCheckFunc,
|
||||
preCheck mempool.PreCheckFunc,
|
||||
postCheck mempool.PostCheckFunc,
|
||||
) error {
|
||||
// Set height
|
||||
mem.height = height
|
||||
@@ -603,7 +611,7 @@ func (mem *CListMempool) Update(
|
||||
// Mempool after:
|
||||
// 100
|
||||
// https://github.com/tendermint/tendermint/issues/3322.
|
||||
if e, ok := mem.txsMap.Load(TxKey(tx)); ok {
|
||||
if e, ok := mem.txsMap.Load(tx.Key()); ok {
|
||||
mem.removeTx(tx, e.(*clist.CElement), false)
|
||||
}
|
||||
}
|
||||
@@ -666,98 +674,3 @@ type mempoolTx struct {
|
||||
func (memTx *mempoolTx) Height() int64 {
|
||||
return atomic.LoadInt64(&memTx.height)
|
||||
}
|
||||
|
||||
//--------------------------------------------------------------------------------
|
||||
|
||||
type txCache interface {
|
||||
Reset()
|
||||
Push(tx types.Tx) bool
|
||||
Remove(tx types.Tx)
|
||||
}
|
||||
|
||||
// mapTxCache maintains a LRU cache of transactions. This only stores the hash
|
||||
// of the tx, due to memory concerns.
|
||||
type mapTxCache struct {
|
||||
mtx tmsync.Mutex
|
||||
size int
|
||||
cacheMap map[[TxKeySize]byte]*list.Element
|
||||
list *list.List
|
||||
}
|
||||
|
||||
var _ txCache = (*mapTxCache)(nil)
|
||||
|
||||
// newMapTxCache returns a new mapTxCache.
|
||||
func newMapTxCache(cacheSize int) *mapTxCache {
|
||||
return &mapTxCache{
|
||||
size: cacheSize,
|
||||
cacheMap: make(map[[TxKeySize]byte]*list.Element, cacheSize),
|
||||
list: list.New(),
|
||||
}
|
||||
}
|
||||
|
||||
// Reset resets the cache to an empty state.
|
||||
func (cache *mapTxCache) Reset() {
|
||||
cache.mtx.Lock()
|
||||
cache.cacheMap = make(map[[TxKeySize]byte]*list.Element, cache.size)
|
||||
cache.list.Init()
|
||||
cache.mtx.Unlock()
|
||||
}
|
||||
|
||||
// Push adds the given tx to the cache and returns true. It returns
|
||||
// false if tx is already in the cache.
|
||||
func (cache *mapTxCache) Push(tx types.Tx) bool {
|
||||
cache.mtx.Lock()
|
||||
defer cache.mtx.Unlock()
|
||||
|
||||
// Use the tx hash in the cache
|
||||
txHash := TxKey(tx)
|
||||
if moved, exists := cache.cacheMap[txHash]; exists {
|
||||
cache.list.MoveToBack(moved)
|
||||
return false
|
||||
}
|
||||
|
||||
if cache.list.Len() >= cache.size {
|
||||
popped := cache.list.Front()
|
||||
if popped != nil {
|
||||
poppedTxHash := popped.Value.([TxKeySize]byte)
|
||||
delete(cache.cacheMap, poppedTxHash)
|
||||
cache.list.Remove(popped)
|
||||
}
|
||||
}
|
||||
e := cache.list.PushBack(txHash)
|
||||
cache.cacheMap[txHash] = e
|
||||
return true
|
||||
}
|
||||
|
||||
// Remove removes the given tx from the cache.
|
||||
func (cache *mapTxCache) Remove(tx types.Tx) {
|
||||
cache.mtx.Lock()
|
||||
txHash := TxKey(tx)
|
||||
popped := cache.cacheMap[txHash]
|
||||
delete(cache.cacheMap, txHash)
|
||||
if popped != nil {
|
||||
cache.list.Remove(popped)
|
||||
}
|
||||
|
||||
cache.mtx.Unlock()
|
||||
}
|
||||
|
||||
type nopTxCache struct{}
|
||||
|
||||
var _ txCache = (*nopTxCache)(nil)
|
||||
|
||||
func (nopTxCache) Reset() {}
|
||||
func (nopTxCache) Push(types.Tx) bool { return true }
|
||||
func (nopTxCache) Remove(types.Tx) {}
|
||||
|
||||
//--------------------------------------------------------------------------------
|
||||
|
||||
// TxKey is the fixed length array hash used as the key in maps.
|
||||
func TxKey(tx types.Tx) [TxKeySize]byte {
|
||||
return sha256.Sum256(tx)
|
||||
}
|
||||
|
||||
// txID is a hash of the Tx.
|
||||
func txID(tx []byte) []byte {
|
||||
return types.Tx(tx).Hash()
|
||||
}
|
||||
@@ -1,30 +1,30 @@
|
||||
package mempool
|
||||
package v0
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
mrand "math/rand"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
gogotypes "github.com/gogo/protobuf/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/tendermint/tendermint/abci/example/counter"
|
||||
abciclient "github.com/tendermint/tendermint/abci/client"
|
||||
abciclimocks "github.com/tendermint/tendermint/abci/client/mocks"
|
||||
"github.com/tendermint/tendermint/abci/example/kvstore"
|
||||
abciserver "github.com/tendermint/tendermint/abci/server"
|
||||
abci "github.com/tendermint/tendermint/abci/types"
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmrand "github.com/tendermint/tendermint/libs/rand"
|
||||
"github.com/tendermint/tendermint/libs/service"
|
||||
"github.com/tendermint/tendermint/mempool"
|
||||
"github.com/tendermint/tendermint/proxy"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
@@ -33,20 +33,48 @@ import (
|
||||
// test.
|
||||
type cleanupFunc func()
|
||||
|
||||
func newMempoolWithApp(cc proxy.ClientCreator) (*CListMempool, cleanupFunc) {
|
||||
return newMempoolWithAppAndConfig(cc, cfg.ResetTestRoot("mempool_test"))
|
||||
func newMempoolWithAppMock(cc proxy.ClientCreator, client abciclient.Client) (*CListMempool, cleanupFunc, error) {
|
||||
conf := config.ResetTestRoot("mempool_test")
|
||||
|
||||
mp, cu := newMempoolWithAppAndConfigMock(cc, conf, client)
|
||||
return mp, cu, nil
|
||||
}
|
||||
|
||||
func newMempoolWithAppAndConfig(cc proxy.ClientCreator, config *cfg.Config) (*CListMempool, cleanupFunc) {
|
||||
func newMempoolWithAppAndConfigMock(cc proxy.ClientCreator,
|
||||
cfg *config.Config,
|
||||
client abciclient.Client) (*CListMempool, cleanupFunc) {
|
||||
appConnMem := client
|
||||
appConnMem.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "mempool"))
|
||||
err := appConnMem.Start()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
mp := NewCListMempool(cfg.Mempool, appConnMem, 0)
|
||||
mp.SetLogger(log.TestingLogger())
|
||||
|
||||
return mp, func() { os.RemoveAll(cfg.RootDir) }
|
||||
}
|
||||
|
||||
func newMempoolWithApp(cc proxy.ClientCreator) (*CListMempool, cleanupFunc) {
|
||||
conf := config.ResetTestRoot("mempool_test")
|
||||
|
||||
mp, cu := newMempoolWithAppAndConfig(cc, conf)
|
||||
return mp, cu
|
||||
}
|
||||
|
||||
func newMempoolWithAppAndConfig(cc proxy.ClientCreator, cfg *config.Config) (*CListMempool, cleanupFunc) {
|
||||
appConnMem, _ := cc.NewABCIClient()
|
||||
appConnMem.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "mempool"))
|
||||
err := appConnMem.Start()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
mempool := NewCListMempool(config.Mempool, appConnMem, 0)
|
||||
mempool.SetLogger(log.TestingLogger())
|
||||
return mempool, func() { os.RemoveAll(config.RootDir) }
|
||||
|
||||
mp := NewCListMempool(cfg.Mempool, appConnMem, 0)
|
||||
mp.SetLogger(log.TestingLogger())
|
||||
|
||||
return mp, func() { os.RemoveAll(cfg.RootDir) }
|
||||
}
|
||||
|
||||
func ensureNoFire(t *testing.T, ch <-chan struct{}, timeoutMS int) {
|
||||
@@ -67,9 +95,9 @@ func ensureFire(t *testing.T, ch <-chan struct{}, timeoutMS int) {
|
||||
}
|
||||
}
|
||||
|
||||
func checkTxs(t *testing.T, mempool Mempool, count int, peerID uint16) types.Txs {
|
||||
func checkTxs(t *testing.T, mp mempool.Mempool, count int, peerID uint16) types.Txs {
|
||||
txs := make(types.Txs, count)
|
||||
txInfo := TxInfo{SenderID: peerID}
|
||||
txInfo := mempool.TxInfo{SenderID: peerID}
|
||||
for i := 0; i < count; i++ {
|
||||
txBytes := make([]byte, 20)
|
||||
txs[i] = txBytes
|
||||
@@ -77,11 +105,11 @@ func checkTxs(t *testing.T, mempool Mempool, count int, peerID uint16) types.Txs
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if err := mempool.CheckTx(txBytes, nil, txInfo); err != nil {
|
||||
if err := mp.CheckTx(txBytes, nil, txInfo); err != nil {
|
||||
// Skip invalid txs.
|
||||
// TestMempoolFilters will fail otherwise. It asserts a number of txs
|
||||
// returned.
|
||||
if IsPreCheckError(err) {
|
||||
if mempool.IsPreCheckError(err) {
|
||||
continue
|
||||
}
|
||||
t.Fatalf("CheckTx failed: %v while checking #%d tx", err, i)
|
||||
@@ -93,18 +121,18 @@ func checkTxs(t *testing.T, mempool Mempool, count int, peerID uint16) types.Txs
|
||||
func TestReapMaxBytesMaxGas(t *testing.T) {
|
||||
app := kvstore.NewApplication()
|
||||
cc := proxy.NewLocalClientCreator(app)
|
||||
mempool, cleanup := newMempoolWithApp(cc)
|
||||
mp, cleanup := newMempoolWithApp(cc)
|
||||
defer cleanup()
|
||||
|
||||
// Ensure gas calculation behaves as expected
|
||||
checkTxs(t, mempool, 1, UnknownPeerID)
|
||||
tx0 := mempool.TxsFront().Value.(*mempoolTx)
|
||||
checkTxs(t, mp, 1, mempool.UnknownPeerID)
|
||||
tx0 := mp.TxsFront().Value.(*mempoolTx)
|
||||
// assert that kv store has gas wanted = 1.
|
||||
require.Equal(t, app.CheckTx(abci.RequestCheckTx{Tx: tx0.tx}).GasWanted, int64(1), "KVStore had a gas value neq to 1")
|
||||
require.Equal(t, tx0.gasWanted, int64(1), "transactions gas was set incorrectly")
|
||||
// ensure each tx is 20 bytes long
|
||||
require.Equal(t, len(tx0.tx), 20, "Tx is longer than 20 bytes")
|
||||
mempool.Flush()
|
||||
mp.Flush()
|
||||
|
||||
// each table driven test creates numTxsToCreate txs with checkTx, and at the end clears all remaining txs.
|
||||
// each tx has 20 bytes
|
||||
@@ -131,18 +159,18 @@ func TestReapMaxBytesMaxGas(t *testing.T) {
|
||||
{20, 20000, 30, 20},
|
||||
}
|
||||
for tcIndex, tt := range tests {
|
||||
checkTxs(t, mempool, tt.numTxsToCreate, UnknownPeerID)
|
||||
got := mempool.ReapMaxBytesMaxGas(tt.maxBytes, tt.maxGas)
|
||||
checkTxs(t, mp, tt.numTxsToCreate, mempool.UnknownPeerID)
|
||||
got := mp.ReapMaxBytesMaxGas(tt.maxBytes, tt.maxGas)
|
||||
assert.Equal(t, tt.expectedNumTxs, len(got), "Got %d txs, expected %d, tc #%d",
|
||||
len(got), tt.expectedNumTxs, tcIndex)
|
||||
mempool.Flush()
|
||||
mp.Flush()
|
||||
}
|
||||
}
|
||||
|
||||
func TestMempoolFilters(t *testing.T) {
|
||||
app := kvstore.NewApplication()
|
||||
cc := proxy.NewLocalClientCreator(app)
|
||||
mempool, cleanup := newMempoolWithApp(cc)
|
||||
mp, cleanup := newMempoolWithApp(cc)
|
||||
defer cleanup()
|
||||
emptyTxArr := []types.Tx{[]byte{}}
|
||||
|
||||
@@ -153,75 +181,125 @@ func TestMempoolFilters(t *testing.T) {
|
||||
// each tx has 20 bytes
|
||||
tests := []struct {
|
||||
numTxsToCreate int
|
||||
preFilter PreCheckFunc
|
||||
postFilter PostCheckFunc
|
||||
preFilter mempool.PreCheckFunc
|
||||
postFilter mempool.PostCheckFunc
|
||||
expectedNumTxs int
|
||||
}{
|
||||
{10, nopPreFilter, nopPostFilter, 10},
|
||||
{10, PreCheckMaxBytes(10), nopPostFilter, 0},
|
||||
{10, PreCheckMaxBytes(22), nopPostFilter, 10},
|
||||
{10, nopPreFilter, PostCheckMaxGas(-1), 10},
|
||||
{10, nopPreFilter, PostCheckMaxGas(0), 0},
|
||||
{10, nopPreFilter, PostCheckMaxGas(1), 10},
|
||||
{10, nopPreFilter, PostCheckMaxGas(3000), 10},
|
||||
{10, PreCheckMaxBytes(10), PostCheckMaxGas(20), 0},
|
||||
{10, PreCheckMaxBytes(30), PostCheckMaxGas(20), 10},
|
||||
{10, PreCheckMaxBytes(22), PostCheckMaxGas(1), 10},
|
||||
{10, PreCheckMaxBytes(22), PostCheckMaxGas(0), 0},
|
||||
{10, mempool.PreCheckMaxBytes(10), nopPostFilter, 0},
|
||||
{10, mempool.PreCheckMaxBytes(22), nopPostFilter, 10},
|
||||
{10, nopPreFilter, mempool.PostCheckMaxGas(-1), 10},
|
||||
{10, nopPreFilter, mempool.PostCheckMaxGas(0), 0},
|
||||
{10, nopPreFilter, mempool.PostCheckMaxGas(1), 10},
|
||||
{10, nopPreFilter, mempool.PostCheckMaxGas(3000), 10},
|
||||
{10, mempool.PreCheckMaxBytes(10), mempool.PostCheckMaxGas(20), 0},
|
||||
{10, mempool.PreCheckMaxBytes(30), mempool.PostCheckMaxGas(20), 10},
|
||||
{10, mempool.PreCheckMaxBytes(22), mempool.PostCheckMaxGas(1), 10},
|
||||
{10, mempool.PreCheckMaxBytes(22), mempool.PostCheckMaxGas(0), 0},
|
||||
}
|
||||
for tcIndex, tt := range tests {
|
||||
err := mempool.Update(1, emptyTxArr, abciResponses(len(emptyTxArr), abci.CodeTypeOK), tt.preFilter, tt.postFilter)
|
||||
err := mp.Update(1, emptyTxArr, abciResponses(len(emptyTxArr), abci.CodeTypeOK), tt.preFilter, tt.postFilter)
|
||||
require.NoError(t, err)
|
||||
checkTxs(t, mempool, tt.numTxsToCreate, UnknownPeerID)
|
||||
require.Equal(t, tt.expectedNumTxs, mempool.Size(), "mempool had the incorrect size, on test case %d", tcIndex)
|
||||
mempool.Flush()
|
||||
checkTxs(t, mp, tt.numTxsToCreate, mempool.UnknownPeerID)
|
||||
require.Equal(t, tt.expectedNumTxs, mp.Size(), "mempool had the incorrect size, on test case %d", tcIndex)
|
||||
mp.Flush()
|
||||
}
|
||||
}
|
||||
|
||||
func TestMempoolUpdate(t *testing.T) {
|
||||
app := kvstore.NewApplication()
|
||||
cc := proxy.NewLocalClientCreator(app)
|
||||
mempool, cleanup := newMempoolWithApp(cc)
|
||||
mp, cleanup := newMempoolWithApp(cc)
|
||||
defer cleanup()
|
||||
|
||||
// 1. Adds valid txs to the cache
|
||||
{
|
||||
err := mempool.Update(1, []types.Tx{[]byte{0x01}}, abciResponses(1, abci.CodeTypeOK), nil, nil)
|
||||
err := mp.Update(1, []types.Tx{[]byte{0x01}}, abciResponses(1, abci.CodeTypeOK), nil, nil)
|
||||
require.NoError(t, err)
|
||||
err = mempool.CheckTx([]byte{0x01}, nil, TxInfo{})
|
||||
err = mp.CheckTx([]byte{0x01}, nil, mempool.TxInfo{})
|
||||
if assert.Error(t, err) {
|
||||
assert.Equal(t, ErrTxInCache, err)
|
||||
assert.Equal(t, mempool.ErrTxInCache, err)
|
||||
}
|
||||
}
|
||||
|
||||
// 2. Removes valid txs from the mempool
|
||||
{
|
||||
err := mempool.CheckTx([]byte{0x02}, nil, TxInfo{})
|
||||
err := mp.CheckTx([]byte{0x02}, nil, mempool.TxInfo{})
|
||||
require.NoError(t, err)
|
||||
err = mempool.Update(1, []types.Tx{[]byte{0x02}}, abciResponses(1, abci.CodeTypeOK), nil, nil)
|
||||
err = mp.Update(1, []types.Tx{[]byte{0x02}}, abciResponses(1, abci.CodeTypeOK), nil, nil)
|
||||
require.NoError(t, err)
|
||||
assert.Zero(t, mempool.Size())
|
||||
assert.Zero(t, mp.Size())
|
||||
}
|
||||
|
||||
// 3. Removes invalid transactions from the cache and the mempool (if present)
|
||||
{
|
||||
err := mempool.CheckTx([]byte{0x03}, nil, TxInfo{})
|
||||
err := mp.CheckTx([]byte{0x03}, nil, mempool.TxInfo{})
|
||||
require.NoError(t, err)
|
||||
err = mempool.Update(1, []types.Tx{[]byte{0x03}}, abciResponses(1, 1), nil, nil)
|
||||
err = mp.Update(1, []types.Tx{[]byte{0x03}}, abciResponses(1, 1), nil, nil)
|
||||
require.NoError(t, err)
|
||||
assert.Zero(t, mempool.Size())
|
||||
assert.Zero(t, mp.Size())
|
||||
|
||||
err = mempool.CheckTx([]byte{0x03}, nil, TxInfo{})
|
||||
err = mp.CheckTx([]byte{0x03}, nil, mempool.TxInfo{})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMempool_KeepInvalidTxsInCache(t *testing.T) {
|
||||
app := counter.NewApplication(true)
|
||||
func TestMempoolUpdateDoesNotPanicWhenApplicationMissedTx(t *testing.T) {
|
||||
var callback abciclient.Callback
|
||||
mockClient := new(abciclimocks.Client)
|
||||
mockClient.On("Start").Return(nil)
|
||||
mockClient.On("SetLogger", mock.Anything)
|
||||
|
||||
mockClient.On("Error").Return(nil).Times(4)
|
||||
mockClient.On("FlushAsync", mock.Anything).Return(abciclient.NewReqRes(abci.ToRequestFlush()), nil)
|
||||
mockClient.On("SetResponseCallback", mock.MatchedBy(func(cb abciclient.Callback) bool { callback = cb; return true }))
|
||||
|
||||
app := kvstore.NewApplication()
|
||||
cc := proxy.NewLocalClientCreator(app)
|
||||
wcfg := cfg.DefaultConfig()
|
||||
mp, cleanup, err := newMempoolWithAppMock(cc, mockClient)
|
||||
require.NoError(t, err)
|
||||
defer cleanup()
|
||||
|
||||
// Add 4 transactions to the mempool by calling the mempool's `CheckTx` on each of them.
|
||||
txs := []types.Tx{[]byte{0x01}, []byte{0x02}, []byte{0x03}, []byte{0x04}}
|
||||
for _, tx := range txs {
|
||||
reqRes := abciclient.NewReqRes(abci.ToRequestCheckTx(abci.RequestCheckTx{Tx: tx}))
|
||||
reqRes.Response = abci.ToResponseCheckTx(abci.ResponseCheckTx{Code: abci.CodeTypeOK})
|
||||
|
||||
mockClient.On("CheckTxAsync", mock.Anything, mock.Anything).Return(reqRes, nil)
|
||||
err := mp.CheckTx(tx, nil, mempool.TxInfo{})
|
||||
require.NoError(t, err)
|
||||
|
||||
// ensure that the callback that the mempool sets on the ReqRes is run.
|
||||
reqRes.InvokeCallback()
|
||||
}
|
||||
|
||||
// Calling update to remove the first transaction from the mempool.
|
||||
// This call also triggers the mempool to recheck its remaining transactions.
|
||||
err = mp.Update(0, []types.Tx{txs[0]}, abciResponses(1, abci.CodeTypeOK), nil, nil)
|
||||
require.Nil(t, err)
|
||||
|
||||
// The mempool has now sent its requests off to the client to be rechecked
|
||||
// and is waiting for the corresponding callbacks to be called.
|
||||
// We now call the mempool-supplied callback on the first and third transaction.
|
||||
// This simulates the client dropping the second request.
|
||||
// Previous versions of this code panicked when the ABCI application missed
|
||||
// a recheck-tx request.
|
||||
resp := abci.ResponseCheckTx{Code: abci.CodeTypeOK}
|
||||
req := abci.RequestCheckTx{Tx: txs[1]}
|
||||
callback(abci.ToRequestCheckTx(req), abci.ToResponseCheckTx(resp))
|
||||
|
||||
req = abci.RequestCheckTx{Tx: txs[3]}
|
||||
callback(abci.ToRequestCheckTx(req), abci.ToResponseCheckTx(resp))
|
||||
mockClient.AssertExpectations(t)
|
||||
}
|
||||
|
||||
func TestMempool_KeepInvalidTxsInCache(t *testing.T) {
|
||||
app := kvstore.NewApplication()
|
||||
cc := proxy.NewLocalClientCreator(app)
|
||||
wcfg := config.DefaultConfig()
|
||||
wcfg.Mempool.KeepInvalidTxsInCache = true
|
||||
mempool, cleanup := newMempoolWithAppAndConfig(cc, wcfg)
|
||||
mp, cleanup := newMempoolWithAppAndConfig(cc, wcfg)
|
||||
defer cleanup()
|
||||
|
||||
// 1. An invalid transaction must remain in the cache after Update
|
||||
@@ -232,26 +310,26 @@ func TestMempool_KeepInvalidTxsInCache(t *testing.T) {
|
||||
b := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(b, 1)
|
||||
|
||||
err := mempool.CheckTx(b, nil, TxInfo{})
|
||||
err := mp.CheckTx(b, nil, mempool.TxInfo{})
|
||||
require.NoError(t, err)
|
||||
|
||||
// simulate new block
|
||||
_ = app.DeliverTx(abci.RequestDeliverTx{Tx: a})
|
||||
_ = app.DeliverTx(abci.RequestDeliverTx{Tx: b})
|
||||
err = mempool.Update(1, []types.Tx{a, b},
|
||||
err = mp.Update(1, []types.Tx{a, b},
|
||||
[]*abci.ResponseDeliverTx{{Code: abci.CodeTypeOK}, {Code: 2}}, nil, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
// a must be added to the cache
|
||||
err = mempool.CheckTx(a, nil, TxInfo{})
|
||||
err = mp.CheckTx(a, nil, mempool.TxInfo{})
|
||||
if assert.Error(t, err) {
|
||||
assert.Equal(t, ErrTxInCache, err)
|
||||
assert.Equal(t, mempool.ErrTxInCache, err)
|
||||
}
|
||||
|
||||
// b must remain in the cache
|
||||
err = mempool.CheckTx(b, nil, TxInfo{})
|
||||
err = mp.CheckTx(b, nil, mempool.TxInfo{})
|
||||
if assert.Error(t, err) {
|
||||
assert.Equal(t, ErrTxInCache, err)
|
||||
assert.Equal(t, mempool.ErrTxInCache, err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -261,68 +339,62 @@ func TestMempool_KeepInvalidTxsInCache(t *testing.T) {
|
||||
binary.BigEndian.PutUint64(a, 0)
|
||||
|
||||
// remove a from the cache to test (2)
|
||||
mempool.cache.Remove(a)
|
||||
mp.cache.Remove(a)
|
||||
|
||||
err := mempool.CheckTx(a, nil, TxInfo{})
|
||||
err := mp.CheckTx(a, nil, mempool.TxInfo{})
|
||||
require.NoError(t, err)
|
||||
|
||||
err = mempool.CheckTx(a, nil, TxInfo{})
|
||||
if assert.Error(t, err) {
|
||||
assert.Equal(t, ErrTxInCache, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestTxsAvailable(t *testing.T) {
|
||||
app := kvstore.NewApplication()
|
||||
cc := proxy.NewLocalClientCreator(app)
|
||||
mempool, cleanup := newMempoolWithApp(cc)
|
||||
mp, cleanup := newMempoolWithApp(cc)
|
||||
defer cleanup()
|
||||
mempool.EnableTxsAvailable()
|
||||
mp.EnableTxsAvailable()
|
||||
|
||||
timeoutMS := 500
|
||||
|
||||
// with no txs, it shouldnt fire
|
||||
ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)
|
||||
ensureNoFire(t, mp.TxsAvailable(), timeoutMS)
|
||||
|
||||
// send a bunch of txs, it should only fire once
|
||||
txs := checkTxs(t, mempool, 100, UnknownPeerID)
|
||||
ensureFire(t, mempool.TxsAvailable(), timeoutMS)
|
||||
ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)
|
||||
txs := checkTxs(t, mp, 100, mempool.UnknownPeerID)
|
||||
ensureFire(t, mp.TxsAvailable(), timeoutMS)
|
||||
ensureNoFire(t, mp.TxsAvailable(), timeoutMS)
|
||||
|
||||
// call update with half the txs.
|
||||
// it should fire once now for the new height
|
||||
// since there are still txs left
|
||||
committedTxs, txs := txs[:50], txs[50:]
|
||||
if err := mempool.Update(1, committedTxs, abciResponses(len(committedTxs), abci.CodeTypeOK), nil, nil); err != nil {
|
||||
if err := mp.Update(1, committedTxs, abciResponses(len(committedTxs), abci.CodeTypeOK), nil, nil); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
ensureFire(t, mempool.TxsAvailable(), timeoutMS)
|
||||
ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)
|
||||
ensureFire(t, mp.TxsAvailable(), timeoutMS)
|
||||
ensureNoFire(t, mp.TxsAvailable(), timeoutMS)
|
||||
|
||||
// send a bunch more txs. we already fired for this height so it shouldnt fire again
|
||||
moreTxs := checkTxs(t, mempool, 50, UnknownPeerID)
|
||||
ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)
|
||||
moreTxs := checkTxs(t, mp, 50, mempool.UnknownPeerID)
|
||||
ensureNoFire(t, mp.TxsAvailable(), timeoutMS)
|
||||
|
||||
// now call update with all the txs. it should not fire as there are no txs left
|
||||
committedTxs = append(txs, moreTxs...) //nolint: gocritic
|
||||
if err := mempool.Update(2, committedTxs, abciResponses(len(committedTxs), abci.CodeTypeOK), nil, nil); err != nil {
|
||||
committedTxs = append(txs, moreTxs...)
|
||||
if err := mp.Update(2, committedTxs, abciResponses(len(committedTxs), abci.CodeTypeOK), nil, nil); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)
|
||||
ensureNoFire(t, mp.TxsAvailable(), timeoutMS)
|
||||
|
||||
// send a bunch more txs, it should only fire once
|
||||
checkTxs(t, mempool, 100, UnknownPeerID)
|
||||
ensureFire(t, mempool.TxsAvailable(), timeoutMS)
|
||||
ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)
|
||||
checkTxs(t, mp, 100, mempool.UnknownPeerID)
|
||||
ensureFire(t, mp.TxsAvailable(), timeoutMS)
|
||||
ensureNoFire(t, mp.TxsAvailable(), timeoutMS)
|
||||
}
|
||||
|
||||
func TestSerialReap(t *testing.T) {
|
||||
app := counter.NewApplication(true)
|
||||
app.SetOption(abci.RequestSetOption{Key: "serial", Value: "on"})
|
||||
app := kvstore.NewApplication()
|
||||
cc := proxy.NewLocalClientCreator(app)
|
||||
|
||||
mempool, cleanup := newMempoolWithApp(cc)
|
||||
mp, cleanup := newMempoolWithApp(cc)
|
||||
defer cleanup()
|
||||
|
||||
appConnCon, _ := cc.NewABCIClient()
|
||||
@@ -338,7 +410,7 @@ func TestSerialReap(t *testing.T) {
|
||||
// This will succeed
|
||||
txBytes := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(txBytes, uint64(i))
|
||||
err := mempool.CheckTx(txBytes, nil, TxInfo{})
|
||||
err := mp.CheckTx(txBytes, nil, mempool.TxInfo{})
|
||||
_, cached := cacheMap[string(txBytes)]
|
||||
if cached {
|
||||
require.NotNil(t, err, "expected error for cached tx")
|
||||
@@ -348,13 +420,13 @@ func TestSerialReap(t *testing.T) {
|
||||
cacheMap[string(txBytes)] = struct{}{}
|
||||
|
||||
// Duplicates are cached and should return error
|
||||
err = mempool.CheckTx(txBytes, nil, TxInfo{})
|
||||
err = mp.CheckTx(txBytes, nil, mempool.TxInfo{})
|
||||
require.NotNil(t, err, "Expected error after CheckTx on duplicated tx")
|
||||
}
|
||||
}
|
||||
|
||||
reapCheck := func(exp int) {
|
||||
txs := mempool.ReapMaxBytesMaxGas(-1, -1)
|
||||
txs := mp.ReapMaxBytesMaxGas(-1, -1)
|
||||
require.Equal(t, len(txs), exp, fmt.Sprintf("Expected to reap %v txs but got %v", exp, len(txs)))
|
||||
}
|
||||
|
||||
@@ -365,7 +437,7 @@ func TestSerialReap(t *testing.T) {
|
||||
binary.BigEndian.PutUint64(txBytes, uint64(i))
|
||||
txs = append(txs, txBytes)
|
||||
}
|
||||
if err := mempool.Update(0, txs, abciResponses(len(txs), abci.CodeTypeOK), nil, nil); err != nil {
|
||||
if err := mp.Update(0, txs, abciResponses(len(txs), abci.CodeTypeOK), nil, nil); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
@@ -428,58 +500,10 @@ func TestSerialReap(t *testing.T) {
|
||||
reapCheck(600)
|
||||
}
|
||||
|
||||
func TestMempoolCloseWAL(t *testing.T) {
|
||||
// 1. Create the temporary directory for mempool and WAL testing.
|
||||
rootDir, err := ioutil.TempDir("", "mempool-test")
|
||||
require.Nil(t, err, "expecting successful tmpdir creation")
|
||||
|
||||
// 2. Ensure that it doesn't contain any elements -- Sanity check
|
||||
m1, err := filepath.Glob(filepath.Join(rootDir, "*"))
|
||||
require.Nil(t, err, "successful globbing expected")
|
||||
require.Equal(t, 0, len(m1), "no matches yet")
|
||||
|
||||
// 3. Create the mempool
|
||||
wcfg := cfg.DefaultConfig()
|
||||
wcfg.Mempool.RootDir = rootDir
|
||||
app := kvstore.NewApplication()
|
||||
cc := proxy.NewLocalClientCreator(app)
|
||||
mempool, cleanup := newMempoolWithAppAndConfig(cc, wcfg)
|
||||
defer cleanup()
|
||||
mempool.height = 10
|
||||
err = mempool.InitWAL()
|
||||
require.NoError(t, err)
|
||||
|
||||
// 4. Ensure that the directory contains the WAL file
|
||||
m2, err := filepath.Glob(filepath.Join(rootDir, "*"))
|
||||
require.Nil(t, err, "successful globbing expected")
|
||||
require.Equal(t, 1, len(m2), "expecting the wal match in")
|
||||
|
||||
// 5. Write some contents to the WAL
|
||||
err = mempool.CheckTx(types.Tx([]byte("foo")), nil, TxInfo{})
|
||||
require.NoError(t, err)
|
||||
walFilepath := mempool.wal.Path
|
||||
sum1 := checksumFile(walFilepath, t)
|
||||
|
||||
// 6. Sanity check to ensure that the written TX matches the expectation.
|
||||
require.Equal(t, sum1, checksumIt([]byte("foo\n")), "foo with a newline should be written")
|
||||
|
||||
// 7. Invoke CloseWAL() and ensure it discards the
|
||||
// WAL thus any other write won't go through.
|
||||
mempool.CloseWAL()
|
||||
err = mempool.CheckTx(types.Tx([]byte("bar")), nil, TxInfo{})
|
||||
require.NoError(t, err)
|
||||
sum2 := checksumFile(walFilepath, t)
|
||||
require.Equal(t, sum1, sum2, "expected no change to the WAL after invoking CloseWAL() since it was discarded")
|
||||
|
||||
// 8. Sanity check to ensure that the WAL file still exists
|
||||
m3, err := filepath.Glob(filepath.Join(rootDir, "*"))
|
||||
require.Nil(t, err, "successful globbing expected")
|
||||
require.Equal(t, 1, len(m3), "expecting the wal match in")
|
||||
}
|
||||
|
||||
func TestMempool_CheckTxChecksTxSize(t *testing.T) {
|
||||
app := kvstore.NewApplication()
|
||||
cc := proxy.NewLocalClientCreator(app)
|
||||
|
||||
mempl, cleanup := newMempoolWithApp(cc)
|
||||
defer cleanup()
|
||||
|
||||
@@ -505,7 +529,7 @@ func TestMempool_CheckTxChecksTxSize(t *testing.T) {
|
||||
|
||||
tx := tmrand.Bytes(testCase.len)
|
||||
|
||||
err := mempl.CheckTx(tx, nil, TxInfo{})
|
||||
err := mempl.CheckTx(tx, nil, mempool.TxInfo{})
|
||||
bv := gogotypes.BytesValue{Value: tx}
|
||||
bz, err2 := bv.Marshal()
|
||||
require.NoError(t, err2)
|
||||
@@ -514,7 +538,10 @@ func TestMempool_CheckTxChecksTxSize(t *testing.T) {
|
||||
if !testCase.err {
|
||||
require.NoError(t, err, caseString)
|
||||
} else {
|
||||
require.Equal(t, err, ErrTxTooLarge{maxTxSize, testCase.len}, caseString)
|
||||
require.Equal(t, err, mempool.ErrTxTooLarge{
|
||||
Max: maxTxSize,
|
||||
Actual: testCase.len,
|
||||
}, caseString)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -522,52 +549,60 @@ func TestMempool_CheckTxChecksTxSize(t *testing.T) {
|
||||
func TestMempoolTxsBytes(t *testing.T) {
|
||||
app := kvstore.NewApplication()
|
||||
cc := proxy.NewLocalClientCreator(app)
|
||||
config := cfg.ResetTestRoot("mempool_test")
|
||||
config.Mempool.MaxTxsBytes = 10
|
||||
mempool, cleanup := newMempoolWithAppAndConfig(cc, config)
|
||||
|
||||
cfg := config.ResetTestRoot("mempool_test")
|
||||
|
||||
cfg.Mempool.MaxTxsBytes = 10
|
||||
mp, cleanup := newMempoolWithAppAndConfig(cc, cfg)
|
||||
defer cleanup()
|
||||
|
||||
// 1. zero by default
|
||||
assert.EqualValues(t, 0, mempool.TxsBytes())
|
||||
assert.EqualValues(t, 0, mp.SizeBytes())
|
||||
|
||||
// 2. len(tx) after CheckTx
|
||||
err := mempool.CheckTx([]byte{0x01}, nil, TxInfo{})
|
||||
err := mp.CheckTx([]byte{0x01}, nil, mempool.TxInfo{})
|
||||
require.NoError(t, err)
|
||||
assert.EqualValues(t, 1, mempool.TxsBytes())
|
||||
assert.EqualValues(t, 1, mp.SizeBytes())
|
||||
|
||||
// 3. zero again after tx is removed by Update
|
||||
err = mempool.Update(1, []types.Tx{[]byte{0x01}}, abciResponses(1, abci.CodeTypeOK), nil, nil)
|
||||
err = mp.Update(1, []types.Tx{[]byte{0x01}}, abciResponses(1, abci.CodeTypeOK), nil, nil)
|
||||
require.NoError(t, err)
|
||||
assert.EqualValues(t, 0, mempool.TxsBytes())
|
||||
assert.EqualValues(t, 0, mp.SizeBytes())
|
||||
|
||||
// 4. zero after Flush
|
||||
err = mempool.CheckTx([]byte{0x02, 0x03}, nil, TxInfo{})
|
||||
err = mp.CheckTx([]byte{0x02, 0x03}, nil, mempool.TxInfo{})
|
||||
require.NoError(t, err)
|
||||
assert.EqualValues(t, 2, mempool.TxsBytes())
|
||||
assert.EqualValues(t, 2, mp.SizeBytes())
|
||||
|
||||
mempool.Flush()
|
||||
assert.EqualValues(t, 0, mempool.TxsBytes())
|
||||
mp.Flush()
|
||||
assert.EqualValues(t, 0, mp.SizeBytes())
|
||||
|
||||
// 5. ErrMempoolIsFull is returned when/if MaxTxsBytes limit is reached.
|
||||
err = mempool.CheckTx([]byte{0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04}, nil, TxInfo{})
|
||||
err = mp.CheckTx(
|
||||
[]byte{0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04},
|
||||
nil,
|
||||
mempool.TxInfo{},
|
||||
)
|
||||
require.NoError(t, err)
|
||||
err = mempool.CheckTx([]byte{0x05}, nil, TxInfo{})
|
||||
|
||||
err = mp.CheckTx([]byte{0x05}, nil, mempool.TxInfo{})
|
||||
if assert.Error(t, err) {
|
||||
assert.IsType(t, ErrMempoolIsFull{}, err)
|
||||
assert.IsType(t, mempool.ErrMempoolIsFull{}, err)
|
||||
}
|
||||
|
||||
// 6. zero after tx is rechecked and removed due to not being valid anymore
|
||||
app2 := counter.NewApplication(true)
|
||||
app2 := kvstore.NewApplication()
|
||||
cc = proxy.NewLocalClientCreator(app2)
|
||||
mempool, cleanup = newMempoolWithApp(cc)
|
||||
|
||||
mp, cleanup = newMempoolWithApp(cc)
|
||||
defer cleanup()
|
||||
|
||||
txBytes := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(txBytes, uint64(0))
|
||||
|
||||
err = mempool.CheckTx(txBytes, nil, TxInfo{})
|
||||
err = mp.CheckTx(txBytes, nil, mempool.TxInfo{})
|
||||
require.NoError(t, err)
|
||||
assert.EqualValues(t, 8, mempool.TxsBytes())
|
||||
assert.EqualValues(t, 8, mp.SizeBytes())
|
||||
|
||||
appConnCon, _ := cc.NewABCIClient()
|
||||
appConnCon.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "consensus"))
|
||||
@@ -578,26 +613,28 @@ func TestMempoolTxsBytes(t *testing.T) {
|
||||
t.Error(err)
|
||||
}
|
||||
})
|
||||
|
||||
res, err := appConnCon.DeliverTxSync(abci.RequestDeliverTx{Tx: txBytes})
|
||||
require.NoError(t, err)
|
||||
require.EqualValues(t, 0, res.Code)
|
||||
|
||||
res2, err := appConnCon.CommitSync()
|
||||
require.NoError(t, err)
|
||||
require.NotEmpty(t, res2.Data)
|
||||
|
||||
// Pretend like we committed nothing so txBytes gets rechecked and removed.
|
||||
err = mempool.Update(1, []types.Tx{}, abciResponses(0, abci.CodeTypeOK), nil, nil)
|
||||
err = mp.Update(1, []types.Tx{}, abciResponses(0, abci.CodeTypeOK), nil, nil)
|
||||
require.NoError(t, err)
|
||||
assert.EqualValues(t, 0, mempool.TxsBytes())
|
||||
assert.EqualValues(t, 8, mp.SizeBytes())
|
||||
|
||||
// 7. Test RemoveTxByKey function
|
||||
err = mempool.CheckTx([]byte{0x06}, nil, TxInfo{})
|
||||
err = mp.CheckTx([]byte{0x06}, nil, mempool.TxInfo{})
|
||||
require.NoError(t, err)
|
||||
assert.EqualValues(t, 1, mempool.TxsBytes())
|
||||
mempool.RemoveTxByKey(TxKey([]byte{0x07}), true)
|
||||
assert.EqualValues(t, 1, mempool.TxsBytes())
|
||||
mempool.RemoveTxByKey(TxKey([]byte{0x06}), true)
|
||||
assert.EqualValues(t, 0, mempool.TxsBytes())
|
||||
assert.EqualValues(t, 9, mp.SizeBytes())
|
||||
assert.Error(t, mp.RemoveTxByKey(types.Tx([]byte{0x07}).Key()))
|
||||
assert.EqualValues(t, 9, mp.SizeBytes())
|
||||
assert.NoError(t, mp.RemoveTxByKey(types.Tx([]byte{0x06}).Key()))
|
||||
assert.EqualValues(t, 8, mp.SizeBytes())
|
||||
|
||||
}
|
||||
|
||||
@@ -608,14 +645,16 @@ func TestMempoolTxsBytes(t *testing.T) {
|
||||
func TestMempoolRemoteAppConcurrency(t *testing.T) {
|
||||
sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", tmrand.Str(6))
|
||||
app := kvstore.NewApplication()
|
||||
cc, server := newRemoteApp(t, sockPath, app)
|
||||
_, server := newRemoteApp(t, sockPath, app)
|
||||
t.Cleanup(func() {
|
||||
if err := server.Stop(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
})
|
||||
config := cfg.ResetTestRoot("mempool_test")
|
||||
mempool, cleanup := newMempoolWithAppAndConfig(cc, config)
|
||||
|
||||
cfg := config.ResetTestRoot("mempool_test")
|
||||
|
||||
mp, cleanup := newMempoolWithAppAndConfig(proxy.NewRemoteClientCreator(sockPath, "socket", true), cfg)
|
||||
defer cleanup()
|
||||
|
||||
// generate small number of txs
|
||||
@@ -627,7 +666,7 @@ func TestMempoolRemoteAppConcurrency(t *testing.T) {
|
||||
}
|
||||
|
||||
// simulate a group of peers sending them over and over
|
||||
N := config.Mempool.Size
|
||||
N := cfg.Mempool.Size
|
||||
maxPeers := 5
|
||||
for i := 0; i < N; i++ {
|
||||
peerID := mrand.Intn(maxPeers)
|
||||
@@ -635,41 +674,25 @@ func TestMempoolRemoteAppConcurrency(t *testing.T) {
|
||||
tx := txs[txNum]
|
||||
|
||||
// this will err with ErrTxInCache many times ...
|
||||
mempool.CheckTx(tx, nil, TxInfo{SenderID: uint16(peerID)}) //nolint: errcheck // will error
|
||||
mp.CheckTx(tx, nil, mempool.TxInfo{SenderID: uint16(peerID)}) //nolint: errcheck // will error
|
||||
}
|
||||
err := mempool.FlushAppConn()
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, mp.FlushAppConn())
|
||||
}
|
||||
|
||||
// caller must close server
|
||||
func newRemoteApp(
|
||||
t *testing.T,
|
||||
addr string,
|
||||
app abci.Application,
|
||||
) (
|
||||
clientCreator proxy.ClientCreator,
|
||||
server service.Service,
|
||||
) {
|
||||
clientCreator = proxy.NewRemoteClientCreator(addr, "socket", true)
|
||||
func newRemoteApp(t *testing.T, addr string, app abci.Application) (abciclient.Client, service.Service) {
|
||||
clientCreator, err := abciclient.NewClient(addr, "socket", true)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Start server
|
||||
server = abciserver.NewSocketServer(addr, app)
|
||||
server := abciserver.NewSocketServer(addr, app)
|
||||
server.SetLogger(log.TestingLogger().With("module", "abci-server"))
|
||||
if err := server.Start(); err != nil {
|
||||
t.Fatalf("Error starting socket server: %v", err.Error())
|
||||
}
|
||||
return clientCreator, server
|
||||
}
|
||||
func checksumIt(data []byte) string {
|
||||
h := sha256.New()
|
||||
h.Write(data)
|
||||
return fmt.Sprintf("%x", h.Sum(nil))
|
||||
}
|
||||
|
||||
func checksumFile(p string, t *testing.T) string {
|
||||
data, err := ioutil.ReadFile(p)
|
||||
require.Nil(t, err, "expecting successful read of %q", p)
|
||||
return checksumIt(data)
|
||||
return clientCreator, server
|
||||
}
|
||||
|
||||
func abciResponses(n int, code uint32) []*abci.ResponseDeliverTx {
|
||||
@@ -20,4 +20,4 @@
|
||||
// broadcastTxRoutine().
|
||||
|
||||
// TODO: Better handle abci client errors. (make it automatically handle connection errors)
|
||||
package mempool
|
||||
package v0
|
||||
@@ -1,32 +1,20 @@
|
||||
package mempool
|
||||
package v0
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"time"
|
||||
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/libs/clist"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmsync "github.com/tendermint/tendermint/libs/sync"
|
||||
"github.com/tendermint/tendermint/mempool"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
protomem "github.com/tendermint/tendermint/proto/tendermint/mempool"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
const (
|
||||
MempoolChannel = byte(0x30)
|
||||
|
||||
peerCatchupSleepIntervalMS = 100 // If peer is behind, sleep this amount
|
||||
|
||||
// UnknownPeerID is the peer ID to use when running CheckTx when there is
|
||||
// no peer (e.g. RPC)
|
||||
UnknownPeerID uint16 = 0
|
||||
|
||||
maxActiveIDs = math.MaxUint16
|
||||
)
|
||||
|
||||
// Reactor handles mempool tx broadcasting amongst peers.
|
||||
// It maintains a map from peer ID to counter, to prevent gossiping txs to the
|
||||
// peers you received it from.
|
||||
@@ -58,8 +46,8 @@ func (ids *mempoolIDs) ReserveForPeer(peer p2p.Peer) {
|
||||
// nextPeerID returns the next unused peer ID to use.
|
||||
// This assumes that ids's mutex is already locked.
|
||||
func (ids *mempoolIDs) nextPeerID() uint16 {
|
||||
if len(ids.activeIDs) == maxActiveIDs {
|
||||
panic(fmt.Sprintf("node has maximum %d active IDs and wanted to get one more", maxActiveIDs))
|
||||
if len(ids.activeIDs) == mempool.MaxActiveIDs {
|
||||
panic(fmt.Sprintf("node has maximum %d active IDs and wanted to get one more", mempool.MaxActiveIDs))
|
||||
}
|
||||
|
||||
_, idExists := ids.activeIDs[ids.nextID]
|
||||
@@ -143,7 +131,7 @@ func (memR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
|
||||
|
||||
return []*p2p.ChannelDescriptor{
|
||||
{
|
||||
ID: MempoolChannel,
|
||||
ID: mempool.MempoolChannel,
|
||||
Priority: 5,
|
||||
RecvMessageCapacity: batchMsg.Size(),
|
||||
},
|
||||
@@ -175,18 +163,20 @@ func (memR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
|
||||
}
|
||||
memR.Logger.Debug("Receive", "src", src, "chId", chID, "msg", msg)
|
||||
|
||||
txInfo := TxInfo{SenderID: memR.ids.GetForPeer(src)}
|
||||
txInfo := mempool.TxInfo{SenderID: memR.ids.GetForPeer(src)}
|
||||
if src != nil {
|
||||
txInfo.SenderP2PID = src.ID()
|
||||
}
|
||||
|
||||
for _, tx := range msg.Txs {
|
||||
err = memR.mempool.CheckTx(tx, nil, txInfo)
|
||||
if err == ErrTxInCache {
|
||||
memR.Logger.Debug("Tx already exists in cache", "tx", txID(tx))
|
||||
if errors.Is(err, mempool.ErrTxInCache) {
|
||||
memR.Logger.Debug("Tx already exists in cache", "tx", tx.String())
|
||||
} else if err != nil {
|
||||
memR.Logger.Info("Could not check tx", "tx", txID(tx), "err", err)
|
||||
memR.Logger.Info("Could not check tx", "tx", tx.String(), "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
// broadcasting happens from go routines per peer
|
||||
}
|
||||
|
||||
@@ -229,14 +219,14 @@ func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) {
|
||||
// different every time due to us using a map. Sometimes other reactors
|
||||
// will be initialized before the consensus reactor. We should wait a few
|
||||
// milliseconds and retry.
|
||||
time.Sleep(peerCatchupSleepIntervalMS * time.Millisecond)
|
||||
time.Sleep(mempool.PeerCatchupSleepIntervalMS * time.Millisecond)
|
||||
continue
|
||||
}
|
||||
|
||||
// Allow for a lag of 1 block.
|
||||
memTx := next.Value.(*mempoolTx)
|
||||
if peerState.GetHeight() < memTx.Height()-1 {
|
||||
time.Sleep(peerCatchupSleepIntervalMS * time.Millisecond)
|
||||
time.Sleep(mempool.PeerCatchupSleepIntervalMS * time.Millisecond)
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -249,13 +239,15 @@ func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) {
|
||||
Txs: &protomem.Txs{Txs: [][]byte{memTx.tx}},
|
||||
},
|
||||
}
|
||||
|
||||
bz, err := msg.Marshal()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
success := peer.Send(MempoolChannel, bz)
|
||||
|
||||
success := peer.Send(mempool.MempoolChannel, bz)
|
||||
if !success {
|
||||
time.Sleep(peerCatchupSleepIntervalMS * time.Millisecond)
|
||||
time.Sleep(mempool.PeerCatchupSleepIntervalMS * time.Millisecond)
|
||||
continue
|
||||
}
|
||||
}
|
||||
@@ -272,9 +264,6 @@ func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) {
|
||||
}
|
||||
}
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
// Messages
|
||||
|
||||
func (memR *Reactor) decodeMsg(bz []byte) (TxsMessage, error) {
|
||||
msg := protomem.Message{}
|
||||
err := msg.Unmarshal(bz)
|
||||
@@ -304,8 +293,6 @@ func (memR *Reactor) decodeMsg(bz []byte) (TxsMessage, error) {
|
||||
return message, fmt.Errorf("msg type: %T is not supported", msg)
|
||||
}
|
||||
|
||||
//-------------------------------------
|
||||
|
||||
// TxsMessage is a Message containing transactions.
|
||||
type TxsMessage struct {
|
||||
Txs []types.Tx
|
||||
@@ -1,4 +1,4 @@
|
||||
package mempool
|
||||
package v0
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
@@ -18,6 +18,7 @@ import (
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmrand "github.com/tendermint/tendermint/libs/rand"
|
||||
"github.com/tendermint/tendermint/mempool"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
"github.com/tendermint/tendermint/p2p/mock"
|
||||
memproto "github.com/tendermint/tendermint/proto/tendermint/mempool"
|
||||
@@ -61,7 +62,7 @@ func TestReactorBroadcastTxsMessage(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
txs := checkTxs(t, reactors[0].mempool, numTxs, UnknownPeerID)
|
||||
txs := checkTxs(t, reactors[0].mempool, numTxs, mempool.UnknownPeerID)
|
||||
waitForTxsOnReactors(t, txs, reactors)
|
||||
}
|
||||
|
||||
@@ -91,7 +92,7 @@ func TestReactorConcurrency(t *testing.T) {
|
||||
|
||||
// 1. submit a bunch of txs
|
||||
// 2. update the whole mempool
|
||||
txs := checkTxs(t, reactors[0].mempool, numTxs, UnknownPeerID)
|
||||
txs := checkTxs(t, reactors[0].mempool, numTxs, mempool.UnknownPeerID)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
|
||||
@@ -108,7 +109,7 @@ func TestReactorConcurrency(t *testing.T) {
|
||||
|
||||
// 1. submit a bunch of txs
|
||||
// 2. update none
|
||||
_ = checkTxs(t, reactors[1].mempool, numTxs, UnknownPeerID)
|
||||
_ = checkTxs(t, reactors[1].mempool, numTxs, mempool.UnknownPeerID)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
|
||||
@@ -170,7 +171,7 @@ func TestReactor_MaxTxBytes(t *testing.T) {
|
||||
// Broadcast a tx, which has the max size
|
||||
// => ensure it's received by the second reactor.
|
||||
tx1 := tmrand.Bytes(config.Mempool.MaxTxBytes)
|
||||
err := reactors[0].mempool.CheckTx(tx1, nil, TxInfo{SenderID: UnknownPeerID})
|
||||
err := reactors[0].mempool.CheckTx(tx1, nil, mempool.TxInfo{SenderID: mempool.UnknownPeerID})
|
||||
require.NoError(t, err)
|
||||
waitForTxsOnReactors(t, []types.Tx{tx1}, reactors)
|
||||
|
||||
@@ -180,7 +181,7 @@ func TestReactor_MaxTxBytes(t *testing.T) {
|
||||
// Broadcast a tx, which is beyond the max size
|
||||
// => ensure it's not sent
|
||||
tx2 := tmrand.Bytes(config.Mempool.MaxTxBytes + 1)
|
||||
err = reactors[0].mempool.CheckTx(tx2, nil, TxInfo{SenderID: UnknownPeerID})
|
||||
err = reactors[0].mempool.CheckTx(tx2, nil, mempool.TxInfo{SenderID: mempool.UnknownPeerID})
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
@@ -252,7 +253,7 @@ func TestMempoolIDsPanicsIfNodeRequestsOvermaxActiveIDs(t *testing.T) {
|
||||
// 0 is already reserved for UnknownPeerID
|
||||
ids := newMempoolIDs()
|
||||
|
||||
for i := 0; i < maxActiveIDs-1; i++ {
|
||||
for i := 0; i < mempool.MaxActiveIDs-1; i++ {
|
||||
peer := mock.NewPeer(net.IP{127, 0, 0, 1})
|
||||
ids.ReserveForPeer(peer)
|
||||
}
|
||||
@@ -276,9 +277,9 @@ func TestDontExhaustMaxActiveIDs(t *testing.T) {
|
||||
}()
|
||||
reactor := reactors[0]
|
||||
|
||||
for i := 0; i < maxActiveIDs+1; i++ {
|
||||
for i := 0; i < mempool.MaxActiveIDs+1; i++ {
|
||||
peer := mock.NewPeer(nil)
|
||||
reactor.Receive(MempoolChannel, peer, []byte{0x1, 0x2, 0x3})
|
||||
reactor.Receive(mempool.MempoolChannel, peer, []byte{0x1, 0x2, 0x3})
|
||||
reactor.AddPeer(peer)
|
||||
}
|
||||
}
|
||||
863
mempool/v1/mempool.go
Normal file
863
mempool/v1/mempool.go
Normal file
@@ -0,0 +1,863 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
abci "github.com/tendermint/tendermint/abci/types"
|
||||
"github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/libs/clist"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmmath "github.com/tendermint/tendermint/libs/math"
|
||||
tmsync "github.com/tendermint/tendermint/libs/sync"
|
||||
"github.com/tendermint/tendermint/mempool"
|
||||
"github.com/tendermint/tendermint/proxy"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
var _ mempool.Mempool = (*TxMempool)(nil)
|
||||
|
||||
// TxMempoolOption sets an optional parameter on the TxMempool.
|
||||
type TxMempoolOption func(*TxMempool)
|
||||
|
||||
// TxMempool defines a prioritized mempool data structure used by the v1 mempool
|
||||
// reactor. It keeps a thread-safe priority queue of transactions that is used
|
||||
// when a block proposer constructs a block and a thread-safe linked-list that
|
||||
// is used to gossip transactions to peers in a FIFO manner.
|
||||
type TxMempool struct {
|
||||
logger log.Logger
|
||||
metrics *mempool.Metrics
|
||||
config *config.MempoolConfig
|
||||
proxyAppConn proxy.AppConnMempool
|
||||
|
||||
// txsAvailable fires once for each height when the mempool is not empty
|
||||
txsAvailable chan struct{}
|
||||
notifiedTxsAvailable bool
|
||||
|
||||
// height defines the last block height process during Update()
|
||||
height int64
|
||||
|
||||
// sizeBytes defines the total size of the mempool (sum of all tx bytes)
|
||||
sizeBytes int64
|
||||
|
||||
// cache defines a fixed-size cache of already seen transactions as this
|
||||
// reduces pressure on the proxyApp.
|
||||
cache mempool.TxCache
|
||||
|
||||
// txStore defines the main storage of valid transactions. Indexes are built
|
||||
// on top of this store.
|
||||
txStore *TxStore
|
||||
|
||||
// gossipIndex defines the gossiping index of valid transactions via a
|
||||
// thread-safe linked-list. We also use the gossip index as a cursor for
|
||||
// rechecking transactions already in the mempool.
|
||||
gossipIndex *clist.CList
|
||||
|
||||
// recheckCursor and recheckEnd are used as cursors based on the gossip index
|
||||
// to recheck transactions that are already in the mempool. Iteration is not
|
||||
// thread-safe and transaction may be mutated in serial order.
|
||||
//
|
||||
// XXX/TODO: It might be somewhat of a codesmell to use the gossip index for
|
||||
// iterator and cursor management when rechecking transactions. If the gossip
|
||||
// index changes or is removed in a future refactor, this will have to be
|
||||
// refactored. Instead, we should consider just keeping a slice of a snapshot
|
||||
// of the mempool's current transactions during Update and an integer cursor
|
||||
// into that slice. This, however, requires additional O(n) space complexity.
|
||||
recheckCursor *clist.CElement // next expected response
|
||||
recheckEnd *clist.CElement // re-checking stops here
|
||||
|
||||
// priorityIndex defines the priority index of valid transactions via a
|
||||
// thread-safe priority queue.
|
||||
priorityIndex *TxPriorityQueue
|
||||
|
||||
// heightIndex defines a height-based, in ascending order, transaction index.
|
||||
// i.e. older transactions are first.
|
||||
heightIndex *WrappedTxList
|
||||
|
||||
// timestampIndex defines a timestamp-based, in ascending order, transaction
|
||||
// index. i.e. older transactions are first.
|
||||
timestampIndex *WrappedTxList
|
||||
|
||||
// A read/write lock is used to safe guard updates, insertions and deletions
|
||||
// from the mempool. A read-lock is implicitly acquired when executing CheckTx,
|
||||
// however, a caller must explicitly grab a write-lock via Lock when updating
|
||||
// the mempool via Update().
|
||||
mtx tmsync.RWMutex
|
||||
preCheck mempool.PreCheckFunc
|
||||
postCheck mempool.PostCheckFunc
|
||||
}
|
||||
|
||||
func NewTxMempool(
|
||||
logger log.Logger,
|
||||
cfg *config.MempoolConfig,
|
||||
proxyAppConn proxy.AppConnMempool,
|
||||
height int64,
|
||||
options ...TxMempoolOption,
|
||||
) *TxMempool {
|
||||
|
||||
txmp := &TxMempool{
|
||||
logger: logger,
|
||||
config: cfg,
|
||||
proxyAppConn: proxyAppConn,
|
||||
height: height,
|
||||
cache: mempool.NopTxCache{},
|
||||
metrics: mempool.NopMetrics(),
|
||||
txStore: NewTxStore(),
|
||||
gossipIndex: clist.New(),
|
||||
priorityIndex: NewTxPriorityQueue(),
|
||||
heightIndex: NewWrappedTxList(func(wtx1, wtx2 *WrappedTx) bool {
|
||||
return wtx1.height >= wtx2.height
|
||||
}),
|
||||
timestampIndex: NewWrappedTxList(func(wtx1, wtx2 *WrappedTx) bool {
|
||||
return wtx1.timestamp.After(wtx2.timestamp) || wtx1.timestamp.Equal(wtx2.timestamp)
|
||||
}),
|
||||
}
|
||||
|
||||
if cfg.CacheSize > 0 {
|
||||
txmp.cache = mempool.NewLRUTxCache(cfg.CacheSize)
|
||||
}
|
||||
|
||||
proxyAppConn.SetResponseCallback(txmp.defaultTxCallback)
|
||||
|
||||
for _, opt := range options {
|
||||
opt(txmp)
|
||||
}
|
||||
|
||||
return txmp
|
||||
}
|
||||
|
||||
// WithPreCheck sets a filter for the mempool to reject a transaction if f(tx)
|
||||
// returns an error. This is executed before CheckTx. It only applies to the
|
||||
// first created block. After that, Update() overwrites the existing value.
|
||||
func WithPreCheck(f mempool.PreCheckFunc) TxMempoolOption {
|
||||
return func(txmp *TxMempool) { txmp.preCheck = f }
|
||||
}
|
||||
|
||||
// WithPostCheck sets a filter for the mempool to reject a transaction if
|
||||
// f(tx, resp) returns an error. This is executed after CheckTx. It only applies
|
||||
// to the first created block. After that, Update overwrites the existing value.
|
||||
func WithPostCheck(f mempool.PostCheckFunc) TxMempoolOption {
|
||||
return func(txmp *TxMempool) { txmp.postCheck = f }
|
||||
}
|
||||
|
||||
// WithMetrics sets the mempool's metrics collector.
|
||||
func WithMetrics(metrics *mempool.Metrics) TxMempoolOption {
|
||||
return func(txmp *TxMempool) { txmp.metrics = metrics }
|
||||
}
|
||||
|
||||
// Lock obtains a write-lock on the mempool. A caller must be sure to explicitly
|
||||
// release the lock when finished.
|
||||
func (txmp *TxMempool) Lock() {
|
||||
txmp.mtx.Lock()
|
||||
}
|
||||
|
||||
// Unlock releases a write-lock on the mempool.
|
||||
func (txmp *TxMempool) Unlock() {
|
||||
txmp.mtx.Unlock()
|
||||
}
|
||||
|
||||
// Size returns the number of valid transactions in the mempool. It is
|
||||
// thread-safe.
|
||||
func (txmp *TxMempool) Size() int {
|
||||
return txmp.txStore.Size()
|
||||
}
|
||||
|
||||
// SizeBytes return the total sum in bytes of all the valid transactions in the
|
||||
// mempool. It is thread-safe.
|
||||
func (txmp *TxMempool) SizeBytes() int64 {
|
||||
return atomic.LoadInt64(&txmp.sizeBytes)
|
||||
}
|
||||
|
||||
// FlushAppConn executes FlushSync on the mempool's proxyAppConn.
|
||||
//
|
||||
// NOTE: The caller must obtain a write-lock via Lock() prior to execution.
|
||||
func (txmp *TxMempool) FlushAppConn() error {
|
||||
return txmp.proxyAppConn.FlushSync()
|
||||
}
|
||||
|
||||
// WaitForNextTx returns a blocking channel that will be closed when the next
|
||||
// valid transaction is available to gossip. It is thread-safe.
|
||||
func (txmp *TxMempool) WaitForNextTx() <-chan struct{} {
|
||||
return txmp.gossipIndex.WaitChan()
|
||||
}
|
||||
|
||||
// NextGossipTx returns the next valid transaction to gossip. A caller must wait
|
||||
// for WaitForNextTx to signal a transaction is available to gossip first. It is
|
||||
// thread-safe.
|
||||
func (txmp *TxMempool) NextGossipTx() *clist.CElement {
|
||||
return txmp.gossipIndex.Front()
|
||||
}
|
||||
|
||||
// EnableTxsAvailable enables the mempool to trigger events when transactions
|
||||
// are available on a block by block basis.
|
||||
func (txmp *TxMempool) EnableTxsAvailable() {
|
||||
txmp.mtx.Lock()
|
||||
defer txmp.mtx.Unlock()
|
||||
|
||||
txmp.txsAvailable = make(chan struct{}, 1)
|
||||
}
|
||||
|
||||
// TxsAvailable returns a channel which fires once for every height, and only
|
||||
// when transactions are available in the mempool. It is thread-safe.
|
||||
func (txmp *TxMempool) TxsAvailable() <-chan struct{} {
|
||||
return txmp.txsAvailable
|
||||
}
|
||||
|
||||
// CheckTx executes the ABCI CheckTx method for a given transaction. It acquires
|
||||
// a read-lock attempts to execute the application's CheckTx ABCI method via
|
||||
// CheckTxAsync. We return an error if any of the following happen:
|
||||
//
|
||||
// - The CheckTxAsync execution fails.
|
||||
// - The transaction already exists in the cache and we've already received the
|
||||
// transaction from the peer. Otherwise, if it solely exists in the cache, we
|
||||
// return nil.
|
||||
// - The transaction size exceeds the maximum transaction size as defined by the
|
||||
// configuration provided to the mempool.
|
||||
// - The transaction fails Pre-Check (if it is defined).
|
||||
// - The proxyAppConn fails, e.g. the buffer is full.
|
||||
//
|
||||
// If the mempool is full, we still execute CheckTx and attempt to find a lower
|
||||
// priority transaction to evict. If such a transaction exists, we remove the
|
||||
// lower priority transaction and add the new one with higher priority.
|
||||
//
|
||||
// NOTE:
|
||||
// - The applications' CheckTx implementation may panic.
|
||||
// - The caller is not to explicitly require any locks for executing CheckTx.
|
||||
func (txmp *TxMempool) CheckTx(
|
||||
tx types.Tx,
|
||||
cb func(*abci.Response),
|
||||
txInfo mempool.TxInfo,
|
||||
) error {
|
||||
|
||||
txmp.mtx.RLock()
|
||||
defer txmp.mtx.RUnlock()
|
||||
|
||||
txSize := len(tx)
|
||||
if txSize > txmp.config.MaxTxBytes {
|
||||
return mempool.ErrTxTooLarge{
|
||||
Max: txmp.config.MaxTxBytes,
|
||||
Actual: txSize,
|
||||
}
|
||||
}
|
||||
|
||||
if txmp.preCheck != nil {
|
||||
if err := txmp.preCheck(tx); err != nil {
|
||||
return mempool.ErrPreCheck{
|
||||
Reason: err,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := txmp.proxyAppConn.Error(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
txHash := tx.Key()
|
||||
|
||||
// We add the transaction to the mempool's cache and if the
|
||||
// transaction is already present in the cache, i.e. false is returned, then we
|
||||
// check if we've seen this transaction and error if we have.
|
||||
if !txmp.cache.Push(tx) {
|
||||
txmp.txStore.GetOrSetPeerByTxHash(txHash, txInfo.SenderID)
|
||||
return mempool.ErrTxInCache
|
||||
}
|
||||
|
||||
reqRes := txmp.proxyAppConn.CheckTxAsync(abci.RequestCheckTx{Tx: tx})
|
||||
reqRes.SetCallback(func(res *abci.Response) {
|
||||
if txmp.recheckCursor != nil {
|
||||
panic("recheck cursor is non-nil in CheckTx callback")
|
||||
}
|
||||
|
||||
wtx := &WrappedTx{
|
||||
tx: tx,
|
||||
hash: txHash,
|
||||
timestamp: time.Now().UTC(),
|
||||
height: txmp.height,
|
||||
}
|
||||
txmp.initTxCallback(wtx, res, txInfo)
|
||||
|
||||
if cb != nil {
|
||||
cb(res)
|
||||
}
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (txmp *TxMempool) RemoveTxByKey(txKey types.TxKey) error {
|
||||
txmp.Lock()
|
||||
defer txmp.Unlock()
|
||||
|
||||
// remove the committed transaction from the transaction store and indexes
|
||||
if wtx := txmp.txStore.GetTxByHash(txKey); wtx != nil {
|
||||
txmp.removeTx(wtx, false)
|
||||
return nil
|
||||
}
|
||||
|
||||
return errors.New("transaction not found")
|
||||
}
|
||||
|
||||
// Flush flushes out the mempool. It acquires a read-lock, fetches all the
|
||||
// transactions currently in the transaction store and removes each transaction
|
||||
// from the store and all indexes and finally resets the cache.
|
||||
//
|
||||
// NOTE:
|
||||
// - Flushing the mempool may leave the mempool in an inconsistent state.
|
||||
func (txmp *TxMempool) Flush() {
|
||||
txmp.mtx.RLock()
|
||||
defer txmp.mtx.RUnlock()
|
||||
|
||||
txmp.heightIndex.Reset()
|
||||
txmp.timestampIndex.Reset()
|
||||
|
||||
for _, wtx := range txmp.txStore.GetAllTxs() {
|
||||
txmp.removeTx(wtx, false)
|
||||
}
|
||||
|
||||
atomic.SwapInt64(&txmp.sizeBytes, 0)
|
||||
txmp.cache.Reset()
|
||||
}
|
||||
|
||||
// ReapMaxBytesMaxGas returns a list of transactions within the provided size
|
||||
// and gas constraints. Transaction are retrieved in priority order.
|
||||
//
|
||||
// NOTE:
|
||||
// - A read-lock is acquired.
|
||||
// - Transactions returned are not actually removed from the mempool transaction
|
||||
// store or indexes.
|
||||
func (txmp *TxMempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs {
|
||||
txmp.mtx.RLock()
|
||||
defer txmp.mtx.RUnlock()
|
||||
|
||||
var (
|
||||
totalGas int64
|
||||
totalSize int64
|
||||
)
|
||||
|
||||
// wTxs contains a list of *WrappedTx retrieved from the priority queue that
|
||||
// need to be re-enqueued prior to returning.
|
||||
wTxs := make([]*WrappedTx, 0, txmp.priorityIndex.NumTxs())
|
||||
defer func() {
|
||||
for _, wtx := range wTxs {
|
||||
txmp.priorityIndex.PushTx(wtx)
|
||||
}
|
||||
}()
|
||||
|
||||
txs := make([]types.Tx, 0, txmp.priorityIndex.NumTxs())
|
||||
for txmp.priorityIndex.NumTxs() > 0 {
|
||||
wtx := txmp.priorityIndex.PopTx()
|
||||
txs = append(txs, wtx.tx)
|
||||
wTxs = append(wTxs, wtx)
|
||||
size := types.ComputeProtoSizeForTxs([]types.Tx{wtx.tx})
|
||||
|
||||
// Ensure we have capacity for the transaction with respect to the
|
||||
// transaction size.
|
||||
if maxBytes > -1 && totalSize+size > maxBytes {
|
||||
return txs[:len(txs)-1]
|
||||
}
|
||||
|
||||
totalSize += size
|
||||
|
||||
// ensure we have capacity for the transaction with respect to total gas
|
||||
gas := totalGas + wtx.gasWanted
|
||||
if maxGas > -1 && gas > maxGas {
|
||||
return txs[:len(txs)-1]
|
||||
}
|
||||
|
||||
totalGas = gas
|
||||
}
|
||||
|
||||
return txs
|
||||
}
|
||||
|
||||
// ReapMaxTxs returns a list of transactions within the provided number of
|
||||
// transactions bound. Transaction are retrieved in priority order.
|
||||
//
|
||||
// NOTE:
|
||||
// - A read-lock is acquired.
|
||||
// - Transactions returned are not actually removed from the mempool transaction
|
||||
// store or indexes.
|
||||
func (txmp *TxMempool) ReapMaxTxs(max int) types.Txs {
|
||||
txmp.mtx.RLock()
|
||||
defer txmp.mtx.RUnlock()
|
||||
|
||||
numTxs := txmp.priorityIndex.NumTxs()
|
||||
if max < 0 {
|
||||
max = numTxs
|
||||
}
|
||||
|
||||
cap := tmmath.MinInt(numTxs, max)
|
||||
|
||||
// wTxs contains a list of *WrappedTx retrieved from the priority queue that
|
||||
// need to be re-enqueued prior to returning.
|
||||
wTxs := make([]*WrappedTx, 0, cap)
|
||||
defer func() {
|
||||
for _, wtx := range wTxs {
|
||||
txmp.priorityIndex.PushTx(wtx)
|
||||
}
|
||||
}()
|
||||
|
||||
txs := make([]types.Tx, 0, cap)
|
||||
for txmp.priorityIndex.NumTxs() > 0 && len(txs) < max {
|
||||
wtx := txmp.priorityIndex.PopTx()
|
||||
txs = append(txs, wtx.tx)
|
||||
wTxs = append(wTxs, wtx)
|
||||
}
|
||||
|
||||
return txs
|
||||
}
|
||||
|
||||
// Update iterates over all the transactions provided by the caller, i.e. the
// block producer, and removes them from the cache (if applicable) and removes
// the transactions from the main transaction store and associated indexes.
// Finally, if there are transactions remaining in the mempool, we initiate a
// re-CheckTx for them (if applicable), otherwise, we notify the caller more
// transactions are available.
//
// NOTE:
// - The caller must explicitly acquire a write-lock via Lock().
func (txmp *TxMempool) Update(
	blockHeight int64,
	blockTxs types.Txs,
	deliverTxResponses []*abci.ResponseDeliverTx,
	newPreFn mempool.PreCheckFunc,
	newPostFn mempool.PostCheckFunc,
) error {

	txmp.height = blockHeight
	// Reset the notification latch so notifyTxsAvailable can fire again for
	// the new height.
	txmp.notifiedTxsAvailable = false

	// Install replacement pre/post-check functions when the caller provides
	// them; nil leaves the existing functions in place.
	if newPreFn != nil {
		txmp.preCheck = newPreFn
	}
	if newPostFn != nil {
		txmp.postCheck = newPostFn
	}

	for i, tx := range blockTxs {
		if deliverTxResponses[i].Code == abci.CodeTypeOK {
			// add the valid committed transaction to the cache (if missing)
			_ = txmp.cache.Push(tx)
		} else if !txmp.config.KeepInvalidTxsInCache {
			// allow invalid transactions to be re-submitted
			txmp.cache.Remove(tx)
		}

		// remove the committed transaction from the transaction store and indexes
		if wtx := txmp.txStore.GetTxByHash(tx.Key()); wtx != nil {
			txmp.removeTx(wtx, false)
		}
	}

	// Drop any transactions whose height- or time-based TTL has elapsed.
	txmp.purgeExpiredTxs(blockHeight)

	// If there any uncommitted transactions left in the mempool, we either
	// initiate re-CheckTx per remaining transaction or notify that remaining
	// transactions are left.
	if txmp.Size() > 0 {
		if txmp.config.Recheck {
			txmp.logger.Debug(
				"executing re-CheckTx for all remaining transactions",
				"num_txs", txmp.Size(),
				"height", blockHeight,
			)
			txmp.updateReCheckTxs()
		} else {
			txmp.notifyTxsAvailable()
		}
	}

	txmp.metrics.Size.Set(float64(txmp.Size()))
	return nil
}
|
||||
|
||||
// initTxCallback performs the initial, i.e. the first, callback after CheckTx
// has been executed by the ABCI application. In other words, initTxCallback is
// called after executing CheckTx when we see a unique transaction for the first
// time. CheckTx can be called again for the same transaction at a later point
// in time when re-checking, however, this callback will not be called.
//
// After the ABCI application executes CheckTx, initTxCallback is called with
// the ABCI *Response object and TxInfo. If postCheck is defined on the mempool,
// we execute that first. If there is no error from postCheck (if defined) and
// the ABCI CheckTx response code is OK, we attempt to insert the transaction.
//
// When attempting to insert the transaction, we first check if there is
// sufficient capacity. If there is sufficient capacity, the transaction is
// inserted into the txStore and indexed across all indexes. Otherwise, if the
// mempool is full, we attempt to find a lower priority transaction to evict in
// place of the new incoming transaction. If no such transaction exists, the
// new incoming transaction is rejected.
//
// If the new incoming transaction fails CheckTx or postCheck fails, we reject
// the new incoming transaction.
//
// NOTE:
// - An explicit lock is NOT required.
func (txmp *TxMempool) initTxCallback(wtx *WrappedTx, res *abci.Response, txInfo mempool.TxInfo) {
	// Only CheckTx responses are meaningful here; anything else is ignored.
	checkTxRes, ok := res.Value.(*abci.Response_CheckTx)
	if !ok {
		return
	}

	// Run the application-level post-check (if configured) before accepting.
	var err error
	if txmp.postCheck != nil {
		err = txmp.postCheck(wtx.tx, checkTxRes.CheckTx)
	}

	if err != nil || checkTxRes.CheckTx.Code != abci.CodeTypeOK {
		// ignore bad transactions
		txmp.logger.Info(
			"rejected bad transaction",
			"priority", wtx.priority,
			"tx", fmt.Sprintf("%X", wtx.tx.Hash()),
			"peer_id", txInfo.SenderP2PID,
			"code", checkTxRes.CheckTx.Code,
			"post_check_err", err,
		)

		txmp.metrics.FailedTxs.Add(1)

		// Drop from the cache so the tx may be re-submitted later, unless the
		// operator opted to keep invalid txs cached.
		if !txmp.config.KeepInvalidTxsInCache {
			txmp.cache.Remove(wtx.tx)
		}
		// Surface the postCheck failure to the RPC caller via the response.
		if err != nil {
			checkTxRes.CheckTx.MempoolError = err.Error()
		}
		return
	}

	sender := checkTxRes.CheckTx.Sender
	priority := checkTxRes.CheckTx.Priority

	// Enforce at most one in-flight transaction per application-declared
	// sender (only when the application reports a sender at all).
	if len(sender) > 0 {
		if wtx := txmp.txStore.GetTxBySender(sender); wtx != nil {
			txmp.logger.Error(
				"rejected incoming good transaction; tx already exists for sender",
				"tx", fmt.Sprintf("%X", wtx.tx.Hash()),
				"sender", sender,
			)
			txmp.metrics.RejectedTxs.Add(1)
			return
		}
	}

	// If the mempool is over capacity, try to make room by evicting
	// lower-priority transactions; otherwise reject the newcomer.
	if err := txmp.canAddTx(wtx); err != nil {
		evictTxs := txmp.priorityIndex.GetEvictableTxs(
			priority,
			int64(wtx.Size()),
			txmp.SizeBytes(),
			txmp.config.MaxTxsBytes,
		)
		if len(evictTxs) == 0 {
			// No room for the new incoming transaction so we just remove it from
			// the cache.
			txmp.cache.Remove(wtx.tx)
			txmp.logger.Error(
				"rejected incoming good transaction; mempool full",
				"tx", fmt.Sprintf("%X", wtx.tx.Hash()),
				"err", err.Error(),
			)
			txmp.metrics.RejectedTxs.Add(1)
			return
		}

		// evict an existing transaction(s)
		//
		// NOTE:
		// - The transaction, toEvict, can be removed while a concurrent
		//   reCheckTx callback is being executed for the same transaction.
		for _, toEvict := range evictTxs {
			// true: also purge the evicted tx from the cache.
			txmp.removeTx(toEvict, true)
			txmp.logger.Debug(
				"evicted existing good transaction; mempool full",
				"old_tx", fmt.Sprintf("%X", toEvict.tx.Hash()),
				"old_priority", toEvict.priority,
				"new_tx", fmt.Sprintf("%X", wtx.tx.Hash()),
				"new_priority", wtx.priority,
			)
			txmp.metrics.EvictedTxs.Add(1)
		}
	}

	// Fill in the fields learned from the CheckTx response and record the
	// first peer that sent us this transaction.
	wtx.gasWanted = checkTxRes.CheckTx.GasWanted
	wtx.priority = priority
	wtx.sender = sender
	wtx.peers = map[uint16]struct{}{
		txInfo.SenderID: {},
	}

	txmp.metrics.TxSizeBytes.Observe(float64(wtx.Size()))
	txmp.metrics.Size.Set(float64(txmp.Size()))

	txmp.insertTx(wtx)
	txmp.logger.Debug(
		"inserted good transaction",
		"priority", wtx.priority,
		"tx", fmt.Sprintf("%X", wtx.tx.Hash()),
		"height", txmp.height,
		"num_txs", txmp.Size(),
	)
	txmp.notifyTxsAvailable()

}
|
||||
|
||||
// defaultTxCallback performs the default CheckTx application callback. This is
// NOT executed when a transaction is first seen/received. Instead, this callback
// is executed during re-checking transactions (if enabled). A caller, i.e a
// block proposer, acquires a mempool write-lock via Lock() and when executing
// Update(), if the mempool is non-empty and Recheck is enabled, then all
// remaining transactions will be rechecked via CheckTxAsync. The order in which
// they are rechecked must be the same order in which this callback is called
// per transaction.
func (txmp *TxMempool) defaultTxCallback(req *abci.Request, res *abci.Response) {
	// A nil cursor means no recheck is in progress; nothing to do.
	if txmp.recheckCursor == nil {
		return
	}

	txmp.metrics.RecheckTimes.Add(1)

	checkTxRes, ok := res.Value.(*abci.Response_CheckTx)
	if !ok {
		txmp.logger.Error("received incorrect type in mempool callback",
			"expected", reflect.TypeOf(&abci.Response_CheckTx{}).Name(),
			"got", reflect.TypeOf(res.Value).Name(),
		)
		return
	}
	tx := req.GetCheckTx().Tx
	wtx := txmp.recheckCursor.Value.(*WrappedTx)

	// Search through the remaining list of tx to recheck for a transaction that matches
	// the one we received from the ABCI application.
	for {
		if bytes.Equal(tx, wtx.tx) {
			// We've found a tx in the recheck list that matches the tx that we
			// received from the ABCI application.
			// Break, and use this transaction for further checks.
			break
		}

		txmp.logger.Error(
			"re-CheckTx transaction mismatch",
			"got", wtx.tx.Hash(),
			"expected", types.Tx(tx).Key(),
		)

		if txmp.recheckCursor == txmp.recheckEnd {
			// we reached the end of the recheckTx list without finding a tx
			// matching the one we received from the ABCI application.
			// Return without processing any tx.
			txmp.recheckCursor = nil
			return
		}

		// Advance the cursor and keep scanning.
		txmp.recheckCursor = txmp.recheckCursor.Next()
		wtx = txmp.recheckCursor.Value.(*WrappedTx)
	}

	// Only evaluate transactions that have not been removed. This can happen
	// if an existing transaction is evicted during CheckTx and while this
	// callback is being executed for the same evicted transaction.
	if !txmp.txStore.IsTxRemoved(wtx.hash) {
		var err error
		if txmp.postCheck != nil {
			err = txmp.postCheck(tx, checkTxRes.CheckTx)
		}

		if checkTxRes.CheckTx.Code == abci.CodeTypeOK && err == nil {
			// Still valid: refresh the priority the application reported.
			wtx.priority = checkTxRes.CheckTx.Priority
		} else {
			txmp.logger.Debug(
				"existing transaction no longer valid; failed re-CheckTx callback",
				"priority", wtx.priority,
				"tx", fmt.Sprintf("%X", wtx.tx.Hash()),
				"err", err,
				"code", checkTxRes.CheckTx.Code,
			)

			// Sanity check: the cursor must point at this tx's gossip element,
			// otherwise the recheck bookkeeping is corrupted.
			if wtx.gossipEl != txmp.recheckCursor {
				panic("corrupted reCheckTx cursor")
			}

			txmp.removeTx(wtx, !txmp.config.KeepInvalidTxsInCache)
		}
	}

	// move reCheckTx cursor to next element
	if txmp.recheckCursor == txmp.recheckEnd {
		txmp.recheckCursor = nil
	} else {
		txmp.recheckCursor = txmp.recheckCursor.Next()
	}

	// Recheck finished for all transactions: notify listeners if anything
	// remains in the mempool.
	if txmp.recheckCursor == nil {
		txmp.logger.Debug("finished rechecking transactions")

		if txmp.Size() > 0 {
			txmp.notifyTxsAvailable()
		}
	}

	txmp.metrics.Size.Set(float64(txmp.Size()))
}
|
||||
|
||||
// updateReCheckTxs updates the recheck cursors by using the gossipIndex. For
|
||||
// each transaction, it executes CheckTxAsync. The global callback defined on
|
||||
// the proxyAppConn will be executed for each transaction after CheckTx is
|
||||
// executed.
|
||||
//
|
||||
// NOTE:
|
||||
// - The caller must have a write-lock when executing updateReCheckTxs.
|
||||
func (txmp *TxMempool) updateReCheckTxs() {
|
||||
if txmp.Size() == 0 {
|
||||
panic("attempted to update re-CheckTx txs when mempool is empty")
|
||||
}
|
||||
|
||||
txmp.recheckCursor = txmp.gossipIndex.Front()
|
||||
txmp.recheckEnd = txmp.gossipIndex.Back()
|
||||
|
||||
for e := txmp.gossipIndex.Front(); e != nil; e = e.Next() {
|
||||
wtx := e.Value.(*WrappedTx)
|
||||
|
||||
// Only execute CheckTx if the transaction is not marked as removed which
|
||||
// could happen if the transaction was evicted.
|
||||
if !txmp.txStore.IsTxRemoved(wtx.hash) {
|
||||
txmp.proxyAppConn.CheckTxAsync(abci.RequestCheckTx{
|
||||
Tx: wtx.tx,
|
||||
Type: abci.CheckTxType_Recheck,
|
||||
})
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
txmp.proxyAppConn.FlushAsync()
|
||||
}
|
||||
|
||||
// canAddTx returns an error if we cannot insert the provided *WrappedTx into
|
||||
// the mempool due to mempool configured constraints. Otherwise, nil is returned
|
||||
// and the transaction can be inserted into the mempool.
|
||||
func (txmp *TxMempool) canAddTx(wtx *WrappedTx) error {
|
||||
var (
|
||||
numTxs = txmp.Size()
|
||||
sizeBytes = txmp.SizeBytes()
|
||||
)
|
||||
|
||||
if numTxs >= txmp.config.Size || int64(wtx.Size())+sizeBytes > txmp.config.MaxTxsBytes {
|
||||
return mempool.ErrMempoolIsFull{
|
||||
NumTxs: numTxs,
|
||||
MaxTxs: txmp.config.Size,
|
||||
TxsBytes: sizeBytes,
|
||||
MaxTxsBytes: txmp.config.MaxTxsBytes,
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (txmp *TxMempool) insertTx(wtx *WrappedTx) {
|
||||
txmp.txStore.SetTx(wtx)
|
||||
txmp.priorityIndex.PushTx(wtx)
|
||||
txmp.heightIndex.Insert(wtx)
|
||||
txmp.timestampIndex.Insert(wtx)
|
||||
|
||||
// Insert the transaction into the gossip index and mark the reference to the
|
||||
// linked-list element, which will be needed at a later point when the
|
||||
// transaction is removed.
|
||||
gossipEl := txmp.gossipIndex.PushBack(wtx)
|
||||
wtx.gossipEl = gossipEl
|
||||
|
||||
atomic.AddInt64(&txmp.sizeBytes, int64(wtx.Size()))
|
||||
}
|
||||
|
||||
func (txmp *TxMempool) removeTx(wtx *WrappedTx, removeFromCache bool) {
|
||||
if txmp.txStore.IsTxRemoved(wtx.hash) {
|
||||
return
|
||||
}
|
||||
|
||||
txmp.txStore.RemoveTx(wtx)
|
||||
txmp.priorityIndex.RemoveTx(wtx)
|
||||
txmp.heightIndex.Remove(wtx)
|
||||
txmp.timestampIndex.Remove(wtx)
|
||||
|
||||
// Remove the transaction from the gossip index and cleanup the linked-list
|
||||
// element so it can be garbage collected.
|
||||
txmp.gossipIndex.Remove(wtx.gossipEl)
|
||||
wtx.gossipEl.DetachPrev()
|
||||
|
||||
atomic.AddInt64(&txmp.sizeBytes, int64(-wtx.Size()))
|
||||
|
||||
if removeFromCache {
|
||||
txmp.cache.Remove(wtx.tx)
|
||||
}
|
||||
}
|
||||
|
||||
// purgeExpiredTxs removes all transactions that have exceeded their respective
// height and/or time based TTLs from their respective indexes. Every expired
// transaction will be removed from the mempool entirely, except for the cache.
//
// NOTE: purgeExpiredTxs must only be called during TxMempool#Update in which
// the caller has a write-lock on the mempool and so we can safely iterate over
// the height and time based indexes.
func (txmp *TxMempool) purgeExpiredTxs(blockHeight int64) {
	now := time.Now()
	// Collected in a map keyed by tx key so a tx expired by both TTLs is only
	// removed once.
	expiredTxs := make(map[types.TxKey]*WrappedTx)

	if txmp.config.TTLNumBlocks > 0 {
		// purgeIdx tracks the last expired position in the (height-sorted) index.
		purgeIdx := -1
		for i, wtx := range txmp.heightIndex.txs {
			if (blockHeight - wtx.height) > txmp.config.TTLNumBlocks {
				expiredTxs[wtx.tx.Key()] = wtx
				purgeIdx = i
			} else {
				// since the index is sorted, we know no other txs can be purged
				break
			}
		}

		// Slice off the expired prefix in one step.
		if purgeIdx >= 0 {
			txmp.heightIndex.txs = txmp.heightIndex.txs[purgeIdx+1:]
		}
	}

	if txmp.config.TTLDuration > 0 {
		// Same prefix-purge scheme over the timestamp-sorted index.
		purgeIdx := -1
		for i, wtx := range txmp.timestampIndex.txs {
			if now.Sub(wtx.timestamp) > txmp.config.TTLDuration {
				expiredTxs[wtx.tx.Key()] = wtx
				purgeIdx = i
			} else {
				// since the index is sorted, we know no other txs can be purged
				break
			}
		}

		if purgeIdx >= 0 {
			txmp.timestampIndex.txs = txmp.timestampIndex.txs[purgeIdx+1:]
		}
	}

	// Remove every expired tx from the store and remaining indexes; the cache
	// entries are intentionally retained.
	for _, wtx := range expiredTxs {
		txmp.removeTx(wtx, false)
	}
}
|
||||
|
||||
func (txmp *TxMempool) notifyTxsAvailable() {
|
||||
if txmp.Size() == 0 {
|
||||
panic("attempt to notify txs available but mempool is empty!")
|
||||
}
|
||||
|
||||
if txmp.txsAvailable != nil && !txmp.notifiedTxsAvailable {
|
||||
// channel cap is 1, so this will send once
|
||||
txmp.notifiedTxsAvailable = true
|
||||
|
||||
select {
|
||||
case txmp.txsAvailable <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
}
|
||||
}
|
||||
31
mempool/v1/mempool_bench_test.go
Normal file
31
mempool/v1/mempool_bench_test.go
Normal file
@@ -0,0 +1,31 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/tendermint/tendermint/mempool"
|
||||
)
|
||||
|
||||
func BenchmarkTxMempool_CheckTx(b *testing.B) {
|
||||
txmp := setup(b, 10000)
|
||||
rng := rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
|
||||
b.ResetTimer()
|
||||
|
||||
for n := 0; n < b.N; n++ {
|
||||
b.StopTimer()
|
||||
prefix := make([]byte, 20)
|
||||
_, err := rng.Read(prefix)
|
||||
require.NoError(b, err)
|
||||
|
||||
priority := int64(rng.Intn(9999-1000) + 1000)
|
||||
tx := []byte(fmt.Sprintf("%X=%d", prefix, priority))
|
||||
b.StartTimer()
|
||||
|
||||
require.NoError(b, txmp.CheckTx(tx, nil, mempool.TxInfo{}))
|
||||
}
|
||||
}
|
||||
527
mempool/v1/mempool_test.go
Normal file
527
mempool/v1/mempool_test.go
Normal file
@@ -0,0 +1,527 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"os"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/tendermint/tendermint/abci/example/code"
|
||||
"github.com/tendermint/tendermint/abci/example/kvstore"
|
||||
abci "github.com/tendermint/tendermint/abci/types"
|
||||
"github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/mempool"
|
||||
"github.com/tendermint/tendermint/proxy"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
// application extends the KV store application by overriding CheckTx to provide
// transaction priority based on the value in the key/value pair.
type application struct {
	*kvstore.Application
}

// testTx pairs a raw transaction with the priority that was encoded into it,
// so tests can verify priority-ordered mempool behavior.
type testTx struct {
	tx       types.Tx // raw transaction bytes in "sender=key=value" form
	priority int64    // priority encoded into the value portion of tx
}
|
||||
|
||||
func (app *application) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx {
|
||||
var (
|
||||
priority int64
|
||||
sender string
|
||||
)
|
||||
|
||||
// infer the priority from the raw transaction value (sender=key=value)
|
||||
parts := bytes.Split(req.Tx, []byte("="))
|
||||
if len(parts) == 3 {
|
||||
v, err := strconv.ParseInt(string(parts[2]), 10, 64)
|
||||
if err != nil {
|
||||
return abci.ResponseCheckTx{
|
||||
Priority: priority,
|
||||
Code: 100,
|
||||
GasWanted: 1,
|
||||
}
|
||||
}
|
||||
|
||||
priority = v
|
||||
sender = string(parts[0])
|
||||
} else {
|
||||
return abci.ResponseCheckTx{
|
||||
Priority: priority,
|
||||
Code: 101,
|
||||
GasWanted: 1,
|
||||
}
|
||||
}
|
||||
|
||||
return abci.ResponseCheckTx{
|
||||
Priority: priority,
|
||||
Sender: sender,
|
||||
Code: code.CodeTypeOK,
|
||||
GasWanted: 1,
|
||||
}
|
||||
}
|
||||
|
||||
// setup builds a TxMempool backed by the priority-aware test application with
// the given cache size, starting a local ABCI client and registering cleanup
// of the client and the temporary config root.
func setup(t testing.TB, cacheSize int, options ...TxMempoolOption) *TxMempool {
	t.Helper()

	app := &application{kvstore.NewApplication()}
	cc := proxy.NewLocalClientCreator(app)
	// "/" is invalid in a root dir name, so subtests replace it with "|".
	cfg := config.ResetTestRoot(strings.ReplaceAll(t.Name(), "/", "|"))

	cfg.Mempool.CacheSize = cacheSize

	appConnMem, err := cc.NewABCIClient()
	require.NoError(t, err)
	require.NoError(t, appConnMem.Start())

	t.Cleanup(func() {
		os.RemoveAll(cfg.RootDir)
		require.NoError(t, appConnMem.Stop())
	})

	return NewTxMempool(log.TestingLogger().With("test", t.Name()), cfg.Mempool, appConnMem, 0, options...)
}
|
||||
|
||||
// checkTxs submits numTxs randomly generated transactions to txmp on behalf of
// peerID, each with a random priority in [1000, 9999), and returns them
// together with their priorities. Each tx is unique via a random 20-byte
// prefix, so none should collide in the cache.
func checkTxs(t *testing.T, txmp *TxMempool, numTxs int, peerID uint16) []testTx {
	txs := make([]testTx, numTxs)
	txInfo := mempool.TxInfo{SenderID: peerID}

	rng := rand.New(rand.NewSource(time.Now().UnixNano()))

	for i := 0; i < numTxs; i++ {
		prefix := make([]byte, 20)
		_, err := rng.Read(prefix)
		require.NoError(t, err)

		priority := int64(rng.Intn(9999-1000) + 1000)

		// sender=key=value encoding understood by the test application.
		txs[i] = testTx{
			tx:       []byte(fmt.Sprintf("sender-%d-%d=%X=%d", i, peerID, prefix, priority)),
			priority: priority,
		}
		require.NoError(t, txmp.CheckTx(txs[i].tx, nil, txInfo))
	}

	return txs
}
|
||||
|
||||
// TestTxMempool_TxsAvailable verifies that the TxsAvailable channel fires
// exactly once per height: once after the first CheckTx batch, once after an
// Update commits some transactions, and never again while the height is
// unchanged.
func TestTxMempool_TxsAvailable(t *testing.T) {
	txmp := setup(t, 0)
	txmp.EnableTxsAvailable()

	// ensureNoTxFire asserts no availability event arrives within 500ms.
	ensureNoTxFire := func() {
		timer := time.NewTimer(500 * time.Millisecond)
		select {
		case <-txmp.TxsAvailable():
			require.Fail(t, "unexpected transactions event")
		case <-timer.C:
		}
	}

	// ensureTxFire asserts an availability event arrives within 500ms.
	ensureTxFire := func() {
		timer := time.NewTimer(500 * time.Millisecond)
		select {
		case <-txmp.TxsAvailable():
		case <-timer.C:
			require.Fail(t, "expected transactions event")
		}
	}

	// ensure no event as we have not executed any transactions yet
	ensureNoTxFire()

	// Execute CheckTx for some transactions and ensure TxsAvailable only fires
	// once.
	txs := checkTxs(t, txmp, 100, 0)
	ensureTxFire()
	ensureNoTxFire()

	rawTxs := make([]types.Tx, len(txs))
	for i, tx := range txs {
		rawTxs[i] = tx.tx
	}

	responses := make([]*abci.ResponseDeliverTx, len(rawTxs[:50]))
	for i := 0; i < len(responses); i++ {
		responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK}
	}

	// commit half the transactions and ensure we fire an event
	txmp.Lock()
	require.NoError(t, txmp.Update(1, rawTxs[:50], responses, nil, nil))
	txmp.Unlock()
	ensureTxFire()
	ensureNoTxFire()

	// Execute CheckTx for more transactions and ensure we do not fire another
	// event as we're still on the same height (1).
	_ = checkTxs(t, txmp, 100, 0)
	ensureNoTxFire()
}
|
||||
|
||||
// TestTxMempool_Size verifies Size and SizeBytes bookkeeping: after 100
// checked transactions the totals match the expected fixture values (5690
// bytes for 100 generated txs), and halve after committing half of them.
func TestTxMempool_Size(t *testing.T) {
	txmp := setup(t, 0)
	txs := checkTxs(t, txmp, 100, 0)
	require.Equal(t, len(txs), txmp.Size())
	// 100 generated txs encode to exactly 5690 proto bytes.
	require.Equal(t, int64(5690), txmp.SizeBytes())

	rawTxs := make([]types.Tx, len(txs))
	for i, tx := range txs {
		rawTxs[i] = tx.tx
	}

	responses := make([]*abci.ResponseDeliverTx, len(rawTxs[:50]))
	for i := 0; i < len(responses); i++ {
		responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK}
	}

	// Commit the first 50 txs; they must be removed from the mempool.
	txmp.Lock()
	require.NoError(t, txmp.Update(1, rawTxs[:50], responses, nil, nil))
	txmp.Unlock()

	require.Equal(t, len(rawTxs)/2, txmp.Size())
	require.Equal(t, int64(2850), txmp.SizeBytes())
}
|
||||
|
||||
// TestTxMempool_Flush verifies that Flush empties the mempool entirely, even
// after a partial Update has already removed some transactions.
func TestTxMempool_Flush(t *testing.T) {
	txmp := setup(t, 0)
	txs := checkTxs(t, txmp, 100, 0)
	require.Equal(t, len(txs), txmp.Size())
	// 100 generated txs encode to exactly 5690 proto bytes.
	require.Equal(t, int64(5690), txmp.SizeBytes())

	rawTxs := make([]types.Tx, len(txs))
	for i, tx := range txs {
		rawTxs[i] = tx.tx
	}

	responses := make([]*abci.ResponseDeliverTx, len(rawTxs[:50]))
	for i := 0; i < len(responses); i++ {
		responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK}
	}

	// Commit half the transactions first so Flush runs on a partially
	// drained mempool.
	txmp.Lock()
	require.NoError(t, txmp.Update(1, rawTxs[:50], responses, nil, nil))
	txmp.Unlock()

	txmp.Flush()
	require.Zero(t, txmp.Size())
	require.Equal(t, int64(0), txmp.SizeBytes())
}
|
||||
|
||||
// TestTxMempool_ReapMaxBytesMaxGas verifies that reaping honors the byte and
// gas budgets, returns transactions in strictly decreasing priority order, and
// never removes anything from the mempool.
func TestTxMempool_ReapMaxBytesMaxGas(t *testing.T) {
	txmp := setup(t, 0)
	tTxs := checkTxs(t, txmp, 100, 0) // all txs request 1 gas unit
	require.Equal(t, len(tTxs), txmp.Size())
	require.Equal(t, int64(5690), txmp.SizeBytes())

	txMap := make(map[types.TxKey]testTx)
	priorities := make([]int64, len(tTxs))
	for i, tTx := range tTxs {
		txMap[tTx.tx.Key()] = tTx
		priorities[i] = tTx.priority
	}

	sort.Slice(priorities, func(i, j int) bool {
		// sort by priority, i.e. decreasing order
		return priorities[i] > priorities[j]
	})

	// ensurePrioritized asserts the reaped txs are exactly the highest-priority
	// prefix of all submitted txs, in order.
	ensurePrioritized := func(reapedTxs types.Txs) {
		reapedPriorities := make([]int64, len(reapedTxs))
		for i, rTx := range reapedTxs {
			reapedPriorities[i] = txMap[rTx.Key()].priority
		}

		require.Equal(t, priorities[:len(reapedPriorities)], reapedPriorities)
	}

	// reap by gas capacity only
	reapedTxs := txmp.ReapMaxBytesMaxGas(-1, 50)
	ensurePrioritized(reapedTxs)
	require.Equal(t, len(tTxs), txmp.Size())
	require.Equal(t, int64(5690), txmp.SizeBytes())
	require.Len(t, reapedTxs, 50)

	// reap by transaction bytes only
	reapedTxs = txmp.ReapMaxBytesMaxGas(1000, -1)
	ensurePrioritized(reapedTxs)
	require.Equal(t, len(tTxs), txmp.Size())
	require.Equal(t, int64(5690), txmp.SizeBytes())
	require.GreaterOrEqual(t, len(reapedTxs), 16)

	// Reap by both transaction bytes and gas, where the size yields 31 reaped
	// transactions and the gas limit reaps 25 transactions.
	reapedTxs = txmp.ReapMaxBytesMaxGas(1500, 30)
	ensurePrioritized(reapedTxs)
	require.Equal(t, len(tTxs), txmp.Size())
	require.Equal(t, int64(5690), txmp.SizeBytes())
	require.Len(t, reapedTxs, 25)
}
|
||||
|
||||
// TestTxMempool_ReapMaxTxs verifies the count-bounded reap: a negative bound
// returns everything, positive bounds return exactly that many txs, results
// come in decreasing priority order, and the mempool is left untouched.
func TestTxMempool_ReapMaxTxs(t *testing.T) {
	txmp := setup(t, 0)
	tTxs := checkTxs(t, txmp, 100, 0)
	require.Equal(t, len(tTxs), txmp.Size())
	require.Equal(t, int64(5690), txmp.SizeBytes())

	txMap := make(map[types.TxKey]testTx)
	priorities := make([]int64, len(tTxs))
	for i, tTx := range tTxs {
		txMap[tTx.tx.Key()] = tTx
		priorities[i] = tTx.priority
	}

	sort.Slice(priorities, func(i, j int) bool {
		// sort by priority, i.e. decreasing order
		return priorities[i] > priorities[j]
	})

	// ensurePrioritized asserts the reaped txs are exactly the highest-priority
	// prefix of all submitted txs, in order.
	ensurePrioritized := func(reapedTxs types.Txs) {
		reapedPriorities := make([]int64, len(reapedTxs))
		for i, rTx := range reapedTxs {
			reapedPriorities[i] = txMap[rTx.Key()].priority
		}

		require.Equal(t, priorities[:len(reapedPriorities)], reapedPriorities)
	}

	// reap all transactions
	reapedTxs := txmp.ReapMaxTxs(-1)
	ensurePrioritized(reapedTxs)
	require.Equal(t, len(tTxs), txmp.Size())
	require.Equal(t, int64(5690), txmp.SizeBytes())
	require.Len(t, reapedTxs, len(tTxs))

	// reap a single transaction
	reapedTxs = txmp.ReapMaxTxs(1)
	ensurePrioritized(reapedTxs)
	require.Equal(t, len(tTxs), txmp.Size())
	require.Equal(t, int64(5690), txmp.SizeBytes())
	require.Len(t, reapedTxs, 1)

	// reap half of the transactions
	reapedTxs = txmp.ReapMaxTxs(len(tTxs) / 2)
	ensurePrioritized(reapedTxs)
	require.Equal(t, len(tTxs), txmp.Size())
	require.Equal(t, int64(5690), txmp.SizeBytes())
	require.Len(t, reapedTxs, len(tTxs)/2)
}
|
||||
|
||||
func TestTxMempool_CheckTxExceedsMaxSize(t *testing.T) {
|
||||
txmp := setup(t, 0)
|
||||
|
||||
rng := rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
tx := make([]byte, txmp.config.MaxTxBytes+1)
|
||||
_, err := rng.Read(tx)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Error(t, txmp.CheckTx(tx, nil, mempool.TxInfo{SenderID: 0}))
|
||||
|
||||
tx = make([]byte, txmp.config.MaxTxBytes-1)
|
||||
_, err = rng.Read(tx)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, txmp.CheckTx(tx, nil, mempool.TxInfo{SenderID: 0}))
|
||||
}
|
||||
|
||||
func TestTxMempool_CheckTxSamePeer(t *testing.T) {
|
||||
txmp := setup(t, 100)
|
||||
peerID := uint16(1)
|
||||
rng := rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
|
||||
prefix := make([]byte, 20)
|
||||
_, err := rng.Read(prefix)
|
||||
require.NoError(t, err)
|
||||
|
||||
tx := []byte(fmt.Sprintf("sender-0=%X=%d", prefix, 50))
|
||||
|
||||
require.NoError(t, txmp.CheckTx(tx, nil, mempool.TxInfo{SenderID: peerID}))
|
||||
require.Error(t, txmp.CheckTx(tx, nil, mempool.TxInfo{SenderID: peerID}))
|
||||
}
|
||||
|
||||
func TestTxMempool_CheckTxSameSender(t *testing.T) {
|
||||
txmp := setup(t, 100)
|
||||
peerID := uint16(1)
|
||||
rng := rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
|
||||
prefix1 := make([]byte, 20)
|
||||
_, err := rng.Read(prefix1)
|
||||
require.NoError(t, err)
|
||||
|
||||
prefix2 := make([]byte, 20)
|
||||
_, err = rng.Read(prefix2)
|
||||
require.NoError(t, err)
|
||||
|
||||
tx1 := []byte(fmt.Sprintf("sender-0=%X=%d", prefix1, 50))
|
||||
tx2 := []byte(fmt.Sprintf("sender-0=%X=%d", prefix2, 50))
|
||||
|
||||
require.NoError(t, txmp.CheckTx(tx1, nil, mempool.TxInfo{SenderID: peerID}))
|
||||
require.Equal(t, 1, txmp.Size())
|
||||
require.NoError(t, txmp.CheckTx(tx2, nil, mempool.TxInfo{SenderID: peerID}))
|
||||
require.Equal(t, 1, txmp.Size())
|
||||
}
|
||||
|
||||
// TestTxMempool_ConcurrentTxs runs a CheckTx producer goroutine against a
// reap/Update consumer goroutine and asserts the mempool fully drains once the
// producer stops. Roughly every tenth DeliverTx response is marked failed to
// exercise the invalid-tx path in Update.
func TestTxMempool_ConcurrentTxs(t *testing.T) {
	txmp := setup(t, 100)
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	// Closed by the producer so the consumer knows when to stop.
	checkTxDone := make(chan struct{})

	var wg sync.WaitGroup

	// Producer: 20 batches of 100 txs with random pauses between batches.
	wg.Add(1)
	go func() {
		for i := 0; i < 20; i++ {
			_ = checkTxs(t, txmp, 100, 0)
			dur := rng.Intn(1000-500) + 500
			time.Sleep(time.Duration(dur) * time.Millisecond)
		}

		wg.Done()
		close(checkTxDone)
	}()

	// Consumer: once a second, reap up to 200 txs and commit them via Update.
	wg.Add(1)
	go func() {
		ticker := time.NewTicker(time.Second)
		defer ticker.Stop()
		defer wg.Done()

		var height int64 = 1

		for range ticker.C {
			reapedTxs := txmp.ReapMaxTxs(200)
			if len(reapedTxs) > 0 {
				responses := make([]*abci.ResponseDeliverTx, len(reapedTxs))
				for i := 0; i < len(responses); i++ {
					var code uint32

					// Mark every tenth tx as failed to cover both Update paths.
					if i%10 == 0 {
						code = 100
					} else {
						code = abci.CodeTypeOK
					}

					responses[i] = &abci.ResponseDeliverTx{Code: code}
				}

				txmp.Lock()
				require.NoError(t, txmp.Update(height, reapedTxs, responses, nil, nil))
				txmp.Unlock()

				height++
			} else {
				// only return once we know we finished the CheckTx loop
				select {
				case <-checkTxDone:
					return
				default:
				}
			}
		}
	}()

	wg.Wait()
	// Everything produced must have been committed and removed.
	require.Zero(t, txmp.Size())
	require.Zero(t, txmp.SizeBytes())
}
|
||||
|
||||
// TestTxMempool_ExpiredTxs_NumBlocks verifies TTLNumBlocks-based expiry:
// transactions checked at height 100 must be purged once an Update moves the
// mempool at least 10 blocks past that height.
func TestTxMempool_ExpiredTxs_NumBlocks(t *testing.T) {
	txmp := setup(t, 500)
	txmp.height = 100
	txmp.config.TTLNumBlocks = 10

	tTxs := checkTxs(t, txmp, 100, 0)
	require.Equal(t, len(tTxs), txmp.Size())
	require.Equal(t, 100, txmp.heightIndex.Size())

	// reap 5 txs at the next height -- no txs should expire
	reapedTxs := txmp.ReapMaxTxs(5)
	responses := make([]*abci.ResponseDeliverTx, len(reapedTxs))
	for i := 0; i < len(responses); i++ {
		responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK}
	}

	txmp.Lock()
	require.NoError(t, txmp.Update(txmp.height+1, reapedTxs, responses, nil, nil))
	txmp.Unlock()

	// 5 committed txs were removed; none expired yet (only 1 block elapsed).
	require.Equal(t, 95, txmp.Size())
	require.Equal(t, 95, txmp.heightIndex.Size())

	// check more txs at height 101
	_ = checkTxs(t, txmp, 50, 1)
	require.Equal(t, 145, txmp.Size())
	require.Equal(t, 145, txmp.heightIndex.Size())

	// Reap 5 txs at a height that would expire all the transactions from before
	// the previous Update (height 100).
	//
	// NOTE: When we reap txs below, we do not know if we're picking txs from the
	// initial CheckTx calls or from the second round of CheckTx calls. Thus, we
	// cannot guarantee that all 95 txs are remaining that should be expired and
	// removed. However, we do know that that at most 95 txs can be expired and
	// removed.
	reapedTxs = txmp.ReapMaxTxs(5)
	responses = make([]*abci.ResponseDeliverTx, len(reapedTxs))
	for i := 0; i < len(responses); i++ {
		responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK}
	}

	txmp.Lock()
	require.NoError(t, txmp.Update(txmp.height+10, reapedTxs, responses, nil, nil))
	txmp.Unlock()

	// At least the 50 height-101 txs minus the 5 just committed must remain.
	require.GreaterOrEqual(t, txmp.Size(), 45)
	require.GreaterOrEqual(t, txmp.heightIndex.Size(), 45)
}
|
||||
|
||||
func TestTxMempool_CheckTxPostCheckError(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
err error
|
||||
}{
|
||||
{
|
||||
name: "error",
|
||||
err: errors.New("test error"),
|
||||
},
|
||||
{
|
||||
name: "no error",
|
||||
err: nil,
|
||||
},
|
||||
}
|
||||
for _, tc := range cases {
|
||||
testCase := tc
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
postCheckFn := func(_ types.Tx, _ *abci.ResponseCheckTx) error {
|
||||
return testCase.err
|
||||
}
|
||||
txmp := setup(t, 0, WithPostCheck(postCheckFn))
|
||||
rng := rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
tx := make([]byte, txmp.config.MaxTxBytes-1)
|
||||
_, err := rng.Read(tx)
|
||||
require.NoError(t, err)
|
||||
|
||||
callback := func(res *abci.Response) {
|
||||
checkTxRes, ok := res.Value.(*abci.Response_CheckTx)
|
||||
require.True(t, ok)
|
||||
expectedErrString := ""
|
||||
if testCase.err != nil {
|
||||
expectedErrString = testCase.err.Error()
|
||||
}
|
||||
require.Equal(t, expectedErrString, checkTxRes.CheckTx.MempoolError)
|
||||
}
|
||||
require.NoError(t, txmp.CheckTx(tx, callback, mempool.TxInfo{SenderID: 0}))
|
||||
})
|
||||
}
|
||||
}
|
||||
159
mempool/v1/priority_queue.go
Normal file
159
mempool/v1/priority_queue.go
Normal file
@@ -0,0 +1,159 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
"container/heap"
|
||||
"sort"
|
||||
|
||||
tmsync "github.com/tendermint/tendermint/libs/sync"
|
||||
)
|
||||
|
||||
// Compile-time assertion that TxPriorityQueue satisfies container/heap.
var _ heap.Interface = (*TxPriorityQueue)(nil)

// TxPriorityQueue defines a thread-safe priority queue for valid transactions.
type TxPriorityQueue struct {
	mtx tmsync.RWMutex // guards txs
	txs []*WrappedTx   // heap-ordered backing slice (ordering defined by Less)
}
|
||||
|
||||
func NewTxPriorityQueue() *TxPriorityQueue {
|
||||
pq := &TxPriorityQueue{
|
||||
txs: make([]*WrappedTx, 0),
|
||||
}
|
||||
|
||||
heap.Init(pq)
|
||||
|
||||
return pq
|
||||
}
|
||||
|
||||
// GetEvictableTxs attempts to find and return a list of *WrappedTx than can be
|
||||
// evicted to make room for another *WrappedTx with higher priority. If no such
|
||||
// list of *WrappedTx exists, nil will be returned. The returned list of *WrappedTx
|
||||
// indicate that these transactions can be removed due to them being of lower
|
||||
// priority and that their total sum in size allows room for the incoming
|
||||
// transaction according to the mempool's configured limits.
|
||||
func (pq *TxPriorityQueue) GetEvictableTxs(priority, txSize, totalSize, cap int64) []*WrappedTx {
|
||||
pq.mtx.RLock()
|
||||
defer pq.mtx.RUnlock()
|
||||
|
||||
txs := make([]*WrappedTx, len(pq.txs))
|
||||
copy(txs, pq.txs)
|
||||
|
||||
sort.Slice(txs, func(i, j int) bool {
|
||||
return txs[i].priority < txs[j].priority
|
||||
})
|
||||
|
||||
var (
|
||||
toEvict []*WrappedTx
|
||||
i int
|
||||
)
|
||||
|
||||
currSize := totalSize
|
||||
|
||||
// Loop over all transactions in ascending priority order evaluating those
|
||||
// that are only of less priority than the provided argument. We continue
|
||||
// evaluating transactions until there is sufficient capacity for the new
|
||||
// transaction (size) as defined by txSize.
|
||||
for i < len(txs) && txs[i].priority < priority {
|
||||
toEvict = append(toEvict, txs[i])
|
||||
currSize -= int64(txs[i].Size())
|
||||
|
||||
if currSize+txSize <= cap {
|
||||
return toEvict
|
||||
}
|
||||
|
||||
i++
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// NumTxs returns the number of transactions in the priority queue. It is
|
||||
// thread safe.
|
||||
func (pq *TxPriorityQueue) NumTxs() int {
|
||||
pq.mtx.RLock()
|
||||
defer pq.mtx.RUnlock()
|
||||
|
||||
return len(pq.txs)
|
||||
}
|
||||
|
||||
// RemoveTx removes a specific transaction from the priority queue.
|
||||
func (pq *TxPriorityQueue) RemoveTx(tx *WrappedTx) {
|
||||
pq.mtx.Lock()
|
||||
defer pq.mtx.Unlock()
|
||||
|
||||
if tx.heapIndex < len(pq.txs) {
|
||||
heap.Remove(pq, tx.heapIndex)
|
||||
}
|
||||
}
|
||||
|
||||
// PushTx adds a valid transaction to the priority queue. It is thread safe.
|
||||
func (pq *TxPriorityQueue) PushTx(tx *WrappedTx) {
|
||||
pq.mtx.Lock()
|
||||
defer pq.mtx.Unlock()
|
||||
|
||||
heap.Push(pq, tx)
|
||||
}
|
||||
|
||||
// PopTx removes the top priority transaction from the queue. It is thread safe.
|
||||
func (pq *TxPriorityQueue) PopTx() *WrappedTx {
|
||||
pq.mtx.Lock()
|
||||
defer pq.mtx.Unlock()
|
||||
|
||||
x := heap.Pop(pq)
|
||||
if x != nil {
|
||||
return x.(*WrappedTx)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Push implements the Heap interface.
|
||||
//
|
||||
// NOTE: A caller should never call Push. Use PushTx instead.
|
||||
func (pq *TxPriorityQueue) Push(x interface{}) {
|
||||
n := len(pq.txs)
|
||||
item := x.(*WrappedTx)
|
||||
item.heapIndex = n
|
||||
pq.txs = append(pq.txs, item)
|
||||
}
|
||||
|
||||
// Pop implements the Heap interface. It removes and returns the last element
// of the backing slice (which container/heap has already swapped the root
// into).
//
// NOTE: A caller should never call Pop. Use PopTx instead.
func (pq *TxPriorityQueue) Pop() interface{} {
	old := pq.txs
	n := len(old)
	item := old[n-1]
	old[n-1] = nil // avoid memory leak: drop the reference so GC can reclaim it
	item.heapIndex = -1 // for safety: marks the tx as no longer in the heap
	pq.txs = old[0 : n-1]
	return item
}
|
||||
|
||||
// Len implements the Heap interface. Unlike NumTxs it is NOT thread safe.
//
// NOTE: A caller should never call Len. Use NumTxs instead.
func (pq *TxPriorityQueue) Len() int {
	return len(pq.txs)
}
|
||||
|
||||
// Less implements the Heap interface. It returns true if the transaction at
|
||||
// position i in the queue is of less priority than the transaction at position j.
|
||||
func (pq *TxPriorityQueue) Less(i, j int) bool {
|
||||
// If there exists two transactions with the same priority, consider the one
|
||||
// that we saw the earliest as the higher priority transaction.
|
||||
if pq.txs[i].priority == pq.txs[j].priority {
|
||||
return pq.txs[i].timestamp.Before(pq.txs[j].timestamp)
|
||||
}
|
||||
|
||||
// We want Pop to give us the highest, not lowest, priority so we use greater
|
||||
// than here.
|
||||
return pq.txs[i].priority > pq.txs[j].priority
|
||||
}
|
||||
|
||||
// Swap implements the Heap interface. It swaps two transactions in the queue.
|
||||
func (pq *TxPriorityQueue) Swap(i, j int) {
|
||||
pq.txs[i], pq.txs[j] = pq.txs[j], pq.txs[i]
|
||||
pq.txs[i].heapIndex = i
|
||||
pq.txs[j].heapIndex = j
|
||||
}
|
||||
176
mempool/v1/priority_queue_test.go
Normal file
176
mempool/v1/priority_queue_test.go
Normal file
@@ -0,0 +1,176 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"sort"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestTxPriorityQueue pushes 1000 txs concurrently, then verifies FIFO
// tie-breaking for equal priorities and a strictly descending pop order.
func TestTxPriorityQueue(t *testing.T) {
	pq := NewTxPriorityQueue()
	numTxs := 1000

	priorities := make([]int, numTxs)

	// Push priorities 1..numTxs from separate goroutines to exercise the
	// queue's locking.
	var wg sync.WaitGroup
	for i := 1; i <= numTxs; i++ {
		priorities[i-1] = i
		wg.Add(1)

		go func(i int) {
			pq.PushTx(&WrappedTx{
				priority:  int64(i),
				timestamp: time.Now(),
			})

			wg.Done()
		}(i)
	}

	// Expected pop order: highest priority first.
	sort.Sort(sort.Reverse(sort.IntSlice(priorities)))

	wg.Wait()
	require.Equal(t, numTxs, pq.NumTxs())

	// Wait a second and push a tx with a duplicate priority
	time.Sleep(time.Second)
	now := time.Now()
	pq.PushTx(&WrappedTx{
		priority:  1000,
		timestamp: now,
	})
	require.Equal(t, 1001, pq.NumTxs())

	// Of the two priority-1000 txs, the earlier-seen one must pop first, so
	// the popped timestamp must differ from `now`.
	tx := pq.PopTx()
	require.Equal(t, 1000, pq.NumTxs())
	require.Equal(t, int64(1000), tx.priority)
	require.NotEqual(t, now, tx.timestamp)

	gotPriorities := make([]int, 0)
	for pq.NumTxs() > 0 {
		gotPriorities = append(gotPriorities, int(pq.PopTx().priority))
	}

	require.Equal(t, priorities, gotPriorities)
}
|
||||
|
||||
func TestTxPriorityQueue_GetEvictableTxs(t *testing.T) {
|
||||
pq := NewTxPriorityQueue()
|
||||
rng := rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
|
||||
values := make([]int, 1000)
|
||||
|
||||
for i := 0; i < 1000; i++ {
|
||||
tx := make([]byte, 5) // each tx is 5 bytes
|
||||
_, err := rng.Read(tx)
|
||||
require.NoError(t, err)
|
||||
|
||||
x := rng.Intn(100000)
|
||||
pq.PushTx(&WrappedTx{
|
||||
tx: tx,
|
||||
priority: int64(x),
|
||||
})
|
||||
|
||||
values[i] = x
|
||||
}
|
||||
|
||||
sort.Ints(values)
|
||||
|
||||
max := values[len(values)-1]
|
||||
min := values[0]
|
||||
totalSize := int64(len(values) * 5)
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
priority, txSize, totalSize, cap int64
|
||||
expectedLen int
|
||||
}{
|
||||
{
|
||||
name: "larest priority; single tx",
|
||||
priority: int64(max + 1),
|
||||
txSize: 5,
|
||||
totalSize: totalSize,
|
||||
cap: totalSize,
|
||||
expectedLen: 1,
|
||||
},
|
||||
{
|
||||
name: "larest priority; multi tx",
|
||||
priority: int64(max + 1),
|
||||
txSize: 17,
|
||||
totalSize: totalSize,
|
||||
cap: totalSize,
|
||||
expectedLen: 4,
|
||||
},
|
||||
{
|
||||
name: "larest priority; out of capacity",
|
||||
priority: int64(max + 1),
|
||||
txSize: totalSize + 1,
|
||||
totalSize: totalSize,
|
||||
cap: totalSize,
|
||||
expectedLen: 0,
|
||||
},
|
||||
{
|
||||
name: "smallest priority; no tx",
|
||||
priority: int64(min - 1),
|
||||
txSize: 5,
|
||||
totalSize: totalSize,
|
||||
cap: totalSize,
|
||||
expectedLen: 0,
|
||||
},
|
||||
{
|
||||
name: "small priority; no tx",
|
||||
priority: int64(min),
|
||||
txSize: 5,
|
||||
totalSize: totalSize,
|
||||
cap: totalSize,
|
||||
expectedLen: 0,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
tc := tc
|
||||
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
evictTxs := pq.GetEvictableTxs(tc.priority, tc.txSize, tc.totalSize, tc.cap)
|
||||
require.Len(t, evictTxs, tc.expectedLen)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestTxPriorityQueue_RemoveTx(t *testing.T) {
|
||||
pq := NewTxPriorityQueue()
|
||||
rng := rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
numTxs := 1000
|
||||
|
||||
values := make([]int, numTxs)
|
||||
|
||||
for i := 0; i < numTxs; i++ {
|
||||
x := rng.Intn(100000)
|
||||
pq.PushTx(&WrappedTx{
|
||||
priority: int64(x),
|
||||
})
|
||||
|
||||
values[i] = x
|
||||
}
|
||||
|
||||
require.Equal(t, numTxs, pq.NumTxs())
|
||||
|
||||
sort.Ints(values)
|
||||
max := values[len(values)-1]
|
||||
|
||||
wtx := pq.txs[pq.NumTxs()/2]
|
||||
pq.RemoveTx(wtx)
|
||||
require.Equal(t, numTxs-1, pq.NumTxs())
|
||||
require.Equal(t, int64(max), pq.PopTx().priority)
|
||||
require.Equal(t, numTxs-2, pq.NumTxs())
|
||||
|
||||
require.NotPanics(t, func() {
|
||||
pq.RemoveTx(&WrappedTx{heapIndex: numTxs})
|
||||
pq.RemoveTx(&WrappedTx{heapIndex: numTxs + 1})
|
||||
})
|
||||
require.Equal(t, numTxs-2, pq.NumTxs())
|
||||
}
|
||||
310
mempool/v1/reactor.go
Normal file
310
mempool/v1/reactor.go
Normal file
@@ -0,0 +1,310 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/libs/clist"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmsync "github.com/tendermint/tendermint/libs/sync"
|
||||
"github.com/tendermint/tendermint/mempool"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
protomem "github.com/tendermint/tendermint/proto/tendermint/mempool"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
// Reactor handles mempool tx broadcasting amongst peers.
// It maintains a map from peer ID to counter, to prevent gossiping txs to the
// peers you received it from.
type Reactor struct {
	p2p.BaseReactor
	config  *cfg.MempoolConfig // mempool configuration (broadcast flag, limits)
	mempool *TxMempool         // the prioritized mempool this reactor gossips for
	ids     *mempoolIDs        // per-peer numeric IDs used to track tx senders
}
|
||||
|
||||
// mempoolIDs assigns each connected peer a compact uint16 ID, used by the
// mempool to record which peers have already sent a given transaction.
type mempoolIDs struct {
	mtx       tmsync.RWMutex
	peerMap   map[p2p.ID]uint16
	nextID    uint16              // assumes that a node will never have over 65536 active peers
	activeIDs map[uint16]struct{} // used to check if a given peerID key is used, the value doesn't matter
}
|
||||
|
||||
// Reserve searches for the next unused ID and assigns it to the
|
||||
// peer.
|
||||
func (ids *mempoolIDs) ReserveForPeer(peer p2p.Peer) {
|
||||
ids.mtx.Lock()
|
||||
defer ids.mtx.Unlock()
|
||||
|
||||
curID := ids.nextPeerID()
|
||||
ids.peerMap[peer.ID()] = curID
|
||||
ids.activeIDs[curID] = struct{}{}
|
||||
}
|
||||
|
||||
// nextPeerID returns the next unused peer ID to use.
|
||||
// This assumes that ids's mutex is already locked.
|
||||
func (ids *mempoolIDs) nextPeerID() uint16 {
|
||||
if len(ids.activeIDs) == mempool.MaxActiveIDs {
|
||||
panic(fmt.Sprintf("node has maximum %d active IDs and wanted to get one more", mempool.MaxActiveIDs))
|
||||
}
|
||||
|
||||
_, idExists := ids.activeIDs[ids.nextID]
|
||||
for idExists {
|
||||
ids.nextID++
|
||||
_, idExists = ids.activeIDs[ids.nextID]
|
||||
}
|
||||
curID := ids.nextID
|
||||
ids.nextID++
|
||||
return curID
|
||||
}
|
||||
|
||||
// Reclaim returns the ID reserved for the peer back to unused pool.
|
||||
func (ids *mempoolIDs) Reclaim(peer p2p.Peer) {
|
||||
ids.mtx.Lock()
|
||||
defer ids.mtx.Unlock()
|
||||
|
||||
removedID, ok := ids.peerMap[peer.ID()]
|
||||
if ok {
|
||||
delete(ids.activeIDs, removedID)
|
||||
delete(ids.peerMap, peer.ID())
|
||||
}
|
||||
}
|
||||
|
||||
// GetForPeer returns an ID reserved for the peer.
|
||||
func (ids *mempoolIDs) GetForPeer(peer p2p.Peer) uint16 {
|
||||
ids.mtx.RLock()
|
||||
defer ids.mtx.RUnlock()
|
||||
|
||||
return ids.peerMap[peer.ID()]
|
||||
}
|
||||
|
||||
func newMempoolIDs() *mempoolIDs {
|
||||
return &mempoolIDs{
|
||||
peerMap: make(map[p2p.ID]uint16),
|
||||
activeIDs: map[uint16]struct{}{0: {}},
|
||||
nextID: 1, // reserve unknownPeerID(0) for mempoolReactor.BroadcastTx
|
||||
}
|
||||
}
|
||||
|
||||
// NewReactor returns a new Reactor with the given config and mempool.
|
||||
func NewReactor(config *cfg.MempoolConfig, mempool *TxMempool) *Reactor {
|
||||
memR := &Reactor{
|
||||
config: config,
|
||||
mempool: mempool,
|
||||
ids: newMempoolIDs(),
|
||||
}
|
||||
memR.BaseReactor = *p2p.NewBaseReactor("Mempool", memR)
|
||||
return memR
|
||||
}
|
||||
|
||||
// InitPeer implements Reactor by creating a state for the peer. It reserves
// a numeric mempool ID for the peer before any messages are exchanged.
func (memR *Reactor) InitPeer(peer p2p.Peer) p2p.Peer {
	memR.ids.ReserveForPeer(peer)
	return peer
}
|
||||
|
||||
// SetLogger sets the Logger on the reactor.
//
// NOTE(review): the original comment claimed the underlying mempool's logger
// is also set here, but only the reactor's logger is assigned — the mempool
// receives its logger at construction time. Confirm whether propagation was
// intended.
func (memR *Reactor) SetLogger(l log.Logger) {
	memR.Logger = l
}
|
||||
|
||||
// OnStart implements p2p.BaseReactor. It never fails; it only logs when tx
// broadcasting is disabled by configuration.
func (memR *Reactor) OnStart() error {
	if !memR.config.Broadcast {
		memR.Logger.Info("Tx broadcasting is disabled")
	}
	return nil
}
|
||||
|
||||
// GetChannels implements Reactor by returning the list of channels for this
// reactor.
func (memR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
	// Encode a message carrying a single maximum-size tx; its wire size
	// bounds the receive capacity needed on the mempool channel.
	largestTx := make([]byte, memR.config.MaxTxBytes)
	batchMsg := protomem.Message{
		Sum: &protomem.Message_Txs{
			Txs: &protomem.Txs{Txs: [][]byte{largestTx}},
		},
	}

	return []*p2p.ChannelDescriptor{
		{
			ID:                  mempool.MempoolChannel,
			Priority:            5,
			RecvMessageCapacity: batchMsg.Size(),
		},
	}
}
|
||||
|
||||
// AddPeer implements Reactor.
// It starts a broadcast routine ensuring all txs are forwarded to the given peer,
// unless broadcasting is disabled in the config.
func (memR *Reactor) AddPeer(peer p2p.Peer) {
	if memR.config.Broadcast {
		go memR.broadcastTxRoutine(peer)
	}
}
|
||||
|
||||
// RemovePeer implements Reactor. It releases the peer's reserved mempool ID.
func (memR *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) {
	memR.ids.Reclaim(peer)
	// broadcast routine checks if peer is gone and returns
}
|
||||
|
||||
// Receive implements Reactor.
|
||||
// It adds any received transactions to the mempool.
|
||||
func (memR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
|
||||
msg, err := memR.decodeMsg(msgBytes)
|
||||
if err != nil {
|
||||
memR.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err)
|
||||
memR.Switch.StopPeerForError(src, err)
|
||||
return
|
||||
}
|
||||
memR.Logger.Debug("Receive", "src", src, "chId", chID, "msg", msg)
|
||||
|
||||
txInfo := mempool.TxInfo{SenderID: memR.ids.GetForPeer(src)}
|
||||
if src != nil {
|
||||
txInfo.SenderP2PID = src.ID()
|
||||
}
|
||||
for _, tx := range msg.Txs {
|
||||
err = memR.mempool.CheckTx(tx, nil, txInfo)
|
||||
if err == mempool.ErrTxInCache {
|
||||
memR.Logger.Debug("Tx already exists in cache", "tx", tx.String())
|
||||
} else if err != nil {
|
||||
memR.Logger.Info("Could not check tx", "tx", tx.String(), "err", err)
|
||||
}
|
||||
}
|
||||
// broadcasting happens from go routines per peer
|
||||
}
|
||||
|
||||
// PeerState describes the state of a peer. It is used by the broadcast
// routine to avoid sending txs to peers that are too far behind.
type PeerState interface {
	// GetHeight returns the peer's latest known block height.
	GetHeight() int64
}
|
||||
|
||||
// broadcastTxRoutine sends new mempool txs to peer. It runs for the lifetime
// of the peer connection and walks the mempool's gossip list, retrying failed
// sends and skipping txs the peer already sent us.
func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) {
	peerID := memR.ids.GetForPeer(peer)
	var next *clist.CElement

	for {
		// In case of both next.NextWaitChan() and peer.Quit() are variable at the same time
		if !memR.IsRunning() || !peer.IsRunning() {
			return
		}

		// This happens because the CElement we were looking at got garbage
		// collected (removed). That is, .NextWait() returned nil. Go ahead and
		// start from the beginning.
		if next == nil {
			select {
			case <-memR.mempool.WaitForNextTx(): // Wait until a tx is available
				if next = memR.mempool.NextGossipTx(); next == nil {
					continue
				}

			case <-peer.Quit():
				return

			case <-memR.Quit():
				return
			}
		}

		// Make sure the peer is up to date.
		peerState, ok := peer.Get(types.PeerStateKey).(PeerState)
		if !ok {
			// Peer does not have a state yet. We set it in the consensus reactor, but
			// when we add peer in Switch, the order we call reactors#AddPeer is
			// different every time due to us using a map. Sometimes other reactors
			// will be initialized before the consensus reactor. We should wait a few
			// milliseconds and retry.
			time.Sleep(mempool.PeerCatchupSleepIntervalMS * time.Millisecond)
			continue
		}

		// Allow for a lag of 1 block.
		memTx := next.Value.(*WrappedTx)
		if peerState.GetHeight() < memTx.height-1 {
			time.Sleep(mempool.PeerCatchupSleepIntervalMS * time.Millisecond)
			continue
		}

		// Only gossip the tx if this peer was not one of its senders.
		// NOTE: Transaction batching was disabled due to
		// https://github.com/tendermint/tendermint/issues/5796
		if ok := memR.mempool.txStore.TxHasPeer(memTx.hash, peerID); !ok {
			msg := protomem.Message{
				Sum: &protomem.Message_Txs{
					Txs: &protomem.Txs{Txs: [][]byte{memTx.tx}},
				},
			}

			bz, err := msg.Marshal()
			if err != nil {
				panic(err)
			}

			// On a failed send, back off briefly and retry the same element.
			success := peer.Send(mempool.MempoolChannel, bz)
			if !success {
				time.Sleep(mempool.PeerCatchupSleepIntervalMS * time.Millisecond)
				continue
			}
		}

		select {
		case <-next.NextWaitChan():
			// see the start of the for loop for nil check
			next = next.Next()

		case <-peer.Quit():
			return

		case <-memR.Quit():
			return
		}
	}
}
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
// Messages
|
||||
|
||||
func (memR *Reactor) decodeMsg(bz []byte) (TxsMessage, error) {
|
||||
msg := protomem.Message{}
|
||||
err := msg.Unmarshal(bz)
|
||||
if err != nil {
|
||||
return TxsMessage{}, err
|
||||
}
|
||||
|
||||
var message TxsMessage
|
||||
|
||||
if i, ok := msg.Sum.(*protomem.Message_Txs); ok {
|
||||
txs := i.Txs.GetTxs()
|
||||
|
||||
if len(txs) == 0 {
|
||||
return message, errors.New("empty TxsMessage")
|
||||
}
|
||||
|
||||
decoded := make([]types.Tx, len(txs))
|
||||
for j, tx := range txs {
|
||||
decoded[j] = types.Tx(tx)
|
||||
}
|
||||
|
||||
message = TxsMessage{
|
||||
Txs: decoded,
|
||||
}
|
||||
return message, nil
|
||||
}
|
||||
return message, fmt.Errorf("msg type: %T is not supported", msg)
|
||||
}
|
||||
|
||||
//-------------------------------------

// TxsMessage is a Message containing transactions, as exchanged on the
// mempool channel.
type TxsMessage struct {
	Txs []types.Tx
}
|
||||
|
||||
// String returns a string representation of the TxsMessage.
|
||||
func (m *TxsMessage) String() string {
|
||||
return fmt.Sprintf("[TxsMessage %v]", m.Txs)
|
||||
}
|
||||
186
mempool/v1/reactor_test.go
Normal file
186
mempool/v1/reactor_test.go
Normal file
@@ -0,0 +1,186 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"os"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/go-kit/log/term"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/tendermint/tendermint/abci/example/kvstore"
|
||||
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/mempool"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
memproto "github.com/tendermint/tendermint/proto/tendermint/mempool"
|
||||
"github.com/tendermint/tendermint/proxy"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
const (
|
||||
numTxs = 1000
|
||||
timeout = 120 * time.Second // ridiculously high because CircleCI is slow
|
||||
)
|
||||
|
||||
type peerState struct {
|
||||
height int64
|
||||
}
|
||||
|
||||
func (ps peerState) GetHeight() int64 {
|
||||
return ps.height
|
||||
}
|
||||
|
||||
// Send a bunch of txs to the first reactor's mempool and wait for them all to
// be received in the others.
func TestReactorBroadcastTxsMessage(t *testing.T) {
	config := cfg.TestConfig()
	// if there were more than two reactors, the order of transactions could not be
	// asserted in waitForTxsOnReactors (due to transactions gossiping). If we
	// replace Connect2Switches (full mesh) with a func, which connects first
	// reactor to others and nothing else, this test should also pass with >2 reactors.
	const N = 2
	reactors := makeAndConnectReactors(config, N)
	defer func() {
		for _, r := range reactors {
			if err := r.Stop(); err != nil {
				assert.NoError(t, err)
			}
		}
	}()
	// Give every peer a height so the broadcast routine doesn't stall waiting
	// for peer state.
	for _, r := range reactors {
		for _, peer := range r.Switch.Peers().List() {
			peer.Set(types.PeerStateKey, peerState{1})
		}
	}

	txs := checkTxs(t, reactors[0].mempool, numTxs, mempool.UnknownPeerID)
	transactions := make(types.Txs, len(txs))
	for idx, tx := range txs {
		transactions[idx] = tx.tx
	}

	waitForTxsOnReactors(t, transactions, reactors)
}
|
||||
|
||||
func TestMempoolVectors(t *testing.T) {
|
||||
testCases := []struct {
|
||||
testName string
|
||||
tx []byte
|
||||
expBytes string
|
||||
}{
|
||||
{"tx 1", []byte{123}, "0a030a017b"},
|
||||
{"tx 2", []byte("proto encoding in mempool"), "0a1b0a1970726f746f20656e636f64696e6720696e206d656d706f6f6c"},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
tc := tc
|
||||
|
||||
msg := memproto.Message{
|
||||
Sum: &memproto.Message_Txs{
|
||||
Txs: &memproto.Txs{Txs: [][]byte{tc.tx}},
|
||||
},
|
||||
}
|
||||
bz, err := msg.Marshal()
|
||||
require.NoError(t, err, tc.testName)
|
||||
|
||||
require.Equal(t, tc.expBytes, hex.EncodeToString(bz), tc.testName)
|
||||
}
|
||||
}
|
||||
|
||||
// makeAndConnectReactors builds n mempool reactors, each backed by a fresh
// kvstore app and mempool, and fully connects their switches.
func makeAndConnectReactors(config *cfg.Config, n int) []*Reactor {
	reactors := make([]*Reactor, n)
	logger := mempoolLogger()
	for i := 0; i < n; i++ {
		app := kvstore.NewApplication()
		cc := proxy.NewLocalClientCreator(app)
		mempool, cleanup := newMempoolWithApp(cc)
		// NOTE(review): defer inside the loop fires when this function
		// returns, removing the test roots while the reactors may still be
		// running — confirm this is intentional.
		defer cleanup()

		reactors[i] = NewReactor(config.Mempool, mempool) // so we dont start the consensus states
		reactors[i].SetLogger(logger.With("validator", i))
	}

	p2p.MakeConnectedSwitches(config.P2P, n, func(i int, s *p2p.Switch) *p2p.Switch {
		s.AddReactor("MEMPOOL", reactors[i])
		return s

	}, p2p.Connect2Switches)
	return reactors
}
|
||||
|
||||
// mempoolLogger is a TestingLogger which uses a different
|
||||
// color for each validator ("validator" key must exist).
|
||||
func mempoolLogger() log.Logger {
|
||||
return log.TestingLoggerWithColorFn(func(keyvals ...interface{}) term.FgBgColor {
|
||||
for i := 0; i < len(keyvals)-1; i += 2 {
|
||||
if keyvals[i] == "validator" {
|
||||
return term.FgBgColor{Fg: term.Color(uint8(keyvals[i+1].(int) + 1))}
|
||||
}
|
||||
}
|
||||
return term.FgBgColor{}
|
||||
})
|
||||
}
|
||||
|
||||
func newMempoolWithApp(cc proxy.ClientCreator) (*TxMempool, func()) {
|
||||
conf := cfg.ResetTestRoot("mempool_test")
|
||||
|
||||
mp, cu := newMempoolWithAppAndConfig(cc, conf)
|
||||
return mp, cu
|
||||
}
|
||||
|
||||
func newMempoolWithAppAndConfig(cc proxy.ClientCreator, conf *cfg.Config) (*TxMempool, func()) {
|
||||
appConnMem, _ := cc.NewABCIClient()
|
||||
appConnMem.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "mempool"))
|
||||
err := appConnMem.Start()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
mp := NewTxMempool(log.TestingLogger(), conf.Mempool, appConnMem, 0)
|
||||
|
||||
return mp, func() { os.RemoveAll(conf.RootDir) }
|
||||
}
|
||||
|
||||
func waitForTxsOnReactors(t *testing.T, txs types.Txs, reactors []*Reactor) {
|
||||
// wait for the txs in all mempools
|
||||
wg := new(sync.WaitGroup)
|
||||
for i, reactor := range reactors {
|
||||
wg.Add(1)
|
||||
go func(r *Reactor, reactorIndex int) {
|
||||
defer wg.Done()
|
||||
waitForTxsOnReactor(t, txs, r, reactorIndex)
|
||||
}(reactor, i)
|
||||
}
|
||||
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
wg.Wait()
|
||||
close(done)
|
||||
}()
|
||||
|
||||
timer := time.After(timeout)
|
||||
select {
|
||||
case <-timer:
|
||||
t.Fatal("Timed out waiting for txs")
|
||||
case <-done:
|
||||
}
|
||||
}
|
||||
|
||||
func waitForTxsOnReactor(t *testing.T, txs types.Txs, reactor *Reactor, reactorIndex int) {
|
||||
mempool := reactor.mempool
|
||||
for mempool.Size() < len(txs) {
|
||||
time.Sleep(time.Millisecond * 100)
|
||||
}
|
||||
|
||||
reapedTxs := mempool.ReapMaxTxs(len(txs))
|
||||
for i, tx := range txs {
|
||||
assert.Equalf(t, tx, reapedTxs[i],
|
||||
"txs at index %d on reactor %d don't match: %v vs %v", i, reactorIndex, tx, reapedTxs[i])
|
||||
}
|
||||
}
|
||||
281
mempool/v1/tx.go
Normal file
281
mempool/v1/tx.go
Normal file
@@ -0,0 +1,281 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"github.com/tendermint/tendermint/libs/clist"
|
||||
tmsync "github.com/tendermint/tendermint/libs/sync"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
// WrappedTx defines a wrapper around a raw transaction with additional metadata
// that is used for indexing.
type WrappedTx struct {
	// tx represents the raw binary transaction data
	tx types.Tx

	// hash defines the transaction hash and the primary key used in the mempool
	hash types.TxKey

	// height defines the height at which the transaction was validated at
	height int64

	// gasWanted defines the amount of gas the transaction sender requires
	gasWanted int64

	// priority defines the transaction's priority as specified by the application
	// in the ResponseCheckTx response.
	priority int64

	// sender defines the transaction's sender as specified by the application in
	// the ResponseCheckTx response.
	sender string

	// timestamp is the time at which the node first received the transaction from
	// a peer. It is used as a second dimension in prioritizing transactions when
	// two transactions have the same priority.
	timestamp time.Time

	// peers records a mapping of all peers that sent a given transaction
	peers map[uint16]struct{}

	// heapIndex defines the index of the item in the heap (-1 once popped)
	heapIndex int

	// gossipEl references the linked-list element in the gossip index
	gossipEl *clist.CElement

	// removed marks the transaction as removed from the mempool. This is set
	// during RemoveTx and is needed due to the fact that a given existing
	// transaction in the mempool can be evicted when it is simultaneously having
	// a reCheckTx callback executed.
	removed bool
}
|
||||
|
||||
func (wtx *WrappedTx) Size() int {
|
||||
return len(wtx.tx)
|
||||
}
|
||||
|
||||
// TxStore implements a thread-safe mapping of valid transaction(s).
|
||||
//
|
||||
// NOTE:
|
||||
// - Concurrent read-only access to a *WrappedTx object is OK. However, mutative
|
||||
// access is not allowed. Regardless, it is not expected for the mempool to
|
||||
// need mutative access.
|
||||
type TxStore struct {
|
||||
mtx tmsync.RWMutex
|
||||
hashTxs map[types.TxKey]*WrappedTx // primary index
|
||||
senderTxs map[string]*WrappedTx // sender is defined by the ABCI application
|
||||
}
|
||||
|
||||
func NewTxStore() *TxStore {
|
||||
return &TxStore{
|
||||
senderTxs: make(map[string]*WrappedTx),
|
||||
hashTxs: make(map[types.TxKey]*WrappedTx),
|
||||
}
|
||||
}
|
||||
|
||||
// Size returns the total number of transactions in the store.
|
||||
func (txs *TxStore) Size() int {
|
||||
txs.mtx.RLock()
|
||||
defer txs.mtx.RUnlock()
|
||||
|
||||
return len(txs.hashTxs)
|
||||
}
|
||||
|
||||
// GetAllTxs returns all the transactions currently in the store.
|
||||
func (txs *TxStore) GetAllTxs() []*WrappedTx {
|
||||
txs.mtx.RLock()
|
||||
defer txs.mtx.RUnlock()
|
||||
|
||||
wTxs := make([]*WrappedTx, len(txs.hashTxs))
|
||||
i := 0
|
||||
for _, wtx := range txs.hashTxs {
|
||||
wTxs[i] = wtx
|
||||
i++
|
||||
}
|
||||
|
||||
return wTxs
|
||||
}
|
||||
|
||||
// GetTxBySender returns a *WrappedTx by the transaction's sender property
|
||||
// defined by the ABCI application.
|
||||
func (txs *TxStore) GetTxBySender(sender string) *WrappedTx {
|
||||
txs.mtx.RLock()
|
||||
defer txs.mtx.RUnlock()
|
||||
|
||||
return txs.senderTxs[sender]
|
||||
}
|
||||
|
||||
// GetTxByHash returns a *WrappedTx by the transaction's hash.
|
||||
func (txs *TxStore) GetTxByHash(hash types.TxKey) *WrappedTx {
|
||||
txs.mtx.RLock()
|
||||
defer txs.mtx.RUnlock()
|
||||
|
||||
return txs.hashTxs[hash]
|
||||
}
|
||||
|
||||
// IsTxRemoved returns true if a transaction by hash is marked as removed and
|
||||
// false otherwise.
|
||||
func (txs *TxStore) IsTxRemoved(hash types.TxKey) bool {
|
||||
txs.mtx.RLock()
|
||||
defer txs.mtx.RUnlock()
|
||||
|
||||
wtx, ok := txs.hashTxs[hash]
|
||||
if ok {
|
||||
return wtx.removed
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// SetTx stores a *WrappedTx by it's hash. If the transaction also contains a
|
||||
// non-empty sender, we additionally store the transaction by the sender as
|
||||
// defined by the ABCI application.
|
||||
func (txs *TxStore) SetTx(wtx *WrappedTx) {
|
||||
txs.mtx.Lock()
|
||||
defer txs.mtx.Unlock()
|
||||
|
||||
if len(wtx.sender) > 0 {
|
||||
txs.senderTxs[wtx.sender] = wtx
|
||||
}
|
||||
|
||||
txs.hashTxs[wtx.tx.Key()] = wtx
|
||||
}
|
||||
|
||||
// RemoveTx removes a *WrappedTx from the transaction store. It deletes all
|
||||
// indexes of the transaction.
|
||||
func (txs *TxStore) RemoveTx(wtx *WrappedTx) {
|
||||
txs.mtx.Lock()
|
||||
defer txs.mtx.Unlock()
|
||||
|
||||
if len(wtx.sender) > 0 {
|
||||
delete(txs.senderTxs, wtx.sender)
|
||||
}
|
||||
|
||||
delete(txs.hashTxs, wtx.tx.Key())
|
||||
wtx.removed = true
|
||||
}
|
||||
|
||||
// TxHasPeer returns true if a transaction by hash has a given peer ID and false
|
||||
// otherwise. If the transaction does not exist, false is returned.
|
||||
func (txs *TxStore) TxHasPeer(hash types.TxKey, peerID uint16) bool {
|
||||
txs.mtx.RLock()
|
||||
defer txs.mtx.RUnlock()
|
||||
|
||||
wtx := txs.hashTxs[hash]
|
||||
if wtx == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
_, ok := wtx.peers[peerID]
|
||||
return ok
|
||||
}
|
||||
|
||||
// GetOrSetPeerByTxHash looks up a WrappedTx by transaction hash and adds the
|
||||
// given peerID to the WrappedTx's set of peers that sent us this transaction.
|
||||
// We return true if we've already recorded the given peer for this transaction
|
||||
// and false otherwise. If the transaction does not exist by hash, we return
|
||||
// (nil, false).
|
||||
func (txs *TxStore) GetOrSetPeerByTxHash(hash types.TxKey, peerID uint16) (*WrappedTx, bool) {
|
||||
txs.mtx.Lock()
|
||||
defer txs.mtx.Unlock()
|
||||
|
||||
wtx := txs.hashTxs[hash]
|
||||
if wtx == nil {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
if wtx.peers == nil {
|
||||
wtx.peers = make(map[uint16]struct{})
|
||||
}
|
||||
|
||||
if _, ok := wtx.peers[peerID]; ok {
|
||||
return wtx, true
|
||||
}
|
||||
|
||||
wtx.peers[peerID] = struct{}{}
|
||||
return wtx, false
|
||||
}
|
||||
|
||||
// WrappedTxList implements a thread-safe list of *WrappedTx objects that can be
|
||||
// used to build generic transaction indexes in the mempool. It accepts a
|
||||
// comparator function, less(a, b *WrappedTx) bool, that compares two WrappedTx
|
||||
// references which is used during Insert in order to determine sorted order. If
|
||||
// less returns true, a <= b.
|
||||
type WrappedTxList struct {
|
||||
mtx tmsync.RWMutex
|
||||
txs []*WrappedTx
|
||||
less func(*WrappedTx, *WrappedTx) bool
|
||||
}
|
||||
|
||||
func NewWrappedTxList(less func(*WrappedTx, *WrappedTx) bool) *WrappedTxList {
|
||||
return &WrappedTxList{
|
||||
txs: make([]*WrappedTx, 0),
|
||||
less: less,
|
||||
}
|
||||
}
|
||||
|
||||
// Size returns the number of WrappedTx objects in the list.
|
||||
func (wtl *WrappedTxList) Size() int {
|
||||
wtl.mtx.RLock()
|
||||
defer wtl.mtx.RUnlock()
|
||||
|
||||
return len(wtl.txs)
|
||||
}
|
||||
|
||||
// Reset resets the list of transactions to an empty list.
|
||||
func (wtl *WrappedTxList) Reset() {
|
||||
wtl.mtx.Lock()
|
||||
defer wtl.mtx.Unlock()
|
||||
|
||||
wtl.txs = make([]*WrappedTx, 0)
|
||||
}
|
||||
|
||||
// Insert inserts a WrappedTx reference into the sorted list based on the list's
// comparator function.
func (wtl *WrappedTxList) Insert(wtx *WrappedTx) {
	wtl.mtx.Lock()
	defer wtl.mtx.Unlock()

	// sort.Search returns the smallest index i in [0, n) for which the
	// predicate is true (or n when it is never true); that index is the
	// insertion point that keeps the list ordered under less.
	i := sort.Search(len(wtl.txs), func(i int) bool {
		return wtl.less(wtl.txs[i], wtx)
	})

	if i == len(wtl.txs) {
		// insert at the end
		wtl.txs = append(wtl.txs, wtx)
		return
	}

	// Make space for the inserted element by shifting values at the insertion
	// index up one index.
	//
	// NOTE: The call to append does not allocate memory when cap(wtl.txs) > len(wtl.txs).
	// The source (txs[i:]) and destination (txs[i+1:]) overlap; append copies
	// the variadic arguments with memmove semantics, so the shift is safe.
	// Element i is briefly duplicated and then overwritten with wtx below.
	wtl.txs = append(wtl.txs[:i+1], wtl.txs[i:]...)
	wtl.txs[i] = wtx
}
|
||||
|
||||
// Remove attempts to remove a WrappedTx from the sorted list. Matching is by
// pointer identity; an equal-but-distinct WrappedTx is not removed.
func (wtl *WrappedTxList) Remove(wtx *WrappedTx) {
	wtl.mtx.Lock()
	defer wtl.mtx.Unlock()

	// Binary-search for the first candidate position, mirroring Insert's
	// predicate so we land where wtx would have been inserted.
	i := sort.Search(len(wtl.txs), func(i int) bool {
		return wtl.less(wtl.txs[i], wtx)
	})

	// Since the list is sorted, we evaluate all elements starting at i. Note, if
	// the element does not exist, we may potentially evaluate the entire remainder
	// of the list. However, a caller should not be expected to call Remove with a
	// non-existing element.
	for i < len(wtl.txs) {
		if wtl.txs[i] == wtx {
			// Splice the element out in place, reusing the backing array.
			wtl.txs = append(wtl.txs[:i], wtl.txs[i+1:]...)
			return
		}

		i++
	}
}
|
||||
230
mempool/v1/tx_test.go
Normal file
230
mempool/v1/tx_test.go
Normal file
@@ -0,0 +1,230 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"sort"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
func TestTxStore_GetTxBySender(t *testing.T) {
|
||||
txs := NewTxStore()
|
||||
wtx := &WrappedTx{
|
||||
tx: []byte("test_tx"),
|
||||
sender: "foo",
|
||||
priority: 1,
|
||||
timestamp: time.Now(),
|
||||
}
|
||||
|
||||
res := txs.GetTxBySender(wtx.sender)
|
||||
require.Nil(t, res)
|
||||
|
||||
txs.SetTx(wtx)
|
||||
|
||||
res = txs.GetTxBySender(wtx.sender)
|
||||
require.NotNil(t, res)
|
||||
require.Equal(t, wtx, res)
|
||||
}
|
||||
|
||||
func TestTxStore_GetTxByHash(t *testing.T) {
|
||||
txs := NewTxStore()
|
||||
wtx := &WrappedTx{
|
||||
tx: []byte("test_tx"),
|
||||
sender: "foo",
|
||||
priority: 1,
|
||||
timestamp: time.Now(),
|
||||
}
|
||||
|
||||
key := wtx.tx.Key()
|
||||
res := txs.GetTxByHash(key)
|
||||
require.Nil(t, res)
|
||||
|
||||
txs.SetTx(wtx)
|
||||
|
||||
res = txs.GetTxByHash(key)
|
||||
require.NotNil(t, res)
|
||||
require.Equal(t, wtx, res)
|
||||
}
|
||||
|
||||
func TestTxStore_SetTx(t *testing.T) {
|
||||
txs := NewTxStore()
|
||||
wtx := &WrappedTx{
|
||||
tx: []byte("test_tx"),
|
||||
priority: 1,
|
||||
timestamp: time.Now(),
|
||||
}
|
||||
|
||||
key := wtx.tx.Key()
|
||||
txs.SetTx(wtx)
|
||||
|
||||
res := txs.GetTxByHash(key)
|
||||
require.NotNil(t, res)
|
||||
require.Equal(t, wtx, res)
|
||||
|
||||
wtx.sender = "foo"
|
||||
txs.SetTx(wtx)
|
||||
|
||||
res = txs.GetTxByHash(key)
|
||||
require.NotNil(t, res)
|
||||
require.Equal(t, wtx, res)
|
||||
}
|
||||
|
||||
func TestTxStore_GetOrSetPeerByTxHash(t *testing.T) {
|
||||
txs := NewTxStore()
|
||||
wtx := &WrappedTx{
|
||||
tx: []byte("test_tx"),
|
||||
priority: 1,
|
||||
timestamp: time.Now(),
|
||||
}
|
||||
|
||||
key := wtx.tx.Key()
|
||||
txs.SetTx(wtx)
|
||||
|
||||
res, ok := txs.GetOrSetPeerByTxHash(types.Tx([]byte("test_tx_2")).Key(), 15)
|
||||
require.Nil(t, res)
|
||||
require.False(t, ok)
|
||||
|
||||
res, ok = txs.GetOrSetPeerByTxHash(key, 15)
|
||||
require.NotNil(t, res)
|
||||
require.False(t, ok)
|
||||
|
||||
res, ok = txs.GetOrSetPeerByTxHash(key, 15)
|
||||
require.NotNil(t, res)
|
||||
require.True(t, ok)
|
||||
|
||||
require.True(t, txs.TxHasPeer(key, 15))
|
||||
require.False(t, txs.TxHasPeer(key, 16))
|
||||
}
|
||||
|
||||
func TestTxStore_RemoveTx(t *testing.T) {
|
||||
txs := NewTxStore()
|
||||
wtx := &WrappedTx{
|
||||
tx: []byte("test_tx"),
|
||||
priority: 1,
|
||||
timestamp: time.Now(),
|
||||
}
|
||||
|
||||
txs.SetTx(wtx)
|
||||
|
||||
key := wtx.tx.Key()
|
||||
res := txs.GetTxByHash(key)
|
||||
require.NotNil(t, res)
|
||||
|
||||
txs.RemoveTx(res)
|
||||
|
||||
res = txs.GetTxByHash(key)
|
||||
require.Nil(t, res)
|
||||
}
|
||||
|
||||
func TestTxStore_Size(t *testing.T) {
|
||||
txStore := NewTxStore()
|
||||
numTxs := 1000
|
||||
|
||||
for i := 0; i < numTxs; i++ {
|
||||
txStore.SetTx(&WrappedTx{
|
||||
tx: []byte(fmt.Sprintf("test_tx_%d", i)),
|
||||
priority: int64(i),
|
||||
timestamp: time.Now(),
|
||||
})
|
||||
}
|
||||
|
||||
require.Equal(t, numTxs, txStore.Size())
|
||||
}
|
||||
|
||||
func TestWrappedTxList_Reset(t *testing.T) {
|
||||
list := NewWrappedTxList(func(wtx1, wtx2 *WrappedTx) bool {
|
||||
return wtx1.height >= wtx2.height
|
||||
})
|
||||
|
||||
require.Zero(t, list.Size())
|
||||
|
||||
for i := 0; i < 100; i++ {
|
||||
list.Insert(&WrappedTx{height: int64(i)})
|
||||
}
|
||||
|
||||
require.Equal(t, 100, list.Size())
|
||||
|
||||
list.Reset()
|
||||
require.Zero(t, list.Size())
|
||||
}
|
||||
|
||||
func TestWrappedTxList_Insert(t *testing.T) {
|
||||
list := NewWrappedTxList(func(wtx1, wtx2 *WrappedTx) bool {
|
||||
return wtx1.height >= wtx2.height
|
||||
})
|
||||
|
||||
rng := rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
|
||||
var expected []int
|
||||
for i := 0; i < 100; i++ {
|
||||
height := rng.Int63n(10000)
|
||||
expected = append(expected, int(height))
|
||||
list.Insert(&WrappedTx{height: height})
|
||||
|
||||
if i%10 == 0 {
|
||||
list.Insert(&WrappedTx{height: height})
|
||||
expected = append(expected, int(height))
|
||||
}
|
||||
}
|
||||
|
||||
got := make([]int, list.Size())
|
||||
for i, wtx := range list.txs {
|
||||
got[i] = int(wtx.height)
|
||||
}
|
||||
|
||||
sort.Ints(expected)
|
||||
require.Equal(t, expected, got)
|
||||
}
|
||||
|
||||
func TestWrappedTxList_Remove(t *testing.T) {
|
||||
list := NewWrappedTxList(func(wtx1, wtx2 *WrappedTx) bool {
|
||||
return wtx1.height >= wtx2.height
|
||||
})
|
||||
|
||||
rng := rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
|
||||
var txs []*WrappedTx
|
||||
for i := 0; i < 100; i++ {
|
||||
height := rng.Int63n(10000)
|
||||
tx := &WrappedTx{height: height}
|
||||
|
||||
txs = append(txs, tx)
|
||||
list.Insert(tx)
|
||||
|
||||
if i%10 == 0 {
|
||||
tx = &WrappedTx{height: height}
|
||||
list.Insert(tx)
|
||||
txs = append(txs, tx)
|
||||
}
|
||||
}
|
||||
|
||||
// remove a tx that does not exist
|
||||
list.Remove(&WrappedTx{height: 20000})
|
||||
|
||||
// remove a tx that exists (by height) but not referenced
|
||||
list.Remove(&WrappedTx{height: txs[0].height})
|
||||
|
||||
// remove a few existing txs
|
||||
for i := 0; i < 25; i++ {
|
||||
j := rng.Intn(len(txs))
|
||||
list.Remove(txs[j])
|
||||
txs = append(txs[:j], txs[j+1:]...)
|
||||
}
|
||||
|
||||
expected := make([]int, len(txs))
|
||||
for i, tx := range txs {
|
||||
expected[i] = int(tx.height)
|
||||
}
|
||||
|
||||
got := make([]int, list.Size())
|
||||
for i, wtx := range list.txs {
|
||||
got[i] = int(wtx.height)
|
||||
}
|
||||
|
||||
sort.Ints(expected)
|
||||
require.Equal(t, expected, got)
|
||||
}
|
||||
96
node/node.go
96
node/node.go
@@ -23,12 +23,15 @@ import (
|
||||
cs "github.com/tendermint/tendermint/consensus"
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
"github.com/tendermint/tendermint/evidence"
|
||||
|
||||
tmjson "github.com/tendermint/tendermint/libs/json"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
|
||||
"github.com/tendermint/tendermint/libs/service"
|
||||
"github.com/tendermint/tendermint/light"
|
||||
mempl "github.com/tendermint/tendermint/mempool"
|
||||
mempoolv0 "github.com/tendermint/tendermint/mempool/v0"
|
||||
mempoolv1 "github.com/tendermint/tendermint/mempool/v1"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
"github.com/tendermint/tendermint/p2p/pex"
|
||||
"github.com/tendermint/tendermint/privval"
|
||||
@@ -209,7 +212,7 @@ type Node struct {
|
||||
stateStore sm.Store
|
||||
blockStore *store.BlockStore // store the blockchain to disk
|
||||
bcReactor p2p.Reactor // for fast-syncing
|
||||
mempoolReactor *mempl.Reactor // for gossipping transactions
|
||||
mempoolReactor p2p.Reactor // for gossipping transactions
|
||||
mempool mempl.Mempool
|
||||
stateSync bool // whether the node should state sync on startup
|
||||
stateSyncReactor *statesync.Reactor // for hosting and restoring state sync snapshots
|
||||
@@ -361,25 +364,64 @@ func onlyValidatorIsUs(state sm.State, pubKey crypto.PubKey) bool {
|
||||
return bytes.Equal(pubKey.Address(), addr)
|
||||
}
|
||||
|
||||
func createMempoolAndMempoolReactor(config *cfg.Config, proxyApp proxy.AppConns,
|
||||
state sm.State, memplMetrics *mempl.Metrics, logger log.Logger) (*mempl.Reactor, *mempl.CListMempool) {
|
||||
func createMempoolAndMempoolReactor(
|
||||
config *cfg.Config,
|
||||
proxyApp proxy.AppConns,
|
||||
state sm.State,
|
||||
memplMetrics *mempl.Metrics,
|
||||
logger log.Logger,
|
||||
) (mempl.Mempool, p2p.Reactor) {
|
||||
|
||||
mempool := mempl.NewCListMempool(
|
||||
config.Mempool,
|
||||
proxyApp.Mempool(),
|
||||
state.LastBlockHeight,
|
||||
mempl.WithMetrics(memplMetrics),
|
||||
mempl.WithPreCheck(sm.TxPreCheck(state)),
|
||||
mempl.WithPostCheck(sm.TxPostCheck(state)),
|
||||
)
|
||||
mempoolLogger := logger.With("module", "mempool")
|
||||
mempoolReactor := mempl.NewReactor(config.Mempool, mempool)
|
||||
mempoolReactor.SetLogger(mempoolLogger)
|
||||
switch config.Mempool.Version {
|
||||
case cfg.MempoolV1:
|
||||
// TODO(thane): Remove log once https://github.com/tendermint/tendermint/issues/8775 is resolved.
|
||||
logger.Error("While the prioritized mempool API is stable, there is a critical bug in it that is currently under investigation. See https://github.com/tendermint/tendermint/issues/8775 for details")
|
||||
mp := mempoolv1.NewTxMempool(
|
||||
logger,
|
||||
config.Mempool,
|
||||
proxyApp.Mempool(),
|
||||
state.LastBlockHeight,
|
||||
mempoolv1.WithMetrics(memplMetrics),
|
||||
mempoolv1.WithPreCheck(sm.TxPreCheck(state)),
|
||||
mempoolv1.WithPostCheck(sm.TxPostCheck(state)),
|
||||
)
|
||||
|
||||
if config.Consensus.WaitForTxs() {
|
||||
mempool.EnableTxsAvailable()
|
||||
reactor := mempoolv1.NewReactor(
|
||||
config.Mempool,
|
||||
mp,
|
||||
)
|
||||
if config.Consensus.WaitForTxs() {
|
||||
mp.EnableTxsAvailable()
|
||||
}
|
||||
|
||||
return mp, reactor
|
||||
|
||||
case cfg.MempoolV0:
|
||||
mp := mempoolv0.NewCListMempool(
|
||||
config.Mempool,
|
||||
proxyApp.Mempool(),
|
||||
state.LastBlockHeight,
|
||||
mempoolv0.WithMetrics(memplMetrics),
|
||||
mempoolv0.WithPreCheck(sm.TxPreCheck(state)),
|
||||
mempoolv0.WithPostCheck(sm.TxPostCheck(state)),
|
||||
)
|
||||
|
||||
mp.SetLogger(logger)
|
||||
mp.SetLogger(logger)
|
||||
|
||||
reactor := mempoolv0.NewReactor(
|
||||
config.Mempool,
|
||||
mp,
|
||||
)
|
||||
if config.Consensus.WaitForTxs() {
|
||||
mp.EnableTxsAvailable()
|
||||
}
|
||||
|
||||
return mp, reactor
|
||||
|
||||
default:
|
||||
return nil, nil
|
||||
}
|
||||
return mempoolReactor, mempool
|
||||
}
|
||||
|
||||
func createEvidenceReactor(config *cfg.Config, dbProvider DBProvider,
|
||||
@@ -425,7 +467,7 @@ func createConsensusReactor(config *cfg.Config,
|
||||
state sm.State,
|
||||
blockExec *sm.BlockExecutor,
|
||||
blockStore sm.BlockStore,
|
||||
mempool *mempl.CListMempool,
|
||||
mempool mempl.Mempool,
|
||||
evidencePool *evidence.Pool,
|
||||
privValidator types.PrivValidator,
|
||||
csMetrics *cs.Metrics,
|
||||
@@ -527,7 +569,7 @@ func createSwitch(config *cfg.Config,
|
||||
transport p2p.Transport,
|
||||
p2pMetrics *p2p.Metrics,
|
||||
peerFilters []p2p.PeerFilterFunc,
|
||||
mempoolReactor *mempl.Reactor,
|
||||
mempoolReactor p2p.Reactor,
|
||||
bcReactor p2p.Reactor,
|
||||
stateSyncReactor *statesync.Reactor,
|
||||
consensusReactor *cs.Reactor,
|
||||
@@ -752,7 +794,7 @@ func NewNode(config *cfg.Config,
|
||||
csMetrics, p2pMetrics, memplMetrics, smMetrics := metricsProvider(genDoc.ChainID)
|
||||
|
||||
// Make MempoolReactor
|
||||
mempoolReactor, mempool := createMempoolAndMempoolReactor(config, proxyApp, state, memplMetrics, logger)
|
||||
mempool, mempoolReactor := createMempoolAndMempoolReactor(config, proxyApp, state, memplMetrics, logger)
|
||||
|
||||
// Make Evidence Reactor
|
||||
evidenceReactor, evidencePool, err := createEvidenceReactor(config, dbProvider, stateDB, blockStore, logger)
|
||||
@@ -930,13 +972,6 @@ func (n *Node) OnStart() error {
|
||||
|
||||
n.isListening = true
|
||||
|
||||
if n.config.Mempool.WalEnabled() {
|
||||
err = n.mempool.InitWAL()
|
||||
if err != nil {
|
||||
return fmt.Errorf("init mempool WAL: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Start the switch (the P2P server).
|
||||
err = n.sw.Start()
|
||||
if err != nil {
|
||||
@@ -984,11 +1019,6 @@ func (n *Node) OnStop() {
|
||||
n.Logger.Error("Error closing switch", "err", err)
|
||||
}
|
||||
|
||||
// stop mempool WAL
|
||||
if n.config.Mempool.WalEnabled() {
|
||||
n.mempool.CloseWAL()
|
||||
}
|
||||
|
||||
if err := n.transport.Close(); err != nil {
|
||||
n.Logger.Error("Error closing transport", "err", err)
|
||||
}
|
||||
@@ -1224,7 +1254,7 @@ func (n *Node) ConsensusReactor() *cs.Reactor {
|
||||
}
|
||||
|
||||
// MempoolReactor returns the Node's mempool reactor.
|
||||
func (n *Node) MempoolReactor() *mempl.Reactor {
|
||||
func (n *Node) MempoolReactor() p2p.Reactor {
|
||||
return n.mempoolReactor
|
||||
}
|
||||
|
||||
|
||||
@@ -21,6 +21,8 @@ import (
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmrand "github.com/tendermint/tendermint/libs/rand"
|
||||
mempl "github.com/tendermint/tendermint/mempool"
|
||||
mempoolv0 "github.com/tendermint/tendermint/mempool/v0"
|
||||
mempoolv1 "github.com/tendermint/tendermint/mempool/v1"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
"github.com/tendermint/tendermint/p2p/conn"
|
||||
p2pmock "github.com/tendermint/tendermint/p2p/mock"
|
||||
@@ -242,16 +244,27 @@ func TestCreateProposalBlock(t *testing.T) {
|
||||
proposerAddr, _ := state.Validators.GetByIndex(0)
|
||||
|
||||
// Make Mempool
|
||||
memplMetrics := mempl.PrometheusMetrics("node_test_1")
|
||||
mempool := mempl.NewCListMempool(
|
||||
config.Mempool,
|
||||
proxyApp.Mempool(),
|
||||
state.LastBlockHeight,
|
||||
mempl.WithMetrics(memplMetrics),
|
||||
mempl.WithPreCheck(sm.TxPreCheck(state)),
|
||||
mempl.WithPostCheck(sm.TxPostCheck(state)),
|
||||
)
|
||||
mempool.SetLogger(logger)
|
||||
memplMetrics := mempl.NopMetrics()
|
||||
var mempool mempl.Mempool
|
||||
|
||||
switch config.Mempool.Version {
|
||||
case cfg.MempoolV0:
|
||||
mempool = mempoolv0.NewCListMempool(config.Mempool,
|
||||
proxyApp.Mempool(),
|
||||
state.LastBlockHeight,
|
||||
mempoolv0.WithMetrics(memplMetrics),
|
||||
mempoolv0.WithPreCheck(sm.TxPreCheck(state)),
|
||||
mempoolv0.WithPostCheck(sm.TxPostCheck(state)))
|
||||
case cfg.MempoolV1:
|
||||
mempool = mempoolv1.NewTxMempool(logger,
|
||||
config.Mempool,
|
||||
proxyApp.Mempool(),
|
||||
state.LastBlockHeight,
|
||||
mempoolv1.WithMetrics(memplMetrics),
|
||||
mempoolv1.WithPreCheck(sm.TxPreCheck(state)),
|
||||
mempoolv1.WithPostCheck(sm.TxPostCheck(state)),
|
||||
)
|
||||
}
|
||||
|
||||
// Make EvidencePool
|
||||
evidenceDB := dbm.NewMemDB()
|
||||
@@ -334,16 +347,26 @@ func TestMaxProposalBlockSize(t *testing.T) {
|
||||
proposerAddr, _ := state.Validators.GetByIndex(0)
|
||||
|
||||
// Make Mempool
|
||||
memplMetrics := mempl.PrometheusMetrics("node_test_2")
|
||||
mempool := mempl.NewCListMempool(
|
||||
config.Mempool,
|
||||
proxyApp.Mempool(),
|
||||
state.LastBlockHeight,
|
||||
mempl.WithMetrics(memplMetrics),
|
||||
mempl.WithPreCheck(sm.TxPreCheck(state)),
|
||||
mempl.WithPostCheck(sm.TxPostCheck(state)),
|
||||
)
|
||||
mempool.SetLogger(logger)
|
||||
memplMetrics := mempl.NopMetrics()
|
||||
var mempool mempl.Mempool
|
||||
switch config.Mempool.Version {
|
||||
case cfg.MempoolV0:
|
||||
mempool = mempoolv0.NewCListMempool(config.Mempool,
|
||||
proxyApp.Mempool(),
|
||||
state.LastBlockHeight,
|
||||
mempoolv0.WithMetrics(memplMetrics),
|
||||
mempoolv0.WithPreCheck(sm.TxPreCheck(state)),
|
||||
mempoolv0.WithPostCheck(sm.TxPostCheck(state)))
|
||||
case cfg.MempoolV1:
|
||||
mempool = mempoolv1.NewTxMempool(logger,
|
||||
config.Mempool,
|
||||
proxyApp.Mempool(),
|
||||
state.LastBlockHeight,
|
||||
mempoolv1.WithMetrics(memplMetrics),
|
||||
mempoolv1.WithPreCheck(sm.TxPreCheck(state)),
|
||||
mempoolv1.WithPostCheck(sm.TxPostCheck(state)),
|
||||
)
|
||||
}
|
||||
|
||||
// fill the mempool with one txs just below the maximum size
|
||||
txLength := int(types.MaxDataBytesNoEvidence(maxBytes, 1))
|
||||
|
||||
@@ -75,10 +75,10 @@ message RequestQuery {
|
||||
}
|
||||
|
||||
message RequestBeginBlock {
|
||||
bytes hash = 1;
|
||||
tendermint.types.Header header = 2 [(gogoproto.nullable) = false];
|
||||
LastCommitInfo last_commit_info = 3 [(gogoproto.nullable) = false];
|
||||
repeated Evidence byzantine_validators = 4 [(gogoproto.nullable) = false];
|
||||
bytes hash = 1;
|
||||
tendermint.types.Header header = 2 [(gogoproto.nullable) = false];
|
||||
LastCommitInfo last_commit_info = 3 [(gogoproto.nullable) = false];
|
||||
repeated Evidence byzantine_validators = 4 [(gogoproto.nullable) = false];
|
||||
}
|
||||
|
||||
enum CheckTxType {
|
||||
@@ -102,8 +102,7 @@ message RequestEndBlock {
|
||||
message RequestCommit {}
|
||||
|
||||
// lists available snapshots
|
||||
message RequestListSnapshots {
|
||||
}
|
||||
message RequestListSnapshots {}
|
||||
|
||||
// offers a snapshot to the application
|
||||
message RequestOfferSnapshot {
|
||||
@@ -212,6 +211,12 @@ message ResponseCheckTx {
|
||||
repeated Event events = 7
|
||||
[(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"];
|
||||
string codespace = 8;
|
||||
string sender = 9;
|
||||
int64 priority = 10;
|
||||
|
||||
// mempool_error is set by Tendermint.
|
||||
// ABCI applications creating a ResponseCheckTx should not set mempool_error.
|
||||
string mempool_error = 11;
|
||||
}
|
||||
|
||||
message ResponseDeliverTx {
|
||||
@@ -221,16 +226,17 @@ message ResponseDeliverTx {
|
||||
string info = 4; // nondeterministic
|
||||
int64 gas_wanted = 5 [json_name = "gas_wanted"];
|
||||
int64 gas_used = 6 [json_name = "gas_used"];
|
||||
repeated Event events = 7
|
||||
[(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"]; // nondeterministic
|
||||
repeated Event events = 7 [
|
||||
(gogoproto.nullable) = false,
|
||||
(gogoproto.jsontag) = "events,omitempty"
|
||||
]; // nondeterministic
|
||||
string codespace = 8;
|
||||
}
|
||||
|
||||
message ResponseEndBlock {
|
||||
repeated ValidatorUpdate validator_updates = 1
|
||||
[(gogoproto.nullable) = false];
|
||||
ConsensusParams consensus_param_updates = 2;
|
||||
repeated Event events = 3
|
||||
repeated ValidatorUpdate validator_updates = 1 [(gogoproto.nullable) = false];
|
||||
ConsensusParams consensus_param_updates = 2;
|
||||
repeated Event events = 3
|
||||
[(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"];
|
||||
}
|
||||
|
||||
@@ -364,10 +370,8 @@ message Evidence {
|
||||
// The height when the offense occurred
|
||||
int64 height = 3;
|
||||
// The corresponding time where the offense occurred
|
||||
google.protobuf.Timestamp time = 4 [
|
||||
(gogoproto.nullable) = false,
|
||||
(gogoproto.stdtime) = true
|
||||
];
|
||||
google.protobuf.Timestamp time = 4
|
||||
[(gogoproto.nullable) = false, (gogoproto.stdtime) = true];
|
||||
// Total voting power of the validator set in case the ABCI application does
|
||||
// not store historical validators.
|
||||
// https://github.com/tendermint/tendermint/issues/4581
|
||||
@@ -402,6 +406,8 @@ service ABCIApplication {
|
||||
rpc EndBlock(RequestEndBlock) returns (ResponseEndBlock);
|
||||
rpc ListSnapshots(RequestListSnapshots) returns (ResponseListSnapshots);
|
||||
rpc OfferSnapshot(RequestOfferSnapshot) returns (ResponseOfferSnapshot);
|
||||
rpc LoadSnapshotChunk(RequestLoadSnapshotChunk) returns (ResponseLoadSnapshotChunk);
|
||||
rpc ApplySnapshotChunk(RequestApplySnapshotChunk) returns (ResponseApplySnapshotChunk);
|
||||
rpc LoadSnapshotChunk(RequestLoadSnapshotChunk)
|
||||
returns (ResponseLoadSnapshotChunk);
|
||||
rpc ApplySnapshotChunk(RequestApplySnapshotChunk)
|
||||
returns (ResponseApplySnapshotChunk);
|
||||
}
|
||||
|
||||
@@ -388,7 +388,7 @@ func TestUnconfirmedTxs(t *testing.T) {
|
||||
|
||||
assert.Equal(t, 1, res.Count)
|
||||
assert.Equal(t, 1, res.Total)
|
||||
assert.Equal(t, mempool.TxsBytes(), res.TotalBytes)
|
||||
assert.Equal(t, mempool.SizeBytes(), res.TotalBytes)
|
||||
assert.Exactly(t, types.Txs{tx}, types.Txs(res.Txs))
|
||||
}
|
||||
|
||||
@@ -419,7 +419,7 @@ func TestNumUnconfirmedTxs(t *testing.T) {
|
||||
|
||||
assert.Equal(t, mempoolSize, res.Count)
|
||||
assert.Equal(t, mempoolSize, res.Total)
|
||||
assert.Equal(t, mempool.TxsBytes(), res.TotalBytes)
|
||||
assert.Equal(t, mempool.SizeBytes(), res.TotalBytes)
|
||||
}
|
||||
|
||||
mempool.Flush()
|
||||
|
||||
@@ -158,7 +158,7 @@ func UnconfirmedTxs(ctx *rpctypes.Context, limitPtr *int) (*ctypes.ResultUnconfi
|
||||
return &ctypes.ResultUnconfirmedTxs{
|
||||
Count: len(txs),
|
||||
Total: env.Mempool.Size(),
|
||||
TotalBytes: env.Mempool.TxsBytes(),
|
||||
TotalBytes: env.Mempool.SizeBytes(),
|
||||
Txs: txs}, nil
|
||||
}
|
||||
|
||||
@@ -168,7 +168,7 @@ func NumUnconfirmedTxs(ctx *rpctypes.Context) (*ctypes.ResultUnconfirmedTxs, err
|
||||
return &ctypes.ResultUnconfirmedTxs{
|
||||
Count: env.Mempool.Size(),
|
||||
Total: env.Mempool.Size(),
|
||||
TotalBytes: env.Mempool.TxsBytes()}, nil
|
||||
TotalBytes: env.Mempool.SizeBytes()}, nil
|
||||
}
|
||||
|
||||
// CheckTx checks the transaction without executing it. The transaction won't
|
||||
|
||||
@@ -32,6 +32,7 @@ var (
|
||||
// FIXME: v2 disabled due to flake
|
||||
nodeFastSyncs = uniformChoice{"v0"} // "v2"
|
||||
nodeStateSyncs = uniformChoice{false, true}
|
||||
nodeMempools = uniformChoice{"v0", "v1"}
|
||||
nodePersistIntervals = uniformChoice{0, 1, 5}
|
||||
nodeSnapshotIntervals = uniformChoice{0, 3}
|
||||
nodeRetainBlocks = uniformChoice{0, 1, 5}
|
||||
@@ -210,6 +211,7 @@ func generateNode(
|
||||
Database: nodeDatabases.Choose(r).(string),
|
||||
PrivvalProtocol: nodePrivvalProtocols.Choose(r).(string),
|
||||
FastSync: nodeFastSyncs.Choose(r).(string),
|
||||
Mempool: nodeMempools.Choose(r).(string),
|
||||
StateSync: nodeStateSyncs.Choose(r).(bool) && startAt > 0,
|
||||
PersistInterval: ptrUint64(uint64(nodePersistIntervals.Choose(r).(int))),
|
||||
SnapshotInterval: uint64(nodeSnapshotIntervals.Choose(r).(int)),
|
||||
|
||||
@@ -68,6 +68,7 @@ start_at = 1005 # Becomes part of the validator set at 1010
|
||||
seeds = ["seed02"]
|
||||
database = "cleveldb"
|
||||
fast_sync = "v0"
|
||||
mempool_version = "v1"
|
||||
# FIXME: should be grpc, disabled due to https://github.com/tendermint/tendermint/issues/5439
|
||||
#abci_protocol = "grpc"
|
||||
privval_protocol = "tcp"
|
||||
|
||||
@@ -92,6 +92,10 @@ type ManifestNode struct {
|
||||
// Defaults to disabled.
|
||||
FastSync string `toml:"fast_sync"`
|
||||
|
||||
// Mempool specifies which version of mempool to use. Either "v0" or "v1"
|
||||
// This defaults to v0.
|
||||
Mempool string `toml:"mempool_version"`
|
||||
|
||||
// StateSync enables state sync. The runner automatically configures trusted
|
||||
// block hashes and RPC servers. At least one node in the network must have
|
||||
// SnapshotInterval set to non-zero, and the state syncing node must have
|
||||
|
||||
@@ -75,6 +75,7 @@ type Node struct {
|
||||
StartAt int64
|
||||
FastSync string
|
||||
StateSync bool
|
||||
Mempool string
|
||||
Database string
|
||||
ABCIProtocol Protocol
|
||||
PrivvalProtocol Protocol
|
||||
@@ -157,6 +158,7 @@ func LoadTestnet(file string) (*Testnet, error) {
|
||||
PrivvalProtocol: ProtocolFile,
|
||||
StartAt: nodeManifest.StartAt,
|
||||
FastSync: nodeManifest.FastSync,
|
||||
Mempool: nodeManifest.Mempool,
|
||||
StateSync: nodeManifest.StateSync,
|
||||
PersistInterval: 1,
|
||||
SnapshotInterval: nodeManifest.SnapshotInterval,
|
||||
@@ -309,6 +311,12 @@ func (n Node) Validate(testnet Testnet) error {
|
||||
case "", "v0", "v1", "v2":
|
||||
default:
|
||||
return fmt.Errorf("invalid fast sync setting %q", n.FastSync)
|
||||
|
||||
}
|
||||
switch n.Mempool {
|
||||
case "", "v0", "v1":
|
||||
default:
|
||||
return fmt.Errorf("invalid mempool version %q", n.Mempool)
|
||||
}
|
||||
switch n.Database {
|
||||
case "goleveldb", "cleveldb", "boltdb", "rocksdb", "badgerdb":
|
||||
|
||||
@@ -284,6 +284,9 @@ func MakeConfig(node *e2e.Node) (*config.Config, error) {
|
||||
default:
|
||||
return nil, fmt.Errorf("unexpected mode %q", node.Mode)
|
||||
}
|
||||
if node.Mempool != "" {
|
||||
cfg.Mempool.Version = node.Mempool
|
||||
}
|
||||
|
||||
if node.FastSync == "" {
|
||||
cfg.FastSyncMode = false
|
||||
|
||||
@@ -1,9 +1,10 @@
|
||||
package checktx
|
||||
package v0
|
||||
|
||||
import (
|
||||
"github.com/tendermint/tendermint/abci/example/kvstore"
|
||||
"github.com/tendermint/tendermint/config"
|
||||
mempl "github.com/tendermint/tendermint/mempool"
|
||||
mempoolv0 "github.com/tendermint/tendermint/mempool/v0"
|
||||
"github.com/tendermint/tendermint/proxy"
|
||||
)
|
||||
|
||||
@@ -20,8 +21,7 @@ func init() {
|
||||
|
||||
cfg := config.DefaultMempoolConfig()
|
||||
cfg.Broadcast = false
|
||||
|
||||
mempool = mempl.NewCListMempool(cfg, appConnMem, 0)
|
||||
mempool = mempoolv0.NewCListMempool(cfg, appConnMem, 0)
|
||||
}
|
||||
|
||||
func Fuzz(data []byte) int {
|
||||
33
test/fuzz/mempool/v0/fuzz_test.go
Normal file
33
test/fuzz/mempool/v0/fuzz_test.go
Normal file
@@ -0,0 +1,33 @@
|
||||
package v0_test
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
mempoolv0 "github.com/tendermint/tendermint/test/fuzz/mempool/v0"
|
||||
)
|
||||
|
||||
const testdataCasesDir = "testdata/cases"
|
||||
|
||||
func TestMempoolTestdataCases(t *testing.T) {
|
||||
entries, err := os.ReadDir(testdataCasesDir)
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, e := range entries {
|
||||
entry := e
|
||||
t.Run(entry.Name(), func(t *testing.T) {
|
||||
defer func() {
|
||||
r := recover()
|
||||
require.Nilf(t, r, "testdata/cases test panic")
|
||||
}()
|
||||
f, err := os.Open(filepath.Join(testdataCasesDir, entry.Name()))
|
||||
require.NoError(t, err)
|
||||
input, err := ioutil.ReadAll(f)
|
||||
require.NoError(t, err)
|
||||
mempoolv0.Fuzz(input)
|
||||
})
|
||||
}
|
||||
}
|
||||
0
test/fuzz/mempool/v0/testdata/cases/empty
vendored
Normal file
0
test/fuzz/mempool/v0/testdata/cases/empty
vendored
Normal file
37
test/fuzz/mempool/v1/checktx.go
Normal file
37
test/fuzz/mempool/v1/checktx.go
Normal file
@@ -0,0 +1,37 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
"github.com/tendermint/tendermint/abci/example/kvstore"
|
||||
"github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
mempl "github.com/tendermint/tendermint/mempool"
|
||||
"github.com/tendermint/tendermint/proxy"
|
||||
|
||||
mempoolv1 "github.com/tendermint/tendermint/mempool/v1"
|
||||
)
|
||||
|
||||
var mempool mempl.Mempool
|
||||
|
||||
func init() {
|
||||
app := kvstore.NewApplication()
|
||||
cc := proxy.NewLocalClientCreator(app)
|
||||
appConnMem, _ := cc.NewABCIClient()
|
||||
err := appConnMem.Start()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
cfg := config.DefaultMempoolConfig()
|
||||
cfg.Broadcast = false
|
||||
log := log.NewNopLogger()
|
||||
mempool = mempoolv1.NewTxMempool(log, cfg, appConnMem, 0)
|
||||
}
|
||||
|
||||
func Fuzz(data []byte) int {
|
||||
|
||||
err := mempool.CheckTx(data, nil, mempl.TxInfo{})
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
|
||||
return 1
|
||||
}
|
||||
33
test/fuzz/mempool/v1/fuzz_test.go
Normal file
33
test/fuzz/mempool/v1/fuzz_test.go
Normal file
@@ -0,0 +1,33 @@
|
||||
package v1_test
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
mempoolv1 "github.com/tendermint/tendermint/test/fuzz/mempool/v1"
|
||||
)
|
||||
|
||||
const testdataCasesDir = "testdata/cases"
|
||||
|
||||
func TestMempoolTestdataCases(t *testing.T) {
|
||||
entries, err := os.ReadDir(testdataCasesDir)
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, e := range entries {
|
||||
entry := e
|
||||
t.Run(entry.Name(), func(t *testing.T) {
|
||||
defer func() {
|
||||
r := recover()
|
||||
require.Nilf(t, r, "testdata/cases test panic")
|
||||
}()
|
||||
f, err := os.Open(filepath.Join(testdataCasesDir, entry.Name()))
|
||||
require.NoError(t, err)
|
||||
input, err := ioutil.ReadAll(f)
|
||||
require.NoError(t, err)
|
||||
mempoolv1.Fuzz(input)
|
||||
})
|
||||
}
|
||||
}
|
||||
0
test/fuzz/mempool/v1/testdata/cases/empty
vendored
Normal file
0
test/fuzz/mempool/v1/testdata/cases/empty
vendored
Normal file
@@ -15,12 +15,14 @@ type emptyMempool struct{}
|
||||
|
||||
var _ mempl.Mempool = emptyMempool{}
|
||||
|
||||
func (emptyMempool) Lock() {}
|
||||
func (emptyMempool) Unlock() {}
|
||||
func (emptyMempool) Size() int { return 0 }
|
||||
func (emptyMempool) Lock() {}
|
||||
func (emptyMempool) Unlock() {}
|
||||
func (emptyMempool) Size() int { return 0 }
|
||||
func (emptyMempool) SizeBytes() int64 { return 0 }
|
||||
func (emptyMempool) CheckTx(_ types.Tx, _ func(*abci.Response), _ mempl.TxInfo) error {
|
||||
return nil
|
||||
}
|
||||
func (emptyMempool) RemoveTxByKey(txKey types.TxKey) error { return nil }
|
||||
func (emptyMempool) ReapMaxBytesMaxGas(_, _ int64) types.Txs { return types.Txs{} }
|
||||
func (emptyMempool) ReapMaxTxs(n int) types.Txs { return types.Txs{} }
|
||||
func (emptyMempool) Update(
|
||||
|
||||
@@ -32,6 +32,8 @@ import (
|
||||
"github.com/tendermint/tendermint/libs/service"
|
||||
"github.com/tendermint/tendermint/light"
|
||||
mempl "github.com/tendermint/tendermint/mempool"
|
||||
mempoolv0 "github.com/tendermint/tendermint/mempool/v0"
|
||||
mempoolv1 "github.com/tendermint/tendermint/mempool/v1"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
"github.com/tendermint/tendermint/p2p/pex"
|
||||
"github.com/tendermint/tendermint/privval"
|
||||
@@ -238,7 +240,7 @@ type Node struct {
|
||||
stateStore sm.Store
|
||||
blockStore *store.BlockStore // store the blockchain to disk
|
||||
bcReactor p2p.Reactor // for fast-syncing
|
||||
mempoolReactor *mempl.Reactor // for gossipping transactions
|
||||
mempoolReactor p2p.Reactor // for gossipping transactions
|
||||
mempool mempl.Mempool
|
||||
stateSync bool // whether the node should state sync on startup
|
||||
stateSyncReactor *statesync.Reactor // for hosting and restoring state sync snapshots
|
||||
@@ -378,24 +380,56 @@ func onlyValidatorIsUs(state sm.State, pubKey crypto.PubKey) bool {
|
||||
}
|
||||
|
||||
func createMempoolAndMempoolReactor(config *cfg.Config, proxyApp proxy.AppConns,
|
||||
state sm.State, memplMetrics *mempl.Metrics, logger log.Logger) (*mempl.Reactor, *mempl.CListMempool) {
|
||||
state sm.State, memplMetrics *mempl.Metrics, logger log.Logger) (p2p.Reactor, mempl.Mempool) {
|
||||
|
||||
mempool := mempl.NewCListMempool(
|
||||
config.Mempool,
|
||||
proxyApp.Mempool(),
|
||||
state.LastBlockHeight,
|
||||
mempl.WithMetrics(memplMetrics),
|
||||
mempl.WithPreCheck(sm.TxPreCheck(state)),
|
||||
mempl.WithPostCheck(sm.TxPostCheck(state)),
|
||||
)
|
||||
mempoolLogger := logger.With("module", "mempool")
|
||||
mempoolReactor := mempl.NewReactor(config.Mempool, mempool)
|
||||
mempoolReactor.SetLogger(mempoolLogger)
|
||||
switch config.Mempool.Version {
|
||||
case cfg.MempoolV1:
|
||||
mp := mempoolv1.NewTxMempool(
|
||||
logger,
|
||||
config.Mempool,
|
||||
proxyApp.Mempool(),
|
||||
state.LastBlockHeight,
|
||||
mempoolv1.WithMetrics(memplMetrics),
|
||||
mempoolv1.WithPreCheck(sm.TxPreCheck(state)),
|
||||
mempoolv1.WithPostCheck(sm.TxPostCheck(state)),
|
||||
)
|
||||
|
||||
if config.Consensus.WaitForTxs() {
|
||||
mempool.EnableTxsAvailable()
|
||||
reactor := mempoolv1.NewReactor(
|
||||
config.Mempool,
|
||||
mp,
|
||||
)
|
||||
if config.Consensus.WaitForTxs() {
|
||||
mp.EnableTxsAvailable()
|
||||
}
|
||||
|
||||
return reactor, mp
|
||||
|
||||
case cfg.MempoolV0:
|
||||
mp := mempoolv0.NewCListMempool(
|
||||
config.Mempool,
|
||||
proxyApp.Mempool(),
|
||||
state.LastBlockHeight,
|
||||
mempoolv0.WithMetrics(memplMetrics),
|
||||
mempoolv0.WithPreCheck(sm.TxPreCheck(state)),
|
||||
mempoolv0.WithPostCheck(sm.TxPostCheck(state)),
|
||||
)
|
||||
|
||||
mp.SetLogger(logger)
|
||||
mp.SetLogger(logger)
|
||||
|
||||
reactor := mempoolv0.NewReactor(
|
||||
config.Mempool,
|
||||
mp,
|
||||
)
|
||||
if config.Consensus.WaitForTxs() {
|
||||
mp.EnableTxsAvailable()
|
||||
}
|
||||
|
||||
return reactor, mp
|
||||
|
||||
default:
|
||||
return nil, nil
|
||||
}
|
||||
return mempoolReactor, mempool
|
||||
}
|
||||
|
||||
func createEvidenceReactor(config *cfg.Config, dbProvider DBProvider,
|
||||
@@ -441,7 +475,7 @@ func createConsensusReactor(config *cfg.Config,
|
||||
state sm.State,
|
||||
blockExec *sm.BlockExecutor,
|
||||
blockStore sm.BlockStore,
|
||||
mempool *mempl.CListMempool,
|
||||
mempool mempl.Mempool,
|
||||
evidencePool *evidence.Pool,
|
||||
privValidator types.PrivValidator,
|
||||
csMetrics *consensus.Metrics,
|
||||
@@ -545,7 +579,7 @@ func createSwitch(config *cfg.Config,
|
||||
transport p2p.Transport,
|
||||
p2pMetrics *p2p.Metrics,
|
||||
peerFilters []p2p.PeerFilterFunc,
|
||||
mempoolReactor *mempl.Reactor,
|
||||
mempoolReactor p2p.Reactor,
|
||||
bcReactor p2p.Reactor,
|
||||
stateSyncReactor *statesync.Reactor,
|
||||
consensusReactor *cs.Reactor,
|
||||
@@ -947,13 +981,6 @@ func (n *Node) OnStart() error {
|
||||
|
||||
n.isListening = true
|
||||
|
||||
if n.config.Mempool.WalEnabled() {
|
||||
err = n.mempool.InitWAL()
|
||||
if err != nil {
|
||||
return fmt.Errorf("init mempool WAL: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Start the switch (the P2P server).
|
||||
err = n.sw.Start()
|
||||
if err != nil {
|
||||
@@ -1001,11 +1028,6 @@ func (n *Node) OnStop() {
|
||||
n.Logger.Error("Error closing switch", "err", err)
|
||||
}
|
||||
|
||||
// stop mempool WAL
|
||||
if n.config.Mempool.WalEnabled() {
|
||||
n.mempool.CloseWAL()
|
||||
}
|
||||
|
||||
if err := n.transport.Close(); err != nil {
|
||||
n.Logger.Error("Error closing transport", "err", err)
|
||||
}
|
||||
@@ -1224,7 +1246,7 @@ func (n *Node) ConsensusReactor() *cs.Reactor {
|
||||
}
|
||||
|
||||
// MempoolReactor returns the Node's mempool reactor.
|
||||
func (n *Node) MempoolReactor() *mempl.Reactor {
|
||||
func (n *Node) MempoolReactor() p2p.Reactor {
|
||||
return n.mempoolReactor
|
||||
}
|
||||
|
||||
|
||||
21
types/tx.go
21
types/tx.go
@@ -2,6 +2,7 @@ package types
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha256"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
@@ -11,16 +12,28 @@ import (
|
||||
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
|
||||
)
|
||||
|
||||
// Tx is an arbitrary byte array.
|
||||
// NOTE: Tx has no types at this level, so when wire encoded it's just length-prefixed.
|
||||
// Might we want types here ?
|
||||
type Tx []byte
|
||||
// TxKeySize is the size of the transaction key index
|
||||
const TxKeySize = sha256.Size
|
||||
|
||||
type (
|
||||
// Tx is an arbitrary byte array.
|
||||
// NOTE: Tx has no types at this level, so when wire encoded it's just length-prefixed.
|
||||
// Might we want types here ?
|
||||
Tx []byte
|
||||
|
||||
// TxKey is the fixed length array key used as an index.
|
||||
TxKey [TxKeySize]byte
|
||||
)
|
||||
|
||||
// Hash computes the TMHASH hash of the wire encoded transaction.
|
||||
func (tx Tx) Hash() []byte {
|
||||
return tmhash.Sum(tx)
|
||||
}
|
||||
|
||||
func (tx Tx) Key() TxKey {
|
||||
return sha256.Sum256(tx)
|
||||
}
|
||||
|
||||
// String returns the hex-encoded transaction as a string.
|
||||
func (tx Tx) String() string {
|
||||
return fmt.Sprintf("Tx{%X}", []byte(tx))
|
||||
|
||||
Reference in New Issue
Block a user