Compare commits


79 Commits

Author SHA1 Message Date
Callum Waters
ccb69945b7 update mockery client 2022-07-27 00:03:46 +02:00
Callum Waters
26b4b84ab2 Merge branch 'main' into cal/prepare-proposal 2022-07-26 23:54:31 +02:00
Callum Waters
bdc1fa171a generate protos for prepare proposal 2022-07-26 23:54:13 +02:00
Callum Waters
a11b7743d4 fix mockery generation script (#9094) 2022-07-26 22:53:15 +02:00
Marko
46badfabd9 cli: add command to manually reindex tx and block events (backport: #6676) (#9034) 2022-07-22 16:37:08 +02:00
dependabot[bot]
eb465a3f4f build(deps): Bump github.com/golangci/golangci-lint (#9071)
Bumps [github.com/golangci/golangci-lint](https://github.com/golangci/golangci-lint) from 1.47.1 to 1.47.2.
- [Release notes](https://github.com/golangci/golangci-lint/releases)
- [Changelog](https://github.com/golangci/golangci-lint/blob/master/CHANGELOG.md)
- [Commits](https://github.com/golangci/golangci-lint/compare/v1.47.1...v1.47.2)

---
updated-dependencies:
- dependency-name: github.com/golangci/golangci-lint
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-07-22 10:16:06 -04:00
dependabot[bot]
d6be59788b build(deps): Bump github.com/BurntSushi/toml from 1.1.0 to 1.2.0 (#9062)
Bumps [github.com/BurntSushi/toml](https://github.com/BurntSushi/toml) from 1.1.0 to 1.2.0.
- [Release notes](https://github.com/BurntSushi/toml/releases)
- [Commits](https://github.com/BurntSushi/toml/compare/v1.1.0...v1.2.0)

---
updated-dependencies:
- dependency-name: github.com/BurntSushi/toml
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-07-21 09:25:40 -04:00
dependabot[bot]
f9f3bed808 build(deps): Bump github.com/golangci/golangci-lint (#9043) 2022-07-20 12:26:25 +02:00
M. J. Fromberger
64dfeb775c mempool: rework lock discipline to mitigate callback deadlocks (backport #9030) (#9033)
(manual cherry-pick of commit 22ed610083)
2022-07-19 13:48:47 -07:00
dependabot[bot]
6688db7be1 build(deps): Bump github.com/golangci/golangci-lint (#9036)
Bumps [github.com/golangci/golangci-lint](https://github.com/golangci/golangci-lint) from 1.46.2 to 1.47.0.
- [Release notes](https://github.com/golangci/golangci-lint/releases)
- [Changelog](https://github.com/golangci/golangci-lint/blob/master/CHANGELOG.md)
- [Commits](https://github.com/golangci/golangci-lint/compare/v1.46.2...v1.47.0)

---
updated-dependencies:
- dependency-name: github.com/golangci/golangci-lint
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-07-19 06:57:04 -07:00
M. J. Fromberger
d32df22c8b Prepare changelog for Release v0.34.20 (#9032) 2022-07-18 10:04:38 -07:00
M. J. Fromberger
223ece93c8 mempool: ensure async requests are flushed to the server (#9010)
In the v0.34 line, the socket and gRPC clients require explicit flushes to
ensure that the client and server have received an async request.  Add these
calls explicitly where required in the backport of the priority mempool.

In addition, the gRPC client's flush plumbing was not fully hooked up in the
v0.34 line, so this change includes that update as well.
2022-07-14 15:45:58 -07:00
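
To illustrate the pattern this commit describes, here is a minimal Go sketch, assuming a hypothetical async client interface (the names below are illustrative stand-ins, not the exact v0.34 abcicli API):

```go
package sketch

// asyncClient is a hypothetical stand-in for the v0.34 socket/gRPC ABCI
// clients, which queue async requests locally until flushed.
type asyncClient interface {
	CheckTxAsync(tx []byte) error // queues the request in the client
	FlushSync() error             // pushes queued requests to the server
}

// sendCheckTx shows the flush-after-async pattern: without the explicit
// flush, the request may sit in the client's write buffer and the server
// never sees it.
func sendCheckTx(c asyncClient, tx []byte) error {
	if err := c.CheckTxAsync(tx); err != nil {
		return err
	}
	return c.FlushSync()
}
```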
M. J. Fromberger
ba1711e706 mempool: ensure evicted transactions are removed from the cache (backport #9000) (#9004)
This is a manual cherry-pick of commit b94470a6a4.

In the original implementation transactions evicted for priority were also
removed from the cache. In addition, remove expired transactions from the
cache.

Related:

- Add Has method to cache implementations.
- Update tests to exercise this condition.
2022-07-14 07:23:50 -07:00
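
A rough sketch of the cache behavior described here, with illustrative names (not the actual mempool cache types):

```go
package sketch

import "sync"

type txKey [32]byte

// txCache is a hypothetical map-backed cache. The fix described above is
// behavioral: eviction and expiry paths must call Remove, and Has lets
// tests and callers observe membership.
type txCache struct {
	mu      sync.Mutex
	entries map[txKey]struct{}
}

func newTxCache() *txCache {
	return &txCache{entries: make(map[txKey]struct{})}
}

func (c *txCache) Push(k txKey) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.entries[k] = struct{}{}
}

func (c *txCache) Has(k txKey) bool {
	c.mu.Lock()
	defer c.mu.Unlock()
	_, ok := c.entries[k]
	return ok
}

// Remove must be called when a tx is evicted for priority or expires by
// TTL; otherwise the stale cache entry silently rejects resubmission.
func (c *txCache) Remove(k txKey) {
	c.mu.Lock()
	defer c.mu.Unlock()
	delete(c.entries, k)
}
```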
dependabot[bot]
8df725f92f build(deps): Bump google.golang.org/grpc from 1.47.0 to 1.48.0 (#8991)
Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.47.0 to 1.48.0.
- [Release notes](https://github.com/grpc/grpc-go/releases)
- [Commits](https://github.com/grpc/grpc-go/compare/v1.47.0...v1.48.0)

---
updated-dependencies:
- dependency-name: google.golang.org/grpc
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Sam Kleinman <garen@tychoish.com>
2022-07-13 10:16:18 -07:00
M. J. Fromberger
2b3737333f config: remove obsolete mempool v1 warning (#8987) 2022-07-13 18:27:06 +02:00
M. J. Fromberger
bbb5f3bfef Prepare changelog for v0.34.20-rc1. (#8966) 2022-07-12 11:03:23 -07:00
M. J. Fromberger
d6b413ff8e mempool: release lock during app connection flush (#8986)
A manual backport of #8984.

This case is symmetric to what we did for CheckTx calls, where we release the
mempool mutex to ensure callbacks can fire during call setup.  We also need
this behaviour for application flush, for the same reason: The caller holds the
lock by contract from the Mempool interface.
2022-07-12 10:46:27 -07:00
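
The shape of the fix, as a hedged sketch (field and method names here are assumptions, not the exact implementation):

```go
package sketch

import "sync"

type appConn interface{ FlushSync() error }

type mempool struct {
	updateMtx    sync.Mutex // held by the caller, per the Mempool interface contract
	proxyAppConn appConn
}

// FlushAppConn releases the mutex around the blocking ABCI flush so that
// response callbacks (which need the lock) can fire, then restores the
// caller's lock invariant before returning.
func (mem *mempool) FlushAppConn() error {
	mem.updateMtx.Unlock()
	defer mem.updateMtx.Lock()
	return mem.proxyAppConn.FlushSync()
}
```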
M. J. Fromberger
7b615f8123 mempool: reduce lock contention during CheckTx (backport #8983) (#8985)
A manual cherry-pick of 9e64c95.

The way this was originally structured, we reacquired the lock after issuing
the initial ABCI CheckTx call, only to immediately release it. Restructure the
code so that this redundant acquire is no longer necessary.
2022-07-12 10:28:41 -07:00
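
A sketch of the restructuring, under assumed names: the precondition checks happen under the lock, which is released once before the ABCI call, with no redundant re-acquire afterwards:

```go
package sketch

import "sync"

type checker interface{ CheckTxAsync(tx []byte) error }

type mempool struct {
	updateMtx sync.RWMutex
	appConn   checker
}

// precheck stands in for the size/cache checks that need the lock.
func (mem *mempool) precheck(tx []byte) error { return nil }

// CheckTx holds the lock only for the local prechecks; the ABCI call runs
// unlocked, so the response callback is never blocked on a re-acquire.
func (mem *mempool) CheckTx(tx []byte) error {
	mem.updateMtx.RLock()
	err := mem.precheck(tx)
	mem.updateMtx.RUnlock()
	if err != nil {
		return err
	}
	return mem.appConn.CheckTxAsync(tx)
}
```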
M. J. Fromberger
7d9447198c mempool: minor cleanup after backport from v0.35 (#8971)
- Remove warning log for issue #8775.
- Fix call to FlushAsync (no error is reported).
- Don't log on rechecks, it's the default (manual backport of #8969).
2022-07-11 08:26:05 -07:00
M. J. Fromberger
5276400c30 Update generated mocks after upgrade of Mockery v2. (#8974) 2022-07-11 09:18:43 -04:00
M. J. Fromberger
493dd69f31 Backport priority mempool fixes from v0.35.x to v0.34.x. (#8962)
This is a manual backport of the changes from these commits:

- bc49f66 Add more unit tests for the priority mempool. (#8961)
- 9b02094 Fix unbounded heap growth in the priority mempool. (#8944)

Imports and type signatures have been updated to match the v0.34 usage.
2022-07-08 07:22:07 -07:00
dependabot[bot]
1d9d947d88 build(deps): Bump github.com/libp2p/go-buffer-pool from 0.0.2 to 0.1.0 (#8933) 2022-07-05 11:56:20 +02:00
dependabot[bot]
479bdd71e6 build(deps): Bump github.com/vektra/mockery/v2 from 2.13.1 to 2.14.0 (#8923)
Bumps [github.com/vektra/mockery/v2](https://github.com/vektra/mockery) from 2.13.1 to 2.14.0.
- [Release notes](https://github.com/vektra/mockery/releases)
- [Changelog](https://github.com/vektra/mockery/blob/master/.goreleaser.yml)
- [Commits](https://github.com/vektra/mockery/compare/v2.13.1...v2.14.0)

---
updated-dependencies:
- dependency-name: github.com/vektra/mockery/v2
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-07-01 10:04:58 -04:00
dependabot[bot]
17f4ea3680 build(deps): Bump github.com/stretchr/testify from 1.7.5 to 1.8.0 (#8909)
Bumps [github.com/stretchr/testify](https://github.com/stretchr/testify) from 1.7.5 to 1.8.0.
- [Release notes](https://github.com/stretchr/testify/releases)
- [Commits](https://github.com/stretchr/testify/compare/v1.7.5...v1.8.0)

---
updated-dependencies:
- dependency-name: github.com/stretchr/testify
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-06-30 08:35:54 -07:00
yihuang
5c32cfa00e Work around indexing problem for duplicate transactions (#8625)
Port the bug fix terra-money#76 to upstream. This is critical for ethermint json-rpc to work.

fix: prevent duplicate tx index if it succeeded before
fix: use CodeTypeOk instead of 0
fix: handle duplicate txs within the same block
Co-authored-by: jess jesse@soob.co

ref: #5281
2022-06-30 07:53:48 -07:00
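
The gist of the fix, as an illustrative Go sketch (the store interface and types are hypothetical, not the actual kv indexer API):

```go
package sketch

const codeTypeOK uint32 = 0

type txResult struct {
	Code uint32
	// other result fields elided
}

type resultStore interface {
	Get(hash []byte) (*txResult, bool)
	Set(hash []byte, res *txResult) error
}

// indexTx keeps the first successful result for a tx hash: a later
// duplicate (e.g. the same tx failing in a later block) must not
// overwrite an earlier CodeTypeOK entry.
func indexTx(store resultStore, hash []byte, res *txResult) error {
	if old, ok := store.Get(hash); ok && old.Code == codeTypeOK {
		return nil
	}
	return store.Set(hash, res)
}
```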
Callum Waters
5e354a3a57 release: prepare v0.34.20-rc0 (#8888) 2022-06-27 18:44:21 +02:00
dependabot[bot]
9e14e954f9 build(deps): Bump bufbuild/buf-setup-action from 1.5.0 to 1.6.0 (#8880)
Bumps [bufbuild/buf-setup-action](https://github.com/bufbuild/buf-setup-action) from 1.5.0 to 1.6.0.
- [Release notes](https://github.com/bufbuild/buf-setup-action/releases)
- [Commits](https://github.com/bufbuild/buf-setup-action/compare/v1.5.0...v1.6.0)

---
updated-dependencies:
- dependency-name: bufbuild/buf-setup-action
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Sam Kleinman <garen@tychoish.com>
2022-06-27 10:20:25 -04:00
Aleksandr Bezobchuk
6b7d30cf37 feat: v0.34.x Prioritized Mempool (#8695)
* Updated mocks

* add reactor tests

* add v1 reactor tests

* Fix fuzz test for priority mempool

* e2e adapted to mempool v1; prio pool is default now

* Reverted default mempool to be fifo

* Changed buf version

* Added priority mempool to ci testnet

* Fixed linter

* Updated makefile

* Aligned makefile changes to v0.34.x

* Added go install for proto

* Add log message to warn about prioritized mempool bug

Signed-off-by: Thane Thomson <connect@thanethomson.com>

* Changelog message

Co-authored-by: Jasmina Malicevic <jasmina.dustinac@gmail.com>
Co-authored-by: Callum Waters <cmwaters19@gmail.com>
Co-authored-by: Sam Kleinman <garen@tychoish.com>
Co-authored-by: Thane Thomson <connect@thanethomson.com>
2022-06-27 11:34:28 +02:00
dependabot[bot]
25101d1116 build(deps): Bump github.com/stretchr/testify from 1.7.4 to 1.7.5 (#8865) 2022-06-24 17:15:22 -07:00
Jasmina Malicevic
b83cc0aeda makefile: buf setup backport v0.34 (#8863) 2022-06-24 13:58:25 -04:00
Joe Abbey
4a1df4911d fix: "Lazy" Stringers to defer Sprintf and Hash until logs print (#8845) 2022-06-23 14:56:34 -04:00
Jasmina Malicevic
a3cc3d98b9 makefile: change buf to use tools.go ; backport v0.34 (#8852)
* makefile: update buf commands to use tools.go (#8609)

This will keep the version of `buf` consistent between all developer machines.
2022-06-23 16:47:44 +02:00
mergify[bot]
fe024521ef cmd: add tool for compaction of goleveldb (backport #8564) (#8674) 2022-06-21 23:04:58 +02:00
dependabot[bot]
02def9ca64 build(deps): Bump github.com/spf13/cobra from 1.4.0 to 1.5.0 (#8810)
Bumps [github.com/spf13/cobra](https://github.com/spf13/cobra) from 1.4.0 to 1.5.0.
- [Release notes](https://github.com/spf13/cobra/releases)
- [Commits](https://github.com/spf13/cobra/compare/v1.4.0...v1.5.0)

---
updated-dependencies:
- dependency-name: github.com/spf13/cobra
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-06-21 14:42:30 -04:00
dependabot[bot]
ce2409f3ff build(deps): Bump github.com/stretchr/testify from 1.7.2 to 1.7.4 (#8809)
Bumps [github.com/stretchr/testify](https://github.com/stretchr/testify) from 1.7.2 to 1.7.4.
- [Release notes](https://github.com/stretchr/testify/releases)
- [Commits](https://github.com/stretchr/testify/compare/v1.7.2...v1.7.4)

---
updated-dependencies:
- dependency-name: github.com/stretchr/testify
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-06-21 11:14:07 -07:00
dependabot[bot]
30915e9337 build(deps): Bump github.com/adlio/schema from 1.3.0 to 1.3.3 (#8799)
Bumps [github.com/adlio/schema](https://github.com/adlio/schema) from 1.3.0 to 1.3.3.
- [Release notes](https://github.com/adlio/schema/releases)
- [Commits](https://github.com/adlio/schema/compare/v1.3.0...v1.3.3)

---
updated-dependencies:
- dependency-name: github.com/adlio/schema
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-06-20 07:58:56 -04:00
M. J. Fromberger
8a7affe3a0 Update default config docs for PSQL indexer settings. (#8728) 2022-06-08 13:59:49 -07:00
dependabot[bot]
851f404305 build(deps): Bump github.com/stretchr/testify from 1.7.1 to 1.7.2 (#8709)
Bumps [github.com/stretchr/testify](https://github.com/stretchr/testify) from 1.7.1 to 1.7.2.
- [Release notes](https://github.com/stretchr/testify/releases)
- [Commits](https://github.com/stretchr/testify/compare/v1.7.1...v1.7.2)

---
updated-dependencies:
- dependency-name: github.com/stretchr/testify
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-06-07 04:59:20 -04:00
dependabot[bot]
f63496dcd6 build(deps): Bump google.golang.org/grpc from 1.46.2 to 1.47.0 (#8665)
Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.46.2 to 1.47.0.
- [Release notes](https://github.com/grpc/grpc-go/releases)
- [Commits](https://github.com/grpc/grpc-go/compare/v1.46.2...v1.47.0)

---
updated-dependencies:
- dependency-name: google.golang.org/grpc
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-06-01 06:22:12 -04:00
dependabot[bot]
044b12585f build(deps): Bump github.com/spf13/viper from 1.11.0 to 1.12.0 (#8629)
Bumps [github.com/spf13/viper](https://github.com/spf13/viper) from 1.11.0 to 1.12.0.
- [Release notes](https://github.com/spf13/viper/releases)
- [Commits](https://github.com/spf13/viper/compare/v1.11.0...v1.12.0)

---
updated-dependencies:
- dependency-name: github.com/spf13/viper
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-05-27 12:19:27 -04:00
dependabot[bot]
ac2e7fab3d build(deps): Bump goreleaser/goreleaser-action from 2 to 3 (#8588)
Bumps [goreleaser/goreleaser-action](https://github.com/goreleaser/goreleaser-action) from 2 to 3.
- [Release notes](https://github.com/goreleaser/goreleaser-action/releases)
- [Commits](https://github.com/goreleaser/goreleaser-action/compare/v2...v3)

---
updated-dependencies:
- dependency-name: goreleaser/goreleaser-action
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-05-23 08:31:32 -04:00
dependabot[bot]
e6f0711648 build(deps): Bump github.com/lib/pq from 1.10.5 to 1.10.6 (#8566) 2022-05-17 04:57:23 -07:00
dependabot[bot]
603a1d6610 build(deps): Bump github.com/go-kit/log from 0.2.0 to 0.2.1 (#8556)
Bumps [github.com/go-kit/log](https://github.com/go-kit/log) from 0.2.0 to 0.2.1.
- [Release notes](https://github.com/go-kit/log/releases)
- [Commits](https://github.com/go-kit/log/compare/v0.2.0...v0.2.1)

---
updated-dependencies:
- dependency-name: github.com/go-kit/log
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-05-16 09:02:16 -07:00
dependabot[bot]
ad72896ca5 build(deps): Update rtCamp/action-slack-notify requirement to f565a63638bd3615e76249bffab00fcb9dab90f7 (#8553)
Updates the requirements on [rtCamp/action-slack-notify](https://github.com/rtCamp/action-slack-notify) to permit the latest version.
- [Release notes](https://github.com/rtCamp/action-slack-notify/releases)
- [Commits](f565a63638)

---
updated-dependencies:
- dependency-name: rtCamp/action-slack-notify
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-05-16 08:14:55 -07:00
dependabot[bot]
9afdac6b52 build(deps): Bump actions/checkout from 2 to 3 (#8555)
Bumps [actions/checkout](https://github.com/actions/checkout) from 2 to 3.
- [Release notes](https://github.com/actions/checkout/releases)
- [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md)
- [Commits](https://github.com/actions/checkout/compare/v2...v3)

---
updated-dependencies:
- dependency-name: actions/checkout
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-05-16 08:12:15 -07:00
dependabot[bot]
a694dad540 build(deps): Bump google.golang.org/grpc from 1.46.0 to 1.46.2 (#8558)
Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.46.0 to 1.46.2.
- [Release notes](https://github.com/grpc/grpc-go/releases)
- [Commits](https://github.com/grpc/grpc-go/compare/v1.46.0...v1.46.2)

---
updated-dependencies:
- dependency-name: google.golang.org/grpc
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Sam Kleinman <garen@tychoish.com>
2022-05-16 08:05:01 -07:00
dependabot[bot]
9d1556a5bc build(deps): Bump actions/stale from 3 to 5 (#8554)
Bumps [actions/stale](https://github.com/actions/stale) from 3 to 5.
- [Release notes](https://github.com/actions/stale/releases)
- [Changelog](https://github.com/actions/stale/blob/main/CHANGELOG.md)
- [Commits](https://github.com/actions/stale/compare/v3...v5)

---
updated-dependencies:
- dependency-name: actions/stale
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-05-16 07:26:22 -07:00
dependabot[bot]
7ae73be891 build(deps): Bump gaurav-nelson/github-action-markdown-link-check from 1.0.7 to 1.0.14 (#8552)
* build(deps): Bump gaurav-nelson/github-action-markdown-link-check

Bumps [gaurav-nelson/github-action-markdown-link-check](https://github.com/gaurav-nelson/github-action-markdown-link-check) from 1.0.7 to 1.0.14.
- [Release notes](https://github.com/gaurav-nelson/github-action-markdown-link-check/releases)
- [Commits](https://github.com/gaurav-nelson/github-action-markdown-link-check/compare/1.0.7...1.0.14)

---
updated-dependencies:
- dependency-name: gaurav-nelson/github-action-markdown-link-check
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-05-16 07:00:02 -07:00
dependabot[bot]
bbb1506f5e build(deps): Bump docker/login-action from 1 to 2 (#8551) 2022-05-16 09:45:49 -04:00
dependabot[bot]
7cb014cf27 build(deps): Bump docker/build-push-action from 2 to 3 (#8539)
Bumps [docker/build-push-action](https://github.com/docker/build-push-action) from 2 to 3.
- [Release notes](https://github.com/docker/build-push-action/releases)
- [Commits](https://github.com/docker/build-push-action/compare/v2...v3)

---
updated-dependencies:
- dependency-name: docker/build-push-action
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-05-13 13:24:41 -07:00
dependabot[bot]
004967f962 build(deps): Bump technote-space/get-diff-action from 4 to 6 (#8538)
Bumps [technote-space/get-diff-action](https://github.com/technote-space/get-diff-action) from 4 to 6.
- [Release notes](https://github.com/technote-space/get-diff-action/releases)
- [Changelog](https://github.com/technote-space/get-diff-action/blob/main/.releasegarc)
- [Commits](https://github.com/technote-space/get-diff-action/compare/v4...v6)

---
updated-dependencies:
- dependency-name: technote-space/get-diff-action
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-05-13 13:21:29 -07:00
dependabot[bot]
6670c24e5f build(deps): Bump actions/upload-artifact from 2 to 3 (#8537)
Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 2 to 3.
- [Release notes](https://github.com/actions/upload-artifact/releases)
- [Commits](https://github.com/actions/upload-artifact/compare/v2...v3)

---
updated-dependencies:
- dependency-name: actions/upload-artifact
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-05-13 13:19:41 -07:00
dependabot[bot]
b685ec7f2d build(deps): Bump actions/setup-go from 2 to 3 (#8532)
Bumps [actions/setup-go](https://github.com/actions/setup-go) from 2 to 3.
- [Release notes](https://github.com/actions/setup-go/releases)
- [Commits](https://github.com/actions/setup-go/compare/v2...v3)

---
updated-dependencies:
- dependency-name: actions/setup-go
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-05-13 13:17:59 -07:00
dependabot[bot]
016c91b50f build(deps): Bump github.com/prometheus/client_golang (#8541)
Bumps [github.com/prometheus/client_golang](https://github.com/prometheus/client_golang) from 1.12.1 to 1.12.2.
- [Release notes](https://github.com/prometheus/client_golang/releases)
- [Changelog](https://github.com/prometheus/client_golang/blob/v1.12.2/CHANGELOG.md)
- [Commits](https://github.com/prometheus/client_golang/compare/v1.12.1...v1.12.2)

---
updated-dependencies:
- dependency-name: github.com/prometheus/client_golang
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-05-13 13:16:14 -07:00
dependabot[bot]
34f23ab88a build(deps): Bump actions/cache from 2.1.4 to 3.0.2 (#8531)
* build(deps): Bump actions/cache from 2.1.4 to 3.0.2

Bumps [actions/cache](https://github.com/actions/cache) from 2.1.4 to 3.0.2.
- [Release notes](https://github.com/actions/cache/releases)
- [Changelog](https://github.com/actions/cache/blob/main/RELEASES.md)
- [Commits](https://github.com/actions/cache/compare/v2.1.4...v3.0.2)

---
updated-dependencies:
- dependency-name: actions/cache
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-05-13 13:05:58 -07:00
dependabot[bot]
aef0bd874d build(deps): Bump actions/download-artifact from 2 to 3 (#8528)
Bumps [actions/download-artifact](https://github.com/actions/download-artifact) from 2 to 3.
- [Release notes](https://github.com/actions/download-artifact/releases)
- [Commits](https://github.com/actions/download-artifact/compare/v2...v3)

---
updated-dependencies:
- dependency-name: actions/download-artifact
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-05-13 13:02:48 -07:00
dependabot[bot]
d2bd4471bc build(deps): Bump codecov/codecov-action from 1.0.13 to 3.1.0 (#8527)
* build(deps): Bump codecov/codecov-action from 1.0.13 to 3.1.0

Bumps [codecov/codecov-action](https://github.com/codecov/codecov-action) from 1.0.13 to 3.1.0.
- [Release notes](https://github.com/codecov/codecov-action/releases)
- [Changelog](https://github.com/codecov/codecov-action/blob/master/CHANGELOG.md)
- [Commits](https://github.com/codecov/codecov-action/compare/v1.0.13...v3.1.0)

---
updated-dependencies:
- dependency-name: codecov/codecov-action
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-05-13 12:59:18 -07:00
dependabot[bot]
c7006af6fd build(deps): Bump golangci/golangci-lint-action from 3.1.0 to 3.2.0 (#8524)
* build(deps): Bump golangci/golangci-lint-action from 3.1.0 to 3.2.0

Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 3.1.0 to 3.2.0.
- [Release notes](https://github.com/golangci/golangci-lint-action/releases)
- [Commits](https://github.com/golangci/golangci-lint-action/compare/v3.1.0...v3.2.0)

---
updated-dependencies:
- dependency-name: golangci/golangci-lint-action
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-05-13 12:56:52 -07:00
dependabot[bot]
972eee6ebc build(deps): Bump docker/setup-buildx-action from 1 to 2 (#8522)
Bumps [docker/setup-buildx-action](https://github.com/docker/setup-buildx-action) from 1 to 2.
- [Release notes](https://github.com/docker/setup-buildx-action/releases)
- [Commits](https://github.com/docker/setup-buildx-action/compare/v1...v2)

---
updated-dependencies:
- dependency-name: docker/setup-buildx-action
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: M. J. Fromberger <fromberger@interchain.io>
2022-05-13 12:49:28 -07:00
M. J. Fromberger
b06540b5ff Remove backport-specific Dependabot config (v0.34.x). (#8519)
After #8518, this separate configuration is no longer needed.
The master copy will target updates to this branch.
2022-05-13 08:09:42 -07:00
Callum Waters
d1213f7e5f docs: remove dev sessions (#8505) 2022-05-11 18:15:39 +02:00
mergify[bot]
624bbac8f6 blocksync: validate block before persisting it (backport #8493) (#8495) 2022-05-11 16:20:23 +02:00
dependabot[bot]
874c9a0951 build(deps): Bump github.com/btcsuite/btcd from 0.22.0-beta to 0.22.1 (#8436) 2022-04-29 04:27:01 -07:00
dependabot[bot]
986f8d6532 build(deps): Bump google.golang.org/grpc from 1.45.0 to 1.46.0 (#8407)
Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.45.0 to 1.46.0.
- [Release notes](https://github.com/grpc/grpc-go/releases)
- [Commits](https://github.com/grpc/grpc-go/compare/v1.45.0...v1.46.0)

---
updated-dependencies:
- dependency-name: google.golang.org/grpc
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-04-25 09:00:00 -04:00
dependabot[bot]
e40e7ea46e build(deps): Bump github.com/spf13/viper from 1.10.1 to 1.11.0 (#8343) 2022-04-14 16:29:50 -07:00
M. J. Fromberger
7fa34c63af Set a cap on the length of subscription queries. (#8349)
A manual backport of #7263.

As a safety measure, don't allow a query string to be unreasonably long. The
query filter is not especially efficient, so a query that needs more than basic
detail should filter coarsely in the subscriber and refine on the client side.

This affects Subscribe and TxSearch queries.
2022-04-14 08:15:18 -07:00
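
A minimal sketch of the guard this adds; the constant value and function name are illustrative, not the actual ones:

```go
package sketch

import "fmt"

// maxQueryLength is an illustrative cap, not the real constant.
const maxQueryLength = 512

// validateQuery rejects unreasonably long query strings up front, since
// the query filter is not efficient enough to evaluate arbitrarily large
// queries server-side.
func validateQuery(q string) error {
	if len(q) > maxQueryLength {
		return fmt.Errorf("query too long (%d > %d bytes): filter coarsely in the subscriber and refine client-side", len(q), maxQueryLength)
	}
	return nil
}
```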
William Banfield
da6ec8f082 invoke callbacks when set late in socket client (#8331) 2022-04-13 15:30:36 -04:00
Sam Kleinman
efddab0734 rpc: avoid leaking threads (#8329) 2022-04-13 13:28:03 -04:00
dependabot[bot]
3454f8cb89 build(deps): Bump github.com/lib/pq from 1.10.4 to 1.10.5 (#8284) 2022-04-08 09:05:40 -04:00
M. J. Fromberger
2f231ceb95 Prepare changelog for v0.34.19. (#8278) 2022-04-07 14:39:44 -07:00
Aleksandr Bezobchuk
6e85f46d9a cli: fix reset commands #8270
Some applications use the command-line implementations directly,
rather than through the root command. Because the implementations
obtained config from an unexported global, this would not work.

Instead, have each command that needs the config parse it where
needed.
2022-04-07 14:15:25 -07:00
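
The shape of the fix, sketched with cobra (parseConfig and resetAll are hypothetical helpers standing in for the real ones):

```go
package sketch

import "github.com/spf13/cobra"

type config struct{ rootDir string }

// Stand-ins for the real helpers.
func parseConfig() (*config, error) { return &config{}, nil }
func resetAll(c *config) error      { return nil }

// Before: the command read config from an unexported package global that
// is only populated when invoked under the root command. After: each
// command parses the config itself.
var resetAllCmd = &cobra.Command{
	Use:   "unsafe-reset-all",
	Short: "Remove all data and WAL, reset this node's validator state",
	RunE: func(cmd *cobra.Command, args []string) error {
		conf, err := parseConfig() // load config here, not from a global
		if err != nil {
			return err
		}
		return resetAll(conf)
	},
}
```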
M. J. Fromberger
89139784c5 Fix broken links in the changelog. (#8269) 2022-04-06 16:16:18 -07:00
M. J. Fromberger
ecf19029b4 Prepare changelog for v0.34.18 release. (#8263)
* Update branch changelogs.
* Update version marker.
2022-04-06 11:55:55 -07:00
Callum Waters
9ae5797866 cli: fix reset command for v0.34 (#8258) 2022-04-06 16:09:31 +02:00
dependabot[bot]
07670318a9 build(deps): Bump github.com/BurntSushi/toml from 1.0.0 to 1.1.0 (#8252)
Bumps [github.com/BurntSushi/toml](https://github.com/BurntSushi/toml) from 1.0.0 to 1.1.0.
- [Release notes](https://github.com/BurntSushi/toml/releases)
- [Commits](https://github.com/BurntSushi/toml/compare/v1.0.0...v1.1.0)

---
updated-dependencies:
- dependency-name: github.com/BurntSushi/toml
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-04-05 08:52:10 -07:00
M. J. Fromberger
1e32a149dd Update golangci-lint-action and golang-ci versions. (#8255)
Also specify Go toolchain version in actions (now required).
2022-04-05 08:19:58 -07:00
William Banfield
2c553d735a Update changelog for release v0.34.17 (#8239) 2022-04-01 17:50:48 -04:00
William Banfield
799489e474 consensus: change lock to handle panics (#8237) 2022-04-01 14:43:20 -04:00
William Banfield
66b1a3ee4c stop using %w in testing errors (#8241) 2022-04-01 13:49:54 -04:00
166 changed files with 9002 additions and 1691 deletions

.github/dependabot.yml (deleted file)

@@ -1,27 +0,0 @@
version: 2
updates:
- package-ecosystem: github-actions
directory: "/"
schedule:
interval: daily
time: "11:00"
open-pull-requests-limit: 10
- package-ecosystem: npm
directory: "/docs"
schedule:
interval: daily
time: "11:00"
open-pull-requests-limit: 10
reviewers:
- fadeev
- package-ecosystem: gomod
directory: "/"
schedule:
interval: daily
time: "11:00"
open-pull-requests-limit: 10
reviewers:
- melekes
- tessr
labels:
- T:dependencies

.github/workflows/check-generated.yml (new file)

@@ -0,0 +1,75 @@
# Verify that generated code is up-to-date.
#
# Note that we run these checks regardless of whether the input files have
# changed, because generated code can change in response to toolchain updates
# even if no files in the repository are modified.
name: Check generated code
on:
pull_request:
branches:
- main
permissions:
contents: read
jobs:
check-mocks:
runs-on: ubuntu-latest
steps:
- uses: actions/setup-go@v3
with:
go-version: '1.17'
- uses: actions/checkout@v3
- name: "Check generated mocks"
run: |
set -euo pipefail
readonly MOCKERY=2.12.3 # N.B. no leading "v"
curl -sL "https://github.com/vektra/mockery/releases/download/v${MOCKERY}/mockery_${MOCKERY}_Linux_x86_64.tar.gz" | tar -C /usr/local/bin -xzf -
make mockery 2>/dev/null
if ! git diff --stat --exit-code ; then
echo ">> ERROR:"
echo ">>"
echo ">> Generated mocks require update (either Mockery or source files may have changed)."
echo ">> Ensure your tools are up-to-date, re-run 'make mockery' and update this PR."
echo ">>"
exit 1
fi
check-proto:
runs-on: ubuntu-latest
steps:
- uses: actions/setup-go@v3
with:
go-version: '1.17'
- uses: actions/checkout@v3
with:
fetch-depth: 1 # we need a .git directory to run git diff
- name: "Check protobuf generated code"
run: |
set -euo pipefail
# Install buf and gogo tools, so that differences that arise from
# toolchain differences are also caught.
readonly tools="$(mktemp -d)"
export PATH="${PATH}:${tools}/bin"
export GOBIN="${tools}/bin"
go install github.com/bufbuild/buf/cmd/buf
go install github.com/gogo/protobuf/protoc-gen-gogofaster@latest
make proto-gen
if ! git diff --stat --exit-code ; then
echo ">> ERROR:"
echo ">>"
echo ">> Protobuf generated code requires update (either tools or .proto files may have changed)."
echo ">> Ensure your tools are up-to-date, re-run 'make proto-gen' and update this PR."
echo ">>"
exit 1
fi


@@ -10,25 +10,25 @@ jobs:
split-test-files:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: Create a file with all the pkgs
run: go list ./... > pkgs.txt
- name: Split pkgs into 4 files
run: split -d -n l/4 pkgs.txt pkgs.txt.part.
# cache multiple
- uses: actions/upload-artifact@v2
- uses: actions/upload-artifact@v3
with:
name: "${{ github.sha }}-00"
path: ./pkgs.txt.part.00
- uses: actions/upload-artifact@v2
- uses: actions/upload-artifact@v3
with:
name: "${{ github.sha }}-01"
path: ./pkgs.txt.part.01
- uses: actions/upload-artifact@v2
- uses: actions/upload-artifact@v3
with:
name: "${{ github.sha }}-02"
path: ./pkgs.txt.part.02
- uses: actions/upload-artifact@v2
- uses: actions/upload-artifact@v3
with:
name: "${{ github.sha }}-03"
path: ./pkgs.txt.part.03
@@ -42,11 +42,11 @@ jobs:
goarch: ["arm", "amd64"]
timeout-minutes: 5
steps:
- uses: actions/setup-go@v2
- uses: actions/setup-go@v3
with:
go-version: "^1.15.4"
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
with:
PATTERNS: |
**/**.go
@@ -64,17 +64,17 @@ jobs:
matrix:
part: ["00", "01", "02", "03"]
steps:
- uses: actions/setup-go@v2
- uses: actions/setup-go@v3
with:
go-version: "^1.15.4"
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
with:
PATTERNS: |
**/**.go
go.mod
go.sum
- uses: actions/download-artifact@v2
- uses: actions/download-artifact@v3
with:
name: "${{ github.sha }}-${{ matrix.part }}"
if: env.GIT_DIFF
@@ -82,7 +82,7 @@ jobs:
run: |
cat pkgs.txt.part.${{ matrix.part }} | xargs go test -mod=readonly -timeout 8m -race -coverprofile=${{ matrix.part }}profile.out -covermode=atomic
if: env.GIT_DIFF
- uses: actions/upload-artifact@v2
- uses: actions/upload-artifact@v3
with:
name: "${{ github.sha }}-${{ matrix.part }}-coverage"
path: ./${{ matrix.part }}profile.out
@@ -91,33 +91,33 @@ jobs:
runs-on: ubuntu-latest
needs: tests
steps:
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
with:
PATTERNS: |
**/**.go
go.mod
go.sum
- uses: actions/download-artifact@v2
- uses: actions/download-artifact@v3
with:
name: "${{ github.sha }}-00-coverage"
if: env.GIT_DIFF
- uses: actions/download-artifact@v2
- uses: actions/download-artifact@v3
with:
name: "${{ github.sha }}-01-coverage"
if: env.GIT_DIFF
- uses: actions/download-artifact@v2
- uses: actions/download-artifact@v3
with:
name: "${{ github.sha }}-02-coverage"
if: env.GIT_DIFF
- uses: actions/download-artifact@v2
- uses: actions/download-artifact@v3
with:
name: "${{ github.sha }}-03-coverage"
if: env.GIT_DIFF
- run: |
cat ./*profile.out | grep -v "mode: atomic" >> coverage.txt
if: env.GIT_DIFF
- uses: codecov/codecov-action@v1.0.13
- uses: codecov/codecov-action@v3
with:
file: ./coverage.txt
if: env.GIT_DIFF


@@ -14,7 +14,7 @@ jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@master
- uses: actions/checkout@v3
- name: Prepare
id: prep
run: |
@@ -40,17 +40,17 @@ jobs:
platforms: all
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
uses: docker/setup-buildx-action@v2
- name: Login to DockerHub
if: ${{ github.event_name != 'pull_request' }}
uses: docker/login-action@v1
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Publish to Docker Hub
uses: docker/build-push-action@v2
uses: docker/build-push-action@v3
with:
context: .
file: ./DOCKER/Dockerfile


@@ -14,11 +14,11 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/setup-go@v2
- uses: actions/setup-go@v3
with:
go-version: '1.17'
- uses: actions/checkout@v2.4.0
- uses: actions/checkout@v3
- name: Build
working-directory: test/e2e


@@ -21,11 +21,11 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/setup-go@v2
- uses: actions/setup-go@v3
with:
go-version: '^1.15.4'
- uses: actions/checkout@v2
- uses: actions/checkout@v3
with:
ref: 'v0.34.x'
@@ -49,7 +49,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Notify Slack on failure
uses: rtCamp/action-slack-notify@e9db0ef
uses: rtCamp/action-slack-notify@f565a63638bd3615e76249bffab00fcb9dab90f7
env:
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACK_CHANNEL: tendermint-internal
@@ -65,7 +65,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Notify Slack on success
uses: rtCamp/action-slack-notify@ae4223259071871559b6e9d08b24a63d71b3f0c0
uses: rtCamp/action-slack-notify@f565a63638bd3615e76249bffab00fcb9dab90f7
env:
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACK_CHANNEL: tendermint-internal


@@ -20,11 +20,11 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/setup-go@v2
- uses: actions/setup-go@v3
with:
go-version: '1.15'
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: Build
working-directory: test/e2e
@@ -46,7 +46,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Notify Slack on failure
uses: rtCamp/action-slack-notify@ae4223259071871559b6e9d08b24a63d71b3f0c0
uses: rtCamp/action-slack-notify@f565a63638bd3615e76249bffab00fcb9dab90f7
env:
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACK_CHANNEL: tendermint-internal
@@ -62,7 +62,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Notify Slack on success
uses: rtCamp/action-slack-notify@ae4223259071871559b6e9d08b24a63d71b3f0c0
uses: rtCamp/action-slack-notify@f565a63638bd3615e76249bffab00fcb9dab90f7
env:
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACK_CHANNEL: tendermint-internal


@@ -13,11 +13,11 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 15
steps:
- uses: actions/setup-go@v2
- uses: actions/setup-go@v3
with:
go-version: '^1.15.4'
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
with:
PATTERNS: |
**/**.go


@@ -9,11 +9,11 @@ jobs:
fuzz-nightly-test:
runs-on: ubuntu-latest
steps:
- uses: actions/setup-go@v2
- uses: actions/setup-go@v3
with:
go-version: '1.15'
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: Install go-fuzz
working-directory: test/fuzz
@@ -45,14 +45,14 @@ jobs:
continue-on-error: true
- name: Archive crashers
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: crashers
path: test/fuzz/**/crashers
retention-days: 1
- name: Archive suppressions
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: suppressions
path: test/fuzz/**/suppressions
@@ -72,7 +72,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Notify Slack if any crashers
uses: rtCamp/action-slack-notify@ae4223259071871559b6e9d08b24a63d71b3f0c0
uses: rtCamp/action-slack-notify@f565a63638bd3615e76249bffab00fcb9dab90f7
env:
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACK_CHANNEL: tendermint-internal


@@ -1,12 +1,12 @@
name: Check Markdown links
on:
schedule:
- cron: '* */24 * * *'
jobs:
markdown-link-check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@master
- uses: gaurav-nelson/github-action-markdown-link-check@1.0.7
- uses: actions/checkout@v3
- uses: creachadair/github-action-markdown-link-check@master
with:
folder-path: "docs"


@@ -13,17 +13,20 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 8
steps:
- uses: actions/checkout@v2.4.0
- uses: technote-space/get-diff-action@v5
- uses: actions/checkout@v3
- uses: actions/setup-go@v3
with:
go-version: '^1.16'
- uses: technote-space/get-diff-action@v6
with:
PATTERNS: |
**/**.go
go.mod
go.sum
- uses: golangci/golangci-lint-action@v2.5.2
- uses: golangci/golangci-lint-action@v3
with:
# Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version.
version: v1.42.1
version: v1.45
args: --timeout 10m
github-token: ${{ secrets.github_token }}
if: env.GIT_DIFF


@@ -19,7 +19,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout Code
uses: actions/checkout@v2.4.0
uses: actions/checkout@v3
- name: Lint Code Base
uses: docker://github/super-linter:v3
env:


@@ -1,51 +0,0 @@
name: Build & Push TM Proto Builder
on:
pull_request:
paths:
- "tools/proto/*"
push:
branches:
- master
paths:
- "tools/proto/*"
schedule:
# run this job once a month to receive any go or buf updates
- cron: "* * 1 * *"
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@master
- name: Prepare
id: prep
run: |
DOCKER_IMAGE=tendermintdev/docker-build-proto
VERSION=noop
if [[ $GITHUB_REF == refs/tags/* ]]; then
VERSION=${GITHUB_REF#refs/tags/}
elif [[ $GITHUB_REF == refs/heads/* ]]; then
VERSION=$(echo ${GITHUB_REF#refs/heads/} | sed -r 's#/+#-#g')
if [ "${{ github.event.repository.default_branch }}" = "$VERSION" ]; then
VERSION=latest
fi
fi
TAGS="${DOCKER_IMAGE}:${VERSION}"
echo ::set-output name=tags::${TAGS}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Login to DockerHub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Publish to Docker Hub
uses: docker/build-push-action@v2
with:
context: ./tools/proto
file: ./tools/proto/Dockerfile
push: ${{ github.event_name != 'pull_request' }}
tags: ${{ steps.prep.outputs.tags }}

.github/workflows/proto-lint.yml (new file)

@@ -0,0 +1,21 @@
name: Protobuf Lint
on:
pull_request:
paths:
- 'proto/**'
push:
branches:
- v0.34.x
paths:
- 'proto/**'
jobs:
lint:
runs-on: ubuntu-latest
timeout-minutes: 5
steps:
- uses: actions/checkout@v3
- uses: bufbuild/buf-setup-action@v1.6.0
- uses: bufbuild/buf-lint-action@v1
with:
input: 'proto'


@@ -10,13 +10,13 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 4
steps:
- uses: actions/checkout@master
- uses: actions/checkout@v3
- name: lint
run: make proto-lint
proto-breakage:
runs-on: ubuntu-latest
timeout-minutes: 4
steps:
- uses: actions/checkout@master
- uses: actions/checkout@v3
- name: check-breakage
run: make proto-check-breaking-ci


@@ -10,18 +10,18 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
uses: actions/checkout@v3
with:
fetch-depth: 0
- uses: actions/setup-go@v2
- uses: actions/setup-go@v3
with:
go-version: '^1.15.4'
- run: echo https://github.com/tendermint/tendermint/blob/${GITHUB_REF#refs/tags/}/CHANGELOG.md#${GITHUB_REF#refs/tags/} > ../release_notes.md
- name: Run GoReleaser
uses: goreleaser/goreleaser-action@v2
uses: goreleaser/goreleaser-action@v3
with:
version: latest
args: release --rm-dist --release-notes=../release_notes.md


@@ -7,7 +7,7 @@ jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v3
- uses: actions/stale@v5
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
stale-pr-message: "This pull request has been automatically marked as stale because it has not had


@@ -23,11 +23,11 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 5
steps:
- uses: actions/setup-go@v2
- uses: actions/setup-go@v3
with:
go-version: "^1.15.4"
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
with:
PATTERNS: |
**/**.go
@@ -36,7 +36,7 @@ jobs:
- name: install
run: make install install_abci
if: "env.GIT_DIFF != ''"
- uses: actions/cache@v2.1.4
- uses: actions/cache@v3
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
@@ -44,7 +44,7 @@ jobs:
${{ runner.os }}-go-
if: env.GIT_DIFF
# Cache binaries for use by other jobs
- uses: actions/cache@v2.1.4
- uses: actions/cache@v3
with:
path: ~/go/bin
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
@@ -55,24 +55,24 @@ jobs:
needs: build
timeout-minutes: 5
steps:
- uses: actions/setup-go@v2
- uses: actions/setup-go@v3
with:
go-version: "^1.15.4"
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
with:
PATTERNS: |
**/**.go
go.mod
go.sum
- uses: actions/cache@v2.1.4
- uses: actions/cache@v3
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
if: env.GIT_DIFF
- uses: actions/cache@v2.1.4
- uses: actions/cache@v3
with:
path: ~/go/bin
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
@@ -87,24 +87,24 @@ jobs:
needs: build
timeout-minutes: 5
steps:
- uses: actions/setup-go@v2
- uses: actions/setup-go@v3
with:
go-version: "^1.15.4"
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
with:
PATTERNS: |
**/**.go
go.mod
go.sum
- uses: actions/cache@v2.1.4
- uses: actions/cache@v3
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
if: env.GIT_DIFF
- uses: actions/cache@v2.1.4
- uses: actions/cache@v3
with:
path: ~/go/bin
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
@@ -118,24 +118,24 @@ jobs:
needs: build
timeout-minutes: 5
steps:
- uses: actions/setup-go@v2
- uses: actions/setup-go@v3
with:
go-version: "^1.15.4"
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
with:
PATTERNS: |
**/**.go
go.mod
go.sum
- uses: actions/cache@v2.1.4
- uses: actions/cache@v3
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
if: env.GIT_DIFF
- uses: actions/cache@v2.1.4
- uses: actions/cache@v3
with:
path: ~/go/bin
key: ${{ runner.os }}-${{ github.sha }}-tm-binary


@@ -2,6 +2,42 @@
Friendly reminder, we have a [bug bounty program](https://hackerone.com/cosmos).
## v0.34.20
Special thanks to external contributors on this release: @joeabbey @yihuang
This release introduces a prioritized mempool. Further notes can be found in UPGRADING.md.
NOTE: There's a known issue when combining the prioritized mempool with the ABCI socket client, which the team is currently working to resolve. Read more about the issue [here](https://github.com/tendermint/tendermint/pull/9030).
### BUG FIXES
- [blocksync] [\#8496](https://github.com/tendermint/tendermint/pull/8496) validate block against state before persisting it to disk (@cmwaters)
- [indexer] [#8625](https://github.com/tendermint/tendermint/pull/8625) Fix overriding tx index of duplicated txs. (@yihuang)
- [mempool] [\#8962](https://github.com/tendermint/tendermint/issues/8962) Backport priority mempool fixes from v0.35.x to v0.34.x (@creachadair).
### FEATURES
- [cli] [\#8674] Add command to force compact goleveldb databases (@cmwaters)
- [mempool] [\#8695] Port back the priority mempool. (@alexanderbez, @jmalicevic, @cmwaters)
### IMPROVEMENTS
- [logging] [\#8845](https://github.com/tendermint/tendermint/issues/8845) Add "Lazy" Stringers to defer Sprintf and Hash until logs print. (@joeabbey)
## v0.34.19
### BUG FIXES
- [cli] [\#8270](https://github.com/tendermint/tendermint/issues/8270) fix reset commands (@alexanderbez).
## v0.34.18
### BREAKING CHANGES
- CLI/RPC/Config
- [cli] [\#8258](https://github.com/tendermint/tendermint/pull/8258) Fix a bug in the cli that caused `unsafe-reset-all` to panic
## v0.34.17
### BREAKING CHANGES
@@ -13,7 +49,7 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/cosmos).
### BUG FIXES
- [consensus] [\#8079](https://github.com/tendermint/tendermint/issues/8079) start the timeout ticker before relay (backport #7844) (@creachadair).
- [consensus] [\#7992](https://github.com/tendermint/tendermint/issues/7992) [\#7994](https://github.com/tendermint/tendermint/issues/7994) change lock handling in reactor for handleMsg and reactor to alleviate issues gossiping during long ABCI calls (@williambanfield).
- [consensus] [\#7992](https://github.com/tendermint/tendermint/issues/7992) [\#7994](https://github.com/tendermint/tendermint/issues/7994) change lock handling in handleMsg and reactor to alleviate issues gossiping during long ABCI calls (@williambanfield).
## v0.34.16
@@ -1730,7 +1766,7 @@ more details.
- [rpc] [\#3269](https://github.com/tendermint/tendermint/issues/2826) Limit number of unique clientIDs with open subscriptions. Configurable via `rpc.max_subscription_clients`
- [rpc] [\#3269](https://github.com/tendermint/tendermint/issues/2826) Limit number of unique queries a given client can subscribe to at once. Configurable via `rpc.max_subscriptions_per_client`.
- [rpc] [\#3435](https://github.com/tendermint/tendermint/issues/3435) Default ReadTimeout and WriteTimeout changed to 10s. WriteTimeout can increased by setting `rpc.timeout_broadcast_tx_commit` in the config.
- [rpc/client] [\#3269](https://github.com/tendermint/tendermint/issues/3269) Update `EventsClient` interface to reflect new pubsub/eventBus API [ADR-33](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-033-pubsub.md). This includes `Subscribe`, `Unsubscribe`, and `UnsubscribeAll` methods.
- [rpc/client] [\#3269](https://github.com/tendermint/tendermint/issues/3269) Update `EventsClient` interface to reflect new pubsub/eventBus API [ADR-33](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-033-pubsub.md). This includes `Subscribe`, `Unsubscribe`, and `UnsubscribeAll` methods.
* Apps
- [abci] [\#3403](https://github.com/tendermint/tendermint/issues/3403) Remove `time_iota_ms` from BlockParams. This is a
@@ -1783,7 +1819,7 @@ more details.
- [blockchain] [\#3358](https://github.com/tendermint/tendermint/pull/3358) Fix timer leak in `BlockPool` (@guagualvcha)
- [cmd] [\#3408](https://github.com/tendermint/tendermint/issues/3408) Fix `testnet` command's panic when creating non-validator configs (using `--n` flag) (@srmo)
- [libs/db/remotedb/grpcdb] [\#3402](https://github.com/tendermint/tendermint/issues/3402) Close Iterator/ReverseIterator after use
- [libs/pubsub] [\#951](https://github.com/tendermint/tendermint/issues/951), [\#1880](https://github.com/tendermint/tendermint/issues/1880) Use non-blocking send when dispatching messages [ADR-33](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-033-pubsub.md)
- [libs/pubsub] [\#951](https://github.com/tendermint/tendermint/issues/951), [\#1880](https://github.com/tendermint/tendermint/issues/1880) Use non-blocking send when dispatching messages [ADR-33](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-033-pubsub.md)
- [lite] [\#3364](https://github.com/tendermint/tendermint/issues/3364) Fix `/validators` and `/abci_query` proxy endpoints
(@guagualvcha)
- [p2p/conn] [\#3347](https://github.com/tendermint/tendermint/issues/3347) Reject all-zero shared secrets in the Diffie-Hellman step of secret-connection
@@ -2481,7 +2517,7 @@ Special thanks to external contributors on this release:
This release is mostly about the ConsensusParams - removing fields and enforcing MaxGas.
It also addresses some issues found via security audit, removes various unused
functions from `libs/common`, and implements
[ADR-012](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-012-peer-transport.md).
[ADR-012](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-012-peer-transport.md).
BREAKING CHANGES:
@@ -2562,7 +2598,7 @@ BREAKING CHANGES:
- [abci] Added address of the original proposer of the block to Header
- [abci] Change ABCI Header to match Tendermint exactly
- [abci] [\#2159](https://github.com/tendermint/tendermint/issues/2159) Update use of `Validator` (see
[ADR-018](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-018-ABCI-Validators.md)):
[ADR-018](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-018-ABCI-Validators.md)):
- Remove PubKey from `Validator` (so it's just Address and Power)
- Introduce `ValidatorUpdate` (with just PubKey and Power)
- InitChain and EndBlock use ValidatorUpdate
@@ -2584,7 +2620,7 @@ BREAKING CHANGES:
- [state] [\#1815](https://github.com/tendermint/tendermint/issues/1815) Validator set changes are now delayed by one block (!)
- Add NextValidatorSet to State, changes on-disk representation of state
- [state] [\#2184](https://github.com/tendermint/tendermint/issues/2184) Enforce ConsensusParams.BlockSize.MaxBytes (See
[ADR-020](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-020-block-size.md)).
[ADR-020](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-020-block-size.md)).
- Remove ConsensusParams.BlockSize.MaxTxs
- Introduce maximum sizes for all components of a block, including ChainID
- [types] Updates to the block Header:
@@ -2595,7 +2631,7 @@ BREAKING CHANGES:
- [consensus] [\#2203](https://github.com/tendermint/tendermint/issues/2203) Implement BFT time
- Timestamp in block must be monotonic and equal the median of timestamps in block's LastCommit
- [crypto] [\#2239](https://github.com/tendermint/tendermint/issues/2239) Secp256k1 signature changes (See
[ADR-014](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-014-secp-malleability.md)):
[ADR-014](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-014-secp-malleability.md)):
- format changed from DER to `r || s`, both little endian encoded as 32 bytes.
- malleability removed by requiring `s` to be in canonical form.


@@ -1,6 +1,6 @@
# Unreleased Changes
## v0.34.18
## v0.34.21
Special thanks to external contributors on this release:
@@ -20,6 +20,8 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi
### FEATURES
- [#9083] backport cli command to reindex missed events (@cmwaters)
### IMPROVEMENTS
### BUG FIXES


@@ -13,7 +13,6 @@ endif
LD_FLAGS = -X github.com/tendermint/tendermint/version.TMCoreSemVer=$(VERSION)
BUILD_FLAGS = -mod=readonly -ldflags "$(LD_FLAGS)"
HTTPS_GIT := https://github.com/tendermint/tendermint.git
DOCKER_BUF := docker run -v $(shell pwd):/workspace --workdir /workspace bufbuild/buf
CGO_ENABLED ?= 0
# handle nostrip
@@ -70,34 +69,59 @@ install:
CGO_ENABLED=$(CGO_ENABLED) go install $(BUILD_FLAGS) -tags $(BUILD_TAGS) ./cmd/tendermint
.PHONY: install
###############################################################################
### Mocks ###
###############################################################################
mockery:
go generate -run="./scripts/mockery_generate.sh" ./...
.PHONY: mockery
###############################################################################
### Protobuf ###
###############################################################################
proto-all: proto-gen proto-lint proto-check-breaking
.PHONY: proto-all
check-proto-deps:
ifeq (,$(shell which protoc-gen-gogofaster))
@go install github.com/gogo/protobuf/protoc-gen-gogofaster@latest
endif
.PHONY: check-proto-deps
proto-gen:
@docker pull -q tendermintdev/docker-build-proto
check-proto-format-deps:
ifeq (,$(shell which clang-format))
$(error "clang-format is required for Protobuf formatting. See instructions for your platform on how to install it.")
endif
.PHONY: check-proto-format-deps
proto-gen: check-proto-deps
@echo "Generating Protobuf files"
@docker run -v $(shell pwd):/workspace --workdir /workspace tendermintdev/docker-build-proto sh ./scripts/protocgen.sh
@go run github.com/bufbuild/buf/cmd/buf generate
@mv ./proto/tendermint/abci/types.pb.go ./abci/types/
.PHONY: proto-gen
proto-lint:
@$(DOCKER_BUF) check lint --error-format=json
# These targets are provided for convenience and are intended for local
# execution only.
proto-lint: check-proto-deps
@echo "Linting Protobuf files"
@go run github.com/bufbuild/buf/cmd/buf lint
.PHONY: proto-lint
proto-format:
proto-format: check-proto-format-deps
@echo "Formatting Protobuf files"
docker run -v $(shell pwd):/workspace --workdir /workspace tendermintdev/docker-build-proto find ./ -not -path "./third_party/*" -name *.proto -exec clang-format -i {} \;
@find . -name '*.proto' -path "./proto/*" -exec clang-format -i {} \;
.PHONY: proto-format
proto-check-breaking:
@$(DOCKER_BUF) check breaking --against-input .git#branch=master
proto-check-breaking: check-proto-deps
@echo "Checking for breaking changes in Protobuf files against local branch"
@echo "Note: This is only useful if your changes have not yet been committed."
@echo " Otherwise read up on buf's \"breaking\" command usage:"
@echo " https://docs.buf.build/breaking/usage"
@go run github.com/bufbuild/buf/cmd/buf breaking --against ".git"
.PHONY: proto-check-breaking
proto-check-breaking-ci:
@$(DOCKER_BUF) check breaking --against-input $(HTTPS_GIT)#branch=master
@go run github.com/bufbuild/buf/cmd/buf breaking --against $(HTTPS_GIT)#branch=v0.34.x
.PHONY: proto-check-breaking-ci
###############################################################################
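With these targets in place, a typical local protobuf workflow would run them in sequence. This is a sketch of the intended usage, assuming Docker is available for `proto-gen`, a Go toolchain for the `go run`-invoked `buf`, and `clang-format` for formatting, as the `check-*-deps` targets above verify:

```
make proto-gen
make proto-format
make proto-lint
make proto-check-breaking
```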

View File

@@ -2,13 +2,28 @@
This guide provides instructions for upgrading to specific versions of Tendermint Core.
## v0.34.20
### Feature: Priority Mempool
This release backports an implementation of the Priority Mempool from the v0.35
branch. This implementation of the mempool permits the application to set a
priority on each transaction during CheckTx, and during block selection the
highest-priority transactions are chosen (subject to the constraints on size
and gas cost).
Operators can enable the priority mempool by setting `mempool.version` to
`"v1"` in the `config.toml`. For more technical details about the priority
mempool, see [ADR 067: Mempool
Refactor](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-067-mempool-refactor.md).
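As a concrete example, the relevant `config.toml` excerpt looks like this; only the `version` key needs to change, and the snippet mirrors the configuration template shipped with this release:

```toml
[mempool]
# Mempool version to use:
# 1) "v0" - (default) FIFO mempool.
# 2) "v1" - prioritized mempool.
version = "v1"
```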
## v0.34.0
**Upgrading to Tendermint 0.34 requires a blockchain restart.**
This release is not compatible with previous blockchains due to changes to
the encoding format (see "Protocol Buffers," below) and the block header (see "Blockchain Protocol").
Note also that Tendermint 0.34 requires Go 1.15 or higher.
### ABCI Changes
@@ -112,7 +127,7 @@ Tendermint 0.34 includes new and updated consensus parameters.
#### Evidence Parameters
* `MaxBytes`, which caps the total amount of evidence. The default is 1048576 (1 MB).
### Crypto
@@ -188,7 +203,7 @@ blockchains, we recommend that you check the chain ID.
### Version
Version is now set through Go linker flags `ld_flags`. Applications that are using tendermint as a library should set this at compile time.
Example:
@@ -196,7 +211,7 @@ Example:
go install -mod=readonly -ldflags "-X github.com/tendermint/tendermint/version.TMCoreSemVer=$(go list -m github.com/tendermint/tendermint | sed 's/ /\@/g') -s -w " -trimpath ./cmd
```
Additionally, the exported constant `version.Version` is now `version.TMCoreSemVer`.
## v0.33.4

View File

@@ -14,6 +14,8 @@ const (
echoRetryIntervalSeconds = 1
)
//go:generate ../../scripts/mockery_generate.sh Client
// Client defines an interface for an ABCI client.
// All `Async` methods return a `ReqRes` object.
// All `Sync` methods return the appropriate protobuf ResponseXxx struct and an error.
@@ -40,6 +42,7 @@ type Client interface {
OfferSnapshotAsync(types.RequestOfferSnapshot) *ReqRes
LoadSnapshotChunkAsync(types.RequestLoadSnapshotChunk) *ReqRes
ApplySnapshotChunkAsync(types.RequestApplySnapshotChunk) *ReqRes
PrepareProposalAsync(types.RequestPrepareProposal) *ReqRes
FlushSync() error
EchoSync(msg string) (*types.ResponseEcho, error)
@@ -56,6 +59,7 @@ type Client interface {
OfferSnapshotSync(types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error)
LoadSnapshotChunkSync(types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error)
ApplySnapshotChunkSync(types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error)
PrepareProposalSync(types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error)
}
//----------------------------------------
@@ -81,9 +85,15 @@ type ReqRes struct {
*sync.WaitGroup
*types.Response // Not set atomically, so be sure to use WaitGroup.
mtx tmsync.Mutex
done bool // Gets set to true once *after* WaitGroup.Done().
cb func(*types.Response) // A single callback that may be set.
mtx tmsync.Mutex
// callbackInvoked tracks whether the callback was already invoked
// during the regular execution of the request. This allows a client
// to set the callback at any time without risk of invoking it twice
// by accident: once when 'SetCallback' is called and once during the
// normal handling of the request.
callbackInvoked bool
cb func(*types.Response) // A single callback that may be set.
}
func NewReqRes(req *types.Request) *ReqRes {
@@ -92,8 +102,8 @@ func NewReqRes(req *types.Request) *ReqRes {
WaitGroup: waitGroup1(),
Response: nil,
done: false,
cb: nil,
callbackInvoked: false,
cb: nil,
}
}
@@ -103,7 +113,7 @@ func NewReqRes(req *types.Request) *ReqRes {
func (r *ReqRes) SetCallback(cb func(res *types.Response)) {
r.mtx.Lock()
if r.done {
if r.callbackInvoked {
r.mtx.Unlock()
cb(r.Response)
return
@@ -122,6 +132,7 @@ func (r *ReqRes) InvokeCallback() {
if r.cb != nil {
r.cb(r.Response)
}
r.callbackInvoked = true
}
// GetCallback returns the configured callback of the ReqRes object which may be
@@ -136,13 +147,6 @@ func (r *ReqRes) GetCallback() func(*types.Response) {
return r.cb
}
// SetDone marks the ReqRes object as done.
func (r *ReqRes) SetDone() {
r.mtx.Lock()
r.done = true
r.mtx.Unlock()
}
func waitGroup1() (wg *sync.WaitGroup) {
wg = &sync.WaitGroup{}
wg.Add(1)

View File

@@ -66,7 +66,6 @@ func (cli *grpcClient) OnStart() error {
cli.mtx.Lock()
defer cli.mtx.Unlock()
reqres.SetDone()
reqres.Done()
// Notify client listener if set
@@ -75,9 +74,7 @@ func (cli *grpcClient) OnStart() error {
}
// Notify reqRes listener if set
if cb := reqres.GetCallback(); cb != nil {
cb(reqres.Response)
}
reqres.InvokeCallback()
}
for reqres := range cli.chReqRes {
if reqres != nil {
@@ -303,6 +300,22 @@ func (cli *grpcClient) ApplySnapshotChunkAsync(params types.RequestApplySnapshot
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_ApplySnapshotChunk{ApplySnapshotChunk: res}})
}
func (cli *grpcClient) PrepareProposalAsync(params types.RequestPrepareProposal) *ReqRes {
req := types.ToRequestPrepareProposal(params)
res, err := cli.client.PrepareProposal(context.Background(), req.GetPrepareProposal(), grpc.WaitForReady(true))
if err != nil {
cli.StopForError(err)
}
return cli.finishAsyncCall(
req,
&types.Response{
Value: &types.Response_PrepareProposal{
PrepareProposal: res,
},
},
)
}
// finishAsyncCall creates a ReqRes for an async call, and immediately populates it
// with the response. We don't complete it until it's been ordered via the channel.
func (cli *grpcClient) finishAsyncCall(req *types.Request, res *types.Response) *ReqRes {
@@ -343,7 +356,9 @@ func (cli *grpcClient) finishSyncCall(reqres *ReqRes) *types.Response {
//----------------------------------------
func (cli *grpcClient) FlushSync() error {
return nil
reqres := cli.FlushAsync()
cli.finishSyncCall(reqres).GetFlush()
return cli.Error()
}
func (cli *grpcClient) EchoSync(msg string) (*types.ResponseEcho, error) {
@@ -418,3 +433,10 @@ func (cli *grpcClient) ApplySnapshotChunkSync(
reqres := cli.ApplySnapshotChunkAsync(params)
return cli.finishSyncCall(reqres).GetApplySnapshotChunk(), cli.Error()
}
func (cli *grpcClient) PrepareProposalSync(
params types.RequestPrepareProposal,
) (*types.ResponsePrepareProposal, error) {
reqres := cli.PrepareProposalAsync(params)
return cli.finishSyncCall(reqres).GetPrepareProposal(), cli.Error()
}

View File

@@ -207,6 +207,17 @@ func (app *localClient) ApplySnapshotChunkAsync(req types.RequestApplySnapshotCh
)
}
func (app *localClient) PrepareProposalAsync(req types.RequestPrepareProposal) *ReqRes {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.PrepareProposal(req)
return app.callback(
types.ToRequestPrepareProposal(req),
types.ToResponsePrepareProposal(res),
)
}
//-------------------------------------------------------
func (app *localClient) FlushSync() error {
@@ -323,16 +334,26 @@ func (app *localClient) ApplySnapshotChunkSync(
return &res, nil
}
func (app *localClient) PrepareProposalSync(
req types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.PrepareProposal(req)
return &res, nil
}
//-------------------------------------------------------
func (app *localClient) callback(req *types.Request, res *types.Response) *ReqRes {
app.Callback(req, res)
return newLocalReqRes(req, res)
rr := newLocalReqRes(req, res)
rr.callbackInvoked = true
return rr
}
func newLocalReqRes(req *types.Request, res *types.Response) *ReqRes {
reqRes := NewReqRes(req)
reqRes.Response = res
reqRes.SetDone()
return reqRes
}

View File

@@ -1,4 +1,4 @@
// Code generated by mockery v1.1.1. DO NOT EDIT.
// Code generated by mockery. DO NOT EDIT.
package mocks
@@ -575,6 +575,45 @@ func (_m *Client) OnStop() {
_m.Called()
}
// PrepareProposalAsync provides a mock function with given fields: _a0
func (_m *Client) PrepareProposalAsync(_a0 types.RequestPrepareProposal) *abcicli.ReqRes {
ret := _m.Called(_a0)
var r0 *abcicli.ReqRes
if rf, ok := ret.Get(0).(func(types.RequestPrepareProposal) *abcicli.ReqRes); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*abcicli.ReqRes)
}
}
return r0
}
// PrepareProposalSync provides a mock function with given fields: _a0
func (_m *Client) PrepareProposalSync(_a0 types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
ret := _m.Called(_a0)
var r0 *types.ResponsePrepareProposal
if rf, ok := ret.Get(0).(func(types.RequestPrepareProposal) *types.ResponsePrepareProposal); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.ResponsePrepareProposal)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(types.RequestPrepareProposal) error); ok {
r1 = rf(_a0)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// QueryAsync provides a mock function with given fields: _a0
func (_m *Client) QueryAsync(_a0 types.RequestQuery) *abcicli.ReqRes {
ret := _m.Called(_a0)
@@ -734,3 +773,18 @@ func (_m *Client) String() string {
return r0
}
type NewClientT interface {
mock.TestingT
Cleanup(func())
}
// NewClient creates a new instance of Client. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
func NewClient(t NewClientT) *Client {
mock := &Client{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}
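To illustrate how the generated `NewClient` constructor and the new PrepareProposal mocks fit together, here is a hedged sketch of a test; the import paths and the empty request value are assumptions, but the mock methods and constructor are exactly those generated above:

```go
package mocks_test

import (
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	"github.com/tendermint/tendermint/abci/client/mocks"
	"github.com/tendermint/tendermint/abci/types"
)

func TestPrepareProposalSyncMock(t *testing.T) {
	// NewClient registers the testing interface and a cleanup that
	// asserts all expectations were met when the test finishes.
	client := mocks.NewClient(t)

	client.
		On("PrepareProposalSync", mock.AnythingOfType("types.RequestPrepareProposal")).
		Return(&types.ResponsePrepareProposal{}, nil)

	res, err := client.PrepareProposalSync(types.RequestPrepareProposal{})
	require.NoError(t, err)
	require.NotNil(t, res)
}
```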

View File

@@ -279,6 +279,10 @@ func (cli *socketClient) ApplySnapshotChunkAsync(req types.RequestApplySnapshotC
return cli.queueRequest(types.ToRequestApplySnapshotChunk(req))
}
func (cli *socketClient) PrepareProposalAsync(req types.RequestPrepareProposal) *ReqRes {
return cli.queueRequest(types.ToRequestPrepareProposal(req))
}
//----------------------------------------
func (cli *socketClient) FlushSync() error {
@@ -417,6 +421,16 @@ func (cli *socketClient) ApplySnapshotChunkSync(
return reqres.Response.GetApplySnapshotChunk(), cli.Error()
}
func (cli *socketClient) PrepareProposalSync(
req types.RequestPrepareProposal,
) (*types.ResponsePrepareProposal, error) {
reqres := cli.queueRequest(types.ToRequestPrepareProposal(req))
if err := cli.FlushSync(); err != nil {
return nil, err
}
return reqres.Response.GetPrepareProposal(), nil
}
//----------------------------------------
func (cli *socketClient) queueRequest(req *types.Request) *ReqRes {

View File

@@ -2,6 +2,7 @@ package abcicli_test
import (
"fmt"
"sync"
"testing"
"time"
@@ -118,3 +119,71 @@ func (slowApp) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock
time.Sleep(200 * time.Millisecond)
return types.ResponseBeginBlock{}
}
// TestCallbackInvokedWhenSetLate ensures that the callback is invoked when
// set after the client completes the call into the app. Currently this
// test relies on the callback being allowed to be invoked twice if set multiple
// times, once when set early and once when set late.
func TestCallbackInvokedWhenSetLate(t *testing.T) {
wg := &sync.WaitGroup{}
wg.Add(1)
app := blockedABCIApplication{
wg: wg,
}
_, c := setupClientServer(t, app)
reqRes := c.CheckTxAsync(types.RequestCheckTx{})
done := make(chan struct{})
cb := func(_ *types.Response) {
close(done)
}
reqRes.SetCallback(cb)
app.wg.Done()
<-done
var called bool
cb = func(_ *types.Response) {
called = true
}
reqRes.SetCallback(cb)
require.True(t, called)
}
type blockedABCIApplication struct {
wg *sync.WaitGroup
types.BaseApplication
}
func (b blockedABCIApplication) CheckTx(r types.RequestCheckTx) types.ResponseCheckTx {
b.wg.Wait()
return b.BaseApplication.CheckTx(r)
}
// TestCallbackInvokedWhenSetEarly ensures that the callback is invoked when
// set before the client completes the call into the app.
func TestCallbackInvokedWhenSetEarly(t *testing.T) {
wg := &sync.WaitGroup{}
wg.Add(1)
app := blockedABCIApplication{
wg: wg,
}
_, c := setupClientServer(t, app)
reqRes := c.CheckTxAsync(types.RequestCheckTx{})
done := make(chan struct{})
cb := func(_ *types.Response) {
close(done)
}
reqRes.SetCallback(cb)
app.wg.Done()
called := func() bool {
select {
case <-done:
return true
default:
return false
}
}
require.Eventually(t, called, time.Second, time.Millisecond*25)
}

View File

@@ -169,4 +169,4 @@ func (app *Application) Query(reqQuery types.RequestQuery) (resQuery types.Respo
resQuery.Height = app.state.Height
return resQuery
}

View File

@@ -126,7 +126,7 @@ func (app *PersistentKVStoreApplication) BeginBlock(req types.RequestBeginBlock)
// Punish validators who committed equivocation.
for _, ev := range req.ByzantineValidators {
if ev.Type == types.EvidenceType_DUPLICATE_VOTE {
if ev.Type == types.MisbehaviorType_DUPLICATE_VOTE {
addr := string(ev.Validator.Address)
if pubKey, ok := app.valAddrToPubKeyMap[addr]; ok {
app.updateValidator(types.ValidatorUpdate{
@@ -170,6 +170,11 @@ func (app *PersistentKVStoreApplication) ApplySnapshotChunk(
return types.ResponseApplySnapshotChunk{Result: types.ResponseApplySnapshotChunk_ABORT}
}
func (app *PersistentKVStoreApplication) PrepareProposal(
req types.RequestPrepareProposal) types.ResponsePrepareProposal {
return types.ResponsePrepareProposal{}
}
//---------------------------------------------
// update validators

View File

@@ -19,6 +19,7 @@ type Application interface {
// Consensus Connection
InitChain(RequestInitChain) ResponseInitChain // Initialize blockchain w validators/other info from TendermintCore
PrepareProposal(RequestPrepareProposal) ResponsePrepareProposal
BeginBlock(RequestBeginBlock) ResponseBeginBlock // Signals the beginning of a block
DeliverTx(RequestDeliverTx) ResponseDeliverTx // Deliver a tx for full processing
EndBlock(RequestEndBlock) ResponseEndBlock // Signals the end of a block, returns changes to the validator set
@@ -95,6 +96,10 @@ func (BaseApplication) ApplySnapshotChunk(req RequestApplySnapshotChunk) Respons
return ResponseApplySnapshotChunk{}
}
func (BaseApplication) PrepareProposal(req RequestPrepareProposal) ResponsePrepareProposal {
return ResponsePrepareProposal{}
}
//-------------------------------------------------------
// GRPCApplication is a GRPC wrapper for Application
@@ -182,3 +187,9 @@ func (app *GRPCApplication) ApplySnapshotChunk(
res := app.app.ApplySnapshotChunk(*req)
return &res, nil
}
func (app *GRPCApplication) PrepareProposal(
ctx context.Context, req *RequestPrepareProposal) (*ResponsePrepareProposal, error) {
res := app.app.PrepareProposal(*req)
return &res, nil
}
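To show what the new hook asks of applications, here is a hedged sketch of a minimal application that opts in by embedding `BaseApplication` and overriding only `PrepareProposal`. The response fields that carry the selected transactions depend on the proto definitions, so the sketch returns the empty response, exactly as the `PersistentKVStoreApplication` above does:

```go
package main

import (
	"fmt"

	abci "github.com/tendermint/tendermint/abci/types"
)

// ProposerApp embeds BaseApplication, inheriting no-op defaults for
// every ABCI method, including the new PrepareProposal.
type ProposerApp struct {
	abci.BaseApplication
}

// PrepareProposal runs on the block proposer before a block is
// assembled; a real application would reorder or filter transactions
// here. This sketch accepts the default proposal unchanged.
func (app *ProposerApp) PrepareProposal(req abci.RequestPrepareProposal) abci.ResponsePrepareProposal {
	return abci.ResponsePrepareProposal{}
}

func main() {
	var app abci.Application = &ProposerApp{}
	fmt.Printf("%T implements abci.Application\n", app)
}
```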

View File

@@ -159,6 +159,12 @@ func ToRequestApplySnapshotChunk(req RequestApplySnapshotChunk) *Request {
}
}
func ToRequestPrepareProposal(req RequestPrepareProposal) *Request {
return &Request{
Value: &Request_PrepareProposal{&req},
}
}
//----------------------------------------
func ToResponseException(errStr string) *Response {
@@ -256,3 +262,9 @@ func ToResponseApplySnapshotChunk(res ResponseApplySnapshotChunk) *Response {
Value: &Response_ApplySnapshotChunk{&res},
}
}
func ToResponsePrepareProposal(res ResponsePrepareProposal) *Response {
return &Response{
Value: &Response_PrepareProposal{&res},
}
}

File diff suppressed because it is too large

View File

@@ -369,6 +369,12 @@ FOR_LOOP:
// currently necessary.
err := state.Validators.VerifyCommitLight(
chainID, firstID, first.Height, second.LastCommit)
if err == nil {
// validate the block before we persist it
err = bcR.blockExec.ValidateBlock(state, first)
}
if err != nil {
bcR.Logger.Error("Error in validation", "err", err)
peerID := bcR.pool.RedoRequest(first.Height)
@@ -386,29 +392,29 @@ FOR_LOOP:
bcR.Switch.StopPeerForError(peer2, fmt.Errorf("blockchainReactor validation error: %v", err))
}
continue FOR_LOOP
} else {
bcR.pool.PopRequest()
// TODO: batch saves so we dont persist to disk every block
bcR.store.SaveBlock(first, firstParts, second.LastCommit)
// TODO: same thing for app - but we would need a way to
// get the hash without persisting the state
var err error
state, _, err = bcR.blockExec.ApplyBlock(state, firstID, first)
if err != nil {
// TODO This is bad, are we zombie?
panic(fmt.Sprintf("Failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
}
blocksSynced++
if blocksSynced%100 == 0 {
lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
bcR.Logger.Info("Fast Sync Rate", "height", bcR.pool.height,
"max_peer_height", bcR.pool.MaxPeerHeight(), "blocks/s", lastRate)
lastHundred = time.Now()
}
}
bcR.pool.PopRequest()
// TODO: batch saves so we dont persist to disk every block
bcR.store.SaveBlock(first, firstParts, second.LastCommit)
// TODO: same thing for app - but we would need a way to
// get the hash without persisting the state
state, _, err = bcR.blockExec.ApplyBlock(state, firstID, first)
if err != nil {
// TODO This is bad, are we zombie?
panic(fmt.Sprintf("Failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
}
blocksSynced++
if blocksSynced%100 == 0 {
lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
bcR.Logger.Info("Fast Sync Rate", "height", bcR.pool.height,
"max_peer_height", bcR.pool.MaxPeerHeight(), "blocks/s", lastRate)
lastHundred = time.Now()
}
continue FOR_LOOP
case <-bcR.Quit():

View File

@@ -58,7 +58,7 @@ func (rt *Routine) setMetrics(metrics *Metrics) {
}
func (rt *Routine) start() {
rt.logger.Info(fmt.Sprintf("%s: run", rt.name))
rt.logger.Info("routine start", "msg", log.NewLazySprintf("%s: run", rt.name))
running := atomic.CompareAndSwapUint32(rt.running, uint32(0), uint32(1))
if !running {
panic(fmt.Sprintf("%s is already running", rt.name))
@@ -98,7 +98,7 @@ func (rt *Routine) start() {
return
}
rt.metrics.EventsOut.With("routine", rt.name).Add(1)
rt.logger.Debug(fmt.Sprintf("%s: produced %T %+v", rt.name, oEvent, oEvent))
rt.logger.Debug("routine start", "msg", log.NewLazySprintf("%s: produced %T %+v", rt.name, oEvent, oEvent))
// Skip rTrySchedule and rProcessBlock events as they clutter the history
// due to their frequency.
@@ -118,7 +118,7 @@ func (rt *Routine) start() {
// XXX: look into returning OpError in the net package
func (rt *Routine) send(event Event) bool {
rt.logger.Debug(fmt.Sprintf("%s: received %T %+v", rt.name, event, event))
rt.logger.Debug("routine send", "msg", log.NewLazySprintf("%s: received %T %+v", rt.name, event, event))
if !rt.isRunning() {
return false
}
@@ -150,7 +150,7 @@ func (rt *Routine) stop() {
return
}
rt.logger.Info(fmt.Sprintf("%s: stop", rt.name))
rt.logger.Info("routine stop", "msg", log.NewLazySprintf("%s: stop", rt.name))
rt.queue.Dispose() // this should block until all queue items are free?
}
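The point of switching these call sites to `log.NewLazySprintf` is that a plain `fmt.Sprintf` argument is formatted even when the debug line is filtered out; the lazy wrapper defers formatting until the logger actually renders the value. A minimal sketch of the idea follows; the real helper lives in `libs/log`, so this is an illustration of the pattern, not its exact implementation:

```go
package main

import "fmt"

// LazySprintf holds a format string and its arguments without
// formatting them up front.
type LazySprintf struct {
	format string
	args   []interface{}
}

// NewLazySprintf defers fmt.Sprintf until String is called, which a
// structured logger only does if the line is actually emitted.
func NewLazySprintf(format string, args ...interface{}) *LazySprintf {
	return &LazySprintf{format: format, args: args}
}

// String satisfies fmt.Stringer.
func (l *LazySprintf) String() string {
	return fmt.Sprintf(l.format, l.args...)
}

func main() {
	v := NewLazySprintf("%s: run", "demo")
	fmt.Println(v) // fmt.Println invokes String() only here
}
```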

buf.gen.yaml Normal file
View File

@@ -0,0 +1,9 @@
version: v1
plugins:
- name: gogofaster
out: ./proto/
opt:
- Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types
- Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration
- plugins=grpc
- paths=source_relative

buf.work.yaml Normal file
View File

@@ -0,0 +1,3 @@
version: v1
directories:
- proto

View File

@@ -0,0 +1,66 @@
package commands
import (
"errors"
"path/filepath"
"sync"
"github.com/spf13/cobra"
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/syndtr/goleveldb/leveldb/util"
"github.com/tendermint/tendermint/libs/log"
)
var CompactGoLevelDBCmd = &cobra.Command{
Use: "experimental-compact-goleveldb",
Short: "force compacts the tendermint storage engine (only GoLevelDB supported)",
Long: `
This is a temporary utility command that performs a force compaction on the state
and blockstores to reduce disk space for a pruning node. This should only be run
once the node has stopped. This command will likely be omitted in the future after
the planned refactor to the storage engine.
Currently, only GoLevelDB is supported.
`,
RunE: func(cmd *cobra.Command, args []string) error {
if config.DBBackend != "goleveldb" {
return errors.New("compaction is currently only supported with goleveldb")
}
compactGoLevelDBs(config.RootDir, logger)
return nil
},
}
func compactGoLevelDBs(rootDir string, logger log.Logger) {
dbNames := []string{"state", "blockstore"}
o := &opt.Options{
DisableSeeksCompaction: true,
}
wg := sync.WaitGroup{}
for _, dbName := range dbNames {
dbName := dbName
wg.Add(1)
go func() {
defer wg.Done()
dbPath := filepath.Join(rootDir, "data", dbName+".db")
store, err := leveldb.OpenFile(dbPath, o)
if err != nil {
logger.Error("failed to initialize tendermint db", "path", dbPath, "err", err)
return
}
defer store.Close()
logger.Info("starting compaction...", "db", dbPath)
err = store.CompactRange(util.Range{Start: nil, Limit: nil})
if err != nil {
logger.Error("failed to compact tendermint db", "path", dbPath, "err", err)
}
}()
}
wg.Wait()
}
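For example, once the node process has been stopped, an operator would invoke the command directly. The `--home` flag shown here is the standard root flag for pointing at the node's data directory and is included as an assumption:

```
tendermint experimental-compact-goleveldb --home ~/.tendermint
```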

View File

@@ -0,0 +1,229 @@
package commands
import (
"errors"
"fmt"
"strings"
"github.com/spf13/cobra"
dbm "github.com/tendermint/tm-db"
abcitypes "github.com/tendermint/tendermint/abci/types"
tmcfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/libs/progressbar"
"github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/state/indexer"
blockidxkv "github.com/tendermint/tendermint/state/indexer/block/kv"
"github.com/tendermint/tendermint/state/indexer/sink/psql"
"github.com/tendermint/tendermint/state/txindex"
"github.com/tendermint/tendermint/state/txindex/kv"
"github.com/tendermint/tendermint/types"
)
const (
reindexFailed = "event re-index failed: "
)
var (
ErrHeightNotAvailable = errors.New("height is not available")
ErrInvalidRequest = errors.New("invalid request")
)
// ReIndexEventCmd constructs a command to re-index events in a block height interval.
var ReIndexEventCmd = &cobra.Command{
Use: "reindex-event",
Short: "reindex events to the event store backends",
Long: `
reindex-event is an offline tool to re-index block and tx events to the event sinks.
You can run this command when the event store backend has been dropped/disconnected or
when you want to replace the backend. The default start-height is 0, meaning the tool
will start reindexing from the base block height (inclusive); the default end-height is
0, meaning the tool will reindex up to the latest block height (inclusive). You can
omit either or both arguments.
`,
Example: `
tendermint reindex-event
tendermint reindex-event --start-height 2
tendermint reindex-event --end-height 10
tendermint reindex-event --start-height 2 --end-height 10
`,
Run: func(cmd *cobra.Command, args []string) {
bs, ss, err := loadStateAndBlockStore(config)
if err != nil {
fmt.Println(reindexFailed, err)
return
}
if err := checkValidHeight(bs); err != nil {
fmt.Println(reindexFailed, err)
return
}
bi, ti, err := loadEventSinks(config)
if err != nil {
fmt.Println(reindexFailed, err)
return
}
riArgs := eventReIndexArgs{
startHeight: startHeight,
endHeight: endHeight,
blockIndexer: bi,
txIndexer: ti,
blockStore: bs,
stateStore: ss,
}
if err := eventReIndex(cmd, riArgs); err != nil {
panic(fmt.Errorf("%s: %w", reindexFailed, err))
}
fmt.Println("event re-index finished")
},
}
var (
startHeight int64
endHeight int64
)
func init() {
ReIndexEventCmd.Flags().Int64Var(&startHeight, "start-height", 0, "the block height to start the re-index from (inclusive)")
ReIndexEventCmd.Flags().Int64Var(&endHeight, "end-height", 0, "the block height at which to end the re-index (inclusive)")
}
func loadEventSinks(cfg *tmcfg.Config) (indexer.BlockIndexer, txindex.TxIndexer, error) {
switch strings.ToLower(cfg.TxIndex.Indexer) {
case "null":
return nil, nil, errors.New("found null event sink, please check the tx-index section in the config.toml")
case "psql":
conn := cfg.TxIndex.PsqlConn
if conn == "" {
return nil, nil, errors.New("the psql connection settings cannot be empty")
}
es, err := psql.NewEventSink(conn, cfg.ChainID())
if err != nil {
return nil, nil, err
}
return es.BlockIndexer(), es.TxIndexer(), nil
case "kv":
store, err := dbm.NewDB("tx_index", dbm.BackendType(cfg.DBBackend), cfg.DBDir())
if err != nil {
return nil, nil, err
}
txIndexer := kv.NewTxIndex(store)
blockIndexer := blockidxkv.New(dbm.NewPrefixDB(store, []byte("block_events")))
return blockIndexer, txIndexer, nil
default:
return nil, nil, fmt.Errorf("unsupported event sink type: %s", cfg.TxIndex.Indexer)
}
}
type eventReIndexArgs struct {
startHeight int64
endHeight int64
blockIndexer indexer.BlockIndexer
txIndexer txindex.TxIndexer
blockStore state.BlockStore
stateStore state.Store
}
func eventReIndex(cmd *cobra.Command, args eventReIndexArgs) error {
var bar progressbar.Bar
bar.NewOption(args.startHeight-1, args.endHeight)
fmt.Println("start re-indexing events:")
defer bar.Finish()
for i := args.startHeight; i <= args.endHeight; i++ {
select {
case <-cmd.Context().Done():
return fmt.Errorf("event re-index terminated at height %d: %w", i, cmd.Context().Err())
default:
b := args.blockStore.LoadBlock(i)
if b == nil {
return fmt.Errorf("not able to load block at height %d from the blockstore", i)
}
r, err := args.stateStore.LoadABCIResponses(i)
if err != nil {
return fmt.Errorf("not able to load ABCI Response at height %d from the statestore", i)
}
e := types.EventDataNewBlockHeader{
Header: b.Header,
NumTxs: int64(len(b.Txs)),
ResultBeginBlock: *r.BeginBlock,
ResultEndBlock: *r.EndBlock,
}
var batch *txindex.Batch
if e.NumTxs > 0 {
batch = txindex.NewBatch(e.NumTxs)
for i := range b.Data.Txs {
tr := abcitypes.TxResult{
Height: b.Height,
Index: uint32(i),
Tx: b.Data.Txs[i],
Result: *(r.DeliverTxs[i]),
}
if err = batch.Add(&tr); err != nil {
return fmt.Errorf("adding tx to batch: %w", err)
}
}
if err := args.txIndexer.AddBatch(batch); err != nil {
return fmt.Errorf("tx event re-index at height %d failed: %w", i, err)
}
}
if err := args.blockIndexer.Index(e); err != nil {
return fmt.Errorf("block event re-index at height %d failed: %w", i, err)
}
}
bar.Play(i)
}
return nil
}
func checkValidHeight(bs state.BlockStore) error {
base := bs.Base()
if startHeight == 0 {
startHeight = base
fmt.Printf("set the start block height to the base height of the blockstore %d \n", base)
}
if startHeight < base {
return fmt.Errorf("%s (requested start height: %d, base height: %d)",
ErrHeightNotAvailable, startHeight, base)
}
height := bs.Height()
if startHeight > height {
return fmt.Errorf(
"%s (requested start height: %d, store height: %d)", ErrHeightNotAvailable, startHeight, height)
}
if endHeight == 0 || endHeight > height {
endHeight = height
fmt.Printf("set the end block height to the latest height of the blockstore %d \n", height)
}
if endHeight < base {
return fmt.Errorf(
"%s (requested end height: %d, base height: %d)", ErrHeightNotAvailable, endHeight, base)
}
if endHeight < startHeight {
return fmt.Errorf(
"%s (requested the end height: %d is less than the start height: %d)",
ErrInvalidRequest, endHeight, startHeight)
}
return nil
}

View File

@@ -0,0 +1,192 @@
package commands
import (
"context"
"errors"
"testing"
"github.com/spf13/cobra"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
abcitypes "github.com/tendermint/tendermint/abci/types"
tmcfg "github.com/tendermint/tendermint/config"
prototmstate "github.com/tendermint/tendermint/proto/tendermint/state"
blockmocks "github.com/tendermint/tendermint/state/indexer/mocks"
"github.com/tendermint/tendermint/state/mocks"
txmocks "github.com/tendermint/tendermint/state/txindex/mocks"
"github.com/tendermint/tendermint/types"
dbm "github.com/tendermint/tm-db"
)
const (
height int64 = 10
base int64 = 2
)
func setupReIndexEventCmd() *cobra.Command {
reIndexEventCmd := &cobra.Command{
Use: ReIndexEventCmd.Use,
Run: func(cmd *cobra.Command, args []string) {},
}
_ = reIndexEventCmd.ExecuteContext(context.Background())
return reIndexEventCmd
}
func TestReIndexEventCheckHeight(t *testing.T) {
mockBlockStore := &mocks.BlockStore{}
mockBlockStore.
On("Base").Return(base).
On("Height").Return(height)
testCases := []struct {
startHeight int64
endHeight int64
validHeight bool
}{
{0, 0, true},
{0, base, true},
{0, base - 1, false},
{0, height, true},
{0, height + 1, true},
{0, 0, true},
{base - 1, 0, false},
{base, 0, true},
{base, base, true},
{base, base - 1, false},
{base, height, true},
{base, height + 1, true},
{height, 0, true},
{height, base, false},
{height, height - 1, false},
{height, height, true},
{height, height + 1, true},
{height + 1, 0, false},
}
for _, tc := range testCases {
startHeight = tc.startHeight
endHeight = tc.endHeight
err := checkValidHeight(mockBlockStore)
if tc.validHeight {
require.NoError(t, err)
} else {
require.Error(t, err)
}
}
}
func TestLoadEventSink(t *testing.T) {
testCases := []struct {
sinks string
connURL string
loadErr bool
}{
{"", "", true},
{"NULL", "", true},
{"KV", "", false},
{"PSQL", "", true}, // true because empty connect url
// skip to test PSQL connect with correct url
{"UnsupportedSinkType", "wrongUrl", true},
}
for idx, tc := range testCases {
cfg := tmcfg.TestConfig()
cfg.TxIndex.Indexer = tc.sinks
cfg.TxIndex.PsqlConn = tc.connURL
_, _, err := loadEventSinks(cfg)
if tc.loadErr {
require.Error(t, err, idx)
} else {
require.NoError(t, err, idx)
}
}
}
func TestLoadBlockStore(t *testing.T) {
cfg := tmcfg.TestConfig()
cfg.DBPath = t.TempDir()
_, _, err := loadStateAndBlockStore(cfg)
require.Error(t, err)
_, err = dbm.NewDB("blockstore", dbm.GoLevelDBBackend, cfg.DBDir())
require.NoError(t, err)
// Get StateStore
_, err = dbm.NewDB("state", dbm.GoLevelDBBackend, cfg.DBDir())
require.NoError(t, err)
bs, ss, err := loadStateAndBlockStore(cfg)
require.NoError(t, err)
require.NotNil(t, bs)
require.NotNil(t, ss)
}
func TestReIndexEvent(t *testing.T) {
mockBlockStore := &mocks.BlockStore{}
mockStateStore := &mocks.Store{}
mockBlockIndexer := &blockmocks.BlockIndexer{}
mockTxIndexer := &txmocks.TxIndexer{}
mockBlockStore.
On("Base").Return(base).
On("Height").Return(height).
On("LoadBlock", base).Return(nil).Once().
On("LoadBlock", base).Return(&types.Block{Data: types.Data{Txs: types.Txs{make(types.Tx, 1)}}}).
On("LoadBlock", height).Return(&types.Block{Data: types.Data{Txs: types.Txs{make(types.Tx, 1)}}})
dtx := abcitypes.ResponseDeliverTx{}
abciResp := &prototmstate.ABCIResponses{
DeliverTxs: []*abcitypes.ResponseDeliverTx{&dtx},
EndBlock: &abcitypes.ResponseEndBlock{},
BeginBlock: &abcitypes.ResponseBeginBlock{},
}
mockBlockIndexer.
On("Index", mock.AnythingOfType("types.EventDataNewBlockHeader")).Return(errors.New("")).Once().
On("Index", mock.AnythingOfType("types.EventDataNewBlockHeader")).Return(nil)
mockTxIndexer.
On("AddBatch", mock.AnythingOfType("*txindex.Batch")).Return(errors.New("")).Once().
On("AddBatch", mock.AnythingOfType("*txindex.Batch")).Return(nil)
mockStateStore.
On("LoadABCIResponses", base).Return(nil, errors.New("")).Once().
On("LoadABCIResponses", base).Return(abciResp, nil).
On("LoadABCIResponses", height).Return(abciResp, nil)
testCases := []struct {
startHeight int64
endHeight int64
reIndexErr bool
}{
{base, height, true}, // LoadBlock error
{base, height, true}, // LoadABCIResponses error
{base, height, true}, // index block event error
{base, height, true}, // index tx event error
{base, base, false},
{height, height, false},
}
for _, tc := range testCases {
args := eventReIndexArgs{
startHeight: tc.startHeight,
endHeight: tc.endHeight,
blockIndexer: mockBlockIndexer,
txIndexer: mockTxIndexer,
blockStore: mockBlockStore,
stateStore: mockStateStore,
}
err := eventReIndex(setupReIndexEventCmd(), args)
if tc.reIndexErr {
require.Error(t, err)
} else {
require.NoError(t, err)
}
}
}

View File

@@ -25,12 +25,17 @@ var keepAddrBook bool
// ResetStateCmd removes the database of the specified Tendermint core instance.
var ResetStateCmd = &cobra.Command{
Use: "reset-state",
Short: "Remove all the data and WAL",
RunE: func(cmd *cobra.Command, args []string) error {
Use: "reset-state",
Short: "Remove all the data and WAL",
PreRun: deprecateSnakeCase,
RunE: func(cmd *cobra.Command, args []string) (err error) {
config, err = ParseConfig()
if err != nil {
return err
}
return resetState(config.DBDir(), logger)
},
PreRun: deprecateSnakeCase,
}
func init() {
@@ -42,21 +47,37 @@ var ResetPrivValidatorCmd = &cobra.Command{
Use: "unsafe-reset-priv-validator",
Aliases: []string{"unsafe_reset_priv_validator"},
Short: "(unsafe) Reset this node's validator to genesis state",
Run: resetPrivValidator,
PreRun: deprecateSnakeCase,
RunE: resetPrivValidator,
}
// XXX: this is totally unsafe.
// it's only suitable for testnets.
func resetAllCmd(cmd *cobra.Command, args []string) error {
return resetAll(config.DBDir(), config.P2P.AddrBookFile(), config.PrivValidatorKeyFile(),
config.PrivValidatorStateFile(), logger)
func resetAllCmd(cmd *cobra.Command, args []string) (err error) {
config, err = ParseConfig()
if err != nil {
return err
}
return resetAll(
config.DBDir(),
config.P2P.AddrBookFile(),
config.PrivValidatorKeyFile(),
config.PrivValidatorStateFile(),
logger,
)
}
// XXX: this is totally unsafe.
// it's only suitable for testnets.
func resetPrivValidator(cmd *cobra.Command, args []string) {
func resetPrivValidator(cmd *cobra.Command, args []string) (err error) {
config, err = ParseConfig()
if err != nil {
return err
}
resetFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile(), logger)
return nil
}
// resetAll removes address book files plus all data, and resets the privValidator data.
@@ -66,12 +87,18 @@ func resetAll(dbDir, addrBookFile, privValKeyFile, privValStateFile string, logg
} else {
removeAddrBook(addrBookFile, logger)
}
if err := os.RemoveAll(dbDir); err == nil {
logger.Info("Removed all blockchain history", "dir", dbDir)
} else {
logger.Error("Error removing all blockchain history", "dir", dbDir, "err", err)
}
if err := tmos.EnsureDir(dbDir, 0700); err != nil {
logger.Error("unable to recreate dbDir", "err", err)
}
// recreate the dbDir since the privVal state needs to live there
resetFilePV(privValKeyFile, privValStateFile, logger)
return nil
}
@@ -83,7 +110,6 @@ func resetState(dbDir string, logger log.Logger) error {
wal := filepath.Join(dbDir, "cs.wal")
evidence := filepath.Join(dbDir, "evidence.db")
txIndex := filepath.Join(dbDir, "tx_index.db")
peerstore := filepath.Join(dbDir, "peerstore.db")
if tmos.FileExists(blockdb) {
if err := os.RemoveAll(blockdb); err == nil {
@@ -125,13 +151,6 @@ func resetState(dbDir string, logger log.Logger) error {
}
}
if tmos.FileExists(peerstore) {
if err := os.RemoveAll(peerstore); err == nil {
logger.Info("Removed peerstore.db", "dir", peerstore)
} else {
logger.Error("error removing peerstore.db", "dir", peerstore, "err", err)
}
}
if err := tmos.EnsureDir(dbDir, 0700); err != nil {
logger.Error("unable to recreate dbDir", "err", err)
}
@@ -142,13 +161,19 @@ func resetFilePV(privValKeyFile, privValStateFile string, logger log.Logger) {
if _, err := os.Stat(privValKeyFile); err == nil {
pv := privval.LoadFilePVEmptyState(privValKeyFile, privValStateFile)
pv.Reset()
logger.Info("Reset private validator file to genesis state", "keyFile", privValKeyFile,
"stateFile", privValStateFile)
logger.Info(
"Reset private validator file to genesis state",
"keyFile", privValKeyFile,
"stateFile", privValStateFile,
)
} else {
pv := privval.GenFilePV(privValKeyFile, privValStateFile)
pv.Save()
logger.Info("Generated private validator file", "keyFile", privValKeyFile,
"stateFile", privValStateFile)
logger.Info(
"Generated private validator file",
"keyFile", privValKeyFile,
"stateFile", privValStateFile,
)
}
}

View File

@@ -0,0 +1,53 @@
package commands
import (
"path/filepath"
"testing"
"github.com/stretchr/testify/require"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/privval"
)
func Test_ResetAll(t *testing.T) {
config := cfg.TestConfig()
dir := t.TempDir()
config.SetRoot(dir)
cfg.EnsureRoot(dir)
require.NoError(t, initFilesWithConfig(config))
pv := privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile())
pv.LastSignState.Height = 10
pv.Save()
require.NoError(t, resetAll(config.DBDir(), config.P2P.AddrBookFile(), config.PrivValidatorKeyFile(),
config.PrivValidatorStateFile(), logger))
require.DirExists(t, config.DBDir())
require.NoFileExists(t, filepath.Join(config.DBDir(), "block.db"))
require.NoFileExists(t, filepath.Join(config.DBDir(), "state.db"))
require.NoFileExists(t, filepath.Join(config.DBDir(), "evidence.db"))
require.NoFileExists(t, filepath.Join(config.DBDir(), "tx_index.db"))
require.FileExists(t, config.PrivValidatorStateFile())
pv = privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile())
require.Equal(t, int64(0), pv.LastSignState.Height)
}
func Test_ResetState(t *testing.T) {
config := cfg.TestConfig()
dir := t.TempDir()
config.SetRoot(dir)
cfg.EnsureRoot(dir)
require.NoError(t, initFilesWithConfig(config))
pv := privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile())
pv.LastSignState.Height = 10
pv.Save()
require.NoError(t, resetState(config.DBDir(), logger))
require.DirExists(t, config.DBDir())
require.NoFileExists(t, filepath.Join(config.DBDir(), "block.db"))
require.NoFileExists(t, filepath.Join(config.DBDir(), "state.db"))
require.NoFileExists(t, filepath.Join(config.DBDir(), "evidence.db"))
require.NoFileExists(t, filepath.Join(config.DBDir(), "tx_index.db"))
require.FileExists(t, config.PrivValidatorStateFile())
pv = privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile())
// private validator state should still be intact.
require.Equal(t, int64(10), pv.LastSignState.Height)
}

View File

@@ -2,12 +2,14 @@ package commands
import (
"fmt"
"path/filepath"
"github.com/spf13/cobra"
dbm "github.com/tendermint/tm-db"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/libs/os"
"github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/store"
)
@@ -55,6 +57,10 @@ func RollbackState(config *cfg.Config) (int64, []byte, error) {
func loadStateAndBlockStore(config *cfg.Config) (*store.BlockStore, state.Store, error) {
dbType := dbm.BackendType(config.DBBackend)
if !os.FileExists(filepath.Join(config.DBDir(), "blockstore.db")) {
return nil, nil, fmt.Errorf("no blockstore found in %v", config.DBDir())
}
// Get BlockStore
blockStoreDB, err := dbm.NewDB("blockstore", dbType, config.DBDir())
if err != nil {
@@ -62,6 +68,10 @@ func loadStateAndBlockStore(config *cfg.Config) (*store.BlockStore, state.Store,
}
blockStore := store.NewBlockStore(blockStoreDB)
if !os.FileExists(filepath.Join(config.DBDir(), "state.db")) {
return nil, nil, fmt.Errorf("no statestore found in %v", config.DBDir())
}
// Get StateStore
stateDB, err := dbm.NewDB("state", dbType, config.DBDir())
if err != nil {

View File

@@ -29,6 +29,7 @@ func main() {
cmd.GenNodeKeyCmd,
cmd.VersionCmd,
cmd.RollbackStateCmd,
cmd.CompactGoLevelDBCmd,
debug.DebugCmd,
cli.NewCompletionCmd(rootCmd, true),
)

View File

@@ -23,6 +23,11 @@ const (
// DefaultLogLevel defines a default log level as INFO.
DefaultLogLevel = "info"
// Mempool versions. V1 is prioritized mempool, v0 is regular mempool.
// Default is v0.
MempoolV0 = "v0"
MempoolV1 = "v1"
)
// NOTE: Most of the structs & relevant comments + the
@@ -676,6 +681,13 @@ func DefaultFuzzConnConfig() *FuzzConnConfig {
// MempoolConfig defines the configuration options for the Tendermint mempool
type MempoolConfig struct {
// Mempool version to use:
// 1) "v0" - (default) FIFO mempool.
// 2) "v1" - prioritized mempool.
// WARNING: There's a known memory leak with the prioritized mempool
// that the team is working on. Read more here:
// https://github.com/tendermint/tendermint/issues/8775
Version string `mapstructure:"version"`
RootDir string `mapstructure:"home"`
Recheck bool `mapstructure:"recheck"`
Broadcast bool `mapstructure:"broadcast"`
@@ -699,20 +711,39 @@ type MempoolConfig struct {
// Including space needed by encoding (one varint per transaction).
// XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796
MaxBatchBytes int `mapstructure:"max_batch_bytes"`
// TTLDuration, if non-zero, defines the maximum amount of time a transaction
// can exist for in the mempool.
//
// Note, if TTLNumBlocks is also defined, a transaction will be removed if it
// has existed in the mempool at least TTLNumBlocks number of blocks or if its
// insertion time into the mempool is beyond TTLDuration.
TTLDuration time.Duration `mapstructure:"ttl-duration"`
// TTLNumBlocks, if non-zero, defines the maximum number of blocks a transaction
// can exist for in the mempool.
//
// Note, if TTLDuration is also defined, a transaction will be removed if it
// has existed in the mempool at least TTLNumBlocks number of blocks or if
// its insertion time into the mempool is beyond TTLDuration.
TTLNumBlocks int64 `mapstructure:"ttl-num-blocks"`
}
// DefaultMempoolConfig returns a default configuration for the Tendermint mempool
func DefaultMempoolConfig() *MempoolConfig {
return &MempoolConfig{
Version: MempoolV0,
Recheck: true,
Broadcast: true,
WalPath: "",
// Each signature verification takes .5ms, Size reduced until we implement
// ABCI Recheck
Size: 5000,
MaxTxsBytes: 1024 * 1024 * 1024, // 1GB
CacheSize: 10000,
MaxTxBytes: 1024 * 1024, // 1MB
Size: 5000,
MaxTxsBytes: 1024 * 1024 * 1024, // 1GB
CacheSize: 10000,
MaxTxBytes: 1024 * 1024, // 1MB
TTLDuration: 0 * time.Second,
TTLNumBlocks: 0,
}
}
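Combining the new knobs, a node that runs the prioritized mempool and drops transactions after ten minutes or one hundred blocks, whichever comes first, could be configured programmatically like this; the sketch uses only the fields and constants shown above:

```go
package main

import (
	"fmt"
	"time"

	cfg "github.com/tendermint/tendermint/config"
)

func main() {
	m := cfg.DefaultMempoolConfig()
	m.Version = cfg.MempoolV1        // opt in to the prioritized mempool
	m.TTLDuration = 10 * time.Minute // evict by wall-clock age...
	m.TTLNumBlocks = 100             // ...or by block age, whichever comes first
	fmt.Printf("mempool: version=%s ttl=%s ttl-blocks=%d\n", m.Version, m.TTLDuration, m.TTLNumBlocks)
}
```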

View File

@@ -342,6 +342,11 @@ dial_timeout = "{{ .P2P.DialTimeout }}"
#######################################################
[mempool]
# Mempool version to use:
# 1) "v0" - (default) FIFO mempool.
# 2) "v1" - prioritized mempool.
version = "{{ .Mempool.Version }}"
recheck = {{ .Mempool.Recheck }}
broadcast = {{ .Mempool.Broadcast }}
wal_dir = "{{ js .Mempool.WalPath }}"
@@ -371,6 +376,22 @@ max_tx_bytes = {{ .Mempool.MaxTxBytes }}
# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796
max_batch_bytes = {{ .Mempool.MaxBatchBytes }}
# ttl-duration, if non-zero, defines the maximum amount of time a transaction
# can exist for in the mempool.
#
# Note, if ttl-num-blocks is also defined, a transaction will be removed if it
# has existed in the mempool at least ttl-num-blocks number of blocks or if its
# insertion time into the mempool is beyond ttl-duration.
ttl-duration = "{{ .Mempool.TTLDuration }}"
# ttl-num-blocks, if non-zero, defines the maximum number of blocks a transaction
# can exist for in the mempool.
#
# Note, if ttl-duration is also defined, a transaction will be removed if it
# has existed in the mempool at least ttl-num-blocks number of blocks or if
# its insertion time into the mempool is beyond ttl-duration.
ttl-num-blocks = {{ .Mempool.TTLNumBlocks }}
#######################################################
### State Sync Configuration Options ###
#######################################################
@@ -473,8 +494,14 @@ peer_query_maj23_sleep_duration = "{{ .Consensus.PeerQueryMaj23SleepDuration }}"
# 1) "null"
# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed.
# 3) "psql" - the indexer services backed by PostgreSQL.
# When "kv" or "psql" is chosen "tx.height" and "tx.hash" will always be indexed.
indexer = "{{ .TxIndex.Indexer }}"
# The PostgreSQL connection configuration, the connection format:
# postgresql://<user>:<password>@<host>:<port>/<db>?<opts>
psql-conn = "{{ .TxIndex.PsqlConn }}"
#######################################################
### Instrumentation Configuration Options ###
#######################################################

View File

@@ -21,6 +21,10 @@ import (
"github.com/tendermint/tendermint/libs/service"
tmsync "github.com/tendermint/tendermint/libs/sync"
mempl "github.com/tendermint/tendermint/mempool"
cfg "github.com/tendermint/tendermint/config"
mempoolv0 "github.com/tendermint/tendermint/mempool/v0"
mempoolv1 "github.com/tendermint/tendermint/mempool/v1"
"github.com/tendermint/tendermint/p2p"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
sm "github.com/tendermint/tendermint/state"
@@ -58,14 +62,31 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
blockDB := dbm.NewMemDB()
blockStore := store.NewBlockStore(blockDB)
// one for mempool, one for consensus
mtx := new(tmsync.Mutex)
proxyAppConnMem := abcicli.NewLocalClient(mtx, app)
// one for mempool, one for consensus
proxyAppConnCon := abcicli.NewLocalClient(mtx, app)
proxyAppConnConMem := abcicli.NewLocalClient(mtx, app)
// Make Mempool
mempool := mempl.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0)
mempool.SetLogger(log.TestingLogger().With("module", "mempool"))
var mempool mempl.Mempool
switch thisConfig.Mempool.Version {
case cfg.MempoolV0:
mempool = mempoolv0.NewCListMempool(config.Mempool,
proxyAppConnConMem,
state.LastBlockHeight,
mempoolv0.WithPreCheck(sm.TxPreCheck(state)),
mempoolv0.WithPostCheck(sm.TxPostCheck(state)))
case cfg.MempoolV1:
mempool = mempoolv1.NewTxMempool(logger,
config.Mempool,
proxyAppConnConMem,
state.LastBlockHeight,
mempoolv1.WithPreCheck(sm.TxPreCheck(state)),
mempoolv1.WithPostCheck(sm.TxPostCheck(state)),
)
}
if thisConfig.Consensus.WaitForTxs() {
mempool.EnableTxsAvailable()
}

View File

@@ -31,6 +31,8 @@ import (
tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
tmsync "github.com/tendermint/tendermint/libs/sync"
mempl "github.com/tendermint/tendermint/mempool"
mempoolv0 "github.com/tendermint/tendermint/mempool/v0"
mempoolv1 "github.com/tendermint/tendermint/mempool/v1"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/privval"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
@@ -390,12 +392,34 @@ func newStateWithConfigAndBlockStore(
// one for mempool, one for consensus
mtx := new(tmsync.Mutex)
proxyAppConnMem := abcicli.NewLocalClient(mtx, app)
proxyAppConnCon := abcicli.NewLocalClient(mtx, app)
proxyAppConnConMem := abcicli.NewLocalClient(mtx, app)
// Make Mempool
memplMetrics := mempl.NopMetrics()
// Make Mempool
mempool := mempl.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0)
mempool.SetLogger(log.TestingLogger().With("module", "mempool"))
var mempool mempl.Mempool
switch config.Mempool.Version {
case cfg.MempoolV0:
mempool = mempoolv0.NewCListMempool(config.Mempool,
proxyAppConnConMem,
state.LastBlockHeight,
mempoolv0.WithMetrics(memplMetrics),
mempoolv0.WithPreCheck(sm.TxPreCheck(state)),
mempoolv0.WithPostCheck(sm.TxPostCheck(state)))
case cfg.MempoolV1:
logger := consensusLogger()
mempool = mempoolv1.NewTxMempool(logger,
config.Mempool,
proxyAppConnConMem,
state.LastBlockHeight,
mempoolv1.WithMetrics(memplMetrics),
mempoolv1.WithPreCheck(sm.TxPreCheck(state)),
mempoolv1.WithPostCheck(sm.TxPostCheck(state)),
)
}
if thisConfig.Consensus.WaitForTxs() {
mempool.EnableTxsAvailable()
}

View File

@@ -1265,12 +1265,12 @@ func (ps *PeerState) SetHasVote(vote *types.Vote) {
}
func (ps *PeerState) setHasVote(height int64, round int32, voteType tmproto.SignedMsgType, index int32) {
logger := ps.logger.With(
ps.logger.Debug("setHasVote",
"peerH/R",
fmt.Sprintf("%d/%d", ps.PRS.Height, ps.PRS.Round),
log.NewLazySprintf("%d/%d", ps.PRS.Height, ps.PRS.Round),
"H/R",
fmt.Sprintf("%d/%d", height, round))
logger.Debug("setHasVote", "type", voteType, "index", index)
log.NewLazySprintf("%d/%d", height, round),
"type", voteType, "index", index)
// NOTE: some may be nil BitArrays -> no side effects.
psVotes := ps.getVoteBitArray(height, round, voteType)

View File

@@ -29,6 +29,8 @@ import (
"github.com/tendermint/tendermint/libs/log"
tmsync "github.com/tendermint/tendermint/libs/sync"
mempl "github.com/tendermint/tendermint/mempool"
mempoolv0 "github.com/tendermint/tendermint/mempool/v0"
mempoolv1 "github.com/tendermint/tendermint/mempool/v1"
"github.com/tendermint/tendermint/p2p"
p2pmock "github.com/tendermint/tendermint/p2p/mock"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
@@ -152,14 +154,33 @@ func TestReactorWithEvidence(t *testing.T) {
blockDB := dbm.NewMemDB()
blockStore := store.NewBlockStore(blockDB)
// one for mempool, one for consensus
mtx := new(tmsync.Mutex)
proxyAppConnMem := abcicli.NewLocalClient(mtx, app)
memplMetrics := mempl.NopMetrics()
// one for mempool, one for consensus
proxyAppConnCon := abcicli.NewLocalClient(mtx, app)
proxyAppConnConMem := abcicli.NewLocalClient(mtx, app)
// Make Mempool
mempool := mempl.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0)
mempool.SetLogger(log.TestingLogger().With("module", "mempool"))
var mempool mempl.Mempool
switch config.Mempool.Version {
case cfg.MempoolV0:
mempool = mempoolv0.NewCListMempool(config.Mempool,
proxyAppConnConMem,
state.LastBlockHeight,
mempoolv0.WithMetrics(memplMetrics),
mempoolv0.WithPreCheck(sm.TxPreCheck(state)),
mempoolv0.WithPostCheck(sm.TxPostCheck(state)))
case cfg.MempoolV1:
mempool = mempoolv1.NewTxMempool(logger,
config.Mempool,
proxyAppConnConMem,
state.LastBlockHeight,
mempoolv1.WithMetrics(memplMetrics),
mempoolv1.WithPreCheck(sm.TxPreCheck(state)),
mempoolv1.WithPostCheck(sm.TxPostCheck(state)),
)
}
if thisConfig.Consensus.WaitForTxs() {
mempool.EnableTxsAvailable()
}

View File

@@ -15,12 +15,18 @@ type emptyMempool struct{}
var _ mempl.Mempool = emptyMempool{}
func (emptyMempool) Lock() {}
func (emptyMempool) Unlock() {}
func (emptyMempool) Size() int { return 0 }
func (emptyMempool) Lock() {}
func (emptyMempool) Unlock() {}
func (emptyMempool) Size() int { return 0 }
func (emptyMempool) SizeBytes() int64 { return 0 }
func (emptyMempool) CheckTx(_ types.Tx, _ func(*abci.Response), _ mempl.TxInfo) error {
return nil
}
func (txmp emptyMempool) RemoveTxByKey(txKey types.TxKey) error {
return nil
}
func (emptyMempool) ReapMaxBytesMaxGas(_, _ int64) types.Txs { return types.Txs{} }
func (emptyMempool) ReapMaxTxs(n int) types.Txs { return types.Txs{} }
func (emptyMempool) Update(

View File

@@ -800,6 +800,8 @@ func (cs *State) receiveRoutine(maxSteps int) {
// state transitions on complete-proposal, 2/3-any, 2/3-one
func (cs *State) handleMsg(mi msgInfo) {
cs.mtx.Lock()
defer cs.mtx.Unlock()
var (
added bool
err error
@@ -811,13 +813,10 @@ func (cs *State) handleMsg(mi msgInfo) {
case *ProposalMessage:
// will not cause transition.
// once proposal is set, we can receive block parts
cs.mtx.Lock()
err = cs.setProposal(msg.Proposal)
cs.mtx.Unlock()
case *BlockPartMessage:
// if the proposal is complete, we'll enterPrevote or tryFinalizeCommit
cs.mtx.Lock()
added, err = cs.addProposalBlockPart(msg, peerID)
// We unlock here to yield to any routines that need to read the RoundState.
@@ -850,17 +849,14 @@ func (cs *State) handleMsg(mi msgInfo) {
)
err = nil
}
cs.mtx.Unlock()
case *VoteMessage:
// attempt to add the vote and dupeout the validator if its a duplicate signature
// if the vote gives us a 2/3-any or 2/3-one, we transition
cs.mtx.Lock()
added, err = cs.tryAddVote(msg.Vote, peerID)
if added {
cs.statsMsgQueue <- mi
}
cs.mtx.Unlock()
// if err == ErrAddingVote {
// TODO: punish peer
@@ -985,7 +981,7 @@ func (cs *State) enterNewRound(height int64, round int32) {
if cs.Height != height || round < cs.Round || (cs.Round == round && cs.Step != cstypes.RoundStepNewHeight) {
logger.Debug(
"entering new round with invalid args",
"current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step),
"current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step),
)
return
}
@@ -994,7 +990,7 @@ func (cs *State) enterNewRound(height int64, round int32) {
logger.Debug("need to set a buffer and log message here for sanity", "start_time", cs.StartTime, "now", now)
}
logger.Debug("entering new round", "current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step))
logger.Debug("entering new round", "current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step))
// increment validators if necessary
validators := cs.Validators
@@ -1067,12 +1063,12 @@ func (cs *State) enterPropose(height int64, round int32) {
if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPropose <= cs.Step) {
logger.Debug(
"entering propose step with invalid args",
"current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step),
"current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step),
)
return
}
logger.Debug("entering propose step", "current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step))
logger.Debug("entering propose step", "current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step))
defer func() {
// Done enterPropose:
@@ -1234,7 +1230,7 @@ func (cs *State) enterPrevote(height int64, round int32) {
if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrevote <= cs.Step) {
logger.Debug(
"entering prevote step with invalid args",
"current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step),
"current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step),
)
return
}
@@ -1245,7 +1241,7 @@ func (cs *State) enterPrevote(height int64, round int32) {
cs.newStep()
}()
logger.Debug("entering prevote step", "current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step))
logger.Debug("entering prevote step", "current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step))
// Sign and broadcast vote as necessary
cs.doPrevote(height, round)
@@ -1294,7 +1290,7 @@ func (cs *State) enterPrevoteWait(height int64, round int32) {
if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrevoteWait <= cs.Step) {
logger.Debug(
"entering prevote wait step with invalid args",
"current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step),
"current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step),
)
return
}
@@ -1306,7 +1302,7 @@ func (cs *State) enterPrevoteWait(height int64, round int32) {
))
}
logger.Debug("entering prevote wait step", "current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step))
logger.Debug("entering prevote wait step", "current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step))
defer func() {
// Done enterPrevoteWait:
@@ -1330,12 +1326,12 @@ func (cs *State) enterPrecommit(height int64, round int32) {
if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrecommit <= cs.Step) {
logger.Debug(
"entering precommit step with invalid args",
"current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step),
"current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step),
)
return
}
logger.Debug("entering precommit step", "current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step))
logger.Debug("entering precommit step", "current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step))
defer func() {
// Done enterPrecommit:
@@ -1453,7 +1449,7 @@ func (cs *State) enterPrecommitWait(height int64, round int32) {
logger.Debug(
"entering precommit wait step with invalid args",
"triggered_timeout", cs.TriggeredTimeoutPrecommit,
"current", fmt.Sprintf("%v/%v", cs.Height, cs.Round),
"current", log.NewLazySprintf("%v/%v", cs.Height, cs.Round),
)
return
}
@@ -1465,7 +1461,7 @@ func (cs *State) enterPrecommitWait(height int64, round int32) {
))
}
logger.Debug("entering precommit wait step", "current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step))
logger.Debug("entering precommit wait step", "current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step))
defer func() {
// Done enterPrecommitWait:
@@ -1484,12 +1480,12 @@ func (cs *State) enterCommit(height int64, commitRound int32) {
if cs.Height != height || cstypes.RoundStepCommit <= cs.Step {
logger.Debug(
"entering commit step with invalid args",
"current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step),
"current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step),
)
return
}
logger.Debug("entering commit step", "current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step))
logger.Debug("entering commit step", "current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step))
defer func() {
// Done enterCommit:
@@ -1522,7 +1518,7 @@ func (cs *State) enterCommit(height int64, commitRound int32) {
if !cs.ProposalBlockParts.HasHeader(blockID.PartSetHeader) {
logger.Info(
"commit is for a block we do not know about; set ProposalBlock=nil",
"proposal", cs.ProposalBlock.Hash(),
"proposal", log.NewLazyBlockHash(cs.ProposalBlock),
"commit", blockID.Hash,
)
@@ -1559,7 +1555,7 @@ func (cs *State) tryFinalizeCommit(height int64) {
// TODO: ^^ wait, why does it matter that we're a validator?
logger.Debug(
"failed attempt to finalize commit; we do not have the commit block",
"proposal_block", cs.ProposalBlock.Hash(),
"proposal_block", log.NewLazyBlockHash(cs.ProposalBlock),
"commit_block", blockID.Hash,
)
return
@@ -1575,7 +1571,7 @@ func (cs *State) finalizeCommit(height int64) {
if cs.Height != height || cs.Step != cstypes.RoundStepCommit {
logger.Debug(
"entering finalize commit step",
"current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step),
"current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step),
)
return
}
@@ -1601,11 +1597,11 @@ func (cs *State) finalizeCommit(height int64) {
logger.Info(
"finalizing commit of block",
"hash", block.Hash(),
"hash", log.NewLazyBlockHash(block),
"root", block.AppHash,
"num_txs", len(block.Txs),
)
logger.Debug(fmt.Sprintf("%v", block))
logger.Debug("committed block", "block", log.NewLazySprintf("%v", block))
fail.Fail() // XXX
@@ -1922,7 +1918,7 @@ func (cs *State) handleCompleteProposal(blockHeight int64) {
cs.Logger.Debug(
"updating valid block to new proposal block",
"valid_round", cs.Round,
"valid_block_hash", cs.ProposalBlock.Hash(),
"valid_block_hash", log.NewLazyBlockHash(cs.ProposalBlock),
)
cs.ValidRound = cs.Round
@@ -2096,7 +2092,7 @@ func (cs *State) addVote(vote *types.Vote, peerID p2p.ID) (added bool, err error
} else {
cs.Logger.Debug(
"valid block we do not know about; set ProposalBlock=nil",
"proposal", cs.ProposalBlock.Hash(),
"proposal", log.NewLazyBlockHash(cs.ProposalBlock),
"block_id", blockID.Hash,
)


@@ -25,19 +25,19 @@ func TestRandom(t *testing.T) {
plaintext := make([]byte, pl)
_, err := cr.Read(key[:])
if err != nil {
t.Errorf("error on read: %w", err)
t.Errorf("error on read: %v", err)
}
_, err = cr.Read(nonce[:])
if err != nil {
t.Errorf("error on read: %w", err)
t.Errorf("error on read: %v", err)
}
_, err = cr.Read(ad)
if err != nil {
t.Errorf("error on read: %w", err)
t.Errorf("error on read: %v", err)
}
_, err = cr.Read(plaintext)
if err != nil {
t.Errorf("error on read: %w", err)
t.Errorf("error on read: %v", err)
}
aead, err := New(key[:])

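The switch from %w to %v above is a real fix, not a style change: %w is only meaningful to fmt.Errorf when wrapping errors, while t.Errorf formats through ordinary Sprintf rules, where %w renders as a bad verb. A standalone illustration, using only the standard library:

package main

import (
	"errors"
	"fmt"
)

func main() {
	err := errors.New("boom")

	// %w is not a valid Sprintf verb; it only has meaning inside fmt.Errorf.
	fmt.Println(fmt.Sprintf("error on read: %w", err))
	// Output: error on read: %!w(*errors.errorString=&{boom})

	// %v formats the error normally, which is what t.Errorf needs.
	fmt.Println(fmt.Sprintf("error on read: %v", err))
	// Output: error on read: boom
}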

@@ -44,10 +44,6 @@ module.exports = {
{
title: 'Resources',
children: [
{
title: 'Developer Sessions',
path: '/DEV_SESSIONS.html'
},
{
// TODO(creachadair): Figure out how to make this per-branch.
// See: https://github.com/tendermint/tendermint/issues/7908


@@ -1,10 +1,9 @@
// Code generated by mockery v0.0.0-dev. DO NOT EDIT.
// Code generated by mockery. DO NOT EDIT.
package mocks
import (
mock "github.com/stretchr/testify/mock"
types "github.com/tendermint/tendermint/types"
)
@@ -58,3 +57,18 @@ func (_m *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta {
return r0
}
type NewBlockStoreT interface {
mock.TestingT
Cleanup(func())
}
// NewBlockStore creates a new instance of BlockStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
func NewBlockStore(t NewBlockStoreT) *BlockStore {
mock := &BlockStore{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}


@@ -4,7 +4,7 @@ import (
"github.com/tendermint/tendermint/types"
)
//go:generate mockery --case underscore --name BlockStore
//go:generate ../scripts/mockery_generate.sh BlockStore
type BlockStore interface {
LoadBlockMeta(height int64) *types.BlockMeta

go.mod

@@ -1,41 +1,53 @@
module github.com/tendermint/tendermint
go 1.15
go 1.16
require (
github.com/BurntSushi/toml v1.0.0
github.com/BurntSushi/toml v1.2.0
github.com/ChainSafe/go-schnorrkel v0.0.0-20200405005733-88cbf1b4c40d
github.com/Workiva/go-datastructures v1.0.53
github.com/adlio/schema v1.3.0
github.com/btcsuite/btcd v0.22.0-beta
github.com/adlio/schema v1.3.3
github.com/btcsuite/btcd v0.22.1
github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce
github.com/cenkalti/backoff v2.2.1+incompatible // indirect
github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 // indirect
github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect
github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 // indirect
github.com/fortytw2/leaktest v1.3.0
github.com/go-kit/kit v0.12.0
github.com/go-kit/log v0.2.0
github.com/go-kit/log v0.2.1
github.com/go-logfmt/logfmt v0.5.1
github.com/gogo/protobuf v1.3.2
github.com/golang/protobuf v1.5.2
github.com/google/orderedcode v0.0.1
github.com/gorilla/websocket v1.5.0
github.com/gotestyourself/gotestyourself v2.2.0+incompatible // indirect
github.com/gtank/merlin v0.1.1
github.com/lib/pq v1.10.4
github.com/libp2p/go-buffer-pool v0.0.2
github.com/lib/pq v1.10.6
github.com/libp2p/go-buffer-pool v0.1.0
github.com/minio/highwayhash v1.0.2
github.com/ory/dockertest v3.3.5+incompatible
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.12.1
github.com/prometheus/client_golang v1.12.2
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0
github.com/rs/cors v1.8.2
github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa
github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa
github.com/spf13/cobra v1.4.0
github.com/spf13/viper v1.10.1
github.com/stretchr/testify v1.7.1
github.com/spf13/cobra v1.5.0
github.com/spf13/viper v1.12.0
github.com/stretchr/testify v1.8.0
github.com/tendermint/tm-db v0.6.6
golang.org/x/crypto v0.0.0-20210915214749-c084706c2272
golang.org/x/net v0.0.0-20211208012354-db4efeb81f4b
google.golang.org/grpc v1.45.0
golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e
golang.org/x/net v0.0.0-20220617184016-355a448f1bc9
google.golang.org/grpc v1.48.0
gotest.tools v2.2.0+incompatible // indirect
)
require (
github.com/bufbuild/buf v1.4.0
github.com/creachadair/taskgroup v0.3.2
github.com/golangci/golangci-lint v1.47.2
github.com/prometheus/common v0.34.0 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca
github.com/vektra/mockery/v2 v2.14.0
)

go.sum

File diff suppressed because it is too large.

libs/log/lazy.go (new file)

@@ -0,0 +1,42 @@
package log
import (
"fmt"
tmbytes "github.com/tendermint/tendermint/libs/bytes"
)
type LazySprintf struct {
format string
args []interface{}
}
// NewLazySprintf defers fmt.Sprintf until the Stringer interface is invoked.
// This is particularly useful for avoiding calling Sprintf when debugging is not
// active.
func NewLazySprintf(format string, args ...interface{}) *LazySprintf {
return &LazySprintf{format, args}
}
func (l *LazySprintf) String() string {
return fmt.Sprintf(l.format, l.args...)
}
type LazyBlockHash struct {
block hashable
}
type hashable interface {
Hash() tmbytes.HexBytes
}
// NewLazyBlockHash defers block Hash until the Stringer interface is invoked.
// This is particularly useful for avoiding calling Sprintf when debugging is not
// active.
func NewLazyBlockHash(block hashable) *LazyBlockHash {
return &LazyBlockHash{block}
}
func (l *LazyBlockHash) String() string {
return l.block.Hash().String()
}

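A minimal sketch of how callers use these helpers; the logger construction is illustrative, while NewLazySprintf comes from the file above:

package main

import (
	"os"

	"github.com/tendermint/tendermint/libs/log"
)

func main() {
	logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))

	// The Sprintf call is deferred until the log line is actually rendered;
	// if the logger filters this level out, no formatting work happens at all.
	logger.Debug("entering new round", "current", log.NewLazySprintf("%v/%v/%v", 10, 0, 1))
}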

@@ -87,6 +87,12 @@ func (l tmfmtLogger) Log(keyvals ...interface{}) error {
if b, ok := keyvals[i+1].([]byte); ok {
keyvals[i+1] = strings.ToUpper(hex.EncodeToString(b))
}
// Realize stringers
if s, ok := keyvals[i+1].(fmt.Stringer); ok {
keyvals[i+1] = s.String()
}
}
// Form a custom Tendermint line

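This hunk is what makes the lazy wrappers effective: the tmfmt formatter now realizes any fmt.Stringer value only at render time. A self-contained mirror of that step, with a hypothetical hexID type standing in for a log value:

package main

import "fmt"

type hexID [4]byte

func (h hexID) String() string { return fmt.Sprintf("%X", h[:]) }

func main() {
	keyvals := []interface{}{"id", hexID{0xDE, 0xAD, 0xBE, 0xEF}}

	// Same realization step as the formatter: swap any fmt.Stringer
	// value for the result of its String method.
	for i := 0; i < len(keyvals)-1; i += 2 {
		if s, ok := keyvals[i+1].(fmt.Stringer); ok {
			keyvals[i+1] = s.String()
		}
	}

	fmt.Println(keyvals...) // id DEADBEEF
}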

@@ -8,6 +8,8 @@ import (
"os"
"os/signal"
"syscall"
"github.com/tendermint/tendermint/libs/log"
)
type logger interface {
@@ -21,7 +23,7 @@ func TrapSignal(logger logger, cb func()) {
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
go func() {
for sig := range c {
logger.Info(fmt.Sprintf("captured %v, exiting...", sig))
logger.Info("signal trapped", "msg", log.NewLazySprintf("captured %v, exiting...", sig))
if cb != nil {
cb()
}

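For orientation, a minimal sketch of wiring TrapSignal into a main function. The cleanup body is illustrative; TrapSignal itself exits the process after the callback runs, which is why the caller simply blocks:

package main

import (
	"os"

	"github.com/tendermint/tendermint/libs/log"
	tmos "github.com/tendermint/tendermint/libs/os"
)

func main() {
	logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))

	// Runs the callback on SIGINT/SIGTERM, logging via the lazy message above.
	tmos.TrapSignal(logger, func() {
		logger.Info("cleaning up before exit")
	})

	select {} // block forever; TrapSignal terminates the process on signal
}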

@@ -0,0 +1,41 @@
package progressbar
import "fmt"
// Package progressbar implements a simple textual progress bar for commands
// where indicating current status and progress is desired.
// ref: https://www.pixelstech.net/article/1596946473-A-simple-example-on-implementing-progress-bar-in-GoLang
type Bar struct {
percent int64 // progress percentage
cur int64 // current progress
start int64 // the init starting value for progress
total int64 // total value for progress
rate string // the actual progress bar to be printed
graph string // the fill value for progress bar
}
func (bar *Bar) NewOption(start, total int64) {
bar.cur = start
bar.start = start
bar.total = total
bar.graph = "█"
bar.percent = bar.getPercent()
}
func (bar *Bar) getPercent() int64 {
return int64(float32(bar.cur-bar.start) / float32(bar.total-bar.start) * 100)
}
func (bar *Bar) Play(cur int64) {
bar.cur = cur
last := bar.percent
bar.percent = bar.getPercent()
if bar.percent != last && bar.percent%2 == 0 {
bar.rate += bar.graph
}
fmt.Printf("\r[%-50s]%3d%% %8d/%d", bar.rate, bar.percent, bar.cur, bar.total)
}
func (bar *Bar) Finish() {
fmt.Println()
}

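A minimal usage sketch of the Bar type above; the import path is assumed from the repository layout, and the values are arbitrary:

package main

import (
	"time"

	"github.com/tendermint/tendermint/libs/progressbar"
)

func main() {
	var bar progressbar.Bar
	bar.NewOption(0, 100) // progress runs from 0 to 100

	for i := int64(0); i <= 100; i++ {
		bar.Play(i) // redraws the bar in place using a carriage return
		time.Sleep(5 * time.Millisecond)
	}

	bar.Finish() // emits the final newline
}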

@@ -0,0 +1,41 @@
package progressbar
import (
"testing"
"time"
"github.com/stretchr/testify/require"
)
func TestProgressBar(t *testing.T) {
zero := int64(0)
hundred := int64(100)
var bar Bar
bar.NewOption(zero, hundred)
require.Equal(t, zero, bar.start)
require.Equal(t, zero, bar.cur)
require.Equal(t, hundred, bar.total)
require.Equal(t, zero, bar.percent)
require.Equal(t, "█", bar.graph)
require.Equal(t, "", bar.rate)
defer bar.Finish()
for i := zero; i <= hundred; i++ {
time.Sleep(1 * time.Millisecond)
bar.Play(i)
}
require.Equal(t, zero, bar.start)
require.Equal(t, hundred, bar.cur)
require.Equal(t, hundred, bar.total)
require.Equal(t, hundred, bar.percent)
var rate string
for i := zero; i < hundred/2; i++ {
rate += "█"
}
require.Equal(t, rate, bar.rate)
}


@@ -136,7 +136,11 @@ func (bs *BaseService) Start() error {
atomic.StoreUint32(&bs.started, 0)
return ErrAlreadyStopped
}
bs.Logger.Info(fmt.Sprintf("Starting %v service", bs.name), "impl", bs.impl.String())
bs.Logger.Info("service start",
"msg",
log.NewLazySprintf("Starting %v service", bs.name),
"impl",
bs.impl.String())
err := bs.impl.OnStart()
if err != nil {
// revert flag
@@ -145,7 +149,11 @@ func (bs *BaseService) Start() error {
}
return nil
}
bs.Logger.Debug(fmt.Sprintf("Not starting %v service -- already started", bs.name), "impl", bs.impl)
bs.Logger.Debug("service start",
"msg",
log.NewLazySprintf("Not starting %v service -- already started", bs.name),
"impl",
bs.impl)
return ErrAlreadyStarted
}
@@ -165,12 +173,20 @@ func (bs *BaseService) Stop() error {
atomic.StoreUint32(&bs.stopped, 0)
return ErrNotStarted
}
bs.Logger.Info(fmt.Sprintf("Stopping %v service", bs.name), "impl", bs.impl)
bs.Logger.Info("service stop",
"msg",
log.NewLazySprintf("Stopping %v service", bs.name),
"impl",
bs.impl)
bs.impl.OnStop()
close(bs.quit)
return nil
}
bs.Logger.Debug(fmt.Sprintf("Stopping %v service (already stopped)", bs.name), "impl", bs.impl)
bs.Logger.Debug("service stop",
"msg",
log.NewLazySprintf("Stopping %v service (already stopped)", bs.name),
"impl",
bs.impl)
return ErrAlreadyStopped
}
@@ -183,7 +199,11 @@ func (bs *BaseService) OnStop() {}
// will be returned if the service is running.
func (bs *BaseService) Reset() error {
if !atomic.CompareAndSwapUint32(&bs.stopped, 1, 0) {
bs.Logger.Debug(fmt.Sprintf("Can't reset %v service. Not stopped", bs.name), "impl", bs.impl)
bs.Logger.Debug("service reset",
"msg",
log.NewLazySprintf("Can't reset %v service. Not stopped", bs.name),
"impl",
bs.impl)
return fmt.Errorf("can't reset running %s", bs.name)
}

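For context on the log lines being reworked above, a compact sketch of a service built on BaseService, showing where bs.name and bs.impl come from. The Pinger type is hypothetical:

package main

import (
	"fmt"
	"os"

	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/libs/service"
)

type Pinger struct {
	service.BaseService
}

func (p *Pinger) OnStart() error { fmt.Println("ping loop started"); return nil }
func (p *Pinger) OnStop()        { fmt.Println("ping loop stopped") }

func main() {
	logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))

	p := &Pinger{}
	// "pinger" becomes bs.name and p becomes bs.impl in the log calls above.
	p.BaseService = *service.NewBaseService(logger, "pinger", p)

	_ = p.Start() // emits the "Starting pinger service" line via the logger
	_ = p.Stop()  // emits the "Stopping pinger service" line
}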

@@ -27,7 +27,7 @@ var errNegOrZeroHeight = errors.New("negative or zero height")
type KeyPathFunc func(path string, key []byte) (merkle.KeyPath, error)
// LightClient is an interface that contains functionality needed by Client from the light client.
//go:generate mockery --case underscore --name LightClient
//go:generate ../../scripts/mockery_generate.sh LightClient
type LightClient interface {
ChainID() string
Update(ctx context.Context, now time.Time) (*types.LightBlock, error)


@@ -1,4 +1,4 @@
// Code generated by mockery v0.0.0-dev. DO NOT EDIT.
// Code generated by mockery. DO NOT EDIT.
package mocks
@@ -99,3 +99,18 @@ func (_m *LightClient) VerifyLightBlockAtHeight(ctx context.Context, height int6
return r0, r1
}
type NewLightClientT interface {
mock.TestingT
Cleanup(func())
}
// NewLightClient creates a new instance of LightClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
func NewLightClient(t NewLightClientT) *LightClient {
mock := &LightClient{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}

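The new constructor wires AssertExpectations into t.Cleanup automatically, so tests no longer need to assert on the mock themselves. A sketch of using it, with the import path assumed from the go:generate directive above:

package rpc_test

import (
	"testing"

	"github.com/tendermint/tendermint/light/rpc/mocks"
)

func TestWithGeneratedMock(t *testing.T) {
	lc := mocks.NewLightClient(t) // expectations are asserted on cleanup

	lc.On("ChainID").Return("test-chain")

	if got := lc.ChainID(); got != "test-chain" {
		t.Fatalf("unexpected chain id: %q", got)
	}
}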

@@ -1,73 +0,0 @@
package mempool
import (
"encoding/binary"
"testing"
"github.com/tendermint/tendermint/abci/example/kvstore"
"github.com/tendermint/tendermint/proxy"
)
func BenchmarkReap(b *testing.B) {
app := kvstore.NewApplication()
cc := proxy.NewLocalClientCreator(app)
mempool, cleanup := newMempoolWithApp(cc)
defer cleanup()
size := 10000
for i := 0; i < size; i++ {
tx := make([]byte, 8)
binary.BigEndian.PutUint64(tx, uint64(i))
if err := mempool.CheckTx(tx, nil, TxInfo{}); err != nil {
b.Error(err)
}
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
mempool.ReapMaxBytesMaxGas(100000000, 10000000)
}
}
func BenchmarkCheckTx(b *testing.B) {
app := kvstore.NewApplication()
cc := proxy.NewLocalClientCreator(app)
mempool, cleanup := newMempoolWithApp(cc)
defer cleanup()
for i := 0; i < b.N; i++ {
tx := make([]byte, 8)
binary.BigEndian.PutUint64(tx, uint64(i))
if err := mempool.CheckTx(tx, nil, TxInfo{}); err != nil {
b.Error(err)
}
}
}
func BenchmarkCacheInsertTime(b *testing.B) {
cache := newMapTxCache(b.N)
txs := make([][]byte, b.N)
for i := 0; i < b.N; i++ {
txs[i] = make([]byte, 8)
binary.BigEndian.PutUint64(txs[i], uint64(i))
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
cache.Push(txs[i])
}
}
// This benchmark is probably skewed, since we actually will be removing
// txs in parallel, which may cause some overhead due to mutex locking.
func BenchmarkCacheRemoveTime(b *testing.B) {
cache := newMapTxCache(b.N)
txs := make([][]byte, b.N)
for i := 0; i < b.N; i++ {
txs[i] = make([]byte, 8)
binary.BigEndian.PutUint64(txs[i], uint64(i))
cache.Push(txs[i])
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
cache.Remove(txs[i])
}
}

mempool/cache.go (new file)

@@ -0,0 +1,120 @@
package mempool
import (
"container/list"
tmsync "github.com/tendermint/tendermint/libs/sync"
"github.com/tendermint/tendermint/types"
)
// TxCache defines an interface for raw transaction caching in a mempool.
// Currently, a TxCache does not allow direct reading or getting of transaction
// values. A TxCache is used primarily to push transactions and removing
// transactions. Pushing via Push returns a boolean telling the caller if the
// transaction already exists in the cache or not.
type TxCache interface {
// Reset resets the cache to an empty state.
Reset()
// Push adds the given raw transaction to the cache and returns true if it was
// newly added. Otherwise, it returns false.
Push(tx types.Tx) bool
// Remove removes the given raw transaction from the cache.
Remove(tx types.Tx)
// Has reports whether tx is present in the cache. Checking for presence is
// not treated as an access of the value.
Has(tx types.Tx) bool
}
var _ TxCache = (*LRUTxCache)(nil)
// LRUTxCache maintains a thread-safe LRU cache of raw transactions. The cache
// only stores the hash of the raw transaction.
type LRUTxCache struct {
mtx tmsync.Mutex
size int
cacheMap map[types.TxKey]*list.Element
list *list.List
}
func NewLRUTxCache(cacheSize int) *LRUTxCache {
return &LRUTxCache{
size: cacheSize,
cacheMap: make(map[types.TxKey]*list.Element, cacheSize),
list: list.New(),
}
}
// GetList returns the underlying linked-list that backs the LRU cache. Note,
// this should be used for testing purposes only!
func (c *LRUTxCache) GetList() *list.List {
return c.list
}
func (c *LRUTxCache) Reset() {
c.mtx.Lock()
defer c.mtx.Unlock()
c.cacheMap = make(map[types.TxKey]*list.Element, c.size)
c.list.Init()
}
func (c *LRUTxCache) Push(tx types.Tx) bool {
c.mtx.Lock()
defer c.mtx.Unlock()
key := tx.Key()
moved, ok := c.cacheMap[key]
if ok {
c.list.MoveToBack(moved)
return false
}
if c.list.Len() >= c.size {
front := c.list.Front()
if front != nil {
frontKey := front.Value.(types.TxKey)
delete(c.cacheMap, frontKey)
c.list.Remove(front)
}
}
e := c.list.PushBack(key)
c.cacheMap[key] = e
return true
}
func (c *LRUTxCache) Remove(tx types.Tx) {
c.mtx.Lock()
defer c.mtx.Unlock()
key := tx.Key()
e := c.cacheMap[key]
delete(c.cacheMap, key)
if e != nil {
c.list.Remove(e)
}
}
func (c *LRUTxCache) Has(tx types.Tx) bool {
c.mtx.Lock()
defer c.mtx.Unlock()
_, ok := c.cacheMap[tx.Key()]
return ok
}
// NopTxCache defines a no-op raw transaction cache.
type NopTxCache struct{}
var _ TxCache = (*NopTxCache)(nil)
func (NopTxCache) Reset() {}
func (NopTxCache) Push(types.Tx) bool { return true }
func (NopTxCache) Remove(types.Tx) {}
func (NopTxCache) Has(types.Tx) bool { return false }

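A small sketch exercising the LRUTxCache above; the capacity and transactions are arbitrary:

package main

import (
	"fmt"

	"github.com/tendermint/tendermint/mempool"
	"github.com/tendermint/tendermint/types"
)

func main() {
	cache := mempool.NewLRUTxCache(2) // room for two tx hashes

	tx1, tx2, tx3 := types.Tx("a"), types.Tx("b"), types.Tx("c")

	fmt.Println(cache.Push(tx1)) // true: newly added
	fmt.Println(cache.Push(tx1)) // false: already cached, moved to the back
	cache.Push(tx2)
	cache.Push(tx3) // evicts tx1, the least recently used entry

	fmt.Println(cache.Has(tx1)) // false
	fmt.Println(cache.Has(tx3)) // true
}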

@@ -0,0 +1,41 @@
package mempool
import (
"encoding/binary"
"testing"
)
func BenchmarkCacheInsertTime(b *testing.B) {
cache := NewLRUTxCache(b.N)
txs := make([][]byte, b.N)
for i := 0; i < b.N; i++ {
txs[i] = make([]byte, 8)
binary.BigEndian.PutUint64(txs[i], uint64(i))
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
cache.Push(txs[i])
}
}
// This benchmark is probably skewed, since we actually will be removing
// txs in parallel, which may cause some overhead due to mutex locking.
func BenchmarkCacheRemoveTime(b *testing.B) {
cache := NewLRUTxCache(b.N)
txs := make([][]byte, b.N)
for i := 0; i < b.N; i++ {
txs[i] = make([]byte, 8)
binary.BigEndian.PutUint64(txs[i], uint64(i))
cache.Push(txs[i])
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
cache.Remove(txs[i])
}
}


@@ -2,32 +2,30 @@ package mempool
import (
"crypto/rand"
"crypto/sha256"
"testing"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/abci/example/kvstore"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/proxy"
"github.com/tendermint/tendermint/types"
)
func TestCacheRemove(t *testing.T) {
cache := newMapTxCache(100)
cache := NewLRUTxCache(100)
numTxs := 10
txs := make([][]byte, numTxs)
for i := 0; i < numTxs; i++ {
// probability of collision is 2**-256
txBytes := make([]byte, 32)
_, err := rand.Read(txBytes)
require.NoError(t, err)
txs[i] = txBytes
cache.Push(txBytes)
// make sure it's added to both the linked list and the map
require.Equal(t, i+1, len(cache.cacheMap))
require.Equal(t, i+1, cache.list.Len())
}
for i := 0; i < numTxs; i++ {
cache.Remove(txs[i])
// make sure it's removed from both the map and the linked list
@@ -35,70 +33,3 @@ func TestCacheRemove(t *testing.T) {
require.Equal(t, numTxs-(i+1), cache.list.Len())
}
}
func TestCacheAfterUpdate(t *testing.T) {
app := kvstore.NewApplication()
cc := proxy.NewLocalClientCreator(app)
mempool, cleanup := newMempoolWithApp(cc)
defer cleanup()
// reAddIndices & txsInCache can have elements > numTxsToCreate
// also assumes max index is 255 for convenience
// txs in cache also checks order of elements
tests := []struct {
numTxsToCreate int
updateIndices []int
reAddIndices []int
txsInCache []int
}{
{1, []int{}, []int{1}, []int{1, 0}}, // adding new txs works
{2, []int{1}, []int{}, []int{1, 0}}, // update doesn't remove tx from cache
{2, []int{2}, []int{}, []int{2, 1, 0}}, // update adds new tx to cache
{2, []int{1}, []int{1}, []int{1, 0}}, // re-adding after update doesn't make dupe
}
for tcIndex, tc := range tests {
for i := 0; i < tc.numTxsToCreate; i++ {
tx := types.Tx{byte(i)}
err := mempool.CheckTx(tx, nil, TxInfo{})
require.NoError(t, err)
}
updateTxs := []types.Tx{}
for _, v := range tc.updateIndices {
tx := types.Tx{byte(v)}
updateTxs = append(updateTxs, tx)
}
err := mempool.Update(int64(tcIndex), updateTxs, abciResponses(len(updateTxs), abci.CodeTypeOK), nil, nil)
require.NoError(t, err)
for _, v := range tc.reAddIndices {
tx := types.Tx{byte(v)}
_ = mempool.CheckTx(tx, nil, TxInfo{})
}
cache := mempool.cache.(*mapTxCache)
node := cache.list.Front()
counter := 0
for node != nil {
require.NotEqual(t, len(tc.txsInCache), counter,
"cache larger than expected on testcase %d", tcIndex)
nodeVal := node.Value.([sha256.Size]byte)
expectedBz := sha256.Sum256([]byte{byte(tc.txsInCache[len(tc.txsInCache)-counter-1])})
// Reference for reading the errors:
// >>> sha256('\x00').hexdigest()
// '6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d'
// >>> sha256('\x01').hexdigest()
// '4bf5122f344554c53bde2ebb8cd2b7e3d1600ad631c385a5d7cce23c7785459a'
// >>> sha256('\x02').hexdigest()
// 'dbc1b4c900ffe48d575b5da5c638040125f65db0fe3e24494b76ea986457d986'
require.Equal(t, expectedBz, nodeVal, "Equality failed on index %d, tc %d", counter, tcIndex)
counter++
node = node.Next()
}
require.Equal(t, len(tc.txsInCache), counter,
"cache smaller than expected on testcase %d", tcIndex)
mempool.Flush()
}
}


@@ -1,52 +0,0 @@
package mempool
import (
"errors"
"fmt"
)
var (
// ErrTxInCache is returned to the client if we saw tx earlier
ErrTxInCache = errors.New("tx already exists in cache")
)
// ErrTxTooLarge means the tx is too big to be sent in a message to other peers
type ErrTxTooLarge struct {
max int
actual int
}
func (e ErrTxTooLarge) Error() string {
return fmt.Sprintf("Tx too large. Max size is %d, but got %d", e.max, e.actual)
}
// ErrMempoolIsFull means Tendermint & an application can't handle that much load
type ErrMempoolIsFull struct {
numTxs int
maxTxs int
txsBytes int64
maxTxsBytes int64
}
func (e ErrMempoolIsFull) Error() string {
return fmt.Sprintf(
"mempool is full: number of txs %d (max: %d), total txs bytes %d (max: %d)",
e.numTxs, e.maxTxs,
e.txsBytes, e.maxTxsBytes)
}
// ErrPreCheck is returned when tx is too big
type ErrPreCheck struct {
Reason error
}
func (e ErrPreCheck) Error() string {
return e.Reason.Error()
}
// IsPreCheckError returns true if err is due to pre check failure.
func IsPreCheckError(err error) bool {
_, ok := err.(ErrPreCheck)
return ok
}

mempool/ids.go (new file)

@@ -0,0 +1,3 @@
package mempool
// These functions were moved into v0/reactor.go and v1/reactor.go

mempool/ids_test.go (new file)

@@ -0,0 +1,23 @@
package mempool
// import (
// "testing"
// "github.com/stretchr/testify/require"
// "github.com/tendermint/tendermint/types"
// )
// func TestMempoolIDsBasic(t *testing.T) {
// ids := NewMempoolIDs()
// peerID, err := types.NewNodeID("0011223344556677889900112233445566778899")
// require.NoError(t, err)
// ids.ReserveForPeer(peerID)
// require.EqualValues(t, 1, ids.GetForPeer(peerID))
// ids.Reclaim(peerID)
// ids.ReserveForPeer(peerID)
// require.EqualValues(t, 2, ids.GetForPeer(peerID))
// ids.Reclaim(peerID)
// }


@@ -1,43 +1,67 @@
package mempool
import (
"crypto/sha256"
"errors"
"fmt"
"math"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/types"
)
const (
MempoolChannel = byte(0x30)
// PeerCatchupSleepIntervalMS defines how much time to sleep if a peer is behind
PeerCatchupSleepIntervalMS = 100
// UnknownPeerID is the peer ID to use when running CheckTx when there is
// no peer (e.g. RPC)
UnknownPeerID uint16 = 0
MaxActiveIDs = math.MaxUint16
)
// Mempool defines the mempool interface.
//
// Updates to the mempool need to be synchronized with committing a block so
// apps can reset their transient state on Commit.
// applications can reset their transient state on Commit.
type Mempool interface {
// CheckTx executes a new transaction against the application to determine
// its validity and whether it should be added to the mempool.
CheckTx(tx types.Tx, callback func(*abci.Response), txInfo TxInfo) error
// RemoveTxByKey removes a transaction, identified by its key,
// from the mempool.
RemoveTxByKey(txKey types.TxKey) error
// ReapMaxBytesMaxGas reaps transactions from the mempool up to maxBytes
// bytes total with the condition that the total gasWanted must be less than
// maxGas.
//
// If both maxes are negative, there is no cap on the size of all returned
// transactions (~ all available transactions).
ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs
// ReapMaxTxs reaps up to max transactions from the mempool.
// If max is negative, there is no cap on the size of all returned
// transactions (~ all available transactions).
// ReapMaxTxs reaps up to max transactions from the mempool. If max is
// negative, there is no cap on the size of all returned transactions
// (~ all available transactions).
ReapMaxTxs(max int) types.Txs
// Lock locks the mempool. The consensus must be able to hold lock to safely update.
// Lock locks the mempool. The consensus must be able to hold lock to safely
// update.
Lock()
// Unlock unlocks the mempool.
Unlock()
// Update informs the mempool that the given txs were committed and can be discarded.
// NOTE: this should be called *after* block is committed by consensus.
// NOTE: Lock/Unlock must be managed by caller
// Update informs the mempool that the given txs were committed and can be
// discarded.
//
// NOTE:
// 1. This should be called *after* block is committed by consensus.
// 2. Lock/Unlock must be managed by the caller.
Update(
blockHeight int64,
blockTxs types.Txs,
@@ -46,17 +70,21 @@ type Mempool interface {
newPostFn PostCheckFunc,
) error
// FlushAppConn flushes the mempool connection to ensure async reqResCb calls are
// done. E.g. from CheckTx.
// NOTE: Lock/Unlock must be managed by caller
// FlushAppConn flushes the mempool connection to ensure async callback calls
// are done, e.g. from CheckTx.
//
// NOTE:
// 1. Lock/Unlock must be managed by caller.
FlushAppConn() error
// Flush removes all transactions from the mempool and cache
// Flush removes all transactions from the mempool and caches.
Flush()
// TxsAvailable returns a channel which fires once for every height,
// and only when transactions are available in the mempool.
// NOTE: the returned channel may be nil if EnableTxsAvailable was not called.
// TxsAvailable returns a channel which fires once for every height, and only
// when transactions are available in the mempool.
//
// NOTE:
// 1. The returned channel may be nil if EnableTxsAvailable was not called.
TxsAvailable() <-chan struct{}
// EnableTxsAvailable initializes the TxsAvailable channel, ensuring it will
@@ -66,20 +94,10 @@ type Mempool interface {
// Size returns the number of transactions in the mempool.
Size() int
// TxsBytes returns the total size of all txs in the mempool.
TxsBytes() int64
// InitWAL creates a directory for the WAL file and opens a file itself. If
// there is an error, it will be of type *PathError.
InitWAL() error
// CloseWAL closes and discards the underlying WAL file.
// Any further writes will not be relayed to disk.
CloseWAL()
// SizeBytes returns the total size of all txs in the mempool.
SizeBytes() int64
}
//--------------------------------------------------------------------------------
// PreCheckFunc is an optional filter executed before CheckTx and rejects
// transaction if false is returned. An example would be to ensure that a
// transaction doesn't exceed the block size.
@@ -90,27 +108,16 @@ type PreCheckFunc func(types.Tx) error
// transaction doesn't require more gas than available for the block.
type PostCheckFunc func(types.Tx, *abci.ResponseCheckTx) error
// TxInfo are parameters that get passed when attempting to add a tx to the
// mempool.
type TxInfo struct {
// SenderID is the internal peer ID used in the mempool to identify the
// sender, storing 2 bytes with each tx instead of 20 bytes for the p2p.ID.
SenderID uint16
// SenderP2PID is the actual p2p.ID of the sender, used e.g. for logging.
SenderP2PID p2p.ID
}
//--------------------------------------------------------------------------------
// PreCheckMaxBytes checks that the size of the transaction is smaller or equal to the expected maxBytes.
// PreCheckMaxBytes checks that the size of the transaction is smaller or equal
// to the expected maxBytes.
func PreCheckMaxBytes(maxBytes int64) PreCheckFunc {
return func(tx types.Tx) error {
txSize := types.ComputeProtoSizeForTxs([]types.Tx{tx})
if txSize > maxBytes {
return fmt.Errorf("tx size is too big: %d, max: %d",
txSize, maxBytes)
return fmt.Errorf("tx size is too big: %d, max: %d", txSize, maxBytes)
}
return nil
}
}
@@ -130,6 +137,57 @@ func PostCheckMaxGas(maxGas int64) PostCheckFunc {
return fmt.Errorf("gas wanted %d is greater than max gas %d",
res.GasWanted, maxGas)
}
return nil
}
}
// ErrTxInCache is returned to the client if we saw tx earlier
var ErrTxInCache = errors.New("tx already exists in cache")
// TxKey is the fixed length array key used as an index.
type TxKey [sha256.Size]byte
// ErrTxTooLarge defines an error when a transaction is too big to be sent in a
// message to other peers.
type ErrTxTooLarge struct {
Max int
Actual int
}
func (e ErrTxTooLarge) Error() string {
return fmt.Sprintf("Tx too large. Max size is %d, but got %d", e.Max, e.Actual)
}
// ErrMempoolIsFull defines an error where Tendermint and the application cannot
// handle that much load.
type ErrMempoolIsFull struct {
NumTxs int
MaxTxs int
TxsBytes int64
MaxTxsBytes int64
}
func (e ErrMempoolIsFull) Error() string {
return fmt.Sprintf(
"mempool is full: number of txs %d (max: %d), total txs bytes %d (max: %d)",
e.NumTxs,
e.MaxTxs,
e.TxsBytes,
e.MaxTxsBytes,
)
}
// ErrPreCheck defines an error where a transaction fails a pre-check.
type ErrPreCheck struct {
Reason error
}
func (e ErrPreCheck) Error() string {
return e.Reason.Error()
}
// IsPreCheckError returns true if err is due to pre check failure.
func IsPreCheckError(err error) bool {
return errors.As(err, &ErrPreCheck{})
}

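A short sketch of the PreCheckMaxBytes and PostCheckMaxGas helpers defined above; the limits are arbitrary:

package main

import (
	"fmt"

	abci "github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/mempool"
	"github.com/tendermint/tendermint/types"
)

func main() {
	preCheck := mempool.PreCheckMaxBytes(10)
	postCheck := mempool.PostCheckMaxGas(5)

	// Fails: the proto-encoded size of a 32-byte tx exceeds the 10-byte cap.
	fmt.Println(preCheck(types.Tx(make([]byte, 32))))

	// Fails: the application asked for more gas than the limit allows.
	fmt.Println(postCheck(types.Tx("ok"), &abci.ResponseCheckTx{GasWanted: 7}))
}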

@@ -18,10 +18,25 @@ const (
type Metrics struct {
// Size of the mempool.
Size metrics.Gauge
// Histogram of transaction sizes, in bytes.
TxSizeBytes metrics.Histogram
// Number of failed transactions.
FailedTxs metrics.Counter
// RejectedTxs defines the number of rejected transactions. These are
// transactions that passed CheckTx but failed to make it into the mempool
// due to resource limits, e.g. mempool is full and no lower priority
// transactions exist in the mempool.
RejectedTxs metrics.Counter
// EvictedTxs defines the number of evicted transactions. These are valid
// transactions that passed CheckTx and existed in the mempool but were later
// evicted to make room for higher priority valid transactions that passed
// CheckTx.
EvictedTxs metrics.Counter
// Number of times transactions are rechecked in the mempool.
RecheckTimes metrics.Counter
}
@@ -41,6 +56,7 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
Name: "size",
Help: "Size of the mempool (number of uncommitted transactions).",
}, labels).With(labelsAndValues...),
TxSizeBytes: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{
Namespace: namespace,
Subsystem: MetricsSubsystem,
@@ -48,12 +64,28 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
Help: "Transaction sizes in bytes.",
Buckets: stdprometheus.ExponentialBuckets(1, 3, 17),
}, labels).With(labelsAndValues...),
FailedTxs: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
Namespace: namespace,
Subsystem: MetricsSubsystem,
Name: "failed_txs",
Help: "Number of failed transactions.",
}, labels).With(labelsAndValues...),
RejectedTxs: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
Namespace: namespace,
Subsystem: MetricsSubsystem,
Name: "rejected_txs",
Help: "Number of rejected transactions.",
}, labels).With(labelsAndValues...),
EvictedTxs: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
Namespace: namespace,
Subsystem: MetricsSubsystem,
Name: "evicted_txs",
Help: "Number of evicted transactions.",
}, labels).With(labelsAndValues...),
RecheckTimes: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
Namespace: namespace,
Subsystem: MetricsSubsystem,
@@ -69,6 +101,8 @@ func NopMetrics() *Metrics {
Size: discard.NewGauge(),
TxSizeBytes: discard.NewHistogram(),
FailedTxs: discard.NewCounter(),
RejectedTxs: discard.NewCounter(),
EvictedTxs: discard.NewCounter(),
RecheckTimes: discard.NewCounter(),
}
}

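For context, a sketch of constructing the metrics above; the namespace and label values are illustrative:

package main

import "github.com/tendermint/tendermint/mempool"

func main() {
	// Prometheus-backed metrics; the chain_id label is attached to every series.
	m := mempool.PrometheusMetrics("tendermint", "chain_id", "test-chain")

	m.Size.Set(42)
	m.RejectedTxs.Add(1)
	m.EvictedTxs.Add(1)

	// No-op metrics for tests or when instrumentation is disabled.
	_ = mempool.NopMetrics()
}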

@@ -3,29 +3,30 @@ package mock
import (
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/libs/clist"
mempl "github.com/tendermint/tendermint/mempool"
"github.com/tendermint/tendermint/mempool"
"github.com/tendermint/tendermint/types"
)
// Mempool is an empty implementation of a Mempool, useful for testing.
type Mempool struct{}
var _ mempl.Mempool = Mempool{}
var _ mempool.Mempool = Mempool{}
func (Mempool) Lock() {}
func (Mempool) Unlock() {}
func (Mempool) Size() int { return 0 }
func (Mempool) CheckTx(_ types.Tx, _ func(*abci.Response), _ mempl.TxInfo) error {
func (Mempool) CheckTx(_ types.Tx, _ func(*abci.Response), _ mempool.TxInfo) error {
return nil
}
func (Mempool) RemoveTxByKey(txKey types.TxKey) error { return nil }
func (Mempool) ReapMaxBytesMaxGas(_, _ int64) types.Txs { return types.Txs{} }
func (Mempool) ReapMaxTxs(n int) types.Txs { return types.Txs{} }
func (Mempool) Update(
_ int64,
_ types.Txs,
_ []*abci.ResponseDeliverTx,
_ mempl.PreCheckFunc,
_ mempl.PostCheckFunc,
_ mempool.PreCheckFunc,
_ mempool.PostCheckFunc,
) error {
return nil
}
@@ -33,7 +34,7 @@ func (Mempool) Flush() {}
func (Mempool) FlushAppConn() error { return nil }
func (Mempool) TxsAvailable() <-chan struct{} { return make(chan struct{}) }
func (Mempool) EnableTxsAvailable() {}
func (Mempool) TxsBytes() int64 { return 0 }
func (Mempool) SizeBytes() int64 { return 0 }
func (Mempool) TxsFront() *clist.CElement { return nil }
func (Mempool) TxsWaitChan() <-chan struct{} { return nil }

mempool/tx.go (new file)

@@ -0,0 +1,17 @@
package mempool
import (
"github.com/tendermint/tendermint/p2p"
)
// TxInfo are parameters that get passed when attempting to add a tx to the
// mempool.
type TxInfo struct {
// SenderID is the internal peer ID used in the mempool to identify the
// sender, storing two bytes with each transaction instead of 20 bytes for
// the types.NodeID.
SenderID uint16
// SenderP2PID is the actual p2p.ID of the sender, used e.g. for logging.
SenderP2PID p2p.ID
}

mempool/v0/bench_test.go (new file)

@@ -0,0 +1,101 @@
package v0
import (
"encoding/binary"
"sync/atomic"
"testing"
"github.com/tendermint/tendermint/abci/example/kvstore"
"github.com/tendermint/tendermint/mempool"
"github.com/tendermint/tendermint/proxy"
)
func BenchmarkReap(b *testing.B) {
app := kvstore.NewApplication()
cc := proxy.NewLocalClientCreator(app)
mp, cleanup := newMempoolWithApp(cc)
defer cleanup()
mp.config.Size = 100000
size := 10000
for i := 0; i < size; i++ {
tx := make([]byte, 8)
binary.BigEndian.PutUint64(tx, uint64(i))
if err := mp.CheckTx(tx, nil, mempool.TxInfo{}); err != nil {
b.Fatal(err)
}
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
mp.ReapMaxBytesMaxGas(100000000, 10000000)
}
}
func BenchmarkCheckTx(b *testing.B) {
app := kvstore.NewApplication()
cc := proxy.NewLocalClientCreator(app)
mp, cleanup := newMempoolWithApp(cc)
defer cleanup()
mp.config.Size = 1000000
b.ResetTimer()
for i := 0; i < b.N; i++ {
b.StopTimer()
tx := make([]byte, 8)
binary.BigEndian.PutUint64(tx, uint64(i))
b.StartTimer()
if err := mp.CheckTx(tx, nil, mempool.TxInfo{}); err != nil {
b.Fatal(err)
}
}
}
func BenchmarkParallelCheckTx(b *testing.B) {
app := kvstore.NewApplication()
cc := proxy.NewLocalClientCreator(app)
mp, cleanup := newMempoolWithApp(cc)
defer cleanup()
mp.config.Size = 100000000
var txcnt uint64
next := func() uint64 {
return atomic.AddUint64(&txcnt, 1) - 1
}
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
tx := make([]byte, 8)
binary.BigEndian.PutUint64(tx, next())
if err := mp.CheckTx(tx, nil, mempool.TxInfo{}); err != nil {
b.Fatal(err)
}
}
})
}
func BenchmarkCheckDuplicateTx(b *testing.B) {
app := kvstore.NewApplication()
cc := proxy.NewLocalClientCreator(app)
mp, cleanup := newMempoolWithApp(cc)
defer cleanup()
mp.config.Size = 1000000
for i := 0; i < b.N; i++ {
tx := make([]byte, 8)
binary.BigEndian.PutUint64(tx, uint64(i))
if err := mp.CheckTx(tx, nil, mempool.TxInfo{}); err != nil {
b.Fatal(err)
}
if err := mp.CheckTx(tx, nil, mempool.TxInfo{}); err == nil {
b.Fatal("tx should be duplicate")
}
}
}

mempool/v0/cache_test.go (new file)

@@ -0,0 +1,81 @@
package v0
import (
"crypto/sha256"
"testing"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/abci/example/kvstore"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/mempool"
"github.com/tendermint/tendermint/proxy"
"github.com/tendermint/tendermint/types"
)
func TestCacheAfterUpdate(t *testing.T) {
app := kvstore.NewApplication()
cc := proxy.NewLocalClientCreator(app)
mp, cleanup := newMempoolWithApp(cc)
defer cleanup()
// reAddIndices & txsInCache can have elements > numTxsToCreate
// also assumes max index is 255 for convenience
// txs in cache also checks order of elements
tests := []struct {
numTxsToCreate int
updateIndices []int
reAddIndices []int
txsInCache []int
}{
{1, []int{}, []int{1}, []int{1, 0}}, // adding new txs works
{2, []int{1}, []int{}, []int{1, 0}}, // update doesn't remove tx from cache
{2, []int{2}, []int{}, []int{2, 1, 0}}, // update adds new tx to cache
{2, []int{1}, []int{1}, []int{1, 0}}, // re-adding after update doesn't make dupe
}
for tcIndex, tc := range tests {
for i := 0; i < tc.numTxsToCreate; i++ {
tx := types.Tx{byte(i)}
err := mp.CheckTx(tx, nil, mempool.TxInfo{})
require.NoError(t, err)
}
updateTxs := []types.Tx{}
for _, v := range tc.updateIndices {
tx := types.Tx{byte(v)}
updateTxs = append(updateTxs, tx)
}
err := mp.Update(int64(tcIndex), updateTxs, abciResponses(len(updateTxs), abci.CodeTypeOK), nil, nil)
require.NoError(t, err)
for _, v := range tc.reAddIndices {
tx := types.Tx{byte(v)}
_ = mp.CheckTx(tx, nil, mempool.TxInfo{})
}
cache := mp.cache.(*mempool.LRUTxCache)
node := cache.GetList().Front()
counter := 0
for node != nil {
require.NotEqual(t, len(tc.txsInCache), counter,
"cache larger than expected on testcase %d", tcIndex)
nodeVal := node.Value.(types.TxKey)
expectedBz := sha256.Sum256([]byte{byte(tc.txsInCache[len(tc.txsInCache)-counter-1])})
// Reference for reading the errors:
// >>> sha256('\x00').hexdigest()
// '6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d'
// >>> sha256('\x01').hexdigest()
// '4bf5122f344554c53bde2ebb8cd2b7e3d1600ad631c385a5d7cce23c7785459a'
// >>> sha256('\x02').hexdigest()
// 'dbc1b4c900ffe48d575b5da5c638040125f65db0fe3e24494b76ea986457d986'
require.EqualValues(t, expectedBz, nodeVal, "Equality failed on index %d, tc %d", counter, tcIndex)
counter++
node = node.Next()
}
require.Equal(t, len(tc.txsInCache), counter,
"cache smaller than expected on testcase %d", tcIndex)
mp.Flush()
}
}


@@ -1,33 +1,23 @@
package mempool
package v0
import (
"bytes"
"container/list"
"crypto/sha256"
"fmt"
"errors"
"sync"
"sync/atomic"
abci "github.com/tendermint/tendermint/abci/types"
cfg "github.com/tendermint/tendermint/config"
auto "github.com/tendermint/tendermint/libs/autofile"
"github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/libs/clist"
"github.com/tendermint/tendermint/libs/log"
tmmath "github.com/tendermint/tendermint/libs/math"
tmos "github.com/tendermint/tendermint/libs/os"
tmsync "github.com/tendermint/tendermint/libs/sync"
"github.com/tendermint/tendermint/mempool"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/proxy"
"github.com/tendermint/tendermint/types"
)
// TxKeySize is the size of the transaction key index
const TxKeySize = sha256.Size
var newline = []byte("\n")
//--------------------------------------------------------------------------------
// CListMempool is an ordered in-memory pool for transactions before they are
// proposed in a consensus round. Transaction validity is checked using the
// CheckTx abci message before the transaction is added to the pool. The
@@ -42,16 +32,15 @@ type CListMempool struct {
notifiedTxsAvailable bool
txsAvailable chan struct{} // fires once for each height, when the mempool is not empty
config *cfg.MempoolConfig
config *config.MempoolConfig
// Exclusive mutex for Update method to prevent concurrent execution of
// CheckTx or ReapMaxBytesMaxGas(ReapMaxTxs) methods.
updateMtx tmsync.RWMutex
preCheck PreCheckFunc
postCheck PostCheckFunc
preCheck mempool.PreCheckFunc
postCheck mempool.PostCheckFunc
wal *auto.AutoFile // a log of mempool txs
txs *clist.CList // concurrent linked-list of good txs
txs *clist.CList // concurrent linked-list of good txs
proxyAppConn proxy.AppConnMempool
// Track whether we're rechecking txs.
@@ -66,45 +55,50 @@ type CListMempool struct {
// Keep a cache of already-seen txs.
// This reduces the pressure on the proxyApp.
cache txCache
cache mempool.TxCache
logger log.Logger
metrics *Metrics
logger log.Logger
metrics *mempool.Metrics
}
var _ Mempool = &CListMempool{}
var _ mempool.Mempool = &CListMempool{}
// CListMempoolOption sets an optional parameter on the mempool.
type CListMempoolOption func(*CListMempool)
// NewCListMempool returns a new mempool with the given configuration and connection to an application.
// NewCListMempool returns a new mempool with the given configuration and
// connection to an application.
func NewCListMempool(
config *cfg.MempoolConfig,
cfg *config.MempoolConfig,
proxyAppConn proxy.AppConnMempool,
height int64,
options ...CListMempoolOption,
) *CListMempool {
mempool := &CListMempool{
config: config,
mp := &CListMempool{
config: cfg,
proxyAppConn: proxyAppConn,
txs: clist.New(),
height: height,
recheckCursor: nil,
recheckEnd: nil,
logger: log.NewNopLogger(),
metrics: NopMetrics(),
metrics: mempool.NopMetrics(),
}
if config.CacheSize > 0 {
mempool.cache = newMapTxCache(config.CacheSize)
if cfg.CacheSize > 0 {
mp.cache = mempool.NewLRUTxCache(cfg.CacheSize)
} else {
mempool.cache = nopTxCache{}
mp.cache = mempool.NopTxCache{}
}
proxyAppConn.SetResponseCallback(mempool.globalCb)
proxyAppConn.SetResponseCallback(mp.globalCb)
for _, option := range options {
option(mempool)
option(mp)
}
return mempool
return mp
}
// NOTE: not thread safe - should only be called once, on startup
@@ -120,49 +114,22 @@ func (mem *CListMempool) SetLogger(l log.Logger) {
// WithPreCheck sets a filter for the mempool to reject a tx if f(tx) returns
// false. This is ran before CheckTx. Only applies to the first created block.
// After that, Update overwrites the existing value.
func WithPreCheck(f PreCheckFunc) CListMempoolOption {
func WithPreCheck(f mempool.PreCheckFunc) CListMempoolOption {
return func(mem *CListMempool) { mem.preCheck = f }
}
// WithPostCheck sets a filter for the mempool to reject a tx if f(tx) returns
// false. This is ran after CheckTx. Only applies to the first created block.
// After that, Update overwrites the existing value.
func WithPostCheck(f PostCheckFunc) CListMempoolOption {
func WithPostCheck(f mempool.PostCheckFunc) CListMempoolOption {
return func(mem *CListMempool) { mem.postCheck = f }
}
// WithMetrics sets the metrics.
func WithMetrics(metrics *Metrics) CListMempoolOption {
func WithMetrics(metrics *mempool.Metrics) CListMempoolOption {
return func(mem *CListMempool) { mem.metrics = metrics }
}
func (mem *CListMempool) InitWAL() error {
var (
walDir = mem.config.WalDir()
walFile = walDir + "/wal"
)
const perm = 0700
if err := tmos.EnsureDir(walDir, perm); err != nil {
return err
}
af, err := auto.OpenAutoFile(walFile)
if err != nil {
return fmt.Errorf("can't open autofile %s: %w", walFile, err)
}
mem.wal = af
return nil
}
func (mem *CListMempool) CloseWAL() {
if err := mem.wal.Close(); err != nil {
mem.logger.Error("Error closing WAL", "err", err)
}
mem.wal = nil
}
// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) Lock() {
mem.updateMtx.Lock()
@@ -179,7 +146,7 @@ func (mem *CListMempool) Size() int {
}
// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) TxsBytes() int64 {
func (mem *CListMempool) SizeBytes() int64 {
return atomic.LoadInt64(&mem.txsBytes)
}
@@ -231,7 +198,12 @@ func (mem *CListMempool) TxsWaitChan() <-chan struct{} {
// CONTRACT: Either cb will get called, or err returned.
//
// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) CheckTx(tx types.Tx, cb func(*abci.Response), txInfo TxInfo) error {
func (mem *CListMempool) CheckTx(
tx types.Tx,
cb func(*abci.Response),
txInfo mempool.TxInfo,
) error {
mem.updateMtx.RLock()
// use defer to unlock mutex because application (*local client*) might panic
defer mem.updateMtx.RUnlock()
@@ -243,24 +215,17 @@ func (mem *CListMempool) CheckTx(tx types.Tx, cb func(*abci.Response), txInfo Tx
}
if txSize > mem.config.MaxTxBytes {
return ErrTxTooLarge{mem.config.MaxTxBytes, txSize}
return mempool.ErrTxTooLarge{
Max: mem.config.MaxTxBytes,
Actual: txSize,
}
}
if mem.preCheck != nil {
if err := mem.preCheck(tx); err != nil {
return ErrPreCheck{err}
}
}
// NOTE: writing to the WAL and calling proxy must be done before adding tx
// to the cache. otherwise, if either of them fails, next time CheckTx is
// called with tx, ErrTxInCache will be returned without tx being checked at
// all even once.
if mem.wal != nil {
// TODO: Notify administrators when WAL fails
_, err := mem.wal.Write(append([]byte(tx), newline...))
if err != nil {
return fmt.Errorf("wal.Write: %w", err)
return mempool.ErrPreCheck{
Reason: err,
}
}
}
@@ -269,20 +234,19 @@ func (mem *CListMempool) CheckTx(tx types.Tx, cb func(*abci.Response), txInfo Tx
return err
}
if !mem.cache.Push(tx) {
if !mem.cache.Push(tx) { // if the transaction already exists in the cache
// Record a new sender for a tx we've already seen.
// Note it's possible a tx is still in the cache but no longer in the mempool
// (eg. after committing a block, txs are removed from mempool but not cache),
// so we only record the sender for txs still in the mempool.
if e, ok := mem.txsMap.Load(TxKey(tx)); ok {
if e, ok := mem.txsMap.Load(tx.Key()); ok {
memTx := e.(*clist.CElement).Value.(*mempoolTx)
memTx.senders.LoadOrStore(txInfo.SenderID, true)
// TODO: consider punishing peer for dups,
// its non-trivial since invalid txs can become valid,
// but they can spam the same tx with little cost to them atm.
}
return ErrTxInCache
return mempool.ErrTxInCache
}
reqRes := mem.proxyAppConn.CheckTxAsync(abci.RequestCheckTx{Tx: tx})
@@ -349,7 +313,7 @@ func (mem *CListMempool) reqResCb(
// - resCbFirstTime (lock not held) if tx is valid
func (mem *CListMempool) addTx(memTx *mempoolTx) {
e := mem.txs.PushBack(memTx)
mem.txsMap.Store(TxKey(memTx.tx), e)
mem.txsMap.Store(memTx.tx.Key(), e)
atomic.AddInt64(&mem.txsBytes, int64(len(memTx.tx)))
mem.metrics.TxSizeBytes.Observe(float64(len(memTx.tx)))
}
@@ -360,7 +324,7 @@ func (mem *CListMempool) addTx(memTx *mempoolTx) {
func (mem *CListMempool) removeTx(tx types.Tx, elem *clist.CElement, removeFromCache bool) {
mem.txs.Remove(elem)
elem.DetachPrev()
mem.txsMap.Delete(TxKey(tx))
mem.txsMap.Delete(tx.Key())
atomic.AddInt64(&mem.txsBytes, int64(-len(tx)))
if removeFromCache {
@@ -369,25 +333,30 @@ func (mem *CListMempool) removeTx(tx types.Tx, elem *clist.CElement, removeFromC
}
// RemoveTxByKey removes a transaction from the mempool by its TxKey index.
func (mem *CListMempool) RemoveTxByKey(txKey [TxKeySize]byte, removeFromCache bool) {
func (mem *CListMempool) RemoveTxByKey(txKey types.TxKey) error {
if e, ok := mem.txsMap.Load(txKey); ok {
memTx := e.(*clist.CElement).Value.(*mempoolTx)
if memTx != nil {
mem.removeTx(memTx.tx, e.(*clist.CElement), removeFromCache)
mem.removeTx(memTx.tx, e.(*clist.CElement), false)
return nil
}
return errors.New("transaction not found")
}
return errors.New("invalid transaction found")
}
func (mem *CListMempool) isFull(txSize int) error {
var (
memSize = mem.Size()
txsBytes = mem.TxsBytes()
txsBytes = mem.SizeBytes()
)
if memSize >= mem.config.Size || int64(txSize)+txsBytes > mem.config.MaxTxsBytes {
return ErrMempoolIsFull{
memSize, mem.config.Size,
txsBytes, mem.config.MaxTxsBytes,
return mempool.ErrMempoolIsFull{
NumTxs: memSize,
MaxTxs: mem.config.Size,
TxsBytes: txsBytes,
MaxTxsBytes: mem.config.MaxTxsBytes,
}
}
@@ -427,8 +396,9 @@ func (mem *CListMempool) resCbFirstTime(
}
memTx.senders.Store(peerID, true)
mem.addTx(memTx)
mem.logger.Debug("added good transaction",
"tx", txID(tx),
mem.logger.Debug(
"added good transaction",
"tx", types.Tx(tx).Hash(),
"res", r,
"height", memTx.height,
"total", mem.Size(),
@@ -436,14 +406,21 @@ func (mem *CListMempool) resCbFirstTime(
mem.notifyTxsAvailable()
} else {
// ignore bad transaction
mem.logger.Debug("rejected bad transaction",
"tx", txID(tx), "peerID", peerP2PID, "res", r, "err", postCheckErr)
mem.logger.Debug(
"rejected bad transaction",
"tx", types.Tx(tx).Hash(),
"peerID", peerP2PID,
"res", r,
"err", postCheckErr,
)
mem.metrics.FailedTxs.Add(1)
if !mem.config.KeepInvalidTxsInCache {
// remove from cache (it might be good later)
mem.cache.Remove(tx)
}
}
default:
// ignore other messages
}
@@ -458,21 +435,45 @@ func (mem *CListMempool) resCbRecheck(req *abci.Request, res *abci.Response) {
case *abci.Response_CheckTx:
tx := req.GetCheckTx().Tx
memTx := mem.recheckCursor.Value.(*mempoolTx)
if !bytes.Equal(tx, memTx.tx) {
panic(fmt.Sprintf(
"Unexpected tx response from proxy during recheck\nExpected %X, got %X",
memTx.tx,
tx))
// Search through the remaining list of txs to recheck for a transaction that matches
// the one we received from the ABCI application.
for {
if bytes.Equal(tx, memTx.tx) {
// We've found a tx in the recheck list that matches the tx that we
// received from the ABCI application.
// Break, and use this transaction for further checks.
break
}
mem.logger.Error(
"re-CheckTx transaction mismatch",
"got", types.Tx(tx),
"expected", memTx.tx,
)
if mem.recheckCursor == mem.recheckEnd {
// we reached the end of the recheckTx list without finding a tx
// matching the one we received from the ABCI application.
// Return without processing any tx.
mem.recheckCursor = nil
return
}
mem.recheckCursor = mem.recheckCursor.Next()
memTx = mem.recheckCursor.Value.(*mempoolTx)
}
var postCheckErr error
if mem.postCheck != nil {
postCheckErr = mem.postCheck(tx, r.CheckTx)
}
if (r.CheckTx.Code == abci.CodeTypeOK) && postCheckErr == nil {
// Good, nothing to do.
} else {
// Tx became invalidated due to newly committed block.
mem.logger.Debug("tx is no longer valid", "tx", txID(tx), "res", r, "err", postCheckErr)
mem.logger.Debug("tx is no longer valid", "tx", types.Tx(tx).Hash(), "res", r, "err", postCheckErr)
// NOTE: we remove tx from the cache because it might be good later
mem.removeTx(tx, mem.recheckCursor, !mem.config.KeepInvalidTxsInCache)
}
@@ -519,7 +520,10 @@ func (mem *CListMempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs {
mem.updateMtx.RLock()
defer mem.updateMtx.RUnlock()
var totalGas int64
var (
totalGas int64
runningSize int64
)
// TODO: we will get a performance boost if we have a good estimate of avg
// size per tx, and set the initial capacity based off of that.
@@ -528,22 +532,26 @@ func (mem *CListMempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs {
for e := mem.txs.Front(); e != nil; e = e.Next() {
memTx := e.Value.(*mempoolTx)
dataSize := types.ComputeProtoSizeForTxs(append(txs, memTx.tx))
txs = append(txs, memTx.tx)
dataSize := types.ComputeProtoSizeForTxs([]types.Tx{memTx.tx})
// Check total size requirement
if maxBytes > -1 && dataSize > maxBytes {
return txs
if maxBytes > -1 && runningSize+dataSize > maxBytes {
return txs[:len(txs)-1]
}
runningSize += dataSize
// Check total gas requirement.
// If maxGas is negative, skip this check.
// Since newTotalGas < maxGas, which
// must be non-negative, it follows that this won't overflow.
newTotalGas := totalGas + memTx.gasWanted
if maxGas > -1 && newTotalGas > maxGas {
return txs
return txs[:len(txs)-1]
}
totalGas = newTotalGas
txs = append(txs, memTx.tx)
}
return txs
}
@@ -570,8 +578,8 @@ func (mem *CListMempool) Update(
height int64,
txs types.Txs,
deliverTxResponses []*abci.ResponseDeliverTx,
preCheck PreCheckFunc,
postCheck PostCheckFunc,
preCheck mempool.PreCheckFunc,
postCheck mempool.PostCheckFunc,
) error {
// Set height
mem.height = height
@@ -603,7 +611,7 @@ func (mem *CListMempool) Update(
// Mempool after:
// 100
// https://github.com/tendermint/tendermint/issues/3322.
if e, ok := mem.txsMap.Load(TxKey(tx)); ok {
if e, ok := mem.txsMap.Load(tx.Key()); ok {
mem.removeTx(tx, e.(*clist.CElement), false)
}
}
@@ -666,98 +674,3 @@ type mempoolTx struct {
func (memTx *mempoolTx) Height() int64 {
return atomic.LoadInt64(&memTx.height)
}
//--------------------------------------------------------------------------------
type txCache interface {
Reset()
Push(tx types.Tx) bool
Remove(tx types.Tx)
}
// mapTxCache maintains a LRU cache of transactions. This only stores the hash
// of the tx, due to memory concerns.
type mapTxCache struct {
mtx tmsync.Mutex
size int
cacheMap map[[TxKeySize]byte]*list.Element
list *list.List
}
var _ txCache = (*mapTxCache)(nil)
// newMapTxCache returns a new mapTxCache.
func newMapTxCache(cacheSize int) *mapTxCache {
return &mapTxCache{
size: cacheSize,
cacheMap: make(map[[TxKeySize]byte]*list.Element, cacheSize),
list: list.New(),
}
}
// Reset resets the cache to an empty state.
func (cache *mapTxCache) Reset() {
cache.mtx.Lock()
cache.cacheMap = make(map[[TxKeySize]byte]*list.Element, cache.size)
cache.list.Init()
cache.mtx.Unlock()
}
// Push adds the given tx to the cache and returns true. It returns
// false if tx is already in the cache.
func (cache *mapTxCache) Push(tx types.Tx) bool {
cache.mtx.Lock()
defer cache.mtx.Unlock()
// Use the tx hash in the cache
txHash := TxKey(tx)
if moved, exists := cache.cacheMap[txHash]; exists {
cache.list.MoveToBack(moved)
return false
}
if cache.list.Len() >= cache.size {
popped := cache.list.Front()
if popped != nil {
poppedTxHash := popped.Value.([TxKeySize]byte)
delete(cache.cacheMap, poppedTxHash)
cache.list.Remove(popped)
}
}
e := cache.list.PushBack(txHash)
cache.cacheMap[txHash] = e
return true
}
// Remove removes the given tx from the cache.
func (cache *mapTxCache) Remove(tx types.Tx) {
cache.mtx.Lock()
txHash := TxKey(tx)
popped := cache.cacheMap[txHash]
delete(cache.cacheMap, txHash)
if popped != nil {
cache.list.Remove(popped)
}
cache.mtx.Unlock()
}
type nopTxCache struct{}
var _ txCache = (*nopTxCache)(nil)
func (nopTxCache) Reset() {}
func (nopTxCache) Push(types.Tx) bool { return true }
func (nopTxCache) Remove(types.Tx) {}
//--------------------------------------------------------------------------------
// TxKey is the fixed length array hash used as the key in maps.
func TxKey(tx types.Tx) [TxKeySize]byte {
return sha256.Sum256(tx)
}
// txID is a hash of the Tx.
func txID(tx []byte) []byte {
return types.Tx(tx).Hash()
}

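The ReapMaxBytesMaxGas hunk above replaces re-encoding the whole candidate slice on every iteration with a running size total, trimming the final tx when it would overshoot. A simplified standalone sketch of that accounting, with plain byte lengths standing in for types.ComputeProtoSizeForTxs:

package main

import "fmt"

// reap keeps appending txs until the next one would exceed maxBytes.
func reap(txs [][]byte, maxBytes int64) [][]byte {
	out := make([][]byte, 0, len(txs))
	var runningSize int64

	for _, tx := range txs {
		size := int64(len(tx)) // the real code uses the proto-encoded size
		if maxBytes > -1 && runningSize+size > maxBytes {
			break // the diff appends first and returns txs[:len(txs)-1]; same effect
		}
		runningSize += size
		out = append(out, tx)
	}
	return out
}

func main() {
	txs := [][]byte{make([]byte, 4), make([]byte, 4), make([]byte, 4)}
	fmt.Println(len(reap(txs, 10))) // 2: a third 4-byte tx would exceed 10
}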

@@ -1,30 +1,30 @@
package mempool
package v0
import (
"crypto/rand"
"crypto/sha256"
"encoding/binary"
"fmt"
"io/ioutil"
mrand "math/rand"
"os"
"path/filepath"
"testing"
"time"
"github.com/gogo/protobuf/proto"
gogotypes "github.com/gogo/protobuf/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/abci/example/counter"
abciclient "github.com/tendermint/tendermint/abci/client"
abciclimocks "github.com/tendermint/tendermint/abci/client/mocks"
"github.com/tendermint/tendermint/abci/example/kvstore"
abciserver "github.com/tendermint/tendermint/abci/server"
abci "github.com/tendermint/tendermint/abci/types"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/libs/log"
tmrand "github.com/tendermint/tendermint/libs/rand"
"github.com/tendermint/tendermint/libs/service"
"github.com/tendermint/tendermint/mempool"
"github.com/tendermint/tendermint/proxy"
"github.com/tendermint/tendermint/types"
)
@@ -33,20 +33,48 @@ import (
// test.
type cleanupFunc func()
func newMempoolWithApp(cc proxy.ClientCreator) (*CListMempool, cleanupFunc) {
return newMempoolWithAppAndConfig(cc, cfg.ResetTestRoot("mempool_test"))
func newMempoolWithAppMock(cc proxy.ClientCreator, client abciclient.Client) (*CListMempool, cleanupFunc, error) {
conf := config.ResetTestRoot("mempool_test")
mp, cu := newMempoolWithAppAndConfigMock(cc, conf, client)
return mp, cu, nil
}
func newMempoolWithAppAndConfig(cc proxy.ClientCreator, config *cfg.Config) (*CListMempool, cleanupFunc) {
func newMempoolWithAppAndConfigMock(cc proxy.ClientCreator,
cfg *config.Config,
client abciclient.Client) (*CListMempool, cleanupFunc) {
appConnMem := client
appConnMem.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "mempool"))
err := appConnMem.Start()
if err != nil {
panic(err)
}
mp := NewCListMempool(cfg.Mempool, appConnMem, 0)
mp.SetLogger(log.TestingLogger())
return mp, func() { os.RemoveAll(cfg.RootDir) }
}
func newMempoolWithApp(cc proxy.ClientCreator) (*CListMempool, cleanupFunc) {
conf := config.ResetTestRoot("mempool_test")
mp, cu := newMempoolWithAppAndConfig(cc, conf)
return mp, cu
}
func newMempoolWithAppAndConfig(cc proxy.ClientCreator, cfg *config.Config) (*CListMempool, cleanupFunc) {
appConnMem, _ := cc.NewABCIClient()
appConnMem.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "mempool"))
err := appConnMem.Start()
if err != nil {
panic(err)
}
mempool := NewCListMempool(config.Mempool, appConnMem, 0)
mempool.SetLogger(log.TestingLogger())
return mempool, func() { os.RemoveAll(config.RootDir) }
mp := NewCListMempool(cfg.Mempool, appConnMem, 0)
mp.SetLogger(log.TestingLogger())
return mp, func() { os.RemoveAll(cfg.RootDir) }
}
func ensureNoFire(t *testing.T, ch <-chan struct{}, timeoutMS int) {
@@ -67,9 +95,9 @@ func ensureFire(t *testing.T, ch <-chan struct{}, timeoutMS int) {
}
}
func checkTxs(t *testing.T, mempool Mempool, count int, peerID uint16) types.Txs {
func checkTxs(t *testing.T, mp mempool.Mempool, count int, peerID uint16) types.Txs {
txs := make(types.Txs, count)
txInfo := TxInfo{SenderID: peerID}
txInfo := mempool.TxInfo{SenderID: peerID}
for i := 0; i < count; i++ {
txBytes := make([]byte, 20)
txs[i] = txBytes
@@ -77,11 +105,11 @@ func checkTxs(t *testing.T, mempool Mempool, count int, peerID uint16) types.Txs
if err != nil {
t.Error(err)
}
if err := mempool.CheckTx(txBytes, nil, txInfo); err != nil {
if err := mp.CheckTx(txBytes, nil, txInfo); err != nil {
// Skip invalid txs.
// TestMempoolFilters will fail otherwise. It asserts a number of txs
// returned.
if IsPreCheckError(err) {
if mempool.IsPreCheckError(err) {
continue
}
t.Fatalf("CheckTx failed: %v while checking #%d tx", err, i)
@@ -93,18 +121,18 @@ func checkTxs(t *testing.T, mempool Mempool, count int, peerID uint16) types.Txs
func TestReapMaxBytesMaxGas(t *testing.T) {
app := kvstore.NewApplication()
cc := proxy.NewLocalClientCreator(app)
mempool, cleanup := newMempoolWithApp(cc)
mp, cleanup := newMempoolWithApp(cc)
defer cleanup()
// Ensure gas calculation behaves as expected
checkTxs(t, mempool, 1, UnknownPeerID)
tx0 := mempool.TxsFront().Value.(*mempoolTx)
checkTxs(t, mp, 1, mempool.UnknownPeerID)
tx0 := mp.TxsFront().Value.(*mempoolTx)
// assert that kv store has gas wanted = 1.
require.Equal(t, app.CheckTx(abci.RequestCheckTx{Tx: tx0.tx}).GasWanted, int64(1), "KVStore had a gas value neq to 1")
require.Equal(t, tx0.gasWanted, int64(1), "transaction's gas was set incorrectly")
// ensure each tx is 20 bytes long
require.Equal(t, len(tx0.tx), 20, "Tx is longer than 20 bytes")
mempool.Flush()
mp.Flush()
// each table driven test creates numTxsToCreate txs with checkTx, and at the end clears all remaining txs.
// each tx has 20 bytes
@@ -131,18 +159,18 @@ func TestReapMaxBytesMaxGas(t *testing.T) {
{20, 20000, 30, 20},
}
for tcIndex, tt := range tests {
checkTxs(t, mempool, tt.numTxsToCreate, UnknownPeerID)
got := mempool.ReapMaxBytesMaxGas(tt.maxBytes, tt.maxGas)
checkTxs(t, mp, tt.numTxsToCreate, mempool.UnknownPeerID)
got := mp.ReapMaxBytesMaxGas(tt.maxBytes, tt.maxGas)
assert.Equal(t, tt.expectedNumTxs, len(got), "Got %d txs, expected %d, tc #%d",
len(got), tt.expectedNumTxs, tcIndex)
mempool.Flush()
mp.Flush()
}
}
func TestMempoolFilters(t *testing.T) {
app := kvstore.NewApplication()
cc := proxy.NewLocalClientCreator(app)
mempool, cleanup := newMempoolWithApp(cc)
mp, cleanup := newMempoolWithApp(cc)
defer cleanup()
emptyTxArr := []types.Tx{[]byte{}}
@@ -153,75 +181,125 @@ func TestMempoolFilters(t *testing.T) {
// each tx has 20 bytes
tests := []struct {
numTxsToCreate int
preFilter PreCheckFunc
postFilter PostCheckFunc
preFilter mempool.PreCheckFunc
postFilter mempool.PostCheckFunc
expectedNumTxs int
}{
{10, nopPreFilter, nopPostFilter, 10},
{10, PreCheckMaxBytes(10), nopPostFilter, 0},
{10, PreCheckMaxBytes(22), nopPostFilter, 10},
{10, nopPreFilter, PostCheckMaxGas(-1), 10},
{10, nopPreFilter, PostCheckMaxGas(0), 0},
{10, nopPreFilter, PostCheckMaxGas(1), 10},
{10, nopPreFilter, PostCheckMaxGas(3000), 10},
{10, PreCheckMaxBytes(10), PostCheckMaxGas(20), 0},
{10, PreCheckMaxBytes(30), PostCheckMaxGas(20), 10},
{10, PreCheckMaxBytes(22), PostCheckMaxGas(1), 10},
{10, PreCheckMaxBytes(22), PostCheckMaxGas(0), 0},
{10, mempool.PreCheckMaxBytes(10), nopPostFilter, 0},
{10, mempool.PreCheckMaxBytes(22), nopPostFilter, 10},
{10, nopPreFilter, mempool.PostCheckMaxGas(-1), 10},
{10, nopPreFilter, mempool.PostCheckMaxGas(0), 0},
{10, nopPreFilter, mempool.PostCheckMaxGas(1), 10},
{10, nopPreFilter, mempool.PostCheckMaxGas(3000), 10},
{10, mempool.PreCheckMaxBytes(10), mempool.PostCheckMaxGas(20), 0},
{10, mempool.PreCheckMaxBytes(30), mempool.PostCheckMaxGas(20), 10},
{10, mempool.PreCheckMaxBytes(22), mempool.PostCheckMaxGas(1), 10},
{10, mempool.PreCheckMaxBytes(22), mempool.PostCheckMaxGas(0), 0},
}
for tcIndex, tt := range tests {
err := mempool.Update(1, emptyTxArr, abciResponses(len(emptyTxArr), abci.CodeTypeOK), tt.preFilter, tt.postFilter)
err := mp.Update(1, emptyTxArr, abciResponses(len(emptyTxArr), abci.CodeTypeOK), tt.preFilter, tt.postFilter)
require.NoError(t, err)
checkTxs(t, mempool, tt.numTxsToCreate, UnknownPeerID)
require.Equal(t, tt.expectedNumTxs, mempool.Size(), "mempool had the incorrect size, on test case %d", tcIndex)
mempool.Flush()
checkTxs(t, mp, tt.numTxsToCreate, mempool.UnknownPeerID)
require.Equal(t, tt.expectedNumTxs, mp.Size(), "mempool had the incorrect size, on test case %d", tcIndex)
mp.Flush()
}
}
func TestMempoolUpdate(t *testing.T) {
app := kvstore.NewApplication()
cc := proxy.NewLocalClientCreator(app)
mempool, cleanup := newMempoolWithApp(cc)
mp, cleanup := newMempoolWithApp(cc)
defer cleanup()
// 1. Adds valid txs to the cache
{
err := mempool.Update(1, []types.Tx{[]byte{0x01}}, abciResponses(1, abci.CodeTypeOK), nil, nil)
err := mp.Update(1, []types.Tx{[]byte{0x01}}, abciResponses(1, abci.CodeTypeOK), nil, nil)
require.NoError(t, err)
err = mempool.CheckTx([]byte{0x01}, nil, TxInfo{})
err = mp.CheckTx([]byte{0x01}, nil, mempool.TxInfo{})
if assert.Error(t, err) {
assert.Equal(t, ErrTxInCache, err)
assert.Equal(t, mempool.ErrTxInCache, err)
}
}
// 2. Removes valid txs from the mempool
{
err := mempool.CheckTx([]byte{0x02}, nil, TxInfo{})
err := mp.CheckTx([]byte{0x02}, nil, mempool.TxInfo{})
require.NoError(t, err)
err = mempool.Update(1, []types.Tx{[]byte{0x02}}, abciResponses(1, abci.CodeTypeOK), nil, nil)
err = mp.Update(1, []types.Tx{[]byte{0x02}}, abciResponses(1, abci.CodeTypeOK), nil, nil)
require.NoError(t, err)
assert.Zero(t, mempool.Size())
assert.Zero(t, mp.Size())
}
// 3. Removes invalid transactions from the cache and the mempool (if present)
{
err := mempool.CheckTx([]byte{0x03}, nil, TxInfo{})
err := mp.CheckTx([]byte{0x03}, nil, mempool.TxInfo{})
require.NoError(t, err)
err = mempool.Update(1, []types.Tx{[]byte{0x03}}, abciResponses(1, 1), nil, nil)
err = mp.Update(1, []types.Tx{[]byte{0x03}}, abciResponses(1, 1), nil, nil)
require.NoError(t, err)
assert.Zero(t, mempool.Size())
assert.Zero(t, mp.Size())
err = mempool.CheckTx([]byte{0x03}, nil, TxInfo{})
err = mp.CheckTx([]byte{0x03}, nil, mempool.TxInfo{})
require.NoError(t, err)
}
}
func TestMempool_KeepInvalidTxsInCache(t *testing.T) {
app := counter.NewApplication(true)
func TestMempoolUpdateDoesNotPanicWhenApplicationMissedTx(t *testing.T) {
var callback abciclient.Callback
mockClient := new(abciclimocks.Client)
mockClient.On("Start").Return(nil)
mockClient.On("SetLogger", mock.Anything)
mockClient.On("Error").Return(nil).Times(4)
mockClient.On("FlushAsync", mock.Anything).Return(abciclient.NewReqRes(abci.ToRequestFlush()), nil)
mockClient.On("SetResponseCallback", mock.MatchedBy(func(cb abciclient.Callback) bool { callback = cb; return true }))
app := kvstore.NewApplication()
cc := proxy.NewLocalClientCreator(app)
wcfg := cfg.DefaultConfig()
mp, cleanup, err := newMempoolWithAppMock(cc, mockClient)
require.NoError(t, err)
defer cleanup()
// Add 4 transactions to the mempool by calling the mempool's `CheckTx` on each of them.
txs := []types.Tx{[]byte{0x01}, []byte{0x02}, []byte{0x03}, []byte{0x04}}
for _, tx := range txs {
reqRes := abciclient.NewReqRes(abci.ToRequestCheckTx(abci.RequestCheckTx{Tx: tx}))
reqRes.Response = abci.ToResponseCheckTx(abci.ResponseCheckTx{Code: abci.CodeTypeOK})
mockClient.On("CheckTxAsync", mock.Anything, mock.Anything).Return(reqRes, nil)
err := mp.CheckTx(tx, nil, mempool.TxInfo{})
require.NoError(t, err)
// ensure that the callback that the mempool sets on the ReqRes is run.
reqRes.InvokeCallback()
}
// Call Update to remove the first transaction from the mempool.
// This call also triggers the mempool to recheck its remaining transactions.
err = mp.Update(0, []types.Tx{txs[0]}, abciResponses(1, abci.CodeTypeOK), nil, nil)
require.Nil(t, err)
// The mempool has now sent its requests off to the client to be rechecked
// and is waiting for the corresponding callbacks to be called.
// We now call the mempool-supplied callback on the first and third transaction.
// This simulates the client dropping the second request.
// Previous versions of this code panicked when the ABCI application missed
// a recheck-tx request.
resp := abci.ResponseCheckTx{Code: abci.CodeTypeOK}
req := abci.RequestCheckTx{Tx: txs[1]}
callback(abci.ToRequestCheckTx(req), abci.ToResponseCheckTx(resp))
req = abci.RequestCheckTx{Tx: txs[3]}
callback(abci.ToRequestCheckTx(req), abci.ToResponseCheckTx(resp))
mockClient.AssertExpectations(t)
}
func TestMempool_KeepInvalidTxsInCache(t *testing.T) {
app := kvstore.NewApplication()
cc := proxy.NewLocalClientCreator(app)
wcfg := config.DefaultConfig()
wcfg.Mempool.KeepInvalidTxsInCache = true
mempool, cleanup := newMempoolWithAppAndConfig(cc, wcfg)
mp, cleanup := newMempoolWithAppAndConfig(cc, wcfg)
defer cleanup()
// 1. An invalid transaction must remain in the cache after Update
@@ -232,26 +310,26 @@ func TestMempool_KeepInvalidTxsInCache(t *testing.T) {
b := make([]byte, 8)
binary.BigEndian.PutUint64(b, 1)
err := mempool.CheckTx(b, nil, TxInfo{})
err := mp.CheckTx(b, nil, mempool.TxInfo{})
require.NoError(t, err)
// simulate new block
_ = app.DeliverTx(abci.RequestDeliverTx{Tx: a})
_ = app.DeliverTx(abci.RequestDeliverTx{Tx: b})
err = mempool.Update(1, []types.Tx{a, b},
err = mp.Update(1, []types.Tx{a, b},
[]*abci.ResponseDeliverTx{{Code: abci.CodeTypeOK}, {Code: 2}}, nil, nil)
require.NoError(t, err)
// a must be added to the cache
err = mempool.CheckTx(a, nil, TxInfo{})
err = mp.CheckTx(a, nil, mempool.TxInfo{})
if assert.Error(t, err) {
assert.Equal(t, ErrTxInCache, err)
assert.Equal(t, mempool.ErrTxInCache, err)
}
// b must remain in the cache
err = mempool.CheckTx(b, nil, TxInfo{})
err = mp.CheckTx(b, nil, mempool.TxInfo{})
if assert.Error(t, err) {
assert.Equal(t, ErrTxInCache, err)
assert.Equal(t, mempool.ErrTxInCache, err)
}
}
@@ -261,68 +339,62 @@ func TestMempool_KeepInvalidTxsInCache(t *testing.T) {
binary.BigEndian.PutUint64(a, 0)
// remove a from the cache to test (2)
mempool.cache.Remove(a)
mp.cache.Remove(a)
err := mempool.CheckTx(a, nil, TxInfo{})
err := mp.CheckTx(a, nil, mempool.TxInfo{})
require.NoError(t, err)
err = mempool.CheckTx(a, nil, TxInfo{})
if assert.Error(t, err) {
assert.Equal(t, ErrTxInCache, err)
}
}
}
func TestTxsAvailable(t *testing.T) {
app := kvstore.NewApplication()
cc := proxy.NewLocalClientCreator(app)
mempool, cleanup := newMempoolWithApp(cc)
mp, cleanup := newMempoolWithApp(cc)
defer cleanup()
mempool.EnableTxsAvailable()
mp.EnableTxsAvailable()
timeoutMS := 500
// with no txs, it shouldn't fire
ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)
ensureNoFire(t, mp.TxsAvailable(), timeoutMS)
// send a bunch of txs, it should only fire once
txs := checkTxs(t, mempool, 100, UnknownPeerID)
ensureFire(t, mempool.TxsAvailable(), timeoutMS)
ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)
txs := checkTxs(t, mp, 100, mempool.UnknownPeerID)
ensureFire(t, mp.TxsAvailable(), timeoutMS)
ensureNoFire(t, mp.TxsAvailable(), timeoutMS)
// call update with half the txs.
// it should fire once now for the new height
// since there are still txs left
committedTxs, txs := txs[:50], txs[50:]
if err := mempool.Update(1, committedTxs, abciResponses(len(committedTxs), abci.CodeTypeOK), nil, nil); err != nil {
if err := mp.Update(1, committedTxs, abciResponses(len(committedTxs), abci.CodeTypeOK), nil, nil); err != nil {
t.Error(err)
}
ensureFire(t, mempool.TxsAvailable(), timeoutMS)
ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)
ensureFire(t, mp.TxsAvailable(), timeoutMS)
ensureNoFire(t, mp.TxsAvailable(), timeoutMS)
// send a bunch more txs. we already fired for this height so it shouldn't fire again
moreTxs := checkTxs(t, mempool, 50, UnknownPeerID)
ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)
moreTxs := checkTxs(t, mp, 50, mempool.UnknownPeerID)
ensureNoFire(t, mp.TxsAvailable(), timeoutMS)
// now call update with all the txs. it should not fire as there are no txs left
committedTxs = append(txs, moreTxs...) //nolint: gocritic
if err := mempool.Update(2, committedTxs, abciResponses(len(committedTxs), abci.CodeTypeOK), nil, nil); err != nil {
committedTxs = append(txs, moreTxs...)
if err := mp.Update(2, committedTxs, abciResponses(len(committedTxs), abci.CodeTypeOK), nil, nil); err != nil {
t.Error(err)
}
ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)
ensureNoFire(t, mp.TxsAvailable(), timeoutMS)
// send a bunch more txs, it should only fire once
checkTxs(t, mempool, 100, UnknownPeerID)
ensureFire(t, mempool.TxsAvailable(), timeoutMS)
ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)
checkTxs(t, mp, 100, mempool.UnknownPeerID)
ensureFire(t, mp.TxsAvailable(), timeoutMS)
ensureNoFire(t, mp.TxsAvailable(), timeoutMS)
}
func TestSerialReap(t *testing.T) {
app := counter.NewApplication(true)
app.SetOption(abci.RequestSetOption{Key: "serial", Value: "on"})
app := kvstore.NewApplication()
cc := proxy.NewLocalClientCreator(app)
mempool, cleanup := newMempoolWithApp(cc)
mp, cleanup := newMempoolWithApp(cc)
defer cleanup()
appConnCon, _ := cc.NewABCIClient()
@@ -338,7 +410,7 @@ func TestSerialReap(t *testing.T) {
// This will succeed
txBytes := make([]byte, 8)
binary.BigEndian.PutUint64(txBytes, uint64(i))
err := mempool.CheckTx(txBytes, nil, TxInfo{})
err := mp.CheckTx(txBytes, nil, mempool.TxInfo{})
_, cached := cacheMap[string(txBytes)]
if cached {
require.NotNil(t, err, "expected error for cached tx")
@@ -348,13 +420,13 @@ func TestSerialReap(t *testing.T) {
cacheMap[string(txBytes)] = struct{}{}
// Duplicates are cached and should return an error
err = mempool.CheckTx(txBytes, nil, TxInfo{})
err = mp.CheckTx(txBytes, nil, mempool.TxInfo{})
require.NotNil(t, err, "Expected error after CheckTx on duplicated tx")
}
}
reapCheck := func(exp int) {
txs := mempool.ReapMaxBytesMaxGas(-1, -1)
txs := mp.ReapMaxBytesMaxGas(-1, -1)
require.Equal(t, len(txs), exp, fmt.Sprintf("Expected to reap %v txs but got %v", exp, len(txs)))
}
@@ -365,7 +437,7 @@ func TestSerialReap(t *testing.T) {
binary.BigEndian.PutUint64(txBytes, uint64(i))
txs = append(txs, txBytes)
}
if err := mempool.Update(0, txs, abciResponses(len(txs), abci.CodeTypeOK), nil, nil); err != nil {
if err := mp.Update(0, txs, abciResponses(len(txs), abci.CodeTypeOK), nil, nil); err != nil {
t.Error(err)
}
}
@@ -428,58 +500,10 @@ func TestSerialReap(t *testing.T) {
reapCheck(600)
}
func TestMempoolCloseWAL(t *testing.T) {
// 1. Create the temporary directory for mempool and WAL testing.
rootDir, err := ioutil.TempDir("", "mempool-test")
require.Nil(t, err, "expecting successful tmpdir creation")
// 2. Ensure that it doesn't contain any elements -- Sanity check
m1, err := filepath.Glob(filepath.Join(rootDir, "*"))
require.Nil(t, err, "successful globbing expected")
require.Equal(t, 0, len(m1), "no matches yet")
// 3. Create the mempool
wcfg := cfg.DefaultConfig()
wcfg.Mempool.RootDir = rootDir
app := kvstore.NewApplication()
cc := proxy.NewLocalClientCreator(app)
mempool, cleanup := newMempoolWithAppAndConfig(cc, wcfg)
defer cleanup()
mempool.height = 10
err = mempool.InitWAL()
require.NoError(t, err)
// 4. Ensure that the directory contains the WAL file
m2, err := filepath.Glob(filepath.Join(rootDir, "*"))
require.Nil(t, err, "successful globbing expected")
require.Equal(t, 1, len(m2), "expecting the wal match in")
// 5. Write some contents to the WAL
err = mempool.CheckTx(types.Tx([]byte("foo")), nil, TxInfo{})
require.NoError(t, err)
walFilepath := mempool.wal.Path
sum1 := checksumFile(walFilepath, t)
// 6. Sanity check to ensure that the written TX matches the expectation.
require.Equal(t, sum1, checksumIt([]byte("foo\n")), "foo with a newline should be written")
// 7. Invoke CloseWAL() and ensure it discards the
// WAL, so that any further writes won't go through.
mempool.CloseWAL()
err = mempool.CheckTx(types.Tx([]byte("bar")), nil, TxInfo{})
require.NoError(t, err)
sum2 := checksumFile(walFilepath, t)
require.Equal(t, sum1, sum2, "expected no change to the WAL after invoking CloseWAL() since it was discarded")
// 8. Sanity check to ensure that the WAL file still exists
m3, err := filepath.Glob(filepath.Join(rootDir, "*"))
require.Nil(t, err, "successful globbing expected")
require.Equal(t, 1, len(m3), "expecting the wal match in")
}
func TestMempool_CheckTxChecksTxSize(t *testing.T) {
app := kvstore.NewApplication()
cc := proxy.NewLocalClientCreator(app)
mempl, cleanup := newMempoolWithApp(cc)
defer cleanup()
@@ -505,7 +529,7 @@ func TestMempool_CheckTxChecksTxSize(t *testing.T) {
tx := tmrand.Bytes(testCase.len)
err := mempl.CheckTx(tx, nil, TxInfo{})
err := mempl.CheckTx(tx, nil, mempool.TxInfo{})
bv := gogotypes.BytesValue{Value: tx}
bz, err2 := bv.Marshal()
require.NoError(t, err2)
@@ -514,7 +538,10 @@ func TestMempool_CheckTxChecksTxSize(t *testing.T) {
if !testCase.err {
require.NoError(t, err, caseString)
} else {
require.Equal(t, err, ErrTxTooLarge{maxTxSize, testCase.len}, caseString)
require.Equal(t, err, mempool.ErrTxTooLarge{
Max: maxTxSize,
Actual: testCase.len,
}, caseString)
}
}
}
@@ -522,52 +549,60 @@ func TestMempool_CheckTxChecksTxSize(t *testing.T) {
func TestMempoolTxsBytes(t *testing.T) {
app := kvstore.NewApplication()
cc := proxy.NewLocalClientCreator(app)
config := cfg.ResetTestRoot("mempool_test")
config.Mempool.MaxTxsBytes = 10
mempool, cleanup := newMempoolWithAppAndConfig(cc, config)
cfg := config.ResetTestRoot("mempool_test")
cfg.Mempool.MaxTxsBytes = 10
mp, cleanup := newMempoolWithAppAndConfig(cc, cfg)
defer cleanup()
// 1. zero by default
assert.EqualValues(t, 0, mempool.TxsBytes())
assert.EqualValues(t, 0, mp.SizeBytes())
// 2. len(tx) after CheckTx
err := mempool.CheckTx([]byte{0x01}, nil, TxInfo{})
err := mp.CheckTx([]byte{0x01}, nil, mempool.TxInfo{})
require.NoError(t, err)
assert.EqualValues(t, 1, mempool.TxsBytes())
assert.EqualValues(t, 1, mp.SizeBytes())
// 3. zero again after tx is removed by Update
err = mempool.Update(1, []types.Tx{[]byte{0x01}}, abciResponses(1, abci.CodeTypeOK), nil, nil)
err = mp.Update(1, []types.Tx{[]byte{0x01}}, abciResponses(1, abci.CodeTypeOK), nil, nil)
require.NoError(t, err)
assert.EqualValues(t, 0, mempool.TxsBytes())
assert.EqualValues(t, 0, mp.SizeBytes())
// 4. zero after Flush
err = mempool.CheckTx([]byte{0x02, 0x03}, nil, TxInfo{})
err = mp.CheckTx([]byte{0x02, 0x03}, nil, mempool.TxInfo{})
require.NoError(t, err)
assert.EqualValues(t, 2, mempool.TxsBytes())
assert.EqualValues(t, 2, mp.SizeBytes())
mempool.Flush()
assert.EqualValues(t, 0, mempool.TxsBytes())
mp.Flush()
assert.EqualValues(t, 0, mp.SizeBytes())
// 5. ErrMempoolIsFull is returned when/if MaxTxsBytes limit is reached.
err = mempool.CheckTx([]byte{0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04}, nil, TxInfo{})
err = mp.CheckTx(
[]byte{0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04},
nil,
mempool.TxInfo{},
)
require.NoError(t, err)
err = mempool.CheckTx([]byte{0x05}, nil, TxInfo{})
err = mp.CheckTx([]byte{0x05}, nil, mempool.TxInfo{})
if assert.Error(t, err) {
assert.IsType(t, ErrMempoolIsFull{}, err)
assert.IsType(t, mempool.ErrMempoolIsFull{}, err)
}
// 6. zero after tx is rechecked and removed due to not being valid anymore
app2 := counter.NewApplication(true)
app2 := kvstore.NewApplication()
cc = proxy.NewLocalClientCreator(app2)
mempool, cleanup = newMempoolWithApp(cc)
mp, cleanup = newMempoolWithApp(cc)
defer cleanup()
txBytes := make([]byte, 8)
binary.BigEndian.PutUint64(txBytes, uint64(0))
err = mempool.CheckTx(txBytes, nil, TxInfo{})
err = mp.CheckTx(txBytes, nil, mempool.TxInfo{})
require.NoError(t, err)
assert.EqualValues(t, 8, mempool.TxsBytes())
assert.EqualValues(t, 8, mp.SizeBytes())
appConnCon, _ := cc.NewABCIClient()
appConnCon.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "consensus"))
@@ -578,26 +613,28 @@ func TestMempoolTxsBytes(t *testing.T) {
t.Error(err)
}
})
res, err := appConnCon.DeliverTxSync(abci.RequestDeliverTx{Tx: txBytes})
require.NoError(t, err)
require.EqualValues(t, 0, res.Code)
res2, err := appConnCon.CommitSync()
require.NoError(t, err)
require.NotEmpty(t, res2.Data)
// Pretend like we committed nothing so txBytes gets rechecked and removed.
err = mempool.Update(1, []types.Tx{}, abciResponses(0, abci.CodeTypeOK), nil, nil)
err = mp.Update(1, []types.Tx{}, abciResponses(0, abci.CodeTypeOK), nil, nil)
require.NoError(t, err)
assert.EqualValues(t, 0, mempool.TxsBytes())
assert.EqualValues(t, 8, mp.SizeBytes())
// 7. Test RemoveTxByKey function
err = mempool.CheckTx([]byte{0x06}, nil, TxInfo{})
err = mp.CheckTx([]byte{0x06}, nil, mempool.TxInfo{})
require.NoError(t, err)
assert.EqualValues(t, 1, mempool.TxsBytes())
mempool.RemoveTxByKey(TxKey([]byte{0x07}), true)
assert.EqualValues(t, 1, mempool.TxsBytes())
mempool.RemoveTxByKey(TxKey([]byte{0x06}), true)
assert.EqualValues(t, 0, mempool.TxsBytes())
assert.EqualValues(t, 9, mp.SizeBytes())
assert.Error(t, mp.RemoveTxByKey(types.Tx([]byte{0x07}).Key()))
assert.EqualValues(t, 9, mp.SizeBytes())
assert.NoError(t, mp.RemoveTxByKey(types.Tx([]byte{0x06}).Key()))
assert.EqualValues(t, 8, mp.SizeBytes())
}
@@ -608,14 +645,16 @@ func TestMempoolTxsBytes(t *testing.T) {
func TestMempoolRemoteAppConcurrency(t *testing.T) {
sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", tmrand.Str(6))
app := kvstore.NewApplication()
cc, server := newRemoteApp(t, sockPath, app)
_, server := newRemoteApp(t, sockPath, app)
t.Cleanup(func() {
if err := server.Stop(); err != nil {
t.Error(err)
}
})
config := cfg.ResetTestRoot("mempool_test")
mempool, cleanup := newMempoolWithAppAndConfig(cc, config)
cfg := config.ResetTestRoot("mempool_test")
mp, cleanup := newMempoolWithAppAndConfig(proxy.NewRemoteClientCreator(sockPath, "socket", true), cfg)
defer cleanup()
// generate small number of txs
@@ -627,7 +666,7 @@ func TestMempoolRemoteAppConcurrency(t *testing.T) {
}
// simulate a group of peers sending them over and over
N := config.Mempool.Size
N := cfg.Mempool.Size
maxPeers := 5
for i := 0; i < N; i++ {
peerID := mrand.Intn(maxPeers)
@@ -635,41 +674,25 @@ func TestMempoolRemoteAppConcurrency(t *testing.T) {
tx := txs[txNum]
// this will err with ErrTxInCache many times ...
mempool.CheckTx(tx, nil, TxInfo{SenderID: uint16(peerID)}) //nolint: errcheck // will error
mp.CheckTx(tx, nil, mempool.TxInfo{SenderID: uint16(peerID)}) //nolint: errcheck // will error
}
err := mempool.FlushAppConn()
require.NoError(t, err)
require.NoError(t, mp.FlushAppConn())
}
// caller must close server
func newRemoteApp(
t *testing.T,
addr string,
app abci.Application,
) (
clientCreator proxy.ClientCreator,
server service.Service,
) {
clientCreator = proxy.NewRemoteClientCreator(addr, "socket", true)
func newRemoteApp(t *testing.T, addr string, app abci.Application) (abciclient.Client, service.Service) {
clientCreator, err := abciclient.NewClient(addr, "socket", true)
require.NoError(t, err)
// Start server
server = abciserver.NewSocketServer(addr, app)
server := abciserver.NewSocketServer(addr, app)
server.SetLogger(log.TestingLogger().With("module", "abci-server"))
if err := server.Start(); err != nil {
t.Fatalf("Error starting socket server: %v", err.Error())
}
return clientCreator, server
}
func checksumIt(data []byte) string {
h := sha256.New()
h.Write(data)
return fmt.Sprintf("%x", h.Sum(nil))
}
func checksumFile(p string, t *testing.T) string {
data, err := ioutil.ReadFile(p)
require.Nil(t, err, "expecting successful read of %q", p)
return checksumIt(data)
return clientCreator, server
}
func abciResponses(n int, code uint32) []*abci.ResponseDeliverTx {


@@ -20,4 +20,4 @@
// broadcastTxRoutine().
// TODO: Better handle abci client errors. (make it automatically handle connection errors)
package mempool
package v0


@@ -1,32 +1,20 @@
package mempool
package v0
import (
"errors"
"fmt"
"math"
"time"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/libs/clist"
"github.com/tendermint/tendermint/libs/log"
tmsync "github.com/tendermint/tendermint/libs/sync"
"github.com/tendermint/tendermint/mempool"
"github.com/tendermint/tendermint/p2p"
protomem "github.com/tendermint/tendermint/proto/tendermint/mempool"
"github.com/tendermint/tendermint/types"
)
const (
MempoolChannel = byte(0x30)
peerCatchupSleepIntervalMS = 100 // If peer is behind, sleep this amount
// UnknownPeerID is the peer ID to use when running CheckTx when there is
// no peer (e.g. RPC)
UnknownPeerID uint16 = 0
maxActiveIDs = math.MaxUint16
)
// Reactor handles mempool tx broadcasting amongst peers.
// It maintains a map from peer ID to counter, to prevent gossiping txs back
// to the peers you received them from.
@@ -58,8 +46,8 @@ func (ids *mempoolIDs) ReserveForPeer(peer p2p.Peer) {
// nextPeerID returns the next unused peer ID to use.
// This assumes that ids's mutex is already locked.
func (ids *mempoolIDs) nextPeerID() uint16 {
if len(ids.activeIDs) == maxActiveIDs {
panic(fmt.Sprintf("node has maximum %d active IDs and wanted to get one more", maxActiveIDs))
if len(ids.activeIDs) == mempool.MaxActiveIDs {
panic(fmt.Sprintf("node has maximum %d active IDs and wanted to get one more", mempool.MaxActiveIDs))
}
_, idExists := ids.activeIDs[ids.nextID]
@@ -143,7 +131,7 @@ func (memR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
return []*p2p.ChannelDescriptor{
{
ID: MempoolChannel,
ID: mempool.MempoolChannel,
Priority: 5,
RecvMessageCapacity: batchMsg.Size(),
},
@@ -175,18 +163,20 @@ func (memR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
}
memR.Logger.Debug("Receive", "src", src, "chId", chID, "msg", msg)
txInfo := TxInfo{SenderID: memR.ids.GetForPeer(src)}
txInfo := mempool.TxInfo{SenderID: memR.ids.GetForPeer(src)}
if src != nil {
txInfo.SenderP2PID = src.ID()
}
for _, tx := range msg.Txs {
err = memR.mempool.CheckTx(tx, nil, txInfo)
if err == ErrTxInCache {
memR.Logger.Debug("Tx already exists in cache", "tx", txID(tx))
if errors.Is(err, mempool.ErrTxInCache) {
memR.Logger.Debug("Tx already exists in cache", "tx", tx.String())
} else if err != nil {
memR.Logger.Info("Could not check tx", "tx", txID(tx), "err", err)
memR.Logger.Info("Could not check tx", "tx", tx.String(), "err", err)
}
}
// broadcasting happens from go routines per peer
}
@@ -229,14 +219,14 @@ func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) {
// different every time due to us using a map. Sometimes other reactors
// will be initialized before the consensus reactor. We should wait a few
// milliseconds and retry.
time.Sleep(peerCatchupSleepIntervalMS * time.Millisecond)
time.Sleep(mempool.PeerCatchupSleepIntervalMS * time.Millisecond)
continue
}
// Allow for a lag of 1 block.
memTx := next.Value.(*mempoolTx)
if peerState.GetHeight() < memTx.Height()-1 {
time.Sleep(peerCatchupSleepIntervalMS * time.Millisecond)
time.Sleep(mempool.PeerCatchupSleepIntervalMS * time.Millisecond)
continue
}
@@ -249,13 +239,15 @@ func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) {
Txs: &protomem.Txs{Txs: [][]byte{memTx.tx}},
},
}
bz, err := msg.Marshal()
if err != nil {
panic(err)
}
success := peer.Send(MempoolChannel, bz)
success := peer.Send(mempool.MempoolChannel, bz)
if !success {
time.Sleep(peerCatchupSleepIntervalMS * time.Millisecond)
time.Sleep(mempool.PeerCatchupSleepIntervalMS * time.Millisecond)
continue
}
}
@@ -272,9 +264,6 @@ func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) {
}
}
//-----------------------------------------------------------------------------
// Messages
func (memR *Reactor) decodeMsg(bz []byte) (TxsMessage, error) {
msg := protomem.Message{}
err := msg.Unmarshal(bz)
@@ -304,8 +293,6 @@ func (memR *Reactor) decodeMsg(bz []byte) (TxsMessage, error) {
return message, fmt.Errorf("msg type: %T is not supported", msg)
}
//-------------------------------------
// TxsMessage is a Message containing transactions.
type TxsMessage struct {
Txs []types.Tx


@@ -1,4 +1,4 @@
package mempool
package v0
import (
"encoding/hex"
@@ -18,6 +18,7 @@ import (
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/libs/log"
tmrand "github.com/tendermint/tendermint/libs/rand"
"github.com/tendermint/tendermint/mempool"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/p2p/mock"
memproto "github.com/tendermint/tendermint/proto/tendermint/mempool"
@@ -61,7 +62,7 @@ func TestReactorBroadcastTxsMessage(t *testing.T) {
}
}
txs := checkTxs(t, reactors[0].mempool, numTxs, UnknownPeerID)
txs := checkTxs(t, reactors[0].mempool, numTxs, mempool.UnknownPeerID)
waitForTxsOnReactors(t, txs, reactors)
}
@@ -91,7 +92,7 @@ func TestReactorConcurrency(t *testing.T) {
// 1. submit a bunch of txs
// 2. update the whole mempool
txs := checkTxs(t, reactors[0].mempool, numTxs, UnknownPeerID)
txs := checkTxs(t, reactors[0].mempool, numTxs, mempool.UnknownPeerID)
go func() {
defer wg.Done()
@@ -108,7 +109,7 @@ func TestReactorConcurrency(t *testing.T) {
// 1. submit a bunch of txs
// 2. update none
_ = checkTxs(t, reactors[1].mempool, numTxs, UnknownPeerID)
_ = checkTxs(t, reactors[1].mempool, numTxs, mempool.UnknownPeerID)
go func() {
defer wg.Done()
@@ -170,7 +171,7 @@ func TestReactor_MaxTxBytes(t *testing.T) {
// Broadcast a tx, which has the max size
// => ensure it's received by the second reactor.
tx1 := tmrand.Bytes(config.Mempool.MaxTxBytes)
err := reactors[0].mempool.CheckTx(tx1, nil, TxInfo{SenderID: UnknownPeerID})
err := reactors[0].mempool.CheckTx(tx1, nil, mempool.TxInfo{SenderID: mempool.UnknownPeerID})
require.NoError(t, err)
waitForTxsOnReactors(t, []types.Tx{tx1}, reactors)
@@ -180,7 +181,7 @@ func TestReactor_MaxTxBytes(t *testing.T) {
// Broadcast a tx, which is beyond the max size
// => ensure it's not sent
tx2 := tmrand.Bytes(config.Mempool.MaxTxBytes + 1)
err = reactors[0].mempool.CheckTx(tx2, nil, TxInfo{SenderID: UnknownPeerID})
err = reactors[0].mempool.CheckTx(tx2, nil, mempool.TxInfo{SenderID: mempool.UnknownPeerID})
require.Error(t, err)
}
@@ -252,7 +253,7 @@ func TestMempoolIDsPanicsIfNodeRequestsOvermaxActiveIDs(t *testing.T) {
// 0 is already reserved for UnknownPeerID
ids := newMempoolIDs()
for i := 0; i < maxActiveIDs-1; i++ {
for i := 0; i < mempool.MaxActiveIDs-1; i++ {
peer := mock.NewPeer(net.IP{127, 0, 0, 1})
ids.ReserveForPeer(peer)
}
@@ -276,9 +277,9 @@ func TestDontExhaustMaxActiveIDs(t *testing.T) {
}()
reactor := reactors[0]
for i := 0; i < maxActiveIDs+1; i++ {
for i := 0; i < mempool.MaxActiveIDs+1; i++ {
peer := mock.NewPeer(nil)
reactor.Receive(MempoolChannel, peer, []byte{0x1, 0x2, 0x3})
reactor.Receive(mempool.MempoolChannel, peer, []byte{0x1, 0x2, 0x3})
reactor.AddPeer(peer)
}
}

mempool/v1/mempool.go (new file, 768 lines)

@@ -0,0 +1,768 @@
package v1
import (
"fmt"
"runtime"
"sort"
"sync"
"sync/atomic"
"time"
"github.com/creachadair/taskgroup"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/libs/clist"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/mempool"
"github.com/tendermint/tendermint/proxy"
"github.com/tendermint/tendermint/types"
)
var _ mempool.Mempool = (*TxMempool)(nil)
// TxMempoolOption sets an optional parameter on the TxMempool.
type TxMempoolOption func(*TxMempool)
// TxMempool implements the Mempool interface and allows the application to
// set priority values on transactions in the CheckTx response. When selecting
// transactions to include in a block, higher-priority transactions are chosen
// first. When evicting transactions from the mempool for size constraints,
// lower-priority transactions are evicted sooner.
//
// Within the mempool, transactions are ordered by time of arrival, and are
// gossiped to the rest of the network based on that order (gossip order does
// not take priority into account).
type TxMempool struct {
// Immutable fields
logger log.Logger
config *config.MempoolConfig
proxyAppConn proxy.AppConnMempool
metrics *mempool.Metrics
cache mempool.TxCache // seen transactions
// Atomically-updated fields
txsBytes int64 // atomic: the total size of all transactions in the mempool, in bytes
// Synchronized fields, protected by mtx.
mtx *sync.RWMutex
notifiedTxsAvailable bool
txsAvailable chan struct{} // one value sent per height when mempool is not empty
preCheck mempool.PreCheckFunc
postCheck mempool.PostCheckFunc
height int64 // the latest height passed to Update
txs *clist.CList // valid transactions (passed CheckTx)
txByKey map[types.TxKey]*clist.CElement
txBySender map[string]*clist.CElement // for sender != ""
}
// NewTxMempool constructs a new, empty priority mempool at the specified
// initial height and using the given config and options.
func NewTxMempool(
logger log.Logger,
cfg *config.MempoolConfig,
proxyAppConn proxy.AppConnMempool,
height int64,
options ...TxMempoolOption,
) *TxMempool {
txmp := &TxMempool{
logger: logger,
config: cfg,
proxyAppConn: proxyAppConn,
metrics: mempool.NopMetrics(),
cache: mempool.NopTxCache{},
txs: clist.New(),
mtx: new(sync.RWMutex),
height: height,
txByKey: make(map[types.TxKey]*clist.CElement),
txBySender: make(map[string]*clist.CElement),
}
if cfg.CacheSize > 0 {
txmp.cache = mempool.NewLRUTxCache(cfg.CacheSize)
}
for _, opt := range options {
opt(txmp)
}
return txmp
}
// WithPreCheck sets a filter for the mempool to reject a transaction if f(tx)
// returns an error. This is executed before CheckTx. It only applies to the
// first created block. After that, Update() overwrites the existing value.
func WithPreCheck(f mempool.PreCheckFunc) TxMempoolOption {
return func(txmp *TxMempool) { txmp.preCheck = f }
}
// WithPostCheck sets a filter for the mempool to reject a transaction if
// f(tx, resp) returns an error. This is executed after CheckTx. It only applies
// to the first created block. After that, Update overwrites the existing value.
func WithPostCheck(f mempool.PostCheckFunc) TxMempoolOption {
return func(txmp *TxMempool) { txmp.postCheck = f }
}
// WithMetrics sets the mempool's metrics collector.
func WithMetrics(metrics *mempool.Metrics) TxMempoolOption {
return func(txmp *TxMempool) { txmp.metrics = metrics }
}
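
For orientation, a minimal construction sketch (logger, memCfg, and conn are illustrative names assumed to come from the caller's node wiring; the option helpers are the ones defined above):

mp := NewTxMempool(
	logger, // log.Logger
	memCfg, // *config.MempoolConfig
	conn,   // proxy.AppConnMempool
	0,      // initial height
	WithMetrics(mempool.NopMetrics()),
	WithPreCheck(mempool.PreCheckMaxBytes(1024)), // e.g. reject txs over 1 KiB before ABCI CheckTx
)
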
// Lock obtains a write-lock on the mempool. A caller must be sure to explicitly
// release the lock when finished.
func (txmp *TxMempool) Lock() { txmp.mtx.Lock() }
// Unlock releases a write-lock on the mempool.
func (txmp *TxMempool) Unlock() { txmp.mtx.Unlock() }
// Size returns the number of valid transactions in the mempool. It is
// thread-safe.
func (txmp *TxMempool) Size() int { return txmp.txs.Len() }
// SizeBytes return the total sum in bytes of all the valid transactions in the
// mempool. It is thread-safe.
func (txmp *TxMempool) SizeBytes() int64 { return atomic.LoadInt64(&txmp.txsBytes) }
// FlushAppConn executes FlushSync on the mempool's proxyAppConn.
//
// The caller must hold an exclusive mempool lock (by calling txmp.Lock) before
// calling FlushAppConn.
func (txmp *TxMempool) FlushAppConn() error {
// N.B.: We have to issue the call outside the lock so that its callback can
// fire. It's safe to do this; the flush will block until complete.
//
// We could just not require the caller to hold the lock at all, but the
// semantics of the Mempool interface require the caller to hold it, and we
// can't change that without disrupting existing use.
txmp.mtx.Unlock()
defer txmp.mtx.Lock()
return txmp.proxyAppConn.FlushSync()
}
// EnableTxsAvailable enables the mempool to trigger events when transactions
// are available on a block by block basis.
func (txmp *TxMempool) EnableTxsAvailable() {
txmp.mtx.Lock()
defer txmp.mtx.Unlock()
txmp.txsAvailable = make(chan struct{}, 1)
}
// TxsAvailable returns a channel which fires once for every height, and only
// when transactions are available in the mempool. It is thread-safe.
func (txmp *TxMempool) TxsAvailable() <-chan struct{} { return txmp.txsAvailable }
// CheckTx adds the given transaction to the mempool if it fits and passes the
// application's ABCI CheckTx method.
//
// CheckTx reports an error without adding tx if:
//
// - The size of tx exceeds the configured maximum transaction size.
// - The pre-check hook is defined and reports an error for tx.
// - The transaction already exists in the cache.
// - The proxy connection to the application fails.
//
// If tx passes all of the above conditions, it is passed (asynchronously) to
// the application's ABCI CheckTx method and this CheckTx method returns nil.
// If cb != nil, it is called when the ABCI request completes to report the
// application response.
//
// If the application accepts the transaction and the mempool is full, the
// mempool evicts one or more of the lowest-priority transactions whose
// priorities are (strictly) lower than the priority of tx and whose sizes
// together exceed the size of tx, and adds tx instead. If no such transactions
// exist, tx is discarded.
func (txmp *TxMempool) CheckTx(tx types.Tx, cb func(*abci.Response), txInfo mempool.TxInfo) error {
// During the initial phase of CheckTx, we do not need to modify any state.
// A transaction will not actually be added to the mempool until it survives
// a call to the ABCI CheckTx method and size constraint checks.
height, err := func() (int64, error) {
txmp.mtx.RLock()
defer txmp.mtx.RUnlock()
// Reject transactions in excess of the configured maximum transaction size.
if len(tx) > txmp.config.MaxTxBytes {
return 0, mempool.ErrTxTooLarge{Max: txmp.config.MaxTxBytes, Actual: len(tx)}
}
// If a precheck hook is defined, call it before invoking the application.
if txmp.preCheck != nil {
if err := txmp.preCheck(tx); err != nil {
return 0, mempool.ErrPreCheck{Reason: err}
}
}
// Early exit if the proxy connection has an error.
if err := txmp.proxyAppConn.Error(); err != nil {
return 0, err
}
txKey := tx.Key()
// Check for the transaction in the cache.
if !txmp.cache.Push(tx) {
// If the cached transaction is also in the pool, record its sender.
if elt, ok := txmp.txByKey[txKey]; ok {
w := elt.Value.(*WrappedTx)
w.SetPeer(txInfo.SenderID)
}
return 0, mempool.ErrTxInCache
}
return txmp.height, nil
}()
if err != nil {
return err
}
// Invoke an ABCI CheckTx for this transaction.
rsp, err := txmp.proxyAppConn.CheckTxSync(abci.RequestCheckTx{Tx: tx})
if err != nil {
txmp.cache.Remove(tx)
return err
}
wtx := &WrappedTx{
tx: tx,
hash: tx.Key(),
timestamp: time.Now().UTC(),
height: height,
}
wtx.SetPeer(txInfo.SenderID)
txmp.addNewTransaction(wtx, rsp)
if cb != nil {
cb(&abci.Response{Value: &abci.Response_CheckTx{CheckTx: rsp}})
}
return nil
}
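
A usage sketch for the callback path (illustrative; mp and tx are assumed to exist). The callback fires once the application's CheckTx response has been processed:

err := mp.CheckTx(tx, func(rsp *abci.Response) {
	if ct, ok := rsp.Value.(*abci.Response_CheckTx); ok && ct.CheckTx.Code != abci.CodeTypeOK {
		// rejected by the application or by the post-check hook
	}
}, mempool.TxInfo{SenderID: mempool.UnknownPeerID})
if errors.Is(err, mempool.ErrTxInCache) {
	// duplicate: the transaction was already seen
}
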
// RemoveTxByKey removes the transaction with the specified key from the
// mempool. It reports an error if no such transaction exists. This operation
// does not remove the transaction from the cache.
func (txmp *TxMempool) RemoveTxByKey(txKey types.TxKey) error {
txmp.mtx.Lock()
defer txmp.mtx.Unlock()
return txmp.removeTxByKey(txKey)
}
// removeTxByKey removes the specified transaction key from the mempool.
// The caller must hold txmp.mtx exclusively.
func (txmp *TxMempool) removeTxByKey(key types.TxKey) error {
if elt, ok := txmp.txByKey[key]; ok {
w := elt.Value.(*WrappedTx)
delete(txmp.txByKey, key)
delete(txmp.txBySender, w.sender)
txmp.txs.Remove(elt)
elt.DetachPrev()
elt.DetachNext()
atomic.AddInt64(&txmp.txsBytes, -w.Size())
return nil
}
return fmt.Errorf("transaction %x not found", key)
}
// removeTxByElement removes the specified transaction element from the mempool.
// The caller must hold txmp.mtx exclusively.
func (txmp *TxMempool) removeTxByElement(elt *clist.CElement) {
w := elt.Value.(*WrappedTx)
delete(txmp.txByKey, w.tx.Key())
delete(txmp.txBySender, w.sender)
txmp.txs.Remove(elt)
elt.DetachPrev()
elt.DetachNext()
atomic.AddInt64(&txmp.txsBytes, -w.Size())
}
// Flush purges the contents of the mempool and the cache, leaving both empty.
// The current height is not modified by this operation.
func (txmp *TxMempool) Flush() {
txmp.mtx.Lock()
defer txmp.mtx.Unlock()
// Remove all the transactions in the list explicitly, so that the sizes
// and indexes get updated properly.
cur := txmp.txs.Front()
for cur != nil {
next := cur.Next()
txmp.removeTxByElement(cur)
cur = next
}
txmp.cache.Reset()
}
// allEntriesSorted returns a slice of all the transactions currently in the
// mempool, sorted in nonincreasing order by priority with ties broken by
// increasing order of arrival time.
func (txmp *TxMempool) allEntriesSorted() []*WrappedTx {
txmp.mtx.RLock()
defer txmp.mtx.RUnlock()
all := make([]*WrappedTx, 0, len(txmp.txByKey))
for _, tx := range txmp.txByKey {
all = append(all, tx.Value.(*WrappedTx))
}
sort.Slice(all, func(i, j int) bool {
if all[i].priority == all[j].priority {
return all[i].timestamp.Before(all[j].timestamp)
}
return all[i].priority > all[j].priority // N.B. higher priorities first
})
return all
}
// ReapMaxBytesMaxGas returns a slice of valid transactions that fit within the
// size and gas constraints. The results are ordered by nonincreasing priority,
// with ties broken by increasing order of arrival. Reaping transactions does
// not remove them from the mempool.
//
// If maxBytes < 0, no limit is set on the total size in bytes.
// If maxGas < 0, no limit is set on the total gas cost.
//
// If the mempool is empty or has no transactions fitting within the given
// constraints, the result will also be empty.
func (txmp *TxMempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs {
var totalGas, totalBytes int64
var keep []types.Tx //nolint:prealloc
for _, w := range txmp.allEntriesSorted() {
// N.B. When computing byte size, we need to include the overhead for
// encoding as protobuf to send to the application.
totalGas += w.gasWanted
totalBytes += types.ComputeProtoSizeForTxs([]types.Tx{w.tx})
if (maxGas >= 0 && totalGas > maxGas) || (maxBytes >= 0 && totalBytes > maxBytes) {
break
}
keep = append(keep, w.tx)
}
return keep
}
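
For example, a caller selecting transactions for a block capped at 1 MB with no gas limit might do (sketch):

blockTxs := mp.ReapMaxBytesMaxGas(1024*1024, -1)
// blockTxs is ordered by priority; the reaped txs stay in the mempool.
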
// TxsWaitChan returns a channel that is closed when there is at least one
// transaction available to be gossiped.
func (txmp *TxMempool) TxsWaitChan() <-chan struct{} { return txmp.txs.WaitChan() }
// TxsFront returns the frontmost element of the pending transaction list.
// It will be nil if the mempool is empty.
func (txmp *TxMempool) TxsFront() *clist.CElement { return txmp.txs.Front() }
// ReapMaxTxs returns up to max transactions from the mempool. The results are
// ordered by nonincreasing priority with ties broken by increasing order of
// arrival. Reaping transactions does not remove them from the mempool.
//
// If max < 0, all transactions in the mempool are reaped.
//
// The result may have fewer than max elements (possibly zero) if the mempool
// does not have that many transactions available.
func (txmp *TxMempool) ReapMaxTxs(max int) types.Txs {
var keep []types.Tx //nolint:prealloc
for _, w := range txmp.allEntriesSorted() {
if max >= 0 && len(keep) >= max {
break
}
keep = append(keep, w.tx)
}
return keep
}
// Update removes all the given transactions from the mempool and the cache,
// and updates the current block height. The blockTxs and deliverTxResponses
// must have the same length with each response corresponding to the tx at the
// same offset.
//
// If the configuration enables recheck, Update sends each transaction that
// remains after removing blockTxs to the ABCI CheckTx method. Any
// transactions marked as invalid during recheck are also removed.
//
// The caller must hold an exclusive mempool lock (by calling txmp.Lock) before
// calling Update.
func (txmp *TxMempool) Update(
blockHeight int64,
blockTxs types.Txs,
deliverTxResponses []*abci.ResponseDeliverTx,
newPreFn mempool.PreCheckFunc,
newPostFn mempool.PostCheckFunc,
) error {
// Safety check: Transactions and responses must match in number.
if len(blockTxs) != len(deliverTxResponses) {
panic(fmt.Sprintf("mempool: got %d transactions but %d DeliverTx responses",
len(blockTxs), len(deliverTxResponses)))
}
txmp.height = blockHeight
txmp.notifiedTxsAvailable = false
if newPreFn != nil {
txmp.preCheck = newPreFn
}
if newPostFn != nil {
txmp.postCheck = newPostFn
}
for i, tx := range blockTxs {
// Add successful committed transactions to the cache (if they are not
// already present). Transactions that failed to commit are removed from
// the cache unless the operator has explicitly requested we keep them.
if deliverTxResponses[i].Code == abci.CodeTypeOK {
_ = txmp.cache.Push(tx)
} else if !txmp.config.KeepInvalidTxsInCache {
txmp.cache.Remove(tx)
}
// Regardless of success, remove the transaction from the mempool.
_ = txmp.removeTxByKey(tx.Key())
}
txmp.purgeExpiredTxs(blockHeight)
// If there are any uncommitted transactions left in the mempool, we either
// initiate a re-CheckTx per remaining transaction or notify watchers that
// transactions are still available.
size := txmp.Size()
txmp.metrics.Size.Set(float64(size))
if size > 0 {
if txmp.config.Recheck {
txmp.recheckTransactions()
} else {
txmp.notifyTxsAvailable()
}
}
return nil
}
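
Per that contract, a caller drives Update roughly as follows (sketch; height, blockTxs, and deliverTxResponses are assumed to exist, and passing nil for both hooks leaves the previously installed pre- and post-check functions in place):

mp.Lock()
err := mp.Update(height, blockTxs, deliverTxResponses, nil, nil)
mp.Unlock()
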
// addNewTransaction handles the ABCI CheckTx response for the first time a
// transaction is added to the mempool. A recheck after a block is committed
// goes to handleRecheckResult.
//
// If either the application rejected the transaction or a post-check hook is
// defined and rejects the transaction, it is discarded.
//
// Otherwise, if the mempool is full, check for lower-priority transactions
// that can be evicted to make room for the new one. If no such transactions
// exist, this transaction is logged and dropped; otherwise the selected
// transactions are evicted.
//
// Finally, the new transaction is added and size stats updated.
func (txmp *TxMempool) addNewTransaction(wtx *WrappedTx, checkTxRes *abci.ResponseCheckTx) {
txmp.mtx.Lock()
defer txmp.mtx.Unlock()
var err error
if txmp.postCheck != nil {
err = txmp.postCheck(wtx.tx, checkTxRes)
}
if err != nil || checkTxRes.Code != abci.CodeTypeOK {
txmp.logger.Info(
"rejected bad transaction",
"priority", wtx.Priority(),
"tx", fmt.Sprintf("%X", wtx.tx.Hash()),
"peer_id", wtx.peers,
"code", checkTxRes.Code,
"post_check_err", err,
)
txmp.metrics.FailedTxs.Add(1)
// Remove the invalid transaction from the cache, unless the operator has
// instructed us to keep invalid transactions.
if !txmp.config.KeepInvalidTxsInCache {
txmp.cache.Remove(wtx.tx)
}
// If there was a post-check error, record its text in the result for
// debugging purposes.
if err != nil {
checkTxRes.MempoolError = err.Error()
}
return
}
priority := checkTxRes.Priority
sender := checkTxRes.Sender
// Disallow multiple concurrent transactions from the same sender assigned
// by the ABCI application. As a special case, an empty sender is not
// restricted.
if sender != "" {
elt, ok := txmp.txBySender[sender]
if ok {
w := elt.Value.(*WrappedTx)
txmp.logger.Debug(
"rejected valid incoming transaction; tx already exists for sender",
"tx", fmt.Sprintf("%X", w.tx.Hash()),
"sender", sender,
)
checkTxRes.MempoolError =
fmt.Sprintf("rejected valid incoming transaction; tx already exists for sender %q (%X)",
sender, w.tx.Hash())
txmp.metrics.RejectedTxs.Add(1)
return
}
}
// At this point the application has ruled the transaction valid, but the
// mempool might be full. If so, find the lowest-priority items with lower
// priority than the application assigned to this new one, and evict as many
// of them as necessary to make room for tx. If no such items exist, we
// discard tx.
if err := txmp.canAddTx(wtx); err != nil {
var victims []*clist.CElement // eligible transactions for eviction
var victimBytes int64 // total size of victims
for cur := txmp.txs.Front(); cur != nil; cur = cur.Next() {
cw := cur.Value.(*WrappedTx)
if cw.priority < priority {
victims = append(victims, cur)
victimBytes += cw.Size()
}
}
// If there are no suitable eviction candidates, or the total size of
// those candidates is not enough to make room for the new transaction,
// drop the new one.
if len(victims) == 0 || victimBytes < wtx.Size() {
txmp.cache.Remove(wtx.tx)
txmp.logger.Error(
"rejected valid incoming transaction; mempool is full",
"tx", fmt.Sprintf("%X", wtx.tx.Hash()),
"err", err.Error(),
)
checkTxRes.MempoolError =
fmt.Sprintf("rejected valid incoming transaction; mempool is full (%X)",
wtx.tx.Hash())
txmp.metrics.RejectedTxs.Add(1)
return
}
txmp.logger.Debug("evicting lower-priority transactions",
"new_tx", fmt.Sprintf("%X", wtx.tx.Hash()),
"new_priority", priority,
)
// Sort lowest priority items first so they will be evicted first. Break
// ties in favor of newer items (to maintain FIFO semantics in a group).
sort.Slice(victims, func(i, j int) bool {
iw := victims[i].Value.(*WrappedTx)
jw := victims[j].Value.(*WrappedTx)
if iw.Priority() == jw.Priority() {
return iw.timestamp.After(jw.timestamp)
}
return iw.Priority() < jw.Priority()
})
// Evict as many of the victims as necessary to make room.
var evictedBytes int64
for _, vic := range victims {
w := vic.Value.(*WrappedTx)
txmp.logger.Debug(
"evicted valid existing transaction; mempool full",
"old_tx", fmt.Sprintf("%X", w.tx.Hash()),
"old_priority", w.priority,
)
txmp.removeTxByElement(vic)
txmp.cache.Remove(w.tx)
txmp.metrics.EvictedTxs.Add(1)
// We may not need to evict all the eligible transactions. Bail out
// early if we have made enough room.
evictedBytes += w.Size()
if evictedBytes >= wtx.Size() {
break
}
}
}
wtx.SetGasWanted(checkTxRes.GasWanted)
wtx.SetPriority(priority)
wtx.SetSender(sender)
txmp.insertTx(wtx)
txmp.metrics.TxSizeBytes.Observe(float64(wtx.Size()))
txmp.metrics.Size.Set(float64(txmp.Size()))
txmp.logger.Debug(
"inserted new valid transaction",
"priority", wtx.Priority(),
"tx", fmt.Sprintf("%X", wtx.tx.Hash()),
"height", txmp.height,
"num_txs", txmp.Size(),
)
txmp.notifyTxsAvailable()
}
func (txmp *TxMempool) insertTx(wtx *WrappedTx) {
elt := txmp.txs.PushBack(wtx)
txmp.txByKey[wtx.tx.Key()] = elt
if s := wtx.Sender(); s != "" {
txmp.txBySender[s] = elt
}
atomic.AddInt64(&txmp.txsBytes, wtx.Size())
}
// handleRecheckResult handles the responses from ABCI CheckTx calls issued
// during the recheck phase of a block Update. It removes any transactions
// invalidated by the application.
//
// This method is NOT executed for the initial CheckTx on a new transaction;
// that case is handled by addNewTransaction instead.
func (txmp *TxMempool) handleRecheckResult(tx types.Tx, checkTxRes *abci.ResponseCheckTx) {
txmp.metrics.RecheckTimes.Add(1)
txmp.mtx.Lock()
defer txmp.mtx.Unlock()
// Find the transaction reported by the ABCI callback. It is possible the
// transaction was evicted during the recheck, in which case the transaction
// will be gone.
elt, ok := txmp.txByKey[tx.Key()]
if !ok {
return
}
wtx := elt.Value.(*WrappedTx)
// If a postcheck hook is defined, call it before checking the result.
var err error
if txmp.postCheck != nil {
err = txmp.postCheck(tx, checkTxRes)
}
if checkTxRes.Code == abci.CodeTypeOK && err == nil {
wtx.SetPriority(checkTxRes.Priority)
return // N.B. Size of mempool did not change
}
txmp.logger.Debug(
"existing transaction no longer valid; failed re-CheckTx callback",
"priority", wtx.Priority(),
"tx", fmt.Sprintf("%X", wtx.tx.Hash()),
"err", err,
"code", checkTxRes.Code,
)
txmp.removeTxByElement(elt)
txmp.metrics.FailedTxs.Add(1)
if !txmp.config.KeepInvalidTxsInCache {
txmp.cache.Remove(wtx.tx)
}
txmp.metrics.Size.Set(float64(txmp.Size()))
}
// recheckTransactions initiates re-CheckTx ABCI calls for all the transactions
// currently in the mempool; the responses are handled asynchronously by
// handleRecheckResult.
//
// Precondition: The mempool is not empty.
// The caller must hold txmp.mtx exclusively.
func (txmp *TxMempool) recheckTransactions() {
if txmp.Size() == 0 {
panic("mempool: cannot run recheck on an empty mempool")
}
txmp.logger.Debug(
"executing re-CheckTx for all remaining transactions",
"num_txs", txmp.Size(),
"height", txmp.height,
)
// Collect transactions currently in the mempool requiring recheck.
wtxs := make([]*WrappedTx, 0, txmp.txs.Len())
for e := txmp.txs.Front(); e != nil; e = e.Next() {
wtxs = append(wtxs, e.Value.(*WrappedTx))
}
// Issue CheckTx calls for each remaining transaction, and when all the
// rechecks are complete signal watchers that transactions may be available.
go func() {
g, start := taskgroup.New(nil).Limit(2 * runtime.NumCPU())
for _, wtx := range wtxs {
wtx := wtx
start(func() error {
// The response for this CheckTx is handled just below by handleRecheckResult.
rsp, err := txmp.proxyAppConn.CheckTxSync(abci.RequestCheckTx{
Tx: wtx.tx,
Type: abci.CheckTxType_Recheck,
})
if err != nil {
txmp.logger.Error("failed to execute CheckTx during recheck",
"err", err, "hash", fmt.Sprintf("%x", wtx.tx.Hash()))
} else {
txmp.handleRecheckResult(wtx.tx, rsp)
}
return nil
})
}
_ = txmp.proxyAppConn.FlushAsync()
// When recheck is complete, trigger a notification for more transactions.
_ = g.Wait()
txmp.mtx.Lock()
defer txmp.mtx.Unlock()
txmp.notifyTxsAvailable()
}()
}
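
The fan-out above uses creachadair/taskgroup; the same pattern in isolation looks like this (sketch, assuming a jobs slice and a process function):

g, start := taskgroup.New(nil).Limit(4) // at most 4 tasks run concurrently
for _, job := range jobs {
	job := job // capture the loop variable
	start(func() error { return process(job) })
}
_ = g.Wait() // blocks until every started task has returned
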
// canAddTx returns an error if we cannot insert the provided *WrappedTx into
// the mempool due to the mempool's configured constraints. Otherwise, nil is
// returned and the transaction can be inserted into the mempool.
func (txmp *TxMempool) canAddTx(wtx *WrappedTx) error {
numTxs := txmp.Size()
txBytes := txmp.SizeBytes()
if numTxs >= txmp.config.Size || wtx.Size()+txBytes > txmp.config.MaxTxsBytes {
return mempool.ErrMempoolIsFull{
NumTxs: numTxs,
MaxTxs: txmp.config.Size,
TxsBytes: txBytes,
MaxTxsBytes: txmp.config.MaxTxsBytes,
}
}
return nil
}
// purgeExpiredTxs removes all transactions from the mempool that have exceeded
// their respective height or time-based limits as of the given blockHeight.
// Transactions removed by this operation are not removed from the cache.
//
// The caller must hold txmp.mtx exclusively.
func (txmp *TxMempool) purgeExpiredTxs(blockHeight int64) {
if txmp.config.TTLNumBlocks == 0 && txmp.config.TTLDuration == 0 {
return // nothing to do
}
now := time.Now()
cur := txmp.txs.Front()
for cur != nil {
// N.B. Grab the next element first, since if we remove cur its successor
// will be invalidated.
next := cur.Next()
w := cur.Value.(*WrappedTx)
if txmp.config.TTLNumBlocks > 0 && (blockHeight-w.height) > txmp.config.TTLNumBlocks {
txmp.removeTxByElement(cur)
txmp.cache.Remove(w.tx)
txmp.metrics.EvictedTxs.Add(1)
} else if txmp.config.TTLDuration > 0 && now.Sub(w.timestamp) > txmp.config.TTLDuration {
txmp.removeTxByElement(cur)
txmp.cache.Remove(w.tx)
txmp.metrics.EvictedTxs.Add(1)
}
cur = next
}
}
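
Both limits come from the mempool configuration; for example (sketch, assuming a *config.Config named cfg):

cfg.Mempool.TTLNumBlocks = 10             // expire txs older than 10 blocks, and/or
cfg.Mempool.TTLDuration = 5 * time.Minute // expire txs older than five minutes
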
func (txmp *TxMempool) notifyTxsAvailable() {
if txmp.Size() == 0 {
return // nothing to do
}
if txmp.txsAvailable != nil && !txmp.notifiedTxsAvailable {
// channel cap is 1, so this will send once
txmp.notifiedTxsAvailable = true
select {
case txmp.txsAvailable <- struct{}{}:
default:
}
}
}
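The notifiedTxsAvailable latch means at most one signal fires per height. A hedged fragment of a consumer (txmp as above, with EnableTxsAvailable already called, as the consensus WaitForTxs path does; maxBytes and maxGas are caller-chosen limits):
go func() {
	for range txmp.TxsAvailable() {
		// One wakeup per height: try to assemble a block from the pool.
		txs := txmp.ReapMaxBytesMaxGas(maxBytes, maxGas)
		_ = txs
	}
}()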


@@ -0,0 +1,31 @@
package v1
import (
"fmt"
"math/rand"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/mempool"
)
func BenchmarkTxMempool_CheckTx(b *testing.B) {
txmp := setup(b, 10000)
rng := rand.New(rand.NewSource(time.Now().UnixNano()))
b.ResetTimer()
for n := 0; n < b.N; n++ {
b.StopTimer()
prefix := make([]byte, 20)
_, err := rng.Read(prefix)
require.NoError(b, err)
priority := int64(rng.Intn(9999-1000) + 1000)
tx := []byte(fmt.Sprintf("%X=%d", prefix, priority))
b.StartTimer()
require.NoError(b, txmp.CheckTx(tx, nil, mempool.TxInfo{}))
}
}
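As a usage note, the benchmark can be run in isolation with the standard go test flags, e.g. go test -run='^$' -bench=BenchmarkTxMempool_CheckTx ./mempool/v1 (the -run='^$' filter skips the unit tests so only the benchmark executes).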

654
mempool/v1/mempool_test.go Normal file

@@ -0,0 +1,654 @@
package v1
import (
"bytes"
"errors"
"fmt"
"math/rand"
"os"
"sort"
"strconv"
"strings"
"sync"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/abci/example/code"
"github.com/tendermint/tendermint/abci/example/kvstore"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/mempool"
"github.com/tendermint/tendermint/proxy"
"github.com/tendermint/tendermint/types"
)
// application extends the KV store application by overriding CheckTx to provide
// transaction priority based on the value in the key/value pair.
type application struct {
*kvstore.Application
}
type testTx struct {
tx types.Tx
priority int64
}
func (app *application) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx {
var (
priority int64
sender string
)
// infer the priority from the raw transaction value (sender=key=value)
parts := bytes.Split(req.Tx, []byte("="))
if len(parts) == 3 {
v, err := strconv.ParseInt(string(parts[2]), 10, 64)
if err != nil {
return abci.ResponseCheckTx{
Priority: priority,
Code: 100,
GasWanted: 1,
}
}
priority = v
sender = string(parts[0])
} else {
return abci.ResponseCheckTx{
Priority: priority,
Code: 101,
GasWanted: 1,
}
}
return abci.ResponseCheckTx{
Priority: priority,
Sender: sender,
Code: code.CodeTypeOK,
GasWanted: 1,
}
}
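The test wire format is therefore sender=key=priority, split on "=". A worked fragment of the same parse (the input is hypothetical):
parts := bytes.Split([]byte("alice=c0ffee=42"), []byte("="))
// len(parts) == 3: sender "alice", priority 42, code OK.
// "alice=c0ffee" would return code 101 (wrong arity), and
// "alice=c0ffee=oops" would return code 100 (bad integer).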
func setup(t testing.TB, cacheSize int, options ...TxMempoolOption) *TxMempool {
t.Helper()
app := &application{kvstore.NewApplication()}
cc := proxy.NewLocalClientCreator(app)
cfg := config.ResetTestRoot(strings.ReplaceAll(t.Name(), "/", "|"))
cfg.Mempool.CacheSize = cacheSize
appConnMem, err := cc.NewABCIClient()
require.NoError(t, err)
require.NoError(t, appConnMem.Start())
t.Cleanup(func() {
os.RemoveAll(cfg.RootDir)
require.NoError(t, appConnMem.Stop())
})
return NewTxMempool(log.TestingLogger().With("test", t.Name()), cfg.Mempool, appConnMem, 0, options...)
}
// mustCheckTx invokes txmp.CheckTx for the given transaction and waits until
// its callback has finished executing. It fails t if CheckTx fails.
func mustCheckTx(t *testing.T, txmp *TxMempool, spec string) {
done := make(chan struct{})
if err := txmp.CheckTx([]byte(spec), func(*abci.Response) {
close(done)
}, mempool.TxInfo{}); err != nil {
t.Fatalf("CheckTx for %q failed: %v", spec, err)
}
<-done
}
func checkTxs(t *testing.T, txmp *TxMempool, numTxs int, peerID uint16) []testTx {
txs := make([]testTx, numTxs)
txInfo := mempool.TxInfo{SenderID: peerID}
rng := rand.New(rand.NewSource(time.Now().UnixNano()))
for i := 0; i < numTxs; i++ {
prefix := make([]byte, 20)
_, err := rng.Read(prefix)
require.NoError(t, err)
priority := int64(rng.Intn(9999-1000) + 1000)
txs[i] = testTx{
tx: []byte(fmt.Sprintf("sender-%d-%d=%X=%d", i, peerID, prefix, priority)),
priority: priority,
}
require.NoError(t, txmp.CheckTx(txs[i].tx, nil, txInfo))
}
return txs
}
func TestTxMempool_TxsAvailable(t *testing.T) {
txmp := setup(t, 0)
txmp.EnableTxsAvailable()
ensureNoTxFire := func() {
timer := time.NewTimer(500 * time.Millisecond)
select {
case <-txmp.TxsAvailable():
require.Fail(t, "unexpected transactions event")
case <-timer.C:
}
}
ensureTxFire := func() {
timer := time.NewTimer(500 * time.Millisecond)
select {
case <-txmp.TxsAvailable():
case <-timer.C:
require.Fail(t, "expected transactions event")
}
}
// ensure no event as we have not executed any transactions yet
ensureNoTxFire()
// Execute CheckTx for some transactions and ensure TxsAvailable only fires
// once.
txs := checkTxs(t, txmp, 100, 0)
ensureTxFire()
ensureNoTxFire()
rawTxs := make([]types.Tx, len(txs))
for i, tx := range txs {
rawTxs[i] = tx.tx
}
responses := make([]*abci.ResponseDeliverTx, len(rawTxs[:50]))
for i := 0; i < len(responses); i++ {
responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK}
}
// commit half the transactions and ensure we fire an event
txmp.Lock()
require.NoError(t, txmp.Update(1, rawTxs[:50], responses, nil, nil))
txmp.Unlock()
ensureTxFire()
ensureNoTxFire()
// Execute CheckTx for more transactions and ensure we do not fire another
// event as we're still on the same height (1).
_ = checkTxs(t, txmp, 100, 0)
ensureNoTxFire()
}
func TestTxMempool_Size(t *testing.T) {
txmp := setup(t, 0)
txs := checkTxs(t, txmp, 100, 0)
require.Equal(t, len(txs), txmp.Size())
require.Equal(t, int64(5690), txmp.SizeBytes())
rawTxs := make([]types.Tx, len(txs))
for i, tx := range txs {
rawTxs[i] = tx.tx
}
responses := make([]*abci.ResponseDeliverTx, len(rawTxs[:50]))
for i := 0; i < len(responses); i++ {
responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK}
}
txmp.Lock()
require.NoError(t, txmp.Update(1, rawTxs[:50], responses, nil, nil))
txmp.Unlock()
require.Equal(t, len(rawTxs)/2, txmp.Size())
require.Equal(t, int64(2850), txmp.SizeBytes())
}
func TestTxMempool_Eviction(t *testing.T) {
txmp := setup(t, 1000)
txmp.config.Size = 5
txmp.config.MaxTxsBytes = 60
txExists := func(spec string) bool {
txmp.Lock()
defer txmp.Unlock()
key := types.Tx(spec).Key()
_, ok := txmp.txByKey[key]
return ok
}
// A transaction bigger than the mempool should be rejected even when there
// are slots available.
mustCheckTx(t, txmp, "big=0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef=1")
require.Equal(t, 0, txmp.Size())
// Nearly-fill the mempool with a low-priority transaction, to show that it
// is evicted even when slots are available for a higher-priority tx.
const bigTx = "big=0123456789abcdef0123456789abcdef0123456789abcdef01234=2"
mustCheckTx(t, txmp, bigTx)
require.Equal(t, 1, txmp.Size()) // bigTx is the only element
require.True(t, txExists(bigTx))
require.Equal(t, int64(len(bigTx)), txmp.SizeBytes())
// The next transaction should evict bigTx: it has higher priority, and the
// two of them together do not fit within the byte budget.
mustCheckTx(t, txmp, "key1=0000=25")
require.True(t, txExists("key1=0000=25"))
require.False(t, txExists(bigTx))
require.False(t, txmp.cache.Has([]byte(bigTx)))
require.Equal(t, int64(len("key1=0000=25")), txmp.SizeBytes())
// Now fill up the rest of the slots with other transactions.
mustCheckTx(t, txmp, "key2=0001=5")
mustCheckTx(t, txmp, "key3=0002=10")
mustCheckTx(t, txmp, "key4=0003=3")
mustCheckTx(t, txmp, "key5=0004=3")
// A new transaction with low priority should be discarded.
mustCheckTx(t, txmp, "key6=0005=1")
require.False(t, txExists("key6=0005=1"))
// A new transaction with higher priority should evict key5, which is the
// newest of the two transactions with lowest priority.
mustCheckTx(t, txmp, "key7=0006=7")
require.True(t, txExists("key7=0006=7")) // new transaction added
require.False(t, txExists("key5=0004=3")) // newest low-priority tx evicted
require.True(t, txExists("key4=0003=3")) // older low-priority tx retained
// Another new transaction evicts the other low-priority element.
mustCheckTx(t, txmp, "key8=0007=20")
require.True(t, txExists("key8=0007=20"))
require.False(t, txExists("key4=0003=3"))
// Now the lowest-priority tx is 5, so that should be the next to go.
mustCheckTx(t, txmp, "key9=0008=9")
require.True(t, txExists("key9=0008=9"))
require.False(t, txExists("k3y2=0001=5"))
// Add a transaction that requires eviction of multiple lower-priority
// entries, in order to fit the size of the element.
mustCheckTx(t, txmp, "key10=0123456789abcdef=11") // evict 10, 9, 7; keep 25, 20, 11
require.True(t, txExists("key1=0000=25"))
require.True(t, txExists("key8=0007=20"))
require.True(t, txExists("key10=0123456789abcdef=11"))
require.False(t, txExists("key3=0002=10"))
require.False(t, txExists("key9=0008=9"))
require.False(t, txExists("key7=0006=7"))
}
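The byte budget in this test works out as follows: MaxTxsBytes is 60, the first transaction is 70 bytes and is rejected outright, and bigTx is 59 bytes, so it fits only on its own; the 12-byte key1=0000=25 cannot coexist with it, and since priority 25 beats 2, bigTx is the one evicted.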
func TestTxMempool_Flush(t *testing.T) {
txmp := setup(t, 0)
txs := checkTxs(t, txmp, 100, 0)
require.Equal(t, len(txs), txmp.Size())
require.Equal(t, int64(5690), txmp.SizeBytes())
rawTxs := make([]types.Tx, len(txs))
for i, tx := range txs {
rawTxs[i] = tx.tx
}
responses := make([]*abci.ResponseDeliverTx, len(rawTxs[:50]))
for i := 0; i < len(responses); i++ {
responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK}
}
txmp.Lock()
require.NoError(t, txmp.Update(1, rawTxs[:50], responses, nil, nil))
txmp.Unlock()
txmp.Flush()
require.Zero(t, txmp.Size())
require.Equal(t, int64(0), txmp.SizeBytes())
}
func TestTxMempool_ReapMaxBytesMaxGas(t *testing.T) {
txmp := setup(t, 0)
tTxs := checkTxs(t, txmp, 100, 0) // all txs request 1 gas unit
require.Equal(t, len(tTxs), txmp.Size())
require.Equal(t, int64(5690), txmp.SizeBytes())
txMap := make(map[types.TxKey]testTx)
priorities := make([]int64, len(tTxs))
for i, tTx := range tTxs {
txMap[tTx.tx.Key()] = tTx
priorities[i] = tTx.priority
}
sort.Slice(priorities, func(i, j int) bool {
// sort by priority, i.e. decreasing order
return priorities[i] > priorities[j]
})
ensurePrioritized := func(reapedTxs types.Txs) {
reapedPriorities := make([]int64, len(reapedTxs))
for i, rTx := range reapedTxs {
reapedPriorities[i] = txMap[rTx.Key()].priority
}
require.Equal(t, priorities[:len(reapedPriorities)], reapedPriorities)
}
// reap by gas capacity only
reapedTxs := txmp.ReapMaxBytesMaxGas(-1, 50)
ensurePrioritized(reapedTxs)
require.Equal(t, len(tTxs), txmp.Size())
require.Equal(t, int64(5690), txmp.SizeBytes())
require.Len(t, reapedTxs, 50)
// reap by transaction bytes only
reapedTxs = txmp.ReapMaxBytesMaxGas(1000, -1)
ensurePrioritized(reapedTxs)
require.Equal(t, len(tTxs), txmp.Size())
require.Equal(t, int64(5690), txmp.SizeBytes())
require.GreaterOrEqual(t, len(reapedTxs), 16)
// Reap by both transaction bytes and gas, where the size yields 31 reaped
// transactions and the gas limit reaps 25 transactions.
reapedTxs = txmp.ReapMaxBytesMaxGas(1500, 30)
ensurePrioritized(reapedTxs)
require.Equal(t, len(tTxs), txmp.Size())
require.Equal(t, int64(5690), txmp.SizeBytes())
require.Len(t, reapedTxs, 25)
}
func TestTxMempool_ReapMaxTxs(t *testing.T) {
txmp := setup(t, 0)
tTxs := checkTxs(t, txmp, 100, 0)
require.Equal(t, len(tTxs), txmp.Size())
require.Equal(t, int64(5690), txmp.SizeBytes())
txMap := make(map[types.TxKey]testTx)
priorities := make([]int64, len(tTxs))
for i, tTx := range tTxs {
txMap[tTx.tx.Key()] = tTx
priorities[i] = tTx.priority
}
sort.Slice(priorities, func(i, j int) bool {
// sort by priority, i.e. decreasing order
return priorities[i] > priorities[j]
})
ensurePrioritized := func(reapedTxs types.Txs) {
reapedPriorities := make([]int64, len(reapedTxs))
for i, rTx := range reapedTxs {
reapedPriorities[i] = txMap[rTx.Key()].priority
}
require.Equal(t, priorities[:len(reapedPriorities)], reapedPriorities)
}
// reap all transactions
reapedTxs := txmp.ReapMaxTxs(-1)
ensurePrioritized(reapedTxs)
require.Equal(t, len(tTxs), txmp.Size())
require.Equal(t, int64(5690), txmp.SizeBytes())
require.Len(t, reapedTxs, len(tTxs))
// reap a single transaction
reapedTxs = txmp.ReapMaxTxs(1)
ensurePrioritized(reapedTxs)
require.Equal(t, len(tTxs), txmp.Size())
require.Equal(t, int64(5690), txmp.SizeBytes())
require.Len(t, reapedTxs, 1)
// reap half of the transactions
reapedTxs = txmp.ReapMaxTxs(len(tTxs) / 2)
ensurePrioritized(reapedTxs)
require.Equal(t, len(tTxs), txmp.Size())
require.Equal(t, int64(5690), txmp.SizeBytes())
require.Len(t, reapedTxs, len(tTxs)/2)
}
func TestTxMempool_CheckTxExceedsMaxSize(t *testing.T) {
txmp := setup(t, 0)
rng := rand.New(rand.NewSource(time.Now().UnixNano()))
tx := make([]byte, txmp.config.MaxTxBytes+1)
_, err := rng.Read(tx)
require.NoError(t, err)
require.Error(t, txmp.CheckTx(tx, nil, mempool.TxInfo{SenderID: 0}))
tx = make([]byte, txmp.config.MaxTxBytes-1)
_, err = rng.Read(tx)
require.NoError(t, err)
require.NoError(t, txmp.CheckTx(tx, nil, mempool.TxInfo{SenderID: 0}))
}
func TestTxMempool_CheckTxSamePeer(t *testing.T) {
txmp := setup(t, 100)
peerID := uint16(1)
rng := rand.New(rand.NewSource(time.Now().UnixNano()))
prefix := make([]byte, 20)
_, err := rng.Read(prefix)
require.NoError(t, err)
tx := []byte(fmt.Sprintf("sender-0=%X=%d", prefix, 50))
require.NoError(t, txmp.CheckTx(tx, nil, mempool.TxInfo{SenderID: peerID}))
require.Error(t, txmp.CheckTx(tx, nil, mempool.TxInfo{SenderID: peerID}))
}
func TestTxMempool_CheckTxSameSender(t *testing.T) {
txmp := setup(t, 100)
peerID := uint16(1)
rng := rand.New(rand.NewSource(time.Now().UnixNano()))
prefix1 := make([]byte, 20)
_, err := rng.Read(prefix1)
require.NoError(t, err)
prefix2 := make([]byte, 20)
_, err = rng.Read(prefix2)
require.NoError(t, err)
tx1 := []byte(fmt.Sprintf("sender-0=%X=%d", prefix1, 50))
tx2 := []byte(fmt.Sprintf("sender-0=%X=%d", prefix2, 50))
require.NoError(t, txmp.CheckTx(tx1, nil, mempool.TxInfo{SenderID: peerID}))
require.Equal(t, 1, txmp.Size())
require.NoError(t, txmp.CheckTx(tx2, nil, mempool.TxInfo{SenderID: peerID}))
require.Equal(t, 1, txmp.Size())
}
func TestTxMempool_ConcurrentTxs(t *testing.T) {
txmp := setup(t, 100)
rng := rand.New(rand.NewSource(time.Now().UnixNano()))
checkTxDone := make(chan struct{})
var wg sync.WaitGroup
wg.Add(1)
go func() {
for i := 0; i < 20; i++ {
_ = checkTxs(t, txmp, 100, 0)
dur := rng.Intn(1000-500) + 500
time.Sleep(time.Duration(dur) * time.Millisecond)
}
wg.Done()
close(checkTxDone)
}()
wg.Add(1)
go func() {
ticker := time.NewTicker(time.Second)
defer ticker.Stop()
defer wg.Done()
var height int64 = 1
for range ticker.C {
reapedTxs := txmp.ReapMaxTxs(200)
if len(reapedTxs) > 0 {
responses := make([]*abci.ResponseDeliverTx, len(reapedTxs))
for i := 0; i < len(responses); i++ {
var code uint32
if i%10 == 0 {
code = 100
} else {
code = abci.CodeTypeOK
}
responses[i] = &abci.ResponseDeliverTx{Code: code}
}
txmp.Lock()
require.NoError(t, txmp.Update(height, reapedTxs, responses, nil, nil))
txmp.Unlock()
height++
} else {
// only return once we know we finished the CheckTx loop
select {
case <-checkTxDone:
return
default:
}
}
}
}()
wg.Wait()
require.Zero(t, txmp.Size())
require.Zero(t, txmp.SizeBytes())
}
func TestTxMempool_ExpiredTxs_Timestamp(t *testing.T) {
txmp := setup(t, 5000)
txmp.config.TTLDuration = 5 * time.Millisecond
added1 := checkTxs(t, txmp, 10, 0)
require.Equal(t, len(added1), txmp.Size())
// Wait a while, then add some more transactions that should not be expired
// when the first batch TTLs out.
//
// ms: 0 1 2 3 4 5 6
// ^ ^ ^ ^
// | | | +-- Update (triggers pruning)
// | | +------ first batch expires
// | +-------------- second batch added
// +-------------------------- first batch added
//
// The exact intervals are not important except that the delta should be
// large relative to the cost of CheckTx (ms vs. ns is fine here).
time.Sleep(3 * time.Millisecond)
added2 := checkTxs(t, txmp, 10, 1)
// Wait a while longer, so that the first batch will expire.
time.Sleep(3 * time.Millisecond)
// Trigger an update so that pruning will occur.
txmp.Lock()
defer txmp.Unlock()
require.NoError(t, txmp.Update(txmp.height+1, nil, nil, nil, nil))
// All the transactions in the original set should have been purged.
for _, tx := range added1 {
if _, ok := txmp.txByKey[tx.tx.Key()]; ok {
t.Errorf("Transaction %X should have been purged for TTL", tx.tx.Key())
}
if txmp.cache.Has(tx.tx) {
t.Errorf("Transaction %X should have been removed from the cache", tx.tx.Key())
}
}
// All the transactions added later should still be around.
for _, tx := range added2 {
if _, ok := txmp.txByKey[tx.tx.Key()]; !ok {
t.Errorf("Transaction %X should still be in the mempool, but is not", tx.tx.Key())
}
}
}
func TestTxMempool_ExpiredTxs_NumBlocks(t *testing.T) {
txmp := setup(t, 500)
txmp.height = 100
txmp.config.TTLNumBlocks = 10
tTxs := checkTxs(t, txmp, 100, 0)
require.Equal(t, len(tTxs), txmp.Size())
// reap 5 txs at the next height -- no txs should expire
reapedTxs := txmp.ReapMaxTxs(5)
responses := make([]*abci.ResponseDeliverTx, len(reapedTxs))
for i := 0; i < len(responses); i++ {
responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK}
}
txmp.Lock()
require.NoError(t, txmp.Update(txmp.height+1, reapedTxs, responses, nil, nil))
txmp.Unlock()
require.Equal(t, 95, txmp.Size())
// check more txs at height 101
_ = checkTxs(t, txmp, 50, 1)
require.Equal(t, 145, txmp.Size())
// Reap 5 txs at a height that would expire all the transactions from before
// the previous Update (height 100).
//
// NOTE: When we reap txs below, we do not know if we're picking txs from the
// initial CheckTx calls or from the second round of CheckTx calls. Thus, we
// cannot guarantee that all 95 txs that should expire are still present.
// However, we do know that at most 95 txs can be expired and removed.
reapedTxs = txmp.ReapMaxTxs(5)
responses = make([]*abci.ResponseDeliverTx, len(reapedTxs))
for i := 0; i < len(responses); i++ {
responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK}
}
txmp.Lock()
require.NoError(t, txmp.Update(txmp.height+10, reapedTxs, responses, nil, nil))
txmp.Unlock()
require.GreaterOrEqual(t, txmp.Size(), 45)
}
func TestTxMempool_CheckTxPostCheckError(t *testing.T) {
cases := []struct {
name string
err error
}{
{
name: "error",
err: errors.New("test error"),
},
{
name: "no error",
err: nil,
},
}
for _, tc := range cases {
testCase := tc
t.Run(testCase.name, func(t *testing.T) {
postCheckFn := func(_ types.Tx, _ *abci.ResponseCheckTx) error {
return testCase.err
}
txmp := setup(t, 0, WithPostCheck(postCheckFn))
rng := rand.New(rand.NewSource(time.Now().UnixNano()))
tx := make([]byte, txmp.config.MaxTxBytes-1)
_, err := rng.Read(tx)
require.NoError(t, err)
callback := func(res *abci.Response) {
checkTxRes, ok := res.Value.(*abci.Response_CheckTx)
require.True(t, ok)
expectedErrString := ""
if testCase.err != nil {
expectedErrString = testCase.err.Error()
}
require.Equal(t, expectedErrString, checkTxRes.CheckTx.MempoolError)
}
require.NoError(t, txmp.CheckTx(tx, callback, mempool.TxInfo{SenderID: 0}))
})
}
}

310
mempool/v1/reactor.go Normal file

@@ -0,0 +1,310 @@
package v1
import (
"errors"
"fmt"
"time"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/libs/clist"
"github.com/tendermint/tendermint/libs/log"
tmsync "github.com/tendermint/tendermint/libs/sync"
"github.com/tendermint/tendermint/mempool"
"github.com/tendermint/tendermint/p2p"
protomem "github.com/tendermint/tendermint/proto/tendermint/mempool"
"github.com/tendermint/tendermint/types"
)
// Reactor handles mempool tx broadcasting amongst peers.
// It maintains a map from peer ID to counter, to avoid gossiping txs back to
// the peers they were received from.
type Reactor struct {
p2p.BaseReactor
config *cfg.MempoolConfig
mempool *TxMempool
ids *mempoolIDs
}
type mempoolIDs struct {
mtx tmsync.RWMutex
peerMap map[p2p.ID]uint16
nextID uint16 // assumes that a node will never have over 65536 active peers
activeIDs map[uint16]struct{} // used to check if a given peerID key is used, the value doesn't matter
}
// Reserve searches for the next unused ID and assigns it to the
// peer.
func (ids *mempoolIDs) ReserveForPeer(peer p2p.Peer) {
ids.mtx.Lock()
defer ids.mtx.Unlock()
curID := ids.nextPeerID()
ids.peerMap[peer.ID()] = curID
ids.activeIDs[curID] = struct{}{}
}
// nextPeerID returns the next unused peer ID to use.
// This assumes that ids's mutex is already locked.
func (ids *mempoolIDs) nextPeerID() uint16 {
if len(ids.activeIDs) == mempool.MaxActiveIDs {
panic(fmt.Sprintf("node has maximum %d active IDs and wanted to get one more", mempool.MaxActiveIDs))
}
_, idExists := ids.activeIDs[ids.nextID]
for idExists {
ids.nextID++
_, idExists = ids.activeIDs[ids.nextID]
}
curID := ids.nextID
ids.nextID++
return curID
}
// Reclaim returns the ID reserved for the peer back to unused pool.
func (ids *mempoolIDs) Reclaim(peer p2p.Peer) {
ids.mtx.Lock()
defer ids.mtx.Unlock()
removedID, ok := ids.peerMap[peer.ID()]
if ok {
delete(ids.activeIDs, removedID)
delete(ids.peerMap, peer.ID())
}
}
// GetForPeer returns an ID reserved for the peer.
func (ids *mempoolIDs) GetForPeer(peer p2p.Peer) uint16 {
ids.mtx.RLock()
defer ids.mtx.RUnlock()
return ids.peerMap[peer.ID()]
}
func newMempoolIDs() *mempoolIDs {
return &mempoolIDs{
peerMap: make(map[p2p.ID]uint16),
activeIDs: map[uint16]struct{}{0: {}},
nextID: 1, // reserve unknownPeerID(0) for mempoolReactor.BroadcastTx
}
}
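nextPeerID's scan relies on uint16 wraparound plus the activeIDs set to find a free slot. A self-contained sketch of the same technique, folding the reserve step in (simplified; the real code also panics at MaxActiveIDs, so the scan cannot spin forever):
// nextFree returns the first ID at or after *cursor that is not in use,
// marks it used, and advances the cursor past it; uint16 overflow
// provides the wraparound.
func nextFree(active map[uint16]struct{}, cursor *uint16) uint16 {
	for {
		if _, used := active[*cursor]; !used {
			id := *cursor
			active[id] = struct{}{}
			*cursor++
			return id
		}
		*cursor++
	}
}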
// NewReactor returns a new Reactor with the given config and mempool.
func NewReactor(config *cfg.MempoolConfig, mempool *TxMempool) *Reactor {
memR := &Reactor{
config: config,
mempool: mempool,
ids: newMempoolIDs(),
}
memR.BaseReactor = *p2p.NewBaseReactor("Mempool", memR)
return memR
}
// InitPeer implements Reactor by creating a state for the peer.
func (memR *Reactor) InitPeer(peer p2p.Peer) p2p.Peer {
memR.ids.ReserveForPeer(peer)
return peer
}
// SetLogger sets the Logger on the reactor and the underlying mempool.
func (memR *Reactor) SetLogger(l log.Logger) {
memR.Logger = l
}
// OnStart implements p2p.BaseReactor.
func (memR *Reactor) OnStart() error {
if !memR.config.Broadcast {
memR.Logger.Info("Tx broadcasting is disabled")
}
return nil
}
// GetChannels implements Reactor by returning the list of channels for this
// reactor.
func (memR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
largestTx := make([]byte, memR.config.MaxTxBytes)
batchMsg := protomem.Message{
Sum: &protomem.Message_Txs{
Txs: &protomem.Txs{Txs: [][]byte{largestTx}},
},
}
return []*p2p.ChannelDescriptor{
{
ID: mempool.MempoolChannel,
Priority: 5,
RecvMessageCapacity: batchMsg.Size(),
},
}
}
// AddPeer implements Reactor.
// It starts a broadcast routine ensuring all txs are forwarded to the given peer.
func (memR *Reactor) AddPeer(peer p2p.Peer) {
if memR.config.Broadcast {
go memR.broadcastTxRoutine(peer)
}
}
// RemovePeer implements Reactor.
func (memR *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) {
memR.ids.Reclaim(peer)
// broadcast routine checks if peer is gone and returns
}
// Receive implements Reactor.
// It adds any received transactions to the mempool.
func (memR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
msg, err := memR.decodeMsg(msgBytes)
if err != nil {
memR.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err)
memR.Switch.StopPeerForError(src, err)
return
}
memR.Logger.Debug("Receive", "src", src, "chId", chID, "msg", msg)
txInfo := mempool.TxInfo{SenderID: memR.ids.GetForPeer(src)}
if src != nil {
txInfo.SenderP2PID = src.ID()
}
for _, tx := range msg.Txs {
err = memR.mempool.CheckTx(tx, nil, txInfo)
if err == mempool.ErrTxInCache {
memR.Logger.Debug("Tx already exists in cache", "tx", tx.String())
} else if err != nil {
memR.Logger.Info("Could not check tx", "tx", tx.String(), "err", err)
}
}
// broadcasting happens from per-peer goroutines
}
// PeerState describes the state of a peer.
type PeerState interface {
GetHeight() int64
}
// Send new mempool txs to peer.
func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) {
peerID := memR.ids.GetForPeer(peer)
var next *clist.CElement
for {
// In case both next.NextWaitChan() and peer.Quit() fire at the same time
if !memR.IsRunning() || !peer.IsRunning() {
return
}
// This happens because the CElement we were looking at got garbage
// collected (removed). That is, .NextWait() returned nil. Go ahead and
// start from the beginning.
if next == nil {
select {
case <-memR.mempool.TxsWaitChan(): // Wait until a tx is available
if next = memR.mempool.TxsFront(); next == nil {
continue
}
case <-peer.Quit():
return
case <-memR.Quit():
return
}
}
// Make sure the peer is up to date.
peerState, ok := peer.Get(types.PeerStateKey).(PeerState)
if !ok {
// The peer does not have a state yet. The consensus reactor sets it, but
// when a peer is added to the Switch, the order in which each reactor's
// AddPeer is called varies from run to run because we iterate over a map.
// Other reactors may therefore run before the consensus reactor, so wait a
// few milliseconds and retry.
time.Sleep(mempool.PeerCatchupSleepIntervalMS * time.Millisecond)
continue
}
// Allow for a lag of 1 block.
memTx := next.Value.(*WrappedTx)
if peerState.GetHeight() < memTx.height-1 {
time.Sleep(mempool.PeerCatchupSleepIntervalMS * time.Millisecond)
continue
}
// NOTE: Transaction batching was disabled due to
// https://github.com/tendermint/tendermint/issues/5796
if !memTx.HasPeer(peerID) {
msg := protomem.Message{
Sum: &protomem.Message_Txs{
Txs: &protomem.Txs{Txs: [][]byte{memTx.tx}},
},
}
bz, err := msg.Marshal()
if err != nil {
panic(err)
}
success := peer.Send(mempool.MempoolChannel, bz)
if !success {
time.Sleep(mempool.PeerCatchupSleepIntervalMS * time.Millisecond)
continue
}
}
select {
case <-next.NextWaitChan():
// see the start of the for loop for nil check
next = next.Next()
case <-peer.Quit():
return
case <-memR.Quit():
return
}
}
}
//-----------------------------------------------------------------------------
// Messages
func (memR *Reactor) decodeMsg(bz []byte) (TxsMessage, error) {
msg := protomem.Message{}
err := msg.Unmarshal(bz)
if err != nil {
return TxsMessage{}, err
}
var message TxsMessage
if i, ok := msg.Sum.(*protomem.Message_Txs); ok {
txs := i.Txs.GetTxs()
if len(txs) == 0 {
return message, errors.New("empty TxsMessage")
}
decoded := make([]types.Tx, len(txs))
for j, tx := range txs {
decoded[j] = types.Tx(tx)
}
message = TxsMessage{
Txs: decoded,
}
return message, nil
}
return message, fmt.Errorf("msg type: %T is not supported", msg)
}
//-------------------------------------
// TxsMessage is a Message containing transactions.
type TxsMessage struct {
Txs []types.Tx
}
// String returns a string representation of the TxsMessage.
func (m *TxsMessage) String() string {
return fmt.Sprintf("[TxsMessage %v]", m.Txs)
}
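For reference, a hedged round-trip of the wire format decodeMsg expects, built from the generated protomem types used above:
msg := protomem.Message{
	Sum: &protomem.Message_Txs{
		Txs: &protomem.Txs{Txs: [][]byte{[]byte("tx1")}},
	},
}
bz, err := msg.Marshal() // what broadcastTxRoutine sends on MempoolChannel
if err != nil {
	panic(err)
}
// decodeMsg(bz) on the receiving reactor yields TxsMessage{Txs: [tx1]};
// an empty Txs slice would instead produce the "empty TxsMessage" error.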

186
mempool/v1/reactor_test.go Normal file

@@ -0,0 +1,186 @@
package v1
import (
"encoding/hex"
"os"
"sync"
"testing"
"time"
"github.com/go-kit/log/term"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/abci/example/kvstore"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/mempool"
"github.com/tendermint/tendermint/p2p"
memproto "github.com/tendermint/tendermint/proto/tendermint/mempool"
"github.com/tendermint/tendermint/proxy"
"github.com/tendermint/tendermint/types"
)
const (
numTxs = 1000
timeout = 120 * time.Second // ridiculously high because CircleCI is slow
)
type peerState struct {
height int64
}
func (ps peerState) GetHeight() int64 {
return ps.height
}
// Send a bunch of txs to the first reactor's mempool and wait for them all to
// be received in the others.
func TestReactorBroadcastTxsMessage(t *testing.T) {
config := cfg.TestConfig()
// If there were more than two reactors, the order of transactions could not
// be asserted in waitForTxsOnReactors (due to transaction gossiping). If we
// replaced Connect2Switches (full mesh) with a function that connects the
// first reactor to the others and nothing else, this test should also pass
// with >2 reactors.
const N = 2
reactors := makeAndConnectReactors(config, N)
defer func() {
for _, r := range reactors {
if err := r.Stop(); err != nil {
assert.NoError(t, err)
}
}
}()
for _, r := range reactors {
for _, peer := range r.Switch.Peers().List() {
peer.Set(types.PeerStateKey, peerState{1})
}
}
txs := checkTxs(t, reactors[0].mempool, numTxs, mempool.UnknownPeerID)
transactions := make(types.Txs, len(txs))
for idx, tx := range txs {
transactions[idx] = tx.tx
}
waitForTxsOnReactors(t, transactions, reactors)
}
func TestMempoolVectors(t *testing.T) {
testCases := []struct {
testName string
tx []byte
expBytes string
}{
{"tx 1", []byte{123}, "0a030a017b"},
{"tx 2", []byte("proto encoding in mempool"), "0a1b0a1970726f746f20656e636f64696e6720696e206d656d706f6f6c"},
}
for _, tc := range testCases {
tc := tc
msg := memproto.Message{
Sum: &memproto.Message_Txs{
Txs: &memproto.Txs{Txs: [][]byte{tc.tx}},
},
}
bz, err := msg.Marshal()
require.NoError(t, err, tc.testName)
require.Equal(t, tc.expBytes, hex.EncodeToString(bz), tc.testName)
}
}
func makeAndConnectReactors(config *cfg.Config, n int) []*Reactor {
reactors := make([]*Reactor, n)
logger := mempoolLogger()
for i := 0; i < n; i++ {
app := kvstore.NewApplication()
cc := proxy.NewLocalClientCreator(app)
mempool, cleanup := newMempoolWithApp(cc)
defer cleanup()
reactors[i] = NewReactor(config.Mempool, mempool) // so we don't start the consensus states
reactors[i].SetLogger(logger.With("validator", i))
}
p2p.MakeConnectedSwitches(config.P2P, n, func(i int, s *p2p.Switch) *p2p.Switch {
s.AddReactor("MEMPOOL", reactors[i])
return s
}, p2p.Connect2Switches)
return reactors
}
// mempoolLogger is a TestingLogger which uses a different
// color for each validator ("validator" key must exist).
func mempoolLogger() log.Logger {
return log.TestingLoggerWithColorFn(func(keyvals ...interface{}) term.FgBgColor {
for i := 0; i < len(keyvals)-1; i += 2 {
if keyvals[i] == "validator" {
return term.FgBgColor{Fg: term.Color(uint8(keyvals[i+1].(int) + 1))}
}
}
return term.FgBgColor{}
})
}
func newMempoolWithApp(cc proxy.ClientCreator) (*TxMempool, func()) {
conf := cfg.ResetTestRoot("mempool_test")
mp, cu := newMempoolWithAppAndConfig(cc, conf)
return mp, cu
}
func newMempoolWithAppAndConfig(cc proxy.ClientCreator, conf *cfg.Config) (*TxMempool, func()) {
appConnMem, _ := cc.NewABCIClient()
appConnMem.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "mempool"))
err := appConnMem.Start()
if err != nil {
panic(err)
}
mp := NewTxMempool(log.TestingLogger(), conf.Mempool, appConnMem, 0)
return mp, func() { os.RemoveAll(conf.RootDir) }
}
func waitForTxsOnReactors(t *testing.T, txs types.Txs, reactors []*Reactor) {
// wait for the txs in all mempools
wg := new(sync.WaitGroup)
for i, reactor := range reactors {
wg.Add(1)
go func(r *Reactor, reactorIndex int) {
defer wg.Done()
waitForTxsOnReactor(t, txs, r, reactorIndex)
}(reactor, i)
}
done := make(chan struct{})
go func() {
wg.Wait()
close(done)
}()
timer := time.After(timeout)
select {
case <-timer:
t.Fatal("Timed out waiting for txs")
case <-done:
}
}
func waitForTxsOnReactor(t *testing.T, txs types.Txs, reactor *Reactor, reactorIndex int) {
mempool := reactor.mempool
for mempool.Size() < len(txs) {
time.Sleep(time.Millisecond * 100)
}
reapedTxs := mempool.ReapMaxTxs(len(txs))
for i, tx := range txs {
assert.Equalf(t, tx, reapedTxs[i],
"txs at index %d on reactor %d don't match: %v vs %v", i, reactorIndex, tx, reapedTxs[i])
}
}

87
mempool/v1/tx.go Normal file

@@ -0,0 +1,87 @@
package v1
import (
"sync"
"time"
"github.com/tendermint/tendermint/types"
)
// WrappedTx defines a wrapper around a raw transaction with additional metadata
// that is used for indexing.
type WrappedTx struct {
tx types.Tx // the original transaction data
hash types.TxKey // the transaction hash
height int64 // height when this transaction was initially checked (for expiry)
timestamp time.Time // time when transaction was entered (for TTL)
mtx sync.Mutex
gasWanted int64 // app: gas required to execute this transaction
priority int64 // app: priority value for this transaction
sender string // app: assigned sender label
peers map[uint16]bool // peer IDs who have sent us this transaction
}
// Size reports the size of the raw transaction in bytes.
func (w *WrappedTx) Size() int64 { return int64(len(w.tx)) }
// SetPeer adds the specified peer ID as a sender of w.
func (w *WrappedTx) SetPeer(id uint16) {
w.mtx.Lock()
defer w.mtx.Unlock()
if w.peers == nil {
w.peers = map[uint16]bool{id: true}
} else {
w.peers[id] = true
}
}
// HasPeer reports whether the specified peer ID is a sender of w.
func (w *WrappedTx) HasPeer(id uint16) bool {
w.mtx.Lock()
defer w.mtx.Unlock()
_, ok := w.peers[id]
return ok
}
// SetGasWanted sets the application-assigned gas requirement of w.
func (w *WrappedTx) SetGasWanted(gas int64) {
w.mtx.Lock()
defer w.mtx.Unlock()
w.gasWanted = gas
}
// GasWanted reports the application-assigned gas requirement of w.
func (w *WrappedTx) GasWanted() int64 {
w.mtx.Lock()
defer w.mtx.Unlock()
return w.gasWanted
}
// SetSender sets the application-assigned sender of w.
func (w *WrappedTx) SetSender(sender string) {
w.mtx.Lock()
defer w.mtx.Unlock()
w.sender = sender
}
// Sender reports the application-assigned sender of w.
func (w *WrappedTx) Sender() string {
w.mtx.Lock()
defer w.mtx.Unlock()
return w.sender
}
// SetPriority sets the application-assigned priority of w.
func (w *WrappedTx) SetPriority(p int64) {
w.mtx.Lock()
defer w.mtx.Unlock()
w.priority = p
}
// Priority reports the application-assigned priority of w.
func (w *WrappedTx) Priority() int64 {
w.mtx.Lock()
defer w.mtx.Unlock()
return w.priority
}
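Every app-assigned field goes through the per-tx mutex, so recheck and broadcast goroutines may touch the same WrappedTx concurrently. A minimal in-package sketch (values are hypothetical):
w := &WrappedTx{tx: types.Tx("alice=00=9"), timestamp: time.Now()}
w.SetPriority(9) // assigned from the CheckTx response
w.SetPeer(1)     // remember which peer sent it to us
_ = w.Priority() // 9; safe to call concurrently with SetPriority
_ = w.HasPeer(1) // true, so the broadcast routine skips peer 1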


@@ -23,12 +23,15 @@ import (
cs "github.com/tendermint/tendermint/consensus"
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/evidence"
tmjson "github.com/tendermint/tendermint/libs/json"
"github.com/tendermint/tendermint/libs/log"
tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
"github.com/tendermint/tendermint/libs/service"
"github.com/tendermint/tendermint/light"
mempl "github.com/tendermint/tendermint/mempool"
mempoolv0 "github.com/tendermint/tendermint/mempool/v0"
mempoolv1 "github.com/tendermint/tendermint/mempool/v1"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/p2p/pex"
"github.com/tendermint/tendermint/privval"
@@ -209,7 +212,7 @@ type Node struct {
stateStore sm.Store
blockStore *store.BlockStore // store the blockchain to disk
bcReactor p2p.Reactor // for fast-syncing
mempoolReactor *mempl.Reactor // for gossipping transactions
mempoolReactor p2p.Reactor // for gossipping transactions
mempool mempl.Mempool
stateSync bool // whether the node should state sync on startup
stateSyncReactor *statesync.Reactor // for hosting and restoring state sync snapshots
@@ -361,25 +364,61 @@ func onlyValidatorIsUs(state sm.State, pubKey crypto.PubKey) bool {
return bytes.Equal(pubKey.Address(), addr)
}
func createMempoolAndMempoolReactor(config *cfg.Config, proxyApp proxy.AppConns,
state sm.State, memplMetrics *mempl.Metrics, logger log.Logger) (*mempl.Reactor, *mempl.CListMempool) {
func createMempoolAndMempoolReactor(
config *cfg.Config,
proxyApp proxy.AppConns,
state sm.State,
memplMetrics *mempl.Metrics,
logger log.Logger,
) (mempl.Mempool, p2p.Reactor) {
mempool := mempl.NewCListMempool(
config.Mempool,
proxyApp.Mempool(),
state.LastBlockHeight,
mempl.WithMetrics(memplMetrics),
mempl.WithPreCheck(sm.TxPreCheck(state)),
mempl.WithPostCheck(sm.TxPostCheck(state)),
)
mempoolLogger := logger.With("module", "mempool")
mempoolReactor := mempl.NewReactor(config.Mempool, mempool)
mempoolReactor.SetLogger(mempoolLogger)
switch config.Mempool.Version {
case cfg.MempoolV1:
mp := mempoolv1.NewTxMempool(
logger,
config.Mempool,
proxyApp.Mempool(),
state.LastBlockHeight,
mempoolv1.WithMetrics(memplMetrics),
mempoolv1.WithPreCheck(sm.TxPreCheck(state)),
mempoolv1.WithPostCheck(sm.TxPostCheck(state)),
)
if config.Consensus.WaitForTxs() {
mempool.EnableTxsAvailable()
reactor := mempoolv1.NewReactor(
config.Mempool,
mp,
)
if config.Consensus.WaitForTxs() {
mp.EnableTxsAvailable()
}
return mp, reactor
case cfg.MempoolV0:
mp := mempoolv0.NewCListMempool(
config.Mempool,
proxyApp.Mempool(),
state.LastBlockHeight,
mempoolv0.WithMetrics(memplMetrics),
mempoolv0.WithPreCheck(sm.TxPreCheck(state)),
mempoolv0.WithPostCheck(sm.TxPostCheck(state)),
)
mp.SetLogger(logger)
reactor := mempoolv0.NewReactor(
config.Mempool,
mp,
)
if config.Consensus.WaitForTxs() {
mp.EnableTxsAvailable()
}
return mp, reactor
default:
return nil, nil
}
return mempoolReactor, mempool
}
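Which branch runs is driven by the mempool version setting; a minimal sketch of opting in to the priority mempool programmatically, using the same constant the switch above matches on (cfg.DefaultConfig is the stock config constructor):
conf := cfg.DefaultConfig()
conf.Mempool.Version = cfg.MempoolV1 // "v1" selects TxMempool; cfg.MempoolV0 keeps the clist mempool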
func createEvidenceReactor(config *cfg.Config, dbProvider DBProvider,
@@ -425,7 +464,7 @@ func createConsensusReactor(config *cfg.Config,
state sm.State,
blockExec *sm.BlockExecutor,
blockStore sm.BlockStore,
mempool *mempl.CListMempool,
mempool mempl.Mempool,
evidencePool *evidence.Pool,
privValidator types.PrivValidator,
csMetrics *cs.Metrics,
@@ -527,7 +566,7 @@ func createSwitch(config *cfg.Config,
transport p2p.Transport,
p2pMetrics *p2p.Metrics,
peerFilters []p2p.PeerFilterFunc,
mempoolReactor *mempl.Reactor,
mempoolReactor p2p.Reactor,
bcReactor p2p.Reactor,
stateSyncReactor *statesync.Reactor,
consensusReactor *cs.Reactor,
@@ -752,7 +791,7 @@ func NewNode(config *cfg.Config,
csMetrics, p2pMetrics, memplMetrics, smMetrics := metricsProvider(genDoc.ChainID)
// Make MempoolReactor
mempoolReactor, mempool := createMempoolAndMempoolReactor(config, proxyApp, state, memplMetrics, logger)
mempool, mempoolReactor := createMempoolAndMempoolReactor(config, proxyApp, state, memplMetrics, logger)
// Make Evidence Reactor
evidenceReactor, evidencePool, err := createEvidenceReactor(config, dbProvider, stateDB, blockStore, logger)
@@ -930,13 +969,6 @@ func (n *Node) OnStart() error {
n.isListening = true
if n.config.Mempool.WalEnabled() {
err = n.mempool.InitWAL()
if err != nil {
return fmt.Errorf("init mempool WAL: %w", err)
}
}
// Start the switch (the P2P server).
err = n.sw.Start()
if err != nil {
@@ -984,11 +1016,6 @@ func (n *Node) OnStop() {
n.Logger.Error("Error closing switch", "err", err)
}
// stop mempool WAL
if n.config.Mempool.WalEnabled() {
n.mempool.CloseWAL()
}
if err := n.transport.Close(); err != nil {
n.Logger.Error("Error closing transport", "err", err)
}
@@ -1224,7 +1251,7 @@ func (n *Node) ConsensusReactor() *cs.Reactor {
}
// MempoolReactor returns the Node's mempool reactor.
func (n *Node) MempoolReactor() *mempl.Reactor {
func (n *Node) MempoolReactor() p2p.Reactor {
return n.mempoolReactor
}


@@ -21,6 +21,8 @@ import (
"github.com/tendermint/tendermint/libs/log"
tmrand "github.com/tendermint/tendermint/libs/rand"
mempl "github.com/tendermint/tendermint/mempool"
mempoolv0 "github.com/tendermint/tendermint/mempool/v0"
mempoolv1 "github.com/tendermint/tendermint/mempool/v1"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/p2p/conn"
p2pmock "github.com/tendermint/tendermint/p2p/mock"
@@ -242,16 +244,27 @@ func TestCreateProposalBlock(t *testing.T) {
proposerAddr, _ := state.Validators.GetByIndex(0)
// Make Mempool
memplMetrics := mempl.PrometheusMetrics("node_test_1")
mempool := mempl.NewCListMempool(
config.Mempool,
proxyApp.Mempool(),
state.LastBlockHeight,
mempl.WithMetrics(memplMetrics),
mempl.WithPreCheck(sm.TxPreCheck(state)),
mempl.WithPostCheck(sm.TxPostCheck(state)),
)
mempool.SetLogger(logger)
memplMetrics := mempl.NopMetrics()
var mempool mempl.Mempool
switch config.Mempool.Version {
case cfg.MempoolV0:
mempool = mempoolv0.NewCListMempool(config.Mempool,
proxyApp.Mempool(),
state.LastBlockHeight,
mempoolv0.WithMetrics(memplMetrics),
mempoolv0.WithPreCheck(sm.TxPreCheck(state)),
mempoolv0.WithPostCheck(sm.TxPostCheck(state)))
case cfg.MempoolV1:
mempool = mempoolv1.NewTxMempool(logger,
config.Mempool,
proxyApp.Mempool(),
state.LastBlockHeight,
mempoolv1.WithMetrics(memplMetrics),
mempoolv1.WithPreCheck(sm.TxPreCheck(state)),
mempoolv1.WithPostCheck(sm.TxPostCheck(state)),
)
}
// Make EvidencePool
evidenceDB := dbm.NewMemDB()
@@ -334,16 +347,26 @@ func TestMaxProposalBlockSize(t *testing.T) {
proposerAddr, _ := state.Validators.GetByIndex(0)
// Make Mempool
memplMetrics := mempl.PrometheusMetrics("node_test_2")
mempool := mempl.NewCListMempool(
config.Mempool,
proxyApp.Mempool(),
state.LastBlockHeight,
mempl.WithMetrics(memplMetrics),
mempl.WithPreCheck(sm.TxPreCheck(state)),
mempl.WithPostCheck(sm.TxPostCheck(state)),
)
mempool.SetLogger(logger)
memplMetrics := mempl.NopMetrics()
var mempool mempl.Mempool
switch config.Mempool.Version {
case cfg.MempoolV0:
mempool = mempoolv0.NewCListMempool(config.Mempool,
proxyApp.Mempool(),
state.LastBlockHeight,
mempoolv0.WithMetrics(memplMetrics),
mempoolv0.WithPreCheck(sm.TxPreCheck(state)),
mempoolv0.WithPostCheck(sm.TxPostCheck(state)))
case cfg.MempoolV1:
mempool = mempoolv1.NewTxMempool(logger,
config.Mempool,
proxyApp.Mempool(),
state.LastBlockHeight,
mempoolv1.WithMetrics(memplMetrics),
mempoolv1.WithPreCheck(sm.TxPreCheck(state)),
mempoolv1.WithPostCheck(sm.TxPostCheck(state)),
)
}
// fill the mempool with one txs just below the maximum size
txLength := int(types.MaxDataBytesNoEvidence(maxBytes, 1))


@@ -352,7 +352,7 @@ func (c *MConnection) Send(chID byte, msgBytes []byte) bool {
return false
}
c.Logger.Debug("Send", "channel", chID, "conn", c, "msgBytes", fmt.Sprintf("%X", msgBytes))
c.Logger.Debug("Send", "channel", chID, "conn", c, "msgBytes", log.NewLazySprintf("%X", msgBytes))
// Send message to channel.
channel, ok := c.channelsIdx[chID]
@@ -369,7 +369,7 @@ func (c *MConnection) Send(chID byte, msgBytes []byte) bool {
default:
}
} else {
c.Logger.Debug("Send failed", "channel", chID, "conn", c, "msgBytes", fmt.Sprintf("%X", msgBytes))
c.Logger.Debug("Send failed", "channel", chID, "conn", c, "msgBytes", log.NewLazySprintf("%X", msgBytes))
}
return success
}
@@ -381,7 +381,7 @@ func (c *MConnection) TrySend(chID byte, msgBytes []byte) bool {
return false
}
c.Logger.Debug("TrySend", "channel", chID, "conn", c, "msgBytes", fmt.Sprintf("%X", msgBytes))
c.Logger.Debug("TrySend", "channel", chID, "conn", c, "msgBytes", log.NewLazySprintf("%X", msgBytes))
// Send message to channel.
channel, ok := c.channelsIdx[chID]


@@ -1,4 +1,4 @@
// Code generated by mockery v2.1.0. DO NOT EDIT.
// Code generated by mockery. DO NOT EDIT.
package mocks
@@ -329,3 +329,18 @@ func (_m *Peer) TrySend(_a0 byte, _a1 []byte) bool {
return r0
}
type NewPeerT interface {
mock.TestingT
Cleanup(func())
}
// NewPeer creates a new instance of Peer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
func NewPeer(t NewPeerT) *Peer {
mock := &Peer{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}


@@ -12,7 +12,7 @@ import (
tmconn "github.com/tendermint/tendermint/p2p/conn"
)
//go:generate mockery --case underscore --name Peer
//go:generate ../scripts/mockery_generate.sh Peer
const metricsTickerDuration = 10 * time.Second


@@ -17,6 +17,7 @@ import (
"github.com/minio/highwayhash"
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/libs/log"
tmmath "github.com/tendermint/tendermint/libs/math"
tmrand "github.com/tendermint/tendermint/libs/rand"
"github.com/tendermint/tendermint/libs/service"
@@ -739,7 +740,7 @@ func (a *addrBook) expireNew(bucketIdx int) {
for addrStr, ka := range a.bucketsNew[bucketIdx] {
// If an entry is bad, throw it away
if ka.isBad() {
a.Logger.Info(fmt.Sprintf("expiring bad address %v", addrStr))
a.Logger.Info("expire new", "msg", log.NewLazySprintf("expiring bad address %v", addrStr))
a.removeFromBucket(ka, bucketTypeNew, bucketIdx)
return
}


@@ -8,6 +8,7 @@ import (
"github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/libs/cmap"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/rand"
"github.com/tendermint/tendermint/libs/service"
"github.com/tendermint/tendermint/p2p/conn"
@@ -261,7 +262,7 @@ func (sw *Switch) OnStop() {
//
// NOTE: Broadcast uses goroutines, so order of broadcast may not be preserved.
func (sw *Switch) Broadcast(chID byte, msgBytes []byte) chan bool {
sw.Logger.Debug("Broadcast", "channel", chID, "msgBytes", fmt.Sprintf("%X", msgBytes))
sw.Logger.Debug("Broadcast", "channel", chID, "msgBytes", log.NewLazySprintf("%X", msgBytes))
peers := sw.peers.List()
var wg sync.WaitGroup


@@ -18,19 +18,19 @@ func makeUPNPListener(intPort int, extPort int, logger log.Logger) (NAT, net.Lis
if err != nil {
return nil, nil, nil, fmt.Errorf("nat upnp could not be discovered: %v", err)
}
logger.Info(fmt.Sprintf("ourIP: %v", nat.(*upnpNAT).ourIP))
logger.Info("make upnp listener", "msg", log.NewLazySprintf("ourIP: %v", nat.(*upnpNAT).ourIP))
ext, err := nat.GetExternalAddress()
if err != nil {
return nat, nil, nil, fmt.Errorf("external address error: %v", err)
}
logger.Info(fmt.Sprintf("External address: %v", ext))
logger.Info("make upnp listener", "msg", log.NewLazySprintf("External address: %v", ext))
port, err := nat.AddPortMapping("tcp", extPort, intPort, "Tendermint UPnP Probe", 0)
if err != nil {
return nat, nil, ext, fmt.Errorf("port mapping error: %v", err)
}
logger.Info(fmt.Sprintf("Port mapping mapped: %v", port))
logger.Info("make upnp listener", "msg", log.NewLazySprintf("Port mapping mapped: %v", port))
// also run the listener, open for all remote addresses.
listener, err := net.Listen("tcp", fmt.Sprintf(":%v", intPort))
@@ -45,17 +45,23 @@ func testHairpin(listener net.Listener, extAddr string, logger log.Logger) (supp
go func() {
inConn, err := listener.Accept()
if err != nil {
logger.Info(fmt.Sprintf("Listener.Accept() error: %v", err))
logger.Info("test hair pin", "msg", log.NewLazySprintf("Listener.Accept() error: %v", err))
return
}
logger.Info(fmt.Sprintf("Accepted incoming connection: %v -> %v", inConn.LocalAddr(), inConn.RemoteAddr()))
logger.Info("test hair pin",
"msg",
log.NewLazySprintf("Accepted incoming connection: %v -> %v", inConn.LocalAddr(), inConn.RemoteAddr()))
buf := make([]byte, 1024)
n, err := inConn.Read(buf)
if err != nil {
logger.Info(fmt.Sprintf("Incoming connection read error: %v", err))
logger.Info("test hair pin",
"msg",
log.NewLazySprintf("Incoming connection read error: %v", err))
return
}
logger.Info(fmt.Sprintf("Incoming connection read %v bytes: %X", n, buf))
logger.Info("test hair pin",
"msg",
log.NewLazySprintf("Incoming connection read %v bytes: %X", n, buf))
if string(buf) == "test data" {
supportsHairpin = true
return
@@ -65,16 +71,16 @@ func testHairpin(listener net.Listener, extAddr string, logger log.Logger) (supp
// Establish outgoing
outConn, err := net.Dial("tcp", extAddr)
if err != nil {
logger.Info(fmt.Sprintf("Outgoing connection dial error: %v", err))
logger.Info("test hair pin", "msg", log.NewLazySprintf("Outgoing connection dial error: %v", err))
return
}
n, err := outConn.Write([]byte("test data"))
if err != nil {
logger.Info(fmt.Sprintf("Outgoing connection write error: %v", err))
logger.Info("test hair pin", "msg", log.NewLazySprintf("Outgoing connection write error: %v", err))
return
}
logger.Info(fmt.Sprintf("Outgoing connection wrote %v bytes", n))
logger.Info("test hair pin", "msg", log.NewLazySprintf("Outgoing connection wrote %v bytes", n))
// Wait for data receipt
time.Sleep(1 * time.Second)


@@ -63,11 +63,10 @@ func (pvKey FilePVKey) Save() {
if err != nil {
panic(err)
}
err = tempfile.WriteFileAtomic(outFile, jsonBytes, 0600)
if err != nil {
if err := tempfile.WriteFileAtomic(outFile, jsonBytes, 0600); err != nil {
panic(err)
}
}
//-------------------------------------------------------------------------------

7
proto/buf.lock Normal file

@@ -0,0 +1,7 @@
# Generated by buf. DO NOT EDIT.
version: v1
deps:
- remote: buf.build
owner: gogo
repository: protobuf
commit: 4df00b267f944190a229ce3695781e99
