Mirror of https://github.com/tendermint/tendermint.git (synced 2026-01-13 16:22:53 +00:00)

Compare commits: `main-libp2` ... `master` (35 commits)

66ac0bf8b0
85a584dd2e
d0c3457343
d433ebe68d
48147e1fb9
b9d6bb4cd1
7a84425aec
488e1d4bd2
89246e993a
6c302218e3
023c21f307
65c0fba564
87708b8ae7
d320b3088e
e33635a601
fa32078ceb
fc3a24a669
525624f1ce
8a815417b4
066b7a9999
48f3062d9d
cc07318866
503ddf1c4d
18b5a500da
d2db54ae9a
31457ad361
4214d998f2
c1c501ecd4
b71ec8c83f
b421138e53
e9239e9ca8
136b62762f
d5fb82e414
6902fa9282
61ce384d75
`.github/dependabot.yml` (13 changes, vendored)

```diff
@@ -4,7 +4,6 @@ updates:
     directory: "/"
     schedule:
       interval: weekly
-      day: monday
     target-branch: "master"
     open-pull-requests-limit: 10
     labels:
@@ -15,7 +14,6 @@ updates:
     directory: "/"
     schedule:
       interval: weekly
-      day: monday
     target-branch: "v0.34.x"
     open-pull-requests-limit: 10
     labels:
@@ -26,7 +24,6 @@ updates:
     directory: "/"
     schedule:
       interval: weekly
-      day: monday
     target-branch: "v0.35.x"
     open-pull-requests-limit: 10
     labels:
@@ -37,7 +34,6 @@ updates:
     directory: "/"
     schedule:
       interval: weekly
-      day: monday
     target-branch: "v0.36.x"
     open-pull-requests-limit: 10
     labels:
@@ -48,7 +44,6 @@ updates:
     directory: "/docs"
     schedule:
       interval: weekly
-      day: monday
    open-pull-requests-limit: 10

  ###################################
@@ -58,7 +53,7 @@ updates:
  - package-ecosystem: gomod
    directory: "/"
    schedule:
-      interval: daily
+      interval: weekly
    target-branch: "master"
    open-pull-requests-limit: 10
    labels:
@@ -68,7 +63,7 @@ updates:
  - package-ecosystem: gomod
    directory: "/"
    schedule:
-      interval: daily
+      interval: weekly
    target-branch: "v0.34.x"
    open-pull-requests-limit: 10
    labels:
@@ -78,7 +73,7 @@ updates:
  - package-ecosystem: gomod
    directory: "/"
    schedule:
-      interval: daily
+      interval: weekly
    target-branch: "v0.35.x"
    open-pull-requests-limit: 10
    labels:
@@ -88,7 +83,7 @@ updates:
  - package-ecosystem: gomod
    directory: "/"
    schedule:
-      interval: daily
+      interval: weekly
    target-branch: "v0.36.x"
    open-pull-requests-limit: 10
    labels:
```
`.github/workflows/docker.yml` (2 changes, vendored)

```diff
@@ -49,7 +49,7 @@ jobs:
           password: ${{ secrets.DOCKERHUB_TOKEN }}

       - name: Publish to Docker Hub
-        uses: docker/build-push-action@v3.0.0
+        uses: docker/build-push-action@v3.1.0
         with:
           context: .
           file: ./DOCKER/Dockerfile
```
`.github/workflows/docs-deployment.yml` (8 changes, vendored)

```diff
@@ -28,7 +28,7 @@ jobs:
       - name: Install generator dependencies
         run: |
           apk add --no-cache make bash git npm
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
         with:
           # We need to fetch full history so the backport branches for previous
           # versions will be available for the build.
@@ -37,7 +37,7 @@ jobs:
         run: |
           git config --global --add safe.directory "$PWD"
           make build-docs
-      - uses: actions/upload-artifact@v3
+      - uses: actions/upload-artifact@v4
         with:
           name: build-output
           path: ~/output/
@@ -49,8 +49,8 @@ jobs:
     permissions:
       contents: write
     steps:
-      - uses: actions/checkout@v3
-      - uses: actions/download-artifact@v3
+      - uses: actions/checkout@v4
+      - uses: actions/download-artifact@v4
         with:
           name: build-output
           path: ~/output
```
`.github/workflows/tests.yml` (41 changes, vendored)

```diff
@@ -32,44 +32,3 @@ jobs:
         run: |
           make test-group-${{ matrix.part }} NUM_SPLIT=6
         if: env.GIT_DIFF
-      - uses: actions/upload-artifact@v3
-        with:
-          name: "${{ github.sha }}-${{ matrix.part }}-coverage"
-          path: ./build/${{ matrix.part }}.profile.out
-
-  upload-coverage-report:
-    runs-on: ubuntu-latest
-    needs: tests
-    steps:
-      - uses: actions/checkout@v3
-      - uses: technote-space/get-diff-action@v6
-        with:
-          PATTERNS: |
-            **/**.go
-            "!test/"
-            go.mod
-            go.sum
-            Makefile
-      - uses: actions/download-artifact@v3
-        with:
-          name: "${{ github.sha }}-00-coverage"
-        if: env.GIT_DIFF
-      - uses: actions/download-artifact@v3
-        with:
-          name: "${{ github.sha }}-01-coverage"
-        if: env.GIT_DIFF
-      - uses: actions/download-artifact@v3
-        with:
-          name: "${{ github.sha }}-02-coverage"
-        if: env.GIT_DIFF
-      - uses: actions/download-artifact@v3
-        with:
-          name: "${{ github.sha }}-03-coverage"
-        if: env.GIT_DIFF
-      - run: |
-          cat ./*profile.out | grep -v "mode: set" >> coverage.txt
-        if: env.GIT_DIFF
-      - uses: codecov/codecov-action@v3.1.0
-        with:
-          file: ./coverage.txt
-        if: env.GIT_DIFF
```
`CHANGELOG.md` (84 changes)

```diff
@@ -2,6 +2,88 @@

 Friendly reminder: We have a [bug bounty program](https://hackerone.com/cosmos).

+## v0.35.9
+
+July 20, 2022
+
+This release fixes a deadlock that could occur in some cases when using the
+priority mempool with the ABCI socket client.
+
+### BUG FIXES
+
+- [mempool] [\#9030](https://github.com/tendermint/tendermint/pull/9030) rework lock discipline to mitigate callback deadlocks (@creachadair)
+
+## v0.35.8
+
+July 12, 2022
+
+Special thanks to external contributors on this release: @joeabbey
+
+This release fixes an unbounded heap growth issue in the implementation of the
+priority mempool, as well as some configuration, logging, and peer dialing
+improvements in the non-legacy p2p stack. It also adds a new opt-in
+"simple-priority" value for the `p2p.queue-type` setting, that should improve
+gossip performance for non-legacy peer networks.
+
+### BREAKING CHANGES
+
+- CLI/RPC/Config
+
+  - [node] [\#8902](https://github.com/tendermint/tendermint/pull/8902) Always start blocksync and avoid misconfiguration (@tychoish)
+
+### FEATURES
+
+- [cli] [\#8675](https://github.com/tendermint/tendermint/pull/8675) Add command to force compact goleveldb databases (@cmwaters)
+
+### IMPROVEMENTS
+
+- [p2p] [\#8914](https://github.com/tendermint/tendermint/pull/8914) [\#8875](https://github.com/tendermint/tendermint/pull/8875) Improvements to peer dialing (backported). (@tychoish)
+- [p2p] [\#8820](https://github.com/tendermint/tendermint/pull/8820) add eviction metrics and cleanup dialing error handling (backport #8819) (@tychoish)
+- [logging] [\#8896](https://github.com/tendermint/tendermint/pull/8896) Do not pre-process log results (backport #8895). (@tychoish)
+- [p2p] [\#8956](https://github.com/tendermint/tendermint/pull/8956) Simpler priority queue (backport #8929). (@tychoish)
+
+### BUG FIXES
+
+- [mempool] [\#8944](https://github.com/tendermint/tendermint/pull/8944) Fix unbounded heap growth in the priority mempool. (@creachadair)
+- [p2p] [\#8869](https://github.com/tendermint/tendermint/pull/8869) Set empty timeouts to configed values. (backport #8847). (@williambanfield)
+
+## v0.35.7
+
+June 16, 2022
+
+### BUG FIXES
+
+- [p2p] [\#8692](https://github.com/tendermint/tendermint/pull/8692) scale the number of stored peers by the configured maximum connections (#8684)
+- [rpc] [\#8715](https://github.com/tendermint/tendermint/pull/8715) always close http bodies (backport #8712)
+- [p2p] [\#8760](https://github.com/tendermint/tendermint/pull/8760) accept should not abort on first error (backport #8759)
+
+### BREAKING CHANGES
+
+- P2P Protocol
+
+  - [p2p] [\#8737](https://github.com/tendermint/tendermint/pull/8737) Introduce "inactive" peer label to avoid re-dialing incompatible peers. (@tychoish)
+  - [p2p] [\#8737](https://github.com/tendermint/tendermint/pull/8737) Increase frequency of dialing attempts to reduce latency for peer acquisition. (@tychoish)
+  - [p2p] [\#8737](https://github.com/tendermint/tendermint/pull/8737) Improvements to peer scoring and sorting to gossip a greater variety of peers during PEX. (@tychoish)
+  - [p2p] [\#8737](https://github.com/tendermint/tendermint/pull/8737) Track incoming and outgoing peers separately to ensure more peer slots open for incoming connections. (@tychoish)
+
+## v0.35.6
+
+June 3, 2022
+
+### FEATURES
+
+- [migrate] [\#8672](https://github.com/tendermint/tendermint/pull/8672) provide function for database production (backport #8614) (@tychoish)
+
+### BUG FIXES
+
+- [consensus] [\#8651](https://github.com/tendermint/tendermint/pull/8651) restructure peer catchup sleep (@tychoish)
+- [pex] [\#8657](https://github.com/tendermint/tendermint/pull/8657) align max address thresholds (@cmwaters)
+- [cmd] [\#8668](https://github.com/tendermint/tendermint/pull/8668) don't used global config for reset commands (@cmwaters)
+- [p2p] [\#8681](https://github.com/tendermint/tendermint/pull/8681) shed peers from store from other networks (backport #8678) (@tychoish)
+
 ## v0.35.5

 May 26, 2022
@@ -665,7 +747,7 @@ Special thanks to external contributors on this release: @james-ray, @fedekunze,
 - [light] [\#5347](https://github.com/tendermint/tendermint/pull/5347) `NewClient`, `NewHTTPClient`, `VerifyHeader` and `VerifyLightBlockAtHeight` now accept `context.Context` as 1st param (@melekes)
 - [merkle] [\#5193](https://github.com/tendermint/tendermint/pull/5193) `HashFromByteSlices` and `ProofsFromByteSlices` now return a hash for empty inputs, following RFC6962 (@erikgrinaker)
 - [proto] [\#5025](https://github.com/tendermint/tendermint/pull/5025) All proto files have been moved to `/proto` directory. (@marbar3778)
-  - Using the recommended the file layout from buf, [see here for more info](https://buf.build/docs/lint-checkers#file_layout)
+  - Using the recommended the file layout from buf, [see here for more info](https://docs.buf.build/lint/rules) <!-- markdown-link-check-disable-line -->
 - [rpc/client] [\#4947](https://github.com/tendermint/tendermint/pull/4947) `Validators`, `TxSearch` `page`/`per_page` params become pointers (@melekes)
   - `UnconfirmedTxs` `limit` param is a pointer
 - [rpc/jsonrpc/server] [\#5141](https://github.com/tendermint/tendermint/pull/5141) Remove `WriteRPCResponseArrayHTTP` (use `WriteRPCResponseHTTP` instead) (@melekes)
```
`Makefile` (2 changes)

```diff
@@ -224,7 +224,7 @@ build-docs:
 	@cd docs && \
 	while read -r branch path_prefix; do \
 		( git checkout $${branch} && npm ci --quiet && \
-			VUEPRESS_BASE="/$${path_prefix}/" npm run build --quiet ) ; \
+			VUEPRESS_BASE="/$${path_prefix}/" NODE_OPTIONS="--openssl-legacy-provider" npm run build --quiet ) ; \
 		mkdir -p ~/output/$${path_prefix} ; \
 		cp -r .vuepress/dist/* ~/output/$${path_prefix}/ ; \
 		cp ~/output/$${path_prefix}/index.html ~/output ; \
```
`README.md` (21 changes)

```diff
@@ -36,20 +36,17 @@ Please do not depend on master as your production branch. Use [releases](https:/
 Tendermint has been in the production of private and public environments, most notably the blockchains of the Cosmos Network. we haven't released v1.0 yet since we are making breaking changes to the protocol and the APIs.
 See below for more details about [versioning](#versioning).

-In any case, if you intend to run Tendermint in production, we're happy to help. You can
-contact us [over email](mailto:hello@interchain.io) or [join the chat](https://discord.gg/cosmosnetwork).
+In any case, if you intend to run Tendermint in production, we're happy to help.
+You can contact us [over email](mailto:hello@newtendermint.org) or [join the
+chat](https://discord.gg/gnoland).

 More on how releases are conducted can be found [here](./RELEASES.md).

 ## Security

-To report a security vulnerability, see our [bug bounty
-program](https://hackerone.com/cosmos).
+To report a security vulnerability, please [email us](mailto:security@newtendermint.org).
 For examples of the kinds of bugs we're looking for, see [our security policy](SECURITY.md).

-We also maintain a dedicated mailing list for security updates. We will only ever use this mailing list
-to notify you of vulnerabilities and fixes in Tendermint Core. You can subscribe [here](http://eepurl.com/gZ5hQD).
-
 ## Minimum requirements

 | Requirement | Notes |
@@ -139,9 +136,9 @@ We keep a public up-to-date version of our roadmap [here](./docs/roadmap/roadmap

 ## Join us!

-Tendermint Core is maintained by [Interchain GmbH](https://interchain.berlin).
-If you'd like to work full-time on Tendermint Core, [we're hiring](https://interchain-gmbh.breezy.hr/)!
+The development of Tendermint Core was led primarily by All in Bits, Inc. The
+Tendermint trademark is owned by New Tendermint, LLC. If you'd like to work
+full-time on Tendermint2 or [gno.land](https://gno.land), [we're
+hiring](mailto:hiring@newtendermint.org)!

-Funding for Tendermint Core development comes primarily from the [Interchain Foundation](https://interchain.io),
-a Swiss non-profit. The Tendermint trademark is owned by [Tendermint Inc.](https://tendermint.com), the for-profit entity
-that also maintains [tendermint.com](https://tendermint.com).
```
`RELEASES.md` (168 changes)

````diff
@@ -1,8 +1,9 @@
 # Releases

-Tendermint uses [semantic versioning](https://semver.org/) with each release following
-a `vX.Y.Z` format. The `master` branch is used for active development and thus it's
-advisable not to build against it.
+Tendermint uses modified [semantic versioning](https://semver.org/) with each
+release following a `vX.Y.Z` format. Tendermint is currently on major version
+0 and uses the minor version to signal breaking changes. The `master` branch is
+used for active development and thus it is not advisable to build against it.

 The latest changes are always initially merged into `master`.
 Releases are specified using tags and are built from long-lived "backport" branches
@@ -29,8 +30,8 @@ merging the pull request.

 ### Creating a backport branch

-If this is the first release candidate for a major release, you get to have the
-honor of creating the backport branch!
+If this is the first release candidate for a minor version release, e.g.
+v0.25.0, you get to have the honor of creating the backport branch!

 Note that, after creating the backport branch, you'll also need to update the
 tags on `master` so that `go mod` is able to order the branches correctly. You
@@ -77,7 +78,8 @@ the 0.35.x line.

 After doing these steps, go back to `master` and do the following:

-1. Tag `master` as the dev branch for the _next_ major release and push it up to GitHub.
+1. Tag `master` as the dev branch for the _next_ minor version release and push
+   it up to GitHub.
    For example:
    ```sh
    git tag -a v0.36.0-dev -m "Development base for Tendermint v0.36."
@@ -99,7 +101,7 @@ After doing these steps, go back to `master` and do the following:

 ## Release candidates

-Before creating an official release, especially a major release, we may want to create a
+Before creating an official release, especially a minor release, we may want to create a
 release candidate (RC) for our friends and partners to test out. We use git tags to
 create RCs, and we build them off of backport branches.

@@ -109,7 +111,7 @@ Tags for RCs should follow the "standard" release naming conventions, with `-rcX
 (Note that branches and tags _cannot_ have the same names, so it's important that these branches
 have distinct names from the tags/release names.)

-If this is the first RC for a major release, you'll have to make a new backport branch (see above).
+If this is the first RC for a minor release, you'll have to make a new backport branch (see above).
 Otherwise:

 1. Start from the backport branch (e.g. `v0.35.x`).
@@ -140,11 +142,13 @@ Note that this process should only be used for "true" RCs--
 release candidates that, if successful, will be the next release.
 For more experimental "RCs," create a new, short-lived branch and tag that instead.

-## Major release
+## Minor release

-This major release process assumes that this release was preceded by release candidates.
+This minor release process assumes that this release was preceded by release candidates.
 If there were no release candidates, begin by creating a backport branch, as described above.

+Before performing these steps, be sure the [Minor Release Checklist](#minor-release-checklist) has been completed.
+
 1. Start on the backport branch (e.g. `v0.35.x`)
 2. Run integration tests (`make test_integrations`) and the e2e nightlies.
 3. Prepare the release:
@@ -176,16 +180,16 @@ If there were no release candidates, begin by creating a backport branch, as des
   - Commit these changes to `master` and backport them into the backport
     branch for this release.

-## Minor release (point releases)
+## Patch release

-Minor releases are done differently from major releases: They are built off of
+Patch releases are done differently from minor releases: They are built off of
 long-lived backport branches, rather than from master. As non-breaking changes
 land on `master`, they should also be backported into these backport branches.

-Minor releases don't have release candidates by default, although any tricky
+Patch releases don't have release candidates by default, although any tricky
 changes may merit a release candidate.

-To create a minor release:
+To create a patch release:

 1. Checkout the long-lived backport branch: `git checkout v0.35.x`
 2. Run integration tests (`make test_integrations`) and the nightlies.
@@ -197,11 +201,143 @@ To create a minor release:
    - Bump the TMDefaultVersion in `version.go`
    - Bump the ABCI version number, if necessary.
      (Note that ABCI follows semver, and that ABCI versions are the only versions
-      which can change during minor releases, and only field additions are valid minor changes.)
+      which can change during patch releases, and only field additions are valid patch changes.)
 4. Open a PR with these changes that will land them back on `v0.35.x`
 5. Once this change has landed on the backport branch, make sure to pull it locally, then push a tag.
    - `git tag -a v0.35.1 -m 'Release v0.35.1'`
    - `git push origin v0.35.1`
 6. Create a pull request back to master with the CHANGELOG & version changes from the latest release.
-   - Remove all `R:minor` labels from the pull requests that were included in the release.
+   - Remove all `R:patch` labels from the pull requests that were included in the release.
    - Do not merge the backport branch into master.

+## Minor Release Checklist
+
+The following set of steps are performed on all releases that increment the
+_minor_ version, e.g. v0.25 to v0.26. These steps ensure that Tendermint is
+well tested, stable, and suitable for adoption by the various diverse projects
+that rely on Tendermint.
+
+### Feature Freeze
+
+Ahead of any minor version release of Tendermint, the software enters 'Feature
+Freeze' for at least two weeks. A feature freeze means that _no_ new features
+are added to the code being prepared for release. No code changes should be made
+to the code being released that do not directly improve pressing issues of code
+quality. The following must not be merged during a feature freeze:
+
+* Refactors that are not related to specific bug fixes.
+* Dependency upgrades.
+* New test code that does not test a discovered regression.
+* New features of any kind.
+* Documentation or spec improvements that are not related to the newly developed
+  code.
+
+This period directly follows the creation of the [backport
+branch](#creating-a-backport-branch). The Tendermint team instead directs all
+attention to ensuring that the existing code is stable and reliable. Broken
+tests are fixed, flakey-tests are remedied, end-to-end test failures are
+thoroughly diagnosed and all efforts of the team are aimed at improving the
+quality of the code. During this period, the upgrade harness tests are run
+repeatedly and a variety of in-house testnets are run to ensure Tendermint
+functions at the scale it will be used by application developers and node
+operators.
+
+### Nightly End-To-End Tests
+
+The Tendermint team maintains [a set of end-to-end
+tests](https://github.com/tendermint/tendermint/blob/master/test/e2e/README.md#L1)
+that run each night on the latest commit of the project and on the code in the
+tip of each supported backport branch. These tests start a network of containerized
+Tendermint processes and run automated checks that the network functions as
+expected in both stable and unstable conditions. During the feature freeze,
+these tests are run nightly and must pass consistently for a release of
+Tendermint to be considered stable.
+
+### Upgrade Harness
+
+> TODO(williambanfield): Change to past tense and clarify this section once
+> upgrade harness is complete.
+
+The Tendermint team is creating an upgrade test harness to exercise the
+workflow of stopping an instance of Tendermint running one version of the
+software and starting up the same application running the next version. To
+support upgrade testing, we will add the ability to terminate the Tendermint
+process at specific pre-defined points in its execution so that we can verify
+upgrades work in a representative sample of stop conditions.
+
+### Large Scale Testnets
+
+The Tendermint end-to-end tests run a small network (~10s of nodes) to exercise
+basic consensus interactions. Real world deployments of Tendermint often have over
+a hundred nodes just in the validator set, with many others acting as full
+nodes and sentry nodes. To gain more assurance before a release, we will also run
+larger-scale test networks to shake out emergent behaviors at scale.
+
+Large-scale test networks are run on a set of virtual machines (VMs). Each VM
+is equipped with 4 Gigabytes of RAM and 2 CPU cores. The network runs a very
+simple key-value store application. The application adds artificial delays to
+different ABCI calls to simulate a slow application. Each testnet is briefly
+run with no load being generated to collect a baseline performance. Once
+baseline is captured, a consistent load is applied across the network. This
+load takes the form of 10% of the running nodes all receiving a consistent
+stream of two hundred transactions per minute each.
+
+During each test net, the following metrics are monitored and collected on each
+node:
+
+* Consensus rounds per height
+* Maximum connected peers, Minimum connected peers, Rate of change of peer connections
+* Memory resident set size
+* CPU utilization
+* Blocks produced per minute
+* Seconds for each step of consensus (Propose, Prevote, Precommit, Commit)
+* Latency to receive block proposals
+
+For these tests we intentionally target low-powered host machines (with low core
+counts and limited memory) to ensure we observe similar kinds of resource contention
+and limitation that real-world deployments of Tendermint experience in production.
+
+#### 200 Node Testnet
+
+To test the stability and performance of Tendermint in a real world scenario,
+a 200 node test network is run. The network comprises 5 seed nodes, 100
+validators and 95 non-validating full nodes. All nodes begin by dialing
+a subset of the seed nodes to discover peers. The network is run for several
+days, with metrics being collected continuously. In cases of changes to performance
+critical systems, testnets of larger sizes should be considered.
+
+#### Rotating Node Testnet
+
+Real-world deployments of Tendermint frequently see new nodes arrive and old
+nodes exit the network. The rotating node testnet ensures that Tendermint is
+able to handle this reliably. In this test, a network with 10 validators and
+3 seed nodes is started. A rolling set of 25 full nodes are started and each
+connects to the network by dialing one of the seed nodes. Once the node is able
+to blocksync to the head of the chain and begins producing blocks using
+Tendermint consensus it is stopped. Once stopped, a new node is started and
+takes its place. This network is run for several days.
+
+#### Network Partition Testnet
+
+Tendermint is expected to recover from network partitions. A partition where no
+subset of the nodes is left with the super-majority of the stake is expected to
+stop making blocks. Upon alleviation of the partition, the network is expected
+to once again become fully connected and capable of producing blocks. The
+network partition testnet ensures that Tendermint is able to handle this
+reliably at scale. In this test, a network with 100 validators and 95 full
+nodes is started. All validators have equal stake. Once the network is
+producing blocks, a set of firewall rules is deployed to create a partitioned
+network with 50% of the stake on one side and 50% on the other. Once the
+network stops producing blocks, the firewall rules are removed and the nodes
+are monitored to ensure they reconnect and that the network again begins
+producing blocks.
+
+#### Absent Stake Testnet
+
+Tendermint networks often run with _some_ portion of the voting power offline.
+The absent stake testnet ensures that large networks are able to handle this
+reliably. A set of 150 validator nodes and three seed nodes is started. The set
+of 150 validators is configured to only possess a cumulative stake of 67% of
+the total stake. The remaining 33% of the stake is configured to belong to
+a validator that is never actually run in the test network. The network is run
+for multiple days, ensuring that it is able to produce blocks without issue.
````
|
||||
@@ -3,7 +3,6 @@ package abciclient
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"sync"
|
||||
"time"
|
||||
@@ -65,7 +64,7 @@ RETRY_LOOP:
|
||||
if cli.mustConnect {
|
||||
return err
|
||||
}
|
||||
cli.logger.Error(fmt.Sprintf("abci.grpcClient failed to connect to %v. Retrying...\n", cli.addr), "err", err)
|
||||
cli.logger.Error("abci.grpcClient failed to connect, Retrying...", "addr", cli.addr, "err", err)
|
||||
timer.Reset(time.Second * dialRetryIntervalSeconds)
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
|
||||
ABCI socket client (`socketClient.OnStart`):

```diff
@@ -67,8 +67,11 @@ func (cli *socketClient) OnStart(ctx context.Context) error {
 			if cli.mustConnect {
 				return err
 			}
-			cli.logger.Error(fmt.Sprintf("abci.socketClient failed to connect to %v. Retrying after %vs...",
-				cli.addr, dialRetryIntervalSeconds), "err", err)
+
+			cli.logger.Error("abci.socketClient failed to connect, retrying after",
+				"retry_after", dialRetryIntervalSeconds,
+				"target", cli.addr,
+				"err", err)

 			timer.Reset(time.Second * dialRetryIntervalSeconds)
 			select {
@@ -77,7 +80,6 @@ func (cli *socketClient) OnStart(ctx context.Context) error {
 			case <-timer.C:
 				continue
 			}
-
 		}
 		cli.conn = conn
```
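Both hunks above make the same change: instead of baking variable data into the message with `fmt.Sprintf`, the variables become separate key/value pairs. A minimal, self-contained sketch of the difference — the `kvLogger` type here is a hypothetical stand-in for Tendermint's structured logger, not the real implementation:

```go
package main

import (
	"errors"
	"fmt"
	"log"
)

// kvLogger is a stand-in for a structured logger whose Error method
// takes a message plus alternating key/value pairs.
type kvLogger struct{}

func (kvLogger) Error(msg string, keyvals ...interface{}) {
	log.Println(append([]interface{}{"error:", msg}, keyvals...)...)
}

func main() {
	logger := kvLogger{}
	addr := "tcp://127.0.0.1:26658"
	err := errors.New("connection refused")

	// Before: the address is baked into one opaque string, so log
	// aggregators cannot filter on it.
	logger.Error(fmt.Sprintf("failed to connect to %v. Retrying...", addr), "err", err)

	// After: each field is its own key/value pair, so structured sinks
	// can index and query "addr" and "err" directly.
	logger.Error("failed to connect, retrying", "addr", addr, "err", err)
}
```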
Database migration (`RunDatabaseMigration`):

```diff
@@ -34,7 +34,6 @@ func RunDatabaseMigration(ctx context.Context, logger log.Logger, conf *config.C
 		// reduce the possibility of the
 		// ephemeral data overwriting later data
 		"tx_index",
-		"peerstore",
 		"light",
 		"blockstore",
 		"state",
@@ -57,7 +56,7 @@ func RunDatabaseMigration(ctx context.Context, logger log.Logger, conf *config.C
 			return fmt.Errorf("constructing database handle: %w", err)
 		}

-		if err = keymigrate.Migrate(ctx, db); err != nil {
+		if err = keymigrate.Migrate(ctx, dbctx, db); err != nil {
 			return fmt.Errorf("running migration for context %q: %w",
 				dbctx, err)
 		}
```
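The second hunk threads the database label (`dbctx`) into `keymigrate.Migrate` so failures can name the database they came from. A hedged sketch of the calling pattern, assuming only the signature shown in the diff and the `DBContext`/`DefaultDBProvider` pair shown further down this page (package names here are illustrative, not the repository's exact layout):

```go
// migrateAll is a sketch, not Tendermint's implementation.
func migrateAll(ctx context.Context, conf *config.Config) error {
	for _, dbctx := range []string{"tx_index", "light", "blockstore", "state"} {
		db, err := node.DefaultDBProvider(&node.DBContext{ID: dbctx, Config: conf})
		if err != nil {
			return fmt.Errorf("constructing database handle: %w", err)
		}
		// Passing dbctx lets the error below say which logical
		// database the migration failed on.
		if err := keymigrate.Migrate(ctx, dbctx, db); err != nil {
			return fmt.Errorf("running migration for context %q: %w", dbctx, err)
		}
	}
	return nil
}
```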
Rollback integration test:

```diff
@@ -15,6 +15,10 @@ import (
 )

 func TestRollbackIntegration(t *testing.T) {
+	if testing.Short() {
+		t.Skip("skipping test in short mode")
+	}
+
 	var height int64
 	dir := t.TempDir()
 	ctx, cancel := context.WithCancel(context.Background())
```
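The added guard is the standard Go idiom for opting long-running tests out of quick runs: `testing.Short()` reports whether `go test -short` was passed. For example:

```go
package example

import "testing"

// TestExpensive mirrors the guard added to TestRollbackIntegration above:
// under `go test -short` the test is skipped before any expensive setup runs.
func TestExpensive(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode")
	}
	// ... long-running integration logic would follow here ...
}
```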
P2P configuration (`P2PConfig` and `DefaultP2PConfig`):

```diff
@@ -659,8 +659,8 @@ type P2PConfig struct { //nolint: maligned
 	DialTimeout time.Duration `mapstructure:"dial-timeout"`

 	// Makes it possible to configure which queue backend the p2p
-	// layer uses. Options are: "fifo" and "priority",
-	// with the default being "priority".
+	// layer uses. Options are: "fifo" and "simple-priority", and "priority",
+	// with the default being "simple-priority".
 	QueueType string `mapstructure:"queue-type"`
 }

@@ -685,7 +685,7 @@ func DefaultP2PConfig() *P2PConfig {
 		PexReactor:       true,
 		HandshakeTimeout: 20 * time.Second,
 		DialTimeout:      3 * time.Second,
-		QueueType:        "priority",
+		QueueType:        "simple-priority",
 	}
 }
```
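With this change the default queue type becomes "simple-priority". A small sketch of inspecting and overriding the setting from Go, with the field and constructor names taken from the diff (the import path is assumed to be the repository's standard `config` package):

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/config"
)

func main() {
	// Per the diff above, the default is now "simple-priority".
	cfg := config.DefaultP2PConfig()
	fmt.Println("default queue:", cfg.QueueType)

	// Operators can still opt back into the previous backend, either
	// here or with `queue-type = "priority"` in config.toml.
	cfg.QueueType = "priority"
	fmt.Println("override queue:", cfg.QueueType)
}
```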
Default database provider (`DefaultDBProvider`):

```diff
@@ -25,5 +25,6 @@ type DBProvider func(*DBContext) (dbm.DB, error)
 // specified in the Config.
 func DefaultDBProvider(ctx *DBContext) (dbm.DB, error) {
 	dbType := dbm.BackendType(ctx.Config.DBBackend)
+
 	return dbm.NewDB(ctx.ID, dbType, ctx.Config.DBDir())
 }
```
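Because `DBProvider` is a plain function type, callers can substitute their own backend without touching node code. A hedged sketch of an in-memory provider matching the signature above (`dbm` is `github.com/tendermint/tm-db`; `DBContext` is the type from the diff; the package placement is illustrative):

```go
package node // illustrative placement; DBContext and DBProvider as in the diff

import dbm "github.com/tendermint/tm-db"

// MemDBProvider satisfies the DBProvider function type by ignoring the
// configured backend and returning a fresh in-memory database, which is
// convenient in tests.
func MemDBProvider(_ *DBContext) (dbm.DB, error) {
	return dbm.NewMemDB(), nil
}
```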
config.toml template:

```diff
@@ -282,7 +282,9 @@ pprof-laddr = "{{ .RPC.PprofListenAddress }}"
 #######################################################
 [p2p]

-# Select the p2p internal queue
+# Select the p2p internal queue.
+# Options are: "fifo" and "simple-priority", and "priority",
+# with the default being "simple-priority".
 queue-type = "{{ .P2P.QueueType }}"

 # Address to listen for incoming connections
```
Docs site configuration (VuePress `smallprint`):

```diff
@@ -106,7 +106,7 @@ module.exports = {
       }
     ],
     smallprint:
-      'The development of Tendermint Core is led primarily by [Interchain GmbH](https://interchain.berlin/). Funding for this development comes primarily from the Interchain Foundation, a Swiss non-profit. The Tendermint trademark is owned by Tendermint Inc, the for-profit entity that also maintains this website.',
+      'The development of Tendermint Core was led primarily by All in Bits, Inc. The Tendermint trademark is owned by New Tendermint, LLC.',
     links: [
       {
         title: 'Documentation',
```
Docs README (Contribute section):

```diff
@@ -31,3 +31,4 @@ To find out about the Tendermint ecosystem you can go [here](https://github.com/
 ## Contribute

 To contribute to the documentation, see [this file](https://github.com/tendermint/tendermint/blob/master/docs/DOCS_README.md) for details of the build process and considerations when making changes.
+
```
abci-cli documentation:

```diff
@@ -247,8 +247,8 @@ Similarly, you could put the commands in a file and run

 Want to write an app in your favorite language?! We'd be happy
 to add you to our [ecosystem](https://github.com/tendermint/awesome#ecosystem)!
-See [funding](https://github.com/interchainio/funding) opportunities from the
-[Interchain Foundation](https://interchain.io/) for implementations in new languages and more.
+
+TODO link to bounties page.

 The `abci-cli` is designed strictly for testing and debugging. In a real
 deployment, the role of sending messages is taken by Tendermint, which
```
Testnet documentation (`show_node_id` renamed to `show-node-id`):

````diff
@@ -106,10 +106,10 @@ Next, use the `tendermint testnet` command to create four directories of config
 Before you can start the network, you'll need peers identifiers (IPs are not enough and can change). We'll refer to them as ID1, ID2, ID3, ID4.

 ```sh
-tendermint show_node_id --home ./mytestnet/node0
-tendermint show_node_id --home ./mytestnet/node1
-tendermint show_node_id --home ./mytestnet/node2
-tendermint show_node_id --home ./mytestnet/node3
+tendermint show-node-id --home ./mytestnet/node0
+tendermint show-node-id --home ./mytestnet/node1
+tendermint show-node-id --home ./mytestnet/node2
+tendermint show-node-id --home ./mytestnet/node3
 ```

 Finally, from each machine, run:
````
Application documentation (PBFT/chaincode paragraph):

```diff
@@ -103,9 +103,9 @@ Another example of a cryptocurrency application built on Tendermint is
 to Tendermint, but is more opinionated about how the state is managed,
 and requires that all application behaviour runs in potentially many
 docker containers, modules it calls "chaincode". It uses an
-implementation of [PBFT](http://pmg.csail.mit.edu/papers/osdi99.pdf).
-chaincode It is possible to implement this docker-based behaviour as a ABCI app
+implementation of [PBFT](http://pmg.csail.mit.edu/papers/osdi99.pdf)
+from a team at IBM that is augmented to handle potentially non-deterministic
+chaincode. It is possible to implement this docker-based behaviour as a ABCI app
 in Tendermint, though extending Tendermint to handle non-determinism remains
 for future work.
```
`docs/package-lock.json` (12 changes, generated)

```diff
@@ -12288,9 +12288,9 @@
       }
     },
     "node_modules/terser": {
-      "version": "4.8.0",
-      "resolved": "https://registry.npmjs.org/terser/-/terser-4.8.0.tgz",
-      "integrity": "sha512-EAPipTNeWsb/3wLPeup1tVPaXfIaU68xMnVdPafIL1TV05OhASArYyIfFvnvJCNrR2NIOvDVNNTFRa+Re2MWyw==",
+      "version": "4.8.1",
+      "resolved": "https://registry.npmjs.org/terser/-/terser-4.8.1.tgz",
+      "integrity": "sha512-4GnLC0x667eJG0ewJTa6z/yXrbLGv80D9Ru6HIpCQmO+Q4PfEtBFi0ObSckqwL6VyQv/7ENJieXHo2ANmdQwgw==",
       "dependencies": {
         "commander": "^2.20.0",
         "source-map": "~0.6.1",
@@ -23925,9 +23925,9 @@
       "integrity": "sha512-wK0Ri4fOGjv/XPy8SBHZChl8CM7uMc5VML7SqiQ0zG7+J5Vr+RMQDoHa2CNT6KHUnTGIXH34UDMkPzAUyapBZg=="
     },
     "terser": {
-      "version": "4.8.0",
-      "resolved": "https://registry.npmjs.org/terser/-/terser-4.8.0.tgz",
-      "integrity": "sha512-EAPipTNeWsb/3wLPeup1tVPaXfIaU68xMnVdPafIL1TV05OhASArYyIfFvnvJCNrR2NIOvDVNNTFRa+Re2MWyw==",
+      "version": "4.8.1",
+      "resolved": "https://registry.npmjs.org/terser/-/terser-4.8.1.tgz",
+      "integrity": "sha512-4GnLC0x667eJG0ewJTa6z/yXrbLGv80D9Ru6HIpCQmO+Q4PfEtBFi0ObSckqwL6VyQv/7ENJieXHo2ANmdQwgw==",
       "requires": {
         "commander": "^2.20.0",
         "source-map": "~0.6.1",
```
RFC index (docs/rfc):

```diff
@@ -58,5 +58,6 @@ sections.
 - [RFC-018: BLS Signature Aggregation Exploration](./rfc-018-bls-agg-exploration.md)
 - [RFC-019: Configuration File Versioning](./rfc-019-config-version.md)
 - [RFC-020: Onboarding Projects](./rfc-020-onboarding-projects.rst)
+- [RFC-021: The Future of the Socket Protocol](./rfc-021-socket-protocol.md)

 <!-- - [RFC-NNN: Title](./rfc-NNN-title.md) -->
```
`docs/rfc/rfc-021-socket-protocol.md` (new file, 266 lines)

```markdown
# RFC 021: The Future of the Socket Protocol

## Changelog

- 19-May-2022: Initial draft (@creachadair)
- 19-Jul-2022: Converted from ADR to RFC (@creachadair)

## Abstract

This RFC captures some technical discussion about the ABCI socket protocol that
was originally documented to solicit an architectural decision. This topic was
not high-enough priority as of this writing to justify making a final decision.

For that reason, the text of this RFC has the general structure of an ADR, but
should be viewed primarily as a record of the issue for future reference.

## Background

The [Application Blockchain Interface (ABCI)][abci] is a client-server protocol
used by the Tendermint consensus engine to communicate with the application on
whose behalf it performs state replication. There are currently three transport
options available for ABCI applications:

1. **In-process**: Applications written in Go can be linked directly into the
   same binary as the consensus node. Such applications use a "local" ABCI
   connection, which exposes application methods to the node as direct function
   calls.

2. **Socket protocol**: Out-of-process applications may export the ABCI service
   via a custom socket protocol that sends requests and responses over a
   Unix-domain or TCP socket connection as length-prefixed protocol buffers.
   In Tendermint, this is handled by the [socket client][socket-client].

3. **gRPC**: Out-of-process applications may export the ABCI service via gRPC.
   In Tendermint, this is handled by the [gRPC client][grpc-client].

Both the out-of-process options (2) and (3) have a long history in Tendermint.
The beginnings of the gRPC client were added in [May 2016][abci-start] when
ABCI was still hosted in a separate repository, and the socket client (formerly
called the "remote client") was part of ABCI from its inception in November
2015.

At that time when ABCI was first being developed, the gRPC project was very new
(it launched Q4 2015) and it was not an obvious choice for use in Tendermint.
It took a while before the language coverage and quality of gRPC reached a
point where it could be a viable solution for out-of-process applications. For
that reason, it made sense for the initial design of ABCI to focus on a custom
protocol for out-of-process applications.

## Problem Statement

For practical reasons, ABCI needs an interprocess communication option to
support applications not written in Go. The two practical options are RPC and
FFI, and for operational reasons an RPC mechanism makes more sense.

The socket protocol has not changed all that substantially since its original
design, and has the advantage of being simple to implement in almost any
reasonable language. However, its simplicity includes some limitations that
have had a negative impact on the stability and performance of out-of-process
applications using it. In particular:

- The protocol lacks request identifiers, so the client and server must return
  responses in strict FIFO order. Even if the client issues requests that have
  no dependency on each other, the protocol has no way except order of issue to
  map responses to requests.

  This reduces (in some cases substantially) the concurrency an application can
  exploit, since the parallelism of requests in flight is gated by the slowest
  active request at any moment. There have been complaints from some network
  operators on that basis.

- The protocol lacks method identifiers, so the only way for the client and
  server to understand which operation is requested is to dispatch on the type
  of the request and response payloads. For responses, this means that [any
  error condition is terminal not only to the request, but to the entire ABCI
  client](https://github.com/tendermint/tendermint/blob/master/abci/client/socket_client.go#L149).

  The historical intent of terminating for any error seems to have been that
  all ABCI errors are unrecoverable and hence protocol fatal <!-- markdown-link-check-disable-next-line -->
  (see [Note 1](#note1)). In practice, however, this greatly complicates
  debugging a faulty node, since the only way to respond to errors is to panic
  the node which loses valuable context that could have been logged.

- There are subtle concurrency management dependencies between the client and
  the server that are not clearly documented anywhere, and it is very easy for
  small changes in both the client and the server to lead to tricky deadlocks,
  panics, race conditions, and slowdowns. As a recent example of this, see
  https://github.com/tendermint/tendermint/pull/8581.

These limitations are fixable, but one important question is whether it is
worthwhile to fix them. We can add request and method identifiers, for
example, but doing so would be a breaking change to the protocol requiring
every application using it to update. If applications have to migrate anyway,
the stability and language coverage of gRPC have improved a lot, and today it
is probably simpler to set up and maintain an application using gRPC transport
than to reimplement the Tendermint socket protocol.

Moreover, gRPC addresses all the above issues out-of-the-box, and requires
(much) less custom code for both the server (i.e., the application) and the
client. The project is well-funded and widely-used, which makes it a safe bet
for a dependency.

## Decision

There is a set of related alternatives to consider:

- Question 1: Designate a single IPC standard for out-of-process applications?

  Claim: We should converge on one (and only one) IPC option for out-of-process
  applications. We should choose an option that, after a suitable period of
  deprecation for alternatives, will address most or all the highest-impact
  uses of Tendermint. Maintaining multiple options increases the surface area
  for bugs and vulnerabilities, and we should not have multiple options for
  basic interfaces without a clear and well-documented reason.

- Question 2a: Choose gRPC and deprecate/remove the socket protocol?

  Claim: Maintaining and improving a custom RPC protocol is a substantial
  project and not directly relevant to the requirements of consensus. We would
  be better served by depending on a well-maintained open-source library like
  gRPC.

- Question 2b: Improve the socket protocol and deprecate/remove gRPC?

  Claim: If we find meaningful advantages to maintaining our own custom RPC
  protocol in Tendermint, we should treat it as a first-class project within
  the core and invest in making it good enough that we do not require other
  options.

**One important consideration** when discussing these questions is that _any
outcome which includes keeping the socket protocol will have eventual migration
impacts for out-of-process applications_ regardless. To fix the limitations of
the socket protocol as it is currently designed will require making _breaking
changes_ to the protocol. So, while we may put off a migration cost for
out-of-process applications by retaining the socket protocol in the short term,
we will eventually have to pay those costs to fix the problems in its current
design.

## Detailed Design

1. If we choose to standardize on gRPC, the main work in Tendermint core will
   be removing and cleaning up the code for the socket client and server.

   Besides the code cleanup, we will also need to clearly document a
   deprecation schedule, and invest time in making the migration easier for
   applications currently using the socket protocol.

   > **Point for discussion:** Migrating from the socket protocol to gRPC
   > should mostly be a plumbing change, as long as we do it during a release
   > in which we are not making other breaking changes to ABCI. However, the
   > effort may be more or less depending on how gRPC integration works in the
   > application's implementation language, and would have to be sure networks
   > have plenty of time not only to make the change but to verify that it
   > preserves the function of the network.
   >
   > What questions should we be asking node operators and application
   > developers to understand the migration costs better?

2. If we choose to keep only the socket protocol, we will need to follow up
   with a more detailed design for extending and upgrading the protocol to fix
   the existing performance and operational issues with the protocol.

   Moreover, since the gRPC interface has been around for a long time we will
   also need a deprecation plan for it.

3. If we choose to keep both options, we will still need to do all the work of
   (2), but the gRPC implementation should not require any immediate changes.

## Alternatives Considered

- **FFI**. Another approach we could take is to use a C-based FFI interface so
  that applications written in other languages are linked directly with the
  consensus node, an option currently only available for Go applications.

  An FFI interface is possible for a lot of languages, but FFI support varies
  widely in coverage and quality across languages and the points of friction
  can be tricky to work around. Moreover, it's much harder to add FFI support
  to a language where it's missing after-the-fact for an application developer.

  Although a basic FFI interface is not too difficult on the Go side, the C
  shims for an FFI can get complicated if there's a lot of variability in the
  runtime environment on the other end.

  If we want to have one answer for non-Go applications, we are better off
  picking an IPC-based solution (whether that's gRPC or an extension of our
  custom socket protocol or something else).

## Consequences

- **Standardize on gRPC**

  - ✅ Addresses existing performance and operational issues.
  - ✅ Replaces custom code with a well-maintained widely-used library.
  - ✅ Aligns with Cosmos SDK, which already uses gRPC extensively.
  - ✅ Aligns with priv validator interface, for which the socket protocol is already deprecated for gRPC.
  - ❓ Applications will be hard to implement in a language without gRPC support.
  - ⛔ All users of the socket protocol have to migrate to gRPC, and we believe most current out-of-process applications use the socket protocol.

- **Standardize on socket protocol**

  - ✅ Less immediate impact for existing users (but see below).
  - ✅ Simplifies ABCI API surface by removing gRPC.
  - ❓ Users of the socket protocol will have a (smaller) migration.
  - ❓ Potentially easier to implement for languages that do not have support.
  - ⛔ Need to do all the work to fix the socket protocol (which will require existing users to update anyway later).
  - ⛔ Ongoing maintenance burden for per-language server implementations.

- **Keep both options**

  - ✅ Less immediate impact for existing users (but see below).
  - ❓ Users of the socket protocol will have a (smaller) migration.
  - ⛔ Still need to do all the work to fix the socket protocol (which will require existing users to update anyway later).
  - ⛔ Requires ongoing maintenance and support of both gRPC and socket protocol integrations.

## References

- [Application Blockchain Interface (ABCI)][abci]
- [Tendermint ABCI socket client][socket-client]
- [Tendermint ABCI gRPC client][grpc-client]
- [Initial commit of gRPC client][abci-start]

[abci]: https://github.com/tendermint/spec/tree/master/spec/abci
[socket-client]: https://github.com/tendermint/tendermint/blob/master/abci/client/socket_client.go
[socket-server]: https://github.com/tendermint/tendermint/blob/master/abci/server/socket_server.go
[grpc-client]: https://github.com/tendermint/tendermint/blob/master/abci/client/grpc_client.go
[abci-start]: https://github.com/tendermint/abci/commit/1ab3c747182aaa38418258679c667090c2bb1e0d

## Notes

- <a id=note1></a>**Note 1**: The choice to make all ABCI errors protocol-fatal
  was intended to avoid the risk that recovering an application error could
  cause application state to diverge. Divergence can break consensus, so it's
  essential to avoid it.

  This is a sound principle, but conflates protocol errors with "mechanical"
  errors such as timeouts, resources exhaustion, failed connections, and so on.
  Because the protocol has no way to distinguish these conditions, the only way
  for an application to report an error is to panic or crash.

  Whether a node is running in the same process as the application or as a
  separate process, application errors should not be suppressed or hidden.
  However, it's important to ensure that errors are handled at a consistent and
  well-defined point in the protocol: Having the application panic or crash
  rather than reporting an error means the node sees different results
  depending on whether the application runs in-process or out-of-process, even
  if the application logic is otherwise identical.

## Appendix: Known Implementations of ABCI Socket Protocol

This is a list of known implementations of the Tendermint custom socket
protocol. Note that in most cases I have not checked how complete or correct
these implementations are; these are based on search results and a cursory
visual inspection.

- Tendermint Core (Go): [client][socket-client], [server][socket-server]
- Informal Systems [tendermint-rs](https://github.com/informalsystems/tendermint-rs) (Rust): [client](https://github.com/informalsystems/tendermint-rs/blob/master/abci/src/client.rs), [server](https://github.com/informalsystems/tendermint-rs/blob/master/abci/src/server.rs)
- Tendermint [js-abci](https://github.com/tendermint/js-abci) (JS): [server](https://github.com/tendermint/js-abci/blob/master/src/server.js)
- [Hotmoka](https://github.com/Hotmoka/hotmoka) ABCI (Java): [server](https://github.com/Hotmoka/hotmoka/blob/master/io-hotmoka-tendermint-abci/src/main/java/io/hotmoka/tendermint_abci/Server.java)
- [Tower ABCI](https://github.com/penumbra-zone/tower-abci) (Rust): [server](https://github.com/penumbra-zone/tower-abci/blob/main/src/server.rs)
- [abci-host](https://github.com/datopia/abci-host) (Clojure): [server](https://github.com/datopia/abci-host/blob/master/src/abci/host.clj)
- [abci_server](https://github.com/KrzysiekJ/abci_server) (Erlang): [server](https://github.com/KrzysiekJ/abci_server/blob/master/src/abci_server.erl)
- [py-abci](https://github.com/davebryson/py-abci) (Python): [server](https://github.com/davebryson/py-abci/blob/master/src/abci/server.py)
- [scala-tendermint-server](https://github.com/intechsa/scala-tendermint-server) (Scala): [server](https://github.com/InTechSA/scala-tendermint-server/blob/master/src/main/scala/lu/intech/tendermint/Server.scala)
- [kepler](https://github.com/f-o-a-m/kepler) (Haskell): [server](https://github.com/f-o-a-m/kepler/blob/master/hs-abci-server/src/Network/ABCI/Server.hs)
```
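One of the RFC's core complaints, head-of-line blocking caused by the missing request identifiers, can be made concrete with a short sketch. This is not Tendermint's actual client (that code is linked in the RFC); it only models the pairing rule the wire format forces on any implementation:

```go
package main

import "fmt"

// pending models a client on a protocol with no request identifiers:
// the only way to match a response to its request is FIFO position.
type pending struct{ queue []string }

func (p *pending) send(method string) { p.queue = append(p.queue, method) }

// recv must attribute each response to the OLDEST outstanding request,
// even if a later, independent request actually finished first.
func (p *pending) recv(resp string) string {
	method := p.queue[0]
	p.queue = p.queue[1:]
	return fmt.Sprintf("%s -> %s", method, resp)
}

func main() {
	var p pending
	p.send("CheckTx") // slow request
	p.send("Info")    // fast, independent request

	// Even if the server finishes Info first, the protocol forces the
	// first response on the wire to belong to CheckTx: head-of-line blocking.
	fmt.Println(p.recv("response A"))
	fmt.Println(p.recv("response B"))
}
```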
Validators doc:

```diff
@@ -4,4 +4,4 @@ order: false

 # Validators

-This file has moved to the [node section](../nodes/validators.md).
+_This file has moved to the [node section](../nodes/validators.md)._
```
@@ -23,8 +23,6 @@ yourself with the syntax.
|
||||
By following along with this guide, you'll create a Tendermint Core project
|
||||
called kvstore, a (very) simple distributed BFT key-value store.
|
||||
|
||||
> Note: please use a released version of Tendermint with this guide. The guides will work with the latest version. Please, do not use master.
|
||||
|
||||
## Built-in app vs external app
|
||||
|
||||
Running your application inside the same process as Tendermint Core will give
|
||||
@@ -36,28 +34,27 @@ through a TCP, Unix domain socket or gRPC.
|
||||
## 1.1 Installing Go
|
||||
|
||||
Please refer to [the official guide for installing
|
||||
Go](https://golang.org/doc/install).
|
||||
Go](https://go.dev/doc/install).
|
||||
|
||||
Verify that you have the latest version of Go installed:
|
||||
|
||||
```bash
|
||||
```sh
|
||||
$ go version
|
||||
go version go1.16.x darwin/amd64
|
||||
go version go1.18.x darwin/amd64
|
||||
```
|
||||
|
||||
## 1.2 Creating a new Go project
|
||||
|
||||
We'll start by creating a new Go project.
|
||||
We'll start by creating a new Go project. First, initialize the project folder with `go mod init`. Running this command should create the `go.mod` file.
|
||||
|
||||
```bash
|
||||
mkdir kvstore
|
||||
cd kvstore
|
||||
go mod init github.com/<github_username>/<repo_name>
|
||||
```sh
|
||||
$ mkdir kvstore
|
||||
$ cd kvstore
|
||||
$ go mod init github.com/<username>/kvstore
|
||||
go: creating new go.mod: module github.com/<username>/kvstore
|
||||
```
|
||||
|
||||
Inside the example directory create a `main.go` file with the following content:
|
||||
|
||||
> Note: there is no need to clone or fork Tendermint in this tutorial.
|
||||
Inside the project directory, create a `main.go` file with the following content:
|
||||
|
||||
```go
|
||||
package main
|
||||
@@ -73,7 +70,7 @@ func main() {
|
||||
|
||||
When run, this should print "Hello, Tendermint Core" to the standard output.
|
||||
|
||||
```bash
|
||||
```sh
|
||||
$ go run main.go
|
||||
Hello, Tendermint Core
|
||||
```
|
||||
@@ -152,16 +149,43 @@ func (KVStoreApplication) ApplySnapshotChunk(abcitypes.RequestApplySnapshotChunk
}
```

Now I will go through each method explaining when it's called and adding
Now, we will go through each method and explain when it is executed while adding
required business logic.

### 1.3.1 CheckTx
### 1.3.1 Key-value store setup

When a new transaction is added to the Tendermint Core, it will ask the
application to check it (validate the format, signatures, etc.).
For the underlying key-value store we'll use the latest version of [badger](https://github.com/dgraph-io/badger), which is an embeddable, persistent and fast key-value (KV) database.

```sh
$ go get github.com/dgraph-io/badger/v3
go: added github.com/dgraph-io/badger/v3 v3.2103.2
```

```go
import "bytes"
import "github.com/dgraph-io/badger/v3"

type KVStoreApplication struct {
	db           *badger.DB
	currentBatch *badger.Txn
}

func NewKVStoreApplication(db *badger.DB) *KVStoreApplication {
	return &KVStoreApplication{
		db: db,
	}
}
```
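
For context, here is a hedged sketch (not part of the diff above) of how this store is opened and handed to the constructor; the `/tmp/badger` path matches the one used later in this guide:

```go
package main

import (
	"fmt"
	"os"

	"github.com/dgraph-io/badger/v3"
)

func main() {
	// Open (or create) the badger database that backs the application.
	db, err := badger.Open(badger.DefaultOptions("/tmp/badger"))
	if err != nil {
		fmt.Fprintf(os.Stderr, "failed to open badger db: %v\n", err)
		os.Exit(1)
	}
	defer db.Close()

	// Hand the store to the application; the node wiring comes later.
	app := NewKVStoreApplication(db)
	_ = app
}
```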

### 1.3.2 CheckTx

When a new transaction is added to the Tendermint Core, it will ask the application to check it (validate the format, signatures, etc.).

```go
import (
	"bytes"

	...
)

func (app *KVStoreApplication) isValid(tx []byte) (code uint32) {
	// check format
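	// The hunk cuts off here. As a hedged sketch (reconstructed, not part of
	// this diff): the format check splits the transaction into a key=value
	// pair and rejects anything else.
	parts := bytes.Split(tx, []byte("="))
	if len(parts) != 2 {
		return 1 // malformed: expected key=value
	}
	// The full guide also rejects a key=value pair that is already stored;
	// that badger lookup is omitted from this sketch.
	return 0
}
```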
@@ -214,26 +238,7 @@ Valid transactions will eventually be committed given they are not too big and
have enough gas. To learn more about gas, check out ["the
specification"](https://github.com/tendermint/tendermint/blob/master/spec/abci/apps.md#gas).

For the underlying key-value store we'll use
[badger](https://github.com/dgraph-io/badger), which is an embeddable,
persistent and fast key-value (KV) database.

```go
import "github.com/dgraph-io/badger"

type KVStoreApplication struct {
	db           *badger.DB
	currentBatch *badger.Txn
}

func NewKVStoreApplication(db *badger.DB) *KVStoreApplication {
	return &KVStoreApplication{
		db: db,
	}
}
```

### 1.3.2 BeginBlock -> DeliverTx -> EndBlock -> Commit
### 1.3.3 BeginBlock -> DeliverTx -> EndBlock -> Commit

When Tendermint Core has decided on the block, it's transferred to the
application in 3 parts: `BeginBlock`, one `DeliverTx` per transaction and
@@ -290,7 +295,7 @@ func (app *KVStoreApplication) Commit() abcitypes.ResponseCommit {
}
```
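
The hunk above shows only the tail of `Commit`. For orientation, here is a hedged sketch (reconstructed, not part of this diff) of how the three handlers fit together around the `currentBatch` transaction from the setup section, using the `abcitypes` and `bytes` imports shown earlier:

```go
func (app *KVStoreApplication) BeginBlock(req abcitypes.RequestBeginBlock) abcitypes.ResponseBeginBlock {
	// Open a fresh badger write transaction for the incoming block.
	app.currentBatch = app.db.NewTransaction(true)
	return abcitypes.ResponseBeginBlock{}
}

func (app *KVStoreApplication) DeliverTx(req abcitypes.RequestDeliverTx) abcitypes.ResponseDeliverTx {
	if code := app.isValid(req.Tx); code != 0 {
		return abcitypes.ResponseDeliverTx{Code: code}
	}
	parts := bytes.Split(req.Tx, []byte("="))
	// Stage the key/value pair inside the current block's transaction.
	if err := app.currentBatch.Set(parts[0], parts[1]); err != nil {
		panic(err)
	}
	return abcitypes.ResponseDeliverTx{Code: abcitypes.CodeTypeOK}
}

func (app *KVStoreApplication) Commit() abcitypes.ResponseCommit {
	// Atomically persist everything delivered in this block.
	if err := app.currentBatch.Commit(); err != nil {
		panic(err)
	}
	return abcitypes.ResponseCommit{Data: []byte{}}
}
```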

### 1.3.3 Query
### 1.3.4 Query

Now, when the client wants to know whether a particular key/value pair exists, it
will call Tendermint Core RPC `/abci_query` endpoint, which in turn will call
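the application's `Query` method. The hunk skips the handler itself, so as a hedged sketch (reconstructed, not part of this diff), it reads the key from badger and reports `exists` or `does not exist` in the log field, matching the `curl` output shown later:

```go
func (app *KVStoreApplication) Query(reqQuery abcitypes.RequestQuery) (resQuery abcitypes.ResponseQuery) {
	resQuery.Key = reqQuery.Data
	err := app.db.View(func(txn *badger.Txn) error {
		item, err := txn.Get(reqQuery.Data)
		if err != nil {
			if err == badger.ErrKeyNotFound {
				resQuery.Log = "does not exist"
				return nil
			}
			return err
		}
		// Key found: copy its value into the response.
		return item.Value(func(val []byte) error {
			resQuery.Log = "exists"
			resQuery.Value = val
			return nil
		})
	})
	if err != nil {
		panic(err)
	}
	return resQuery
}
```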
@@ -348,17 +353,15 @@ import (
	"path/filepath"
	"syscall"

	"github.com/dgraph-io/badger"
	"github.com/dgraph-io/badger/v3"
	"github.com/spf13/viper"

	abci "github.com/tendermint/tendermint/abci/types"
	cfg "github.com/tendermint/tendermint/config"
	tmflags "github.com/tendermint/tendermint/libs/cli/flags"
	"github.com/tendermint/tendermint/libs/log"
	nm "github.com/tendermint/tendermint/node"
	"github.com/tendermint/tendermint/internal/p2p"
	"github.com/tendermint/tendermint/privval"
	"github.com/tendermint/tendermint/proxy"
	abciclient "github.com/tendermint/tendermint/abci/client"
	abcitypes "github.com/tendermint/tendermint/abci/types"
	tmconfig "github.com/tendermint/tendermint/config"
	tmlog "github.com/tendermint/tendermint/libs/log"
	tmservice "github.com/tendermint/tendermint/libs/service"
	tmnode "github.com/tendermint/tendermint/node"
)

var configFile string
@@ -395,57 +398,42 @@ func main() {
	<-c
}

func newTendermint(app abci.Application, configFile string) (*nm.Node, error) {
	// read config
	config := cfg.DefaultValidatorConfig()
	config.RootDir = filepath.Dir(filepath.Dir(configFile))
	viper.SetConfigFile(configFile)
	if err := viper.ReadInConfig(); err != nil {
		return nil, fmt.Errorf("viper failed to read config file: %w", err)
	}
	if err := viper.Unmarshal(config); err != nil {
		return nil, fmt.Errorf("viper failed to unmarshal config: %w", err)
	}
	if err := config.ValidateBasic(); err != nil {
		return nil, fmt.Errorf("config is invalid: %w", err)
	}
func newTendermint(app abcitypes.Application, configFile string) (tmservice.Service, error) {
	// read config
	config := tmconfig.DefaultValidatorConfig()
	config.SetRoot(filepath.Dir(filepath.Dir(configFile)))

	// create logger
	logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
	var err error
	logger, err = tmflags.ParseLogLevel(config.LogLevel, logger, cfg.DefaultLogLevel)
	if err != nil {
		return nil, fmt.Errorf("failed to parse log level: %w", err)
	}
	viper.SetConfigFile(configFile)
	if err := viper.ReadInConfig(); err != nil {
		return nil, fmt.Errorf("viper failed to read config file: %w", err)
	}
	if err := viper.Unmarshal(config); err != nil {
		return nil, fmt.Errorf("viper failed to unmarshal config: %w", err)
	}
	if err := config.ValidateBasic(); err != nil {
		return nil, fmt.Errorf("config is invalid: %w", err)
	}

	// read private validator
	pv := privval.LoadFilePV(
		config.PrivValidatorKeyFile(),
		config.PrivValidatorStateFile(),
	)
	// create logger
	logger, err := tmlog.NewDefaultLogger(tmlog.LogFormatPlain, config.LogLevel, false)
	if err != nil {
		return nil, fmt.Errorf("failed to create logger: %w", err)
	}

	// read node key
	nodeKey, err := p2p.LoadNodeKey(config.NodeKeyFile())
	if err != nil {
		return nil, fmt.Errorf("failed to load node's key: %w", err)
	}
	// create node
	node, err := tmnode.New(
		config,
		logger,
		abciclient.NewLocalCreator(app),
		nil,
	)
	if err != nil {
		return nil, fmt.Errorf("failed to create new Tendermint node: %w", err)
	}

	// create node
	node, err := nm.NewNode(
		config,
		pv,
		nodeKey,
		abcicli.NewLocalClientCreator(app),
		nm.DefaultGenesisDocProviderFunc(config),
		nm.DefaultDBProvider,
		nm.DefaultMetricsProvider(config.Instrumentation),
		logger)
	if err != nil {
		return nil, fmt.Errorf("failed to create new Tendermint node: %w", err)
	}

	return node, nil
	return node, nil
}

```

This is a huge blob of code, so let's break it down into pieces.

@@ -469,7 +457,7 @@ This can be avoided by setting the truncate option to true, like this:
db, err := badger.Open(badger.DefaultOptions("/tmp/badger").WithTruncate(true))
```

Then we use it to create a Tendermint Core `Node` instance:
Then we use it to create a Tendermint Core [Service](https://github.com/tendermint/tendermint/blob/v0.35.8/libs/service/service.go#L24) instance:

```go
flag.Parse()
@@ -483,75 +471,48 @@ if err != nil {
...

// create node
node, err := nm.NewNode(
	config,
	pv,
	nodeKey,
	abcicli.NewLocalClientCreator(app),
	nm.DefaultGenesisDocProviderFunc(config),
	nm.DefaultDBProvider,
	nm.DefaultMetricsProvider(config.Instrumentation),
	logger)
node, err := tmnode.New(
	config,
	logger,
	abciclient.NewLocalCreator(app),
	nil,
)
if err != nil {
	return nil, fmt.Errorf("failed to create new Tendermint node: %w", err)
	return nil, fmt.Errorf("failed to create new Tendermint node: %w", err)
}
```

`NewNode` requires a few things including a configuration file, a private
validator, a node key and a few others in order to construct the full node.
[tmnode.New](https://github.com/tendermint/tendermint/blob/v0.35.8/node/public.go#L29) requires a few things including a configuration file, a logger and a few others in order to construct the full node.

Note we use `abcicli.NewLocalClientCreator` here to create a local client instead
of one communicating through a socket or gRPC.
Note that we use [abciclient.NewLocalCreator](https://github.com/tendermint/tendermint/blob/v0.35.8/abci/client/creators.go#L15) here to create a local client instead of one communicating through a socket or gRPC.

[viper](https://github.com/spf13/viper) is being used for reading the config,
which we will generate later using the `tendermint init` command.

```go
config := cfg.DefaultValidatorConfig()
config.RootDir = filepath.Dir(filepath.Dir(configFile))
// read config
config := tmconfig.DefaultValidatorConfig()
config.SetRoot(filepath.Dir(filepath.Dir(configFile)))
viper.SetConfigFile(configFile)
if err := viper.ReadInConfig(); err != nil {
	return nil, fmt.Errorf("viper failed to read config file: %w", err)
	return nil, fmt.Errorf("viper failed to read config file: %w", err)
}
if err := viper.Unmarshal(config); err != nil {
	return nil, fmt.Errorf("viper failed to unmarshal config: %w", err)
	return nil, fmt.Errorf("viper failed to unmarshal config: %w", err)
}
if err := config.ValidateBasic(); err != nil {
	return nil, fmt.Errorf("config is invalid: %w", err)
	return nil, fmt.Errorf("config is invalid: %w", err)
}
```

We use `FilePV`, which is a private validator (i.e. a thing which signs consensus
messages). Normally, you would use `SignerRemote` to connect to an external
[HSM](https://kb.certus.one/hsm.html).
As for the logger, we use the built-in library, which provides a nice
abstraction over [zerolog](https://github.com/rs/zerolog).

```go
pv := privval.LoadFilePV(
	config.PrivValidatorKeyFile(),
	config.PrivValidatorStateFile(),
)

```

`nodeKey` is needed to identify the node in a p2p network.

```go
nodeKey, err := p2p.LoadNodeKey(config.NodeKeyFile())
// create logger
logger, err := tmlog.NewDefaultLogger(tmlog.LogFormatPlain, config.LogLevel, true)
if err != nil {
	return nil, fmt.Errorf("failed to load node's key: %w", err)
}
```

As for the logger, we use the built-in library, which provides a nice
abstraction over [go-kit's
logger](https://github.com/go-kit/kit/tree/master/log).

```go
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
var err error
logger, err = tmflags.ParseLogLevel(config.LogLevel, logger, cfg.DefaultLogLevel())
if err != nil {
	return nil, fmt.Errorf("failed to parse log level: %w", err)
	return nil, fmt.Errorf("failed to create logger: %w", err)
}
```

@@ -570,40 +531,41 @@ signal.Notify(c, os.Interrupt, syscall.SIGTERM)
<-c
```

## 1.5 Getting Up and Running
## 1.5 Getting up and running

We are going to use [Go modules](https://github.com/golang/go/wiki/Modules) for
dependency management.

```bash
export GO111MODULE=on
go mod init github.com/me/example
```

This should create a `go.mod` file. The current tutorial only works with
the master branch of Tendermint, so let's make sure we're using the latest version:
Make sure to enable [Go modules](https://github.com/golang/go/wiki/Modules). Run `go mod tidy` to download and add dependencies in the `go.mod` file.

```sh
go get github.com/tendermint/tendermint@master
$ go mod tidy
...
```

Let's make sure we're using the latest version of Tendermint (currently `v0.35.8`).

```sh
$ go get github.com/tendermint/tendermint@latest
...
```

This will populate the `go.mod` with a release number followed by a hash for Tendermint.

```go
module github.com/me/example
module github.com/<username>/kvstore

go 1.15
go 1.18

require (
	github.com/dgraph-io/badger v1.6.2
	github.com/tendermint/tendermint <vX>
	github.com/dgraph-io/badger/v3 v3.2103.2
	github.com/tendermint/tendermint v0.35.8
	...
)
```

Now we can build the binary:
Now, we can build the binary:

```bash
go build
```sh
$ go build
...
```

To create a default configuration, nodeKey and private validator files, let's
@@ -614,66 +576,87 @@ installing from source, don't forget to checkout the latest release (`git
checkout vX.Y.Z`). Don't forget to check that the application uses the same
major version.

```bash
$ rm -rf /tmp/example
$ TMHOME="/tmp/example" tendermint init validator
```sh
$ rm -rf /tmp/kvstore /tmp/badger
$ TMHOME="/tmp/kvstore" tendermint init validator

I[2019-07-16|18:40:36.480] Generated private validator module=main keyFile=/tmp/example/config/priv_validator_key.json stateFile=/tmp/example2/data/priv_validator_state.json
I[2019-07-16|18:40:36.481] Generated node key module=main path=/tmp/example/config/node_key.json
I[2019-07-16|18:40:36.482] Generated genesis file module=main path=/tmp/example/config/genesis.json
I[2019-07-16|18:40:36.483] Generated config module=main mode=validator
2022-07-20T17:04:41+08:00 INFO Generated private validator keyFile=/tmp/kvstore/config/priv_validator_key.json module=main stateFile=/tmp/kvstore/data/priv_validator_state.json
2022-07-20T17:04:41+08:00 INFO Generated node key module=main path=/tmp/kvstore/config/node_key.json
2022-07-20T17:04:41+08:00 INFO Generated genesis file module=main path=/tmp/kvstore/config/genesis.json
2022-07-20T17:04:41+08:00 INFO Generated config mode=validator module=main
```

Feel free to explore the generated files, which can be found in the
`/tmp/kvstore/config` directory. Documentation on the config can be found
[here](https://docs.tendermint.com/master/tendermint-core/configuration.html).

We are ready to start our application:

```bash
$ ./example -config "/tmp/example/config/config.toml"
```sh
$ ./kvstore -config "/tmp/kvstore/config/config.toml"

badger 2019/07/16 18:42:25 INFO: All 0 tables opened in 0s
badger 2019/07/16 18:42:25 INFO: Replaying file id: 0 at offset: 0
badger 2019/07/16 18:42:25 INFO: Replay took: 695.227s
E[2019-07-16|18:42:25.818] Couldn't connect to any seeds module=p2p
I[2019-07-16|18:42:26.853] Executed block module=state height=1 validTxs=0 invalidTxs=0
I[2019-07-16|18:42:26.865] Committed state module=state height=1 txs=0 appHash=
badger 2022/07/16 13:55:59 INFO: All 0 tables opened in 0s
badger 2022/07/16 13:55:59 INFO: Replaying file id: 0 at offset: 0
badger 2022/07/16 13:55:59 INFO: Replay took: 3.052µs
badger 2022/07/16 13:55:59 DEBUG: Value log discard stats empty
2022-07-16T13:55:59+08:00 INFO starting service impl=multiAppConn module=proxy service=multiAppConn
2022-07-16T13:55:59+08:00 INFO starting service connection=query impl=localClient module=abci-client service=localClient
2022-07-16T13:55:59+08:00 INFO starting service connection=snapshot impl=localClient module=abci-client service=localClient
2022-07-16T13:55:59+08:00 INFO starting service connection=mempool impl=localClient module=abci-client service=localClient
2022-07-16T13:55:59+08:00 INFO starting service connection=consensus impl=localClient module=abci-client service=localClient
2022-07-16T13:55:59+08:00 INFO starting service impl=EventBus module=events service=EventBus
2022-07-16T13:55:59+08:00 INFO starting service impl=PubSub module=pubsub service=PubSub
2022-07-16T13:55:59+08:00 INFO starting service impl=IndexerService module=txindex service=IndexerService
2022-07-16T13:55:59+08:00 INFO ABCI Handshake App Info hash= height=0 module=consensus protocol-version=0 software-version=
2022-07-16T13:55:59+08:00 INFO ABCI Replay Blocks appHeight=0 module=consensus stateHeight=0 storeHeight=0
2022-07-16T13:55:59+08:00 INFO Completed ABCI Handshake - Tendermint and App are synced appHash= appHeight=0 module=consensus
2022-07-16T13:55:59+08:00 INFO Version info block=11 mode=validator p2p=8 tmVersion=0.35.8
```

Now open another tab in your terminal and try sending a transaction:
Let's try sending a transaction. Open another terminal and execute the command below.

```bash
```sh
$ curl -s 'localhost:26657/broadcast_tx_commit?tx="tendermint=rocks"'
{
  "check_tx": {
    "gasWanted": "1",
    ...
  },
  "deliver_tx": { ... },
  "hash": "1B3C5A1093DB952C331B1749A21DCCBB0F6C7F4E0055CD04D16346472FC60EC6",
  "height": "128"
  ...
  "result": {
    "check_tx": {
      ...
      "gas_wanted": "1",
      ...
    },
    "deliver_tx": {...},
    "hash": "1B3C5A1093DB952C331B1749A21DCCBB0F6C7F4E0055CD04D16346472FC60EC6",
    "height": "91"
  }
}
```

The response should contain the height at which this transaction was committed.
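
The same call can also be made from Go instead of `curl`; here is a hedged sketch using the RPC client that ships with Tendermint (the single-argument `rpchttp.New` constructor assumes the v0.35.x `rpc/client/http` API):

```go
package main

import (
	"context"
	"fmt"

	rpchttp "github.com/tendermint/tendermint/rpc/client/http"
	"github.com/tendermint/tendermint/types"
)

func main() {
	client, err := rpchttp.New("tcp://127.0.0.1:26657")
	if err != nil {
		panic(err)
	}
	// Equivalent to the /broadcast_tx_commit curl call above.
	res, err := client.BroadcastTxCommit(context.Background(), types.Tx("tendermint=rocks"))
	if err != nil {
		panic(err)
	}
	fmt.Println("committed at height", res.Height)
}
```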

Now let's check if the given key now exists and its value:
Let's check if the given key now exists and its value:

```json
```sh
$ curl -s 'localhost:26657/abci_query?data="tendermint"'
{
  "response": {
    "code": 0,
    "log": "exists",
    "info": "",
    "index": "0",
    "key": "dGVuZGVybWludA==",
    "value": "cm9ja3M=",
    "proofOps": null,
    "height": "6",
    "codespace": ""
  ...
  "result": {
    "response": {
      "code": 0,
      "log": "exists",
      "info": "",
      "index": "0",
      "key": "dGVuZGVybWludA==",
      "value": "cm9ja3M=",
      "proofOps": null,
      "height": "0",
      "codespace": ""
    }
  }
}
```

"dGVuZGVybWludA==" and "cm9ja3M=" are the base64-encoding of the ASCII of
`dGVuZGVybWludA==` and `cm9ja3M=` are the base64-encoding of the ASCII of
"tendermint" and "rocks" respectively.

## Outro

@@ -37,25 +37,27 @@ Core will not have access to application's state.
## 1.1 Installing Go

Please refer to [the official guide for installing
Go](https://golang.org/doc/install).
Go](https://go.dev/doc/install).

Verify that you have the latest version of Go installed:

```bash
```sh
$ go version
go version go1.16.x darwin/amd64
go version go1.18.x darwin/amd64
```

## 1.2 Creating a new Go project

We'll start by creating a new Go project.
We'll start by creating a new Go project. Initialize the folder with `go mod init`. Running this command should create the `go.mod` file.

```bash
mkdir kvstore
cd kvstore
```sh
$ mkdir kvstore
$ cd kvstore
$ go mod init github.com/<username>/kvstore
go: creating new go.mod: module github.com/<username>/kvstore
```

Inside the example directory create a `main.go` file with the following content:
Inside the project directory, create a `main.go` file with the following content:

```go
package main
@@ -71,8 +73,8 @@ func main() {

When run, this should print "Hello, Tendermint Core" to the standard output.

```bash
go run main.go
```sh
$ go run main.go
Hello, Tendermint Core
```

@@ -150,10 +152,34 @@ func (KVStoreApplication) ApplySnapshotChunk(abcitypes.RequestApplySnapshotChunk
}
```

Now I will go through each method explaining when it's called and adding
Now, we will go through each method and explain when it is executed while adding
required business logic.

### 1.3.1 CheckTx
### 1.3.1 Key-value store setup

For the underlying key-value store we'll use the latest version of [badger](https://github.com/dgraph-io/badger), which is an embeddable, persistent and fast key-value (KV) database.

```sh
$ go get github.com/dgraph-io/badger/v3
go: added github.com/dgraph-io/badger/v3 v3.2103.2
```

```go
import "github.com/dgraph-io/badger/v3"

type KVStoreApplication struct {
	db           *badger.DB
	currentBatch *badger.Txn
}

func NewKVStoreApplication(db *badger.DB) *KVStoreApplication {
	return &KVStoreApplication{
		db: db,
	}
}
```
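
A side note on the store (an optional variation, not part of this guide's diff): badger can also run entirely in memory, which is handy for throwaway experiments because nothing is written to disk:

```go
// Hypothetical variation for quick experiments: an in-memory badger instance.
db, err := badger.Open(badger.DefaultOptions("").WithInMemory(true))
if err != nil {
	panic(err)
}
defer db.Close()
```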

### 1.3.2 CheckTx

When a new transaction is added to the Tendermint Core, it will ask the
application to check it (validate the format, signatures, etc.).
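
The hunk below jumps past the handler itself; as a hedged sketch (reconstructed, not part of this diff), `CheckTx` delegates to the `isValid` helper and reports a fixed gas cost, which is where the `gas_wanted: "1"` in the later `curl` output comes from:

```go
func (app *KVStoreApplication) CheckTx(req abcitypes.RequestCheckTx) abcitypes.ResponseCheckTx {
	// Reject malformed or duplicate transactions before they reach the mempool.
	code := app.isValid(req.Tx)
	return abcitypes.ResponseCheckTx{Code: code, GasWanted: 1}
}
```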
@@ -212,26 +238,8 @@ Valid transactions will eventually be committed given they are not too big and
have enough gas. To learn more about gas, check out ["the
specification"](https://github.com/tendermint/tendermint/blob/master/spec/abci/apps.md#gas).

For the underlying key-value store we'll use
[badger](https://github.com/dgraph-io/badger), which is an embeddable,
persistent and fast key-value (KV) database.

```go
import "github.com/dgraph-io/badger"

type KVStoreApplication struct {
	db           *badger.DB
	currentBatch *badger.Txn
}

func NewKVStoreApplication(db *badger.DB) *KVStoreApplication {
	return &KVStoreApplication{
		db: db,
	}
}
```

### 1.3.2 BeginBlock -> DeliverTx -> EndBlock -> Commit
### 1.3.3 BeginBlock -> DeliverTx -> EndBlock -> Commit

When Tendermint Core has decided on the block, it's transferred to the
application in 3 parts: `BeginBlock`, one `DeliverTx` per transaction and
@@ -287,7 +295,7 @@ func (app *KVStoreApplication) Commit() abcitypes.ResponseCommit {
}
```

### 1.3.3 Query
### 1.3.4 Query

Now, when the client wants to know whether a particular key/value pair exists, it
will call Tendermint Core RPC `/abci_query` endpoint, which in turn will call
@@ -344,7 +352,7 @@ import (
	"os/signal"
	"syscall"

	"github.com/dgraph-io/badger"
	"github.com/dgraph-io/badger/v3"

	abciserver "github.com/tendermint/tendermint/abci/server"
	"github.com/tendermint/tendermint/libs/log"
@@ -353,7 +361,7 @@ import (
var socketAddr string

func init() {
	flag.StringVar(&socketAddr, "socket-addr", "unix://example.sock", "Unix domain socket address")
	flag.StringVar(&socketAddr, "socket-addr", "unix://kvstore.sock", "Unix domain socket address")
}

func main() {
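
The hunk cuts off at the start of `main`. For orientation, here is a hedged sketch of how `main` typically continues in this external-app guide; the `abciserver.NewSocketServer` arguments and the `tmlog` alias are assumptions (the signature has changed between Tendermint releases), so treat this as an approximation rather than the diff's content:

```go
func main() {
	flag.Parse()

	db, err := badger.Open(badger.DefaultOptions("/tmp/badger"))
	if err != nil {
		fmt.Fprintf(os.Stderr, "failed to open badger db: %v\n", err)
		os.Exit(1)
	}
	defer db.Close()
	app := NewKVStoreApplication(db)

	logger, err := tmlog.NewDefaultLogger(tmlog.LogFormatPlain, "info", false)
	if err != nil {
		fmt.Fprintf(os.Stderr, "failed to create logger: %v\n", err)
		os.Exit(1)
	}

	// Serve the ABCI application over the Unix domain socket from -socket-addr.
	server := abciserver.NewSocketServer(logger, socketAddr, app)
	if err := server.Start(); err != nil {
		fmt.Fprintf(os.Stderr, "error starting socket server: %v\n", err)
		os.Exit(1)
	}
	defer server.Stop()

	// Block until interrupted so the server keeps accepting connections.
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt, syscall.SIGTERM)
	<-c
}
```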
@@ -426,40 +434,41 @@ signal.Notify(c, os.Interrupt, syscall.SIGTERM)
<-c
```

## 1.5 Getting Up and Running
## 1.5 Getting up and running

We are going to use [Go modules](https://github.com/golang/go/wiki/Modules) for
dependency management.

```bash
export GO111MODULE=on
go mod init github.com/me/example
```

This should create a `go.mod` file. The current tutorial only works with
the master branch of Tendermint, so let's make sure we're using the latest version:
Make sure to enable [Go modules](https://github.com/golang/go/wiki/Modules). Run `go mod tidy` to download and add dependencies in the `go.mod` file.

```sh
go get github.com/tendermint/tendermint@97a3e44e0724f2017079ce24d36433f03124c09e
$ go mod tidy
...
```

Let's make sure we're using the latest version of Tendermint (currently `v0.35.8`).

```sh
$ go get github.com/tendermint/tendermint@latest
...
```

This will populate the `go.mod` with a release number followed by a hash for Tendermint.

```go
module github.com/me/example
module github.com/<username>/kvstore

go 1.16
go 1.18

require (
	github.com/dgraph-io/badger v1.6.2
	github.com/tendermint/tendermint <vX>
	github.com/dgraph-io/badger/v3 v3.2103.2
	github.com/tendermint/tendermint v0.35.8
	...
)
```

Now we can build the binary:
Now, we can build the binary:

```bash
go build
```sh
$ go build
...
```

To create a default configuration, nodeKey and private validator files, let's
@@ -470,94 +479,112 @@ installing from source, don't forget to checkout the latest release (`git
checkout vX.Y.Z`). Don't forget to check that the application uses the same
major version.

```bash
rm -rf /tmp/example
TMHOME="/tmp/example" tendermint init validator
```sh
$ rm -rf /tmp/kvstore /tmp/badger
$ TMHOME="/tmp/kvstore" tendermint init validator

I[2019-07-16|18:20:36.480] Generated private validator module=main keyFile=/tmp/example/config/priv_validator_key.json stateFile=/tmp/example2/data/priv_validator_state.json
I[2019-07-16|18:20:36.481] Generated node key module=main path=/tmp/example/config/node_key.json
I[2019-07-16|18:20:36.482] Generated genesis file module=main path=/tmp/example/config/genesis.json
I[2019-07-16|18:20:36.483] Generated config module=main mode=validator
2022-07-20T17:04:41+08:00 INFO Generated private validator keyFile=/tmp/kvstore/config/priv_validator_key.json module=main stateFile=/tmp/kvstore/data/priv_validator_state.json
2022-07-20T17:04:41+08:00 INFO Generated node key module=main path=/tmp/kvstore/config/node_key.json
2022-07-20T17:04:41+08:00 INFO Generated genesis file module=main path=/tmp/kvstore/config/genesis.json
2022-07-20T17:04:41+08:00 INFO Generated config mode=validator module=main
```

Feel free to explore the generated files, which can be found in the
`/tmp/example/config` directory. Documentation on the config can be found
`/tmp/kvstore/config` directory. Documentation on the config can be found
[here](https://docs.tendermint.com/master/tendermint-core/configuration.html).

We are ready to start our application:

```bash
rm example.sock
./example
```sh
$ rm kvstore.sock
$ ./kvstore

badger 2019/07/16 18:25:11 INFO: All 0 tables opened in 0s
badger 2019/07/16 18:25:11 INFO: Replaying file id: 0 at offset: 0
badger 2019/07/16 18:25:11 INFO: Replay took: 300.4s
I[2019-07-16|18:25:11.523] Starting ABCIServer impl=ABCIServ
badger 2022/07/20 17:07:17 INFO: All 1 tables opened in 9ms
badger 2022/07/20 17:07:17 INFO: Replaying file id: 0 at offset: 256
badger 2022/07/20 17:07:17 INFO: Replay took: 9.077µs
badger 2022/07/20 17:07:17 DEBUG: Value log discard stats empty
2022-07-20T17:07:17+08:00 INFO starting service impl=ABCIServer service=ABCIServer
2022-07-20T17:07:17+08:00 INFO Waiting for new connection...
```

Then we need to start Tendermint Core and point it to our application. Staying
within the application directory execute:
Then, we need to start Tendermint Core and point it to our application. Staying
within the project directory, open another terminal and execute the command below:

```bash
TMHOME="/tmp/example" tendermint node --proxy-app=unix://example.sock
```sh
$ TMHOME="/tmp/kvstore" tendermint node --proxy-app=unix://kvstore.sock

I[2019-07-16|18:26:20.362] Version info module=main software=0.32.1 block=10 p2p=7
I[2019-07-16|18:26:20.383] Starting Node module=main impl=Node
E[2019-07-16|18:26:20.392] Couldn't connect to any seeds module=p2p
I[2019-07-16|18:26:20.394] Started node module=main nodeInfo="{ProtocolVersion:{P2P:7 Block:10 App:0} ID_:8dab80770ae8e295d4ce905d86af78c4ff634b79 ListenAddr:tcp://0.0.0.0:26656 Network:test-chain-nIO96P Version:0.32.1 Channels:4020212223303800 Moniker:app48.fun-box.ru Other:{TxIndex:on RPCAddress:tcp://127.0.0.1:26657}}"
I[2019-07-16|18:26:21.440] Executed block module=state height=1 validTxs=0 invalidTxs=0
I[2019-07-16|18:26:21.446] Committed state module=state height=1 txs=0 appHash=
2022-07-20T17:10:22+08:00 INFO starting service impl=multiAppConn module=proxy service=multiAppConn
2022-07-20T17:10:22+08:00 INFO starting service connection=query impl=socketClient module=abci-client service=socketClient
2022-07-20T17:10:22+08:00 INFO starting service connection=snapshot impl=socketClient module=abci-client service=socketClient
2022-07-20T17:10:22+08:00 INFO starting service connection=mempool impl=socketClient module=abci-client service=socketClient
2022-07-20T17:10:22+08:00 INFO starting service connection=consensus impl=socketClient module=abci-client service=socketClient
2022-07-20T17:10:22+08:00 INFO starting service impl=EventBus module=events service=EventBus
2022-07-20T17:10:22+08:00 INFO starting service impl=PubSub module=pubsub service=PubSub
2022-07-20T17:10:22+08:00 INFO starting service impl=IndexerService module=txindex service=IndexerService
...
2022-07-20T17:10:22+08:00 INFO starting service impl=Node module=main service=Node
2022-07-20T17:10:22+08:00 INFO Starting RPC HTTP server on 127.0.0.1:26657 module=rpc-server
2022-07-20T17:10:22+08:00 INFO p2p service legacy_enabled=false module=main
2022-07-20T17:10:22+08:00 INFO starting service impl=router module=p2p service=router
2022-07-20T17:10:22+08:00 INFO starting router channels=402021222330386061626300 listen_addr=tcp://0.0.0.0:26656 module=p2p net_addr={"id":"715727499e94f8fcaef1763192ebcc8460f44666","ip":"0.0.0.0","port":26656} node_id=715727499e94f8fcaef1763192ebcc8460f44666
...
```

This should start the full node and connect to our ABCI application.

```sh
I[2019-07-16|18:25:11.525] Waiting for new connection...
I[2019-07-16|18:26:20.329] Accepted a new connection
I[2019-07-16|18:26:20.329] Waiting for new connection...
I[2019-07-16|18:26:20.330] Accepted a new connection
I[2019-07-16|18:26:20.330] Waiting for new connection...
I[2019-07-16|18:26:20.330] Accepted a new connection
2022-07-20T17:09:55+08:00 INFO Waiting for new connection...
2022-07-20T17:10:22+08:00 INFO Accepted a new connection
2022-07-20T17:10:22+08:00 INFO Waiting for new connection...
2022-07-20T17:10:22+08:00 INFO Accepted a new connection
2022-07-20T17:10:22+08:00 INFO Waiting for new connection...
2022-07-20T17:10:22+08:00 INFO Accepted a new connection
```

Now open another tab in your terminal and try sending a transaction:
Let's try sending a transaction. Open another terminal and execute the command below.

```json
```sh
$ curl -s 'localhost:26657/broadcast_tx_commit?tx="tendermint=rocks"'
{
  "check_tx": {
    "gasWanted": "1",
    ...
  },
  "deliver_tx": { ... },
  "hash": "CDD3C6DFA0A08CAEDF546F9938A2EEC232209C24AA0E4201194E0AFB78A2C2BB",
  "height": "33"
  ...
  "result": {
    "check_tx": {
      ...
      "gas_wanted": "1",
      ...
    },
    "deliver_tx": { ... },
    "hash": "1B3C5A1093DB952C331B1749A21DCCBB0F6C7F4E0055CD04D16346472FC60EC6",
    "height": "15"
  }
}
```

The response should contain the height at which this transaction was committed.

Now let's check if the given key now exists and its value:
Let's check if the given key now exists and its value:

```json
```sh
$ curl -s 'localhost:26657/abci_query?data="tendermint"'
{
  "response": {
    "code": 0,
    "log": "exists",
    "info": "",
    "index": "0",
    "key": "dGVuZGVybWludA==",
    "value": "cm9ja3M=",
    "proofOps": null,
    "height": "6",
    "codespace": ""
  ...
  "result": {
    "response": {
      "code": 0,
      "log": "exists",
      "info": "",
      "index": "0",
      "key": "dGVuZGVybWludA==",
      "value": "cm9ja3M=",
      "proofOps": null,
      "height": "0",
      "codespace": ""
    }
  }
}
```

"dGVuZGVybWludA==" and "cm9ja3M=" are the base64-encoding of the ASCII of
`dGVuZGVybWludA==` and `cm9ja3M=` are the base64-encoding of the ASCII of
"tendermint" and "rocks" respectively.

## Outro

@@ -1,4 +1,4 @@
master master
v0.33.x v0.33
v0.34.x v0.34
v0.35.x v0.35
master master

64
go.mod
@@ -3,7 +3,7 @@ module github.com/tendermint/tendermint
go 1.17

require (
	github.com/BurntSushi/toml v1.1.0
	github.com/BurntSushi/toml v1.2.0
	github.com/adlio/schema v1.3.3
	github.com/btcsuite/btcd v0.22.1
	github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce
@@ -32,15 +32,15 @@ require (
	golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e
	golang.org/x/net v0.0.0-20220617184016-355a448f1bc9
	golang.org/x/sync v0.0.0-20220513210516-0976fa681c29
	google.golang.org/grpc v1.47.0
	pgregory.net/rapid v0.4.7
	google.golang.org/grpc v1.48.0
	pgregory.net/rapid v0.4.8
)

require (
	github.com/bufbuild/buf v1.4.0
	github.com/creachadair/atomicfile v0.2.6
	github.com/creachadair/taskgroup v0.3.2
	github.com/golangci/golangci-lint v1.46.0
	github.com/golangci/golangci-lint v1.47.2
	github.com/google/go-cmp v0.5.8
	github.com/vektra/mockery/v2 v2.14.0
	gotest.tools v2.2.0+incompatible
@@ -48,17 +48,18 @@ require (

require (
	4d63.com/gochecknoglobals v0.1.0 // indirect
	github.com/Antonboom/errname v0.1.6 // indirect
	github.com/Antonboom/errname v0.1.7 // indirect
	github.com/Antonboom/nilnil v0.1.1 // indirect
	github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
	github.com/DataDog/zstd v1.4.1 // indirect
	github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 // indirect
	github.com/GaijinEntertainment/go-exhaustruct/v2 v2.1.0 // indirect
	github.com/GaijinEntertainment/go-exhaustruct/v2 v2.2.0 // indirect
	github.com/Masterminds/semver v1.5.0 // indirect
	github.com/Microsoft/go-winio v0.5.2 // indirect
	github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect
	github.com/OpenPeeDeeP/depguard v1.1.0 // indirect
	github.com/alexkohler/prealloc v1.0.0 // indirect
	github.com/alingse/asasalint v0.0.10 // indirect
	github.com/ashanbrown/forbidigo v1.3.0 // indirect
	github.com/ashanbrown/makezero v1.1.1 // indirect
	github.com/beorn7/perks v1.0.1 // indirect
@@ -75,7 +76,7 @@ require (
	github.com/chavacava/garif v0.0.0-20220316182200-5cad0b5181d4 // indirect
	github.com/containerd/continuity v0.3.0 // indirect
	github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
	github.com/daixiang0/gci v0.3.3 // indirect
	github.com/daixiang0/gci v0.4.3 // indirect
	github.com/davecgh/go-spew v1.1.1 // indirect
	github.com/denis-tingaikin/go-header v0.4.3 // indirect
	github.com/dgraph-io/badger/v2 v2.2007.2 // indirect
@@ -91,9 +92,9 @@ require (
	github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 // indirect
	github.com/fatih/color v1.13.0 // indirect
	github.com/fatih/structtag v1.2.0 // indirect
	github.com/firefart/nonamedreturns v1.0.1 // indirect
	github.com/firefart/nonamedreturns v1.0.4 // indirect
	github.com/fsnotify/fsnotify v1.5.4 // indirect
	github.com/fzipp/gocyclo v0.5.1 // indirect
	github.com/fzipp/gocyclo v0.6.0 // indirect
	github.com/go-critic/go-critic v0.6.3 // indirect
	github.com/go-toolsmith/astcast v1.0.0 // indirect
	github.com/go-toolsmith/astcopy v1.0.0 // indirect
@@ -126,7 +127,7 @@ require (
	github.com/gotestyourself/gotestyourself v2.2.0+incompatible // indirect
	github.com/hashicorp/errwrap v1.0.0 // indirect
	github.com/hashicorp/go-multierror v1.1.1 // indirect
	github.com/hashicorp/go-version v1.4.0 // indirect
	github.com/hashicorp/go-version v1.6.0 // indirect
	github.com/hashicorp/hcl v1.0.0 // indirect
	github.com/hexops/gotextdiff v1.0.3 // indirect
	github.com/inconshreveable/mousetrap v1.0.0 // indirect
@@ -138,19 +139,19 @@ require (
	github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af // indirect
	github.com/jmhodges/levigo v1.0.0 // indirect
	github.com/julz/importas v0.1.0 // indirect
	github.com/kisielk/errcheck v1.6.0 // indirect
	github.com/kisielk/errcheck v1.6.1 // indirect
	github.com/kisielk/gotool v1.0.0 // indirect
	github.com/klauspost/compress v1.15.1 // indirect
	github.com/klauspost/pgzip v1.2.5 // indirect
	github.com/kulti/thelper v0.6.2 // indirect
	github.com/kunwardeep/paralleltest v1.0.3 // indirect
	github.com/kulti/thelper v0.6.3 // indirect
	github.com/kunwardeep/paralleltest v1.0.6 // indirect
	github.com/kyoh86/exportloopref v0.1.8 // indirect
	github.com/ldez/gomoddirectives v0.2.3 // indirect
	github.com/ldez/tagliatelle v0.3.1 // indirect
	github.com/leonklingele/grouper v1.1.0 // indirect
	github.com/lufeee/execinquery v1.0.0 // indirect
	github.com/lufeee/execinquery v1.2.1 // indirect
	github.com/magiconair/properties v1.8.6 // indirect
	github.com/maratori/testpackage v1.0.1 // indirect
	github.com/maratori/testpackage v1.1.0 // indirect
	github.com/matoous/godox v0.0.0-20210227103229-6504466cf951 // indirect
	github.com/mattn/go-colorable v0.1.12 // indirect
	github.com/mattn/go-isatty v0.0.14 // indirect
@@ -163,7 +164,7 @@ require (
	github.com/moricho/tparallel v0.2.1 // indirect
	github.com/nakabonne/nestif v0.3.1 // indirect
	github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 // indirect
	github.com/nishanths/exhaustive v0.7.11 // indirect
	github.com/nishanths/exhaustive v0.8.1 // indirect
	github.com/nishanths/predeclared v0.2.2 // indirect
	github.com/olekukonko/tablewriter v0.0.5 // indirect
	github.com/opencontainers/go-digest v1.0.0 // indirect
@@ -176,7 +177,7 @@ require (
	github.com/pkg/errors v0.9.1 // indirect
	github.com/pkg/profile v1.6.0 // indirect
	github.com/pmezard/go-difflib v1.0.0 // indirect
	github.com/polyfloyd/go-errorlint v0.0.0-20211125173453-6d6d39c5bb8b // indirect
	github.com/polyfloyd/go-errorlint v1.0.0 // indirect
	github.com/prometheus/procfs v0.7.3 // indirect
	github.com/quasilyte/go-ruleguard v0.3.16-0.20220213074421-6aa060fab41a // indirect
	github.com/quasilyte/gogrep v0.0.0-20220120141003-628d8b3623b5 // indirect
@@ -186,11 +187,12 @@ require (
	github.com/ryancurrah/gomodguard v1.2.3 // indirect
	github.com/ryanrolds/sqlclosecheck v0.3.0 // indirect
	github.com/sanposhiho/wastedassign/v2 v2.0.6 // indirect
	github.com/securego/gosec/v2 v2.11.0 // indirect
	github.com/securego/gosec/v2 v2.12.0 // indirect
	github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c // indirect
	github.com/sirupsen/logrus v1.8.1 // indirect
	github.com/sivchari/containedctx v1.0.2 // indirect
	github.com/sivchari/tenv v1.5.0 // indirect
	github.com/sivchari/nosnakecase v1.5.0 // indirect
	github.com/sivchari/tenv v1.7.0 // indirect
	github.com/sonatard/noctx v0.0.1 // indirect
	github.com/sourcegraph/go-diff v0.6.1 // indirect
	github.com/spf13/afero v1.8.2 // indirect
@@ -206,40 +208,44 @@ require (
	github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c // indirect
	github.com/tetafro/godot v1.4.11 // indirect
	github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144 // indirect
	github.com/tomarrell/wrapcheck/v2 v2.6.1 // indirect
	github.com/tomarrell/wrapcheck/v2 v2.6.2 // indirect
	github.com/tommy-muehle/go-mnd/v2 v2.5.0 // indirect
	github.com/ultraware/funlen v0.0.3 // indirect
	github.com/ultraware/whitespace v0.0.5 // indirect
	github.com/uudashr/gocognit v1.0.5 // indirect
	github.com/uudashr/gocognit v1.0.6 // indirect
	github.com/yagipy/maintidx v1.0.0 // indirect
	github.com/yeya24/promlinter v0.2.0 // indirect
	gitlab.com/bosi/decorder v0.2.1 // indirect
	gitlab.com/bosi/decorder v0.2.2 // indirect
	go.etcd.io/bbolt v1.3.6 // indirect
	go.opencensus.io v0.23.0 // indirect
	go.uber.org/atomic v1.9.0 // indirect
	go.uber.org/multierr v1.8.0 // indirect
	go.uber.org/zap v1.21.0 // indirect
	golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e // indirect
	golang.org/x/exp/typeparams v0.0.0-20220613132600-b0d781184e0d // indirect
	golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
	golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c // indirect
	golang.org/x/sys v0.0.0-20220702020025-31831981b65f // indirect
	golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 // indirect
	golang.org/x/text v0.3.7 // indirect
	golang.org/x/tools v0.1.11 // indirect
	golang.org/x/tools v0.1.12-0.20220628192153-7743d1d949f1 // indirect
	google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd // indirect
	google.golang.org/protobuf v1.28.0 // indirect
	gopkg.in/ini.v1 v1.66.6 // indirect
	gopkg.in/yaml.v2 v2.4.0 // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
	honnef.co/go/tools v0.3.1 // indirect
	honnef.co/go/tools v0.3.2 // indirect
	mvdan.cc/gofumpt v0.3.1 // indirect
	mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed // indirect
	mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b // indirect
	mvdan.cc/unparam v0.0.0-20211214103731-d0ef000c54e5 // indirect
	mvdan.cc/unparam v0.0.0-20220706161116-678bad134442 // indirect
)

require (
	github.com/creachadair/tomledit v0.0.22
	github.com/creachadair/tomledit v0.0.23
	github.com/prometheus/client_model v0.2.0
	github.com/prometheus/common v0.35.0
	github.com/prometheus/common v0.37.0
	github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca
)

retract (
	[v0.35.0,v0.35.9] // See https://github.com/tendermint/tendermint/discussions/9155
)
147
go.sum
@@ -62,8 +62,8 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9
cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
contrib.go.opencensus.io/exporter/stackdriver v0.13.4/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Antonboom/errname v0.1.6 h1:LzIJZlyLOCSu51o3/t2n9Ck7PcoP9wdbrdaW6J8fX24=
github.com/Antonboom/errname v0.1.6/go.mod h1:7lz79JAnuoMNDAWE9MeeIr1/c/VpSUWatBv2FH9NYpI=
github.com/Antonboom/errname v0.1.7 h1:mBBDKvEYwPl4WFFNwec1CZO096G6vzK9vvDQzAwkako=
github.com/Antonboom/errname v0.1.7/go.mod h1:g0ONh16msHIPgJSGsecu1G/dcF2hlYR/0SddnIAGavU=
github.com/Antonboom/nilnil v0.1.1 h1:PHhrh5ANKFWRBh7TdYmyyq2gyT2lotnvFvvFbylF81Q=
github.com/Antonboom/nilnil v0.1.1/go.mod h1:L1jBqoWM7AOeTD+tSquifKSesRHs4ZdaxvZR+xdJEaI=
github.com/Azure/azure-sdk-for-go/sdk/azcore v0.19.0/go.mod h1:h6H6c8enJmmocHUbLiiGY6sx7f9i+X3m1CHdd5c6Rdw=
@@ -74,8 +74,9 @@ github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOEl
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/toml v1.1.0 h1:ksErzDEI1khOiGPgpwuI7x2ebx/uXQNw7xJpn9Eq1+I=
github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/toml v1.2.0 h1:Rt8g24XnyGTyglgET/PRUNlrUeu9F5L+7FilkXfZgs0=
github.com/BurntSushi/toml v1.2.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60=
github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
@@ -84,8 +85,8 @@ github.com/DataDog/zstd v1.4.1 h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM=
github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 h1:sHglBQTwgx+rWPdisA5ynNEsoARbiCBOyGcJM4/OzsM=
github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs=
github.com/GaijinEntertainment/go-exhaustruct/v2 v2.1.0 h1:LAPPhJ4KR5Z8aKVZF5S48csJkxL5RMKmE/98fMs1u5M=
github.com/GaijinEntertainment/go-exhaustruct/v2 v2.1.0/go.mod h1:LGOGuvEgCfCQsy3JF2tRmpGDpzA53iZfyGEWSPwQ6/4=
github.com/GaijinEntertainment/go-exhaustruct/v2 v2.2.0 h1:V9xVvhKbLt7unNEGAruK1xXglyc668Pq3Xx0MNTNqpo=
github.com/GaijinEntertainment/go-exhaustruct/v2 v2.2.0/go.mod h1:n/vLeA7V+QY84iYAGwMkkUUp9ooeuftMEvaDrSVch+Q=
github.com/HdrHistogram/hdrhistogram-go v1.1.0/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo=
github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo=
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
@@ -119,6 +120,8 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRF
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/alexkohler/prealloc v1.0.0 h1:Hbq0/3fJPQhNkN0dR95AVrr6R7tou91y0uHG5pOcUuw=
github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE=
github.com/alingse/asasalint v0.0.10 h1:qqGPDTV0ff0tWHN/nnIlSdjlU/EwRPaUY4SfpE1rnms=
github.com/alingse/asasalint v0.0.10/go.mod h1:nCaoMhw7a9kSJObvQyVzNTPBDbNpdocqrSP7t/cW5+I=
github.com/andybalholm/brotli v1.0.2/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y=
github.com/andybalholm/brotli v1.0.3/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q=
@@ -242,14 +245,14 @@ github.com/creachadair/atomicfile v0.2.6/go.mod h1:BRq8Une6ckFneYXZQ+kO7p1ZZP3I2
github.com/creachadair/command v0.0.0-20220426235536-a748effdf6a1/go.mod h1:bAM+qFQb/KwWyCc9MLC4U1jvn3XyakqP5QRkds5T6cY=
github.com/creachadair/taskgroup v0.3.2 h1:zlfutDS+5XG40AOxcHDSThxKzns8Tnr9jnr6VqkYlkM=
github.com/creachadair/taskgroup v0.3.2/go.mod h1:wieWwecHVzsidg2CsUnFinW1faVN4+kq+TDlRJQ0Wbk=
github.com/creachadair/tomledit v0.0.22 h1:lRtepmrwhzDq+g1gv5ftVn5itgo7CjYbm6abKTToqJ4=
github.com/creachadair/tomledit v0.0.22/go.mod h1:cIu/4x5L855oSRejIqr+WRFh+mv9g4fWLiUFaApYn/Y=
github.com/creachadair/tomledit v0.0.23 h1:ohYJjMsxwzj4dDzKaBWFbWH5J+3LO/8CYnlVY+baBWA=
github.com/creachadair/tomledit v0.0.23/go.mod h1:cIu/4x5L855oSRejIqr+WRFh+mv9g4fWLiUFaApYn/Y=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
github.com/daixiang0/gci v0.3.3 h1:55xJKH7Gl9Vk6oQ1cMkwrDWjAkT1D+D1G9kNmRcAIY4=
github.com/daixiang0/gci v0.3.3/go.mod h1:1Xr2bxnQbDxCqqulUOv8qpGqkgRw9RSCGGjEC2LjF8o=
github.com/daixiang0/gci v0.4.3 h1:wf7x0xRjQqTlA2dzHTI0A/xPyp7VcBatBG9nwGatwbQ=
github.com/daixiang0/gci v0.4.3/go.mod h1:EpVfrztufwVgQRXjnX4zuNinEpLj5OmMjtu/+MB0V0c=
github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -316,8 +319,8 @@ github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4=
github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94=
github.com/firefart/nonamedreturns v1.0.1 h1:fSvcq6ZpK/uBAgJEGMvzErlzyM4NELLqqdTofVjVNag=
github.com/firefart/nonamedreturns v1.0.1/go.mod h1:D3dpIBojGGNh5UfElmwPu73SwDCm+VKhHYqwlNOk2uQ=
github.com/firefart/nonamedreturns v1.0.4 h1:abzI1p7mAEPYuR4A+VLKn4eNDOycjYo2phmY9sfv40Y=
github.com/firefart/nonamedreturns v1.0.4/go.mod h1:TDhe/tjI1BXo48CmYbUduTV7BdIga8MAO/xbKdcVsGI=
github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
@@ -333,8 +336,8 @@ github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5
github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI=
github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
github.com/fullstorydev/grpcurl v1.6.0/go.mod h1:ZQ+ayqbKMJNhzLmbpCiurTVlaK2M/3nqZCxaQ2Ze/sM=
github.com/fzipp/gocyclo v0.5.1 h1:L66amyuYogbxl0j2U+vGqJXusPF2IkduvXLnYD5TFgw=
github.com/fzipp/gocyclo v0.5.1/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA=
github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo=
github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-critic/go-critic v0.6.3 h1:abibh5XYBTASawfTQ0rA7dVtQT+6KzpGqb/J+DxRDaw=
github.com/go-critic/go-critic v0.6.3/go.mod h1:c6b3ZP1MQ7o6lPR7Rv3lEf7pYQUmAcx8ABHgdZCQt/k=
@@ -446,8 +449,8 @@ github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe h1:6RGUuS7EGotKx6
github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe/go.mod h1:gjqyPShc/m8pEMpk0a3SeagVb0kaqvhscv+i9jI5ZhQ=
github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a h1:iR3fYXUjHCR97qWS8ch1y9zPNsgXThGwjKPrYfqMPks=
github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU=
github.com/golangci/golangci-lint v1.46.0 h1:uz9AtEcIP63FH+FIyuAXcQGVQO4vCUavEsMTJpPeD4s=
github.com/golangci/golangci-lint v1.46.0/go.mod h1:IJpcNOUfx/XLRwE95FHQ6QtbhYwwqcm0H5QkwUfF4ZE=
github.com/golangci/golangci-lint v1.47.2 h1:qvMDVv49Hrx3PSEXZ0bD/yhwSbhsOihQjFYCKieegIw=
github.com/golangci/golangci-lint v1.47.2/go.mod h1:lpS2pjBZtRyXewUcOY7yUL3K4KfpoWz072yRN8AuhHg=
github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 h1:MfyDlzVjl1hoaPzPD4Gpb/QgoRfSBR0jdhwGyAWwMSA=
github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg=
github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca h1:kNY3/svz5T29MYHubXix4aDDuE3RWHkPvopM/EDv/MA=
@@ -520,7 +523,7 @@ github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/Oth
github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM=
github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c=
github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
github.com/gookit/color v1.5.0/go.mod h1:43aQb+Zerm/BWh2GnrgOQm7ffz7tvQXEKV6BFMl7wAo=
github.com/gookit/color v1.5.1/go.mod h1:wZFzea4X8qN6vHOSP2apMb4/+w/orMznEzYsIHPaqKM=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU=
github.com/gordonklaus/ineffassign v0.0.0-20210914165742-4cc7213b9bc8 h1:PVRE9d4AQKmbelZ7emNig1+NT27DUmKZn5qXxfio54U=
@@ -589,8 +592,8 @@ github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdv
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go-version v1.4.0 h1:aAQzgqIrRKRa7w75CKpbBxYsmUoPjzVm1W59ca1L0J4=
github.com/hashicorp/go-version v1.4.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
@@ -670,8 +673,8 @@ github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/errcheck v1.6.0 h1:YTDO4pNy7AUN/021p+JGHycQyYNIyMoenM1YDVK6RlY=
github.com/kisielk/errcheck v1.6.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/errcheck v1.6.1 h1:cErYo+J4SmEjdXZrVXGwLJCE2sB06s23LpkcyWNrT+s=
github.com/kisielk/errcheck v1.6.1/go.mod h1:nXw/i/MfnvRHqXa7XXmQMUB0oNFGuBrNI8d8NLy0LPw=
github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
@@ -696,10 +699,10 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kulti/thelper v0.6.2 h1:K4xulKkwOCnT1CDms6Ex3uG1dvSMUUQe9zxgYQgbRXs=
github.com/kulti/thelper v0.6.2/go.mod h1:DsqKShOvP40epevkFrvIwkCMNYxMeTNjdWL4dqWHZ6I=
github.com/kunwardeep/paralleltest v1.0.3 h1:UdKIkImEAXjR1chUWLn+PNXqWUGs//7tzMeWuP7NhmI=
github.com/kunwardeep/paralleltest v1.0.3/go.mod h1:vLydzomDFpk7yu5UX02RmP0H8QfRPOV/oFhWN85Mjb4=
github.com/kulti/thelper v0.6.3 h1:ElhKf+AlItIu+xGnI990no4cE2+XaSu1ULymV2Yulxs=
github.com/kulti/thelper v0.6.3/go.mod h1:DsqKShOvP40epevkFrvIwkCMNYxMeTNjdWL4dqWHZ6I=
github.com/kunwardeep/paralleltest v1.0.6 h1:FCKYMF1OF2+RveWlABsdnmsvJrei5aoyZoaGS+Ugg8g=
github.com/kunwardeep/paralleltest v1.0.6/go.mod h1:Y0Y0XISdZM5IKm3TREQMZ6iteqn1YuwCsJO/0kL9Zes=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/kyoh86/exportloopref v0.1.8 h1:5Ry/at+eFdkX9Vsdw3qU4YkvGtzuVfzT4X7S77LoN/M=
github.com/kyoh86/exportloopref v0.1.8/go.mod h1:1tUcJeiioIs7VWe5gcOObrux3lb66+sBqGZrRkMwPgg=
@@ -719,16 +722,16 @@ github.com/lib/pq v1.10.6 h1:jbk+ZieJ0D7EVGJYpL9QTz7/YW6UHbmdnZWYyK5cdBs=
github.com/lib/pq v1.10.6/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8=
github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg=
github.com/lufeee/execinquery v1.0.0 h1:1XUTuLIVPDlFvUU3LXmmZwHDsolsxXnY67lzhpeqe0I=
github.com/lufeee/execinquery v1.0.0/go.mod h1:EC7DrEKView09ocscGHC+apXMIaorh4xqSxS/dy8SbM=
github.com/lufeee/execinquery v1.2.1 h1:hf0Ems4SHcUGBxpGN7Jz78z1ppVkP/837ZlETPCEtOM=
github.com/lufeee/execinquery v1.2.1/go.mod h1:EC7DrEKView09ocscGHC+apXMIaorh4xqSxS/dy8SbM=
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo=
github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
github.com/maratori/testpackage v1.0.1 h1:QtJ5ZjqapShm0w5DosRjg0PRlSdAdlx+W6cCKoALdbQ=
github.com/maratori/testpackage v1.0.1/go.mod h1:ddKdw+XG0Phzhx8BFDTKgpWP4i7MpApTE5fXSKAqwDU=
github.com/maratori/testpackage v1.1.0 h1:GJY4wlzQhuBusMF1oahQCBtUV/AQ/k69IZ68vxaac2Q=
github.com/maratori/testpackage v1.1.0/go.mod h1:PeAhzU8qkCwdGEMTEupsHJNlQu2gZopMC6RjbhmHeDc=
github.com/matoous/godox v0.0.0-20210227103229-6504466cf951 h1:pWxk9e//NbPwfxat7RXkts09K+dEBJWakUWwICVqYbA=
github.com/matoous/godox v0.0.0-20210227103229-6504466cf951/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s=
github.com/matryer/is v1.4.0 h1:sosSmIWwkYITGrxZ25ULNDeKiMNzFSr4V/eqBQP0PeE=
@@ -822,8 +825,8 @@ github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 h1:4kuARK6Y6Fx
github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
|
||||
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
|
||||
github.com/nishanths/exhaustive v0.7.11 h1:xV/WU3Vdwh5BUH4N06JNUznb6d5zhRPOnlgCrpNYNKA=
|
||||
github.com/nishanths/exhaustive v0.7.11/go.mod h1:gX+MP7DWMKJmNa1HfMozK+u04hQd3na9i0hyqf3/dOI=
|
||||
github.com/nishanths/exhaustive v0.8.1 h1:0QKNascWv9qIHY7zRoZSxeRr6kuk5aAT3YXLTiDmjTo=
|
||||
github.com/nishanths/exhaustive v0.8.1/go.mod h1:qj+zJJUgJ76tR92+25+03oYUhzF4R7/2Wk7fGTfCHmg=
|
||||
github.com/nishanths/predeclared v0.0.0-20190419143655-18a43bb90ffc/go.mod h1:62PewwiQTlm/7Rj+cxVYqZvDIUc+JjZq6GHAC1fsObQ=
|
||||
github.com/nishanths/predeclared v0.2.2 h1:V2EPdZPliZymNAn79T8RkNApBjMmVKh5XRpLm/w98Vk=
|
||||
github.com/nishanths/predeclared v0.2.2/go.mod h1:RROzoN6TnGQupbC+lqggsOlcgysk3LMK/HI84Mp280c=
|
||||
@@ -846,17 +849,17 @@ github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9k
|
||||
github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E=
|
||||
github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
|
||||
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
|
||||
github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
|
||||
github.com/onsi/ginkgo/v2 v2.1.3 h1:e/3Cwtogj0HA+25nMP1jCMDIf8RtRYbGwGGuBIFztkc=
|
||||
github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
|
||||
github.com/onsi/ginkgo/v2 v2.1.4 h1:GNapqRSid3zijZ9H77KrgVG4/8KqiyRsxcSxe+7ApXY=
|
||||
github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU=
|
||||
github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
|
||||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
||||
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
||||
github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY=
|
||||
github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
|
||||
github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE=
|
||||
github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs=
|
||||
github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw=
|
||||
github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
|
||||
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
|
||||
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
|
||||
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
||||
@@ -887,8 +890,6 @@ github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/9
|
||||
github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
|
||||
github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
|
||||
github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
|
||||
github.com/pelletier/go-toml/v2 v2.0.0-beta.8/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo=
|
||||
github.com/pelletier/go-toml/v2 v2.0.0/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo=
|
||||
github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo=
|
||||
github.com/pelletier/go-toml/v2 v2.0.2 h1:+jQXlF3scKIcSEKkdHzXhCTDLPFi5r1wnK6yPS+49Gw=
|
||||
github.com/pelletier/go-toml/v2 v2.0.2/go.mod h1:MovirKjgVRESsAvNZlAjtFwV867yGuwRkXbG66OzopI=
|
||||
@@ -913,8 +914,8 @@ github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qR
|
||||
github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/polyfloyd/go-errorlint v0.0.0-20211125173453-6d6d39c5bb8b h1:/BDyEJWLnDUYKGWdlNx/82qSaVu2bUok/EvPUtIGuvw=
|
||||
github.com/polyfloyd/go-errorlint v0.0.0-20211125173453-6d6d39c5bb8b/go.mod h1:wi9BfjxjF/bwiZ701TzmfKu6UKC357IOAtNr0Td0Lvw=
|
||||
github.com/polyfloyd/go-errorlint v1.0.0 h1:pDrQG0lrh68e602Wfp68BlUTRFoHn8PZYAjLgt2LFsM=
|
||||
github.com/polyfloyd/go-errorlint v1.0.0/go.mod h1:KZy4xxPJyy88/gldCe5OdW6OQRtNO3EZE7hXzmnebgA=
|
||||
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
|
||||
github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
|
||||
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
|
||||
@@ -938,8 +939,8 @@ github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB8
|
||||
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
|
||||
github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
|
||||
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
|
||||
github.com/prometheus/common v0.35.0 h1:Eyr+Pw2VymWejHqCugNaQXkAi6KayVNxaHeu6khmFBE=
|
||||
github.com/prometheus/common v0.35.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
|
||||
github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
|
||||
github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
|
||||
@@ -954,7 +955,7 @@ github.com/quasilyte/go-ruleguard v0.3.16-0.20220213074421-6aa060fab41a h1:sWFav
|
||||
github.com/quasilyte/go-ruleguard v0.3.16-0.20220213074421-6aa060fab41a/go.mod h1:VMX+OnnSw4LicdiEGtRSD/1X8kW7GuEscjYNr4cOIT4=
|
||||
github.com/quasilyte/go-ruleguard/dsl v0.3.0/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU=
|
||||
github.com/quasilyte/go-ruleguard/dsl v0.3.16/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU=
|
||||
github.com/quasilyte/go-ruleguard/dsl v0.3.19/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU=
|
||||
github.com/quasilyte/go-ruleguard/dsl v0.3.21/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU=
|
||||
github.com/quasilyte/go-ruleguard/rules v0.0.0-20201231183845-9e62ed36efe1/go.mod h1:7JTjp89EGyU1d6XfBiXihJNG37wB2VRkd125Q1u7Plc=
|
||||
github.com/quasilyte/go-ruleguard/rules v0.0.0-20211022131956-028d6511ab71/go.mod h1:4cgAphtvu7Ftv7vOT2ZOYhC6CvBxZixcasr8qIOTA50=
|
||||
github.com/quasilyte/gogrep v0.0.0-20220120141003-628d8b3623b5 h1:PDWGei+Rf2bBiuZIbZmM20J2ftEy9IeUCHA8HbQqed8=
|
||||
@@ -990,19 +991,18 @@ github.com/ryanrolds/sqlclosecheck v0.3.0 h1:AZx+Bixh8zdUBxUA1NxbxVAS78vTPq4rCb8
|
||||
github.com/ryanrolds/sqlclosecheck v0.3.0/go.mod h1:1gREqxyTGR3lVtpngyFo3hZAgk0KCtEdgEkHwDbigdA=
|
||||
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
||||
github.com/sagikazarmark/crypt v0.3.0/go.mod h1:uD/D+6UF4SrIR1uGEv7bBNkNqLGqUr43MRiaGWX1Nig=
|
||||
github.com/sagikazarmark/crypt v0.5.0/go.mod h1:l+nzl7KWh51rpzp2h7t4MZWyiEWdhNpOAnclKvg+mdA=
|
||||
github.com/sagikazarmark/crypt v0.6.0/go.mod h1:U8+INwJo3nBv1m6A/8OBXAq7Jnpspk5AxSgDyEQcea8=
|
||||
github.com/sanposhiho/wastedassign/v2 v2.0.6 h1:+6/hQIHKNJAUixEj6EmOngGIisyeI+T3335lYTyxRoA=
|
||||
github.com/sanposhiho/wastedassign/v2 v2.0.6/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI=
|
||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
|
||||
github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
|
||||
github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
|
||||
github.com/securego/gosec/v2 v2.11.0 h1:+PDkpzR41OI2jrw1q6AdXZCbsNGNGT7pQjal0H0cArI=
|
||||
github.com/securego/gosec/v2 v2.11.0/go.mod h1:SX8bptShuG8reGC0XS09+a4H2BoWSJi+fscA+Pulbpo=
|
||||
github.com/securego/gosec/v2 v2.12.0 h1:CQWdW7ATFpvLSohMVsajscfyHJ5rsGmEXmsNcsDNmAg=
|
||||
github.com/securego/gosec/v2 v2.12.0/go.mod h1:iTpT+eKTw59bSgklBHlSnH5O2tNygHMDxfvMubA4i7I=
|
||||
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
|
||||
github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c h1:W65qqJCIOVP4jpqPQ0YvHYKwcMEMVWIzWC5iNQQfBTU=
|
||||
github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs=
|
||||
github.com/shirou/gopsutil/v3 v3.22.4/go.mod h1:D01hZJ4pVHPpCTZ3m3T2+wDF2YAGfd+H4ifUguaQzHM=
|
||||
github.com/shirou/gopsutil/v3 v3.22.6/go.mod h1:EdIubSnZhbAvBS1yJ7Xi+AShB/hxwLHOMz4MCYz7yMs=
|
||||
github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
|
||||
github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||
@@ -1014,8 +1014,10 @@ github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE
|
||||
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
||||
github.com/sivchari/containedctx v1.0.2 h1:0hLQKpgC53OVF1VT7CeoFHk9YKstur1XOgfYIc1yrHI=
|
||||
github.com/sivchari/containedctx v1.0.2/go.mod h1:PwZOeqm4/DLoJOqMSIJs3aKqXRX4YO+uXww087KZ7Bw=
|
||||
github.com/sivchari/tenv v1.5.0 h1:wxW0mFpKI6DIb3s6m1jCDYvkWXCskrimXMuGd0K/kSQ=
|
||||
github.com/sivchari/tenv v1.5.0/go.mod h1:64yStXKSOxDfX47NlhVwND4dHwfZDdbp2Lyl018Icvg=
|
||||
github.com/sivchari/nosnakecase v1.5.0 h1:ZBvAu1H3uteN0KQ0IsLpIFOwYgPEhKLyv2ahrVkub6M=
|
||||
github.com/sivchari/nosnakecase v1.5.0/go.mod h1:CwDzrzPea40/GB6uynrNLiorAlgFRvRbFSgJx2Gs+QY=
|
||||
github.com/sivchari/tenv v1.7.0 h1:d4laZMBK6jpe5PWepxlV9S+LC0yXqvYHiq8E6ceoVVE=
|
||||
github.com/sivchari/tenv v1.7.0/go.mod h1:64yStXKSOxDfX47NlhVwND4dHwfZDdbp2Lyl018Icvg=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||
github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa h1:YJfZp12Z3AFhSBeXOlv4BO55RMwPn2NoQeDsrdWnBtY=
|
||||
@@ -1053,7 +1055,6 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
|
||||
github.com/spf13/viper v1.10.0/go.mod h1:SoyBPwAtKDzypXNDFKN5kzH7ppppbGZtls1UpIy5AsM=
|
||||
github.com/spf13/viper v1.11.0/go.mod h1:djo0X/bA5+tYVoCn+C7cAYJGcVn/qYLFTG8gdUsX7Zk=
|
||||
github.com/spf13/viper v1.12.0 h1:CZ7eSOd3kZoaYDLbXnmzgQI5RlciuXBMA+18HwHRfZQ=
|
||||
github.com/spf13/viper v1.12.0/go.mod h1:b6COn30jlNxbm/V2IqWiNWkJ+vZNiMNksliPCiuKtSI=
|
||||
github.com/ssgreg/nlreturn/v2 v2.2.1 h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YEwQ0=
|
||||
@@ -1077,6 +1078,7 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
|
||||
github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
|
||||
@@ -1107,8 +1109,8 @@ github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hM
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20200427203606-3cfed13b9966/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/tomarrell/wrapcheck/v2 v2.6.1 h1:Cf4a/iwuMp9s7kKrh74GTgijRVim0wEpKjgAsT7Wctw=
|
||||
github.com/tomarrell/wrapcheck/v2 v2.6.1/go.mod h1:Eo+Opt6pyMW1b6cNllOcDSSoHO0aTJ+iF6BfCUbHltA=
|
||||
github.com/tomarrell/wrapcheck/v2 v2.6.2 h1:3dI6YNcrJTQ/CJQ6M/DUkc0gnqYSIk6o0rChn9E/D0M=
|
||||
github.com/tomarrell/wrapcheck/v2 v2.6.2/go.mod h1:ao7l5p0aOlUNJKI0qVwB4Yjlqutd0IvAB9Rdwyilxvg=
|
||||
github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4=
|
||||
github.com/tommy-muehle/go-mnd/v2 v2.5.0 h1:iAj0a8e6+dXSL7Liq0aXPox36FiN1dBbjA6lt9fl65s=
|
||||
github.com/tommy-muehle/go-mnd/v2 v2.5.0/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw=
|
||||
@@ -1120,8 +1122,8 @@ github.com/ultraware/whitespace v0.0.5 h1:hh+/cpIcopyMYbZNVov9iSxvJU3OYQg78Sfaqz
|
||||
github.com/ultraware/whitespace v0.0.5/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA=
|
||||
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
|
||||
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
||||
github.com/uudashr/gocognit v1.0.5 h1:rrSex7oHr3/pPLQ0xoWq108XMU8s678FJcQ+aSfOHa4=
|
||||
github.com/uudashr/gocognit v1.0.5/go.mod h1:wgYz0mitoKOTysqxTDMOUXg+Jb5SvtihkfmugIZYpEA=
|
||||
github.com/uudashr/gocognit v1.0.6 h1:2Cgi6MweCsdB6kpcVQp7EW4U23iBFQWfTXiWlyp842Y=
|
||||
github.com/uudashr/gocognit v1.0.6/go.mod h1:nAIUuVBnYU7pcninia3BHOvQkpQCeO76Uscky5BOwcY=
|
||||
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
|
||||
github.com/valyala/fasthttp v1.30.0/go.mod h1:2rsYD01CKFrjjsvFxx75KlEUNpWNBY9JWD3K/7o2Cus=
|
||||
github.com/valyala/quicktemplate v1.7.0/go.mod h1:sqKJnoaOF88V07vkO+9FL8fb9uZg/VPSJnLYn+LmLk8=
|
||||
@@ -1156,8 +1158,8 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1
|
||||
github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||
github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
|
||||
gitlab.com/bosi/decorder v0.2.1 h1:ehqZe8hI4w7O4b1vgsDZw1YU1PE7iJXrQWFMsocbQ1w=
|
||||
gitlab.com/bosi/decorder v0.2.1/go.mod h1:6C/nhLSbF6qZbYD8bRmISBwc6vcWdNsiIBkRvjJFrH0=
|
||||
gitlab.com/bosi/decorder v0.2.2 h1:LRfb3lP6mZWjUzpMOCLTVjcnl/SqZWBWmKNqQvMocQs=
|
||||
gitlab.com/bosi/decorder v0.2.2/go.mod h1:9K1RB5+VPNQYtXtTDAzd2OEftsZb1oV0IrJrzChSdGE=
|
||||
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
|
||||
go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU=
|
||||
@@ -1165,15 +1167,12 @@ go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
|
||||
go.etcd.io/etcd v0.0.0-20200513171258-e048e166ab9c/go.mod h1:xCI7ZzBfRuGgBXyXO6yfWfDmlWd35khcWpUa4L0xI/k=
|
||||
go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
|
||||
go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
|
||||
go.etcd.io/etcd/api/v3 v3.5.2/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A=
|
||||
go.etcd.io/etcd/api/v3 v3.5.4/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A=
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.2/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.4/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
|
||||
go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
|
||||
go.etcd.io/etcd/client/v2 v2.305.1/go.mod h1:pMEacxZW7o8pg4CrFE7pquyCJJzZvkvdD2RibOCCCGs=
|
||||
go.etcd.io/etcd/client/v2 v2.305.2/go.mod h1:2D7ZejHVMIfog1221iLSYlQRzrtECw3kz4I4VAQm3qI=
|
||||
go.etcd.io/etcd/client/v2 v2.305.4/go.mod h1:Ud+VUwIi9/uQHOMA+4ekToJ12lTxlv0zB/+DHwTGEbU=
|
||||
go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0=
|
||||
go.etcd.io/etcd/client/v3 v3.5.4/go.mod h1:ZaRkVgBZC+L+dLCjTcF1hRXpgZXQPOvnA/Ak/gq3kiY=
|
||||
@@ -1235,7 +1234,6 @@ golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5y
|
||||
golang.org/x/crypto v0.0.0-20210915214749-c084706c2272/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20220313003712-b769efc7c000/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e h1:T8NU3HyQ8ClP4SEE+KbFlg6n0NhuTsN4MyznaarGsZM=
|
||||
golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
@@ -1254,8 +1252,9 @@ golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EH
|
||||
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
||||
golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5 h1:FR+oGxGfbQu1d+jglI3rCkjAjUnhRSZcUxr+DqlDLNo=
|
||||
golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw=
|
||||
golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e h1:qyrTQ++p1afMkO4DPEeLGq/3oTsdlvdH4vqZUBWzUKM=
|
||||
golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
|
||||
golang.org/x/exp/typeparams v0.0.0-20220613132600-b0d781184e0d h1:+W8Qf4iJtMGKkyAygcKohjxTk4JPsL9DpzApJ22m5Ic=
|
||||
golang.org/x/exp/typeparams v0.0.0-20220613132600-b0d781184e0d/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
|
||||
golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
@@ -1488,7 +1487,6 @@ golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211213223007-03aa0b5f6827/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
@@ -1496,16 +1494,17 @@ golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220403020550-483a9cbc67c0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220405210540-1e041c57c461/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220406163625-3f8b81556e12/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c h1:aFV+BgZ4svzjfabn8ERpuB4JI4N6/rdy1iusx77G3oU=
|
||||
golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220702020025-31831981b65f h1:xdsejrW/0Wf2diT5CPp3XmKUNbr7Xvw8kYilQ+6qjRY=
|
||||
golang.org/x/sys v0.0.0-20220702020025-31831981b65f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
@@ -1584,7 +1583,6 @@ golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWc
|
||||
golang.org/x/tools v0.0.0-20200329025819-fd4102a86c65/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
|
||||
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
|
||||
golang.org/x/tools v0.0.0-20200414032229-332987a829c3/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200422022333-3d57cf2e726e/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200426102838-f3a5411a4c3b/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
@@ -1630,9 +1628,10 @@ golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
|
||||
golang.org/x/tools v0.1.9-0.20211228192929-ee1ca4ffc4da/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
|
||||
golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
|
||||
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
|
||||
golang.org/x/tools v0.1.11-0.20220316014157-77aa08bb151a/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
|
||||
golang.org/x/tools v0.1.11 h1:loJ25fNOEhSXfHrpoGj91eCUThwdNX6u24rO1xnNteY=
|
||||
golang.org/x/tools v0.1.11-0.20220513221640-090b14e8501f/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4=
|
||||
golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4=
|
||||
golang.org/x/tools v0.1.12-0.20220628192153-7743d1d949f1 h1:NHLFZ56qCjD+0hYY3kE5Wl40Z7q4Gn9Ln/7YU0lsGko=
|
||||
golang.org/x/tools v0.1.12-0.20220628192153-7743d1d949f1/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
@@ -1815,8 +1814,8 @@ google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ5
|
||||
google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
|
||||
google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
|
||||
google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
|
||||
google.golang.org/grpc v1.47.0 h1:9n77onPX5F3qfFCqjy9dhn8PbNQsIKeVU04J9G7umt8=
|
||||
google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
|
||||
google.golang.org/grpc v1.48.0 h1:rQOsyJ/8+ufEDJd/Gdsz7HG220Mh9HAhFHRGnIjda0w=
|
||||
google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
|
||||
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
@@ -1881,18 +1880,18 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
|
||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
honnef.co/go/tools v0.3.1 h1:1kJlrWJLkaGXgcaeosRXViwviqjI7nkBvU2+sZW0AYc=
|
||||
honnef.co/go/tools v0.3.1/go.mod h1:vlRD9XErLMGT+mDuofSr0mMMquscM/1nQqtRSsh6m70=
|
||||
honnef.co/go/tools v0.3.2 h1:ytYb4rOqyp1TSa2EPvNVwtPQJctSELKaMyLfqNP4+34=
|
||||
honnef.co/go/tools v0.3.2/go.mod h1:jzwdWgg7Jdq75wlfblQxO4neNaFFSvgc1tD5Wv8U0Yw=
|
||||
mvdan.cc/gofumpt v0.3.1 h1:avhhrOmv0IuvQVK7fvwV91oFSGAk5/6Po8GXTzICeu8=
|
||||
mvdan.cc/gofumpt v0.3.1/go.mod h1:w3ymliuxvzVx8DAutBnVyDqYb1Niy/yCJt/lk821YCE=
|
||||
mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed h1:WX1yoOaKQfddO/mLzdV4wptyWgoH/6hwLs7QHTixo0I=
|
||||
mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc=
|
||||
mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b h1:DxJ5nJdkhDlLok9K6qO+5290kphDJbHOQO1DFFFTeBo=
|
||||
mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4=
|
||||
mvdan.cc/unparam v0.0.0-20211214103731-d0ef000c54e5 h1:Jh3LAeMt1eGpxomyu3jVkmVZWW2MxZ1qIIV2TZ/nRio=
|
||||
mvdan.cc/unparam v0.0.0-20211214103731-d0ef000c54e5/go.mod h1:b8RRCBm0eeiWR8cfN88xeq2G5SG3VKGO+5UPWi5FSOY=
|
||||
pgregory.net/rapid v0.4.7 h1:MTNRktPuv5FNqOO151TM9mDTa+XHcX6ypYeISDVD14g=
|
||||
pgregory.net/rapid v0.4.7/go.mod h1:UYpPVyjFHzYBGHIxLFoupi8vwk6rXNzRY9OMvVxFIOU=
|
||||
mvdan.cc/unparam v0.0.0-20220706161116-678bad134442 h1:seuXWbRB1qPrS3NQnHmFKLJLtskWyueeIzmLXghMGgk=
|
||||
mvdan.cc/unparam v0.0.0-20220706161116-678bad134442/go.mod h1:F/Cxw/6mVrNKqrR2YjFf5CaW0Bw4RL8RfbEf4GRggJk=
|
||||
pgregory.net/rapid v0.4.8 h1:d+5SGZWUbJPbl3ss6tmPFqnNeQR6VDOFly+eTjwPiEw=
|
||||
pgregory.net/rapid v0.4.8/go.mod h1:Z5PbWqjvWR1I3UGjvboUuan4fe4ZYEYNLNQLExzCoUs=
|
||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
|
||||
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
||||
|
||||
@@ -135,7 +135,7 @@ func (r *Reactor) OnStart(ctx context.Context) error {
if err != nil {
return err
}
r.chCreator = func(context.Context, *conn.ChannelDescriptor) (*p2p.Channel, error) { return blockSyncCh, nil }
r.chCreator = func(context.Context, *conn.ChannelDescriptor) (p2p.Channel, error) { return blockSyncCh, nil }

state, err := r.stateStore.Load()
if err != nil {
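The recurring change in this comparison is `*p2p.Channel` becoming `p2p.Channel`: the concrete channel struct turns into an interface, so reactors and tests can be handed any implementation through a creator closure like the one above. A minimal sketch of the idea, with simplified stand-in types rather than the real tendermint API:

package main

import (
	"context"
	"fmt"
)

// Envelope is a stand-in for p2p.Envelope (assumption: simplified).
type Envelope struct{ Message string }

// Channel as an interface lets callers inject fakes without building a
// connection-backed channel.
type Channel interface {
	Send(ctx context.Context, e Envelope) error
}

// ChannelCreator mirrors the chCreator closures in the diff: callers ask
// for a channel and get back whatever the creator decided to build.
type ChannelCreator func(ctx context.Context) (Channel, error)

type fakeChannel struct{ sent []Envelope }

func (f *fakeChannel) Send(_ context.Context, e Envelope) error {
	f.sent = append(f.sent, e)
	return nil
}

func main() {
	fake := &fakeChannel{}
	creator := ChannelCreator(func(context.Context) (Channel, error) { return fake, nil })

	ch, _ := creator(context.Background())
	_ = ch.Send(context.Background(), Envelope{Message: "status"})
	fmt.Println(len(fake.sent)) // 1
}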
@@ -183,7 +183,7 @@ func (r *Reactor) OnStop() {

// respondToPeer loads a block and sends it to the requesting peer, if we have it.
// Otherwise, we'll respond saying we do not have it.
func (r *Reactor) respondToPeer(ctx context.Context, msg *bcproto.BlockRequest, peerID types.NodeID, blockSyncCh *p2p.Channel) error {
func (r *Reactor) respondToPeer(ctx context.Context, msg *bcproto.BlockRequest, peerID types.NodeID, blockSyncCh p2p.Channel) error {
block := r.store.LoadBlock(msg.Height)
if block == nil {
r.logger.Info("peer requesting a block we do not have", "peer", peerID, "height", msg.Height)
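The doc comment captures the whole protocol: send the block if the local store has it, otherwise send an explicit negative reply so the requester is not left waiting. A small sketch of that shape, with the store and send function as simplified stand-ins for the real bcproto types:

package main

import (
	"context"
	"fmt"
)

// respondToPeer sketches the load-or-decline shape described above.
func respondToPeer(ctx context.Context, store map[int64]string, height int64, send func(context.Context, string) error) error {
	if block, ok := store[height]; ok {
		return send(ctx, block)
	}
	// No block at that height: reply explicitly instead of staying silent.
	return send(ctx, "NoBlockResponse height="+fmt.Sprint(height))
}

func main() {
	store := map[int64]string{10: "block@10"}
	send := func(_ context.Context, msg string) error {
		fmt.Println("->", msg)
		return nil
	}
	ctx := context.Background()
	_ = respondToPeer(ctx, store, 10, send)
	_ = respondToPeer(ctx, store, 11, send)
}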
@@ -223,7 +223,7 @@ func (r *Reactor) respondToPeer(ctx context.Context, msg *bcproto.BlockRequest,
// handleMessage handles an Envelope sent from a peer on a specific p2p Channel.
// It will handle errors and any possible panics gracefully. A caller can handle
// any error returned by sending a PeerError on the respective channel.
func (r *Reactor) handleMessage(ctx context.Context, envelope *p2p.Envelope, blockSyncCh *p2p.Channel) (err error) {
func (r *Reactor) handleMessage(ctx context.Context, envelope *p2p.Envelope, blockSyncCh p2p.Channel) (err error) {
defer func() {
if e := recover(); e != nil {
err = fmt.Errorf("panic in processing message: %v", e)
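The defer/recover above is the standard Go trick for the "handle any possible panics gracefully" promise in the comment: a panic raised while decoding a peer's message is converted into an ordinary error through the named return value, so one malformed envelope cannot crash the reactor. A self-contained illustration of the same pattern:

package main

import "fmt"

// handle converts a panic inside process into a returned error via the
// named return value, exactly the shape used by handleMessage above.
func handle(process func() error) (err error) {
	defer func() {
		if e := recover(); e != nil {
			err = fmt.Errorf("panic in processing message: %v", e)
		}
	}()
	return process()
}

func main() {
	err := handle(func() error { panic("malformed envelope") })
	fmt.Println(err) // panic in processing message: malformed envelope
}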
@@ -298,7 +298,7 @@ func (r *Reactor) handleMessage(ctx context.Context, envelope *p2p.Envelope, blo
// message execution will result in a PeerError being sent on the BlockSyncChannel.
// When the reactor is stopped, we will catch the signal and close the p2p Channel
// gracefully.
func (r *Reactor) processBlockSyncCh(ctx context.Context, blockSyncCh *p2p.Channel) {
func (r *Reactor) processBlockSyncCh(ctx context.Context, blockSyncCh p2p.Channel) {
iter := blockSyncCh.Receive(ctx)
for iter.Next(ctx) {
envelope := iter.Envelope()
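The Receive/Next/Envelope loop is an iterator-style consumption pattern: Next blocks until a message arrives or the context is done, and Envelope yields the current message. A toy iterator with the same contract, as a sketch only (the real p2p iterator is channel- and router-backed):

package main

import (
	"context"
	"fmt"
)

// iterator mimics the Receive/Next/Envelope shape used above.
type iterator struct {
	ch  chan string
	cur string
}

// Next blocks until a message arrives, the channel closes, or ctx ends.
func (it *iterator) Next(ctx context.Context) bool {
	select {
	case <-ctx.Done():
		return false
	case m, ok := <-it.ch:
		it.cur = m
		return ok
	}
}

func (it *iterator) Envelope() string { return it.cur }

func main() {
	it := &iterator{ch: make(chan string, 1)}
	it.ch <- "block request"
	close(it.ch)

	ctx := context.Background()
	for it.Next(ctx) {
		fmt.Println(it.Envelope())
	}
}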
@@ -319,7 +319,7 @@ func (r *Reactor) processBlockSyncCh(ctx context.Context, blockSyncCh *p2p.Chann
}

// processPeerUpdate processes a PeerUpdate.
func (r *Reactor) processPeerUpdate(ctx context.Context, peerUpdate p2p.PeerUpdate, blockSyncCh *p2p.Channel) {
func (r *Reactor) processPeerUpdate(ctx context.Context, peerUpdate p2p.PeerUpdate, blockSyncCh p2p.Channel) {
r.logger.Debug("received peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status)

// XXX: Pool#RedoRequest can sometimes give us an empty peer.
@@ -354,7 +354,7 @@ func (r *Reactor) processPeerUpdate(ctx context.Context, peerUpdate p2p.PeerUpda
// processPeerUpdates initiates a blocking process where we listen for and handle
// PeerUpdate messages. When the reactor is stopped, we will catch the signal and
// close the p2p PeerUpdatesCh gracefully.
func (r *Reactor) processPeerUpdates(ctx context.Context, peerUpdates *p2p.PeerUpdates, blockSyncCh *p2p.Channel) {
func (r *Reactor) processPeerUpdates(ctx context.Context, peerUpdates *p2p.PeerUpdates, blockSyncCh p2p.Channel) {
for {
select {
case <-ctx.Done():
@@ -396,7 +396,7 @@ func (r *Reactor) SwitchToBlockSync(ctx context.Context, state sm.State) error {
return nil
}

func (r *Reactor) requestRoutine(ctx context.Context, blockSyncCh *p2p.Channel) {
func (r *Reactor) requestRoutine(ctx context.Context, blockSyncCh p2p.Channel) {
statusUpdateTicker := time.NewTicker(statusUpdateIntervalSeconds * time.Second)
defer statusUpdateTicker.Stop()

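requestRoutine's skeleton is a ticker-driven loop that keeps broadcasting status requests until the context is canceled; the deferred Stop releases the ticker's timer when the goroutine exits. A runnable sketch of that skeleton, with the broadcast body stubbed out:

package main

import (
	"context"
	"fmt"
	"time"
)

// statusLoop sketches the requestRoutine shape: periodic work on a
// ticker, clean exit on context cancellation.
func statusLoop(ctx context.Context, interval time.Duration, broadcast func()) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			broadcast()
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 35*time.Millisecond)
	defer cancel()
	statusLoop(ctx, 10*time.Millisecond, func() { fmt.Println("status update") })
}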
@@ -438,7 +438,7 @@ func (r *Reactor) requestRoutine(ctx context.Context, blockSyncCh *p2p.Channel)
// do.
//
// NOTE: Don't sleep in the FOR_LOOP or otherwise slow it down!
func (r *Reactor) poolRoutine(ctx context.Context, stateSynced bool, blockSyncCh *p2p.Channel) {
func (r *Reactor) poolRoutine(ctx context.Context, stateSynced bool, blockSyncCh p2p.Channel) {
var (
trySyncTicker = time.NewTicker(trySyncIntervalMS * time.Millisecond)
switchToConsensusTicker = time.NewTicker(switchToConsensusIntervalSeconds * time.Second)
@@ -469,12 +469,10 @@ func (r *Reactor) poolRoutine(ctx context.Context, stateSynced bool, blockSyncCh
lastAdvance = r.pool.LastAdvance()
)

r.logger.Debug(
"consensus ticker",
r.logger.Debug("consensus ticker",
"num_pending", numPending,
"total", lenRequesters,
"height", height,
)
"height", height)

switch {


@@ -37,7 +37,7 @@ type reactorTestSuite struct {
reactors map[types.NodeID]*Reactor
app map[types.NodeID]abciclient.Client

blockSyncChannels map[types.NodeID]*p2p.Channel
blockSyncChannels map[types.NodeID]p2p.Channel
peerChans map[types.NodeID]chan p2p.PeerUpdate
peerUpdates map[types.NodeID]*p2p.PeerUpdates
}
@@ -64,7 +64,7 @@ func setup(
nodes: make([]types.NodeID, 0, numNodes),
reactors: make(map[types.NodeID]*Reactor, numNodes),
app: make(map[types.NodeID]abciclient.Client, numNodes),
blockSyncChannels: make(map[types.NodeID]*p2p.Channel, numNodes),
blockSyncChannels: make(map[types.NodeID]p2p.Channel, numNodes),
peerChans: make(map[types.NodeID]chan p2p.PeerUpdate, numNodes),
peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, numNodes),
}
@@ -177,7 +177,7 @@ func (rts *reactorTestSuite) addNode(
rts.peerUpdates[nodeID] = p2p.NewPeerUpdates(rts.peerChans[nodeID], 1)
rts.network.Nodes[nodeID].PeerManager.Register(ctx, rts.peerUpdates[nodeID])

chCreator := func(ctx context.Context, chdesc *p2p.ChannelDescriptor) (*p2p.Channel, error) {
chCreator := func(ctx context.Context, chdesc *p2p.ChannelDescriptor) (p2p.Channel, error) {
return rts.blockSyncChannels[nodeID], nil
}


@@ -33,6 +33,10 @@ import (
// Byzantine node sends two different prevotes (nil and blockID) to the same
// validator.
func TestByzantinePrevoteEquivocation(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode")
}

// empirically, this test either passes in <1s or hits some
// kind of deadlock and hit the larger timeout. This timeout
// can be extended a bunch if needed, but it's good to avoid

@@ -107,7 +107,7 @@ func invalidDoPrevoteFunc(
round int32,
cs *State,
r *Reactor,
voteCh *p2p.Channel,
voteCh p2p.Channel,
pv types.PrivValidator,
) {
// routine to:

@@ -214,12 +214,13 @@ func TestMempoolRmBadTx(t *testing.T) {
emptyMempoolCh := make(chan struct{})
checkTxRespCh := make(chan struct{})
go func() {
// Try to send the tx through the mempool.
// Try to send an out-of-sequence transaction through the mempool.
// CheckTx should not err, but the app should return a bad abci code
// and the tx should get removed from the pool
binary.BigEndian.PutUint64(txBytes, uint64(5))
err := assertMempool(t, cs.txNotifier).CheckTx(ctx, txBytes, func(r *abci.ResponseCheckTx) {
if r.Code != code.CodeTypeBadNonce {
t.Errorf("expected checktx to return bad nonce, got %v", r)
t.Errorf("expected checktx to return bad nonce, got %#v", r)
return
}
checkTxRespCh <- struct{}{}
@@ -312,14 +313,15 @@ func (app *CounterApplication) FinalizeBlock(_ context.Context, req *abci.Reques
func (app *CounterApplication) CheckTx(_ context.Context, req *abci.RequestCheckTx) (*abci.ResponseCheckTx, error) {
app.mu.Lock()
defer app.mu.Unlock()

txValue := txAsUint64(req.Tx)
if txValue != uint64(app.mempoolTxCount) {
return &abci.ResponseCheckTx{
Code: code.CodeTypeBadNonce,
}, nil
if req.Type == abci.CheckTxType_New {
txValue := txAsUint64(req.Tx)
if txValue != uint64(app.mempoolTxCount) {
return &abci.ResponseCheckTx{
Code: code.CodeTypeBadNonce,
}, nil
}
app.mempoolTxCount++
}
app.mempoolTxCount++
return &abci.ResponseCheckTx{Code: code.CodeTypeOK}, nil
}

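The fix above makes the counter app distinguish a brand-new transaction from a mempool recheck: the nonce test and counter increment now run only for `CheckTxType_New`, so rechecking txs already admitted to the pool no longer double-counts them. A compact, self-contained restatement of that logic using simplified stand-in types (not the real abci package):

package main

import "fmt"

type checkTxType int

const (
	checkTxTypeNew checkTxType = iota
	checkTxTypeRecheck
)

type app struct{ mempoolTxCount uint64 }

// checkTx mirrors the fixed CounterApplication logic: nonce validation
// and the counter increment apply only to new transactions; rechecks of
// txs already in the mempool pass through unchanged.
func (a *app) checkTx(txValue uint64, typ checkTxType) string {
	if typ == checkTxTypeNew {
		if txValue != a.mempoolTxCount {
			return "bad nonce"
		}
		a.mempoolTxCount++
	}
	return "ok"
}

func main() {
	a := &app{}
	fmt.Println(a.checkTx(0, checkTxTypeNew))     // ok
	fmt.Println(a.checkTx(0, checkTxTypeRecheck)) // ok: recheck skips the nonce test
	fmt.Println(a.checkTx(5, checkTxTypeNew))     // bad nonce (expected 1)
}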
@@ -332,8 +334,6 @@ func txAsUint64(tx []byte) uint64 {
func (app *CounterApplication) Commit(context.Context) (*abci.ResponseCommit, error) {
app.mu.Lock()
defer app.mu.Unlock()

app.mempoolTxCount = app.txCount
return &abci.ResponseCommit{}, nil
}


@@ -10,6 +10,7 @@ import (

cstypes "github.com/tendermint/tendermint/internal/consensus/types"
"github.com/tendermint/tendermint/internal/eventbus"
tmstrings "github.com/tendermint/tendermint/internal/libs/strings"
"github.com/tendermint/tendermint/internal/p2p"
sm "github.com/tendermint/tendermint/internal/state"
"github.com/tendermint/tendermint/libs/bits"
@@ -165,10 +166,10 @@ func NewReactor(
}

type channelBundle struct {
state *p2p.Channel
data *p2p.Channel
vote *p2p.Channel
votSet *p2p.Channel
state p2p.Channel
data p2p.Channel
vote p2p.Channel
votSet p2p.Channel
}

// OnStart starts separate go routines for each p2p Channel and listens for
@@ -310,14 +311,14 @@ func (r *Reactor) GetPeerState(peerID types.NodeID) (*PeerState, bool) {
return ps, ok
}

func (r *Reactor) broadcastNewRoundStepMessage(ctx context.Context, rs *cstypes.RoundState, stateCh *p2p.Channel) error {
func (r *Reactor) broadcastNewRoundStepMessage(ctx context.Context, rs *cstypes.RoundState, stateCh p2p.Channel) error {
return stateCh.Send(ctx, p2p.Envelope{
Broadcast: true,
Message: makeRoundStepMessage(rs),
})
}

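These broadcast helpers all follow one pattern: instead of addressing an envelope to a single peer with To, they set Broadcast and let the router fan the message out to every connected peer. A minimal sketch of that envelope convention, with stand-in types in place of the real p2p package:

package main

import (
	"context"
	"fmt"
)

// envelope is a stand-in for p2p.Envelope: Broadcast replaces a concrete
// To address and asks the router to deliver to all peers.
type envelope struct {
	Broadcast bool
	To        string
	Message   string
}

func send(ctx context.Context, peers []string, e envelope) {
	if e.Broadcast {
		for _, p := range peers {
			fmt.Printf("-> %s: %s\n", p, e.Message)
		}
		return
	}
	fmt.Printf("-> %s: %s\n", e.To, e.Message)
}

func main() {
	peers := []string{"peer-a", "peer-b"}
	send(context.Background(), peers, envelope{Broadcast: true, Message: "NewRoundStep"})
}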
func (r *Reactor) broadcastNewValidBlockMessage(ctx context.Context, rs *cstypes.RoundState, stateCh *p2p.Channel) error {
func (r *Reactor) broadcastNewValidBlockMessage(ctx context.Context, rs *cstypes.RoundState, stateCh p2p.Channel) error {
psHeader := rs.ProposalBlockParts.Header()
return stateCh.Send(ctx, p2p.Envelope{
Broadcast: true,
@@ -331,7 +332,7 @@ func (r *Reactor) broadcastNewValidBlockMessage(ctx context.Context, rs *cstypes
})
}

func (r *Reactor) broadcastHasVoteMessage(ctx context.Context, vote *types.Vote, stateCh *p2p.Channel) error {
func (r *Reactor) broadcastHasVoteMessage(ctx context.Context, vote *types.Vote, stateCh p2p.Channel) error {
return stateCh.Send(ctx, p2p.Envelope{
Broadcast: true,
Message: &tmcons.HasVote{
@@ -346,7 +347,7 @@ func (r *Reactor) broadcastHasVoteMessage(ctx context.Context, vote *types.Vote,
// subscribeToBroadcastEvents subscribes for new round steps and votes using the
// internal pubsub defined in the consensus state to broadcast them to peers
// upon receiving.
func (r *Reactor) subscribeToBroadcastEvents(ctx context.Context, stateCh *p2p.Channel) {
func (r *Reactor) subscribeToBroadcastEvents(ctx context.Context, stateCh p2p.Channel) {
onStopCh := r.state.getOnStopCh()

err := r.state.evsw.AddListenerForEvent(
@@ -403,7 +404,7 @@ func makeRoundStepMessage(rs *cstypes.RoundState) *tmcons.NewRoundStep {
}
}

func (r *Reactor) sendNewRoundStepMessage(ctx context.Context, peerID types.NodeID, stateCh *p2p.Channel) error {
func (r *Reactor) sendNewRoundStepMessage(ctx context.Context, peerID types.NodeID, stateCh p2p.Channel) error {
return stateCh.Send(ctx, p2p.Envelope{
To: peerID,
Message: makeRoundStepMessage(r.getRoundState()),
@@ -433,7 +434,7 @@ func (r *Reactor) getRoundState() *cstypes.RoundState {
return r.rs
}

func (r *Reactor) gossipDataForCatchup(ctx context.Context, rs *cstypes.RoundState, prs *cstypes.PeerRoundState, ps *PeerState, dataCh *p2p.Channel) {
func (r *Reactor) gossipDataForCatchup(ctx context.Context, rs *cstypes.RoundState, prs *cstypes.PeerRoundState, ps *PeerState, dataCh p2p.Channel) {
logger := r.logger.With("height", prs.Height).With("peer", ps.peerID)

if index, ok := prs.ProposalBlockParts.Not().PickRandom(); ok {
@@ -497,7 +498,7 @@ func (r *Reactor) gossipDataForCatchup(ctx context.Context, rs *cstypes.RoundSta
time.Sleep(r.state.config.PeerGossipSleepDuration)
}

func (r *Reactor) gossipDataRoutine(ctx context.Context, ps *PeerState, dataCh *p2p.Channel) {
func (r *Reactor) gossipDataRoutine(ctx context.Context, ps *PeerState, dataCh p2p.Channel) {
logger := r.logger.With("peer", ps.peerID)

timer := time.NewTimer(0)
@@ -632,7 +633,7 @@ OUTER_LOOP:

// pickSendVote picks a vote and sends it to the peer. It will return true if
// there is a vote to send and false otherwise.
func (r *Reactor) pickSendVote(ctx context.Context, ps *PeerState, votes types.VoteSetReader, voteCh *p2p.Channel) (bool, error) {
func (r *Reactor) pickSendVote(ctx context.Context, ps *PeerState, votes types.VoteSetReader, voteCh p2p.Channel) (bool, error) {
vote, ok := ps.PickVoteToSend(votes)
if !ok {
return false, nil
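The (bool, error) return documented above separates two different "nothing happened" cases: (false, nil) means the peer already has every vote we could offer, while a non-nil error means the send itself failed. A hedged sketch of that contract, with a plain string slice standing in for types.VoteSetReader:

package main

import (
	"errors"
	"fmt"
)

// pickAndSend mirrors the pickSendVote contract: (false, nil) when there
// is nothing to send, (true, nil) when one vote went out, and an error
// when the transport failed.
func pickAndSend(pending []string, send func(string) error) (bool, error) {
	if len(pending) == 0 {
		return false, nil
	}
	if err := send(pending[0]); err != nil {
		return false, err
	}
	return true, nil
}

func main() {
	sent, err := pickAndSend([]string{"prevote h=10 r=0"}, func(v string) error {
		fmt.Println("sending:", v)
		return nil
	})
	fmt.Println(sent, err)

	_, err = pickAndSend([]string{"precommit"}, func(string) error {
		return errors.New("peer gone")
	})
	fmt.Println(err)
}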
@@ -660,7 +661,7 @@ func (r *Reactor) gossipVotesForHeight(
rs *cstypes.RoundState,
prs *cstypes.PeerRoundState,
ps *PeerState,
voteCh *p2p.Channel,
voteCh p2p.Channel,
) (bool, error) {
logger := r.logger.With("height", prs.Height).With("peer", ps.peerID)

@@ -732,7 +733,7 @@ func (r *Reactor) gossipVotesForHeight(
return false, nil
}

func (r *Reactor) gossipVotesRoutine(ctx context.Context, ps *PeerState, voteCh *p2p.Channel) {
func (r *Reactor) gossipVotesRoutine(ctx context.Context, ps *PeerState, voteCh p2p.Channel) {
logger := r.logger.With("peer", ps.peerID)

timer := time.NewTimer(0)
@@ -804,7 +805,7 @@ func (r *Reactor) gossipVotesRoutine(ctx context.Context, ps *PeerState, voteCh

// NOTE: `queryMaj23Routine` has a simple crude design since it only comes
// into play for liveness when there's a signature DDoS attack happening.
func (r *Reactor) queryMaj23Routine(ctx context.Context, ps *PeerState, stateCh *p2p.Channel) {
func (r *Reactor) queryMaj23Routine(ctx context.Context, ps *PeerState, stateCh p2p.Channel) {
timer := time.NewTimer(0)
defer timer.Stop()

@@ -1015,7 +1016,7 @@ func (r *Reactor) processPeerUpdate(ctx context.Context, peerUpdate p2p.PeerUpda
// If we fail to find the peer state for the envelope sender, we perform a no-op
// and return. This can happen when we process the envelope after the peer is
// removed.
func (r *Reactor) handleStateMessage(ctx context.Context, envelope *p2p.Envelope, msgI Message, voteSetCh *p2p.Channel) error {
func (r *Reactor) handleStateMessage(ctx context.Context, envelope *p2p.Envelope, msgI Message, voteSetCh p2p.Channel) error {
ps, ok := r.GetPeerState(envelope.From)
if !ok || ps == nil {
r.logger.Debug("failed to find peer state", "peer", envelope.From, "ch_id", "StateChannel")
@@ -1113,7 +1114,7 @@ func (r *Reactor) handleDataMessage(ctx context.Context, envelope *p2p.Envelope,
}

if r.WaitSync() {
logger.Info("ignoring message received during sync", "msg", fmt.Sprintf("%T", msgI))
logger.Info("ignoring message received during sync", "msg", tmstrings.LazySprintf("%T", msgI))
return nil
}


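The swap from fmt.Sprintf to tmstrings.LazySprintf in the hunk above defers formatting until the log line is actually rendered: if the entry is filtered out by level, Sprintf never runs, which matters on hot paths. The trick is just a fmt.Stringer wrapper; here is a minimal sketch (the real helper lives in internal/libs/strings and may differ in detail):

package main

import "fmt"

// lazySprintf formats only when String is called, i.e. only when a log
// sink actually renders the value.
type lazySprintf struct {
	format string
	args   []interface{}
}

func (l *lazySprintf) String() string { return fmt.Sprintf(l.format, l.args...) }

func LazySprintf(format string, args ...interface{}) fmt.Stringer {
	return &lazySprintf{format: format, args: args}
}

func main() {
	v := LazySprintf("%T", struct{ X int }{})
	// Formatting happens here, inside Println, not at construction time.
	fmt.Println(v)
}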
@@ -46,10 +46,10 @@ type reactorTestSuite struct {
reactors map[types.NodeID]*Reactor
subs map[types.NodeID]eventbus.Subscription
blocksyncSubs map[types.NodeID]eventbus.Subscription
stateChannels map[types.NodeID]*p2p.Channel
dataChannels map[types.NodeID]*p2p.Channel
voteChannels map[types.NodeID]*p2p.Channel
voteSetBitsChannels map[types.NodeID]*p2p.Channel
stateChannels map[types.NodeID]p2p.Channel
dataChannels map[types.NodeID]p2p.Channel
voteChannels map[types.NodeID]p2p.Channel
voteSetBitsChannels map[types.NodeID]p2p.Channel
}

func chDesc(chID p2p.ChannelID, size int) *p2p.ChannelDescriptor {
@@ -86,7 +86,7 @@ func setup(
t.Cleanup(cancel)

chCreator := func(nodeID types.NodeID) p2p.ChannelCreator {
return func(ctx context.Context, desc *p2p.ChannelDescriptor) (*p2p.Channel, error) {
return func(ctx context.Context, desc *p2p.ChannelDescriptor) (p2p.Channel, error) {
switch desc.ID {
case StateChannel:
return rts.stateChannels[nodeID], nil
@@ -779,6 +779,10 @@ func TestReactorRecordsVotesAndBlockParts(t *testing.T) {
}

func TestReactorVotingPowerChange(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode")
}

ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
defer cancel()

@@ -885,6 +889,10 @@ func TestReactorVotingPowerChange(t *testing.T) {
}

func TestReactorValidatorSetChanges(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode")
}

ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
defer cancel()


@@ -118,6 +118,10 @@ func sendTxs(ctx context.Context, t *testing.T, cs *State) {

// TestWALCrash uses crashing WAL to test we can recover from any WAL failure.
func TestWALCrash(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode")
}

testCases := []struct {
name string
initFn func(dbm.DB, *State, context.Context)

@@ -21,6 +21,7 @@ import (
"github.com/tendermint/tendermint/internal/eventbus"
"github.com/tendermint/tendermint/internal/jsontypes"
"github.com/tendermint/tendermint/internal/libs/autofile"
tmstrings "github.com/tendermint/tendermint/internal/libs/strings"
sm "github.com/tendermint/tendermint/internal/state"
tmevents "github.com/tendermint/tendermint/libs/events"
"github.com/tendermint/tendermint/libs/log"
@@ -778,11 +779,9 @@ func (cs *State) updateToState(state sm.State) {
// signal the new round step, because other services (eg. txNotifier)
// depend on having an up-to-date peer state!
if state.LastBlockHeight <= cs.state.LastBlockHeight {
cs.logger.Debug(
"ignoring updateToState()",
cs.logger.Debug("ignoring updateToState()",
"new_height", state.LastBlockHeight+1,
"old_height", cs.state.LastBlockHeight+1,
)
"old_height", cs.state.LastBlockHeight+1)
cs.newStep()
return
}
@@ -1038,12 +1037,10 @@ func (cs *State) handleMsg(ctx context.Context, mi msgInfo) {
}

if err != nil && msg.Round != cs.Round {
cs.logger.Debug(
"received block part from wrong round",
cs.logger.Debug("received block part from wrong round",
"height", cs.Height,
"cs_round", cs.Round,
"block_round", msg.Round,
)
"block_round", msg.Round)
err = nil
}

@@ -1073,7 +1070,7 @@ func (cs *State) handleMsg(ctx context.Context, mi msgInfo) {
// We could make note of this and help filter in broadcastHasVoteMessage().

default:
cs.logger.Error("unknown msg type", "type", fmt.Sprintf("%T", msg))
cs.logger.Error("unknown msg type", "type", tmstrings.LazySprintf("%T", msg))
return
}

@@ -1184,10 +1181,10 @@ func (cs *State) enterNewRound(ctx context.Context, height int64, round int32) {
logger := cs.logger.With("height", height, "round", round)

if cs.Height != height || round < cs.Round || (cs.Round == round && cs.Step != cstypes.RoundStepNewHeight) {
logger.Debug(
"entering new round with invalid args",
"current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step),
)
logger.Debug("entering new round with invalid args",
"height", cs.Height,
"round", cs.Round,
"step", cs.Step)
return
}

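Note what the logging rewrites in these hunks actually buy: the old calls packed height, round, and step into one preformatted "current" string via fmt.Sprintf("%v/%v/%v", ...), while the new calls emit one key per field, so a structured-log pipeline can filter on round or step directly without string parsing. Illustrated below with the standard library's log/slog (Go 1.21+), which is an assumption for the example only; tendermint uses its own libs/log:

package main

import (
	"log/slog"
	"os"
)

func main() {
	logger := slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelDebug}))

	height, round, step := int64(42), int32(1), "RoundStepNewHeight"

	// Before: one opaque string the consumer has to parse apart.
	logger.Debug("entering new round", "current", "42/1/RoundStepNewHeight")

	// After: one key per field, queryable as structured data.
	logger.Debug("entering new round", "height", height, "round", round, "step", step)
}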
@@ -1195,7 +1192,10 @@ func (cs *State) enterNewRound(ctx context.Context, height int64, round int32) {
logger.Debug("need to set a buffer and log message here for sanity", "start_time", cs.StartTime, "now", now)
}

logger.Debug("entering new round", "current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step))
logger.Debug("entering new round",
"height", cs.Height,
"round", cs.Round,
"step", cs.Step)

// increment validators if necessary
validators := cs.Validators
@@ -1274,10 +1274,10 @@ func (cs *State) enterPropose(ctx context.Context, height int64, round int32) {
logger := cs.logger.With("height", height, "round", round)

if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPropose <= cs.Step) {
logger.Debug(
"entering propose step with invalid args",
"current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step),
)
logger.Debug("entering propose step with invalid args",
"height", cs.Height,
"round", cs.Round,
"step", cs.Step)
return
}

@@ -1291,7 +1291,10 @@ func (cs *State) enterPropose(ctx context.Context, height int64, round int32) {
}
}

logger.Debug("entering propose step", "current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step))
logger.Debug("entering propose step",
"height", cs.Height,
"round", cs.Round,
"step", cs.Step)

defer func() {
// Done enterPropose:
@@ -1333,17 +1336,13 @@ func (cs *State) enterPropose(ctx context.Context, height int64, round int32) {
}

if cs.isProposer(addr) {
logger.Debug(
"propose step; our turn to propose",
"proposer", addr,
)
logger.Debug("propose step; our turn to propose",
"proposer", addr)

cs.decideProposal(ctx, height, round)
} else {
logger.Debug(
"propose step; not our turn to propose",
"proposer", cs.Validators.GetProposer().Address,
)
logger.Debug("propose step; not our turn to propose",
"proposer", cs.Validators.GetProposer().Address)
}
}

@@ -1480,10 +1479,10 @@ func (cs *State) enterPrevote(ctx context.Context, height int64, round int32) {
logger := cs.logger.With("height", height, "round", round)

if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrevote <= cs.Step) {
logger.Debug(
"entering prevote step with invalid args",
"current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step),
)
logger.Debug("entering prevote step with invalid args",
"height", cs.Height,
"round", cs.Round,
"step", cs.Step)
return
}

@@ -1493,7 +1492,10 @@ func (cs *State) enterPrevote(ctx context.Context, height int64, round int32) {
cs.newStep()
}()

logger.Debug("entering prevote step", "current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step))
logger.Debug("entering prevote step",
"height", cs.Height,
"round", cs.Round,
"step", cs.Step)

// Sign and broadcast vote as necessary
cs.doPrevote(ctx, height, round)
@@ -1533,14 +1535,10 @@ func (cs *State) defaultDoPrevote(ctx context.Context, height int64, round int32
//TODO: Remove this temporary fix when the complete solution is ready. See #8739
if !cs.replayMode && cs.Proposal.POLRound == -1 && cs.LockedRound == -1 && !cs.proposalIsTimely() {
logger.Debug("prevote step: Proposal is not timely; prevoting nil",
"proposed",
tmtime.Canonical(cs.Proposal.Timestamp).Format(time.RFC3339Nano),
"received",
tmtime.Canonical(cs.ProposalReceiveTime).Format(time.RFC3339Nano),
"msg_delay",
sp.MessageDelay,
"precision",
sp.Precision)
"proposed", tmtime.Canonical(cs.Proposal.Timestamp).Format(time.RFC3339Nano),
"received", tmtime.Canonical(cs.ProposalReceiveTime).Format(time.RFC3339Nano),
"msg_delay", sp.MessageDelay,
"precision", sp.Precision)
cs.signAddVote(ctx, tmproto.PrevoteType, nil, types.PartSetHeader{})
return
}
@@ -1625,8 +1623,8 @@ func (cs *State) defaultDoPrevote(ctx context.Context, height int64, round int32
|
||||
blockID, ok := cs.Votes.Prevotes(cs.Proposal.POLRound).TwoThirdsMajority()
|
||||
if ok && cs.ProposalBlock.HashesTo(blockID.Hash) && cs.Proposal.POLRound >= 0 && cs.Proposal.POLRound < cs.Round {
|
||||
if cs.LockedRound <= cs.Proposal.POLRound {
|
||||
logger.Debug("prevote step: ProposalBlock is valid and received a 2/3" +
|
||||
"majority in a round later than the locked round; prevoting the proposal")
|
||||
logger.Debug("prevote step: ProposalBlock is valid and received a 2/3 majority in a round later than the locked round",
|
||||
"outcome", "prevoting the proposal")
|
||||
cs.signAddVote(ctx, tmproto.PrevoteType, cs.ProposalBlock.Hash(), cs.ProposalBlockParts.Header())
|
||||
return
|
||||
}
|
||||
@@ -1637,8 +1635,8 @@ func (cs *State) defaultDoPrevote(ctx context.Context, height int64, round int32
|
||||
}
|
||||
}
|
||||
|
||||
logger.Debug("prevote step: ProposalBlock is valid but was not our locked block or " +
|
||||
"did not receive a more recent majority; prevoting nil")
|
||||
logger.Debug("prevote step: ProposalBlock is valid but was not our locked block or did not receive a more recent majority",
|
||||
"outcome", "prevoting nil")
|
||||
cs.signAddVote(ctx, tmproto.PrevoteType, nil, types.PartSetHeader{})
|
||||
}
|
||||
|
||||
@@ -1647,10 +1645,10 @@ func (cs *State) enterPrevoteWait(height int64, round int32) {
|
||||
logger := cs.logger.With("height", height, "round", round)
|
||||
|
||||
if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrevoteWait <= cs.Step) {
|
||||
logger.Debug(
|
||||
"entering prevote wait step with invalid args",
|
||||
"current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step),
|
||||
)
|
||||
logger.Debug("entering prevote wait step with invalid args",
|
||||
"height", cs.Height,
|
||||
"round", cs.Round,
|
||||
"step", cs.Step)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1661,7 +1659,10 @@ func (cs *State) enterPrevoteWait(height int64, round int32) {
|
||||
))
|
||||
}
|
||||
|
||||
logger.Debug("entering prevote wait step", "current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step))
|
||||
logger.Debug("entering prevote wait step",
|
||||
"height", cs.Height,
|
||||
"round", cs.Round,
|
||||
"step", cs.Step)
|
||||
|
||||
defer func() {
|
||||
// Done enterPrevoteWait:
|
||||
@@ -1679,17 +1680,21 @@ func (cs *State) enterPrevoteWait(height int64, round int32) {
|
||||
// Lock & precommit the ProposalBlock if we have enough prevotes for it (a POL in this round)
|
||||
// else, precommit nil otherwise.
|
||||
func (cs *State) enterPrecommit(ctx context.Context, height int64, round int32) {
|
||||
logger := cs.logger.With("height", height, "round", round)
|
||||
logger := cs.logger.With("new_height", height, "new_round", round)
|
||||
|
||||
if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrecommit <= cs.Step) {
|
||||
logger.Debug(
|
||||
"entering precommit step with invalid args",
|
||||
"current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step),
|
||||
)
|
||||
logger.Debug("entering precommit step with invalid args",
|
||||
"height", cs.Height,
|
||||
"round", cs.Round,
|
||||
"step", cs.Step)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
logger.Debug("entering precommit step", "current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step))
|
||||
logger.Debug("entering precommit step",
|
||||
"height", cs.Height,
|
||||
"round", cs.Round,
|
||||
"step", cs.Step)
|
||||
|
||||
defer func() {
|
||||
// Done enterPrecommit:
|
||||
@@ -1796,14 +1801,13 @@ func (cs *State) enterPrecommit(ctx context.Context, height int64, round int32)
|
||||
|
||||
// Enter: any +2/3 precommits for next round.
|
||||
func (cs *State) enterPrecommitWait(height int64, round int32) {
|
||||
logger := cs.logger.With("height", height, "round", round)
|
||||
logger := cs.logger.With("new_height", height, "new_round", round)
|
||||
|
||||
if cs.Height != height || round < cs.Round || (cs.Round == round && cs.TriggeredTimeoutPrecommit) {
|
||||
logger.Debug(
|
||||
"entering precommit wait step with invalid args",
|
||||
logger.Debug("entering precommit wait step with invalid args",
|
||||
"triggered_timeout", cs.TriggeredTimeoutPrecommit,
|
||||
"current", fmt.Sprintf("%v/%v", cs.Height, cs.Round),
|
||||
)
|
||||
"height", cs.Height,
|
||||
"round", cs.Round)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1814,7 +1818,10 @@ func (cs *State) enterPrecommitWait(height int64, round int32) {
|
||||
))
|
||||
}
|
||||
|
||||
logger.Debug("entering precommit wait step", "current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step))
|
||||
logger.Debug("entering precommit wait step",
|
||||
"height", cs.Height,
|
||||
"round", cs.Round,
|
||||
"step", cs.Step)
|
||||
|
||||
defer func() {
|
||||
// Done enterPrecommitWait:
|
||||
@@ -1828,17 +1835,20 @@ func (cs *State) enterPrecommitWait(height int64, round int32) {
|
||||
|
||||
// Enter: +2/3 precommits for block
|
||||
func (cs *State) enterCommit(ctx context.Context, height int64, commitRound int32) {
|
||||
logger := cs.logger.With("height", height, "commit_round", commitRound)
|
||||
logger := cs.logger.With("new_height", height, "commit_round", commitRound)
|
||||
|
||||
if cs.Height != height || cstypes.RoundStepCommit <= cs.Step {
|
||||
logger.Debug(
|
||||
"entering commit step with invalid args",
|
||||
"current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step),
|
||||
)
|
||||
logger.Debug("entering commit step with invalid args",
|
||||
"height", cs.Height,
|
||||
"round", cs.Round,
|
||||
"step", cs.Step)
|
||||
return
|
||||
}
|
||||
|
||||
logger.Debug("entering commit step", "current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step))
|
||||
logger.Debug("entering commit step",
|
||||
"height", cs.Height,
|
||||
"round", cs.Round,
|
||||
"step", cs.Step)
|
||||
|
||||
defer func() {
|
||||
// Done enterCommit:
|
||||
@@ -1892,12 +1902,12 @@ func (cs *State) enterCommit(ctx context.Context, height int64, commitRound int3
|
||||
|
||||
// If we have the block AND +2/3 commits for it, finalize.
|
||||
func (cs *State) tryFinalizeCommit(ctx context.Context, height int64) {
|
||||
logger := cs.logger.With("height", height)
|
||||
|
||||
if cs.Height != height {
|
||||
panic(fmt.Sprintf("tryFinalizeCommit() cs.Height: %v vs height: %v", cs.Height, height))
|
||||
}
|
||||
|
||||
logger := cs.logger.With("height", height)
|
||||
|
||||
blockID, ok := cs.Votes.Precommits(cs.CommitRound).TwoThirdsMajority()
|
||||
if !ok || blockID.IsNil() {
|
||||
logger.Error("failed attempt to finalize commit; there was no +2/3 majority or +2/3 was for nil")
|
||||
@@ -1907,9 +1917,8 @@ func (cs *State) tryFinalizeCommit(ctx context.Context, height int64) {
|
||||
if !cs.ProposalBlock.HashesTo(blockID.Hash) {
|
||||
// TODO: this happens every time if we're not a validator (ugly logs)
|
||||
// TODO: ^^ wait, why does it matter that we're a validator?
|
||||
logger.Debug(
|
||||
"failed attempt to finalize commit; we do not have the commit block",
|
||||
"proposal_block", cs.ProposalBlock.Hash(),
|
||||
logger.Debug("failed attempt to finalize commit; we do not have the commit block",
|
||||
"proposal_block", tmstrings.LazyBlockHash(cs.ProposalBlock),
|
||||
"commit_block", blockID.Hash,
|
||||
)
|
||||
return
|
||||
@@ -1951,11 +1960,10 @@ func (cs *State) finalizeCommit(ctx context.Context, height int64) {
|
||||
|
||||
logger.Info(
|
||||
"finalizing commit of block",
|
||||
"hash", block.Hash(),
|
||||
"hash", tmstrings.LazyBlockHash(block),
|
||||
"root", block.AppHash,
|
||||
"num_txs", len(block.Txs),
|
||||
)
|
||||
logger.Debug(fmt.Sprintf("%v", block))
|
||||
|
||||
// Save to blockStore.
|
||||
if cs.blockStore.Height() < block.Height {
|
||||
@@ -2052,8 +2060,11 @@ func (cs *State) RecordMetrics(height int64, block *types.Block) {
|
||||
address types.Address
|
||||
)
|
||||
if commitSize != valSetLen {
|
||||
cs.logger.Error(fmt.Sprintf("commit size (%d) doesn't match valset length (%d) at height %d\n\n%v\n\n%v",
|
||||
commitSize, valSetLen, block.Height, block.LastCommit.Signatures, cs.LastValidators.Validators))
|
||||
cs.logger.Error("commit size doesn't match valset",
|
||||
"size", commitSize,
|
||||
"valset_len", valSetLen,
|
||||
"height", block.Height,
|
||||
"extra", tmstrings.LazySprintf("\n%v\n\n%v", block.LastCommit.Signatures, cs.LastValidators.Validators))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -2187,8 +2198,7 @@ func (cs *State) addProposalBlockPart(
|
||||
cs.metrics.BlockGossipPartsReceived.With("matches_current", "false").Add(1)
|
||||
// NOTE: this can happen when we've gone to a higher round and
|
||||
// then receive parts from the previous round - not necessarily a bad peer.
|
||||
cs.logger.Debug(
|
||||
"received a block part when we are not expecting any",
|
||||
cs.logger.Debug("received a block part when we are not expecting any",
|
||||
"height", height,
|
||||
"round", round,
|
||||
"index", part.Index,
|
||||
@@ -2248,11 +2258,9 @@ func (cs *State) handleCompleteProposal(ctx context.Context, height int64) {
|
||||
blockID, hasTwoThirds := prevotes.TwoThirdsMajority()
|
||||
if hasTwoThirds && !blockID.IsNil() && (cs.ValidRound < cs.Round) {
|
||||
if cs.ProposalBlock.HashesTo(blockID.Hash) {
|
||||
cs.logger.Debug(
|
||||
"updating valid block to new proposal block",
|
||||
cs.logger.Debug("updating valid block to new proposal block",
|
||||
"valid_round", cs.Round,
|
||||
"valid_block_hash", cs.ProposalBlock.Hash(),
|
||||
)
|
||||
"valid_block_hash", tmstrings.LazyBlockHash(cs.ProposalBlock))
|
||||
|
||||
cs.ValidRound = cs.Round
|
||||
cs.ValidBlock = cs.ProposalBlock
|
||||
@@ -2291,23 +2299,19 @@ func (cs *State) tryAddVote(ctx context.Context, vote *types.Vote, peerID types.
|
||||
}
|
||||
|
||||
if bytes.Equal(vote.ValidatorAddress, cs.privValidatorPubKey.Address()) {
|
||||
cs.logger.Error(
|
||||
"found conflicting vote from ourselves; did you unsafe_reset a validator?",
|
||||
cs.logger.Error("found conflicting vote from ourselves; did you unsafe_reset a validator?",
|
||||
"height", vote.Height,
|
||||
"round", vote.Round,
|
||||
"type", vote.Type,
|
||||
)
|
||||
"type", vote.Type)
|
||||
|
||||
return added, err
|
||||
}
|
||||
|
||||
// report conflicting votes to the evidence pool
|
||||
cs.evpool.ReportConflictingVotes(voteErr.VoteA, voteErr.VoteB)
|
||||
cs.logger.Debug(
|
||||
"found and sent conflicting votes to the evidence pool",
|
||||
cs.logger.Debug("found and sent conflicting votes to the evidence pool",
|
||||
"vote_a", voteErr.VoteA,
|
||||
"vote_b", voteErr.VoteB,
|
||||
)
|
||||
"vote_b", voteErr.VoteB)
|
||||
|
||||
return added, err
|
||||
} else if errors.Is(err, types.ErrVoteNonDeterministicSignature) {
|
||||
@@ -2331,13 +2335,11 @@ func (cs *State) addVote(
|
||||
vote *types.Vote,
|
||||
peerID types.NodeID,
|
||||
) (added bool, err error) {
|
||||
cs.logger.Debug(
|
||||
"adding vote",
|
||||
cs.logger.Debug("adding vote",
|
||||
"vote_height", vote.Height,
|
||||
"vote_type", vote.Type,
|
||||
"val_index", vote.ValidatorIndex,
|
||||
"cs_height", cs.Height,
|
||||
)
|
||||
"cs_height", cs.Height)
|
||||
|
||||
if vote.Height < cs.Height || (vote.Height == cs.Height && vote.Round < cs.Round) {
|
||||
cs.metrics.MarkLateVote(vote.Type)
|
||||
@@ -2458,11 +2460,9 @@ func (cs *State) addVote(
|
||||
cs.ValidBlock = cs.ProposalBlock
|
||||
cs.ValidBlockParts = cs.ProposalBlockParts
|
||||
} else {
|
||||
cs.logger.Debug(
|
||||
"valid block we do not know about; set ProposalBlock=nil",
|
||||
"proposal", cs.ProposalBlock.Hash(),
|
||||
"block_id", blockID.Hash,
|
||||
)
|
||||
cs.logger.Debug("valid block we do not know about; set ProposalBlock=nil",
|
||||
"proposal", tmstrings.LazyBlockHash(cs.ProposalBlock),
|
||||
"block_id", blockID.Hash)
|
||||
|
||||
// we're getting the wrong block
|
||||
cs.ProposalBlock = nil
|
||||
|
||||
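The hunks above do two things: they split the single `fmt.Sprintf("%v/%v/%v", ...)` value into discrete `height`/`round`/`step` keys, and they swap eagerly computed values such as `block.Hash()` for lazy wrappers like `tmstrings.LazyBlockHash`. A minimal, self-contained sketch of why the lazy form matters (the `lazyHex`/`debugf` names are illustrative, not Tendermint's logging API):

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// lazyHex defers hashing and hex-encoding until String is called, so a
// logger that drops the record never pays for either.
type lazyHex struct{ data []byte }

func (l lazyHex) String() string {
	sum := sha256.Sum256(l.data)
	return fmt.Sprintf("%X", sum)
}

// debugf stands in for a leveled logger; when disabled it never renders
// its arguments, so fmt.Stringer values are never evaluated.
func debugf(enabled bool, msg string, keyvals ...interface{}) {
	if !enabled {
		return
	}
	fmt.Println(append([]interface{}{msg}, keyvals...)...)
}

func main() {
	block := []byte("block bytes")

	// Eager: the hash and hex string are computed even though the record is dropped.
	debugf(false, "finalizing commit of block", "hash", fmt.Sprintf("%X", sha256.Sum256(block)))

	// Lazy: the fmt.Stringer is only rendered if the logger formats the record.
	debugf(false, "finalizing commit of block", "hash", lazyHex{data: block})
}
```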
@@ -159,7 +159,7 @@ func (r *Reactor) handleMessage(ctx context.Context, envelope *p2p.Envelope) (er

// processEvidenceCh implements a blocking event loop where we listen for p2p
// Envelope messages from the evidenceCh.
-func (r *Reactor) processEvidenceCh(ctx context.Context, evidenceCh *p2p.Channel) {
+func (r *Reactor) processEvidenceCh(ctx context.Context, evidenceCh p2p.Channel) {
	iter := evidenceCh.Receive(ctx)
	for iter.Next(ctx) {
		envelope := iter.Envelope()

@@ -186,7 +186,7 @@ func (r *Reactor) processEvidenceCh(ctx context.Context, evidenceCh *p2p.Channel
// connects/disconnects frequently from the broadcasting peer(s).
//
// REF: https://github.com/tendermint/tendermint/issues/4727
-func (r *Reactor) processPeerUpdate(ctx context.Context, peerUpdate p2p.PeerUpdate, evidenceCh *p2p.Channel) {
+func (r *Reactor) processPeerUpdate(ctx context.Context, peerUpdate p2p.PeerUpdate, evidenceCh p2p.Channel) {
	r.logger.Debug("received peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status)

	r.mtx.Lock()

@@ -227,7 +227,7 @@ func (r *Reactor) processPeerUpdate(ctx context.Context, peerUpdate p2p.PeerUpda
// processPeerUpdates initiates a blocking process where we listen for and handle
// PeerUpdate messages. When the reactor is stopped, we will catch the signal and
// close the p2p PeerUpdatesCh gracefully.
-func (r *Reactor) processPeerUpdates(ctx context.Context, peerUpdates *p2p.PeerUpdates, evidenceCh *p2p.Channel) {
+func (r *Reactor) processPeerUpdates(ctx context.Context, peerUpdates *p2p.PeerUpdates, evidenceCh p2p.Channel) {
	for {
		select {
		case peerUpdate := <-peerUpdates.Updates():

@@ -249,7 +249,7 @@ func (r *Reactor) processPeerUpdates(ctx context.Context, peerUpdates *p2p.PeerU
// that the peer has already received or may not be ready for.
//
// REF: https://github.com/tendermint/tendermint/issues/4727
-func (r *Reactor) broadcastEvidenceLoop(ctx context.Context, peerID types.NodeID, evidenceCh *p2p.Channel) {
+func (r *Reactor) broadcastEvidenceLoop(ctx context.Context, peerID types.NodeID, evidenceCh p2p.Channel) {
	var next *clist.CElement

	defer func() {

@@ -38,7 +38,7 @@ type reactorTestSuite struct {
	logger log.Logger

	reactors         map[types.NodeID]*evidence.Reactor
	pools            map[types.NodeID]*evidence.Pool
-	evidenceChannels map[types.NodeID]*p2p.Channel
+	evidenceChannels map[types.NodeID]p2p.Channel
	peerUpdates      map[types.NodeID]*p2p.PeerUpdates
	peerChans        map[types.NodeID]chan p2p.PeerUpdate
	nodes            []*p2ptest.Node

@@ -96,7 +96,7 @@ func setup(ctx context.Context, t *testing.T, stateStores []sm.Store) *reactorTe
		rts.network.Nodes[nodeID].PeerManager.Register(ctx, pu)
		rts.nodes = append(rts.nodes, rts.network.Nodes[nodeID])

-		chCreator := func(ctx context.Context, chdesc *p2p.ChannelDescriptor) (*p2p.Channel, error) {
+		chCreator := func(ctx context.Context, chdesc *p2p.ChannelDescriptor) (p2p.Channel, error) {
			return rts.evidenceChannels[nodeID], nil
		}

@@ -12,7 +12,7 @@ func BenchmarkDetaching(b *testing.B) {
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		start.removed = true
-		start.detachNext()
+		start.DetachNext()
		start.DetachPrev()
		tmp := nxt
		nxt = nxt.Next()

@@ -103,7 +103,7 @@ func (e *CElement) Removed() bool {
	return isRemoved
}

-func (e *CElement) detachNext() {
+func (e *CElement) DetachNext() {
	e.mtx.Lock()
	if !e.removed {
		e.mtx.Unlock()
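The recurring `*p2p.Channel` → `p2p.Channel` change in these hunks converts call sites from a concrete struct pointer to an interface value, which is what lets the test suites above hand a fake channel to the reactor. A rough sketch of the pattern, assuming a much smaller interface than the real `p2p.Channel`:

```go
package main

import (
	"context"
	"fmt"
)

// Channel is a minimal stand-in for an interface like the one this diff
// introduces in place of the concrete *p2p.Channel; the real one is larger.
type Channel interface {
	Send(ctx context.Context, msg string) error
}

// memChannel is a trivial in-memory implementation, e.g. for tests.
type memChannel struct{ out []string }

func (m *memChannel) Send(_ context.Context, msg string) error {
	m.out = append(m.out, msg)
	return nil
}

// broadcast accepts the interface value, so tests can substitute a fake
// channel without touching the production implementation.
func broadcast(ctx context.Context, ch Channel, msgs ...string) error {
	for _, msg := range msgs {
		if err := ch.Send(ctx, msg); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	ch := &memChannel{}
	if err := broadcast(context.Background(), ch, "ev1", "ev2"); err != nil {
		panic(err)
	}
	fmt.Println(ch.out)
}
```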
internal/libs/confix/confix.go (new file, 155 lines)
@@ -0,0 +1,155 @@
// Package confix applies changes to a Tendermint TOML configuration file, to
// update configurations created with an older version of Tendermint to a
// compatible format for a newer version.
package confix

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"io"
	"os"

	"github.com/creachadair/atomicfile"
	"github.com/creachadair/tomledit"
	"github.com/creachadair/tomledit/transform"
	"github.com/spf13/viper"

	"github.com/tendermint/tendermint/config"
)

// Upgrade reads the configuration file at configPath and applies any
// transformations necessary to upgrade it to the current version. If this
// succeeds, the transformed output is written to outputPath. As a special
// case, if outputPath == "" the output is written to stdout.
//
// It is safe if outputPath == inputPath. If a regular file outputPath already
// exists, it is overwritten. In case of error, the output is not written.
//
// Upgrade is a convenience wrapper for calls to LoadConfig, ApplyFixes, and
// CheckValid. If the caller requires more control over the behavior of the
// upgrade, call those functions directly.
func Upgrade(ctx context.Context, configPath, outputPath string) error {
	if configPath == "" {
		return errors.New("empty input configuration path")
	}

	doc, err := LoadConfig(configPath)
	if err != nil {
		return fmt.Errorf("loading config: %v", err)
	}

	if err := ApplyFixes(ctx, doc); err != nil {
		return fmt.Errorf("updating %q: %v", configPath, err)
	}

	var buf bytes.Buffer
	if err := tomledit.Format(&buf, doc); err != nil {
		return fmt.Errorf("formatting config: %v", err)
	}

	// Verify that Tendermint can parse the results after our edits.
	if err := CheckValid(buf.Bytes()); err != nil {
		return fmt.Errorf("updated config is invalid: %v", err)
	}

	if outputPath == "" {
		_, err = os.Stdout.Write(buf.Bytes())
	} else {
		err = atomicfile.WriteData(outputPath, buf.Bytes(), 0600)
	}
	return err
}

// ApplyFixes transforms doc and reports whether it succeeded.
func ApplyFixes(ctx context.Context, doc *tomledit.Document) error {
	// Check what version of Tendermint might have created this config file, as
	// a safety check for the updates we are about to make.
	tmVersion := GuessConfigVersion(doc)
	if tmVersion == vUnknown {
		return errors.New("cannot tell what Tendermint version created this config")
	} else if tmVersion < v34 || tmVersion > v36 {
		// TODO(creachadair): Add in rewrites for older versions. This will
		// require some digging to discover what the changes were. The upgrade
		// instructions do not give specifics.
		return fmt.Errorf("unable to update version %s config", tmVersion)
	}
	return plan.Apply(ctx, doc)
}

// LoadConfig loads and parses the TOML document from path.
func LoadConfig(path string) (*tomledit.Document, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	return tomledit.Parse(f)
}

const (
	vUnknown = ""
	v32      = "v0.32"
	v33      = "v0.33"
	v34      = "v0.34"
	v35      = "v0.35"
	v36      = "v0.36"
)

// GuessConfigVersion attempts to figure out which version of Tendermint
// created the specified config document. It returns "" if the creating version
// cannot be determined, otherwise a string of the form "vX.YY".
func GuessConfigVersion(doc *tomledit.Document) string {
	hasDisableWS := doc.First("rpc", "experimental-disable-websocket") != nil
	hasUseLegacy := doc.First("p2p", "use-legacy") != nil // v0.35 only
	if hasDisableWS && !hasUseLegacy {
		return v36
	}

	hasBlockSync := transform.FindTable(doc, "blocksync") != nil // add: v0.35
	hasStateSync := transform.FindTable(doc, "statesync") != nil // add: v0.34
	if hasBlockSync && hasStateSync {
		return v35
	} else if hasStateSync {
		return v34
	}

	hasIndexKeys := doc.First("tx_index", "index_keys") != nil // add: v0.33
	hasIndexTags := doc.First("tx_index", "index_tags") != nil // rem: v0.33
	if hasIndexKeys && !hasIndexTags {
		return v33
	}

	hasFastSync := transform.FindTable(doc, "fastsync") != nil // add: v0.32
	if hasIndexTags && hasFastSync {
		return v32
	}

	// Something older, probably.
	return vUnknown
}

// CheckValid checks whether the specified config appears to be a valid
// Tendermint config file. This emulates how the node loads the config.
func CheckValid(data []byte) error {
	v := viper.New()
	v.SetConfigType("toml")

	if err := v.ReadConfig(bytes.NewReader(data)); err != nil {
		return fmt.Errorf("reading config: %w", err)
	}

	var cfg config.Config
	if err := v.Unmarshal(&cfg); err != nil {
		return fmt.Errorf("decoding config: %w", err)
	}

	return cfg.ValidateBasic()
}

// WithLogWriter returns a child of ctx with a logger attached that sends
// output to w. This is a convenience wrapper for transform.WithLogWriter.
func WithLogWriter(ctx context.Context, w io.Writer) context.Context {
	return transform.WithLogWriter(ctx, w)
}
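Tying the exported pieces together, a caller inside the Tendermint module (the package lives under `internal/`, so it is not importable from outside the module) might upgrade a config in place roughly like this; the file name is illustrative:

```go
package main

import (
	"context"
	"log"
	"os"

	"github.com/tendermint/tendermint/internal/libs/confix"
)

func main() {
	// Attach a log writer so each applied transform is reported on stderr.
	ctx := confix.WithLogWriter(context.Background(), os.Stderr)

	// outputPath == configPath is documented above as safe: the result is
	// only written (atomically) after ApplyFixes and CheckValid succeed.
	if err := confix.Upgrade(ctx, "config.toml", "config.toml"); err != nil {
		log.Fatalf("config upgrade failed: %v", err)
	}
}
```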
@@ -1,4 +1,4 @@
-package main_test
+package confix_test

import (
	"bytes"

@@ -9,7 +9,7 @@ import (
	"github.com/creachadair/tomledit"
	"github.com/google/go-cmp/cmp"

-	confix "github.com/tendermint/tendermint/scripts/confix"
+	"github.com/tendermint/tendermint/internal/libs/confix"
)

func mustParseConfig(t *testing.T, path string) *tomledit.Document {

@@ -1,4 +1,4 @@
-package main
+package confix

import (
	"context"

@@ -41,12 +41,12 @@ The files named `diff-XX-YY.txt` were generated by using the `condiff` tool on
the config samples for versions v0.XX and v0.YY:

```shell
-go run ./scripts/confix/condiff -desnake vXX-config vYY-config.toml > diff-XX-YY.txt
+go run ./scripts/condiff -desnake vXX-config vYY-config.toml > diff-XX-YY.txt
```

The `baseline.txt` was computed in the same way, but using an empty starting
file so that we capture all the settings in the target:

```shell
-go run ./scripts/confix/condiff -desnake /dev/null v26-config.toml > baseline.txt
+go run ./scripts/condiff -desnake /dev/null v26-config.toml > baseline.txt
```
@@ -8,13 +8,11 @@
+M blocksync.version
-S fastsync
-M fastsync.version
+M mempool.ttl-duration
+M mempool.ttl-num-blocks
+M mempool.version
-M mempool.wal-dir
+M p2p.bootstrap-peers
+M p2p.max-connections
+M p2p.max-incoming-connection-attempts
+M p2p.max-outgoing-connections
+M p2p.queue-type
-M p2p.seed-mode
+M p2p.use-legacy

@@ -28,4 +26,3 @@
-M statesync.chunk-fetchers
+M statesync.fetchers
+M statesync.use-p2p
+M tx-index.psql-conn
@@ -272,6 +272,11 @@ dial_timeout = "3s"
#######################################################
[mempool]

+# Mempool version to use:
+#   1) "v0" - (default) FIFO mempool.
+#   2) "v1" - prioritized mempool.
+version = "v0"
+
recheck = true
broadcast = true
wal_dir = ""

@@ -301,6 +306,22 @@ max_tx_bytes = 1048576
# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796
max_batch_bytes = 0

+# ttl-duration, if non-zero, defines the maximum amount of time a transaction
+# can exist in the mempool.
+#
+# Note, if ttl-num-blocks is also defined, a transaction will be removed if it
+# has existed in the mempool at least ttl-num-blocks number of blocks or if its
+# insertion time into the mempool is beyond ttl-duration.
+ttl-duration = "0s"
+
+# ttl-num-blocks, if non-zero, defines the maximum number of blocks a transaction
+# can exist in the mempool.
+#
+# Note, if ttl-duration is also defined, a transaction will be removed if it
+# has existed in the mempool at least ttl-num-blocks number of blocks or if
+# its insertion time into the mempool is beyond ttl-duration.
+ttl-num-blocks = 0
+
#######################################################
### State Sync Configuration Options ###
#######################################################

@@ -403,8 +424,14 @@ peer_query_maj23_sleep_duration = "2s"
# 1) "null"
# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
#   - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed.
+# 3) "psql" - the indexer service backed by PostgreSQL.
+# When "kv" or "psql" is chosen "tx.height" and "tx.hash" will always be indexed.
indexer = "kv"
+
+# The PostgreSQL connection configuration, the connection format:
+#   postgresql://<user>:<password>@<host>:<port>/<db>?<opts>
+psql-conn = ""

#######################################################
### Instrumentation Configuration Options ###
#######################################################

@@ -227,7 +227,9 @@ pprof-laddr = ""
# Enable the legacy p2p layer.
use-legacy = false

-# Select the p2p internal queue
+# Select the p2p internal queue.
+# Options are: "fifo", "simple-priority", "priority", and "wdrr"
+# with the default being "priority".
queue-type = "priority"

# Address to listen for incoming connections

@@ -281,6 +283,10 @@ max-num-outbound-peers = 10
# Maximum number of connections (inbound and outbound).
max-connections = 64

+# Maximum number of connections reserved for outgoing
+# connections. Must be less than max-connections.
+max-outgoing-connections = 12
+
# Rate limits the number of incoming connection attempts per IP address.
max-incoming-connection-attempts = 100

@@ -208,8 +208,10 @@ pprof-laddr = ""
#######################################################
[p2p]

-# Select the p2p internal queue
-queue-type = "priority"
+# Select the p2p internal queue.
+# Options are: "fifo", "simple-priority", and "priority",
+# with the default being "priority".
+queue-type = "simple-priority"

# Address to listen for incoming connections
laddr = "tcp://0.0.0.0:26656"

@@ -235,6 +237,10 @@ upnp = false
# Maximum number of connections (inbound and outbound).
max-connections = 64

+# Maximum number of connections reserved for outgoing
+# connections. Must be less than max-connections.
+max-outgoing-connections = 12
+
# Rate limits the number of incoming connection attempts per IP address.
max-incoming-connection-attempts = 100
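The two TTL settings combine as an OR, per the comments above: a transaction is evicted as soon as either non-zero bound is exceeded. A small sketch of that rule (field and function names here are illustrative, not the mempool's internals):

```go
package main

import (
	"fmt"
	"time"
)

// expired restates the ttl-duration / ttl-num-blocks rule: if either limit
// is non-zero and exceeded, the transaction is removed.
func expired(now time.Time, height int64, insertTime time.Time, insertHeight int64,
	ttl time.Duration, ttlBlocks int64) bool {
	if ttl > 0 && now.Sub(insertTime) > ttl {
		return true // too old by wall-clock time
	}
	if ttlBlocks > 0 && height-insertHeight >= ttlBlocks {
		return true // resident for at least ttl-num-blocks blocks
	}
	return false
}

func main() {
	insert := time.Now().Add(-90 * time.Second)
	// With ttl-duration = "60s" and ttl-num-blocks = 10, the age bound
	// triggers here even though only 5 blocks have passed.
	fmt.Println(expired(time.Now(), 105, insert, 100, 60*time.Second, 10)) // true
}
```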
@@ -3,8 +3,94 @@ package strings

import (
	"fmt"
	"strings"
+
+	tmbytes "github.com/tendermint/tendermint/libs/bytes"
)

+type lazyStringf struct {
+	tmpl string
+	args []interface{}
+	out  string
+}
+
+func (s *lazyStringf) String() string {
+	if s.out == "" && s.tmpl != "" {
+		s.out = fmt.Sprintf(s.tmpl, s.args...)
+		s.args = nil
+		s.tmpl = ""
+	}
+	return s.out
+}
+
+// LazySprintf creates a fmt.Stringer implementation with similar
+// semantics as fmt.Sprintf, *except* that the string is built when
+// String() is called on the object. This means that format arguments
+// are resolved/captured into string form when String() is called,
+// and not, as in fmt.Sprintf, when that function returns.
+//
+// As a result, if you use this type in go routines or defer
+// statements it's possible to pass an argument to LazySprintf which
+// has one value at the call site and a different value when
+// String() is evaluated, which may lead to unexpected outcomes. In
+// these situations, either be *extremely* careful about the arguments
+// passed to this function or use fmt.Sprintf.
+//
+// The implementation also caches the output of the underlying
+// fmt.Sprintf statement when String() is called, so subsequent calls
+// will produce the same result.
+func LazySprintf(t string, args ...interface{}) fmt.Stringer {
+	return &lazyStringf{tmpl: t, args: args}
+}
+
+type lazyStringer struct {
+	val fmt.Stringer
+	out string
+}
+
+func (l *lazyStringer) String() string {
+	if l.out == "" && l.val != nil {
+		l.out = l.val.String()
+		l.val = nil
+	}
+	return l.out
+}
+
+// LazyStringer captures a fmt.Stringer implementation, resolving the
+// underlying string *only* when the String() method is called and
+// caching the result for future use.
+func LazyStringer(v fmt.Stringer) fmt.Stringer { return &lazyStringer{val: v} }
+
+type lazyBlockHash struct {
+	block interface{ Hash() tmbytes.HexBytes }
+	out   string
+}
+
+// LazyBlockHash defers the block Hash until the Stringer interface is
+// invoked. This is particularly useful for avoiding calling Sprintf when
+// debugging is not active.
+//
+// As a result, if you use this type in go routines or defer
+// statements it's possible to pass an argument to LazyBlockHash that
+// has one value at the call site and a different value when
+// String() is evaluated, which may lead to unexpected outcomes. In
+// these situations, either be *extremely* careful about the arguments
+// passed to this function or use fmt.Sprintf.
+//
+// The implementation also caches the output of the string form of the
+// block hash when String() is called, so subsequent calls will
+// produce the same result.
+func LazyBlockHash(block interface{ Hash() tmbytes.HexBytes }) fmt.Stringer {
+	return &lazyBlockHash{block: block}
+}
+
+func (l *lazyBlockHash) String() string {
+	if l.out == "" && l.block != nil {
+		l.out = l.block.Hash().String()
+		l.block = nil
+	}
+	return l.out
+}
+
// SplitAndTrimEmpty slices s into all subslices separated by sep and returns a
// slice of the string s with all leading and trailing Unicode code points
// contained in cutset removed. If sep is empty, SplitAndTrim splits after each
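Usage is one line at the call site: pass the `fmt.Stringer` where a pre-formatted string used to go, and the formatting cost is only paid if something actually renders it. Note the caveat from the doc comment: arguments are captured but not formatted until `String()` runs, so mutating them in between changes the output. A small example, forcing evaluation by hand as a logger would (the package is `internal`, so this only compiles inside the Tendermint module):

```go
package main

import (
	"fmt"

	tmstrings "github.com/tendermint/tendermint/internal/libs/strings"
)

func main() {
	// Nothing is formatted yet; the template and args are merely captured.
	v := tmstrings.LazySprintf("%X", []byte{0xde, 0xad})

	// A logger rendering the value triggers (and caches) the Sprintf.
	fmt.Println(v.String()) // DEAD
}
```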
@@ -22,6 +22,10 @@ type TxCache interface {

	// Remove removes the given raw transaction from the cache.
	Remove(tx types.Tx)
+
+	// Has reports whether tx is present in the cache. Checking for presence is
+	// not treated as an access of the value.
+	Has(tx types.Tx) bool
}

var _ TxCache = (*LRUTxCache)(nil)

@@ -97,6 +101,14 @@ func (c *LRUTxCache) Remove(tx types.Tx) {
	}
}

+func (c *LRUTxCache) Has(tx types.Tx) bool {
+	c.mtx.Lock()
+	defer c.mtx.Unlock()
+
+	_, ok := c.cacheMap[tx.Key()]
+	return ok
+}
+
// NopTxCache defines a no-op raw transaction cache.
type NopTxCache struct{}

@@ -105,3 +117,4 @@ var _ TxCache = (*NopTxCache)(nil)
func (NopTxCache) Reset()             {}
func (NopTxCache) Push(types.Tx) bool { return true }
func (NopTxCache) Remove(types.Tx)    {}
+func (NopTxCache) Has(types.Tx) bool  { return false }

(file diff suppressed because it is too large)
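The interface comment is specific that `Has` is a peek, not an access: membership checks must not refresh an entry's position in the cache's eviction order. A toy cache making that distinction concrete (this is not the `LRUTxCache` implementation, just the contract in miniature):

```go
package main

import (
	"container/list"
	"fmt"
)

// peekCache keeps insertion order for eviction; Has consults only the
// index and never touches the order list, so it does not count as use.
type peekCache struct {
	order *list.List
	index map[string]*list.Element
}

func newPeekCache() *peekCache {
	return &peekCache{order: list.New(), index: make(map[string]*list.Element)}
}

func (c *peekCache) Push(key string) {
	if _, ok := c.index[key]; !ok {
		c.index[key] = c.order.PushBack(key)
	}
}

// Has reports membership without promoting the entry.
func (c *peekCache) Has(key string) bool {
	_, ok := c.index[key]
	return ok
}

func main() {
	c := newPeekCache()
	c.Push("tx1")
	fmt.Println(c.Has("tx1"), c.Has("tx2")) // true false
}
```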
@@ -86,9 +86,19 @@ func setup(t testing.TB, app abciclient.Client, cacheSize int, options ...TxMemp
	return NewTxMempool(logger.With("test", t.Name()), cfg.Mempool, app, options...)
}

-func checkTxs(ctx context.Context, t *testing.T, txmp *TxMempool, numTxs int, peerID uint16) []testTx {
-	t.Helper()
+// mustCheckTx invokes txmp.CheckTx for the given transaction and waits until
+// its callback has finished executing. It fails t if CheckTx fails.
+func mustCheckTx(ctx context.Context, t *testing.T, txmp *TxMempool, spec string) {
+	done := make(chan struct{})
+	if err := txmp.CheckTx(ctx, []byte(spec), func(*abci.ResponseCheckTx) {
+		close(done)
+	}, TxInfo{}); err != nil {
+		t.Fatalf("CheckTx for %q failed: %v", spec, err)
+	}
+	<-done
+}
+
+func checkTxs(ctx context.Context, t *testing.T, txmp *TxMempool, numTxs int, peerID uint16) []testTx {
	txs := make([]testTx, numTxs)
	txInfo := TxInfo{SenderID: peerID}

@@ -122,6 +132,10 @@ func convertTex(in []testTx) types.Txs {
}

func TestTxMempool_TxsAvailable(t *testing.T) {
+	if testing.Short() {
+		t.Skip("skipping test in short mode")
+	}
+
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

@@ -217,6 +231,87 @@ func TestTxMempool_Size(t *testing.T) {
	require.Equal(t, int64(2850), txmp.SizeBytes())
}

+func TestTxMempool_Eviction(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	client := abciclient.NewLocalClient(log.NewNopLogger(), &application{Application: kvstore.NewApplication()})
+	if err := client.Start(ctx); err != nil {
+		t.Fatal(err)
+	}
+	t.Cleanup(client.Wait)
+
+	txmp := setup(t, client, 1000)
+	txmp.config.Size = 5
+	txmp.config.MaxTxsBytes = 60
+	txExists := func(spec string) bool {
+		txmp.Lock()
+		defer txmp.Unlock()
+		key := types.Tx(spec).Key()
+		_, ok := txmp.txByKey[key]
+		return ok
+	}
+	t.Cleanup(client.Wait)
+
+	// A transaction bigger than the mempool should be rejected even when there
+	// are slots available.
+	mustCheckTx(ctx, t, txmp, "big=0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef=1")
+	require.Equal(t, 0, txmp.Size())
+
+	// Nearly-fill the mempool with a low-priority transaction, to show that it
+	// is evicted even when slots are available for a higher-priority tx.
+	const bigTx = "big=0123456789abcdef0123456789abcdef0123456789abcdef01234=2"
+	mustCheckTx(ctx, t, txmp, bigTx)
+	require.Equal(t, 1, txmp.Size()) // bigTx is the only element
+	require.True(t, txExists(bigTx))
+	require.Equal(t, int64(len(bigTx)), txmp.SizeBytes())
+
+	// The next transaction should evict bigTx, because it is higher priority
+	// but does not fit on size.
+	mustCheckTx(ctx, t, txmp, "key1=0000=25")
+	require.True(t, txExists("key1=0000=25"))
+	require.False(t, txExists(bigTx))
+	require.False(t, txmp.cache.Has([]byte(bigTx)))
+	require.Equal(t, int64(len("key1=0000=25")), txmp.SizeBytes())
+
+	// Now fill up the rest of the slots with other transactions.
+	mustCheckTx(ctx, t, txmp, "key2=0001=5")
+	mustCheckTx(ctx, t, txmp, "key3=0002=10")
+	mustCheckTx(ctx, t, txmp, "key4=0003=3")
+	mustCheckTx(ctx, t, txmp, "key5=0004=3")
+
+	// A new transaction with low priority should be discarded.
+	mustCheckTx(ctx, t, txmp, "key6=0005=1")
+	require.False(t, txExists("key6=0005=1"))
+
+	// A new transaction with higher priority should evict key5, which is the
+	// newest of the two transactions with lowest priority.
+	mustCheckTx(ctx, t, txmp, "key7=0006=7")
+	require.True(t, txExists("key7=0006=7"))  // new transaction added
+	require.False(t, txExists("key5=0004=3")) // newest low-priority tx evicted
+	require.True(t, txExists("key4=0003=3"))  // older low-priority tx retained
+
+	// Another new transaction evicts the other low-priority element.
+	mustCheckTx(ctx, t, txmp, "key8=0007=20")
+	require.True(t, txExists("key8=0007=20"))
+	require.False(t, txExists("key4=0003=3"))
+
+	// Now the lowest-priority tx is 5, so that should be the next to go.
+	mustCheckTx(ctx, t, txmp, "key9=0008=9")
+	require.True(t, txExists("key9=0008=9"))
+	require.False(t, txExists("key2=0001=5"))
+
+	// Add a transaction that requires eviction of multiple lower-priority
+	// entries, in order to fit the size of the element.
+	mustCheckTx(ctx, t, txmp, "key10=0123456789abcdef=11") // evict 10, 9, 7; keep 25, 20, 11
+	require.True(t, txExists("key1=0000=25"))
+	require.True(t, txExists("key8=0007=20"))
+	require.True(t, txExists("key10=0123456789abcdef=11"))
+	require.False(t, txExists("key3=0002=10"))
+	require.False(t, txExists("key9=0008=9"))
+	require.False(t, txExists("key7=0006=7"))
+}
+
func TestTxMempool_Flush(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

@@ -449,6 +544,10 @@ func TestTxMempool_CheckTxSameSender(t *testing.T) {
}

func TestTxMempool_ConcurrentTxs(t *testing.T) {
+	if testing.Short() {
+		t.Skip("skipping test in short mode")
+	}
+
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

@@ -537,7 +636,6 @@ func TestTxMempool_ExpiredTxs_NumBlocks(t *testing.T) {

	tTxs := checkTxs(ctx, t, txmp, 100, 0)
	require.Equal(t, len(tTxs), txmp.Size())
-	require.Equal(t, 100, txmp.heightIndex.Size())

	// reap 5 txs at the next height -- no txs should expire
	reapedTxs := txmp.ReapMaxTxs(5)

@@ -551,12 +649,10 @@ func TestTxMempool_ExpiredTxs_NumBlocks(t *testing.T) {
	txmp.Unlock()

	require.Equal(t, 95, txmp.Size())
-	require.Equal(t, 95, txmp.heightIndex.Size())

	// check more txs at height 101
	_ = checkTxs(ctx, t, txmp, 50, 1)
	require.Equal(t, 145, txmp.Size())
-	require.Equal(t, 145, txmp.heightIndex.Size())

	// Reap 5 txs at a height that would expire all the transactions from before
	// the previous Update (height 100).

@@ -577,7 +673,6 @@ func TestTxMempool_ExpiredTxs_NumBlocks(t *testing.T) {
	txmp.Unlock()

	require.GreaterOrEqual(t, txmp.Size(), 45)
-	require.GreaterOrEqual(t, txmp.heightIndex.Size(), 45)
}

func TestTxMempool_CheckTxPostCheckError(t *testing.T) {
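`mustCheckTx` above is the standard done-channel idiom for APIs that report through a callback: close a channel inside the callback and block on it, so the test proceeds only after the callback has actually run. The same shape in isolation (`submit` is a stand-in, not a Tendermint API):

```go
package main

import (
	"context"
	"fmt"
)

// submit stands in for an API that reports its result asynchronously
// through a callback, as CheckTx does above.
func submit(_ context.Context, payload string, cb func(string)) error {
	go cb("ok: " + payload)
	return nil
}

func main() {
	ctx := context.Background()

	// Close a channel in the callback and block on it; the channel close
	// also establishes the happens-before edge that makes reading result
	// safe here.
	done := make(chan struct{})
	var result string
	if err := submit(ctx, "key1=0000=25", func(r string) {
		result = r
		close(done)
	}); err != nil {
		panic(err)
	}
	<-done
	fmt.Println(result)
}
```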
@@ -1,158 +0,0 @@
package mempool

import (
	"container/heap"
	"sort"
	"sync"
)

var _ heap.Interface = (*TxPriorityQueue)(nil)

// TxPriorityQueue defines a thread-safe priority queue for valid transactions.
type TxPriorityQueue struct {
	mtx sync.RWMutex
	txs []*WrappedTx
}

func NewTxPriorityQueue() *TxPriorityQueue {
	pq := &TxPriorityQueue{
		txs: make([]*WrappedTx, 0),
	}

	heap.Init(pq)

	return pq
}

// GetEvictableTxs attempts to find and return a list of *WrappedTx that can be
// evicted to make room for another *WrappedTx with higher priority. If no such
// list of *WrappedTx exists, nil will be returned. The returned list of *WrappedTx
// indicates that these transactions can be removed due to them being of lower
// priority and that their total size allows room for the incoming
// transaction according to the mempool's configured limits.
func (pq *TxPriorityQueue) GetEvictableTxs(priority, txSize, totalSize, cap int64) []*WrappedTx {
	pq.mtx.RLock()
	defer pq.mtx.RUnlock()

	txs := make([]*WrappedTx, len(pq.txs))
	copy(txs, pq.txs)

	sort.Slice(txs, func(i, j int) bool {
		return txs[i].priority < txs[j].priority
	})

	var (
		toEvict []*WrappedTx
		i       int
	)

	currSize := totalSize

	// Loop over all transactions in ascending priority order evaluating those
	// that are only of less priority than the provided argument. We continue
	// evaluating transactions until there is sufficient capacity for the new
	// transaction (size) as defined by txSize.
	for i < len(txs) && txs[i].priority < priority {
		toEvict = append(toEvict, txs[i])
		currSize -= int64(txs[i].Size())

		if currSize+txSize <= cap {
			return toEvict
		}

		i++
	}

	return nil
}

// NumTxs returns the number of transactions in the priority queue. It is
// thread safe.
func (pq *TxPriorityQueue) NumTxs() int {
	pq.mtx.RLock()
	defer pq.mtx.RUnlock()

	return len(pq.txs)
}

// RemoveTx removes a specific transaction from the priority queue.
func (pq *TxPriorityQueue) RemoveTx(tx *WrappedTx) {
	pq.mtx.Lock()
	defer pq.mtx.Unlock()

	if tx.heapIndex < len(pq.txs) {
		heap.Remove(pq, tx.heapIndex)
	}
}

// PushTx adds a valid transaction to the priority queue. It is thread safe.
func (pq *TxPriorityQueue) PushTx(tx *WrappedTx) {
	pq.mtx.Lock()
	defer pq.mtx.Unlock()

	heap.Push(pq, tx)
}

// PopTx removes the top priority transaction from the queue. It is thread safe.
func (pq *TxPriorityQueue) PopTx() *WrappedTx {
	pq.mtx.Lock()
	defer pq.mtx.Unlock()

	x := heap.Pop(pq)
	if x != nil {
		return x.(*WrappedTx)
	}

	return nil
}

// Push implements the Heap interface.
//
// NOTE: A caller should never call Push. Use PushTx instead.
func (pq *TxPriorityQueue) Push(x interface{}) {
	n := len(pq.txs)
	item := x.(*WrappedTx)
	item.heapIndex = n
	pq.txs = append(pq.txs, item)
}

// Pop implements the Heap interface.
//
// NOTE: A caller should never call Pop. Use PopTx instead.
func (pq *TxPriorityQueue) Pop() interface{} {
	old := pq.txs
	n := len(old)
	item := old[n-1]
	old[n-1] = nil      // avoid memory leak
	item.heapIndex = -1 // for safety
	pq.txs = old[0 : n-1]
	return item
}

// Len implements the Heap interface.
//
// NOTE: A caller should never call Len. Use NumTxs instead.
func (pq *TxPriorityQueue) Len() int {
	return len(pq.txs)
}

// Less implements the Heap interface. It returns true if the transaction at
// position i in the queue is of less priority than the transaction at position j.
func (pq *TxPriorityQueue) Less(i, j int) bool {
	// If there exist two transactions with the same priority, consider the one
	// that we saw the earliest as the higher priority transaction.
	if pq.txs[i].priority == pq.txs[j].priority {
		return pq.txs[i].timestamp.Before(pq.txs[j].timestamp)
	}

	// We want Pop to give us the highest, not lowest, priority so we use greater
	// than here.
	return pq.txs[i].priority > pq.txs[j].priority
}

// Swap implements the Heap interface. It swaps two transactions in the queue.
func (pq *TxPriorityQueue) Swap(i, j int) {
	pq.txs[i], pq.txs[j] = pq.txs[j], pq.txs[i]
	pq.txs[i].heapIndex = i
	pq.txs[j].heapIndex = j
}
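The deleted queue leans on one `container/heap` subtlety worth keeping in mind: `Less` uses greater-than, which turns the standard library's min-heap into a max-heap so `Pop` returns the highest priority first. A stripped-down illustration of the same trick:

```go
package main

import (
	"container/heap"
	"fmt"
)

// maxHeap inverts Less (greater-than) so heap.Pop yields the largest
// priority first, exactly as TxPriorityQueue.Less did above.
type maxHeap []int64

func (h maxHeap) Len() int            { return len(h) }
func (h maxHeap) Less(i, j int) bool  { return h[i] > h[j] }
func (h maxHeap) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
func (h *maxHeap) Push(x interface{}) { *h = append(*h, x.(int64)) }
func (h *maxHeap) Pop() interface{} {
	old := *h
	n := len(old)
	x := old[n-1]
	*h = old[:n-1]
	return x
}

func main() {
	h := &maxHeap{5, 25, 10}
	heap.Init(h)
	heap.Push(h, int64(20))
	for h.Len() > 0 {
		fmt.Print(heap.Pop(h), " ") // 25 20 10 5
	}
}
```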
@@ -1,176 +0,0 @@
package mempool

import (
	"math/rand"
	"sort"
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
)

func TestTxPriorityQueue(t *testing.T) {
	pq := NewTxPriorityQueue()
	numTxs := 1000

	priorities := make([]int, numTxs)

	var wg sync.WaitGroup
	for i := 1; i <= numTxs; i++ {
		priorities[i-1] = i
		wg.Add(1)

		go func(i int) {
			pq.PushTx(&WrappedTx{
				priority:  int64(i),
				timestamp: time.Now(),
			})

			wg.Done()
		}(i)
	}

	sort.Sort(sort.Reverse(sort.IntSlice(priorities)))

	wg.Wait()
	require.Equal(t, numTxs, pq.NumTxs())

	// Wait a second and push a tx with a duplicate priority
	time.Sleep(time.Second)
	now := time.Now()
	pq.PushTx(&WrappedTx{
		priority:  1000,
		timestamp: now,
	})
	require.Equal(t, 1001, pq.NumTxs())

	tx := pq.PopTx()
	require.Equal(t, 1000, pq.NumTxs())
	require.Equal(t, int64(1000), tx.priority)
	require.NotEqual(t, now, tx.timestamp)

	gotPriorities := make([]int, 0)
	for pq.NumTxs() > 0 {
		gotPriorities = append(gotPriorities, int(pq.PopTx().priority))
	}

	require.Equal(t, priorities, gotPriorities)
}

func TestTxPriorityQueue_GetEvictableTxs(t *testing.T) {
	pq := NewTxPriorityQueue()
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))

	values := make([]int, 1000)

	for i := 0; i < 1000; i++ {
		tx := make([]byte, 5) // each tx is 5 bytes
		_, err := rng.Read(tx)
		require.NoError(t, err)

		x := rng.Intn(100000)
		pq.PushTx(&WrappedTx{
			tx:       tx,
			priority: int64(x),
		})

		values[i] = x
	}

	sort.Ints(values)

	max := values[len(values)-1]
	min := values[0]
	totalSize := int64(len(values) * 5)

	testCases := []struct {
		name                             string
		priority, txSize, totalSize, cap int64
		expectedLen                      int
	}{
		{
			name:        "largest priority; single tx",
			priority:    int64(max + 1),
			txSize:      5,
			totalSize:   totalSize,
			cap:         totalSize,
			expectedLen: 1,
		},
		{
			name:        "largest priority; multi tx",
			priority:    int64(max + 1),
			txSize:      17,
			totalSize:   totalSize,
			cap:         totalSize,
			expectedLen: 4,
		},
		{
			name:        "largest priority; out of capacity",
			priority:    int64(max + 1),
			txSize:      totalSize + 1,
			totalSize:   totalSize,
			cap:         totalSize,
			expectedLen: 0,
		},
		{
			name:        "smallest priority; no tx",
			priority:    int64(min - 1),
			txSize:      5,
			totalSize:   totalSize,
			cap:         totalSize,
			expectedLen: 0,
		},
		{
			name:        "small priority; no tx",
			priority:    int64(min),
			txSize:      5,
			totalSize:   totalSize,
			cap:         totalSize,
			expectedLen: 0,
		},
	}

	for _, tc := range testCases {
		tc := tc

		t.Run(tc.name, func(t *testing.T) {
			evictTxs := pq.GetEvictableTxs(tc.priority, tc.txSize, tc.totalSize, tc.cap)
			require.Len(t, evictTxs, tc.expectedLen)
		})
	}
}

func TestTxPriorityQueue_RemoveTx(t *testing.T) {
	pq := NewTxPriorityQueue()
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	numTxs := 1000

	values := make([]int, numTxs)

	for i := 0; i < numTxs; i++ {
		x := rng.Intn(100000)
		pq.PushTx(&WrappedTx{
			priority: int64(x),
		})

		values[i] = x
	}

	require.Equal(t, numTxs, pq.NumTxs())

	sort.Ints(values)
	max := values[len(values)-1]

	wtx := pq.txs[pq.NumTxs()/2]
	pq.RemoveTx(wtx)
	require.Equal(t, numTxs-1, pq.NumTxs())
	require.Equal(t, int64(max), pq.PopTx().priority)
	require.Equal(t, numTxs-2, pq.NumTxs())

	require.NotPanics(t, func() {
		pq.RemoveTx(&WrappedTx{heapIndex: numTxs})
		pq.RemoveTx(&WrappedTx{heapIndex: numTxs + 1})
	})
	require.Equal(t, numTxs-2, pq.NumTxs())
}
@@ -9,6 +9,7 @@ import (
|
||||
|
||||
"github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/internal/libs/clist"
|
||||
tmstrings "github.com/tendermint/tendermint/internal/libs/strings"
|
||||
"github.com/tendermint/tendermint/internal/p2p"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/libs/service"
|
||||
@@ -194,7 +195,7 @@ func (r *Reactor) handleMessage(ctx context.Context, envelope *p2p.Envelope) (er
|
||||
|
||||
// processMempoolCh implements a blocking event loop where we listen for p2p
|
||||
// Envelope messages from the mempoolCh.
|
||||
func (r *Reactor) processMempoolCh(ctx context.Context, mempoolCh *p2p.Channel) {
|
||||
func (r *Reactor) processMempoolCh(ctx context.Context, mempoolCh p2p.Channel) {
|
||||
iter := mempoolCh.Receive(ctx)
|
||||
for iter.Next(ctx) {
|
||||
envelope := iter.Envelope()
|
||||
@@ -215,7 +216,7 @@ func (r *Reactor) processMempoolCh(ctx context.Context, mempoolCh *p2p.Channel)
|
||||
// goroutine or not. If not, we start one for the newly added peer. For down or
|
||||
// removed peers, we remove the peer from the mempool peer ID set and signal to
|
||||
// stop the tx broadcasting goroutine.
|
||||
func (r *Reactor) processPeerUpdate(ctx context.Context, peerUpdate p2p.PeerUpdate, mempoolCh *p2p.Channel) {
|
||||
func (r *Reactor) processPeerUpdate(ctx context.Context, peerUpdate p2p.PeerUpdate, mempoolCh p2p.Channel) {
|
||||
r.logger.Debug("received peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status)
|
||||
|
||||
r.mtx.Lock()
|
||||
@@ -264,7 +265,7 @@ func (r *Reactor) processPeerUpdate(ctx context.Context, peerUpdate p2p.PeerUpda
|
||||
// processPeerUpdates initiates a blocking process where we listen for and handle
|
||||
// PeerUpdate messages. When the reactor is stopped, we will catch the signal and
|
||||
// close the p2p PeerUpdatesCh gracefully.
|
||||
func (r *Reactor) processPeerUpdates(ctx context.Context, peerUpdates *p2p.PeerUpdates, mempoolCh *p2p.Channel) {
|
||||
func (r *Reactor) processPeerUpdates(ctx context.Context, peerUpdates *p2p.PeerUpdates, mempoolCh p2p.Channel) {
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
@@ -275,7 +276,7 @@ func (r *Reactor) processPeerUpdates(ctx context.Context, peerUpdates *p2p.PeerU
|
||||
}
|
||||
}
|
||||
|
||||
func (r *Reactor) broadcastTxRoutine(ctx context.Context, peerID types.NodeID, mempoolCh *p2p.Channel) {
|
||||
func (r *Reactor) broadcastTxRoutine(ctx context.Context, peerID types.NodeID, mempoolCh p2p.Channel) {
|
||||
peerMempoolID := r.ids.GetForPeer(peerID)
|
||||
var nextGossipTx *clist.CElement
|
||||
|
||||
@@ -307,8 +308,8 @@ func (r *Reactor) broadcastTxRoutine(ctx context.Context, peerID types.NodeID, m
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-r.mempool.WaitForNextTx(): // wait until a tx is available
|
||||
if nextGossipTx = r.mempool.NextGossipTx(); nextGossipTx == nil {
|
||||
case <-r.mempool.TxsWaitChan(): // wait until a tx is available
|
||||
if nextGossipTx = r.mempool.TxsFront(); nextGossipTx == nil {
|
||||
continue
|
||||
}
|
||||
}
|
||||
@@ -318,7 +319,7 @@ func (r *Reactor) broadcastTxRoutine(ctx context.Context, peerID types.NodeID, m
|
||||
|
||||
// NOTE: Transaction batching was disabled due to:
|
||||
// https://github.com/tendermint/tendermint/issues/5796
|
||||
if ok := r.mempool.txStore.TxHasPeer(memTx.hash, peerMempoolID); !ok {
|
||||
if !memTx.HasPeer(peerMempoolID) {
|
||||
// Send the mempool tx to the corresponding peer. Note, the peer may be
|
||||
// behind and thus would not be able to process the mempool tx correctly.
|
||||
if err := mempoolCh.Send(ctx, p2p.Envelope{
|
||||
@@ -330,9 +331,8 @@ func (r *Reactor) broadcastTxRoutine(ctx context.Context, peerID types.NodeID, m
|
||||
return
|
||||
}
|
||||
|
||||
r.logger.Debug(
|
||||
"gossiped tx to peer",
|
||||
"tx", fmt.Sprintf("%X", memTx.tx.Hash()),
|
||||
r.logger.Debug("gossiped tx to peer",
|
||||
"tx", tmstrings.LazySprintf("%X", memTx.tx.Hash()),
|
||||
"peer", peerID,
|
||||
)
|
||||
}
|
||||
|
||||
@@ -30,7 +30,7 @@ type reactorTestSuite struct {
|
||||
logger log.Logger
|
||||
|
||||
reactors map[types.NodeID]*Reactor
|
||||
mempoolChannels map[types.NodeID]*p2p.Channel
|
||||
mempoolChannels map[types.NodeID]p2p.Channel
|
||||
mempools map[types.NodeID]*TxMempool
|
||||
kvstores map[types.NodeID]*kvstore.Application
|
||||
|
||||
@@ -51,7 +51,7 @@ func setupReactors(ctx context.Context, t *testing.T, logger log.Logger, numNode
|
||||
logger: log.NewNopLogger().With("testCase", t.Name()),
|
||||
network: p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: numNodes}),
|
||||
reactors: make(map[types.NodeID]*Reactor, numNodes),
|
||||
mempoolChannels: make(map[types.NodeID]*p2p.Channel, numNodes),
|
||||
mempoolChannels: make(map[types.NodeID]p2p.Channel, numNodes),
|
||||
mempools: make(map[types.NodeID]*TxMempool, numNodes),
|
||||
kvstores: make(map[types.NodeID]*kvstore.Application, numNodes),
|
||||
peerChans: make(map[types.NodeID]chan p2p.PeerUpdate, numNodes),
|
||||
@@ -68,14 +68,14 @@ func setupReactors(ctx context.Context, t *testing.T, logger log.Logger, numNode
|
||||
require.NoError(t, client.Start(ctx))
|
||||
t.Cleanup(client.Wait)
|
||||
|
||||
mempool := setup(t, client, 0)
|
||||
mempool := setup(t, client, 1<<20)
|
||||
rts.mempools[nodeID] = mempool
|
||||
|
||||
rts.peerChans[nodeID] = make(chan p2p.PeerUpdate, chBuf)
|
||||
rts.peerUpdates[nodeID] = p2p.NewPeerUpdates(rts.peerChans[nodeID], 1)
|
||||
rts.network.Nodes[nodeID].PeerManager.Register(ctx, rts.peerUpdates[nodeID])
|
||||
|
||||
chCreator := func(ctx context.Context, chDesc *p2p.ChannelDescriptor) (*p2p.Channel, error) {
|
||||
chCreator := func(ctx context.Context, chDesc *p2p.ChannelDescriptor) (p2p.Channel, error) {
|
||||
return rts.mempoolChannels[nodeID], nil
|
||||
}
|
||||
|
||||
@@ -170,7 +170,9 @@ func TestReactorBroadcastDoesNotPanic(t *testing.T) {
|
||||
secondaryReactor.observePanic = observePanic
|
||||
|
||||
firstTx := &WrappedTx{}
|
||||
primaryMempool.Lock()
|
||||
primaryMempool.insertTx(firstTx)
|
||||
primaryMempool.Unlock()
|
||||
|
||||
// run the router
|
||||
rts.start(ctx, t)
|
||||
@@ -183,6 +185,8 @@ func TestReactorBroadcastDoesNotPanic(t *testing.T) {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
primaryMempool.Lock()
|
||||
defer primaryMempool.Unlock()
|
||||
primaryMempool.insertTx(next)
|
||||
}()
|
||||
}
|
||||
|
||||
@@ -1,11 +1,9 @@
 package mempool

 import (
-    "sort"
     "sync"
     "time"

-    "github.com/tendermint/tendermint/internal/libs/clist"
     "github.com/tendermint/tendermint/types"
 )

@@ -24,270 +22,78 @@ type TxInfo struct {
 // WrappedTx defines a wrapper around a raw transaction with additional metadata
 // that is used for indexing.
 type WrappedTx struct {
-    // tx represents the raw binary transaction data
-    tx types.Tx
+    tx        types.Tx    // the original transaction data
+    hash      types.TxKey // the transaction hash
+    height    int64       // height when this transaction was initially checked (for expiry)
+    timestamp time.Time   // time when transaction was entered (for TTL)

-    // hash defines the transaction hash and the primary key used in the mempool
-    hash types.TxKey
-
-    // height defines the height at which the transaction was validated at
-    height int64
-
-    // gasWanted defines the amount of gas the transaction sender requires
-    gasWanted int64
-
-    // priority defines the transaction's priority as specified by the application
-    // in the ResponseCheckTx response.
-    priority int64
-
-    // sender defines the transaction's sender as specified by the application in
-    // the ResponseCheckTx response.
-    sender string
-
-    // timestamp is the time at which the node first received the transaction from
-    // a peer. It is used as a second dimension is prioritizing transactions when
-    // two transactions have the same priority.
-    timestamp time.Time
-
-    // peers records a mapping of all peers that sent a given transaction
-    peers map[uint16]struct{}
-
-    // heapIndex defines the index of the item in the heap
-    heapIndex int
-
-    // gossipEl references the linked-list element in the gossip index
-    gossipEl *clist.CElement
-
-    // removed marks the transaction as removed from the mempool. This is set
-    // during RemoveTx and is needed due to the fact that a given existing
-    // transaction in the mempool can be evicted when it is simultaneously having
-    // a reCheckTx callback executed.
-    removed bool
+    mtx       sync.Mutex
+    gasWanted int64           // app: gas required to execute this transaction
+    priority  int64           // app: priority value for this transaction
+    sender    string          // app: assigned sender label
+    peers     map[uint16]bool // peer IDs who have sent us this transaction
 }

-func (wtx *WrappedTx) Size() int {
-    return len(wtx.tx)
-}
+// Size reports the size of the raw transaction in bytes.
+func (w *WrappedTx) Size() int64 { return int64(len(w.tx)) }

-// TxStore implements a thread-safe mapping of valid transaction(s).
-//
-// NOTE:
-// - Concurrent read-only access to a *WrappedTx object is OK. However, mutative
-// access is not allowed. Regardless, it is not expected for the mempool to
-// need mutative access.
-type TxStore struct {
-    mtx       sync.RWMutex
-    hashTxs   map[types.TxKey]*WrappedTx // primary index
-    senderTxs map[string]*WrappedTx      // sender is defined by the ABCI application
-}
-
-func NewTxStore() *TxStore {
-    return &TxStore{
-        senderTxs: make(map[string]*WrappedTx),
-        hashTxs:   make(map[types.TxKey]*WrappedTx),
+// SetPeer adds the specified peer ID as a sender of w.
+func (w *WrappedTx) SetPeer(id uint16) {
+    w.mtx.Lock()
+    defer w.mtx.Unlock()
+    if w.peers == nil {
+        w.peers = map[uint16]bool{id: true}
+    } else {
+        w.peers[id] = true
+    }
 }

-// Size returns the total number of transactions in the store.
-func (txs *TxStore) Size() int {
-    txs.mtx.RLock()
-    defer txs.mtx.RUnlock()
-
-    return len(txs.hashTxs)
-}
-
-// GetAllTxs returns all the transactions currently in the store.
-func (txs *TxStore) GetAllTxs() []*WrappedTx {
-    txs.mtx.RLock()
-    defer txs.mtx.RUnlock()
-
-    wTxs := make([]*WrappedTx, len(txs.hashTxs))
-    i := 0
-    for _, wtx := range txs.hashTxs {
-        wTxs[i] = wtx
-        i++
-    }
-
-    return wTxs
-}
-
-// GetTxBySender returns a *WrappedTx by the transaction's sender property
-// defined by the ABCI application.
-func (txs *TxStore) GetTxBySender(sender string) *WrappedTx {
-    txs.mtx.RLock()
-    defer txs.mtx.RUnlock()
-
-    return txs.senderTxs[sender]
-}
-
-// GetTxByHash returns a *WrappedTx by the transaction's hash.
-func (txs *TxStore) GetTxByHash(hash types.TxKey) *WrappedTx {
-    txs.mtx.RLock()
-    defer txs.mtx.RUnlock()
-
-    return txs.hashTxs[hash]
-}
-
-// IsTxRemoved returns true if a transaction by hash is marked as removed and
-// false otherwise.
-func (txs *TxStore) IsTxRemoved(hash types.TxKey) bool {
-    txs.mtx.RLock()
-    defer txs.mtx.RUnlock()
-
-    wtx, ok := txs.hashTxs[hash]
-    if ok {
-        return wtx.removed
-    }
-
-    return false
-}
-
-// SetTx stores a *WrappedTx by it's hash. If the transaction also contains a
-// non-empty sender, we additionally store the transaction by the sender as
-// defined by the ABCI application.
-func (txs *TxStore) SetTx(wtx *WrappedTx) {
-    txs.mtx.Lock()
-    defer txs.mtx.Unlock()
-
-    if len(wtx.sender) > 0 {
-        txs.senderTxs[wtx.sender] = wtx
-    }
-
-    txs.hashTxs[wtx.tx.Key()] = wtx
-}
-
-// RemoveTx removes a *WrappedTx from the transaction store. It deletes all
-// indexes of the transaction.
-func (txs *TxStore) RemoveTx(wtx *WrappedTx) {
-    txs.mtx.Lock()
-    defer txs.mtx.Unlock()
-
-    if len(wtx.sender) > 0 {
-        delete(txs.senderTxs, wtx.sender)
-    }
-
-    delete(txs.hashTxs, wtx.tx.Key())
-    wtx.removed = true
-}
-
-// TxHasPeer returns true if a transaction by hash has a given peer ID and false
-// otherwise. If the transaction does not exist, false is returned.
-func (txs *TxStore) TxHasPeer(hash types.TxKey, peerID uint16) bool {
-    txs.mtx.RLock()
-    defer txs.mtx.RUnlock()
-
-    wtx := txs.hashTxs[hash]
-    if wtx == nil {
-        return false
-    }
-
-    _, ok := wtx.peers[peerID]
+// HasPeer reports whether the specified peer ID is a sender of w.
+func (w *WrappedTx) HasPeer(id uint16) bool {
+    w.mtx.Lock()
+    defer w.mtx.Unlock()
+    _, ok := w.peers[id]
    return ok
 }

-// GetOrSetPeerByTxHash looks up a WrappedTx by transaction hash and adds the
-// given peerID to the WrappedTx's set of peers that sent us this transaction.
-// We return true if we've already recorded the given peer for this transaction
-// and false otherwise. If the transaction does not exist by hash, we return
-// (nil, false).
-func (txs *TxStore) GetOrSetPeerByTxHash(hash types.TxKey, peerID uint16) (*WrappedTx, bool) {
-    txs.mtx.Lock()
-    defer txs.mtx.Unlock()
-
-    wtx := txs.hashTxs[hash]
-    if wtx == nil {
-        return nil, false
-    }
-
-    if wtx.peers == nil {
-        wtx.peers = make(map[uint16]struct{})
-    }
-
-    if _, ok := wtx.peers[peerID]; ok {
-        return wtx, true
-    }
-
-    wtx.peers[peerID] = struct{}{}
-    return wtx, false
+// SetGasWanted sets the application-assigned gas requirement of w.
+func (w *WrappedTx) SetGasWanted(gas int64) {
+    w.mtx.Lock()
+    defer w.mtx.Unlock()
+    w.gasWanted = gas
 }

-// WrappedTxList implements a thread-safe list of *WrappedTx objects that can be
-// used to build generic transaction indexes in the mempool. It accepts a
-// comparator function, less(a, b *WrappedTx) bool, that compares two WrappedTx
-// references which is used during Insert in order to determine sorted order. If
-// less returns true, a <= b.
-type WrappedTxList struct {
-    mtx  sync.RWMutex
-    txs  []*WrappedTx
-    less func(*WrappedTx, *WrappedTx) bool
+// GasWanted reports the application-assigned gas requirement of w.
+func (w *WrappedTx) GasWanted() int64 {
+    w.mtx.Lock()
+    defer w.mtx.Unlock()
+    return w.gasWanted
 }

-func NewWrappedTxList(less func(*WrappedTx, *WrappedTx) bool) *WrappedTxList {
-    return &WrappedTxList{
-        txs:  make([]*WrappedTx, 0),
-        less: less,
-    }
+// SetSender sets the application-assigned sender of w.
+func (w *WrappedTx) SetSender(sender string) {
+    w.mtx.Lock()
+    defer w.mtx.Unlock()
+    w.sender = sender
 }

-// Size returns the number of WrappedTx objects in the list.
-func (wtl *WrappedTxList) Size() int {
-    wtl.mtx.RLock()
-    defer wtl.mtx.RUnlock()
-
-    return len(wtl.txs)
+// Sender reports the application-assigned sender of w.
+func (w *WrappedTx) Sender() string {
+    w.mtx.Lock()
+    defer w.mtx.Unlock()
+    return w.sender
 }

-// Reset resets the list of transactions to an empty list.
-func (wtl *WrappedTxList) Reset() {
-    wtl.mtx.Lock()
-    defer wtl.mtx.Unlock()
-
-    wtl.txs = make([]*WrappedTx, 0)
+// SetPriority sets the application-assigned priority of w.
+func (w *WrappedTx) SetPriority(p int64) {
+    w.mtx.Lock()
+    defer w.mtx.Unlock()
+    w.priority = p
 }

-// Insert inserts a WrappedTx reference into the sorted list based on the list's
-// comparator function.
-func (wtl *WrappedTxList) Insert(wtx *WrappedTx) {
-    wtl.mtx.Lock()
-    defer wtl.mtx.Unlock()
-
-    i := sort.Search(len(wtl.txs), func(i int) bool {
-        return wtl.less(wtl.txs[i], wtx)
-    })
-
-    if i == len(wtl.txs) {
-        // insert at the end
-        wtl.txs = append(wtl.txs, wtx)
-        return
-    }
-
-    // Make space for the inserted element by shifting values at the insertion
-    // index up one index.
-    //
-    // NOTE: The call to append does not allocate memory when cap(wtl.txs) > len(wtl.txs).
-    wtl.txs = append(wtl.txs[:i+1], wtl.txs[i:]...)
-    wtl.txs[i] = wtx
-}
-
-// Remove attempts to remove a WrappedTx from the sorted list.
-func (wtl *WrappedTxList) Remove(wtx *WrappedTx) {
-    wtl.mtx.Lock()
-    defer wtl.mtx.Unlock()
-
-    i := sort.Search(len(wtl.txs), func(i int) bool {
-        return wtl.less(wtl.txs[i], wtx)
-    })
-
-    // Since the list is sorted, we evaluate all elements starting at i. Note, if
-    // the element does not exist, we may potentially evaluate the entire remainder
-    // of the list. However, a caller should not be expected to call Remove with a
-    // non-existing element.
-    for i < len(wtl.txs) {
-        if wtl.txs[i] == wtx {
-            wtl.txs = append(wtl.txs[:i], wtl.txs[i+1:]...)
-            return
-        }
-
-        i++
-    }
+// Priority reports the application-assigned priority of w.
+func (w *WrappedTx) Priority() int64 {
+    w.mtx.Lock()
+    defer w.mtx.Unlock()
+    return w.priority
 }
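Note: the tx.go rewrite above moves peer bookkeeping off a shared, store-wide structure and onto each transaction behind its own mutex. A rough stand-in illustrating the pattern (simplified type, not the real WrappedTx):

    package main

    import (
        "fmt"
        "sync"
    )

    // wrapped guards its mutable metadata with a per-value mutex, so
    // callers no longer need a store-wide lock just to record a sender.
    type wrapped struct {
        mtx   sync.Mutex
        peers map[uint16]bool
    }

    func (w *wrapped) SetPeer(id uint16) {
        w.mtx.Lock()
        defer w.mtx.Unlock()
        if w.peers == nil {
            w.peers = map[uint16]bool{id: true}
            return
        }
        w.peers[id] = true
    }

    func (w *wrapped) HasPeer(id uint16) bool {
        w.mtx.Lock()
        defer w.mtx.Unlock()
        return w.peers[id] // reading a nil map safely yields false
    }

    func main() {
        w := &wrapped{}
        w.SetPeer(7)
        fmt.Println(w.HasPeer(7), w.HasPeer(8)) // true false
    }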
@@ -1,231 +0,0 @@
-package mempool
-
-import (
-    "fmt"
-    "math/rand"
-    "sort"
-    "testing"
-    "time"
-
-    "github.com/stretchr/testify/require"
-
-    "github.com/tendermint/tendermint/types"
-)
-
-func TestTxStore_GetTxBySender(t *testing.T) {
-    txs := NewTxStore()
-    wtx := &WrappedTx{
-        tx:        []byte("test_tx"),
-        sender:    "foo",
-        priority:  1,
-        timestamp: time.Now(),
-    }
-
-    res := txs.GetTxBySender(wtx.sender)
-    require.Nil(t, res)
-
-    txs.SetTx(wtx)
-
-    res = txs.GetTxBySender(wtx.sender)
-    require.NotNil(t, res)
-    require.Equal(t, wtx, res)
-}
-
-func TestTxStore_GetTxByHash(t *testing.T) {
-    txs := NewTxStore()
-    wtx := &WrappedTx{
-        tx:        []byte("test_tx"),
-        sender:    "foo",
-        priority:  1,
-        timestamp: time.Now(),
-    }
-
-    key := wtx.tx.Key()
-    res := txs.GetTxByHash(key)
-    require.Nil(t, res)
-
-    txs.SetTx(wtx)
-
-    res = txs.GetTxByHash(key)
-    require.NotNil(t, res)
-    require.Equal(t, wtx, res)
-}
-
-func TestTxStore_SetTx(t *testing.T) {
-    txs := NewTxStore()
-    wtx := &WrappedTx{
-        tx:        []byte("test_tx"),
-        priority:  1,
-        timestamp: time.Now(),
-    }
-
-    key := wtx.tx.Key()
-    txs.SetTx(wtx)
-
-    res := txs.GetTxByHash(key)
-    require.NotNil(t, res)
-    require.Equal(t, wtx, res)
-
-    wtx.sender = "foo"
-    txs.SetTx(wtx)
-
-    res = txs.GetTxByHash(key)
-    require.NotNil(t, res)
-    require.Equal(t, wtx, res)
-}
-
-func TestTxStore_GetOrSetPeerByTxHash(t *testing.T) {
-    txs := NewTxStore()
-    wtx := &WrappedTx{
-        tx:        []byte("test_tx"),
-        priority:  1,
-        timestamp: time.Now(),
-    }
-
-    key := wtx.tx.Key()
-    txs.SetTx(wtx)
-
-    res, ok := txs.GetOrSetPeerByTxHash(types.Tx([]byte("test_tx_2")).Key(), 15)
-    require.Nil(t, res)
-    require.False(t, ok)
-
-    res, ok = txs.GetOrSetPeerByTxHash(key, 15)
-    require.NotNil(t, res)
-    require.False(t, ok)
-
-    res, ok = txs.GetOrSetPeerByTxHash(key, 15)
-    require.NotNil(t, res)
-    require.True(t, ok)
-
-    require.True(t, txs.TxHasPeer(key, 15))
-    require.False(t, txs.TxHasPeer(key, 16))
-}
-
-func TestTxStore_RemoveTx(t *testing.T) {
-    txs := NewTxStore()
-    wtx := &WrappedTx{
-        tx:        []byte("test_tx"),
-        priority:  1,
-        timestamp: time.Now(),
-    }
-
-    txs.SetTx(wtx)
-
-    key := wtx.tx.Key()
-    res := txs.GetTxByHash(key)
-    require.NotNil(t, res)
-
-    txs.RemoveTx(res)
-
-    res = txs.GetTxByHash(key)
-    require.Nil(t, res)
-}
-
-func TestTxStore_Size(t *testing.T) {
-    txStore := NewTxStore()
-    numTxs := 1000
-
-    for i := 0; i < numTxs; i++ {
-        txStore.SetTx(&WrappedTx{
-            tx:        []byte(fmt.Sprintf("test_tx_%d", i)),
-            priority:  int64(i),
-            timestamp: time.Now(),
-        })
-    }
-
-    require.Equal(t, numTxs, txStore.Size())
-}
-
-func TestWrappedTxList_Reset(t *testing.T) {
-    list := NewWrappedTxList(func(wtx1, wtx2 *WrappedTx) bool {
-        return wtx1.height >= wtx2.height
-    })
-
-    require.Zero(t, list.Size())
-
-    for i := 0; i < 100; i++ {
-        list.Insert(&WrappedTx{height: int64(i)})
-    }
-
-    require.Equal(t, 100, list.Size())
-
-    list.Reset()
-    require.Zero(t, list.Size())
-}
-
-func TestWrappedTxList_Insert(t *testing.T) {
-    list := NewWrappedTxList(func(wtx1, wtx2 *WrappedTx) bool {
-        return wtx1.height >= wtx2.height
-    })
-
-    rng := rand.New(rand.NewSource(time.Now().UnixNano()))
-
-    var expected []int
-    for i := 0; i < 100; i++ {
-        height := rng.Int63n(10000)
-        expected = append(expected, int(height))
-        list.Insert(&WrappedTx{height: height})
-
-        if i%10 == 0 {
-            list.Insert(&WrappedTx{height: height})
-            expected = append(expected, int(height))
-        }
-    }
-
-    got := make([]int, list.Size())
-    for i, wtx := range list.txs {
-        got[i] = int(wtx.height)
-    }
-
-    sort.Ints(expected)
-    require.Equal(t, expected, got)
-}
-
-func TestWrappedTxList_Remove(t *testing.T) {
-    list := NewWrappedTxList(func(wtx1, wtx2 *WrappedTx) bool {
-        return wtx1.height >= wtx2.height
-    })
-
-    rng := rand.New(rand.NewSource(time.Now().UnixNano()))
-
-    var txs []*WrappedTx
-    for i := 0; i < 100; i++ {
-        height := rng.Int63n(10000)
-        tx := &WrappedTx{height: height}
-
-        txs = append(txs, tx)
-        list.Insert(tx)
-
-        if i%10 == 0 {
-            tx = &WrappedTx{height: height}
-            list.Insert(tx)
-            txs = append(txs, tx)
-        }
-    }
-
-    // remove a tx that does not exist
-    list.Remove(&WrappedTx{height: 20000})
-
-    // remove a tx that exists (by height) but not referenced
-    list.Remove(&WrappedTx{height: txs[0].height})
-
-    // remove a few existing txs
-    for i := 0; i < 25; i++ {
-        j := rng.Intn(len(txs))
-        list.Remove(txs[j])
-        txs = append(txs[:j], txs[j+1:]...)
-    }
-
-    expected := make([]int, len(txs))
-    for i, tx := range txs {
-        expected[i] = int(tx.height)
-    }
-
-    got := make([]int, list.Size())
-    for i, wtx := range list.txs {
-        got[i] = int(wtx.height)
-    }
-
-    sort.Ints(expected)
-    require.Equal(t, expected, got)
-}
@@ -2,6 +2,7 @@ package p2p

 import (
    "context"
+   "errors"
    "fmt"
    "sync"

@@ -37,6 +38,16 @@ type Wrapper interface {
    Unwrap() (proto.Message, error)
 }

+type Channel interface {
+   fmt.Stringer
+
+   Err() error
+
+   Send(context.Context, Envelope) error
+   SendError(context.Context, PeerError) error
+   Receive(context.Context) *ChannelIterator
+}
+
 // PeerError is a peer error reported via Channel.Error.
 //
 // FIXME: This currently just disconnects the peer, which is too simplistic.

@@ -56,9 +67,9 @@ type PeerError struct {
 func (pe PeerError) Error() string { return fmt.Sprintf("peer=%q: %s", pe.NodeID, pe.Err.Error()) }
 func (pe PeerError) Unwrap() error { return pe.Err }

-// Channel is a bidirectional channel to exchange Protobuf messages with peers.
+// legacyChannel is a bidirectional channel to exchange Protobuf messages with peers.
 // Each message is wrapped in an Envelope to specify its sender and receiver.
-type Channel struct {
+type legacyChannel struct {
    ID    ChannelID
    inCh  <-chan Envelope // inbound messages (peers to reactors)
    outCh chan<- Envelope // outbound messages (reactors to peers)

@@ -69,9 +80,10 @@ type Channel struct {
 // NewChannel creates a new channel. It is primarily for internal and test
 // use, reactors should use Router.OpenChannel().
-func NewChannel(id ChannelID, inCh <-chan Envelope, outCh chan<- Envelope, errCh chan<- PeerError) *Channel {
-   return &Channel{
+func NewChannel(id ChannelID, name string, inCh <-chan Envelope, outCh chan<- Envelope, errCh chan<- PeerError) Channel {
+   return &legacyChannel{
        ID:    id,
+       name:  name,
        inCh:  inCh,
        outCh: outCh,
        errCh: errCh,

@@ -80,7 +92,7 @@ func NewChannel(id ChannelID, inCh <-chan Envelope, outCh chan<- Envelope, errCh
 // Send blocks until the envelope has been sent, or until ctx ends.
 // An error only occurs if the context ends before the send completes.
-func (ch *Channel) Send(ctx context.Context, envelope Envelope) error {
+func (ch *legacyChannel) Send(ctx context.Context, envelope Envelope) error {
    select {
    case <-ctx.Done():
        return ctx.Err()

@@ -89,9 +101,15 @@ func (ch *Channel) Send(ctx context.Context, envelope Envelope) error {
    }
 }

+func (ch *legacyChannel) Err() error { return nil }
+
 // SendError blocks until the given error has been sent, or ctx ends.
 // An error only occurs if the context ends before the send completes.
-func (ch *Channel) SendError(ctx context.Context, pe PeerError) error {
+func (ch *legacyChannel) SendError(ctx context.Context, pe PeerError) error {
+   if errors.Is(pe.Err, context.Canceled) || errors.Is(pe.Err, context.DeadlineExceeded) {
+       return nil
+   }
+
    select {
    case <-ctx.Done():
        return ctx.Err()

@@ -100,18 +118,29 @@ func (ch *Channel) SendError(ctx context.Context, pe PeerError) error {
    }
 }

-func (ch *Channel) String() string { return fmt.Sprintf("p2p.Channel<%d:%s>", ch.ID, ch.name) }
+func (ch *legacyChannel) String() string { return fmt.Sprintf("p2p.Channel<%d:%s>", ch.ID, ch.name) }

 // Receive returns a new unbuffered iterator to receive messages from ch.
 // The iterator runs until ctx ends.
-func (ch *Channel) Receive(ctx context.Context) *ChannelIterator {
+func (ch *legacyChannel) Receive(ctx context.Context) *ChannelIterator {
    iter := &ChannelIterator{
        pipe: make(chan Envelope), // unbuffered
    }
-   go func() {
+   go func(pipe chan<- Envelope) {
        defer close(iter.pipe)
-       iteratorWorker(ctx, ch, iter.pipe)
-   }()
+       for {
+           select {
+           case <-ctx.Done():
+               return
+           case envelope := <-ch.inCh:
+               select {
+               case <-ctx.Done():
+                   return
+               case pipe <- envelope:
+               }
+           }
+       }
+   }(iter.pipe)
    return iter
 }

@@ -126,21 +155,6 @@ type ChannelIterator struct {
    current *Envelope
 }

-func iteratorWorker(ctx context.Context, ch *Channel, pipe chan Envelope) {
-   for {
-       select {
-       case <-ctx.Done():
-           return
-       case envelope := <-ch.inCh:
-           select {
-           case <-ctx.Done():
-               return
-           case pipe <- envelope:
-           }
-       }
-   }
-}
-
 // Next returns true when the Envelope value has advanced, and false
 // when the context is canceled or iteration should stop. If an iterator has returned false,
 // it will never return true again.

@@ -179,7 +193,7 @@ func (iter *ChannelIterator) Envelope() *Envelope { return iter.current }
 //
 // This allows the caller to consume messages from multiple channels
 // without needing to manage the concurrency separately.
-func MergedChannelIterator(ctx context.Context, chs ...*Channel) *ChannelIterator {
+func MergedChannelIterator(ctx context.Context, chs ...Channel) *ChannelIterator {
    iter := &ChannelIterator{
        pipe: make(chan Envelope), // unbuffered
    }

@@ -187,10 +201,17 @@ func MergedChannelIterator(ctx context.Context, chs ...*Channel) *ChannelIterato
    for _, ch := range chs {
        wg.Add(1)
-       go func(ch *Channel) {
+       go func(ch Channel, pipe chan<- Envelope) {
            defer wg.Done()
-           iteratorWorker(ctx, ch, iter.pipe)
-       }(ch)
+           iter := ch.Receive(ctx)
+           for iter.Next(ctx) {
+               select {
+               case <-ctx.Done():
+                   return
+               case pipe <- *iter.Envelope():
+               }
+           }
+       }(ch, iter.pipe)
    }

    done := make(chan struct{})
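Note: with p2p.Channel now an interface, anything exposing the same methods can stand in for the concrete legacyChannel, which is what the reactor test suites in this change rely on. A toy illustration of that substitution idiom (hypothetical types, unrelated to the p2p package):

    package main

    import "fmt"

    // sender is the narrow behavior a component needs; production and
    // test implementations both satisfy it.
    type sender interface {
        Send(msg string) error
    }

    type realSender struct{}        // would write to the wire
    type fakeSender struct{ n int } // counts sends for assertions

    func (realSender) Send(msg string) error { fmt.Println("sent:", msg); return nil }
    func (f *fakeSender) Send(string) error  { f.n++; return nil }

    func broadcast(s sender, msgs []string) {
        for _, m := range msgs {
            _ = s.Send(m) // error handling elided in this sketch
        }
    }

    func main() {
        f := &fakeSender{}
        broadcast(f, []string{"a", "b"})
        fmt.Println("fake saw", f.n, "sends") // fake saw 2 sends
    }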
@@ -16,13 +16,13 @@ type channelInternal struct {
    Error chan PeerError
 }

-func testChannel(size int) (*channelInternal, *Channel) {
+func testChannel(size int) (*channelInternal, *legacyChannel) {
    in := &channelInternal{
        In:    make(chan Envelope, size),
        Out:   make(chan Envelope, size),
        Error: make(chan PeerError, size),
    }
-   ch := &Channel{
+   ch := &legacyChannel{
        inCh:  in.In,
        outCh: in.Out,
        errCh: in.Error,
@@ -313,7 +313,7 @@ func (c *MConnection) Send(chID ChannelID, msgBytes []byte) bool {
    // Send message to channel.
    channel, ok := c.channelsIdx[chID]
    if !ok {
-       c.logger.Error(fmt.Sprintf("Cannot send bytes, unknown channel %X", chID))
+       c.logger.Error("Cannot send bytes to unknown channel", "channel", chID)
        return false
    }
@@ -315,6 +315,10 @@ func TestMConnectionMultiplePings(t *testing.T) {
 }

 func TestMConnectionPingPongs(t *testing.T) {
+   if testing.Short() {
+       t.Skip("skipping test in short mode")
+   }
+
    // check that we are not leaking any go-routines
    t.Cleanup(leaktest.CheckTimeout(t, 10*time.Second))

@@ -558,6 +562,10 @@ func TestMConnectionReadErrorUnknownMsgType(t *testing.T) {
 }

 func TestMConnectionTrySend(t *testing.T) {
+   if testing.Short() {
+       t.Skip("skipping test in short mode")
+   }
+
    server, client := net.Pipe()
    t.Cleanup(closeAll(t, client, server))
    ctx, cancel := context.WithCancel(context.Background())

@@ -606,6 +614,10 @@ func TestConnVectors(t *testing.T) {
 }

 func TestMConnectionChannelOverflow(t *testing.T) {
+   if testing.Short() {
+       t.Skip("skipping test in short mode")
+   }
+
    chOnErr := make(chan struct{})
    chOnRcv := make(chan struct{})
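Note: the hunks above (and several below) add the stock short-mode guard, so `go test -short` skips slow connection tests. The pattern in isolation (a hypothetical test, shown only for shape):

    package example

    import (
        "testing"
        "time"
    )

    func TestSlowPath(t *testing.T) {
        if testing.Short() {
            t.Skip("skipping test in short mode")
        }
        time.Sleep(2 * time.Second) // stands in for expensive setup
    }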
@@ -146,8 +146,8 @@ func (n *Network) MakeChannels(
    ctx context.Context,
    t *testing.T,
    chDesc *p2p.ChannelDescriptor,
-) map[types.NodeID]*p2p.Channel {
-   channels := map[types.NodeID]*p2p.Channel{}
+) map[types.NodeID]p2p.Channel {
+   channels := map[types.NodeID]p2p.Channel{}
    for _, node := range n.Nodes {
        channels[node.NodeID] = node.MakeChannel(ctx, t, chDesc)
    }

@@ -161,8 +161,8 @@ func (n *Network) MakeChannelsNoCleanup(
    ctx context.Context,
    t *testing.T,
    chDesc *p2p.ChannelDescriptor,
-) map[types.NodeID]*p2p.Channel {
-   channels := map[types.NodeID]*p2p.Channel{}
+) map[types.NodeID]p2p.Channel {
+   channels := map[types.NodeID]p2p.Channel{}
    for _, node := range n.Nodes {
        channels[node.NodeID] = node.MakeChannelNoCleanup(ctx, t, chDesc)
    }

@@ -304,7 +304,7 @@ func (n *Node) MakeChannel(
    ctx context.Context,
    t *testing.T,
    chDesc *p2p.ChannelDescriptor,
-) *p2p.Channel {
+) p2p.Channel {
    ctx, cancel := context.WithCancel(ctx)
    channel, err := n.Router.OpenChannel(ctx, chDesc)
    require.NoError(t, err)

@@ -321,7 +321,7 @@ func (n *Node) MakeChannelNoCleanup(
    ctx context.Context,
    t *testing.T,
    chDesc *p2p.ChannelDescriptor,
-) *p2p.Channel {
+) p2p.Channel {
    channel, err := n.Router.OpenChannel(ctx, chDesc)
    require.NoError(t, err)
    return channel
@@ -15,7 +15,7 @@ import (
 )

 // RequireEmpty requires that the given channel is empty.
-func RequireEmpty(ctx context.Context, t *testing.T, channels ...*p2p.Channel) {
+func RequireEmpty(ctx context.Context, t *testing.T, channels ...p2p.Channel) {
    t.Helper()

    ctx, cancel := context.WithTimeout(ctx, 10*time.Millisecond)

@@ -32,7 +32,7 @@ func RequireEmpty(ctx context.Context, t *testing.T, channels ...*p2p.Channel) {
 }

 // RequireReceive requires that the given envelope is received on the channel.
-func RequireReceive(ctx context.Context, t *testing.T, channel *p2p.Channel, expect p2p.Envelope) {
+func RequireReceive(ctx context.Context, t *testing.T, channel p2p.Channel, expect p2p.Envelope) {
    t.Helper()

    ctx, cancel := context.WithTimeout(ctx, time.Second)

@@ -54,7 +54,7 @@ func RequireReceive(ctx context.Context, t *testing.T, channel *p2p.Channel, exp
 // RequireReceiveUnordered requires that the given envelopes are all received on
 // the channel, ignoring order.
-func RequireReceiveUnordered(ctx context.Context, t *testing.T, channel *p2p.Channel, expect []*p2p.Envelope) {
+func RequireReceiveUnordered(ctx context.Context, t *testing.T, channel p2p.Channel, expect []*p2p.Envelope) {
    ctx, cancel := context.WithTimeout(ctx, time.Second)
    defer cancel()

@@ -75,7 +75,7 @@ func RequireReceiveUnordered(ctx context.Context, t *testing.T, channel *p2p.Cha
 }

 // RequireSend requires that the given envelope is sent on the channel.
-func RequireSend(ctx context.Context, t *testing.T, channel *p2p.Channel, envelope p2p.Envelope) {
+func RequireSend(ctx context.Context, t *testing.T, channel p2p.Channel, envelope p2p.Envelope) {
    tctx, cancel := context.WithTimeout(ctx, time.Second)
    defer cancel()

@@ -93,7 +93,7 @@ func RequireSend(ctx context.Context, t *testing.T, channel *p2p.Channel, envelo
 func RequireSendReceive(
    ctx context.Context,
    t *testing.T,
-   channel *p2p.Channel,
+   channel p2p.Channel,
    peerID types.NodeID,
    send proto.Message,
    receive proto.Message,

@@ -116,7 +116,7 @@ func RequireNoUpdates(ctx context.Context, t *testing.T, peerUpdates *p2p.PeerUp
 }

 // RequireError requires that the given peer error is submitted for a peer.
-func RequireError(ctx context.Context, t *testing.T, channel *p2p.Channel, peerError p2p.PeerError) {
+func RequireError(ctx context.Context, t *testing.T, channel p2p.Channel, peerError p2p.PeerError) {
    tctx, tcancel := context.WithTimeout(ctx, time.Second)
    defer tcancel()
@@ -954,7 +954,7 @@ func (m *PeerManager) Advertise(peerID types.NodeID, limit uint16) []NodeAddress
    }

    var numAddresses int
-   var totalScore int
+   var totalAbsScore int
    ranked := m.store.Ranked()
    seenAddresses := map[NodeAddress]struct{}{}
    scores := map[types.NodeID]int{}

@@ -965,8 +965,12 @@ func (m *PeerManager) Advertise(peerID types.NodeID, limit uint16) []NodeAddress
        continue
    }
    score := int(peer.Score())
+   if score < 0 {
+       totalAbsScore += -score
+   } else {
+       totalAbsScore += score
+   }

-   totalScore += score
    scores[peer.ID] = score
    for addr := range peer.AddressInfo {
        if _, ok := m.options.PrivatePeers[addr.NodeID]; !ok {

@@ -975,6 +979,8 @@ func (m *PeerManager) Advertise(peerID types.NodeID, limit uint16) []NodeAddress
        }
    }

+   meanAbsScore := (totalAbsScore + 1) / (len(scores) + 1)
+
    var attempts uint16
    var addedLastIteration bool

@@ -1023,7 +1029,7 @@ func (m *PeerManager) Advertise(peerID types.NodeID, limit uint16) []NodeAddress
    // peer.

    // nolint:gosec // G404: Use of weak random number generator
-   if numAddresses <= int(limit) || rand.Intn(totalScore+1) <= scores[peer.ID]+1 || rand.Intn((idx+1)*10) <= idx+1 {
+   if numAddresses <= int(limit) || rand.Intn((meanAbsScore*2)+1) <= scores[peer.ID]+1 || rand.Intn((idx+1)*10) <= idx+1 {
        addresses = append(addresses, addressInfo.Address)
        addedLastIteration = true
        seenAddresses[addressInfo.Address] = struct{}{}
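Note: the Advertise change above appears to normalize the random keep-gate by a mean of absolute peer scores rather than the raw score total, which a few extreme scores could otherwise dominate. A rough numeric sketch of just that check (simplified, hypothetical names; not the full Advertise logic):

    package main

    import (
        "fmt"
        "math/rand"
    )

    // keep mirrors the reworked gate: an address is kept with probability
    // roughly proportional to its peer's score relative to the mean
    // absolute score, instead of the sum of all scores.
    func keep(score, meanAbsScore int, r *rand.Rand) bool {
        return r.Intn((meanAbsScore*2)+1) <= score+1
    }

    func main() {
        r := rand.New(rand.NewSource(1))
        hits := 0
        for i := 0; i < 1000; i++ {
            if keep(10, 10, r) { // an average-scored peer, mean 10
                hits++
            }
        }
        fmt.Printf("kept roughly %d%% of attempts\n", hits/10)
    }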
@@ -296,6 +296,10 @@ func TestPeerManager_DialNext(t *testing.T) {
 }

 func TestPeerManager_DialNext_Retry(t *testing.T) {
+   if testing.Short() {
+       t.Skip("skipping test in short mode")
+   }
+
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()
@@ -145,7 +145,7 @@ func (r *Reactor) OnStop() {}

 // processPexCh implements a blocking event loop where we listen for p2p
 // Envelope messages from the pexCh.
-func (r *Reactor) processPexCh(ctx context.Context, pexCh *p2p.Channel) {
+func (r *Reactor) processPexCh(ctx context.Context, pexCh p2p.Channel) {
    incoming := make(chan *p2p.Envelope)
    go func() {
        defer close(incoming)

@@ -192,8 +192,7 @@ func (r *Reactor) processPexCh(ctx context.Context, pexCh *p2p.Channel) {
    // A request from another peer, or a response to one of our requests.
    dur, err := r.handlePexMessage(ctx, envelope, pexCh)
    if err != nil {
-       r.logger.Error("failed to process message",
-           "ch_id", envelope.ChannelID, "envelope", envelope, "err", err)
+       r.logger.Error("failed to process message", "ch_id", envelope.ChannelID, "envelope", envelope, "err", err)
        if serr := pexCh.SendError(ctx, p2p.PeerError{
            NodeID: envelope.From,
            Err:    err,

@@ -225,7 +224,7 @@ func (r *Reactor) processPeerUpdates(ctx context.Context, peerUpdates *p2p.PeerU
 // handlePexMessage handles envelopes sent from peers on the PexChannel.
 // If an update was received, a new polling interval is returned; otherwise the
 // duration is 0.
-func (r *Reactor) handlePexMessage(ctx context.Context, envelope *p2p.Envelope, pexCh *p2p.Channel) (time.Duration, error) {
+func (r *Reactor) handlePexMessage(ctx context.Context, envelope *p2p.Envelope, pexCh p2p.Channel) (time.Duration, error) {
    logger := r.logger.With("peer", envelope.From)

    switch msg := envelope.Message.(type) {

@@ -308,7 +307,7 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) {
 // that peer a request for more peer addresses. The chosen peer is moved into
 // the requestsSent bucket so that we will not attempt to contact them again
 // until they've replied or updated.
-func (r *Reactor) sendRequestForPeers(ctx context.Context, pexCh *p2p.Channel) error {
+func (r *Reactor) sendRequestForPeers(ctx context.Context, pexCh p2p.Channel) error {
    r.mtx.Lock()
    defer r.mtx.Unlock()
    if len(r.availablePeers) == 0 {

@@ -359,7 +358,8 @@ func (r *Reactor) calculateNextRequestTime(added int) time.Duration {
    // If the peer store is nearly full, wait the maximum interval.
    if ratio := r.peerManager.PeerRatio(); ratio >= 0.95 {
        r.logger.Debug("Peer manager is nearly full",
-           "sleep_period", fullCapacityInterval, "ratio", ratio)
+           "sleep_period", fullCapacityInterval,
+           "ratio", ratio)
        return fullCapacityInterval
    }
@@ -275,7 +275,7 @@ type singleTestReactor struct {
    pexInCh  chan p2p.Envelope
    pexOutCh chan p2p.Envelope
    pexErrCh chan p2p.PeerError
-   pexCh    *p2p.Channel
+   pexCh    p2p.Channel
    peerCh   chan p2p.PeerUpdate
    manager  *p2p.PeerManager
 }

@@ -287,8 +287,11 @@ func setupSingle(ctx context.Context, t *testing.T) *singleTestReactor {
    pexInCh := make(chan p2p.Envelope, chBuf)
    pexOutCh := make(chan p2p.Envelope, chBuf)
    pexErrCh := make(chan p2p.PeerError, chBuf)

+   chDesc := pex.ChannelDescriptor()
    pexCh := p2p.NewChannel(
-       p2p.ChannelID(pex.PexChannel),
+       chDesc.ID,
+       chDesc.Name,
        pexInCh,
        pexOutCh,
        pexErrCh,

@@ -299,7 +302,7 @@ func setupSingle(ctx context.Context, t *testing.T) *singleTestReactor {
    peerManager, err := p2p.NewPeerManager(nodeID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
    require.NoError(t, err)

-   chCreator := func(context.Context, *p2p.ChannelDescriptor) (*p2p.Channel, error) {
+   chCreator := func(context.Context, *p2p.ChannelDescriptor) (p2p.Channel, error) {
        return pexCh, nil
    }

@@ -324,7 +327,7 @@ type reactorTestSuite struct {
    logger log.Logger

    reactors    map[types.NodeID]*pex.Reactor
-   pexChannels map[types.NodeID]*p2p.Channel
+   pexChannels map[types.NodeID]p2p.Channel

    peerChans   map[types.NodeID]chan p2p.PeerUpdate
    peerUpdates map[types.NodeID]*p2p.PeerUpdates

@@ -367,7 +370,7 @@ func setupNetwork(ctx context.Context, t *testing.T, opts testOptions) *reactorT
    logger:      log.NewNopLogger().With("testCase", t.Name()),
    network:     p2ptest.MakeNetwork(ctx, t, networkOpts),
    reactors:    make(map[types.NodeID]*pex.Reactor, realNodes),
-   pexChannels: make(map[types.NodeID]*p2p.Channel, opts.TotalNodes),
+   pexChannels: make(map[types.NodeID]p2p.Channel, opts.TotalNodes),
    peerChans:   make(map[types.NodeID]chan p2p.PeerUpdate, opts.TotalNodes),
    peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, opts.TotalNodes),
    total:       opts.TotalNodes,

@@ -388,7 +391,7 @@ func setupNetwork(ctx context.Context, t *testing.T, opts testOptions) *reactorT
    rts.peerUpdates[nodeID] = p2p.NewPeerUpdates(rts.peerChans[nodeID], chBuf)
    rts.network.Nodes[nodeID].PeerManager.Register(ctx, rts.peerUpdates[nodeID])

-   chCreator := func(context.Context, *p2p.ChannelDescriptor) (*p2p.Channel, error) {
+   chCreator := func(context.Context, *p2p.ChannelDescriptor) (p2p.Channel, error) {
        return rts.pexChannels[nodeID], nil
    }

@@ -448,7 +451,7 @@ func (r *reactorTestSuite) addNodes(ctx context.Context, t *testing.T, nodes int
    r.peerUpdates[nodeID] = p2p.NewPeerUpdates(r.peerChans[nodeID], r.opts.BufferSize)
    r.network.Nodes[nodeID].PeerManager.Register(ctx, r.peerUpdates[nodeID])

-   chCreator := func(context.Context, *p2p.ChannelDescriptor) (*p2p.Channel, error) {
+   chCreator := func(context.Context, *p2p.ChannelDescriptor) (p2p.Channel, error) {
        return r.pexChannels[nodeID], nil
    }
@@ -208,13 +208,11 @@ func (s *pqScheduler) process(ctx context.Context) {
    } else {
        pqEnvTmpChIDStr := strconv.Itoa(int(pqEnvTmp.envelope.ChannelID))
        s.metrics.PeerQueueDroppedMsgs.With("ch_id", pqEnvTmpChIDStr).Add(1)
-       s.logger.Debug(
-           "dropped envelope",
+       s.logger.Debug("dropped envelope",
            "ch_id", pqEnvTmpChIDStr,
            "priority", pqEnvTmp.priority,
            "msg_size", pqEnvTmp.size,
-           "capacity", s.capacity,
-       )
+           "capacity", s.capacity)

        s.metrics.PeerPendingSendBytes.With("peer_id", string(pqEnvTmp.envelope.To)).Add(float64(-pqEnvTmp.size))

@@ -238,13 +236,11 @@ func (s *pqScheduler) process(ctx context.Context) {
    // There is not sufficient capacity to drop lower priority Envelopes,
    // so we drop the incoming Envelope.
    s.metrics.PeerQueueDroppedMsgs.With("ch_id", chIDStr).Add(1)
-   s.logger.Debug(
-       "dropped envelope",
+   s.logger.Debug("dropped envelope",
        "ch_id", chIDStr,
        "priority", pqEnv.priority,
        "msg_size", pqEnv.size,
-       "capacity", s.capacity,
-   )
+       "capacity", s.capacity)
    }
 }
@@ -13,6 +13,7 @@ import (
    "github.com/gogo/protobuf/proto"

    "github.com/tendermint/tendermint/crypto"
+   tmstrings "github.com/tendermint/tendermint/internal/libs/strings"
    "github.com/tendermint/tendermint/libs/log"
    "github.com/tendermint/tendermint/libs/service"
    "github.com/tendermint/tendermint/types"

@@ -239,7 +240,7 @@ func (r *Router) createQueueFactory(ctx context.Context) (func(int) queue, error
 // ChannelCreator allows routers to construct their own channels,
 // either by receiving a reference to Router.OpenChannel or using some
 // kind shim for testing purposes.
-type ChannelCreator func(context.Context, *ChannelDescriptor) (*Channel, error)
+type ChannelCreator func(context.Context, *ChannelDescriptor) (Channel, error)

 // OpenChannel opens a new channel for the given message type. The caller must
 // close the channel when done, before stopping the Router. messageType is the

@@ -247,7 +248,7 @@ type ChannelCreator func(context.Context, *ChannelDescriptor) (*Channel, error)
 // implement Wrapper to automatically (un)wrap multiple message types in a
 // wrapper message. The caller may provide a size to make the channel buffered,
 // which internally makes the inbound, outbound, and error channel buffered.
-func (r *Router) OpenChannel(ctx context.Context, chDesc *ChannelDescriptor) (*Channel, error) {
+func (r *Router) OpenChannel(ctx context.Context, chDesc *ChannelDescriptor) (Channel, error) {
    r.channelMtx.Lock()
    defer r.channelMtx.Unlock()

@@ -262,11 +263,10 @@ func (r *Router) OpenChannel(ctx context.Context, chDesc *ChannelDescriptor) (*C
    queue := r.queueFactory(chDesc.RecvBufferCapacity)
    outCh := make(chan Envelope, chDesc.RecvBufferCapacity)
    errCh := make(chan PeerError, chDesc.RecvBufferCapacity)
-   channel := NewChannel(id, queue.dequeue(), outCh, errCh)
-   channel.name = chDesc.Name
+   channel := NewChannel(chDesc.ID, chDesc.Name, queue.dequeue(), outCh, errCh)

    var wrapper Wrapper
-   if w, ok := messageType.(Wrapper); ok {
+   if w, ok := chDesc.MessageType.(Wrapper); ok {
        wrapper = w
    }

@@ -287,7 +287,7 @@ func (r *Router) OpenChannel(ctx context.Context, chDesc *ChannelDescriptor) (*C
        queue.close()
    }()

-   r.routeChannel(ctx, id, outCh, errCh, wrapper)
+   r.routeChannel(ctx, chDesc.ID, outCh, errCh, wrapper)
 }()

 return channel, nil

@@ -462,7 +462,7 @@ func (r *Router) acceptPeers(ctx context.Context, transport Transport) {
    closeErr := conn.Close()
    r.logger.Debug("rate limiting incoming peer",
        "err", err,
-       "ip", incomingIP.String(),
+       "ip", tmstrings.LazyStringer(incomingIP),
        "close_err", closeErr,
    )
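Note: ChannelCreator returning the Channel interface is what lets the test suites in this change hand a reactor a pre-built fixture instead of opening a real router channel. A condensed sketch of that shim idea (hypothetical, heavily simplified types):

    package main

    import (
        "context"
        "fmt"
    )

    type channel interface{ String() string }

    type stubChannel struct{ name string }

    func (s stubChannel) String() string { return "stub:" + s.name }

    // creator mirrors the shape of p2p.ChannelCreator: production code
    // would call Router.OpenChannel here, tests can return a fixture.
    type creator func(context.Context) (channel, error)

    func main() {
        fixture := stubChannel{name: "mempool"}
        var mk creator = func(context.Context) (channel, error) { return fixture, nil }
        ch, err := mk(context.Background())
        fmt.Println(ch, err) // stub:mempool <nil>
    }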
@@ -26,7 +26,7 @@ import (
    "github.com/tendermint/tendermint/types"
 )

-func echoReactor(ctx context.Context, channel *p2p.Channel) {
+func echoReactor(ctx context.Context, channel p2p.Channel) {
    iter := channel.Receive(ctx)
    for iter.Next(ctx) {
        envelope := iter.Envelope()

@@ -41,6 +41,10 @@ func echoReactor(ctx context.Context, channel *p2p.Channel) {
 }

 func TestRouter_Network(t *testing.T) {
+   if testing.Short() {
+       t.Skip("skipping test in short mode")
+   }
+
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

@@ -162,6 +166,10 @@ func TestRouter_Channel_Basic(t *testing.T) {

 // Channel tests are hairy to mock, so we use an in-memory network instead.
 func TestRouter_Channel_SendReceive(t *testing.T) {
+   if testing.Short() {
+       t.Skip("skipping test in short mode")
+   }
+
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

@@ -224,6 +232,10 @@ func TestRouter_Channel_SendReceive(t *testing.T) {
 }

 func TestRouter_Channel_Broadcast(t *testing.T) {
+   if testing.Short() {
+       t.Skip("skipping test in short mode")
+   }
+
    t.Cleanup(leaktest.Check(t))

    ctx, cancel := context.WithCancel(context.Background())

@@ -255,6 +267,10 @@ func TestRouter_Channel_Broadcast(t *testing.T) {
 }

 func TestRouter_Channel_Wrapper(t *testing.T) {
+   if testing.Short() {
+       t.Skip("skipping test in short mode")
+   }
+
    t.Cleanup(leaktest.Check(t))

    ctx, cancel := context.WithCancel(context.Background())

@@ -443,6 +459,11 @@ func TestRouter_AcceptPeers(t *testing.T) {
 }

 func TestRouter_AcceptPeers_Errors(t *testing.T) {
+   if testing.Short() {
+       // Each subtest takes more than one second due to the time.Sleep call,
+       // so just skip from the parent test in short mode.
+       t.Skip("skipping test in short mode")
+   }
+
    for _, err := range []error{io.EOF, context.Canceled, context.DeadlineExceeded} {
        t.Run(err.Error(), func(t *testing.T) {

@@ -480,9 +501,7 @@ func TestRouter_AcceptPeers_Errors(t *testing.T) {
    router.Stop()

    mockTransport.AssertExpectations(t)
-
    })
-
    }
 }

@@ -811,6 +830,10 @@ func TestRouter_EvictPeers(t *testing.T) {
 }

 func TestRouter_ChannelCompatability(t *testing.T) {
+   if testing.Short() {
+       t.Skip("skipping test in short mode")
+   }
+
    t.Cleanup(leaktest.Check(t))
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()
@@ -59,6 +59,10 @@ func TestMConnTransport_AcceptBeforeListen(t *testing.T) {
 }

 func TestMConnTransport_AcceptMaxAcceptedConnections(t *testing.T) {
+   if testing.Short() {
+       t.Skip("skipping test in short mode")
+   }
+
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()
@@ -126,6 +126,10 @@ func TestBlockQueueWithFailures(t *testing.T) {
 // Test that when all the blocks are retrieved that the queue still holds on to
 // it's workers and in the event of failure can still fetch the failed block
 func TestBlockQueueBlocks(t *testing.T) {
+   if testing.Short() {
+       t.Skip("skipping test in short mode")
+   }
+
    peerID, err := types.NewNodeID("0011223344556677889900112233445566778899")
    require.NoError(t, err)
    queue := newBlockQueue(startHeight, stopHeight, 1, stopTime, 2)

@@ -176,6 +180,10 @@ loop:
 }

 func TestBlockQueueAcceptsNoMoreBlocks(t *testing.T) {
+   if testing.Short() {
+       t.Skip("skipping test in short mode")
+   }
+
    peerID, err := types.NewNodeID("0011223344556677889900112233445566778899")
    require.NoError(t, err)
    queue := newBlockQueue(startHeight, stopHeight, 1, stopTime, 1)
@@ -26,14 +26,14 @@ var (
 // NOTE: It is not the responsibility of the dispatcher to verify the light blocks.
 type Dispatcher struct {
    // the channel with which to send light block requests on
-   requestCh *p2p.Channel
+   requestCh p2p.Channel

    mtx sync.Mutex
    // all pending calls that have been dispatched and are awaiting an answer
    calls map[types.NodeID]chan *types.LightBlock
 }

-func NewDispatcher(requestChannel *p2p.Channel) *Dispatcher {
+func NewDispatcher(requestChannel p2p.Channel) *Dispatcher {
    return &Dispatcher{
        requestCh: requestChannel,
        calls:     make(map[types.NodeID]chan *types.LightBlock),
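Note: the Dispatcher above keeps one pending light-block request per peer in a mutex-guarded map of reply channels. A minimal sketch of that call-table pattern (hypothetical types, not the statesync API):

    package main

    import (
        "fmt"
        "sync"
    )

    type callTable struct {
        mtx   sync.Mutex
        calls map[string]chan int // peer ID -> reply channel
    }

    // dispatch registers a pending call; the second result is false if a
    // request to that peer is already in flight.
    func (c *callTable) dispatch(peer string) (chan int, bool) {
        c.mtx.Lock()
        defer c.mtx.Unlock()
        if _, ok := c.calls[peer]; ok {
            return nil, false
        }
        ch := make(chan int, 1)
        c.calls[peer] = ch
        return ch, true
    }

    // respond resolves a pending call, if any, and clears the slot.
    func (c *callTable) respond(peer string, v int) {
        c.mtx.Lock()
        defer c.mtx.Unlock()
        if ch, ok := c.calls[peer]; ok {
            ch <- v
            delete(c.calls, peer)
        }
    }

    func main() {
        tbl := &callTable{calls: make(map[string]chan int)}
        ch, ok := tbl.dispatch("peer1")
        fmt.Println("dispatched:", ok)
        tbl.respond("peer1", 42)
        fmt.Println("reply:", <-ch)
    }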
@@ -24,13 +24,13 @@ type channelInternal struct {
    Error chan p2p.PeerError
 }

-func testChannel(size int) (*channelInternal, *p2p.Channel) {
+func testChannel(size int) (*channelInternal, p2p.Channel) {
    in := &channelInternal{
        In:    make(chan p2p.Envelope, size),
        Out:   make(chan p2p.Envelope, size),
        Error: make(chan p2p.PeerError, size),
    }
-   return in, p2p.NewChannel(0, in.In, in.Out, in.Error)
+   return in, p2p.NewChannel(0, "test", in.In, in.Out, in.Error)
 }

 func TestDispatcherBasic(t *testing.T) {
@@ -305,7 +305,7 @@ func (r *Reactor) OnStart(ctx context.Context) error {
        return nil
    }

-   go r.processChannels(ctx, map[p2p.ChannelID]*p2p.Channel{
+   go r.processChannels(ctx, map[p2p.ChannelID]p2p.Channel{
        SnapshotChannel:   snapshotCh,
        ChunkChannel:      chunkCh,
        LightBlockChannel: blockCh,

@@ -611,7 +611,7 @@ func (r *Reactor) backfill(
 // handleSnapshotMessage handles envelopes sent from peers on the
 // SnapshotChannel. It returns an error only if the Envelope.Message is unknown
 // for this channel. This should never be called outside of handleMessage.
-func (r *Reactor) handleSnapshotMessage(ctx context.Context, envelope *p2p.Envelope, snapshotCh *p2p.Channel) error {
+func (r *Reactor) handleSnapshotMessage(ctx context.Context, envelope *p2p.Envelope, snapshotCh p2p.Channel) error {
    logger := r.logger.With("peer", envelope.From)

    switch msg := envelope.Message.(type) {

@@ -683,40 +683,34 @@ func (r *Reactor) handleSnapshotMessage(ctx context.Context, envelope *p2p.Envel
 // handleChunkMessage handles envelopes sent from peers on the ChunkChannel.
 // It returns an error only if the Envelope.Message is unknown for this channel.
 // This should never be called outside of handleMessage.
-func (r *Reactor) handleChunkMessage(ctx context.Context, envelope *p2p.Envelope, chunkCh *p2p.Channel) error {
+func (r *Reactor) handleChunkMessage(ctx context.Context, envelope *p2p.Envelope, chunkCh p2p.Channel) error {
    switch msg := envelope.Message.(type) {
    case *ssproto.ChunkRequest:
-       r.logger.Debug(
-           "received chunk request",
+       r.logger.Debug("received chunk request",
            "height", msg.Height,
            "format", msg.Format,
            "chunk", msg.Index,
-           "peer", envelope.From,
-       )
+           "peer", envelope.From)
        resp, err := r.conn.LoadSnapshotChunk(ctx, &abci.RequestLoadSnapshotChunk{
            Height: msg.Height,
            Format: msg.Format,
            Chunk:  msg.Index,
        })
        if err != nil {
-           r.logger.Error(
-               "failed to load chunk",
+           r.logger.Error("failed to load chunk",
                "height", msg.Height,
                "format", msg.Format,
                "chunk", msg.Index,
                "err", err,
-               "peer", envelope.From,
-           )
+               "peer", envelope.From)
            return nil
        }

-       r.logger.Debug(
-           "sending chunk",
+       r.logger.Debug("sending chunk",
            "height", msg.Height,
            "format", msg.Format,
            "chunk", msg.Index,
-           "peer", envelope.From,
-       )
+           "peer", envelope.From)
        if err := chunkCh.Send(ctx, p2p.Envelope{
            To: envelope.From,
            Message: &ssproto.ChunkResponse{

@@ -739,13 +733,11 @@ func (r *Reactor) handleChunkMessage(ctx context.Context, envelope *p2p.Envelope
        return nil
    }

-   r.logger.Debug(
-       "received chunk; adding to sync",
+   r.logger.Debug("received chunk; adding to sync",
        "height", msg.Height,
        "format", msg.Format,
        "chunk", msg.Index,
-       "peer", envelope.From,
-   )
+       "peer", envelope.From)
    _, err := r.syncer.AddChunk(&chunk{
        Height: msg.Height,
        Format: msg.Format,

@@ -754,14 +746,12 @@ func (r *Reactor) handleChunkMessage(ctx context.Context, envelope *p2p.Envelope
        Sender: envelope.From,
    })
    if err != nil {
-       r.logger.Error(
-           "failed to add chunk",
+       r.logger.Error("failed to add chunk",
            "height", msg.Height,
            "format", msg.Format,
            "chunk", msg.Index,
            "err", err,
-           "peer", envelope.From,
-       )
+           "peer", envelope.From)
        return nil
    }

@@ -772,7 +762,7 @@ func (r *Reactor) handleChunkMessage(ctx context.Context, envelope *p2p.Envelope
    return nil
 }

-func (r *Reactor) handleLightBlockMessage(ctx context.Context, envelope *p2p.Envelope, blockCh *p2p.Channel) error {
+func (r *Reactor) handleLightBlockMessage(ctx context.Context, envelope *p2p.Envelope, blockCh p2p.Channel) error {
    switch msg := envelope.Message.(type) {
    case *ssproto.LightBlockRequest:
        r.logger.Info("received light block request", "height", msg.Height)

@@ -829,7 +819,7 @@ func (r *Reactor) handleLightBlockMessage(ctx context.Context, envelope *p2p.Env
    return nil
 }

-func (r *Reactor) handleParamsMessage(ctx context.Context, envelope *p2p.Envelope, paramsCh *p2p.Channel) error {
+func (r *Reactor) handleParamsMessage(ctx context.Context, envelope *p2p.Envelope, paramsCh p2p.Channel) error {
    switch msg := envelope.Message.(type) {
    case *ssproto.ParamsRequest:
        r.logger.Debug("received consensus params request", "height", msg.Height)

@@ -878,7 +868,7 @@ func (r *Reactor) handleParamsMessage(ctx context.Context, envelope *p2p.Envelop
 // handleMessage handles an Envelope sent from a peer on a specific p2p Channel.
 // It will handle errors and any possible panics gracefully. A caller can handle
 // any error returned by sending a PeerError on the respective channel.
-func (r *Reactor) handleMessage(ctx context.Context, envelope *p2p.Envelope, chans map[p2p.ChannelID]*p2p.Channel) (err error) {
+func (r *Reactor) handleMessage(ctx context.Context, envelope *p2p.Envelope, chans map[p2p.ChannelID]p2p.Channel) (err error) {
    defer func() {
        if e := recover(); e != nil {
            err = fmt.Errorf("panic in processing message: %v", e)

@@ -912,12 +902,12 @@ func (r *Reactor) handleMessage(ctx context.Context, envelope *p2p.Envelope, cha
 // encountered during message execution will result in a PeerError being sent on
 // the respective channel. When the reactor is stopped, we will catch the signal
 // and close the p2p Channel gracefully.
-func (r *Reactor) processChannels(ctx context.Context, chanTable map[p2p.ChannelID]*p2p.Channel) {
-   // make sure that the iterator gets cleaned up in case of error
+func (r *Reactor) processChannels(ctx context.Context, chanTable map[p2p.ChannelID]p2p.Channel) {
+   // make sure tht the iterator gets cleaned up in case of error
    ctx, cancel := context.WithCancel(ctx)
    defer cancel()

-   chs := make([]*p2p.Channel, 0, len(chanTable))
+   chs := make([]p2p.Channel, 0, len(chanTable))
    for key := range chanTable {
        chs = append(chs, chanTable[key])
    }
@@ -40,22 +40,22 @@ type reactorTestSuite struct {
    conn          *clientmocks.Client
    stateProvider *mocks.StateProvider

-   snapshotChannel   *p2p.Channel
+   snapshotChannel   p2p.Channel
    snapshotInCh      chan p2p.Envelope
    snapshotOutCh     chan p2p.Envelope
    snapshotPeerErrCh chan p2p.PeerError

-   chunkChannel   *p2p.Channel
+   chunkChannel   p2p.Channel
    chunkInCh      chan p2p.Envelope
    chunkOutCh     chan p2p.Envelope
    chunkPeerErrCh chan p2p.PeerError

-   blockChannel   *p2p.Channel
+   blockChannel   p2p.Channel
    blockInCh      chan p2p.Envelope
    blockOutCh     chan p2p.Envelope
    blockPeerErrCh chan p2p.PeerError

-   paramsChannel   *p2p.Channel
+   paramsChannel   p2p.Channel
    paramsInCh      chan p2p.Envelope
    paramsOutCh     chan p2p.Envelope
    paramsPeerErrCh chan p2p.PeerError

@@ -102,6 +102,7 @@ func setup(
    rts.snapshotChannel = p2p.NewChannel(
        SnapshotChannel,
+       "snapshot",
        rts.snapshotInCh,
        rts.snapshotOutCh,
        rts.snapshotPeerErrCh,

@@ -109,6 +110,7 @@ func setup(
    rts.chunkChannel = p2p.NewChannel(
        ChunkChannel,
+       "chunk",
        rts.chunkInCh,
        rts.chunkOutCh,
        rts.chunkPeerErrCh,

@@ -116,6 +118,7 @@ func setup(
    rts.blockChannel = p2p.NewChannel(
        LightBlockChannel,
+       "lightblock",
        rts.blockInCh,
        rts.blockOutCh,
        rts.blockPeerErrCh,

@@ -123,6 +126,7 @@ func setup(
    rts.paramsChannel = p2p.NewChannel(
        ParamsChannel,
+       "params",
        rts.paramsInCh,
        rts.paramsOutCh,
        rts.paramsPeerErrCh,

@@ -133,7 +137,7 @@ func setup(
    cfg := config.DefaultStateSyncConfig()

-   chCreator := func(ctx context.Context, desc *p2p.ChannelDescriptor) (*p2p.Channel, error) {
+   chCreator := func(ctx context.Context, desc *p2p.ChannelDescriptor) (p2p.Channel, error) {
        switch desc.ID {
        case SnapshotChannel:
            return rts.snapshotChannel, nil

@@ -193,6 +197,10 @@ func setup(
 }

 func TestReactor_Sync(t *testing.T) {
+   if testing.Short() {
+       t.Skip("skipping test in short mode")
+   }
+
    ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
    defer cancel()

@@ -614,6 +622,10 @@ func TestReactor_StateProviderP2P(t *testing.T) {
 }

 func TestReactor_Backfill(t *testing.T) {
+   if testing.Short() {
+       t.Skip("skipping test in short mode")
+   }
+
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

@@ -622,6 +634,10 @@ func TestReactor_Backfill(t *testing.T) {
    for _, failureRate := range failureRates {
        failureRate := failureRate
        t.Run(fmt.Sprintf("failure rate: %d", failureRate), func(t *testing.T) {
+           if testing.Short() && failureRate > 0 {
+               t.Skip("skipping test in short mode")
+           }
+
            ctx, cancel := context.WithCancel(ctx)
            defer cancel()
@@ -208,7 +208,7 @@ type stateProviderP2P struct {
 	sync.Mutex // light.Client is not concurrency-safe
 	lc            *light.Client
 	initialHeight int64
-	paramsSendCh  *p2p.Channel
+	paramsSendCh  p2p.Channel
 	paramsRecvCh  chan types.ConsensusParams
 }
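stateProviderP2P embeds sync.Mutex because, as its comment notes, light.Client is not concurrency-safe: every method is expected to hold the lock while touching lc. A minimal sketch of that embedded-mutex guard pattern; lightClient and the verify method are hypothetical stand-ins:

package example

import "sync"

// lightClient stands in for light.Client; not safe for concurrent use.
type lightClient struct{}

func (lc *lightClient) VerifyHeight(height int64) error { return nil }

type stateProvider struct {
	sync.Mutex // guards lc, which is not concurrency-safe
	lc *lightClient
}

// verify shows the convention: take the embedded mutex for the
// duration of any call into the light client.
func (p *stateProvider) verify(height int64) error {
	p.Lock()
	defer p.Unlock()
	return p.lc.VerifyHeight(height)
}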
@@ -220,7 +220,7 @@ func NewP2PStateProvider(
 	initialHeight int64,
 	providers []lightprovider.Provider,
 	trustOptions light.TrustOptions,
-	paramsSendCh *p2p.Channel,
+	paramsSendCh p2p.Channel,
 	logger log.Logger,
 ) (StateProvider, error) {
 	if len(providers) < 2 {
@@ -56,8 +56,8 @@ type syncer struct {
 	stateProvider StateProvider
 	conn          abciclient.Client
 	snapshots     *snapshotPool
-	snapshotCh    *p2p.Channel
-	chunkCh       *p2p.Channel
+	snapshotCh    p2p.Channel
+	chunkCh       p2p.Channel
 	tempDir       string
 	fetchers      int32
 	retryTimeout  time.Duration
@@ -84,11 +84,9 @@ func (s *syncer) AddChunk(chunk *chunk) (bool, error) {
 		return false, err
 	}
 	if added {
-		s.logger.Debug("Added chunk to queue", "height", chunk.Height, "format", chunk.Format,
-			"chunk", chunk.Index)
+		s.logger.Debug("Added chunk to queue", "height", chunk.Height, "format", chunk.Format, "chunk", chunk.Index)
 	} else {
-		s.logger.Debug("Ignoring duplicate chunk in queue", "height", chunk.Height, "format", chunk.Format,
-			"chunk", chunk.Index)
+		s.logger.Debug("Ignoring duplicate chunk in queue", "height", chunk.Height, "format", chunk.Format, "chunk", chunk.Index)
 	}
 	return added, nil
 }
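The AddChunk hunk only collapses each Debug call onto a single line; the key-value style is unchanged. With any logger whose Debug takes a message plus alternating keys and values, the call reads as one unit. A runnable sketch using the standard library's log/slog as an analogue of tendermint's key-value logger:

package main

import (
	"log/slog"
	"os"
)

type chunk struct {
	Height uint64
	Format uint32
	Index  uint32
}

func main() {
	// Enable debug output; slog's default level would suppress Debug.
	slog.SetDefault(slog.New(slog.NewTextHandler(os.Stderr,
		&slog.HandlerOptions{Level: slog.LevelDebug})))

	c := chunk{Height: 100, Format: 1, Index: 3}
	// One call, one line: message first, then alternating keys and
	// values, mirroring the consolidated Debug calls above.
	slog.Debug("Added chunk to queue", "height", c.Height, "format", c.Format, "chunk", c.Index)
}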
@@ -137,12 +135,20 @@ func (s *syncer) SyncAny(
 		discoveryTime = minimumDiscoveryTime
 	}

+	timer := time.NewTimer(discoveryTime)
+	defer timer.Stop()
+
 	if discoveryTime > 0 {
 		if err := requestSnapshots(); err != nil {
 			return sm.State{}, nil, err
 		}
-		s.logger.Info(fmt.Sprintf("Discovering snapshots for %v", discoveryTime))
-		time.Sleep(discoveryTime)
+		s.logger.Info("discovering snapshots",
+			"interval", discoveryTime)
+		select {
+		case <-ctx.Done():
+			return sm.State{}, nil, ctx.Err()
+		case <-timer.C:
+		}
 	}

 	// The app may ask us to retry a snapshot restoration, in which case we need to reuse
@@ -151,8 +157,11 @@ func (s *syncer) SyncAny(
 		snapshot *snapshot
 		chunks   *chunkQueue
 		err      error
+		iters    int
 	)

 	for {
+		iters++
 		// If not nil, we're going to retry restoration of the same snapshot.
 		if snapshot == nil {
 			snapshot = s.snapshots.Best()
@@ -162,9 +171,16 @@ func (s *syncer) SyncAny(
 			if discoveryTime == 0 {
 				return sm.State{}, nil, errNoSnapshots
 			}
-			s.logger.Info(fmt.Sprintf("Discovering snapshots for %v", discoveryTime))
-			time.Sleep(discoveryTime)
-			continue
+			s.logger.Info("discovering snapshots",
+				"iterations", iters,
+				"interval", discoveryTime)
+			timer.Reset(discoveryTime)
+			select {
+			case <-ctx.Done():
+				return sm.State{}, nil, ctx.Err()
+			case <-timer.C:
+				continue
+			}
 		}
 		if chunks == nil {
 			chunks, err = newChunkQueue(snapshot, s.tempDir)
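The two SyncAny hunks above replace time.Sleep(discoveryTime) with a reusable time.Timer raced against ctx.Done(), so shutdown no longer has to wait out a full discovery interval. The same pattern in isolation, using only the standard library; waitDiscovery is a hypothetical helper, not a function from the diff:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// waitDiscovery blocks for the discovery interval but returns early if
// the context is canceled, which time.Sleep cannot do. The timer is
// created once and Reset on each retry, as in SyncAny above.
func waitDiscovery(ctx context.Context, timer *time.Timer, interval time.Duration) error {
	timer.Reset(interval)
	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-timer.C:
		return nil
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	timer := time.NewTimer(time.Second)
	defer timer.Stop()

	// The one-second wait is cut short by the 50ms context deadline.
	if err := waitDiscovery(ctx, timer, time.Second); errors.Is(err, context.DeadlineExceeded) {
		fmt.Println("canceled before the interval elapsed:", err)
	}
}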
@@ -494,13 +510,11 @@ func (s *syncer) requestChunk(ctx context.Context, snapshot *snapshot, chunk uin
 		return nil
 	}

-	s.logger.Debug(
-		"Requesting snapshot chunk",
+	s.logger.Debug("Requesting snapshot chunk",
 		"height", snapshot.Height,
 		"format", snapshot.Format,
 		"chunk", chunk,
-		"peer", peer,
-	)
+		"peer", peer)

 	msg := p2p.Envelope{
 		To: peer,
Some files were not shown because too many files have changed in this diff.