Mirror of https://github.com/tendermint/tendermint.git (synced 2026-01-11 23:32:50 +00:00)

Compare commits: wb/lock-de...wb/trigger (39 commits)
The 39 commits in this range (SHA1):

4ed24a5656, 9b29adb7cf, b95c261981, bc1a20dbb8, 86f00135dd, ff7b0e638e,
36a1acff52, 164de91842, 4fe0f262d4, 6538776e6a, 4781d04d18, 52ed994416,
0524558696, d837432681, 34a3fcd8fc, 48295955ed, ded310093e, befd669794,
3646b635d3, 59404003ee, f2a8f5e054, 1b5bb5348f, 4ca130d226, 1f438f205a,
5bf30bb049, e53f92ba9c, e4d6f6df09, 0ef1a12186, 72aee47847, 109814c85a,
851d2e3bde, 3ea81bfaa7, 5703ae2fb3, 03ad7d6f20, f5b9c210ca, cb69ed8135,
c201e3b54d, b30ec89ee9, 6276fdcb5d
.github/mergify.yml (vendored, 9 changed lines)

@@ -16,3 +16,12 @@ pull_request_rules:
       backport:
         branches:
           - v0.34.x
+  - name: backport patches to v0.35.x branch
+    conditions:
+      - base=master
+      - label=S:backport-to-v0.35.x
+    actions:
+      backport:
+        branches:
+          - v0.35.x
.github/workflows/e2e-nightly-35x.yml (vendored, new file, 76 lines)

@@ -0,0 +1,76 @@
# Runs randomly generated E2E testnets nightly on v0.35.x.

# !! If you change something in this file, you probably want
# to update the e2e-nightly-master workflow as well!

name: e2e-nightly-35x
on:
  workflow_dispatch:  # allow running workflow manually
  schedule:
    - cron: '0 2 * * *'

jobs:
  e2e-nightly-test:
    # Run parallel jobs for the listed testnet groups (must match the
    # ./build/generator -g flag)
    strategy:
      fail-fast: false
      matrix:
        p2p: ['legacy', 'new', 'hybrid']
        group: ['00', '01', '02', '03']
    runs-on: ubuntu-latest
    timeout-minutes: 60
    steps:
      - uses: actions/setup-go@v2
        with:
          go-version: '1.17'

      - uses: actions/checkout@v2.3.4
        with:
          ref: 'v0.35.x'

      - name: Build
        working-directory: test/e2e
        # Run make jobs in parallel, since we can't run steps in parallel.
        run: make -j2 docker generator runner tests

      - name: Generate testnets
        working-directory: test/e2e
        # When changing -g, also change the matrix groups above
        run: ./build/generator -g 4 -d networks/nightly/${{ matrix.p2p }} -p ${{ matrix.p2p }}

      - name: Run ${{ matrix.p2p }} p2p testnets in group ${{ matrix.group }}
        working-directory: test/e2e
        run: ./run-multiple.sh networks/nightly/${{ matrix.p2p }}/*-group${{ matrix.group }}-*.toml

  e2e-nightly-fail-2:
    needs: e2e-nightly-test
    if: ${{ failure() }}
    runs-on: ubuntu-latest
    steps:
      - name: Notify Slack on failure
        uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
        env:
          SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
          SLACK_CHANNEL: tendermint-internal
          SLACK_USERNAME: Nightly E2E Tests
          SLACK_ICON_EMOJI: ':skull:'
          SLACK_COLOR: danger
          SLACK_MESSAGE: Nightly E2E tests failed on v0.35.x
          SLACK_FOOTER: ''

  e2e-nightly-success:  # may turn this off once they seem to pass consistently
    needs: e2e-nightly-test
    if: ${{ success() }}
    runs-on: ubuntu-latest
    steps:
      - name: Notify Slack on success
        uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
        env:
          SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
          SLACK_CHANNEL: tendermint-internal
          SLACK_USERNAME: Nightly E2E Tests
          SLACK_ICON_EMOJI: ':white_check_mark:'
          SLACK_COLOR: good
          SLACK_MESSAGE: Nightly E2E tests passed on v0.35.x
          SLACK_FOOTER: ''
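The nightly pipeline above can also be exercised locally. A minimal sketch using the same Make targets and scripts the workflow invokes; the `new` p2p mode and group `00` are arbitrary picks for illustration:

    cd test/e2e
    # Build the e2e tooling; -j2 runs the make jobs in parallel.
    make -j2 docker generator runner tests
    # Generate 4 testnet groups for one p2p mode (matches the -g flag and matrix groups).
    ./build/generator -g 4 -d networks/nightly/new -p new
    # Run every testnet in group 00.
    ./run-multiple.sh networks/nightly/new/*-group00-*.toml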
.github/workflows/e2e-nightly-master.yml (vendored, 13 changed lines)

@@ -10,13 +10,12 @@ on:
     - cron: '0 2 * * *'
 
 jobs:
-  e2e-nightly-test-2:
+  e2e-nightly-test:
     # Run parallel jobs for the listed testnet groups (must match the
     # ./build/generator -g flag)
     strategy:
       fail-fast: false
       matrix:
-        p2p: ['legacy', 'new', 'hybrid']
         group: ['00', '01', '02', '03']
     runs-on: ubuntu-latest
     timeout-minutes: 60

@@ -35,14 +34,14 @@ jobs:
       - name: Generate testnets
         working-directory: test/e2e
         # When changing -g, also change the matrix groups above
-        run: ./build/generator -g 4 -d networks/nightly/${{ matrix.p2p }} -p ${{ matrix.p2p }}
+        run: ./build/generator -g 4 -d networks/nightly/
 
-      - name: Run ${{ matrix.p2p }} p2p testnets in group ${{ matrix.group }}
+      - name: Run ${{ matrix.p2p }} p2p testnets
         working-directory: test/e2e
-        run: ./run-multiple.sh networks/nightly/${{ matrix.p2p }}/*-group${{ matrix.group }}-*.toml
+        run: ./run-multiple.sh networks/nightly/*-group${{ matrix.group }}-*.toml
 
   e2e-nightly-fail-2:
-    needs: e2e-nightly-test-2
+    needs: e2e-nightly-test
     if: ${{ failure() }}
     runs-on: ubuntu-latest
     steps:

@@ -58,7 +57,7 @@ jobs:
       SLACK_FOOTER: ''
 
   e2e-nightly-success:  # may turn this off once they seem to pass consistently
-    needs: e2e-nightly-test-2
+    needs: e2e-nightly-test
     if: ${{ success() }}
     runs-on: ubuntu-latest
     steps:
.github/workflows/e2e.yml (vendored, 6 changed lines)

@@ -33,10 +33,6 @@ jobs:
 
       - name: Run CI testnet
         working-directory: test/e2e
-        run: ./build/runner -f networks/ci.toml
+        run: ./run-multiple.sh networks/ci.toml
         if: "env.GIT_DIFF != ''"
 
-      - name: Emit logs on failure
-        if: ${{ failure() }}
-        working-directory: test/e2e
-        run: ./build/runner -f networks/ci.toml logs
.github/workflows/lint.yml (vendored, 2 changed lines)

@@ -23,7 +23,7 @@ jobs:
       - uses: golangci/golangci-lint-action@v2.5.2
         with:
           # Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version.
-          version: v1.38
+          version: v1.42.1
           args: --timeout 10m
           github-token: ${{ secrets.github_token }}
         if: env.GIT_DIFF
.golangci.yml

@@ -13,12 +13,12 @@ linters:
   # - gochecknoinits
   # - gocognit
   - goconst
-  - gocritic
+  # - gocritic
   # - gocyclo
   # - godox
   - gofmt
   - goimports
-  - golint
+  - revive
   - gosec
   - gosimple
   - govet
CHANGELOG.md (18 changed lines)

@@ -178,6 +178,24 @@ Special thanks to external contributors on this release: @JayT106, @bipulprasad,
 - [statesync] [\#6463](https://github.com/tendermint/tendermint/pull/6463) Adds Reverse Sync feature to fetch historical light blocks after state sync in order to verify any evidence (@cmwaters)
 - [blocksync] [\#6590](https://github.com/tendermint/tendermint/pull/6590) Update the metrics during blocksync (@JayT106)
 
+## v0.34.14
+
+This release backports the `rollback` feature to allow recovery in the event of an incorrect app hash.
+
+### FEATURES
+
+- [\#6982](https://github.com/tendermint/tendermint/pull/6982) The tendermint binary now has built-in support for running the end-to-end test application (with state sync support) (@cmwaters).
+- [cli] [#7033](https://github.com/tendermint/tendermint/pull/7033) Add a `rollback` command to rollback to the previous tendermint state. This may be useful in the event of a non-deterministic app hash or when reverting an upgrade. (@cmwaters)
+
+### IMPROVEMENTS
+
+- [\#7103](https://github.com/tendermint/tendermint/pull/7104) Remove IAVL dependency (backport of #6550) (@cmwaters)
+
+### BUG FIXES
+
+- [\#7057](https://github.com/tendermint/tendermint/pull/7057) Import Postgres driver support for the psql indexer (@creachadair).
+- [ABCI] [\#7110](https://github.com/tendermint/tendermint/issues/7110) Revert "change client to use multi-reader mutexes (#6873)" (@tychoish).
+
 ## v0.34.13
 
 *September 6, 2021*

CHANGELOG_PENDING.md

@@ -12,12 +12,31 @@ Special thanks to external contributors on this release:
 
 - CLI/RPC/Config
 
+  - [rpc] Remove the deprecated gRPC interface to the RPC service (@creachadair).
 
 - Apps
 
 - P2P Protocol
 
+  - [p2p] \#7035 Remove legacy P2P routing implementation and
+    associated configuration options (@tychoish)
 
 - Go API
 
+  - [blocksync] \#7046 Remove v2 implementation of the blocksync
+    service and reactor, which was disabled in the previous release
+    (@tychoish)
+  - [p2p] \#7064 Remove WDRR queue implementation. (@tychoish)
 
 - Blockchain Protocol
 
 ### FEATURES
 
+- [cli] [#7033](https://github.com/tendermint/tendermint/pull/7033) Add a `rollback` command to rollback to the previous tendermint state in the event of a non-deterministic app hash or reverting an upgrade.
+- [mempool, rpc] \#7041 Add removeTx operation to the RPC layer. (@tychoish)
 
 ### IMPROVEMENTS
 
 ### BUG FIXES
 
+- fix: assignment copies lock value in `BitArray.UnmarshalJSON()` (@lklimek)
CONTRIBUTING.md (144 changed lines)

@@ -227,150 +227,6 @@ Fixes #nnnn
 
 Each PR should have one commit once it lands on `master`; this can be accomplished by using the "squash and merge" button on Github. Be sure to edit your commit message, though!
 
-### Release procedure
-
-#### A note about backport branches
-
-Tendermint's `master` branch is under active development.
-Releases are specified using tags and are built from long-lived "backport" branches.
-Each release "line" (e.g. 0.34 or 0.33) has its own long-lived backport branch,
-and the backport branches have names like `v0.34.x` or `v0.33.x`
-(literally, `x`; it is not a placeholder in this case).
-
-As non-breaking changes land on `master`, they should also be backported (cherry-picked)
-to these backport branches.
-
-We use Mergify's [backport feature](https://mergify.io/features/backports) to automatically backport
-to the needed branch. There should be a label for any backport branch that you'll be targeting.
-To notify the bot to backport a pull request, mark the pull request with
-the label `S:backport-to-<backport_branch>`.
-Once the original pull request is merged, the bot will try to cherry-pick the pull request
-to the backport branch. If the bot fails to backport, it will open a pull request.
-The author of the original pull request is responsible for solving the conflicts and
-merging the pull request.
-
-#### Creating a backport branch
-
-If this is the first release candidate for a major release, you get to have the honor of creating
-the backport branch!
-
-Note that, after creating the backport branch, you'll also need to update the tags on `master`
-so that `go mod` is able to order the branches correctly. You should tag `master` with a "dev" tag
-that is "greater than" the backport branches' tags. See #6072 for more context.
-
-In the following example, we'll assume that we're making a backport branch for
-the 0.35.x line.
-
-1. Start on `master`
-2. Create the backport branch:
-   `git checkout -b v0.35.x`
-3. Go back to master and tag it as the dev branch for the _next_ major release and push it back up:
-   `git tag -a v0.36.0-dev; git push v0.36.0-dev`
-4. Create a new workflow to run the e2e nightlies for this backport branch.
-   (See https://github.com/tendermint/tendermint/blob/master/.github/workflows/e2e-nightly-34x.yml
-   for an example.)
-
-#### Release candidates
-
-Before creating an official release, especially a major release, we may want to create a
-release candidate (RC) for our friends and partners to test out. We use git tags to
-create RCs, and we build them off of backport branches.
-
-Tags for RCs should follow the "standard" release naming conventions, with `-rcX` at the end
-(for example, `v0.35.0-rc0`).
-
-(Note that branches and tags _cannot_ have the same names, so it's important that these branches
-have distinct names from the tags/release names.)
-
-If this is the first RC for a major release, you'll have to make a new backport branch (see above).
-Otherwise:
-
-1. Start from the backport branch (e.g. `v0.35.x`).
-2. Run the integration tests and the e2e nightlies
-   (which can be triggered from the Github UI;
-   e.g., https://github.com/tendermint/tendermint/actions/workflows/e2e-nightly-34x.yml).
-3. Prepare the changelog:
-   - Move the changes included in `CHANGELOG_PENDING.md` into `CHANGELOG.md`.
-   - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
-     all PRs.
-   - Ensure that UPGRADING.md is up-to-date and includes notes on any breaking changes
-     or other upgrading flows.
-   - Bump TMVersionDefault version in `version.go`
-   - Bump P2P and block protocol versions in `version.go`, if necessary
-   - Bump ABCI protocol version in `version.go`, if necessary
-4. Open a PR with these changes against the backport branch.
-5. Once these changes have landed on the backport branch, be sure to pull them back down locally.
-6. Once you have the changes locally, create the new tag, specifying a name and a tag "message":
-   `git tag -a v0.35.0-rc0 -m "Release Candidate v0.35.0-rc0"`
-7. Push the tag back up to origin:
-   `git push origin v0.35.0-rc0`
-   Now the tag should be available on the repo's releases page.
-8. Future RCs will continue to be built off of this branch.
-
-Note that this process should only be used for "true" RCs:
-release candidates that, if successful, will be the next release.
-For more experimental "RCs," create a new, short-lived branch and tag that instead.
-
-#### Major release
-
-This major release process assumes that this release was preceded by release candidates.
-If there were no release candidates, begin by creating a backport branch, as described above.
-
-1. Start on the backport branch (e.g. `v0.35.x`)
-2. Run integration tests and the e2e nightlies.
-3. Prepare the release:
-   - "Squash" changes from the changelog entries for the RCs into a single entry,
-     and add all changes included in `CHANGELOG_PENDING.md`.
-     (Squashing includes both combining all entries, as well as removing or simplifying
-     any intra-RC changes. It may also help to alphabetize the entries by package name.)
-   - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
-     all PRs.
-   - Ensure that UPGRADING.md is up-to-date and includes notes on any breaking changes
-     or other upgrading flows.
-   - Bump TMVersionDefault version in `version.go`
-   - Bump P2P and block protocol versions in `version.go`, if necessary
-   - Bump ABCI protocol version in `version.go`, if necessary
-4. Open a PR with these changes against the backport branch.
-5. Once these changes are on the backport branch, push a tag with prepared release details.
-   This will trigger the actual release `v0.35.0`.
-   - `git tag -a v0.35.0 -m 'Release v0.35.0'`
-   - `git push origin v0.35.0`
-6. Make sure that `master` is updated with the latest `CHANGELOG.md`, `CHANGELOG_PENDING.md`, and `UPGRADING.md`.
-7. Add the release to the documentation site generator config (see
-   [DOCS_README.md](./docs/DOCS_README.md) for more details). In summary:
-   - Start on branch `master`.
-   - Add a new line at the bottom of [`docs/versions`](./docs/versions) to
-     ensure the newest release is the default for the landing page.
-   - Add a new entry to `themeConfig.versions` in
-     [`docs/.vuepress/config.js`](./docs/.vuepress/config.js) to include the
-     release in the dropdown versions menu.
-
-#### Minor release (point releases)
-
-Minor releases are done differently from major releases: They are built off of long-lived backport branches, rather than from master.
-As non-breaking changes land on `master`, they should also be backported (cherry-picked) to these backport branches.
-
-Minor releases don't have release candidates by default, although any tricky changes may merit a release candidate.
-
-To create a minor release:
-
-1. Check out the long-lived backport branch: `git checkout v0.35.x`
-2. Run integration tests (`make test_integrations`) and the nightlies.
-3. Check out a new branch and prepare the release:
-   - Copy `CHANGELOG_PENDING.md` to the top of `CHANGELOG.md`
-   - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for all issues
-   - Run `bash ./scripts/authors.sh` to get a list of authors since the latest release, and add the GitHub aliases of external contributors to the top of the CHANGELOG. To look up an alias from an email, try `bash ./scripts/authors.sh <email>`
-   - Reset `CHANGELOG_PENDING.md`
-   - Bump the ABCI version number, if necessary.
-     (Note that ABCI follows semver, and that ABCI versions are the only versions
-     which can change during minor releases, and only field additions are valid minor changes.)
-4. Open a PR with these changes that will land them back on `v0.35.x`
-5. Once this change has landed on the backport branch, make sure to pull it locally, then push a tag.
-   - `git tag -a v0.35.1 -m 'Release v0.35.1'`
-   - `git push origin v0.35.1`
-6. Create a pull request back to master with the CHANGELOG & version changes from the latest release.
-   - Remove all `R:minor` labels from the pull requests that were included in the release.
-   - Do not merge the backport branch into master.
 
 ## Testing
 
 ### Unit tests
Makefile (10 changed lines)

@@ -89,7 +89,7 @@ proto-gen:
 .PHONY: proto-gen
 
 proto-lint:
-	@$(DOCKER_BUF) check lint --error-format=json
+	@$(DOCKER_BUF) lint --error-format=json
 .PHONY: proto-lint
 
 proto-format:

@@ -98,11 +98,11 @@ proto-format:
 .PHONY: proto-format
 
 proto-check-breaking:
-	@$(DOCKER_BUF) check breaking --against-input .git#branch=master
+	@$(DOCKER_BUF) breaking --against .git#branch=master
 .PHONY: proto-check-breaking
 
 proto-check-breaking-ci:
-	@$(DOCKER_BUF) check breaking --against-input $(HTTPS_GIT)#branch=master
+	@$(DOCKER_BUF) breaking --against $(HTTPS_GIT)#branch=master
 .PHONY: proto-check-breaking-ci
 
 ###############################################################################

@@ -227,13 +227,13 @@ build-docs:
 
 build-docker: build-linux
 	cp $(BUILDDIR)/tendermint DOCKER/tendermint
-	docker build --label=tendermint --tag="tendermint/tendermint" DOCKER
+	docker build --label=tendermint --tag="tendermint/tendermint" -f DOCKER/Dockerfile .
 	rm -rf DOCKER/tendermint
.PHONY: build-docker
 
 
 ###############################################################################
 ###                               Mocks                                     ###
 ###############################################################################
 
 mockery:
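The two updated proto targets reduce to plain `buf` invocations outside Docker. A minimal sketch, assuming a locally installed `buf` new enough to have dropped the `check` subcommand (the Makefile's `$(DOCKER_BUF)` simply wraps the same calls in a container):

    # Lint the protobuf definitions, emitting machine-readable output.
    buf lint --error-format=json
    # Check for breaking proto changes against the master branch.
    buf breaking --against '.git#branch=master'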
RELEASES.md (new file, 161 lines)

@@ -0,0 +1,161 @@
# Releases

Tendermint uses [semantic versioning](https://semver.org/) with each release following
a `vX.Y.Z` format. The `master` branch is used for active development and thus it's
advisable not to build against it.

The latest changes are always initially merged into `master`.
Releases are specified using tags and are built from long-lived "backport" branches
that are cut from `master` when the release process begins.
Each release "line" (e.g. 0.34 or 0.33) has its own long-lived backport branch,
and the backport branches have names like `v0.34.x` or `v0.33.x`
(literally, `x`; it is not a placeholder in this case). Tendermint only
maintains the last two releases at a time (the oldest release is predominantly
just security patches).

## Backporting

As non-breaking changes land on `master`, they should also be backported
to these backport branches.

We use Mergify's [backport feature](https://mergify.io/features/backports) to automatically backport
to the needed branch. There should be a label for any backport branch that you'll be targeting.
To notify the bot to backport a pull request, mark the pull request with the label corresponding
to the correct backport branch. For example, to backport to v0.35.x, add the label `S:backport-to-v0.35.x`.
Once the original pull request is merged, the bot will try to cherry-pick the pull request
to the backport branch. If the bot fails to backport, it will open a pull request.
The author of the original pull request is responsible for solving the conflicts and
merging the pull request.

### Creating a backport branch

If this is the first release candidate for a major release, you get to have the honor of creating
the backport branch!

Note that, after creating the backport branch, you'll also need to update the
tags on `master` so that `go mod` is able to order the branches correctly. You
should tag `master` with a "dev" tag that is "greater than" the backport
branches' tags. See [#6072](https://github.com/tendermint/tendermint/pull/6072)
for more context.

In the following example, we'll assume that we're making a backport branch for
the 0.35.x line.

1. Start on `master`
2. Create and push the backport branch:
   `git checkout -b v0.35.x; git push origin v0.35.x`
3. Go back to master and tag it as the dev branch for the _next_ major release and push it back up:
   `git tag -a v0.36.0-dev -m "Development base for Tendermint v0.36."; git push origin v0.36.0-dev`
4. Create a new workflow (still on master) to run e2e nightlies for the new backport branch.
   (See https://github.com/tendermint/tendermint/blob/master/.github/workflows/e2e-nightly-master.yml
   for an example.)
5. Add a new section to the Mergify config (`.github/mergify.yml`) to enable the
   backport bot to work on this branch, and add a corresponding `S:backport-to-v0.35.x`
   [label](https://github.com/tendermint/tendermint/labels) so the bot can be triggered.
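Steps 1 through 3 above, consolidated as a shell sketch (the `v0.35.x` and `v0.36.0-dev` names come from the example; substitute the actual release line):

    git checkout master
    # Step 2: create and push the backport branch.
    git checkout -b v0.35.x
    git push origin v0.35.x
    # Step 3: tag master as the dev base for the next release line.
    git checkout master
    git tag -a v0.36.0-dev -m "Development base for Tendermint v0.36."
    git push origin v0.36.0-dev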
## Release candidates

Before creating an official release, especially a major release, we may want to create a
release candidate (RC) for our friends and partners to test out. We use git tags to
create RCs, and we build them off of backport branches.

Tags for RCs should follow the "standard" release naming conventions, with `-rcX` at the end
(for example, `v0.35.0-rc0`).

(Note that branches and tags _cannot_ have the same names, so it's important that these branches
have distinct names from the tags/release names.)

If this is the first RC for a major release, you'll have to make a new backport branch (see above).
Otherwise:

1. Start from the backport branch (e.g. `v0.35.x`).
2. Run the integration tests and the e2e nightlies
   (which can be triggered from the Github UI;
   e.g., https://github.com/tendermint/tendermint/actions/workflows/e2e-nightly-34x.yml).
3. Prepare the changelog:
   - Move the changes included in `CHANGELOG_PENDING.md` into `CHANGELOG.md`. Each RC should have
     its own changelog section. These will be squashed when the final candidate is released.
   - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
     all PRs.
   - Ensure that `UPGRADING.md` is up-to-date and includes notes on any breaking changes
     or other upgrading flows.
   - Bump TMVersionDefault version in `version.go`
   - Bump P2P and block protocol versions in `version.go`, if necessary.
     Check the changelog for breaking changes in these components.
   - Bump ABCI protocol version in `version.go`, if necessary
4. Open a PR with these changes against the backport branch.
5. Once these changes have landed on the backport branch, be sure to pull them back down locally.
6. Once you have the changes locally, create the new tag, specifying a name and a tag "message":
   `git tag -a v0.35.0-rc0 -m "Release Candidate v0.35.0-rc0"`
7. Push the tag back up to origin:
   `git push origin v0.35.0-rc0`
   Now the tag should be available on the repo's releases page.
8. Future RCs will continue to be built off of this branch.

Note that this process should only be used for "true" RCs:
release candidates that, if successful, will be the next release.
For more experimental "RCs," create a new, short-lived branch and tag that instead.

## Major release

This major release process assumes that this release was preceded by release candidates.
If there were no release candidates, begin by creating a backport branch, as described above.

1. Start on the backport branch (e.g. `v0.35.x`)
2. Run integration tests (`make test_integrations`) and the e2e nightlies.
3. Prepare the release:
   - "Squash" changes from the changelog entries for the RCs into a single entry,
     and add all changes included in `CHANGELOG_PENDING.md`.
     (Squashing includes both combining all entries, as well as removing or simplifying
     any intra-RC changes. It may also help to alphabetize the entries by package name.)
   - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
     all PRs.
   - Ensure that `UPGRADING.md` is up-to-date and includes notes on any breaking changes
     or other upgrading flows.
   - Bump TMVersionDefault version in `version.go`
   - Bump P2P and block protocol versions in `version.go`, if necessary
   - Bump ABCI protocol version in `version.go`, if necessary
4. Open a PR with these changes against the backport branch.
5. Once these changes are on the backport branch, push a tag with prepared release details.
   This will trigger the actual release `v0.35.0`.
   - `git tag -a v0.35.0 -m 'Release v0.35.0'`
   - `git push origin v0.35.0`
6. Make sure that `master` is updated with the latest `CHANGELOG.md`, `CHANGELOG_PENDING.md`, and `UPGRADING.md`.
7. Add the release to the documentation site generator config (see
   [DOCS_README.md](./docs/DOCS_README.md) for more details). In summary:
   - Start on branch `master`.
   - Add a new line at the bottom of [`docs/versions`](./docs/versions) to
     ensure the newest release is the default for the landing page.
   - Add a new entry to `themeConfig.versions` in
     [`docs/.vuepress/config.js`](./docs/.vuepress/config.js) to include the
     release in the dropdown versions menu.

## Minor release (point releases)

Minor releases are done differently from major releases: They are built off of
long-lived backport branches, rather than from master. As non-breaking changes
land on `master`, they should also be backported into these backport branches.

Minor releases don't have release candidates by default, although any tricky
changes may merit a release candidate.

To create a minor release:

1. Check out the long-lived backport branch: `git checkout v0.35.x`
2. Run integration tests (`make test_integrations`) and the nightlies.
3. Check out a new branch and prepare the release:
   - Copy `CHANGELOG_PENDING.md` to the top of `CHANGELOG.md`
   - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for all issues
   - Run `bash ./scripts/authors.sh` to get a list of authors since the latest release, and add the GitHub aliases of external contributors to the top of the CHANGELOG. To look up an alias from an email, try `bash ./scripts/authors.sh <email>`
   - Reset `CHANGELOG_PENDING.md`
   - Bump the TMVersionDefault in `version.go`
   - Bump the ABCI version number, if necessary.
     (Note that ABCI follows semver, and that ABCI versions are the only versions
     which can change during minor releases, and only field additions are valid minor changes.)
4. Open a PR with these changes that will land them back on `v0.35.x`
5. Once this change has landed on the backport branch, make sure to pull it locally, then push a tag.
   - `git tag -a v0.35.1 -m 'Release v0.35.1'`
   - `git push origin v0.35.1`
6. Create a pull request back to master with the CHANGELOG & version changes from the latest release.
   - Remove all `R:minor` labels from the pull requests that were included in the release.
   - Do not merge the backport branch into master.
abci/client/client.go

@@ -87,7 +87,7 @@ type ReqRes struct {
 	*sync.WaitGroup
 	*types.Response // Not set atomically, so be sure to use WaitGroup.
 
-	mtx  tmsync.RWMutex
+	mtx  tmsync.Mutex
 	done bool                  // Gets set to true once *after* WaitGroup.Done().
 	cb   func(*types.Response) // A single callback that may be set.
 }

@@ -137,16 +137,16 @@ func (r *ReqRes) InvokeCallback() {
 //
 // ref: https://github.com/tendermint/tendermint/issues/5439
 func (r *ReqRes) GetCallback() func(*types.Response) {
-	r.mtx.RLock()
-	defer r.mtx.RUnlock()
+	r.mtx.Lock()
+	defer r.mtx.Unlock()
 	return r.cb
 }
 
 // SetDone marks the ReqRes object as done.
 func (r *ReqRes) SetDone() {
 	r.mtx.Lock()
+	defer r.mtx.Unlock()
 	r.done = true
-	r.mtx.Unlock()
 }
 
 func waitGroup1() (wg *sync.WaitGroup) {
abci/client/creator.go

@@ -13,7 +13,7 @@ type Creator func() (Client, error)
 // NewLocalCreator returns a Creator for the given app,
 // which will be running locally.
 func NewLocalCreator(app types.Application) Creator {
-	mtx := new(tmsync.RWMutex)
+	mtx := new(tmsync.Mutex)
 
 	return func() (Client, error) {
 		return NewLocalClient(mtx, app), nil
abci/client/grpc_client.go

@@ -24,7 +24,7 @@ type grpcClient struct {
 	conn     *grpc.ClientConn
 	chReqRes chan *ReqRes // dispatches "async" responses to callbacks *in order*, needed by mempool
 
-	mtx   tmsync.RWMutex
+	mtx   tmsync.Mutex
 	addr  string
 	err   error
 	resCb func(*types.Request, *types.Response) // listens to all callbacks

@@ -149,8 +149,8 @@ func (cli *grpcClient) StopForError(err error) {
 }
 
 func (cli *grpcClient) Error() error {
-	cli.mtx.RLock()
-	defer cli.mtx.RUnlock()
+	cli.mtx.Lock()
+	defer cli.mtx.Unlock()
 	return cli.err
 }

@@ -158,8 +158,8 @@ func (cli *grpcClient) Error() error {
 // NOTE: callback may get internally generated flush responses.
 func (cli *grpcClient) SetResponseCallback(resCb Callback) {
 	cli.mtx.Lock()
+	defer cli.mtx.Unlock()
 	cli.resCb = resCb
-	cli.mtx.Unlock()
 }
 
 //----------------------------------------
abci/client/local_client.go

@@ -15,7 +15,7 @@ import (
 type localClient struct {
 	service.BaseService
 
-	mtx *tmsync.RWMutex
+	mtx *tmsync.Mutex
 	types.Application
 	Callback
 }

@@ -26,24 +26,22 @@ var _ Client = (*localClient)(nil)
 // methods of the given app.
 //
 // Both Async and Sync methods ignore the given context.Context parameter.
-func NewLocalClient(mtx *tmsync.RWMutex, app types.Application) Client {
+func NewLocalClient(mtx *tmsync.Mutex, app types.Application) Client {
 	if mtx == nil {
-		mtx = &tmsync.RWMutex{}
+		mtx = new(tmsync.Mutex)
 	}
 
 	cli := &localClient{
 		mtx:         mtx,
 		Application: app,
 	}
 
 	cli.BaseService = *service.NewBaseService(nil, "localClient", cli)
 	return cli
 }
 
 func (app *localClient) SetResponseCallback(cb Callback) {
 	app.mtx.Lock()
+	defer app.mtx.Unlock()
 	app.Callback = cb
-	app.mtx.Unlock()
 }
 
 // TODO: change types.Application to include Error()?

@@ -67,8 +65,8 @@ func (app *localClient) EchoAsync(ctx context.Context, msg string) (*ReqRes, err
 }
 
 func (app *localClient) InfoAsync(ctx context.Context, req types.RequestInfo) (*ReqRes, error) {
-	app.mtx.RLock()
-	defer app.mtx.RUnlock()
+	app.mtx.Lock()
+	defer app.mtx.Unlock()
 
 	res := app.Application.Info(req)
 	return app.callback(

@@ -100,8 +98,8 @@ func (app *localClient) CheckTxAsync(ctx context.Context, req types.RequestCheck
 }
 
 func (app *localClient) QueryAsync(ctx context.Context, req types.RequestQuery) (*ReqRes, error) {
-	app.mtx.RLock()
-	defer app.mtx.RUnlock()
+	app.mtx.Lock()
+	defer app.mtx.Unlock()
 
 	res := app.Application.Query(req)
 	return app.callback(

@@ -215,8 +213,8 @@ func (app *localClient) EchoSync(ctx context.Context, msg string) (*types.Respon
 }
 
 func (app *localClient) InfoSync(ctx context.Context, req types.RequestInfo) (*types.ResponseInfo, error) {
-	app.mtx.RLock()
-	defer app.mtx.RUnlock()
+	app.mtx.Lock()
+	defer app.mtx.Unlock()
 
 	res := app.Application.Info(req)
 	return &res, nil

@@ -249,8 +247,8 @@ func (app *localClient) QuerySync(
 	ctx context.Context,
 	req types.RequestQuery,
 ) (*types.ResponseQuery, error) {
-	app.mtx.RLock()
-	defer app.mtx.RUnlock()
+	app.mtx.Lock()
+	defer app.mtx.Unlock()
 
 	res := app.Application.Query(req)
 	return &res, nil
abci/client/socket_client.go

@@ -39,7 +39,7 @@ type socketClient struct {
 
 	reqQueue chan *reqResWithContext
 
-	mtx     tmsync.RWMutex
+	mtx     tmsync.Mutex
 	err     error
 	reqSent *list.List                            // list of requests sent, waiting for response
 	resCb   func(*types.Request, *types.Response) // called on all requests, if set.

@@ -102,8 +102,8 @@ func (cli *socketClient) OnStop() {
 
 // Error returns an error if the client was stopped abruptly.
 func (cli *socketClient) Error() error {
-	cli.mtx.RLock()
-	defer cli.mtx.RUnlock()
+	cli.mtx.Lock()
+	defer cli.mtx.Unlock()
 	return cli.err
 }

@@ -113,8 +113,8 @@ func (cli *socketClient) Error() error {
 // NOTE: callback may get internally generated flush responses.
 func (cli *socketClient) SetResponseCallback(resCb Callback) {
 	cli.mtx.Lock()
+	defer cli.mtx.Unlock()
 	cli.resCb = resCb
-	cli.mtx.Unlock()
 }
 
 //----------------------------------------
cmd/tendermint/commands/reindex_event.go

@@ -29,11 +29,12 @@ var ReIndexEventCmd = &cobra.Command{
 	Use:   "reindex-event",
 	Short: "reindex events to the event store backends",
 	Long: `
-reindex-event is an offline tooling to re-index block and tx events to the eventsinks,
-you can run this command when the event store backend dropped/disconnected or you want to replace the backend.
-The default start-height is 0, meaning the tooling will start reindex from the base block height(inclusive); and the
-default end-height is 0, meaning the tooling will reindex until the latest block height(inclusive). User can omits
-either or both arguments.
+reindex-event is an offline tooling to re-index block and tx events to the eventsinks,
+you can run this command when the event store backend dropped/disconnected or you want to
+replace the backend. The default start-height is 0, meaning the tooling will start
+reindex from the base block height(inclusive); and the default end-height is 0, meaning
+the tooling will reindex until the latest block height(inclusive). User can omit
+either or both arguments.
 `,
 	Example: `
 	tendermint reindex-event
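A hedged usage sketch for the command. The `--start-height` and `--end-height` flag spellings are assumed from the option names in the help text above; the flag definitions themselves are outside this diff:

    # Re-index all events, from the base block height through the latest height.
    tendermint reindex-event
    # Re-index a specific height range (flag names assumed from the help text).
    tendermint reindex-event --start-height 100 --end-height 200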
cmd/tendermint/commands/reset_priv_validator.go

@@ -37,7 +37,7 @@ var ResetPrivValidatorCmd = &cobra.Command{
 // XXX: this is totally unsafe.
 // it's only suitable for testnets.
 func resetAll(cmd *cobra.Command, args []string) error {
-	return ResetAll(config.DBDir(), config.P2P.AddrBookFile(), config.PrivValidator.KeyFile(),
+	return ResetAll(config.DBDir(), config.PrivValidator.KeyFile(),
 		config.PrivValidator.StateFile(), logger)
 }

@@ -49,12 +49,7 @@ func resetPrivValidator(cmd *cobra.Command, args []string) error {
 
 // ResetAll removes address book files plus all data, and resets the privValidator data.
 // Exported so other CLI tools can use it.
-func ResetAll(dbDir, addrBookFile, privValKeyFile, privValStateFile string, logger log.Logger) error {
-	if keepAddrBook {
-		logger.Info("The address book remains intact")
-	} else {
-		removeAddrBook(addrBookFile, logger)
-	}
+func ResetAll(dbDir, privValKeyFile, privValStateFile string, logger log.Logger) error {
 	if err := os.RemoveAll(dbDir); err == nil {
 		logger.Info("Removed all blockchain history", "dir", dbDir)
 	} else {

@@ -87,11 +82,3 @@ func resetFilePV(privValKeyFile, privValStateFile string, logger log.Logger) err
 	}
 	return nil
 }
-
-func removeAddrBook(addrBookFile string, logger log.Logger) {
-	if err := os.Remove(addrBookFile); err == nil {
-		logger.Info("Removed existing address book", "file", addrBookFile)
-	} else if !os.IsNotExist(err) {
-		logger.Info("Error removing address book", "file", addrBookFile, "err", err)
-	}
-}
cmd/tendermint/commands/rollback.go (new file, 46 lines)

@@ -0,0 +1,46 @@
package commands

import (
	"fmt"

	"github.com/spf13/cobra"

	cfg "github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/internal/state"
)

var RollbackStateCmd = &cobra.Command{
	Use:   "rollback",
	Short: "rollback tendermint state by one height",
	Long: `
A state rollback is performed to recover from an incorrect application state transition,
when Tendermint has persisted an incorrect app hash and is thus unable to make
progress. Rollback overwrites a state at height n with the state at height n - 1.
The application should also roll back to height n - 1. No blocks are removed, so upon
restarting Tendermint the transactions in block n will be re-executed against the
application.
`,
	RunE: func(cmd *cobra.Command, args []string) error {
		height, hash, err := RollbackState(config)
		if err != nil {
			return fmt.Errorf("failed to rollback state: %w", err)
		}

		fmt.Printf("Rolled back state to height %d and hash %v", height, hash)
		return nil
	},
}

// RollbackState takes the state at the current height n and overwrites it with the state
// at height n - 1. Note state here refers to tendermint state not application state.
// Returns the latest state height and app hash alongside an error if there was one.
func RollbackState(config *cfg.Config) (int64, []byte, error) {
	// use the parsed config to load the block and state store
	blockStore, stateStore, err := loadStateAndBlockStore(config)
	if err != nil {
		return -1, nil, err
	}

	// rollback the last state
	return state.Rollback(blockStore, stateStore)
}
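A usage sketch for the new command. The output line follows the Printf format in the RunE handler above; the height and hash values shown are illustrative only:

    # With the node stopped, rewind tendermint state by one height.
    tendermint rollback
    # => Rolled back state to height 1234 and hash [212 142 ...]
    # Roll the application back to height n - 1 as well, then restart;
    # the transactions in block n will be re-executed against the application.
    tendermint start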
cmd/tendermint/commands/run_node.go

@@ -70,10 +70,6 @@ func AddNodeFlags(cmd *cobra.Command) {
 
 	// rpc flags
 	cmd.Flags().String("rpc.laddr", config.RPC.ListenAddress, "RPC listen address. Port required")
-	cmd.Flags().String(
-		"rpc.grpc-laddr",
-		config.RPC.GRPCListenAddress,
-		"GRPC listen address (BroadcastTx only). Port required")
 	cmd.Flags().Bool("rpc.unsafe", config.RPC.Unsafe, "enabled unsafe rpc methods")
 	cmd.Flags().String("rpc.pprof-laddr", config.RPC.PprofListenAddress, "pprof listen address (https://golang.org/pkg/net/http/pprof)")

@@ -84,8 +80,6 @@ func AddNodeFlags(cmd *cobra.Command) {
 		"node listen address. (0.0.0.0:0 means any interface, any port)")
 	cmd.Flags().String("p2p.seeds", config.P2P.Seeds, "comma-delimited ID@host:port seed nodes")
 	cmd.Flags().String("p2p.persistent-peers", config.P2P.PersistentPeers, "comma-delimited ID@host:port persistent peers")
-	cmd.Flags().String("p2p.unconditional-peer-ids",
-		config.P2P.UnconditionalPeerIDs, "comma-delimited IDs of unconditional peers")
 	cmd.Flags().Bool("p2p.upnp", config.P2P.UPNP, "enable/disable UPNP port forwarding")
 	cmd.Flags().Bool("p2p.pex", config.P2P.PexReactor, "enable/disable Peer-Exchange")
 	cmd.Flags().String("p2p.private-peer-ids", config.P2P.PrivatePeerIDs, "comma-delimited private peer IDs")
cmd/tendermint/commands/testnet.go

@@ -226,7 +226,6 @@ func testnetFiles(cmd *cobra.Command, args []string) error {
 	for i := 0; i < nValidators+nNonValidators; i++ {
 		nodeDir := filepath.Join(outputDir, fmt.Sprintf("%s%d", nodeDirPrefix, i))
 		config.SetRoot(nodeDir)
-		config.P2P.AddrBookStrict = false
 		config.P2P.AllowDuplicateIP = true
 		if populatePersistentPeers {
 			persistentPeersWithoutSelf := make([]string, 0)
cmd/tendermint/main.go

@@ -29,6 +29,7 @@ func main() {
 		cmd.GenNodeKeyCmd,
 		cmd.VersionCmd,
 		cmd.InspectCmd,
+		cmd.RollbackStateCmd,
 		cmd.MakeKeyMigrateCommand(),
 		debug.DebugCmd,
 		cli.NewCompletionCmd(rootCmd, true),
config/config.go (161 changed lines)

@@ -30,7 +30,6 @@ const (
 	ModeSeed = "seed"
 
 	BlockSyncV0 = "v0"
-	BlockSyncV2 = "v2"
 
 	MempoolV0 = "v0"
 	MempoolV1 = "v1"

@@ -54,16 +53,14 @@
 	defaultPrivValKeyName   = "priv_validator_key.json"
 	defaultPrivValStateName = "priv_validator_state.json"
 
-	defaultNodeKeyName  = "node_key.json"
-	defaultAddrBookName = "addrbook.json"
+	defaultNodeKeyName = "node_key.json"
 
 	defaultConfigFilePath   = filepath.Join(defaultConfigDir, defaultConfigFileName)
 	defaultGenesisJSONPath  = filepath.Join(defaultConfigDir, defaultGenesisJSONName)
 	defaultPrivValKeyPath   = filepath.Join(defaultConfigDir, defaultPrivValKeyName)
 	defaultPrivValStatePath = filepath.Join(defaultDataDir, defaultPrivValStateName)
 
-	defaultNodeKeyPath  = filepath.Join(defaultConfigDir, defaultNodeKeyName)
-	defaultAddrBookPath = filepath.Join(defaultConfigDir, defaultAddrBookName)
+	defaultNodeKeyPath = filepath.Join(defaultConfigDir, defaultNodeKeyName)
 )

@@ -142,9 +139,6 @@ func (cfg *Config) ValidateBasic() error {
 	if err := cfg.RPC.ValidateBasic(); err != nil {
 		return fmt.Errorf("error in [rpc] section: %w", err)
 	}
-	if err := cfg.P2P.ValidateBasic(); err != nil {
-		return fmt.Errorf("error in [p2p] section: %w", err)
-	}
 	if err := cfg.Mempool.ValidateBasic(); err != nil {
 		return fmt.Errorf("error in [mempool] section: %w", err)
 	}

@@ -461,24 +455,10 @@ type RPCConfig struct {
 	// A list of non simple headers the client is allowed to use with cross-domain requests.
 	CORSAllowedHeaders []string `mapstructure:"cors-allowed-headers"`
 
-	// TCP or UNIX socket address for the gRPC server to listen on
-	// NOTE: This server only supports /broadcast_tx_commit
-	// Deprecated: gRPC in the RPC layer of Tendermint will be removed in 0.36.
-	GRPCListenAddress string `mapstructure:"grpc-laddr"`
-
-	// Maximum number of simultaneous connections.
-	// Does not include RPC (HTTP&WebSocket) connections. See max-open-connections
-	// If you want to accept a larger number than the default, make sure
-	// you increase your OS limits.
-	// 0 - unlimited.
-	// Deprecated: gRPC in the RPC layer of Tendermint will be removed in 0.36.
-	GRPCMaxOpenConnections int `mapstructure:"grpc-max-open-connections"`
-
 	// Activate unsafe RPC commands like /dial-persistent-peers and /unsafe-flush-mempool
 	Unsafe bool `mapstructure:"unsafe"`
 
 	// Maximum number of simultaneous connections (including WebSocket).
 	// Does not include gRPC connections. See grpc-max-open-connections
 	// If you want to accept a larger number than the default, make sure
 	// you increase your OS limits.
 	// 0 - unlimited.

@@ -492,7 +472,7 @@ type RPCConfig struct {
 	MaxSubscriptionClients int `mapstructure:"max-subscription-clients"`
 
 	// Maximum number of unique queries a given client can /subscribe to
-	// If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set
+	// If you're using a Local RPC client and /broadcast_tx_commit, set this
 	// to the estimated maximum number of broadcast_tx_commit calls per block.
 	MaxSubscriptionsPerClient int `mapstructure:"max-subscriptions-per-client"`

@@ -533,12 +513,10 @@ type RPCConfig struct {
 // DefaultRPCConfig returns a default configuration for the RPC server
 func DefaultRPCConfig() *RPCConfig {
 	return &RPCConfig{
-		ListenAddress:          "tcp://127.0.0.1:26657",
-		CORSAllowedOrigins:     []string{},
-		CORSAllowedMethods:     []string{http.MethodHead, http.MethodGet, http.MethodPost},
-		CORSAllowedHeaders:     []string{"Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time"},
-		GRPCListenAddress:      "",
-		GRPCMaxOpenConnections: 900,
+		ListenAddress:      "tcp://127.0.0.1:26657",
+		CORSAllowedOrigins: []string{},
+		CORSAllowedMethods: []string{http.MethodHead, http.MethodGet, http.MethodPost},
+		CORSAllowedHeaders: []string{"Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time"},
 
 		Unsafe:             false,
 		MaxOpenConnections: 900,

@@ -559,7 +537,6 @@ func DefaultRPCConfig() *RPCConfig {
 func TestRPCConfig() *RPCConfig {
 	cfg := DefaultRPCConfig()
 	cfg.ListenAddress = "tcp://127.0.0.1:36657"
-	cfg.GRPCListenAddress = "tcp://127.0.0.1:36658"
 	cfg.Unsafe = true
 	return cfg
 }

@@ -567,9 +544,6 @@ func TestRPCConfig() *RPCConfig {
 // ValidateBasic performs basic validation (checking param bounds, etc.) and
 // returns an error if any check fails.
 func (cfg *RPCConfig) ValidateBasic() error {
-	if cfg.GRPCMaxOpenConnections < 0 {
-		return errors.New("grpc-max-open-connections can't be negative")
-	}
 	if cfg.MaxOpenConnections < 0 {
 		return errors.New("max-open-connections can't be negative")
 	}

@@ -647,25 +621,6 @@ type P2PConfig struct { //nolint: maligned
 	// UPNP port forwarding
 	UPNP bool `mapstructure:"upnp"`
 
-	// Path to address book
-	AddrBook string `mapstructure:"addr-book-file"`
-
-	// Set true for strict address routability rules
-	// Set false for private or local networks
-	AddrBookStrict bool `mapstructure:"addr-book-strict"`
-
-	// Maximum number of inbound peers
-	//
-	// TODO: Remove once p2p refactor is complete in favor of MaxConnections.
-	// ref: https://github.com/tendermint/tendermint/issues/5670
-	MaxNumInboundPeers int `mapstructure:"max-num-inbound-peers"`
-
-	// Maximum number of outbound peers to connect to, excluding persistent peers.
-	//
-	// TODO: Remove once p2p refactor is complete in favor of MaxConnections.
-	// ref: https://github.com/tendermint/tendermint/issues/5670
-	MaxNumOutboundPeers int `mapstructure:"max-num-outbound-peers"`
-
 	// MaxConnections defines the maximum number of connected peers (inbound and
 	// outbound).
 	MaxConnections uint16 `mapstructure:"max-connections"`

@@ -674,24 +629,6 @@ type P2PConfig struct { //nolint: maligned
 	// attempts per IP address.
 	MaxIncomingConnectionAttempts uint `mapstructure:"max-incoming-connection-attempts"`
 
-	// List of node IDs, to which a connection will be (re)established ignoring any existing limits
-	UnconditionalPeerIDs string `mapstructure:"unconditional-peer-ids"`
-
-	// Maximum pause when redialing a persistent peer (if zero, exponential backoff is used)
-	PersistentPeersMaxDialPeriod time.Duration `mapstructure:"persistent-peers-max-dial-period"`
-
 	// Time to wait before flushing messages out on the connection
 	FlushThrottleTimeout time.Duration `mapstructure:"flush-throttle-timeout"`
 
 	// Maximum size of a message packet payload, in bytes
 	MaxPacketMsgPayloadSize int `mapstructure:"max-packet-msg-payload-size"`
 
 	// Rate at which packets can be sent, in bytes/second
 	SendRate int64 `mapstructure:"send-rate"`
 
 	// Rate at which packets can be received, in bytes/second
 	RecvRate int64 `mapstructure:"recv-rate"`
 
 	// Set true to enable the peer-exchange reactor
 	PexReactor bool `mapstructure:"pex"`

@@ -710,13 +647,8 @@ type P2PConfig struct { //nolint: maligned
 	// Force dial to fail
 	TestDialFail bool `mapstructure:"test-dial-fail"`
 
-	// UseLegacy enables the "legacy" P2P implementation and
-	// disables the newer default implementation. This flag will
-	// be removed in a future release.
-	UseLegacy bool `mapstructure:"use-legacy"`
-
 	// Makes it possible to configure which queue backend the p2p
-	// layer uses. Options are: "fifo", "priority" and "wdrr",
+	// layer uses. Options are: "fifo" and "priority",
 	// with the default being "priority".
 	QueueType string `mapstructure:"queue-type"`
 }

@@ -727,29 +659,14 @@ func DefaultP2PConfig() *P2PConfig {
 		ListenAddress:   "tcp://0.0.0.0:26656",
 		ExternalAddress: "",
 		UPNP:            false,
-		AddrBook:                      defaultAddrBookPath,
-		AddrBookStrict:                true,
-		MaxNumInboundPeers:            40,
-		MaxNumOutboundPeers:           10,
 		MaxConnections:                64,
 		MaxIncomingConnectionAttempts: 100,
-		PersistentPeersMaxDialPeriod:  0 * time.Second,
 		FlushThrottleTimeout:          100 * time.Millisecond,
 		// The MTU (Maximum Transmission Unit) for Ethernet is 1500 bytes.
 		// The IP header and the TCP header take up 20 bytes each at least (unless
 		// optional header fields are used) and thus the max for (non-Jumbo frame)
 		// Ethernet is 1500 - 20 -20 = 1460
 		// Source: https://stackoverflow.com/a/3074427/820520
 		MaxPacketMsgPayloadSize: 1400,
 		SendRate:                5120000, // 5 mB/s
 		RecvRate:                5120000, // 5 mB/s
-		PexReactor:       true,
-		AllowDuplicateIP: false,
-		HandshakeTimeout: 20 * time.Second,
-		DialTimeout:      3 * time.Second,
-		TestDialFail:     false,
-		QueueType:        "priority",
-		UseLegacy:        false,
+		PexReactor:       true,
+		AllowDuplicateIP: false,
+		HandshakeTimeout: 20 * time.Second,
+		DialTimeout:      3 * time.Second,
+		TestDialFail:     false,
+		QueueType:        "priority",
 	}
 }

@@ -757,43 +674,10 @@ func DefaultP2PConfig() *P2PConfig {
 func TestP2PConfig() *P2PConfig {
 	cfg := DefaultP2PConfig()
 	cfg.ListenAddress = "tcp://127.0.0.1:36656"
 	cfg.FlushThrottleTimeout = 10 * time.Millisecond
 	cfg.AllowDuplicateIP = true
 	return cfg
 }
 
-// AddrBookFile returns the full path to the address book
-func (cfg *P2PConfig) AddrBookFile() string {
-	return rootify(cfg.AddrBook, cfg.RootDir)
-}
-
-// ValidateBasic performs basic validation (checking param bounds, etc.) and
-// returns an error if any check fails.
-func (cfg *P2PConfig) ValidateBasic() error {
-	if cfg.MaxNumInboundPeers < 0 {
-		return errors.New("max-num-inbound-peers can't be negative")
-	}
-	if cfg.MaxNumOutboundPeers < 0 {
-		return errors.New("max-num-outbound-peers can't be negative")
-	}
-	if cfg.FlushThrottleTimeout < 0 {
-		return errors.New("flush-throttle-timeout can't be negative")
-	}
-	if cfg.PersistentPeersMaxDialPeriod < 0 {
-		return errors.New("persistent-peers-max-dial-period can't be negative")
-	}
-	if cfg.MaxPacketMsgPayloadSize < 0 {
-		return errors.New("max-packet-msg-payload-size can't be negative")
-	}
-	if cfg.SendRate < 0 {
-		return errors.New("send-rate can't be negative")
-	}
-	if cfg.RecvRate < 0 {
-		return errors.New("recv-rate can't be negative")
-	}
-	return nil
-}
-
 //-----------------------------------------------------------------------------
 // MempoolConfig

@@ -1025,15 +909,13 @@ func (cfg *StateSyncConfig) ValidateBasic() error {
 // allows them to catchup quickly by downloading blocks in parallel
 // and verifying their commits.
 type BlockSyncConfig struct {
-	Enable  bool   `mapstructure:"enable"`
-	Version string `mapstructure:"version"`
+	Enable bool `mapstructure:"enable"`
 }
 
 // DefaultBlockSyncConfig returns a default configuration for the block sync service
 func DefaultBlockSyncConfig() *BlockSyncConfig {
 	return &BlockSyncConfig{
-		Enable:  true,
-		Version: BlockSyncV0,
+		Enable: true,
 	}
 }

@@ -1043,16 +925,7 @@ func TestBlockSyncConfig() *BlockSyncConfig {
 }
 
 // ValidateBasic performs basic validation.
-func (cfg *BlockSyncConfig) ValidateBasic() error {
-	switch cfg.Version {
-	case BlockSyncV0:
-		return nil
-	case BlockSyncV2:
-		return errors.New("blocksync version v2 is no longer supported. Please use v0")
-	default:
-		return fmt.Errorf("unknown blocksync version %s", cfg.Version)
-	}
-}
+func (cfg *BlockSyncConfig) ValidateBasic() error { return nil }
 
 //-----------------------------------------------------------------------------
 // ConsensusConfig
@@ -66,7 +66,6 @@ func TestRPCConfigValidateBasic(t *testing.T) {
|
||||
assert.NoError(t, cfg.ValidateBasic())
|
||||
|
||||
fieldsToTest := []string{
|
||||
"GRPCMaxOpenConnections",
|
||||
"MaxOpenConnections",
|
||||
"MaxSubscriptionClients",
|
||||
"MaxSubscriptionsPerClient",
|
||||
@@ -82,26 +81,6 @@ func TestRPCConfigValidateBasic(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestP2PConfigValidateBasic(t *testing.T) {
|
||||
cfg := TestP2PConfig()
|
||||
assert.NoError(t, cfg.ValidateBasic())
|
||||
|
||||
fieldsToTest := []string{
|
||||
"MaxNumInboundPeers",
|
||||
"MaxNumOutboundPeers",
|
||||
"FlushThrottleTimeout",
|
||||
"MaxPacketMsgPayloadSize",
|
||||
"SendRate",
|
||||
"RecvRate",
|
||||
}
|
||||
|
||||
for _, fieldName := range fieldsToTest {
|
||||
reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(-1)
|
||||
assert.Error(t, cfg.ValidateBasic())
|
||||
reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(0)
|
||||
}
|
||||
}
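
The loop above uses reflection so that a single table of field names drives every negative-bounds case: each named int64 field is set to -1, the validator is expected to fail, and the field is restored to 0. A standalone illustration of the same trick, with an invented struct for the example:

package main

import (
	"fmt"
	"reflect"
)

type limits struct {
	SendRate int64
	RecvRate int64
}

func main() {
	cfg := &limits{}
	for _, name := range []string{"SendRate", "RecvRate"} {
		f := reflect.ValueOf(cfg).Elem().FieldByName(name)
		f.SetInt(-1)                         // tamper with the field by name
		fmt.Printf("%s=%d\n", name, f.Int()) // prints -1
		f.SetInt(0)                          // restore for the next case
	}
}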

func TestMempoolConfigValidateBasic(t *testing.T) {
	cfg := TestMempoolConfig()
	assert.NoError(t, cfg.ValidateBasic())
@@ -128,13 +107,6 @@ func TestStateSyncConfigValidateBasic(t *testing.T) {
func TestBlockSyncConfigValidateBasic(t *testing.T) {
	cfg := TestBlockSyncConfig()
	assert.NoError(t, cfg.ValidateBasic())

	// tamper with version
	cfg.Version = "v2"
	assert.Error(t, cfg.ValidateBasic())

	cfg.Version = "invalid"
	assert.Error(t, cfg.ValidateBasic())
}

func TestConsensusConfig_ValidateBasic(t *testing.T) {

@@ -193,26 +193,10 @@ cors-allowed-methods = [{{ range .RPC.CORSAllowedMethods }}{{ printf "%q, " . }}
# A list of non-simple headers the client is allowed to use with cross-domain requests
cors-allowed-headers = [{{ range .RPC.CORSAllowedHeaders }}{{ printf "%q, " . }}{{end}}]

# TCP or UNIX socket address for the gRPC server to listen on
# NOTE: This server only supports /broadcast_tx_commit
# Deprecated: gRPC in the RPC layer of Tendermint will be deprecated in 0.36.
grpc-laddr = "{{ .RPC.GRPCListenAddress }}"

# Maximum number of simultaneous connections.
# Does not include RPC (HTTP&WebSocket) connections. See max-open-connections
# If you want to accept a larger number than the default, make sure
# you increase your OS limits.
# 0 - unlimited.
# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
# 1024 - 40 - 10 - 50 = 924 = ~900
# Deprecated: gRPC in the RPC layer of Tendermint will be deprecated in 0.36.
grpc-max-open-connections = {{ .RPC.GRPCMaxOpenConnections }}

# Activate unsafe RPC commands like /dial-seeds and /unsafe-flush-mempool
unsafe = {{ .RPC.Unsafe }}

# Maximum number of simultaneous connections (including WebSocket).
# Does not include gRPC connections. See grpc-max-open-connections
# If you want to accept a larger number than the default, make sure
# you increase your OS limits.
# 0 - unlimited.
@@ -226,8 +210,8 @@ max-open-connections = {{ .RPC.MaxOpenConnections }}
max-subscription-clients = {{ .RPC.MaxSubscriptionClients }}

# Maximum number of unique queries a given client can /subscribe to
# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to
# the estimated # maximum number of broadcast_tx_commit calls per block.
# If you're using a Local RPC client and /broadcast_tx_commit, set this
# to the estimated maximum number of broadcast_tx_commit calls per block.
max-subscriptions-per-client = {{ .RPC.MaxSubscriptionsPerClient }}

# How long to wait for a tx to be committed during /broadcast_tx_commit.
@@ -265,9 +249,6 @@ pprof-laddr = "{{ .RPC.PprofListenAddress }}"
#######################################################
[p2p]

# Enable the legacy p2p layer.
use-legacy = {{ .P2P.UseLegacy }}

# Select the p2p internal queue
queue-type = "{{ .P2P.QueueType }}"

@@ -299,62 +280,12 @@ persistent-peers = "{{ .P2P.PersistentPeers }}"
# UPNP port forwarding
upnp = {{ .P2P.UPNP }}

# Path to address book
# TODO: Remove once p2p refactor is complete in favor of peer store.
addr-book-file = "{{ js .P2P.AddrBook }}"

# Set true for strict address routability rules
# Set false for private or local networks
addr-book-strict = {{ .P2P.AddrBookStrict }}

# Maximum number of inbound peers
#
# TODO: Remove once p2p refactor is complete in favor of MaxConnections.
# ref: https://github.com/tendermint/tendermint/issues/5670
max-num-inbound-peers = {{ .P2P.MaxNumInboundPeers }}

# Maximum number of outbound peers to connect to, excluding persistent peers
#
# TODO: Remove once p2p refactor is complete in favor of MaxConnections.
# ref: https://github.com/tendermint/tendermint/issues/5670
max-num-outbound-peers = {{ .P2P.MaxNumOutboundPeers }}

# Maximum number of connections (inbound and outbound).
max-connections = {{ .P2P.MaxConnections }}

# Rate limits the number of incoming connection attempts per IP address.
max-incoming-connection-attempts = {{ .P2P.MaxIncomingConnectionAttempts }}

# List of node IDs, to which a connection will be (re)established ignoring any existing limits
# TODO: Remove once p2p refactor is complete.
# ref: https://github.com/tendermint/tendermint/issues/5670
unconditional-peer-ids = "{{ .P2P.UnconditionalPeerIDs }}"

# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used)
# TODO: Remove once p2p refactor is complete
# ref: https://github.com/tendermint/tendermint/issues/5670
persistent-peers-max-dial-period = "{{ .P2P.PersistentPeersMaxDialPeriod }}"

# Time to wait before flushing messages out on the connection
# TODO: Remove once p2p refactor is complete
# ref: https://github.com/tendermint/tendermint/issues/5670
flush-throttle-timeout = "{{ .P2P.FlushThrottleTimeout }}"

# Maximum size of a message packet payload, in bytes
# TODO: Remove once p2p refactor is complete
# ref: https://github.com/tendermint/tendermint/issues/5670
max-packet-msg-payload-size = {{ .P2P.MaxPacketMsgPayloadSize }}

# Rate at which packets can be sent, in bytes/second
# TODO: Remove once p2p refactor is complete
# ref: https://github.com/tendermint/tendermint/issues/5670
send-rate = {{ .P2P.SendRate }}

# Rate at which packets can be received, in bytes/second
# TODO: Remove once p2p refactor is complete
# ref: https://github.com/tendermint/tendermint/issues/5670
recv-rate = {{ .P2P.RecvRate }}

# Set true to enable the peer-exchange reactor
pex = {{ .P2P.PexReactor }}

@@ -477,11 +408,6 @@ fetchers = "{{ .StateSync.Fetchers }}"
# and verifying their commits
enable = {{ .BlockSync.Enable }}

# Block Sync version to use:
# 1) "v0" (default) - the standard Block Sync implementation
# 2) "v2" - DEPRECATED, please use v0
version = "{{ .BlockSync.Version }}"

#######################################################
### Consensus Configuration Options ###
#######################################################

@@ -1,3 +1,4 @@
//go:build !libsecp256k1
// +build !libsecp256k1

package secp256k1

@@ -25,6 +25,7 @@
- April 28, 2021: Specify search capabilities are only supported through the KV indexer (@marbar3778)
- May 19, 2021: Update the SQL schema and the eventsink interface (@jayt106)
- Aug 30, 2021: Update the SQL schema and the psql implementation (@creachadair)
- Oct 5, 2021: Clarify goals and implementation changes (@creachadair)

## Status

@@ -73,19 +74,38 @@ the database used.
We will adopt a similar approach to that of the Cosmos SDK's `KVStore` state
listening described in [ADR-038](https://github.com/cosmos/cosmos-sdk/blob/master/docs/architecture/adr-038-state-listening.md).

Namely, we will perform the following:
We will implement the following changes:

- Introduce a new interface, `EventSink`, that all data sinks must implement (a sketch
  follows this list).
- Augment the existing `tx_index.indexer` configuration to now accept a series
  of one or more indexer types, i.e sinks.
  of one or more indexer types, i.e., sinks.
- Combine the current `TxIndexer` and `BlockIndexer` into a single `KVEventSink`
  that implements the `EventSink` interface.
- Introduce an additional `EventSink` that is backed by [PostgreSQL](https://www.postgresql.org/).
- Implement the necessary schemas to support both block and transaction event
  indexing.
- Introduce an additional `EventSink` implementation that is backed by
  [PostgreSQL](https://www.postgresql.org/).
- Implement the necessary schemas to support both block and transaction event indexing.
- Update `IndexerService` to use a series of `EventSinks`.
- Proxy queries to the relevant sink's native query layer.
- Update all relevant RPC methods.
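
To make the first bullet concrete, here is a hedged sketch of what an `EventSink` contract along these lines could look like in Go. The method set is illustrative only; the authoritative definition belongs to this ADR's Detailed Design section, and the `abci`/`types` imports are assumed from the Tendermint tree.

package indexer

import (
	abci "github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/types"
)

// EventSink is an illustrative sketch of the interface every data sink
// would implement; the real signature set lives in the Detailed Design.
type EventSink interface {
	// IndexBlockEvents indexes the events of a single block header.
	IndexBlockEvents(types.EventDataNewBlockHeader) error

	// IndexTxEvents indexes the events of one or more transaction results.
	IndexTxEvents([]*abci.TxResult) error

	// Type reports which concrete sink this is (e.g. kv or psql).
	Type() string

	// Stop releases any resources held by the sink.
	Stop() error
}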

In addition:

- The Postgres indexer implementation will _not_ implement the proprietary `kv`
  query language. Users wishing to write queries against the Postgres indexer
  will connect to the underlying DBMS directly and use SQL queries based on the
  indexing schema.

  Future custom indexer implementations will not be required to support the
  proprietary query language either.

- For now, the existing `kv` indexer will be left in place with its current
  query support, but will be marked as deprecated in a subsequent release, and
  the documentation will be updated to encourage users who need to query the
  event index to migrate to the Postgres indexer.

- In the future we may remove the `kv` indexer entirely, or replace it with a
  different implementation; that decision is deferred as future work.

- In the future, we may remove the index query endpoints from the RPC service
  entirely; that decision is deferred as future work, but recommended.


## Detailed Design

@@ -221,9 +221,6 @@ pprof-laddr = ""
#######################################################
[p2p]

# Enable the legacy p2p layer.
use-legacy = false

# Select the p2p internal queue
queue-type = "priority"


6
go.mod
@@ -4,8 +4,7 @@ go 1.16

require (
	github.com/BurntSushi/toml v0.4.1
	github.com/Workiva/go-datastructures v1.0.53
	github.com/adlio/schema v1.1.13
	github.com/adlio/schema v1.1.14
	github.com/btcsuite/btcd v0.22.0-beta
	github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce
	github.com/fortytw2/leaktest v1.3.0
@@ -20,7 +19,6 @@ require (
	github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
	github.com/lib/pq v1.10.3
	github.com/libp2p/go-buffer-pool v0.0.2
	github.com/minio/highwayhash v1.0.2
	github.com/mroth/weightedrand v0.4.1
	github.com/oasisprotocol/curve25519-voi v0.0.0-20210609091139-0a56a4bca00b
	github.com/ory/dockertest v3.3.5+incompatible
@@ -36,7 +34,7 @@ require (
	github.com/tendermint/tm-db v0.6.4
	github.com/vektra/mockery/v2 v2.9.4
	golang.org/x/crypto v0.0.0-20210915214749-c084706c2272
	golang.org/x/net v0.0.0-20210917221730-978cfadd31cf
	golang.org/x/net v0.0.0-20211005001312-d4b1ae081e3b
	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
	google.golang.org/grpc v1.41.0
	gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b // indirect

62
go.sum
@@ -1,5 +1,6 @@
4d63.com/gochecknoglobals v0.0.0-20201008074935-acfc0b28355a h1:wFEQiK85fRsEVF0CRrPAos5LoAryUsIX1kPW/WrIqFw=
4d63.com/gochecknoglobals v0.0.0-20201008074935-acfc0b28355a/go.mod h1:wfdC5ZjKSPr7CybKEcgJhUOgeAQW1+7WcyK8OvUilfo=
bazil.org/fuse v0.0.0-20200407214033-5883e5a4b512/go.mod h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM=
bitbucket.org/creachadair/shell v0.0.6/go.mod h1:8Qqi/cYk7vPnsOePHroKXDJYmb5x7ENhtiFtfZq8K+M=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
@@ -52,8 +53,8 @@ contrib.go.opencensus.io/exporter/stackdriver v0.13.4/go.mod h1:aXENhDJ1Y4lIg4EU
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Antonboom/errname v0.1.4 h1:lGSlI42Gm4bI1e+IITtXJXvxFM8N7naWimVFKcb0McY=
github.com/Antonboom/errname v0.1.4/go.mod h1:jRXo3m0E0EuCnK3wbsSVH3X55Z4iTDLl6ZfCxwFj4TM=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v0.4.1 h1:GaI7EiDXDRfa8VshkTj7Fym7ha+y8/XxIgD2okUIjLw=
github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
@@ -72,8 +73,8 @@ github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3Q
github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
github.com/Masterminds/sprig v2.15.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o=
github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o=
github.com/Microsoft/go-winio v0.4.14 h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+qxleU=
github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
github.com/Microsoft/go-winio v0.5.0 h1:Elr9Wn+sGKPlkaBvwu4mTrxtmOp3F3yV9qhaHbXGjwU=
github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw=
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
@@ -85,10 +86,8 @@ github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMx
github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8=
github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE=
github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
github.com/Workiva/go-datastructures v1.0.53 h1:J6Y/52yX10Xc5JjXmGtWoSSxs3mZnGSaq37xZZh7Yig=
github.com/Workiva/go-datastructures v1.0.53/go.mod h1:1yZL+zfsztete+ePzZz/Zb1/t5BnDuE2Ya2MMGhzP6A=
github.com/adlio/schema v1.1.13 h1:LeNMVg5Z1FX+Qgz8tJUijBLRdcpbFUElz+d1489On98=
github.com/adlio/schema v1.1.13/go.mod h1:L5Z7tw+7lRK1Fnpi/LT/ooCP1elkXn0krMWBQHUhEDE=
github.com/adlio/schema v1.1.14 h1:lIjyp5/2wSuEOmeQGNPpaRsVGZRqz9A/B+PaMtEotaU=
github.com/adlio/schema v1.1.14/go.mod h1:hQveFEMiDlG/M9yz9RAajnH5DzT6nAfqOG9YkEQU2pg=
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
@@ -126,6 +125,7 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
github.com/bkielbasa/cyclop v1.2.0 h1:7Jmnh0yL2DjKfw28p86YTd/B4lRGcNuu12sKE35sM7A=
@@ -160,9 +160,11 @@ github.com/charithe/durationcheck v0.0.8 h1:cnZrThioNW9gSV5JsRIXmkyHUbcDH7Y9hkzF
github.com/charithe/durationcheck v0.0.8/go.mod h1:SSbRIBVfMjCi/kEB6K65XEA83D6prSM8ap1UCpNKtgg=
github.com/chavacava/garif v0.0.0-20210405164556-e8a0a408d6af h1:spmv8nSH9h5oCQf40jt/ufBCt9j0/58u4G+rkeMqXGI=
github.com/chavacava/garif v0.0.0-20210405164556-e8a0a408d6af/go.mod h1:Qjyv4H3//PWVzTeCezG2b9IRn6myJxJSr4TD/xo6ojU=
github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
github.com/clbanning/mxj v1.8.4/go.mod h1:BVjHeAH+rl9rs6f+QIpeRl0tfu10SXn1pUSa5PVGJng=
@@ -173,8 +175,9 @@ github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnht
github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6 h1:NmTXa/uVnDyp0TY5MKi197+3HWcnYWfnHGyaFthlnGw=
github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ=
github.com/containerd/continuity v0.2.0 h1:j/9Wnn+hrEWjLvHuIxUU1YI5JjEjVlT2AA68cse9rwY=
github.com/containerd/continuity v0.2.0/go.mod h1:wCYX+dRqZdImhGucXOqTQn05AhX6EUDaGEMUzTFFpLg=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
@@ -192,6 +195,7 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:ma
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
github.com/daixiang0/gci v0.2.9 h1:iwJvwQpBZmMg31w+QQ6jsyZ54KEATn6/nfARbBNW294=
github.com/daixiang0/gci v0.2.9/go.mod h1:+4dZ7TISfSmqfAGv59ePaHfNzgGtIkHAhhdKggP1JAc=
github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -253,6 +257,7 @@ github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8
github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
github.com/franela/goblin v0.0.0-20210519012713-85d372ac71e2/go.mod h1:VzmDKDJVZI3aJmnRI9VjAn9nJ8qPPsN1fqzr9dqInIo=
github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI=
@@ -574,6 +579,7 @@ github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
@@ -592,7 +598,6 @@ github.com/ldez/tagliatelle v0.2.0/go.mod h1:8s6WJQwEYHbKZDsp/LjArytKOG8qaMrKQQ3
github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw=
github.com/letsencrypt/pkcs11key/v4 v4.0.0/go.mod h1:EFUvBDay26dErnNb70Nd0/VW3tJiIbETBPTl9ATXQag=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
@@ -645,7 +650,6 @@ github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0
github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY=
github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g=
github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
@@ -664,6 +668,7 @@ github.com/mitchellh/mapstructure v1.4.2 h1:6h7AQ0yhTcIsmFmnAwQls75jp2Gzs4iB8W7p
github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
@@ -676,6 +681,7 @@ github.com/mozilla/scribe v0.0.0-20180711195314-fb71baf557c1/go.mod h1:FIczTrinK
github.com/mozilla/tls-observatory v0.0.0-20210609171429-7bc42856d2e5/go.mod h1:FUqVoUPHSEdDR0MnFM3Dh8AU0pZHLXUD127SAJGER/s=
github.com/mroth/weightedrand v0.4.1 h1:rHcbUBopmi/3x4nnrvwGJBhX9d0vk+KgoLUZeDP6YyI=
github.com/mroth/weightedrand v0.4.1/go.mod h1:3p2SIcC8al1YMzGhAIoXD+r9olo/g/cdJgAD905gyNE=
github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-proto-validators v0.0.0-20180403085117-0950a7990007/go.mod h1:m2XC9Qq0AlmmVksL6FktJCdTYyLk7V3fKyp0sl1yWQo=
@@ -724,12 +730,14 @@ github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1y
github.com/onsi/gomega v1.13.0 h1:7lLHu94wT9Ij0o6EWWclhu0aOh32VxhkwEJvzuWPeak=
github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY=
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/runc v0.1.1 h1:GlxAyO6x8rfZYN9Tt0Kti5a/cP41iuiO2yYT0IJGY8Y=
github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.2 h1:opHZMaswlyxz1OuGpBE53Dwe4/xF7EZTY0A2L/FpCOg=
github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
github.com/openzipkin/zipkin-go v0.2.5/go.mod h1:KpXfKdgRDnnhsxw4pNIH9Md5lyFqKUa4YDFlwRYAMyE=
@@ -748,7 +756,6 @@ github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp
github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o=
github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d h1:CdDQnGF8Nq9ocOS/xlSptM1N3BbrA6/kmaep5ggwaIA=
github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw=
github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -832,6 +839,7 @@ github.com/sanposhiho/wastedassign/v2 v2.0.6/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dms
github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa h1:0U2s5loxrTy6/VgfVoLuVLFJcURKLH49ie0zSch7gh4=
github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
github.com/securego/gosec/v2 v2.8.1 h1:Tyy/nsH39TYCOkqf5HAgRE+7B5D8sHDwPdXRgFWokh8=
github.com/securego/gosec/v2 v2.8.1/go.mod h1:pUmsq6+VyFEElJMUX+QB3p3LWNHXg1R3xh2ssVJPs8Q=
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
@@ -842,7 +850,6 @@ github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxr
github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
@@ -906,6 +913,7 @@ github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5Cc
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca h1:Ld/zXl5t4+D69SiV4JoN7kkfvJdOWlPpfxrzxpLMoUk=
github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM=
github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b h1:HxLVTlqcHhFAz3nWUcuvpH7WuOMv8LQoCWmruLfFH2U=
@@ -918,7 +926,6 @@ github.com/tetafro/godot v1.4.9 h1:wsNd0RuUxISqqudFqchsSsMqsM188DoZVPBeKl87tP0=
github.com/tetafro/godot v1.4.9/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8=
github.com/timakin/bodyclose v0.0.0-20200424151742-cb6215831a94 h1:ig99OeTyDwQWhPe2iw9lwfQVF1KB3Q4fpP3X7/2VBG8=
github.com/timakin/bodyclose v0.0.0-20200424151742-cb6215831a94/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk=
github.com/tinylib/msgp v1.1.5/go.mod h1:eQsjooMTnV42mHu917E26IogZ2930nFyBQdofk10Udg=
github.com/tklauser/go-sysconf v0.3.7/go.mod h1:JZIdXh4RmBvZDBZ41ld2bGxRV3n4daiiqA3skYhAoQ4=
github.com/tklauser/numcpus v0.2.3/go.mod h1:vpEPS/JC+oZGGQ/My/vJnNsvMDQL6PwOqt8dsCw5j+E=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
@@ -929,8 +936,8 @@ github.com/tomarrell/wrapcheck/v2 v2.3.0/go.mod h1:aF5rnkdtqNWP/gC7vPUO5pKsB0Oac
github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4=
github.com/tommy-muehle/go-mnd/v2 v2.4.0 h1:1t0f8Uiaq+fqKteUR4N9Umr6E99R+lDnLnq7PwX2PPE=
github.com/tommy-muehle/go-mnd/v2 v2.4.0/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw=
github.com/ttacon/chalk v0.0.0-20160626202418-22c06c80ed31/go.mod h1:onvgF043R+lC5RZ8IT9rBXDaEDnpnw/Cl+HFiw+v/7Q=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
@@ -950,6 +957,8 @@ github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV
github.com/vektra/mockery/v2 v2.9.4 h1:ZjpYWY+YLkDIKrKtFnYPxJax10lktcUapWZtOSg4g7g=
github.com/vektra/mockery/v2 v2.9.4/go.mod h1:2gU4Cf/f8YyC8oEaSXfCnZBMxMjMl/Ko205rlP0fO90=
github.com/viki-org/dnscache v0.0.0-20130720023526-c70c1f23c5d8/go.mod h1:dniwbG03GafCjFohMDmz6Zc6oCuiqgH6tGNyXTkHzXE=
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778/go.mod h1:2MuV+tbUrU1zIOPMxZ5EncGwgmMJsa+9ucAQZXxsObs=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
@@ -1107,6 +1116,7 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY
golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
@@ -1115,8 +1125,9 @@ golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210917221730-978cfadd31cf h1:R150MpwJIv1MpS0N/pc+NhTM8ajzvlmxlY5OYsrevXQ=
golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211005001312-d4b1ae081e3b h1:SXy8Ld8oKlcogOvUAh0J5Pm5RKzgYBMMxLxt6n5XW50=
golang.org/x/net v0.0.0-20211005001312-d4b1ae081e3b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -1163,6 +1174,7 @@ golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1176,8 +1188,10 @@ golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1202,6 +1216,7 @@ golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1217,6 +1232,7 @@ golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -1226,8 +1242,9 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210917161153-d61c044b1678 h1:J27LZFQBFoihqXoegpscI10HpjZ7B5WQLLKL2FZXQKw=
golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211004093028-2c5d950f24ef h1:fPxZ3Umkct3LZ8gK9nbk+DWDJ9fstZa2grBn+lWVKPs=
golang.org/x/sys v0.0.0-20211004093028-2c5d950f24ef/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
@@ -1328,7 +1345,6 @@ golang.org/x/tools v0.0.0-20200831203904-5a2aa26beb65/go.mod h1:Cj7w3i3Rnn0Xh82u
golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
golang.org/x/tools v0.0.0-20201001104356-43ebab892c4c/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
golang.org/x/tools v0.0.0-20201002184944-ecd9fd270d5d/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201028025901-8cd080b735b3/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=

@@ -13,14 +13,9 @@ will no longer blocksync and thus no longer run the blocksync process.
Note, the blocksync reactor Service gossips entire blocks and relevant data such
that each receiving peer may construct the entire view of the blocksync state.

There are currently two versions of the blocksync reactor Service:

- v0: The initial implementation that is battle-tested, but whose test coverage
  is lacking and is not formally verifiable.
- v2: The latest implementation that has much higher test coverage and is formally
  verified. However, the current implementation of v2 is not as battle-tested and
  is known to have various bugs that could make it unreliable in production
  environments.
There is currently only one version of the blocksync reactor Service
that is battle-tested, but whose test coverage is lacking and is not
formally verified.

The v0 blocksync reactor Service has one p2p channel, BlockchainChannel. This
channel is responsible for handling messages that both request blocks and respond

@@ -98,7 +98,7 @@ func (rts *reactorTestSuite) addNode(t *testing.T,
	t.Helper()

	rts.nodes = append(rts.nodes, nodeID)
	rts.app[nodeID] = proxy.NewAppConns(abciclient.NewLocalCreator(&abci.BaseApplication{}))
	rts.app[nodeID] = proxy.NewAppConns(abciclient.NewLocalCreator(&abci.BaseApplication{}), proxy.NopMetrics())
	require.NoError(t, rts.app[nodeID].Start())

	blockDB := dbm.NewMemDB()

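The one-line change in this test hunk reflects a signature change: proxy.NewAppConns now takes a metrics value in addition to the client creator. A hedged sketch of the updated call site:

// proxy.NopMetrics() supplies no-op metrics, which is what tests want.
appConns := proxy.NewAppConns(
	abciclient.NewLocalCreator(&abci.BaseApplication{}), // in-process ABCI app
	proxy.NopMetrics(),
)
require.NoError(t, appConns.Start())
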
@@ -1,42 +0,0 @@
/*
Package behavior provides a mechanism for reactors to report the behavior of peers.

Instead of a reactor calling the switch directly, it calls the behavior module, which
handles stopping a peer or marking it as good on behalf of the reactor.

There are four different behaviors a reactor can report.

1. bad message

	type badMessage struct {
		explanation string
	}

This message will request the peer be stopped for an error.

2. message out of order

	type messageOutOfOrder struct {
		explanation string
	}

This message will request the peer be stopped for an error.

3. consensus vote

	type consensusVote struct {
		explanation string
	}

This message will request the peer be marked as good.

4. block part

	type blockPart struct {
		explanation string
	}

This message will request the peer be marked as good.
*/
package behavior
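
For orientation, this is roughly how a reactor consumed the package being deleted here: it handed a PeerBehavior value to a Reporter instead of driving the Switch itself. The wiring around the two calls is a hedged sketch, not recovered source; sw, peerID, and logger are assumed from the calling reactor.

// sw is the reactor's *p2p.Switch; logger is its log.Logger (both assumed).
reporter := behavior.NewSwitchReporter(sw)

// A bad message asks the Switch to stop the peer for an error.
if err := reporter.Report(behavior.BadMessage(peerID, "malformed proto message")); err != nil {
	logger.Error("failed to report peer behavior", "err", err)
}
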
@@ -1,47 +0,0 @@
package behavior

import "github.com/tendermint/tendermint/types"

// PeerBehavior is a struct describing a behavior a peer performed.
// `peerID` identifies the peer and reason characterizes the specific
// behavior performed by the peer.
type PeerBehavior struct {
	peerID types.NodeID
	reason interface{}
}

type badMessage struct {
	explanation string
}

// BadMessage returns a badMessage PeerBehavior.
func BadMessage(peerID types.NodeID, explanation string) PeerBehavior {
	return PeerBehavior{peerID: peerID, reason: badMessage{explanation}}
}

type messageOutOfOrder struct {
	explanation string
}

// MessageOutOfOrder returns a messageOutOfOrder PeerBehavior.
func MessageOutOfOrder(peerID types.NodeID, explanation string) PeerBehavior {
	return PeerBehavior{peerID: peerID, reason: messageOutOfOrder{explanation}}
}

type consensusVote struct {
	explanation string
}

// ConsensusVote returns a consensusVote PeerBehavior.
func ConsensusVote(peerID types.NodeID, explanation string) PeerBehavior {
	return PeerBehavior{peerID: peerID, reason: consensusVote{explanation}}
}

type blockPart struct {
	explanation string
}

// BlockPart returns a blockPart PeerBehavior.
func BlockPart(peerID types.NodeID, explanation string) PeerBehavior {
	return PeerBehavior{peerID: peerID, reason: blockPart{explanation}}
}
@@ -1,87 +0,0 @@
package behavior

import (
	"errors"

	tmsync "github.com/tendermint/tendermint/internal/libs/sync"
	"github.com/tendermint/tendermint/internal/p2p"
	"github.com/tendermint/tendermint/types"
)

// Reporter provides an interface for reactors to report the behavior
// of peers synchronously to other components.
type Reporter interface {
	Report(behavior PeerBehavior) error
}

// SwitchReporter reports peer behavior to an internal Switch.
type SwitchReporter struct {
	sw *p2p.Switch
}

// NewSwitchReporter returns a new SwitchReporter instance which wraps the Switch.
func NewSwitchReporter(sw *p2p.Switch) *SwitchReporter {
	return &SwitchReporter{
		sw: sw,
	}
}

// Report reports the behavior of a peer to the Switch.
func (spbr *SwitchReporter) Report(behavior PeerBehavior) error {
	peer := spbr.sw.Peers().Get(behavior.peerID)
	if peer == nil {
		return errors.New("peer not found")
	}

	switch reason := behavior.reason.(type) {
	case consensusVote, blockPart:
		spbr.sw.MarkPeerAsGood(peer)
	case badMessage:
		spbr.sw.StopPeerForError(peer, reason.explanation)
	case messageOutOfOrder:
		spbr.sw.StopPeerForError(peer, reason.explanation)
	default:
		return errors.New("unknown reason reported")
	}

	return nil
}

// MockReporter is a concrete implementation of the Reporter
// interface used in reactor tests to ensure reactors report the correct
// behavior in manufactured scenarios.
type MockReporter struct {
	mtx tmsync.RWMutex
	pb  map[types.NodeID][]PeerBehavior
}

// NewMockReporter returns a Reporter which records all reported
// behaviors in memory.
func NewMockReporter() *MockReporter {
	return &MockReporter{
		pb: map[types.NodeID][]PeerBehavior{},
	}
}

// Report stores the PeerBehavior produced by the peer identified by peerID.
func (mpbr *MockReporter) Report(behavior PeerBehavior) error {
	mpbr.mtx.Lock()
	defer mpbr.mtx.Unlock()
	mpbr.pb[behavior.peerID] = append(mpbr.pb[behavior.peerID], behavior)

	return nil
}

// GetBehaviors returns all behaviors reported on the peer identified by peerID.
func (mpbr *MockReporter) GetBehaviors(peerID types.NodeID) []PeerBehavior {
	mpbr.mtx.RLock()
	defer mpbr.mtx.RUnlock()
	if items, ok := mpbr.pb[peerID]; ok {
		result := make([]PeerBehavior, len(items))
		copy(result, items)

		return result
	}

	return []PeerBehavior{}
}
@@ -1,205 +0,0 @@
package behavior_test

import (
	"sync"
	"testing"

	bh "github.com/tendermint/tendermint/internal/blocksync/v2/internal/behavior"
	"github.com/tendermint/tendermint/types"
)

// TestMockReporter tests the MockReporter's ability to store reported
// peer behavior in memory indexed by the peerID.
func TestMockReporter(t *testing.T) {
	var peerID types.NodeID = "MockPeer"
	pr := bh.NewMockReporter()

	behaviors := pr.GetBehaviors(peerID)
	if len(behaviors) != 0 {
		t.Error("Expected to have no behaviors reported")
	}

	badMessage := bh.BadMessage(peerID, "bad message")
	if err := pr.Report(badMessage); err != nil {
		t.Error(err)
	}
	behaviors = pr.GetBehaviors(peerID)
	if len(behaviors) != 1 {
		t.Error("Expected the peer to have one reported behavior")
	}

	if behaviors[0] != badMessage {
		t.Error("Expected Bad Message to have been reported")
	}
}

type scriptItem struct {
	peerID   types.NodeID
	behavior bh.PeerBehavior
}

// equalBehaviors returns true if a and b contain the same PeerBehaviors with
// the same frequencies and otherwise false.
func equalBehaviors(a []bh.PeerBehavior, b []bh.PeerBehavior) bool {
	aHistogram := map[bh.PeerBehavior]int{}
	bHistogram := map[bh.PeerBehavior]int{}

	for _, behavior := range a {
		aHistogram[behavior]++
	}

	for _, behavior := range b {
		bHistogram[behavior]++
	}

	if len(aHistogram) != len(bHistogram) {
		return false
	}

	for _, behavior := range a {
		if aHistogram[behavior] != bHistogram[behavior] {
			return false
		}
	}

	for _, behavior := range b {
		if bHistogram[behavior] != aHistogram[behavior] {
			return false
		}
	}

	return true
}

// TestEqualPeerBehaviors tests that equalBehaviors can tell that two slices
// of peer behaviors can be compared for the behaviors they contain and the
// frequencies with which those behaviors occur.
func TestEqualPeerBehaviors(t *testing.T) {
	var (
		peerID        types.NodeID = "MockPeer"
		consensusVote = bh.ConsensusVote(peerID, "voted")
		blockPart     = bh.BlockPart(peerID, "blocked")
		equals        = []struct {
			left  []bh.PeerBehavior
			right []bh.PeerBehavior
		}{
			// Empty sets
			{[]bh.PeerBehavior{}, []bh.PeerBehavior{}},
			// Single behaviors
			{[]bh.PeerBehavior{consensusVote}, []bh.PeerBehavior{consensusVote}},
			// Equal frequencies
			{[]bh.PeerBehavior{consensusVote, consensusVote},
				[]bh.PeerBehavior{consensusVote, consensusVote}},
			// Equal frequencies, different orders
			{[]bh.PeerBehavior{consensusVote, blockPart},
				[]bh.PeerBehavior{blockPart, consensusVote}},
		}
		unequals = []struct {
			left  []bh.PeerBehavior
			right []bh.PeerBehavior
		}{
			// Comparing empty sets to non-empty sets
			{[]bh.PeerBehavior{}, []bh.PeerBehavior{consensusVote}},
			// Different behaviors
			{[]bh.PeerBehavior{consensusVote}, []bh.PeerBehavior{blockPart}},
			// Same behavior with different frequencies
			{[]bh.PeerBehavior{consensusVote},
				[]bh.PeerBehavior{consensusVote, consensusVote}},
		}
	)

	for _, test := range equals {
		if !equalBehaviors(test.left, test.right) {
			t.Errorf("expected %#v and %#v to be equal", test.left, test.right)
		}
	}

	for _, test := range unequals {
		if equalBehaviors(test.left, test.right) {
			t.Errorf("expected %#v and %#v to be unequal", test.left, test.right)
		}
	}
}

// TestPeerBehaviorConcurrency constructs a scenario in which
// multiple goroutines are using the same MockReporter instance.
// This test reproduces the conditions in which MockReporter will
// be used within Reactor `Receive` method tests to ensure thread safety.
func TestMockPeerBehaviorReporterConcurrency(t *testing.T) {
	var (
		behaviorScript = []struct {
			peerID    types.NodeID
			behaviors []bh.PeerBehavior
		}{
			{"1", []bh.PeerBehavior{bh.ConsensusVote("1", "")}},
			{"2", []bh.PeerBehavior{bh.ConsensusVote("2", ""), bh.ConsensusVote("2", ""), bh.ConsensusVote("2", "")}},
			{
				"3",
				[]bh.PeerBehavior{bh.BlockPart("3", ""),
					bh.ConsensusVote("3", ""),
					bh.BlockPart("3", ""),
					bh.ConsensusVote("3", "")}},
			{
				"4",
				[]bh.PeerBehavior{bh.ConsensusVote("4", ""),
					bh.ConsensusVote("4", ""),
					bh.ConsensusVote("4", ""),
					bh.ConsensusVote("4", "")}},
			{
				"5",
				[]bh.PeerBehavior{bh.BlockPart("5", ""),
					bh.ConsensusVote("5", ""),
					bh.BlockPart("5", ""),
					bh.ConsensusVote("5", "")}},
		}
	)

	var receiveWg sync.WaitGroup
	pr := bh.NewMockReporter()
	scriptItems := make(chan scriptItem)
	done := make(chan int)
	numConsumers := 3
	for i := 0; i < numConsumers; i++ {
		receiveWg.Add(1)
		go func() {
			defer receiveWg.Done()
			for {
				select {
				case pb := <-scriptItems:
					if err := pr.Report(pb.behavior); err != nil {
						t.Error(err)
					}
				case <-done:
					return
				}
			}
		}()
	}

	var sendingWg sync.WaitGroup
	sendingWg.Add(1)
	go func() {
		defer sendingWg.Done()
		for _, item := range behaviorScript {
			for _, reason := range item.behaviors {
				scriptItems <- scriptItem{item.peerID, reason}
			}
		}
	}()

	sendingWg.Wait()

	for i := 0; i < numConsumers; i++ {
		done <- 1
	}

	receiveWg.Wait()

	for _, items := range behaviorScript {
		reported := pr.GetBehaviors(items.peerID)
		if !equalBehaviors(reported, items.behaviors) {
			t.Errorf("expected peer %s to have behaved \nExpected: %#v \nGot %#v \n",
				items.peerID, items.behaviors, reported)
		}
	}
}
@@ -1,187 +0,0 @@
|
||||
package v2
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/tendermint/tendermint/internal/p2p"
|
||||
"github.com/tendermint/tendermint/internal/state"
|
||||
bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
var (
|
||||
errPeerQueueFull = errors.New("peer queue full")
|
||||
)
|
||||
|
||||
type iIO interface {
|
||||
sendBlockRequest(peer p2p.Peer, height int64) error
|
||||
sendBlockToPeer(block *types.Block, peer p2p.Peer) error
|
||||
sendBlockNotFound(height int64, peer p2p.Peer) error
|
||||
sendStatusResponse(base, height int64, peer p2p.Peer) error
|
||||
|
||||
sendStatusRequest(peer p2p.Peer) error
|
||||
broadcastStatusRequest() error
|
||||
|
||||
trySwitchToConsensus(state state.State, skipWAL bool) bool
|
||||
}
|
||||
|
||||
type switchIO struct {
|
||||
sw *p2p.Switch
|
||||
}
|
||||
|
||||
func newSwitchIo(sw *p2p.Switch) *switchIO {
|
||||
return &switchIO{
|
||||
sw: sw,
|
||||
}
|
||||
}
|
||||
|
||||
const (
	// BlockchainChannel is a channel for blocks and status updates (`BlockStore` height)
	BlockchainChannel = byte(0x40)
)

type consensusReactor interface {
	// for when we switch from blockchain reactor and block sync to
	// the consensus machine
	SwitchToConsensus(state state.State, skipWAL bool)
}

func (sio *switchIO) sendBlockRequest(peer p2p.Peer, height int64) error {
	msgProto := &bcproto.Message{
		Sum: &bcproto.Message_BlockRequest{
			BlockRequest: &bcproto.BlockRequest{
				Height: height,
			},
		},
	}

	msgBytes, err := proto.Marshal(msgProto)
	if err != nil {
		return err
	}

	queued := peer.TrySend(BlockchainChannel, msgBytes)
	if !queued {
		return errPeerQueueFull
	}
	return nil
}
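Each send method below repeats the same shape as sendBlockRequest: wrap the payload in a bcproto.Message, marshal it, and hand the bytes to peer.TrySend, mapping a full send queue to errPeerQueueFull. A hypothetical helper (not part of this package) could factor that pattern out:

func trySendProto(peer p2p.Peer, msg *bcproto.Message) error {
	msgBytes, err := proto.Marshal(msg)
	if err != nil {
		return err
	}
	// TrySend is non-blocking; a false return means the peer's queue is full.
	if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued {
		return errPeerQueueFull
	}
	return nil
}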
func (sio *switchIO) sendStatusResponse(base int64, height int64, peer p2p.Peer) error {
	msgProto := &bcproto.Message{
		Sum: &bcproto.Message_StatusResponse{
			StatusResponse: &bcproto.StatusResponse{
				Height: height,
				Base:   base,
			},
		},
	}

	msgBytes, err := proto.Marshal(msgProto)
	if err != nil {
		return err
	}

	if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued {
		return errPeerQueueFull
	}

	return nil
}

func (sio *switchIO) sendBlockToPeer(block *types.Block, peer p2p.Peer) error {
	if block == nil {
		panic("trying to send nil block")
	}

	bpb, err := block.ToProto()
	if err != nil {
		return err
	}

	msgProto := &bcproto.Message{
		Sum: &bcproto.Message_BlockResponse{
			BlockResponse: &bcproto.BlockResponse{
				Block: bpb,
			},
		},
	}

	msgBytes, err := proto.Marshal(msgProto)
	if err != nil {
		return err
	}

	if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued {
		return errPeerQueueFull
	}

	return nil
}

func (sio *switchIO) sendBlockNotFound(height int64, peer p2p.Peer) error {
	msgProto := &bcproto.Message{
		Sum: &bcproto.Message_NoBlockResponse{
			NoBlockResponse: &bcproto.NoBlockResponse{
				Height: height,
			},
		},
	}

	msgBytes, err := proto.Marshal(msgProto)
	if err != nil {
		return err
	}

	if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued {
		return errPeerQueueFull
	}

	return nil
}

func (sio *switchIO) trySwitchToConsensus(state state.State, skipWAL bool) bool {
	conR, ok := sio.sw.Reactor("CONSENSUS").(consensusReactor)
	if ok {
		conR.SwitchToConsensus(state, skipWAL)
	}
	return ok
}

func (sio *switchIO) sendStatusRequest(peer p2p.Peer) error {
	msgProto := &bcproto.Message{
		Sum: &bcproto.Message_StatusRequest{
			StatusRequest: &bcproto.StatusRequest{},
		},
	}

	msgBytes, err := proto.Marshal(msgProto)
	if err != nil {
		return err
	}

	if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued {
		return errPeerQueueFull
	}

	return nil
}

func (sio *switchIO) broadcastStatusRequest() error {
	msgProto := &bcproto.Message{
		Sum: &bcproto.Message_StatusRequest{
			StatusRequest: &bcproto.StatusRequest{},
		},
	}

	msgBytes, err := proto.Marshal(msgProto)
	if err != nil {
		return err
	}

	// XXX: maybe we should use an io specific peer list here
	sio.sw.Broadcast(BlockchainChannel, msgBytes)

	return nil
}
@@ -1,125 +0,0 @@
package v2

import (
	"github.com/go-kit/kit/metrics"
	"github.com/go-kit/kit/metrics/discard"
	"github.com/go-kit/kit/metrics/prometheus"
	stdprometheus "github.com/prometheus/client_golang/prometheus"
)

const (
	// MetricsSubsystem is a subsystem shared by all metrics exposed by this
	// package.
	MetricsSubsystem = "blockchain"
)

// Metrics contains metrics exposed by this package.
type Metrics struct {
	// events_in
	EventsIn metrics.Counter
	// events_handled
	EventsHandled metrics.Counter
	// events_out
	EventsOut metrics.Counter
	// errors_in
	ErrorsIn metrics.Counter
	// errors_handled
	ErrorsHandled metrics.Counter
	// errors_out
	ErrorsOut metrics.Counter
	// events_shed
	EventsShed metrics.Counter
	// events_sent
	EventsSent metrics.Counter
	// errors_sent
	ErrorsSent metrics.Counter
	// errors_shed
	ErrorsShed metrics.Counter
}
// PrometheusMetrics returns metrics for in and out events, errors, etc. handled by routines.
// Can we burn in the routine name here?
func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
	labels := []string{}
	for i := 0; i < len(labelsAndValues); i += 2 {
		labels = append(labels, labelsAndValues[i])
	}
	return &Metrics{
		EventsIn: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: MetricsSubsystem,
			Name:      "events_in",
			Help:      "Events read from the channel.",
		}, labels).With(labelsAndValues...),
		EventsHandled: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: MetricsSubsystem,
			Name:      "events_handled",
			Help:      "Events handled",
		}, labels).With(labelsAndValues...),
		EventsOut: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: MetricsSubsystem,
			Name:      "events_out",
			Help:      "Events output from routine.",
		}, labels).With(labelsAndValues...),
		ErrorsIn: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: MetricsSubsystem,
			Name:      "errors_in",
			Help:      "Errors read from the channel.",
		}, labels).With(labelsAndValues...),
		ErrorsHandled: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: MetricsSubsystem,
			Name:      "errors_handled",
			Help:      "Errors handled.",
		}, labels).With(labelsAndValues...),
		ErrorsOut: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: MetricsSubsystem,
			Name:      "errors_out",
			Help:      "Errors output from routine.",
		}, labels).With(labelsAndValues...),
		ErrorsSent: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: MetricsSubsystem,
			Name:      "errors_sent",
			Help:      "Errors sent to routine.",
		}, labels).With(labelsAndValues...),
		ErrorsShed: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: MetricsSubsystem,
			Name:      "errors_shed",
			Help:      "Errors dropped from sending.",
		}, labels).With(labelsAndValues...),
		EventsSent: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: MetricsSubsystem,
			Name:      "events_sent",
			Help:      "Events sent to routine.",
		}, labels).With(labelsAndValues...),
		EventsShed: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: MetricsSubsystem,
			Name:      "events_shed",
			Help:      "Events dropped from sending.",
		}, labels).With(labelsAndValues...),
	}
}
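PrometheusMetrics takes labelsAndValues as alternating label names and values; only the names (even indices) become label dimensions, and the full pairs are then bound via With. A minimal sketch of how a caller might wire it up (the namespace and label values are illustrative, not taken from this diff):

// metrics := PrometheusMetrics("tendermint", "chain_id", "test-chain")
// metrics.EventsIn.Add(1) // increments tendermint_blockchain_events_in{chain_id="test-chain"}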
// NopMetrics returns no-op Metrics.
func NopMetrics() *Metrics {
	return &Metrics{
		EventsIn:      discard.NewCounter(),
		EventsHandled: discard.NewCounter(),
		EventsOut:     discard.NewCounter(),
		ErrorsIn:      discard.NewCounter(),
		ErrorsHandled: discard.NewCounter(),
		ErrorsOut:     discard.NewCounter(),
		EventsShed:    discard.NewCounter(),
		EventsSent:    discard.NewCounter(),
		ErrorsSent:    discard.NewCounter(),
		ErrorsShed:    discard.NewCounter(),
	}
}
@@ -1,193 +0,0 @@
package v2

import (
	"fmt"

	tmstate "github.com/tendermint/tendermint/internal/state"
	"github.com/tendermint/tendermint/types"
)

// Events generated by the processor:
// block execution failure, event will indicate the peer(s) that caused the error
type pcBlockVerificationFailure struct {
	priorityNormal
	height       int64
	firstPeerID  types.NodeID
	secondPeerID types.NodeID
}

func (e pcBlockVerificationFailure) String() string {
	return fmt.Sprintf("pcBlockVerificationFailure{%d 1st peer: %v, 2nd peer: %v}",
		e.height, e.firstPeerID, e.secondPeerID)
}

// successful block execution
type pcBlockProcessed struct {
	priorityNormal
	height int64
	peerID types.NodeID
}

func (e pcBlockProcessed) String() string {
	return fmt.Sprintf("pcBlockProcessed{%d peer: %v}", e.height, e.peerID)
}

// processor has finished
type pcFinished struct {
	priorityNormal
	blocksSynced int
	tmState      tmstate.State
}

func (p pcFinished) Error() string {
	return "finished"
}

type queueItem struct {
	block  *types.Block
	peerID types.NodeID
}

type blockQueue map[int64]queueItem

type pcState struct {
	// blocks waiting to be processed
	queue blockQueue

	// draining indicates that the next rProcessBlock event with a queue miss constitutes completion
	draining bool

	// the number of blocks successfully synced by the processor
	blocksSynced int

	// the processorContext which contains the processor dependencies
	context processorContext
}

func (state *pcState) String() string {
	return fmt.Sprintf("height: %d queue length: %d draining: %v blocks synced: %d",
		state.height(), len(state.queue), state.draining, state.blocksSynced)
}

// newPcState returns a pcState initialized with the last verified block enqueued
func newPcState(context processorContext) *pcState {
	return &pcState{
		queue:        blockQueue{},
		draining:     false,
		blocksSynced: 0,
		context:      context,
	}
}

// nextTwo returns the next two unverified blocks
func (state *pcState) nextTwo() (queueItem, queueItem, error) {
	if first, ok := state.queue[state.height()+1]; ok {
		if second, ok := state.queue[state.height()+2]; ok {
			return first, second, nil
		}
	}
	return queueItem{}, queueItem{}, fmt.Errorf("not found")
}
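The processor needs two consecutive blocks because a block at height H is verified against the LastCommit carried by the block at H+1; only then can H be saved and applied. A hedged sketch of that dependency as used by the rProcessBlock handler below:

// first  := queue[H+1] // the block to verify and apply
// second := queue[H+2] // second.LastCommit holds the +2/3 precommits over first
// err := verifyCommit(chainID, firstID, first.Height, second.LastCommit)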
// synced returns true when at most the last verified block remains in the queue
func (state *pcState) synced() bool {
	return len(state.queue) <= 1
}

func (state *pcState) enqueue(peerID types.NodeID, block *types.Block, height int64) {
	if item, ok := state.queue[height]; ok {
		panic(fmt.Sprintf(
			"duplicate block %d (%X) enqueued by processor (sent by %v; existing block %X from %v)",
			height, block.Hash(), peerID, item.block.Hash(), item.peerID))
	}

	state.queue[height] = queueItem{block: block, peerID: peerID}
}

func (state *pcState) height() int64 {
	return state.context.tmState().LastBlockHeight
}

// purgePeer removes all unprocessed blocks sent by peerID from the queue
func (state *pcState) purgePeer(peerID types.NodeID) {
	// what if height is less than state.height?
	for height, item := range state.queue {
		if item.peerID == peerID {
			delete(state.queue, height)
		}
	}
}

// handle processes FSM events
func (state *pcState) handle(event Event) (Event, error) {
	switch event := event.(type) {
	case bcResetState:
		state.context.setState(event.state)
		return noOp, nil

	case scFinishedEv:
		if state.synced() {
			return pcFinished{tmState: state.context.tmState(), blocksSynced: state.blocksSynced}, nil
		}
		state.draining = true
		return noOp, nil

	case scPeerError:
		state.purgePeer(event.peerID)
		return noOp, nil

	case scBlockReceived:
		if event.block == nil {
			return noOp, nil
		}

		// enqueue block if height is higher than state height, else ignore it
		if event.block.Height > state.height() {
			state.enqueue(event.peerID, event.block, event.block.Height)
		}
		return noOp, nil

	case rProcessBlock:
		tmstate := state.context.tmState()
		firstItem, secondItem, err := state.nextTwo()
		if err != nil {
			if state.draining {
				return pcFinished{tmState: tmstate, blocksSynced: state.blocksSynced}, nil
			}
			return noOp, nil
		}

		var (
			first, second = firstItem.block, secondItem.block
			firstParts    = first.MakePartSet(types.BlockPartSizeBytes)
			firstID       = types.BlockID{Hash: first.Hash(), PartSetHeader: firstParts.Header()}
		)

		// verify if +second+ last commit "confirms" +first+ block
		err = state.context.verifyCommit(tmstate.ChainID, firstID, first.Height, second.LastCommit)
		if err != nil {
			state.purgePeer(firstItem.peerID)
			if firstItem.peerID != secondItem.peerID {
				state.purgePeer(secondItem.peerID)
			}
			return pcBlockVerificationFailure{
					height: first.Height, firstPeerID: firstItem.peerID, secondPeerID: secondItem.peerID},
				nil
		}

		state.context.saveBlock(first, firstParts, second.LastCommit)

		if err := state.context.applyBlock(firstID, first); err != nil {
			panic(fmt.Sprintf("failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
		}

		state.context.recordConsMetrics(first)

		delete(state.queue, first.Height)
		state.blocksSynced++

		return pcBlockProcessed{height: first.Height, peerID: firstItem.peerID}, nil
	}

	return noOp, nil
}
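The processor is a plain state machine: callers feed handle one event at a time and act on the event it returns. A minimal sketch of driving it by hand, using the mock context and block helper that appear later in this diff (heights and peer IDs illustrative):

// ctx := newMockProcessorContext(tmstate.State{LastBlockHeight: 0}, nil, nil)
// state := newPcState(ctx)
// state.enqueue("P1", makePcBlock(1), 1)
// state.enqueue("P2", makePcBlock(2), 2)
// ev, _ := state.handle(rProcessBlock{}) // ev is pcBlockProcessed{height: 1, peerID: "P1"}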
@@ -1,112 +0,0 @@
package v2

import (
	"fmt"

	"github.com/tendermint/tendermint/internal/consensus"
	"github.com/tendermint/tendermint/internal/state"
	"github.com/tendermint/tendermint/types"
)

type processorContext interface {
	applyBlock(blockID types.BlockID, block *types.Block) error
	verifyCommit(chainID string, blockID types.BlockID, height int64, commit *types.Commit) error
	saveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit)
	tmState() state.State
	setState(state.State)
	recordConsMetrics(block *types.Block)
}

type pContext struct {
	store   blockStore
	applier blockApplier
	state   state.State
	metrics *consensus.Metrics
}

func newProcessorContext(st blockStore, ex blockApplier, s state.State, m *consensus.Metrics) *pContext {
	return &pContext{
		store:   st,
		applier: ex,
		state:   s,
		metrics: m,
	}
}

func (pc *pContext) applyBlock(blockID types.BlockID, block *types.Block) error {
	newState, err := pc.applier.ApplyBlock(pc.state, blockID, block)
	pc.state = newState
	return err
}

func (pc pContext) tmState() state.State {
	return pc.state
}

func (pc *pContext) setState(state state.State) {
	pc.state = state
}

func (pc pContext) verifyCommit(chainID string, blockID types.BlockID, height int64, commit *types.Commit) error {
	return pc.state.Validators.VerifyCommitLight(chainID, blockID, height, commit)
}

func (pc *pContext) saveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) {
	pc.store.SaveBlock(block, blockParts, seenCommit)
}

func (pc *pContext) recordConsMetrics(block *types.Block) {
	pc.metrics.RecordConsMetrics(block)
}

type mockPContext struct {
	applicationBL  []int64
	verificationBL []int64
	state          state.State
}

func newMockProcessorContext(
	state state.State,
	verificationBlackList []int64,
	applicationBlackList []int64) *mockPContext {
	return &mockPContext{
		applicationBL:  applicationBlackList,
		verificationBL: verificationBlackList,
		state:          state,
	}
}

func (mpc *mockPContext) applyBlock(blockID types.BlockID, block *types.Block) error {
	for _, h := range mpc.applicationBL {
		if h == block.Height {
			return fmt.Errorf("generic application error")
		}
	}
	mpc.state.LastBlockHeight = block.Height
	return nil
}

func (mpc *mockPContext) verifyCommit(chainID string, blockID types.BlockID, height int64, commit *types.Commit) error {
	for _, h := range mpc.verificationBL {
		if h == height {
			return fmt.Errorf("generic verification error")
		}
	}
	return nil
}

func (mpc *mockPContext) saveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) {
}

func (mpc *mockPContext) setState(state state.State) {
	mpc.state = state
}

func (mpc *mockPContext) tmState() state.State {
	return mpc.state
}

func (mpc *mockPContext) recordConsMetrics(block *types.Block) {
}
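The mock context above makes failure cases deterministic: any height listed in verificationBL fails verifyCommit, and any height in applicationBL fails applyBlock. For example (heights and arguments illustrative):

// ctx := newMockProcessorContext(state.State{}, []int64{1}, nil)
// err := ctx.verifyCommit("chain", types.BlockID{}, 1, nil) // returns "generic verification error"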
@@ -1,305 +0,0 @@
package v2

import (
	"testing"

	"github.com/stretchr/testify/assert"

	tmstate "github.com/tendermint/tendermint/internal/state"
	"github.com/tendermint/tendermint/types"
)

// pcBlock is a test helper structure with simple types. Its purpose is to help with test readability.
type pcBlock struct {
	pid    string
	height int64
}

// params is a test structure used to create processor state.
type params struct {
	height       int64
	items        []pcBlock
	blocksSynced int
	verBL        []int64
	appBL        []int64
	draining     bool
}

// makePcBlock makes an empty block.
func makePcBlock(height int64) *types.Block {
	return &types.Block{Header: types.Header{Height: height}}
}

// makeState takes test parameters and creates a specific processor state.
func makeState(p *params) *pcState {
	var (
		tmState = tmstate.State{LastBlockHeight: p.height}
		context = newMockProcessorContext(tmState, p.verBL, p.appBL)
	)
	state := newPcState(context)

	for _, item := range p.items {
		state.enqueue(types.NodeID(item.pid), makePcBlock(item.height), item.height)
	}

	state.blocksSynced = p.blocksSynced
	state.draining = p.draining
	return state
}

func mBlockResponse(peerID types.NodeID, height int64) scBlockReceived {
	return scBlockReceived{
		peerID: peerID,
		block:  makePcBlock(height),
	}
}

type pcFsmMakeStateValues struct {
	currentState  *params
	event         Event
	wantState     *params
	wantNextEvent Event
	wantErr       error
	wantPanic     bool
}

type testFields struct {
	name  string
	steps []pcFsmMakeStateValues
}

func executeProcessorTests(t *testing.T, tests []testFields) {
	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			var state *pcState
			for _, step := range tt.steps {
				defer func() {
					r := recover()
					if (r != nil) != step.wantPanic {
						t.Errorf("recover = %v, wantPanic = %v", r, step.wantPanic)
					}
				}()

				// First step must always initialize the currentState as state.
				if step.currentState != nil {
					state = makeState(step.currentState)
				}
				if state == nil {
					panic("Bad (initial?) step")
				}

				nextEvent, err := state.handle(step.event)
				t.Log(state)
				assert.Equal(t, step.wantErr, err)
				assert.Equal(t, makeState(step.wantState), state)
				assert.Equal(t, step.wantNextEvent, nextEvent)
				// Next step may use the wantedState as their currentState.
				state = makeState(step.wantState)
			}
		})
	}
}
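One caveat worth noting about executeProcessorTests above: the deferred recover closures are registered inside the step loop, so they only run when the subtest function returns, and a step that panics (wantPanic: true) ends that subtest before any later steps execute.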
func TestRProcessPeerError(t *testing.T) {
	tests := []testFields{
		{
			name: "error for existing peer",
			steps: []pcFsmMakeStateValues{
				{
					currentState:  &params{items: []pcBlock{{"P1", 1}, {"P2", 2}}},
					event:         scPeerError{peerID: "P2"},
					wantState:     &params{items: []pcBlock{{"P1", 1}}},
					wantNextEvent: noOp,
				},
			},
		},
		{
			name: "error for unknown peer",
			steps: []pcFsmMakeStateValues{
				{
					currentState:  &params{items: []pcBlock{{"P1", 1}, {"P2", 2}}},
					event:         scPeerError{peerID: "P3"},
					wantState:     &params{items: []pcBlock{{"P1", 1}, {"P2", 2}}},
					wantNextEvent: noOp,
				},
			},
		},
	}

	executeProcessorTests(t, tests)
}

func TestPcBlockResponse(t *testing.T) {
	tests := []testFields{
		{
			name: "add one block",
			steps: []pcFsmMakeStateValues{
				{
					currentState: &params{}, event: mBlockResponse("P1", 1),
					wantState: &params{items: []pcBlock{{"P1", 1}}}, wantNextEvent: noOp,
				},
			},
		},

		{
			name: "add two blocks",
			steps: []pcFsmMakeStateValues{
				{
					currentState: &params{}, event: mBlockResponse("P1", 3),
					wantState: &params{items: []pcBlock{{"P1", 3}}}, wantNextEvent: noOp,
				},
				{ // use previous wantState as currentState,
					event:     mBlockResponse("P1", 4),
					wantState: &params{items: []pcBlock{{"P1", 3}, {"P1", 4}}}, wantNextEvent: noOp,
				},
			},
		},
	}

	executeProcessorTests(t, tests)
}

func TestRProcessBlockSuccess(t *testing.T) {
	tests := []testFields{
		{
			name: "noop - no blocks over current height",
			steps: []pcFsmMakeStateValues{
				{
					currentState: &params{}, event: rProcessBlock{},
					wantState: &params{}, wantNextEvent: noOp,
				},
			},
		},
		{
			name: "noop - high new blocks",
			steps: []pcFsmMakeStateValues{
				{
					currentState: &params{height: 5, items: []pcBlock{{"P1", 30}, {"P2", 31}}}, event: rProcessBlock{},
					wantState: &params{height: 5, items: []pcBlock{{"P1", 30}, {"P2", 31}}}, wantNextEvent: noOp,
				},
			},
		},
		{
			name: "blocks H+1 and H+2 present",
			steps: []pcFsmMakeStateValues{
				{
					currentState:  &params{items: []pcBlock{{"P1", 1}, {"P2", 2}}}, event: rProcessBlock{},
					wantState:     &params{height: 1, items: []pcBlock{{"P2", 2}}, blocksSynced: 1},
					wantNextEvent: pcBlockProcessed{height: 1, peerID: "P1"},
				},
			},
		},
		{
			name: "blocks H+1 and H+2 present after draining",
			steps: []pcFsmMakeStateValues{
				{ // some contiguous blocks - on stop check draining is set
					currentState:  &params{items: []pcBlock{{"P1", 1}, {"P2", 2}, {"P1", 4}}},
					event:         scFinishedEv{},
					wantState:     &params{items: []pcBlock{{"P1", 1}, {"P2", 2}, {"P1", 4}}, draining: true},
					wantNextEvent: noOp,
				},
				{
					event:         rProcessBlock{},
					wantState:     &params{height: 1, items: []pcBlock{{"P2", 2}, {"P1", 4}}, blocksSynced: 1, draining: true},
					wantNextEvent: pcBlockProcessed{height: 1, peerID: "P1"},
				},
				{ // finish when H+1 or/and H+2 are missing
					event:         rProcessBlock{},
					wantState:     &params{height: 1, items: []pcBlock{{"P2", 2}, {"P1", 4}}, blocksSynced: 1, draining: true},
					wantNextEvent: pcFinished{tmState: tmstate.State{LastBlockHeight: 1}, blocksSynced: 1},
				},
			},
		},
	}

	executeProcessorTests(t, tests)
}

func TestRProcessBlockFailures(t *testing.T) {
	tests := []testFields{
		{
			name: "blocks H+1 and H+2 present from different peers - H+1 verification fails ",
			steps: []pcFsmMakeStateValues{
				{
					currentState:  &params{items: []pcBlock{{"P1", 1}, {"P2", 2}}, verBL: []int64{1}}, event: rProcessBlock{},
					wantState:     &params{items: []pcBlock{}, verBL: []int64{1}},
					wantNextEvent: pcBlockVerificationFailure{height: 1, firstPeerID: "P1", secondPeerID: "P2"},
				},
			},
		},
		{
			name: "blocks H+1 and H+2 present from same peer - H+1 applyBlock fails ",
			steps: []pcFsmMakeStateValues{
				{
					currentState: &params{items: []pcBlock{{"P1", 1}, {"P2", 2}}, appBL: []int64{1}}, event: rProcessBlock{},
					wantState:    &params{items: []pcBlock{}, appBL: []int64{1}}, wantPanic: true,
				},
			},
		},
		{
			name: "blocks H+1 and H+2 present from same peers - H+1 verification fails ",
			steps: []pcFsmMakeStateValues{
				{
					currentState: &params{height: 0, items: []pcBlock{{"P1", 1}, {"P1", 2}, {"P2", 3}},
						verBL: []int64{1}}, event: rProcessBlock{},
					wantState:     &params{height: 0, items: []pcBlock{{"P2", 3}}, verBL: []int64{1}},
					wantNextEvent: pcBlockVerificationFailure{height: 1, firstPeerID: "P1", secondPeerID: "P1"},
				},
			},
		},
		{
			name: "blocks H+1 and H+2 present from different peers - H+1 applyBlock fails ",
			steps: []pcFsmMakeStateValues{
				{
					currentState: &params{items: []pcBlock{{"P1", 1}, {"P2", 2}, {"P2", 3}}, appBL: []int64{1}},
					event:        rProcessBlock{},
					wantState:    &params{items: []pcBlock{{"P2", 3}}, appBL: []int64{1}}, wantPanic: true,
				},
			},
		},
	}

	executeProcessorTests(t, tests)
}

func TestScFinishedEv(t *testing.T) {
	tests := []testFields{
		{
			name: "no blocks",
			steps: []pcFsmMakeStateValues{
				{
					currentState:  &params{height: 100, items: []pcBlock{}, blocksSynced: 100}, event: scFinishedEv{},
					wantState:     &params{height: 100, items: []pcBlock{}, blocksSynced: 100},
					wantNextEvent: pcFinished{tmState: tmstate.State{LastBlockHeight: 100}, blocksSynced: 100},
				},
			},
		},
		{
			name: "maxHeight+1 block present",
			steps: []pcFsmMakeStateValues{
				{
					currentState: &params{height: 100, items: []pcBlock{
						{"P1", 101}}, blocksSynced: 100}, event: scFinishedEv{},
					wantState:     &params{height: 100, items: []pcBlock{{"P1", 101}}, blocksSynced: 100},
					wantNextEvent: pcFinished{tmState: tmstate.State{LastBlockHeight: 100}, blocksSynced: 100},
				},
			},
		},
		{
			name: "more blocks present",
			steps: []pcFsmMakeStateValues{
				{
					currentState: &params{height: 100, items: []pcBlock{
						{"P1", 101}, {"P1", 102}}, blocksSynced: 100}, event: scFinishedEv{},
					wantState: &params{height: 100, items: []pcBlock{
						{"P1", 101}, {"P1", 102}}, blocksSynced: 100, draining: true},
					wantNextEvent: noOp,
					wantErr:       nil,
				},
			},
		},
	}

	executeProcessorTests(t, tests)
}
@@ -1,643 +0,0 @@
package v2

import (
	"errors"
	"fmt"
	"time"

	"github.com/gogo/protobuf/proto"

	"github.com/tendermint/tendermint/internal/blocksync"
	"github.com/tendermint/tendermint/internal/blocksync/v2/internal/behavior"
	"github.com/tendermint/tendermint/internal/consensus"
	tmsync "github.com/tendermint/tendermint/internal/libs/sync"
	"github.com/tendermint/tendermint/internal/p2p"
	"github.com/tendermint/tendermint/internal/state"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/libs/sync"
	bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"
	"github.com/tendermint/tendermint/types"
)

const (
	// chBufferSize is the buffer size of all event channels.
	chBufferSize int = 1000
)
type blockStore interface {
	LoadBlock(height int64) *types.Block
	SaveBlock(*types.Block, *types.PartSet, *types.Commit)
	Base() int64
	Height() int64
}

// BlockchainReactor handles block sync protocol.
type BlockchainReactor struct {
	p2p.BaseReactor

	blockSync   *sync.AtomicBool // enable block sync on start when it's been Set
	stateSynced bool             // set to true when SwitchToBlockSync is called by state sync
	scheduler   *Routine
	processor   *Routine
	logger      log.Logger

	mtx           tmsync.RWMutex
	maxPeerHeight int64
	syncHeight    int64
	events        chan Event // non-nil during a block sync

	reporter behavior.Reporter
	io       iIO
	store    blockStore

	syncStartTime   time.Time
	syncStartHeight int64
	lastSyncRate    float64 // # blocks synced per sec, based on the last 100 blocks
}

type blockApplier interface {
	ApplyBlock(state state.State, blockID types.BlockID, block *types.Block) (state.State, error)
}

// XXX: unify naming in this package around tmState
func newReactor(state state.State, store blockStore, reporter behavior.Reporter,
	blockApplier blockApplier, blockSync bool, metrics *consensus.Metrics) *BlockchainReactor {
	initHeight := state.LastBlockHeight + 1
	if initHeight == 1 {
		initHeight = state.InitialHeight
	}
	scheduler := newScheduler(initHeight, time.Now())
	pContext := newProcessorContext(store, blockApplier, state, metrics)
	// TODO: Fix naming to just newProcessor
	// newPcState requires a processorContext
	processor := newPcState(pContext)

	return &BlockchainReactor{
		scheduler:       newRoutine("scheduler", scheduler.handle, chBufferSize),
		processor:       newRoutine("processor", processor.handle, chBufferSize),
		store:           store,
		reporter:        reporter,
		logger:          log.NewNopLogger(),
		blockSync:       sync.NewBool(blockSync),
		syncStartHeight: initHeight,
		syncStartTime:   time.Time{},
		lastSyncRate:    0,
	}
}
// NewBlockchainReactor creates a new reactor instance.
func NewBlockchainReactor(
	state state.State,
	blockApplier blockApplier,
	store blockStore,
	blockSync bool,
	metrics *consensus.Metrics) *BlockchainReactor {
	reporter := behavior.NewMockReporter()
	return newReactor(state, store, reporter, blockApplier, blockSync, metrics)
}

// SetSwitch implements Reactor interface.
func (r *BlockchainReactor) SetSwitch(sw *p2p.Switch) {
	r.Switch = sw
	if sw != nil {
		r.io = newSwitchIo(sw)
	} else {
		r.io = nil
	}
}

func (r *BlockchainReactor) setMaxPeerHeight(height int64) {
	r.mtx.Lock()
	defer r.mtx.Unlock()
	if height > r.maxPeerHeight {
		r.maxPeerHeight = height
	}
}

func (r *BlockchainReactor) setSyncHeight(height int64) {
	r.mtx.Lock()
	defer r.mtx.Unlock()
	r.syncHeight = height
}

// SyncHeight returns the height to which the BlockchainReactor has synced.
func (r *BlockchainReactor) SyncHeight() int64 {
	r.mtx.RLock()
	defer r.mtx.RUnlock()
	return r.syncHeight
}

// SetLogger sets the logger of the reactor.
func (r *BlockchainReactor) SetLogger(logger log.Logger) {
	r.logger = logger
	r.scheduler.setLogger(logger)
	r.processor.setLogger(logger)
}

// Start implements cmn.Service interface
func (r *BlockchainReactor) Start() error {
	r.reporter = behavior.NewSwitchReporter(r.BaseReactor.Switch)
	if r.blockSync.IsSet() {
		err := r.startSync(nil)
		if err != nil {
			return fmt.Errorf("failed to start block sync: %w", err)
		}
	}
	return nil
}

// startSync begins a block sync, signaled by r.events being non-nil. If state is non-nil,
// the scheduler and processor are updated with this state on startup.
func (r *BlockchainReactor) startSync(state *state.State) error {
	r.mtx.Lock()
	defer r.mtx.Unlock()
	if r.events != nil {
		return errors.New("block sync already in progress")
	}
	r.events = make(chan Event, chBufferSize)
	go r.scheduler.start()
	go r.processor.start()
	if state != nil {
		<-r.scheduler.ready()
		<-r.processor.ready()
		r.scheduler.send(bcResetState{state: *state})
		r.processor.send(bcResetState{state: *state})
	}
	go r.demux(r.events)
	return nil
}

// endSync ends a block sync
func (r *BlockchainReactor) endSync() {
	r.mtx.Lock()
	defer r.mtx.Unlock()
	if r.events != nil {
		close(r.events)
	}
	r.events = nil
	r.scheduler.stop()
	r.processor.stop()
}

// SwitchToBlockSync is called by the state sync reactor when switching to block sync.
func (r *BlockchainReactor) SwitchToBlockSync(state state.State) error {
	r.stateSynced = true
	state = state.Copy()

	err := r.startSync(&state)
	if err == nil {
		r.syncStartTime = time.Now()
	}

	return err
}
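A block sync therefore has a three-stage lifecycle: startSync allocates r.events and launches the scheduler, processor, and demux goroutines; demux shuttles events between them until it sees pcFinished; and endSync closes r.events and stops both routines. A hedged sketch of the happy path from a caller's perspective:

// err := r.SwitchToBlockSync(state) // startSync(&state); the sync-rate clock starts
// ...demux drives scheduler/processor until the processor emits pcFinished...
// // demux itself then calls trySwitchToConsensus, endSync, and blockSync.UnSet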
// reactor generated ticker events:
// ticker for cleaning peers
type rTryPrunePeer struct {
	priorityHigh
	time time.Time
}

func (e rTryPrunePeer) String() string {
	return fmt.Sprintf("rTryPrunePeer{%v}", e.time)
}

// ticker event for scheduling block requests
type rTrySchedule struct {
	priorityHigh
	time time.Time
}

func (e rTrySchedule) String() string {
	return fmt.Sprintf("rTrySchedule{%v}", e.time)
}

// ticker for block processing
type rProcessBlock struct {
	priorityNormal
}

func (e rProcessBlock) String() string {
	return "rProcessBlock"
}

// reactor generated events based on blockchain related messages from peers:
// blockResponse message received from a peer
type bcBlockResponse struct {
	priorityNormal
	time   time.Time
	peerID types.NodeID
	size   int64
	block  *types.Block
}

func (resp bcBlockResponse) String() string {
	return fmt.Sprintf("bcBlockResponse{%d#%X (size: %d bytes) from %v at %v}",
		resp.block.Height, resp.block.Hash(), resp.size, resp.peerID, resp.time)
}

// blockNoResponse message received from a peer
type bcNoBlockResponse struct {
	priorityNormal
	time   time.Time
	peerID types.NodeID
	height int64
}

func (resp bcNoBlockResponse) String() string {
	return fmt.Sprintf("bcNoBlockResponse{%v has no block at height %d at %v}",
		resp.peerID, resp.height, resp.time)
}

// statusResponse message received from a peer
type bcStatusResponse struct {
	priorityNormal
	time   time.Time
	peerID types.NodeID
	base   int64
	height int64
}

func (resp bcStatusResponse) String() string {
	return fmt.Sprintf("bcStatusResponse{%v is at height %d (base: %d) at %v}",
		resp.peerID, resp.height, resp.base, resp.time)
}

// new peer is connected
type bcAddNewPeer struct {
	priorityNormal
	peerID types.NodeID
}

func (resp bcAddNewPeer) String() string {
	return fmt.Sprintf("bcAddNewPeer{%v}", resp.peerID)
}

// existing peer is removed
type bcRemovePeer struct {
	priorityHigh
	peerID types.NodeID
	reason interface{}
}

func (resp bcRemovePeer) String() string {
	return fmt.Sprintf("bcRemovePeer{%v due to %v}", resp.peerID, resp.reason)
}

// resets the scheduler and processor state, e.g. following a switch from state syncing
type bcResetState struct {
	priorityHigh
	state state.State
}

func (e bcResetState) String() string {
	return fmt.Sprintf("bcResetState{%v}", e.state)
}
// Takes the channel as a parameter to avoid race conditions on r.events.
func (r *BlockchainReactor) demux(events <-chan Event) {
	var lastHundred = time.Now()

	var (
		processBlockFreq = 20 * time.Millisecond
		doProcessBlockCh = make(chan struct{}, 1)
		doProcessBlockTk = time.NewTicker(processBlockFreq)
	)
	defer doProcessBlockTk.Stop()

	var (
		prunePeerFreq = 1 * time.Second
		doPrunePeerCh = make(chan struct{}, 1)
		doPrunePeerTk = time.NewTicker(prunePeerFreq)
	)
	defer doPrunePeerTk.Stop()

	var (
		scheduleFreq = 20 * time.Millisecond
		doScheduleCh = make(chan struct{}, 1)
		doScheduleTk = time.NewTicker(scheduleFreq)
	)
	defer doScheduleTk.Stop()

	var (
		statusFreq = 10 * time.Second
		doStatusCh = make(chan struct{}, 1)
		doStatusTk = time.NewTicker(statusFreq)
	)
	defer doStatusTk.Stop()
	doStatusCh <- struct{}{} // immediately broadcast to get status of existing peers

	// Memoize the scSchedulerFail error to avoid printing it every scheduleFreq.
	var scSchedulerFailErr error

	// XXX: Extract timers to make testing atemporal
	for {
		select {
		// Pacers: send at most per frequency but don't saturate
		case <-doProcessBlockTk.C:
			select {
			case doProcessBlockCh <- struct{}{}:
			default:
			}
		case <-doPrunePeerTk.C:
			select {
			case doPrunePeerCh <- struct{}{}:
			default:
			}
		case <-doScheduleTk.C:
			select {
			case doScheduleCh <- struct{}{}:
			default:
			}
		case <-doStatusTk.C:
			select {
			case doStatusCh <- struct{}{}:
			default:
			}
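		// Each pacer above pairs a ticker with a 1-slot buffered channel and a
		// non-blocking send: ticks that arrive while a task is still pending are
		// dropped instead of queuing up, so each task fires at most once per
		// frequency and never saturates the loop. (Descriptive comment; this
		// idiom is sometimes called a coalescing channel.)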
		// Tickers: perform tasks periodically
		case <-doScheduleCh:
			r.scheduler.send(rTrySchedule{time: time.Now()})
		case <-doPrunePeerCh:
			r.scheduler.send(rTryPrunePeer{time: time.Now()})
		case <-doProcessBlockCh:
			r.processor.send(rProcessBlock{})
		case <-doStatusCh:
			if err := r.io.broadcastStatusRequest(); err != nil {
				r.logger.Error("Error broadcasting status request", "err", err)
			}

		// Events from peers. Closing the channel signals event loop termination.
		case event, ok := <-events:
			if !ok {
				r.logger.Info("Stopping event processing")
				return
			}
			switch event := event.(type) {
			case bcStatusResponse:
				r.setMaxPeerHeight(event.height)
				r.scheduler.send(event)
			case bcAddNewPeer, bcRemovePeer, bcBlockResponse, bcNoBlockResponse:
				r.scheduler.send(event)
			default:
				r.logger.Error("Received unexpected event", "event", fmt.Sprintf("%T", event))
			}

		// Incremental events from scheduler
		case event := <-r.scheduler.next():
			switch event := event.(type) {
			case scBlockReceived:
				r.processor.send(event)
			case scPeerError:
				r.processor.send(event)
				if err := r.reporter.Report(behavior.BadMessage(event.peerID, "scPeerError")); err != nil {
					r.logger.Error("Error reporting peer", "err", err)
				}
			case scBlockRequest:
				peer := r.Switch.Peers().Get(event.peerID)
				if peer == nil {
					r.logger.Error("Wanted to send block request, but no such peer", "peerID", event.peerID)
					continue
				}
				if err := r.io.sendBlockRequest(peer, event.height); err != nil {
					r.logger.Error("Error sending block request", "err", err)
				}
			case scFinishedEv:
				r.processor.send(event)
				r.scheduler.stop()
			case scSchedulerFail:
				if scSchedulerFailErr != event.reason {
					r.logger.Error("Scheduler failure", "err", event.reason.Error())
					scSchedulerFailErr = event.reason
				}
			case scPeersPruned:
				// Remove peers from the processor.
				for _, peerID := range event.peers {
					r.processor.send(scPeerError{peerID: peerID, reason: errors.New("peer was pruned")})
				}
				r.logger.Debug("Pruned peers", "count", len(event.peers))
			case noOpEvent:
			default:
				r.logger.Error("Received unexpected scheduler event", "event", fmt.Sprintf("%T", event))
			}

		// Incremental events from processor
		case event := <-r.processor.next():
			switch event := event.(type) {
			case pcBlockProcessed:
				r.setSyncHeight(event.height)
				if (r.syncHeight-r.syncStartHeight)%100 == 0 {
					newSyncRate := 100 / time.Since(lastHundred).Seconds()
					if r.lastSyncRate == 0 {
						r.lastSyncRate = newSyncRate
					} else {
						r.lastSyncRate = 0.9*r.lastSyncRate + 0.1*newSyncRate
					}
					r.logger.Info("block sync Rate", "height", r.syncHeight,
						"max_peer_height", r.maxPeerHeight, "blocks/s", r.lastSyncRate)
					lastHundred = time.Now()
				}
				r.scheduler.send(event)
			case pcBlockVerificationFailure:
				r.scheduler.send(event)
			case pcFinished:
				r.logger.Info("block sync complete, switching to consensus")
				if !r.io.trySwitchToConsensus(event.tmState, event.blocksSynced > 0 || r.stateSynced) {
					r.logger.Error("Failed to switch to consensus reactor")
				}
				r.endSync()
				r.blockSync.UnSet()
				return
			case noOpEvent:
			default:
				r.logger.Error("Received unexpected processor event", "event", fmt.Sprintf("%T", event))
			}

		// Terminal event from scheduler
		case err := <-r.scheduler.final():
			switch err {
			case nil:
				r.logger.Info("Scheduler stopped")
			default:
				r.logger.Error("Scheduler aborted with error", "err", err)
			}

		// Terminal event from processor
		case err := <-r.processor.final():
			switch err {
			case nil:
				r.logger.Info("Processor stopped")
			default:
				r.logger.Error("Processor aborted with error", "err", err)
			}
		}
	}
}

// Stop implements cmn.Service interface.
func (r *BlockchainReactor) Stop() error {
	r.logger.Info("reactor stopping")
	r.endSync()
	r.logger.Info("reactor stopped")
	return nil
}
// Receive implements Reactor by handling different message types.
// XXX: do not call any methods that can block or incur heavy processing.
// https://github.com/tendermint/tendermint/issues/2888
func (r *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
	logger := r.logger.With("src", src.ID(), "chID", chID)

	msgProto := new(bcproto.Message)

	if err := proto.Unmarshal(msgBytes, msgProto); err != nil {
		logger.Error("error decoding message", "err", err)
		_ = r.reporter.Report(behavior.BadMessage(src.ID(), err.Error()))
		return
	}

	if err := msgProto.Validate(); err != nil {
		logger.Error("peer sent us an invalid msg", "msg", msgProto, "err", err)
		_ = r.reporter.Report(behavior.BadMessage(src.ID(), err.Error()))
		return
	}

	r.logger.Debug("received", "msg", msgProto)

	switch msg := msgProto.Sum.(type) {
	case *bcproto.Message_StatusRequest:
		if err := r.io.sendStatusResponse(r.store.Base(), r.store.Height(), src); err != nil {
			logger.Error("Could not send status message to src peer")
		}

	case *bcproto.Message_BlockRequest:
		block := r.store.LoadBlock(msg.BlockRequest.Height)
		if block != nil {
			if err := r.io.sendBlockToPeer(block, src); err != nil {
				logger.Error("Could not send block message to src peer", "err", err)
			}
		} else {
			logger.Info("peer asking for a block we don't have", "height", msg.BlockRequest.Height)
			if err := r.io.sendBlockNotFound(msg.BlockRequest.Height, src); err != nil {
				logger.Error("Couldn't send block not found msg", "err", err)
			}
		}

	case *bcproto.Message_StatusResponse:
		r.mtx.RLock()
		if r.events != nil {
			r.events <- bcStatusResponse{
				peerID: src.ID(),
				base:   msg.StatusResponse.Base,
				height: msg.StatusResponse.Height,
			}
		}
		r.mtx.RUnlock()

	case *bcproto.Message_BlockResponse:
		bi, err := types.BlockFromProto(msg.BlockResponse.Block)
		if err != nil {
			logger.Error("error transitioning block from protobuf", "err", err)
			_ = r.reporter.Report(behavior.BadMessage(src.ID(), err.Error()))
			return
		}
		r.mtx.RLock()
		if r.events != nil {
			r.events <- bcBlockResponse{
				peerID: src.ID(),
				block:  bi,
				size:   int64(len(msgBytes)),
				time:   time.Now(),
			}
		}
		r.mtx.RUnlock()

	case *bcproto.Message_NoBlockResponse:
		r.mtx.RLock()
		if r.events != nil {
			r.events <- bcNoBlockResponse{
				peerID: src.ID(),
				height: msg.NoBlockResponse.Height,
				time:   time.Now(),
			}
		}
		r.mtx.RUnlock()
	}
}
// AddPeer implements Reactor interface
func (r *BlockchainReactor) AddPeer(peer p2p.Peer) {
	err := r.io.sendStatusResponse(r.store.Base(), r.store.Height(), peer)
	if err != nil {
		r.logger.Error("could not send our status to the new peer", "peer", peer.ID(), "err", err)
	}

	err = r.io.sendStatusRequest(peer)
	if err != nil {
		r.logger.Error("could not send status request to the new peer", "peer", peer.ID(), "err", err)
	}

	r.mtx.RLock()
	defer r.mtx.RUnlock()
	if r.events != nil {
		r.events <- bcAddNewPeer{peerID: peer.ID()}
	}
}

// RemovePeer implements Reactor interface.
func (r *BlockchainReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
	r.mtx.RLock()
	defer r.mtx.RUnlock()
	if r.events != nil {
		r.events <- bcRemovePeer{
			peerID: peer.ID(),
			reason: reason,
		}
	}
}

// GetChannels implements Reactor
func (r *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
	return []*p2p.ChannelDescriptor{
		{
			ID:                  BlockchainChannel,
			Priority:            5,
			SendQueueCapacity:   2000,
			RecvBufferCapacity:  1024,
			RecvMessageCapacity: blocksync.MaxMsgSize,
		},
	}
}

func (r *BlockchainReactor) GetMaxPeerBlockHeight() int64 {
	r.mtx.RLock()
	defer r.mtx.RUnlock()
	return r.maxPeerHeight
}

func (r *BlockchainReactor) GetTotalSyncedTime() time.Duration {
	if !r.blockSync.IsSet() || r.syncStartTime.IsZero() {
		return time.Duration(0)
	}
	return time.Since(r.syncStartTime)
}

func (r *BlockchainReactor) GetRemainingSyncTime() time.Duration {
	if !r.blockSync.IsSet() {
		return time.Duration(0)
	}

	r.mtx.RLock()
	defer r.mtx.RUnlock()

	targetSyncs := r.maxPeerHeight - r.syncStartHeight
	currentSyncs := r.syncHeight - r.syncStartHeight + 1
	if currentSyncs < 0 || r.lastSyncRate < 0.001 {
		return time.Duration(0)
	}

	remain := float64(targetSyncs-currentSyncs) / r.lastSyncRate

	return time.Duration(int64(remain * float64(time.Second)))
}
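GetRemainingSyncTime is a simple extrapolation: blocks left divided by the EMA sync rate. A worked example with illustrative numbers:

// maxPeerHeight = 1000, syncStartHeight = 1, syncHeight = 600, lastSyncRate = 10 blocks/s
// targetSyncs  = 1000 - 1 = 999
// currentSyncs = 600 - 1 + 1 = 600
// remain       = (999 - 600) / 10 = 39.9, returned as ~39.9 * time.Second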
@@ -1,533 +0,0 @@
package v2

import (
	"fmt"
	"net"
	"os"
	"sync"
	"testing"
	"time"

	"github.com/gogo/protobuf/proto"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	dbm "github.com/tendermint/tm-db"

	abciclient "github.com/tendermint/tendermint/abci/client"
	abci "github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/internal/blocksync/v2/internal/behavior"
	"github.com/tendermint/tendermint/internal/consensus"
	"github.com/tendermint/tendermint/internal/mempool/mock"
	"github.com/tendermint/tendermint/internal/p2p"
	"github.com/tendermint/tendermint/internal/p2p/conn"
	"github.com/tendermint/tendermint/internal/proxy"
	sm "github.com/tendermint/tendermint/internal/state"
	sf "github.com/tendermint/tendermint/internal/state/test/factory"
	tmstore "github.com/tendermint/tendermint/internal/store"
	"github.com/tendermint/tendermint/internal/test/factory"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/libs/service"
	bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"
	"github.com/tendermint/tendermint/types"
)
type mockPeer struct {
|
||||
service.Service
|
||||
id types.NodeID
|
||||
}
|
||||
|
||||
func (mp mockPeer) FlushStop() {}
|
||||
func (mp mockPeer) ID() types.NodeID { return mp.id }
|
||||
func (mp mockPeer) RemoteIP() net.IP { return net.IP{} }
|
||||
func (mp mockPeer) RemoteAddr() net.Addr { return &net.TCPAddr{IP: mp.RemoteIP(), Port: 8800} }
|
||||
|
||||
func (mp mockPeer) IsOutbound() bool { return true }
|
||||
func (mp mockPeer) IsPersistent() bool { return true }
|
||||
func (mp mockPeer) CloseConn() error { return nil }
|
||||
|
||||
func (mp mockPeer) NodeInfo() types.NodeInfo {
|
||||
return types.NodeInfo{
|
||||
NodeID: "",
|
||||
ListenAddr: "",
|
||||
}
|
||||
}
|
||||
func (mp mockPeer) Status() conn.ConnectionStatus { return conn.ConnectionStatus{} }
|
||||
func (mp mockPeer) SocketAddr() *p2p.NetAddress { return &p2p.NetAddress{} }
|
||||
|
||||
func (mp mockPeer) Send(byte, []byte) bool { return true }
|
||||
func (mp mockPeer) TrySend(byte, []byte) bool { return true }
|
||||
|
||||
func (mp mockPeer) Set(string, interface{}) {}
|
||||
func (mp mockPeer) Get(string) interface{} { return struct{}{} }
|
||||
|
||||
//nolint:unused
|
||||
type mockBlockStore struct {
|
||||
blocks map[int64]*types.Block
|
||||
}
|
||||
|
||||
//nolint:unused
|
||||
func (ml *mockBlockStore) Height() int64 {
|
||||
return int64(len(ml.blocks))
|
||||
}
|
||||
|
||||
//nolint:unused
|
||||
func (ml *mockBlockStore) LoadBlock(height int64) *types.Block {
|
||||
return ml.blocks[height]
|
||||
}
|
||||
|
||||
//nolint:unused
|
||||
func (ml *mockBlockStore) SaveBlock(block *types.Block, part *types.PartSet, commit *types.Commit) {
|
||||
ml.blocks[block.Height] = block
|
||||
}
|
||||
|
||||
type mockBlockApplier struct {
|
||||
}
|
||||
|
||||
// XXX: Add whitelist/blacklist?
|
||||
func (mba *mockBlockApplier) ApplyBlock(
|
||||
state sm.State, blockID types.BlockID, block *types.Block,
|
||||
) (sm.State, error) {
|
||||
state.LastBlockHeight++
|
||||
return state, nil
|
||||
}
|
||||
|
||||
type mockSwitchIo struct {
|
||||
mtx sync.Mutex
|
||||
switchedToConsensus bool
|
||||
numStatusResponse int
|
||||
numBlockResponse int
|
||||
numNoBlockResponse int
|
||||
numStatusRequest int
|
||||
}
|
||||
|
||||
var _ iIO = (*mockSwitchIo)(nil)
|
||||
|
||||
func (sio *mockSwitchIo) sendBlockRequest(_ p2p.Peer, _ int64) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sio *mockSwitchIo) sendStatusResponse(_, _ int64, _ p2p.Peer) error {
|
||||
sio.mtx.Lock()
|
||||
defer sio.mtx.Unlock()
|
||||
sio.numStatusResponse++
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sio *mockSwitchIo) sendBlockToPeer(_ *types.Block, _ p2p.Peer) error {
|
||||
sio.mtx.Lock()
|
||||
defer sio.mtx.Unlock()
|
||||
sio.numBlockResponse++
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sio *mockSwitchIo) sendBlockNotFound(_ int64, _ p2p.Peer) error {
|
||||
sio.mtx.Lock()
|
||||
defer sio.mtx.Unlock()
|
||||
sio.numNoBlockResponse++
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sio *mockSwitchIo) trySwitchToConsensus(_ sm.State, _ bool) bool {
|
||||
sio.mtx.Lock()
|
||||
defer sio.mtx.Unlock()
|
||||
sio.switchedToConsensus = true
|
||||
return true
|
||||
}
|
||||
|
||||
func (sio *mockSwitchIo) broadcastStatusRequest() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sio *mockSwitchIo) sendStatusRequest(_ p2p.Peer) error {
|
||||
sio.mtx.Lock()
|
||||
defer sio.mtx.Unlock()
|
||||
sio.numStatusRequest++
|
||||
return nil
|
||||
}
|
||||
|
||||
type testReactorParams struct {
|
||||
logger log.Logger
|
||||
genDoc *types.GenesisDoc
|
||||
privVals []types.PrivValidator
|
||||
startHeight int64
|
||||
mockA bool
|
||||
}
|
||||
|
||||
func newTestReactor(t *testing.T, p testReactorParams) *BlockchainReactor {
|
||||
store, state, _ := newReactorStore(t, p.genDoc, p.privVals, p.startHeight)
|
||||
reporter := behavior.NewMockReporter()
|
||||
|
||||
var appl blockApplier
|
||||
|
||||
if p.mockA {
|
||||
appl = &mockBlockApplier{}
|
||||
} else {
|
||||
app := &testApp{}
|
||||
cc := abciclient.NewLocalCreator(app)
|
||||
proxyApp := proxy.NewAppConns(cc)
|
||||
err := proxyApp.Start()
|
||||
require.NoError(t, err)
|
||||
db := dbm.NewMemDB()
|
||||
stateStore := sm.NewStore(db)
|
||||
blockStore := tmstore.NewBlockStore(dbm.NewMemDB())
|
||||
appl = sm.NewBlockExecutor(
|
||||
stateStore, p.logger, proxyApp.Consensus(), mock.Mempool{}, sm.EmptyEvidencePool{}, blockStore)
|
||||
err = stateStore.Save(state)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
r := newReactor(state, store, reporter, appl, true, consensus.NopMetrics())
|
||||
logger := log.TestingLogger()
|
||||
r.SetLogger(logger.With("module", "blockchain"))
|
||||
|
||||
return r
|
||||
}
|
||||
|
||||
// This test is left here and not deleted to retain the termination cases for
// future improvement in [#4482](https://github.com/tendermint/tendermint/issues/4482).
// func TestReactorTerminationScenarios(t *testing.T) {

// 	config := cfg.ResetTestRoot("blockchain_reactor_v2_test")
// 	defer os.RemoveAll(config.RootDir)
// 	genDoc, privVals := randGenesisDoc(config.ChainID(), 1, false, 30)
// 	refStore, _, _ := newReactorStore(genDoc, privVals, 20)

// 	params := testReactorParams{
// 		logger:      log.TestingLogger(),
// 		genDoc:      genDoc,
// 		privVals:    privVals,
// 		startHeight: 10,
// 		bufferSize:  100,
// 		mockA:       true,
// 	}

// 	type testEvent struct {
// 		evType string
// 		peer   string
// 		height int64
// 	}

// 	tests := []struct {
// 		name   string
// 		params testReactorParams
// 		msgs   []testEvent
// 	}{
// 		{
// 			name:   "simple termination on max peer height - one peer",
// 			params: params,
// 			msgs: []testEvent{
// 				{evType: "AddPeer", peer: "P1"},
// 				{evType: "ReceiveS", peer: "P1", height: 13},
// 				{evType: "BlockReq"},
// 				{evType: "ReceiveB", peer: "P1", height: 11},
// 				{evType: "BlockReq"},
// 				{evType: "BlockReq"},
// 				{evType: "ReceiveB", peer: "P1", height: 12},
// 				{evType: "Process"},
// 				{evType: "ReceiveB", peer: "P1", height: 13},
// 				{evType: "Process"},
// 			},
// 		},
// 		{
// 			name:   "simple termination on max peer height - two peers",
// 			params: params,
// 			msgs: []testEvent{
// 				{evType: "AddPeer", peer: "P1"},
// 				{evType: "AddPeer", peer: "P2"},
// 				{evType: "ReceiveS", peer: "P1", height: 13},
// 				{evType: "ReceiveS", peer: "P2", height: 15},
// 				{evType: "BlockReq"},
// 				{evType: "BlockReq"},
// 				{evType: "ReceiveB", peer: "P1", height: 11},
// 				{evType: "ReceiveB", peer: "P2", height: 12},
// 				{evType: "Process"},
// 				{evType: "BlockReq"},
// 				{evType: "BlockReq"},
// 				{evType: "ReceiveB", peer: "P1", height: 13},
// 				{evType: "Process"},
// 				{evType: "ReceiveB", peer: "P2", height: 14},
// 				{evType: "Process"},
// 				{evType: "BlockReq"},
// 				{evType: "ReceiveB", peer: "P2", height: 15},
// 				{evType: "Process"},
// 			},
// 		},
// 		{
// 			name:   "termination on max peer height - two peers, noBlock error",
// 			params: params,
// 			msgs: []testEvent{
// 				{evType: "AddPeer", peer: "P1"},
// 				{evType: "AddPeer", peer: "P2"},
// 				{evType: "ReceiveS", peer: "P1", height: 13},
// 				{evType: "ReceiveS", peer: "P2", height: 15},
// 				{evType: "BlockReq"},
// 				{evType: "BlockReq"},
// 				{evType: "ReceiveNB", peer: "P1", height: 11},
// 				{evType: "BlockReq"},
// 				{evType: "ReceiveB", peer: "P2", height: 12},
// 				{evType: "ReceiveB", peer: "P2", height: 11},
// 				{evType: "Process"},
// 				{evType: "BlockReq"},
// 				{evType: "BlockReq"},
// 				{evType: "ReceiveB", peer: "P2", height: 13},
// 				{evType: "Process"},
// 				{evType: "ReceiveB", peer: "P2", height: 14},
// 				{evType: "Process"},
// 				{evType: "BlockReq"},
// 				{evType: "ReceiveB", peer: "P2", height: 15},
// 				{evType: "Process"},
// 			},
// 		},
// 		{
// 			name:   "termination on max peer height - two peers, remove one peer",
// 			params: params,
// 			msgs: []testEvent{
// 				{evType: "AddPeer", peer: "P1"},
// 				{evType: "AddPeer", peer: "P2"},
// 				{evType: "ReceiveS", peer: "P1", height: 13},
// 				{evType: "ReceiveS", peer: "P2", height: 15},
// 				{evType: "BlockReq"},
// 				{evType: "BlockReq"},
// 				{evType: "RemovePeer", peer: "P1"},
// 				{evType: "BlockReq"},
// 				{evType: "ReceiveB", peer: "P2", height: 12},
// 				{evType: "ReceiveB", peer: "P2", height: 11},
// 				{evType: "Process"},
// 				{evType: "BlockReq"},
// 				{evType: "BlockReq"},
// 				{evType: "ReceiveB", peer: "P2", height: 13},
// 				{evType: "Process"},
// 				{evType: "ReceiveB", peer: "P2", height: 14},
// 				{evType: "Process"},
// 				{evType: "BlockReq"},
// 				{evType: "ReceiveB", peer: "P2", height: 15},
// 				{evType: "Process"},
// 			},
// 		},
// 	}

// 	for _, tt := range tests {
// 		tt := tt
// 		t.Run(tt.name, func(t *testing.T) {
// 			reactor := newTestReactor(params)
// 			reactor.Start()
// 			reactor.reporter = behavior.NewMockReporter()
// 			mockSwitch := &mockSwitchIo{switchedToConsensus: false}
// 			reactor.io = mockSwitch
// 			// time for go routines to start
// 			time.Sleep(time.Millisecond)

// 			for _, step := range tt.msgs {
// 				switch step.evType {
// 				case "AddPeer":
// 					reactor.scheduler.send(bcAddNewPeer{peerID: p2p.ID(step.peer)})
// 				case "RemovePeer":
// 					reactor.scheduler.send(bcRemovePeer{peerID: p2p.ID(step.peer)})
// 				case "ReceiveS":
// 					reactor.scheduler.send(bcStatusResponse{
// 						peerID: p2p.ID(step.peer),
// 						height: step.height,
// 						time:   time.Now(),
// 					})
// 				case "ReceiveB":
// 					reactor.scheduler.send(bcBlockResponse{
// 						peerID: p2p.ID(step.peer),
// 						block:  refStore.LoadBlock(step.height),
// 						size:   10,
// 						time:   time.Now(),
// 					})
// 				case "ReceiveNB":
// 					reactor.scheduler.send(bcNoBlockResponse{
// 						peerID: p2p.ID(step.peer),
// 						height: step.height,
// 						time:   time.Now(),
// 					})
// 				case "BlockReq":
// 					reactor.scheduler.send(rTrySchedule{time: time.Now()})
// 				case "Process":
// 					reactor.processor.send(rProcessBlock{})
// 				}
// 				// give time for messages to propagate between routines
// 				time.Sleep(time.Millisecond)
// 			}

// 			// time for processor to finish and reactor to switch to consensus
// 			time.Sleep(20 * time.Millisecond)
// 			assert.True(t, mockSwitch.hasSwitchedToConsensus())
// 			reactor.Stop()
// 		})
// 	}
// }

func TestReactorHelperMode(t *testing.T) {
	var (
		channelID = byte(0x40)
	)

	cfg := config.ResetTestRoot("blockchain_reactor_v2_test")
	defer os.RemoveAll(cfg.RootDir)
	genDoc, privVals := factory.RandGenesisDoc(cfg, 1, false, 30)

	params := testReactorParams{
		logger:      log.TestingLogger(),
		genDoc:      genDoc,
		privVals:    privVals,
		startHeight: 20,
		mockA:       true,
	}

	type testEvent struct {
		peer  string
		event interface{}
	}

	tests := []struct {
		name   string
		params testReactorParams
		msgs   []testEvent
	}{
		{
			name:   "status request",
			params: params,
			msgs: []testEvent{
				{"P1", bcproto.StatusRequest{}},
				{"P1", bcproto.BlockRequest{Height: 13}},
				{"P1", bcproto.BlockRequest{Height: 20}},
				{"P1", bcproto.BlockRequest{Height: 22}},
			},
		},
	}

	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			reactor := newTestReactor(t, params)
			mockSwitch := &mockSwitchIo{switchedToConsensus: false}
			reactor.io = mockSwitch
			err := reactor.Start()
			require.NoError(t, err)

			for i := 0; i < len(tt.msgs); i++ {
				step := tt.msgs[i]
				switch ev := step.event.(type) {
				case bcproto.StatusRequest:
					old := mockSwitch.numStatusResponse

					msgProto := new(bcproto.Message)
					require.NoError(t, msgProto.Wrap(&ev))

					msgBz, err := proto.Marshal(msgProto)
					require.NoError(t, err)

					reactor.Receive(channelID, mockPeer{id: types.NodeID(step.peer)}, msgBz)
					assert.Equal(t, old+1, mockSwitch.numStatusResponse)
				case bcproto.BlockRequest:
					if ev.Height > params.startHeight {
						old := mockSwitch.numNoBlockResponse

						msgProto := new(bcproto.Message)
						require.NoError(t, msgProto.Wrap(&ev))

						msgBz, err := proto.Marshal(msgProto)
						require.NoError(t, err)

						reactor.Receive(channelID, mockPeer{id: types.NodeID(step.peer)}, msgBz)
						assert.Equal(t, old+1, mockSwitch.numNoBlockResponse)
					} else {
						old := mockSwitch.numBlockResponse

						msgProto := new(bcproto.Message)
						require.NoError(t, msgProto.Wrap(&ev))

						msgBz, err := proto.Marshal(msgProto)
						require.NoError(t, err)

						reactor.Receive(channelID, mockPeer{id: types.NodeID(step.peer)}, msgBz)
						assert.Equal(t, old+1, mockSwitch.numBlockResponse)
					}
				}
			}
			err = reactor.Stop()
			require.NoError(t, err)
		})
	}
}

func TestReactorSetSwitchNil(t *testing.T) {
	cfg := config.ResetTestRoot("blockchain_reactor_v2_test")
	defer os.RemoveAll(cfg.RootDir)
	genDoc, privVals := factory.RandGenesisDoc(cfg, 1, false, 30)

	reactor := newTestReactor(t, testReactorParams{
		logger:   log.TestingLogger(),
		genDoc:   genDoc,
		privVals: privVals,
	})
	reactor.SetSwitch(nil)

	assert.Nil(t, reactor.Switch)
	assert.Nil(t, reactor.io)
}

type testApp struct {
	abci.BaseApplication
}

func newReactorStore(
	t *testing.T,
	genDoc *types.GenesisDoc,
	privVals []types.PrivValidator,
	maxBlockHeight int64) (*tmstore.BlockStore, sm.State, *sm.BlockExecutor) {
	t.Helper()

	require.Len(t, privVals, 1)
	app := &testApp{}
	cc := abciclient.NewLocalCreator(app)
	proxyApp := proxy.NewAppConns(cc)
	err := proxyApp.Start()
	if err != nil {
		panic(fmt.Errorf("error starting app: %w", err))
	}

	stateDB := dbm.NewMemDB()
	blockStore := tmstore.NewBlockStore(dbm.NewMemDB())
	stateStore := sm.NewStore(stateDB)
	state, err := sm.MakeGenesisState(genDoc)
	require.NoError(t, err)

	blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(),
		mock.Mempool{}, sm.EmptyEvidencePool{}, blockStore)
	err = stateStore.Save(state)
	require.NoError(t, err)

	// build and persist blocks up to maxBlockHeight
	for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ {
		lastCommit := types.NewCommit(blockHeight-1, 0, types.BlockID{}, nil)
		if blockHeight > 1 {
			lastBlockMeta := blockStore.LoadBlockMeta(blockHeight - 1)
			lastBlock := blockStore.LoadBlock(blockHeight - 1)
			vote, err := factory.MakeVote(
				privVals[0],
				lastBlock.Header.ChainID, 0,
				lastBlock.Header.Height, 0, 2,
				lastBlockMeta.BlockID,
				time.Now(),
			)
			require.NoError(t, err)
			lastCommit = types.NewCommit(vote.Height, vote.Round,
				lastBlockMeta.BlockID, []types.CommitSig{vote.CommitSig()})
		}

		thisBlock := sf.MakeBlock(state, blockHeight, lastCommit)

		thisParts := thisBlock.MakePartSet(types.BlockPartSizeBytes)
		blockID := types.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()}

		state, err = blockExec.ApplyBlock(state, blockID, thisBlock)
		require.NoError(t, err)

		blockStore.SaveBlock(thisBlock, thisParts, lastCommit)
	}
	return blockStore, state, blockExec
}
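
A minimal sketch of how a test might consume this helper, using only the helpers that appear above; the test name and the target height are hypothetical:

func TestNewReactorStoreBuildsChain(t *testing.T) {
	cfg := config.ResetTestRoot("newreactorstore_example_test")
	defer os.RemoveAll(cfg.RootDir)
	genDoc, privVals := factory.RandGenesisDoc(cfg, 1, false, 30)

	// Build and persist a 10-block chain backed by in-memory stores.
	blockStore, state, _ := newReactorStore(t, genDoc, privVals, 10)
	require.EqualValues(t, 10, blockStore.Height())
	require.EqualValues(t, 10, state.LastBlockHeight)
}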
@@ -1,166 +0,0 @@
package v2

import (
	"fmt"
	"strings"
	"sync/atomic"

	"github.com/Workiva/go-datastructures/queue"

	"github.com/tendermint/tendermint/libs/log"
)

type handleFunc = func(event Event) (Event, error)

const historySize = 25

// Routine is a structure that models a finite state machine as a serialized
// stream of events processed by a handle function. This Routine structure
// handles the concurrency and messaging guarantees. Events sent via `send`
// are handled by the `handle` function to produce an iterator `next()`.
// Calling `stop()` on a routine will conclude processing of all sent events
// and produce a `final()` event representing the terminal state.
type Routine struct {
	name    string
	handle  handleFunc
	queue   *queue.PriorityQueue
	history []Event
	out     chan Event
	fin     chan error
	rdy     chan struct{}
	running *uint32
	logger  log.Logger
	metrics *Metrics
}

func newRoutine(name string, handleFunc handleFunc, bufferSize int) *Routine {
	return &Routine{
		name:    name,
		handle:  handleFunc,
		queue:   queue.NewPriorityQueue(bufferSize, true),
		history: make([]Event, 0, historySize),
		out:     make(chan Event, bufferSize),
		rdy:     make(chan struct{}, 1),
		fin:     make(chan error, 1),
		running: new(uint32),
		logger:  log.NewNopLogger(),
		metrics: NopMetrics(),
	}
}

func (rt *Routine) setLogger(logger log.Logger) {
	rt.logger = logger
}

// nolint:unused
func (rt *Routine) setMetrics(metrics *Metrics) {
	rt.metrics = metrics
}

func (rt *Routine) start() {
	rt.logger.Info(fmt.Sprintf("%s: run", rt.name))
	running := atomic.CompareAndSwapUint32(rt.running, uint32(0), uint32(1))
	if !running {
		panic(fmt.Sprintf("%s is already running", rt.name))
	}
	close(rt.rdy)
	defer func() {
		if r := recover(); r != nil {
			var (
				b strings.Builder
				j int
			)
			for i := len(rt.history) - 1; i >= 0; i-- {
				fmt.Fprintf(&b, "%d: %+v\n", j, rt.history[i])
				j++
			}
			panic(fmt.Sprintf("%v\nlast events:\n%v", r, b.String()))
		}
		stopped := atomic.CompareAndSwapUint32(rt.running, uint32(1), uint32(0))
		if !stopped {
			panic(fmt.Sprintf("%s failed to stop", rt.name))
		}
	}()

	for {
		events, err := rt.queue.Get(1)
		if err == queue.ErrDisposed {
			rt.terminate(nil)
			return
		} else if err != nil {
			rt.terminate(err)
			return
		}
		oEvent, err := rt.handle(events[0].(Event))
		rt.metrics.EventsHandled.With("routine", rt.name).Add(1)
		if err != nil {
			rt.terminate(err)
			return
		}
		rt.metrics.EventsOut.With("routine", rt.name).Add(1)
		rt.logger.Debug(fmt.Sprintf("%s: produced %T %+v", rt.name, oEvent, oEvent))

		// Skip rTrySchedule and rProcessBlock events as they clutter the history
		// due to their frequency.
		switch events[0].(type) {
		case rTrySchedule:
		case rProcessBlock:
		default:
			rt.history = append(rt.history, events[0].(Event))
			if len(rt.history) > historySize {
				rt.history = rt.history[1:]
			}
		}

		rt.out <- oEvent
	}
}

// XXX: look into returning OpError in the net package
func (rt *Routine) send(event Event) bool {
	rt.logger.Debug(fmt.Sprintf("%s: received %T %+v", rt.name, event, event))
	if !rt.isRunning() {
		return false
	}
	err := rt.queue.Put(event)
	if err != nil {
		rt.metrics.EventsShed.With("routine", rt.name).Add(1)
		rt.logger.Error(fmt.Sprintf("%s: send failed, queue was full/stopped", rt.name))
		return false
	}

	rt.metrics.EventsSent.With("routine", rt.name).Add(1)
	return true
}

func (rt *Routine) isRunning() bool {
	return atomic.LoadUint32(rt.running) == 1
}

func (rt *Routine) next() chan Event {
	return rt.out
}

func (rt *Routine) ready() chan struct{} {
	return rt.rdy
}

func (rt *Routine) stop() {
	if !rt.isRunning() { // XXX: this should check rt.queue.Disposed()
		return
	}

	rt.logger.Info(fmt.Sprintf("%s: stop", rt.name))
	rt.queue.Dispose() // this should block until all queue items are free?
}

func (rt *Routine) final() chan error {
	return rt.fin
}

// XXX: Maybe get rid of this
func (rt *Routine) terminate(reason error) {
	// We don't close the rt.out channel here, to avoid spinning on the closed channel
	// in the event loop.
	rt.fin <- reason
}
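
For orientation, a minimal sketch of driving a Routine end to end, using only the API defined above; the handler and buffer size are illustrative:

func demoRoutine() error {
	rt := newRoutine("demo", func(event Event) (Event, error) {
		// Echo a noOp for every input; returning a non-nil error
		// would terminate the loop instead.
		return noOp, nil
	}, 10)

	go rt.start()
	<-rt.ready() // the routine only accepts events after start
	rt.send(noOp)

	// Drain produced events so the handle loop never blocks on rt.out.
	go func() {
		for range rt.next() {
		}
	}()

	rt.stop()           // disposes the queue, which ends the loop
	return <-rt.final() // nil on a clean queue-disposed shutdown
}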
@@ -1,163 +0,0 @@
package v2

import (
	"fmt"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

type eventA struct {
	priorityNormal
}

var errDone = fmt.Errorf("done")

func simpleHandler(event Event) (Event, error) {
	if _, ok := event.(eventA); ok {
		return noOp, errDone
	}
	return noOp, nil
}

func TestRoutineFinal(t *testing.T) {
	var (
		bufferSize = 10
		routine    = newRoutine("simpleRoutine", simpleHandler, bufferSize)
	)

	assert.False(t, routine.isRunning(),
		"expected an initialized routine to not be running")
	go routine.start()
	<-routine.ready()
	assert.True(t, routine.isRunning(),
		"expected a started routine to be running")

	assert.True(t, routine.send(eventA{}),
		"expected sending to a ready routine to succeed")

	assert.Equal(t, errDone, <-routine.final(),
		"expected the final event to be done")

	assert.False(t, routine.isRunning(),
		"expected a completed routine to no longer be running")
}

func TestRoutineStop(t *testing.T) {
	var (
		bufferSize = 10
		routine    = newRoutine("simpleRoutine", simpleHandler, bufferSize)
	)

	assert.False(t, routine.send(eventA{}),
		"expected sending to an unstarted routine to fail")

	go routine.start()
	<-routine.ready()

	assert.True(t, routine.send(eventA{}),
		"expected sending to a running routine to succeed")

	routine.stop()

	assert.False(t, routine.send(eventA{}),
		"expected sending to a stopped routine to fail")
}

type finalCount struct {
	count int
}

func (f finalCount) Error() string {
	return "end"
}

func genStatefulHandler(maxCount int) handleFunc {
	counter := 0
	return func(event Event) (Event, error) {
		if _, ok := event.(eventA); ok {
			counter++
			if counter >= maxCount {
				return noOp, finalCount{counter}
			}

			return eventA{}, nil
		}
		return noOp, nil
	}
}

func feedback(r *Routine) {
	for event := range r.next() {
		r.send(event)
	}
}

func TestStatefulRoutine(t *testing.T) {
	var (
		count      = 10
		handler    = genStatefulHandler(count)
		bufferSize = 20
		routine    = newRoutine("statefulRoutine", handler, bufferSize)
	)

	go routine.start()
	go feedback(routine)
	<-routine.ready()

	assert.True(t, routine.send(eventA{}),
		"expected sending to a started routine to succeed")

	final := <-routine.final()
	if fnl, ok := final.(finalCount); ok {
		assert.Equal(t, count, fnl.count,
			"expected the routine to count to 10")
	} else {
		t.Fail()
	}
}

type lowPriorityEvent struct {
	priorityLow
}

type highPriorityEvent struct {
	priorityHigh
}

func handleWithPriority(event Event) (Event, error) {
	switch event.(type) {
	case lowPriorityEvent:
		return noOp, nil
	case highPriorityEvent:
		return noOp, errDone
	}
	return noOp, nil
}

func TestPriority(t *testing.T) {
	var (
		bufferSize = 20
		routine    = newRoutine("priorityRoutine", handleWithPriority, bufferSize)
	)

	go routine.start()
	<-routine.ready()
	go func() {
		for {
			routine.send(lowPriorityEvent{})
			time.Sleep(1 * time.Millisecond)
		}
	}()
	time.Sleep(10 * time.Millisecond)

	assert.True(t, routine.isRunning(),
		"expected the routine to still be running")
	assert.True(t, routine.send(highPriorityEvent{}),
		"expected send to succeed even when saturated")

	assert.Equal(t, errDone, <-routine.final())
	assert.False(t, routine.isRunning(),
		"expected the routine to have stopped")
}
@@ -1,711 +0,0 @@
package v2

import (
	"bytes"
	"errors"
	"fmt"
	"math"
	"sort"
	"time"

	"github.com/tendermint/tendermint/types"
)

// Events generated by the scheduler:
// all blocks have been processed
type scFinishedEv struct {
	priorityNormal
	reason string
}

func (e scFinishedEv) String() string {
	return fmt.Sprintf("scFinishedEv{%v}", e.reason)
}

// send a blockRequest message
type scBlockRequest struct {
	priorityNormal
	peerID types.NodeID
	height int64
}

func (e scBlockRequest) String() string {
	return fmt.Sprintf("scBlockRequest{%d from %v}", e.height, e.peerID)
}

// a block has been received and validated by the scheduler
type scBlockReceived struct {
	priorityNormal
	peerID types.NodeID
	block  *types.Block
}

func (e scBlockReceived) String() string {
	return fmt.Sprintf("scBlockReceived{%d#%X from %v}", e.block.Height, e.block.Hash(), e.peerID)
}

// scheduler detected a peer error
type scPeerError struct {
	priorityHigh
	peerID types.NodeID
	reason error
}

func (e scPeerError) String() string {
	return fmt.Sprintf("scPeerError{%v errored with %v}", e.peerID, e.reason)
}

// scheduler removed a set of peers (timed out or slow peer)
type scPeersPruned struct {
	priorityHigh
	peers []types.NodeID
}

func (e scPeersPruned) String() string {
	return fmt.Sprintf("scPeersPruned{%v}", e.peers)
}

// XXX: make this fatal?
// scheduler encountered a fatal error
type scSchedulerFail struct {
	priorityHigh
	reason error
}

func (e scSchedulerFail) String() string {
	return fmt.Sprintf("scSchedulerFail{%v}", e.reason)
}

type blockState int

const (
	blockStateUnknown   blockState = iota + 1 // no known peer has this block
	blockStateNew                             // indicates that a peer has reported having this block
	blockStatePending                         // indicates that this block has been requested from a peer
	blockStateReceived                        // indicates that this block has been received from a peer
	blockStateProcessed                       // indicates that this block has been applied
)

func (e blockState) String() string {
	switch e {
	case blockStateUnknown:
		return "Unknown"
	case blockStateNew:
		return "New"
	case blockStatePending:
		return "Pending"
	case blockStateReceived:
		return "Received"
	case blockStateProcessed:
		return "Processed"
	default:
		return fmt.Sprintf("invalid blockState: %d", e)
	}
}
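// A block at a given height normally advances through these states:
//   Unknown -> New (a peer reports having it) -> Pending (requested from a
//   peer) -> Received (delivered) -> Processed (applied).
// Pending and Received fall back to New when the peer serving that height
// is removed (see removePeer below).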
type peerState int

const (
	peerStateNew = iota + 1
	peerStateReady
	peerStateRemoved
)

func (e peerState) String() string {
	switch e {
	case peerStateNew:
		return "New"
	case peerStateReady:
		return "Ready"
	case peerStateRemoved:
		return "Removed"
	default:
		panic(fmt.Sprintf("unknown peerState: %d", e))
	}
}

type scPeer struct {
	peerID types.NodeID

	// initialized as New when peer is added, updated to Ready when statusUpdate is received,
	// updated to Removed when peer is removed
	state peerState

	base        int64 // updated when statusResponse is received
	height      int64 // updated when statusResponse is received
	lastTouched time.Time
	lastRate    int64 // last receive rate in bytes
}

func (p scPeer) String() string {
	return fmt.Sprintf("{state %v, base %d, height %d, lastTouched %v, lastRate %d, id %v}",
		p.state, p.base, p.height, p.lastTouched, p.lastRate, p.peerID)
}

func newScPeer(peerID types.NodeID) *scPeer {
	return &scPeer{
		peerID:      peerID,
		state:       peerStateNew,
		base:        -1,
		height:      -1,
		lastTouched: time.Time{},
	}
}

// The scheduler keeps track of the state of each block and each peer. The
// scheduler will attempt to schedule new block requests with `trySchedule`
// events and remove slow peers with `tryPrune` events.
type scheduler struct {
	initHeight int64

	// next block that needs to be processed. All blocks with smaller height are
	// in Processed state.
	height int64

	// lastAdvance tracks the last time a block execution happened.
	// syncTimeout is the maximum time the scheduler waits to advance in the block sync process before finishing.
	// This covers the cases where there are no peers or all peers have a lower height.
	lastAdvance time.Time
	syncTimeout time.Duration

	// a map of peerID to scheduler specific peer struct `scPeer` used to keep
	// track of peer specific state
	peers       map[types.NodeID]*scPeer
	peerTimeout time.Duration // maximum response time from a peer otherwise prune
	minRecvRate int64         // minimum receive rate from peer otherwise prune

	// the maximum number of blocks that should be New, Received or Pending at any point
	// in time. This is used to enforce a limit on the blockStates map.
	targetPending int
	// a list of blocks to be scheduled (New), Pending or Received. Its length should be
	// smaller than targetPending.
	blockStates map[int64]blockState

	// a map of heights to the peer we are waiting a response from
	pendingBlocks map[int64]types.NodeID

	// the time at which a block was put in blockStatePending
	pendingTime map[int64]time.Time

	// a map of heights to the peers that put the block in blockStateReceived
	receivedBlocks map[int64]types.NodeID
}

func (sc scheduler) String() string {
	return fmt.Sprintf("ih: %d, bst: %v, peers: %v, pblks: %v, ptm %v, rblks: %v",
		sc.initHeight, sc.blockStates, sc.peers, sc.pendingBlocks, sc.pendingTime, sc.receivedBlocks)
}

func newScheduler(initHeight int64, startTime time.Time) *scheduler {
	sc := scheduler{
		initHeight:     initHeight,
		lastAdvance:    startTime,
		syncTimeout:    60 * time.Second,
		height:         initHeight,
		blockStates:    make(map[int64]blockState),
		peers:          make(map[types.NodeID]*scPeer),
		pendingBlocks:  make(map[int64]types.NodeID),
		pendingTime:    make(map[int64]time.Time),
		receivedBlocks: make(map[int64]types.NodeID),
		targetPending:  10,               // TODO - pass as param
		peerTimeout:    15 * time.Second, // TODO - pass as param
		minRecvRate:    0,                // int64(7680), TODO - pass as param
	}

	return &sc
}
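// A small sketch of the construction flow, assuming the methods defined
// below; the heights and the peer ID are made up:
//
// func demoScheduler() {
// 	sc := newScheduler(100, time.Now())
//
// 	// A status response moves the peer to Ready and marks heights
// 	// 100..109 (targetPending = 10) as New, ready to be requested.
// 	if err := sc.setPeerRange("peer1", 1, 150); err != nil {
// 		panic(err)
// 	}
// 	_ = sc.nextHeightToSchedule() // 100
// }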
func (sc *scheduler) ensurePeer(peerID types.NodeID) *scPeer {
	if _, ok := sc.peers[peerID]; !ok {
		sc.peers[peerID] = newScPeer(peerID)
	}
	return sc.peers[peerID]
}

func (sc *scheduler) touchPeer(peerID types.NodeID, time time.Time) error {
	peer, ok := sc.peers[peerID]
	if !ok {
		return fmt.Errorf("couldn't find peer %s", peerID)
	}

	if peer.state != peerStateReady {
		return fmt.Errorf("tried to touch peer in state %s, must be Ready", peer.state)
	}

	peer.lastTouched = time

	return nil
}

func (sc *scheduler) removePeer(peerID types.NodeID) {
	peer, ok := sc.peers[peerID]
	if !ok {
		return
	}
	if peer.state == peerStateRemoved {
		return
	}

	for height, pendingPeerID := range sc.pendingBlocks {
		if pendingPeerID == peerID {
			sc.setStateAtHeight(height, blockStateNew)
			delete(sc.pendingTime, height)
			delete(sc.pendingBlocks, height)
		}
	}

	for height, rcvPeerID := range sc.receivedBlocks {
		if rcvPeerID == peerID {
			sc.setStateAtHeight(height, blockStateNew)
			delete(sc.receivedBlocks, height)
		}
	}

	// remove the blocks from blockStates if the peer removal causes the max peer height to be lower.
	peer.state = peerStateRemoved
	maxPeerHeight := int64(0)
	for _, otherPeer := range sc.peers {
		if otherPeer.state != peerStateReady {
			continue
		}
		if otherPeer.peerID != peer.peerID && otherPeer.height > maxPeerHeight {
			maxPeerHeight = otherPeer.height
		}
	}
	for h := range sc.blockStates {
		if h > maxPeerHeight {
			delete(sc.blockStates, h)
		}
	}
}

// check if the blockPool is running low and add new blocks in New state to be requested.
// This function is called when there is an increase in the maximum peer height or when
// blocks are processed.
func (sc *scheduler) addNewBlocks() {
	if len(sc.blockStates) >= sc.targetPending {
		return
	}

	for i := sc.height; i < int64(sc.targetPending)+sc.height; i++ {
		if i > sc.maxHeight() {
			break
		}
		if sc.getStateAtHeight(i) == blockStateUnknown {
			sc.setStateAtHeight(i, blockStateNew)
		}
	}
}

func (sc *scheduler) setPeerRange(peerID types.NodeID, base int64, height int64) error {
	peer := sc.ensurePeer(peerID)

	if peer.state == peerStateRemoved {
		return nil // noop
	}

	if height < peer.height {
		sc.removePeer(peerID)
		return fmt.Errorf("cannot move peer height lower, from %d to %d", peer.height, height)
	}

	if base > height {
		sc.removePeer(peerID)
		return fmt.Errorf("cannot set peer base higher than its height")
	}

	peer.base = base
	peer.height = height
	peer.state = peerStateReady

	sc.addNewBlocks()
	return nil
}

func (sc *scheduler) getStateAtHeight(height int64) blockState {
	if height < sc.height {
		return blockStateProcessed
	} else if state, ok := sc.blockStates[height]; ok {
		return state
	} else {
		return blockStateUnknown
	}
}

func (sc *scheduler) getPeersWithHeight(height int64) []types.NodeID {
	peers := make([]types.NodeID, 0)
	for _, peer := range sc.peers {
		if peer.state != peerStateReady {
			continue
		}
		if peer.base <= height && peer.height >= height {
			peers = append(peers, peer.peerID)
		}
	}
	return peers
}

func (sc *scheduler) prunablePeers(peerTimeout time.Duration, minRecvRate int64, now time.Time) []types.NodeID {
	prunable := make([]types.NodeID, 0)
	for peerID, peer := range sc.peers {
		if peer.state != peerStateReady {
			continue
		}
		if now.Sub(peer.lastTouched) > peerTimeout || peer.lastRate < minRecvRate {
			prunable = append(prunable, peerID)
		}
	}
	// Tests for handleTryPrunePeer() may fail without sort due to range non-determinism
	sort.Sort(PeerByID(prunable))
	return prunable
}

func (sc *scheduler) setStateAtHeight(height int64, state blockState) {
	sc.blockStates[height] = state
}

// CONTRACT: peer exists and in Ready state.
func (sc *scheduler) markReceived(peerID types.NodeID, height int64, size int64, now time.Time) error {
	peer := sc.peers[peerID]

	if state := sc.getStateAtHeight(height); state != blockStatePending || sc.pendingBlocks[height] != peerID {
		return fmt.Errorf("received block %d from peer %s without being requested", height, peerID)
	}

	pendingTime, ok := sc.pendingTime[height]
	if !ok || now.Sub(pendingTime) <= 0 {
		return fmt.Errorf("clock error: block %d received at %s but requested at %s",
			height, now, pendingTime)
	}

	peer.lastRate = size / now.Sub(pendingTime).Nanoseconds()

	sc.setStateAtHeight(height, blockStateReceived)
	delete(sc.pendingBlocks, height)
	delete(sc.pendingTime, height)

	sc.receivedBlocks[height] = peerID

	return nil
}

func (sc *scheduler) markPending(peerID types.NodeID, height int64, time time.Time) error {
	state := sc.getStateAtHeight(height)
	if state != blockStateNew {
		return fmt.Errorf("block %d should be in blockStateNew but is %s", height, state)
	}

	peer, ok := sc.peers[peerID]
	if !ok {
		return fmt.Errorf("cannot find peer %s", peerID)
	}

	if peer.state != peerStateReady {
		return fmt.Errorf("cannot schedule %d from %s in %s", height, peerID, peer.state)
	}

	if height > peer.height {
		return fmt.Errorf("cannot request height %d from peer %s that is at height %d",
			height, peerID, peer.height)
	}

	if height < peer.base {
		return fmt.Errorf("cannot request height %d for peer %s with base %d",
			height, peerID, peer.base)
	}

	sc.setStateAtHeight(height, blockStatePending)
	sc.pendingBlocks[height] = peerID
	sc.pendingTime[height] = time

	return nil
}

func (sc *scheduler) markProcessed(height int64) error {
	// It is possible that a peer error or timeout is handled after the processor
	// has processed the block but before the scheduler received this event, so
	// when pcBlockProcessed event is received, the block had been requested
	// again => don't check the block state.
	sc.lastAdvance = time.Now()
	sc.height = height + 1
	delete(sc.pendingBlocks, height)
	delete(sc.pendingTime, height)
	delete(sc.receivedBlocks, height)
	delete(sc.blockStates, height)
	sc.addNewBlocks()
	return nil
}

func (sc *scheduler) allBlocksProcessed() bool {
	if len(sc.peers) == 0 {
		return false
	}
	return sc.height >= sc.maxHeight()
}

// returns max peer height or the last processed block, i.e. sc.height-1
func (sc *scheduler) maxHeight() int64 {
	max := sc.height - 1
	for _, peer := range sc.peers {
		if peer.state != peerStateReady {
			continue
		}
		if max < peer.height {
			max = peer.height
		}
	}
	return max
}

// lowest block in sc.blockStates with state == blockStateNew or -1 if no new blocks
func (sc *scheduler) nextHeightToSchedule() int64 {
	var min int64 = math.MaxInt64
	for height, state := range sc.blockStates {
		if state == blockStateNew && height < min {
			min = height
		}
	}
	if min == math.MaxInt64 {
		min = -1
	}
	return min
}

func (sc *scheduler) pendingFrom(peerID types.NodeID) []int64 {
	var heights []int64
	for height, pendingPeerID := range sc.pendingBlocks {
		if pendingPeerID == peerID {
			heights = append(heights, height)
		}
	}
	return heights
}

func (sc *scheduler) selectPeer(height int64) (types.NodeID, error) {
	peers := sc.getPeersWithHeight(height)
	if len(peers) == 0 {
		return "", fmt.Errorf("cannot find peer for height %d", height)
	}

	// create a map from number of pending requests to a list
	// of peers having that number of pending requests.
	pendingFrom := make(map[int][]types.NodeID)
	for _, peerID := range peers {
		numPending := len(sc.pendingFrom(peerID))
		pendingFrom[numPending] = append(pendingFrom[numPending], peerID)
	}

	// find the set of peers with minimum number of pending requests.
	var minPending int64 = math.MaxInt64
	for mp := range pendingFrom {
		if int64(mp) < minPending {
			minPending = int64(mp)
		}
	}

	sort.Sort(PeerByID(pendingFrom[int(minPending)]))
	return pendingFrom[int(minPending)][0], nil
}
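// Worked example for selectPeer: peers A and B both cover height h; A has
// two requests pending, B has one. pendingFrom buckets them as
// {1: [B], 2: [A]}, minPending resolves to 1, and B is selected, with ties
// broken deterministically by the NodeID sort below.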
// PeerByID is a list of peers sorted by peerID.
type PeerByID []types.NodeID

func (peers PeerByID) Len() int {
	return len(peers)
}

func (peers PeerByID) Less(i, j int) bool {
	return bytes.Compare([]byte(peers[i]), []byte(peers[j])) == -1
}

func (peers PeerByID) Swap(i, j int) {
	peers[i], peers[j] = peers[j], peers[i]
}

// Handlers

// This handler gets the block, performs some validation and then passes it on to the processor.
func (sc *scheduler) handleBlockResponse(event bcBlockResponse) (Event, error) {
	err := sc.touchPeer(event.peerID, event.time)
	if err != nil {
		// peer does not exist OR not ready
		return noOp, nil
	}

	err = sc.markReceived(event.peerID, event.block.Height, event.size, event.time)
	if err != nil {
		sc.removePeer(event.peerID)
		return scPeerError{peerID: event.peerID, reason: err}, nil
	}

	return scBlockReceived{peerID: event.peerID, block: event.block}, nil
}

func (sc *scheduler) handleNoBlockResponse(event bcNoBlockResponse) (Event, error) {
	// No such peer, or the peer was already removed.
	peer, ok := sc.peers[event.peerID]
	if !ok || peer.state == peerStateRemoved {
		return noOp, nil
	}

	// The peer may have been just removed due to errors, low speed or timeouts.
	sc.removePeer(event.peerID)

	return scPeerError{peerID: event.peerID,
		reason: fmt.Errorf("peer %v with base %d height %d claims no block for %d",
			event.peerID, peer.base, peer.height, event.height)}, nil
}

func (sc *scheduler) handleBlockProcessed(event pcBlockProcessed) (Event, error) {
	if event.height != sc.height {
		panic(fmt.Sprintf("processed height %d, but expected height %d", event.height, sc.height))
	}

	err := sc.markProcessed(event.height)
	if err != nil {
		return scSchedulerFail{reason: err}, nil
	}

	if sc.allBlocksProcessed() {
		return scFinishedEv{reason: "processed all blocks"}, nil
	}

	return noOp, nil
}

// Handles an error from the processor. The processor has already cleaned the blocks from
// the peers included in this event. Just attempt to remove the peers.
func (sc *scheduler) handleBlockProcessError(event pcBlockVerificationFailure) (Event, error) {
	// The peers may have been just removed due to errors, low speed or timeouts.
	sc.removePeer(event.firstPeerID)
	if event.firstPeerID != event.secondPeerID {
		sc.removePeer(event.secondPeerID)
	}

	if sc.allBlocksProcessed() {
		return scFinishedEv{reason: "error on last block"}, nil
	}

	return noOp, nil
}

func (sc *scheduler) handleAddNewPeer(event bcAddNewPeer) (Event, error) {
	sc.ensurePeer(event.peerID)
	return noOp, nil
}

func (sc *scheduler) handleRemovePeer(event bcRemovePeer) (Event, error) {
	sc.removePeer(event.peerID)

	if sc.allBlocksProcessed() {
		return scFinishedEv{reason: "removed peer"}, nil
	}

	// Return scPeerError so the peer (and all associated blocks) is removed from
	// the processor.
	return scPeerError{peerID: event.peerID, reason: errors.New("peer was stopped")}, nil
}

func (sc *scheduler) handleTryPrunePeer(event rTryPrunePeer) (Event, error) {
	// Check the behavior of the peer responsible for delivering the block at sc.height.
	timeHeightAsked, ok := sc.pendingTime[sc.height]
	if ok && time.Since(timeHeightAsked) > sc.peerTimeout {
		// A request was sent to a peer for the block at sc.height but a response was not received
		// from that peer within sc.peerTimeout. Remove the peer. This is to ensure that a peer
		// will be timed out even if it sends blocks at higher heights but prevents progress by
		// not sending the block at the current height.
		sc.removePeer(sc.pendingBlocks[sc.height])
	}

	prunablePeers := sc.prunablePeers(sc.peerTimeout, sc.minRecvRate, event.time)
	if len(prunablePeers) == 0 {
		return noOp, nil
	}
	for _, peerID := range prunablePeers {
		sc.removePeer(peerID)
	}

	// If all blocks are processed we should finish.
	if sc.allBlocksProcessed() {
		return scFinishedEv{reason: "after try prune"}, nil
	}

	return scPeersPruned{peers: prunablePeers}, nil
}

func (sc *scheduler) handleResetState(event bcResetState) (Event, error) {
	initHeight := event.state.LastBlockHeight + 1
	if initHeight == 1 {
		initHeight = event.state.InitialHeight
	}
	sc.initHeight = initHeight
	sc.height = initHeight
	sc.lastAdvance = time.Now()
	sc.addNewBlocks()
	return noOp, nil
}

func (sc *scheduler) handleTrySchedule(event rTrySchedule) (Event, error) {
	if time.Since(sc.lastAdvance) > sc.syncTimeout {
		return scFinishedEv{reason: "timeout, no advance"}, nil
	}

	nextHeight := sc.nextHeightToSchedule()
	if nextHeight == -1 {
		return noOp, nil
	}

	bestPeerID, err := sc.selectPeer(nextHeight)
	if err != nil {
		return scSchedulerFail{reason: err}, nil
	}
	if err := sc.markPending(bestPeerID, nextHeight, event.time); err != nil {
		return scSchedulerFail{reason: err}, nil // XXX: peerError might be more appropriate
	}
	return scBlockRequest{peerID: bestPeerID, height: nextHeight}, nil
}

func (sc *scheduler) handleStatusResponse(event bcStatusResponse) (Event, error) {
	err := sc.setPeerRange(event.peerID, event.base, event.height)
	if err != nil {
		return scPeerError{peerID: event.peerID, reason: err}, nil
	}
	return noOp, nil
}

func (sc *scheduler) handle(event Event) (Event, error) {
	switch event := event.(type) {
	case bcResetState:
		return sc.handleResetState(event)
	case bcStatusResponse:
		return sc.handleStatusResponse(event)
	case bcBlockResponse:
		return sc.handleBlockResponse(event)
	case bcNoBlockResponse:
		return sc.handleNoBlockResponse(event)
	case rTrySchedule:
		return sc.handleTrySchedule(event)
	case bcAddNewPeer:
		return sc.handleAddNewPeer(event)
	case bcRemovePeer:
		return sc.handleRemovePeer(event)
	case rTryPrunePeer:
		return sc.handleTryPrunePeer(event)
	case pcBlockProcessed:
		return sc.handleBlockProcessed(event)
	case pcBlockVerificationFailure:
		return sc.handleBlockProcessError(event)
	default:
		return scSchedulerFail{reason: fmt.Errorf("unknown event %v", event)}, nil
	}
}
|
||||
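
A sketch of how the reactor's demux loop might feed this dispatcher; event construction is elided and the print statements are illustrative only:

func demoHandle(sc *scheduler, ev Event) {
	// Every branch of handle above returns a nil error, so failures
	// surface as scSchedulerFail or scPeerError events rather than
	// Go errors.
	out, _ := sc.handle(ev)
	switch out := out.(type) {
	case scBlockRequest:
		fmt.Printf("request block %d from peer %v\n", out.height, out.peerID)
	case scFinishedEv:
		fmt.Printf("block sync finished: %s\n", out.reason)
	}
}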
File diff suppressed because it is too large
@@ -1,65 +0,0 @@
package v2

import (
	"github.com/Workiva/go-datastructures/queue"
)

// Event is the type that can be added to the priority queue.
type Event queue.Item

type priority interface {
	Compare(other queue.Item) int
	Priority() int
}

type priorityLow struct{}
type priorityNormal struct{}
type priorityHigh struct{}

func (p priorityLow) Priority() int {
	return 1
}

func (p priorityNormal) Priority() int {
	return 2
}

func (p priorityHigh) Priority() int {
	return 3
}

func (p priorityLow) Compare(other queue.Item) int {
	op := other.(priority)
	if p.Priority() > op.Priority() {
		return 1
	} else if p.Priority() == op.Priority() {
		return 0
	}
	return -1
}

func (p priorityNormal) Compare(other queue.Item) int {
	op := other.(priority)
	if p.Priority() > op.Priority() {
		return 1
	} else if p.Priority() == op.Priority() {
		return 0
	}
	return -1
}

func (p priorityHigh) Compare(other queue.Item) int {
	op := other.(priority)
	if p.Priority() > op.Priority() {
		return 1
	} else if p.Priority() == op.Priority() {
		return 0
	}
	return -1
}

type noOpEvent struct {
	priorityLow
}

var noOp = noOpEvent{}
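// Defining a new event only requires embedding one of the priority structs
// above; a hypothetical example:
//
// // demoEvent satisfies queue.Item (and therefore Event) purely through
// // the embedded priorityNormal; Compare and Priority come along for free.
// type demoEvent struct {
// 	priorityNormal
// 	payload string
// }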
@@ -62,7 +62,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
 	blockStore := store.NewBlockStore(blockDB)
 
 	// one for mempool, one for consensus
-	mtx := new(tmsync.RWMutex)
+	mtx := new(tmsync.Mutex)
 	proxyAppConnMem := abciclient.NewLocalClient(mtx, app)
 	proxyAppConnCon := abciclient.NewLocalClient(mtx, app)
 
@@ -407,7 +407,7 @@ func newStateWithConfigAndBlockStore(
 	blockStore *store.BlockStore,
 ) *State {
 	// one for mempool, one for consensus
-	mtx := new(tmsync.RWMutex)
+	mtx := new(tmsync.Mutex)
 	proxyAppConnMem := abciclient.NewLocalClient(mtx, app)
 	proxyAppConnCon := abciclient.NewLocalClient(mtx, app)
 
@@ -61,6 +61,9 @@ type Metrics struct {
 
 	// Number of blockparts transmitted by peer.
 	BlockParts metrics.Counter
+
+	// Histogram of time taken per step annotated with reason that the step proceeded.
+	StepTime metrics.Histogram
 }
 
 // PrometheusMetrics returns Metrics build using Prometheus client library.
@@ -187,6 +190,12 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
 			Name: "block_parts",
 			Help: "Number of blockparts transmitted by peer.",
 		}, append(labels, "peer_id")).With(labelsAndValues...),
+		StepTime: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{
+			Namespace: namespace,
+			Subsystem: MetricsSubsystem,
+			Name:      "step_time",
+			Help:      "Time spent per step.",
+		}, append(labels, "step", "reason")).With(labelsAndValues...),
 	}
 }
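
A hypothetical call site for the new histogram, assuming the go-kit metrics API that the Metrics struct is built on; the helper name is made up:

func observeStep(m *Metrics, step, reason string, d time.Duration) {
	// Label values pair positionally with the "step" and "reason"
	// labels registered above.
	m.StepTime.With("step", step, "reason", reason).Observe(d.Seconds())
}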
@@ -77,7 +77,7 @@ func (m *NewRoundStepMessage) ValidateHeight(initialHeight int64) error {
 			m.LastCommitRound, initialHeight)
 	}
 	if m.Height > initialHeight && m.LastCommitRound < 0 {
-		return fmt.Errorf("LastCommitRound can only be negative for initial height %v", // nolint
+		return fmt.Errorf("LastCommitRound can only be negative for initial height %v",
 			initialHeight)
 	}
 	return nil
@@ -230,17 +230,15 @@ func (r *Reactor) OnStop() {
 	}
 
 	r.mtx.Lock()
-	peers := r.peers
+	// Close and wait for each of the peers to shutdown.
+	// This is safe to perform with the lock since none of the peers require the
+	// lock to complete any of the methods that the waitgroup is waiting on.
+	for _, state := range r.peers {
+		state.closer.Close()
+		state.broadcastWG.Wait()
+	}
 	r.mtx.Unlock()
-
-	// wait for all spawned peer goroutines to gracefully exit
-	for _, ps := range peers {
-		ps.closer.Close()
-	}
-	for _, ps := range peers {
-		ps.broadcastWG.Wait()
-	}
 
 	// Close the StateChannel goroutine separately since it uses its own channel
 	// to signal closure.
 	close(r.stateCloseCh)
@@ -346,7 +346,7 @@ func TestReactorWithEvidence(t *testing.T) {
 	blockStore := store.NewBlockStore(blockDB)
 
 	// one for mempool, one for consensus
-	mtx := new(tmsync.RWMutex)
+	mtx := new(tmsync.Mutex)
 	proxyAppConnMem := abciclient.NewLocalClient(mtx, app)
 	proxyAppConnCon := abciclient.NewLocalClient(mtx, app)
 
@@ -312,7 +312,7 @@ func newConsensusStateForReplay(cfg config.BaseConfig, csConfig *config.Consensu
 
 	// Create proxyAppConn connection (consensus, mempool, query)
 	clientCreator, _ := proxy.DefaultClientCreator(cfg.ProxyApp, cfg.ABCI, cfg.DBDir())
-	proxyApp := proxy.NewAppConns(clientCreator)
+	proxyApp := proxy.NewAppConns(clientCreator, proxy.NopMetrics())
 	err = proxyApp.Start()
 	if err != nil {
 		tmos.Exit(fmt.Sprintf("Error starting proxy app conns: %v", err))
@@ -24,6 +24,7 @@ func (emptyMempool) Size() int { return 0 }
 func (emptyMempool) CheckTx(_ context.Context, _ types.Tx, _ func(*abci.Response), _ mempool.TxInfo) error {
 	return nil
 }
+func (emptyMempool) RemoveTxByKey(txKey types.TxKey) error   { return nil }
 func (emptyMempool) ReapMaxBytesMaxGas(_, _ int64) types.Txs { return types.Txs{} }
 func (emptyMempool) ReapMaxTxs(n int) types.Txs              { return types.Txs{} }
 func (emptyMempool) Update(
@@ -63,7 +64,7 @@ func newMockProxyApp(appHash []byte, abciResponses *tmstate.ABCIResponses) proxy
 	if err != nil {
 		panic(err)
 	}
-	return proxy.NewAppConnConsensus(cli)
+	return proxy.NewAppConnConsensus(cli, proxy.NopMetrics())
 }
 
 type mockProxyApp struct {
@@ -745,7 +745,7 @@ func testHandshakeReplay(t *testing.T, sim *simulatorTestSuite, nBlocks int, mod
 	if nBlocks > 0 {
 		// run nBlocks against a new client to build up the app state.
 		// use a throwaway tendermint state
-		proxyApp := proxy.NewAppConns(clientCreator2)
+		proxyApp := proxy.NewAppConns(clientCreator2, proxy.NopMetrics())
 		stateDB1 := dbm.NewMemDB()
 		stateStore := sm.NewStore(stateDB1)
 		err := stateStore.Save(genesisState)
@@ -765,7 +765,7 @@ func testHandshakeReplay(t *testing.T, sim *simulatorTestSuite, nBlocks int, mod
 	// now start the app using the handshake - it should sync
 	genDoc, _ := sm.MakeGenesisDocFromFile(cfg.GenesisFile())
 	handshaker := NewHandshaker(stateStore, state, store, genDoc)
-	proxyApp := proxy.NewAppConns(clientCreator2)
+	proxyApp := proxy.NewAppConns(clientCreator2, proxy.NopMetrics())
 	if err := proxyApp.Start(); err != nil {
 		t.Fatalf("Error starting proxy app connections: %v", err)
 	}
@@ -893,7 +893,7 @@ func buildTMStateFromChain(
 	defer kvstoreApp.Close()
 	clientCreator := abciclient.NewLocalCreator(kvstoreApp)
 
-	proxyApp := proxy.NewAppConns(clientCreator)
+	proxyApp := proxy.NewAppConns(clientCreator, proxy.NopMetrics())
 	if err := proxyApp.Start(); err != nil {
 		panic(err)
 	}
@@ -960,7 +960,7 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) {
 	{
 		app := &badApp{numBlocks: 3, allHashesAreWrong: true}
 		clientCreator := abciclient.NewLocalCreator(app)
-		proxyApp := proxy.NewAppConns(clientCreator)
+		proxyApp := proxy.NewAppConns(clientCreator, proxy.NopMetrics())
 		err := proxyApp.Start()
 		require.NoError(t, err)
 		t.Cleanup(func() {
@@ -984,7 +984,7 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) {
 	{
 		app := &badApp{numBlocks: 3, onlyLastHashIsWrong: true}
 		clientCreator := abciclient.NewLocalCreator(app)
-		proxyApp := proxy.NewAppConns(clientCreator)
+		proxyApp := proxy.NewAppConns(clientCreator, proxy.NopMetrics())
 		err := proxyApp.Start()
 		require.NoError(t, err)
 		t.Cleanup(func() {
@@ -1243,7 +1243,7 @@ func TestHandshakeUpdatesValidators(t *testing.T) {
 	// now start the app using the handshake - it should sync
 	genDoc, _ := sm.MakeGenesisDocFromFile(cfg.GenesisFile())
 	handshaker := NewHandshaker(stateStore, state, store, genDoc)
-	proxyApp := proxy.NewAppConns(clientCreator)
+	proxyApp := proxy.NewAppConns(clientCreator, proxy.NopMetrics())
 	if err := proxyApp.Start(); err != nil {
 		t.Fatalf("Error starting proxy app connections: %v", err)
 	}
@@ -726,7 +726,6 @@ func (cs *State) updateToState(state sm.State) {
 	cs.Votes = cstypes.NewHeightVoteSet(state.ChainID, height, validators)
 	cs.CommitRound = -1
 	cs.LastValidators = state.LastValidators
-	cs.TriggeredTimeoutPrecommit = false
 
 	cs.state = state
 
@@ -1052,7 +1051,6 @@ func (cs *State) enterNewRound(height int64, round int32) {
 	}
 
 	cs.Votes.SetRound(tmmath.SafeAddInt32(round, 1)) // also track next round (round+1) to allow round-skipping
-	cs.TriggeredTimeoutPrecommit = false
 
 	if err := cs.eventBus.PublishEventNewRound(cs.NewRoundEvent()); err != nil {
 		cs.Logger.Error("failed publishing new round", "err", err)
@@ -1491,12 +1489,10 @@ func (cs *State) enterPrecommit(height int64, round int32) {
 // Enter: any +2/3 precommits for next round.
 func (cs *State) enterPrecommitWait(height int64, round int32) {
 	logger := cs.Logger.With("height", height, "round", round)
 
-	if cs.Height != height || round < cs.Round || (cs.Round == round && cs.TriggeredTimeoutPrecommit) {
+	if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrecommitWait <= cs.Step) {
 		logger.Debug(
 			"entering precommit wait step with invalid args",
-			"triggered_timeout", cs.TriggeredTimeoutPrecommit,
-			"current", fmt.Sprintf("%v/%v", cs.Height, cs.Round),
+			"current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step),
 		)
 		return
 	}
@@ -1512,7 +1508,7 @@ func (cs *State) enterPrecommitWait(height int64, round int32) {
 
 	defer func() {
 		// Done enterPrecommitWait:
-		cs.TriggeredTimeoutPrecommit = true
+		cs.updateRoundStep(round, cstypes.RoundStepPrecommitWait)
 		cs.newStep()
 	}()
 
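
The two hunks above replace the TriggeredTimeoutPrecommit boolean with an ordered comparison on the round step. A minimal sketch of the idea; the types and names here are illustrative, not the actual consensus code:

type roundStep int

const (
	stepPrecommit roundStep = iota
	stepPrecommitWait
	stepCommit
)

func enterPrecommitWait(cur *roundStep) {
	// Once the step has reached PrecommitWait, the timeout was already
	// scheduled for this round, so re-entry is a no-op and no separate
	// boolean latch is needed.
	if *cur >= stepPrecommitWait {
		return
	}
	*cur = stepPrecommitWait
	// ... schedule the precommit timeout here ...
}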
@@ -13,7 +13,6 @@ import (
 	"github.com/tendermint/tendermint/abci/example/kvstore"
 	"github.com/tendermint/tendermint/crypto/tmhash"
 	cstypes "github.com/tendermint/tendermint/internal/consensus/types"
-	p2pmock "github.com/tendermint/tendermint/internal/p2p/mock"
 	"github.com/tendermint/tendermint/libs/log"
 	tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
 	tmrand "github.com/tendermint/tendermint/libs/rand"
@@ -1640,13 +1639,14 @@ func TestStartNextHeightCorrectlyAfterTimeout(t *testing.T) {
 
 	ensureNewTimeout(timeoutProposeCh, height+1, round, cs1.config.Propose(round).Nanoseconds())
 	rs = cs1.GetRoundState()
-	assert.False(
+	assert.Less(
 		t,
-		rs.TriggeredTimeoutPrecommit,
-		"triggeredTimeoutPrecommit should be false at the beginning of each round")
+		rs.Step,
+		cstypes.RoundStepPrecommitWait,
+		"RoundStep should be less than 'PrecommitWait' at the beginning of each round")
 }
 
-func TestResetTimeoutPrecommitUponNewHeight(t *testing.T) {
+func TestNotEnteredTimeoutPrecommitUponNewHeight(t *testing.T) {
 	config := configSetup(t)
 
 	config.Consensus.SkipTimeoutCommit = false
@@ -1699,10 +1699,11 @@ func TestResetTimeoutPrecommitUponNewHeight(t *testing.T) {
 	ensureNewProposal(proposalCh, height+1, 0)
 
 	rs = cs1.GetRoundState()
-	assert.False(
+	assert.Less(
 		t,
-		rs.TriggeredTimeoutPrecommit,
-		"triggeredTimeoutPrecommit should be false at the beginning of each height")
+		rs.Step,
+		cstypes.RoundStepPrecommitWait,
+		"RoundStep should be less than 'PrecommitWait' at the beginning of each round")
 }
 
 //------------------------------------------------------------------------------------------
@@ -1864,7 +1865,8 @@ func TestStateOutputsBlockPartsStats(t *testing.T) {
 
 	// create dummy peer
 	cs, _ := randState(config, 1)
-	peer := p2pmock.NewPeer(nil)
+	peerID, err := types.NewNodeID("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA")
+	require.NoError(t, err)
 
 	// 1) new block part
 	parts := types.NewPartSetFromData(tmrand.Bytes(100), 10)
@@ -1875,26 +1877,26 @@
 	}
 
 	cs.ProposalBlockParts = types.NewPartSetFromHeader(parts.Header())
-	cs.handleMsg(msgInfo{msg, peer.ID()})
+	cs.handleMsg(msgInfo{msg, peerID})
 
 	statsMessage := <-cs.statsMsgQueue
 	require.Equal(t, msg, statsMessage.Msg, "")
-	require.Equal(t, peer.ID(), statsMessage.PeerID, "")
+	require.Equal(t, peerID, statsMessage.PeerID, "")
 
 	// sending the same part from different peer
 	cs.handleMsg(msgInfo{msg, "peer2"})
 
 	// sending the part with the same height, but different round
 	msg.Round = 1
-	cs.handleMsg(msgInfo{msg, peer.ID()})
+	cs.handleMsg(msgInfo{msg, peerID})
 
 	// sending the part from the smaller height
 	msg.Height = 0
-	cs.handleMsg(msgInfo{msg, peer.ID()})
+	cs.handleMsg(msgInfo{msg, peerID})
 
 	// sending the part from the bigger height
 	msg.Height = 3
-	cs.handleMsg(msgInfo{msg, peer.ID()})
+	cs.handleMsg(msgInfo{msg, peerID})
 
 	select {
 	case <-cs.statsMsgQueue:
@@ -1909,18 +1911,19 @@ func TestStateOutputVoteStats(t *testing.T) {
|
||||
|
||||
cs, vss := randState(config, 2)
|
||||
// create dummy peer
|
||||
peer := p2pmock.NewPeer(nil)
|
||||
peerID, err := types.NewNodeID("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA")
|
||||
require.NoError(t, err)
|
||||
|
||||
randBytes := tmrand.Bytes(tmhash.Size)
|
||||
|
||||
vote := signVote(vss[1], config, tmproto.PrecommitType, randBytes, types.PartSetHeader{})
|
||||
|
||||
voteMessage := &VoteMessage{vote}
|
||||
cs.handleMsg(msgInfo{voteMessage, peer.ID()})
|
||||
cs.handleMsg(msgInfo{voteMessage, peerID})
|
||||
|
||||
statsMessage := <-cs.statsMsgQueue
|
||||
require.Equal(t, voteMessage, statsMessage.Msg, "")
|
||||
require.Equal(t, peer.ID(), statsMessage.PeerID, "")
|
||||
require.Equal(t, peerID, statsMessage.PeerID, "")
|
||||
|
||||
// sending the same part from different peer
|
||||
cs.handleMsg(msgInfo{&VoteMessage{vote}, "peer2"})
|
||||
@@ -1929,7 +1932,7 @@ func TestStateOutputVoteStats(t *testing.T) {
|
||||
incrementHeight(vss[1])
|
||||
vote = signVote(vss[1], config, tmproto.PrecommitType, randBytes, types.PartSetHeader{})
|
||||
|
||||
cs.handleMsg(msgInfo{&VoteMessage{vote}, peer.ID()})
|
||||
cs.handleMsg(msgInfo{&VoteMessage{vote}, peerID})
|
||||
|
||||
select {
|
||||
case <-cs.statsMsgQueue:
|
||||
|
||||
@@ -85,12 +85,11 @@ type RoundState struct {
|
||||
ValidBlock *types.Block `json:"valid_block"` // Last known block of POL mentioned above.
|
||||
|
||||
// Last known block parts of POL mentioned above.
|
||||
ValidBlockParts *types.PartSet `json:"valid_block_parts"`
|
||||
Votes *HeightVoteSet `json:"votes"`
|
||||
CommitRound int32 `json:"commit_round"` //
|
||||
LastCommit *types.VoteSet `json:"last_commit"` // Last precommits at Height-1
|
||||
LastValidators *types.ValidatorSet `json:"last_validators"`
|
||||
TriggeredTimeoutPrecommit bool `json:"triggered_timeout_precommit"`
|
||||
ValidBlockParts *types.PartSet `json:"valid_block_parts"`
|
||||
Votes *HeightVoteSet `json:"votes"`
|
||||
CommitRound int32 `json:"commit_round"` //
|
||||
LastCommit *types.VoteSet `json:"last_commit"` // Last precommits at Height-1
|
||||
LastValidators *types.ValidatorSet `json:"last_validators"`
|
||||
}
|
||||
|
||||
// Compressed version of the RoundState for use in RPC
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
//go:build gofuzz
|
||||
// +build gofuzz
|
||||
|
||||
package consensus
|
||||
|
||||
@@ -65,7 +65,7 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) {
|
||||
|
||||
blockStore := store.NewBlockStore(blockStoreDB)
|
||||
|
||||
proxyApp := proxy.NewAppConns(abciclient.NewLocalCreator(app))
|
||||
proxyApp := proxy.NewAppConns(abciclient.NewLocalCreator(app), proxy.NopMetrics())
|
||||
proxyApp.SetLogger(logger.With("module", "proxy"))
|
||||
if err := proxyApp.Start(); err != nil {
|
||||
return fmt.Errorf("failed to start proxy app connections: %w", err)
|
||||
@@ -145,22 +145,22 @@ func randPort() int {
|
||||
return base + mrand.Intn(spread)
|
||||
}
|
||||
|
||||
func makeAddrs() (string, string, string) {
|
||||
// makeAddrs constructs local TCP addresses for node services.
|
||||
// It uses consecutive ports from a random starting point, so that concurrent
|
||||
// instances are less likely to collide.
|
||||
func makeAddrs() (p2pAddr, rpcAddr string) {
|
||||
const addrTemplate = "tcp://127.0.0.1:%d"
|
||||
start := randPort()
|
||||
return fmt.Sprintf("tcp://127.0.0.1:%d", start),
|
||||
fmt.Sprintf("tcp://127.0.0.1:%d", start+1),
|
||||
fmt.Sprintf("tcp://127.0.0.1:%d", start+2)
|
||||
return fmt.Sprintf(addrTemplate, start), fmt.Sprintf(addrTemplate, start+1)
|
||||
}
|
||||
|
||||
// getConfig returns a config for test cases
|
||||
func getConfig(t *testing.T) *config.Config {
|
||||
c := config.ResetTestRoot(t.Name())
|
||||
|
||||
// and we use random ports to run in parallel
|
||||
tm, rpc, grpc := makeAddrs()
|
||||
c.P2P.ListenAddress = tm
|
||||
c.RPC.ListenAddress = rpc
|
||||
c.RPC.GRPCListenAddress = grpc
|
||||
p2pAddr, rpcAddr := makeAddrs()
|
||||
c.P2P.ListenAddress = p2pAddr
|
||||
c.RPC.ListenAddress = rpcAddr
|
||||
return c
|
||||
}
|
||||
|
||||
|
||||
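The refactored makeAddrs drops the now-unused gRPC address and documents the consecutive-port scheme. A standalone sketch of the same pattern (the base/spread constants below are assumptions for illustration, not the values used by the test helper):

package main

import (
	"fmt"
	"math/rand"
)

// randPort picks a random base port, mirroring the test helper's approach;
// availability is probabilistic, not guaranteed.
func randPort() int {
	const base, spread = 20000, 20000
	return base + rand.Intn(spread)
}

// makeAddrs returns local p2p and RPC listen addresses on consecutive ports,
// so concurrently running test processes are unlikely to collide.
func makeAddrs() (p2pAddr, rpcAddr string) {
	const addrTemplate = "tcp://127.0.0.1:%d"
	start := randPort()
	return fmt.Sprintf(addrTemplate, start), fmt.Sprintf(addrTemplate, start+1)
}

func main() {
	p2pAddr, rpcAddr := makeAddrs()
	fmt.Println(p2pAddr, rpcAddr)
}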
@@ -71,7 +71,7 @@ func iotest(writer protoio.WriteCloser, reader protoio.ReadCloser) error {
return err
}
if n != len(bz)+visize {
return fmt.Errorf("WriteMsg() wrote %v bytes, expected %v", n, len(bz)+visize) // nolint
return fmt.Errorf("WriteMsg() wrote %v bytes, expected %v", n, len(bz)+visize)
}
lens[i] = n
}

@@ -1,3 +1,4 @@
//go:build deadlock
// +build deadlock

package sync

@@ -1,3 +1,4 @@
//go:build !deadlock
// +build !deadlock

package sync

@@ -31,14 +31,14 @@ var _ TxCache = (*LRUTxCache)(nil)
type LRUTxCache struct {
mtx tmsync.Mutex
size int
cacheMap map[[TxKeySize]byte]*list.Element
cacheMap map[types.TxKey]*list.Element
list *list.List
}

func NewLRUTxCache(cacheSize int) *LRUTxCache {
return &LRUTxCache{
size: cacheSize,
cacheMap: make(map[[TxKeySize]byte]*list.Element, cacheSize),
cacheMap: make(map[types.TxKey]*list.Element, cacheSize),
list: list.New(),
}
}
@@ -53,7 +53,7 @@ func (c *LRUTxCache) Reset() {
c.mtx.Lock()
defer c.mtx.Unlock()

c.cacheMap = make(map[[TxKeySize]byte]*list.Element, c.size)
c.cacheMap = make(map[types.TxKey]*list.Element, c.size)
c.list.Init()
}

@@ -61,7 +61,7 @@ func (c *LRUTxCache) Push(tx types.Tx) bool {
c.mtx.Lock()
defer c.mtx.Unlock()

key := TxKey(tx)
key := tx.Key()

moved, ok := c.cacheMap[key]
if ok {
@@ -72,7 +72,7 @@ func (c *LRUTxCache) Push(tx types.Tx) bool {
if c.list.Len() >= c.size {
front := c.list.Front()
if front != nil {
frontKey := front.Value.([TxKeySize]byte)
frontKey := front.Value.(types.TxKey)
delete(c.cacheMap, frontKey)
c.list.Remove(front)
}
@@ -88,7 +88,7 @@ func (c *LRUTxCache) Remove(tx types.Tx) {
c.mtx.Lock()
defer c.mtx.Unlock()

key := TxKey(tx)
key := tx.Key()
e := c.cacheMap[key]
delete(c.cacheMap, key)

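The cache now keys entries by types.TxKey (a fixed-size sha256 array) instead of the package-local [TxKeySize]byte. A self-contained sketch of the same LRU structure, using a plain [sha256.Size]byte key so it runs without the tendermint packages (names here are illustrative, not the real API):

package main

import (
	"container/list"
	"crypto/sha256"
	"fmt"
	"sync"
)

type txKey [sha256.Size]byte

// lruTxCache mirrors the structure above: a map from key to list element,
// plus a list ordered from least to most recently pushed.
type lruTxCache struct {
	mtx      sync.Mutex
	size     int
	cacheMap map[txKey]*list.Element
	list     *list.List
}

func newLRUTxCache(size int) *lruTxCache {
	return &lruTxCache{
		size:     size,
		cacheMap: make(map[txKey]*list.Element, size),
		list:     list.New(),
	}
}

// push returns false if the tx was already cached; it evicts the
// least recently pushed entry when the cache is full.
func (c *lruTxCache) push(tx []byte) bool {
	c.mtx.Lock()
	defer c.mtx.Unlock()

	key := txKey(sha256.Sum256(tx))
	if moved, ok := c.cacheMap[key]; ok {
		c.list.MoveToBack(moved)
		return false
	}
	if c.list.Len() >= c.size {
		if front := c.list.Front(); front != nil {
			delete(c.cacheMap, front.Value.(txKey))
			c.list.Remove(front)
		}
	}
	c.cacheMap[key] = c.list.PushBack(key)
	return true
}

func main() {
	c := newLRUTxCache(2)
	fmt.Println(c.push([]byte("a"))) // true: newly cached
	fmt.Println(c.push([]byte("a"))) // false: already cached
}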
@@ -7,17 +7,15 @@ import (
"github.com/tendermint/tendermint/types"
)

// nolint: golint
// TODO: Rename type.
type MempoolIDs struct {
type IDs struct {
mtx tmsync.RWMutex
peerMap map[types.NodeID]uint16
nextID uint16 // assumes that a node will never have over 65536 active peers
activeIDs map[uint16]struct{} // used to check if a given peerID key is used
}

func NewMempoolIDs() *MempoolIDs {
return &MempoolIDs{
func NewMempoolIDs() *IDs {
return &IDs{
peerMap: make(map[types.NodeID]uint16),

// reserve UnknownPeerID for mempoolReactor.BroadcastTx
@@ -28,7 +26,7 @@ func NewMempoolIDs() *MempoolIDs {

// ReserveForPeer searches for the next unused ID and assigns it to the provided
// peer.
func (ids *MempoolIDs) ReserveForPeer(peerID types.NodeID) {
func (ids *IDs) ReserveForPeer(peerID types.NodeID) {
ids.mtx.Lock()
defer ids.mtx.Unlock()

@@ -38,7 +36,7 @@ func (ids *MempoolIDs) ReserveForPeer(peerID types.NodeID) {
}

// Reclaim returns the ID reserved for the peer back to unused pool.
func (ids *MempoolIDs) Reclaim(peerID types.NodeID) {
func (ids *IDs) Reclaim(peerID types.NodeID) {
ids.mtx.Lock()
defer ids.mtx.Unlock()

@@ -50,7 +48,7 @@ func (ids *MempoolIDs) Reclaim(peerID types.NodeID) {
}

// GetForPeer returns an ID reserved for the peer.
func (ids *MempoolIDs) GetForPeer(peerID types.NodeID) uint16 {
func (ids *IDs) GetForPeer(peerID types.NodeID) uint16 {
ids.mtx.RLock()
defer ids.mtx.RUnlock()

@@ -59,7 +57,7 @@ func (ids *MempoolIDs) GetForPeer(peerID types.NodeID) uint16 {

// nextPeerID returns the next unused peer ID to use. We assume that the mutex
// is already held.
func (ids *MempoolIDs) nextPeerID() uint16 {
func (ids *IDs) nextPeerID() uint16 {
if len(ids.activeIDs) == MaxActiveIDs {
panic(fmt.Sprintf("node has maximum %d active IDs and wanted to get one more", MaxActiveIDs))
}

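The rename from MempoolIDs to IDs just removes the stutter in mempool.MempoolIDs; the reservation semantics are unchanged. A minimal sketch of the reserve/reclaim lifecycle, with a string peer ID standing in for types.NodeID (simplified: no UnknownPeerID reservation, no maximum-ID panic):

package main

import (
	"fmt"
	"sync"
)

// ids assigns a compact uint16 to each connected peer.
type ids struct {
	mtx       sync.RWMutex
	peerMap   map[string]uint16
	nextID    uint16
	activeIDs map[uint16]struct{}
}

func newIDs() *ids {
	return &ids{
		peerMap:   make(map[string]uint16),
		nextID:    1,
		activeIDs: make(map[uint16]struct{}),
	}
}

// ReserveForPeer finds the next unused ID and assigns it to the peer.
func (ids *ids) ReserveForPeer(peerID string) {
	ids.mtx.Lock()
	defer ids.mtx.Unlock()
	for {
		if _, used := ids.activeIDs[ids.nextID]; !used {
			break
		}
		ids.nextID++
	}
	ids.peerMap[peerID] = ids.nextID
	ids.activeIDs[ids.nextID] = struct{}{}
	ids.nextID++
}

// Reclaim returns the peer's ID to the unused pool.
func (ids *ids) Reclaim(peerID string) {
	ids.mtx.Lock()
	defer ids.mtx.Unlock()
	if id, ok := ids.peerMap[peerID]; ok {
		delete(ids.activeIDs, id)
		delete(ids.peerMap, peerID)
	}
}

func main() {
	r := newIDs()
	r.ReserveForPeer("peerA")
	fmt.Println(r.peerMap["peerA"]) // 1
	r.Reclaim("peerA")
}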
@@ -32,6 +32,10 @@ type Mempool interface {
// its validity and whether it should be added to the mempool.
CheckTx(ctx context.Context, tx types.Tx, callback func(*abci.Response), txInfo TxInfo) error

// RemoveTxByKey removes a transaction, identified by its key,
// from the mempool.
RemoveTxByKey(txKey types.TxKey) error

// ReapMaxBytesMaxGas reaps transactions from the mempool up to maxBytes
// bytes total with the condition that the total gasWanted must be less than
// maxGas.

@@ -20,6 +20,7 @@ func (Mempool) Size() int { return 0 }
func (Mempool) CheckTx(_ context.Context, _ types.Tx, _ func(*abci.Response), _ mempool.TxInfo) error {
return nil
}
func (Mempool) RemoveTxByKey(txKey types.TxKey) error { return nil }
func (Mempool) ReapMaxBytesMaxGas(_, _ int64) types.Txs { return types.Txs{} }
func (Mempool) ReapMaxTxs(n int) types.Txs { return types.Txs{} }
func (Mempool) Update(

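RemoveTxByKey is now part of the Mempool interface and reports failure via an error instead of taking a removeFromCache flag. A hedged sketch of the caller's side, with a fake in-memory implementation standing in for the real mempool (all names below are illustrative):

package main

import (
	"crypto/sha256"
	"errors"
	"fmt"
)

type txKey [sha256.Size]byte

// mempool captures just the part of the interface exercised here.
type mempool interface {
	RemoveTxByKey(key txKey) error
}

// fakeMempool is a stand-in implementation for the sketch.
type fakeMempool struct{ txs map[txKey][]byte }

func (m *fakeMempool) RemoveTxByKey(key txKey) error {
	if _, ok := m.txs[key]; !ok {
		return errors.New("transaction not found")
	}
	delete(m.txs, key)
	return nil
}

func main() {
	tx := []byte("demo-tx")
	key := txKey(sha256.Sum256(tx))
	var mp mempool = &fakeMempool{txs: map[txKey][]byte{key: tx}}

	// First removal succeeds; the second reports the tx as missing.
	fmt.Println(mp.RemoveTxByKey(key)) // <nil>
	fmt.Println(mp.RemoveTxByKey(key)) // transaction not found
}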
@@ -1,24 +1,9 @@
package mempool

import (
"crypto/sha256"

"github.com/tendermint/tendermint/types"
)

// TxKeySize defines the size of the transaction's key used for indexing.
const TxKeySize = sha256.Size

// TxKey is the fixed length array key used as an index.
func TxKey(tx types.Tx) [TxKeySize]byte {
return sha256.Sum256(tx)
}

// TxHashFromBytes returns the hash of a transaction from raw bytes.
func TxHashFromBytes(tx []byte) []byte {
return types.Tx(tx).Hash()
}

// TxInfo are parameters that get passed when attempting to add a tx to the
// mempool.
type TxInfo struct {

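The removed helpers show what a transaction key is: the sha256 digest of the raw transaction bytes, now exposed as types.TxKey / tx.Key() instead of package-local functions. A quick standalone illustration of the key/hash relationship (the digests are computed at runtime, not quoted from the repo):

package main

import (
	"crypto/sha256"
	"fmt"
)

func main() {
	tx := []byte{0x01, 0x02}

	// The mempool key is the fixed-size sha256 digest of the tx bytes;
	// tx.Key() in the new API computes exactly this array.
	key := sha256.Sum256(tx)
	fmt.Printf("key:  %X\n", key)

	// The hash form is the same digest as a variable-length byte slice,
	// which is what the log lines in this change print with %X.
	hash := key[:]
	fmt.Printf("hash: %X\n", hash)
}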
@@ -61,7 +61,7 @@ func TestCacheAfterUpdate(t *testing.T) {
require.NotEqual(t, len(tc.txsInCache), counter,
"cache larger than expected on testcase %d", tcIndex)

nodeVal := node.Value.([sha256.Size]byte)
nodeVal := node.Value.(types.TxKey)
expectedBz := sha256.Sum256([]byte{byte(tc.txsInCache[len(tc.txsInCache)-counter-1])})
// Reference for reading the errors:
// >>> sha256('\x00').hexdigest()
@@ -71,7 +71,7 @@ func TestCacheAfterUpdate(t *testing.T) {
// >>> sha256('\x02').hexdigest()
// 'dbc1b4c900ffe48d575b5da5c638040125f65db0fe3e24494b76ea986457d986'

require.Equal(t, expectedBz, nodeVal, "Equality failed on index %d, tc %d", counter, tcIndex)
require.EqualValues(t, expectedBz, nodeVal, "Equality failed on index %d, tc %d", counter, tcIndex)
counter++
node = node.Next()
}

@@ -3,6 +3,7 @@ package v0
import (
"bytes"
"context"
"errors"
"fmt"
"sync"
"sync/atomic"
@@ -240,7 +241,7 @@ func (mem *CListMempool) CheckTx(
// Note it's possible a tx is still in the cache but no longer in the mempool
// (eg. after committing a block, txs are removed from mempool but not cache),
// so we only record the sender for txs still in the mempool.
if e, ok := mem.txsMap.Load(mempool.TxKey(tx)); ok {
if e, ok := mem.txsMap.Load(tx.Key()); ok {
memTx := e.(*clist.CElement).Value.(*mempoolTx)
_, loaded := memTx.senders.LoadOrStore(txInfo.SenderID, true)
// TODO: consider punishing peer for dups,
@@ -327,7 +328,7 @@ func (mem *CListMempool) reqResCb(
// - resCbFirstTime (lock not held) if tx is valid
func (mem *CListMempool) addTx(memTx *mempoolTx) {
e := mem.txs.PushBack(memTx)
mem.txsMap.Store(mempool.TxKey(memTx.tx), e)
mem.txsMap.Store(memTx.tx.Key(), e)
atomic.AddInt64(&mem.txsBytes, int64(len(memTx.tx)))
mem.metrics.TxSizeBytes.Observe(float64(len(memTx.tx)))
}
@@ -338,7 +339,7 @@ func (mem *CListMempool) addTx(memTx *mempoolTx) {
func (mem *CListMempool) removeTx(tx types.Tx, elem *clist.CElement, removeFromCache bool) {
mem.txs.Remove(elem)
elem.DetachPrev()
mem.txsMap.Delete(mempool.TxKey(tx))
mem.txsMap.Delete(tx.Key())
atomic.AddInt64(&mem.txsBytes, int64(-len(tx)))

if removeFromCache {
@@ -347,13 +348,16 @@ func (mem *CListMempool) removeTx(tx types.Tx, elem *clist.CElement, removeFromC
}

// RemoveTxByKey removes a transaction from the mempool by its TxKey index.
func (mem *CListMempool) RemoveTxByKey(txKey [mempool.TxKeySize]byte, removeFromCache bool) {
func (mem *CListMempool) RemoveTxByKey(txKey types.TxKey) error {
if e, ok := mem.txsMap.Load(txKey); ok {
memTx := e.(*clist.CElement).Value.(*mempoolTx)
if memTx != nil {
mem.removeTx(memTx.tx, e.(*clist.CElement), removeFromCache)
mem.removeTx(memTx.tx, e.(*clist.CElement), false)
return nil
}
return errors.New("transaction not found")
}
return errors.New("invalid transaction found")
}

func (mem *CListMempool) isFull(txSize int) error {
@@ -409,7 +413,7 @@ func (mem *CListMempool) resCbFirstTime(
mem.addTx(memTx)
mem.logger.Debug(
"added good transaction",
"tx", mempool.TxHashFromBytes(tx),
"tx", types.Tx(tx).Hash(),
"res", r,
"height", memTx.height,
"total", mem.Size(),
@@ -419,7 +423,7 @@ func (mem *CListMempool) resCbFirstTime(
// ignore bad transaction
mem.logger.Debug(
"rejected bad transaction",
"tx", mempool.TxHashFromBytes(tx),
"tx", types.Tx(tx).Hash(),
"peerID", peerP2PID,
"res", r,
"err", postCheckErr,
@@ -460,7 +464,7 @@ func (mem *CListMempool) resCbRecheck(req *abci.Request, res *abci.Response) {
// Good, nothing to do.
} else {
// Tx became invalidated due to newly committed block.
mem.logger.Debug("tx is no longer valid", "tx", mempool.TxHashFromBytes(tx), "res", r, "err", postCheckErr)
mem.logger.Debug("tx is no longer valid", "tx", types.Tx(tx).Hash(), "res", r, "err", postCheckErr)
// NOTE: we remove tx from the cache because it might be good later
mem.removeTx(tx, mem.recheckCursor, !mem.config.KeepInvalidTxsInCache)
}
@@ -598,7 +602,7 @@ func (mem *CListMempool) Update(
// Mempool after:
// 100
// https://github.com/tendermint/tendermint/issues/3322.
if e, ok := mem.txsMap.Load(mempool.TxKey(tx)); ok {
if e, ok := mem.txsMap.Load(tx.Key()); ok {
mem.removeTx(tx, e.(*clist.CElement), false)
}
}

@@ -544,9 +544,9 @@ func TestMempoolTxsBytes(t *testing.T) {
err = mp.CheckTx(context.Background(), []byte{0x06}, nil, mempool.TxInfo{})
require.NoError(t, err)
assert.EqualValues(t, 9, mp.SizeBytes())
mp.RemoveTxByKey(mempool.TxKey([]byte{0x07}), true)
assert.Error(t, mp.RemoveTxByKey(types.Tx([]byte{0x07}).Key()))
assert.EqualValues(t, 9, mp.SizeBytes())
mp.RemoveTxByKey(mempool.TxKey([]byte{0x06}), true)
assert.NoError(t, mp.RemoveTxByKey(types.Tx([]byte{0x06}).Key()))
assert.EqualValues(t, 8, mp.SizeBytes())

}

@@ -39,7 +39,7 @@ type Reactor struct {

cfg *config.MempoolConfig
mempool *CListMempool
ids *mempool.MempoolIDs
ids *mempool.IDs

// XXX: Currently, this is the only way to get information about a peer. Ideally,
// we rely on message-oriented communication to get necessary peer data.
@@ -171,7 +171,7 @@ func (r *Reactor) handleMempoolMessage(envelope p2p.Envelope) error {

for _, tx := range protoTxs {
if err := r.mempool.CheckTx(context.Background(), types.Tx(tx), nil, txInfo); err != nil {
logger.Error("checktx failed for tx", "tx", fmt.Sprintf("%X", mempool.TxHashFromBytes(tx)), "err", err)
logger.Error("checktx failed for tx", "tx", fmt.Sprintf("%X", types.Tx(tx).Hash()), "err", err)
}
}

@@ -378,7 +378,7 @@ func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer)
}
r.Logger.Debug(
"gossiped tx to peer",
"tx", fmt.Sprintf("%X", mempool.TxHashFromBytes(memTx.tx)),
"tx", fmt.Sprintf("%X", memTx.tx.Hash()),
"peer", peerID,
)
}

@@ -3,6 +3,7 @@ package v1
import (
"bytes"
"context"
"errors"
"fmt"
"sync/atomic"
"time"
@@ -256,7 +257,7 @@ func (txmp *TxMempool) CheckTx(
return err
}

txHash := mempool.TxKey(tx)
txHash := tx.Key()

// We add the transaction to the mempool's cache and if the transaction already
// exists, i.e. false is returned, then we check if we've seen this transaction
@@ -304,6 +305,19 @@ func (txmp *TxMempool) CheckTx(
return nil
}

func (txmp *TxMempool) RemoveTxByKey(txKey types.TxKey) error {
txmp.Lock()
defer txmp.Unlock()

// remove the committed transaction from the transaction store and indexes
if wtx := txmp.txStore.GetTxByHash(txKey); wtx != nil {
txmp.removeTx(wtx, false)
return nil
}

return errors.New("transaction not found")
}

// Flush flushes out the mempool. It acquires a read-lock, fetches all the
// transactions currently in the transaction store and removes each transaction
// from the store and all indexes and finally resets the cache.
@@ -451,7 +465,7 @@ func (txmp *TxMempool) Update(
}

// remove the committed transaction from the transaction store and indexes
if wtx := txmp.txStore.GetTxByHash(mempool.TxKey(tx)); wtx != nil {
if wtx := txmp.txStore.GetTxByHash(tx.Key()); wtx != nil {
txmp.removeTx(wtx, false)
}
}
@@ -629,7 +643,7 @@ func (txmp *TxMempool) defaultTxCallback(req *abci.Request, res *abci.Response)
tx := req.GetCheckTx().Tx
wtx := txmp.recheckCursor.Value.(*WrappedTx)
if !bytes.Equal(tx, wtx.tx) {
panic(fmt.Sprintf("re-CheckTx transaction mismatch; got: %X, expected: %X", wtx.tx.Hash(), mempool.TxKey(tx)))
panic(fmt.Sprintf("re-CheckTx transaction mismatch; got: %X, expected: %X", wtx.tx.Hash(), types.Tx(tx).Key()))
}

// Only evaluate transactions that have not been removed. This can happen
@@ -647,7 +661,7 @@ func (txmp *TxMempool) defaultTxCallback(req *abci.Request, res *abci.Response)
txmp.logger.Debug(
"existing transaction no longer valid; failed re-CheckTx callback",
"priority", wtx.priority,
"tx", fmt.Sprintf("%X", mempool.TxHashFromBytes(wtx.tx)),
"tx", fmt.Sprintf("%X", wtx.tx.Hash()),
"err", err,
"code", checkTxRes.CheckTx.Code,
)
@@ -784,13 +798,13 @@ func (txmp *TxMempool) removeTx(wtx *WrappedTx, removeFromCache bool) {
// the height and time based indexes.
func (txmp *TxMempool) purgeExpiredTxs(blockHeight int64) {
now := time.Now()
expiredTxs := make(map[[mempool.TxKeySize]byte]*WrappedTx)
expiredTxs := make(map[types.TxKey]*WrappedTx)

if txmp.config.TTLNumBlocks > 0 {
purgeIdx := -1
for i, wtx := range txmp.heightIndex.txs {
if (blockHeight - wtx.height) > txmp.config.TTLNumBlocks {
expiredTxs[mempool.TxKey(wtx.tx)] = wtx
expiredTxs[wtx.tx.Key()] = wtx
purgeIdx = i
} else {
// since the index is sorted, we know no other txs can be be purged
@@ -807,7 +821,7 @@ func (txmp *TxMempool) purgeExpiredTxs(blockHeight int64) {
purgeIdx := -1
for i, wtx := range txmp.timestampIndex.txs {
if now.Sub(wtx.timestamp) > txmp.config.TTLDuration {
expiredTxs[mempool.TxKey(wtx.tx)] = wtx
expiredTxs[wtx.tx.Key()] = wtx
purgeIdx = i
} else {
// since the index is sorted, we know no other txs can be be purged

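purgeExpiredTxs collects expired entries from two sorted indexes (by height and by timestamp), stopping at the first non-expired entry. A compact sketch of the same early-exit scan over a sorted slice (the wtx struct and function names below are simplified stand-ins, not the mempool's types):

package main

import (
	"fmt"
	"time"
)

type wtx struct {
	name      string
	timestamp time.Time
}

// purgeExpired walks a slice sorted by timestamp (oldest first) and returns
// the expired prefix; because the index is sorted, the scan can stop at the
// first entry that is still within its TTL.
func purgeExpired(txs []wtx, ttl time.Duration, now time.Time) (expired, kept []wtx) {
	for i, tx := range txs {
		if now.Sub(tx.timestamp) <= ttl {
			return txs[:i], txs[i:]
		}
	}
	return txs, nil
}

func main() {
	now := time.Now()
	txs := []wtx{
		{"old", now.Add(-2 * time.Hour)},
		{"recent", now.Add(-time.Minute)},
	}
	expired, kept := purgeExpired(txs, time.Hour, now)
	fmt.Println(len(expired), len(kept)) // 1 1
}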
@@ -226,10 +226,10 @@ func TestTxMempool_ReapMaxBytesMaxGas(t *testing.T) {
require.Equal(t, len(tTxs), txmp.Size())
require.Equal(t, int64(5690), txmp.SizeBytes())

txMap := make(map[[mempool.TxKeySize]byte]testTx)
txMap := make(map[types.TxKey]testTx)
priorities := make([]int64, len(tTxs))
for i, tTx := range tTxs {
txMap[mempool.TxKey(tTx.tx)] = tTx
txMap[tTx.tx.Key()] = tTx
priorities[i] = tTx.priority
}

@@ -241,7 +241,7 @@ func TestTxMempool_ReapMaxBytesMaxGas(t *testing.T) {
ensurePrioritized := func(reapedTxs types.Txs) {
reapedPriorities := make([]int64, len(reapedTxs))
for i, rTx := range reapedTxs {
reapedPriorities[i] = txMap[mempool.TxKey(rTx)].priority
reapedPriorities[i] = txMap[rTx.Key()].priority
}

require.Equal(t, priorities[:len(reapedPriorities)], reapedPriorities)
@@ -276,10 +276,10 @@ func TestTxMempool_ReapMaxTxs(t *testing.T) {
require.Equal(t, len(tTxs), txmp.Size())
require.Equal(t, int64(5690), txmp.SizeBytes())

txMap := make(map[[mempool.TxKeySize]byte]testTx)
txMap := make(map[types.TxKey]testTx)
priorities := make([]int64, len(tTxs))
for i, tTx := range tTxs {
txMap[mempool.TxKey(tTx.tx)] = tTx
txMap[tTx.tx.Key()] = tTx
priorities[i] = tTx.priority
}

@@ -291,7 +291,7 @@ func TestTxMempool_ReapMaxTxs(t *testing.T) {
ensurePrioritized := func(reapedTxs types.Txs) {
reapedPriorities := make([]int64, len(reapedTxs))
for i, rTx := range reapedTxs {
reapedPriorities[i] = txMap[mempool.TxKey(rTx)].priority
reapedPriorities[i] = txMap[rTx.Key()].priority
}

require.Equal(t, priorities[:len(reapedPriorities)], reapedPriorities)

@@ -39,7 +39,7 @@ type Reactor struct {

cfg *config.MempoolConfig
mempool *TxMempool
ids *mempool.MempoolIDs
ids *mempool.IDs

// XXX: Currently, this is the only way to get information about a peer. Ideally,
// we rely on message-oriented communication to get necessary peer data.
@@ -178,7 +178,7 @@ func (r *Reactor) handleMempoolMessage(envelope p2p.Envelope) error {

for _, tx := range protoTxs {
if err := r.mempool.CheckTx(context.Background(), types.Tx(tx), nil, txInfo); err != nil {
logger.Error("checktx failed for tx", "tx", fmt.Sprintf("%X", mempool.TxHashFromBytes(tx)), "err", err)
logger.Error("checktx failed for tx", "tx", fmt.Sprintf("%X", types.Tx(tx).Hash()), "err", err)
}
}

@@ -386,7 +386,7 @@ func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer)
}
r.Logger.Debug(
"gossiped tx to peer",
"tx", fmt.Sprintf("%X", mempool.TxHashFromBytes(memTx.tx)),
"tx", fmt.Sprintf("%X", memTx.tx.Hash()),
"peer", peerID,
)
}

@@ -6,7 +6,6 @@ import (

"github.com/tendermint/tendermint/internal/libs/clist"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
"github.com/tendermint/tendermint/internal/mempool"
"github.com/tendermint/tendermint/types"
)

@@ -17,7 +16,7 @@ type WrappedTx struct {
tx types.Tx

// hash defines the transaction hash and the primary key used in the mempool
hash [mempool.TxKeySize]byte
hash types.TxKey

// height defines the height at which the transaction was validated at
height int64
@@ -66,14 +65,14 @@ func (wtx *WrappedTx) Size() int {
// need mutative access.
type TxStore struct {
mtx tmsync.RWMutex
hashTxs map[[mempool.TxKeySize]byte]*WrappedTx // primary index
senderTxs map[string]*WrappedTx // sender is defined by the ABCI application
hashTxs map[types.TxKey]*WrappedTx // primary index
senderTxs map[string]*WrappedTx // sender is defined by the ABCI application
}

func NewTxStore() *TxStore {
return &TxStore{
senderTxs: make(map[string]*WrappedTx),
hashTxs: make(map[[mempool.TxKeySize]byte]*WrappedTx),
hashTxs: make(map[types.TxKey]*WrappedTx),
}
}

@@ -110,7 +109,7 @@ func (txs *TxStore) GetTxBySender(sender string) *WrappedTx {
}

// GetTxByHash returns a *WrappedTx by the transaction's hash.
func (txs *TxStore) GetTxByHash(hash [mempool.TxKeySize]byte) *WrappedTx {
func (txs *TxStore) GetTxByHash(hash types.TxKey) *WrappedTx {
txs.mtx.RLock()
defer txs.mtx.RUnlock()

@@ -119,7 +118,7 @@ func (txs *TxStore) GetTxByHash(hash [mempool.TxKeySize]byte) *WrappedTx {

// IsTxRemoved returns true if a transaction by hash is marked as removed and
// false otherwise.
func (txs *TxStore) IsTxRemoved(hash [mempool.TxKeySize]byte) bool {
func (txs *TxStore) IsTxRemoved(hash types.TxKey) bool {
txs.mtx.RLock()
defer txs.mtx.RUnlock()

@@ -142,7 +141,7 @@ func (txs *TxStore) SetTx(wtx *WrappedTx) {
txs.senderTxs[wtx.sender] = wtx
}

txs.hashTxs[mempool.TxKey(wtx.tx)] = wtx
txs.hashTxs[wtx.tx.Key()] = wtx
}

// RemoveTx removes a *WrappedTx from the transaction store. It deletes all
@@ -155,13 +154,13 @@ func (txs *TxStore) RemoveTx(wtx *WrappedTx) {
delete(txs.senderTxs, wtx.sender)
}

delete(txs.hashTxs, mempool.TxKey(wtx.tx))
delete(txs.hashTxs, wtx.tx.Key())
wtx.removed = true
}

// TxHasPeer returns true if a transaction by hash has a given peer ID and false
// otherwise. If the transaction does not exist, false is returned.
func (txs *TxStore) TxHasPeer(hash [mempool.TxKeySize]byte, peerID uint16) bool {
func (txs *TxStore) TxHasPeer(hash types.TxKey, peerID uint16) bool {
txs.mtx.RLock()
defer txs.mtx.RUnlock()

@@ -179,7 +178,7 @@ func (txs *TxStore) TxHasPeer(hash [mempool.TxKeySize]byte, peerID uint16) bool
// We return true if we've already recorded the given peer for this transaction
// and false otherwise. If the transaction does not exist by hash, we return
// (nil, false).
func (txs *TxStore) GetOrSetPeerByTxHash(hash [mempool.TxKeySize]byte, peerID uint16) (*WrappedTx, bool) {
func (txs *TxStore) GetOrSetPeerByTxHash(hash types.TxKey, peerID uint16) (*WrappedTx, bool) {
txs.mtx.Lock()
defer txs.mtx.Unlock()


@@ -8,7 +8,7 @@ import (
"time"

"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/internal/mempool"
"github.com/tendermint/tendermint/types"
)

func TestTxStore_GetTxBySender(t *testing.T) {
@@ -39,7 +39,7 @@ func TestTxStore_GetTxByHash(t *testing.T) {
timestamp: time.Now(),
}

key := mempool.TxKey(wtx.tx)
key := wtx.tx.Key()
res := txs.GetTxByHash(key)
require.Nil(t, res)

@@ -58,7 +58,7 @@ func TestTxStore_SetTx(t *testing.T) {
timestamp: time.Now(),
}

key := mempool.TxKey(wtx.tx)
key := wtx.tx.Key()
txs.SetTx(wtx)

res := txs.GetTxByHash(key)
@@ -81,10 +81,10 @@ func TestTxStore_GetOrSetPeerByTxHash(t *testing.T) {
timestamp: time.Now(),
}

key := mempool.TxKey(wtx.tx)
key := wtx.tx.Key()
txs.SetTx(wtx)

res, ok := txs.GetOrSetPeerByTxHash(mempool.TxKey([]byte("test_tx_2")), 15)
res, ok := txs.GetOrSetPeerByTxHash(types.Tx([]byte("test_tx_2")).Key(), 15)
require.Nil(t, res)
require.False(t, ok)

@@ -110,7 +110,7 @@ func TestTxStore_RemoveTx(t *testing.T) {

txs.SetTx(wtx)

key := mempool.TxKey(wtx.tx)
key := wtx.tx.Key()
res := txs.GetTxByHash(key)
require.NotNil(t, res)


@@ -1,74 +0,0 @@
package p2p

import (
"github.com/tendermint/tendermint/internal/p2p/conn"
"github.com/tendermint/tendermint/libs/service"
)

// Reactor is responsible for handling incoming messages on one or more
// Channel. Switch calls GetChannels when reactor is added to it. When a new
// peer joins our node, InitPeer and AddPeer are called. RemovePeer is called
// when the peer is stopped. Receive is called when a message is received on a
// channel associated with this reactor.
//
// Peer#Send or Peer#TrySend should be used to send the message to a peer.
type Reactor interface {
service.Service // Start, Stop

// SetSwitch allows setting a switch.
SetSwitch(*Switch)

// GetChannels returns the list of MConnection.ChannelDescriptor. Make sure
// that each ID is unique across all the reactors added to the switch.
GetChannels() []*conn.ChannelDescriptor

// InitPeer is called by the switch before the peer is started. Use it to
// initialize data for the peer (e.g. peer state).
//
// NOTE: The switch won't call AddPeer nor RemovePeer if it fails to start
// the peer. Do not store any data associated with the peer in the reactor
// itself unless you don't want to have a state, which is never cleaned up.
InitPeer(peer Peer) Peer

// AddPeer is called by the switch after the peer is added and successfully
// started. Use it to start goroutines communicating with the peer.
AddPeer(peer Peer)

// RemovePeer is called by the switch when the peer is stopped (due to error
// or other reason).
RemovePeer(peer Peer, reason interface{})

// Receive is called by the switch when msgBytes is received from the peer.
//
// NOTE reactor can not keep msgBytes around after Receive completes without
// copying.
//
// CONTRACT: msgBytes are not nil.
//
// XXX: do not call any methods that can block or incur heavy processing.
// https://github.com/tendermint/tendermint/issues/2888
Receive(chID byte, peer Peer, msgBytes []byte)
}

//--------------------------------------

type BaseReactor struct {
service.BaseService // Provides Start, Stop, .Quit
Switch *Switch
}

func NewBaseReactor(name string, impl Reactor) *BaseReactor {
return &BaseReactor{
BaseService: *service.NewBaseService(nil, name, impl),
Switch: nil,
}
}

func (br *BaseReactor) SetSwitch(sw *Switch) {
br.Switch = sw
}
func (*BaseReactor) GetChannels() []*conn.ChannelDescriptor { return nil }
func (*BaseReactor) AddPeer(peer Peer) {}
func (*BaseReactor) RemovePeer(peer Peer, reason interface{}) {}
func (*BaseReactor) Receive(chID byte, peer Peer, msgBytes []byte) {}
func (*BaseReactor) InitPeer(peer Peer) Peer { return peer }
@@ -1,3 +1,4 @@
//go:build go1.10
// +build go1.10

package conn

@@ -1,3 +1,4 @@
//go:build !go1.10
// +build !go1.10

package conn

@@ -64,15 +64,11 @@ initialization of the connection.

There are two methods for sending messages:
func (m MConnection) Send(chID byte, msgBytes []byte) bool {}
func (m MConnection) TrySend(chID byte, msgBytes []byte}) bool {}

`Send(chID, msgBytes)` is a blocking call that waits until `msg` is
successfully queued for the channel with the given id byte `chID`, or until the
request times out. The message `msg` is serialized using Protobuf.

`TrySend(chID, msgBytes)` is a nonblocking call that returns false if the
channel's queue is full.

Inbound message bytes are handled with an onReceive callback function.
*/
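The doc block above describes two queueing disciplines: Send blocks until the message is queued (or times out), while TrySend fails fast when the channel's queue is full. A small channel-based sketch of the distinction (illustrative only, not the MConnection implementation):

package main

import (
	"fmt"
	"time"
)

type channelQueue chan []byte

// send blocks until the message is queued or the timeout elapses,
// mirroring the blocking Send described above.
func (q channelQueue) send(msg []byte, timeout time.Duration) bool {
	select {
	case q <- msg:
		return true
	case <-time.After(timeout):
		return false
	}
}

// trySend returns false immediately if the queue is full,
// mirroring the nonblocking TrySend.
func (q channelQueue) trySend(msg []byte) bool {
	select {
	case q <- msg:
		return true
	default:
		return false
	}
}

func main() {
	q := make(channelQueue, 1)
	fmt.Println(q.trySend([]byte("a")))                   // true: queue had room
	fmt.Println(q.trySend([]byte("b")))                   // false: queue full
	fmt.Println(q.send([]byte("c"), 50*time.Millisecond)) // false: times out
}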
type MConnection struct {
@@ -265,43 +261,6 @@ func (c *MConnection) stopServices() (alreadyStopped bool) {
return false
}

// FlushStop replicates the logic of OnStop.
// It additionally ensures that all successful
// .Send() calls will get flushed before closing
// the connection.
func (c *MConnection) FlushStop() {
if c.stopServices() {
return
}

// this block is unique to FlushStop
{
// wait until the sendRoutine exits
// so we dont race on calling sendSomePacketMsgs
<-c.doneSendRoutine

// Send and flush all pending msgs.
// Since sendRoutine has exited, we can call this
// safely
eof := c.sendSomePacketMsgs()
for !eof {
eof = c.sendSomePacketMsgs()
}
c.flush()

// Now we can close the connection
}

c.conn.Close()

// We can't close pong safely here because
// recvRoutine may write to it after we've stopped.
// Though it doesn't need to get closed at all,
// we close it @ recvRoutine.

// c.Stop()
}

// OnStop implements BaseService
func (c *MConnection) OnStop() {
if c.stopServices() {
@@ -375,49 +334,6 @@ func (c *MConnection) Send(chID byte, msgBytes []byte) bool {
return success
}

// Queues a message to be sent to channel.
// Nonblocking, returns true if successful.
func (c *MConnection) TrySend(chID byte, msgBytes []byte) bool {
if !c.IsRunning() {
return false
}

c.Logger.Debug("TrySend", "channel", chID, "conn", c, "msgBytes", msgBytes)

// Send message to channel.
channel, ok := c.channelsIdx[chID]
if !ok {
c.Logger.Error(fmt.Sprintf("Cannot send bytes, unknown channel %X", chID))
return false
}

ok = channel.trySendBytes(msgBytes)
if ok {
// Wake up sendRoutine if necessary
select {
case c.send <- struct{}{}:
default:
}
}

return ok
}

// CanSend returns true if you can send more data onto the chID, false
// otherwise. Use only as a heuristic.
func (c *MConnection) CanSend(chID byte) bool {
if !c.IsRunning() {
return false
}

channel, ok := c.channelsIdx[chID]
if !ok {
c.Logger.Error(fmt.Sprintf("Unknown channel %X", chID))
return false
}
return channel.canSend()
}

// sendRoutine polls for packets to send from channels.
func (c *MConnection) sendRoutine() {
defer c._recover()
@@ -682,13 +598,6 @@ func (c *MConnection) maxPacketMsgSize() int {
return len(bz)
}

type ConnectionStatus struct {
Duration time.Duration
SendMonitor flow.Status
RecvMonitor flow.Status
Channels []ChannelStatus
}

type ChannelStatus struct {
ID byte
SendQueueCapacity int
@@ -697,24 +606,6 @@ type ChannelStatus struct {
RecentlySent int64
}

func (c *MConnection) Status() ConnectionStatus {
var status ConnectionStatus
status.Duration = time.Since(c.created)
status.SendMonitor = c.sendMonitor.Status()
status.RecvMonitor = c.recvMonitor.Status()
status.Channels = make([]ChannelStatus, len(c.channels))
for i, channel := range c.channels {
status.Channels[i] = ChannelStatus{
ID: channel.desc.ID,
SendQueueCapacity: cap(channel.sendQueue),
SendQueueSize: int(atomic.LoadInt32(&channel.sendQueueSize)),
Priority: channel.desc.Priority,
RecentlySent: atomic.LoadInt64(&channel.recentlySent),
}
}
return status
}

//-----------------------------------------------------------------------------

type ChannelDescriptor struct {
@@ -800,30 +691,6 @@ func (ch *Channel) sendBytes(bytes []byte) bool {
}
}

// Queues message to send to this channel.
// Nonblocking, returns true if successful.
// Goroutine-safe
func (ch *Channel) trySendBytes(bytes []byte) bool {
select {
case ch.sendQueue <- bytes:
atomic.AddInt32(&ch.sendQueueSize, 1)
return true
default:
return false
}
}

// Goroutine-safe
func (ch *Channel) loadSendQueueSize() (size int) {
return int(atomic.LoadInt32(&ch.sendQueueSize))
}

// Goroutine-safe
// Use only as a heuristic.
func (ch *Channel) canSend() bool {
return ch.loadSendQueueSize() < defaultSendQueueCapacity
}

// Returns true if any PacketMsgs are pending to be sent.
// Call before calling nextPacketMsg()
// Goroutine-safe

@@ -69,9 +69,6 @@ func TestMConnectionSendFlushStop(t *testing.T) {
errCh <- err
}()

// stop the conn - it should flush all conns
clientConn.FlushStop()

timer := time.NewTimer(3 * time.Second)
select {
case <-errCh:
@@ -97,16 +94,14 @@ func TestMConnectionSend(t *testing.T) {
if err != nil {
t.Error(err)
}
assert.True(t, mconn.CanSend(0x01))

msg = []byte("Spider-Man")
assert.True(t, mconn.TrySend(0x01, msg))
assert.True(t, mconn.Send(0x01, msg))
_, err = server.Read(make([]byte, len(msg)))
if err != nil {
t.Error(err)
}

assert.False(t, mconn.CanSend(0x05), "CanSend should return false because channel is unknown")
assert.False(t, mconn.Send(0x05, []byte("Absorbing Man")), "Send should return false because channel is unknown")
}

@@ -145,20 +140,6 @@ func TestMConnectionReceive(t *testing.T) {
}
}

func TestMConnectionStatus(t *testing.T) {
server, client := NetPipe()
t.Cleanup(closeAll(t, client, server))

mconn := createTestMConnection(client)
err := mconn.Start()
require.Nil(t, err)
t.Cleanup(stopAll(t, mconn))

status := mconn.Status()
assert.NotNil(t, status)
assert.Zero(t, status.Channels[0].SendQueueSize)
}

func TestMConnectionPongTimeoutResultsInError(t *testing.T) {
server, client := net.Pipe()
t.Cleanup(closeAll(t, client, server))
@@ -514,18 +495,15 @@ func TestMConnectionTrySend(t *testing.T) {

msg := []byte("Semicolon-Woman")
resultCh := make(chan string, 2)
assert.True(t, mconn.TrySend(0x01, msg))
assert.True(t, mconn.Send(0x01, msg))
_, err = server.Read(make([]byte, len(msg)))
require.NoError(t, err)
assert.True(t, mconn.CanSend(0x01))
assert.True(t, mconn.TrySend(0x01, msg))
assert.False(t, mconn.CanSend(0x01))
assert.True(t, mconn.Send(0x01, msg))
go func() {
mconn.TrySend(0x01, msg)
mconn.Send(0x01, msg)
resultCh <- "TrySend"
}()
assert.False(t, mconn.CanSend(0x01))
assert.False(t, mconn.TrySend(0x01, msg))
assert.False(t, mconn.Send(0x01, msg))
assert.Equal(t, "TrySend", <-resultCh)
}

@@ -195,7 +195,7 @@ func TestSecretConnectionReadWrite(t *testing.T) {
|
||||
compareWritesReads := func(writes []string, reads []string) {
|
||||
for {
|
||||
// Pop next write & corresponding reads
|
||||
var read, write string = "", writes[0]
|
||||
var read, write = "", writes[0]
|
||||
var readCount = 0
|
||||
for _, readChunk := range reads {
|
||||
read += readChunk
|
||||
|
||||
@@ -1,82 +0,0 @@
|
||||
package p2p
|
||||
|
||||
import (
|
||||
"net"
|
||||
|
||||
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
|
||||
)
|
||||
|
||||
// ConnSet is a lookup table for connections and all their ips.
|
||||
type ConnSet interface {
|
||||
Has(net.Conn) bool
|
||||
HasIP(net.IP) bool
|
||||
Set(net.Conn, []net.IP)
|
||||
Remove(net.Conn)
|
||||
RemoveAddr(net.Addr)
|
||||
}
|
||||
|
||||
type connSetItem struct {
|
||||
conn net.Conn
|
||||
ips []net.IP
|
||||
}
|
||||
|
||||
type connSet struct {
|
||||
tmsync.RWMutex
|
||||
|
||||
conns map[string]connSetItem
|
||||
}
|
||||
|
||||
// NewConnSet returns a ConnSet implementation.
|
||||
func NewConnSet() ConnSet {
|
||||
return &connSet{
|
||||
conns: map[string]connSetItem{},
|
||||
}
|
||||
}
|
||||
|
||||
func (cs *connSet) Has(c net.Conn) bool {
|
||||
cs.RLock()
|
||||
defer cs.RUnlock()
|
||||
|
||||
_, ok := cs.conns[c.RemoteAddr().String()]
|
||||
|
||||
return ok
|
||||
}
|
||||
|
||||
func (cs *connSet) HasIP(ip net.IP) bool {
|
||||
cs.RLock()
|
||||
defer cs.RUnlock()
|
||||
|
||||
for _, c := range cs.conns {
|
||||
for _, known := range c.ips {
|
||||
if known.Equal(ip) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func (cs *connSet) Remove(c net.Conn) {
|
||||
cs.Lock()
|
||||
defer cs.Unlock()
|
||||
|
||||
delete(cs.conns, c.RemoteAddr().String())
|
||||
}
|
||||
|
||||
func (cs *connSet) RemoveAddr(addr net.Addr) {
|
||||
cs.Lock()
|
||||
defer cs.Unlock()
|
||||
|
||||
delete(cs.conns, addr.String())
|
||||
}
|
||||
|
||||
func (cs *connSet) Set(c net.Conn, ips []net.IP) {
|
||||
cs.Lock()
|
||||
defer cs.Unlock()
|
||||
|
||||
cs.conns[c.RemoteAddr().String()] = connSetItem{
|
||||
conn: c,
|
||||
ips: ips,
|
||||
}
|
||||
}
|
||||
@@ -17,7 +17,7 @@ func (e ErrFilterTimeout) Error() string {
|
||||
// ErrRejected indicates that a Peer was rejected carrying additional
|
||||
// information as to the reason.
|
||||
type ErrRejected struct {
|
||||
addr NetAddress
|
||||
addr NodeAddress
|
||||
conn net.Conn
|
||||
err error
|
||||
id types.NodeID
|
||||
@@ -30,7 +30,7 @@ type ErrRejected struct {
|
||||
}
|
||||
|
||||
// Addr returns the NetAddress for the rejected Peer.
|
||||
func (e ErrRejected) Addr() NetAddress {
|
||||
func (e ErrRejected) Addr() NodeAddress {
|
||||
return e.addr
|
||||
}
|
||||
|
||||
@@ -120,15 +120,15 @@ func (e ErrSwitchDuplicatePeerIP) Error() string {
|
||||
|
||||
// ErrSwitchConnectToSelf to be raised when trying to connect to itself.
|
||||
type ErrSwitchConnectToSelf struct {
|
||||
Addr *NetAddress
|
||||
Addr *NodeAddress
|
||||
}
|
||||
|
||||
func (e ErrSwitchConnectToSelf) Error() string {
|
||||
return fmt.Sprintf("connect to self: %v", e.Addr)
|
||||
return fmt.Sprintf("connect to self: %s", e.Addr)
|
||||
}
|
||||
|
||||
type ErrSwitchAuthenticationFailure struct {
|
||||
Dialed *NetAddress
|
||||
Dialed *NodeAddress
|
||||
Got types.NodeID
|
||||
}
|
||||
|
||||
|
||||
@@ -1,70 +0,0 @@
|
||||
package mock
|
||||
|
||||
import (
|
||||
"net"
|
||||
|
||||
"github.com/tendermint/tendermint/internal/p2p"
|
||||
"github.com/tendermint/tendermint/internal/p2p/conn"
|
||||
"github.com/tendermint/tendermint/libs/service"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
type Peer struct {
|
||||
*service.BaseService
|
||||
ip net.IP
|
||||
id types.NodeID
|
||||
addr *p2p.NetAddress
|
||||
kv map[string]interface{}
|
||||
Outbound, Persistent bool
|
||||
}
|
||||
|
||||
// NewPeer creates and starts a new mock peer. If the ip
|
||||
// is nil, random routable address is used.
|
||||
func NewPeer(ip net.IP) *Peer {
|
||||
var netAddr *p2p.NetAddress
|
||||
if ip == nil {
|
||||
_, netAddr = p2p.CreateRoutableAddr()
|
||||
} else {
|
||||
netAddr = types.NewNetAddressIPPort(ip, 26656)
|
||||
}
|
||||
nodeKey := types.GenNodeKey()
|
||||
netAddr.ID = nodeKey.ID
|
||||
mp := &Peer{
|
||||
ip: ip,
|
||||
id: nodeKey.ID,
|
||||
addr: netAddr,
|
||||
kv: make(map[string]interface{}),
|
||||
}
|
||||
mp.BaseService = service.NewBaseService(nil, "MockPeer", mp)
|
||||
if err := mp.Start(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return mp
|
||||
}
|
||||
|
||||
func (mp *Peer) FlushStop() { mp.Stop() } //nolint:errcheck //ignore error
|
||||
func (mp *Peer) TrySend(chID byte, msgBytes []byte) bool { return true }
|
||||
func (mp *Peer) Send(chID byte, msgBytes []byte) bool { return true }
|
||||
func (mp *Peer) NodeInfo() types.NodeInfo {
|
||||
return types.NodeInfo{
|
||||
NodeID: mp.addr.ID,
|
||||
ListenAddr: mp.addr.DialString(),
|
||||
}
|
||||
}
|
||||
func (mp *Peer) Status() conn.ConnectionStatus { return conn.ConnectionStatus{} }
|
||||
func (mp *Peer) ID() types.NodeID { return mp.id }
|
||||
func (mp *Peer) IsOutbound() bool { return mp.Outbound }
|
||||
func (mp *Peer) IsPersistent() bool { return mp.Persistent }
|
||||
func (mp *Peer) Get(key string) interface{} {
|
||||
if value, ok := mp.kv[key]; ok {
|
||||
return value
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (mp *Peer) Set(key string, value interface{}) {
|
||||
mp.kv[key] = value
|
||||
}
|
||||
func (mp *Peer) RemoteIP() net.IP { return mp.ip }
|
||||
func (mp *Peer) SocketAddr() *p2p.NetAddress { return mp.addr }
|
||||
func (mp *Peer) RemoteAddr() net.Addr { return &net.TCPAddr{IP: mp.ip, Port: 8800} }
|
||||
func (mp *Peer) CloseConn() error { return nil }
|
||||
@@ -1,23 +0,0 @@
|
||||
package mock
|
||||
|
||||
import (
|
||||
"github.com/tendermint/tendermint/internal/p2p"
|
||||
"github.com/tendermint/tendermint/internal/p2p/conn"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
)
|
||||
|
||||
type Reactor struct {
|
||||
p2p.BaseReactor
|
||||
}
|
||||
|
||||
func NewReactor() *Reactor {
|
||||
r := &Reactor{}
|
||||
r.BaseReactor = *p2p.NewBaseReactor("Mock-PEX", r)
|
||||
r.SetLogger(log.TestingLogger())
|
||||
return r
|
||||
}
|
||||
|
||||
func (r *Reactor) GetChannels() []*conn.ChannelDescriptor { return []*conn.ChannelDescriptor{} }
|
||||
func (r *Reactor) AddPeer(peer p2p.Peer) {}
|
||||
func (r *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) {}
|
||||
func (r *Reactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {}
|
||||
@@ -5,11 +5,8 @@ package mocks
|
||||
import (
|
||||
context "context"
|
||||
|
||||
conn "github.com/tendermint/tendermint/internal/p2p/conn"
|
||||
|
||||
crypto "github.com/tendermint/tendermint/crypto"
|
||||
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
crypto "github.com/tendermint/tendermint/crypto"
|
||||
|
||||
p2p "github.com/tendermint/tendermint/internal/p2p"
|
||||
|
||||
@@ -35,20 +32,6 @@ func (_m *Connection) Close() error {
|
||||
return r0
|
||||
}
|
||||
|
||||
// FlushClose provides a mock function with given fields:
|
||||
func (_m *Connection) FlushClose() error {
|
||||
ret := _m.Called()
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func() error); ok {
|
||||
r0 = rf()
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// Handshake provides a mock function with given fields: _a0, _a1, _a2
|
||||
func (_m *Connection) Handshake(_a0 context.Context, _a1 types.NodeInfo, _a2 crypto.PrivKey) (types.NodeInfo, crypto.PubKey, error) {
|
||||
ret := _m.Called(_a0, _a1, _a2)
|
||||
@@ -138,35 +121,14 @@ func (_m *Connection) RemoteEndpoint() p2p.Endpoint {
|
||||
}
|
||||
|
||||
// SendMessage provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Connection) SendMessage(_a0 p2p.ChannelID, _a1 []byte) (bool, error) {
|
||||
func (_m *Connection) SendMessage(_a0 p2p.ChannelID, _a1 []byte) error {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 bool
|
||||
if rf, ok := ret.Get(0).(func(p2p.ChannelID, []byte) bool); ok {
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func(p2p.ChannelID, []byte) error); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
r0 = ret.Get(0).(bool)
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(p2p.ChannelID, []byte) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// Status provides a mock function with given fields:
|
||||
func (_m *Connection) Status() conn.ConnectionStatus {
|
||||
ret := _m.Called()
|
||||
|
||||
var r0 conn.ConnectionStatus
|
||||
if rf, ok := ret.Get(0).(func() conn.ConnectionStatus); ok {
|
||||
r0 = rf()
|
||||
} else {
|
||||
r0 = ret.Get(0).(conn.ConnectionStatus)
|
||||
r0 = ret.Error(0)
|
||||
}
|
||||
|
||||
return r0
|
||||
@@ -185,24 +147,3 @@ func (_m *Connection) String() string {
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// TrySendMessage provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Connection) TrySendMessage(_a0 p2p.ChannelID, _a1 []byte) (bool, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 bool
|
||||
if rf, ok := ret.Get(0).(func(p2p.ChannelID, []byte) bool); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
r0 = ret.Get(0).(bool)
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(p2p.ChannelID, []byte) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
@@ -1,334 +0,0 @@
|
||||
// Code generated by mockery. DO NOT EDIT.
|
||||
|
||||
package mocks
|
||||
|
||||
import (
|
||||
conn "github.com/tendermint/tendermint/internal/p2p/conn"
|
||||
log "github.com/tendermint/tendermint/libs/log"
|
||||
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
|
||||
net "net"
|
||||
|
||||
types "github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
// Peer is an autogenerated mock type for the Peer type
|
||||
type Peer struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
// CloseConn provides a mock function with given fields:
|
||||
func (_m *Peer) CloseConn() error {
|
||||
ret := _m.Called()
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func() error); ok {
|
||||
r0 = rf()
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// FlushStop provides a mock function with given fields:
|
||||
func (_m *Peer) FlushStop() {
|
||||
_m.Called()
|
||||
}
|
||||
|
||||
// Get provides a mock function with given fields: _a0
|
||||
func (_m *Peer) Get(_a0 string) interface{} {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 interface{}
|
||||
if rf, ok := ret.Get(0).(func(string) interface{}); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(interface{})
|
||||
}
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// ID provides a mock function with given fields:
|
||||
func (_m *Peer) ID() types.NodeID {
|
||||
ret := _m.Called()
|
||||
|
||||
var r0 types.NodeID
|
||||
if rf, ok := ret.Get(0).(func() types.NodeID); ok {
|
||||
r0 = rf()
|
||||
} else {
|
||||
r0 = ret.Get(0).(types.NodeID)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// IsOutbound provides a mock function with given fields:
|
||||
func (_m *Peer) IsOutbound() bool {
|
||||
ret := _m.Called()
|
||||
|
||||
var r0 bool
|
||||
if rf, ok := ret.Get(0).(func() bool); ok {
|
||||
r0 = rf()
|
||||
} else {
|
||||
r0 = ret.Get(0).(bool)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// IsPersistent provides a mock function with given fields:
|
||||
func (_m *Peer) IsPersistent() bool {
|
||||
ret := _m.Called()
|
||||
|
||||
var r0 bool
|
||||
if rf, ok := ret.Get(0).(func() bool); ok {
|
||||
r0 = rf()
|
||||
} else {
|
||||
r0 = ret.Get(0).(bool)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// IsRunning provides a mock function with given fields:
|
||||
func (_m *Peer) IsRunning() bool {
|
||||
ret := _m.Called()
|
||||
|
||||
    var r0 bool
    if rf, ok := ret.Get(0).(func() bool); ok {
        r0 = rf()
    } else {
        r0 = ret.Get(0).(bool)
    }

    return r0
}

// NodeInfo provides a mock function with given fields:
func (_m *Peer) NodeInfo() types.NodeInfo {
    ret := _m.Called()

    var r0 types.NodeInfo
    if rf, ok := ret.Get(0).(func() types.NodeInfo); ok {
        r0 = rf()
    } else {
        r0 = ret.Get(0).(types.NodeInfo)
    }

    return r0
}

// OnReset provides a mock function with given fields:
func (_m *Peer) OnReset() error {
    ret := _m.Called()

    var r0 error
    if rf, ok := ret.Get(0).(func() error); ok {
        r0 = rf()
    } else {
        r0 = ret.Error(0)
    }

    return r0
}

// OnStart provides a mock function with given fields:
func (_m *Peer) OnStart() error {
    ret := _m.Called()

    var r0 error
    if rf, ok := ret.Get(0).(func() error); ok {
        r0 = rf()
    } else {
        r0 = ret.Error(0)
    }

    return r0
}

// OnStop provides a mock function with given fields:
func (_m *Peer) OnStop() {
    _m.Called()
}

// Quit provides a mock function with given fields:
func (_m *Peer) Quit() <-chan struct{} {
    ret := _m.Called()

    var r0 <-chan struct{}
    if rf, ok := ret.Get(0).(func() <-chan struct{}); ok {
        r0 = rf()
    } else {
        if ret.Get(0) != nil {
            r0 = ret.Get(0).(<-chan struct{})
        }
    }

    return r0
}

// RemoteAddr provides a mock function with given fields:
func (_m *Peer) RemoteAddr() net.Addr {
    ret := _m.Called()

    var r0 net.Addr
    if rf, ok := ret.Get(0).(func() net.Addr); ok {
        r0 = rf()
    } else {
        if ret.Get(0) != nil {
            r0 = ret.Get(0).(net.Addr)
        }
    }

    return r0
}

// RemoteIP provides a mock function with given fields:
func (_m *Peer) RemoteIP() net.IP {
    ret := _m.Called()

    var r0 net.IP
    if rf, ok := ret.Get(0).(func() net.IP); ok {
        r0 = rf()
    } else {
        if ret.Get(0) != nil {
            r0 = ret.Get(0).(net.IP)
        }
    }

    return r0
}

// Reset provides a mock function with given fields:
func (_m *Peer) Reset() error {
    ret := _m.Called()

    var r0 error
    if rf, ok := ret.Get(0).(func() error); ok {
        r0 = rf()
    } else {
        r0 = ret.Error(0)
    }

    return r0
}

// Send provides a mock function with given fields: _a0, _a1
func (_m *Peer) Send(_a0 byte, _a1 []byte) bool {
    ret := _m.Called(_a0, _a1)

    var r0 bool
    if rf, ok := ret.Get(0).(func(byte, []byte) bool); ok {
        r0 = rf(_a0, _a1)
    } else {
        r0 = ret.Get(0).(bool)
    }

    return r0
}

// Set provides a mock function with given fields: _a0, _a1
func (_m *Peer) Set(_a0 string, _a1 interface{}) {
    _m.Called(_a0, _a1)
}

// SetLogger provides a mock function with given fields: _a0
func (_m *Peer) SetLogger(_a0 log.Logger) {
    _m.Called(_a0)
}

// SocketAddr provides a mock function with given fields:
func (_m *Peer) SocketAddr() *types.NetAddress {
    ret := _m.Called()

    var r0 *types.NetAddress
    if rf, ok := ret.Get(0).(func() *types.NetAddress); ok {
        r0 = rf()
    } else {
        if ret.Get(0) != nil {
            r0 = ret.Get(0).(*types.NetAddress)
        }
    }

    return r0
}

// Start provides a mock function with given fields:
func (_m *Peer) Start() error {
    ret := _m.Called()

    var r0 error
    if rf, ok := ret.Get(0).(func() error); ok {
        r0 = rf()
    } else {
        r0 = ret.Error(0)
    }

    return r0
}

// Status provides a mock function with given fields:
func (_m *Peer) Status() conn.ConnectionStatus {
    ret := _m.Called()

    var r0 conn.ConnectionStatus
    if rf, ok := ret.Get(0).(func() conn.ConnectionStatus); ok {
        r0 = rf()
    } else {
        r0 = ret.Get(0).(conn.ConnectionStatus)
    }

    return r0
}

// Stop provides a mock function with given fields:
func (_m *Peer) Stop() error {
    ret := _m.Called()

    var r0 error
    if rf, ok := ret.Get(0).(func() error); ok {
        r0 = rf()
    } else {
        r0 = ret.Error(0)
    }

    return r0
}

// String provides a mock function with given fields:
func (_m *Peer) String() string {
    ret := _m.Called()

    var r0 string
    if rf, ok := ret.Get(0).(func() string); ok {
        r0 = rf()
    } else {
        r0 = ret.Get(0).(string)
    }

    return r0
}

// TrySend provides a mock function with given fields: _a0, _a1
func (_m *Peer) TrySend(_a0 byte, _a1 []byte) bool {
    ret := _m.Called(_a0, _a1)

    var r0 bool
    if rf, ok := ret.Get(0).(func(byte, []byte) bool); ok {
        r0 = rf(_a0, _a1)
    } else {
        r0 = ret.Get(0).(bool)
    }

    return r0
}

// Wait provides a mock function with given fields:
func (_m *Peer) Wait() {
    _m.Called()
}
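For reference, a minimal sketch of how a test might drive the generated mock above. It assumes only the standard mockery/testify pattern (the mock embeds testify's mock.Mock); the import path, channel ID, and payload are illustrative rather than taken from this changeset.

package mocks_test

import (
    "testing"

    "github.com/tendermint/tendermint/internal/p2p/mocks" // assumed path
)

func TestPeerMockSend(t *testing.T) {
    p := &mocks.Peer{}

    // Program the mock: a Send on channel 0x40 reports success.
    p.On("Send", byte(0x40), []byte("ping")).Return(true)

    if ok := p.Send(0x40, []byte("ping")); !ok {
        t.Fatal("expected mocked Send to return true")
    }
    p.AssertExpectations(t) // fails the test if the programmed call never happened
}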
@@ -1,11 +0,0 @@
// Modified for Tendermint
// Originally Copyright (c) 2013-2014 Conformal Systems LLC.
// https://github.com/conformal/btcd/blob/master/LICENSE

package p2p

import (
    "github.com/tendermint/tendermint/types"
)

type NetAddress = types.NetAddress
@@ -1,371 +0,0 @@
package p2p

import (
    "fmt"
    "io"
    "net"
    "runtime/debug"
    "time"

    tmconn "github.com/tendermint/tendermint/internal/p2p/conn"
    "github.com/tendermint/tendermint/libs/cmap"
    "github.com/tendermint/tendermint/libs/log"
    "github.com/tendermint/tendermint/libs/service"
    "github.com/tendermint/tendermint/types"
)

//go:generate ../../scripts/mockery_generate.sh Peer

const metricsTickerDuration = 10 * time.Second

// Peer is an interface representing a peer connected on a reactor.
type Peer interface {
    service.Service
    FlushStop()

    ID() types.NodeID     // peer's cryptographic ID
    RemoteIP() net.IP     // remote IP of the connection
    RemoteAddr() net.Addr // remote address of the connection

    IsOutbound() bool   // did we dial the peer
    IsPersistent() bool // do we redial this peer when we disconnect

    CloseConn() error // close original connection

    NodeInfo() types.NodeInfo // peer's info
    Status() tmconn.ConnectionStatus
    SocketAddr() *NetAddress // actual address of the socket

    Send(byte, []byte) bool
    TrySend(byte, []byte) bool

    Set(string, interface{})
    Get(string) interface{}
}
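For orientation, a rough sketch of how a reactor written against this interface would typically consume it; the pingCh constant and pingReactor type are hypothetical, and it assumes the package's BaseReactor helper for the embedded Logger and Reactor plumbing. Only the Peer methods come from the interface above.

const pingCh = byte(0x60) // hypothetical channel ID

// pingReactor is a made-up reactor fragment for illustration.
type pingReactor struct {
    BaseReactor
}

func (r *pingReactor) AddPeer(p Peer) {
    // Send blocks until the message is queued, or times out.
    if !p.Send(pingCh, []byte("ping")) {
        r.Logger.Debug("ping not sent", "peer", p.ID())
    }
}

func (r *pingReactor) Receive(chID byte, src Peer, msgBytes []byte) {
    // Best-effort echo: TrySend drops the message if the send queue is full.
    src.TrySend(chID, msgBytes)
}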

//----------------------------------------------------------

// peerConn contains the raw connection and its config.
type peerConn struct {
    outbound   bool
    persistent bool
    conn       Connection
    ip         net.IP // cached RemoteIP()
}

func newPeerConn(outbound, persistent bool, conn Connection) peerConn {
    return peerConn{
        outbound:   outbound,
        persistent: persistent,
        conn:       conn,
    }
}

// Return the IP from the connection RemoteAddr
func (pc peerConn) RemoteIP() net.IP {
    if pc.ip == nil {
        pc.ip = pc.conn.RemoteEndpoint().IP
    }
    return pc.ip
}

// peer implements Peer.
//
// Before using a peer, you will need to perform a handshake on connection.
type peer struct {
    service.BaseService

    // raw peerConn and the multiplex connection
    peerConn

    // peer's node info and the channels it knows about
    // channels = nodeInfo.Channels
    // cached to avoid copying nodeInfo in hasChannel
    nodeInfo    types.NodeInfo
    channels    []byte
    reactors    map[byte]Reactor
    onPeerError func(Peer, interface{})

    // User data
    Data *cmap.CMap

    metrics       *Metrics
    metricsTicker *time.Ticker
}

type PeerOption func(*peer)

func newPeer(
    nodeInfo types.NodeInfo,
    pc peerConn,
    reactorsByCh map[byte]Reactor,
    onPeerError func(Peer, interface{}),
    options ...PeerOption,
) *peer {
    p := &peer{
        peerConn:      pc,
        nodeInfo:      nodeInfo,
        channels:      nodeInfo.Channels,
        reactors:      reactorsByCh,
        onPeerError:   onPeerError,
        Data:          cmap.NewCMap(),
        metricsTicker: time.NewTicker(metricsTickerDuration),
        metrics:       NopMetrics(),
    }

    p.BaseService = *service.NewBaseService(nil, "Peer", p)
    for _, option := range options {
        option(p)
    }

    return p
}

// onError calls the peer error callback.
func (p *peer) onError(err interface{}) {
    p.onPeerError(p, err)
}

// String representation.
func (p *peer) String() string {
    if p.outbound {
        return fmt.Sprintf("Peer{%v %v out}", p.conn, p.ID())
    }

    return fmt.Sprintf("Peer{%v %v in}", p.conn, p.ID())
}

//---------------------------------------------------
// Implements service.Service

// SetLogger implements BaseService.
func (p *peer) SetLogger(l log.Logger) {
    p.Logger = l
}

// OnStart implements BaseService.
func (p *peer) OnStart() error {
    if err := p.BaseService.OnStart(); err != nil {
        return err
    }

    go p.processMessages()
    go p.metricsReporter()

    return nil
}

// processMessages processes messages received from the connection.
func (p *peer) processMessages() {
    defer func() {
        if r := recover(); r != nil {
            p.Logger.Error("peer message processing panic", "err", r, "stack", string(debug.Stack()))
            p.onError(fmt.Errorf("panic during peer message processing: %v", r))
        }
    }()

    for {
        chID, msg, err := p.conn.ReceiveMessage()
        if err != nil {
            p.onError(err)
            return
        }
        reactor, ok := p.reactors[byte(chID)]
        if !ok {
            p.onError(fmt.Errorf("unknown channel %v", chID))
            return
        }
        reactor.Receive(byte(chID), p, msg)
    }
}

// FlushStop mimics OnStop but additionally ensures that all successful
// .Send() calls will get flushed before closing the connection.
// NOTE: it is not safe to call this method more than once.
func (p *peer) FlushStop() {
    p.metricsTicker.Stop()
    p.BaseService.OnStop()
    if err := p.conn.FlushClose(); err != nil {
        p.Logger.Debug("error while stopping peer", "err", err)
    }
}

// OnStop implements BaseService.
func (p *peer) OnStop() {
    p.metricsTicker.Stop()
    p.BaseService.OnStop()
    if err := p.conn.Close(); err != nil {
        p.Logger.Debug("error while stopping peer", "err", err)
    }
}

//---------------------------------------------------
// Implements Peer

// ID returns the peer's ID - the hex encoded hash of its pubkey.
func (p *peer) ID() types.NodeID {
    return p.nodeInfo.ID()
}

// IsOutbound returns true if the connection is outbound, false otherwise.
func (p *peer) IsOutbound() bool {
    return p.peerConn.outbound
}

// IsPersistent returns true if the peer is persistent, false otherwise.
func (p *peer) IsPersistent() bool {
    return p.peerConn.persistent
}

// NodeInfo returns a copy of the peer's NodeInfo.
func (p *peer) NodeInfo() types.NodeInfo {
    return p.nodeInfo
}

// SocketAddr returns the address of the socket.
// For outbound peers, it's the address dialed (after DNS resolution).
// For inbound peers, it's the address returned by the underlying connection
// (not what's reported in the peer's NodeInfo).
func (p *peer) SocketAddr() *NetAddress {
    endpoint := p.peerConn.conn.RemoteEndpoint()
    return &NetAddress{
        ID:   p.ID(),
        IP:   endpoint.IP,
        Port: endpoint.Port,
    }
}

// Status returns the peer's ConnectionStatus.
func (p *peer) Status() tmconn.ConnectionStatus {
    return p.conn.Status()
}

// Send msg bytes to the channel identified by chID byte. Returns false if the
// send queue is full after timeout, specified by MConnection.
func (p *peer) Send(chID byte, msgBytes []byte) bool {
    if !p.IsRunning() {
        // see Switch#Broadcast, where we fetch the list of peers and loop over
        // them - while we're looping, one peer may be removed and stopped.
        return false
    } else if !p.hasChannel(chID) {
        return false
    }
    res, err := p.conn.SendMessage(ChannelID(chID), msgBytes)
    if err == io.EOF {
        return false
    } else if err != nil {
        p.onError(err)
        return false
    }
    if res {
        labels := []string{
            "peer_id", string(p.ID()),
            "chID", fmt.Sprintf("%#x", chID),
        }
        p.metrics.PeerSendBytesTotal.With(labels...).Add(float64(len(msgBytes)))
    }
    return res
}
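The IsRunning early-return above exists for callers like Switch#Broadcast; roughly, such a broadcast loop has the following shape. This is a sketch assuming a *PeerSet as defined later in this changeset, not the actual Switch code.

// broadcast queues msgBytes on every connected peer. A peer that is
// removed and stopped mid-loop is harmless: its Send simply returns false.
func broadcast(peers *PeerSet, chID byte, msgBytes []byte) {
    for _, p := range peers.List() {
        go func(p Peer) {
            _ = p.Send(chID, msgBytes) // false: stopped, unknown channel, or queue timeout
        }(p)
    }
}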

// TrySend msg bytes to the channel identified by chID byte. Immediately returns
// false if the send queue is full.
func (p *peer) TrySend(chID byte, msgBytes []byte) bool {
    if !p.IsRunning() {
        return false
    } else if !p.hasChannel(chID) {
        return false
    }
    res, err := p.conn.TrySendMessage(ChannelID(chID), msgBytes)
    if err == io.EOF {
        return false
    } else if err != nil {
        p.onError(err)
        return false
    }
    if res {
        labels := []string{
            "peer_id", string(p.ID()),
            "chID", fmt.Sprintf("%#x", chID),
        }
        p.metrics.PeerSendBytesTotal.With(labels...).Add(float64(len(msgBytes)))
    }
    return res
}

// Get the data for a given key.
func (p *peer) Get(key string) interface{} {
    return p.Data.Get(key)
}

// Set sets the data for the given key.
func (p *peer) Set(key string, data interface{}) {
    p.Data.Set(key, data)
}

// hasChannel returns true if the peer reported
// knowing about the given chID.
func (p *peer) hasChannel(chID byte) bool {
    for _, ch := range p.channels {
        if ch == chID {
            return true
        }
    }
    // NOTE: probably will want to remove this
    // but could be helpful while the feature is new
    p.Logger.Debug(
        "Unknown channel for peer",
        "channel",
        chID,
        "channels",
        p.channels,
    )
    return false
}

// CloseConn closes original connection. Used for cleaning up in cases where the peer had not been started at all.
func (p *peer) CloseConn() error {
    return p.peerConn.conn.Close()
}

//---------------------------------------------------
// methods only used for testing
// TODO: can we remove these?

// CloseConn closes the underlying connection
func (pc *peerConn) CloseConn() {
    pc.conn.Close()
}

// RemoteAddr returns peer's remote network address.
func (p *peer) RemoteAddr() net.Addr {
    endpoint := p.conn.RemoteEndpoint()
    return &net.TCPAddr{
        IP:   endpoint.IP,
        Port: int(endpoint.Port),
    }
}

//---------------------------------------------------

func PeerMetrics(metrics *Metrics) PeerOption {
    return func(p *peer) {
        p.metrics = metrics
    }
}

func (p *peer) metricsReporter() {
    for {
        select {
        case <-p.metricsTicker.C:
            status := p.conn.Status()
            var sendQueueSize float64
            for _, chStatus := range status.Channels {
                sendQueueSize += float64(chStatus.SendQueueSize)
            }

            p.metrics.PeerPendingSendBytes.With("peer_id", string(p.ID())).Set(sendQueueSize)
        case <-p.Quit():
            return
        }
    }
}
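One usage note on the options pattern above: newPeer applies each PeerOption after construction, so wiring in real metrics looks roughly like this (a sketch; nodeInfo, pc, reactors, and onPeerError stand in for values built elsewhere):

m := PrometheusMetrics("p2p") // or NopMetrics(), the default
p := newPeer(nodeInfo, pc, reactors, onPeerError, PeerMetrics(m))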
@@ -1,149 +0,0 @@
package p2p

import (
    "net"

    tmsync "github.com/tendermint/tendermint/internal/libs/sync"
    "github.com/tendermint/tendermint/types"
)

// IPeerSet has an (immutable) subset of the methods of PeerSet.
type IPeerSet interface {
    Has(key types.NodeID) bool
    HasIP(ip net.IP) bool
    Get(key types.NodeID) Peer
    List() []Peer
    Size() int
}

//-----------------------------------------------------------------------------

// PeerSet is a special structure for keeping a table of peers.
// Iteration over the peers is super fast and thread-safe.
type PeerSet struct {
    mtx    tmsync.Mutex
    lookup map[types.NodeID]*peerSetItem
    list   []Peer
}

type peerSetItem struct {
    peer  Peer
    index int
}

// NewPeerSet creates a new peerSet with a list of initial capacity of 256 items.
func NewPeerSet() *PeerSet {
    return &PeerSet{
        lookup: make(map[types.NodeID]*peerSetItem),
        list:   make([]Peer, 0, 256),
    }
}

// Add adds the peer to the PeerSet.
// It returns an error carrying the reason if the peer is already present.
func (ps *PeerSet) Add(peer Peer) error {
    ps.mtx.Lock()
    defer ps.mtx.Unlock()

    if ps.lookup[peer.ID()] != nil {
        return ErrSwitchDuplicatePeerID{peer.ID()}
    }

    index := len(ps.list)
    // Appending is safe even with other goroutines
    // iterating over the ps.list slice.
    ps.list = append(ps.list, peer)
    ps.lookup[peer.ID()] = &peerSetItem{peer, index}
    return nil
}

// Has returns true if the set contains the peer referred to by this
// peerKey, otherwise false.
func (ps *PeerSet) Has(peerKey types.NodeID) bool {
    ps.mtx.Lock()
    _, ok := ps.lookup[peerKey]
    ps.mtx.Unlock()
    return ok
}

// HasIP returns true if the set contains the peer referred to by this IP
// address, otherwise false.
func (ps *PeerSet) HasIP(peerIP net.IP) bool {
    ps.mtx.Lock()
    defer ps.mtx.Unlock()

    return ps.hasIP(peerIP)
}

// hasIP does not acquire a lock so it can be used in public methods which
// already lock.
func (ps *PeerSet) hasIP(peerIP net.IP) bool {
    for _, item := range ps.lookup {
        if item.peer.RemoteIP().Equal(peerIP) {
            return true
        }
    }

    return false
}

// Get looks up a peer by the provided peerKey. Returns nil if peer is not
// found.
func (ps *PeerSet) Get(peerKey types.NodeID) Peer {
    ps.mtx.Lock()
    defer ps.mtx.Unlock()
    item, ok := ps.lookup[peerKey]
    if ok {
        return item.peer
    }
    return nil
}

// Remove discards the peer by its key, if the peer was previously memoized.
// Returns true if the peer was removed, and false if it was not found
// in the set.
func (ps *PeerSet) Remove(peer Peer) bool {
    ps.mtx.Lock()
    defer ps.mtx.Unlock()

    item := ps.lookup[peer.ID()]
    if item == nil {
        return false
    }

    index := item.index
    // Create a new copy of the list but with one less item.
    // (we must copy because we'll be mutating the list).
    newList := make([]Peer, len(ps.list)-1)
    copy(newList, ps.list)
    // If it's the last peer, that's an easy special case.
    if index == len(ps.list)-1 {
        ps.list = newList
        delete(ps.lookup, peer.ID())
        return true
    }

    // Replace the popped item with the last item in the old list.
    lastPeer := ps.list[len(ps.list)-1]
    lastPeerKey := lastPeer.ID()
    lastPeerItem := ps.lookup[lastPeerKey]
    newList[index] = lastPeer
    lastPeerItem.index = index
    ps.list = newList
    delete(ps.lookup, peer.ID())
    return true
}

// Size returns the number of unique items in the peerSet.
func (ps *PeerSet) Size() int {
    ps.mtx.Lock()
    defer ps.mtx.Unlock()
    return len(ps.list)
}

// List returns the threadsafe list of peers.
func (ps *PeerSet) List() []Peer {
    ps.mtx.Lock()
    defer ps.mtx.Unlock()
    return ps.list
}
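A point worth spelling out about the structure above: List hands out the internal slice without copying, which is safe only because Add appends and Remove always builds a fresh slice (copy-on-write). A minimal sketch of the resulting usage pattern; peerA and peerB stand for any Peer values:

ps := NewPeerSet()
_ = ps.Add(peerA)
_ = ps.Add(peerB)

snapshot := ps.List()            // cheap: no copy
go func() { ps.Remove(peerA) }() // never mutates the snapshot slice
for _, p := range snapshot {
    _ = p.ID() // iteration stays valid even as peers are removed
}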
@@ -1,190 +0,0 @@
package p2p

import (
    "net"
    "sync"
    "testing"

    "github.com/stretchr/testify/assert"

    "github.com/tendermint/tendermint/libs/service"
    "github.com/tendermint/tendermint/types"
)

// mockPeer for testing the PeerSet
type mockPeer struct {
    service.BaseService
    ip net.IP
    id types.NodeID
}

func (mp *mockPeer) FlushStop() { mp.Stop() } //nolint:errcheck // ignore error
func (mp *mockPeer) TrySend(chID byte, msgBytes []byte) bool { return true }
func (mp *mockPeer) Send(chID byte, msgBytes []byte) bool    { return true }
func (mp *mockPeer) NodeInfo() types.NodeInfo                { return types.NodeInfo{} }
func (mp *mockPeer) Status() ConnectionStatus                { return ConnectionStatus{} }
func (mp *mockPeer) ID() types.NodeID                        { return mp.id }
func (mp *mockPeer) IsOutbound() bool                        { return false }
func (mp *mockPeer) IsPersistent() bool                      { return true }
func (mp *mockPeer) Get(s string) interface{}                { return s }
func (mp *mockPeer) Set(string, interface{})                 {}
func (mp *mockPeer) RemoteIP() net.IP                        { return mp.ip }
func (mp *mockPeer) SocketAddr() *NetAddress                 { return nil }
func (mp *mockPeer) RemoteAddr() net.Addr                    { return &net.TCPAddr{IP: mp.ip, Port: 8800} }
func (mp *mockPeer) CloseConn() error                        { return nil }

// Returns a mock peer
func newMockPeer(ip net.IP) *mockPeer {
    if ip == nil {
        ip = net.IP{127, 0, 0, 1}
    }
    nodeKey := types.GenNodeKey()
    return &mockPeer{
        ip: ip,
        id: nodeKey.ID,
    }
}

func TestPeerSetAddRemoveOne(t *testing.T) {
    t.Parallel()

    peerSet := NewPeerSet()

    var peerList []Peer
    for i := 0; i < 5; i++ {
        p := newMockPeer(net.IP{127, 0, 0, byte(i)})
        if err := peerSet.Add(p); err != nil {
            t.Error(err)
        }
        peerList = append(peerList, p)
    }

    n := len(peerList)
    // 1. Test removing from the front
    for i, peerAtFront := range peerList {
        removed := peerSet.Remove(peerAtFront)
        assert.True(t, removed)
        wantSize := n - i - 1
        for j := 0; j < 2; j++ {
            assert.Equal(t, false, peerSet.Has(peerAtFront.ID()), "#%d Run #%d: failed to remove peer", i, j)
            assert.Equal(t, wantSize, peerSet.Size(), "#%d Run #%d: failed to remove peer and decrement size", i, j)
            // Test the route of removing the now non-existent element
            removed := peerSet.Remove(peerAtFront)
            assert.False(t, removed)
        }
    }

    // 2. Next we are testing removing the peer at the end
    // a) Replenish the peerSet
    for _, peer := range peerList {
        if err := peerSet.Add(peer); err != nil {
            t.Error(err)
        }
    }

    // b) In reverse, remove each element
    for i := n - 1; i >= 0; i-- {
        peerAtEnd := peerList[i]
        removed := peerSet.Remove(peerAtEnd)
        assert.True(t, removed)
        assert.Equal(t, false, peerSet.Has(peerAtEnd.ID()), "#%d: failed to remove item at end", i)
        assert.Equal(t, i, peerSet.Size(), "#%d: differing sizes after peerSet.Remove(atEndPeer)", i)
    }
}

func TestPeerSetAddRemoveMany(t *testing.T) {
    t.Parallel()
    peerSet := NewPeerSet()

    peers := []Peer{}
    N := 100
    for i := 0; i < N; i++ {
        peer := newMockPeer(net.IP{127, 0, 0, byte(i)})
        if err := peerSet.Add(peer); err != nil {
            t.Errorf("failed to add new peer")
        }
        if peerSet.Size() != i+1 {
            t.Errorf("failed to add new peer and increment size")
        }
        peers = append(peers, peer)
    }

    for i, peer := range peers {
        removed := peerSet.Remove(peer)
        assert.True(t, removed)
        if peerSet.Has(peer.ID()) {
            t.Errorf("failed to remove peer")
        }
        if peerSet.Size() != len(peers)-i-1 {
            t.Errorf("failed to remove peer and decrement size")
        }
    }
}

func TestPeerSetAddDuplicate(t *testing.T) {
    t.Parallel()
    peerSet := NewPeerSet()
    peer := newMockPeer(nil)

    n := 20
    errsChan := make(chan error)
    // Add the same peer asynchronously to test the
    // concurrent guarantees of our APIs, and
    // our expectation in the end is that only
    // one addition succeeded, but the rest are
    // instances of ErrSwitchDuplicatePeerID.
    for i := 0; i < n; i++ {
        go func() {
            errsChan <- peerSet.Add(peer)
        }()
    }

    // Now collect and tally the results
    errsTally := make(map[string]int)
    for i := 0; i < n; i++ {
        err := <-errsChan

        switch err.(type) {
        case ErrSwitchDuplicatePeerID:
            errsTally["duplicateID"]++
        default:
            errsTally["other"]++
        }
    }

    // Our next procedure is to ensure that only one addition
    // succeeded and that the rest are each ErrSwitchDuplicatePeerID.
    wantErrCount, gotErrCount := n-1, errsTally["duplicateID"]
    assert.Equal(t, wantErrCount, gotErrCount, "invalid ErrSwitchDuplicatePeer count")

    wantNilErrCount, gotNilErrCount := 1, errsTally["other"]
    assert.Equal(t, wantNilErrCount, gotNilErrCount, "invalid nil errCount")
}

func TestPeerSetGet(t *testing.T) {
    t.Parallel()

    var (
        peerSet = NewPeerSet()
        peer    = newMockPeer(nil)
    )

    assert.Nil(t, peerSet.Get(peer.ID()), "expecting a nil lookup, before .Add")

    if err := peerSet.Add(peer); err != nil {
        t.Fatalf("Failed to add new peer: %v", err)
    }

    var wg sync.WaitGroup
    for i := 0; i < 10; i++ {
        // Fetch them asynchronously to test the
        // concurrent guarantees of our APIs.
        wg.Add(1)
        go func(i int) {
            defer wg.Done()
            have, want := peerSet.Get(peer.ID()), peer
            assert.Equal(t, have, want, "%d: have %v, want %v", i, have, want)
        }(i)
    }
    wg.Wait()
}
@@ -1,239 +0,0 @@
package p2p

import (
    "context"
    "fmt"
    golog "log"
    "net"
    "testing"
    "time"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"

    "github.com/tendermint/tendermint/crypto"
    "github.com/tendermint/tendermint/crypto/ed25519"
    "github.com/tendermint/tendermint/libs/bytes"
    "github.com/tendermint/tendermint/libs/log"
    "github.com/tendermint/tendermint/types"

    "github.com/tendermint/tendermint/config"
    tmconn "github.com/tendermint/tendermint/internal/p2p/conn"
)

func TestPeerBasic(t *testing.T) {
    assert, require := assert.New(t), require.New(t)

    // simulate remote peer
    rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg}
    rp.Start()
    t.Cleanup(rp.Stop)

    p, err := createOutboundPeerAndPerformHandshake(rp.Addr(), cfg, tmconn.DefaultMConnConfig())
    require.Nil(err)

    err = p.Start()
    require.Nil(err)
    t.Cleanup(func() {
        if err := p.Stop(); err != nil {
            t.Error(err)
        }
    })

    assert.True(p.IsRunning())
    assert.True(p.IsOutbound())
    assert.False(p.IsPersistent())
    p.persistent = true
    assert.True(p.IsPersistent())
    assert.Equal(rp.Addr().DialString(), p.RemoteAddr().String())
    assert.Equal(rp.ID(), p.ID())
}

func TestPeerSend(t *testing.T) {
    assert, require := assert.New(t), require.New(t)

    config := cfg

    // simulate remote peer
    rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: config}
    rp.Start()
    t.Cleanup(rp.Stop)

    p, err := createOutboundPeerAndPerformHandshake(rp.Addr(), config, tmconn.DefaultMConnConfig())
    require.Nil(err)

    err = p.Start()
    require.Nil(err)

    t.Cleanup(func() {
        if err := p.Stop(); err != nil {
            t.Error(err)
        }
    })

    assert.True(p.Send(testCh, []byte("Asylum")))
}

func createOutboundPeerAndPerformHandshake(
    addr *NetAddress,
    config *config.P2PConfig,
    mConfig tmconn.MConnConfig,
) (*peer, error) {
    chDescs := []*tmconn.ChannelDescriptor{
        {ID: testCh, Priority: 1},
    }
    pk := ed25519.GenPrivKey()
    ourNodeInfo := testNodeInfo(types.NodeIDFromPubKey(pk.PubKey()), "host_peer")
    transport := NewMConnTransport(log.TestingLogger(), mConfig, chDescs, MConnTransportOptions{})
    reactorsByCh := map[byte]Reactor{testCh: NewTestReactor(chDescs, true)}
    pc, err := testOutboundPeerConn(transport, addr, config, false, pk)
    if err != nil {
        return nil, err
    }
    peerInfo, _, err := pc.conn.Handshake(context.Background(), ourNodeInfo, pk)
    if err != nil {
        return nil, err
    }

    p := newPeer(peerInfo, pc, reactorsByCh, func(p Peer, r interface{}) {})
    p.SetLogger(log.TestingLogger().With("peer", addr))
    return p, nil
}

func testDial(addr *NetAddress, cfg *config.P2PConfig) (net.Conn, error) {
    if cfg.TestDialFail {
        return nil, fmt.Errorf("dial err (peerConfig.DialFail == true)")
    }

    conn, err := addr.DialTimeout(cfg.DialTimeout)
    if err != nil {
        return nil, err
    }
    return conn, nil
}

func testOutboundPeerConn(
    transport *MConnTransport,
    addr *NetAddress,
    config *config.P2PConfig,
    persistent bool,
    ourNodePrivKey crypto.PrivKey,
) (peerConn, error) {

    var pc peerConn
    conn, err := testDial(addr, config)
    if err != nil {
        return pc, fmt.Errorf("error creating peer: %w", err)
    }

    pc, err = testPeerConn(transport, conn, true, persistent)
    if err != nil {
        if cerr := conn.Close(); cerr != nil {
            return pc, fmt.Errorf("%v: %w", cerr.Error(), err)
        }
        return pc, err
    }

    return pc, nil
}

type remotePeer struct {
    PrivKey    crypto.PrivKey
    Config     *config.P2PConfig
    Network    string
    addr       *NetAddress
    channels   bytes.HexBytes
    listenAddr string
    listener   net.Listener
}

func (rp *remotePeer) Addr() *NetAddress {
    return rp.addr
}

func (rp *remotePeer) ID() types.NodeID {
    return types.NodeIDFromPubKey(rp.PrivKey.PubKey())
}

func (rp *remotePeer) Start() {
    if rp.listenAddr == "" {
        rp.listenAddr = "127.0.0.1:0"
    }

    l, e := net.Listen("tcp", rp.listenAddr) // any available address
    if e != nil {
        golog.Fatalf("net.Listen tcp :0: %+v", e)
    }
    rp.listener = l
    rp.addr = types.NewNetAddress(types.NodeIDFromPubKey(rp.PrivKey.PubKey()), l.Addr())
    if rp.channels == nil {
        rp.channels = []byte{testCh}
    }
    go rp.accept()
}

func (rp *remotePeer) Stop() {
    rp.listener.Close()
}

func (rp *remotePeer) Dial(addr *NetAddress) (net.Conn, error) {
    transport := NewMConnTransport(log.TestingLogger(), MConnConfig(rp.Config),
        []*ChannelDescriptor{}, MConnTransportOptions{})
    conn, err := addr.DialTimeout(1 * time.Second)
    if err != nil {
        return nil, err
    }
    pc, err := testInboundPeerConn(transport, conn)
    if err != nil {
        return nil, err
    }
    _, _, err = pc.conn.Handshake(context.Background(), rp.nodeInfo(), rp.PrivKey)
    if err != nil {
        return nil, err
    }
    return conn, err
}

func (rp *remotePeer) accept() {
    transport := NewMConnTransport(log.TestingLogger(), MConnConfig(rp.Config),
        []*ChannelDescriptor{}, MConnTransportOptions{})
    conns := []net.Conn{}

    for {
        conn, err := rp.listener.Accept()
        if err != nil {
            golog.Printf("Failed to accept conn: %+v", err)
            for _, conn := range conns {
                _ = conn.Close()
            }
            return
        }

        pc, err := testInboundPeerConn(transport, conn)
        if err != nil {
            golog.Printf("Failed to create a peer: %+v", err)
        }
        _, _, err = pc.conn.Handshake(context.Background(), rp.nodeInfo(), rp.PrivKey)
        if err != nil {
            golog.Printf("Failed to handshake a peer: %+v", err)
        }

        conns = append(conns, conn)
    }
}

func (rp *remotePeer) nodeInfo() types.NodeInfo {
    ni := types.NodeInfo{
        ProtocolVersion: defaultProtocolVersion,
        NodeID:          rp.Addr().ID,
        ListenAddr:      rp.listener.Addr().String(),
        Network:         "testing",
        Version:         "1.2.3-rc0-deadbeef",
        Channels:        rp.channels,
        Moniker:         "remote_peer",
    }
    if rp.Network != "" {
        ni.Network = rp.Network
    }
    return ni
}
@@ -180,7 +180,7 @@ func (o *PeerManagerOptions) Validate() error {

	if o.MaxPeers > 0 {
		if o.MaxConnected == 0 || o.MaxConnected+o.MaxConnectedUpgrade > o.MaxPeers {
-			return fmt.Errorf("MaxConnected %v and MaxConnectedUpgrade %v can't exceed MaxPeers %v", // nolint
+			return fmt.Errorf("MaxConnected %v and MaxConnectedUpgrade %v can't exceed MaxPeers %v",
				o.MaxConnected, o.MaxConnectedUpgrade, o.MaxPeers)
		}
	}
@@ -190,7 +190,7 @@ func (o *PeerManagerOptions) Validate() error {
		return errors.New("can't set MaxRetryTime without MinRetryTime")
	}
	if o.MinRetryTime > o.MaxRetryTime {
-		return fmt.Errorf("MinRetryTime %v is greater than MaxRetryTime %v", // nolint
+		return fmt.Errorf("MinRetryTime %v is greater than MaxRetryTime %v",
			o.MinRetryTime, o.MaxRetryTime)
	}
}
@@ -200,7 +200,7 @@ func (o *PeerManagerOptions) Validate() error {
		return errors.New("can't set MaxRetryTimePersistent without MinRetryTime")
	}
	if o.MinRetryTime > o.MaxRetryTimePersistent {
-		return fmt.Errorf("MinRetryTime %v is greater than MaxRetryTimePersistent %v", // nolint
+		return fmt.Errorf("MinRetryTime %v is greater than MaxRetryTimePersistent %v",
			o.MinRetryTime, o.MaxRetryTimePersistent)
	}
}

@@ -1,948 +0,0 @@
// Modified for Tendermint
// Originally Copyright (c) 2013-2014 Conformal Systems LLC.
// https://github.com/conformal/btcd/blob/master/LICENSE

package pex

import (
    "encoding/binary"
    "fmt"
    "hash"
    "math"
    mrand "math/rand"
    "net"
    "sync"
    "time"

    "github.com/minio/highwayhash"
    "github.com/tendermint/tendermint/crypto"
    tmsync "github.com/tendermint/tendermint/internal/libs/sync"
    "github.com/tendermint/tendermint/internal/p2p"
    tmmath "github.com/tendermint/tendermint/libs/math"
    tmrand "github.com/tendermint/tendermint/libs/rand"
    "github.com/tendermint/tendermint/libs/service"
    "github.com/tendermint/tendermint/types"
)

const (
    bucketTypeNew = 0x01
    bucketTypeOld = 0x02
)

// AddrBook is an address book used for tracking peers
// so we can gossip about them to others and select
// peers to dial.
// TODO: break this up?
type AddrBook interface {
    service.Service

    // Add our own addresses so we don't later add ourselves
    AddOurAddress(*p2p.NetAddress)
    // Check if it is our address
    OurAddress(*p2p.NetAddress) bool

    AddPrivateIDs([]string)

    // Add and remove an address
    AddAddress(addr *p2p.NetAddress, src *p2p.NetAddress) error
    RemoveAddress(*p2p.NetAddress)

    // Check if the address is in the book
    HasAddress(*p2p.NetAddress) bool

    // Do we need more peers?
    NeedMoreAddrs() bool
    // Is Address Book Empty? Answer should not depend on being in your own
    // address book, or private peers
    Empty() bool

    // Pick an address to dial
    PickAddress(biasTowardsNewAddrs int) *p2p.NetAddress

    // Mark address
    MarkGood(types.NodeID)
    MarkAttempt(*p2p.NetAddress)
    MarkBad(*p2p.NetAddress, time.Duration) // Move peer to bad peers list
    // Add bad peers back to addrBook
    ReinstateBadPeers()

    IsGood(*p2p.NetAddress) bool
    IsBanned(*p2p.NetAddress) bool

    // Send a selection of addresses to peers
    GetSelection() []*p2p.NetAddress
    // Send a selection of addresses with bias
    GetSelectionWithBias(biasTowardsNewAddrs int) []*p2p.NetAddress

    Size() int

    // Persist to disk
    Save()
}

var _ AddrBook = (*addrBook)(nil)

// addrBook - concurrency safe peer address manager.
// Implements AddrBook.
type addrBook struct {
    service.BaseService

    // accessed concurrently
    mtx        tmsync.Mutex
    ourAddrs   map[string]struct{}
    privateIDs map[types.NodeID]struct{}
    addrLookup map[types.NodeID]*knownAddress // new & old
    badPeers   map[types.NodeID]*knownAddress // blacklisted peers
    bucketsOld []map[string]*knownAddress
    bucketsNew []map[string]*knownAddress
    nOld       int
    nNew       int

    // immutable after creation
    filePath          string
    key               string // random prefix for bucket placement
    routabilityStrict bool
    hasher            hash.Hash64

    wg sync.WaitGroup
}

func mustNewHasher() hash.Hash64 {
    key := crypto.CRandBytes(highwayhash.Size)
    hasher, err := highwayhash.New64(key)
    if err != nil {
        panic(err)
    }
    return hasher
}

// NewAddrBook creates a new address book.
// Use Start to begin processing asynchronous address updates.
func NewAddrBook(filePath string, routabilityStrict bool) AddrBook {
    am := &addrBook{
        ourAddrs:          make(map[string]struct{}),
        privateIDs:        make(map[types.NodeID]struct{}),
        addrLookup:        make(map[types.NodeID]*knownAddress),
        badPeers:          make(map[types.NodeID]*knownAddress),
        filePath:          filePath,
        routabilityStrict: routabilityStrict,
    }
    am.init()
    am.BaseService = *service.NewBaseService(nil, "AddrBook", am)
    return am
}

// Initialize the buckets.
// When modifying this, don't forget to update loadFromFile()
func (a *addrBook) init() {
    a.key = crypto.CRandHex(24) // 24/2 * 8 = 96 bits
    // New addr buckets
    a.bucketsNew = make([]map[string]*knownAddress, newBucketCount)
    for i := range a.bucketsNew {
        a.bucketsNew[i] = make(map[string]*knownAddress)
    }
    // Old addr buckets
    a.bucketsOld = make([]map[string]*knownAddress, oldBucketCount)
    for i := range a.bucketsOld {
        a.bucketsOld[i] = make(map[string]*knownAddress)
    }
    a.hasher = mustNewHasher()
}

// OnStart implements Service.
func (a *addrBook) OnStart() error {
    if err := a.BaseService.OnStart(); err != nil {
        return err
    }
    a.loadFromFile(a.filePath)

    // wg.Add to ensure that any invocation of .Wait()
    // later on will wait for saveRoutine to terminate.
    a.wg.Add(1)
    go a.saveRoutine()

    return nil
}

// OnStop implements Service.
func (a *addrBook) OnStop() {
    a.BaseService.OnStop()
}

func (a *addrBook) Wait() {
    a.wg.Wait()
}

func (a *addrBook) FilePath() string {
    return a.filePath
}

//-------------------------------------------------------

// AddOurAddress adds one of our addresses.
func (a *addrBook) AddOurAddress(addr *p2p.NetAddress) {
    a.mtx.Lock()
    defer a.mtx.Unlock()

    a.Logger.Info("Add our address to book", "addr", addr)
    a.ourAddrs[addr.String()] = struct{}{}
}

// OurAddress returns true if it is our address.
func (a *addrBook) OurAddress(addr *p2p.NetAddress) bool {
    a.mtx.Lock()
    defer a.mtx.Unlock()

    _, ok := a.ourAddrs[addr.String()]
    return ok
}

func (a *addrBook) AddPrivateIDs(ids []string) {
    a.mtx.Lock()
    defer a.mtx.Unlock()

    for _, id := range ids {
        a.privateIDs[types.NodeID(id)] = struct{}{}
    }
}

// AddAddress implements AddrBook.
// Add address to a "new" bucket. If it's already in one, only add it probabilistically.
// Returns error if the addr is non-routable. Does not add self.
// NOTE: addr must not be nil
func (a *addrBook) AddAddress(addr *p2p.NetAddress, src *p2p.NetAddress) error {
    a.mtx.Lock()
    defer a.mtx.Unlock()

    return a.addAddress(addr, src)
}

// RemoveAddress implements AddrBook - removes the address from the book.
func (a *addrBook) RemoveAddress(addr *p2p.NetAddress) {
    a.mtx.Lock()
    defer a.mtx.Unlock()

    a.removeAddress(addr)
}

// IsGood returns true if the peer was ever marked as good and hasn't
// done anything wrong since then.
func (a *addrBook) IsGood(addr *p2p.NetAddress) bool {
    a.mtx.Lock()
    defer a.mtx.Unlock()

    return a.addrLookup[addr.ID].isOld()
}

// IsBanned returns true if the peer is currently banned
func (a *addrBook) IsBanned(addr *p2p.NetAddress) bool {
    a.mtx.Lock()
    _, ok := a.badPeers[addr.ID]
    a.mtx.Unlock()

    return ok
}

// HasAddress returns true if the address is in the book.
func (a *addrBook) HasAddress(addr *p2p.NetAddress) bool {
    a.mtx.Lock()
    defer a.mtx.Unlock()

    ka := a.addrLookup[addr.ID]
    return ka != nil
}

// NeedMoreAddrs implements AddrBook - returns true if the book does not have enough addresses.
func (a *addrBook) NeedMoreAddrs() bool {
    return a.Size() < needAddressThreshold
}

// Empty implements AddrBook - returns true if there are no addresses in the address book.
// Does not count the peer appearing in its own address book, or private peers.
func (a *addrBook) Empty() bool {
    return a.Size() == 0
}

// PickAddress implements AddrBook. It picks an address to connect to.
// The address is picked randomly from an old or new bucket according
// to the biasTowardsNewAddrs argument, which must be between [0, 100] (or else is truncated to that range)
// and determines how biased we are to pick an address from a new bucket.
// PickAddress returns nil if the AddrBook is empty or if we try to pick
// from an empty bucket.
// nolint:gosec // G404: Use of weak random number generator
func (a *addrBook) PickAddress(biasTowardsNewAddrs int) *p2p.NetAddress {
    a.mtx.Lock()
    defer a.mtx.Unlock()

    bookSize := a.size()
    if bookSize <= 0 {
        if bookSize < 0 {
            panic(fmt.Sprintf("Addrbook size %d (new: %d + old: %d) is less than 0", a.nNew+a.nOld, a.nNew, a.nOld))
        }
        return nil
    }
    if biasTowardsNewAddrs > 100 {
        biasTowardsNewAddrs = 100
    }
    if biasTowardsNewAddrs < 0 {
        biasTowardsNewAddrs = 0
    }

    // Bias between new and old addresses.
    oldCorrelation := math.Sqrt(float64(a.nOld)) * (100.0 - float64(biasTowardsNewAddrs))
    newCorrelation := math.Sqrt(float64(a.nNew)) * float64(biasTowardsNewAddrs)

    // pick a random peer from a random bucket
    var bucket map[string]*knownAddress
    pickFromOldBucket := (newCorrelation+oldCorrelation)*mrand.Float64() < oldCorrelation
    if (pickFromOldBucket && a.nOld == 0) ||
        (!pickFromOldBucket && a.nNew == 0) {
        return nil
    }
    // loop until we pick a random non-empty bucket
    for len(bucket) == 0 {
        if pickFromOldBucket {
            bucket = a.bucketsOld[mrand.Intn(len(a.bucketsOld))]
        } else {
            bucket = a.bucketsNew[mrand.Intn(len(a.bucketsNew))]
        }
    }
    // pick a random index and loop over the map to return that index
    randIndex := mrand.Intn(len(bucket))
    for _, ka := range bucket {
        if randIndex == 0 {
            return ka.Addr
        }
        randIndex--
    }
    return nil
}
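To make the bias arithmetic above concrete, a worked example with invented numbers: nOld = 100, nNew = 400, biasTowardsNewAddrs = 30 gives

oldCorrelation = sqrt(100) * (100 - 30) = 10 * 70 = 700
newCorrelation = sqrt(400) * 30         = 20 * 30 = 600
P(old bucket)  = 700 / (700 + 600)      ≈ 0.54

so even with four times as many new addresses, old addresses win just over half of the picks; the square root damps raw counts so neither side can dominate purely by volume.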

// MarkGood implements AddrBook - it marks the peer as good and
// moves it into an "old" bucket.
func (a *addrBook) MarkGood(id types.NodeID) {
    a.mtx.Lock()
    defer a.mtx.Unlock()

    ka := a.addrLookup[id]
    if ka == nil {
        return
    }
    ka.markGood()
    if ka.isNew() {
        if err := a.moveToOld(ka); err != nil {
            a.Logger.Error("Error moving address to old", "err", err)
        }
    }
}

// MarkAttempt implements AddrBook - it marks that an attempt was made to connect to the address.
func (a *addrBook) MarkAttempt(addr *p2p.NetAddress) {
    a.mtx.Lock()
    defer a.mtx.Unlock()

    ka := a.addrLookup[addr.ID]
    if ka == nil {
        return
    }
    ka.markAttempt()
}

// MarkBad implements AddrBook. Kicks address out from book, places
// the address in the badPeers pool.
func (a *addrBook) MarkBad(addr *p2p.NetAddress, banTime time.Duration) {
    a.mtx.Lock()
    defer a.mtx.Unlock()

    if a.addBadPeer(addr, banTime) {
        a.removeAddress(addr)
    }
}

// ReinstateBadPeers removes bad peers from ban list and places them into a new
// bucket.
func (a *addrBook) ReinstateBadPeers() {
    a.mtx.Lock()
    defer a.mtx.Unlock()

    for _, ka := range a.badPeers {
        if ka.isBanned() {
            continue
        }

        bucket, err := a.calcNewBucket(ka.Addr, ka.Src)
        if err != nil {
            a.Logger.Error("Failed to calculate new bucket (bad peer won't be reinstated)",
                "addr", ka.Addr, "err", err)
            continue
        }

        if err := a.addToNewBucket(ka, bucket); err != nil {
            a.Logger.Error("Error adding peer to new bucket", "err", err)
        }
        delete(a.badPeers, ka.ID())

        a.Logger.Info("Reinstated address", "addr", ka.Addr)
    }
}

// GetSelection implements AddrBook.
// It randomly selects some addresses (old & new). Suitable for peer-exchange protocols.
// Must never return a nil address.
func (a *addrBook) GetSelection() []*p2p.NetAddress {
    a.mtx.Lock()
    defer a.mtx.Unlock()

    bookSize := a.size()
    if bookSize <= 0 {
        if bookSize < 0 {
            panic(fmt.Sprintf("Addrbook size %d (new: %d + old: %d) is less than 0", a.nNew+a.nOld, a.nNew, a.nOld))
        }
        return nil
    }

    numAddresses := tmmath.MaxInt(
        tmmath.MinInt(minGetSelection, bookSize),
        bookSize*getSelectionPercent/100)
    numAddresses = tmmath.MinInt(maxGetSelection, numAddresses)

    // XXX: instead of making a list of all addresses, shuffling, and slicing a random chunk,
    // could we just select a random numAddresses of indexes?
    allAddr := make([]*p2p.NetAddress, bookSize)
    i := 0
    for _, ka := range a.addrLookup {
        allAddr[i] = ka.Addr
        i++
    }

    // Fisher-Yates shuffle the array. We only need to do the first
    // `numAddresses' since we are throwing away the rest.
    for i := 0; i < numAddresses; i++ {
        // pick a number between current index and the end
        // nolint:gosec // G404: Use of weak random number generator
        j := mrand.Intn(len(allAddr)-i) + i
        allAddr[i], allAddr[j] = allAddr[j], allAddr[i]
    }

    // slice off the limit we are willing to share.
    return allAddr[:numAddresses]
}
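The loop above is a partial Fisher–Yates shuffle: it only randomizes the prefix it keeps. As a standalone sketch of the same idea (self-contained, not the address book's code):

package main

import (
    "fmt"
    "math/rand"
)

// partialShuffle moves a uniformly random k-subset of xs into xs[:k],
// doing only k swap steps instead of shuffling the whole slice.
func partialShuffle(xs []int, k int) []int {
    for i := 0; i < k; i++ {
        j := rand.Intn(len(xs)-i) + i // pick from the not-yet-fixed suffix
        xs[i], xs[j] = xs[j], xs[i]
    }
    return xs[:k]
}

func main() {
    fmt.Println(partialShuffle([]int{1, 2, 3, 4, 5, 6, 7, 8}, 3))
}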

func percentageOfNum(p, n int) int {
    return int(math.Round((float64(p) / float64(100)) * float64(n)))
}

// GetSelectionWithBias implements AddrBook.
// It randomly selects some addresses (old & new). Suitable for peer-exchange protocols.
// Must never return a nil address.
//
// Each address is picked randomly from an old or new bucket according to the
// biasTowardsNewAddrs argument, which must be between [0, 100] (or else is truncated to
// that range) and determines how biased we are to pick an address from a new
// bucket.
func (a *addrBook) GetSelectionWithBias(biasTowardsNewAddrs int) []*p2p.NetAddress {
    a.mtx.Lock()
    defer a.mtx.Unlock()

    bookSize := a.size()
    if bookSize <= 0 {
        if bookSize < 0 {
            panic(fmt.Sprintf("Addrbook size %d (new: %d + old: %d) is less than 0", a.nNew+a.nOld, a.nNew, a.nOld))
        }
        return nil
    }

    if biasTowardsNewAddrs > 100 {
        biasTowardsNewAddrs = 100
    }
    if biasTowardsNewAddrs < 0 {
        biasTowardsNewAddrs = 0
    }

    numAddresses := tmmath.MaxInt(
        tmmath.MinInt(minGetSelection, bookSize),
        bookSize*getSelectionPercent/100)
    numAddresses = tmmath.MinInt(maxGetSelection, numAddresses)

    // number of new addresses that, if possible, should be in the beginning of the selection;
    // if there are not enough old addrs, new addrs will be chosen instead.
    numRequiredNewAdd := tmmath.MaxInt(percentageOfNum(biasTowardsNewAddrs, numAddresses), numAddresses-a.nOld)
    selection := a.randomPickAddresses(bucketTypeNew, numRequiredNewAdd)
    selection = append(selection, a.randomPickAddresses(bucketTypeOld, numAddresses-len(selection))...)
    return selection
}

//------------------------------------------------

// Size returns the number of addresses in the book.
func (a *addrBook) Size() int {
    a.mtx.Lock()
    defer a.mtx.Unlock()

    return a.size()
}

func (a *addrBook) size() int {
    return a.nNew + a.nOld
}

//----------------------------------------------------------

// Save persists the address book to disk.
func (a *addrBook) Save() {
    a.saveToFile(a.filePath) // thread safe
}

func (a *addrBook) saveRoutine() {
    defer a.wg.Done()

    saveFileTicker := time.NewTicker(dumpAddressInterval)
out:
    for {
        select {
        case <-saveFileTicker.C:
            a.saveToFile(a.filePath)
        case <-a.Quit():
            break out
        }
    }
    saveFileTicker.Stop()
    a.saveToFile(a.filePath)
}

//----------------------------------------------------------

func (a *addrBook) getBucket(bucketType byte, bucketIdx int) map[string]*knownAddress {
    switch bucketType {
    case bucketTypeNew:
        return a.bucketsNew[bucketIdx]
    case bucketTypeOld:
        return a.bucketsOld[bucketIdx]
    default:
        panic("Invalid bucket type")
    }
}

// Adds ka to a new bucket. Returns an error if it couldn't because the buckets are full.
// NOTE: currently it always succeeds (a full bucket is expired first).
func (a *addrBook) addToNewBucket(ka *knownAddress, bucketIdx int) error {
    // Consistency check to ensure we don't add an already known address
    if ka.isOld() {
        return errAddrBookOldAddressNewBucket{ka.Addr, bucketIdx}
    }

    addrStr := ka.Addr.String()
    bucket := a.getBucket(bucketTypeNew, bucketIdx)

    // Already exists?
    if _, ok := bucket[addrStr]; ok {
        return nil
    }

    // Enforce max addresses.
    if len(bucket) > newBucketSize {
        a.Logger.Info("new bucket is full, expiring new")
        a.expireNew(bucketIdx)
    }

    // Add to bucket.
    bucket[addrStr] = ka
    // increment nNew if the peer doesn't already exist in a bucket
    if ka.addBucketRef(bucketIdx) == 1 {
        a.nNew++
    }

    // Add it to addrLookup
    a.addrLookup[ka.ID()] = ka
    return nil
}

// Adds ka to an old bucket. Returns false if it couldn't because the bucket is full.
func (a *addrBook) addToOldBucket(ka *knownAddress, bucketIdx int) bool {
    // Sanity check
    if ka.isNew() {
        a.Logger.Error(fmt.Sprintf("Cannot add new address to old bucket: %v", ka))
        return false
    }
    if len(ka.Buckets) != 0 {
        a.Logger.Error(fmt.Sprintf("Cannot add already old address to another old bucket: %v", ka))
        return false
    }

    addrStr := ka.Addr.String()
    bucket := a.getBucket(bucketTypeOld, bucketIdx)

    // Already exists?
    if _, ok := bucket[addrStr]; ok {
        return true
    }

    // Enforce max addresses.
    if len(bucket) > oldBucketSize {
        return false
    }

    // Add to bucket.
    bucket[addrStr] = ka
    if ka.addBucketRef(bucketIdx) == 1 {
        a.nOld++
    }

    // Ensure in addrLookup
    a.addrLookup[ka.ID()] = ka

    return true
}

func (a *addrBook) removeFromBucket(ka *knownAddress, bucketType byte, bucketIdx int) {
    if ka.BucketType != bucketType {
        a.Logger.Error(fmt.Sprintf("Bucket type mismatch: %v", ka))
        return
    }
    bucket := a.getBucket(bucketType, bucketIdx)
    delete(bucket, ka.Addr.String())
    if ka.removeBucketRef(bucketIdx) == 0 {
        if bucketType == bucketTypeNew {
            a.nNew--
        } else {
            a.nOld--
        }
        delete(a.addrLookup, ka.ID())
    }
}

func (a *addrBook) removeFromAllBuckets(ka *knownAddress) {
    for _, bucketIdx := range ka.Buckets {
        bucket := a.getBucket(ka.BucketType, bucketIdx)
        delete(bucket, ka.Addr.String())
    }
    ka.Buckets = nil
    if ka.BucketType == bucketTypeNew {
        a.nNew--
    } else {
        a.nOld--
    }
    delete(a.addrLookup, ka.ID())
}

//----------------------------------------------------------

func (a *addrBook) pickOldest(bucketType byte, bucketIdx int) *knownAddress {
    bucket := a.getBucket(bucketType, bucketIdx)
    var oldest *knownAddress
    for _, ka := range bucket {
        if oldest == nil || ka.LastAttempt.Before(oldest.LastAttempt) {
            oldest = ka
        }
    }
    return oldest
}

// adds the address to a "new" bucket. if it's already in one,
// it only adds it probabilistically
func (a *addrBook) addAddress(addr, src *p2p.NetAddress) error {
    if addr == nil || src == nil {
        return ErrAddrBookNilAddr{addr, src}
    }

    if err := addr.Valid(); err != nil {
        return ErrAddrBookInvalidAddr{Addr: addr, AddrErr: err}
    }

    if _, ok := a.badPeers[addr.ID]; ok {
        return ErrAddressBanned{addr}
    }

    if _, ok := a.privateIDs[addr.ID]; ok {
        return ErrAddrBookPrivate{addr}
    }

    if _, ok := a.privateIDs[src.ID]; ok {
        return ErrAddrBookPrivateSrc{src}
    }

    // TODO: we should track ourAddrs by ID and by IP:PORT and refuse both.
    if _, ok := a.ourAddrs[addr.String()]; ok {
        return ErrAddrBookSelf{addr}
    }

    if a.routabilityStrict && !addr.Routable() {
        return ErrAddrBookNonRoutable{addr}
    }

    ka := a.addrLookup[addr.ID]
    if ka != nil {
        // If it's already old and the address IDs are the same, ignore it.
        // Thereby avoiding issues with a node on the network attempting to change
        // the IP of a known node ID. (Which could yield an eclipse attack on the node)
        if ka.isOld() && ka.Addr.ID == addr.ID {
            return nil
        }
        // Already in max new buckets.
        if len(ka.Buckets) == maxNewBucketsPerAddress {
            return nil
        }
        // The more entries we have, the less likely we are to add more.
        factor := int32(2 * len(ka.Buckets))
        // nolint:gosec // G404: Use of weak random number generator
        if mrand.Int31n(factor) != 0 {
            return nil
        }
    } else {
        ka = newKnownAddress(addr, src)
    }

    bucket, err := a.calcNewBucket(addr, src)
    if err != nil {
        return err
    }
    return a.addToNewBucket(ka, bucket)
}
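The probabilistic branch above means an address already present in k new buckets is re-added with probability 1/(2k), i.e. the chance that mrand.Int31n(2k) returns 0. Concretely: k = 1 gives 1/2, k = 2 gives 1/4, and at the cap checked just before (len(ka.Buckets) == maxNewBucketsPerAddress) the address is never re-added at all, so a frequently gossiped address saturates the book only slowly.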
|
||||
func (a *addrBook) randomPickAddresses(bucketType byte, num int) []*p2p.NetAddress {
|
||||
var buckets []map[string]*knownAddress
|
||||
switch bucketType {
|
||||
case bucketTypeNew:
|
||||
buckets = a.bucketsNew
|
||||
case bucketTypeOld:
|
||||
buckets = a.bucketsOld
|
||||
default:
|
||||
panic("unexpected bucketType")
|
||||
}
|
||||
total := 0
|
||||
for _, bucket := range buckets {
|
||||
total += len(bucket)
|
||||
}
|
||||
addresses := make([]*knownAddress, 0, total)
|
||||
for _, bucket := range buckets {
|
||||
for _, ka := range bucket {
|
||||
addresses = append(addresses, ka)
|
||||
}
|
||||
}
|
||||
selection := make([]*p2p.NetAddress, 0, num)
|
||||
chosenSet := make(map[string]bool, num)
|
||||
rand := tmrand.NewRand()
|
||||
rand.Shuffle(total, func(i, j int) {
|
||||
addresses[i], addresses[j] = addresses[j], addresses[i]
|
||||
})
|
||||
for _, addr := range addresses {
|
||||
if chosenSet[addr.Addr.String()] {
|
||||
continue
|
||||
}
|
||||
chosenSet[addr.Addr.String()] = true
|
||||
selection = append(selection, addr.Addr)
|
||||
if len(selection) >= num {
|
||||
return selection
|
||||
}
|
||||
}
|
||||
return selection
|
||||
}

// Make space in the new buckets by expiring the really bad entries.
// If no bad entries are available we remove the oldest.
func (a *addrBook) expireNew(bucketIdx int) {
	for addrStr, ka := range a.bucketsNew[bucketIdx] {
		// If an entry is bad, throw it away.
		if ka.isBad() {
			a.Logger.Info(fmt.Sprintf("expiring bad address %v", addrStr))
			a.removeFromBucket(ka, bucketTypeNew, bucketIdx)
			return
		}
	}

	// If we haven't thrown out a bad entry, throw out the oldest entry.
	oldest := a.pickOldest(bucketTypeNew, bucketIdx)
	a.removeFromBucket(oldest, bucketTypeNew, bucketIdx)
}

// Promotes an address from new to old. If the destination bucket is full,
// demote the oldest one to a "new" bucket.
// TODO: Demote more probabilistically?
func (a *addrBook) moveToOld(ka *knownAddress) error {
	// Sanity check
	if ka.isOld() {
		a.Logger.Error(fmt.Sprintf("Cannot promote address that is already old %v", ka))
		return nil
	}
	if len(ka.Buckets) == 0 {
		a.Logger.Error(fmt.Sprintf("Cannot promote address that isn't in any new buckets %v", ka))
		return nil
	}

	// Remove from all (new) buckets.
	a.removeFromAllBuckets(ka)
	// It's officially old now.
	ka.BucketType = bucketTypeOld

	// Try to add it to its old-bucket destination.
	oldBucketIdx, err := a.calcOldBucket(ka.Addr)
	if err != nil {
		return err
	}
	added := a.addToOldBucket(ka, oldBucketIdx)
	if !added {
		// No room; move the oldest to a new bucket.
		oldest := a.pickOldest(bucketTypeOld, oldBucketIdx)
		a.removeFromBucket(oldest, bucketTypeOld, oldBucketIdx)
		newBucketIdx, err := a.calcNewBucket(oldest.Addr, oldest.Src)
		if err != nil {
			return err
		}
		if err := a.addToNewBucket(oldest, newBucketIdx); err != nil {
			a.Logger.Error("Error adding demoted peer to new bucket", "err", err)
		}

		// Finally, add our ka to the old bucket again.
		added = a.addToOldBucket(ka, oldBucketIdx)
		if !added {
			a.Logger.Error(fmt.Sprintf("Could not re-add ka %v to oldBucketIdx %v", ka, oldBucketIdx))
		}
	}
	return nil
}
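
// A minimal sketch of the eviction step above, assuming a fixed-capacity
// bucket modeled as a slice ordered oldest-first: when the bucket is full,
// the oldest resident is displaced so the promoted address can take its
// place. All names are illustrative; the real book tracks LastAttempt.
func promoteWithEviction(oldBucket []string, capacity int, newcomer string) (bucket []string, demoted string) {
	if len(oldBucket) < capacity {
		return append(oldBucket, newcomer), ""
	}
	demoted = oldBucket[0]
	return append(oldBucket[1:], newcomer), demoted
}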

func (a *addrBook) removeAddress(addr *p2p.NetAddress) {
	ka := a.addrLookup[addr.ID]
	if ka == nil {
		return
	}
	a.Logger.Info("Remove address from book", "addr", addr)
	a.removeFromAllBuckets(ka)
}

func (a *addrBook) addBadPeer(addr *p2p.NetAddress, banTime time.Duration) bool {
	// Check that the address exists in the book; if it doesn't, there is
	// nothing to ban.
	ka := a.addrLookup[addr.ID]
	if ka == nil {
		return false
	}

	if _, alreadyBadPeer := a.badPeers[addr.ID]; !alreadyBadPeer {
		// Add it to the bad-peer list.
		ka.ban(banTime)
		a.badPeers[addr.ID] = ka
		a.Logger.Info("Add address to blacklist", "addr", addr)
	}
	return true
}

//---------------------------------------------------------------------
// calculate bucket placements

// hash(key + sourcegroup + int64(hash(key + group + sourcegroup)) % buckets_per_group) % num_new_buckets
func (a *addrBook) calcNewBucket(addr, src *p2p.NetAddress) (int, error) {
	data1 := []byte{}
	data1 = append(data1, []byte(a.key)...)
	data1 = append(data1, []byte(a.groupKey(addr))...)
	data1 = append(data1, []byte(a.groupKey(src))...)
	hash1, err := a.hash(data1)
	if err != nil {
		return 0, err
	}
	hash64 := binary.BigEndian.Uint64(hash1)
	hash64 %= newBucketsPerGroup
	var hashbuf [8]byte
	binary.BigEndian.PutUint64(hashbuf[:], hash64)
	data2 := []byte{}
	data2 = append(data2, []byte(a.key)...)
	data2 = append(data2, a.groupKey(src)...)
	data2 = append(data2, hashbuf[:]...)

	hash2, err := a.hash(data2)
	if err != nil {
		return 0, err
	}
	result := int(binary.BigEndian.Uint64(hash2) % newBucketCount)
	return result, nil
}

// hash(key + group + int64(hash(key + addr)) % buckets_per_group) % num_old_buckets
func (a *addrBook) calcOldBucket(addr *p2p.NetAddress) (int, error) {
	data1 := []byte{}
	data1 = append(data1, []byte(a.key)...)
	data1 = append(data1, []byte(addr.String())...)
	hash1, err := a.hash(data1)
	if err != nil {
		return 0, err
	}
	hash64 := binary.BigEndian.Uint64(hash1)
	hash64 %= oldBucketsPerGroup
	var hashbuf [8]byte
	binary.BigEndian.PutUint64(hashbuf[:], hash64)
	data2 := []byte{}
	data2 = append(data2, []byte(a.key)...)
	data2 = append(data2, a.groupKey(addr)...)
	data2 = append(data2, hashbuf[:]...)

	hash2, err := a.hash(data2)
	if err != nil {
		return 0, err
	}
	result := int(binary.BigEndian.Uint64(hash2) % oldBucketCount)
	return result, nil
}
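
// A self-contained sketch of the two-level placement used above, assuming
// "crypto/sha256" is imported in place of the book's keyed hasher: the
// first hash pins an input to one of perGroup slots within its group, and
// the second spreads group+slot across all buckets, so entries from one
// source group can only ever land in a bounded set of buckets. Names and
// parameters here are illustrative, not part of the address book.
func bucketFor(key, group, addr string, perGroup, numBuckets uint64) uint64 {
	h1 := sha256.Sum256([]byte(key + group + addr))
	slot := binary.BigEndian.Uint64(h1[:8]) % perGroup
	var buf [8]byte
	binary.BigEndian.PutUint64(buf[:], slot)
	h2 := sha256.Sum256(append([]byte(key+group), buf[:]...))
	return binary.BigEndian.Uint64(h2[:8]) % numBuckets
}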

// Return a string representing the network group of this address.
// This is the /16 for IPv4 (e.g. 1.2.0.0), the /32 (/36 for he.net) for
// IPv6, the string "local" for a local address and the string "unroutable"
// for an unroutable address.
func (a *addrBook) groupKey(na *p2p.NetAddress) string {
	return groupKeyFor(na, a.routabilityStrict)
}

func groupKeyFor(na *p2p.NetAddress, routabilityStrict bool) string {
	if routabilityStrict && na.Local() {
		return "local"
	}
	if routabilityStrict && !na.Routable() {
		return "unroutable"
	}

	if ipv4 := na.IP.To4(); ipv4 != nil {
		return na.IP.Mask(net.CIDRMask(16, 32)).String()
	}

	if na.RFC6145() || na.RFC6052() {
		// last four bytes are the ip address
		ip := na.IP[12:16]
		return ip.Mask(net.CIDRMask(16, 32)).String()
	}

	if na.RFC3964() {
		ip := na.IP[2:6]
		return ip.Mask(net.CIDRMask(16, 32)).String()
	}

	if na.RFC4380() {
		// teredo tunnels have the last 4 bytes as the v4 address XOR 0xff.
		ip := net.IP(make([]byte, 4))
		for i, b := range na.IP[12:16] {
			ip[i] = b ^ 0xff
		}
		return ip.Mask(net.CIDRMask(16, 32)).String()
	}

	if na.OnionCatTor() {
		// group is keyed off the first 4 bits of the actual onion key.
		return fmt.Sprintf("tor:%d", na.IP[6]&((1<<4)-1))
	}

	// OK, so now we know this is an IPv6 address.
	// bitcoind uses /32 for everything, except for Hurricane Electric's
	// (he.net) IP range, which it uses /36 for.
	bits := 32
	heNet := &net.IPNet{IP: net.ParseIP("2001:470::"), Mask: net.CIDRMask(32, 128)}
	if heNet.Contains(na.IP) {
		bits = 36
	}
	ipv6Mask := net.CIDRMask(bits, 128)
	return na.IP.Mask(ipv6Mask).String()
}
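
// A quick sketch of the /16 grouping above, assuming only the standard
// library: two hosts in the same /16 mask to the same group key. The
// helper name is illustrative.
func sameGroup16(a, b string) bool {
	mask := net.CIDRMask(16, 32)
	// e.g. sameGroup16("1.2.3.4", "1.2.9.9") == true
	return net.ParseIP(a).Mask(mask).Equal(net.ParseIP(b).Mask(mask))
}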

func (a *addrBook) hash(b []byte) ([]byte, error) {
	a.hasher.Reset()
	a.hasher.Write(b)
	return a.hasher.Sum(nil), nil
}
@@ -1,777 +0,0 @@
package pex

import (
	"encoding/hex"
	"fmt"
	"io/ioutil"
	"math"
	mrand "math/rand"
	"net"
	"os"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/tendermint/tendermint/internal/p2p"
	"github.com/tendermint/tendermint/libs/log"
	tmmath "github.com/tendermint/tendermint/libs/math"
	tmrand "github.com/tendermint/tendermint/libs/rand"
	"github.com/tendermint/tendermint/types"
)

// FIXME These tests should not rely on .(*addrBook) assertions

func TestAddrBookPickAddress(t *testing.T) {
	fname := createTempFileName(t, "addrbook_test")

	// 0 addresses
	book := NewAddrBook(fname, true)
	book.SetLogger(log.TestingLogger())
	assert.Zero(t, book.Size())

	addr := book.PickAddress(50)
	assert.Nil(t, addr, "expected no address")

	randAddrs := randNetAddressPairs(t, 1)
	addrSrc := randAddrs[0]
	err := book.AddAddress(addrSrc.addr, addrSrc.src)
	require.NoError(t, err)

	// pick an address when we only have a new address
	addr = book.PickAddress(0)
	assert.NotNil(t, addr, "expected an address")
	addr = book.PickAddress(50)
	assert.NotNil(t, addr, "expected an address")
	addr = book.PickAddress(100)
	assert.NotNil(t, addr, "expected an address")

	// pick an address when we only have an old address
	book.MarkGood(addrSrc.addr.ID)
	addr = book.PickAddress(0)
	assert.NotNil(t, addr, "expected an address")
	addr = book.PickAddress(50)
	assert.NotNil(t, addr, "expected an address")

	// in this case, nNew==0 but we biased 100% to new, so we return nil
	addr = book.PickAddress(100)
	assert.Nil(t, addr, "did not expect an address")
}

func TestAddrBookSaveLoad(t *testing.T) {
	fname := createTempFileName(t, "addrbook_test")

	// 0 addresses
	book := NewAddrBook(fname, true)
	book.SetLogger(log.TestingLogger())
	book.Save()

	book = NewAddrBook(fname, true)
	book.SetLogger(log.TestingLogger())
	err := book.Start()
	require.NoError(t, err)

	assert.True(t, book.Empty())

	// 100 addresses
	randAddrs := randNetAddressPairs(t, 100)

	for _, addrSrc := range randAddrs {
		err := book.AddAddress(addrSrc.addr, addrSrc.src)
		require.NoError(t, err)
	}

	assert.Equal(t, 100, book.Size())
	book.Save()

	book = NewAddrBook(fname, true)
	book.SetLogger(log.TestingLogger())
	err = book.Start()
	require.NoError(t, err)

	assert.Equal(t, 100, book.Size())
}

func TestAddrBookLookup(t *testing.T) {
	fname := createTempFileName(t, "addrbook_test")
	randAddrs := randNetAddressPairs(t, 100)

	book := NewAddrBook(fname, true)
	book.SetLogger(log.TestingLogger())
	for _, addrSrc := range randAddrs {
		addr := addrSrc.addr
		src := addrSrc.src
		err := book.AddAddress(addr, src)
		require.NoError(t, err)

		ka := book.HasAddress(addr)
		assert.True(t, ka, "expected to find KnownAddress %v but it wasn't there", addr)
	}
}

func TestAddrBookPromoteToOld(t *testing.T) {
	fname := createTempFileName(t, "addrbook_test")
	randAddrs := randNetAddressPairs(t, 100)

	book := NewAddrBook(fname, true)
	book.SetLogger(log.TestingLogger())
	for _, addrSrc := range randAddrs {
		err := book.AddAddress(addrSrc.addr, addrSrc.src)
		require.NoError(t, err)
	}

	// Attempt all addresses.
	for _, addrSrc := range randAddrs {
		book.MarkAttempt(addrSrc.addr)
	}

	// Promote half of them.
	for i, addrSrc := range randAddrs {
		if i%2 == 0 {
			book.MarkGood(addrSrc.addr.ID)
		}
	}

	// TODO: do more testing :)

	selection := book.GetSelection()
	t.Logf("selection: %v", selection)

	if len(selection) > book.Size() {
		t.Errorf("selection must not be bigger than the book")
	}

	selection = book.GetSelectionWithBias(30)
	t.Logf("selection: %v", selection)

	if len(selection) > book.Size() {
		t.Errorf("selection with bias must not be bigger than the book")
	}

	assert.Equal(t, 100, book.Size(), "expecting book size to be 100")
}

func TestAddrBookHandlesDuplicates(t *testing.T) {
	fname := createTempFileName(t, "addrbook_test")
	book := NewAddrBook(fname, true)

	book.SetLogger(log.TestingLogger())

	randAddrs := randNetAddressPairs(t, 100)

	differentSrc := randIPv4Address(t)
	for _, addrSrc := range randAddrs {
		err := book.AddAddress(addrSrc.addr, addrSrc.src)
		require.NoError(t, err)
		err = book.AddAddress(addrSrc.addr, addrSrc.src) // duplicate
		require.NoError(t, err)
		err = book.AddAddress(addrSrc.addr, differentSrc) // different src
		require.NoError(t, err)
	}

	assert.Equal(t, 100, book.Size())
}

type netAddressPair struct {
	addr *p2p.NetAddress
	src  *p2p.NetAddress
}

func randNetAddressPairs(t *testing.T, n int) []netAddressPair {
	randAddrs := make([]netAddressPair, n)
	for i := 0; i < n; i++ {
		randAddrs[i] = netAddressPair{addr: randIPv4Address(t), src: randIPv4Address(t)}
	}
	return randAddrs
}

func randIPv4Address(t *testing.T) *p2p.NetAddress {
	for {
		ip := fmt.Sprintf("%v.%v.%v.%v",
			mrand.Intn(254)+1,
			mrand.Intn(255),
			mrand.Intn(255),
			mrand.Intn(255),
		)
		port := mrand.Intn(65535-1) + 1
		id := types.NodeID(hex.EncodeToString(tmrand.Bytes(types.NodeIDByteLength)))
		idAddr := id.AddressString(fmt.Sprintf("%v:%v", ip, port))
		addr, err := types.NewNetAddressString(idAddr)
		assert.Nil(t, err, "error generating rand network address")
		if addr.Routable() {
			return addr
		}
	}
}

func TestAddrBookRemoveAddress(t *testing.T) {
	fname := createTempFileName(t, "addrbook_test")
	book := NewAddrBook(fname, true)
	book.SetLogger(log.TestingLogger())

	addr := randIPv4Address(t)
	err := book.AddAddress(addr, addr)
	require.NoError(t, err)
	assert.Equal(t, 1, book.Size())

	book.RemoveAddress(addr)
	assert.Equal(t, 0, book.Size())

	nonExistingAddr := randIPv4Address(t)
	book.RemoveAddress(nonExistingAddr)
	assert.Equal(t, 0, book.Size())
}

func TestAddrBookGetSelectionWithOneMarkedGood(t *testing.T) {
	// create a book with 10 addresses, 1 good/old and 9 new
	book, _ := createAddrBookWithMOldAndNNewAddrs(t, 1, 9)
	addrs := book.GetSelectionWithBias(biasToSelectNewPeers)
	assert.NotNil(t, addrs)
	assertMOldAndNNewAddrsInSelection(t, 1, 9, addrs, book)
}

func TestAddrBookGetSelectionWithOneNotMarkedGood(t *testing.T) {
	// create a book with 10 addresses, 9 good/old and 1 new
	book, _ := createAddrBookWithMOldAndNNewAddrs(t, 9, 1)
	addrs := book.GetSelectionWithBias(biasToSelectNewPeers)
	assert.NotNil(t, addrs)
	assertMOldAndNNewAddrsInSelection(t, 9, 1, addrs, book)
}

func TestAddrBookGetSelectionReturnsNilWhenAddrBookIsEmpty(t *testing.T) {
	book, _ := createAddrBookWithMOldAndNNewAddrs(t, 0, 0)
	addrs := book.GetSelectionWithBias(biasToSelectNewPeers)
	assert.Nil(t, addrs)
}

func TestAddrBookGetSelection(t *testing.T) {
	fname := createTempFileName(t, "addrbook_test")
	book := NewAddrBook(fname, true)
	book.SetLogger(log.TestingLogger())

	// 1) empty book
	assert.Empty(t, book.GetSelection())

	// 2) add one address
	addr := randIPv4Address(t)
	err := book.AddAddress(addr, addr)
	require.NoError(t, err)

	assert.Equal(t, 1, len(book.GetSelection()))
	assert.Equal(t, addr, book.GetSelection()[0])

	// 3) add a bunch of addresses
	randAddrs := randNetAddressPairs(t, 100)
	for _, addrSrc := range randAddrs {
		err := book.AddAddress(addrSrc.addr, addrSrc.src)
		require.NoError(t, err)
	}

	// check there are no duplicates
	addrs := make(map[string]*p2p.NetAddress)
	selection := book.GetSelection()
	for _, addr := range selection {
		if dup, ok := addrs[addr.String()]; ok {
			t.Fatalf("selection %v contains duplicates %v", selection, dup)
		}
		addrs[addr.String()] = addr
	}

	if len(selection) > book.Size() {
		t.Errorf("selection %v must not be bigger than the book", selection)
	}
}

func TestAddrBookGetSelectionWithBias(t *testing.T) {
	const biasTowardsNewAddrs = 30

	fname := createTempFileName(t, "addrbook_test")
	book := NewAddrBook(fname, true)
	book.SetLogger(log.TestingLogger())

	// 1) empty book
	selection := book.GetSelectionWithBias(biasTowardsNewAddrs)
	assert.Empty(t, selection)

	// 2) add one address
	addr := randIPv4Address(t)
	err := book.AddAddress(addr, addr)
	require.NoError(t, err)

	selection = book.GetSelectionWithBias(biasTowardsNewAddrs)
	assert.Equal(t, 1, len(selection))
	assert.Equal(t, addr, selection[0])

	// 3) add a bunch of addresses
	randAddrs := randNetAddressPairs(t, 100)
	for _, addrSrc := range randAddrs {
		err := book.AddAddress(addrSrc.addr, addrSrc.src)
		require.NoError(t, err)
	}

	// check there are no duplicates
	addrs := make(map[string]*p2p.NetAddress)
	selection = book.GetSelectionWithBias(biasTowardsNewAddrs)
	for _, addr := range selection {
		if dup, ok := addrs[addr.String()]; ok {
			t.Fatalf("selection %v contains duplicates %v", selection, dup)
		}
		addrs[addr.String()] = addr
	}

	if len(selection) > book.Size() {
		t.Fatalf("selection %v must not be bigger than the book", selection)
	}

	// 4) mark 80% of the addresses as good
	randAddrsLen := len(randAddrs)
	for i, addrSrc := range randAddrs {
		if int((float64(i)/float64(randAddrsLen))*100) >= 20 {
			book.MarkGood(addrSrc.addr.ID)
		}
	}

	selection = book.GetSelectionWithBias(biasTowardsNewAddrs)

	// check that ~70% of addresses returned are good
	good := 0
	for _, addr := range selection {
		if book.IsGood(addr) {
			good++
		}
	}

	got, expected := int((float64(good)/float64(len(selection)))*100), 100-biasTowardsNewAddrs

	// compute some slack to protect against small differences due to rounding:
	slack := int(math.Round(float64(100) / float64(len(selection))))
	if got > expected+slack {
		t.Fatalf(
			"got more good peers (%% got: %d, %% expected: %d, number of good addrs: %d, total: %d)",
			got,
			expected,
			good,
			len(selection),
		)
	}
	if got < expected-slack {
		t.Fatalf(
			"got fewer good peers (%% got: %d, %% expected: %d, number of good addrs: %d, total: %d)",
			got,
			expected,
			good,
			len(selection),
		)
	}
}
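
// A worked instance of the slack check above, assuming a selection of 25
// addresses: with bias 30 toward new peers, the expected share of good
// (old) peers is 100-30 = 70%, and the rounding slack is
// round(100/25) = 4, so any measured share in [66%, 74%] passes.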

func TestAddrBookHasAddress(t *testing.T) {
	fname := createTempFileName(t, "addrbook_test")
	book := NewAddrBook(fname, true)
	book.SetLogger(log.TestingLogger())
	addr := randIPv4Address(t)
	err := book.AddAddress(addr, addr)
	require.NoError(t, err)

	assert.True(t, book.HasAddress(addr))

	book.RemoveAddress(addr)

	assert.False(t, book.HasAddress(addr))
}

func testCreatePrivateAddrs(t *testing.T, numAddrs int) ([]*p2p.NetAddress, []string) {
	t.Helper()
	addrs := make([]*p2p.NetAddress, numAddrs)
	for i := 0; i < numAddrs; i++ {
		addrs[i] = randIPv4Address(t)
	}

	private := make([]string, numAddrs)
	for i, addr := range addrs {
		private[i] = string(addr.ID)
	}
	return addrs, private
}

func TestBanBadPeers(t *testing.T) {
	fname := createTempFileName(t, "addrbook_test")
	book := NewAddrBook(fname, true)
	book.SetLogger(log.TestingLogger())

	addr := randIPv4Address(t)
	_ = book.AddAddress(addr, addr)

	book.MarkBad(addr, 1*time.Second)
	// addr should not be reachable
	assert.False(t, book.HasAddress(addr))
	assert.True(t, book.IsBanned(addr))

	err := book.AddAddress(addr, addr)
	// the book must not accept an address from the blacklist
	assert.Error(t, err)

	time.Sleep(1 * time.Second)
	book.ReinstateBadPeers()
	// the address should be reinstated in a new bucket
	assert.EqualValues(t, 1, book.Size())
	assert.True(t, book.HasAddress(addr))
	assert.False(t, book.IsGood(addr))
}

func TestAddrBookEmpty(t *testing.T) {
	fname := createTempFileName(t, "addrbook_test")
	book := NewAddrBook(fname, true)
	book.SetLogger(log.TestingLogger())
	// Check that an empty book is empty.
	require.True(t, book.Empty())
	// Check that a book with only our address is empty.
	book.AddOurAddress(randIPv4Address(t))
	require.True(t, book.Empty())
	// Check that a book with only private addrs is empty.
	_, privateIds := testCreatePrivateAddrs(t, 5)
	book.AddPrivateIDs(privateIds)
	require.True(t, book.Empty())

	// Check that a book with an address is not empty.
	err := book.AddAddress(randIPv4Address(t), randIPv4Address(t))
	require.NoError(t, err)
	require.False(t, book.Empty())
}

func TestPrivatePeers(t *testing.T) {
	fname := createTempFileName(t, "addrbook_test")
	book := NewAddrBook(fname, true)
	book.SetLogger(log.TestingLogger())

	addrs, private := testCreatePrivateAddrs(t, 10)
	book.AddPrivateIDs(private)

	// private addrs must not be added
	for _, addr := range addrs {
		err := book.AddAddress(addr, addr)
		if assert.Error(t, err) {
			_, ok := err.(ErrAddrBookPrivate)
			assert.True(t, ok)
		}
	}

	// addrs coming from private peers must not be added
	err := book.AddAddress(randIPv4Address(t), addrs[0])
	if assert.Error(t, err) {
		_, ok := err.(ErrAddrBookPrivateSrc)
		assert.True(t, ok)
	}
}

func testAddrBookAddressSelection(t *testing.T, bookSize int) {
	// generate all combinations of old (m) and new addresses
	for nBookOld := 0; nBookOld <= bookSize; nBookOld++ {
		nBookNew := bookSize - nBookOld
		dbgStr := fmt.Sprintf("book of size %d (new %d, old %d)", bookSize, nBookNew, nBookOld)

		// create book and get selection
		book, _ := createAddrBookWithMOldAndNNewAddrs(t, nBookOld, nBookNew)
		addrs := book.GetSelectionWithBias(biasToSelectNewPeers)
		assert.NotNil(t, addrs, "%s - expected a non-nil selection", dbgStr)
		nAddrs := len(addrs)
		assert.NotZero(t, nAddrs, "%s - expected at least one address in selection", dbgStr)

		// check there are no nil addresses
		for _, addr := range addrs {
			if addr == nil {
				t.Fatalf("%s - got nil address in selection %v", dbgStr, addrs)
			}
		}

		// XXX: shadowing
		nOld, nNew := countOldAndNewAddrsInSelection(addrs, book)

		// Given:
		// n - num new addrs, m - num old addrs, r - num returned addrs
		// k - num new addrs expected at the beginning (based on bias %)
		// i = min(n, max(k, r-m)), aka expNew
		// j = min(m, r-i), aka expOld
		//
		// We expect this layout:
		// indices:    0...i-1   i...i+j-1
		// addresses:  N0..Ni-1  O0..Oj-1
		//
		// There is at least one partition and at most three.
		var (
			k      = percentageOfNum(biasToSelectNewPeers, nAddrs)
			expNew = tmmath.MinInt(nNew, tmmath.MaxInt(k, nAddrs-nBookOld))
			expOld = tmmath.MinInt(nOld, nAddrs-expNew)
		)

		// Verify that the numbers of old and new addresses are as expected.
		if nNew != expNew {
			t.Fatalf("%s - expected new addrs %d, got %d", dbgStr, expNew, nNew)
		}
		if nOld != expOld {
			t.Fatalf("%s - expected old addrs %d, got %d", dbgStr, expOld, nOld)
		}

		// Verify that the order of addresses is as expected:
		// get the sequence types and lengths of the selection.
		seqLens, seqTypes, err := analyseSelectionLayout(book, addrs)
		assert.NoError(t, err, "%s", dbgStr)

		// Build a list with the expected lengths of partitions and another with the expected types, e.g.:
		// expSeqLens = [10, 22], expSeqTypes = [1, 2]
		// means we expect 10 new (type 1) addresses followed by 22 old (type 2) addresses.
		var expSeqLens []int
		var expSeqTypes []int

		switch {
		case expOld == 0: // all new addresses
			expSeqLens = []int{nAddrs}
			expSeqTypes = []int{1}
		case expNew == 0: // all old addresses
			expSeqLens = []int{nAddrs}
			expSeqTypes = []int{2}
		case nAddrs-expNew-expOld == 0: // new addresses, old addresses
			expSeqLens = []int{expNew, expOld}
			expSeqTypes = []int{1, 2}
		}

		assert.Equal(t, expSeqLens, seqLens,
			"%s - expected sequence lengths of old/new %v, got %v",
			dbgStr, expSeqLens, seqLens)
		assert.Equal(t, expSeqTypes, seqTypes,
			"%s - expected sequence types (1-new, 2-old) was %v, got %v",
			dbgStr, expSeqTypes, seqTypes)
	}
}
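
// A worked instance of the partition math above, assuming a selection of
// r=10 addresses containing nNew=7 and nOld=3, drawn from a book with
// nBookOld=3 and a 30% bias toward new peers: k = 30% of 10 = 3,
// expNew = min(7, max(3, 10-3)) = 7 and expOld = min(3, 10-7) = 3,
// so the expected layout is [7 new][3 old].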

func TestMultipleAddrBookAddressSelection(t *testing.T) {
	// test books with smaller size, < N
	const N = 32
	for bookSize := 1; bookSize < N; bookSize++ {
		testAddrBookAddressSelection(t, bookSize)
	}

	// Test two books with sizes from the following ranges.
	ranges := [...][]int{{33, 100}, {100, 175}}
	bookSizes := make([]int, 0, len(ranges))
	for _, r := range ranges {
		bookSizes = append(bookSizes, mrand.Intn(r[1]-r[0])+r[0])
	}
	t.Logf("Testing address selection for the following book sizes %v", bookSizes)
	for _, bookSize := range bookSizes {
		testAddrBookAddressSelection(t, bookSize)
	}
}

func TestAddrBookAddDoesNotOverwriteOldIP(t *testing.T) {
	fname := createTempFileName(t, "addrbook_test")

	// This test adds a peer to the address book and marks it good.
	// It then attempts to override the peer's IP by adding a peer with the
	// same ID but a different IP. We distinguish the IPs as "RealIP" and
	// "OverrideAttemptIP".
	peerID := "678503e6c8f50db7279c7da3cb9b072aac4bc0d5"
	peerRealIP := "1.1.1.1:26656"
	peerOverrideAttemptIP := "2.2.2.2:26656"
	srcAddr := "b0dd378c3fbc4c156cd6d302a799f0d2e4227201@159.89.121.174:26656"

	// There is a chance that AddAddress will ignore the new peer it is given.
	// So we repeat trying to override the peer several times,
	// to ensure we aren't in a case that got probabilistically ignored.
	numOverrideAttempts := 10

	peerRealAddr, err := types.NewNetAddressString(peerID + "@" + peerRealIP)
	require.Nil(t, err)

	peerOverrideAttemptAddr, err := types.NewNetAddressString(peerID + "@" + peerOverrideAttemptIP)
	require.Nil(t, err)

	src, err := types.NewNetAddressString(srcAddr)
	require.Nil(t, err)

	book := NewAddrBook(fname, true)
	book.SetLogger(log.TestingLogger())
	err = book.AddAddress(peerRealAddr, src)
	require.Nil(t, err)
	book.MarkAttempt(peerRealAddr)
	book.MarkGood(peerRealAddr.ID)

	// Double check that adding a peer again doesn't error.
	err = book.AddAddress(peerRealAddr, src)
	require.Nil(t, err)

	// Try changing the IP but keeping the same node ID (change 1.1.1.1 to 2.2.2.2).
	// This should just be ignored, and not error.
	for i := 0; i < numOverrideAttempts; i++ {
		err = book.AddAddress(peerOverrideAttemptAddr, src)
		require.Nil(t, err)
	}
	// Now check that the IP was not overridden.
	// This is done by sampling several peers from the addr book
	// and ensuring they all have the correct IP.
	// In the expected functionality the book holds only one peer, so this passes.
	for i := 0; i < numOverrideAttempts; i++ {
		selection := book.GetSelection()
		for _, addr := range selection {
			require.Equal(t, addr.IP, peerRealAddr.IP)
		}
	}
}

func TestAddrBookGroupKey(t *testing.T) {
	// non-strict routability
	testCases := []struct {
		name   string
		ip     string
		expKey string
	}{
		// IPv4 normal.
		{"ipv4 normal class a", "12.1.2.3", "12.1.0.0"},
		{"ipv4 normal class b", "173.1.2.3", "173.1.0.0"},
		{"ipv4 normal class c", "196.1.2.3", "196.1.0.0"},

		// IPv6/IPv4 translations.
		{"ipv6 rfc3964 with ipv4 encap", "2002:0c01:0203::", "12.1.0.0"},
		{"ipv6 rfc4380 teredo ipv4", "2001:0:1234::f3fe:fdfc", "12.1.0.0"},
		{"ipv6 rfc6052 well-known prefix with ipv4", "64:ff9b::0c01:0203", "12.1.0.0"},
		{"ipv6 rfc6145 translated ipv4", "::ffff:0:0c01:0203", "12.1.0.0"},

		// Tor.
		{"ipv6 tor onioncat", "fd87:d87e:eb43:1234::5678", "tor:2"},
		{"ipv6 tor onioncat 2", "fd87:d87e:eb43:1245::6789", "tor:2"},
		{"ipv6 tor onioncat 3", "fd87:d87e:eb43:1345::6789", "tor:3"},

		// IPv6 normal.
		{"ipv6 normal", "2602:100::1", "2602:100::"},
		{"ipv6 normal 2", "2602:0100::1234", "2602:100::"},
		{"ipv6 hurricane electric", "2001:470:1f10:a1::2", "2001:470:1000::"},
		{"ipv6 hurricane electric 2", "2001:0470:1f10:a1::2", "2001:470:1000::"},
	}

	for i, tc := range testCases {
		nip := net.ParseIP(tc.ip)
		key := groupKeyFor(types.NewNetAddressIPPort(nip, 26656), false)
		assert.Equal(t, tc.expKey, key, "#%d", i)
	}

	// strict routability
	testCases = []struct {
		name   string
		ip     string
		expKey string
	}{
		// Local addresses.
		{"ipv4 localhost", "127.0.0.1", "local"},
		{"ipv6 localhost", "::1", "local"},
		{"ipv4 zero", "0.0.0.0", "local"},
		{"ipv4 first octet zero", "0.1.2.3", "local"},

		// Unroutable addresses.
		{"ipv4 invalid bcast", "255.255.255.255", "unroutable"},
		{"ipv4 rfc1918 10/8", "10.1.2.3", "unroutable"},
		{"ipv4 rfc1918 172.16/12", "172.16.1.2", "unroutable"},
		{"ipv4 rfc1918 192.168/16", "192.168.1.2", "unroutable"},
		{"ipv6 rfc3849 2001:db8::/32", "2001:db8::1234", "unroutable"},
		{"ipv4 rfc3927 169.254/16", "169.254.1.2", "unroutable"},
		{"ipv6 rfc4193 fc00::/7", "fc00::1234", "unroutable"},
		{"ipv6 rfc4843 2001:10::/28", "2001:10::1234", "unroutable"},
		{"ipv6 rfc4862 fe80::/64", "fe80::1234", "unroutable"},
	}

	for i, tc := range testCases {
		nip := net.ParseIP(tc.ip)
		key := groupKeyFor(types.NewNetAddressIPPort(nip, 26656), true)
		assert.Equal(t, tc.expKey, key, "#%d", i)
	}
}

func assertMOldAndNNewAddrsInSelection(t *testing.T, m, n int, addrs []*p2p.NetAddress, book *addrBook) {
	nOld, nNew := countOldAndNewAddrsInSelection(addrs, book)
	assert.Equal(t, m, nOld, "old addresses")
	assert.Equal(t, n, nNew, "new addresses")
}

func createTempFileName(t *testing.T, prefix string) string {
	t.Helper()
	f, err := ioutil.TempFile("", prefix)
	if err != nil {
		panic(err)
	}

	fname := f.Name()
	if err := f.Close(); err != nil {
		t.Fatal(err)
	}

	t.Cleanup(func() { _ = os.Remove(fname) })

	return fname
}

func createAddrBookWithMOldAndNNewAddrs(t *testing.T, nOld, nNew int) (book *addrBook, fname string) {
	t.Helper()
	fname = createTempFileName(t, "addrbook_test")

	book = NewAddrBook(fname, true).(*addrBook)
	book.SetLogger(log.TestingLogger())
	assert.Zero(t, book.Size())

	randAddrs := randNetAddressPairs(t, nOld)
	for _, addr := range randAddrs {
		err := book.AddAddress(addr.addr, addr.src)
		require.NoError(t, err)
		book.MarkGood(addr.addr.ID)
	}

	randAddrs = randNetAddressPairs(t, nNew)
	for _, addr := range randAddrs {
		err := book.AddAddress(addr.addr, addr.src)
		require.NoError(t, err)
	}

	return book, fname
}

func countOldAndNewAddrsInSelection(addrs []*p2p.NetAddress, book *addrBook) (nOld, nNew int) {
	for _, addr := range addrs {
		if book.IsGood(addr) {
			nOld++
		} else {
			nNew++
		}
	}
	return
}

// Analyze the layout of the selection specified by 'addrs'.
// Returns:
// - seqLens - the lengths of the sequences of addresses of the same type
// - seqTypes - the types of the sequences in the selection
func analyseSelectionLayout(book *addrBook, addrs []*p2p.NetAddress) (seqLens, seqTypes []int, err error) {
	// address types are: 0 - nil, 1 - new, 2 - old
	var (
		prevType      = 0
		currentSeqLen = 0
	)

	for _, addr := range addrs {
		addrType := 0
		if book.IsGood(addr) {
			addrType = 2
		} else {
			addrType = 1
		}
		if addrType != prevType && prevType != 0 {
			seqLens = append(seqLens, currentSeqLen)
			seqTypes = append(seqTypes, prevType)
			currentSeqLen = 0
		}
		currentSeqLen++
		prevType = addrType
	}

	seqLens = append(seqLens, currentSeqLen)
	seqTypes = append(seqTypes, prevType)

	return
}
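
// A worked example of the run-length analysis above: for a selection that
// is [new, new, new, old, old] the function yields seqLens = [3, 2] and
// seqTypes = [1, 2], i.e. a run of 3 new addresses followed by a run of
// 2 old ones.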
@@ -1,24 +0,0 @@
package pex

import (
	"testing"

	"github.com/tendermint/tendermint/types"
)

func BenchmarkAddrBook_hash(b *testing.B) {
	book := &addrBook{
		ourAddrs:          make(map[string]struct{}),
		privateIDs:        make(map[types.NodeID]struct{}),
		addrLookup:        make(map[types.NodeID]*knownAddress),
		badPeers:          make(map[types.NodeID]*knownAddress),
		filePath:          "",
		routabilityStrict: true,
	}
	book.init()
	msg := []byte(`foobar`)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, _ = book.hash(msg)
	}
}