Compare commits


10 Commits

Author SHA1 Message Date
marbar3778 ed0c89eb76 fix tests amd errors 2021-07-15 15:00:15 +02:00
marbar3778 87c2ee69fd fix errors in abci 2021-07-05 16:58:31 +02:00
marbar3778 dc3327083e fix errors 2021-07-05 14:34:43 +02:00
marbar3778 6a29aa2b7a Merge branch 'abci++' into finalizeBlock 2021-07-05 13:51:13 +02:00
marbar3778 8a39848d06 fix some tests 2021-06-17 11:56:16 +02:00
marbar3778 f9a22483c0 vacation commit 2021-06-17 09:38:02 +02:00
marbar3778 5a4a56a781 abciresponse, blok events, indexer, some tests 2021-06-08 18:26:35 +02:00
marbar3778 3e006e6bc1 work on abci, proxy and mempool 2021-06-08 17:53:31 +02:00
marbar3778 ffb1fc32ea migrate abci to finalizeBlock 2021-06-08 10:43:57 +02:00
marbar3778 a8f91f696a finalize block 2021-06-07 18:19:56 +02:00
278 changed files with 4581 additions and 9739 deletions

.github/CODEOWNERS vendored

@@ -7,4 +7,5 @@
# global owners are only requested if there isn't a more specific
# codeowner specified below. For this reason, the global codeowners
# are often repeated in package-level definitions.
* @alexanderbez @ebuchman @cmwaters @tessr @tychoish @williambanfield @creachadair
* @alexanderbez @ebuchman @cmwaters @tessr @tychoish


@@ -46,7 +46,7 @@ jobs:
with:
go-version: "1.16"
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
- uses: technote-space/get-diff-action@v4
with:
PATTERNS: |
**/**.go
@@ -68,7 +68,7 @@ jobs:
with:
go-version: "1.16"
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
- uses: technote-space/get-diff-action@v4
with:
PATTERNS: |
**/**.go
@@ -96,7 +96,7 @@ jobs:
needs: tests
steps:
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
- uses: technote-space/get-diff-action@v4
with:
PATTERNS: |
**/**.go
@@ -121,7 +121,7 @@ jobs:
- run: |
cat ./*profile.out | grep -v "mode: atomic" >> coverage.txt
if: env.GIT_DIFF
- uses: codecov/codecov-action@v2.0.2
- uses: codecov/codecov-action@v1.5.2
with:
file: ./coverage.txt
if: env.GIT_DIFF


@@ -17,7 +17,7 @@ jobs:
fail-fast: false
matrix:
p2p: ['legacy', 'new', 'hybrid']
group: ['00', '01']
group: ['00', '01', '02', '03']
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
@@ -35,7 +35,7 @@ jobs:
- name: Generate testnets
working-directory: test/e2e
# When changing -g, also change the matrix groups above
run: ./build/generator -g 2 -d networks/nightly/${{ matrix.p2p }} -p ${{ matrix.p2p }}
run: ./build/generator -g 4 -d networks/nightly/${{ matrix.p2p }} -p ${{ matrix.p2p }}
- name: Run ${{ matrix.p2p }} p2p testnets in group ${{ matrix.group }}
working-directory: test/e2e


@@ -18,7 +18,7 @@ jobs:
with:
go-version: '1.16'
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
- uses: technote-space/get-diff-action@v4
with:
PATTERNS: |
**/**.go


@@ -23,14 +23,9 @@ jobs:
working-directory: test/fuzz
run: go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-build
- name: Fuzz mempool-v1
- name: Fuzz mempool
working-directory: test/fuzz
run: timeout -s SIGINT --preserve-status 10m make fuzz-mempool-v1
continue-on-error: true
- name: Fuzz mempool-v0
working-directory: test/fuzz
run: timeout -s SIGINT --preserve-status 10m make fuzz-mempool-v0
run: timeout -s SIGINT --preserve-status 10m make fuzz-mempool
continue-on-error: true
- name: Fuzz p2p-addrbook


@@ -10,7 +10,7 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 3
steps:
- uses: styfle/cancel-workflow-action@0.9.1
- uses: styfle/cancel-workflow-action@0.9.0
with:
workflow_id: 1041851,1401230,2837803
access_token: ${{ github.token }}


@@ -7,6 +7,6 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2.3.4
- uses: gaurav-nelson/github-action-markdown-link-check@1.0.13
- uses: gaurav-nelson/github-action-markdown-link-check@1.0.12
with:
folder-path: "docs"


@@ -14,7 +14,7 @@ jobs:
timeout-minutes: 8
steps:
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
- uses: technote-space/get-diff-action@v4
with:
PATTERNS: |
**/**.go


@@ -7,14 +7,12 @@ jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v4
- uses: actions/stale@v3.0.19
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
stale-pr-message: "This pull request has been automatically marked as stale because it has not had
recent activity. It will be closed if no further activity occurs. Thank you
for your contributions."
days-before-stale: -1
days-before-close: -1
days-before-pr-stale: 10
days-before-pr-close: 4
days-before-stale: 10
days-before-close: 4
exempt-pr-labels: "S:wip"


@@ -19,7 +19,7 @@ jobs:
with:
go-version: "1.16"
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
- uses: technote-space/get-diff-action@v4
with:
PATTERNS: |
**/**.go
@@ -42,6 +42,38 @@ jobs:
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
if: env.GIT_DIFF
test_abci_apps:
runs-on: ubuntu-latest
needs: build
timeout-minutes: 5
steps:
- uses: actions/setup-go@v2
with:
go-version: "1.16"
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v4
with:
PATTERNS: |
**/**.go
go.mod
go.sum
- uses: actions/cache@v2.1.6
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
if: env.GIT_DIFF
- uses: actions/cache@v2.1.6
with:
path: ~/go/bin
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
if: env.GIT_DIFF
- name: test_abci_apps
run: abci/tests/test_app/test.sh
shell: bash
if: env.GIT_DIFF
test_abci_cli:
runs-on: ubuntu-latest
needs: build
@@ -51,7 +83,7 @@ jobs:
with:
go-version: "1.16"
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
- uses: technote-space/get-diff-action@v4
with:
PATTERNS: |
**/**.go
@@ -82,7 +114,7 @@ jobs:
with:
go-version: "1.16"
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
- uses: technote-space/get-diff-action@v4
with:
PATTERNS: |
**/**.go

.gitignore vendored

@@ -15,7 +15,7 @@
.vagrant
.vendor-new/
.vscode/
abci/abci-cli
abci-cli
addrbook.json
artifacts/*
build/*


@@ -21,21 +21,15 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi
- [cli] \#6372 Introduce `BootstrapPeers` as part of the new p2p stack. Peers to be connected on startup (@cmwaters)
- [config] \#6462 Move `PrivValidator` configuration out of `BaseConfig` into its own section. (@tychoish)
- [rpc] \#6610 Add MaxPeerBlockHeight into /status rpc call (@JayT106)
- [fastsync/rpc] \#6620 Add TotalSyncedTime & RemainingTime to SyncInfo in /status RPC (@JayT106)
- [rpc/grpc] \#6725 Mark gRPC in the RPC layer as deprecated.
- [blockchain/v2] \#6730 Fast Sync v2 is deprecated, please use v0
- [rpc] Add genesis_chunked method to support paginated and parallel fetching of large genesis documents.
- [rpc] \#6820 Update RPC methods to reflect changes in the p2p layer, disabling support for `UnsafeDialSeeds` and `UnsafeDialPeers` when used with the new p2p layer, and changing the response format of the peer list in `NetInfo` for all users.
- [libs/CList] \#6626 Automatically detach the prev/next elements in Remove function (@JayT106)
- Apps
- [ABCI] \#6408 Change the `key` and `value` fields from `[]byte` to `string` in the `EventAttribute` type. (@alexanderbez)
- [ABCI] \#5447 Remove `SetOption` method from `ABCI.Client` interface
- [ABCI] \#5447 Reset `Oneof` indexes for `Request` and `Response`.
- [ABCI] \#5818 Use protoio for msg length delimitation. Migrates from int64 to uint64 length delimiters.
- [ABCI] \#3546 Add `mempool_error` field to `ResponseCheckTx`. This field will contain an error string if Tendermint encountered an error while adding a transaction to the mempool. (@williambanfield)
- [Version] \#6494 `TMCoreSemVer` has been renamed to `TMVersion`.
- It is no longer required to set ldflags to set version strings
- [abci/counter] \#6684 Delete counter example app
- It is no longer required to set ldflags to set version strings
- P2P Protocol
@@ -57,25 +51,23 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi
- [store] \#5848 Remove block store state in favor of using the db iterators directly (@cmwaters)
- [state] \#5864 Use an iterator when pruning state (@cmwaters)
- [types] \#6023 Remove `tm2pb.Header`, `tm2pb.BlockID`, `tm2pb.PartSetHeader` and `tm2pb.NewValidatorUpdate`.
- Each of the above types has a `ToProto` and `FromProto` method or function which replaced this logic.
- Each of the above types has a `ToProto` and `FromProto` method or function which replaced this logic.
- [light] \#6054 Move `MaxRetryAttempt` option from client to provider.
- `NewWithOptions` now sets the max retry attempts and timeouts (@cmwaters)
- `NewWithOptions` now sets the max retry attempts and timeouts (@cmwaters)
- [all] \#6077 Change spelling from British English to American (@cmwaters)
- Rename "Subscription.Cancelled()" to "Subscription.Canceled()" in libs/pubsub
- Rename "behaviour" pkg to "behavior" and internalized it in blockchain v2
- Rename "Subscription.Cancelled()" to "Subscription.Canceled()" in libs/pubsub
- Rename "behaviour" pkg to "behavior" and internalized it in blockchain v2
- [rpc/client/http] \#6176 Remove `endpoint` arg from `New`, `NewWithTimeout` and `NewWithClient` (@melekes)
- [rpc/client/http] \#6176 Unexpose `WSEvents` (@melekes)
- [rpc/jsonrpc/client/ws_client] \#6176 `NewWS` no longer accepts options (use `NewWSWithOptions` and `OnReconnect` funcs to configure the client) (@melekes)
- [internal/libs] \#6366 Move `autofile`, `clist`,`fail`,`flowrate`, `protoio`, `sync`, `tempfile`, `test` and `timer` lib packages to an internal folder
- [libs/rand] \#6364 Remove most of libs/rand in favour of standard lib's `math/rand` (@liamsi)
- [mempool] \#6466 The original mempool reactor has been versioned as `v0` and moved to a sub-package under the root `mempool` package.
Some core types have been kept in the `mempool` package such as `TxCache` and its implementations, the `Mempool` interface itself
and `TxInfo`. (@alexanderbez)
Some core types have been kept in the `mempool` package such as `TxCache` and its implementations, the `Mempool` interface itself
and `TxInfo`. (@alexanderbez)
- [crypto/sr25519] \#6526 Do not re-execute the Ed25519-style key derivation step when doing signing and verification. The derivation is now done once and only once. This breaks `sr25519.GenPrivKeyFromSecret` output compatibility. (@Yawning)
- [types] \#6627 Move `NodeKey` to types to make the type public.
- [types] \#6627 Move `NodeKey` to types to make the type public.
- [config] \#6627 Extend `config` to contain methods `LoadNodeKeyID` and `LoadOrGenNodeKeyID`
- [blocksync] \#6755 Rename `FastSync` and `Blockchain` package to `BlockSync`
(@cmwaters)
- Blockchain Protocol
@@ -86,7 +78,6 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi
- Tooling
- [tools] \#6498 Set OS home dir instead of the hardcoded PATH. (@JayT106)
- [cli/indexer] \#6676 Reindex events command line tooling. (@JayT106)
### FEATURES
@@ -104,12 +95,8 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi
- Applications that do not specify a priority, i.e. zero, will have transactions reaped by the order in which they are received by the node.
- Transactions are gossiped in FIFO order as they are in `v0`.
- [config/indexer] \#6411 Introduce support for custom event indexing data sources, specifically PostgreSQL. (@JayT106)
- [fastsync/event] \#6619 Emit fastsync status event when switching consensus/fastsync (@JayT106)
- [statesync/event] \#6700 Emit statesync status start/end event (@JayT106)
### IMPROVEMENTS
- [rpc] \#6825 Remove egregious INFO log from `ABCI#Query` RPC. (@alexanderbez)
- [libs/log] Console log formatting changes as a result of \#6534 and \#6589. (@tychoish)
- [statesync] \#6566 Allow state sync fetchers and request timeout to be configurable. (@alexanderbez)
- [types] \#6478 Add `block_id` to `newblock` event (@jeebster)
@@ -157,9 +144,6 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi
- [blockchain/v1] [\#5701](https://github.com/tendermint/tendermint/pull/5701) Handle peers without blocks (@melekes)
- [blockchain/v1] \#5711 Fix deadlock (@melekes)
- [evidence] \#6375 Fix bug with inconsistent LightClientAttackEvidence hashing (cmwaters)
- [rpc] \#6507 Ensure RPC client can handle URLs without ports (@JayT106)
- [rpc] \#6507 fix RPC client doesn't handle url's without ports (@JayT106)
- [statesync] \#6463 Adds Reverse Sync feature to fetch historical light blocks after state sync in order to verify any evidence (@cmwaters)
- [fastsync] \#6590 Update the metrics during fast-sync (@JayT106)
- [gitignore] \#6668 Fix gitignore of abci-cli (@tanyabouman)
- [light] \#6687 Fix bug with incorrectly handled contexts in the light client (@cmwaters)
- [privval] \#6748 Fix vote timestamp to prevent chain halt (@JayT106)
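
As a quick illustration of the \#6408 entry above (a sketch, not part of the diff): after the change, `EventAttribute` keys and values are plain strings rather than byte slices.

```go
package main

import (
	abci "github.com/tendermint/tendermint/abci/types"
)

// transferEvent shows the post-#6408 shape of an event: Key and Value are
// strings now (previously Key: []byte("sender"), Value: []byte("foo")).
func transferEvent() abci.Event {
	return abci.Event{
		Type: "transfer",
		Attributes: []abci.EventAttribute{
			{Key: "sender", Value: "foo"},
			{Key: "recipient", Value: "bar"},
			{Key: "amount", Value: "35"},
		},
	}
}
```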


@@ -227,96 +227,16 @@ Fixes #nnnn
Each PR should have one commit once it lands on `master`; this can be accomplished by using the "squash and merge" button on Github. Be sure to edit your commit message, though!
### Release procedure
### Release Procedure
#### A note about backport branches
Tendermint's `master` branch is under active development.
Releases are specified using tags and are built from long-lived "backport" branches.
Each release "line" (e.g. 0.34 or 0.33) has its own long-lived backport branch,
and the backport branches have names like `v0.34.x` or `v0.33.x`
(literally, `x`; it is not a placeholder in this case).
As non-breaking changes land on `master`, they should also be backported (cherry-picked)
to these backport branches.
We use Mergify's [backport feature](https://mergify.io/features/backports) to automatically backport
to the needed branch. There should be a label for any backport branch that you'll be targeting.
To notify the bot to backport a pull request, mark the pull request with
the label `S:backport-to-<backport_branch>`.
Once the original pull request is merged, the bot will try to cherry-pick the pull request
to the backport branch. If the bot fails to backport, it will open a pull request.
The author of the original pull request is responsible for solving the conflicts and
merging the pull request.
#### Creating a backport branch
If this is the first release candidate for a major release, you get to have the honor of creating
the backport branch!
Note that, after creating the backport branch, you'll also need to update the tags on `master`
so that `go mod` is able to order the branches correctly. You should tag `master` with a "dev" tag
that is "greater than" the backport branches tags. See #6072 for more context.
In the following example, we'll assume that we're making a backport branch for
the 0.35.x line.
1. Start on `master`
2. Create the backport branch:
`git checkout -b v0.35.x`
3. Go back to master and tag it as the dev branch for the _next_ major release and push it back up:
`git tag -a v0.36.0-dev; git push origin v0.36.0-dev`
4. Create a new workflow to run the e2e nightlies for this backport branch.
(See https://github.com/tendermint/tendermint/blob/master/.github/workflows/e2e-nightly-34x.yml
for an example.)
#### Release candidates
Before creating an official release, especially a major release, we may want to create a
release candidate (RC) for our friends and partners to test out. We use git tags to
create RCs, and we build them off of backport branches.
Tags for RCs should follow the "standard" release naming conventions, with `-rcX` at the end
(for example, `v0.35.0-rc0`).
(Note that branches and tags _cannot_ have the same names, so it's important that these branches
have distinct names from the tags/release names.)
If this is the first RC for a major release, you'll have to make a new backport branch (see above).
Otherwise:
1. Start from the backport branch (e.g. `v0.35.x`).
1. Run the integration tests and the e2e nightlies
(which can be triggered from the Github UI;
e.g., https://github.com/tendermint/tendermint/actions/workflows/e2e-nightly-34x.yml).
1. Prepare the changelog:
- Move the changes included in `CHANGELOG_PENDING.md` into `CHANGELOG.md`.
- Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
all PRs
- Ensure that UPGRADING.md is up-to-date and includes notes on any breaking changes
or other upgrading flows.
- Bump TMVersionDefault version in `version.go`
- Bump P2P and block protocol versions in `version.go`, if necessary
- Bump ABCI protocol version in `version.go`, if necessary
1. Open a PR with these changes against the backport branch.
1. Once these changes have landed on the backport branch, be sure to pull them back down locally.
2. Once you have the changes locally, create the new tag, specifying a name and a tag "message":
`git tag -a v0.35.0-rc0 -m "Release Candidate v0.35.0-rc0"`
3. Push the tag back up to origin:
`git push origin v0.35.0-rc0`
Now the tag should be available on the repo's releases page.
4. Future RCs will continue to be built off of this branch.
Note that this process should only be used for "true" RCs--
release candidates that, if successful, will be the next release.
For more experimental "RCs," create a new, short-lived branch and tag that instead.
#### Major release
#### Major Release
This major release process assumes that this release was preceded by release candidates.
If there were no release candidates, begin by creating a backport branch, as described above.
If there were no release candidates, and you'd like to cut a major release directly from master, see below.
1. Start on the backport branch (e.g. `v0.35.x`)
2. Run integration tests and the e2e nightlies.
3. Prepare the release:
1. Start on the latest RC branch (`RCx/vX.X.0`).
2. Run integration tests.
3. Branch off of the RC branch (`git checkout -b release-prep`) and prepare the release:
- "Squash" changes from the changelog entries for the RCs into a single entry,
and add all changes included in `CHANGELOG_PENDING.md`.
(Squashing includes both combining all entries, as well as removing or simplifying
@@ -329,24 +249,57 @@ If there were no release candidates, begin by creating a backport branch, as des
- Bump P2P and block protocol versions in `version.go`, if necessary
- Bump ABCI protocol version in `version.go`, if necessary
- Add any release notes you would like to be added to the body of the release to `release_notes.md`.
4. Open a PR with these changes against the backport branch.
5. Once these changes are on the backport branch, push a tag with prepared release details.
This will trigger the actual release `v0.35.0`.
- `git tag -a v0.35.0 -m 'Release v0.35.0'`
- `git push origin v0.35.0`
4. Open a PR with these changes against the RC branch (`RCx/vX.X.0`).
5. Once these changes are on the RC branch, branch off of the RC branch again to create a release branch:
- `git checkout RCx/vX.X.0`
- `git checkout -b release/vX.X.0`
6. Push a tag with prepared release details. This will trigger the actual release `vX.X.0`.
- `git tag -a vX.X.0 -m 'Release vX.X.0'`
- `git push origin vX.X.0`
7. Make sure that `master` is updated with the latest `CHANGELOG.md`, `CHANGELOG_PENDING.md`, and `UPGRADING.md`.
8. Create the long-lived minor release branch `RC0/vX.X.1` for the next point release on this
new major release series.
#### Minor release (point releases)
##### Major Release (from `master`)
1. Start on `master`
2. Run integration tests (see `test_integrations` in Makefile)
3. Prepare release in a pull request against `master` (to be squash merged):
- Copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md`; if this release
had release candidates, squash all the RC updates into one
- Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
all issues
- Run `bash ./scripts/authors.sh` to get a list of authors since the latest
release, and add the github aliases of external contributors to the top of
the changelog. To look up an alias from an email, try `bash ./scripts/authors.sh <email>`
- Reset the `CHANGELOG_PENDING.md`
- Bump TMVersionDefault version in `version.go`
- Bump P2P and block protocol versions in `version.go`, if necessary
- Bump ABCI protocol version in `version.go`, if necessary
- Make sure all significant breaking changes are covered in `UPGRADING.md`
- Add any release notes you would like to be added to the body of the release to `release_notes.md`.
4. Push a tag with prepared release details (this will trigger the release `vX.X.0`)
- `git tag -a vX.X.x -m 'Release vX.X.x'`
- `git push origin vX.X.x`
5. Update the `CHANGELOG.md` file on master with the release's changelog.
6. Delete any RC branches and tags for this release (if applicable)
#### Minor Release (Point Releases)
Minor releases are done differently from major releases: They are built off of long-lived backport branches, rather than from master.
Each release "line" (e.g. 0.34 or 0.33) has its own long-lived backport branch, and
the backport branches have names like `v0.34.x` or `v0.33.x` (literally, `x`; it is not a placeholder in this case).
As non-breaking changes land on `master`, they should also be backported (cherry-picked) to these backport branches.
We use Mergify's [backport feature](https://mergify.io/features/backports) to automatically backport to the needed branch. Depending on which backport branch you need to backport to there will be labels for them. To notify the bot to backport a pull request, mark the pull request with the label `backport-to-<backport_branch>`. Once the original pull request is merged, the bot will try to cherry-pick the pull request to the backport branch. If the bot fails to backport, it will open a pull request. The author of the original pull request is responsible for solving the conflicts and merging the pull request.
Minor releases don't have release candidates by default, although any tricky changes may merit a release candidate.
To create a minor release:
1. Checkout the long-lived backport branch: `git checkout v0.35.x`
2. Run integration tests (`make test_integrations`) and the nightlies.
1. Checkout the long-lived backport branch: `git checkout vX.X.x`
2. Run integration tests: `make test_integrations`
3. Check out a new branch and prepare the release:
- Copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md`
- Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for all issues
@@ -356,14 +309,34 @@ To create a minor release:
(Note that ABCI follows semver, and that ABCI versions are the only versions
which can change during minor releases, and only field additions are valid minor changes.)
- Add any release notes you would like to be added to the body of the release to `release_notes.md`.
4. Open a PR with these changes that will land them back on `v0.35.x`
4. Open a PR with these changes that will land them back on `vX.X.x`
5. Once this change has landed on the backport branch, make sure to pull it locally, then push a tag.
- `git tag -a v0.35.1 -m 'Release v0.35.1'`
- `git push origin v0.35.1`
- `git tag -a vX.X.x -m 'Release vX.X.x'`
- `git push origin vX.X.x`
6. Create a pull request back to master with the CHANGELOG & version changes from the latest release.
- Remove all `R:minor` labels from the pull requests that were included in the release.
- Do not merge the backport branch into master.
#### Release Candidates
Before creating an official release, especially a major release, we may want to create a
release candidate (RC) for our friends and partners to test out. We use git tags to
create RCs, and we build them off of RC branches. RC branches typically have names formatted
like `RCX/vX.X.X` (or, concretely, `RC0/v0.34.0`), while the tags themselves follow
the "standard" release naming conventions, with `-rcX` at the end (`vX.X.X-rcX`).
(Note that branches and tags _cannot_ have the same names, so it's important that these branches
have distinct names from the tags/release names.)
1. Start from the RC branch (e.g. `RC0/v0.34.0`).
2. Create the new tag, specifying a name and a tag "message":
`git tag -a v0.34.0-rc0 -m "Release Candidate v0.34.0-rc0"`
3. Push the tag back up to origin:
`git push origin v0.34.0-rc0`
Now the tag should be available on the repo's releases page.
4. Create a new release candidate branch for any possible updates to the RC:
`git checkout -b RC1/v0.34.0; git push origin RC1/v0.34.0`
## Testing
### Unit tests


@@ -202,7 +202,7 @@ format:
lint:
@echo "--> Running linter"
go run github.com/golangci/golangci-lint/cmd/golangci-lint run
@golangci-lint run
.PHONY: lint
DESTINATION = ./index.html.md
@@ -231,15 +231,6 @@ build-docker: build-linux
rm -rf DOCKER/tendermint
.PHONY: build-docker
###############################################################################
### Mocks ###
###############################################################################
mockery:
go generate -run="./scripts/mockery_generate.sh" ./...
.PHONY: mockery
###############################################################################
### Local testnet using docker ###
###############################################################################


@@ -17,45 +17,21 @@ This guide provides instructions for upgrading to specific versions of Tendermin
### Config Changes
* `fast_sync = "v1"` and `fast_sync = "v2"` are no longer supported. Please use `v0` instead.
* `fast_sync = "v1"` is no longer supported. Please use `v2` instead.
* All config parameters are now hyphen-case (also known as kebab-case) instead of snake_case. Before restarting the node make sure
you have updated all the variables in your `config.toml` file.
* Added `--mode` flag and `mode` config variable on `config.toml` for setting Mode of the Node: `full` | `validator` | `seed` (default: `full`)
[ADR-52](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-052-tendermint-mode.md)
* `BootstrapPeers` has been added as part of the new p2p stack. This will eventually replace
`Seeds`. Bootstrap peers are connected to on startup if needed for peer discovery. Unlike
persistent peers, there's no guarantee that the node will remain connected with these peers.
persistent peers, there's no guarantee that the node will remain connected with these peers.
* configuration values starting with `priv-validator-` have moved to the new
- configuration values starting with `priv-validator-` have moved to the new
`priv-validator` section, without the `priv-validator-` prefix.
* The fast sync process, as well as the blockchain package and service, have all
been renamed to block sync
### Key Format Changes
The format of all tendermint on-disk database keys changes in
0.35. Upgrading nodes must either re-sync all data or run a migration
script provided in this release. The script located in
`github.com/tendermint/tendermint/scripts/keymigrate/migrate.go`
provides the function `Migrate(context.Context, db.DB)`, which you can
run in whatever way makes sense for your deployment.
For ease of use, the `tendermint` command includes a CLI version of the
migration script, which you can invoke as follows:
tendermint key-migrate
This reads the configuration file as normal and allows the
`--db-backend` and `--db-dir` flags to change database operations as
needed.
The migration operation is idempotent and can be run more than once,
if needed.
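For illustration, a minimal sketch of running the migration in-process rather than through the CLI. The database name, backend, and directory below are placeholders, and the `error` return of `Migrate` is assumed from the description above:

```go
package main

import (
	"context"
	"log"

	dbm "github.com/tendermint/tm-db"

	"github.com/tendermint/tendermint/scripts/keymigrate"
)

func main() {
	// Placeholder values: point these at the node's actual data directory
	// and configured database backend.
	db, err := dbm.NewDB("blockstore", dbm.GoLevelDBBackend, "/path/to/.tendermint/data")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// The migration is idempotent, so re-running it on an already-migrated
	// database is safe.
	if err := keymigrate.Migrate(context.Background(), db); err != nil {
		log.Fatal(err)
	}
}
```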
### CLI Changes
* You must now specify the node mode (validator|full|seed) in `tendermint init [mode]`
@@ -87,7 +63,7 @@ are:
- `blockchain`
- `evidence`
Accordingly, the `node` package was changed to reduce access to
Accordingly, the `node` package was changed to reduce access to
tendermint internals: applications that use tendermint as a library
will need to change to accommodate these changes. Most notably:
@@ -98,34 +74,6 @@ will need to change to accommodate these changes. Most notably:
longer exported and have been replaced with `node.New` and
`node.NewDefault` which provide more functional interfaces.
### RPC changes
#### gRPC Support
Mark gRPC in the RPC layer as deprecated and to be removed in 0.36.
#### Peer Management Interface
When running with the new P2P Layer, the `UnsafeDialSeeds` and
`UnsafeDialPeers` RPC methods will always return an error. They are
deprecated and will be removed in 0.36 when the legacy peer stack is
removed.
Additionally the format of the Peer list returned in the `NetInfo`
method changes in this release to accommodate the different way that
the new stack tracks data about peers. This change affects users of
both stacks.
### Support for Custom Reactor and Mempool Implementations
The changes to the p2p layer removed existing support for custom
reactors. Based on our understanding of how this functionality was
used, the introduction of the prioritized mempool covers nearly all of
the use cases for custom reactors. If you are currently running custom
reactors and mempools and are having trouble seeing the migration path
for your project, please feel free to reach out to the Tendermint Core
development team directly.
## v0.34.0
**Upgrading to Tendermint 0.34 requires a blockchain restart.**
@@ -279,8 +227,8 @@ Other user-relevant changes include:
* The old `lite` package was removed; the new light client uses the `light` package.
* The `Verifier` was broken up into two pieces:
* Core verification logic (pure `VerifyX` functions)
* `Client` object, which represents the complete light client
* Core verification logic (pure `VerifyX` functions)
* `Client` object, which represents the complete light client
* The new light clients stores headers & validator sets as `LightBlock`s
* The RPC client can be found in the `/rpc` directory.
* The HTTP(S) proxy is located in the `/proxy` directory.
@@ -412,12 +360,12 @@ Evidence Params has been changed to include duration.
### Go API
* `libs/common` has been removed in favor of specific pkgs.
* `async`
* `service`
* `rand`
* `net`
* `strings`
* `cmap`
* `async`
* `service`
* `rand`
* `net`
* `strings`
* `cmap`
* removal of `errors` pkg
### RPC Changes
@@ -486,9 +434,9 @@ Prior to the update, suppose your `ResponseDeliverTx` look like:
```go
abci.ResponseDeliverTx{
Tags: []kv.Pair{
{Key: []byte("sender"), Value: []byte("foo")},
{Key: []byte("recipient"), Value: []byte("bar")},
{Key: []byte("amount"), Value: []byte("35")},
{Key: []byte("sender"), Value: []byte("foo")},
{Key: []byte("recipient"), Value: []byte("bar")},
{Key: []byte("amount"), Value: []byte("35")},
}
}
```
@@ -507,14 +455,14 @@ the following `Events`:
```go
abci.ResponseDeliverTx{
Events: []abci.Event{
{
Type: "transfer",
Attributes: kv.Pairs{
{Key: []byte("sender"), Value: []byte("foo")},
{Key: []byte("recipient"), Value: []byte("bar")},
{Key: []byte("amount"), Value: []byte("35")},
},
}
{
Type: "transfer",
Attributes: kv.Pairs{
{Key: []byte("sender"), Value: []byte("foo")},
{Key: []byte("recipient"), Value: []byte("bar")},
{Key: []byte("amount"), Value: []byte("35")},
},
}
}
```
@@ -562,9 +510,9 @@ In this case, the WS client will receive an error with description:
"jsonrpc": "2.0",
"id": "{ID}#event",
"error": {
"code": -32000,
"msg": "Server error",
"data": "subscription was canceled (reason: client is not pulling messages fast enough)" // or "subscription was canceled (reason: Tendermint exited)"
"code": -32000,
"msg": "Server error",
"data": "subscription was canceled (reason: client is not pulling messages fast enough)" // or "subscription was canceled (reason: Tendermint exited)"
}
}
@@ -770,9 +718,9 @@ just the `Data` field set:
```go
[]ProofOp{
ProofOp{
Data: <proof bytes>,
}
ProofOp{
Data: <proof bytes>,
}
}
```


@@ -15,7 +15,7 @@ const (
echoRetryIntervalSeconds = 1
)
//go:generate ../../scripts/mockery_generate.sh Client
//go:generate mockery --case underscore --name Client
// Client defines an interface for an ABCI client.
//
@@ -35,33 +35,29 @@ type Client interface {
FlushAsync(context.Context) (*ReqRes, error)
EchoAsync(ctx context.Context, msg string) (*ReqRes, error)
InfoAsync(context.Context, types.RequestInfo) (*ReqRes, error)
DeliverTxAsync(context.Context, types.RequestDeliverTx) (*ReqRes, error)
CheckTxAsync(context.Context, types.RequestCheckTx) (*ReqRes, error)
QueryAsync(context.Context, types.RequestQuery) (*ReqRes, error)
CommitAsync(context.Context) (*ReqRes, error)
InitChainAsync(context.Context, types.RequestInitChain) (*ReqRes, error)
BeginBlockAsync(context.Context, types.RequestBeginBlock) (*ReqRes, error)
EndBlockAsync(context.Context, types.RequestEndBlock) (*ReqRes, error)
ListSnapshotsAsync(context.Context, types.RequestListSnapshots) (*ReqRes, error)
OfferSnapshotAsync(context.Context, types.RequestOfferSnapshot) (*ReqRes, error)
LoadSnapshotChunkAsync(context.Context, types.RequestLoadSnapshotChunk) (*ReqRes, error)
ApplySnapshotChunkAsync(context.Context, types.RequestApplySnapshotChunk) (*ReqRes, error)
FinalizeBlockAsync(context.Context, types.RequestFinalizeBlock) (*ReqRes, error)
// Synchronous requests
FlushSync(context.Context) error
EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error)
InfoSync(context.Context, types.RequestInfo) (*types.ResponseInfo, error)
DeliverTxSync(context.Context, types.RequestDeliverTx) (*types.ResponseDeliverTx, error)
CheckTxSync(context.Context, types.RequestCheckTx) (*types.ResponseCheckTx, error)
QuerySync(context.Context, types.RequestQuery) (*types.ResponseQuery, error)
CommitSync(context.Context) (*types.ResponseCommit, error)
InitChainSync(context.Context, types.RequestInitChain) (*types.ResponseInitChain, error)
BeginBlockSync(context.Context, types.RequestBeginBlock) (*types.ResponseBeginBlock, error)
EndBlockSync(context.Context, types.RequestEndBlock) (*types.ResponseEndBlock, error)
ListSnapshotsSync(context.Context, types.RequestListSnapshots) (*types.ResponseListSnapshots, error)
OfferSnapshotSync(context.Context, types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error)
LoadSnapshotChunkSync(context.Context, types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error)
ApplySnapshotChunkSync(context.Context, types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error)
FinalizeBlockSync(context.Context, types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error)
}
//----------------------------------------
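
To show how the reshaped interface is used, here is a minimal sketch (not from the diff) of driving the new `FinalizeBlockSync` call through the in-process local client; it assumes `NewLocalClient` and the kvstore example app handle `RequestFinalizeBlock` on this branch:

```go
package main

import (
	"context"
	"log"

	abcicli "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/example/kvstore"
	"github.com/tendermint/tendermint/abci/types"
)

func main() {
	// A nil mutex lets the local client create its own.
	client := abcicli.NewLocalClient(nil, kvstore.NewApplication())
	if err := client.Start(); err != nil {
		log.Fatal(err)
	}
	defer client.Stop() //nolint:errcheck

	// A block's transactions are now submitted in one call instead of
	// one DeliverTx round-trip per transaction.
	res, err := client.FinalizeBlockSync(
		context.Background(),
		types.RequestFinalizeBlock{Txs: [][]byte{[]byte("key=value")}},
	)
	if err != nil {
		log.Fatal(err)
	}
	for _, tx := range res.Txs {
		log.Printf("tx code=%d log=%q", tx.Code, tx.Log)
	}
}
```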


@@ -194,16 +194,6 @@ func (cli *grpcClient) InfoAsync(ctx context.Context, params types.RequestInfo)
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Info{Info: res}})
}
// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) DeliverTxAsync(ctx context.Context, params types.RequestDeliverTx) (*ReqRes, error) {
req := types.ToRequestDeliverTx(params)
res, err := cli.client.DeliverTx(ctx, req.GetDeliverTx(), grpc.WaitForReady(true))
if err != nil {
return nil, err
}
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_DeliverTx{DeliverTx: res}})
}
// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) CheckTxAsync(ctx context.Context, params types.RequestCheckTx) (*ReqRes, error) {
req := types.ToRequestCheckTx(params)
@@ -244,26 +234,6 @@ func (cli *grpcClient) InitChainAsync(ctx context.Context, params types.RequestI
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_InitChain{InitChain: res}})
}
// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) BeginBlockAsync(ctx context.Context, params types.RequestBeginBlock) (*ReqRes, error) {
req := types.ToRequestBeginBlock(params)
res, err := cli.client.BeginBlock(ctx, req.GetBeginBlock(), grpc.WaitForReady(true))
if err != nil {
return nil, err
}
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_BeginBlock{BeginBlock: res}})
}
// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) EndBlockAsync(ctx context.Context, params types.RequestEndBlock) (*ReqRes, error) {
req := types.ToRequestEndBlock(params)
res, err := cli.client.EndBlock(ctx, req.GetEndBlock(), grpc.WaitForReady(true))
if err != nil {
return nil, err
}
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_EndBlock{EndBlock: res}})
}
// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) ListSnapshotsAsync(ctx context.Context, params types.RequestListSnapshots) (*ReqRes, error) {
req := types.ToRequestListSnapshots(params)
@@ -314,6 +284,22 @@ func (cli *grpcClient) ApplySnapshotChunkAsync(
)
}
func (cli *grpcClient) FinalizeBlockAsync(
ctx context.Context,
params types.RequestFinalizeBlock,
) (*ReqRes, error) {
req := types.ToRequestFinalizeBlock(params)
res, err := cli.client.FinalizeBlock(ctx, req.GetFinalizeBlock(), grpc.WaitForReady(true))
if err != nil {
return nil, err
}
return cli.finishAsyncCall(
ctx,
req,
&types.Response{Value: &types.Response_FinalizeBlock{FinalizeBlock: res}},
)
}
// finishAsyncCall creates a ReqRes for an async call, and immediately populates it
// with the response. We don't complete it until it's been ordered via the channel.
func (cli *grpcClient) finishAsyncCall(ctx context.Context, req *types.Request, res *types.Response) (*ReqRes, error) {
@@ -380,18 +366,6 @@ func (cli *grpcClient) InfoSync(
return cli.finishSyncCall(reqres).GetInfo(), cli.Error()
}
func (cli *grpcClient) DeliverTxSync(
ctx context.Context,
params types.RequestDeliverTx,
) (*types.ResponseDeliverTx, error) {
reqres, err := cli.DeliverTxAsync(ctx, params)
if err != nil {
return nil, err
}
return cli.finishSyncCall(reqres).GetDeliverTx(), cli.Error()
}
func (cli *grpcClient) CheckTxSync(
ctx context.Context,
params types.RequestCheckTx,
@@ -435,30 +409,6 @@ func (cli *grpcClient) InitChainSync(
return cli.finishSyncCall(reqres).GetInitChain(), cli.Error()
}
func (cli *grpcClient) BeginBlockSync(
ctx context.Context,
params types.RequestBeginBlock,
) (*types.ResponseBeginBlock, error) {
reqres, err := cli.BeginBlockAsync(ctx, params)
if err != nil {
return nil, err
}
return cli.finishSyncCall(reqres).GetBeginBlock(), cli.Error()
}
func (cli *grpcClient) EndBlockSync(
ctx context.Context,
params types.RequestEndBlock,
) (*types.ResponseEndBlock, error) {
reqres, err := cli.EndBlockAsync(ctx, params)
if err != nil {
return nil, err
}
return cli.finishSyncCall(reqres).GetEndBlock(), cli.Error()
}
func (cli *grpcClient) ListSnapshotsSync(
ctx context.Context,
params types.RequestListSnapshots,
@@ -504,3 +454,14 @@ func (cli *grpcClient) ApplySnapshotChunkSync(
}
return cli.finishSyncCall(reqres).GetApplySnapshotChunk(), cli.Error()
}
func (cli *grpcClient) FinalizeBlockSync(
ctx context.Context,
params types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
reqres, err := cli.FinalizeBlockAsync(ctx, params)
if err != nil {
return nil, err
}
return cli.finishSyncCall(reqres).GetFinalizeBlock(), cli.Error()
}


@@ -77,17 +77,6 @@ func (app *localClient) InfoAsync(ctx context.Context, req types.RequestInfo) (*
), nil
}
func (app *localClient) DeliverTxAsync(ctx context.Context, params types.RequestDeliverTx) (*ReqRes, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.DeliverTx(params)
return app.callback(
types.ToRequestDeliverTx(params),
types.ToResponseDeliverTx(res),
), nil
}
func (app *localClient) CheckTxAsync(ctx context.Context, req types.RequestCheckTx) (*ReqRes, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
@@ -132,28 +121,6 @@ func (app *localClient) InitChainAsync(ctx context.Context, req types.RequestIni
), nil
}
func (app *localClient) BeginBlockAsync(ctx context.Context, req types.RequestBeginBlock) (*ReqRes, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.BeginBlock(req)
return app.callback(
types.ToRequestBeginBlock(req),
types.ToResponseBeginBlock(res),
), nil
}
func (app *localClient) EndBlockAsync(ctx context.Context, req types.RequestEndBlock) (*ReqRes, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.EndBlock(req)
return app.callback(
types.ToRequestEndBlock(req),
types.ToResponseEndBlock(res),
), nil
}
func (app *localClient) ListSnapshotsAsync(ctx context.Context, req types.RequestListSnapshots) (*ReqRes, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
@@ -204,6 +171,20 @@ func (app *localClient) ApplySnapshotChunkAsync(
), nil
}
func (app *localClient) FinalizeBlockAsync(
ctx context.Context,
req types.RequestFinalizeBlock,
) (*ReqRes, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.FinalizeBlock(req)
return app.callback(
types.ToRequestFinalizeBlock(req),
types.ToResponseFinalizeBlock(res),
), nil
}
//-------------------------------------------------------
func (app *localClient) FlushSync(ctx context.Context) error {
@@ -222,18 +203,6 @@ func (app *localClient) InfoSync(ctx context.Context, req types.RequestInfo) (*t
return &res, nil
}
func (app *localClient) DeliverTxSync(
ctx context.Context,
req types.RequestDeliverTx,
) (*types.ResponseDeliverTx, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.DeliverTx(req)
return &res, nil
}
func (app *localClient) CheckTxSync(
ctx context.Context,
req types.RequestCheckTx,
@@ -276,30 +245,6 @@ func (app *localClient) InitChainSync(
return &res, nil
}
func (app *localClient) BeginBlockSync(
ctx context.Context,
req types.RequestBeginBlock,
) (*types.ResponseBeginBlock, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.BeginBlock(req)
return &res, nil
}
func (app *localClient) EndBlockSync(
ctx context.Context,
req types.RequestEndBlock,
) (*types.ResponseEndBlock, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.EndBlock(req)
return &res, nil
}
func (app *localClient) ListSnapshotsSync(
ctx context.Context,
req types.RequestListSnapshots,
@@ -346,6 +291,17 @@ func (app *localClient) ApplySnapshotChunkSync(
return &res, nil
}
func (app *localClient) FinalizeBlockSync(
ctx context.Context,
req types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.FinalizeBlock(req)
return &res, nil
}
//-------------------------------------------------------
func (app *localClient) callback(req *types.Request, res *types.Response) *ReqRes {


@@ -1,4 +1,4 @@
// Code generated by mockery. DO NOT EDIT.
// Code generated by mockery 2.9.0. DO NOT EDIT.
package mocks
@@ -65,52 +65,6 @@ func (_m *Client) ApplySnapshotChunkSync(_a0 context.Context, _a1 types.RequestA
return r0, r1
}
// BeginBlockAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) BeginBlockAsync(_a0 context.Context, _a1 types.RequestBeginBlock) (*abcicli.ReqRes, error) {
ret := _m.Called(_a0, _a1)
var r0 *abcicli.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestBeginBlock) *abcicli.ReqRes); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*abcicli.ReqRes)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, types.RequestBeginBlock) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// BeginBlockSync provides a mock function with given fields: _a0, _a1
func (_m *Client) BeginBlockSync(_a0 context.Context, _a1 types.RequestBeginBlock) (*types.ResponseBeginBlock, error) {
ret := _m.Called(_a0, _a1)
var r0 *types.ResponseBeginBlock
if rf, ok := ret.Get(0).(func(context.Context, types.RequestBeginBlock) *types.ResponseBeginBlock); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.ResponseBeginBlock)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, types.RequestBeginBlock) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// CheckTxAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) CheckTxAsync(_a0 context.Context, _a1 types.RequestCheckTx) (*abcicli.ReqRes, error) {
ret := _m.Called(_a0, _a1)
@@ -203,52 +157,6 @@ func (_m *Client) CommitSync(_a0 context.Context) (*types.ResponseCommit, error)
return r0, r1
}
// DeliverTxAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) DeliverTxAsync(_a0 context.Context, _a1 types.RequestDeliverTx) (*abcicli.ReqRes, error) {
ret := _m.Called(_a0, _a1)
var r0 *abcicli.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *abcicli.ReqRes); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*abcicli.ReqRes)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, types.RequestDeliverTx) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// DeliverTxSync provides a mock function with given fields: _a0, _a1
func (_m *Client) DeliverTxSync(_a0 context.Context, _a1 types.RequestDeliverTx) (*types.ResponseDeliverTx, error) {
ret := _m.Called(_a0, _a1)
var r0 *types.ResponseDeliverTx
if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *types.ResponseDeliverTx); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.ResponseDeliverTx)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, types.RequestDeliverTx) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// EchoAsync provides a mock function with given fields: ctx, msg
func (_m *Client) EchoAsync(ctx context.Context, msg string) (*abcicli.ReqRes, error) {
ret := _m.Called(ctx, msg)
@@ -295,52 +203,6 @@ func (_m *Client) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho
return r0, r1
}
// EndBlockAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) EndBlockAsync(_a0 context.Context, _a1 types.RequestEndBlock) (*abcicli.ReqRes, error) {
ret := _m.Called(_a0, _a1)
var r0 *abcicli.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestEndBlock) *abcicli.ReqRes); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*abcicli.ReqRes)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, types.RequestEndBlock) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// EndBlockSync provides a mock function with given fields: _a0, _a1
func (_m *Client) EndBlockSync(_a0 context.Context, _a1 types.RequestEndBlock) (*types.ResponseEndBlock, error) {
ret := _m.Called(_a0, _a1)
var r0 *types.ResponseEndBlock
if rf, ok := ret.Get(0).(func(context.Context, types.RequestEndBlock) *types.ResponseEndBlock); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.ResponseEndBlock)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, types.RequestEndBlock) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// Error provides a mock function with given fields:
func (_m *Client) Error() error {
ret := _m.Called()
@@ -355,6 +217,52 @@ func (_m *Client) Error() error {
return r0
}
// FinalizeBlockAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) FinalizeBlockAsync(_a0 context.Context, _a1 types.RequestFinalizeBlock) (*abcicli.ReqRes, error) {
ret := _m.Called(_a0, _a1)
var r0 *abcicli.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestFinalizeBlock) *abcicli.ReqRes); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*abcicli.ReqRes)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, types.RequestFinalizeBlock) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// FinalizeBlockSync provides a mock function with given fields: _a0, _a1
func (_m *Client) FinalizeBlockSync(_a0 context.Context, _a1 types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
ret := _m.Called(_a0, _a1)
var r0 *types.ResponseFinalizeBlock
if rf, ok := ret.Get(0).(func(context.Context, types.RequestFinalizeBlock) *types.ResponseFinalizeBlock); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.ResponseFinalizeBlock)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, types.RequestFinalizeBlock) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// FlushAsync provides a mock function with given fields: _a0
func (_m *Client) FlushAsync(_a0 context.Context) (*abcicli.ReqRes, error) {
ret := _m.Called(_a0)


@@ -245,10 +245,6 @@ func (cli *socketClient) InfoAsync(ctx context.Context, req types.RequestInfo) (
return cli.queueRequestAsync(ctx, types.ToRequestInfo(req))
}
func (cli *socketClient) DeliverTxAsync(ctx context.Context, req types.RequestDeliverTx) (*ReqRes, error) {
return cli.queueRequestAsync(ctx, types.ToRequestDeliverTx(req))
}
func (cli *socketClient) CheckTxAsync(ctx context.Context, req types.RequestCheckTx) (*ReqRes, error) {
return cli.queueRequestAsync(ctx, types.ToRequestCheckTx(req))
}
@@ -265,14 +261,6 @@ func (cli *socketClient) InitChainAsync(ctx context.Context, req types.RequestIn
return cli.queueRequestAsync(ctx, types.ToRequestInitChain(req))
}
func (cli *socketClient) BeginBlockAsync(ctx context.Context, req types.RequestBeginBlock) (*ReqRes, error) {
return cli.queueRequestAsync(ctx, types.ToRequestBeginBlock(req))
}
func (cli *socketClient) EndBlockAsync(ctx context.Context, req types.RequestEndBlock) (*ReqRes, error) {
return cli.queueRequestAsync(ctx, types.ToRequestEndBlock(req))
}
func (cli *socketClient) ListSnapshotsAsync(ctx context.Context, req types.RequestListSnapshots) (*ReqRes, error) {
return cli.queueRequestAsync(ctx, types.ToRequestListSnapshots(req))
}
@@ -295,6 +283,13 @@ func (cli *socketClient) ApplySnapshotChunkAsync(
return cli.queueRequestAsync(ctx, types.ToRequestApplySnapshotChunk(req))
}
func (cli *socketClient) FinalizeBlockAsync(
ctx context.Context,
req types.RequestFinalizeBlock,
) (*ReqRes, error) {
return cli.queueRequestAsync(ctx, types.ToRequestFinalizeBlock(req))
}
//----------------------------------------
func (cli *socketClient) FlushSync(ctx context.Context) error {
@@ -341,18 +336,6 @@ func (cli *socketClient) InfoSync(
return reqres.Response.GetInfo(), nil
}
func (cli *socketClient) DeliverTxSync(
ctx context.Context,
req types.RequestDeliverTx,
) (*types.ResponseDeliverTx, error) {
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestDeliverTx(req))
if err != nil {
return nil, err
}
return reqres.Response.GetDeliverTx(), nil
}
func (cli *socketClient) CheckTxSync(
ctx context.Context,
req types.RequestCheckTx,
@@ -395,30 +378,6 @@ func (cli *socketClient) InitChainSync(
return reqres.Response.GetInitChain(), nil
}
func (cli *socketClient) BeginBlockSync(
ctx context.Context,
req types.RequestBeginBlock,
) (*types.ResponseBeginBlock, error) {
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestBeginBlock(req))
if err != nil {
return nil, err
}
return reqres.Response.GetBeginBlock(), nil
}
func (cli *socketClient) EndBlockSync(
ctx context.Context,
req types.RequestEndBlock,
) (*types.ResponseEndBlock, error) {
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestEndBlock(req))
if err != nil {
return nil, err
}
return reqres.Response.GetEndBlock(), nil
}
func (cli *socketClient) ListSnapshotsSync(
ctx context.Context,
req types.RequestListSnapshots,
@@ -465,6 +424,17 @@ func (cli *socketClient) ApplySnapshotChunkSync(
return reqres.Response.GetApplySnapshotChunk(), nil
}
func (cli *socketClient) FinalizeBlockSync(
ctx context.Context,
req types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestFinalizeBlock(req))
if err != nil {
return nil, err
}
return reqres.Response.GetFinalizeBlock(), nil
}
//----------------------------------------
// queueRequest enqueues req onto the queue. If the queue is full, it either
@@ -569,8 +539,6 @@ func resMatchesReq(req *types.Request, res *types.Response) (ok bool) {
_, ok = res.Value.(*types.Response_Flush)
case *types.Request_Info:
_, ok = res.Value.(*types.Response_Info)
case *types.Request_DeliverTx:
_, ok = res.Value.(*types.Response_DeliverTx)
case *types.Request_CheckTx:
_, ok = res.Value.(*types.Response_CheckTx)
case *types.Request_Commit:
@@ -579,10 +547,6 @@ func resMatchesReq(req *types.Request, res *types.Response) (ok bool) {
_, ok = res.Value.(*types.Response_Query)
case *types.Request_InitChain:
_, ok = res.Value.(*types.Response_InitChain)
case *types.Request_BeginBlock:
_, ok = res.Value.(*types.Response_BeginBlock)
case *types.Request_EndBlock:
_, ok = res.Value.(*types.Response_EndBlock)
case *types.Request_ApplySnapshotChunk:
_, ok = res.Value.(*types.Response_ApplySnapshotChunk)
case *types.Request_LoadSnapshotChunk:
@@ -591,6 +555,8 @@ func resMatchesReq(req *types.Request, res *types.Response) (ok bool) {
_, ok = res.Value.(*types.Response_ListSnapshots)
case *types.Request_OfferSnapshot:
_, ok = res.Value.(*types.Response_OfferSnapshot)
case *types.Request_FinalizeBlock:
_, ok = res.Value.(*types.Response_FinalizeBlock)
}
return ok
}


@@ -37,11 +37,11 @@ func TestProperSyncCalls(t *testing.T) {
resp := make(chan error, 1)
go func() {
// This is BeginBlockSync unrolled....
reqres, err := c.BeginBlockAsync(ctx, types.RequestBeginBlock{})
reqres, err := c.FinalizeBlockAsync(ctx, types.RequestFinalizeBlock{})
assert.NoError(t, err)
err = c.FlushSync(context.Background())
assert.NoError(t, err)
res := reqres.Response.GetBeginBlock()
res := reqres.Response.GetFinalizeBlock()
assert.NotNil(t, res)
resp <- c.Error()
}()
@@ -73,7 +73,7 @@ func TestHangingSyncCalls(t *testing.T) {
resp := make(chan error, 1)
go func() {
// Start BeginBlock and flush it
reqres, err := c.BeginBlockAsync(ctx, types.RequestBeginBlock{})
reqres, err := c.FinalizeBlockAsync(ctx, types.RequestFinalizeBlock{})
assert.NoError(t, err)
flush, err := c.FlushAsync(ctx)
assert.NoError(t, err)
@@ -84,7 +84,7 @@ func TestHangingSyncCalls(t *testing.T) {
err = s.Stop()
assert.NoError(t, err)
// wait for the response from BeginBlock
// wait for the response from FinalizeBlock
reqres.Wait()
flush.Wait()
resp <- c.Error()
@@ -121,7 +121,7 @@ type slowApp struct {
types.BaseApplication
}
func (slowApp) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock {
func (slowApp) FinalizeBlock(req types.RequestFinalizeBlock) types.ResponseFinalizeBlock {
time.Sleep(200 * time.Millisecond)
return types.ResponseBeginBlock{}
return types.ResponseFinalizeBlock{}
}


@@ -17,6 +17,7 @@ import (
abcicli "github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/abci/example/code"
"github.com/tendermint/tendermint/abci/example/counter"
"github.com/tendermint/tendermint/abci/example/kvstore"
"github.com/tendermint/tendermint/abci/server"
servertest "github.com/tendermint/tendermint/abci/tests/server"
@@ -46,6 +47,9 @@ var (
flagHeight int
flagProve bool
// counter
flagSerial bool
// kvstore
flagPersist string
)
@@ -57,7 +61,9 @@ var RootCmd = &cobra.Command{
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
switch cmd.Use {
case "kvstore", "version":
case "counter", "kvstore": // for the examples apps, don't pre-run
return nil
case "version": // skip running for version command
return nil
}
@@ -129,6 +135,10 @@ func addQueryFlags() {
"whether or not to return a merkle proof of the query result")
}
func addCounterFlags() {
counterCmd.PersistentFlags().BoolVarP(&flagSerial, "serial", "", false, "enforce incrementing (serial) transactions")
}
func addKVStoreFlags() {
kvstoreCmd.PersistentFlags().StringVarP(&flagPersist, "persist", "", "", "directory to use for a database")
}
@@ -147,6 +157,8 @@ func addCommands() {
RootCmd.AddCommand(queryCmd)
// examples
addCounterFlags()
RootCmd.AddCommand(counterCmd)
addKVStoreFlags()
RootCmd.AddCommand(kvstoreCmd)
}
@@ -246,6 +258,14 @@ var queryCmd = &cobra.Command{
RunE: cmdQuery,
}
var counterCmd = &cobra.Command{
Use: "counter",
Short: "ABCI demo example",
Long: "ABCI demo example",
Args: cobra.ExactArgs(0),
RunE: cmdCounter,
}
var kvstoreCmd = &cobra.Command{
Use: "kvstore",
Short: "ABCI demo example",
@@ -484,16 +504,18 @@ func cmdDeliverTx(cmd *cobra.Command, args []string) error {
if err != nil {
return err
}
res, err := client.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: txBytes})
res, err := client.FinalizeBlockSync(ctx, types.RequestFinalizeBlock{Txs: [][]byte{txBytes}})
if err != nil {
return err
}
printResponse(cmd, args, response{
Code: res.Code,
Data: res.Data,
Info: res.Info,
Log: res.Log,
})
for _, tx := range res.Txs {
printResponse(cmd, args, response{
Code: tx.Code,
Data: tx.Data,
Info: tx.Info,
Log: tx.Log,
})
}
return nil
}
@@ -573,6 +595,32 @@ func cmdQuery(cmd *cobra.Command, args []string) error {
return nil
}
func cmdCounter(cmd *cobra.Command, args []string) error {
app := counter.NewApplication(flagSerial)
logger := log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
// Start the listener
srv, err := server.NewServer(flagAddress, flagAbci, app)
if err != nil {
return err
}
srv.SetLogger(logger.With("module", "abci-server"))
if err := srv.Start(); err != nil {
return err
}
// Stop upon receiving SIGTERM or CTRL-C.
tmos.TrapSignal(logger, func() {
// Cleanup
if err := srv.Stop(); err != nil {
logger.Error("Error while stopping server", "err", err)
}
})
// Run forever.
select {}
}
func cmdKVStore(cmd *cobra.Command, args []string) error {
logger := log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)

View File

@@ -0,0 +1,92 @@
package counter
import (
"encoding/binary"
"fmt"
"github.com/tendermint/tendermint/abci/example/code"
"github.com/tendermint/tendermint/abci/types"
)
type Application struct {
types.BaseApplication
hashCount int
txCount int
serial bool
}
func NewApplication(serial bool) *Application {
return &Application{serial: serial}
}
func (app *Application) Info(req types.RequestInfo) types.ResponseInfo {
return types.ResponseInfo{Data: fmt.Sprintf("{\"hashes\":%v,\"txs\":%v}", app.hashCount, app.txCount)}
}
func (app *Application) FinalizeBlock(req types.RequestFinalizeBlock) types.ResponseFinalizeBlock {
txs := make([]*types.ResponseDeliverTx, len(req.Txs))
for i, tx := range req.Txs {
if app.serial {
if len(tx) > 8 {
return types.ResponseFinalizeBlock{Txs: []*types.ResponseDeliverTx{{
Code: code.CodeTypeEncodingError,
Log: fmt.Sprintf("Max tx size is 8 bytes, got %d", len(tx))}},
}
}
tx8 := make([]byte, 8)
copy(tx8[len(tx8)-len(tx):], tx)
txValue := binary.BigEndian.Uint64(tx8)
if txValue != uint64(app.txCount) {
return types.ResponseFinalizeBlock{
Txs: []*types.ResponseDeliverTx{{
Code: code.CodeTypeBadNonce,
Log: fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.txCount, txValue)}},
}
}
}
// increment per delivered tx so the serial check and the Commit hash advance per transaction
app.txCount++
txs[i] = &types.ResponseDeliverTx{Code: code.CodeTypeOK}
}
return types.ResponseFinalizeBlock{Txs: txs}
}
func (app *Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
if app.serial {
if len(req.Tx) > 8 {
return types.ResponseCheckTx{
Code: code.CodeTypeEncodingError,
Log: fmt.Sprintf("Max tx size is 8 bytes, got %d", len(req.Tx))}
}
tx8 := make([]byte, 8)
copy(tx8[len(tx8)-len(req.Tx):], req.Tx)
txValue := binary.BigEndian.Uint64(tx8)
if txValue < uint64(app.txCount) {
return types.ResponseCheckTx{
Code: code.CodeTypeBadNonce,
Log: fmt.Sprintf("Invalid nonce. Expected >= %v, got %v", app.txCount, txValue)}
}
}
return types.ResponseCheckTx{Code: code.CodeTypeOK}
}
func (app *Application) Commit() (resp types.ResponseCommit) {
app.hashCount++
if app.txCount == 0 {
return types.ResponseCommit{}
}
hash := make([]byte, 8)
binary.BigEndian.PutUint64(hash, uint64(app.txCount))
return types.ResponseCommit{Data: hash}
}
func (app *Application) Query(reqQuery types.RequestQuery) types.ResponseQuery {
switch reqQuery.Path {
case "hash":
return types.ResponseQuery{Value: []byte(fmt.Sprintf("%v", app.hashCount))}
case "tx":
return types.ResponseQuery{Value: []byte(fmt.Sprintf("%v", app.txCount))}
default:
return types.ResponseQuery{Log: fmt.Sprintf("Invalid query path. Expected hash or tx, got %v", reqQuery.Path)}
}
}

View File

@@ -76,20 +76,22 @@ func testStream(t *testing.T, app types.Application) {
client.SetResponseCallback(func(req *types.Request, res *types.Response) {
// Process response
switch r := res.Value.(type) {
case *types.Response_DeliverTx:
counter++
if r.DeliverTx.Code != code.CodeTypeOK {
t.Error("DeliverTx failed with ret_code", r.DeliverTx.Code)
}
if counter > numDeliverTxs {
t.Fatalf("Too many DeliverTx responses. Got %d, expected %d", counter, numDeliverTxs)
}
if counter == numDeliverTxs {
go func() {
time.Sleep(time.Second * 1) // Wait for a bit to allow counter overflow
close(done)
}()
return
case *types.Response_FinalizeBlock:
for _, tx := range r.FinalizeBlock.Txs {
counter++
if tx.Code != code.CodeTypeOK {
t.Error("DeliverTx failed with ret_code", tx.Code)
}
if counter > numDeliverTxs {
t.Fatalf("Too many DeliverTx responses. Got %d, expected %d", counter, numDeliverTxs)
}
if counter == numDeliverTxs {
go func() {
time.Sleep(time.Second * 1) // Wait for a bit to allow counter overflow
close(done)
}()
return
}
}
case *types.Response_Flush:
// ignore
@@ -103,7 +105,8 @@ func testStream(t *testing.T, app types.Application) {
// Write requests
for counter := 0; counter < numDeliverTxs; counter++ {
// Send request
_, err = client.DeliverTxAsync(ctx, types.RequestDeliverTx{Tx: []byte("test")})
tx := []byte("test")
_, err = client.FinalizeBlockAsync(ctx, types.RequestFinalizeBlock{Txs: [][]byte{tx}})
require.NoError(t, err)
// Sometimes send flush messages
@@ -163,22 +166,25 @@ func testGRPCSync(t *testing.T, app types.ABCIApplicationServer) {
// Write requests
for counter := 0; counter < numDeliverTxs; counter++ {
// Send request
response, err := client.DeliverTx(context.Background(), &types.RequestDeliverTx{Tx: []byte("test")})
txt := []byte("test")
response, err := client.FinalizeBlock(context.Background(), &types.RequestFinalizeBlock{Txs: [][]byte{txt}})
if err != nil {
t.Fatalf("Error in GRPC DeliverTx: %v", err.Error())
}
counter++
if response.Code != code.CodeTypeOK {
t.Error("DeliverTx failed with ret_code", response.Code)
}
if counter > numDeliverTxs {
t.Fatal("Too many DeliverTx responses")
}
t.Log("response", counter)
if counter == numDeliverTxs {
go func() {
time.Sleep(time.Second * 1) // Wait for a bit to allow counter overflow
}()
for _, tx := range response.Txs {
if tx.Code != code.CodeTypeOK {
t.Error("DeliverTx failed with ret_code", tx.Code)
}
if counter > numDeliverTxs {
t.Fatal("Too many DeliverTx responses")
}
t.Log("response", counter)
if counter == numDeliverTxs {
go func() {
time.Sleep(time.Second * 1) // Wait for a bit to allow counter overflow
}()
}
}
}

View File

@@ -86,35 +86,40 @@ func (app *Application) Info(req types.RequestInfo) (resInfo types.ResponseInfo)
}
// tx is either "key=value" or just arbitrary bytes
func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
func (app *Application) FinalizeBlock(req types.RequestFinalizeBlock) types.ResponseFinalizeBlock {
var key, value string
var txs = make([]*types.ResponseDeliverTx, len(req.Txs))
parts := bytes.Split(req.Tx, []byte("="))
if len(parts) == 2 {
key, value = string(parts[0]), string(parts[1])
} else {
key, value = string(req.Tx), string(req.Tx)
}
for i, tx := range req.Txs {
parts := bytes.Split(tx, []byte("="))
if len(parts) == 2 {
key, value = string(parts[0]), string(parts[1])
} else {
key, value = string(tx), string(tx)
}
err := app.state.db.Set(prefixKey([]byte(key)), []byte(value))
if err != nil {
panic(err)
}
app.state.Size++
err := app.state.db.Set(prefixKey([]byte(key)), []byte(value))
if err != nil {
panic(err)
}
app.state.Size++
events := []types.Event{
{
Type: "app",
Attributes: []types.EventAttribute{
{Key: "creator", Value: "Cosmoshi Netowoko", Index: true},
{Key: "key", Value: key, Index: true},
{Key: "index_key", Value: "index is working", Index: true},
{Key: "noindex_key", Value: "index is working", Index: false},
events := []types.Event{
{
Type: "app",
Attributes: []types.EventAttribute{
{Key: "creator", Value: "Cosmoshi Netowoko", Index: true},
{Key: "key", Value: key, Index: true},
{Key: "index_key", Value: "index is working", Index: true},
{Key: "noindex_key", Value: "index is working", Index: false},
},
},
},
}
txs[i] = &types.ResponseDeliverTx{Code: code.CodeTypeOK, Events: events}
}
return types.ResponseDeliverTx{Code: code.CodeTypeOK, Events: events}
return types.ResponseFinalizeBlock{Txs: txs}
}
func (app *Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {

View File

@@ -27,12 +27,16 @@ const (
var ctx = context.Background()
func testKVStore(t *testing.T, app types.Application, tx []byte, key, value string) {
req := types.RequestDeliverTx{Tx: tx}
ar := app.DeliverTx(req)
require.False(t, ar.IsErr(), ar)
req := types.RequestFinalizeBlock{Txs: [][]byte{tx}}
ar := app.FinalizeBlock(req)
for _, tx := range ar.Txs {
require.False(t, tx.IsErr(), ar)
}
// repeating tx doesn't raise error
ar = app.DeliverTx(req)
require.False(t, ar.IsErr(), ar)
ar = app.FinalizeBlock(req)
for _, tx := range ar.Txs {
require.False(t, tx.IsErr(), ar)
}
// commit
app.Commit()
@@ -109,8 +113,7 @@ func TestPersistentKVStoreInfo(t *testing.T) {
header := tmproto.Header{
Height: height,
}
kvstore.BeginBlock(types.RequestBeginBlock{Hash: hash, Header: header})
kvstore.EndBlock(types.RequestEndBlock{Height: header.Height})
kvstore.FinalizeBlock(types.RequestFinalizeBlock{Hash: hash, Header: header})
kvstore.Commit()
resInfo = kvstore.Info(types.RequestInfo{})
@@ -200,16 +203,15 @@ func makeApplyBlock(
Height: height,
}
kvstore.BeginBlock(types.RequestBeginBlock{Hash: hash, Header: header})
for _, tx := range txs {
if r := kvstore.DeliverTx(types.RequestDeliverTx{Tx: tx}); r.IsErr() {
t.Fatal(r)
}
}
resEndBlock := kvstore.EndBlock(types.RequestEndBlock{Height: header.Height})
resFinalizeBlock := kvstore.FinalizeBlock(types.RequestFinalizeBlock{
Hash: hash,
Header: header,
Txs: txs,
})
kvstore.Commit()
valsEqual(t, diff, resEndBlock.ValidatorUpdates)
valsEqual(t, diff, resFinalizeBlock.ValidatorUpdates)
}
@@ -326,13 +328,16 @@ func runClientTests(t *testing.T, client abcicli.Client) {
}
func testClient(t *testing.T, app abcicli.Client, tx []byte, key, value string) {
ar, err := app.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: tx})
ar, err := app.FinalizeBlockSync(ctx, types.RequestFinalizeBlock{Txs: [][]byte{tx}})
for _, tx := range ar.Txs {
require.False(t, tx.IsErr(), ar)
}
ar, err = app.FinalizeBlockSync(ctx, types.RequestFinalizeBlock{Txs: [][]byte{tx}}) // repeating tx doesn't raise error
require.NoError(t, err)
require.False(t, ar.IsErr(), ar)
// repeating tx doesn't raise error
ar, err = app.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: tx})
require.NoError(t, err)
require.False(t, ar.IsErr(), ar)
for _, tx := range ar.Txs {
require.False(t, tx.IsErr(), ar)
}
// commit
_, err = app.CommitSync(ctx)
require.NoError(t, err)

View File

@@ -66,19 +66,19 @@ func (app *PersistentKVStoreApplication) Info(req types.RequestInfo) types.Respo
return res
}
// tx is either "val:pubkey!power" or "key=value" or just arbitrary bytes
func (app *PersistentKVStoreApplication) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
// if it starts with "val:", update the validator set
// format is "val:pubkey!power"
if isValidatorTx(req.Tx) {
// update validators in the merkle tree
// and in app.ValUpdates
return app.execValidatorTx(req.Tx)
}
// // tx is either "val:pubkey!power" or "key=value" or just arbitrary bytes
// func (app *PersistentKVStoreApplication) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
// // if it starts with "val:", update the validator set
// // format is "val:pubkey!power"
// if isValidatorTx(req.Tx) {
// // update validators in the merkle tree
// // and in app.ValUpdates
// return app.execValidatorTx(req.Tx)
// }
// otherwise, update the key-value store
return app.app.DeliverTx(req)
}
// // otherwise, update the key-value store
// return app.app.DeliverTx(req)
// }
func (app *PersistentKVStoreApplication) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
return app.app.CheckTx(req)
@@ -119,8 +119,40 @@ func (app *PersistentKVStoreApplication) InitChain(req types.RequestInitChain) t
return types.ResponseInitChain{}
}
// Track the block hash and header information
func (app *PersistentKVStoreApplication) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock {
func (app *PersistentKVStoreApplication) ListSnapshots(
req types.RequestListSnapshots) types.ResponseListSnapshots {
return types.ResponseListSnapshots{}
}
func (app *PersistentKVStoreApplication) LoadSnapshotChunk(
req types.RequestLoadSnapshotChunk) types.ResponseLoadSnapshotChunk {
return types.ResponseLoadSnapshotChunk{}
}
func (app *PersistentKVStoreApplication) OfferSnapshot(
req types.RequestOfferSnapshot) types.ResponseOfferSnapshot {
return types.ResponseOfferSnapshot{Result: types.ResponseOfferSnapshot_ABORT}
}
func (app *PersistentKVStoreApplication) ApplySnapshotChunk(
req types.RequestApplySnapshotChunk) types.ResponseApplySnapshotChunk {
return types.ResponseApplySnapshotChunk{Result: types.ResponseApplySnapshotChunk_ABORT}
}
func (app *PersistentKVStoreApplication) FinalizeBlock(
req types.RequestFinalizeBlock) types.ResponseFinalizeBlock {
// for i, tx := range req.Txs {
// // if it starts with "val:", update the validator set
// // format is "val:pubkey!power"
// if isValidatorTx(tx) {
// // update validators in the merkle tree
// // and in app.ValUpdates
// return app.execValidatorTx(req.Tx)
// }
// // otherwise, update the key-value store
// return app.app.DeliverTx(tx)
// }
// reset valset changes
app.ValUpdates = make([]types.ValidatorUpdate, 0)
@@ -142,32 +174,7 @@ func (app *PersistentKVStoreApplication) BeginBlock(req types.RequestBeginBlock)
}
}
return types.ResponseBeginBlock{}
}
// Update the validator set
func (app *PersistentKVStoreApplication) EndBlock(req types.RequestEndBlock) types.ResponseEndBlock {
return types.ResponseEndBlock{ValidatorUpdates: app.ValUpdates}
}
func (app *PersistentKVStoreApplication) ListSnapshots(
req types.RequestListSnapshots) types.ResponseListSnapshots {
return types.ResponseListSnapshots{}
}
func (app *PersistentKVStoreApplication) LoadSnapshotChunk(
req types.RequestLoadSnapshotChunk) types.ResponseLoadSnapshotChunk {
return types.ResponseLoadSnapshotChunk{}
}
func (app *PersistentKVStoreApplication) OfferSnapshot(
req types.RequestOfferSnapshot) types.ResponseOfferSnapshot {
return types.ResponseOfferSnapshot{Result: types.ResponseOfferSnapshot_ABORT}
}
func (app *PersistentKVStoreApplication) ApplySnapshotChunk(
req types.RequestApplySnapshotChunk) types.ResponseApplySnapshotChunk {
return types.ResponseApplySnapshotChunk{Result: types.ResponseApplySnapshotChunk_ABORT}
return types.ResponseFinalizeBlock{ValidatorUpdates: app.ValUpdates}
}
//---------------------------------------------

View File

@@ -200,9 +200,6 @@ func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types
case *types.Request_Info:
res := s.app.Info(*r.Info)
responses <- types.ToResponseInfo(res)
case *types.Request_DeliverTx:
res := s.app.DeliverTx(*r.DeliverTx)
responses <- types.ToResponseDeliverTx(res)
case *types.Request_CheckTx:
res := s.app.CheckTx(*r.CheckTx)
responses <- types.ToResponseCheckTx(res)
@@ -215,12 +212,6 @@ func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types
case *types.Request_InitChain:
res := s.app.InitChain(*r.InitChain)
responses <- types.ToResponseInitChain(res)
case *types.Request_BeginBlock:
res := s.app.BeginBlock(*r.BeginBlock)
responses <- types.ToResponseBeginBlock(res)
case *types.Request_EndBlock:
res := s.app.EndBlock(*r.EndBlock)
responses <- types.ToResponseEndBlock(res)
case *types.Request_ListSnapshots:
res := s.app.ListSnapshots(*r.ListSnapshots)
responses <- types.ToResponseListSnapshots(res)
@@ -233,6 +224,9 @@ func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types
case *types.Request_ApplySnapshotChunk:
res := s.app.ApplySnapshotChunk(*r.ApplySnapshotChunk)
responses <- types.ToResponseApplySnapshotChunk(res)
case *types.Request_FinalizeBlock:
res := s.app.FinalizeBlock(*r.FinalizeBlock)
responses <- types.ToResponseFinalizeBlock(res)
default:
responses <- types.ToResponseException("Unknown request")
}

View File

@@ -51,20 +51,22 @@ func Commit(client abcicli.Client, hashExp []byte) error {
return nil
}
func DeliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
res, _ := client.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: txBytes})
code, data, log := res.Code, res.Data, res.Log
if code != codeExp {
fmt.Println("Failed test: DeliverTx")
fmt.Printf("DeliverTx response code was unexpected. Got %v expected %v. Log: %v\n",
code, codeExp, log)
return errors.New("deliverTx error")
}
if !bytes.Equal(data, dataExp) {
fmt.Println("Failed test: DeliverTx")
fmt.Printf("DeliverTx response data was unexpected. Got %X expected %X\n",
data, dataExp)
return errors.New("deliverTx error")
func FinalizeBlock(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
res, _ := client.FinalizeBlockSync(ctx, types.RequestFinalizeBlock{Txs: [][]byte{txBytes}})
for _, tx := range res.Txs {
code, data, log := tx.Code, tx.Data, tx.Log
if code != codeExp {
fmt.Println("Failed test: DeliverTx")
fmt.Printf("DeliverTx response code was unexpected. Got %v expected %v. Log: %v\n",
code, codeExp, log)
return errors.New("deliverTx error")
}
if !bytes.Equal(data, dataExp) {
fmt.Println("Failed test: DeliverTx")
fmt.Printf("DeliverTx response data was unexpected. Got %X expected %X\n",
data, dataExp)
return errors.New("deliverTx error")
}
}
fmt.Println("Passed test: DeliverTx")
return nil

View File

@@ -0,0 +1,68 @@
package main
import (
"bytes"
"context"
"fmt"
abcicli "github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/libs/log"
)
var ctx = context.Background()
func startClient(abciType string) abcicli.Client {
// Start client
client, err := abcicli.NewClient("tcp://127.0.0.1:26658", abciType, true)
if err != nil {
panic(err.Error())
}
logger := log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
client.SetLogger(logger.With("module", "abcicli"))
if err := client.Start(); err != nil {
panicf("connecting to abci_app: %v", err.Error())
}
return client
}
func commit(client abcicli.Client, hashExp []byte) {
res, err := client.CommitSync(ctx)
if err != nil {
panicf("client error: %v", err)
}
if !bytes.Equal(res.Data, hashExp) {
panicf("Commit hash was unexpected. Got %X expected %X", res.Data, hashExp)
}
}
type tx struct {
Data []byte
CodeExp uint32
DataExp []byte
}
func finalizeBlock(client abcicli.Client, txs []tx) {
var txsData = make([][]byte, len(txs))
for i, tx := range txs {
txsData[i] = tx.Data
}
res, err := client.FinalizeBlockSync(ctx, types.RequestFinalizeBlock{Txs: txsData})
if err != nil {
panicf("client error: %v", err)
}
for i, tx := range res.Txs {
if tx.Code != txs[i].CodeExp {
panicf("DeliverTx response code was unexpected. Got %v expected %v. Log: %v", tx.Code, txs[i].CodeExp, tx.Log)
}
if !bytes.Equal(tx.Data, txs[i].DataExp) {
panicf("DeliverTx response data was unexpected. Got %X expected %X", tx.Data, txs[i].DataExp)
}
}
}
func panicf(format string, a ...interface{}) {
panic(fmt.Sprintf(format, a...))
}

View File

@@ -0,0 +1,95 @@
package main
import (
"fmt"
"log"
"os"
"os/exec"
"time"
"github.com/tendermint/tendermint/abci/types"
)
var abciType string
func init() {
abciType = os.Getenv("ABCI")
if abciType == "" {
abciType = "socket"
}
}
func main() {
testCounter()
}
const (
maxABCIConnectTries = 10
)
func ensureABCIIsUp(typ string, n int) error {
var err error
cmdString := "abci-cli echo hello"
if typ == "grpc" {
cmdString = "abci-cli --abci grpc echo hello"
}
for i := 0; i < n; i++ {
cmd := exec.Command("bash", "-c", cmdString)
_, err = cmd.CombinedOutput()
if err == nil {
break
}
time.Sleep(500 * time.Millisecond)
}
return err
}
func testCounter() {
abciApp := os.Getenv("ABCI_APP")
if abciApp == "" {
panic("No ABCI_APP specified")
}
fmt.Printf("Running %s test with abci=%s\n", abciApp, abciType)
subCommand := fmt.Sprintf("abci-cli %s", abciApp)
cmd := exec.Command("bash", "-c", subCommand)
cmd.Stdout = os.Stdout
if err := cmd.Start(); err != nil {
log.Fatalf("starting %q err: %v", abciApp, err)
}
defer func() {
if err := cmd.Process.Kill(); err != nil {
log.Printf("error on process kill: %v", err)
}
if err := cmd.Wait(); err != nil {
log.Printf("error while waiting for cmd to exit: %v", err)
}
}()
if err := ensureABCIIsUp(abciType, maxABCIConnectTries); err != nil {
log.Fatalf("echo failed: %v", err) //nolint:gocritic
}
client := startClient(abciType)
defer func() {
if err := client.Stop(); err != nil {
log.Printf("error trying client stop: %v", err)
}
}()
// commit(client, nil)
// deliverTx(client, []byte("abc"), code.CodeTypeBadNonce, nil)
commit(client, nil)
finalizeBlock(client, []tx{{Data: []byte{0x00}, CodeExp: types.CodeTypeOK, DataExp: nil}})
commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 1})
// deliverTx(client, []byte{0x00}, code.CodeTypeBadNonce, nil)
txs := []tx{
{Data: []byte{0x01}, DataExp: nil, CodeExp: types.CodeTypeOK},
{Data: []byte{0x00, 0x02}, DataExp: nil, CodeExp: types.CodeTypeOK},
{Data: []byte{0x00, 0x03}, DataExp: nil, CodeExp: types.CodeTypeOK},
{Data: []byte{0x00, 0x00, 0x04}, DataExp: nil, CodeExp: types.CodeTypeOK}}
finalizeBlock(client, txs)
// deliverTx(client, []byte{0x00, 0x00, 0x06}, code.CodeTypeBadNonce, nil)
commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 5})
}

28
abci/tests/test_app/test.sh Executable file
View File

@@ -0,0 +1,28 @@
#! /bin/bash
set -e
# These tests spawn the counter app and server by execing the ABCI_APP command and run some simple client tests against it
# Get the directory where this script is.
export PATH="$GOBIN:$PATH"
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
# Change into that dir because we expect that.
cd "$DIR"
echo "RUN COUNTER OVER SOCKET"
# test golang counter
ABCI_APP="counter" go run -mod=readonly ./*.go
echo "----------------------"
echo "RUN COUNTER OVER GRPC"
# test golang counter via grpc
ABCI_APP="counter --abci=grpc" ABCI="grpc" go run -mod=readonly ./*.go
echo "----------------------"
# test nodejs counter
# TODO: fix node app
#ABCI_APP="node $GOPATH/src/github.com/tendermint/js-abci/example/app.js" go test -test.run TestCounter

View File

@@ -37,6 +37,7 @@ function testExample() {
}
testExample 1 tests/test_cli/ex1.abci abci-cli kvstore
testExample 2 tests/test_cli/ex2.abci abci-cli counter
echo ""
echo "PASS"

View File

@@ -17,11 +17,9 @@ type Application interface {
CheckTx(RequestCheckTx) ResponseCheckTx // Validate a tx for the mempool
// Consensus Connection
InitChain(RequestInitChain) ResponseInitChain // Initialize blockchain w validators/other info from TendermintCore
BeginBlock(RequestBeginBlock) ResponseBeginBlock // Signals the beginning of a block
DeliverTx(RequestDeliverTx) ResponseDeliverTx // Deliver a tx for full processing
EndBlock(RequestEndBlock) ResponseEndBlock // Signals the end of a block, returns changes to the validator set
Commit() ResponseCommit // Commit the state and return the application Merkle root hash
InitChain(RequestInitChain) ResponseInitChain // Initialize blockchain w validators/other info from TendermintCore
FinalizeBlock(RequestFinalizeBlock) ResponseFinalizeBlock
Commit() ResponseCommit // Commit the state and return the application Merkle root hash
// State Sync Connection
ListSnapshots(RequestListSnapshots) ResponseListSnapshots // List available snapshots
@@ -46,10 +44,6 @@ func (BaseApplication) Info(req RequestInfo) ResponseInfo {
return ResponseInfo{}
}
func (BaseApplication) DeliverTx(req RequestDeliverTx) ResponseDeliverTx {
return ResponseDeliverTx{Code: CodeTypeOK}
}
func (BaseApplication) CheckTx(req RequestCheckTx) ResponseCheckTx {
return ResponseCheckTx{Code: CodeTypeOK}
}
@@ -66,14 +60,6 @@ func (BaseApplication) InitChain(req RequestInitChain) ResponseInitChain {
return ResponseInitChain{}
}
func (BaseApplication) BeginBlock(req RequestBeginBlock) ResponseBeginBlock {
return ResponseBeginBlock{}
}
func (BaseApplication) EndBlock(req RequestEndBlock) ResponseEndBlock {
return ResponseEndBlock{}
}
func (BaseApplication) ListSnapshots(req RequestListSnapshots) ResponseListSnapshots {
return ResponseListSnapshots{}
}
@@ -90,6 +76,10 @@ func (BaseApplication) ApplySnapshotChunk(req RequestApplySnapshotChunk) Respons
return ResponseApplySnapshotChunk{}
}
func (BaseApplication) FinalizeBlock(req RequestFinalizeBlock) ResponseFinalizeBlock {
return ResponseFinalizeBlock{}
}
//-------------------------------------------------------
// GRPCApplication is a GRPC wrapper for Application
@@ -114,11 +104,6 @@ func (app *GRPCApplication) Info(ctx context.Context, req *RequestInfo) (*Respon
return &res, nil
}
func (app *GRPCApplication) DeliverTx(ctx context.Context, req *RequestDeliverTx) (*ResponseDeliverTx, error) {
res := app.app.DeliverTx(*req)
return &res, nil
}
func (app *GRPCApplication) CheckTx(ctx context.Context, req *RequestCheckTx) (*ResponseCheckTx, error) {
res := app.app.CheckTx(*req)
return &res, nil
@@ -139,16 +124,6 @@ func (app *GRPCApplication) InitChain(ctx context.Context, req *RequestInitChain
return &res, nil
}
func (app *GRPCApplication) BeginBlock(ctx context.Context, req *RequestBeginBlock) (*ResponseBeginBlock, error) {
res := app.app.BeginBlock(*req)
return &res, nil
}
func (app *GRPCApplication) EndBlock(ctx context.Context, req *RequestEndBlock) (*ResponseEndBlock, error) {
res := app.app.EndBlock(*req)
return &res, nil
}
func (app *GRPCApplication) ListSnapshots(
ctx context.Context, req *RequestListSnapshots) (*ResponseListSnapshots, error) {
res := app.app.ListSnapshots(*req)
@@ -172,3 +147,9 @@ func (app *GRPCApplication) ApplySnapshotChunk(
res := app.app.ApplySnapshotChunk(*req)
return &res, nil
}
func (app *GRPCApplication) FinalizeBlock(
ctx context.Context, req *RequestFinalizeBlock) (*ResponseFinalizeBlock, error) {
res := app.app.FinalizeBlock(*req)
return &res, nil
}

View File

@@ -15,7 +15,11 @@ const (
func WriteMessage(msg proto.Message, w io.Writer) error {
protoWriter := protoio.NewDelimitedWriter(w)
_, err := protoWriter.WriteMsg(msg)
return err
if err != nil {
return err
}
return nil
}
// ReadMessage reads a varint length-delimited protobuf message.
@@ -44,12 +48,6 @@ func ToRequestInfo(req RequestInfo) *Request {
}
}
func ToRequestDeliverTx(req RequestDeliverTx) *Request {
return &Request{
Value: &Request_DeliverTx{&req},
}
}
func ToRequestCheckTx(req RequestCheckTx) *Request {
return &Request{
Value: &Request_CheckTx{&req},
@@ -74,18 +72,6 @@ func ToRequestInitChain(req RequestInitChain) *Request {
}
}
func ToRequestBeginBlock(req RequestBeginBlock) *Request {
return &Request{
Value: &Request_BeginBlock{&req},
}
}
func ToRequestEndBlock(req RequestEndBlock) *Request {
return &Request{
Value: &Request_EndBlock{&req},
}
}
func ToRequestListSnapshots(req RequestListSnapshots) *Request {
return &Request{
Value: &Request_ListSnapshots{&req},
@@ -110,6 +96,12 @@ func ToRequestApplySnapshotChunk(req RequestApplySnapshotChunk) *Request {
}
}
func ToRequestFinalizeBlock(req RequestFinalizeBlock) *Request {
return &Request{
Value: &Request_FinalizeBlock{&req},
}
}
//----------------------------------------
func ToResponseException(errStr string) *Response {
@@ -135,11 +127,6 @@ func ToResponseInfo(res ResponseInfo) *Response {
Value: &Response_Info{&res},
}
}
func ToResponseDeliverTx(res ResponseDeliverTx) *Response {
return &Response{
Value: &Response_DeliverTx{&res},
}
}
func ToResponseCheckTx(res ResponseCheckTx) *Response {
return &Response{
@@ -165,18 +152,6 @@ func ToResponseInitChain(res ResponseInitChain) *Response {
}
}
func ToResponseBeginBlock(res ResponseBeginBlock) *Response {
return &Response{
Value: &Response_BeginBlock{&res},
}
}
func ToResponseEndBlock(res ResponseEndBlock) *Response {
return &Response{
Value: &Response_EndBlock{&res},
}
}
func ToResponseListSnapshots(res ResponseListSnapshots) *Response {
return &Response{
Value: &Response_ListSnapshots{&res},
@@ -200,3 +175,9 @@ func ToResponseApplySnapshotChunk(res ResponseApplySnapshotChunk) *Response {
Value: &Response_ApplySnapshotChunk{&res},
}
}
func ToResponseFinalizeBlock(res ResponseFinalizeBlock) *Response {
return &Response{
Value: &Response_FinalizeBlock{&res},
}
}

File diff suppressed because it is too large

View File

@@ -1,64 +0,0 @@
package commands
import (
"context"
"fmt"
"github.com/spf13/cobra"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/scripts/keymigrate"
)
func MakeKeyMigrateCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "key-migrate",
Short: "Run Database key migration",
RunE: func(cmd *cobra.Command, args []string) error {
ctx, cancel := context.WithCancel(cmd.Context())
defer cancel()
contexts := []string{
// this is ordered to put the
// (presumably) biggest/most important
// subsets first.
"blockstore",
"state",
"peerstore",
"tx_index",
"evidence",
"light",
}
for idx, dbctx := range contexts {
logger.Info("beginning a key migration",
"dbctx", dbctx,
"num", idx+1,
"total", len(contexts),
)
db, err := cfg.DefaultDBProvider(&cfg.DBContext{
ID: dbctx,
Config: config,
})
if err != nil {
return fmt.Errorf("constructing database handle: %w", err)
}
if err = keymigrate.Migrate(ctx, db); err != nil {
return fmt.Errorf("running migration for context %q: %w",
dbctx, err)
}
}
logger.Info("completed database migration successfully")
return nil
},
}
// allow database info to be overridden via cli
addDBFlags(cmd)
return cmd
}

View File

@@ -1,251 +0,0 @@
package commands
import (
"errors"
"fmt"
"strings"
"github.com/spf13/cobra"
tmdb "github.com/tendermint/tm-db"
abcitypes "github.com/tendermint/tendermint/abci/types"
tmcfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/internal/libs/progressbar"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
"github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/state/indexer"
"github.com/tendermint/tendermint/state/indexer/sink/kv"
"github.com/tendermint/tendermint/state/indexer/sink/psql"
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
)
const (
reindexFailed = "event re-index failed: "
)
// ReIndexEventCmd allows re-indexing events over a given block height interval
var ReIndexEventCmd = &cobra.Command{
Use: "reindex-event",
Short: "reindex events to the event store backends",
Long: `
reindex-event is an offline tool to re-index block and tx events to the event sinks.
You can run this command when the event store backend has been dropped/disconnected or when you want to replace the backend.
The default start-height is 0, meaning the tool will start reindexing from the base block height (inclusive); the
default end-height is 0, meaning the tool will reindex up to the latest block height (inclusive). The user can omit
either or both arguments.
`,
Example: `
tendermint reindex-event
tendermint reindex-event --start-height 2
tendermint reindex-event --end-height 10
tendermint reindex-event --start-height 2 --end-height 10
`,
Run: func(cmd *cobra.Command, args []string) {
bs, ss, err := loadStateAndBlockStore(config)
if err != nil {
fmt.Println(reindexFailed, err)
return
}
if err := checkValidHeight(bs); err != nil {
fmt.Println(reindexFailed, err)
return
}
es, err := loadEventSinks(config)
if err != nil {
fmt.Println(reindexFailed, err)
return
}
if err = eventReIndex(cmd, es, bs, ss); err != nil {
fmt.Println(reindexFailed, err)
return
}
fmt.Println("event re-index finished")
},
}
var (
startHeight int64
endHeight int64
)
func init() {
ReIndexEventCmd.Flags().Int64Var(&startHeight, "start-height", 0, "the block height would like to start for re-index")
ReIndexEventCmd.Flags().Int64Var(&endHeight, "end-height", 0, "the block height would like to finish for re-index")
}
func loadEventSinks(cfg *tmcfg.Config) ([]indexer.EventSink, error) {
// Check duplicated sinks.
sinks := map[string]bool{}
for _, s := range cfg.TxIndex.Indexer {
sl := strings.ToLower(s)
if sinks[sl] {
return nil, errors.New("found duplicated sinks, please check the tx-index section in the config.toml")
}
sinks[sl] = true
}
eventSinks := []indexer.EventSink{}
for k := range sinks {
switch k {
case string(indexer.NULL):
return nil, errors.New("found null event sink, please check the tx-index section in the config.toml")
case string(indexer.KV):
store, err := tmcfg.DefaultDBProvider(&tmcfg.DBContext{ID: "tx_index", Config: cfg})
if err != nil {
return nil, err
}
eventSinks = append(eventSinks, kv.NewEventSink(store))
case string(indexer.PSQL):
conn := cfg.TxIndex.PsqlConn
if conn == "" {
return nil, errors.New("the psql connection settings cannot be empty")
}
es, _, err := psql.NewEventSink(conn, chainID)
if err != nil {
return nil, err
}
eventSinks = append(eventSinks, es)
default:
return nil, errors.New("unsupported event sink type")
}
}
if len(eventSinks) == 0 {
return nil, errors.New("no proper event sink can do event re-indexing," +
" please check the tx-index section in the config.toml")
}
if !indexer.IndexingEnabled(eventSinks) {
return nil, fmt.Errorf("no event sink has been enabled")
}
return eventSinks, nil
}
func loadStateAndBlockStore(cfg *tmcfg.Config) (*store.BlockStore, state.Store, error) {
dbType := tmdb.BackendType(cfg.DBBackend)
// Get BlockStore
blockStoreDB, err := tmdb.NewDB("blockstore", dbType, cfg.DBDir())
if err != nil {
return nil, nil, err
}
blockStore := store.NewBlockStore(blockStoreDB)
// Get StateStore
stateDB, err := tmdb.NewDB("state", dbType, cfg.DBDir())
if err != nil {
return nil, nil, err
}
stateStore := state.NewStore(stateDB)
return blockStore, stateStore, nil
}
func eventReIndex(cmd *cobra.Command, es []indexer.EventSink, bs state.BlockStore, ss state.Store) error {
var bar progressbar.Bar
bar.NewOption(startHeight-1, endHeight)
fmt.Println("start re-indexing events:")
defer bar.Finish()
for i := startHeight; i <= endHeight; i++ {
select {
case <-cmd.Context().Done():
return fmt.Errorf("event re-index terminated at height %d: %w", i, cmd.Context().Err())
default:
b := bs.LoadBlock(i)
if b == nil {
return fmt.Errorf("not able to load block at height %d from the blockstore", i)
}
r, err := ss.LoadABCIResponses(i)
if err != nil {
return fmt.Errorf("not able to load ABCI Response at height %d from the statestore", i)
}
e := types.EventDataNewBlockHeader{
Header: b.Header,
NumTxs: int64(len(b.Txs)),
ResultBeginBlock: *r.BeginBlock,
ResultEndBlock: *r.EndBlock,
}
var batch *indexer.Batch
if e.NumTxs > 0 {
batch = indexer.NewBatch(e.NumTxs)
for i, tx := range b.Data.Txs {
tr := abcitypes.TxResult{
Height: b.Height,
Index: uint32(i),
Tx: tx,
Result: *(r.DeliverTxs[i]),
}
_ = batch.Add(&tr)
}
}
for _, sink := range es {
if err := sink.IndexBlockEvents(e); err != nil {
return fmt.Errorf("block event re-index at height %d failed: %w", i, err)
}
if batch != nil {
if err := sink.IndexTxEvents(batch.Ops); err != nil {
return fmt.Errorf("tx event re-index at height %d failed: %w", i, err)
}
}
}
}
bar.Play(i)
}
return nil
}
func checkValidHeight(bs state.BlockStore) error {
base := bs.Base()
if startHeight == 0 {
startHeight = base
fmt.Printf("set the start block height to the base height of the blockstore %d \n", base)
}
if startHeight < base {
return fmt.Errorf("%s (requested start height: %d, base height: %d)", ctypes.ErrHeightNotAvailable, startHeight, base)
}
height := bs.Height()
if startHeight > height {
return fmt.Errorf(
"%s (requested start height: %d, store height: %d)", ctypes.ErrHeightNotAvailable, startHeight, height)
}
if endHeight == 0 || endHeight > height {
endHeight = height
fmt.Printf("set the end block height to the latest height of the blockstore %d \n", height)
}
if endHeight < base {
return fmt.Errorf(
"%s (requested end height: %d, base height: %d)", ctypes.ErrHeightNotAvailable, endHeight, base)
}
if endHeight < startHeight {
return fmt.Errorf(
"%s (requested the end height: %d is less than the start height: %d)",
ctypes.ErrInvalidRequest, startHeight, endHeight)
}
return nil
}

View File

@@ -1,171 +0,0 @@
package commands
import (
"context"
"errors"
"testing"
"github.com/spf13/cobra"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
abcitypes "github.com/tendermint/tendermint/abci/types"
tmcfg "github.com/tendermint/tendermint/config"
prototmstate "github.com/tendermint/tendermint/proto/tendermint/state"
"github.com/tendermint/tendermint/state/indexer"
"github.com/tendermint/tendermint/state/mocks"
"github.com/tendermint/tendermint/types"
)
const (
height int64 = 10
base int64 = 2
)
func setupReIndexEventCmd() *cobra.Command {
reIndexEventCmd := &cobra.Command{
Use: ReIndexEventCmd.Use,
Run: func(cmd *cobra.Command, args []string) {},
}
_ = reIndexEventCmd.ExecuteContext(context.Background())
return reIndexEventCmd
}
func TestReIndexEventCheckHeight(t *testing.T) {
mockBlockStore := &mocks.BlockStore{}
mockBlockStore.
On("Base").Return(base).
On("Height").Return(height)
testCases := []struct {
startHeight int64
endHeight int64
validHeight bool
}{
{0, 0, true},
{0, base, true},
{0, base - 1, false},
{0, height, true},
{0, height + 1, true},
{0, 0, true},
{base - 1, 0, false},
{base, 0, true},
{base, base, true},
{base, base - 1, false},
{base, height, true},
{base, height + 1, true},
{height, 0, true},
{height, base, false},
{height, height - 1, false},
{height, height, true},
{height, height + 1, true},
{height + 1, 0, false},
}
for _, tc := range testCases {
startHeight = tc.startHeight
endHeight = tc.endHeight
err := checkValidHeight(mockBlockStore)
if tc.validHeight {
require.NoError(t, err)
} else {
require.Error(t, err)
}
}
}
func TestLoadEventSink(t *testing.T) {
testCases := []struct {
sinks []string
connURL string
loadErr bool
}{
{[]string{}, "", true},
{[]string{"NULL"}, "", true},
{[]string{"KV"}, "", false},
{[]string{"KV", "KV"}, "", true},
{[]string{"PSQL"}, "", true}, // true because empty connect url
{[]string{"PSQL"}, "wrongUrl", true}, // true because wrong connect url
// skip to test PSQL connect with correct url
{[]string{"UnsupportedSinkType"}, "wrongUrl", true},
}
for _, tc := range testCases {
cfg := tmcfg.TestConfig()
cfg.TxIndex.Indexer = tc.sinks
cfg.TxIndex.PsqlConn = tc.connURL
_, err := loadEventSinks(cfg)
if tc.loadErr {
require.Error(t, err)
} else {
require.NoError(t, err)
}
}
}
func TestLoadBlockStore(t *testing.T) {
bs, ss, err := loadStateAndBlockStore(tmcfg.TestConfig())
require.NoError(t, err)
require.NotNil(t, bs)
require.NotNil(t, ss)
}
func TestReIndexEvent(t *testing.T) {
mockBlockStore := &mocks.BlockStore{}
mockStateStore := &mocks.Store{}
mockEventSink := &mocks.EventSink{}
mockBlockStore.
On("Base").Return(base).
On("Height").Return(height).
On("LoadBlock", base).Return(nil).Once().
On("LoadBlock", base).Return(&types.Block{Data: types.Data{Txs: types.Txs{make(types.Tx, 1)}}}).
On("LoadBlock", height).Return(&types.Block{Data: types.Data{Txs: types.Txs{make(types.Tx, 1)}}})
mockEventSink.
On("Type").Return(indexer.KV).
On("IndexBlockEvents", mock.AnythingOfType("types.EventDataNewBlockHeader")).Return(errors.New("")).Once().
On("IndexBlockEvents", mock.AnythingOfType("types.EventDataNewBlockHeader")).Return(nil).
On("IndexTxEvents", mock.AnythingOfType("[]*types.TxResult")).Return(errors.New("")).Once().
On("IndexTxEvents", mock.AnythingOfType("[]*types.TxResult")).Return(nil)
dtx := abcitypes.ResponseDeliverTx{}
abciResp := &prototmstate.ABCIResponses{
DeliverTxs: []*abcitypes.ResponseDeliverTx{&dtx},
EndBlock: &abcitypes.ResponseEndBlock{},
BeginBlock: &abcitypes.ResponseBeginBlock{},
}
mockStateStore.
On("LoadABCIResponses", base).Return(nil, errors.New("")).Once().
On("LoadABCIResponses", base).Return(abciResp, nil).
On("LoadABCIResponses", height).Return(abciResp, nil)
testCases := []struct {
startHeight int64
endHeight int64
reIndexErr bool
}{
{base, height, true}, // LoadBlock error
{base, height, true}, // LoadABCIResponses error
{base, height, true}, // index block event error
{base, height, true}, // index tx event error
{base, base, false},
{height, height, false},
}
for _, tc := range testCases {
startHeight = tc.startHeight
endHeight = tc.endHeight
err := eventReIndex(setupReIndexEventCmd(), []indexer.EventSink{mockEventSink}, mockBlockStore, mockStateStore)
if tc.reIndexErr {
require.Error(t, err)
} else {
require.NoError(t, err)
}
}
}

View File

@@ -48,7 +48,9 @@ func AddNodeFlags(cmd *cobra.Command) {
"proxy-app",
config.ProxyApp,
"proxy app address, or one of: 'kvstore',"+
" 'persistent_kvstore' or 'noop' for local testing.")
" 'persistent_kvstore',"+
" 'counter',"+
" 'counter_serial' or 'noop' for local testing.")
cmd.Flags().String("abci", config.ABCI, "specify abci transport (socket | grpc)")
// rpc flags
@@ -83,10 +85,7 @@ func AddNodeFlags(cmd *cobra.Command) {
config.Consensus.CreateEmptyBlocksInterval.String(),
"the possible interval between empty blocks")
addDBFlags(cmd)
}
func addDBFlags(cmd *cobra.Command) {
// db flags
cmd.Flags().String(
"db-backend",
config.DBBackend,

View File

@@ -15,7 +15,6 @@ func main() {
rootCmd := cmd.RootCmd
rootCmd.AddCommand(
cmd.GenValidatorCmd,
cmd.ReIndexEventCmd,
cmd.InitFilesCmd,
cmd.ProbeUpnpCmd,
cmd.LightCmd,
@@ -28,7 +27,6 @@ func main() {
cmd.ShowNodeIDCmd,
cmd.GenNodeKeyCmd,
cmd.VersionCmd,
cmd.MakeKeyMigrateCommand(),
debug.DebugCmd,
cli.NewCompletionCmd(rootCmd, true),
)

View File

@@ -29,8 +29,8 @@ const (
ModeValidator = "validator"
ModeSeed = "seed"
BlockSyncV0 = "v0"
BlockSyncV2 = "v2"
BlockchainV0 = "v0"
BlockchainV2 = "v2"
MempoolV0 = "v0"
MempoolV1 = "v1"
@@ -76,7 +76,7 @@ type Config struct {
P2P *P2PConfig `mapstructure:"p2p"`
Mempool *MempoolConfig `mapstructure:"mempool"`
StateSync *StateSyncConfig `mapstructure:"statesync"`
BlockSync *BlockSyncConfig `mapstructure:"fastsync"`
FastSync *FastSyncConfig `mapstructure:"fastsync"`
Consensus *ConsensusConfig `mapstructure:"consensus"`
TxIndex *TxIndexConfig `mapstructure:"tx-index"`
Instrumentation *InstrumentationConfig `mapstructure:"instrumentation"`
@@ -91,7 +91,7 @@ func DefaultConfig() *Config {
P2P: DefaultP2PConfig(),
Mempool: DefaultMempoolConfig(),
StateSync: DefaultStateSyncConfig(),
BlockSync: DefaultBlockSyncConfig(),
FastSync: DefaultFastSyncConfig(),
Consensus: DefaultConsensusConfig(),
TxIndex: DefaultTxIndexConfig(),
Instrumentation: DefaultInstrumentationConfig(),
@@ -114,7 +114,7 @@ func TestConfig() *Config {
P2P: TestP2PConfig(),
Mempool: TestMempoolConfig(),
StateSync: TestStateSyncConfig(),
BlockSync: TestBlockSyncConfig(),
FastSync: TestFastSyncConfig(),
Consensus: TestConsensusConfig(),
TxIndex: TestTxIndexConfig(),
Instrumentation: TestInstrumentationConfig(),
@@ -151,7 +151,7 @@ func (cfg *Config) ValidateBasic() error {
if err := cfg.StateSync.ValidateBasic(); err != nil {
return fmt.Errorf("error in [statesync] section: %w", err)
}
if err := cfg.BlockSync.ValidateBasic(); err != nil {
if err := cfg.FastSync.ValidateBasic(); err != nil {
return fmt.Errorf("error in [fastsync] section: %w", err)
}
if err := cfg.Consensus.ValidateBasic(); err != nil {
@@ -197,7 +197,6 @@ type BaseConfig struct { //nolint: maligned
// If this node is many blocks behind the tip of the chain, FastSync
// allows them to catchup quickly by downloading blocks in parallel
// and verifying their commits
// TODO: This should be moved to the blocksync config
FastSyncMode bool `mapstructure:"fast-sync"`
// Database backend: goleveldb | cleveldb | boltdb | rocksdb
@@ -447,7 +446,6 @@ type RPCConfig struct {
// TCP or UNIX socket address for the gRPC server to listen on
// NOTE: This server only supports /broadcast_tx_commit
// Deprecated: gRPC in the RPC layer of Tendermint will be removed in 0.36.
GRPCListenAddress string `mapstructure:"grpc-laddr"`
// Maximum number of simultaneous connections.
@@ -455,7 +453,6 @@ type RPCConfig struct {
// If you want to accept a larger number than the default, make sure
// you increase your OS limits.
// 0 - unlimited.
// Deprecated: gRPC in the RPC layer of Tendermint will be removed in 0.36.
GRPCMaxOpenConnections int `mapstructure:"grpc-max-open-connections"`
// Activate unsafe RPC commands like /dial-persistent-peers and /unsafe-flush-mempool
@@ -785,47 +782,25 @@ type MempoolConfig struct {
RootDir string `mapstructure:"home"`
Recheck bool `mapstructure:"recheck"`
Broadcast bool `mapstructure:"broadcast"`
// Maximum number of transactions in the mempool
Size int `mapstructure:"size"`
// Limit the total size of all txs in the mempool.
// This only accounts for raw transactions (e.g. given 1MB transactions and
// max-txs-bytes=5MB, mempool will only accept 5 transactions).
MaxTxsBytes int64 `mapstructure:"max-txs-bytes"`
// Size of the cache (used to filter transactions we saw earlier) in transactions
CacheSize int `mapstructure:"cache-size"`
// Do not remove invalid transactions from the cache (default: false)
// Set to true if it's not possible for any invalid transaction to become
// valid again in the future.
KeepInvalidTxsInCache bool `mapstructure:"keep-invalid-txs-in-cache"`
// Maximum size of a single transaction
// NOTE: the max size of a tx transmitted over the network is {max-tx-bytes}.
MaxTxBytes int `mapstructure:"max-tx-bytes"`
// Maximum size of a batch of transactions to send to a peer
// Including space needed by encoding (one varint per transaction).
// XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796
MaxBatchBytes int `mapstructure:"max-batch-bytes"`
// TTLDuration, if non-zero, defines the maximum amount of time a transaction
// can exist for in the mempool.
//
// Note, if TTLNumBlocks is also defined, a transaction will be removed if it
// has existed in the mempool at least TTLNumBlocks number of blocks or if it's
// insertion time into the mempool is beyond TTLDuration.
TTLDuration time.Duration `mapstructure:"ttl-duration"`
// TTLNumBlocks, if non-zero, defines the maximum number of blocks a transaction
// can exist for in the mempool.
//
// Note, if TTLDuration is also defined, a transaction will be removed if it
// has existed in the mempool at least TTLNumBlocks number of blocks or if
// it's insertion time into the mempool is beyond TTLDuration.
TTLNumBlocks int64 `mapstructure:"ttl-num-blocks"`
}
// DefaultMempoolConfig returns a default configuration for the Tendermint mempool.
@@ -836,12 +811,10 @@ func DefaultMempoolConfig() *MempoolConfig {
Broadcast: true,
// Each signature verification takes .5ms, Size reduced until we implement
// ABCI Recheck
Size: 5000,
MaxTxsBytes: 1024 * 1024 * 1024, // 1GB
CacheSize: 10000,
MaxTxBytes: 1024 * 1024, // 1MB
TTLDuration: 0 * time.Second,
TTLNumBlocks: 0,
Size: 5000,
MaxTxsBytes: 1024 * 1024 * 1024, // 1GB
CacheSize: 10000,
MaxTxBytes: 1024 * 1024, // 1MB
}
}
@@ -867,13 +840,6 @@ func (cfg *MempoolConfig) ValidateBasic() error {
if cfg.MaxTxBytes < 0 {
return errors.New("max-tx-bytes can't be negative")
}
if cfg.TTLDuration < 0 {
return errors.New("ttl-duration can't be negative")
}
if cfg.TTLNumBlocks < 0 {
return errors.New("ttl-num-blocks can't be negative")
}
return nil
}
@@ -912,7 +878,7 @@ func DefaultStateSyncConfig() *StateSyncConfig {
}
}
// TestStateSyncConfig returns a default configuration for the state sync service
// TestFastSyncConfig returns a default configuration for the state sync service
func TestStateSyncConfig() *StateSyncConfig {
return DefaultStateSyncConfig()
}
@@ -968,33 +934,34 @@ func (cfg *StateSyncConfig) ValidateBasic() error {
}
//-----------------------------------------------------------------------------
// FastSyncConfig
// BlockSyncConfig (formerly known as FastSync) defines the configuration for the Tendermint block sync service
type BlockSyncConfig struct {
// FastSyncConfig defines the configuration for the Tendermint fast sync service
type FastSyncConfig struct {
Version string `mapstructure:"version"`
}
// DefaultBlockSyncConfig returns a default configuration for the block sync service
func DefaultBlockSyncConfig() *BlockSyncConfig {
return &BlockSyncConfig{
Version: BlockSyncV0,
// DefaultFastSyncConfig returns a default configuration for the fast sync service
func DefaultFastSyncConfig() *FastSyncConfig {
return &FastSyncConfig{
Version: BlockchainV0,
}
}
// TestBlockSyncConfig returns a default configuration for the block sync.
func TestBlockSyncConfig() *BlockSyncConfig {
return DefaultBlockSyncConfig()
// TestFastSyncConfig returns a default configuration for the fast sync.
func TestFastSyncConfig() *FastSyncConfig {
return DefaultFastSyncConfig()
}
// ValidateBasic performs basic validation.
func (cfg *BlockSyncConfig) ValidateBasic() error {
func (cfg *FastSyncConfig) ValidateBasic() error {
switch cfg.Version {
case BlockSyncV0:
case BlockchainV0:
return nil
case BlockchainV2:
return nil
case BlockSyncV2:
return errors.New("blocksync version v2 is no longer supported. Please use v0")
default:
return fmt.Errorf("unknown blocksync version %s", cfg.Version)
return fmt.Errorf("unknown fastsync version %s", cfg.Version)
}
}

View File

@@ -125,13 +125,13 @@ func TestStateSyncConfigValidateBasic(t *testing.T) {
require.NoError(t, cfg.ValidateBasic())
}
func TestBlockSyncConfigValidateBasic(t *testing.T) {
cfg := TestBlockSyncConfig()
func TestFastSyncConfigValidateBasic(t *testing.T) {
cfg := TestFastSyncConfig()
assert.NoError(t, cfg.ValidateBasic())
// tamper with version
cfg.Version = "v2"
assert.Error(t, cfg.ValidateBasic())
assert.NoError(t, cfg.ValidateBasic())
cfg.Version = "invalid"
assert.Error(t, cfg.ValidateBasic())

View File

@@ -200,7 +200,6 @@ cors-allowed-headers = [{{ range .RPC.CORSAllowedHeaders }}{{ printf "%q, " . }}
# TCP or UNIX socket address for the gRPC server to listen on
# NOTE: This server only supports /broadcast_tx_commit
# Deprecated gRPC in the RPC layer of Tendermint will be deprecated in 0.36.
grpc-laddr = "{{ .RPC.GRPCListenAddress }}"
# Maximum number of simultaneous connections.
@@ -210,7 +209,6 @@ grpc-laddr = "{{ .RPC.GRPCListenAddress }}"
# 0 - unlimited.
# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
# 1024 - 40 - 10 - 50 = 924 = ~900
# Deprecated gRPC in the RPC layer of Tendermint will be deprecated in 0.36.
grpc-max-open-connections = {{ .RPC.GRPCMaxOpenConnections }}
# Activate unsafe RPC commands like /dial-seeds and /unsafe-flush-mempool
@@ -399,22 +397,6 @@ max-tx-bytes = {{ .Mempool.MaxTxBytes }}
# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796
max-batch-bytes = {{ .Mempool.MaxBatchBytes }}
# ttl-duration, if non-zero, defines the maximum amount of time a transaction
# can exist for in the mempool.
#
# Note, if ttl-num-blocks is also defined, a transaction will be removed if it
# has existed in the mempool at least ttl-num-blocks number of blocks or if it's
# insertion time into the mempool is beyond ttl-duration.
ttl-duration = "{{ .Mempool.TTLDuration }}"
# ttl-num-blocks, if non-zero, defines the maximum number of blocks a transaction
# can exist for in the mempool.
#
# Note, if ttl-duration is also defined, a transaction will be removed if it
# has existed in the mempool at least ttl-num-blocks number of blocks or if
# it's insertion time into the mempool is beyond ttl-duration.
ttl-num-blocks = {{ .Mempool.TTLNumBlocks }}
#######################################################
### State Sync Configuration Options ###
#######################################################
@@ -452,14 +434,14 @@ chunk-request-timeout = "{{ .StateSync.ChunkRequestTimeout }}"
fetchers = "{{ .StateSync.Fetchers }}"
#######################################################
### Block Sync Configuration Connections ###
### Fast Sync Configuration Connections ###
#######################################################
[fastsync]
# Block Sync version to use:
# 1) "v0" (default) - the legacy block sync implementation
# 2) "v2" - DEPRECATED, please use v0
version = "{{ .BlockSync.Version }}"
# Fast Sync version to use:
# 1) "v0" (default) - the legacy fast sync implementation
# 2) "v2" - complete redesign of v0, optimized for testability & readability
version = "{{ .FastSync.Version }}"
#######################################################
### Consensus Configuration Options ###

View File

@@ -204,10 +204,7 @@ func (spn *ProofNode) FlattenAunts() [][]byte {
case spn.Right != nil:
innerHashes = append(innerHashes, spn.Right.Hash)
default:
// FIXME(fromberger): Per the documentation above, exactly one of
// these fields should be set. If that is true, this should probably
// be a panic since it violates the invariant. If not, when can it
// be OK to have no siblings? Does this occur at the leaves?
break
}
spn = spn.Parent
}

View File

@@ -13,7 +13,7 @@ import (
tmjson "github.com/tendermint/tendermint/libs/json"
// necessary for Bitcoin address format
"golang.org/x/crypto/ripemd160" // nolint
"golang.org/x/crypto/ripemd160" // nolint: staticcheck
)
//-------------------------------------

View File

@@ -31,6 +31,7 @@ Available Commands:
check_tx Validate a tx
commit Commit the application state and return the Merkle root hash
console Start an interactive abci console for multiple commands
counter ABCI demo example
deliver_tx Deliver a new tx to the application
kvstore ABCI demo example
echo Have the application echo a message
@@ -213,9 +214,137 @@ we do `deliver_tx "abc=efg"` it will store `(abc, efg)`.
Similarly, you could put the commands in a file and run
`abci-cli --verbose batch < myfile`.
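For example, a (hypothetical) `myfile` could hold one console command per
line, in the same form you would type them at the prompt:
```sh
echo hello
info
deliver_tx "abc=efg"
commit
```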
## Counter - Another Example
Now that we've got the hang of it, let's try another application, the
"counter" app.
Like the kvstore app, its code can be found
[here](https://github.com/tendermint/tendermint/blob/master/abci/cmd/abci-cli/abci-cli.go)
and looks like:
```go
func cmdCounter(cmd *cobra.Command, args []string) error {
app := counter.NewApplication(flagSerial)
logger := log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
// Start the listener
srv, err := server.NewServer(flagAddress, flagAbci, app)
if err != nil {
return err
}
srv.SetLogger(logger.With("module", "abci-server"))
if err := srv.Start(); err != nil {
return err
}
// Stop upon receiving SIGTERM or CTRL-C.
tmos.TrapSignal(logger, func() {
// Cleanup
if err := srv.Stop(); err != nil {
logger.Error("Error while stopping server", "err", err)
}
})
// Run forever.
select {}
}
```
The counter app doesn't use a Merkle tree; it just counts how many times
we've sent a transaction, asked for a hash, or committed the state. The
result of `commit` is just the number of transactions sent.
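The `Commit` handler from the counter application in this changeset is
short enough to quote in full; note how the transaction count is encoded
as a big-endian app hash:
```go
// Commit reports the running tx count as the "app hash":
// after five transactions the returned hash is 0x0000000000000005.
func (app *Application) Commit() types.ResponseCommit {
	app.hashCount++
	if app.txCount == 0 {
		return types.ResponseCommit{}
	}
	hash := make([]byte, 8)
	binary.BigEndian.PutUint64(hash, uint64(app.txCount))
	return types.ResponseCommit{Data: hash}
}
```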
This application has two modes: `serial=off` and `serial=on`.
When `serial=on`, each transaction must be a big-endian encoded incrementing
integer, starting at 0.
If `serial=off`, there are no restrictions on transactions.
We can turn `serial` on by passing the `--serial` flag when starting the app.
When `serial=on`, some transactions are invalid. In a live blockchain,
transactions collect in memory before they are committed into blocks. To
avoid wasting resources on invalid transactions, ABCI provides the
`check_tx` message, which application developers can use to accept or
reject transactions before they are stored in memory or gossiped to
other peers.
In this instance of the counter app, `check_tx` only allows transactions
whose integer is greater than the last committed one.
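Condensed from the counter application's `CheckTx` handler in this
changeset (short transactions are left-padded to 8 bytes before being
decoded as a big-endian integer):
```go
func (app *Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
	if app.serial {
		if len(req.Tx) > 8 {
			return types.ResponseCheckTx{
				Code: code.CodeTypeEncodingError,
				Log:  fmt.Sprintf("Max tx size is 8 bytes, got %d", len(req.Tx))}
		}
		tx8 := make([]byte, 8)
		copy(tx8[len(tx8)-len(req.Tx):], req.Tx)
		txValue := binary.BigEndian.Uint64(tx8)
		// values below the committed count are stale; equal or higher
		// values may still become valid later, so they are accepted
		if txValue < uint64(app.txCount) {
			return types.ResponseCheckTx{
				Code: code.CodeTypeBadNonce,
				Log:  fmt.Sprintf("Invalid nonce. Expected >= %v, got %v", app.txCount, txValue)}
		}
	}
	return types.ResponseCheckTx{Code: code.CodeTypeOK}
}
```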
Let's kill the console and the kvstore application, and start the
counter app in serial mode (the session below assumes serial is on):
```sh
abci-cli counter --serial
```
In another window, start the `abci-cli console`:
```sh
> check_tx 0x00
-> code: OK
> check_tx 0xff
-> code: OK
> deliver_tx 0x00
-> code: OK
> check_tx 0x00
-> code: BadNonce
-> log: Invalid nonce. Expected >= 1, got 0
> deliver_tx 0x01
-> code: OK
> deliver_tx 0x04
-> code: BadNonce
-> log: Invalid nonce. Expected 2, got 4
> info
-> code: OK
-> data: {"hashes":0,"txs":2}
-> data.hex: 0x7B22686173686573223A302C22747873223A327D
```
This is a very simple application, but between `counter` and `kvstore`,
it's easy to see how you can build out arbitrary application states on
top of the ABCI. [Hyperledger's
Burrow](https://github.com/hyperledger/burrow) also runs atop ABCI,
bringing with it Ethereum-like accounts, the Ethereum virtual-machine,
Monax's permissioning scheme, and native contracts extensions.
But the ultimate flexibility comes from being able to write the
application easily in any language.
We have implemented the counter in a number of languages [see the
example directory](https://github.com/tendermint/tendermint/tree/master/abci/example).
To run the Node.js version, first download & install [the JavaScript ABCI server](https://github.com/tendermint/js-abci):
```sh
git clone https://github.com/tendermint/js-abci.git
cd js-abci
npm install abci
```
Now you can start the app:
```sh
node example/counter.js
```
(you'll have to kill the other counter application process). In another
window, run the console and those previous ABCI commands. You should get
the same results as for the Go version.
## Bounties
Want to write an app in your favorite language?! We'd be happy
Want to write the counter app in your favorite language?! We'd be happy
to add you to our [ecosystem](https://github.com/tendermint/awesome#ecosystem)!
See [funding](https://github.com/interchainio/funding) opportunities from the
[Interchain Foundation](https://interchain.io/) for implementations in new languages and more.

View File

@@ -37,8 +37,8 @@ cd $GOPATH/src/github.com/tendermint/tendermint
make install_abci
```
Now you should have the `abci-cli` installed; you'll notice the `kvstore`
command, an example application written
Now you should have the `abci-cli` installed; you'll see a couple of
commands (`counter` and `kvstore`) that are example applications written
in Go. See below for an application written in JavaScript.
Now, let's run some apps!
@@ -165,6 +165,92 @@ curl -s 'localhost:26657/abci_query?data="name"'
Try some other transactions and queries to make sure everything is
working!
## Counter - Another Example
Now that we've got the hang of it, let's try another application, the
`counter` app.
The counter app doesn't use a Merkle tree; it just counts how many times
we've sent a transaction or committed the state.
This application has two modes: `serial=off` and `serial=on`.
When `serial=on`, transactions must be a big-endian encoded incrementing
integer, starting at 0.
If `serial=off`, there are no restrictions on transactions.
In a live blockchain, transactions collect in memory before they are
committed into blocks. To avoid wasting resources on invalid
transactions, ABCI provides the `CheckTx` message, which application
developers can use to accept or reject transactions, before they are
stored in memory or gossipped to other peers.
In this instance of the counter app, with `serial=on`, `CheckTx` only
allows transactions whose integer is greater than the last committed
one.
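Delivery is stricter than `CheckTx`: judging by the `Invalid nonce. Expected
1, got 5` error in the session below, each delivered transaction must match
the committed count exactly. A sketch of that rule under the same illustrative
naming, not the app's real types:
```go
package main

import (
	"encoding/binary"
	"fmt"
)

// deliverSerialTx sketches the serial-mode delivery rule: the decoded
// big-endian integer must equal the committed count exactly, and each
// successful delivery advances the count that commit later reports.
func deliverSerialTx(tx []byte, committed *uint64) error {
	if len(tx) > 8 {
		return fmt.Errorf("tx is %d bytes, maximum is 8", len(tx))
	}
	padded := make([]byte, 8)
	copy(padded[8-len(tx):], tx)
	nonce := binary.BigEndian.Uint64(padded)
	if nonce != *committed {
		return fmt.Errorf("invalid nonce: expected %d, got %d", *committed, nonce)
	}
	*committed++
	return nil
}

func main() {
	var count uint64
	fmt.Println(deliverSerialTx([]byte{0x00}, &count)) // <nil>
	fmt.Println(deliverSerialTx([]byte{0x05}, &count)) // invalid nonce: expected 1, got 5
}
```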
Let's kill the previous instance of `tendermint` and the `kvstore`
application, and start the counter app. We can enable `serial=on` with a
flag:
```sh
abci-cli counter --serial
```
In another window, reset then start Tendermint:
```sh
tendermint unsafe_reset_all
tendermint start
```
Once again, you can see the blocks streaming by. Let's send some
transactions. Since we have set `serial=on`, the first transaction must
be the number `0`:
```sh
curl localhost:26657/broadcast_tx_commit?tx=0x00
```
Note the empty (hence successful) response. The next transaction must be
the number `1`. If instead, we try to send a `5`, we get an error:
```json
> curl localhost:26657/broadcast_tx_commit?tx=0x05
{
"jsonrpc": "2.0",
"id": "",
"result": {
"check_tx": {},
"deliver_tx": {
"code": 2,
"log": "Invalid nonce. Expected 1, got 5"
},
"hash": "33B93DFF98749B0D6996A70F64071347060DC19C",
"height": 34
}
}
```
But if we send a `1`, it works again:
```json
> curl localhost:26657/broadcast_tx_commit?tx=0x01
{
"jsonrpc": "2.0",
"id": "",
"result": {
"check_tx": {},
"deliver_tx": {},
"hash": "F17854A977F6FA7EEA1BD758E296710B86F72F3D",
"height": 60
}
}
```
For more details on the `broadcast_tx` API, see [the guide on using
Tendermint](../tendermint-core/using-tendermint.md).
## CounterJS - Example in Another Language
@@ -31,61 +31,24 @@ For example:
would be equal to the composite key of `jack.account.number`.
By default, Tendermint will index all transactions by their respective hashes
and heights, and all blocks by their height.
## Configuration
Operators can configure indexing via the `[tx_index]` section. The `indexer`
field takes a series of supported indexers. If `null` is included, indexing will
be turned off regardless of other values provided.
Let's take a look at the `[tx_index]` config section:
```toml
[tx-index]
##### transactions indexer configuration options #####
[tx_index]
# The backend database list to back the indexer.
# If the list contains null, no indexer service will be used.
#
# The application will set which txs to index. In some cases a node operator will be able
# to decide which txs to index based on configuration set in the application.
# What indexer to use for transactions
#
# Options:
# 1) "null"
# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed.
# 3) "psql" - the indexer services backed by PostgreSQL.
# indexer = []
indexer = "kv"
```
### Supported Indexers
By default, Tendermint will index all transactions by their respective hashes
and heights, and all blocks by their height.
#### KV
The `kv` indexer type is an embedded key-value store supported by the main
underlying Tendermint database. Using the `kv` indexer type allows you to query
for block and transaction events directly against Tendermint's RPC. However, the
query syntax is limited and so this indexer type might be deprecated or removed
entirely in the future.
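For instance, transactions indexed under `tx.height` can be searched through
the Go RPC client. This is a hedged sketch: the `New` constructor and the
`TxSearch` signature have shifted between releases, so check both against the
client version you build against:
```go
package main

import (
	"context"
	"fmt"

	rpchttp "github.com/tendermint/tendermint/rpc/client/http"
)

func main() {
	// Constructor arguments vary by release; some versions also take a
	// websocket endpoint such as "/websocket".
	c, err := rpchttp.New("tcp://127.0.0.1:26657")
	if err != nil {
		panic(err)
	}

	page, perPage := 1, 30
	res, err := c.TxSearch(context.Background(), "tx.height=5", false, &page, &perPage, "asc")
	if err != nil {
		panic(err)
	}
	for _, tx := range res.Txs {
		fmt.Printf("hash=%X height=%d\n", tx.Hash, tx.Height)
	}
}
```
Equivalent queries can also be issued as plain HTTP `/tx_search` requests; the
client call above is just the typed wrapper.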
#### PostgreSQL
The `psql` indexer type allows an operator to enable block and transaction event
indexing by proxying it to an external PostgreSQL instance allowing for the events
to be stored in relational models. Since the events are stored in an RDBMS, operators
can leverage SQL to perform a series of rich and complex queries that are not
supported by the `kv` indexer type. Since operators can leverage SQL directly,
searching is not enabled for the `psql` indexer type via Tendermint's RPC -- any
such query will fail.
Note, the SQL schema is stored in `state/indexer/sink/psql/schema.sql` and operators
must explicitly create the relations prior to starting Tendermint and enabling
the `psql` indexer type.
Example:
```shell
$ psql ... -f state/indexer/sink/psql/schema.sql
```
You can turn off indexing completely by setting `tx_index` to `null`.
## Default Indexes
@@ -61,7 +61,7 @@ Note the context/background should be written in the present tense.
- [ADR-053: State-Sync-Prototype](./adr-053-state-sync-prototype.md)
- [ADR-054: Crypto-Encoding-2](./adr-054-crypto-encoding-2.md)
- [ADR-055: Protobuf-Design](./adr-055-protobuf-design.md)
- [ADR-056: Light-Client-Amnesia-Attacks](./adr-056-light-client-amnesia-attacks.md)
- [ADR-056: Light-Client-Amnesia-Attacks](./adr-056-light-client-amnesia-attacks)
- [ADR-059: Evidence-Composition-and-Lifecycle](./adr-059-evidence-composition-and-lifecycle.md)
- [ADR-062: P2P-Architecture](./adr-062-p2p-architecture.md)
- [ADR-063: Privval-gRPC](./adr-063-privval-grpc.md)
@@ -97,4 +97,3 @@ Note the context/background should be written in the present tense.
- [ADR-041: Proposer-Selection-via-ABCI](./adr-041-proposer-selection-via-abci.md)
- [ADR-045: ABCI-Evidence](./adr-045-abci-evidence.md)
- [ADR-057: RPC](./adr-057-RPC.md)
- [ADR-069: Node Initialization](./adr-069-flexible-node-initialization.md)
@@ -97,7 +97,8 @@ design for tendermint was originally tracked in
[#828](https://github.com/tendermint/tendermint/issues/828).
#### Eager StateSync
Warp Sync as implemented in OpenEthereum to rapidly
Warp Sync as implemented in Parity
["Warp Sync"](https://wiki.parity.io/Warp-Sync-Snapshot-Format.html) to rapidly
download both blocks and state snapshots from peers. Data is carved into ~4MB
chunks and snappy compressed. Hashes of snappy compressed chunks are stored in a
manifest file which co-ordinates the state-sync. Obtaining a correct manifest
@@ -233,3 +234,5 @@ Proposed
[WIP General/Lazy State-Sync pseudo-spec](https://github.com/tendermint/tendermint/issues/3639) - Jae Proposal
[Warp Sync Implementation](https://github.com/tendermint/tendermint/pull/3594) - ackratos
[Chunk Proposal](https://github.com/tendermint/tendermint/pull/3799) - Bucky proposed
@@ -119,7 +119,7 @@ network usage.
---
Check out the formal specification
[here](https://github.com/tendermint/spec/tree/master/spec/light-client).
[here](https://docs.tendermint.com/master/spec/consensus/light-client.html).
## Status
@@ -18,7 +18,7 @@ graceful here, but that's for another day.
It's possible to fool lite clients without there being a fork on the
main chain - so called Fork-Lite. See the
[fork accountability](https://docs.tendermint.com/master/spec/light-client/accountability/)
[fork accountability](https://docs.tendermint.com/master/spec/consensus/fork-accountability.html)
document for more details. For a sequential lite client, this can happen via
equivocation or amnesia attacks. For a skipping lite client this can also happen
via lunatic validator attacks. There must be some way for applications to punish
@@ -179,7 +179,7 @@ This then ends the process and the verify function that was called at the start
the user.
For a detailed overview of how each of these three attacks can be conducted please refer to the
[fork accountability spec](https://github.com/tendermint/spec/blob/master/spec/consensus/light-client/accountability.md).
[fork accountability spec]((https://github.com/tendermint/spec/blob/master/spec/consensus/light-client/accountability.md)).
## Full Node Verification
@@ -1,273 +0,0 @@
# ADR 069: Flexible Node Initialization
## Changelog
- 2021-06-09: Initial Draft (@tychoish)
- 2021-07-21: Major Revision (@tychoish)
## Status
Proposed.
## Context
In an effort to support [Go-API-Stability](./adr-060-go-api-stability.md),
during the 0.35 development cycle, we have attempted to reduce the API
surface area by moving most of the interface of the `node` package into
unexported functions, as well as moving the reactors to an `internal`
package. Having this coincide with the 0.35 release made a lot of sense
because these interfaces were _already_ changing as a result of the `p2p`
[refactor](./adr-061-p2p-refactor-scope.md), so it made sense to think a bit
more about how tendermint exposes this API.
While the interfaces of the P2P layer and most of the node package are already
internalized, this precludes some operational patterns that are important to
users who use tendermint as a library. Specifically, introspecting the
tendermint node service and replacing components is not supported in the latest
version of the code, and some of these use cases would require maintaining a
vendor copy of the code. Adding these features requires rather extensive
(internal/implementation) changes to the `node` and `rpc` packages, and this
ADR describes a model for changing the way that tendermint nodes initialize, in
service of providing this kind of functionality.
We consider node initialization because the current implementation
provides strong connections between all components, as well as between
the components of the node and the RPC layer, and being able to think
about the interactions of these components will help enable these
features and help define the requirements of the node package.
## Alternative Approaches
These alternatives are presented to frame the design space and to
contextualize the decision in terms of product requirements. These
ideas are not inherently bad, and may even be possible or desirable
in the (distant) future; they merely provide additional context for how
we, in the moment, came to our decision(s).
### Do Nothing
The current implementation is functional and sufficient for the vast
majority of use cases (e.g., all users of the Cosmos-SDK as well as
anyone who runs tendermint and the ABCI application in separate
processes). In the current implementation, and even previous versions,
modifying node initialization or injecting custom components required
copying most of the `node` package, which required such users
to maintain a vendored copy of tendermint.
While this is (likely) not tenable in the long term, as users do want
more modularity, and the current service implementation is brittle and
difficult to maintain, in the short term it may be possible to delay
implementation somewhat. Eventually, however, we will need to make the
`node` package easier to maintain and reason about.
### Generic Service Pluggability
One possible system design would export interfaces (in the Golang
sense) for all components of the system, to permit runtime dependency
injection of all components in the system, so that users can compose
tendermint nodes of arbitrary user-supplied components.
Although this level of customization would provide benefits, it would be a huge
undertaking (particularly with regards to API design work) that we do not have
scope for at the moment. Eventually providing support for some kinds of
pluggability may be useful, so the current solution does not explicitly
foreclose the possibility of this alternative.
### Abstract Dependency Based Startup and Shutdown
The main proposal in this document makes tendermint node initialization simpler
and more abstract, but the system lacks a number of
features which daemon/service initialization could provide, such as a
system allowing the authors of services to control initialization and shutdown order
of components using dependency relationships.
Such a system could work by allowing services to declare
initialization order dependencies to other reactors (by ID, perhaps)
so that the node could decide the initialization based on the
dependencies declared by services rather than requiring the node to
encode this logic directly.
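To make the idea concrete, the node could derive a startup order from the
declared dependencies with a topological sort. A sketch, where the map shape
and the service IDs are invented for illustration:
```go
package main

import "fmt"

// startOrder sketches dependency-declared startup: deps maps a service ID to
// the IDs it must start after. A Kahn-style topological sort yields a safe
// order, or an error when the declarations form a cycle.
func startOrder(deps map[string][]string) ([]string, error) {
	indegree := map[string]int{}
	dependents := map[string][]string{}
	for id, ds := range deps {
		if _, ok := indegree[id]; !ok {
			indegree[id] = 0
		}
		for _, d := range ds {
			if _, ok := indegree[d]; !ok {
				indegree[d] = 0
			}
			indegree[id]++
			dependents[d] = append(dependents[d], id)
		}
	}
	var ready, order []string
	for id, n := range indegree {
		if n == 0 {
			ready = append(ready, id)
		}
	}
	for len(ready) > 0 {
		id := ready[0]
		ready = ready[1:]
		order = append(order, id)
		for _, dep := range dependents[id] {
			if indegree[dep]--; indegree[dep] == 0 {
				ready = append(ready, dep)
			}
		}
	}
	if len(order) != len(indegree) {
		return nil, fmt.Errorf("dependency cycle among services")
	}
	return order, nil
}

func main() {
	order, err := startOrder(map[string][]string{
		"statesync": {"p2p"},
		"consensus": {"statesync", "mempool"},
		"mempool":   {"p2p"},
		"p2p":       nil,
	})
	fmt.Println(order, err) // one valid order: [p2p mempool statesync consensus] <nil>
}
```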
This level of configuration is probably more complicated than is needed. Given
that the authors of components in the current implementation of tendermint
already *do* need to know about other components, a dependency-based system
would probably be overly-abstract at this stage.
## Decisions
- To the greatest extent possible, factor the code base so that
packages are responsible for their own initialization, and minimize
the amount of code in the `node` package itself.
- As a design goal, reduce direct coupling and dependencies between
components in the implementation of `node`.
- Begin iterating on a more-flexible internal framework for
initializing tendermint nodes to make the initialization process
less hard-coded by the implementation of the node objects.
- Reactors should not need to expose their interfaces *within* the
implementation of the node type
- This refactoring should be entirely opaque to users.
- These node initialization changes should not require a
reevaluation of the `service.Service` or a generic initialization
orchestration framework.
- Do not proactively provide a system for injecting
components/services within a tendermint node, though make it
possible to retrofit this kind of pluggability in the future if
needed.
- Prioritize implementation of the p2p-based statesync reactor to obviate the
need for users to inject a custom state-sync provider.
## Detailed Design
The [current
nodeImpl](https://github.com/tendermint/tendermint/blob/master/node/node.go#L47)
includes direct references to the implementations of each of the
reactors, which should be replaced by references to `service.Service`
objects. This will require moving construction of the [rpc
service](https://github.com/tendermint/tendermint/blob/master/node/node.go#L771)
into the constructor of
[makeNode](https://github.com/tendermint/tendermint/blob/master/node/node.go#L126). One
possible implementation of this would be to eliminate the current
`ConfigureRPC` method on the node package and instead [configure it
here](https://github.com/tendermint/tendermint/pull/6798/files#diff-375d57e386f20eaa5f09f02bb9d28bfc48ac3dca18d0325f59492208219e5618R441).
To avoid adding complexity to the `node` package, we will add a
composite service implementation to the `service` package
that implements `service.Service` and is composed of a sequence of
underlying `service.Service` objects and handles their
startup/shutdown in the specified sequential order.
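A minimal sketch of that composite, using a trimmed two-method stand-in for
`service.Service` (the real interface and its error handling are richer):
```go
package main

import "fmt"

// Service stands in for the richer service.Service interface.
type Service interface {
	Start() error
	Stop() error
}

// composite starts members in order and stops them in reverse, so later
// services can rely on earlier ones being up for their whole lifetime.
type composite struct{ members []Service }

func (c *composite) Start() error {
	for i, s := range c.members {
		if err := s.Start(); err != nil {
			for j := i - 1; j >= 0; j-- { // unwind whatever already started
				_ = c.members[j].Stop()
			}
			return fmt.Errorf("starting service %d: %w", i, err)
		}
	}
	return nil
}

func (c *composite) Stop() error {
	var firstErr error
	for i := len(c.members) - 1; i >= 0; i-- {
		if err := c.members[i].Stop(); err != nil && firstErr == nil {
			firstErr = err
		}
	}
	return firstErr
}

type noop struct{ name string }

func (n noop) Start() error { fmt.Println("start", n.name); return nil }
func (n noop) Stop() error  { fmt.Println("stop", n.name); return nil }

func main() {
	c := &composite{members: []Service{noop{"p2p"}, noop{"consensus"}}}
	_ = c.Start() // start p2p, start consensus
	_ = c.Stop()  // stop consensus, stop p2p
}
```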
Consensus, blocksync (*née* fast sync), and statesync all depend on
each other, and have significant initialization dependencies that are
presently encoded in the `node` package. As part of this change, a
new package/component (likely named `blocks` located at
`internal/blocks`) will encapsulate the initialization of these block
management areas of the code.
### Injectable Component Option
This section briefly describes a possible implementation for
user-supplied services running within a node. This should not be
implemented unless user-supplied components are a hard requirement for
a user.
In order to allow components to be replaced, a new public function
will be added to the public interface of `node` with a signature that
resembles the following:
```go
func NewWithServices(conf *config.Config,
logger log.Logger,
cf proxy.ClientCreator,
gen *types.GenesisDoc,
srvs []service.Service,
) (service.Service, error) {
```
The `service.Service` objects will be initialized in the order supplied, after
all pre-configured/default services have started (and shut down in reverse
order). The given services may implement additional interfaces, allowing them
to replace specific default services. `NewWithServices` will validate input
service lists with the following rules:
- None of the services may already be running.
- The caller may not supply more than one replacement reactor for a given
default service type.
If callers violate any of these rules, `NewWithServices` will return
an error. To retract support for this kind of operation in the future,
the function can be modified to *always* return an error.
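Those two rules could be validated up front along these lines; a sketch in
which `IsRunning` mirrors the method the real `service.Service` exposes, and
"service type" is read as the concrete Go type:
```go
package main

import (
	"fmt"
	"reflect"
)

// Service is a stand-in; the real service.Service exposes IsRunning().
type Service interface {
	IsRunning() bool
}

// validateServices sketches the NewWithServices input rules: no supplied
// service may already be running, and there may be at most one replacement
// per concrete service type.
func validateServices(srvs []Service) error {
	seen := make(map[reflect.Type]bool)
	for _, s := range srvs {
		if s.IsRunning() {
			return fmt.Errorf("service %T is already running", s)
		}
		t := reflect.TypeOf(s)
		if seen[t] {
			return fmt.Errorf("more than one replacement of type %v", t)
		}
		seen[t] = true
	}
	return nil
}

func main() {
	fmt.Println(validateServices(nil)) // <nil>: an empty list is trivially valid
}
```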
## Consequences
### Positive
- The node package will become easier to maintain.
- It will become easier to add additional services within tendermint
nodes.
- It will become possible to replace default components in the node
package without vendoring the tendermint repo and modifying internal
code.
- The current end-to-end (e2e) test suite will be able to prevent any
regressions, and the new functionality can be thoroughly unit tested.
- The scope of this project is very narrow, which minimizes risk.
### Negative
- This increases our reliance on the `service.Service` interface which
is probably not an interface that we want to fully commit to.
- This proposal implements a fairly minimal set of functionality and
leaves open the possibility for many additional features which are
not included in the scope of this proposal.
### Neutral
N/A
## Open Questions
- To what extent does this new initialization framework need to accommodate
the legacy p2p stack? Would it be possible to delay a great deal of this
work to the 0.36 cycle to avoid this complexity?
- Answer: _depends on timing_, and the requirement to ship pluggable reactors in 0.35.
- Where should additional public types be exported for the 0.35
release?
Related to the general project of API stabilization we want to deprecate
the `types` package, and move its contents into a new `pkg` hierarchy;
however, the design of the `pkg` interface is currently underspecified.
If `types` is going to remain for the 0.35 release, then we should consider
the impact of using multiple organizing modalities for this code within a
single release.
## Future Work
- Improve or simplify the `service.Service` interface. There are some
pretty clear limitations with this interface as written (there's no
way to time out a slow startup or shutdown, the cycle between the
`service.BaseService` and `service.Service` implementations is
troubling, and the default panic in `OnReset` seems questionable).
- As part of the refactor of `service.Service` have all services/nodes
respect the lifetime of a `context.Context` object, and avoid the
current practice of creating `context.Context` objects in p2p and
reactor code. This would be required for in-process multi-tenancy.
- Support explicit dependencies between components and allow for
parallel startup, so that different reactors can startup at the same
time, where possible.
## References
- [this
branch](https://github.com/tendermint/tendermint/tree/tychoish/scratch-node-minimize)
contains experimental work in the implementation of the node package
to unwind some of the hard dependencies between components.
- [the component
graph](https://peter.bourgon.org/go-for-industrial-programming/#the-component-graph)
as a framing for internal service construction.
## Appendix
### Dependencies
There's a relationship between the blockchain and consensus reactors,
described by the following dependency graph, that makes replacing some of
these components more difficult relative to other reactors or
components.
![consensus blockchain dependency graph](./img/consensus_blockchain.png)
(Binary file not shown: `img/consensus_blockchain.png`, 672 KiB.)
@@ -36,7 +36,7 @@ proxy-app = "tcp://127.0.0.1:26658"
# A custom human readable name for this node
moniker = "anonymous"
# If this node is many blocks behind the tip of the chain, BlockSync
# If this node is many blocks behind the tip of the chain, FastSync
# allows them to catchup quickly by downloading blocks in parallel
# and verifying their commits
fast-sync = true
@@ -275,13 +275,9 @@ dial-timeout = "3s"
#######################################################
[mempool]
# Mempool version to use:
# 1) "v0" - The legacy non-prioritized mempool reactor.
# 2) "v1" (default) - The prioritized mempool reactor.
version = "v1"
recheck = true
broadcast = true
wal-dir = ""
# Maximum number of transactions in the mempool
size = 5000
@@ -308,22 +304,6 @@ max-tx-bytes = 1048576
# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796
max-batch-bytes = 0
# ttl-duration, if non-zero, defines the maximum amount of time a transaction
# can exist for in the mempool.
#
# Note, if ttl-num-blocks is also defined, a transaction will be removed if it
# has existed in the mempool at least ttl-num-blocks number of blocks or if its
# insertion time into the mempool is beyond ttl-duration.
ttl-duration = "0s"
# ttl-num-blocks, if non-zero, defines the maximum number of blocks a transaction
# can exist for in the mempool.
#
# Note, if ttl-duration is also defined, a transaction will be removed if it
# has existed in the mempool at least ttl-num-blocks number of blocks or if
# its insertion time into the mempool is beyond ttl-duration.
ttl-num-blocks = 0
#######################################################
### State Sync Configuration Options ###
#######################################################
@@ -354,12 +334,12 @@ discovery-time = "15s"
temp-dir = ""
#######################################################
### BlockSync Configuration Connections ###
### Fast Sync Configuration Connections ###
#######################################################
[fastsync]
# Block Sync version to use:
# 1) "v0" (default) - the legacy block sync implementation
# Fast Sync version to use:
# 1) "v0" (default) - the legacy fast sync implementation
# 2) "v2" - complete redesign of v0, optimized for testability & readability
version = "v0"
@@ -441,6 +421,7 @@ max-open-connections = 3
# Instrumentation namespace
namespace = "tendermint"
```
## Empty blocks VS no empty blocks
@@ -32,7 +32,7 @@ tendermint start --log-level "info"
Here is the list of modules you may encounter in Tendermint's log and a
little overview of what they do.
- `abci-client` As mentioned in [Application Architecture Guide](../app-dev/app-architecture.md), Tendermint acts as an ABCI
- `abci-client` As mentioned in [Application Development Guide](../app-dev/app-development.md), Tendermint acts as an ABCI
client with respect to the application and maintains 3 connections:
mempool, consensus and query. The code used by Tendermint Core can
be found [here](https://github.com/tendermint/tendermint/tree/master/abci/client).
@@ -45,12 +45,12 @@ little overview what they do.
from a crash.
[here](https://github.com/tendermint/tendermint/blob/master/types/events.go).
You can subscribe to them by calling `subscribe` RPC method. Refer
to [RPC docs](../tendermint-core/rpc.md) for additional information.
to [RPC docs](./rpc.md) for additional information.
- `mempool` Mempool module handles all incoming transactions, whether
they are coming from peers or the application.
- `p2p` Provides an abstraction around peer-to-peer communication. For
more details, please check out the
[README](https://github.com/tendermint/spec/tree/master/spec/p2p).
[README](https://github.com/tendermint/tendermint/blob/master/p2p/README.md).
- `rpc-server` RPC server. For implementation details, please read the
[doc.go](https://github.com/tendermint/tendermint/blob/master/rpc/jsonrpc/doc.go).
- `state` Represents the latest state and execution submodule, which
@@ -40,7 +40,7 @@ Default logging level (`log-level = "info"`) should suffice for
normal operation mode. Read [this
post](https://blog.cosmos.network/one-of-the-exciting-new-features-in-0-10-0-release-is-smart-log-level-flag-e2506b4ab756)
for details on how to configure `log-level` config variable. Some of the
modules can be found [here](logging.md#list-of-modules). If
modules can be found [here](../nodes/logging#list-of-modules). If
you're trying to debug Tendermint or asked to provide logs with debug
logging level, you can do so by running Tendermint with
`--log-level="debug"`.
@@ -114,7 +114,7 @@ just the votes seen at the current height.
If, after consulting with the logs and above endpoints, you still have no idea
what's happening, consider using `tendermint debug kill` sub-command. This
command will scrap all the available info and kill the process. See
[Debugging](../tools/debugging/README.md) for the exact format.
[Debugging](../tools/debugging.md) for the exact format.
You can inspect the resulting archive yourself or create an issue on
[Github](https://github.com/tendermint/tendermint). Before opening an issue
@@ -134,7 +134,7 @@ Tendermint also can report and serve Prometheus metrics. See
[Metrics](./metrics.md).
`tendermint debug dump` sub-command can be used to periodically dump useful
information into an archive. See [Debugging](../tools/debugging/README.md) for more
information into an archive. See [Debugging](../tools/debugging.md) for more
information.
## What happens when my app dies
@@ -268,8 +268,6 @@ While we do not favor any operating system, more secure and stable Linux server
distributions (like CentOS) should be preferred over desktop operating systems
(like Mac OS).
Native Windows support is not provided. If you are using a windows machine, you can try using the [bash shell](https://docs.microsoft.com/en-us/windows/wsl/install-win10).
### Miscellaneous
NOTE: if you are going to use Tendermint in a public domain, make sure
@@ -315,7 +313,7 @@ We want `skip-timeout-commit=false` when there is economics on the line
because proposers should wait to hear for more votes. But if you don't
care about that and want the fastest consensus, you can skip it. It will
be kept false by default for public deployments (e.g. [Cosmos
Hub](https://hub.cosmos.network/main/hub-overview/overview.html)) while for enterprise
Hub](https://cosmos.network/intro/hub)) while for enterprise
applications, setting it to true is not a problem.
- `consensus.peer-gossip-sleep-duration`
@@ -14,7 +14,7 @@ This section dives into the internals of Go-Tendermint.
- [Subscribing to events](./subscription.md)
- [Block Structure](./block-structure.md)
- [RPC](./rpc.md)
- [Block Sync](./block-sync.md)
- [Fast Sync](./fast-sync.md)
- [State Sync](./state-sync.md)
- [Mempool](./mempool.md)
- [Light Client](./light-client.md)
@@ -2,8 +2,7 @@
order: 10
---
# Block Sync
*Formerly known as Fast Sync*
# Fast Sync
In a proof of work blockchain, syncing with the chain is the same
process as staying up-to-date with the consensus: download blocks, and
@@ -15,7 +14,7 @@ scratch can take a very long time. It's much faster to just download
blocks and check the merkle tree of validators than to run the real-time
consensus gossip protocol.
## Using Block Sync
## Using Fast Sync
To support faster syncing, Tendermint offers a `fast-sync` mode, which
is enabled by default, and can be toggled in the `config.toml` or via
@@ -23,36 +22,26 @@ is enabled by default, and can be toggled in the `config.toml` or via
In this mode, the Tendermint daemon will sync hundreds of times faster
than if it used the real-time consensus process. Once caught up, the
daemon will switch out of Block Sync and into the normal consensus mode.
daemon will switch out of fast sync and into the normal consensus mode.
After running for some time, the node is considered `caught up` if it
has at least one peer and its height is at least as high as the max
reported peer height. See [the IsCaughtUp
method](https://github.com/tendermint/tendermint/blob/b467515719e686e4678e6da4e102f32a491b85a0/blockchain/pool.go#L128).
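Paraphrased as a predicate (a restatement of the linked method, not its exact
code):
```go
package main

import "fmt"

// caughtUp restates the IsCaughtUp rule: at least one peer, and our height
// at least as high as the max height reported by peers.
func caughtUp(numPeers int, ourHeight, maxPeerHeight int64) bool {
	return numPeers > 0 && ourHeight >= maxPeerHeight
}

func main() {
	fmt.Println(caughtUp(0, 100, 100)) // false: no peers yet
	fmt.Println(caughtUp(3, 100, 100)) // true
}
```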
Note: There are two versions of Block Sync. We recommend using v0 as v2 is still in beta.
Note: There are three versions of fast sync. We recommend using v0 as v2 is still in beta.
If you would like to use a different version you can do so by changing the version in the `config.toml`:
```toml
#######################################################
### Block Sync Configuration Connections ###
### Fast Sync Configuration Connections ###
#######################################################
[fastsync]
# Block Sync version to use:
# 1) "v0" (default) - the legacy Block Sync implementation
# Fast Sync version to use:
# 1) "v0" (default) - the legacy fast sync implementation
# 2) "v2" - complete redesign of v0, optimized for testability & readability
version = "v0"
```
If we're lagging sufficiently, we should go back to block syncing, but
If we're lagging sufficiently, we should go back to fast syncing, but
this is an [open issue](https://github.com/tendermint/tendermint/issues/129).
## The Block Sync event
When the tendermint blockchain core launches, it might switch to `block-sync`
mode to catch the state up to the current network best height. The core emits
a block-sync event to expose the current status and the sync height. Once it
has caught up to the network best height, it switches to the state sync
mechanism and then emits another event exposing the block-sync `complete`
status and the state `height`.
The user can query these events by subscribing to `EventQueryBlockSyncStatus`.
Please check [types](https://pkg.go.dev/github.com/tendermint/tendermint/types?utm_source=godoc#pkg-constants) for the details.
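A hedged sketch of such a subscription through the Go RPC client; the
constructor and `Subscribe` signatures have moved between releases (some
versions also take a websocket endpoint), so treat the exact calls as
assumptions to verify against your version:
```go
package main

import (
	"context"
	"fmt"
	"time"

	rpchttp "github.com/tendermint/tendermint/rpc/client/http"
	"github.com/tendermint/tendermint/types"
)

func main() {
	c, err := rpchttp.New("tcp://127.0.0.1:26657")
	if err != nil {
		panic(err)
	}
	if err := c.Start(); err != nil { // the websocket client must be started
		panic(err)
	}
	defer c.Stop() //nolint:errcheck

	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	events, err := c.Subscribe(ctx, "status-watcher", types.EventQueryBlockSyncStatus.String())
	if err != nil {
		panic(err)
	}
	for e := range events {
		fmt.Printf("block-sync status event: %+v\n", e.Data)
	}
}
```
The state sync section below notes the matching `EventQueryStateSyncStatus`
query, which works the same way.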
@@ -4,15 +4,8 @@ order: 11
# State Sync
With block sync a node is downloading all of the data of an application from genesis and verifying it.
With fast sync a node is downloading all of the data of an application from genesis and verifying it.
With state sync your node will download data related to the head or near the head of the chain and verify the data.
This leads to drastically shorter times for joining a network.
Information on how to configure state sync is located in the [nodes section](../nodes/state-sync.md)
## Events
When a node starts with the statesync flag enabled in the config file, it will emit two events: one upon starting statesync and the other upon completion.
The user can query these events by subscribing to `EventQueryStateSyncStatus`.
Please check [types](https://pkg.go.dev/github.com/tendermint/tendermint/types?utm_source=godoc#pkg-constants) for the details.
@@ -36,7 +36,7 @@ more information on query syntax and other options.
You can also use tags, provided you included them in the DeliverTx
response, to query transaction results. See [Indexing
transactions](../app-dev/indexing-transactions.md) for details.
transactions](./indexing-transactions.md) for details.
## ValidatorSetUpdates
@@ -552,7 +552,8 @@ To make a Tendermint network that can tolerate one of the validators
failing, you need at least four validator nodes (e.g., 2/3).
Updating validators in a live network is supported but must be
explicitly programmed by the application developer.
explicitly programmed by the application developer. See the [application
developers guide](../app-dev/app-development.md) for more details.
### Local Network
go.mod (17 changed lines)
@@ -3,41 +3,40 @@ module github.com/tendermint/tendermint
go 1.16
require (
github.com/BurntSushi/toml v0.4.1
github.com/BurntSushi/toml v0.3.1
github.com/Masterminds/squirrel v1.5.0
github.com/Workiva/go-datastructures v1.0.53
github.com/adlio/schema v1.1.13
github.com/btcsuite/btcd v0.22.0-beta
github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce
github.com/fortytw2/leaktest v1.3.0
github.com/go-kit/kit v0.11.0
github.com/go-kit/kit v0.10.0
github.com/gogo/protobuf v1.3.2
github.com/golang/protobuf v1.5.2
github.com/golangci/golangci-lint v1.41.1
github.com/google/orderedcode v0.0.1
github.com/google/uuid v1.3.0
github.com/google/uuid v1.2.0
github.com/gorilla/websocket v1.4.2
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
github.com/lib/pq v1.10.2
github.com/libp2p/go-buffer-pool v0.0.2
github.com/minio/highwayhash v1.0.2
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
github.com/oasisprotocol/curve25519-voi v0.0.0-20210609091139-0a56a4bca00b
github.com/ory/dockertest v3.3.5+incompatible
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.11.0
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0
github.com/rs/cors v1.8.0
github.com/rs/zerolog v1.23.0
github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa
github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa
github.com/spf13/cobra v1.2.1
github.com/spf13/cobra v1.2.0
github.com/spf13/viper v1.8.1
github.com/stretchr/testify v1.7.0
github.com/tendermint/tm-db v0.6.4
github.com/vektra/mockery/v2 v2.9.0
golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4
google.golang.org/grpc v1.40.0
google.golang.org/grpc v1.39.0
gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b // indirect
pgregory.net/rapid v0.4.7
)
go.sum (578 changed lines): diff suppressed because it is too large.
@@ -0,0 +1,17 @@
/*
Package blockchain provides two implementations of the fast-sync protocol.
- v0 was the very first implementation. It's battle-tested, but does not have a
lot of test coverage.
- v2 is the newest implementation, with a focus on testability and readability.
Check out ADR-40 for the formal model and requirements.
# Termination criteria
1. the maximum peer height is reached
2. termination timeout is triggered, which is set if the peer set is empty or
there are no pending requests.
*/
package blockchain
@@ -1,7 +1,7 @@
package blocksync
package blockchain
import (
bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
"github.com/tendermint/tendermint/types"
)
@@ -65,7 +65,7 @@ type BlockRequest struct {
PeerID types.NodeID
}
// BlockPool keeps track of the block sync peers, block requests and block responses.
// BlockPool keeps track of the fast sync peers, block requests and block responses.
type BlockPool struct {
service.BaseService
lastAdvance time.Time
@@ -83,10 +83,6 @@ type BlockPool struct {
requestsCh chan<- BlockRequest
errorsCh chan<- peerError
startHeight int64
lastHundredBlockTimeStamp time.Time
lastSyncRate float64
}
// NewBlockPool returns a new BlockPool with the height equal to start. Block
@@ -95,14 +91,12 @@ func NewBlockPool(start int64, requestsCh chan<- BlockRequest, errorsCh chan<- p
bp := &BlockPool{
peers: make(map[types.NodeID]*bpPeer),
requesters: make(map[int64]*bpRequester),
height: start,
startHeight: start,
numPending: 0,
requesters: make(map[int64]*bpRequester),
height: start,
numPending: 0,
requestsCh: requestsCh,
errorsCh: errorsCh,
lastSyncRate: 0,
requestsCh: requestsCh,
errorsCh: errorsCh,
}
bp.BaseService = *service.NewBaseService(nil, "BlockPool", bp)
return bp
@@ -112,7 +106,6 @@ func NewBlockPool(start int64, requestsCh chan<- BlockRequest, errorsCh chan<- p
// pool's start time.
func (pool *BlockPool) OnStart() error {
pool.lastAdvance = time.Now()
pool.lastHundredBlockTimeStamp = pool.lastAdvance
go pool.makeRequestersRoutine()
return nil
}
@@ -223,19 +216,6 @@ func (pool *BlockPool) PopRequest() {
delete(pool.requesters, pool.height)
pool.height++
pool.lastAdvance = time.Now()
// the lastSyncRate will be updated every 100 blocks, it uses the adaptive filter
// to smooth the block sync rate and the unit represents the number of blocks per second.
if (pool.height-pool.startHeight)%100 == 0 {
newSyncRate := 100 / time.Since(pool.lastHundredBlockTimeStamp).Seconds()
if pool.lastSyncRate == 0 {
pool.lastSyncRate = newSyncRate
} else {
pool.lastSyncRate = 0.9*pool.lastSyncRate + 0.1*newSyncRate
}
pool.lastHundredBlockTimeStamp = time.Now()
}
} else {
panic(fmt.Sprintf("Expected requester to pop, got nothing at height %v", pool.height))
}
@@ -448,20 +428,6 @@ func (pool *BlockPool) debug() string {
return str
}
func (pool *BlockPool) targetSyncBlocks() int64 {
pool.mtx.RLock()
defer pool.mtx.RUnlock()
return pool.maxPeerHeight - pool.startHeight + 1
}
func (pool *BlockPool) getLastSyncRate() float64 {
pool.mtx.RLock()
defer pool.mtx.RUnlock()
return pool.lastSyncRate
}
//-------------------------------------
type bpPeer struct {
@@ -2,17 +2,15 @@ package v0
import (
"fmt"
"runtime/debug"
"sync"
"time"
bc "github.com/tendermint/tendermint/internal/blocksync"
bc "github.com/tendermint/tendermint/internal/blockchain"
cons "github.com/tendermint/tendermint/internal/consensus"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/service"
tmSync "github.com/tendermint/tendermint/libs/sync"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
@@ -60,7 +58,7 @@ const (
)
type consensusReactor interface {
// For when we switch from blockchain reactor and block sync to the consensus
// For when we switch from blockchain reactor and fast sync to the consensus
// machine.
SwitchToConsensus(state sm.State, skipWAL bool)
}
@@ -74,7 +72,7 @@ func (e peerError) Error() string {
return fmt.Sprintf("error with peer %v: %s", e.peerID, e.err.Error())
}
// Reactor handles long-term catchup syncing.
// BlockchainReactor handles long-term catchup syncing.
type Reactor struct {
service.BaseService
@@ -85,19 +83,12 @@ type Reactor struct {
store *store.BlockStore
pool *BlockPool
consReactor consensusReactor
blockSync *tmSync.AtomicBool
fastSync bool
blockchainCh *p2p.Channel
// blockchainOutBridgeCh defines a channel that acts as a bridge between sending Envelope
// messages that the reactor will consume in processBlockchainCh and receiving messages
// from the peer updates channel and other goroutines. We do this instead of directly
// sending on blockchainCh.Out to avoid race conditions in the case where other goroutines
// send Envelopes directly to the to blockchainCh.Out channel, since processBlockchainCh
// may close the blockchainCh.Out channel at the same time that other goroutines send to
// blockchainCh.Out.
blockchainOutBridgeCh chan p2p.Envelope
peerUpdates *p2p.PeerUpdates
closeCh chan struct{}
blockchainCh *p2p.Channel
peerUpdates *p2p.PeerUpdates
peerUpdatesCh chan p2p.Envelope
closeCh chan struct{}
requestsCh <-chan BlockRequest
errorsCh <-chan peerError
@@ -108,8 +99,6 @@ type Reactor struct {
poolWG sync.WaitGroup
metrics *cons.Metrics
syncStartTime time.Time
}
// NewReactor returns new reactor instance.
@@ -121,7 +110,7 @@ func NewReactor(
consReactor consensusReactor,
blockchainCh *p2p.Channel,
peerUpdates *p2p.PeerUpdates,
blockSync bool,
fastSync bool,
metrics *cons.Metrics,
) (*Reactor, error) {
if state.LastBlockHeight != store.Height() {
@@ -137,20 +126,19 @@ func NewReactor(
errorsCh := make(chan peerError, maxPeerErrBuffer) // NOTE: The capacity should be larger than the peer count.
r := &Reactor{
initialState: state,
blockExec: blockExec,
store: store,
pool: NewBlockPool(startHeight, requestsCh, errorsCh),
consReactor: consReactor,
blockSync: tmSync.NewBool(blockSync),
requestsCh: requestsCh,
errorsCh: errorsCh,
blockchainCh: blockchainCh,
blockchainOutBridgeCh: make(chan p2p.Envelope),
peerUpdates: peerUpdates,
closeCh: make(chan struct{}),
metrics: metrics,
syncStartTime: time.Time{},
initialState: state,
blockExec: blockExec,
store: store,
pool: NewBlockPool(startHeight, requestsCh, errorsCh),
consReactor: consReactor,
fastSync: fastSync,
requestsCh: requestsCh,
errorsCh: errorsCh,
blockchainCh: blockchainCh,
peerUpdates: peerUpdates,
peerUpdatesCh: make(chan p2p.Envelope),
closeCh: make(chan struct{}),
metrics: metrics,
}
r.BaseService = *service.NewBaseService(logger, "Blockchain", r)
@@ -162,10 +150,10 @@ func NewReactor(
// messages on that p2p channel accordingly. The caller must be sure to execute
// OnStop to ensure the outbound p2p Channels are closed.
//
// If blockSync is enabled, we also start the pool and the pool processing
// If fastSync is enabled, we also start the pool and the pool processing
// goroutine. If the pool fails to start, an error is returned.
func (r *Reactor) OnStart() error {
if r.blockSync.IsSet() {
if r.fastSync {
if err := r.pool.Start(); err != nil {
return err
}
@@ -183,7 +171,7 @@ func (r *Reactor) OnStart() error {
// OnStop stops the reactor by signaling to all spawned goroutines to exit and
// blocking until they all exit.
func (r *Reactor) OnStop() {
if r.blockSync.IsSet() {
if r.fastSync {
if err := r.pool.Stop(); err != nil {
r.Logger.Error("failed to stop pool", "err", err)
}
@@ -277,11 +265,7 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err
defer func() {
if e := recover(); e != nil {
err = fmt.Errorf("panic in processing message: %v", e)
r.Logger.Error(
"recovering from processing message panic",
"err", err,
"stack", string(debug.Stack()),
)
r.Logger.Error("recovering from processing message panic", "err", err)
}
}()
@@ -299,7 +283,7 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err
}
// processBlockchainCh initiates a blocking process where we listen for and handle
// envelopes on the BlockchainChannel and blockchainOutBridgeCh. Any error encountered during
// envelopes on the BlockchainChannel and peerUpdatesCh. Any error encountered during
// message execution will result in a PeerError being sent on the BlockchainChannel.
// When the reactor is stopped, we will catch the signal and close the p2p Channel
// gracefully.
@@ -317,8 +301,8 @@ func (r *Reactor) processBlockchainCh() {
}
}
case envelope := <-r.blockchainOutBridgeCh:
r.blockchainCh.Out <- envelope
case envelop := <-r.peerUpdatesCh:
r.blockchainCh.Out <- envelop
case <-r.closeCh:
r.Logger.Debug("stopped listening on blockchain channel; closing...")
@@ -340,7 +324,7 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) {
switch peerUpdate.Status {
case p2p.PeerStatusUp:
// send a status update the newly added peer
r.blockchainOutBridgeCh <- p2p.Envelope{
r.peerUpdatesCh <- p2p.Envelope{
To: peerUpdate.NodeID,
Message: &bcproto.StatusResponse{
Base: r.store.Base(),
@@ -371,10 +355,10 @@ func (r *Reactor) processPeerUpdates() {
}
}
// SwitchToBlockSync is called by the state sync reactor when switching to fast
// SwitchToFastSync is called by the state sync reactor when switching to fast
// sync.
func (r *Reactor) SwitchToBlockSync(state sm.State) error {
r.blockSync.Set()
func (r *Reactor) SwitchToFastSync(state sm.State) error {
r.fastSync = true
r.initialState = state
r.pool.height = state.LastBlockHeight + 1
@@ -382,8 +366,6 @@ func (r *Reactor) SwitchToBlockSync(state sm.State) error {
return err
}
r.syncStartTime = time.Now()
r.poolWG.Add(1)
go r.poolRoutine(true)
@@ -406,7 +388,7 @@ func (r *Reactor) requestRoutine() {
return
case request := <-r.requestsCh:
r.blockchainOutBridgeCh <- p2p.Envelope{
r.blockchainCh.Out <- p2p.Envelope{
To: request.PeerID,
Message: &bcproto.BlockRequest{Height: request.Height},
}
@@ -423,7 +405,7 @@ func (r *Reactor) requestRoutine() {
go func() {
defer r.poolWG.Done()
r.blockchainOutBridgeCh <- p2p.Envelope{
r.blockchainCh.Out <- p2p.Envelope{
Broadcast: true,
Message: &bcproto.StatusRequest{},
}
@@ -496,8 +478,6 @@ FOR_LOOP:
r.Logger.Error("failed to stop pool", "err", err)
}
r.blockSync.UnSet()
if r.consReactor != nil {
r.consReactor.SwitchToConsensus(state, blocksSynced > 0 || stateSynced)
}
@@ -591,7 +571,7 @@ FOR_LOOP:
if blocksSynced%100 == 0 {
lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
r.Logger.Info(
"block sync rate",
"fast sync rate",
"height", r.pool.height,
"max_peer_height", r.pool.MaxPeerHeight(),
"blocks/s", lastRate,
@@ -612,27 +592,3 @@ FOR_LOOP:
func (r *Reactor) GetMaxPeerBlockHeight() int64 {
return r.pool.MaxPeerHeight()
}
func (r *Reactor) GetTotalSyncedTime() time.Duration {
if !r.blockSync.IsSet() || r.syncStartTime.IsZero() {
return time.Duration(0)
}
return time.Since(r.syncStartTime)
}
func (r *Reactor) GetRemainingSyncTime() time.Duration {
if !r.blockSync.IsSet() {
return time.Duration(0)
}
targetSyncs := r.pool.targetSyncBlocks()
currentSyncs := r.store.Height() - r.pool.startHeight + 1
lastSyncRate := r.pool.getLastSyncRate()
if currentSyncs < 0 || lastSyncRate < 0.001 {
return time.Duration(0)
}
remain := float64(targetSyncs-currentSyncs) / lastSyncRate
return time.Duration(int64(remain * float64(time.Second)))
}
@@ -15,7 +15,7 @@ import (
"github.com/tendermint/tendermint/internal/p2p/p2ptest"
"github.com/tendermint/tendermint/internal/test/factory"
"github.com/tendermint/tendermint/libs/log"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
sf "github.com/tendermint/tendermint/state/test/factory"
@@ -36,7 +36,7 @@ type reactorTestSuite struct {
peerChans map[types.NodeID]chan p2p.PeerUpdate
peerUpdates map[types.NodeID]*p2p.PeerUpdates
blockSync bool
fastSync bool
}
func setup(
@@ -61,7 +61,7 @@ func setup(
blockchainChannels: make(map[types.NodeID]*p2p.Channel, numNodes),
peerChans: make(map[types.NodeID]chan p2p.PeerUpdate, numNodes),
peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, numNodes),
blockSync: true,
fastSync: true,
}
chDesc := p2p.ChannelDescriptor{ID: byte(BlockchainChannel)}
@@ -163,7 +163,7 @@ func (rts *reactorTestSuite) addNode(t *testing.T,
nil,
rts.blockchainChannels[nodeID],
rts.peerUpdates[nodeID],
rts.blockSync,
rts.fastSync,
cons.NopMetrics())
require.NoError(t, err)
@@ -215,29 +215,6 @@ func TestReactor_AbruptDisconnect(t *testing.T) {
rts.network.Nodes[rts.nodes[1]].PeerManager.Disconnected(rts.nodes[0])
}
func TestReactor_SyncTime(t *testing.T) {
config := cfg.ResetTestRoot("blockchain_reactor_test")
defer os.RemoveAll(config.RootDir)
genDoc, privVals := factory.RandGenesisDoc(config, 1, false, 30)
maxBlockHeight := int64(101)
rts := setup(t, genDoc, privVals[0], []int64{maxBlockHeight, 0}, 0)
require.Equal(t, maxBlockHeight, rts.reactors[rts.nodes[0]].store.Height())
rts.start(t)
require.Eventually(
t,
func() bool {
return rts.reactors[rts.nodes[1]].GetRemainingSyncTime() > time.Nanosecond &&
rts.reactors[rts.nodes[1]].pool.getLastSyncRate() > 0.001
},
10*time.Second,
10*time.Millisecond,
"expected node to be partially synced",
)
}
func TestReactor_NoBlockResponse(t *testing.T) {
config := cfg.ResetTestRoot("blockchain_reactor_test")
defer os.RemoveAll(config.RootDir)
@@ -4,7 +4,7 @@ import (
"sync"
"testing"
bh "github.com/tendermint/tendermint/internal/blocksync/v2/internal/behavior"
bh "github.com/tendermint/tendermint/internal/blockchain/v2/internal/behavior"
"github.com/tendermint/tendermint/types"
)
@@ -5,7 +5,7 @@ import (
"github.com/gogo/protobuf/proto"
"github.com/tendermint/tendermint/internal/p2p"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
"github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
)
@@ -42,7 +42,7 @@ const (
)
type consensusReactor interface {
// for when we switch from blockchain reactor and block sync to
// for when we switch from blockchain reactor and fast sync to
// the consensus machine
SwitchToConsensus(state state.State, skipWAL bool)
}
@@ -7,14 +7,13 @@ import (
proto "github.com/gogo/protobuf/proto"
bc "github.com/tendermint/tendermint/internal/blocksync"
"github.com/tendermint/tendermint/internal/blocksync/v2/internal/behavior"
bc "github.com/tendermint/tendermint/internal/blockchain"
"github.com/tendermint/tendermint/internal/blockchain/v2/internal/behavior"
cons "github.com/tendermint/tendermint/internal/consensus"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/sync"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
"github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
)
@@ -31,12 +30,12 @@ type blockStore interface {
Height() int64
}
// BlockchainReactor handles block sync protocol.
// BlockchainReactor handles fast sync protocol.
type BlockchainReactor struct {
p2p.BaseReactor
blockSync *sync.AtomicBool // enable block sync on start when it's been Set
stateSynced bool // set to true when SwitchToBlockSync is called by state sync
fastSync bool // if true, enable fast sync on start
stateSynced bool // set to true when SwitchToFastSync is called by state sync
scheduler *Routine
processor *Routine
logger log.Logger
@@ -44,15 +43,11 @@ type BlockchainReactor struct {
mtx tmsync.RWMutex
maxPeerHeight int64
syncHeight int64
events chan Event // non-nil during a block sync
events chan Event // non-nil during a fast sync
reporter behavior.Reporter
io iIO
store blockStore
syncStartTime time.Time
syncStartHeight int64
lastSyncRate float64 // # blocks sync per sec base on the last 100 blocks
}
type blockApplier interface {
@@ -61,7 +56,7 @@ type blockApplier interface {
// XXX: unify naming in this package around tmState
func newReactor(state state.State, store blockStore, reporter behavior.Reporter,
blockApplier blockApplier, blockSync bool, metrics *cons.Metrics) *BlockchainReactor {
blockApplier blockApplier, fastSync bool, metrics *cons.Metrics) *BlockchainReactor {
initHeight := state.LastBlockHeight + 1
if initHeight == 1 {
initHeight = state.InitialHeight
@@ -73,15 +68,12 @@ func newReactor(state state.State, store blockStore, reporter behavior.Reporter,
processor := newPcState(pContext)
return &BlockchainReactor{
scheduler: newRoutine("scheduler", scheduler.handle, chBufferSize),
processor: newRoutine("processor", processor.handle, chBufferSize),
store: store,
reporter: reporter,
logger: log.NewNopLogger(),
blockSync: sync.NewBool(blockSync),
syncStartHeight: initHeight,
syncStartTime: time.Time{},
lastSyncRate: 0,
scheduler: newRoutine("scheduler", scheduler.handle, chBufferSize),
processor: newRoutine("processor", processor.handle, chBufferSize),
store: store,
reporter: reporter,
logger: log.NewNopLogger(),
fastSync: fastSync,
}
}
@@ -90,10 +82,10 @@ func NewBlockchainReactor(
state state.State,
blockApplier blockApplier,
store blockStore,
blockSync bool,
fastSync bool,
metrics *cons.Metrics) *BlockchainReactor {
reporter := behavior.NewMockReporter()
return newReactor(state, store, reporter, blockApplier, blockSync, metrics)
return newReactor(state, store, reporter, blockApplier, fastSync, metrics)
}
// SetSwitch implements Reactor interface.
@@ -137,22 +129,22 @@ func (r *BlockchainReactor) SetLogger(logger log.Logger) {
// Start implements cmn.Service interface
func (r *BlockchainReactor) Start() error {
r.reporter = behavior.NewSwitchReporter(r.BaseReactor.Switch)
if r.blockSync.IsSet() {
if r.fastSync {
err := r.startSync(nil)
if err != nil {
return fmt.Errorf("failed to start block sync: %w", err)
return fmt.Errorf("failed to start fast sync: %w", err)
}
}
return nil
}
// startSync begins a block sync, signaled by r.events being non-nil. If state is non-nil,
// startSync begins a fast sync, signaled by r.events being non-nil. If state is non-nil,
// the scheduler and processor is updated with this state on startup.
func (r *BlockchainReactor) startSync(state *state.State) error {
r.mtx.Lock()
defer r.mtx.Unlock()
if r.events != nil {
return errors.New("block sync already in progress")
return errors.New("fast sync already in progress")
}
r.events = make(chan Event, chBufferSize)
go r.scheduler.start()
@@ -167,7 +159,7 @@ func (r *BlockchainReactor) startSync(state *state.State) error {
return nil
}
// endSync ends a block sync
// endSync ends a fast sync
func (r *BlockchainReactor) endSync() {
r.mtx.Lock()
defer r.mtx.Unlock()
@@ -179,17 +171,11 @@ func (r *BlockchainReactor) endSync() {
r.processor.stop()
}
// SwitchToBlockSync is called by the state sync reactor when switching to block sync.
func (r *BlockchainReactor) SwitchToBlockSync(state state.State) error {
// SwitchToFastSync is called by the state sync reactor when switching to fast sync.
func (r *BlockchainReactor) SwitchToFastSync(state state.State) error {
r.stateSynced = true
state = state.Copy()
err := r.startSync(&state)
if err == nil {
r.syncStartTime = time.Now()
}
return err
return r.startSync(&state)
}
// reactor generated ticker events:
@@ -297,6 +283,7 @@ func (e bcResetState) String() string {
// Takes the channel as a parameter to avoid race conditions on r.events.
func (r *BlockchainReactor) demux(events <-chan Event) {
var lastRate = 0.0
var lastHundred = time.Now()
var (
@@ -427,27 +414,21 @@ func (r *BlockchainReactor) demux(events <-chan Event) {
switch event := event.(type) {
case pcBlockProcessed:
r.setSyncHeight(event.height)
if (r.syncHeight-r.syncStartHeight)%100 == 0 {
newSyncRate := 100 / time.Since(lastHundred).Seconds()
if r.lastSyncRate == 0 {
r.lastSyncRate = newSyncRate
} else {
r.lastSyncRate = 0.9*r.lastSyncRate + 0.1*newSyncRate
}
r.logger.Info("block sync Rate", "height", r.syncHeight,
"max_peer_height", r.maxPeerHeight, "blocks/s", r.lastSyncRate)
if r.syncHeight%100 == 0 {
lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
r.logger.Info("Fast Sync Rate", "height", r.syncHeight,
"max_peer_height", r.maxPeerHeight, "blocks/s", lastRate)
lastHundred = time.Now()
}
r.scheduler.send(event)
case pcBlockVerificationFailure:
r.scheduler.send(event)
case pcFinished:
r.logger.Info("block sync complete, switching to consensus")
r.logger.Info("Fast sync complete, switching to consensus")
if !r.io.trySwitchToConsensus(event.tmState, event.blocksSynced > 0 || r.stateSynced) {
r.logger.Error("Failed to switch to consensus reactor")
}
r.endSync()
r.blockSync.UnSet()
return
case noOpEvent:
default:
@@ -615,29 +596,3 @@ func (r *BlockchainReactor) GetMaxPeerBlockHeight() int64 {
defer r.mtx.RUnlock()
return r.maxPeerHeight
}
func (r *BlockchainReactor) GetTotalSyncedTime() time.Duration {
if !r.blockSync.IsSet() || r.syncStartTime.IsZero() {
return time.Duration(0)
}
return time.Since(r.syncStartTime)
}
func (r *BlockchainReactor) GetRemainingSyncTime() time.Duration {
if !r.blockSync.IsSet() {
return time.Duration(0)
}
r.mtx.RLock()
defer r.mtx.RUnlock()
targetSyncs := r.maxPeerHeight - r.syncStartHeight
currentSyncs := r.syncHeight - r.syncStartHeight + 1
if currentSyncs < 0 || r.lastSyncRate < 0.001 {
return time.Duration(0)
}
remain := float64(targetSyncs-currentSyncs) / r.lastSyncRate
return time.Duration(int64(remain * float64(time.Second)))
}
@@ -15,7 +15,7 @@ import (
abci "github.com/tendermint/tendermint/abci/types"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/internal/blocksync/v2/internal/behavior"
"github.com/tendermint/tendermint/internal/blockchain/v2/internal/behavior"
cons "github.com/tendermint/tendermint/internal/consensus"
"github.com/tendermint/tendermint/internal/mempool/mock"
"github.com/tendermint/tendermint/internal/p2p"
@@ -23,7 +23,7 @@ import (
"github.com/tendermint/tendermint/internal/test/factory"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/service"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
sf "github.com/tendermint/tendermint/state/test/factory"
@@ -163,7 +163,7 @@ type scheduler struct {
height int64
// lastAdvance tracks the last time a block execution happened.
// syncTimeout is the maximum time the scheduler waits to advance in the block sync process before finishing.
// syncTimeout is the maximum time the scheduler waits to advance in the fast sync process before finishing.
// This covers the cases where there are no peers or all peers have a lower height.
lastAdvance time.Time
syncTimeout time.Duration
@@ -1,36 +0,0 @@
/*
Package blocksync implements two versions of a reactor Service that are
responsible for block propagation and gossip between peers. This mechanism was
formerly known as fast-sync.
In order for a full node to successfully participate in consensus, it must have
the latest view of state. The blocksync protocol is a mechanism in which peers
may exchange and gossip entire blocks with one another, in a request/response
type model, until they've successfully synced to the latest head block. Once
successfully synced, the full node can switch to an active role in consensus and
will no longer run the blocksync process.
Note, the blocksync reactor Service gossips entire block and relevant data such
that each receiving peer may construct the entire view of the blocksync state.
There are currently two versions of the blocksync reactor Service:
- v0: The initial implementation that is battle-tested, but whose test coverage
is lacking and is not formally verifiable.
- v2: The latest implementation that has much higher test coverage and is formally
verified. However, the current implementation of v2 is not as battle-tested and
is known to have various bugs that could make it unreliable in production
environments.
The v0 blocksync reactor Service has one p2p channel, BlockchainChannel. This
channel is responsible for handling messages that both request blocks and respond
to block requests from peers. For every block request from a peer, the reactor
will execute respondToPeer which will fetch the block from the node's state store
and respond to the peer. For every block response, the node will add the block
to its pool via AddBlock.
Internally, v0 runs a poolRoutine that constantly checks for what blocks it needs
and requests them. The poolRoutine is also responsible for taking blocks from the
pool, saving and executing each block.
*/
package blocksync
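
The request/response flow this package comment describes can be sketched apart from the reactor plumbing. Everything below (BlockStore, BlockRequest, NoBlockResponse, and friends) is a stand-in invented for this sketch, not the real blocksync message types:

package main

import "fmt"

// Block and the message types below stand in for the blocksync
// protobuf messages; they exist only for this sketch.
type Block struct{ Height int64 }

type BlockRequest struct{ Height int64 }

type BlockResponse struct{ Block *Block }

type NoBlockResponse struct{ Height int64 }

// BlockStore is a hypothetical subset of the node's block store.
type BlockStore interface {
	LoadBlock(height int64) *Block // nil if the block is unknown
}

// respondToPeer mirrors the documented behavior: serve the block if we
// have it, otherwise tell the peer we don't.
func respondToPeer(store BlockStore, req BlockRequest, send func(interface{})) {
	if b := store.LoadBlock(req.Height); b != nil {
		send(BlockResponse{Block: b})
		return
	}
	send(NoBlockResponse{Height: req.Height})
}

type mapStore map[int64]*Block

func (m mapStore) LoadBlock(h int64) *Block { return m[h] }

func main() {
	store := mapStore{5: {Height: 5}}
	send := func(msg interface{}) { fmt.Printf("sent %#v\n", msg) }
	respondToPeer(store, BlockRequest{Height: 5}, send)
	respondToPeer(store, BlockRequest{Height: 9}, send)
}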

View File

@@ -35,7 +35,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
prevoteHeight := int64(2)
testName := "consensus_byzantine_test"
tickerFunc := newMockTickerFunc(true)
appFunc := newKVStore
appFunc := newCounter
genDoc, privVals := factory.RandGenesisDoc(config, nValidators, false, 30)
states := make([]*State, nValidators)

View File

@@ -19,6 +19,7 @@ import (
dbm "github.com/tendermint/tm-db"
abcicli "github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/abci/example/counter"
"github.com/tendermint/tendermint/abci/example/kvstore"
abci "github.com/tendermint/tendermint/abci/types"
cfg "github.com/tendermint/tendermint/config"
@@ -88,7 +89,6 @@ type validatorStub struct {
Round int32
types.PrivValidator
VotingPower int64
lastVote *types.Vote
}
const testMinPower int64 = 10
@@ -122,18 +122,8 @@ func (vs *validatorStub) signVote(
BlockID: types.BlockID{Hash: hash, PartSetHeader: header},
}
v := vote.ToProto()
if err := vs.PrivValidator.SignVote(context.Background(), config.ChainID(), v); err != nil {
return nil, fmt.Errorf("sign vote failed: %w", err)
}
// ref: signVote in FilePV; the vote should reuse the previous vote's signature and timestamp when the sign data is the same.
if signDataIsEqual(vs.lastVote, v) {
v.Signature = vs.lastVote.Signature
v.Timestamp = vs.lastVote.Timestamp
}
err = vs.PrivValidator.SignVote(context.Background(), config.ChainID(), v)
vote.Signature = v.Signature
vote.Timestamp = v.Timestamp
return vote, err
}
@@ -150,9 +140,6 @@ func signVote(
if err != nil {
panic(fmt.Errorf("failed to sign vote: %v", err))
}
vs.lastVote = v
return v
}
@@ -462,7 +449,7 @@ func randState(config *cfg.Config, nValidators int) (*State, []*validatorStub) {
vss := make([]*validatorStub, nValidators)
cs := newState(state, privVals[0], kvstore.NewApplication())
cs := newState(state, privVals[0], counter.NewApplication(true))
for i := 0; i < nValidators; i++ {
vss[i] = newValidatorStub(privVals[i], int32(i))
@@ -875,6 +862,10 @@ func (m *mockTicker) Chan() <-chan timeoutInfo {
func (*mockTicker) SetLogger(log.Logger) {}
func newCounter() abci.Application {
return counter.NewApplication(true)
}
func newPersistentKVStore() abci.Application {
dir, err := ioutil.TempDir("", "persistent-kvstore")
if err != nil {
@@ -883,23 +874,6 @@ func newPersistentKVStore() abci.Application {
return kvstore.NewPersistentKVStoreApplication(dir)
}
func newKVStore() abci.Application {
return kvstore.NewApplication()
}
func newPersistentKVStoreWithPath(dbDir string) abci.Application {
return kvstore.NewPersistentKVStoreApplication(dbDir)
}
func signDataIsEqual(v1 *types.Vote, v2 *tmproto.Vote) bool {
if v1 == nil || v2 == nil {
return false
}
return v1.Type == v2.Type &&
bytes.Equal(v1.BlockID.Hash, v2.BlockID.GetHash()) &&
v1.Height == v2.GetHeight() &&
v1.Round == v2.Round &&
bytes.Equal(v1.ValidatorAddress.Bytes(), v2.GetValidatorAddress()) &&
v1.ValidatorIndex == v2.GetValidatorIndex()
}
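
The deleted helper above kept the validator stub from producing two different signatures over identical sign bytes, mirroring FilePV's double-sign protection. A minimal sketch of the caching idea, with a hypothetical signer in place of PrivValidator and only the compared fields kept:

package main

import "fmt"

// vote is a stand-in for tmproto.Vote; only the fields this sketch
// compares and copies are included.
type vote struct {
	Height    int64
	Round     int32
	Signature string
	Timestamp int64
}

// cachedSigner reuses the previous signature when the sign data
// (height/round here) is unchanged, mimicking signVote above.
type cachedSigner struct {
	last  *vote
	calls int
}

func (s *cachedSigner) sign(v *vote) {
	if s.last != nil && s.last.Height == v.Height && s.last.Round == v.Round {
		v.Signature, v.Timestamp = s.last.Signature, s.last.Timestamp
		return
	}
	s.calls++
	v.Signature = fmt.Sprintf("sig-%d", s.calls) // pretend to sign
	v.Timestamp = int64(s.calls)
	cp := *v
	s.last = &cp
}

func main() {
	s := &cachedSigner{}
	a := &vote{Height: 2, Round: 0}
	b := &vote{Height: 2, Round: 0}
	s.sign(a)
	s.sign(b) // identical sign data: signature is reused
	fmt.Println(a.Signature == b.Signature)
}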

View File

@@ -18,9 +18,7 @@ func TestReactorInvalidPrecommit(t *testing.T) {
config := configSetup(t)
n := 4
states, cleanup := randConsensusState(t,
config, n, "consensus_reactor_test",
newMockTickerFunc(true), newKVStore)
states, cleanup := randConsensusState(t, config, n, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
t.Cleanup(cleanup)
for i := 0; i < 4; i++ {

View File

@@ -161,8 +161,8 @@ func TestMempoolRmBadTx(t *testing.T) {
txBytes := make([]byte, 8)
binary.BigEndian.PutUint64(txBytes, uint64(0))
resDeliver := app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes})
assert.False(t, resDeliver.IsErr(), fmt.Sprintf("expected no error. got %v", resDeliver))
resDeliver := app.FinalizeBlock(abci.RequestFinalizeBlock{Txs: [][]byte{txBytes}})
assert.False(t, resDeliver.Txs[0].IsErr(), fmt.Sprintf("expected no error. got %v", resDeliver))
resCommit := app.Commit()
assert.True(t, len(resCommit.Data) > 0)
@@ -233,15 +233,16 @@ func (app *CounterApplication) Info(req abci.RequestInfo) abci.ResponseInfo {
return abci.ResponseInfo{Data: fmt.Sprintf("txs:%v", app.txCount)}
}
func (app *CounterApplication) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx {
txValue := txAsUint64(req.Tx)
func (app *CounterApplication) FinalizeBlock(req abci.RequestFinalizeBlock) abci.ResponseFinalizeBlock {
txValue := txAsUint64(req.Txs[0])
if txValue != uint64(app.txCount) {
return abci.ResponseDeliverTx{
return abci.ResponseFinalizeBlock{Txs: []*abci.ResponseDeliverTx{{
Code: code.CodeTypeBadNonce,
Log: fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.txCount, txValue)}
Log: fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.txCount, txValue)}},
}
}
app.txCount++
return abci.ResponseDeliverTx{Code: code.CodeTypeOK}
return abci.ResponseFinalizeBlock{Txs: []*abci.ResponseDeliverTx{{Code: code.CodeTypeOK}}}
}
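
The rewritten test app inspects only Txs[0] because this test delivers one transaction per block; a full DeliverTx-to-FinalizeBlock migration has to return one result per transaction, in order. A self-contained sketch of that shape, with local stand-ins for the branch's abci types (the real code would use abci.RequestFinalizeBlock and abci.ResponseFinalizeBlock):

package main

import "fmt"

// deliverTxResult, finalizeBlockRequest, and finalizeBlockResponse are
// stand-ins for the branch's abci types.
type deliverTxResult struct {
	Code uint32
	Log  string
}

type finalizeBlockRequest struct{ Txs [][]byte }

type finalizeBlockResponse struct{ Txs []*deliverTxResult }

const (
	codeOK       uint32 = 0
	codeBadNonce uint32 = 2
)

type counterApp struct{ txCount int }

// finalizeBlock processes every tx in the block, not just Txs[0]:
// one result per transaction, in order.
func (app *counterApp) finalizeBlock(req finalizeBlockRequest) finalizeBlockResponse {
	res := make([]*deliverTxResult, len(req.Txs))
	for i, tx := range req.Txs {
		v := int(tx[0]) // toy decoding; the real app reads 8 big-endian bytes
		if v != app.txCount {
			res[i] = &deliverTxResult{Code: codeBadNonce,
				Log: fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.txCount, v)}
			continue
		}
		app.txCount++
		res[i] = &deliverTxResult{Code: codeOK}
	}
	return finalizeBlockResponse{Txs: res}
}

func main() {
	app := &counterApp{}
	resp := app.finalizeBlock(finalizeBlockRequest{Txs: [][]byte{{0}, {1}, {5}}})
	for i, r := range resp.Txs {
		fmt.Printf("tx %d -> code %d %s\n", i, r.Code, r.Log)
	}
}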
func (app *CounterApplication) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx {

View File

@@ -54,8 +54,8 @@ type Metrics struct {
TotalTxs metrics.Gauge
// The latest block height.
CommittedHeight metrics.Gauge
// Whether or not a node is block syncing. 1 if yes, 0 if no.
BlockSyncing metrics.Gauge
// Whether or not a node is fast syncing. 1 if yes, 0 if no.
FastSyncing metrics.Gauge
// Whether or not a node is state syncing. 1 if yes, 0 if no.
StateSyncing metrics.Gauge
@@ -169,11 +169,11 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
Name: "latest_block_height",
Help: "The latest block height.",
}, labels).With(labelsAndValues...),
BlockSyncing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
FastSyncing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: namespace,
Subsystem: MetricsSubsystem,
Name: "block_syncing",
Help: "Whether or not a node is block syncing. 1 if yes, 0 if no.",
Name: "fast_syncing",
Help: "Whether or not a node is fast syncing. 1 if yes, 0 if no.",
}, labels).With(labelsAndValues...),
StateSyncing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: namespace,
@@ -214,7 +214,7 @@ func NopMetrics() *Metrics {
BlockSizeBytes: discard.NewHistogram(),
TotalTxs: discard.NewGauge(),
CommittedHeight: discard.NewGauge(),
BlockSyncing: discard.NewGauge(),
FastSyncing: discard.NewGauge(),
StateSyncing: discard.NewGauge(),
BlockParts: discard.NewCounter(),
}
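
For reference, these gauges follow the usual go-kit/Prometheus pattern: declared once under a namespace and subsystem, then flipped between 0 and 1 at mode transitions (SwitchToConsensus below does exactly that with Set(0)). A minimal sketch assuming the same go-kit and client_golang packages this file already imports:

package main

import (
	"github.com/go-kit/kit/metrics"
	"github.com/go-kit/kit/metrics/discard"
	kitprom "github.com/go-kit/kit/metrics/prometheus"
	stdprometheus "github.com/prometheus/client_golang/prometheus"
)

// newSyncGauge mirrors the PrometheusMetrics pattern above: 1 while the
// node is in the given sync mode, 0 otherwise.
func newSyncGauge(namespace, name, help string) metrics.Gauge {
	return kitprom.NewGaugeFrom(stdprometheus.GaugeOpts{
		Namespace: namespace,
		Subsystem: "consensus",
		Name:      name,
		Help:      help,
	}, nil)
}

func main() {
	fastSyncing := newSyncGauge("tendermint", "fast_syncing",
		"Whether or not a node is fast syncing. 1 if yes, 0 if no.")
	fastSyncing.Set(1) // entering fast sync
	fastSyncing.Set(0) // SwitchToConsensus

	// Tests use no-op gauges, as in NopMetrics above.
	var nop metrics.Gauge = discard.NewGauge()
	nop.Set(1)
}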

View File

@@ -1,28 +0,0 @@
// Code generated by mockery. DO NOT EDIT.
package mocks
import (
mock "github.com/stretchr/testify/mock"
state "github.com/tendermint/tendermint/state"
)
// ConsSyncReactor is an autogenerated mock type for the ConsSyncReactor type
type ConsSyncReactor struct {
mock.Mock
}
// SetBlockSyncingMetrics provides a mock function with given fields: _a0
func (_m *ConsSyncReactor) SetBlockSyncingMetrics(_a0 float64) {
_m.Called(_a0)
}
// SetStateSyncingMetrics provides a mock function with given fields: _a0
func (_m *ConsSyncReactor) SetStateSyncingMetrics(_a0 float64) {
_m.Called(_a0)
}
// SwitchToConsensus provides a mock function with given fields: _a0, _a1
func (_m *ConsSyncReactor) SwitchToConsensus(_a0 state.State, _a1 bool) {
_m.Called(_a0, _a1)
}

View File

@@ -1,71 +0,0 @@
// Code generated by mockery 2.7.5. DO NOT EDIT.
package mocks
import (
mock "github.com/stretchr/testify/mock"
state "github.com/tendermint/tendermint/state"
time "time"
)
// BlockSyncReactor is an autogenerated mock type for the BlockSyncReactor type
type BlockSyncReactor struct {
mock.Mock
}
// GetMaxPeerBlockHeight provides a mock function with given fields:
func (_m *BlockSyncReactor) GetMaxPeerBlockHeight() int64 {
ret := _m.Called()
var r0 int64
if rf, ok := ret.Get(0).(func() int64); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(int64)
}
return r0
}
// GetRemainingSyncTime provides a mock function with given fields:
func (_m *BlockSyncReactor) GetRemainingSyncTime() time.Duration {
ret := _m.Called()
var r0 time.Duration
if rf, ok := ret.Get(0).(func() time.Duration); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(time.Duration)
}
return r0
}
// GetTotalSyncedTime provides a mock function with given fields:
func (_m *BlockSyncReactor) GetTotalSyncedTime() time.Duration {
ret := _m.Called()
var r0 time.Duration
if rf, ok := ret.Get(0).(func() time.Duration); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(time.Duration)
}
return r0
}
// SwitchToBlockSync provides a mock function with given fields: _a0
func (_m *BlockSyncReactor) SwitchToBlockSync(_a0 state.State) error {
ret := _m.Called(_a0)
var r0 error
if rf, ok := ret.Get(0).(func(state.State) error); ok {
r0 = rf(_a0)
} else {
r0 = ret.Error(0)
}
return r0
}
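
Both deleted files are mockery-generated mocks, driven through testify's expectation API; the ret.Get(0).(func() ...) branch lets a test return either a fixed value or a computed one. A hand-written sketch of the same shape and its typical use (reactorMock and noopT are illustrative, not from the codebase):

package main

import (
	"fmt"
	"time"

	"github.com/stretchr/testify/mock"
)

// reactorMock is hand-written here, but has exactly the shape mockery
// generates above: embed mock.Mock, record the call, unpack returns.
type reactorMock struct{ mock.Mock }

func (m *reactorMock) GetMaxPeerBlockHeight() int64 {
	ret := m.Called()
	if rf, ok := ret.Get(0).(func() int64); ok {
		return rf() // computed return
	}
	return ret.Get(0).(int64) // fixed return
}

func (m *reactorMock) GetRemainingSyncTime() time.Duration {
	ret := m.Called()
	return ret.Get(0).(time.Duration)
}

func main() {
	m := new(reactorMock)
	m.On("GetMaxPeerBlockHeight").Return(int64(1000))
	m.On("GetRemainingSyncTime").Return(30 * time.Second)

	fmt.Println(m.GetMaxPeerBlockHeight()) // 1000
	fmt.Println(m.GetRemainingSyncTime())  // 30s
	fmt.Println(m.AssertExpectations(noopT{}))
}

// noopT satisfies the minimal interface AssertExpectations needs
// outside a real *testing.T.
type noopT struct{}

func (noopT) Logf(string, ...interface{})   {}
func (noopT) Errorf(string, ...interface{}) {}
func (noopT) FailNow()                      {}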

View File

@@ -2,7 +2,6 @@ package consensus
import (
"fmt"
"runtime/debug"
"time"
cstypes "github.com/tendermint/tendermint/internal/consensus/types"
@@ -96,28 +95,12 @@ const (
type ReactorOption func(*Reactor)
// NOTE: Temporary interface for switching to block sync, we should get rid of v0.
// Temporary interface for switching to fast sync, we should get rid of v0.
// See: https://github.com/tendermint/tendermint/issues/4595
type BlockSyncReactor interface {
SwitchToBlockSync(sm.State) error
type FastSyncReactor interface {
SwitchToFastSync(sm.State) error
GetMaxPeerBlockHeight() int64
// GetTotalSyncedTime returns the duration since blocksync started.
GetTotalSyncedTime() time.Duration
// GetRemainingSyncTime returns the estimated time until the node is fully synced;
// it returns 0 if blocksync is not running or the number of blocks synced is
// too small (fewer than 100).
GetRemainingSyncTime() time.Duration
}
//go:generate ../../scripts/mockery_generate.sh ConsSyncReactor
// ConsSyncReactor defines an interface used for testing the behavior of node.startStateSync.
type ConsSyncReactor interface {
SwitchToConsensus(sm.State, bool)
SetStateSyncingMetrics(float64)
SetBlockSyncingMetrics(float64)
}
// Reactor defines a reactor for the consensus service.
@@ -265,7 +248,7 @@ func (r *Reactor) SetEventBus(b *types.EventBus) {
r.state.SetEventBus(b)
}
// WaitSync returns whether the consensus reactor is waiting for state/block sync.
// WaitSync returns whether the consensus reactor is waiting for state/fast sync.
func (r *Reactor) WaitSync() bool {
r.mtx.RLock()
defer r.mtx.RUnlock()
@@ -278,8 +261,8 @@ func ReactorMetrics(metrics *Metrics) ReactorOption {
return func(r *Reactor) { r.Metrics = metrics }
}
// SwitchToConsensus switches from block-sync mode to consensus mode. It resets
// the state, turns off block-sync, and starts the consensus state-machine.
// SwitchToConsensus switches from fast-sync mode to consensus mode. It resets
// the state, turns off fast-sync, and starts the consensus state-machine.
func (r *Reactor) SwitchToConsensus(state sm.State, skipWAL bool) {
r.Logger.Info("switching to consensus")
@@ -296,7 +279,7 @@ func (r *Reactor) SwitchToConsensus(state sm.State, skipWAL bool) {
r.waitSync = false
r.mtx.Unlock()
r.Metrics.BlockSyncing.Set(0)
r.Metrics.FastSyncing.Set(0)
r.Metrics.StateSyncing.Set(0)
if skipWAL {
@@ -312,11 +295,6 @@ conS:
conR:
%+v`, err, r.state, r))
}
d := types.EventDataBlockSyncStatus{Complete: true, Height: state.LastBlockHeight}
if err := r.eventBus.PublishEventBlockSyncStatus(d); err != nil {
r.Logger.Error("failed to emit the blocksync complete event", "err", err)
}
}
// String returns a string representation of the Reactor.
@@ -969,7 +947,7 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) {
go r.gossipVotesRoutine(ps)
go r.queryMaj23Routine(ps)
// Send our state to the peer. If we're block-syncing, broadcast a
// Send our state to the peer. If we're fast-syncing, broadcast a
// RoundStepMessage later upon SwitchToConsensus().
if !r.waitSync {
go r.sendNewRoundStepMessage(ps.peerID)
@@ -1219,24 +1197,21 @@ func (r *Reactor) handleVoteSetBitsMessage(envelope p2p.Envelope, msgI Message)
// It will handle errors and any possible panics gracefully. A caller can handle
// any error returned by sending a PeerError on the respective channel.
//
// NOTE: We process these messages even when we're block syncing. Messages affect
// either a peer state or the consensus state. Peer state updates can happen in
// parallel, but processing of proposals, block parts, and votes are ordered by
// the p2p channel.
//
// NOTE: We block on consensus state for proposals, block parts, and votes.
func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err error) {
defer func() {
if e := recover(); e != nil {
err = fmt.Errorf("panic in processing message: %v", e)
r.Logger.Error(
"recovering from processing message panic",
"err", err,
"stack", string(debug.Stack()),
)
r.Logger.Error("recovering from processing message panic", "err", err)
}
}()
// Just skip the entire message during syncing so that we can
// process fewer messages.
if r.WaitSync() {
return
}
// We wrap the envelope's message in a Proto wire type so we can convert back
// the domain type that individual channel message handlers can work with. We
// do this here once to avoid having to do it for each individual message type.
@@ -1437,11 +1412,3 @@ func (r *Reactor) peerStatsRoutine() {
func (r *Reactor) GetConsensusState() *State {
return r.state
}
func (r *Reactor) SetStateSyncingMetrics(v float64) {
r.Metrics.StateSyncing.Set(v)
}
func (r *Reactor) SetBlockSyncingMetrics(v float64) {
r.Metrics.BlockSyncing.Set(v)
}
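
The handleMessage hunk above trims the recover block but keeps the pattern: a deferred recover converts any panic raised while processing a message into an ordinary returned error, so one bad envelope cannot take down the reactor. The pattern in isolation:

package main

import (
	"fmt"
	"runtime/debug"
)

// handle converts a panic in the message handler into a returned error,
// matching the defer/recover shape of handleMessage above.
func handle(msg func()) (err error) {
	defer func() {
		if e := recover(); e != nil {
			// Keep the stack for the log line, as the reactor does.
			err = fmt.Errorf("panic in processing message: %v\n%s", e, debug.Stack())
		}
	}()
	msg()
	return nil
}

func main() {
	err := handle(func() { panic("bad envelope") })
	fmt.Println(err)
}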

View File

@@ -25,7 +25,6 @@ import (
"github.com/tendermint/tendermint/internal/p2p/p2ptest"
"github.com/tendermint/tendermint/internal/test/factory"
"github.com/tendermint/tendermint/libs/log"
tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus"
sm "github.com/tendermint/tendermint/state"
statemocks "github.com/tendermint/tendermint/state/mocks"
@@ -43,7 +42,6 @@ type reactorTestSuite struct {
states map[types.NodeID]*State
reactors map[types.NodeID]*Reactor
subs map[types.NodeID]types.Subscription
blocksyncSubs map[types.NodeID]types.Subscription
stateChannels map[types.NodeID]*p2p.Channel
dataChannels map[types.NodeID]*p2p.Channel
voteChannels map[types.NodeID]*p2p.Channel
@@ -60,11 +58,10 @@ func setup(t *testing.T, numNodes int, states []*State, size int) *reactorTestSu
t.Helper()
rts := &reactorTestSuite{
network: p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numNodes}),
states: make(map[types.NodeID]*State),
reactors: make(map[types.NodeID]*Reactor, numNodes),
subs: make(map[types.NodeID]types.Subscription, numNodes),
blocksyncSubs: make(map[types.NodeID]types.Subscription, numNodes),
network: p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numNodes}),
states: make(map[types.NodeID]*State),
reactors: make(map[types.NodeID]*Reactor, numNodes),
subs: make(map[types.NodeID]types.Subscription, numNodes),
}
rts.stateChannels = rts.network.MakeChannelsNoCleanup(t, chDesc(StateChannel), new(tmcons.Message), size)
@@ -72,8 +69,6 @@ func setup(t *testing.T, numNodes int, states []*State, size int) *reactorTestSu
rts.voteChannels = rts.network.MakeChannelsNoCleanup(t, chDesc(VoteChannel), new(tmcons.Message), size)
rts.voteSetBitsChannels = rts.network.MakeChannelsNoCleanup(t, chDesc(VoteSetBitsChannel), new(tmcons.Message), size)
_, cancel := context.WithCancel(context.Background())
i := 0
for nodeID, node := range rts.network.Nodes {
state := states[i]
@@ -94,13 +89,9 @@ func setup(t *testing.T, numNodes int, states []*State, size int) *reactorTestSu
blocksSub, err := state.eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock, size)
require.NoError(t, err)
fsSub, err := state.eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryBlockSyncStatus, size)
require.NoError(t, err)
rts.states[nodeID] = state
rts.subs[nodeID] = blocksSub
rts.reactors[nodeID] = reactor
rts.blocksyncSubs[nodeID] = fsSub
// simulate handle initChain in handshake
if state.state.LastBlockHeight == 0 {
@@ -126,7 +117,6 @@ func setup(t *testing.T, numNodes int, states []*State, size int) *reactorTestSu
}
leaktest.Check(t)
cancel()
})
return rts
@@ -263,22 +253,11 @@ func waitForBlockWithUpdatedValsAndValidateIt(
wg.Wait()
}
func ensureBlockSyncStatus(t *testing.T, msg tmpubsub.Message, complete bool, height int64) {
t.Helper()
status, ok := msg.Data().(types.EventDataBlockSyncStatus)
require.True(t, ok)
require.Equal(t, complete, status.Complete)
require.Equal(t, height, status.Height)
}
func TestReactorBasic(t *testing.T) {
config := configSetup(t)
n := 4
states, cleanup := randConsensusState(t,
config, n, "consensus_reactor_test",
newMockTickerFunc(true), newKVStore)
states, cleanup := randConsensusState(t, config, n, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
t.Cleanup(cleanup)
rts := setup(t, n, states, 100) // buffer must be large enough to not deadlock
@@ -294,21 +273,8 @@ func TestReactorBasic(t *testing.T) {
// wait till everyone makes the first new block
go func(s types.Subscription) {
defer wg.Done()
<-s.Out()
}(sub)
}
wg.Wait()
for _, sub := range rts.blocksyncSubs {
wg.Add(1)
// wait till everyone makes the consensus switch
go func(s types.Subscription) {
defer wg.Done()
msg := <-s.Out()
ensureBlockSyncStatus(t, msg, true, 0)
wg.Done()
}(sub)
}
@@ -321,7 +287,7 @@ func TestReactorWithEvidence(t *testing.T) {
n := 4
testName := "consensus_reactor_test"
tickerFunc := newMockTickerFunc(true)
appFunc := newKVStore
appFunc := newCounter
genDoc, privVals := factory.RandGenesisDoc(config, n, false, 30)
states := make([]*State, n)
@@ -421,7 +387,7 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {
n,
"consensus_reactor_test",
newMockTickerFunc(true),
newKVStore,
newCounter,
func(c *cfg.Config) {
c.Consensus.CreateEmptyBlocks = false
},
@@ -465,9 +431,7 @@ func TestReactorRecordsVotesAndBlockParts(t *testing.T) {
config := configSetup(t)
n := 4
states, cleanup := randConsensusState(t,
config, n, "consensus_reactor_test",
newMockTickerFunc(true), newKVStore)
states, cleanup := randConsensusState(t, config, n, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
t.Cleanup(cleanup)
rts := setup(t, n, states, 100) // buffer must be large enough to not deadlock

View File

@@ -73,20 +73,15 @@ type mockProxyApp struct {
abciResponses *tmstate.ABCIResponses
}
func (mock *mockProxyApp) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx {
r := mock.abciResponses.DeliverTxs[mock.txCount]
func (mock *mockProxyApp) FinalizeBlock(req abci.RequestFinalizeBlock) abci.ResponseFinalizeBlock {
r := mock.abciResponses.FinalizeBlock
mock.txCount++
if r == nil {
return abci.ResponseDeliverTx{}
return abci.ResponseFinalizeBlock{}
}
return *r
}
func (mock *mockProxyApp) EndBlock(req abci.RequestEndBlock) abci.ResponseEndBlock {
mock.txCount = 0
return *mock.abciResponses.EndBlock
}
func (mock *mockProxyApp) Commit() abci.ResponseCommit {
return abci.ResponseCommit{Data: mock.appHash}
}
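
mockProxyApp replays responses recorded in tmstate.ABCIResponses instead of executing transactions; with FinalizeBlock returning the whole block's results at once, the per-call txCount bookkeeping above is now largely vestigial. The replay idea in isolation, with hypothetical local types:

package main

import "fmt"

// recorded stands in for tmstate.ABCIResponses: whatever the real app
// returned when the block was first executed.
type recorded struct {
	finalizeBlock *string
	appHash       []byte
}

type replayApp struct{ rec recorded }

// finalizeBlock returns the stored response rather than re-executing,
// falling back to an empty response when nothing was recorded.
func (a *replayApp) finalizeBlock() string {
	if a.rec.finalizeBlock == nil {
		return ""
	}
	return *a.rec.finalizeBlock
}

func (a *replayApp) commit() []byte { return a.rec.appHash }

func main() {
	resp := "block-1-results"
	app := &replayApp{rec: recorded{finalizeBlock: &resp, appHash: []byte("h")}}
	fmt.Println(app.finalizeBlock(), string(app.commit()))
}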

View File

@@ -619,8 +619,8 @@ func TestMockProxyApp(t *testing.T) {
assert.NotPanics(t, func() {
abciResWithEmptyDeliverTx := new(tmstate.ABCIResponses)
abciResWithEmptyDeliverTx.DeliverTxs = make([]*abci.ResponseDeliverTx, 0)
abciResWithEmptyDeliverTx.DeliverTxs = append(abciResWithEmptyDeliverTx.DeliverTxs, &abci.ResponseDeliverTx{})
abciResWithEmptyDeliverTx.FinalizeBlock.Txs = make([]*abci.ResponseDeliverTx, 0)
abciResWithEmptyDeliverTx.FinalizeBlock.Txs = append(abciResWithEmptyDeliverTx.FinalizeBlock.Txs, &abci.ResponseDeliverTx{})
// called when saveABCIResponses:
bytes, err := proto.Marshal(abciResWithEmptyDeliverTx)
@@ -634,28 +634,30 @@ func TestMockProxyApp(t *testing.T) {
mock := newMockProxyApp([]byte("mock_hash"), loadedAbciRes)
abciRes := new(tmstate.ABCIResponses)
abciRes.DeliverTxs = make([]*abci.ResponseDeliverTx, len(loadedAbciRes.DeliverTxs))
abciRes.FinalizeBlock.Txs = make([]*abci.ResponseDeliverTx, len(loadedAbciRes.FinalizeBlock.Txs))
// Execute transactions and get hash.
proxyCb := func(req *abci.Request, res *abci.Response) {
if r, ok := res.Value.(*abci.Response_DeliverTx); ok {
// TODO: make use of res.Log
// TODO: make use of this info
// Blocks may include invalid txs.
txRes := r.DeliverTx
if txRes.Code == abci.CodeTypeOK {
validTxs++
} else {
logger.Debug("Invalid tx", "code", txRes.Code, "log", txRes.Log)
invalidTxs++
if r, ok := res.Value.(*abci.Response_FinalizeBlock); ok {
for i, tx := range r.FinalizeBlock.Txs {
// TODO: make use of res.Log
// TODO: make use of this info
// Blocks may include invalid txs.
txRes := tx
if txRes.Code == abci.CodeTypeOK {
validTxs++
} else {
logger.Debug("Invalid tx", "code", txRes.Code, "log", txRes.Log)
invalidTxs++
}
abciRes.FinalizeBlock.Txs[i] = txRes
txIndex++
}
abciRes.DeliverTxs[txIndex] = txRes
txIndex++
}
}
mock.SetResponseCallback(proxyCb)
someTx := []byte("tx")
_, err = mock.DeliverTxAsync(context.Background(), abci.RequestDeliverTx{Tx: someTx})
_, err = mock.FinalizeBlockSync(context.Background(), abci.RequestFinalizeBlock{Txs: [][]byte{someTx}})
assert.NoError(t, err)
})
assert.True(t, validTxs == 1)
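
The rewritten callback walks every per-tx result inside a single Response_FinalizeBlock rather than seeing one Response_DeliverTx per transaction. The tallying logic on its own, with stand-in types:

package main

import "fmt"

type txResult struct {
	Code uint32
	Log  string
}

const codeTypeOK uint32 = 0

// tally mirrors the proxy callback above: count valid and invalid txs
// from one batched FinalizeBlock-style response.
func tally(results []*txResult) (valid, invalid int) {
	for _, r := range results {
		if r.Code == codeTypeOK {
			valid++
		} else {
			invalid++
		}
	}
	return valid, invalid
}

func main() {
	valid, invalid := tally([]*txResult{
		{Code: codeTypeOK},
		{Code: 2, Log: "Invalid nonce. Expected 1, got 5"},
	})
	fmt.Println(valid, invalid) // 1 1
}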
@@ -1203,8 +1205,8 @@ func (bs *mockBlockStore) SaveBlock(block *types.Block, blockParts *types.PartSe
func (bs *mockBlockStore) LoadBlockCommit(height int64) *types.Commit {
return bs.commits[height-1]
}
func (bs *mockBlockStore) LoadSeenCommit() *types.Commit {
return bs.commits[len(bs.commits)-1]
func (bs *mockBlockStore) LoadSeenCommit(height int64) *types.Commit {
return bs.commits[height-1]
}
func (bs *mockBlockStore) PruneBlocks(height int64) (uint64, error) {

Some files were not shown because too many files have changed in this diff.