Compare commits


2 commits

7497331f47  tycho garen  "change lock"  2021-05-12 10:38:47 -04:00
d893463bb8  tycho garen  "libs/clist: omit channels and use waitgroups"  2021-05-12 10:35:56 -04:00
572 changed files with 12489 additions and 29766 deletions


@@ -121,7 +121,7 @@ jobs:
- run: |
cat ./*profile.out | grep -v "mode: atomic" >> coverage.txt
if: env.GIT_DIFF
- - uses: codecov/codecov-action@v1.5.2
+ - uses: codecov/codecov-action@v1.5.0
with:
file: ./coverage.txt
if: env.GIT_DIFF


@@ -40,17 +40,17 @@ jobs:
platforms: all
- name: Set up Docker Buildx
- uses: docker/setup-buildx-action@v1.5.0
+ uses: docker/setup-buildx-action@v1.3.0
- name: Login to DockerHub
if: ${{ github.event_name != 'pull_request' }}
- uses: docker/login-action@v1.10.0
+ uses: docker/login-action@v1.9.0
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Publish to Docker Hub
- uses: docker/build-push-action@v2.6.1
+ uses: docker/build-push-action@v2.4.0
with:
context: .
file: ./DOCKER/Dockerfile

.github/workflows/docs.yml (new file)

@@ -0,0 +1,32 @@
name: Documentation
# This job builds and deploys documentation to github pages.
# It runs on every push to master, and can be manually triggered.
on:
workflow_dispatch: # allow running workflow manually
push:
branches:
- master
jobs:
build-and-deploy:
runs-on: ubuntu-latest
container:
image: tendermintdev/docker-website-deployment
steps:
- name: Checkout 🛎️
uses: actions/checkout@v2.3.4
with:
persist-credentials: false
fetch-depth: 0
- name: Install and Build 🔧
run: |
apk add rsync
make build-docs
- name: Deploy 🚀
uses: JamesIves/github-pages-deploy-action@4.1.2
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
BRANCH: gh-pages
FOLDER: ~/output


@@ -49,7 +49,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Notify Slack on failure
- uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
+ uses: rtCamp/action-slack-notify@f565a63638bd3615e76249bffab00fcb9dab90f7
env:
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACK_CHANNEL: tendermint-internal
@@ -65,7 +65,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Notify Slack on success
- uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
+ uses: rtCamp/action-slack-notify@f565a63638bd3615e76249bffab00fcb9dab90f7
env:
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACK_CHANNEL: tendermint-internal


@@ -47,7 +47,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Notify Slack on failure
- uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
+ uses: rtCamp/action-slack-notify@f565a63638bd3615e76249bffab00fcb9dab90f7
env:
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACK_CHANNEL: tendermint-internal
@@ -63,7 +63,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Notify Slack on success
- uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
+ uses: rtCamp/action-slack-notify@f565a63638bd3615e76249bffab00fcb9dab90f7
env:
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACK_CHANNEL: tendermint-internal


@@ -76,7 +76,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Notify Slack if any crashers
- uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
+ uses: rtCamp/action-slack-notify@f565a63638bd3615e76249bffab00fcb9dab90f7
env:
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACK_CHANNEL: tendermint-internal


@@ -34,16 +34,16 @@ jobs:
echo ::set-output name=tags::${TAGS}
- name: Set up Docker Buildx
- uses: docker/setup-buildx-action@v1.5.0
+ uses: docker/setup-buildx-action@v1.3.0
- name: Login to DockerHub
- uses: docker/login-action@v1.10.0
+ uses: docker/login-action@v1.9.0
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Publish to Docker Hub
- uses: docker/build-push-action@v2.6.1
+ uses: docker/build-push-action@v2.4.0
with:
context: ./tools/proto
file: ./tools/proto/Dockerfile


@@ -7,7 +7,7 @@ jobs:
stale:
runs-on: ubuntu-latest
steps:
- - uses: actions/stale@v3.0.19
+ - uses: actions/stale@v3.0.18
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
stale-pr-message: "This pull request has been automatically marked as stale because it has not had


@@ -28,7 +28,7 @@ jobs:
- name: install
run: make install install_abci
if: "env.GIT_DIFF != ''"
- - uses: actions/cache@v2.1.6
+ - uses: actions/cache@v2.1.5
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
@@ -36,7 +36,7 @@ jobs:
${{ runner.os }}-go-
if: env.GIT_DIFF
# Cache binaries for use by other jobs
- - uses: actions/cache@v2.1.6
+ - uses: actions/cache@v2.1.5
with:
path: ~/go/bin
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
@@ -57,14 +57,14 @@ jobs:
**/**.go
go.mod
go.sum
- - uses: actions/cache@v2.1.6
+ - uses: actions/cache@v2.1.5
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
if: env.GIT_DIFF
- - uses: actions/cache@v2.1.6
+ - uses: actions/cache@v2.1.5
with:
path: ~/go/bin
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
@@ -89,14 +89,14 @@ jobs:
**/**.go
go.mod
go.sum
- - uses: actions/cache@v2.1.6
+ - uses: actions/cache@v2.1.5
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
if: env.GIT_DIFF
- - uses: actions/cache@v2.1.6
+ - uses: actions/cache@v2.1.5
with:
path: ~/go/bin
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
@@ -120,14 +120,14 @@ jobs:
**/**.go
go.mod
go.sum
- - uses: actions/cache@v2.1.6
+ - uses: actions/cache@v2.1.5
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
if: env.GIT_DIFF
- - uses: actions/cache@v2.1.6
+ - uses: actions/cache@v2.1.5
with:
path: ~/go/bin
key: ${{ runner.os }}-${{ github.sha }}-tm-binary

.gitignore

@@ -24,7 +24,6 @@ docs/.vuepress/dist
docs/_build
docs/dist
docs/node_modules/
- docs/spec
index.html.md
libs/pubsub/query/fuzz_test/output
profile\.out


@@ -39,7 +39,6 @@ linters:
# - wsl
# - gocognit
- nolintlint
- - asciicheck
issues:
exclude-rules:


@@ -1,29 +1,5 @@
# Changelog
- Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).
- ## v0.34.11
- *June 18, 2021*
- This release improves the robustness of statesync; tweaking channel priorities and timeouts and
- adding two new parameters to the state sync config.
- ### BREAKING CHANGES
- - Apps
- - [Version] \#6494 `TMCoreSemVer` is not required to be set as a ldflag any longer.
- ### IMPROVEMENTS
- - [statesync] \#6566 Allow state sync fetchers and request timeout to be configurable. (@alexanderbez)
- - [statesync] \#6378 Retry requests for snapshots and add a minimum discovery time (5s) for new snapshots. (@tychoish)
- - [statesync] \#6582 Increase chunk priority and add multiple retry chunk requests (@cmwaters)
- ### BUG FIXES
- - [evidence] \#6375 Fix bug with inconsistent LightClientAttackEvidence hashing (@cmwaters)
## v0.34.10
*April 14, 2021*
@@ -32,6 +8,8 @@ This release fixes a bug where peers would sometimes try to send messages
on incorrect channels. Special thanks to our friends at Oasis Labs for surfacing
this issue!
+ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermint).
- [p2p/node] [\#6339](https://github.com/tendermint/tendermint/issues/6339) Fix bug with using custom channels (@cmwaters)
- [light] [\#6346](https://github.com/tendermint/tendermint/issues/6346) Correctly handle too high errors to improve client robustness (@cmwaters)
@@ -47,6 +25,8 @@ This release also includes a small Go API-breaking change, to reduce panics in t
Special thanks to our external contributors on this release: @gchaincl
+ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermint).
### BREAKING CHANGES
- Go API
@@ -69,6 +49,8 @@ Special thanks to our external contributors on this release: @gchaincl
This release, in conjunction with [a fix in the Cosmos SDK](https://github.com/cosmos/cosmos-sdk/pull/8641),
introduces changes that should mean the logs are much, much quieter. 🎉
+ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermint).
### IMPROVEMENTS
- [libs/log] [\#6174](https://github.com/tendermint/tendermint/issues/6174) Include timestamp (`ts` field; `time.RFC3339Nano` format) in JSON logger output (@melekes)
@@ -106,6 +88,8 @@ use remote signer implementations instead of `FilePV` in production.
Thank you to @joe-bowman for his assistance with this vulnerability and a particular
shout-out to @marbar3778 for diagnosing it quickly.
+ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermint).
### BUG FIXES
- [consensus] [\#6128](https://github.com/tendermint/tendermint/pull/6128) Remove privValidator from log call (@tessr)
@@ -126,6 +110,8 @@ Thank you to our friends at Crypto.com for the initial report of this memory lea
Special thanks to other external contributors on this release: @yayajacky, @odidev, @laniehei, and @c29r3!
+ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermint).
### BUG FIXES
- [light] [\#6022](https://github.com/tendermint/tendermint/pull/6022) Fix a bug when the number of validators equals 100 (@melekes)
@@ -144,6 +130,8 @@ or https://nvd.nist.gov/vuln/detail/CVE-2021-21271.
Tendermint Core v0.34.3 also updates GoGo Protobuf to 1.3.2 in order to pick up the fix for
https://nvd.nist.gov/vuln/detail/CVE-2021-3121.
+ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermint).
### BUG FIXES
- [evidence] [[security fix]](https://github.com/tendermint/tendermint/security/advisories/GHSA-p658-8693-mhvg) Use correct source of evidence time (@cmwaters)
@@ -157,6 +145,8 @@ This release fixes a substantial bug in evidence handling where evidence could
sometimes be broadcast before the block containing that evidence was fully committed,
resulting in some nodes panicking when trying to verify said evidence.
+ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).
### BREAKING CHANGES
- Go API
@@ -180,6 +170,8 @@ disconnecting from this node. As a temporary remedy (until the mempool package
is refactored), the `max-batch-bytes` was disabled. Transactions will be sent
one by one without batching.
+ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).
### BREAKING CHANGES
- CLI/RPC/Config
@@ -208,6 +200,8 @@ Holy smokes, this is a big one! For a more reader-friendly overview of the chang
Special thanks to external contributors on this release: @james-ray, @fedekunze, @favadi, @alessio,
@joe-bowman, @cuonglm, @SadPencil and @dongsam.
+ And as always, friendly reminder, that we have a [bug bounty program](https://hackerone.com/tendermint).
### BREAKING CHANGES
- CLI/RPC/Config
@@ -448,6 +442,9 @@ as 2/3+ of the signatures are checked._
Special thanks to @njmurarka at Bluzelle Networks for reporting this.
+ Friendly reminder, we have a [bug bounty
+ program](https://hackerone.com/tendermint).
### SECURITY:
- [consensus] Do not allow signatures for a wrong block in commits (@ebuchman)
@@ -463,6 +460,8 @@ need to update your code.**
Special thanks to external contributors on this release: @tau3,
+ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).
### BREAKING CHANGES:
- Go API
@@ -522,6 +521,8 @@ Special thanks to external contributors on this release: @tau3,
Special thanks to external contributors on this release: @whylee259, @greg-szabo
+ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).
### BREAKING CHANGES:
- Go API
@@ -608,6 +609,9 @@ Notes:
Special thanks to [fudongbai](https://hackerone.com/fudongbai) for finding
and reporting this.
+ Friendly reminder, we have a [bug bounty
+ program](https://hackerone.com/tendermint).
### SECURITY:
- [mempool] Reserve IDs in InitPeer instead of AddPeer (@tessr)
@@ -620,6 +624,8 @@ and reporting this.
Special thanks to external contributors on this release:
@antho1404, @michaelfig, @gterzian, @tau3, @Shivani912
+ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).
### BREAKING CHANGES:
- CLI/RPC/Config
@@ -670,6 +676,9 @@ Special thanks to external contributors on this release:
Special thanks to external contributors on this release:
@princesinha19
+ Friendly reminder, we have a [bug bounty
+ program](https://hackerone.com/tendermint).
### FEATURES:
- [rpc] [\#3333](https://github.com/tendermint/tendermint/issues/3333) Add `order_by` to `/tx_search` endpoint, allowing to change default ordering from asc to desc (@princesinha19)
@@ -688,6 +697,9 @@ Special thanks to external contributors on this release:
Special thanks to external contributors on this release: @mrekucci, @PSalant726, @princesinha19, @greg-szabo, @dongsam, @cuonglm, @jgimeno, @yenkhoon
+ Friendly reminder, we have a [bug bounty
+ program.](https://hackerone.com/tendermint).
*January 14, 2020*
This release contains breaking changes to the `Block#Header`, specifically
@@ -916,6 +928,9 @@ Notes:
Special thanks to [fudongbai](https://hackerone.com/fudongbai) for finding
and reporting this.
+ Friendly reminder, we have a [bug bounty
+ program](https://hackerone.com/tendermint).
### SECURITY:
- [mempool] Reserve IDs in InitPeer instead of AddPeer (@tessr)
@@ -927,6 +942,9 @@ _January, 9, 2020_
Special thanks to external contributors on this release: @greg-szabo, @gregzaitsev, @yenkhoon
+ Friendly reminder, we have a [bug bounty
+ program](https://hackerone.com/tendermint).
### FEATURES:
- [rpc/lib] [\#4248](https://github.com/tendermint/tendermint/issues/4248) RPC client basic authentication support (@greg-szabo)
@@ -948,6 +966,9 @@ Special thanks to external contributors on this release: @greg-szabo, @gregzaits
Special thanks to external contributors on this release: @erikgrinaker, @guagualvcha, @hsyis, @cosmostuba, @whunmr, @austinabell
+ Friendly reminder, we have a [bug bounty
+ program.](https://hackerone.com/tendermint).
### BREAKING CHANGES:
@@ -987,6 +1008,9 @@ identified and fixed here.
Special thanks to [elvishacker](https://hackerone.com/elvishacker) for finding
and reporting this.
+ Friendly reminder, we have a [bug bounty
+ program](https://hackerone.com/tendermint).
### BREAKING CHANGES:
- Go API
@@ -1013,6 +1037,9 @@ accepting new peers and only allowing `ed25519` pubkeys.
Special thanks to [fudongbai](https://hackerone.com/fudongbai) for pointing
this out.
+ Friendly reminder, we have a [bug bounty
+ program](https://hackerone.com/tendermint).
### SECURITY:
- [p2p] [\#4030](https://github.com/tendermint/tendermint/issues/4030) Only allow ed25519 pubkeys when connecting
@@ -1028,6 +1055,9 @@ All clients are recommended to upgrade. See
Special thanks to [fudongbai](https://hackerone.com/fudongbai) for discovering
and reporting this issue.
+ Friendly reminder, we have a [bug bounty
+ program](https://hackerone.com/tendermint).
### SECURITY:
- [p2p] [\#4030](https://github.com/tendermint/tendermint/issues/4030) Fix for panic on nil public key send to a peer
@@ -1038,6 +1068,9 @@ and reporting this issue.
Special thanks to external contributors on this release: @jon-certik, @gracenoah, @PSalant726, @gchaincl
+ Friendly reminder, we have a [bug bounty
+ program](https://hackerone.com/tendermint).
### BREAKING CHANGES:
- CLI/RPC/Config
@@ -1073,6 +1106,9 @@ guide.
Special thanks to external contributors on this release:
@gchaincl, @bluele, @climber73
+ Friendly reminder, we have a [bug bounty
+ program](https://hackerone.com/tendermint).
### IMPROVEMENTS:
- [consensus] [\#3839](https://github.com/tendermint/tendermint/issues/3839) Reduce "Error attempting to add vote" message severity (Error -> Info)
@@ -1093,6 +1129,9 @@ Special thanks to external contributors on this release:
Special thanks to external contributors on this release:
@ruseinov, @bluele, @guagualvcha
+ Friendly reminder, we have a [bug bounty
+ program](https://hackerone.com/tendermint).
### BREAKING CHANGES:
- Go API
@@ -1132,6 +1171,9 @@ This release contains a minor enhancement to the ABCI and some breaking changes
- CheckTx requests include a `CheckTxType` enum that can be set to `Recheck` to indicate to the application that this transaction was already checked/validated and certain expensive operations (like checking signatures) can be skipped
- Removed various functions from `libs` pkgs
+ Friendly reminder, we have a [bug bounty
+ program](https://hackerone.com/tendermint).
### BREAKING CHANGES:
- Go API
@@ -1177,6 +1219,9 @@ and the RPC, namely:
[docs](https://github.com/tendermint/tendermint/blob/60827f75623b92eff132dc0eff5b49d2025c591e/docs/spec/abci/abci.md#events)
- Bind RPC to localhost by default, not to the public interface [UPGRADING/RPC_Changes](./UPGRADING.md#rpc_changes)
+ Friendly reminder, we have a [bug bounty
+ program](https://hackerone.com/tendermint).
### BREAKING CHANGES:
* CLI/RPC/Config
@@ -1277,6 +1322,8 @@ Notes:
Special thanks to [fudongbai](https://hackerone.com/fudongbai) for finding
and reporting this.
+ Friendly reminder, we have a [bug bounty
+ program](https://hackerone.com/tendermint).
### SECURITY:
@@ -1297,6 +1344,8 @@ identified and fixed here.
Special thanks to [elvishacker](https://hackerone.com/elvishacker) for finding
and reporting this.
+ Friendly reminder, we have a [bug bounty
+ program](https://hackerone.com/tendermint).
### BREAKING CHANGES:
@@ -1324,6 +1373,8 @@ accepting new peers and only allowing `ed25519` pubkeys.
Special thanks to [fudongbai](https://hackerone.com/fudongbai) for pointing
this out.
+ Friendly reminder, we have a [bug bounty
+ program](https://hackerone.com/tendermint).
### SECURITY:
@@ -1340,6 +1391,8 @@ All clients are recommended to upgrade. See
Special thanks to [fudongbai](https://hackerone.com/fudongbai) for discovering
and reporting this issue.
+ Friendly reminder, we have a [bug bounty
+ program](https://hackerone.com/tendermint).
### SECURITY:
@@ -1635,6 +1688,8 @@ See the [v0.31.0
Milestone](https://github.com/tendermint/tendermint/milestone/19?closed=1) for
more details.
+ Friendly reminder, we have a [bug bounty
+ program](https://hackerone.com/tendermint).
### BREAKING CHANGES:
@@ -1855,6 +1910,8 @@ This release contains two important fixes: one for p2p layer where we sometimes
were not closing connections and one for consensus layer where consensus with
no empty blocks (`create_empty_blocks = false`) could halt.
+ Friendly reminder, we have a [bug bounty
+ program](https://hackerone.com/tendermint).
### IMPROVEMENTS:
- [pex] [\#3037](https://github.com/tendermint/tendermint/issues/3037) Only log "Reached max attempts to dial" once
@@ -1894,6 +1951,8 @@ While we are trying to stabilize the Block protocol to preserve compatibility
with old chains, there may be some final changes yet to come before Cosmos
launch as we continue to audit and test the software.
+ Friendly reminder, we have a [bug bounty
+ program](https://hackerone.com/tendermint).
### BREAKING CHANGES:
@@ -1942,6 +2001,8 @@ launch as we continue to audit and test the software.
Special thanks to external contributors on this release:
@HaoyangLiu
+ Friendly reminder, we have a [bug bounty
+ program](https://hackerone.com/tendermint).
### BUG FIXES:
- [consensus] Fix consensus halt from proposing blocks with too much evidence
@@ -2070,6 +2131,8 @@ Special thanks to @dlguddus for discovering a [major
issue](https://github.com/tendermint/tendermint/issues/2718#issuecomment-440888677)
in the proposer selection algorithm.
+ Friendly reminder, we have a [bug bounty
+ program](https://hackerone.com/tendermint).
This release is primarily about fixes to the proposer selection algorithm
in preparation for the [Cosmos Game of
@@ -2132,6 +2195,8 @@ Special thanks to external contributors on this release:
@ackratos, @goolAdapter, @james-ray, @joe-bowman, @kostko,
@nagarajmanjunath, @tomtau
+ Friendly reminder, we have a [bug bounty
+ program](https://hackerone.com/tendermint).
### FEATURES:
@@ -2171,6 +2236,8 @@ Special thanks to external contributors on this release:
Special thanks to external contributors on this release:
@danil-lashin, @kevlubkcm, @krhubert, @srmo
+ Friendly reminder, we have a [bug bounty
+ program](https://hackerone.com/tendermint).
### BREAKING CHANGES:
@@ -2215,6 +2282,8 @@ Special thanks to external contributors on this release:
Special thanks to external contributors on this release: @hleb-albau, @zhuzeyu
+ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).
### FEATURES:
- [rpc] [\#2582](https://github.com/tendermint/tendermint/issues/2582) Enable CORS on RPC API (@hleb-albau)
@@ -2232,6 +2301,8 @@ Special thanks to external contributors on this release: @hleb-albau, @zhuzeyu
Special thanks to external contributors on this release: @katakonst
+ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).
### IMPROVEMENTS:
- [consensus] [\#2704](https://github.com/tendermint/tendermint/issues/2704) Simplify valid POL round logic
@@ -2405,6 +2476,8 @@ It also addresses some issues found via security audit, removes various unused
functions from `libs/common`, and implements
[ADR-012](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-012-peer-transport.md).
+ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).
BREAKING CHANGES:
* CLI/RPC/Config


@@ -9,7 +9,6 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi
### BREAKING CHANGES
- CLI/RPC/Config
- - [pubsub/events] \#6634 The `ResultEvent.Events` field is now of type `[]abci.Event` preserving event order instead of `map[string][]string`. (@alexanderbez)
- [config] \#5598 The `test_fuzz` and `test_fuzz_config` P2P settings have been removed. (@erikgrinaker)
- [config] \#5728 `fast_sync = "v1"` is no longer supported (@melekes)
- [cli] \#5772 `gen_node_key` prints JSON-encoded `NodeKey` rather than ID and does not save it to `node_key.json` (@melekes)
@@ -18,66 +17,43 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi
- [rpc] \#6168 Change default sorting to desc for `/tx_search` results (@melekes)
- [cli] \#6282 User must specify the node mode when using `tendermint init` (@cmwaters)
- [state/indexer] \#6382 reconstruct indexer, move txindex into the indexer package (@JayT106)
- - [cli] \#6372 Introduce `BootstrapPeers` as part of the new p2p stack. Peers to be connected on startup (@cmwaters)
- - [config] \#6462 Move `PrivValidator` configuration out of `BaseConfig` into its own section. (@tychoish)
- - [rpc] \#6610 Add MaxPeerBlockHeight into /status rpc call (@JayT106)
- - [libs/CList] \#6626 Automatically detach the prev/next elements in Remove function (@JayT106)
+ - [cli] \#6372 Introduce `BootstrapPeers` as part of the new p2p stack. Peers to be connected on
+ startup (@cmwaters)
- Apps
- [ABCI] \#6408 Change the `key` and `value` fields from `[]byte` to `string` in the `EventAttribute` type. (@alexanderbez)
- [ABCI] \#5447 Remove `SetOption` method from `ABCI.Client` interface
- [ABCI] \#5447 Reset `Oneof` indexes for `Request` and `Response`.
- [ABCI] \#5818 Use protoio for msg length delimitation. Migrates from int64 to uint64 length delimiters.
- [Version] \#6494 `TMCoreSemVer` has been renamed to `TMVersion`.
- It is not required any longer to set ldflags to set version strings
- P2P Protocol
- Go API
- [pubsub] \#6634 The `Query#Matches` method along with other pubsub methods, now accepts a `[]abci.Event` instead of `map[string][]string`. (@alexanderbez)
- [p2p] \#6618 Move `p2p.NodeInfo` into `types` to support use of the SDK. (@tychoish)
- [p2p] \#6583 Make `p2p.NodeID` and `p2p.NetAddress` exported types to support their use in the RPC layer. (@tychoish)
- [node] \#6540 Reduce surface area of the `node` package by making most of the implementation details private. (@tychoish)
- [p2p] \#6547 Move the entire `p2p` package and all reactor implementations into `internal`. (@tychoish)
- [libs/log] \#6534 Remove the existing custom Tendermint logger backed by go-kit. The logging interface, `Logger`, remains. Tendermint still provides a default logger backed by the performant zerolog logger. (@alexanderbez)
- [libs/time] \#6495 Move types/time to libs/time to improve consistency. (@tychoish)
- [mempool] \#6529 The `Context` field has been removed from the `TxInfo` type. `CheckTx` now requires a `Context` argument. (@alexanderbez)
- [abci/client, proxy] \#5673 `Async` funcs return an error, `Sync` and `Async` funcs accept `context.Context` (@melekes)
- - [p2p] Remove unused function `MakePoWTarget`. (@erikgrinaker)
+ - [p2p] Removed unused function `MakePoWTarget`. (@erikgrinaker)
- [libs/bits] \#5720 Validate `BitArray` in `FromProto`, which now returns an error (@melekes)
- - [proto/p2p] Rename `DefaultNodeInfo` and `DefaultNodeInfoOther` to `NodeInfo` and `NodeInfoOther` (@erikgrinaker)
+ - [proto/p2p] Renamed `DefaultNodeInfo` and `DefaultNodeInfoOther` to `NodeInfo` and `NodeInfoOther` (@erikgrinaker)
- [proto/p2p] Rename `NodeInfo.default_node_id` to `node_id` (@erikgrinaker)
- [libs/os] Kill() and {Must,}{Read,Write}File() functions have been removed. (@alessio)
- [store] \#5848 Remove block store state in favor of using the db iterators directly (@cmwaters)
- [state] \#5864 Use an iterator when pruning state (@cmwaters)
- [types] \#6023 Remove `tm2pb.Header`, `tm2pb.BlockID`, `tm2pb.PartSetHeader` and `tm2pb.NewValidatorUpdate`.
- Each of the above types has a `ToProto` and `FromProto` method or function which replaced this logic.
- [light] \#6054 Move `MaxRetryAttempt` option from client to provider.
- `NewWithOptions` now sets the max retry attempts and timeouts (@cmwaters)
- [all] \#6077 Change spelling from British English to American (@cmwaters)
- Rename "Subscription.Cancelled()" to "Subscription.Canceled()" in libs/pubsub
- Rename "behaviour" pkg to "behavior" and internalized it in blockchain v2
- Rename "Subscription.Cancelled()" to "Subscription.Canceled()" in libs/pubsub
- Rename "behaviour" pkg to "behavior" and internalized it in blockchain v2
- [rpc/client/http] \#6176 Remove `endpoint` arg from `New`, `NewWithTimeout` and `NewWithClient` (@melekes)
- [rpc/client/http] \#6176 Unexpose `WSEvents` (@melekes)
- [rpc/jsonrpc/client/ws_client] \#6176 `NewWS` no longer accepts options (use `NewWSWithOptions` and `OnReconnect` funcs to configure the client) (@melekes)
- [internal/libs] \#6366 Move `autofile`, `clist`,`fail`,`flowrate`, `protoio`, `sync`, `tempfile`, `test` and `timer` lib packages to an internal folder
- [libs/rand] \#6364 Remove most of libs/rand in favour of standard lib's `math/rand` (@liamsi)
- [mempool] \#6466 The original mempool reactor has been versioned as `v0` and moved to a sub-package under the root `mempool` package.
Some core types have been kept in the `mempool` package such as `TxCache` and its implementations, the `Mempool` interface itself
and `TxInfo`. (@alexanderbez)
- [crypto/sr25519] \#6526 Do not re-execute the Ed25519-style key derivation step when doing signing and verification. The derivation is now done once and only once. This breaks `sr25519.GenPrivKeyFromSecret` output compatibility. (@Yawning)
- [types] \#6627 Move `NodeKey` to types to make the type public.
- [config] \#6627 Extend `config` to contain methods `LoadNodeKeyID` and `LoadorGenNodeKeyID`
- [libs/rand] \#6364 Removed most of libs/rand in favour of standard lib's `math/rand` (@liamsi)
- Blockchain Protocol
- Data Storage
- [store/state/evidence/light] \#5771 Use an order-preserving varint key encoding (@cmwaters)
- [mempool] \#6396 Remove mempool's write ahead log (WAL), (previously unused by the tendermint code). (@tychoish)
- [state] \#6541 Move pruneBlocks from consensus/state to state/execution. (@JayT106)
- Tooling
- [tools] \#6498 Set OS home dir to instead of the hardcoded PATH. (@JayT106)
### FEATURES
@@ -87,22 +63,10 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi
accommodate the new p2p stack. Removes the notion of seeds and crawling. All peer
exchange reactors behave the same. (@cmwaters)
- [crypto] \#6376 Enable sr25519 as a validator key
- [mempool] \#6466 Introduction of a prioritized mempool. (@alexanderbez)
- `Priority` and `Sender` have been introduced into the `ResponseCheckTx` type, where the `priority` will determine the prioritization of
the transaction when a proposer reaps transactions for a block proposal. The `sender` field acts as an index.
- Operators may toggle between the legacy mempool reactor, `v0`, and the new prioritized reactor, `v1`, by setting the
`mempool.version` configuration, where `v1` is the default configuration.
- Applications that do not specify a priority, i.e. zero, will have transactions reaped by the order in which they are received by the node.
- Transactions are gossiped in FIFO order as they are in `v0`.
- [config/indexer] \#6411 Introduce support for custom event indexing data sources, specifically PostgreSQL. (@JayT106)
### IMPROVEMENTS
- [libs/log] Console log formatting changes as a result of \#6534 and \#6589. (@tychoish)
- [statesync] \#6566 Allow state sync fetchers and request timeout to be configurable. (@alexanderbez)
- [types] \#6478 Add `block_id` to `newblock` event (@jeebster)
- [crypto/ed25519] \#5632 Adopt zip215 `ed25519` verification. (@marbar3778)
- [crypto/ed25519] \#6526 Use [curve25519-voi](https://github.com/oasisprotocol/curve25519-voi) for `ed25519` signing and verification. (@Yawning)
- [crypto/sr25519] \#6526 Use [curve25519-voi](https://github.com/oasisprotocol/curve25519-voi) for `sr25519` signing and verification. (@Yawning)
- [privval] \#5603 Add `--key` to `init`, `gen_validator`, `testnet` & `unsafe_reset_priv_validator` for use in generating `secp256k1` keys.
- [privval] \#5725 Add gRPC support to private validator.
- [privval] \#5876 `tendermint show-validator` will query the remote signer if gRPC is being used (@marbar3778)
@@ -130,20 +94,11 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi
- [statesync] \#6378 Retry requests for snapshots and add a minimum discovery time (5s) for new snapshots.
- [node/state] \#6370 graceful shutdown in the consensus reactor (@JayT106)
- [crypto/merkle] \#6443 Improve HashAlternatives performance (@cuonglm)
- [crypto/merkle] \#6513 Optimize HashAlternatives (@marbar3778)
- [p2p/pex] \#6509 Improve addrBook.hash performance (@cuonglm)
- [consensus/metrics] \#6549 Change block_size gauge to a histogram for better observability over time (@marbar3778)
- [statesync] \#6587 Increase chunk priority and re-request chunks that don't arrive (@cmwaters)
- [state/privval] \#6578 No GetPubKey retry beyond the proposal/voting window (@JayT106)
- [rpc] \#6615 Add TotalGasUsed to block_results response (@crypto-facs)
- [cmd/tendermint/commands] \#6623 replace `$HOME/.some/test/dir` with `t.TempDir` (@tanyabouman)
### BUG FIXES
- [types] \#5523 Change json naming of `PartSetHeader` within `BlockID` from `parts` to `part_set_header` (@marbar3778)
- [privval] \#5638 Increase read/write timeout to 5s and calculate ping interval based on it (@JoeKash)
- [blockchain/v1] [\#5701](https://github.com/tendermint/tendermint/pull/5701) Handle peers without blocks (@melekes)
- [blockchain/v1] \#5711 Fix deadlock (@melekes)
- [evidence] \#6375 Fix bug with inconsistent LightClientAttackEvidence hashing (cmwaters)
- [rpc] \#6507 fix RPC client doesn't handle url's without ports (@JayT106)
- [statesync] \#6463 Adds Reverse Sync feature to fetch historical light blocks after state sync in order to verify any evidence (@cmwaters)
- [fastsync] \#6590 Update the metrics during fast-sync (@JayT106)
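The prioritized-mempool feature listed in the FEATURES hunk above ([mempool] \#6466) is easiest to see from the application side: `CheckTx` now reports a `Priority` and a `Sender`, and the `v1` reactor reaps by priority. A minimal sketch under those assumptions; the fee-derived priority and the `parseTx` helper are illustrative, not part of the changelog:

```go
package app

import (
	abci "github.com/tendermint/tendermint/abci/types"
)

// App's CheckTx assigns a priority so the v1 mempool reaps
// high-fee transactions first; apps that leave Priority at
// zero keep the v0-style FIFO ordering.
type App struct {
	abci.BaseApplication
}

func (App) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx {
	fee, sender := parseTx(req.Tx) // hypothetical decoding helper
	return abci.ResponseCheckTx{
		Code:     abci.CodeTypeOK,
		Priority: int64(fee), // reap order when mempool.version = "v1"
		Sender:   sender,     // used by the mempool as an index
	}
}

// parseTx stands in for real transaction decoding.
func parseTx(tx []byte) (uint64, string) { return uint64(len(tx)), "sender-addr" }
```

Per the entry above, operators pick the reactor with the `mempool.version` configuration, `v1` being the default.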


@@ -245,7 +245,6 @@ If there were no release candidates, and you'd like to cut a major release direc
all PRs
- Ensure that UPGRADING.md is up-to-date and includes notes on any breaking changes
or other upgrading flows.
- - Bump TMVersionDefault version in `version.go`
- Bump P2P and block protocol versions in `version.go`, if necessary
- Bump ABCI protocol version in `version.go`, if necessary
- Add any release notes you would like to be added to the body of the release to `release_notes.md`.
@@ -273,7 +272,6 @@ If there were no release candidates, and you'd like to cut a major release direc
release, and add the github aliases of external contributors to the top of
the changelog. To lookup an alias from an email, try `bash ./scripts/authors.sh <email>`
- Reset the `CHANGELOG_PENDING.md`
- - Bump TMVersionDefault version in `version.go`
- Bump P2P and block protocol versions in `version.go`, if necessary
- Bump ABCI protocol version in `version.go`, if necessary
- Make sure all significant breaking changes are covered in `UPGRADING.md`


@@ -1,5 +1,5 @@
# stage 1 Generate Tendermint Binary
- FROM golang:1.16-alpine as builder
+ FROM golang:1.15-alpine as builder
RUN apk update && \
apk upgrade && \
apk --no-cache add make


@@ -9,7 +9,7 @@ RUN wget http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm &
RUN yum -y groupinstall "Development Tools"
RUN yum -y install leveldb-devel which
- ENV GOVERSION=1.16.5
+ ENV GOVERSION=1.12.9
RUN cd /tmp && \
wget https://dl.google.com/go/go${GOVERSION}.linux-amd64.tar.gz && \


@@ -12,7 +12,7 @@ else
VERSION := $(shell git describe)
endif
- LD_FLAGS = -X github.com/tendermint/tendermint/version.TMVersion=$(VERSION)
+ LD_FLAGS = -X github.com/tendermint/tendermint/version.TMCoreSemVer=$(VERSION)
BUILD_FLAGS = -mod=readonly -ldflags "$(LD_FLAGS)"
HTTPS_GIT := https://github.com/tendermint/tendermint.git
DOCKER_BUF := docker run -v $(shell pwd):/workspace --workdir /workspace bufbuild/buf
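The `LD_FLAGS` line above pins down how the version string reaches the binary: the linker's `-X` flag can only overwrite a package-level string variable at link time. A minimal sketch of the receiving side, assuming a layout like the `version.TMCoreSemVer` this Makefile targets:

```go
// Package version carries build metadata for the binary.
package version

// TMCoreSemVer is overwritten at link time, e.g.:
//
//	go build -ldflags "-X github.com/tendermint/tendermint/version.TMCoreSemVer=v0.34.10"
//
// It must stay a plain, uninitialized string: -X cannot touch
// constants or values computed at init time.
var TMCoreSemVer string
```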


@@ -8,7 +8,7 @@ Or [Blockchain](<https://en.wikipedia.org/wiki/Blockchain_(database)>), for shor
[![version](https://img.shields.io/github/tag/tendermint/tendermint.svg)](https://github.com/tendermint/tendermint/releases/latest)
[![API Reference](https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667)](https://pkg.go.dev/github.com/tendermint/tendermint)
- [![Go version](https://img.shields.io/badge/go-1.16-blue.svg)](https://github.com/moovweb/gvm)
+ [![Go version](https://img.shields.io/badge/go-1.15-blue.svg)](https://github.com/moovweb/gvm)
[![Discord chat](https://img.shields.io/discord/669268347736686612.svg)](https://discord.gg/vcExX9T)
[![license](https://img.shields.io/github/license/tendermint/tendermint.svg)](https://github.com/tendermint/tendermint/blob/master/LICENSE)
[![tendermint/tendermint](https://tokei.rs/b1/github/tendermint/tendermint?category=lines)](https://github.com/tendermint/tendermint)
@@ -18,7 +18,8 @@ Or [Blockchain](<https://en.wikipedia.org/wiki/Blockchain_(database)>), for shor
|--------|--------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------|
| master | ![Tests](https://github.com/tendermint/tendermint/workflows/Tests/badge.svg?branch=master) | [![codecov](https://codecov.io/gh/tendermint/tendermint/branch/master/graph/badge.svg)](https://codecov.io/gh/tendermint/tendermint) | ![Lint](https://github.com/tendermint/tendermint/workflows/Lint/badge.svg) |
- Tendermint Core is a Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine - written in any programming language - and securely replicates it on many machines.
+ Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine - written in any programming language -
+ and securely replicates it on many machines.
For protocol details, see [the specification](https://github.com/tendermint/spec).
@@ -29,7 +30,9 @@ see our recent paper, "[The latest gossip on BFT consensus](https://arxiv.org/ab
Please do not depend on master as your production branch. Use [releases](https://github.com/tendermint/tendermint/releases) instead.
- Tendermint has been in the production of private and public environments, most notably the blockchains of the Cosmos Network. we haven't released v1.0 yet since we are making breaking changes to the protocol and the APIs.
+ Tendermint is being used in production in both private and public environments,
+ most notably the blockchains of the [Cosmos Network](https://cosmos.network/).
+ However, we are still making breaking changes to the protocol and the APIs and have not yet released v1.0.
See below for more details about [versioning](#versioning).
In any case, if you intend to run Tendermint in production, we're happy to help. You can
@@ -48,7 +51,7 @@ to notify you of vulnerabilities and fixes in Tendermint Core. You can subscribe
| Requirement | Notes |
|-------------|------------------|
- | Go version | Go1.16 or higher |
+ | Go version | Go1.15 or higher |
## Documentation
@@ -164,4 +167,4 @@ If you'd like to work full-time on Tendermint Core, [we're hiring](https://inter
Funding for Tendermint Core development comes primarily from the [Interchain Foundation](https://interchain.io),
a Swiss non-profit. The Tendermint trademark is owned by [Tendermint Inc.](https://tendermint.com), the for-profit entity
that also maintains [tendermint.com](https://tendermint.com).


@@ -7,13 +7,10 @@ This guide provides instructions for upgrading to specific versions of Tendermin
### ABCI Changes
* Added `AbciVersion` to `RequestInfo`. Applications should check that the ABCI version they expect is being used in order to avoid unimplemented changes errors.
* The method `SetOption` has been removed from the ABCI.Client interface. This feature was used in the early ABCI implementations.
* Messages are written to a byte stream using uint64 length delimiters instead of int64.
* When mempool `v1` is enabled, transactions broadcasted via `sync` mode may return a successful
response with a transaction hash indicating that the transaction was successfully inserted into
the mempool. While this is true for `v0`, the `v1` mempool reactor may at a later point in time
evict or even drop this transaction after a hash has been returned. Thus, the user or client must
query for that transaction to check if it is still in the mempool.
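In practice the paragraph above means a successful `BroadcastTxSync` is only an admission receipt under the `v1` mempool, not a delivery guarantee. A minimal polling sketch, assuming the `rpc/client/http` package from this release (the endpoint and the retry policy are placeholders):

```go
package main

import (
	"context"
	"fmt"
	"time"

	rpchttp "github.com/tendermint/tendermint/rpc/client/http"
	"github.com/tendermint/tendermint/types"
)

func main() {
	// New takes only the remote address after \#6176.
	c, err := rpchttp.New("http://localhost:26657")
	if err != nil {
		panic(err)
	}
	ctx := context.Background()

	tx := types.Tx("k=v")
	res, err := c.BroadcastTxSync(ctx, tx) // success == accepted, not final
	if err != nil {
		panic(err)
	}

	// The v1 mempool may later evict the tx, so poll for inclusion.
	for i := 0; i < 10; i++ {
		if r, err := c.Tx(ctx, res.Hash, false); err == nil {
			fmt.Println("committed at height", r.Height)
			return
		}
		time.Sleep(time.Second)
	}
	fmt.Println("tx not committed; it may have been evicted from the mempool")
}
```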
### Config Changes
@@ -29,9 +26,6 @@ This guide provides instructions for upgrading to specific versions of Tendermin
`Seeds`. Bootstrap peers are connected with on startup if needed for peer discovery. Unlike
persistent peers, there's no guarantee that the node will remain connected with these peers.
- - configuration values starting with `priv-validator-` have moved to the new
- `priv-validator` section, without the `priv-validator-` prefix.
### CLI Changes
* You must now specify the node mode (validator|full|seed) in `tendermint init [mode]`
@@ -47,40 +41,13 @@ This guide provides instructions for upgrading to specific versions of Tendermin
* CLI commands and flags are all now hyphen-case instead of snake_case.
Make sure to adjust any scripts that call a cli command with snake_casing
- ### API Changes
- The p2p layer was reimplemented as part of the 0.35 release cycle, and
- all reactors were refactored. As part of that work these
- implementations moved into the `internal` package and are no longer
- considered part of the public Go API of tendermint. These packages
- are:
- - `p2p`
- - `mempool`
- - `consensus`
- - `statesync`
- - `blockchain`
- - `evidence`
- Accordingly, the `node` package was changed to reduce access to
- tendermint internals: applications that use tendermint as a library
- will need to change to accommodate these changes. Most notably:
- - The `Node` type has become internal, and all constructors return a
- `service.Service` implementation.
- - The `node.DefaultNewNode` and `node.NewNode` constructors are no
- longer exported and have been replaced with `node.New` and
- `node.NewDefault` which provide more functional interfaces.
## v0.34.0
**Upgrading to Tendermint 0.34 requires a blockchain restart.**
This release is not compatible with previous blockchains due to changes to
the encoding format (see "Protocol Buffers," below) and the block header (see "Blockchain Protocol").
- Note also that Tendermint 0.34 also requires Go 1.16 or higher.
+ Note also that Tendermint 0.34 also requires Go 1.15 or higher.
### ABCI Changes


@@ -6,8 +6,8 @@ import (
"sync"
"github.com/tendermint/tendermint/abci/types"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
"github.com/tendermint/tendermint/libs/service"
tmsync "github.com/tendermint/tendermint/libs/sync"
)
const (
@@ -35,29 +35,33 @@ type Client interface {
FlushAsync(context.Context) (*ReqRes, error)
EchoAsync(ctx context.Context, msg string) (*ReqRes, error)
InfoAsync(context.Context, types.RequestInfo) (*ReqRes, error)
+ DeliverTxAsync(context.Context, types.RequestDeliverTx) (*ReqRes, error)
CheckTxAsync(context.Context, types.RequestCheckTx) (*ReqRes, error)
QueryAsync(context.Context, types.RequestQuery) (*ReqRes, error)
CommitAsync(context.Context) (*ReqRes, error)
InitChainAsync(context.Context, types.RequestInitChain) (*ReqRes, error)
+ BeginBlockAsync(context.Context, types.RequestBeginBlock) (*ReqRes, error)
+ EndBlockAsync(context.Context, types.RequestEndBlock) (*ReqRes, error)
ListSnapshotsAsync(context.Context, types.RequestListSnapshots) (*ReqRes, error)
OfferSnapshotAsync(context.Context, types.RequestOfferSnapshot) (*ReqRes, error)
LoadSnapshotChunkAsync(context.Context, types.RequestLoadSnapshotChunk) (*ReqRes, error)
ApplySnapshotChunkAsync(context.Context, types.RequestApplySnapshotChunk) (*ReqRes, error)
- FinalizeBlockAsync(context.Context, types.RequestFinalizeBlock) (*ReqRes, error)
// Synchronous requests
FlushSync(context.Context) error
EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error)
InfoSync(context.Context, types.RequestInfo) (*types.ResponseInfo, error)
+ DeliverTxSync(context.Context, types.RequestDeliverTx) (*types.ResponseDeliverTx, error)
CheckTxSync(context.Context, types.RequestCheckTx) (*types.ResponseCheckTx, error)
QuerySync(context.Context, types.RequestQuery) (*types.ResponseQuery, error)
CommitSync(context.Context) (*types.ResponseCommit, error)
InitChainSync(context.Context, types.RequestInitChain) (*types.ResponseInitChain, error)
+ BeginBlockSync(context.Context, types.RequestBeginBlock) (*types.ResponseBeginBlock, error)
+ EndBlockSync(context.Context, types.RequestEndBlock) (*types.ResponseEndBlock, error)
ListSnapshotsSync(context.Context, types.RequestListSnapshots) (*types.ResponseListSnapshots, error)
OfferSnapshotSync(context.Context, types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error)
LoadSnapshotChunkSync(context.Context, types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error)
ApplySnapshotChunkSync(context.Context, types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error)
- FinalizeBlockSync(context.Context, types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error)
}
//----------------------------------------
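Every method in the `Client` interface above threads a `context.Context`, so callers can bound synchronous ABCI round-trips. A minimal sketch against the interface as shown; client construction is elided because it depends on the transport (socket, gRPC, or local):

```go
package app

import (
	"context"
	"time"

	abcicli "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/types"
)

// checkWithTimeout bounds a synchronous CheckTx: if the socket or
// gRPC round-trip stalls, the context cancels the call.
func checkWithTimeout(c abcicli.Client, tx []byte) (*types.ResponseCheckTx, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	return c.CheckTxSync(ctx, types.RequestCheckTx{Tx: tx})
}
```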


@@ -10,9 +10,9 @@ import (
"google.golang.org/grpc"
"github.com/tendermint/tendermint/abci/types"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
tmnet "github.com/tendermint/tendermint/libs/net"
"github.com/tendermint/tendermint/libs/service"
tmsync "github.com/tendermint/tendermint/libs/sync"
)
// A gRPC client.
@@ -194,6 +194,16 @@ func (cli *grpcClient) InfoAsync(ctx context.Context, params types.RequestInfo)
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Info{Info: res}})
}
+ // NOTE: call is synchronous, use ctx to break early if needed
+ func (cli *grpcClient) DeliverTxAsync(ctx context.Context, params types.RequestDeliverTx) (*ReqRes, error) {
+ req := types.ToRequestDeliverTx(params)
+ res, err := cli.client.DeliverTx(ctx, req.GetDeliverTx(), grpc.WaitForReady(true))
+ if err != nil {
+ return nil, err
+ }
+ return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_DeliverTx{DeliverTx: res}})
+ }
// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) CheckTxAsync(ctx context.Context, params types.RequestCheckTx) (*ReqRes, error) {
req := types.ToRequestCheckTx(params)
@@ -234,6 +244,26 @@ func (cli *grpcClient) InitChainAsync(ctx context.Context, params types.RequestI
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_InitChain{InitChain: res}})
}
+ // NOTE: call is synchronous, use ctx to break early if needed
+ func (cli *grpcClient) BeginBlockAsync(ctx context.Context, params types.RequestBeginBlock) (*ReqRes, error) {
+ req := types.ToRequestBeginBlock(params)
+ res, err := cli.client.BeginBlock(ctx, req.GetBeginBlock(), grpc.WaitForReady(true))
+ if err != nil {
+ return nil, err
+ }
+ return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_BeginBlock{BeginBlock: res}})
+ }
+ // NOTE: call is synchronous, use ctx to break early if needed
+ func (cli *grpcClient) EndBlockAsync(ctx context.Context, params types.RequestEndBlock) (*ReqRes, error) {
+ req := types.ToRequestEndBlock(params)
+ res, err := cli.client.EndBlock(ctx, req.GetEndBlock(), grpc.WaitForReady(true))
+ if err != nil {
+ return nil, err
+ }
+ return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_EndBlock{EndBlock: res}})
+ }
// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) ListSnapshotsAsync(ctx context.Context, params types.RequestListSnapshots) (*ReqRes, error) {
req := types.ToRequestListSnapshots(params)
@@ -284,22 +314,6 @@ func (cli *grpcClient) ApplySnapshotChunkAsync(
)
}
- func (cli *grpcClient) FinalizeBlockAsync(
- ctx context.Context,
- params types.RequestFinalizeBlock,
- ) (*ReqRes, error) {
- req := types.ToRequestFinalizeBlock(params)
- res, err := cli.client.FinalizeBlock(ctx, req.GetFinalizeBlock(), grpc.WaitForReady(true))
- if err != nil {
- return nil, err
- }
- return cli.finishAsyncCall(
- ctx,
- req,
- &types.Response{Value: &types.Response_FinalizeBlock{FinalizeBlock: res}},
- )
- }
// finishAsyncCall creates a ReqRes for an async call, and immediately populates it
// with the response. We don't complete it until it's been ordered via the channel.
func (cli *grpcClient) finishAsyncCall(ctx context.Context, req *types.Request, res *types.Response) (*ReqRes, error) {
@@ -366,6 +380,18 @@ func (cli *grpcClient) InfoSync(
return cli.finishSyncCall(reqres).GetInfo(), cli.Error()
}
+ func (cli *grpcClient) DeliverTxSync(
+ ctx context.Context,
+ params types.RequestDeliverTx,
+ ) (*types.ResponseDeliverTx, error) {
+ reqres, err := cli.DeliverTxAsync(ctx, params)
+ if err != nil {
+ return nil, err
+ }
+ return cli.finishSyncCall(reqres).GetDeliverTx(), cli.Error()
+ }
func (cli *grpcClient) CheckTxSync(
ctx context.Context,
params types.RequestCheckTx,
@@ -409,6 +435,30 @@ func (cli *grpcClient) InitChainSync(
return cli.finishSyncCall(reqres).GetInitChain(), cli.Error()
}
+ func (cli *grpcClient) BeginBlockSync(
+ ctx context.Context,
+ params types.RequestBeginBlock,
+ ) (*types.ResponseBeginBlock, error) {
+ reqres, err := cli.BeginBlockAsync(ctx, params)
+ if err != nil {
+ return nil, err
+ }
+ return cli.finishSyncCall(reqres).GetBeginBlock(), cli.Error()
+ }
+ func (cli *grpcClient) EndBlockSync(
+ ctx context.Context,
+ params types.RequestEndBlock,
+ ) (*types.ResponseEndBlock, error) {
+ reqres, err := cli.EndBlockAsync(ctx, params)
+ if err != nil {
+ return nil, err
+ }
+ return cli.finishSyncCall(reqres).GetEndBlock(), cli.Error()
+ }
func (cli *grpcClient) ListSnapshotsSync(
ctx context.Context,
params types.RequestListSnapshots,
@@ -454,14 +504,3 @@ func (cli *grpcClient) ApplySnapshotChunkSync(
}
return cli.finishSyncCall(reqres).GetApplySnapshotChunk(), cli.Error()
}
- func (cli *grpcClient) FinalizeBlockSync(
- ctx context.Context,
- params types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
- reqres, err := cli.FinalizeBlockAsync(ctx, params)
- if err != nil {
- return nil, err
- }
- return cli.finishSyncCall(reqres).GetFinalizeBlock(), cli.Error()
- }
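The `finishAsyncCall` comment above captures the trick this client uses: the "async" API returns a `ReqRes` future that is already populated, and waiters simply block until it is marked done. The head commit's message ("libs/clist: omit channels and use waitgroups") leans on the same primitive, so here is a stripped-down sketch of a one-shot future built on `sync.WaitGroup` (a hypothetical type, not the real `ReqRes`):

```go
package app

import "sync"

// future is a one-shot result holder in the spirit of ReqRes:
// the producer sets the value once, consumers block on wait.
type future struct {
	wg  sync.WaitGroup
	res interface{}
}

func newFuture() *future {
	f := &future{}
	f.wg.Add(1) // released exactly once by set
	return f
}

func (f *future) set(v interface{}) {
	f.res = v
	f.wg.Done() // wakes every waiter; no channel needed
}

func (f *future) wait() interface{} {
	f.wg.Wait()
	return f.res
}
```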


@@ -4,8 +4,8 @@ import (
"context"
types "github.com/tendermint/tendermint/abci/types"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
"github.com/tendermint/tendermint/libs/service"
tmsync "github.com/tendermint/tendermint/libs/sync"
)
// NOTE: use defer to unlock mutex because Application might panic (e.g., in
@@ -77,6 +77,17 @@ func (app *localClient) InfoAsync(ctx context.Context, req types.RequestInfo) (*
), nil
}
+ func (app *localClient) DeliverTxAsync(ctx context.Context, params types.RequestDeliverTx) (*ReqRes, error) {
+ app.mtx.Lock()
+ defer app.mtx.Unlock()
+ res := app.Application.DeliverTx(params)
+ return app.callback(
+ types.ToRequestDeliverTx(params),
+ types.ToResponseDeliverTx(res),
+ ), nil
+ }
func (app *localClient) CheckTxAsync(ctx context.Context, req types.RequestCheckTx) (*ReqRes, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
@@ -121,6 +132,28 @@ func (app *localClient) InitChainAsync(ctx context.Context, req types.RequestIni
), nil
}
+ func (app *localClient) BeginBlockAsync(ctx context.Context, req types.RequestBeginBlock) (*ReqRes, error) {
+ app.mtx.Lock()
+ defer app.mtx.Unlock()
+ res := app.Application.BeginBlock(req)
+ return app.callback(
+ types.ToRequestBeginBlock(req),
+ types.ToResponseBeginBlock(res),
+ ), nil
+ }
+ func (app *localClient) EndBlockAsync(ctx context.Context, req types.RequestEndBlock) (*ReqRes, error) {
+ app.mtx.Lock()
+ defer app.mtx.Unlock()
+ res := app.Application.EndBlock(req)
+ return app.callback(
+ types.ToRequestEndBlock(req),
+ types.ToResponseEndBlock(res),
+ ), nil
+ }
func (app *localClient) ListSnapshotsAsync(ctx context.Context, req types.RequestListSnapshots) (*ReqRes, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
@@ -171,20 +204,6 @@ func (app *localClient) ApplySnapshotChunkAsync(
), nil
}
- func (app *localClient) FinalizeBlockAsync(
- ctx context.Context,
- req types.RequestFinalizeBlock,
- ) (*ReqRes, error) {
- app.mtx.Lock()
- defer app.mtx.Unlock()
- res := app.Application.FinalizeBlock(req)
- return app.callback(
- types.ToRequestFinalizeBlock(req),
- types.ToResponseFinalizeBlock(res),
- ), nil
- }
//-------------------------------------------------------
func (app *localClient) FlushSync(ctx context.Context) error {
@@ -203,6 +222,18 @@ func (app *localClient) InfoSync(ctx context.Context, req types.RequestInfo) (*t
return &res, nil
}
+ func (app *localClient) DeliverTxSync(
+ ctx context.Context,
+ req types.RequestDeliverTx,
+ ) (*types.ResponseDeliverTx, error) {
+ app.mtx.Lock()
+ defer app.mtx.Unlock()
+ res := app.Application.DeliverTx(req)
+ return &res, nil
+ }
func (app *localClient) CheckTxSync(
ctx context.Context,
req types.RequestCheckTx,
@@ -245,6 +276,30 @@ func (app *localClient) InitChainSync(
return &res, nil
}
+ func (app *localClient) BeginBlockSync(
+ ctx context.Context,
+ req types.RequestBeginBlock,
+ ) (*types.ResponseBeginBlock, error) {
+ app.mtx.Lock()
+ defer app.mtx.Unlock()
+ res := app.Application.BeginBlock(req)
+ return &res, nil
+ }
+ func (app *localClient) EndBlockSync(
+ ctx context.Context,
+ req types.RequestEndBlock,
+ ) (*types.ResponseEndBlock, error) {
+ app.mtx.Lock()
+ defer app.mtx.Unlock()
+ res := app.Application.EndBlock(req)
+ return &res, nil
+ }
func (app *localClient) ListSnapshotsSync(
ctx context.Context,
req types.RequestListSnapshots,
@@ -291,17 +346,6 @@ func (app *localClient) ApplySnapshotChunkSync(
return &res, nil
}
- func (app *localClient) FinalizeBlockSync(
- ctx context.Context,
- req types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
- app.mtx.Lock()
- defer app.mtx.Unlock()
- res := app.Application.FinalizeBlock(req)
- return &res, nil
- }
//-------------------------------------------------------
func (app *localClient) callback(req *types.Request, res *types.Response) *ReqRes {
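The `NOTE` near the top of this file's diff explains why every `localClient` method locks with `defer`: if the `Application` panics mid-call, a bare `Unlock` placed after the call would never run and the client would deadlock. A minimal sketch of the difference, with hypothetical names:

```go
package app

import "sync"

type App struct{ mtx sync.Mutex }

// Bad: if work panics, Unlock is skipped and every later
// caller of either method blocks forever.
func (a *App) bad(work func()) {
	a.mtx.Lock()
	work()
	a.mtx.Unlock()
}

// Good: defer runs during panic unwinding, so the mutex is
// released even when work panics (the localClient pattern).
func (a *App) good(work func()) {
	a.mtx.Lock()
	defer a.mtx.Unlock()
	work()
}
```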


@@ -1,4 +1,4 @@
- // Code generated by mockery 2.9.0. DO NOT EDIT.
+ // Code generated by mockery v2.5.1. DO NOT EDIT.
package mocks
@@ -65,6 +65,52 @@ func (_m *Client) ApplySnapshotChunkSync(_a0 context.Context, _a1 types.RequestA
return r0, r1
}
+ // BeginBlockAsync provides a mock function with given fields: _a0, _a1
+ func (_m *Client) BeginBlockAsync(_a0 context.Context, _a1 types.RequestBeginBlock) (*abcicli.ReqRes, error) {
+ ret := _m.Called(_a0, _a1)
+ var r0 *abcicli.ReqRes
+ if rf, ok := ret.Get(0).(func(context.Context, types.RequestBeginBlock) *abcicli.ReqRes); ok {
+ r0 = rf(_a0, _a1)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*abcicli.ReqRes)
+ }
+ }
+ var r1 error
+ if rf, ok := ret.Get(1).(func(context.Context, types.RequestBeginBlock) error); ok {
+ r1 = rf(_a0, _a1)
+ } else {
+ r1 = ret.Error(1)
+ }
+ return r0, r1
+ }
+ // BeginBlockSync provides a mock function with given fields: _a0, _a1
+ func (_m *Client) BeginBlockSync(_a0 context.Context, _a1 types.RequestBeginBlock) (*types.ResponseBeginBlock, error) {
+ ret := _m.Called(_a0, _a1)
+ var r0 *types.ResponseBeginBlock
+ if rf, ok := ret.Get(0).(func(context.Context, types.RequestBeginBlock) *types.ResponseBeginBlock); ok {
+ r0 = rf(_a0, _a1)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*types.ResponseBeginBlock)
+ }
+ }
+ var r1 error
+ if rf, ok := ret.Get(1).(func(context.Context, types.RequestBeginBlock) error); ok {
+ r1 = rf(_a0, _a1)
+ } else {
+ r1 = ret.Error(1)
+ }
+ return r0, r1
+ }
// CheckTxAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) CheckTxAsync(_a0 context.Context, _a1 types.RequestCheckTx) (*abcicli.ReqRes, error) {
ret := _m.Called(_a0, _a1)
@@ -157,6 +203,52 @@ func (_m *Client) CommitSync(_a0 context.Context) (*types.ResponseCommit, error)
return r0, r1
}
+ // DeliverTxAsync provides a mock function with given fields: _a0, _a1
+ func (_m *Client) DeliverTxAsync(_a0 context.Context, _a1 types.RequestDeliverTx) (*abcicli.ReqRes, error) {
+ ret := _m.Called(_a0, _a1)
+ var r0 *abcicli.ReqRes
+ if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *abcicli.ReqRes); ok {
+ r0 = rf(_a0, _a1)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*abcicli.ReqRes)
+ }
+ }
+ var r1 error
+ if rf, ok := ret.Get(1).(func(context.Context, types.RequestDeliverTx) error); ok {
+ r1 = rf(_a0, _a1)
+ } else {
+ r1 = ret.Error(1)
+ }
+ return r0, r1
+ }
+ // DeliverTxSync provides a mock function with given fields: _a0, _a1
+ func (_m *Client) DeliverTxSync(_a0 context.Context, _a1 types.RequestDeliverTx) (*types.ResponseDeliverTx, error) {
+ ret := _m.Called(_a0, _a1)
+ var r0 *types.ResponseDeliverTx
+ if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *types.ResponseDeliverTx); ok {
+ r0 = rf(_a0, _a1)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*types.ResponseDeliverTx)
+ }
+ }
+ var r1 error
+ if rf, ok := ret.Get(1).(func(context.Context, types.RequestDeliverTx) error); ok {
+ r1 = rf(_a0, _a1)
+ } else {
+ r1 = ret.Error(1)
+ }
+ return r0, r1
+ }
// EchoAsync provides a mock function with given fields: ctx, msg
func (_m *Client) EchoAsync(ctx context.Context, msg string) (*abcicli.ReqRes, error) {
ret := _m.Called(ctx, msg)
@@ -203,6 +295,52 @@ func (_m *Client) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho
return r0, r1
}
+ // EndBlockAsync provides a mock function with given fields: _a0, _a1
+ func (_m *Client) EndBlockAsync(_a0 context.Context, _a1 types.RequestEndBlock) (*abcicli.ReqRes, error) {
+ ret := _m.Called(_a0, _a1)
+ var r0 *abcicli.ReqRes
+ if rf, ok := ret.Get(0).(func(context.Context, types.RequestEndBlock) *abcicli.ReqRes); ok {
+ r0 = rf(_a0, _a1)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*abcicli.ReqRes)
+ }
+ }
+ var r1 error
+ if rf, ok := ret.Get(1).(func(context.Context, types.RequestEndBlock) error); ok {
+ r1 = rf(_a0, _a1)
+ } else {
+ r1 = ret.Error(1)
+ }
+ return r0, r1
+ }
+ // EndBlockSync provides a mock function with given fields: _a0, _a1
+ func (_m *Client) EndBlockSync(_a0 context.Context, _a1 types.RequestEndBlock) (*types.ResponseEndBlock, error) {
+ ret := _m.Called(_a0, _a1)
+ var r0 *types.ResponseEndBlock
+ if rf, ok := ret.Get(0).(func(context.Context, types.RequestEndBlock) *types.ResponseEndBlock); ok {
+ r0 = rf(_a0, _a1)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*types.ResponseEndBlock)
+ }
+ }
+ var r1 error
+ if rf, ok := ret.Get(1).(func(context.Context, types.RequestEndBlock) error); ok {
+ r1 = rf(_a0, _a1)
+ } else {
+ r1 = ret.Error(1)
+ }
+ return r0, r1
+ }
// Error provides a mock function with given fields:
func (_m *Client) Error() error {
ret := _m.Called()
@@ -217,52 +355,6 @@ func (_m *Client) Error() error {
return r0
}
- // FinalizeBlockAsync provides a mock function with given fields: _a0, _a1
- func (_m *Client) FinalizeBlockAsync(_a0 context.Context, _a1 types.RequestFinalizeBlock) (*abcicli.ReqRes, error) {
- ret := _m.Called(_a0, _a1)
- var r0 *abcicli.ReqRes
- if rf, ok := ret.Get(0).(func(context.Context, types.RequestFinalizeBlock) *abcicli.ReqRes); ok {
- r0 = rf(_a0, _a1)
- } else {
- if ret.Get(0) != nil {
- r0 = ret.Get(0).(*abcicli.ReqRes)
- }
- }
- var r1 error
- if rf, ok := ret.Get(1).(func(context.Context, types.RequestFinalizeBlock) error); ok {
- r1 = rf(_a0, _a1)
- } else {
- r1 = ret.Error(1)
- }
- return r0, r1
- }
- // FinalizeBlockSync provides a mock function with given fields: _a0, _a1
- func (_m *Client) FinalizeBlockSync(_a0 context.Context, _a1 types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
- ret := _m.Called(_a0, _a1)
- var r0 *types.ResponseFinalizeBlock
- if rf, ok := ret.Get(0).(func(context.Context, types.RequestFinalizeBlock) *types.ResponseFinalizeBlock); ok {
- r0 = rf(_a0, _a1)
- } else {
- if ret.Get(0) != nil {
- r0 = ret.Get(0).(*types.ResponseFinalizeBlock)
- }
- }
- var r1 error
- if rf, ok := ret.Get(1).(func(context.Context, types.RequestFinalizeBlock) error); ok {
- r1 = rf(_a0, _a1)
- } else {
- r1 = ret.Error(1)
- }
- return r0, r1
- }
// FlushAsync provides a mock function with given fields: _a0
func (_m *Client) FlushAsync(_a0 context.Context) (*abcicli.ReqRes, error) {
ret := _m.Called(_a0)
@@ -704,8 +796,3 @@ func (_m *Client) String() string {
return r0
}
// Wait provides a mock function with given fields:
func (_m *Client) Wait() {
_m.Called()
}
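The methods above are mockery-generated, so a test can stub any ABCI call through testify's expectation machinery before exercising code that depends on the Client interface. A minimal sketch, assuming the generated package lives at abci/client/mocks (the import path is an assumption, not shown in this diff):

package example_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	"github.com/tendermint/tendermint/abci/client/mocks" // assumed import path
	"github.com/tendermint/tendermint/abci/types"
)

func TestStubbedDeliverTx(t *testing.T) {
	m := new(mocks.Client)
	// Any context and any request return an OK response; ret.Get/ret.Error
	// in the generated method bodies above unpack these two values.
	m.On("DeliverTxSync", mock.Anything, mock.Anything).
		Return(&types.ResponseDeliverTx{Code: 0}, nil)

	res, err := m.DeliverTxSync(context.Background(), types.RequestDeliverTx{Tx: []byte("tx")})
	require.NoError(t, err)
	require.EqualValues(t, 0, res.Code)
	m.AssertExpectations(t)
}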

View File

@@ -12,10 +12,10 @@ import (
"time"
"github.com/tendermint/tendermint/abci/types"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
"github.com/tendermint/tendermint/internal/libs/timer"
tmnet "github.com/tendermint/tendermint/libs/net"
"github.com/tendermint/tendermint/libs/service"
tmsync "github.com/tendermint/tendermint/libs/sync"
"github.com/tendermint/tendermint/libs/timer"
)
const (
@@ -245,6 +245,10 @@ func (cli *socketClient) InfoAsync(ctx context.Context, req types.RequestInfo) (
return cli.queueRequestAsync(ctx, types.ToRequestInfo(req))
}
func (cli *socketClient) DeliverTxAsync(ctx context.Context, req types.RequestDeliverTx) (*ReqRes, error) {
return cli.queueRequestAsync(ctx, types.ToRequestDeliverTx(req))
}
func (cli *socketClient) CheckTxAsync(ctx context.Context, req types.RequestCheckTx) (*ReqRes, error) {
return cli.queueRequestAsync(ctx, types.ToRequestCheckTx(req))
}
@@ -261,6 +265,14 @@ func (cli *socketClient) InitChainAsync(ctx context.Context, req types.RequestIn
return cli.queueRequestAsync(ctx, types.ToRequestInitChain(req))
}
func (cli *socketClient) BeginBlockAsync(ctx context.Context, req types.RequestBeginBlock) (*ReqRes, error) {
return cli.queueRequestAsync(ctx, types.ToRequestBeginBlock(req))
}
func (cli *socketClient) EndBlockAsync(ctx context.Context, req types.RequestEndBlock) (*ReqRes, error) {
return cli.queueRequestAsync(ctx, types.ToRequestEndBlock(req))
}
func (cli *socketClient) ListSnapshotsAsync(ctx context.Context, req types.RequestListSnapshots) (*ReqRes, error) {
return cli.queueRequestAsync(ctx, types.ToRequestListSnapshots(req))
}
@@ -283,13 +295,6 @@ func (cli *socketClient) ApplySnapshotChunkAsync(
return cli.queueRequestAsync(ctx, types.ToRequestApplySnapshotChunk(req))
}
func (cli *socketClient) FinalizeBlockAsync(
ctx context.Context,
req types.RequestFinalizeBlock,
) (*ReqRes, error) {
return cli.queueRequestAsync(ctx, types.ToRequestFinalizeBlock(req))
}
//----------------------------------------
func (cli *socketClient) FlushSync(ctx context.Context) error {
@@ -336,6 +341,18 @@ func (cli *socketClient) InfoSync(
return reqres.Response.GetInfo(), nil
}
func (cli *socketClient) DeliverTxSync(
ctx context.Context,
req types.RequestDeliverTx,
) (*types.ResponseDeliverTx, error) {
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestDeliverTx(req))
if err != nil {
return nil, err
}
return reqres.Response.GetDeliverTx(), nil
}
func (cli *socketClient) CheckTxSync(
ctx context.Context,
req types.RequestCheckTx,
@@ -378,6 +395,30 @@ func (cli *socketClient) InitChainSync(
return reqres.Response.GetInitChain(), nil
}
func (cli *socketClient) BeginBlockSync(
ctx context.Context,
req types.RequestBeginBlock,
) (*types.ResponseBeginBlock, error) {
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestBeginBlock(req))
if err != nil {
return nil, err
}
return reqres.Response.GetBeginBlock(), nil
}
func (cli *socketClient) EndBlockSync(
ctx context.Context,
req types.RequestEndBlock,
) (*types.ResponseEndBlock, error) {
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestEndBlock(req))
if err != nil {
return nil, err
}
return reqres.Response.GetEndBlock(), nil
}
func (cli *socketClient) ListSnapshotsSync(
ctx context.Context,
req types.RequestListSnapshots,
@@ -424,17 +465,6 @@ func (cli *socketClient) ApplySnapshotChunkSync(
return reqres.Response.GetApplySnapshotChunk(), nil
}
func (cli *socketClient) FinalizeBlockSync(
ctx context.Context,
req types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestFinalizeBlock(req))
if err != nil {
return nil, err
}
return reqres.Response.GetFinalizeBlock(), nil
}
//----------------------------------------
// queueRequest enqueues req onto the queue. If the queue is full, it either
@@ -539,6 +569,8 @@ func resMatchesReq(req *types.Request, res *types.Response) (ok bool) {
_, ok = res.Value.(*types.Response_Flush)
case *types.Request_Info:
_, ok = res.Value.(*types.Response_Info)
case *types.Request_DeliverTx:
_, ok = res.Value.(*types.Response_DeliverTx)
case *types.Request_CheckTx:
_, ok = res.Value.(*types.Response_CheckTx)
case *types.Request_Commit:
@@ -547,6 +579,10 @@ func resMatchesReq(req *types.Request, res *types.Response) (ok bool) {
_, ok = res.Value.(*types.Response_Query)
case *types.Request_InitChain:
_, ok = res.Value.(*types.Response_InitChain)
case *types.Request_BeginBlock:
_, ok = res.Value.(*types.Response_BeginBlock)
case *types.Request_EndBlock:
_, ok = res.Value.(*types.Response_EndBlock)
case *types.Request_ApplySnapshotChunk:
_, ok = res.Value.(*types.Response_ApplySnapshotChunk)
case *types.Request_LoadSnapshotChunk:
@@ -555,8 +591,6 @@ func resMatchesReq(req *types.Request, res *types.Response) (ok bool) {
_, ok = res.Value.(*types.Response_ListSnapshots)
case *types.Request_OfferSnapshot:
_, ok = res.Value.(*types.Response_OfferSnapshot)
case *types.Request_FinalizeBlock:
_, ok = res.Value.(*types.Response_FinalizeBlock)
}
return ok
}
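Taken together, the restored *Sync methods all follow the same pattern: queue the request, flush the connection, then read the typed field off the response. A minimal usage sketch, not part of this diff — the address and transaction bytes are placeholders:

package main

import (
	"context"
	"log"

	abcicli "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/types"
)

func main() {
	// "socket" selects the socketClient shown above; mustConnect=false lets
	// Start keep retrying the dial instead of failing immediately.
	client, err := abcicli.NewClient("tcp://127.0.0.1:26658", "socket", false)
	if err != nil {
		log.Fatal(err)
	}
	if err := client.Start(); err != nil {
		log.Fatal(err)
	}
	defer func() { _ = client.Stop() }()

	// DeliverTxSync = queueRequestAndFlushSync + Response.GetDeliverTx, as above.
	res, err := client.DeliverTxSync(context.Background(), types.RequestDeliverTx{Tx: []byte("key=value")})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("code=%d log=%q", res.Code, res.Log)
}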

View File

@@ -37,11 +37,11 @@ func TestProperSyncCalls(t *testing.T) {
resp := make(chan error, 1)
go func() {
// This is BeginBlockSync unrolled....
reqres, err := c.FinalizeBlockAsync(ctx, types.RequestFinalizeBlock{})
reqres, err := c.BeginBlockAsync(ctx, types.RequestBeginBlock{})
assert.NoError(t, err)
err = c.FlushSync(context.Background())
assert.NoError(t, err)
res := reqres.Response.GetFinalizeBlock()
res := reqres.Response.GetBeginBlock()
assert.NotNil(t, res)
resp <- c.Error()
}()
@@ -73,7 +73,7 @@ func TestHangingSyncCalls(t *testing.T) {
resp := make(chan error, 1)
go func() {
// Start BeginBlock and flush it
reqres, err := c.FinalizeBlockAsync(ctx, types.RequestFinalizeBlock{})
reqres, err := c.BeginBlockAsync(ctx, types.RequestBeginBlock{})
assert.NoError(t, err)
flush, err := c.FlushAsync(ctx)
assert.NoError(t, err)
@@ -84,7 +84,7 @@ func TestHangingSyncCalls(t *testing.T) {
err = s.Stop()
assert.NoError(t, err)
// wait for the response from FinalizeBlock
// wait for the response from BeginBlock
reqres.Wait()
flush.Wait()
resp <- c.Error()
@@ -121,7 +121,7 @@ type slowApp struct {
types.BaseApplication
}
func (slowApp) FinalizeBlock(req types.RequestFinalizeBlock) types.ResponseFinalizeBlock {
func (slowApp) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock {
time.Sleep(200 * time.Millisecond)
return types.ResponseFinalizeBlock{}
return types.ResponseBeginBlock{}
}

View File

@@ -68,9 +68,12 @@ var RootCmd = &cobra.Command{
}
if logger == nil {
logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
allowLevel, err := log.AllowLevel(flagLogLevel)
if err != nil {
return err
}
logger = log.NewFilter(log.NewTMLogger(log.NewSyncWriter(os.Stdout)), allowLevel)
}
if client == nil {
var err error
client, err = abcicli.NewClient(flagAddress, flagAbci, false)
@@ -504,18 +507,16 @@ func cmdDeliverTx(cmd *cobra.Command, args []string) error {
if err != nil {
return err
}
res, err := client.FinalizeBlockSync(ctx, types.RequestFinalizeBlock{Txs: [][]byte{txBytes}})
res, err := client.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: txBytes})
if err != nil {
return err
}
for _, tx := range res.Txs {
printResponse(cmd, args, response{
Code: tx.Code,
Data: tx.Data,
Info: tx.Info,
Log: tx.Log,
})
}
printResponse(cmd, args, response{
Code: res.Code,
Data: res.Data,
Info: res.Info,
Log: res.Log,
})
return nil
}
@@ -597,7 +598,7 @@ func cmdQuery(cmd *cobra.Command, args []string) error {
func cmdCounter(cmd *cobra.Command, args []string) error {
app := counter.NewApplication(flagSerial)
logger := log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
// Start the listener
srv, err := server.NewServer(flagAddress, flagAbci, app)
@@ -622,7 +623,7 @@ func cmdCounter(cmd *cobra.Command, args []string) error {
}
func cmdKVStore(cmd *cobra.Command, args []string) error {
logger := log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
// Create the application - in memory or persisted to disk
var app types.Application

View File

@@ -24,30 +24,24 @@ func (app *Application) Info(req types.RequestInfo) types.ResponseInfo {
return types.ResponseInfo{Data: fmt.Sprintf("{\"hashes\":%v,\"txs\":%v}", app.hashCount, app.txCount)}
}
func (app *Application) FinalizeBlock(req types.RequestFinalizeBlock) types.ResponseFinalizeBlock {
func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
if app.serial {
for _, tx := range req.Txs {
if len(tx) > 8 {
return types.ResponseFinalizeBlock{Txs: []*types.ResponseDeliverTx{{
Code: code.CodeTypeEncodingError,
Log: fmt.Sprintf("Max tx size is 8 bytes, got %d", len(tx))}},
}
}
tx8 := make([]byte, 8)
copy(tx8[len(tx8)-len(tx):], tx)
txValue := binary.BigEndian.Uint64(tx8)
if txValue != uint64(app.txCount) {
return types.ResponseFinalizeBlock{
Txs: []*types.ResponseDeliverTx{{
Code: code.CodeTypeBadNonce,
Log: fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.txCount, txValue)}},
}
}
if len(req.Tx) > 8 {
return types.ResponseDeliverTx{
Code: code.CodeTypeEncodingError,
Log: fmt.Sprintf("Max tx size is 8 bytes, got %d", len(req.Tx))}
}
tx8 := make([]byte, 8)
copy(tx8[len(tx8)-len(req.Tx):], req.Tx)
txValue := binary.BigEndian.Uint64(tx8)
if txValue != uint64(app.txCount) {
return types.ResponseDeliverTx{
Code: code.CodeTypeBadNonce,
Log: fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.txCount, txValue)}
}
}
app.txCount++
return types.ResponseFinalizeBlock{Txs: []*types.ResponseDeliverTx{{Code: code.CodeTypeOK}}}
return types.ResponseDeliverTx{Code: code.CodeTypeOK}
}
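The serial check above right-pads each tx into 8 bytes and interprets the result as a big-endian counter, so a client must send the big-endian bytes of the next expected count. A sketch of that encoding, using a hypothetical helper that is not part of the app:

package main

import (
	"encoding/binary"
	"fmt"
)

// encodeNonceTx returns the 8-byte big-endian encoding of the next count.
// Shorter, zero-trimmed forms (e.g. {0x00, 0x02}) also pass, because
// DeliverTx copies the tx into the low-order bytes of an 8-byte buffer.
func encodeNonceTx(nextCount uint64) []byte {
	buf := make([]byte, 8)
	binary.BigEndian.PutUint64(buf, nextCount)
	return buf
}

func main() {
	fmt.Printf("%x\n", encodeNonceTx(5)) // 0000000000000005
}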
func (app *Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {

View File

@@ -76,22 +76,20 @@ func testStream(t *testing.T, app types.Application) {
client.SetResponseCallback(func(req *types.Request, res *types.Response) {
// Process response
switch r := res.Value.(type) {
case *types.Response_FinalizeBlock:
for _, tx := range r.FinalizeBlock.Txs {
counter++
if tx.Code != code.CodeTypeOK {
t.Error("DeliverTx failed with ret_code", tx.Code)
}
if counter > numDeliverTxs {
t.Fatalf("Too many DeliverTx responses. Got %d, expected %d", counter, numDeliverTxs)
}
if counter == numDeliverTxs {
go func() {
time.Sleep(time.Second * 1) // Wait for a bit to allow counter overflow
close(done)
}()
return
}
case *types.Response_DeliverTx:
counter++
if r.DeliverTx.Code != code.CodeTypeOK {
t.Error("DeliverTx failed with ret_code", r.DeliverTx.Code)
}
if counter > numDeliverTxs {
t.Fatalf("Too many DeliverTx responses. Got %d, expected %d", counter, numDeliverTxs)
}
if counter == numDeliverTxs {
go func() {
time.Sleep(time.Second * 1) // Wait for a bit to allow counter overflow
close(done)
}()
return
}
case *types.Response_Flush:
// ignore
@@ -105,8 +103,7 @@ func testStream(t *testing.T, app types.Application) {
// Write requests
for counter := 0; counter < numDeliverTxs; counter++ {
// Send request
tx := []byte("test")
_, err = client.FinalizeBlockAsync(ctx, types.RequestFinalizeBlock{Txs: [][]byte{tx}})
_, err = client.DeliverTxAsync(ctx, types.RequestDeliverTx{Tx: []byte("test")})
require.NoError(t, err)
// Sometimes send flush messages
@@ -166,25 +163,22 @@ func testGRPCSync(t *testing.T, app types.ABCIApplicationServer) {
// Write requests
for counter := 0; counter < numDeliverTxs; counter++ {
// Send request
txt := []byte("test")
response, err := client.FinalizeBlock(context.Background(), &types.RequestFinalizeBlock{Txs: [][]byte{txt}})
response, err := client.DeliverTx(context.Background(), &types.RequestDeliverTx{Tx: []byte("test")})
if err != nil {
t.Fatalf("Error in GRPC DeliverTx: %v", err.Error())
}
counter++
for _, tx := range response.Txs {
if tx.Code != code.CodeTypeOK {
t.Error("DeliverTx failed with ret_code", tx.Code)
}
if counter > numDeliverTxs {
t.Fatal("Too many DeliverTx responses")
}
t.Log("response", counter)
if counter == numDeliverTxs {
go func() {
time.Sleep(time.Second * 1) // Wait for a bit to allow counter overflow
}()
}
if response.Code != code.CodeTypeOK {
t.Error("DeliverTx failed with ret_code", response.Code)
}
if counter > numDeliverTxs {
t.Fatal("Too many DeliverTx responses")
}
t.Log("response", counter)
if counter == numDeliverTxs {
go func() {
time.Sleep(time.Second * 1) // Wait for a bit to allow counter overflow
}()
}
}

View File

@@ -86,40 +86,35 @@ func (app *Application) Info(req types.RequestInfo) (resInfo types.ResponseInfo)
}
// tx is either "key=value" or just arbitrary bytes
func (app *Application) FinalizeBlock(req types.RequestFinalizeBlock) types.ResponseFinalizeBlock {
func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
var key, value string
var txs = make([]*types.ResponseDeliverTx, len(req.Txs))
for i, tx := range req.Txs {
parts := bytes.Split(tx, []byte("="))
if len(parts) == 2 {
key, value = string(parts[0]), string(parts[1])
} else {
key, value = string(tx), string(tx)
}
err := app.state.db.Set(prefixKey([]byte(key)), []byte(value))
if err != nil {
panic(err)
}
app.state.Size++
events := []types.Event{
{
Type: "app",
Attributes: []types.EventAttribute{
{Key: "creator", Value: "Cosmoshi Netowoko", Index: true},
{Key: "key", Value: key, Index: true},
{Key: "index_key", Value: "index is working", Index: true},
{Key: "noindex_key", Value: "index is working", Index: false},
},
},
}
txs[i] = &types.ResponseDeliverTx{Code: code.CodeTypeOK, Events: events}
parts := bytes.Split(req.Tx, []byte("="))
if len(parts) == 2 {
key, value = string(parts[0]), string(parts[1])
} else {
key, value = string(req.Tx), string(req.Tx)
}
return types.ResponseFinalizeBlock{Txs: txs}
err := app.state.db.Set(prefixKey([]byte(key)), []byte(value))
if err != nil {
panic(err)
}
app.state.Size++
events := []types.Event{
{
Type: "app",
Attributes: []types.EventAttribute{
{Key: "creator", Value: "Cosmoshi Netowoko", Index: true},
{Key: "key", Value: key, Index: true},
{Key: "index_key", Value: "index is working", Index: true},
{Key: "noindex_key", Value: "index is working", Index: false},
},
},
}
return types.ResponseDeliverTx{Code: code.CodeTypeOK, Events: events}
}
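Since DeliverTx again takes one tx at a time, the "key=value" convention can be exercised directly against the application. A sketch, assuming kvstore.NewApplication() constructs the in-memory app:

package main

import (
	"fmt"

	"github.com/tendermint/tendermint/abci/example/kvstore"
	"github.com/tendermint/tendermint/abci/types"
)

func main() {
	app := kvstore.NewApplication()

	// "name=satoshi" splits on "=" into key "name" and value "satoshi";
	// a tx without "=" stores the raw bytes under themselves.
	res := app.DeliverTx(types.RequestDeliverTx{Tx: []byte("name=satoshi")})
	fmt.Println(res.Code) // code.CodeTypeOK, carrying the "app" events shown above

	app.Commit() // persists the state and advances the app hash
}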
func (app *Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {

View File

@@ -27,16 +27,12 @@ const (
var ctx = context.Background()
func testKVStore(t *testing.T, app types.Application, tx []byte, key, value string) {
req := types.RequestFinalizeBlock{Txs: [][]byte{tx}}
ar := app.FinalizeBlock(req)
for _, tx := range ar.Txs {
require.False(t, tx.IsErr(), ar)
}
req := types.RequestDeliverTx{Tx: tx}
ar := app.DeliverTx(req)
require.False(t, ar.IsErr(), ar)
// repeating tx doesn't raise error
ar = app.FinalizeBlock(req)
for _, tx := range ar.Txs {
require.False(t, tx.IsErr(), ar)
}
ar = app.DeliverTx(req)
require.False(t, ar.IsErr(), ar)
// commit
app.Commit()
@@ -113,7 +109,8 @@ func TestPersistentKVStoreInfo(t *testing.T) {
header := tmproto.Header{
Height: height,
}
kvstore.FinalizeBlock(types.RequestFinalizeBlock{Hash: hash, Header: header})
kvstore.BeginBlock(types.RequestBeginBlock{Hash: hash, Header: header})
kvstore.EndBlock(types.RequestEndBlock{Height: header.Height})
kvstore.Commit()
resInfo = kvstore.Info(types.RequestInfo{})
@@ -203,15 +200,16 @@ func makeApplyBlock(
Height: height,
}
resFinalizeBlock := kvstore.FinalizeBlock(types.RequestFinalizeBlock{
Hash: hash,
Header: header,
Txs: txs,
})
kvstore.BeginBlock(types.RequestBeginBlock{Hash: hash, Header: header})
for _, tx := range txs {
if r := kvstore.DeliverTx(types.RequestDeliverTx{Tx: tx}); r.IsErr() {
t.Fatal(r)
}
}
resEndBlock := kvstore.EndBlock(types.RequestEndBlock{Height: header.Height})
kvstore.Commit()
valsEqual(t, diff, resFinalizeBlock.ValidatorUpdates)
valsEqual(t, diff, resEndBlock.ValidatorUpdates)
}
@@ -328,16 +326,13 @@ func runClientTests(t *testing.T, client abcicli.Client) {
}
func testClient(t *testing.T, app abcicli.Client, tx []byte, key, value string) {
ar, err := app.FinalizeBlockSync(ctx, types.RequestFinalizeBlock{Txs: [][]byte{tx}})
for _, tx := range ar.Txs {
require.False(t, tx.IsErr(), ar)
}
ar, err = app.FinalizeBlockSync(ctx, types.RequestFinalizeBlock{Txs: [][]byte{tx}}) // repeating tx doesn't raise error
ar, err := app.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: tx})
require.NoError(t, err)
for _, tx := range ar.Txs {
require.False(t, tx.IsErr(), ar)
}
require.False(t, ar.IsErr(), ar)
// repeating tx doesn't raise error
ar, err = app.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: tx})
require.NoError(t, err)
require.False(t, ar.IsErr(), ar)
// commit
_, err = app.CommitSync(ctx)
require.NoError(t, err)

View File

@@ -66,19 +66,19 @@ func (app *PersistentKVStoreApplication) Info(req types.RequestInfo) types.Respo
return res
}
// // tx is either "val:pubkey!power" or "key=value" or just arbitrary bytes
// func (app *PersistentKVStoreApplication) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
// // if it starts with "val:", update the validator set
// // format is "val:pubkey!power"
// if isValidatorTx(req.Tx) {
// // update validators in the merkle tree
// // and in app.ValUpdates
// return app.execValidatorTx(req.Tx)
// }
// tx is either "val:pubkey!power" or "key=value" or just arbitrary bytes
func (app *PersistentKVStoreApplication) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
// if it starts with "val:", update the validator set
// format is "val:pubkey!power"
if isValidatorTx(req.Tx) {
// update validators in the merkle tree
// and in app.ValUpdates
return app.execValidatorTx(req.Tx)
}
// // otherwise, update the key-value store
// return app.app.DeliverTx(req)
// }
// otherwise, update the key-value store
return app.app.DeliverTx(req)
}
func (app *PersistentKVStoreApplication) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
return app.app.CheckTx(req)
@@ -119,40 +119,8 @@ func (app *PersistentKVStoreApplication) InitChain(req types.RequestInitChain) t
return types.ResponseInitChain{}
}
func (app *PersistentKVStoreApplication) ListSnapshots(
req types.RequestListSnapshots) types.ResponseListSnapshots {
return types.ResponseListSnapshots{}
}
func (app *PersistentKVStoreApplication) LoadSnapshotChunk(
req types.RequestLoadSnapshotChunk) types.ResponseLoadSnapshotChunk {
return types.ResponseLoadSnapshotChunk{}
}
func (app *PersistentKVStoreApplication) OfferSnapshot(
req types.RequestOfferSnapshot) types.ResponseOfferSnapshot {
return types.ResponseOfferSnapshot{Result: types.ResponseOfferSnapshot_ABORT}
}
func (app *PersistentKVStoreApplication) ApplySnapshotChunk(
req types.RequestApplySnapshotChunk) types.ResponseApplySnapshotChunk {
return types.ResponseApplySnapshotChunk{Result: types.ResponseApplySnapshotChunk_ABORT}
}
func (app *PersistentKVStoreApplication) FinalizeBlock(
req types.RequestFinalizeBlock) types.ResponseFinalizeBlock {
// for i, tx := range req.Txs {
// // if it starts with "val:", update the validator set
// // format is "val:pubkey!power"
// if isValidatorTx(tx) {
// // update validators in the merkle tree
// // and in app.ValUpdates
// return app.execValidatorTx(req.Tx)
// }
// // otherwise, update the key-value store
// return app.app.DeliverTx(tx)
// }
// Track the block hash and header information
func (app *PersistentKVStoreApplication) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock {
// reset valset changes
app.ValUpdates = make([]types.ValidatorUpdate, 0)
@@ -174,7 +142,32 @@ func (app *PersistentKVStoreApplication) FinalizeBlock(
}
}
return types.ResponseFinalizeBlock{ValidatorUpdates: app.ValUpdates}
return types.ResponseBeginBlock{}
}
// Update the validator set
func (app *PersistentKVStoreApplication) EndBlock(req types.RequestEndBlock) types.ResponseEndBlock {
return types.ResponseEndBlock{ValidatorUpdates: app.ValUpdates}
}
func (app *PersistentKVStoreApplication) ListSnapshots(
req types.RequestListSnapshots) types.ResponseListSnapshots {
return types.ResponseListSnapshots{}
}
func (app *PersistentKVStoreApplication) LoadSnapshotChunk(
req types.RequestLoadSnapshotChunk) types.ResponseLoadSnapshotChunk {
return types.ResponseLoadSnapshotChunk{}
}
func (app *PersistentKVStoreApplication) OfferSnapshot(
req types.RequestOfferSnapshot) types.ResponseOfferSnapshot {
return types.ResponseOfferSnapshot{Result: types.ResponseOfferSnapshot_ABORT}
}
func (app *PersistentKVStoreApplication) ApplySnapshotChunk(
req types.RequestApplySnapshotChunk) types.ResponseApplySnapshotChunk {
return types.ResponseApplySnapshotChunk{Result: types.ResponseApplySnapshotChunk_ABORT}
}
//---------------------------------------------
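For reference, the "val:pubkey!power" layout named in the comment above can be assembled mechanically. A sketch with a hypothetical helper; the base64 encoding of the pubkey is an assumption about the parser, not something this diff shows:

package main

import "fmt"

// makeValTx assembles a validator-update tx in the "val:pubkey!power"
// layout. pubkeyB64 is assumed to be the base64-encoded public key.
func makeValTx(pubkeyB64 string, power int64) []byte {
	return []byte(fmt.Sprintf("val:%s!%d", pubkeyB64, power))
}

func main() {
	fmt.Printf("%s\n", makeValTx("RmFrZVB1YktleQ==", 10))
}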

View File

@@ -9,10 +9,10 @@ import (
"runtime"
"github.com/tendermint/tendermint/abci/types"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
tmlog "github.com/tendermint/tendermint/libs/log"
tmnet "github.com/tendermint/tendermint/libs/net"
"github.com/tendermint/tendermint/libs/service"
tmsync "github.com/tendermint/tendermint/libs/sync"
)
// var maxNumberConnections = 2
@@ -200,6 +200,9 @@ func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types
case *types.Request_Info:
res := s.app.Info(*r.Info)
responses <- types.ToResponseInfo(res)
case *types.Request_DeliverTx:
res := s.app.DeliverTx(*r.DeliverTx)
responses <- types.ToResponseDeliverTx(res)
case *types.Request_CheckTx:
res := s.app.CheckTx(*r.CheckTx)
responses <- types.ToResponseCheckTx(res)
@@ -212,6 +215,12 @@ func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types
case *types.Request_InitChain:
res := s.app.InitChain(*r.InitChain)
responses <- types.ToResponseInitChain(res)
case *types.Request_BeginBlock:
res := s.app.BeginBlock(*r.BeginBlock)
responses <- types.ToResponseBeginBlock(res)
case *types.Request_EndBlock:
res := s.app.EndBlock(*r.EndBlock)
responses <- types.ToResponseEndBlock(res)
case *types.Request_ListSnapshots:
res := s.app.ListSnapshots(*r.ListSnapshots)
responses <- types.ToResponseListSnapshots(res)
@@ -224,9 +233,6 @@ func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types
case *types.Request_ApplySnapshotChunk:
res := s.app.ApplySnapshotChunk(*r.ApplySnapshotChunk)
responses <- types.ToResponseApplySnapshotChunk(res)
case *types.Request_FinalizeBlock:
res := s.app.FinalizeBlock(*r.FinalizeBlock)
responses <- types.ToResponseFinalizeBlock(res)
default:
responses <- types.ToResponseException("Unknown request")
}
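Serving an application through this dispatch loop goes via server.NewServer, exactly as abci-cli does above. A minimal sketch; the address is a placeholder:

package main

import (
	"log"

	"github.com/tendermint/tendermint/abci/example/kvstore"
	"github.com/tendermint/tendermint/abci/server"
)

func main() {
	// "socket" selects the SocketServer whose handleRequest switch is shown above.
	srv, err := server.NewServer("tcp://127.0.0.1:26658", "socket", kvstore.NewApplication())
	if err != nil {
		log.Fatal(err)
	}
	if err := srv.Start(); err != nil {
		log.Fatal(err)
	}
	defer func() { _ = srv.Stop() }()
	select {} // block forever; a real binary would trap signals instead
}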

View File

@@ -51,22 +51,20 @@ func Commit(client abcicli.Client, hashExp []byte) error {
return nil
}
func FinalizeBlock(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
res, _ := client.FinalizeBlockSync(ctx, types.RequestFinalizeBlock{Txs: [][]byte{txBytes}})
for _, tx := range res.Txs {
code, data, log := tx.Code, tx.Data, tx.Log
if code != codeExp {
fmt.Println("Failed test: DeliverTx")
fmt.Printf("DeliverTx response code was unexpected. Got %v expected %v. Log: %v\n",
code, codeExp, log)
return errors.New("deliverTx error")
}
if !bytes.Equal(data, dataExp) {
fmt.Println("Failed test: DeliverTx")
fmt.Printf("DeliverTx response data was unexpected. Got %X expected %X\n",
data, dataExp)
return errors.New("deliverTx error")
}
func DeliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
res, _ := client.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: txBytes})
code, data, log := res.Code, res.Data, res.Log
if code != codeExp {
fmt.Println("Failed test: DeliverTx")
fmt.Printf("DeliverTx response code was unexpected. Got %v expected %v. Log: %v\n",
code, codeExp, log)
return errors.New("deliverTx error")
}
if !bytes.Equal(data, dataExp) {
fmt.Println("Failed test: DeliverTx")
fmt.Printf("DeliverTx response data was unexpected. Got %X expected %X\n",
data, dataExp)
return errors.New("deliverTx error")
}
fmt.Println("Passed test: DeliverTx")
return nil

View File

@@ -4,6 +4,7 @@ import (
"bytes"
"context"
"fmt"
"os"
abcicli "github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/abci/types"
@@ -18,7 +19,7 @@ func startClient(abciType string) abcicli.Client {
if err != nil {
panic(err.Error())
}
logger := log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
client.SetLogger(logger.With("module", "abcicli"))
if err := client.Start(); err != nil {
panicf("connecting to abci_app: %v", err.Error())
@@ -37,29 +38,16 @@ func commit(client abcicli.Client, hashExp []byte) {
}
}
type tx struct {
Data []byte
CodeExp uint32
DataExp []byte
}
func finalizeBlock(client abcicli.Client, txs []tx) {
var txsData = make([][]byte, len(txs))
for i, tx := range txs {
txsData[i] = tx.Data
}
res, err := client.FinalizeBlockSync(ctx, types.RequestFinalizeBlock{Txs: txsData})
func deliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) {
res, err := client.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: txBytes})
if err != nil {
panicf("client error: %v", err)
}
for i, tx := range res.Txs {
if tx.Code != txs[i].CodeExp {
panicf("DeliverTx response code was unexpected. Got %v expected %v. Log: %v", tx.Code, txs[i].CodeExp, tx.Log)
}
if !bytes.Equal(tx.Data, txs[i].DataExp) {
panicf("DeliverTx response data was unexpected. Got %X expected %X", tx.Data, txs[i].DataExp)
}
if res.Code != codeExp {
panicf("DeliverTx response code was unexpected. Got %v expected %v. Log: %v", res.Code, codeExp, res.Log)
}
if !bytes.Equal(res.Data, dataExp) {
panicf("DeliverTx response data was unexpected. Got %X expected %X", res.Data, dataExp)
}
}

View File

@@ -81,15 +81,13 @@ func testCounter() {
// commit(client, nil)
// deliverTx(client, []byte("abc"), code.CodeTypeBadNonce, nil)
commit(client, nil)
finalizeBlock(client, []tx{{Data: []byte{0x00}, CodeExp: types.CodeTypeOK, DataExp: nil}})
deliverTx(client, []byte{0x00}, types.CodeTypeOK, nil)
commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 1})
// deliverTx(client, []byte{0x00}, code.CodeTypeBadNonce, nil)
txs := []tx{
{Data: []byte{0x01}, DataExp: nil, CodeExp: types.CodeTypeOK},
{Data: []byte{0x00, 0x02}, DataExp: nil, CodeExp: types.CodeTypeOK},
{Data: []byte{0x00, 0x03}, DataExp: nil, CodeExp: types.CodeTypeOK},
{Data: []byte{0x00, 0x00, 0x04}, DataExp: nil, CodeExp: types.CodeTypeOK}}
finalizeBlock(client, txs)
deliverTx(client, []byte{0x01}, types.CodeTypeOK, nil)
deliverTx(client, []byte{0x00, 0x02}, types.CodeTypeOK, nil)
deliverTx(client, []byte{0x00, 0x03}, types.CodeTypeOK, nil)
deliverTx(client, []byte{0x00, 0x00, 0x04}, types.CodeTypeOK, nil)
// deliverTx(client, []byte{0x00, 0x00, 0x06}, code.CodeTypeBadNonce, nil)
commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 5})
}

View File

@@ -17,9 +17,11 @@ type Application interface {
CheckTx(RequestCheckTx) ResponseCheckTx // Validate a tx for the mempool
// Consensus Connection
InitChain(RequestInitChain) ResponseInitChain // Initialize blockchain w validators/other info from TendermintCore
FinalizeBlock(RequestFinalizeBlock) ResponseFinalizeBlock
Commit() ResponseCommit // Commit the state and return the application Merkle root hash
InitChain(RequestInitChain) ResponseInitChain // Initialize blockchain w validators/other info from TendermintCore
BeginBlock(RequestBeginBlock) ResponseBeginBlock // Signals the beginning of a block
DeliverTx(RequestDeliverTx) ResponseDeliverTx // Deliver a tx for full processing
EndBlock(RequestEndBlock) ResponseEndBlock // Signals the end of a block, returns changes to the validator set
Commit() ResponseCommit // Commit the state and return the application Merkle root hash
// State Sync Connection
ListSnapshots(RequestListSnapshots) ResponseListSnapshots // List available snapshots
@@ -44,6 +46,10 @@ func (BaseApplication) Info(req RequestInfo) ResponseInfo {
return ResponseInfo{}
}
func (BaseApplication) DeliverTx(req RequestDeliverTx) ResponseDeliverTx {
return ResponseDeliverTx{Code: CodeTypeOK}
}
func (BaseApplication) CheckTx(req RequestCheckTx) ResponseCheckTx {
return ResponseCheckTx{Code: CodeTypeOK}
}
@@ -60,6 +66,14 @@ func (BaseApplication) InitChain(req RequestInitChain) ResponseInitChain {
return ResponseInitChain{}
}
func (BaseApplication) BeginBlock(req RequestBeginBlock) ResponseBeginBlock {
return ResponseBeginBlock{}
}
func (BaseApplication) EndBlock(req RequestEndBlock) ResponseEndBlock {
return ResponseEndBlock{}
}
func (BaseApplication) ListSnapshots(req RequestListSnapshots) ResponseListSnapshots {
return ResponseListSnapshots{}
}
@@ -76,10 +90,6 @@ func (BaseApplication) ApplySnapshotChunk(req RequestApplySnapshotChunk) Respons
return ResponseApplySnapshotChunk{}
}
func (BaseApplication) FinalizeBlock(req RequestFinalizeBlock) ResponseFinalizeBlock {
return ResponseFinalizeBlock{}
}
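With BeginBlock, DeliverTx and EndBlock back on the interface, a custom application can embed BaseApplication and override only the methods it cares about, as slowApp does in the socket client tests above. A minimal sketch with a hypothetical app type:

package myapp

import "github.com/tendermint/tendermint/abci/types"

// App embeds BaseApplication, inheriting the no-op defaults above,
// and overrides only DeliverTx.
type App struct {
	types.BaseApplication
}

func (App) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
	if len(req.Tx) == 0 {
		// 1 is an illustrative non-OK code, not one defined in this diff.
		return types.ResponseDeliverTx{Code: 1, Log: "empty tx"}
	}
	return types.ResponseDeliverTx{Code: types.CodeTypeOK}
}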
//-------------------------------------------------------
// GRPCApplication is a GRPC wrapper for Application
@@ -104,6 +114,11 @@ func (app *GRPCApplication) Info(ctx context.Context, req *RequestInfo) (*Respon
return &res, nil
}
func (app *GRPCApplication) DeliverTx(ctx context.Context, req *RequestDeliverTx) (*ResponseDeliverTx, error) {
res := app.app.DeliverTx(*req)
return &res, nil
}
func (app *GRPCApplication) CheckTx(ctx context.Context, req *RequestCheckTx) (*ResponseCheckTx, error) {
res := app.app.CheckTx(*req)
return &res, nil
@@ -124,6 +139,16 @@ func (app *GRPCApplication) InitChain(ctx context.Context, req *RequestInitChain
return &res, nil
}
func (app *GRPCApplication) BeginBlock(ctx context.Context, req *RequestBeginBlock) (*ResponseBeginBlock, error) {
res := app.app.BeginBlock(*req)
return &res, nil
}
func (app *GRPCApplication) EndBlock(ctx context.Context, req *RequestEndBlock) (*ResponseEndBlock, error) {
res := app.app.EndBlock(*req)
return &res, nil
}
func (app *GRPCApplication) ListSnapshots(
ctx context.Context, req *RequestListSnapshots) (*ResponseListSnapshots, error) {
res := app.app.ListSnapshots(*req)
@@ -147,9 +172,3 @@ func (app *GRPCApplication) ApplySnapshotChunk(
res := app.app.ApplySnapshotChunk(*req)
return &res, nil
}
func (app *GRPCApplication) FinalizeBlock(
ctx context.Context, req *RequestFinalizeBlock) (*ResponseFinalizeBlock, error) {
res := app.app.FinalizeBlock(*req)
return &res, nil
}

View File

@@ -4,7 +4,7 @@ import (
"io"
"github.com/gogo/protobuf/proto"
"github.com/tendermint/tendermint/internal/libs/protoio"
"github.com/tendermint/tendermint/libs/protoio"
)
const (
@@ -48,6 +48,12 @@ func ToRequestInfo(req RequestInfo) *Request {
}
}
func ToRequestDeliverTx(req RequestDeliverTx) *Request {
return &Request{
Value: &Request_DeliverTx{&req},
}
}
func ToRequestCheckTx(req RequestCheckTx) *Request {
return &Request{
Value: &Request_CheckTx{&req},
@@ -72,6 +78,18 @@ func ToRequestInitChain(req RequestInitChain) *Request {
}
}
func ToRequestBeginBlock(req RequestBeginBlock) *Request {
return &Request{
Value: &Request_BeginBlock{&req},
}
}
func ToRequestEndBlock(req RequestEndBlock) *Request {
return &Request{
Value: &Request_EndBlock{&req},
}
}
func ToRequestListSnapshots(req RequestListSnapshots) *Request {
return &Request{
Value: &Request_ListSnapshots{&req},
@@ -96,12 +114,6 @@ func ToRequestApplySnapshotChunk(req RequestApplySnapshotChunk) *Request {
}
}
func ToRequestFinalizeBlock(req RequestFinalizeBlock) *Request {
return &Request{
Value: &Request_FinalizeBlock{&req},
}
}
//----------------------------------------
func ToResponseException(errStr string) *Response {
@@ -127,6 +139,11 @@ func ToResponseInfo(res ResponseInfo) *Response {
Value: &Response_Info{&res},
}
}
func ToResponseDeliverTx(res ResponseDeliverTx) *Response {
return &Response{
Value: &Response_DeliverTx{&res},
}
}
func ToResponseCheckTx(res ResponseCheckTx) *Response {
return &Response{
@@ -152,6 +169,18 @@ func ToResponseInitChain(res ResponseInitChain) *Response {
}
}
func ToResponseBeginBlock(res ResponseBeginBlock) *Response {
return &Response{
Value: &Response_BeginBlock{&res},
}
}
func ToResponseEndBlock(res ResponseEndBlock) *Response {
return &Response{
Value: &Response_EndBlock{&res},
}
}
func ToResponseListSnapshots(res ResponseListSnapshots) *Response {
return &Response{
Value: &Response_ListSnapshots{&res},
@@ -175,9 +204,3 @@ func ToResponseApplySnapshotChunk(res ResponseApplySnapshotChunk) *Response {
Value: &Response_ApplySnapshotChunk{&res},
}
}
func ToResponseFinalizeBlock(res ResponseFinalizeBlock) *Response {
return &Response{
Value: &Response_FinalizeBlock{&res},
}
}
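Each To* helper wraps a concrete message in the Request/Response oneof, which is what the socket protocol writes on the wire and what resMatchesReq pairs back up. A sketch of the wrapping:

package myapp

import "github.com/tendermint/tendermint/abci/types"

// wrapAndInspect shows the shape ToRequestDeliverTx produces: the concrete
// message ends up as the oneof's Request_DeliverTx variant.
func wrapAndInspect(tx []byte) bool {
	req := types.ToRequestDeliverTx(types.RequestDeliverTx{Tx: tx})
	_, ok := req.Value.(*types.Request_DeliverTx)
	return ok // always true; resMatchesReq does the mirror check on responses
}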

File diff suppressed because it is too large

View File

@@ -7,10 +7,11 @@ import (
"sync/atomic"
"time"
flow "github.com/tendermint/tendermint/internal/libs/flowrate"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
flow "github.com/tendermint/tendermint/libs/flowrate"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/service"
tmsync "github.com/tendermint/tendermint/libs/sync"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/types"
)
@@ -62,7 +63,7 @@ var peerTimeout = 15 * time.Second // not const so we can override with tests
// PeerID responsible for delivering the block.
type BlockRequest struct {
Height int64
PeerID types.NodeID
PeerID p2p.NodeID
}
// BlockPool keeps track of the fast sync peers, block requests and block responses.
@@ -75,7 +76,7 @@ type BlockPool struct {
requesters map[int64]*bpRequester
height int64 // the lowest key in requesters.
// peers
peers map[types.NodeID]*bpPeer
peers map[p2p.NodeID]*bpPeer
maxPeerHeight int64 // the biggest reported height
// atomic
@@ -89,7 +90,7 @@ type BlockPool struct {
// requests and errors will be sent to requestsCh and errorsCh accordingly.
func NewBlockPool(start int64, requestsCh chan<- BlockRequest, errorsCh chan<- peerError) *BlockPool {
bp := &BlockPool{
peers: make(map[types.NodeID]*bpPeer),
peers: make(map[p2p.NodeID]*bpPeer),
requesters: make(map[int64]*bpRequester),
height: start,
@@ -224,13 +225,13 @@ func (pool *BlockPool) PopRequest() {
// RedoRequest invalidates the block at pool.height,
// removes the peer, and redoes the request from other peers.
// Returns the ID of the removed peer.
func (pool *BlockPool) RedoRequest(height int64) types.NodeID {
func (pool *BlockPool) RedoRequest(height int64) p2p.NodeID {
pool.mtx.Lock()
defer pool.mtx.Unlock()
request := pool.requesters[height]
peerID := request.getPeerID()
if peerID != types.NodeID("") {
if peerID != p2p.NodeID("") {
// RemovePeer will redo all requesters associated with this peer.
pool.removePeer(peerID)
}
@@ -239,7 +240,7 @@ func (pool *BlockPool) RedoRequest(height int64) types.NodeID {
// AddBlock validates that the block comes from the peer it was expected from and calls the requester to store it.
// TODO: ensure that blocks come in order for each peer.
func (pool *BlockPool) AddBlock(peerID types.NodeID, block *types.Block, blockSize int) {
func (pool *BlockPool) AddBlock(peerID p2p.NodeID, block *types.Block, blockSize int) {
pool.mtx.Lock()
defer pool.mtx.Unlock()
@@ -286,7 +287,7 @@ func (pool *BlockPool) LastAdvance() time.Time {
}
// SetPeerRange sets the peer's alleged blockchain base and height.
func (pool *BlockPool) SetPeerRange(peerID types.NodeID, base int64, height int64) {
func (pool *BlockPool) SetPeerRange(peerID p2p.NodeID, base int64, height int64) {
pool.mtx.Lock()
defer pool.mtx.Unlock()
@@ -307,14 +308,14 @@ func (pool *BlockPool) SetPeerRange(peerID types.NodeID, base int64, height int6
// RemovePeer removes the peer with peerID from the pool. If there's no peer
// with peerID, function is a no-op.
func (pool *BlockPool) RemovePeer(peerID types.NodeID) {
func (pool *BlockPool) RemovePeer(peerID p2p.NodeID) {
pool.mtx.Lock()
defer pool.mtx.Unlock()
pool.removePeer(peerID)
}
func (pool *BlockPool) removePeer(peerID types.NodeID) {
func (pool *BlockPool) removePeer(peerID p2p.NodeID) {
for _, requester := range pool.requesters {
if requester.getPeerID() == peerID {
requester.redo(peerID)
@@ -395,14 +396,14 @@ func (pool *BlockPool) requestersLen() int64 {
return int64(len(pool.requesters))
}
func (pool *BlockPool) sendRequest(height int64, peerID types.NodeID) {
func (pool *BlockPool) sendRequest(height int64, peerID p2p.NodeID) {
if !pool.IsRunning() {
return
}
pool.requestsCh <- BlockRequest{height, peerID}
}
func (pool *BlockPool) sendError(err error, peerID types.NodeID) {
func (pool *BlockPool) sendError(err error, peerID p2p.NodeID) {
if !pool.IsRunning() {
return
}
@@ -436,7 +437,7 @@ type bpPeer struct {
height int64
base int64
pool *BlockPool
id types.NodeID
id p2p.NodeID
recvMonitor *flow.Monitor
timeout *time.Timer
@@ -444,7 +445,7 @@ type bpPeer struct {
logger log.Logger
}
func newBPPeer(pool *BlockPool, peerID types.NodeID, base int64, height int64) *bpPeer {
func newBPPeer(pool *BlockPool, peerID p2p.NodeID, base int64, height int64) *bpPeer {
peer := &bpPeer{
pool: pool,
id: peerID,
@@ -509,10 +510,10 @@ type bpRequester struct {
pool *BlockPool
height int64
gotBlockCh chan struct{}
redoCh chan types.NodeID // redo may be sent multiple times; the peerID identifies repeats
redoCh chan p2p.NodeID // redo may be sent multiple times; the peerID identifies repeats
mtx tmsync.Mutex
peerID types.NodeID
peerID p2p.NodeID
block *types.Block
}
@@ -521,7 +522,7 @@ func newBPRequester(pool *BlockPool, height int64) *bpRequester {
pool: pool,
height: height,
gotBlockCh: make(chan struct{}, 1),
redoCh: make(chan types.NodeID, 1),
redoCh: make(chan p2p.NodeID, 1),
peerID: "",
block: nil,
@@ -536,7 +537,7 @@ func (bpr *bpRequester) OnStart() error {
}
// Returns true if the peer matches and block doesn't already exist.
func (bpr *bpRequester) setBlock(block *types.Block, peerID types.NodeID) bool {
func (bpr *bpRequester) setBlock(block *types.Block, peerID p2p.NodeID) bool {
bpr.mtx.Lock()
if bpr.block != nil || bpr.peerID != peerID {
bpr.mtx.Unlock()
@@ -558,7 +559,7 @@ func (bpr *bpRequester) getBlock() *types.Block {
return bpr.block
}
func (bpr *bpRequester) getPeerID() types.NodeID {
func (bpr *bpRequester) getPeerID() p2p.NodeID {
bpr.mtx.Lock()
defer bpr.mtx.Unlock()
return bpr.peerID
@@ -580,7 +581,7 @@ func (bpr *bpRequester) reset() {
// Tells bpRequester to pick another peer and try again.
// NOTE: Nonblocking, and does nothing if another redo
// was already requested.
func (bpr *bpRequester) redo(peerID types.NodeID) {
func (bpr *bpRequester) redo(peerID p2p.NodeID) {
select {
case bpr.redoCh <- peerID:
default:
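The pool's wiring is easiest to see from the reactor's side: the reactor owns the two outbound channels and hands them to NewBlockPool, then selects on them. A sketch in the pool's own package, using a hypothetical helper; the channel capacities are illustrative:

// newWiredPool is a hypothetical helper living alongside BlockPool. Block
// requests and peer errors flow out on the returned channels, mirroring the
// requestsCh/errorsCh setup in the reactor.
func newWiredPool(startHeight int64) (*BlockPool, <-chan BlockRequest, <-chan peerError) {
	requestsCh := make(chan BlockRequest, 1000) // capacity is illustrative
	errorsCh := make(chan peerError, 1000)      // should exceed the peer count
	pool := NewBlockPool(startHeight, requestsCh, errorsCh)
	return pool, requestsCh, errorsCh
}

A caller would then Start the pool, call SetPeerRange as peers report their status, and drain the two channels in a select loop.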

View File

@@ -11,6 +11,7 @@ import (
"github.com/tendermint/tendermint/libs/log"
tmrand "github.com/tendermint/tendermint/libs/rand"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/types"
)
@@ -19,7 +20,7 @@ func init() {
}
type testPeer struct {
id types.NodeID
id p2p.NodeID
base int64
height int64
inputChan chan inputData // make sure each peer's data is sequential
@@ -49,7 +50,7 @@ func (p testPeer) simulateInput(input inputData) {
// input.t.Logf("Added block from peer %v (height: %v)", input.request.PeerID, input.request.Height)
}
type testPeers map[types.NodeID]testPeer
type testPeers map[p2p.NodeID]testPeer
func (ps testPeers) start() {
for _, v := range ps {
@@ -66,7 +67,7 @@ func (ps testPeers) stop() {
func makePeers(numPeers int, minHeight, maxHeight int64) testPeers {
peers := make(testPeers, numPeers)
for i := 0; i < numPeers; i++ {
peerID := types.NodeID(tmrand.Str(12))
peerID := p2p.NodeID(tmrand.Str(12))
height := minHeight + mrand.Int63n(maxHeight-minHeight)
base := minHeight + int64(i)
if base > height {
@@ -182,7 +183,7 @@ func TestBlockPoolTimeout(t *testing.T) {
// Pull from channels
counter := 0
timedOut := map[types.NodeID]struct{}{}
timedOut := map[p2p.NodeID]struct{}{}
for {
select {
case err := <-errorsCh:
@@ -203,7 +204,7 @@ func TestBlockPoolTimeout(t *testing.T) {
func TestBlockPoolRemovePeer(t *testing.T) {
peers := make(testPeers, 10)
for i := 0; i < 10; i++ {
peerID := types.NodeID(fmt.Sprintf("%d", i+1))
peerID := p2p.NodeID(fmt.Sprintf("%d", i+1))
height := int64(i + 1)
peers[peerID] = testPeer{peerID, 0, height, make(chan inputData)}
}
@@ -227,10 +228,10 @@ func TestBlockPoolRemovePeer(t *testing.T) {
assert.EqualValues(t, 10, pool.MaxPeerHeight())
// remove not-existing peer
assert.NotPanics(t, func() { pool.RemovePeer(types.NodeID("Superman")) })
assert.NotPanics(t, func() { pool.RemovePeer(p2p.NodeID("Superman")) })
// remove peer with biggest height
pool.RemovePeer(types.NodeID("10"))
pool.RemovePeer(p2p.NodeID("10"))
assert.EqualValues(t, 9, pool.MaxPeerHeight())
// remove all peers

View File

@@ -5,11 +5,10 @@ import (
"sync"
"time"
bc "github.com/tendermint/tendermint/internal/blockchain"
cons "github.com/tendermint/tendermint/internal/consensus"
"github.com/tendermint/tendermint/internal/p2p"
bc "github.com/tendermint/tendermint/blockchain"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/service"
"github.com/tendermint/tendermint/p2p"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/store"
@@ -33,9 +32,10 @@ var (
ID: byte(BlockchainChannel),
Priority: 5,
SendQueueCapacity: 1000,
RecvBufferCapacity: 1024,
RecvBufferCapacity: 50 * 4096,
RecvMessageCapacity: bc.MaxMsgSize,
MaxSendBytes: 100,
MaxSendBytes: 100,
},
},
}
@@ -65,7 +65,7 @@ type consensusReactor interface {
type peerError struct {
err error
peerID types.NodeID
peerID p2p.NodeID
}
func (e peerError) Error() string {
@@ -85,10 +85,9 @@ type Reactor struct {
consReactor consensusReactor
fastSync bool
blockchainCh *p2p.Channel
peerUpdates *p2p.PeerUpdates
peerUpdatesCh chan p2p.Envelope
closeCh chan struct{}
blockchainCh *p2p.Channel
peerUpdates *p2p.PeerUpdates
closeCh chan struct{}
requestsCh <-chan BlockRequest
errorsCh <-chan peerError
@@ -97,8 +96,6 @@ type Reactor struct {
// requestRoutine spawned goroutines when stopping the reactor and before
// stopping the p2p Channel(s).
poolWG sync.WaitGroup
metrics *cons.Metrics
}
// NewReactor returns new reactor instance.
@@ -111,7 +108,6 @@ func NewReactor(
blockchainCh *p2p.Channel,
peerUpdates *p2p.PeerUpdates,
fastSync bool,
metrics *cons.Metrics,
) (*Reactor, error) {
if state.LastBlockHeight != store.Height() {
return nil, fmt.Errorf("state (%v) and store (%v) height mismatch", state.LastBlockHeight, store.Height())
@@ -126,19 +122,17 @@ func NewReactor(
errorsCh := make(chan peerError, maxPeerErrBuffer) // NOTE: The capacity should be larger than the peer count.
r := &Reactor{
initialState: state,
blockExec: blockExec,
store: store,
pool: NewBlockPool(startHeight, requestsCh, errorsCh),
consReactor: consReactor,
fastSync: fastSync,
requestsCh: requestsCh,
errorsCh: errorsCh,
blockchainCh: blockchainCh,
peerUpdates: peerUpdates,
peerUpdatesCh: make(chan p2p.Envelope),
closeCh: make(chan struct{}),
metrics: metrics,
initialState: state,
blockExec: blockExec,
store: store,
pool: NewBlockPool(startHeight, requestsCh, errorsCh),
consReactor: consReactor,
fastSync: fastSync,
requestsCh: requestsCh,
errorsCh: errorsCh,
blockchainCh: blockchainCh,
peerUpdates: peerUpdates,
closeCh: make(chan struct{}),
}
r.BaseService = *service.NewBaseService(logger, "Blockchain", r)
@@ -193,7 +187,7 @@ func (r *Reactor) OnStop() {
// respondToPeer loads a block and sends it to the requesting peer, if we have it.
// Otherwise, we'll respond saying we do not have it.
func (r *Reactor) respondToPeer(msg *bcproto.BlockRequest, peerID types.NodeID) {
func (r *Reactor) respondToPeer(msg *bcproto.BlockRequest, peerID p2p.NodeID) {
block := r.store.LoadBlock(msg.Height)
if block != nil {
blockProto, err := block.ToProto()
@@ -283,9 +277,9 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err
}
// processBlockchainCh initiates a blocking process where we listen for and handle
// envelopes on the BlockchainChannel and peerUpdatesCh. Any error encountered during
// message execution will result in a PeerError being sent on the BlockchainChannel.
// When the reactor is stopped, we will catch the signal and close the p2p Channel
// envelopes on the BlockchainChannel. Any error encountered during message
// execution will result in a PeerError being sent on the BlockchainChannel. When
// the reactor is stopped, we will catch the signal and close the p2p Channel
// gracefully.
func (r *Reactor) processBlockchainCh() {
defer r.blockchainCh.Close()
@@ -301,13 +295,9 @@ func (r *Reactor) processBlockchainCh() {
}
}
case envelop := <-r.peerUpdatesCh:
r.blockchainCh.Out <- envelop
case <-r.closeCh:
r.Logger.Debug("stopped listening on blockchain channel; closing...")
return
}
}
}
@@ -324,7 +314,7 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) {
switch peerUpdate.Status {
case p2p.PeerStatusUp:
// send a status update the newly added peer
r.peerUpdatesCh <- p2p.Envelope{
r.blockchainCh.Out <- p2p.Envelope{
To: peerUpdate.NodeID,
Message: &bcproto.StatusResponse{
Base: r.store.Base(),
@@ -558,14 +548,12 @@ FOR_LOOP:
// TODO: Same thing for app - but we would need a way to get the hash
// without persisting the state.
state, err = r.blockExec.ApplyBlock(state, firstID, first)
state, _, err = r.blockExec.ApplyBlock(state, firstID, first)
if err != nil {
// TODO: This is bad, are we zombie?
panic(fmt.Sprintf("failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
}
r.metrics.RecordConsMetrics(first)
blocksSynced++
if blocksSynced%100 == 0 {
@@ -588,7 +576,3 @@ FOR_LOOP:
}
}
}
func (r *Reactor) GetMaxPeerBlockHeight() int64 {
return r.pool.MaxPeerHeight()
}

View File

@@ -9,16 +9,14 @@ import (
abci "github.com/tendermint/tendermint/abci/types"
cfg "github.com/tendermint/tendermint/config"
cons "github.com/tendermint/tendermint/internal/consensus"
"github.com/tendermint/tendermint/internal/mempool/mock"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/internal/p2p/p2ptest"
"github.com/tendermint/tendermint/internal/test/factory"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/mempool/mock"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/p2p/p2ptest"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
sf "github.com/tendermint/tendermint/state/test/factory"
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
dbm "github.com/tendermint/tm-db"
@@ -27,14 +25,14 @@ import (
type reactorTestSuite struct {
network *p2ptest.Network
logger log.Logger
nodes []types.NodeID
nodes []p2p.NodeID
reactors map[types.NodeID]*Reactor
app map[types.NodeID]proxy.AppConns
reactors map[p2p.NodeID]*Reactor
app map[p2p.NodeID]proxy.AppConns
blockchainChannels map[types.NodeID]*p2p.Channel
peerChans map[types.NodeID]chan p2p.PeerUpdate
peerUpdates map[types.NodeID]*p2p.PeerUpdates
blockchainChannels map[p2p.NodeID]*p2p.Channel
peerChans map[p2p.NodeID]chan p2p.PeerUpdate
peerUpdates map[p2p.NodeID]*p2p.PeerUpdates
fastSync bool
}
@@ -55,12 +53,12 @@ func setup(
rts := &reactorTestSuite{
logger: log.TestingLogger().With("module", "blockchain", "testCase", t.Name()),
network: p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numNodes}),
nodes: make([]types.NodeID, 0, numNodes),
reactors: make(map[types.NodeID]*Reactor, numNodes),
app: make(map[types.NodeID]proxy.AppConns, numNodes),
blockchainChannels: make(map[types.NodeID]*p2p.Channel, numNodes),
peerChans: make(map[types.NodeID]chan p2p.PeerUpdate, numNodes),
peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, numNodes),
nodes: make([]p2p.NodeID, 0, numNodes),
reactors: make(map[p2p.NodeID]*Reactor, numNodes),
app: make(map[p2p.NodeID]proxy.AppConns, numNodes),
blockchainChannels: make(map[p2p.NodeID]*p2p.Channel, numNodes),
peerChans: make(map[p2p.NodeID]chan p2p.PeerUpdate, numNodes),
peerUpdates: make(map[p2p.NodeID]*p2p.PeerUpdates, numNodes),
fastSync: true,
}
@@ -89,7 +87,7 @@ func setup(
}
func (rts *reactorTestSuite) addNode(t *testing.T,
nodeID types.NodeID,
nodeID p2p.NodeID,
genDoc *types.GenesisDoc,
privVal types.PrivValidator,
maxBlockHeight int64,
@@ -105,9 +103,11 @@ func (rts *reactorTestSuite) addNode(t *testing.T,
stateStore := sm.NewStore(stateDB)
blockStore := store.NewBlockStore(blockDB)
state, err := sm.MakeGenesisState(genDoc)
state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
require.NoError(t, err)
require.NoError(t, stateStore.Save(state))
db := dbm.NewMemDB()
stateStore = sm.NewStore(db)
blockExec := sm.NewBlockExecutor(
stateStore,
@@ -115,8 +115,8 @@ func (rts *reactorTestSuite) addNode(t *testing.T,
rts.app[nodeID].Consensus(),
mock.Mempool{},
sm.EmptyEvidencePool{},
blockStore,
)
require.NoError(t, stateStore.Save(state))
for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ {
lastCommit := types.NewCommit(blockHeight-1, 0, types.BlockID{}, nil)
@@ -142,11 +142,11 @@ func (rts *reactorTestSuite) addNode(t *testing.T,
)
}
thisBlock := sf.MakeBlock(state, blockHeight, lastCommit)
thisBlock := makeBlock(blockHeight, state, lastCommit)
thisParts := thisBlock.MakePartSet(types.BlockPartSizeBytes)
blockID := types.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()}
state, err = blockExec.ApplyBlock(state, blockID, thisBlock)
state, _, err = blockExec.ApplyBlock(state, blockID, thisBlock)
require.NoError(t, err)
blockStore.SaveBlock(thisBlock, thisParts, lastCommit)
@@ -163,8 +163,7 @@ func (rts *reactorTestSuite) addNode(t *testing.T,
nil,
rts.blockchainChannels[nodeID],
rts.peerUpdates[nodeID],
rts.fastSync,
cons.NopMetrics())
rts.fastSync)
require.NoError(t, err)
require.NoError(t, rts.reactors[nodeID].Start())
@@ -212,7 +211,7 @@ func TestReactor_AbruptDisconnect(t *testing.T) {
Status: p2p.PeerStatusDown,
NodeID: rts.nodes[0],
}
rts.network.Nodes[rts.nodes[1]].PeerManager.Disconnected(rts.nodes[0])
require.NoError(t, rts.network.Nodes[rts.nodes[1]].PeerManager.Disconnected(rts.nodes[0]))
}
func TestReactor_NoBlockResponse(t *testing.T) {

View File

@@ -0,0 +1,18 @@
package v0
import (
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
)
func makeTxs(height int64) (txs []types.Tx) {
for i := 0; i < 10; i++ {
txs = append(txs, types.Tx([]byte{byte(height), byte(i)}))
}
return txs
}
func makeBlock(height int64, state sm.State, lastCommit *types.Commit) *types.Block {
block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, nil, state.Validators.GetProposer().Address)
return block
}

View File

@@ -1,12 +1,14 @@
package behavior
import "github.com/tendermint/tendermint/types"
import (
"github.com/tendermint/tendermint/p2p"
)
// PeerBehavior is a struct describing a behavior a peer performed.
// `peerID` identifies the peer and reason characterizes the specific
// behavior performed by the peer.
type PeerBehavior struct {
peerID types.NodeID
peerID p2p.NodeID
reason interface{}
}
@@ -15,7 +17,7 @@ type badMessage struct {
}
// BadMessage returns a badMessage PeerBehavior.
func BadMessage(peerID types.NodeID, explanation string) PeerBehavior {
func BadMessage(peerID p2p.NodeID, explanation string) PeerBehavior {
return PeerBehavior{peerID: peerID, reason: badMessage{explanation}}
}
@@ -24,7 +26,7 @@ type messageOutOfOrder struct {
}
// MessageOutOfOrder returns a messageOutOfOrder PeerBehavior.
func MessageOutOfOrder(peerID types.NodeID, explanation string) PeerBehavior {
func MessageOutOfOrder(peerID p2p.NodeID, explanation string) PeerBehavior {
return PeerBehavior{peerID: peerID, reason: messageOutOfOrder{explanation}}
}
@@ -33,7 +35,7 @@ type consensusVote struct {
}
// ConsensusVote returns a consensusVote PeerBehavior.
func ConsensusVote(peerID types.NodeID, explanation string) PeerBehavior {
func ConsensusVote(peerID p2p.NodeID, explanation string) PeerBehavior {
return PeerBehavior{peerID: peerID, reason: consensusVote{explanation}}
}
@@ -42,6 +44,6 @@ type blockPart struct {
}
// BlockPart returns a blockPart PeerBehavior.
func BlockPart(peerID types.NodeID, explanation string) PeerBehavior {
func BlockPart(peerID p2p.NodeID, explanation string) PeerBehavior {
return PeerBehavior{peerID: peerID, reason: blockPart{explanation}}
}
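These constructors pair with the Reporter interface in the next file; a sketch using the MockReporter shown there, with a placeholder peer ID. Note that internal packages import only from within the tendermint module, so this is illustrative rather than directly buildable elsewhere:

package myapp

import (
	"fmt"

	bh "github.com/tendermint/tendermint/blockchain/v2/internal/behavior"
)

// recordAndDump reports one behavior against the mock reporter, then
// reads it back by peer ID.
func recordAndDump() {
	pr := bh.NewMockReporter()
	_ = pr.Report(bh.BadMessage("peer1", "malformed block part"))
	for _, b := range pr.GetBehaviors("peer1") {
		fmt.Printf("%+v\n", b)
	}
}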

View File

@@ -3,9 +3,8 @@ package behavior
import (
"errors"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/types"
tmsync "github.com/tendermint/tendermint/libs/sync"
"github.com/tendermint/tendermint/p2p"
)
// Reporter provides an interface for reactors to report the behavior
@@ -52,14 +51,14 @@ func (spbr *SwitchReporter) Report(behavior PeerBehavior) error {
// behavior in manufactured scenarios.
type MockReporter struct {
mtx tmsync.RWMutex
pb map[types.NodeID][]PeerBehavior
pb map[p2p.NodeID][]PeerBehavior
}
// NewMockReporter returns a Reporter which records all reported
// behaviors in memory.
func NewMockReporter() *MockReporter {
return &MockReporter{
pb: map[types.NodeID][]PeerBehavior{},
pb: map[p2p.NodeID][]PeerBehavior{},
}
}
@@ -73,7 +72,7 @@ func (mpbr *MockReporter) Report(behavior PeerBehavior) error {
}
// GetBehaviors returns all behaviors reported on the peer identified by peerID.
func (mpbr *MockReporter) GetBehaviors(peerID types.NodeID) []PeerBehavior {
func (mpbr *MockReporter) GetBehaviors(peerID p2p.NodeID) []PeerBehavior {
mpbr.mtx.RLock()
defer mpbr.mtx.RUnlock()
if items, ok := mpbr.pb[peerID]; ok {

View File

@@ -4,14 +4,14 @@ import (
"sync"
"testing"
bh "github.com/tendermint/tendermint/internal/blockchain/v2/internal/behavior"
"github.com/tendermint/tendermint/types"
bh "github.com/tendermint/tendermint/blockchain/v2/internal/behavior"
"github.com/tendermint/tendermint/p2p"
)
// TestMockReporter tests the MockReporter's ability to store reported
// peer behavior in memory indexed by the peerID.
func TestMockReporter(t *testing.T) {
var peerID types.NodeID = "MockPeer"
var peerID p2p.NodeID = "MockPeer"
pr := bh.NewMockReporter()
behaviors := pr.GetBehaviors(peerID)
@@ -34,7 +34,7 @@ func TestMockReporter(t *testing.T) {
}
type scriptItem struct {
peerID types.NodeID
peerID p2p.NodeID
behavior bh.PeerBehavior
}
@@ -76,10 +76,10 @@ func equalBehaviors(a []bh.PeerBehavior, b []bh.PeerBehavior) bool {
// frequencies at which those behaviors occur.
func TestEqualPeerBehaviors(t *testing.T) {
var (
peerID types.NodeID = "MockPeer"
consensusVote = bh.ConsensusVote(peerID, "voted")
blockPart = bh.BlockPart(peerID, "blocked")
equals = []struct {
peerID p2p.NodeID = "MockPeer"
consensusVote = bh.ConsensusVote(peerID, "voted")
blockPart = bh.BlockPart(peerID, "blocked")
equals = []struct {
left []bh.PeerBehavior
right []bh.PeerBehavior
}{
@@ -128,7 +128,7 @@ func TestEqualPeerBehaviors(t *testing.T) {
func TestMockPeerBehaviorReporterConcurrency(t *testing.T) {
var (
behaviorScript = []struct {
peerID types.NodeID
peerID p2p.NodeID
behaviors []bh.PeerBehavior
}{
{"1", []bh.PeerBehavior{bh.ConsensusVote("1", "")}},

View File

@@ -4,7 +4,7 @@ import (
"errors"
"github.com/gogo/protobuf/proto"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/p2p"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
"github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"

View File

@@ -3,6 +3,7 @@ package v2
import (
"fmt"
"github.com/tendermint/tendermint/p2p"
tmState "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
)
@@ -12,8 +13,8 @@ import (
type pcBlockVerificationFailure struct {
priorityNormal
height int64
firstPeerID types.NodeID
secondPeerID types.NodeID
firstPeerID p2p.NodeID
secondPeerID p2p.NodeID
}
func (e pcBlockVerificationFailure) String() string {
@@ -25,7 +26,7 @@ func (e pcBlockVerificationFailure) String() string {
type pcBlockProcessed struct {
priorityNormal
height int64
peerID types.NodeID
peerID p2p.NodeID
}
func (e pcBlockProcessed) String() string {
@@ -45,7 +46,7 @@ func (p pcFinished) Error() string {
type queueItem struct {
block *types.Block
peerID types.NodeID
peerID p2p.NodeID
}
type blockQueue map[int64]queueItem
@@ -94,7 +95,7 @@ func (state *pcState) synced() bool {
return len(state.queue) <= 1
}
func (state *pcState) enqueue(peerID types.NodeID, block *types.Block, height int64) {
func (state *pcState) enqueue(peerID p2p.NodeID, block *types.Block, height int64) {
if item, ok := state.queue[height]; ok {
panic(fmt.Sprintf(
"duplicate block %d (%X) enqueued by processor (sent by %v; existing block %X from %v)",
@@ -109,7 +110,7 @@ func (state *pcState) height() int64 {
}
// purgePeer removes all unprocessed blocks sent by peerID from the queue
func (state *pcState) purgePeer(peerID types.NodeID) {
func (state *pcState) purgePeer(peerID p2p.NodeID) {
// what if height is less than state.height?
for height, item := range state.queue {
if item.peerID == peerID {
@@ -181,8 +182,6 @@ func (state *pcState) handle(event Event) (Event, error) {
panic(fmt.Sprintf("failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
}
state.context.recordConsMetrics(first)
delete(state.queue, first.Height)
state.blocksSynced++
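A self-contained sketch of the purge-by-peer semantics above, with plain strings standing in for p2p.NodeID and the *types.Block field elided.

package main

import "fmt"

type queueItem struct {
	height int64
	peerID string
}

// purgePeer mirrors pcState.purgePeer above: drop every queued,
// unprocessed block that was sent by the given peer.
func purgePeer(queue map[int64]queueItem, peerID string) {
	for height, item := range queue {
		if item.peerID == peerID {
			delete(queue, height)
		}
	}
}

func main() {
	queue := map[int64]queueItem{
		5: {height: 5, peerID: "P1"},
		6: {height: 6, peerID: "P2"},
	}
	purgePeer(queue, "P1")
	fmt.Println(queue) // map[6:{6 P2}]
}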

View File

@@ -3,7 +3,6 @@ package v2
import (
"fmt"
cons "github.com/tendermint/tendermint/internal/consensus"
"github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
)
@@ -14,27 +13,24 @@ type processorContext interface {
saveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit)
tmState() state.State
setState(state.State)
recordConsMetrics(block *types.Block)
}
type pContext struct {
store blockStore
applier blockApplier
state state.State
metrics *cons.Metrics
}
func newProcessorContext(st blockStore, ex blockApplier, s state.State, m *cons.Metrics) *pContext {
func newProcessorContext(st blockStore, ex blockApplier, s state.State) *pContext {
return &pContext{
store: st,
applier: ex,
state: s,
metrics: m,
}
}
func (pc *pContext) applyBlock(blockID types.BlockID, block *types.Block) error {
newState, err := pc.applier.ApplyBlock(pc.state, blockID, block)
newState, _, err := pc.applier.ApplyBlock(pc.state, blockID, block)
pc.state = newState
return err
}
@@ -55,10 +51,6 @@ func (pc *pContext) saveBlock(block *types.Block, blockParts *types.PartSet, see
pc.store.SaveBlock(block, blockParts, seenCommit)
}
func (pc *pContext) recordConsMetrics(block *types.Block) {
pc.metrics.RecordConsMetrics(block)
}
type mockPContext struct {
applicationBL []int64
verificationBL []int64
@@ -106,7 +98,3 @@ func (mpc *mockPContext) setState(state state.State) {
func (mpc *mockPContext) tmState() state.State {
return mpc.state
}
func (mpc *mockPContext) recordConsMetrics(block *types.Block) {
}

View File

@@ -5,6 +5,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/tendermint/tendermint/p2p"
tmState "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
)
@@ -39,7 +40,7 @@ func makeState(p *params) *pcState {
state := newPcState(context)
for _, item := range p.items {
state.enqueue(types.NodeID(item.pid), makePcBlock(item.height), item.height)
state.enqueue(p2p.NodeID(item.pid), makePcBlock(item.height), item.height)
}
state.blocksSynced = p.blocksSynced
@@ -47,7 +48,7 @@ func makeState(p *params) *pcState {
return state
}
func mBlockResponse(peerID types.NodeID, height int64) scBlockReceived {
func mBlockResponse(peerID p2p.NodeID, height int64) scBlockReceived {
return scBlockReceived{
peerID: peerID,
block: makePcBlock(height),

View File

@@ -7,12 +7,11 @@ import (
proto "github.com/gogo/protobuf/proto"
bc "github.com/tendermint/tendermint/internal/blockchain"
"github.com/tendermint/tendermint/internal/blockchain/v2/internal/behavior"
cons "github.com/tendermint/tendermint/internal/consensus"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
"github.com/tendermint/tendermint/internal/p2p"
bc "github.com/tendermint/tendermint/blockchain"
"github.com/tendermint/tendermint/blockchain/v2/internal/behavior"
"github.com/tendermint/tendermint/libs/log"
tmsync "github.com/tendermint/tendermint/libs/sync"
"github.com/tendermint/tendermint/p2p"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
"github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
@@ -51,18 +50,18 @@ type BlockchainReactor struct {
}
type blockApplier interface {
ApplyBlock(state state.State, blockID types.BlockID, block *types.Block) (state.State, error)
ApplyBlock(state state.State, blockID types.BlockID, block *types.Block) (state.State, int64, error)
}
// XXX: unify naming in this package around tmState
func newReactor(state state.State, store blockStore, reporter behavior.Reporter,
blockApplier blockApplier, fastSync bool, metrics *cons.Metrics) *BlockchainReactor {
blockApplier blockApplier, fastSync bool) *BlockchainReactor {
initHeight := state.LastBlockHeight + 1
if initHeight == 1 {
initHeight = state.InitialHeight
}
scheduler := newScheduler(initHeight, time.Now())
pContext := newProcessorContext(store, blockApplier, state, metrics)
pContext := newProcessorContext(store, blockApplier, state)
// TODO: Fix naming to just newProcessor
// newPcState requires a processorContext
processor := newPcState(pContext)
@@ -82,10 +81,9 @@ func NewBlockchainReactor(
state state.State,
blockApplier blockApplier,
store blockStore,
fastSync bool,
metrics *cons.Metrics) *BlockchainReactor {
fastSync bool) *BlockchainReactor {
reporter := behavior.NewMockReporter()
return newReactor(state, store, reporter, blockApplier, fastSync, metrics)
return newReactor(state, store, reporter, blockApplier, fastSync)
}
// SetSwitch implements Reactor interface.
@@ -213,7 +211,7 @@ func (e rProcessBlock) String() string {
type bcBlockResponse struct {
priorityNormal
time time.Time
peerID types.NodeID
peerID p2p.NodeID
size int64
block *types.Block
}
@@ -227,7 +225,7 @@ func (resp bcBlockResponse) String() string {
type bcNoBlockResponse struct {
priorityNormal
time time.Time
peerID types.NodeID
peerID p2p.NodeID
height int64
}
@@ -240,7 +238,7 @@ func (resp bcNoBlockResponse) String() string {
type bcStatusResponse struct {
priorityNormal
time time.Time
peerID types.NodeID
peerID p2p.NodeID
base int64
height int64
}
@@ -253,7 +251,7 @@ func (resp bcStatusResponse) String() string {
// new peer is connected
type bcAddNewPeer struct {
priorityNormal
peerID types.NodeID
peerID p2p.NodeID
}
func (resp bcAddNewPeer) String() string {
@@ -263,7 +261,7 @@ func (resp bcAddNewPeer) String() string {
// existing peer is removed
type bcRemovePeer struct {
priorityHigh
peerID types.NodeID
peerID p2p.NodeID
reason interface{}
}
@@ -585,14 +583,8 @@ func (r *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
ID: BlockchainChannel,
Priority: 5,
SendQueueCapacity: 2000,
RecvBufferCapacity: 1024,
RecvBufferCapacity: 50 * 4096,
RecvMessageCapacity: bc.MaxMsgSize,
},
}
}
func (r *BlockchainReactor) GetMaxPeerBlockHeight() int64 {
r.mtx.RLock()
defer r.mtx.RUnlock()
return r.maxPeerHeight
}
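A minimal sketch of a type satisfying the widened blockApplier interface above. The meaning of the new int64 return value is an assumption (these hunks never name it), and the no-op body mirrors the mockBlockApplier used in the tests below.

package v2 // sketch: would live alongside the reactor

import (
	sm "github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/types"
)

// noopApplier satisfies blockApplier without doing any real work.
type noopApplier struct{}

func (noopApplier) ApplyBlock(
	state sm.State, blockID types.BlockID, block *types.Block,
) (sm.State, int64, error) {
	state.LastBlockHeight++ // pretend the block was applied
	return state, 0, nil    // the int64 is assumed to be a retain height
}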

View File

@@ -14,30 +14,28 @@ import (
dbm "github.com/tendermint/tm-db"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/blockchain/v2/internal/behavior"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/internal/blockchain/v2/internal/behavior"
cons "github.com/tendermint/tendermint/internal/consensus"
"github.com/tendermint/tendermint/internal/mempool/mock"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/internal/p2p/conn"
"github.com/tendermint/tendermint/internal/test/factory"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/service"
"github.com/tendermint/tendermint/mempool/mock"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/p2p/conn"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
sf "github.com/tendermint/tendermint/state/test/factory"
tmstore "github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
)
type mockPeer struct {
service.Service
id types.NodeID
id p2p.NodeID
}
func (mp mockPeer) FlushStop() {}
func (mp mockPeer) ID() types.NodeID { return mp.id }
func (mp mockPeer) ID() p2p.NodeID { return mp.id }
func (mp mockPeer) RemoteIP() net.IP { return net.IP{} }
func (mp mockPeer) RemoteAddr() net.Addr { return &net.TCPAddr{IP: mp.RemoteIP(), Port: 8800} }
@@ -45,8 +43,8 @@ func (mp mockPeer) IsOutbound() bool { return true }
func (mp mockPeer) IsPersistent() bool { return true }
func (mp mockPeer) CloseConn() error { return nil }
func (mp mockPeer) NodeInfo() types.NodeInfo {
return types.NodeInfo{
func (mp mockPeer) NodeInfo() p2p.NodeInfo {
return p2p.NodeInfo{
NodeID: "",
ListenAddr: "",
}
@@ -86,9 +84,9 @@ type mockBlockApplier struct {
// XXX: Add whitelist/blacklist?
func (mba *mockBlockApplier) ApplyBlock(
state sm.State, blockID types.BlockID, block *types.Block,
) (sm.State, error) {
) (sm.State, int64, error) {
state.LastBlockHeight++
return state, nil
return state, 0, nil
}
type mockSwitchIo struct {
@@ -153,8 +151,8 @@ type testReactorParams struct {
mockA bool
}
func newTestReactor(t *testing.T, p testReactorParams) *BlockchainReactor {
store, state, _ := newReactorStore(t, p.genDoc, p.privVals, p.startHeight)
func newTestReactor(p testReactorParams) *BlockchainReactor {
store, state, _ := newReactorStore(p.genDoc, p.privVals, p.startHeight)
reporter := behavior.NewMockReporter()
var appl blockApplier
@@ -166,17 +164,18 @@ func newTestReactor(t *testing.T, p testReactorParams) *BlockchainReactor {
cc := proxy.NewLocalClientCreator(app)
proxyApp := proxy.NewAppConns(cc)
err := proxyApp.Start()
require.NoError(t, err)
if err != nil {
panic(fmt.Errorf("error starting app: %w", err))
}
db := dbm.NewMemDB()
stateStore := sm.NewStore(db)
blockStore := tmstore.NewBlockStore(dbm.NewMemDB())
appl = sm.NewBlockExecutor(
stateStore, p.logger, proxyApp.Consensus(), mock.Mempool{}, sm.EmptyEvidencePool{}, blockStore)
err = stateStore.Save(state)
require.NoError(t, err)
appl = sm.NewBlockExecutor(stateStore, p.logger, proxyApp.Consensus(), mock.Mempool{}, sm.EmptyEvidencePool{})
if err = stateStore.Save(state); err != nil {
panic(err)
}
}
r := newReactor(state, store, reporter, appl, true, cons.NopMetrics())
r := newReactor(state, store, reporter, appl, true)
logger := log.TestingLogger()
r.SetLogger(logger.With("module", "blockchain"))
@@ -401,7 +400,7 @@ func TestReactorHelperMode(t *testing.T) {
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
reactor := newTestReactor(t, params)
reactor := newTestReactor(params)
mockSwitch := &mockSwitchIo{switchedToConsensus: false}
reactor.io = mockSwitch
err := reactor.Start()
@@ -419,7 +418,7 @@ func TestReactorHelperMode(t *testing.T) {
msgBz, err := proto.Marshal(msgProto)
require.NoError(t, err)
reactor.Receive(channelID, mockPeer{id: types.NodeID(step.peer)}, msgBz)
reactor.Receive(channelID, mockPeer{id: p2p.NodeID(step.peer)}, msgBz)
assert.Equal(t, old+1, mockSwitch.numStatusResponse)
case bcproto.BlockRequest:
if ev.Height > params.startHeight {
@@ -431,7 +430,7 @@ func TestReactorHelperMode(t *testing.T) {
msgBz, err := proto.Marshal(msgProto)
require.NoError(t, err)
reactor.Receive(channelID, mockPeer{id: types.NodeID(step.peer)}, msgBz)
reactor.Receive(channelID, mockPeer{id: p2p.NodeID(step.peer)}, msgBz)
assert.Equal(t, old+1, mockSwitch.numNoBlockResponse)
} else {
old := mockSwitch.numBlockResponse
@@ -442,7 +441,7 @@ func TestReactorHelperMode(t *testing.T) {
msgBz, err := proto.Marshal(msgProto)
require.NoError(t, err)
reactor.Receive(channelID, mockPeer{id: types.NodeID(step.peer)}, msgBz)
reactor.Receive(channelID, mockPeer{id: p2p.NodeID(step.peer)}, msgBz)
assert.Equal(t, old+1, mockSwitch.numBlockResponse)
}
}
@@ -458,7 +457,7 @@ func TestReactorSetSwitchNil(t *testing.T) {
defer os.RemoveAll(config.RootDir)
genDoc, privVals := factory.RandGenesisDoc(config, 1, false, 30)
reactor := newTestReactor(t, testReactorParams{
reactor := newTestReactor(testReactorParams{
logger: log.TestingLogger(),
genDoc: genDoc,
privVals: privVals,
@@ -469,18 +468,34 @@ func TestReactorSetSwitchNil(t *testing.T) {
assert.Nil(t, reactor.io)
}
//----------------------------------------------
// utility funcs
func makeTxs(height int64) (txs []types.Tx) {
for i := 0; i < 10; i++ {
txs = append(txs, types.Tx([]byte{byte(height), byte(i)}))
}
return txs
}
func makeBlock(height int64, state sm.State, lastCommit *types.Commit) *types.Block {
block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, nil, state.Validators.GetProposer().Address)
return block
}
type testApp struct {
abci.BaseApplication
}
// Why are we importing the entire blockExecutor dependency graph here
// when we have the facilities to avoid it?
func newReactorStore(
t *testing.T,
genDoc *types.GenesisDoc,
privVals []types.PrivValidator,
maxBlockHeight int64) (*tmstore.BlockStore, sm.State, *sm.BlockExecutor) {
t.Helper()
require.Len(t, privVals, 1)
maxBlockHeight int64) (*store.BlockStore, sm.State, *sm.BlockExecutor) {
if len(privVals) != 1 {
panic("only support one validator")
}
app := &testApp{}
cc := proxy.NewLocalClientCreator(app)
proxyApp := proxy.NewAppConns(cc)
@@ -490,15 +505,20 @@ func newReactorStore(
}
stateDB := dbm.NewMemDB()
blockStore := tmstore.NewBlockStore(dbm.NewMemDB())
blockStore := store.NewBlockStore(dbm.NewMemDB())
stateStore := sm.NewStore(stateDB)
state, err := sm.MakeGenesisState(genDoc)
require.NoError(t, err)
state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
if err != nil {
panic(fmt.Errorf("error constructing state from genesis file: %w", err))
}
db := dbm.NewMemDB()
stateStore = sm.NewStore(db)
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(),
mock.Mempool{}, sm.EmptyEvidencePool{}, blockStore)
err = stateStore.Save(state)
require.NoError(t, err)
mock.Mempool{}, sm.EmptyEvidencePool{})
if err = stateStore.Save(state); err != nil {
panic(err)
}
// add blocks in
for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ {
@@ -513,18 +533,22 @@ func newReactorStore(
lastBlockMeta.BlockID,
time.Now(),
)
require.NoError(t, err)
if err != nil {
panic(err)
}
lastCommit = types.NewCommit(vote.Height, vote.Round,
lastBlockMeta.BlockID, []types.CommitSig{vote.CommitSig()})
}
thisBlock := sf.MakeBlock(state, blockHeight, lastCommit)
thisBlock := makeBlock(blockHeight, state, lastCommit)
thisParts := thisBlock.MakePartSet(types.BlockPartSizeBytes)
blockID := types.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()}
state, err = blockExec.ApplyBlock(state, blockID, thisBlock)
require.NoError(t, err)
state, _, err = blockExec.ApplyBlock(state, blockID, thisBlock)
if err != nil {
panic(fmt.Errorf("error apply block: %w", err))
}
blockStore.SaveBlock(thisBlock, thisParts, lastCommit)
}

View File

@@ -8,6 +8,7 @@ import (
"sort"
"time"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/types"
)
@@ -25,7 +26,7 @@ func (e scFinishedEv) String() string {
// send a blockRequest message
type scBlockRequest struct {
priorityNormal
peerID types.NodeID
peerID p2p.NodeID
height int64
}
@@ -36,7 +37,7 @@ func (e scBlockRequest) String() string {
// a block has been received and validated by the scheduler
type scBlockReceived struct {
priorityNormal
peerID types.NodeID
peerID p2p.NodeID
block *types.Block
}
@@ -47,7 +48,7 @@ func (e scBlockReceived) String() string {
// scheduler detected a peer error
type scPeerError struct {
priorityHigh
peerID types.NodeID
peerID p2p.NodeID
reason error
}
@@ -58,7 +59,7 @@ func (e scPeerError) String() string {
// scheduler removed a set of peers (timed out or slow peer)
type scPeersPruned struct {
priorityHigh
peers []types.NodeID
peers []p2p.NodeID
}
func (e scPeersPruned) String() string {
@@ -125,7 +126,7 @@ func (e peerState) String() string {
}
type scPeer struct {
peerID types.NodeID
peerID p2p.NodeID
// initialized as New when peer is added, updated to Ready when statusUpdate is received,
// updated to Removed when peer is removed
@@ -142,7 +143,7 @@ func (p scPeer) String() string {
p.state, p.base, p.height, p.lastTouched, p.lastRate, p.peerID)
}
func newScPeer(peerID types.NodeID) *scPeer {
func newScPeer(peerID p2p.NodeID) *scPeer {
return &scPeer{
peerID: peerID,
state: peerStateNew,
@@ -170,7 +171,7 @@ type scheduler struct {
// a map of peerID to the scheduler-specific peer struct `scPeer`, used to keep
// track of peer-specific state
peers map[types.NodeID]*scPeer
peers map[p2p.NodeID]*scPeer
peerTimeout time.Duration // maximum response time from a peer otherwise prune
minRecvRate int64 // minimum receive rate from peer otherwise prune
@@ -182,13 +183,13 @@ type scheduler struct {
blockStates map[int64]blockState
// a map of heights to the peer we are waiting a response from
pendingBlocks map[int64]types.NodeID
pendingBlocks map[int64]p2p.NodeID
// the time at which a block was put in blockStatePending
pendingTime map[int64]time.Time
// a map of heights to the peers that put the block in blockStateReceived
receivedBlocks map[int64]types.NodeID
receivedBlocks map[int64]p2p.NodeID
}
func (sc scheduler) String() string {
@@ -203,10 +204,10 @@ func newScheduler(initHeight int64, startTime time.Time) *scheduler {
syncTimeout: 60 * time.Second,
height: initHeight,
blockStates: make(map[int64]blockState),
peers: make(map[types.NodeID]*scPeer),
pendingBlocks: make(map[int64]types.NodeID),
peers: make(map[p2p.NodeID]*scPeer),
pendingBlocks: make(map[int64]p2p.NodeID),
pendingTime: make(map[int64]time.Time),
receivedBlocks: make(map[int64]types.NodeID),
receivedBlocks: make(map[int64]p2p.NodeID),
targetPending: 10, // TODO - pass as param
peerTimeout: 15 * time.Second, // TODO - pass as param
minRecvRate: 0, // int64(7680), TODO - pass as param
@@ -215,14 +216,14 @@ func newScheduler(initHeight int64, startTime time.Time) *scheduler {
return &sc
}
func (sc *scheduler) ensurePeer(peerID types.NodeID) *scPeer {
func (sc *scheduler) ensurePeer(peerID p2p.NodeID) *scPeer {
if _, ok := sc.peers[peerID]; !ok {
sc.peers[peerID] = newScPeer(peerID)
}
return sc.peers[peerID]
}
func (sc *scheduler) touchPeer(peerID types.NodeID, time time.Time) error {
func (sc *scheduler) touchPeer(peerID p2p.NodeID, time time.Time) error {
peer, ok := sc.peers[peerID]
if !ok {
return fmt.Errorf("couldn't find peer %s", peerID)
@@ -237,7 +238,7 @@ func (sc *scheduler) touchPeer(peerID types.NodeID, time time.Time) error {
return nil
}
func (sc *scheduler) removePeer(peerID types.NodeID) {
func (sc *scheduler) removePeer(peerID p2p.NodeID) {
peer, ok := sc.peers[peerID]
if !ok {
return
@@ -297,7 +298,7 @@ func (sc *scheduler) addNewBlocks() {
}
}
func (sc *scheduler) setPeerRange(peerID types.NodeID, base int64, height int64) error {
func (sc *scheduler) setPeerRange(peerID p2p.NodeID, base int64, height int64) error {
peer := sc.ensurePeer(peerID)
if peer.state == peerStateRemoved {
@@ -332,8 +333,8 @@ func (sc *scheduler) getStateAtHeight(height int64) blockState {
}
}
func (sc *scheduler) getPeersWithHeight(height int64) []types.NodeID {
peers := make([]types.NodeID, 0)
func (sc *scheduler) getPeersWithHeight(height int64) []p2p.NodeID {
peers := make([]p2p.NodeID, 0)
for _, peer := range sc.peers {
if peer.state != peerStateReady {
continue
@@ -345,8 +346,8 @@ func (sc *scheduler) getPeersWithHeight(height int64) []types.NodeID {
return peers
}
func (sc *scheduler) prunablePeers(peerTimout time.Duration, minRecvRate int64, now time.Time) []types.NodeID {
prunable := make([]types.NodeID, 0)
func (sc *scheduler) prunablePeers(peerTimout time.Duration, minRecvRate int64, now time.Time) []p2p.NodeID {
prunable := make([]p2p.NodeID, 0)
for peerID, peer := range sc.peers {
if peer.state != peerStateReady {
continue
@@ -365,7 +366,7 @@ func (sc *scheduler) setStateAtHeight(height int64, state blockState) {
}
// CONTRACT: peer exists and is in Ready state.
func (sc *scheduler) markReceived(peerID types.NodeID, height int64, size int64, now time.Time) error {
func (sc *scheduler) markReceived(peerID p2p.NodeID, height int64, size int64, now time.Time) error {
peer := sc.peers[peerID]
if state := sc.getStateAtHeight(height); state != blockStatePending || sc.pendingBlocks[height] != peerID {
@@ -389,7 +390,7 @@ func (sc *scheduler) markReceived(peerID types.NodeID, height int64, size int64,
return nil
}
func (sc *scheduler) markPending(peerID types.NodeID, height int64, time time.Time) error {
func (sc *scheduler) markPending(peerID p2p.NodeID, height int64, time time.Time) error {
state := sc.getStateAtHeight(height)
if state != blockStateNew {
return fmt.Errorf("block %d should be in blockStateNew but is %s", height, state)
@@ -471,7 +472,7 @@ func (sc *scheduler) nextHeightToSchedule() int64 {
return min
}
func (sc *scheduler) pendingFrom(peerID types.NodeID) []int64 {
func (sc *scheduler) pendingFrom(peerID p2p.NodeID) []int64 {
var heights []int64
for height, pendingPeerID := range sc.pendingBlocks {
if pendingPeerID == peerID {
@@ -481,7 +482,7 @@ func (sc *scheduler) pendingFrom(peerID types.NodeID) []int64 {
return heights
}
func (sc *scheduler) selectPeer(height int64) (types.NodeID, error) {
func (sc *scheduler) selectPeer(height int64) (p2p.NodeID, error) {
peers := sc.getPeersWithHeight(height)
if len(peers) == 0 {
return "", fmt.Errorf("cannot find peer for height %d", height)
@@ -489,7 +490,7 @@ func (sc *scheduler) selectPeer(height int64) (types.NodeID, error) {
// create a map from number of pending requests to a list
// of peers having that number of pending requests.
pendingFrom := make(map[int][]types.NodeID)
pendingFrom := make(map[int][]p2p.NodeID)
for _, peerID := range peers {
numPending := len(sc.pendingFrom(peerID))
pendingFrom[numPending] = append(pendingFrom[numPending], peerID)
@@ -508,7 +509,7 @@ func (sc *scheduler) selectPeer(height int64) (types.NodeID, error) {
}
// PeerByID is a list of peers sorted by peerID.
type PeerByID []types.NodeID
type PeerByID []p2p.NodeID
func (peers PeerByID) Len() int {
return len(peers)
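The Less and Swap methods are not shown in this hunk, so the following standalone sketch assumes the usual lexicographic implementation, with strings standing in for p2p.NodeID.

package main

import (
	"fmt"
	"sort"
)

// PeerByID mirrors the scheduler's sortable peer-ID slice above.
type PeerByID []string

func (p PeerByID) Len() int           { return len(p) }
func (p PeerByID) Less(i, j int) bool { return p[i] < p[j] }
func (p PeerByID) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }

func main() {
	peers := PeerByID{"P3", "P1", "P2"}
	sort.Sort(peers)
	fmt.Println(peers) // [P1 P2 P3]
}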

View File

@@ -10,6 +10,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
)
@@ -19,9 +20,9 @@ type scTestParams struct {
initHeight int64
height int64
allB []int64
pending map[int64]types.NodeID
pending map[int64]p2p.NodeID
pendingTime map[int64]time.Time
received map[int64]types.NodeID
received map[int64]p2p.NodeID
peerTimeout time.Duration
minRecvRate int64
targetPending int
@@ -40,7 +41,7 @@ func verifyScheduler(sc *scheduler) {
}
func newTestScheduler(params scTestParams) *scheduler {
peers := make(map[types.NodeID]*scPeer)
peers := make(map[p2p.NodeID]*scPeer)
var maxHeight int64
initHeight := params.initHeight
@@ -53,8 +54,8 @@ func newTestScheduler(params scTestParams) *scheduler {
}
for id, peer := range params.peers {
peer.peerID = types.NodeID(id)
peers[types.NodeID(id)] = peer
peer.peerID = p2p.NodeID(id)
peers[p2p.NodeID(id)] = peer
if maxHeight < peer.height {
maxHeight = peer.height
}
@@ -121,7 +122,7 @@ func TestScMaxHeights(t *testing.T) {
name: "one ready peer",
sc: scheduler{
height: 3,
peers: map[types.NodeID]*scPeer{"P1": {height: 6, state: peerStateReady}},
peers: map[p2p.NodeID]*scPeer{"P1": {height: 6, state: peerStateReady}},
},
wantMax: 6,
},
@@ -129,7 +130,7 @@ func TestScMaxHeights(t *testing.T) {
name: "ready and removed peers",
sc: scheduler{
height: 1,
peers: map[types.NodeID]*scPeer{
peers: map[p2p.NodeID]*scPeer{
"P1": {height: 4, state: peerStateReady},
"P2": {height: 10, state: peerStateRemoved}},
},
@@ -139,7 +140,7 @@ func TestScMaxHeights(t *testing.T) {
name: "removed peers",
sc: scheduler{
height: 1,
peers: map[types.NodeID]*scPeer{
peers: map[p2p.NodeID]*scPeer{
"P1": {height: 4, state: peerStateRemoved},
"P2": {height: 10, state: peerStateRemoved}},
},
@@ -149,7 +150,7 @@ func TestScMaxHeights(t *testing.T) {
name: "new peers",
sc: scheduler{
height: 1,
peers: map[types.NodeID]*scPeer{
peers: map[p2p.NodeID]*scPeer{
"P1": {base: -1, height: -1, state: peerStateNew},
"P2": {base: -1, height: -1, state: peerStateNew}},
},
@@ -159,7 +160,7 @@ func TestScMaxHeights(t *testing.T) {
name: "mixed peers",
sc: scheduler{
height: 1,
peers: map[types.NodeID]*scPeer{
peers: map[p2p.NodeID]*scPeer{
"P1": {height: -1, state: peerStateNew},
"P2": {height: 10, state: peerStateReady},
"P3": {height: 20, state: peerStateRemoved},
@@ -186,7 +187,7 @@ func TestScMaxHeights(t *testing.T) {
func TestScEnsurePeer(t *testing.T) {
type args struct {
peerID types.NodeID
peerID p2p.NodeID
}
tests := []struct {
name string
@@ -243,7 +244,7 @@ func TestScTouchPeer(t *testing.T) {
now := time.Now()
type args struct {
peerID types.NodeID
peerID p2p.NodeID
time time.Time
}
@@ -315,13 +316,13 @@ func TestScPrunablePeers(t *testing.T) {
name string
fields scTestParams
args args
wantResult []types.NodeID
wantResult []p2p.NodeID
}{
{
name: "no peers",
fields: scTestParams{peers: map[string]*scPeer{}},
args: args{threshold: time.Second, time: now.Add(time.Second + time.Millisecond), minSpeed: 100},
wantResult: []types.NodeID{},
wantResult: []p2p.NodeID{},
},
{
name: "mixed peers",
@@ -340,7 +341,7 @@ func TestScPrunablePeers(t *testing.T) {
"P6": {state: peerStateReady, lastTouched: now.Add(time.Second), lastRate: 90},
}},
args: args{threshold: time.Second, time: now.Add(time.Second + time.Millisecond), minSpeed: 100},
wantResult: []types.NodeID{"P4", "P5", "P6"},
wantResult: []p2p.NodeID{"P4", "P5", "P6"},
},
}
@@ -360,7 +361,7 @@ func TestScPrunablePeers(t *testing.T) {
func TestScRemovePeer(t *testing.T) {
type args struct {
peerID types.NodeID
peerID p2p.NodeID
}
tests := []struct {
name string
@@ -423,13 +424,13 @@ func TestScRemovePeer(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}},
allB: []int64{1, 2, 3},
pending: map[int64]types.NodeID{1: "P1"},
pending: map[int64]p2p.NodeID{1: "P1"},
},
args: args{peerID: "P1"},
wantFields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateRemoved}},
allB: []int64{},
pending: map[int64]types.NodeID{},
pending: map[int64]p2p.NodeID{},
},
},
{
@@ -437,13 +438,13 @@ func TestScRemovePeer(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}},
allB: []int64{1, 2, 3},
received: map[int64]types.NodeID{1: "P1"},
received: map[int64]p2p.NodeID{1: "P1"},
},
args: args{peerID: "P1"},
wantFields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateRemoved}},
allB: []int64{},
received: map[int64]types.NodeID{},
received: map[int64]p2p.NodeID{},
},
},
{
@@ -451,15 +452,15 @@ func TestScRemovePeer(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{1, 2, 3, 4},
pending: map[int64]types.NodeID{1: "P1", 3: "P1"},
received: map[int64]types.NodeID{2: "P1", 4: "P1"},
pending: map[int64]p2p.NodeID{1: "P1", 3: "P1"},
received: map[int64]p2p.NodeID{2: "P1", 4: "P1"},
},
args: args{peerID: "P1"},
wantFields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateRemoved}},
allB: []int64{},
pending: map[int64]types.NodeID{},
received: map[int64]types.NodeID{},
pending: map[int64]p2p.NodeID{},
received: map[int64]p2p.NodeID{},
},
},
{
@@ -470,8 +471,8 @@ func TestScRemovePeer(t *testing.T) {
"P2": {height: 6, state: peerStateReady},
},
allB: []int64{1, 2, 3, 4, 5, 6},
pending: map[int64]types.NodeID{1: "P1", 3: "P2", 6: "P1"},
received: map[int64]types.NodeID{2: "P1", 4: "P2", 5: "P2"},
pending: map[int64]p2p.NodeID{1: "P1", 3: "P2", 6: "P1"},
received: map[int64]p2p.NodeID{2: "P1", 4: "P2", 5: "P2"},
},
args: args{peerID: "P1"},
wantFields: scTestParams{
@@ -480,8 +481,8 @@ func TestScRemovePeer(t *testing.T) {
"P2": {height: 6, state: peerStateReady},
},
allB: []int64{1, 2, 3, 4, 5, 6},
pending: map[int64]types.NodeID{3: "P2"},
received: map[int64]types.NodeID{4: "P2", 5: "P2"},
pending: map[int64]p2p.NodeID{3: "P2"},
received: map[int64]p2p.NodeID{4: "P2", 5: "P2"},
},
},
}
@@ -500,7 +501,7 @@ func TestScRemovePeer(t *testing.T) {
func TestScSetPeerRange(t *testing.T) {
type args struct {
peerID types.NodeID
peerID p2p.NodeID
base int64
height int64
}
@@ -621,25 +622,25 @@ func TestScGetPeersWithHeight(t *testing.T) {
name string
fields scTestParams
args args
wantResult []types.NodeID
wantResult []p2p.NodeID
}{
{
name: "no peers",
fields: scTestParams{peers: map[string]*scPeer{}},
args: args{height: 10},
wantResult: []types.NodeID{},
wantResult: []p2p.NodeID{},
},
{
name: "only new peers",
fields: scTestParams{peers: map[string]*scPeer{"P1": {height: -1, state: peerStateNew}}},
args: args{height: 10},
wantResult: []types.NodeID{},
wantResult: []p2p.NodeID{},
},
{
name: "only Removed peers",
fields: scTestParams{peers: map[string]*scPeer{"P1": {height: 4, state: peerStateRemoved}}},
args: args{height: 2},
wantResult: []types.NodeID{},
wantResult: []p2p.NodeID{},
},
{
name: "one Ready shorter peer",
@@ -648,7 +649,7 @@ func TestScGetPeersWithHeight(t *testing.T) {
allB: []int64{1, 2, 3, 4},
},
args: args{height: 5},
wantResult: []types.NodeID{},
wantResult: []p2p.NodeID{},
},
{
name: "one Ready equal peer",
@@ -657,7 +658,7 @@ func TestScGetPeersWithHeight(t *testing.T) {
allB: []int64{1, 2, 3, 4},
},
args: args{height: 4},
wantResult: []types.NodeID{"P1"},
wantResult: []p2p.NodeID{"P1"},
},
{
name: "one Ready higher peer",
@@ -667,7 +668,7 @@ func TestScGetPeersWithHeight(t *testing.T) {
allB: []int64{1, 2, 3, 4},
},
args: args{height: 4},
wantResult: []types.NodeID{"P1"},
wantResult: []p2p.NodeID{"P1"},
},
{
name: "one Ready higher peer at base",
@@ -677,7 +678,7 @@ func TestScGetPeersWithHeight(t *testing.T) {
allB: []int64{1, 2, 3, 4},
},
args: args{height: 4},
wantResult: []types.NodeID{"P1"},
wantResult: []p2p.NodeID{"P1"},
},
{
name: "one Ready higher peer with higher base",
@@ -687,7 +688,7 @@ func TestScGetPeersWithHeight(t *testing.T) {
allB: []int64{1, 2, 3, 4},
},
args: args{height: 4},
wantResult: []types.NodeID{},
wantResult: []p2p.NodeID{},
},
{
name: "multiple mixed peers",
@@ -702,7 +703,7 @@ func TestScGetPeersWithHeight(t *testing.T) {
allB: []int64{8, 9, 10, 11},
},
args: args{height: 8},
wantResult: []types.NodeID{"P2", "P5"},
wantResult: []p2p.NodeID{"P2", "P5"},
},
}
@@ -724,7 +725,7 @@ func TestScMarkPending(t *testing.T) {
now := time.Now()
type args struct {
peerID types.NodeID
peerID p2p.NodeID
height int64
tm time.Time
}
@@ -820,14 +821,14 @@ func TestScMarkPending(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
allB: []int64{1, 2},
pending: map[int64]types.NodeID{1: "P1"},
pending: map[int64]p2p.NodeID{1: "P1"},
pendingTime: map[int64]time.Time{1: now},
},
args: args{peerID: "P1", height: 2, tm: now.Add(time.Millisecond)},
wantFields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
allB: []int64{1, 2},
pending: map[int64]types.NodeID{1: "P1", 2: "P1"},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P1"},
pendingTime: map[int64]time.Time{1: now, 2: now.Add(time.Millisecond)},
},
},
@@ -850,7 +851,7 @@ func TestScMarkReceived(t *testing.T) {
now := time.Now()
type args struct {
peerID types.NodeID
peerID p2p.NodeID
height int64
size int64
tm time.Time
@@ -890,7 +891,7 @@ func TestScMarkReceived(t *testing.T) {
"P2": {height: 4, state: peerStateReady},
},
allB: []int64{1, 2, 3, 4},
pending: map[int64]types.NodeID{1: "P1", 2: "P2", 3: "P2", 4: "P1"},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P2", 3: "P2", 4: "P1"},
},
args: args{peerID: "P1", height: 2, size: 1000, tm: now},
wantFields: scTestParams{
@@ -899,7 +900,7 @@ func TestScMarkReceived(t *testing.T) {
"P2": {height: 4, state: peerStateReady},
},
allB: []int64{1, 2, 3, 4},
pending: map[int64]types.NodeID{1: "P1", 2: "P2", 3: "P2", 4: "P1"},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P2", 3: "P2", 4: "P1"},
},
wantErr: true,
},
@@ -908,13 +909,13 @@ func TestScMarkReceived(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{1, 2, 3, 4},
pending: map[int64]types.NodeID{},
pending: map[int64]p2p.NodeID{},
},
args: args{peerID: "P1", height: 2, size: 1000, tm: now},
wantFields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{1, 2, 3, 4},
pending: map[int64]types.NodeID{},
pending: map[int64]p2p.NodeID{},
},
wantErr: true,
},
@@ -923,14 +924,14 @@ func TestScMarkReceived(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
allB: []int64{1, 2},
pending: map[int64]types.NodeID{1: "P1", 2: "P1"},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P1"},
pendingTime: map[int64]time.Time{1: now, 2: now.Add(time.Second)},
},
args: args{peerID: "P1", height: 2, size: 1000, tm: now},
wantFields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
allB: []int64{1, 2},
pending: map[int64]types.NodeID{1: "P1", 2: "P1"},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P1"},
pendingTime: map[int64]time.Time{1: now, 2: now.Add(time.Second)},
},
wantErr: true,
@@ -940,16 +941,16 @@ func TestScMarkReceived(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
allB: []int64{1, 2},
pending: map[int64]types.NodeID{1: "P1", 2: "P1"},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P1"},
pendingTime: map[int64]time.Time{1: now, 2: now},
},
args: args{peerID: "P1", height: 2, size: 1000, tm: now.Add(time.Millisecond)},
wantFields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
allB: []int64{1, 2},
pending: map[int64]types.NodeID{1: "P1"},
pending: map[int64]p2p.NodeID{1: "P1"},
pendingTime: map[int64]time.Time{1: now},
received: map[int64]types.NodeID{2: "P1"},
received: map[int64]p2p.NodeID{2: "P1"},
},
},
}
@@ -990,7 +991,7 @@ func TestScMarkProcessed(t *testing.T) {
height: 2,
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{2},
pending: map[int64]types.NodeID{2: "P1"},
pending: map[int64]p2p.NodeID{2: "P1"},
pendingTime: map[int64]time.Time{2: now},
targetPending: 1,
},
@@ -1008,15 +1009,15 @@ func TestScMarkProcessed(t *testing.T) {
height: 1,
peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
allB: []int64{1, 2},
pending: map[int64]types.NodeID{2: "P1"},
pending: map[int64]p2p.NodeID{2: "P1"},
pendingTime: map[int64]time.Time{2: now},
received: map[int64]types.NodeID{1: "P1"}},
received: map[int64]p2p.NodeID{1: "P1"}},
args: args{height: 1},
wantFields: scTestParams{
height: 2,
peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
allB: []int64{2},
pending: map[int64]types.NodeID{2: "P1"},
pending: map[int64]p2p.NodeID{2: "P1"},
pendingTime: map[int64]time.Time{2: now}},
},
}
@@ -1100,7 +1101,7 @@ func TestScAllBlocksProcessed(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{1, 2, 3, 4},
pending: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
pendingTime: map[int64]time.Time{1: now, 2: now, 3: now, 4: now},
},
wantResult: false,
@@ -1110,7 +1111,7 @@ func TestScAllBlocksProcessed(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{1, 2, 3, 4},
received: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
received: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
},
wantResult: false,
},
@@ -1121,7 +1122,7 @@ func TestScAllBlocksProcessed(t *testing.T) {
peers: map[string]*scPeer{
"P1": {height: 4, state: peerStateReady}},
allB: []int64{4},
received: map[int64]types.NodeID{4: "P1"},
received: map[int64]p2p.NodeID{4: "P1"},
},
wantResult: true,
},
@@ -1130,7 +1131,7 @@ func TestScAllBlocksProcessed(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{1, 2, 3, 4},
pending: map[int64]types.NodeID{2: "P1", 4: "P1"},
pending: map[int64]p2p.NodeID{2: "P1", 4: "P1"},
pendingTime: map[int64]time.Time{2: now, 4: now},
},
wantResult: false,
@@ -1178,7 +1179,7 @@ func TestScNextHeightToSchedule(t *testing.T) {
initHeight: 1,
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{1, 2, 3, 4},
pending: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
pendingTime: map[int64]time.Time{1: now, 2: now, 3: now, 4: now},
},
wantHeight: -1,
@@ -1189,7 +1190,7 @@ func TestScNextHeightToSchedule(t *testing.T) {
initHeight: 1,
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{1, 2, 3, 4},
received: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
received: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
},
wantHeight: -1,
},
@@ -1208,7 +1209,7 @@ func TestScNextHeightToSchedule(t *testing.T) {
initHeight: 1,
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{1, 2, 3, 4},
pending: map[int64]types.NodeID{2: "P1"},
pending: map[int64]p2p.NodeID{2: "P1"},
pendingTime: map[int64]time.Time{2: now},
},
wantHeight: 1,
@@ -1238,7 +1239,7 @@ func TestScSelectPeer(t *testing.T) {
name string
fields scTestParams
args args
wantResult types.NodeID
wantResult p2p.NodeID
wantError bool
}{
{
@@ -1306,7 +1307,7 @@ func TestScSelectPeer(t *testing.T) {
"P1": {height: 8, state: peerStateReady},
"P2": {height: 9, state: peerStateReady}},
allB: []int64{4, 5, 6, 7, 8, 9},
pending: map[int64]types.NodeID{
pending: map[int64]p2p.NodeID{
4: "P1", 6: "P1",
5: "P2",
},
@@ -1322,7 +1323,7 @@ func TestScSelectPeer(t *testing.T) {
"P1": {height: 15, state: peerStateReady},
"P3": {height: 15, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
pending: map[int64]types.NodeID{
pending: map[int64]p2p.NodeID{
1: "P1", 2: "P1",
3: "P3", 4: "P3",
5: "P2", 6: "P2",
@@ -1391,7 +1392,7 @@ func TestScHandleBlockResponse(t *testing.T) {
now := time.Now()
block6FromP1 := bcBlockResponse{
time: now.Add(time.Millisecond),
peerID: types.NodeID("P1"),
peerID: p2p.NodeID("P1"),
size: 100,
block: makeScBlock(6),
}
@@ -1432,7 +1433,7 @@ func TestScHandleBlockResponse(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P2": {height: 8, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8},
pending: map[int64]types.NodeID{6: "P2"},
pending: map[int64]p2p.NodeID{6: "P2"},
pendingTime: map[int64]time.Time{6: now},
},
args: args{event: block6FromP1},
@@ -1443,7 +1444,7 @@ func TestScHandleBlockResponse(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8},
pending: map[int64]types.NodeID{6: "P1"},
pending: map[int64]p2p.NodeID{6: "P1"},
pendingTime: map[int64]time.Time{6: now.Add(time.Second)},
},
args: args{event: block6FromP1},
@@ -1454,7 +1455,7 @@ func TestScHandleBlockResponse(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8},
pending: map[int64]types.NodeID{6: "P1"},
pending: map[int64]p2p.NodeID{6: "P1"},
pendingTime: map[int64]time.Time{6: now},
},
args: args{event: block6FromP1},
@@ -1476,7 +1477,7 @@ func TestScHandleNoBlockResponse(t *testing.T) {
now := time.Now()
noBlock6FromP1 := bcNoBlockResponse{
time: now.Add(time.Millisecond),
peerID: types.NodeID("P1"),
peerID: p2p.NodeID("P1"),
height: 6,
}
@@ -1512,14 +1513,14 @@ func TestScHandleNoBlockResponse(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P2": {height: 8, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8},
pending: map[int64]types.NodeID{6: "P2"},
pending: map[int64]p2p.NodeID{6: "P2"},
pendingTime: map[int64]time.Time{6: now},
},
wantEvent: noOpEvent{},
wantFields: scTestParams{
peers: map[string]*scPeer{"P2": {height: 8, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8},
pending: map[int64]types.NodeID{6: "P2"},
pending: map[int64]p2p.NodeID{6: "P2"},
pendingTime: map[int64]time.Time{6: now},
},
},
@@ -1528,7 +1529,7 @@ func TestScHandleNoBlockResponse(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8},
pending: map[int64]types.NodeID{6: "P1"},
pending: map[int64]p2p.NodeID{6: "P1"},
pendingTime: map[int64]time.Time{6: now},
},
wantEvent: scPeerError{peerID: "P1", reason: fmt.Errorf("some error")},
@@ -1551,7 +1552,7 @@ func TestScHandleNoBlockResponse(t *testing.T) {
func TestScHandleBlockProcessed(t *testing.T) {
now := time.Now()
processed6FromP1 := pcBlockProcessed{
peerID: types.NodeID("P1"),
peerID: p2p.NodeID("P1"),
height: 6,
}
@@ -1578,7 +1579,7 @@ func TestScHandleBlockProcessed(t *testing.T) {
initHeight: 6,
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
allB: []int64{6, 7, 8},
pending: map[int64]types.NodeID{6: "P1"},
pending: map[int64]p2p.NodeID{6: "P1"},
pendingTime: map[int64]time.Time{6: now},
},
args: args{event: processed6FromP1},
@@ -1590,7 +1591,7 @@ func TestScHandleBlockProcessed(t *testing.T) {
initHeight: 6,
peers: map[string]*scPeer{"P1": {height: 7, state: peerStateReady}},
allB: []int64{6, 7},
received: map[int64]types.NodeID{6: "P1", 7: "P1"},
received: map[int64]p2p.NodeID{6: "P1", 7: "P1"},
},
args: args{event: processed6FromP1},
wantEvent: scFinishedEv{},
@@ -1601,8 +1602,8 @@ func TestScHandleBlockProcessed(t *testing.T) {
initHeight: 6,
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
allB: []int64{6, 7, 8},
pending: map[int64]types.NodeID{7: "P1", 8: "P1"},
received: map[int64]types.NodeID{6: "P1"},
pending: map[int64]p2p.NodeID{7: "P1", 8: "P1"},
received: map[int64]p2p.NodeID{6: "P1"},
},
args: args{event: processed6FromP1},
wantEvent: noOpEvent{},
@@ -1645,7 +1646,7 @@ func TestScHandleBlockVerificationFailure(t *testing.T) {
initHeight: 6,
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
allB: []int64{6, 7, 8},
pending: map[int64]types.NodeID{6: "P1"},
pending: map[int64]p2p.NodeID{6: "P1"},
pendingTime: map[int64]time.Time{6: now},
},
args: args{event: pcBlockVerificationFailure{height: 10, firstPeerID: "P1", secondPeerID: "P1"}},
@@ -1657,7 +1658,7 @@ func TestScHandleBlockVerificationFailure(t *testing.T) {
initHeight: 6,
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}, "P2": {height: 8, state: peerStateReady}},
allB: []int64{6, 7, 8},
pending: map[int64]types.NodeID{6: "P1"},
pending: map[int64]p2p.NodeID{6: "P1"},
pendingTime: map[int64]time.Time{6: now},
},
args: args{event: pcBlockVerificationFailure{height: 10, firstPeerID: "P1", secondPeerID: "P1"}},
@@ -1669,7 +1670,7 @@ func TestScHandleBlockVerificationFailure(t *testing.T) {
initHeight: 6,
peers: map[string]*scPeer{"P1": {height: 7, state: peerStateReady}},
allB: []int64{6, 7},
received: map[int64]types.NodeID{6: "P1", 7: "P1"},
received: map[int64]p2p.NodeID{6: "P1", 7: "P1"},
},
args: args{event: pcBlockVerificationFailure{height: 7, firstPeerID: "P1", secondPeerID: "P1"}},
wantEvent: scFinishedEv{},
@@ -1680,8 +1681,8 @@ func TestScHandleBlockVerificationFailure(t *testing.T) {
initHeight: 5,
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}, "P2": {height: 8, state: peerStateReady}},
allB: []int64{5, 6, 7, 8},
pending: map[int64]types.NodeID{7: "P1", 8: "P1"},
received: map[int64]types.NodeID{5: "P1", 6: "P1"},
pending: map[int64]p2p.NodeID{7: "P1", 8: "P1"},
received: map[int64]p2p.NodeID{5: "P1", 6: "P1"},
},
args: args{event: pcBlockVerificationFailure{height: 5, firstPeerID: "P1", secondPeerID: "P1"}},
wantEvent: noOpEvent{},
@@ -1696,8 +1697,8 @@ func TestScHandleBlockVerificationFailure(t *testing.T) {
"P3": {height: 8, state: peerStateReady},
},
allB: []int64{5, 6, 7, 8},
pending: map[int64]types.NodeID{7: "P1", 8: "P1"},
received: map[int64]types.NodeID{5: "P1", 6: "P1"},
pending: map[int64]p2p.NodeID{7: "P1", 8: "P1"},
received: map[int64]p2p.NodeID{5: "P1", 6: "P1"},
},
args: args{event: pcBlockVerificationFailure{height: 5, firstPeerID: "P1", secondPeerID: "P2"}},
wantEvent: noOpEvent{},
@@ -1716,7 +1717,7 @@ func TestScHandleBlockVerificationFailure(t *testing.T) {
func TestScHandleAddNewPeer(t *testing.T) {
addP1 := bcAddNewPeer{
peerID: types.NodeID("P1"),
peerID: p2p.NodeID("P1"),
}
type args struct {
event bcAddNewPeer
@@ -1827,7 +1828,7 @@ func TestScHandleTryPrunePeer(t *testing.T) {
allB: []int64{1, 2, 3, 4, 5, 6, 7},
peerTimeout: time.Second},
args: args{event: pruneEv},
wantEvent: scPeersPruned{peers: []types.NodeID{"P4", "P5", "P6"}},
wantEvent: scPeersPruned{peers: []p2p.NodeID{"P4", "P5", "P6"}},
},
{
name: "mixed peers, finish after pruning",
@@ -1925,7 +1926,7 @@ func TestScHandleTrySchedule(t *testing.T) {
"P1": {height: 4, state: peerStateReady},
"P2": {height: 5, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5},
pending: map[int64]types.NodeID{
pending: map[int64]p2p.NodeID{
1: "P1", 2: "P1",
3: "P2",
},
@@ -1943,7 +1944,7 @@ func TestScHandleTrySchedule(t *testing.T) {
"P1": {height: 8, state: peerStateReady},
"P3": {height: 8, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8},
pending: map[int64]types.NodeID{
pending: map[int64]p2p.NodeID{
1: "P1", 2: "P1",
3: "P3", 4: "P3",
5: "P2", 6: "P2",
@@ -2105,7 +2106,7 @@ func TestScHandle(t *testing.T) {
startTime: now,
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}},
allB: []int64{1, 2, 3},
pending: map[int64]types.NodeID{1: "P1"},
pending: map[int64]p2p.NodeID{1: "P1"},
pendingTime: map[int64]time.Time{1: tick[1]},
height: 1,
},
@@ -2117,7 +2118,7 @@ func TestScHandle(t *testing.T) {
startTime: now,
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}},
allB: []int64{1, 2, 3},
pending: map[int64]types.NodeID{1: "P1", 2: "P1"},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P1"},
pendingTime: map[int64]time.Time{1: tick[1], 2: tick[2]},
height: 1,
},
@@ -2129,7 +2130,7 @@ func TestScHandle(t *testing.T) {
startTime: now,
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}},
allB: []int64{1, 2, 3},
pending: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1"},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1"},
pendingTime: map[int64]time.Time{1: tick[1], 2: tick[2], 3: tick[3]},
height: 1,
},
@@ -2141,9 +2142,9 @@ func TestScHandle(t *testing.T) {
startTime: now,
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[4]}},
allB: []int64{1, 2, 3},
pending: map[int64]types.NodeID{2: "P1", 3: "P1"},
pending: map[int64]p2p.NodeID{2: "P1", 3: "P1"},
pendingTime: map[int64]time.Time{2: tick[2], 3: tick[3]},
received: map[int64]types.NodeID{1: "P1"},
received: map[int64]p2p.NodeID{1: "P1"},
height: 1,
},
},
@@ -2154,9 +2155,9 @@ func TestScHandle(t *testing.T) {
startTime: now,
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[5]}},
allB: []int64{1, 2, 3},
pending: map[int64]types.NodeID{3: "P1"},
pending: map[int64]p2p.NodeID{3: "P1"},
pendingTime: map[int64]time.Time{3: tick[3]},
received: map[int64]types.NodeID{1: "P1", 2: "P1"},
received: map[int64]p2p.NodeID{1: "P1", 2: "P1"},
height: 1,
},
},
@@ -2167,29 +2168,29 @@ func TestScHandle(t *testing.T) {
startTime: now,
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[6]}},
allB: []int64{1, 2, 3},
received: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1"},
received: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1"},
height: 1,
},
},
{ // processed block 1
args: args{event: pcBlockProcessed{peerID: types.NodeID("P1"), height: 1}},
args: args{event: pcBlockProcessed{peerID: p2p.NodeID("P1"), height: 1}},
wantEvent: noOpEvent{},
wantSc: &scTestParams{
startTime: now,
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[6]}},
allB: []int64{2, 3},
received: map[int64]types.NodeID{2: "P1", 3: "P1"},
received: map[int64]p2p.NodeID{2: "P1", 3: "P1"},
height: 2,
},
},
{ // processed block 2
args: args{event: pcBlockProcessed{peerID: types.NodeID("P1"), height: 2}},
args: args{event: pcBlockProcessed{peerID: p2p.NodeID("P1"), height: 2}},
wantEvent: scFinishedEv{},
wantSc: &scTestParams{
startTime: now,
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[6]}},
allB: []int64{3},
received: map[int64]types.NodeID{3: "P1"},
received: map[int64]p2p.NodeID{3: "P1"},
height: 3,
},
},
@@ -2205,7 +2206,7 @@ func TestScHandle(t *testing.T) {
"P1": {height: 4, state: peerStateReady, lastTouched: tick[6]},
"P2": {height: 3, state: peerStateReady, lastTouched: tick[6]}},
allB: []int64{1, 2, 3, 4},
received: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1"},
received: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1"},
height: 1,
},
args: args{event: pcBlockVerificationFailure{height: 1, firstPeerID: "P1", secondPeerID: "P1"}},
@@ -2216,7 +2217,7 @@ func TestScHandle(t *testing.T) {
"P1": {height: 4, state: peerStateRemoved, lastTouched: tick[6]},
"P2": {height: 3, state: peerStateReady, lastTouched: tick[6]}},
allB: []int64{1, 2, 3},
received: map[int64]types.NodeID{},
received: map[int64]p2p.NodeID{},
height: 1,
},
},
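A compact sketch of the happy-path lifecycle these tests walk through. It must live in the scheduler's own package, since everything here is unexported; markProcessed's signature is inferred from TestScMarkProcessed above, and error handling is elided.

func schedulerLifecycleSketch() {
	sc := newScheduler(1, time.Now())             // start syncing at height 1
	sc.ensurePeer("P1")                           // peer begins in peerStateNew
	_ = sc.setPeerRange("P1", 0, 3)               // status update: peer Ready, heights 1..3 become New
	_ = sc.markPending("P1", 1, time.Now())       // block 1 requested from P1
	_ = sc.markReceived("P1", 1, 100, time.Now()) // block 1 arrived (100 bytes)
	_ = sc.markProcessed(1)                       // processor done; scheduler height advances to 2
}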

View File

@@ -46,8 +46,9 @@ func main() {
rootCA = flag.String("rootcafile", "", "absolute path to root CA")
prometheusAddr = flag.String("prometheus-addr", "", "address for prometheus endpoint (host:port)")
logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false).
With("module", "priv_val")
logger = log.NewTMLogger(
log.NewSyncWriter(os.Stdout),
).With("module", "priv_val")
)
flag.Parse()

View File

@@ -1,6 +1,8 @@
package debug
import (
"os"
"github.com/spf13/cobra"
"github.com/tendermint/tendermint/libs/log"
@@ -15,7 +17,7 @@ var (
flagProfAddr = "pprof-laddr"
flagFrequency = "frequency"
logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
logger = log.NewTMLogger(log.NewSyncWriter(os.Stdout))
)
// DebugCmd defines the root command containing subcommands that assist in

View File

@@ -6,7 +6,7 @@ import (
"github.com/spf13/cobra"
tmjson "github.com/tendermint/tendermint/libs/json"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tendermint/p2p"
)
// GenNodeKeyCmd allows the generation of a node key. It prints JSON-encoded
@@ -20,7 +20,7 @@ var GenNodeKeyCmd = &cobra.Command{
}
func genNodeKey(cmd *cobra.Command, args []string) error {
nodeKey := types.GenNodeKey()
nodeKey := p2p.GenNodeKey()
bz, err := tmjson.Marshal(nodeKey)
if err != nil {
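The command body above reduces to a two-call sequence; a standalone equivalent, assuming the post-change p2p package.

package main

import (
	"fmt"

	tmjson "github.com/tendermint/tendermint/libs/json"
	"github.com/tendermint/tendermint/p2p"
)

func main() {
	// Generate a fresh node key and print it JSON-encoded,
	// as the gen-node-key command does.
	nodeKey := p2p.GenNodeKey()
	bz, err := tmjson.Marshal(nodeKey)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(bz))
}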

View File

@@ -10,9 +10,10 @@ import (
cfg "github.com/tendermint/tendermint/config"
tmos "github.com/tendermint/tendermint/libs/os"
tmrand "github.com/tendermint/tendermint/libs/rand"
tmtime "github.com/tendermint/tendermint/libs/time"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/privval"
"github.com/tendermint/tendermint/types"
tmtime "github.com/tendermint/tendermint/types/time"
)
// InitFilesCmd initializes a fresh Tendermint Core instance.
@@ -50,8 +51,8 @@ func initFilesWithConfig(config *cfg.Config) error {
if config.Mode == cfg.ModeValidator {
// private validator
privValKeyFile := config.PrivValidator.KeyFile()
privValStateFile := config.PrivValidator.StateFile()
privValKeyFile := config.PrivValidatorKeyFile()
privValStateFile := config.PrivValidatorStateFile()
if tmos.FileExists(privValKeyFile) {
pv, err = privval.LoadFilePV(privValKeyFile, privValStateFile)
if err != nil {
@@ -75,7 +76,7 @@ func initFilesWithConfig(config *cfg.Config) error {
if tmos.FileExists(nodeKeyFile) {
logger.Info("Found node key", "path", nodeKeyFile)
} else {
if _, err := types.LoadOrGenNodeKey(nodeKeyFile); err != nil {
if _, err := p2p.LoadOrGenNodeKey(nodeKeyFile); err != nil {
return err
}
logger.Info("Generated node key", "path", nodeKeyFile)
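The same load-or-generate flow is usable directly; a minimal sketch, assuming the post-change p2p package (the file path is a placeholder).

package main

import (
	"fmt"

	"github.com/tendermint/tendermint/p2p"
)

func main() {
	// Load the node key if the file exists, otherwise generate and persist
	// one, exactly as initFilesWithConfig does above.
	nodeKey, err := p2p.LoadOrGenNodeKey("/tmp/node_key.json")
	if err != nil {
		panic(err)
	}
	fmt.Println("node key loaded or generated:", nodeKey != nil)
}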

View File

@@ -1,6 +1,7 @@
package commands
import (
"bufio"
"context"
"errors"
"fmt"
@@ -66,8 +67,7 @@ var (
trustedHash []byte
trustLevelStr string
logLevel string
logFormat string
verbose bool
primaryKey = []byte("primary")
witnessesKey = []byte("witnesses")
@@ -91,8 +91,7 @@ func init() {
"trusting period that headers can be verified within. Should be significantly less than the unbonding period")
LightCmd.Flags().Int64Var(&trustedHeight, "height", 1, "Trusted header's height")
LightCmd.Flags().BytesHexVar(&trustedHash, "hash", []byte{}, "Trusted header's hash")
LightCmd.Flags().StringVar(&logLevel, "log-level", log.LogLevelInfo, "The logging level (debug|info|warn|error|fatal)")
LightCmd.Flags().StringVar(&logFormat, "log-format", log.LogFormatPlain, "The logging format (text|json)")
LightCmd.Flags().BoolVar(&verbose, "verbose", false, "Verbose output")
LightCmd.Flags().StringVar(&trustLevelStr, "trust-level", "1/3",
"trust level. Must be between 1/3 and 3/3",
)
@@ -102,10 +101,15 @@ func init() {
}
func runProxy(cmd *cobra.Command, args []string) error {
logger, err := log.NewDefaultLogger(logFormat, logLevel, false)
if err != nil {
return err
// Initialize logger.
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
var option log.Option
if verbose {
option, _ = log.AllowLevel("debug")
} else {
option, _ = log.AllowLevel("info")
}
logger = log.NewFilter(logger, option)
chainID = args[0]
logger.Info("Creating client...", "chainID", chainID)
@@ -144,7 +148,25 @@ func runProxy(cmd *cobra.Command, args []string) error {
return fmt.Errorf("can't parse trust level: %w", err)
}
options := []light.Option{light.Logger(logger)}
options := []light.Option{
light.Logger(logger),
light.ConfirmationFunction(func(action string) bool {
fmt.Println(action)
scanner := bufio.NewScanner(os.Stdin)
for {
scanner.Scan()
response := scanner.Text()
switch response {
case "y", "Y":
return true
case "n", "N":
return false
default:
fmt.Println("please input 'Y' or 'n' and press ENTER")
}
}
}),
}
if sequential {
options = append(options, light.SequentialVerification())
@@ -152,21 +174,31 @@ func runProxy(cmd *cobra.Command, args []string) error {
options = append(options, light.SkippingVerification(trustLevel))
}
// Initialize the light client. If the trusted store already has blocks in it,
// they will be used; otherwise we fall back to the trusted options.
c, err := light.NewHTTPClient(
context.Background(),
chainID,
light.TrustOptions{
Period: trustingPeriod,
Height: trustedHeight,
Hash: trustedHash,
},
primaryAddr,
witnessesAddrs,
dbs.New(db),
options...,
)
var c *light.Client
if trustedHeight > 0 && len(trustedHash) > 0 { // fresh installation
c, err = light.NewHTTPClient(
context.Background(),
chainID,
light.TrustOptions{
Period: trustingPeriod,
Height: trustedHeight,
Hash: trustedHash,
},
primaryAddr,
witnessesAddrs,
dbs.New(db),
options...,
)
} else { // continue from latest state
c, err = light.NewHTTPClientFromTrustedStore(
chainID,
trustingPeriod,
primaryAddr,
witnessesAddrs,
dbs.New(db),
options...,
)
}
if err != nil {
return err
}

View File

@@ -5,8 +5,8 @@ import (
"github.com/spf13/cobra"
"github.com/tendermint/tendermint/internal/p2p/upnp"
tmjson "github.com/tendermint/tendermint/libs/json"
"github.com/tendermint/tendermint/p2p/upnp"
)
// ProbeUpnpCmd adds capabilities to test the UPnP functionality.

View File

@@ -2,7 +2,8 @@ package commands
import (
"github.com/spf13/cobra"
"github.com/tendermint/tendermint/internal/consensus"
"github.com/tendermint/tendermint/consensus"
)
// ReplayCmd allows replaying of messages from the WAL.

View File

@@ -41,14 +41,14 @@ var ResetPrivValidatorCmd = &cobra.Command{
// XXX: this is totally unsafe.
// it's only suitable for testnets.
func resetAll(cmd *cobra.Command, args []string) error {
return ResetAll(config.DBDir(), config.P2P.AddrBookFile(), config.PrivValidator.KeyFile(),
config.PrivValidator.StateFile(), logger)
return ResetAll(config.DBDir(), config.P2P.AddrBookFile(), config.PrivValidatorKeyFile(),
config.PrivValidatorStateFile(), logger)
}
// XXX: this is totally unsafe.
// it's only suitable for testnets.
func resetPrivValidator(cmd *cobra.Command, args []string) error {
return resetFilePV(config.PrivValidator.KeyFile(), config.PrivValidator.StateFile(), logger)
return resetFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile(), logger)
}
// ResetAll removes address book files plus all data, and resets the privValidator data.

View File

@@ -2,6 +2,7 @@ package commands
import (
"fmt"
"os"
"strings"
"time"
@@ -9,12 +10,14 @@ import (
"github.com/spf13/viper"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/libs/cli"
tmflags "github.com/tendermint/tendermint/libs/cli/flags"
"github.com/tendermint/tendermint/libs/log"
)
var (
config = cfg.DefaultConfig()
logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
logger = log.NewTMLogger(log.NewSyncWriter(os.Stdout))
ctxTimeout = 4 * time.Second
)
@@ -56,11 +59,19 @@ var RootCmd = &cobra.Command{
return err
}
logger, err = log.NewDefaultLogger(config.LogFormat, config.LogLevel, false)
if config.LogFormat == cfg.LogFormatJSON {
logger = log.NewTMJSONLogger(log.NewSyncWriter(os.Stdout))
}
logger, err = tmflags.ParseLogLevel(config.LogLevel, logger, cfg.DefaultLogLevel)
if err != nil {
return err
}
if viper.GetBool(cli.TraceFlag) {
logger = log.NewTracingLogger(logger)
}
logger = logger.With("module", "main")
return nil
},
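As the tests further down in this diff hint (the "abc:debug" values), the log-level string accepted here is either a bare level or a comma-separated list of module:level pairs, with a fallback default for unmatched modules. A hedged sketch of the same wiring outside cobra (the level string is hypothetical):
// Illustrative only; uses the log/tmflags calls shown above.
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
logger, err := tmflags.ParseLogLevel("consensus:debug,*:info", logger, cfg.DefaultLogLevel)
if err != nil {
	panic(err) // sketch; the real hook returns the error
}
logger.With("module", "consensus").Debug("visible")  // consensus runs at debug
logger.With("module", "p2p").Debug("filtered out")   // everything else at info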

View File

@@ -18,6 +18,10 @@ import (
tmos "github.com/tendermint/tendermint/libs/os"
)
var (
defaultRoot = os.ExpandEnv("$HOME/.some/test/dir")
)
// clearConfig clears env vars, the given root dir, and resets viper.
func clearConfig(dir string) {
if err := os.Unsetenv("TMHOME"); err != nil {
@@ -48,10 +52,10 @@ func testRootCmd() *cobra.Command {
}
func testSetup(rootDir string, args []string, env map[string]string) error {
clearConfig(rootDir)
clearConfig(defaultRoot)
rootCmd := testRootCmd()
cmd := cli.PrepareBaseCmd(rootCmd, "TM", rootDir)
cmd := cli.PrepareBaseCmd(rootCmd, "TM", defaultRoot)
// run with the args and env
args = append([]string{rootCmd.Use}, args...)
@@ -59,7 +63,6 @@ func testSetup(rootDir string, args []string, env map[string]string) error {
}
func TestRootHome(t *testing.T) {
defaultRoot := t.TempDir()
newRoot := filepath.Join(defaultRoot, "something-else")
cases := []struct {
args []string
@@ -102,7 +105,6 @@ func TestRootFlagsEnv(t *testing.T) {
{nil, map[string]string{"TM_LOG_LEVEL": "debug"}, "debug"}, // right env
}
defaultRoot := t.TempDir()
for i, tc := range cases {
idxString := strconv.Itoa(i)
@@ -116,7 +118,7 @@ func TestRootFlagsEnv(t *testing.T) {
func TestRootConfig(t *testing.T) {
// write non-default config
nonDefaultLogLvl := "debug"
nonDefaultLogLvl := "abc:debug"
cvals := map[string]string{
"log-level": nonDefaultLogLvl,
}
@@ -127,13 +129,12 @@ func TestRootConfig(t *testing.T) {
logLvl string
}{
{nil, nil, nonDefaultLogLvl}, // should load config
{[]string{"--log-level=info"}, nil, "info"}, // flag over rides
{nil, map[string]string{"TM_LOG_LEVEL": "info"}, "info"}, // env over rides
{nil, nil, nonDefaultLogLvl}, // should load config
{[]string{"--log-level=abc:info"}, nil, "abc:info"}, // flag over rides
{nil, map[string]string{"TM_LOG_LEVEL": "abc:info"}, "abc:info"}, // env over rides
}
for i, tc := range cases {
defaultRoot := t.TempDir()
idxString := strconv.Itoa(i)
clearConfig(defaultRoot)

View File

@@ -11,6 +11,7 @@ import (
cfg "github.com/tendermint/tendermint/config"
tmos "github.com/tendermint/tendermint/libs/os"
nm "github.com/tendermint/tendermint/node"
)
var (
@@ -29,7 +30,7 @@ func AddNodeFlags(cmd *cobra.Command) {
// priv val flags
cmd.Flags().String(
"priv-validator-laddr",
config.PrivValidator.ListenAddr,
config.PrivValidatorListenAddr,
"socket address to listen on for connections from external priv-validator process")
// node flags
@@ -98,7 +99,7 @@ func AddNodeFlags(cmd *cobra.Command) {
// NewRunNodeCmd returns the command that allows the CLI to start a node.
// It can be used with a custom PrivValidator and in-process ABCI application.
func NewRunNodeCmd(nodeProvider cfg.ServiceProvider) *cobra.Command {
func NewRunNodeCmd(nodeProvider nm.Provider) *cobra.Command {
cmd := &cobra.Command{
Use: "start",
Aliases: []string{"node", "run"},
@@ -117,7 +118,7 @@ func NewRunNodeCmd(nodeProvider cfg.ServiceProvider) *cobra.Command {
return fmt.Errorf("failed to start node: %w", err)
}
logger.Info("started node", "node", n.String())
logger.Info("Started node", "nodeInfo", n.Switch().NodeInfo())
// Stop upon receiving SIGTERM or CTRL-C.
tmos.TrapSignal(logger, func() {

View File

@@ -4,6 +4,8 @@ import (
"fmt"
"github.com/spf13/cobra"
"github.com/tendermint/tendermint/p2p"
)
// ShowNodeIDCmd dumps node's ID to the standard output.
@@ -16,11 +18,11 @@ var ShowNodeIDCmd = &cobra.Command{
}
func showNodeID(cmd *cobra.Command, args []string) error {
nodeKeyID, err := config.LoadNodeKeyID()
nodeKey, err := p2p.LoadNodeKey(config.NodeKeyFile())
if err != nil {
return err
}
fmt.Println(nodeKeyID)
fmt.Println(nodeKey.ID)
return nil
}

View File

@@ -30,15 +30,10 @@ func showValidator(cmd *cobra.Command, args []string) error {
)
//TODO: remove once gRPC is the only supported protocol
protocol, _ := tmnet.ProtocolAndAddress(config.PrivValidator.ListenAddr)
protocol, _ := tmnet.ProtocolAndAddress(config.PrivValidatorListenAddr)
switch protocol {
case "grpc":
pvsc, err := tmgrpc.DialRemoteSigner(
config.PrivValidator,
config.ChainID(),
logger,
config.Instrumentation.Prometheus,
)
pvsc, err := tmgrpc.DialRemoteSigner(config, config.ChainID(), logger)
if err != nil {
return fmt.Errorf("can't connect to remote validator %w", err)
}
@@ -52,12 +47,12 @@ func showValidator(cmd *cobra.Command, args []string) error {
}
default:
keyFilePath := config.PrivValidator.KeyFile()
keyFilePath := config.PrivValidatorKeyFile()
if !tmos.FileExists(keyFilePath) {
return fmt.Errorf("private validator file %s does not exist", keyFilePath)
}
pv, err := privval.LoadFilePV(keyFilePath, config.PrivValidator.StateFile())
pv, err := privval.LoadFilePV(keyFilePath, config.PrivValidatorStateFile())
if err != nil {
return err
}

View File

@@ -14,9 +14,10 @@ import (
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/libs/bytes"
tmrand "github.com/tendermint/tendermint/libs/rand"
tmtime "github.com/tendermint/tendermint/libs/time"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/privval"
"github.com/tendermint/tendermint/types"
tmtime "github.com/tendermint/tendermint/types/time"
)
var (
@@ -143,8 +144,8 @@ func testnetFiles(cmd *cobra.Command, args []string) error {
return err
}
pvKeyFile := filepath.Join(nodeDir, config.PrivValidator.Key)
pvStateFile := filepath.Join(nodeDir, config.PrivValidator.State)
pvKeyFile := filepath.Join(nodeDir, config.BaseConfig.PrivValidatorKey)
pvStateFile := filepath.Join(nodeDir, config.BaseConfig.PrivValidatorState)
pv, err := privval.LoadFilePV(pvKeyFile, pvStateFile)
if err != nil {
return err
@@ -273,11 +274,11 @@ func persistentPeersArray(config *cfg.Config) ([]string, error) {
for i := 0; i < nValidators+nNonValidators; i++ {
nodeDir := filepath.Join(outputDir, fmt.Sprintf("%s%d", nodeDirPrefix, i))
config.SetRoot(nodeDir)
nodeKey, err := config.LoadNodeKeyID()
nodeKey, err := p2p.LoadNodeKey(config.NodeKeyFile())
if err != nil {
return []string{}, err
}
peers[i] = nodeKey.AddressString(fmt.Sprintf("%s:%d", hostnameOrIP(i), p2pPort))
peers[i] = p2p.IDAddressString(nodeKey.ID, fmt.Sprintf("%s:%d", hostnameOrIP(i), p2pPort))
}
return peers, nil
}

View File

@@ -13,6 +13,6 @@ var VersionCmd = &cobra.Command{
Use: "version",
Short: "Show version info",
Run: func(cmd *cobra.Command, args []string) {
fmt.Println(version.TMVersion)
fmt.Println(version.TMCoreSemVer)
},
}

View File

@@ -38,8 +38,8 @@ func main() {
// * Supply a genesis doc file from another source
// * Provide their own DB implementation
// can copy this file and use something other than the
// node.NewDefault function
nodeFunc := nm.NewDefault
// DefaultNewNode function
nodeFunc := nm.DefaultNewNode
// Create & start node
rootCmd.AddCommand(cmd.NewRunNodeCmd(nodeFunc))

View File

@@ -4,16 +4,10 @@ import (
"encoding/hex"
"errors"
"fmt"
"io/ioutil"
"net/http"
"os"
"path/filepath"
"time"
tmjson "github.com/tendermint/tendermint/libs/json"
"github.com/tendermint/tendermint/libs/log"
tmos "github.com/tendermint/tendermint/libs/os"
"github.com/tendermint/tendermint/types"
)
const (
@@ -22,6 +16,11 @@ const (
// FuzzModeDelay is a mode in which we randomly sleep
FuzzModeDelay
// LogFormatPlain is a format for colored text
LogFormatPlain = "plain"
// LogFormatJSON is a format for json output
LogFormatJSON = "json"
// DefaultLogLevel defines a default log level as INFO.
DefaultLogLevel = "info"
@@ -31,9 +30,6 @@ const (
BlockchainV0 = "v0"
BlockchainV2 = "v2"
MempoolV0 = "v0"
MempoolV1 = "v1"
)
// NOTE: Most of the structs & relevant comments + the
@@ -80,7 +76,6 @@ type Config struct {
Consensus *ConsensusConfig `mapstructure:"consensus"`
TxIndex *TxIndexConfig `mapstructure:"tx-index"`
Instrumentation *InstrumentationConfig `mapstructure:"instrumentation"`
PrivValidator *PrivValidatorConfig `mapstructure:"priv-validator"`
}
// DefaultConfig returns a default configuration for a Tendermint node
@@ -95,7 +90,6 @@ func DefaultConfig() *Config {
Consensus: DefaultConsensusConfig(),
TxIndex: DefaultTxIndexConfig(),
Instrumentation: DefaultInstrumentationConfig(),
PrivValidator: DefaultPrivValidatorConfig(),
}
}
@@ -118,7 +112,6 @@ func TestConfig() *Config {
Consensus: TestConsensusConfig(),
TxIndex: TestTxIndexConfig(),
Instrumentation: TestInstrumentationConfig(),
PrivValidator: DefaultPrivValidatorConfig(),
}
}
@@ -129,7 +122,6 @@ func (cfg *Config) SetRoot(root string) *Config {
cfg.P2P.RootDir = root
cfg.Mempool.RootDir = root
cfg.Consensus.RootDir = root
cfg.PrivValidator.RootDir = root
return cfg
}
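SetRoot fans one home directory out to every per-section config so that each section's relative paths resolve against the same root. A small usage sketch (the path is hypothetical):
// Illustrative: point every config section at one home directory.
cfg := DefaultConfig()
cfg.SetRoot("/home/user/.tendermint") // hypothetical home
// RPC, P2P, Mempool, and Consensus now share RootDir, so path helpers
// such as cfg.GenesisFile() and cfg.NodeKeyFile() yield absolute paths.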
@@ -232,6 +224,26 @@ type BaseConfig struct { //nolint: maligned
// Path to the JSON file containing the initial validator set and other meta data
Genesis string `mapstructure:"genesis-file"`
// Path to the JSON file containing the private key to use as a validator in the consensus protocol
PrivValidatorKey string `mapstructure:"priv-validator-key-file"`
// Path to the JSON file containing the last sign state of a validator
PrivValidatorState string `mapstructure:"priv-validator-state-file"`
// TCP or UNIX socket address for Tendermint to listen on for
// connections from an external PrivValidator process
PrivValidatorListenAddr string `mapstructure:"priv-validator-laddr"`
// Client certificate generated while creating needed files for secure connection.
// If a remote validator address is provided but no certificate, the connection will be insecure
PrivValidatorClientCertificate string `mapstructure:"priv-validator-client-certificate-file"`
// Client key generated while creating certificates for secure connection
PrivValidatorClientKey string `mapstructure:"priv-validator-client-key-file"`
// Path to the Root Certificate Authority used to sign both client and server certificates
PrivValidatorRootCA string `mapstructure:"priv-validator-root-ca-file"`
// A JSON file containing the private key to use for p2p authenticated encryption
NodeKey string `mapstructure:"node-key-file"`
@@ -246,18 +258,20 @@ type BaseConfig struct { //nolint: maligned
// DefaultBaseConfig returns a default base configuration for a Tendermint node
func DefaultBaseConfig() BaseConfig {
return BaseConfig{
Genesis: defaultGenesisJSONPath,
NodeKey: defaultNodeKeyPath,
Mode: defaultMode,
Moniker: defaultMoniker,
ProxyApp: "tcp://127.0.0.1:26658",
ABCI: "socket",
LogLevel: DefaultLogLevel,
LogFormat: log.LogFormatPlain,
FastSyncMode: true,
FilterPeers: false,
DBBackend: "goleveldb",
DBPath: "data",
Genesis: defaultGenesisJSONPath,
PrivValidatorKey: defaultPrivValKeyPath,
PrivValidatorState: defaultPrivValStatePath,
NodeKey: defaultNodeKeyPath,
Mode: defaultMode,
Moniker: defaultMoniker,
ProxyApp: "tcp://127.0.0.1:26658",
ABCI: "socket",
LogLevel: DefaultLogLevel,
LogFormat: LogFormatPlain,
FastSyncMode: true,
FilterPeers: false,
DBBackend: "goleveldb",
DBPath: "data",
}
}
@@ -281,147 +295,72 @@ func (cfg BaseConfig) GenesisFile() string {
return rootify(cfg.Genesis, cfg.RootDir)
}
// PrivValidatorClientKeyFile returns the full path to the validator client key file
func (cfg BaseConfig) PrivValidatorClientKeyFile() string {
return rootify(cfg.PrivValidatorClientKey, cfg.RootDir)
}
// PrivValidatorClientCertificateFile returns the full path to the validator client certificate file
func (cfg BaseConfig) PrivValidatorClientCertificateFile() string {
return rootify(cfg.PrivValidatorClientCertificate, cfg.RootDir)
}
// PrivValidatorRootCAFile returns the full path to the root certificate authority file
func (cfg BaseConfig) PrivValidatorRootCAFile() string {
return rootify(cfg.PrivValidatorRootCA, cfg.RootDir)
}
// PrivValidatorKeyFile returns the full path to the priv_validator_key.json file
func (cfg BaseConfig) PrivValidatorKeyFile() string {
return rootify(cfg.PrivValidatorKey, cfg.RootDir)
}
// PrivValidatorStateFile returns the full path to the priv_validator_state.json file
func (cfg BaseConfig) PrivValidatorStateFile() string {
return rootify(cfg.PrivValidatorState, cfg.RootDir)
}
// NodeKeyFile returns the full path to the node_key.json file
func (cfg BaseConfig) NodeKeyFile() string {
return rootify(cfg.NodeKey, cfg.RootDir)
}
// LoadNodeKeyID loads the node key from NodeKeyFile() and returns its ID.
func (cfg BaseConfig) LoadNodeKeyID() (types.NodeID, error) {
jsonBytes, err := ioutil.ReadFile(cfg.NodeKeyFile())
if err != nil {
return "", err
}
nodeKey := types.NodeKey{}
err = tmjson.Unmarshal(jsonBytes, &nodeKey)
if err != nil {
return "", err
}
nodeKey.ID = types.NodeIDFromPubKey(nodeKey.PubKey())
return nodeKey.ID, nil
}
// LoadOrGenNodeKeyID attempts to load the node key from NodeKeyFile() and
// returns its ID. If the file does not exist, it generates and saves a new
// node key first.
func (cfg BaseConfig) LoadOrGenNodeKeyID() (types.NodeID, error) {
if tmos.FileExists(cfg.NodeKeyFile()) {
nodeKey, err := cfg.LoadNodeKeyID()
if err != nil {
return "", err
}
return nodeKey, nil
}
nodeKey := types.GenNodeKey()
if err := nodeKey.SaveAs(cfg.NodeKeyFile()); err != nil {
return "", err
}
return nodeKey.ID, nil
}
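A brief usage sketch of the two node-key helpers above, assuming a BaseConfig whose RootDir is set (values hypothetical):
cfg := DefaultBaseConfig()
cfg.RootDir = "/home/user/.tendermint" // hypothetical
// First run: generates node_key.json, saves it, and returns the new ID.
// Later runs: loads the existing key and returns the same ID.
nodeID, err := cfg.LoadOrGenNodeKeyID()
if err != nil {
	panic(err) // sketch only
}
fmt.Println("node ID:", nodeID)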
// DBDir returns the full path to the database directory
func (cfg BaseConfig) DBDir() string {
return rootify(cfg.DBPath, cfg.RootDir)
}
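Every path helper in this file funnels through rootify, which is assumed here to join relative paths onto RootDir while leaving absolute paths untouched; a sketch of those assumed semantics (not the actual implementation):
// rootifySketch illustrates the behavior the helpers above rely on.
func rootifySketch(path, root string) string {
	if filepath.IsAbs(path) {
		return path // absolute paths pass through unchanged
	}
	return filepath.Join(root, path) // relative paths anchor at the root
}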
// ValidateBasic performs basic validation (checking param bounds, etc.) and
// returns an error if any check fails.
func (cfg BaseConfig) ValidateBasic() error {
switch cfg.LogFormat {
case log.LogFormatJSON, log.LogFormatText, log.LogFormatPlain:
default:
return errors.New("unknown log format (must be 'plain', 'text' or 'json')")
}
switch cfg.Mode {
case ModeFull, ModeValidator, ModeSeed:
case "":
return errors.New("no mode has been set")
default:
return fmt.Errorf("unknown mode: %v", cfg.Mode)
}
return nil
}
//-----------------------------------------------------------------------------
// PrivValidatorConfig
// PrivValidatorConfig defines the configuration parameters for running a validator
type PrivValidatorConfig struct {
RootDir string `mapstructure:"home"`
// Path to the JSON file containing the private key to use as a validator in the consensus protocol
Key string `mapstructure:"key-file"`
// Path to the JSON file containing the last sign state of a validator
State string `mapstructure:"state-file"`
// TCP or UNIX socket address for Tendermint to listen on for
// connections from an external PrivValidator process
ListenAddr string `mapstructure:"laddr"`
// Client certificate generated while creating needed files for secure connection.
// If a remote validator address is provided but no certificate, the connection will be insecure
ClientCertificate string `mapstructure:"client-certificate-file"`
// Client key generated while creating certificates for secure connection
ClientKey string `mapstructure:"client-key-file"`
// Path to the Root Certificate Authority used to sign both client and server certificates
RootCA string `mapstructure:"root-ca-file"`
}
// DefaultPrivValidatorConfig returns a default private validator configuration
// for a Tendermint node.
func DefaultPrivValidatorConfig() *PrivValidatorConfig {
return &PrivValidatorConfig{
Key: defaultPrivValKeyPath,
State: defaultPrivValStatePath,
}
}
// ClientKeyFile returns the full path to the validator client key file
func (cfg *PrivValidatorConfig) ClientKeyFile() string {
return rootify(cfg.ClientKey, cfg.RootDir)
}
// ClientCertificateFile returns the full path to the validator client certificate file
func (cfg *PrivValidatorConfig) ClientCertificateFile() string {
return rootify(cfg.ClientCertificate, cfg.RootDir)
}
// RootCAFile returns the full path to the root certificate authority file
func (cfg *PrivValidatorConfig) RootCAFile() string {
return rootify(cfg.RootCA, cfg.RootDir)
}
// KeyFile returns the full path to the priv_validator_key.json file
func (cfg *PrivValidatorConfig) KeyFile() string {
return rootify(cfg.Key, cfg.RootDir)
}
// StateFile returns the full path to the priv_validator_state.json file
func (cfg *PrivValidatorConfig) StateFile() string {
return rootify(cfg.State, cfg.RootDir)
}
func (cfg *PrivValidatorConfig) AreSecurityOptionsPresent() bool {
func (cfg *BaseConfig) ArePrivValidatorClientSecurityOptionsPresent() bool {
switch {
case cfg.RootCA == "":
case cfg.PrivValidatorRootCA == "":
return false
case cfg.ClientKey == "":
case cfg.PrivValidatorClientKey == "":
return false
case cfg.ClientCertificate == "":
case cfg.PrivValidatorClientCertificate == "":
return false
default:
return true
}
}
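For illustration, a caller could gate the transport choice for a remote signer on this predicate; a sketch under the assumption that distinct dial helpers exist (both names hypothetical):
if cfg.ArePrivValidatorClientSecurityOptionsPresent() {
	// Root CA, client key, and client certificate are all set, so a
	// mutually authenticated TLS connection can be established.
	dialSecure(cfg) // hypothetical helper
} else {
	// Any missing piece means the connection falls back to insecure.
	dialInsecure(cfg) // hypothetical helper
}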
// ValidateBasic performs basic validation (checking param bounds, etc.) and
// returns an error if any check fails.
func (cfg BaseConfig) ValidateBasic() error {
switch cfg.LogFormat {
case LogFormatPlain, LogFormatJSON:
default:
return errors.New("unknown log format (must be 'plain' or 'json')")
}
switch cfg.Mode {
case ModeFull, ModeValidator, ModeSeed:
case "":
return errors.New("no mode has been set")
default:
return fmt.Errorf("unknown mode: %v", cfg.Mode)
}
return nil
}
//-----------------------------------------------------------------------------
// RPCConfig
@@ -776,9 +715,8 @@ func (cfg *P2PConfig) ValidateBasic() error {
//-----------------------------------------------------------------------------
// MempoolConfig
// MempoolConfig defines the configuration options for the Tendermint mempool.
// MempoolConfig defines the configuration options for the Tendermint mempool
type MempoolConfig struct {
Version string `mapstructure:"version"`
RootDir string `mapstructure:"home"`
Recheck bool `mapstructure:"recheck"`
Broadcast bool `mapstructure:"broadcast"`
@@ -803,10 +741,9 @@ type MempoolConfig struct {
MaxBatchBytes int `mapstructure:"max-batch-bytes"`
}
// DefaultMempoolConfig returns a default configuration for the Tendermint mempool.
// DefaultMempoolConfig returns a default configuration for the Tendermint mempool
func DefaultMempoolConfig() *MempoolConfig {
return &MempoolConfig{
Version: MempoolV1,
Recheck: true,
Broadcast: true,
// Each signature verification takes .5ms, Size reduced until we implement
@@ -848,15 +785,13 @@ func (cfg *MempoolConfig) ValidateBasic() error {
// StateSyncConfig defines the configuration for the Tendermint state sync service
type StateSyncConfig struct {
Enable bool `mapstructure:"enable"`
TempDir string `mapstructure:"temp-dir"`
RPCServers []string `mapstructure:"rpc-servers"`
TrustPeriod time.Duration `mapstructure:"trust-period"`
TrustHeight int64 `mapstructure:"trust-height"`
TrustHash string `mapstructure:"trust-hash"`
DiscoveryTime time.Duration `mapstructure:"discovery-time"`
ChunkRequestTimeout time.Duration `mapstructure:"chunk-request-timeout"`
Fetchers int32 `mapstructure:"fetchers"`
Enable bool `mapstructure:"enable"`
TempDir string `mapstructure:"temp-dir"`
RPCServers []string `mapstructure:"rpc-servers"`
TrustPeriod time.Duration `mapstructure:"trust-period"`
TrustHeight int64 `mapstructure:"trust-height"`
TrustHash string `mapstructure:"trust-hash"`
DiscoveryTime time.Duration `mapstructure:"discovery-time"`
}
func (cfg *StateSyncConfig) TrustHashBytes() []byte {
@@ -871,10 +806,8 @@ func (cfg *StateSyncConfig) TrustHashBytes() []byte {
// DefaultStateSyncConfig returns a default configuration for the state sync service
func DefaultStateSyncConfig() *StateSyncConfig {
return &StateSyncConfig{
TrustPeriod: 168 * time.Hour,
DiscoveryTime: 15 * time.Second,
ChunkRequestTimeout: 15 * time.Second,
Fetchers: 4,
TrustPeriod: 168 * time.Hour,
DiscoveryTime: 15 * time.Second,
}
}
@@ -889,17 +822,14 @@ func (cfg *StateSyncConfig) ValidateBasic() error {
if len(cfg.RPCServers) == 0 {
return errors.New("rpc-servers is required")
}
if len(cfg.RPCServers) < 2 {
return errors.New("at least two rpc-servers entries is required")
}
for _, server := range cfg.RPCServers {
if len(server) == 0 {
return errors.New("found empty rpc-servers entry")
}
}
if cfg.DiscoveryTime != 0 && cfg.DiscoveryTime < 5*time.Second {
return errors.New("discovery time must be 0s or greater than five seconds")
}
@@ -907,29 +837,17 @@ func (cfg *StateSyncConfig) ValidateBasic() error {
if cfg.TrustPeriod <= 0 {
return errors.New("trusted-period is required")
}
if cfg.TrustHeight <= 0 {
return errors.New("trusted-height is required")
}
if len(cfg.TrustHash) == 0 {
return errors.New("trusted-hash is required")
}
_, err := hex.DecodeString(cfg.TrustHash)
if err != nil {
return fmt.Errorf("invalid trusted-hash: %w", err)
}
if cfg.ChunkRequestTimeout < 5*time.Second {
return errors.New("chunk-request-timeout must be at least 5 seconds")
}
if cfg.Fetchers <= 0 {
return errors.New("fetchers is required")
}
}
return nil
}
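Pulling the checks above together, a state-sync config that passes the stricter variant of ValidateBasic shown in this hunk needs at least two RPC servers plus a trust height and a hex-encoded trust hash; a hedged sketch (endpoints and hash are hypothetical):
cfg := DefaultStateSyncConfig()
cfg.Enable = true
cfg.RPCServers = []string{"http://node-a:26657", "http://node-b:26657"} // hypothetical
cfg.TrustHeight = 1000
cfg.TrustHash = "deadbeefcafe" // must decode as hex
if err := cfg.ValidateBasic(); err != nil {
	panic(err) // sketch only
}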
@@ -1140,25 +1058,19 @@ func (cfg *ConsensusConfig) ValidateBasic() error {
// TxIndexConfig defines the configuration for the transaction indexer,
// including composite keys to index.
type TxIndexConfig struct {
// The backend database list to back the indexer.
// If the list contains `null`, no indexer service will be used.
// What indexer to use for transactions
//
// Options:
// 1) "null" - no indexer services.
// 1) "null"
// 2) "kv" (default) - the simplest possible indexer,
// backed by key-value storage (defaults to levelDB; see DBBackend).
// 3) "psql" - the indexer services backed by PostgreSQL.
Indexer []string `mapstructure:"indexer"`
// The PostgreSQL connection configuration, the connection format:
// postgresql://<user>:<password>@<host>:<port>/<db>?<opts>
PsqlConn string `mapstructure:"psql-conn"`
Indexer string `mapstructure:"indexer"`
}
// DefaultTxIndexConfig returns a default configuration for the transaction indexer.
func DefaultTxIndexConfig() *TxIndexConfig {
return &TxIndexConfig{
Indexer: []string{"kv"},
Indexer: "kv",
}
}
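Following the list-valued side of this hunk, enabling the PostgreSQL sink alongside the kv indexer would look roughly like this (the DSN is hypothetical):
// Illustrative configuration of the list-valued TxIndexConfig above.
cfg := DefaultTxIndexConfig()          // Indexer: []string{"kv"}
cfg.Indexer = []string{"kv", "psql"}   // also mirror txs into PostgreSQL
cfg.PsqlConn = "postgresql://user:pass@localhost:5432/tm?sslmode=disable" // hypothetical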

View File

@@ -1,26 +0,0 @@
package config
import (
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/service"
db "github.com/tendermint/tm-db"
)
// ServiceProvider takes a config and a logger and returns a ready to go Node.
type ServiceProvider func(*Config, log.Logger) (service.Service, error)
// DBContext specifies config information for loading a new DB.
type DBContext struct {
ID string
Config *Config
}
// DBProvider takes a DBContext and returns an instantiated DB.
type DBProvider func(*DBContext) (db.DB, error)
// DefaultDBProvider returns a database using the DBBackend and DBDir
// specified in the Config.
func DefaultDBProvider(ctx *DBContext) (db.DB, error) {
dbType := db.BackendType(ctx.Config.DBBackend)
return db.NewDB(ctx.ID, dbType, ctx.Config.DBDir())
}

View File

@@ -137,6 +137,27 @@ log-format = "{{ .BaseConfig.LogFormat }}"
# Path to the JSON file containing the initial validator set and other meta data
genesis-file = "{{ js .BaseConfig.Genesis }}"
# Path to the JSON file containing the private key to use as a validator in the consensus protocol
priv-validator-key-file = "{{ js .BaseConfig.PrivValidatorKey }}"
# Path to the JSON file containing the last sign state of a validator
priv-validator-state-file = "{{ js .BaseConfig.PrivValidatorState }}"
# TCP or UNIX socket address for Tendermint to listen on for
# connections from an external PrivValidator process
# when the listenAddr is prefixed with grpc instead of tcp it will use the gRPC Client
priv-validator-laddr = "{{ .BaseConfig.PrivValidatorListenAddr }}"
# Client certificate generated while creating needed files for secure connection.
# If a remote validator address is provided but no certificate, the connection will be insecure
priv-validator-client-certificate-file = "{{ js .BaseConfig.PrivValidatorClientCertificate }}"
# Client key generated while creating certificates for secure connection
priv-validator-client-key-file = "{{ js .BaseConfig.PrivValidatorClientKey }}"
# Path to the Root Certificate Authority used to sign both client and server certificates
priv-validator-certificate-authority = "{{ js .BaseConfig.PrivValidatorRootCA }}"
# Path to the JSON file containing the private key to use for node authentication in the p2p protocol
node-key-file = "{{ js .BaseConfig.NodeKey }}"
@@ -147,34 +168,6 @@ abci = "{{ .BaseConfig.ABCI }}"
# so the app can decide if we should keep the connection or not
filter-peers = {{ .BaseConfig.FilterPeers }}
#######################################################
### Priv Validator Configuration ###
#######################################################
[priv-validator]
# Path to the JSON file containing the private key to use as a validator in the consensus protocol
key-file = "{{ js .PrivValidator.Key }}"
# Path to the JSON file containing the last sign state of a validator
state-file = "{{ js .PrivValidator.State }}"
# TCP or UNIX socket address for Tendermint to listen on for
# connections from an external PrivValidator process
# when the listenAddr is prefixed with grpc instead of tcp it will use the gRPC Client
laddr = "{{ .PrivValidator.ListenAddr }}"
# Client certificate generated while creating needed files for secure connection.
# If a remote validator address is provided but no certificate, the connection will be insecure
client-certificate-file = "{{ js .PrivValidator.ClientCertificate }}"
# Client key generated while creating certificates for secure connection
validator-client-key-file = "{{ js .PrivValidator.ClientKey }}"
# Path to the Root Certificate Authority used to sign both client and server certificates
certificate-authority = "{{ js .PrivValidator.RootCA }}"
#######################################################################
### Advanced Configuration Options ###
#######################################################################
@@ -280,8 +273,7 @@ laddr = "{{ .P2P.ListenAddress }}"
# Address to advertise to peers for them to dial
# If empty, will use the same port as the laddr,
# and will introspect on the listener or use UPnP
# to figure out the address. ip and port are required
# example: 159.89.10.97:26656
# to figure out the address.
external-address = "{{ .P2P.ExternalAddress }}"
# Comma separated list of seed nodes to connect to
@@ -349,7 +341,6 @@ recv-rate = {{ .P2P.RecvRate }}
pex = {{ .P2P.PexReactor }}
# Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
# Warning: IPs will be exposed at /net_info; for more information see https://github.com/tendermint/tendermint/issues/3055
private-peer-ids = "{{ .P2P.PrivatePeerIDs }}"
# Toggle to disable guard against peers connecting from the same ip.
@@ -364,11 +355,6 @@ dial-timeout = "{{ .P2P.DialTimeout }}"
#######################################################
[mempool]
# Mempool version to use:
# 1) "v0" - The legacy non-prioritized mempool reactor.
# 2) "v1" (default) - The prioritized mempool reactor.
version = "{{ .Mempool.Version }}"
recheck = {{ .Mempool.Recheck }}
broadcast = {{ .Mempool.Broadcast }}
@@ -426,13 +412,6 @@ discovery-time = "{{ .StateSync.DiscoveryTime }}"
# Will create a new, randomly named directory within, and remove it when done.
temp-dir = "{{ .StateSync.TempDir }}"
# The timeout duration before re-requesting a chunk, possibly from a different
# peer (default: 15 seconds).
chunk-request-timeout = "{{ .StateSync.ChunkRequestTimeout }}"
# The number of concurrent chunk and block fetchers to run (default: 4).
fetchers = "{{ .StateSync.Fetchers }}"
#######################################################
### Fast Sync Configuration Connections ###
#######################################################
@@ -489,8 +468,7 @@ peer-query-maj23-sleep-duration = "{{ .Consensus.PeerQueryMaj23SleepDuration }}"
#######################################################
[tx-index]
# The backend database list to back the indexer.
# If the list contains null, no indexer service will be used.
# What indexer to use for transactions
#
# The application will set which txs to index. In some cases a node operator will be able
# to decide which txs to index based on configuration set in the application.
@@ -499,12 +477,7 @@ peer-query-maj23-sleep-duration = "{{ .Consensus.PeerQueryMaj23SleepDuration }}"
# 1) "null"
# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed.
# 3) "psql" - the indexer services backed by PostgreSQL.
indexer = [{{ range $i, $e := .TxIndex.Indexer }}{{if $i}}, {{end}}{{ printf "%q" $e}}{{end}}]
# The PostgreSQL connection configuration, the connection format:
# postgresql://<user>:<password>@<host>:<port>/<db>?<opts>
psql-conn = "{{ .TxIndex.PsqlConn }}"
indexer = "{{ .TxIndex.Indexer }}"
#######################################################
### Instrumentation Configuration Options ###
@@ -549,10 +522,10 @@ func ResetTestRootWithChainID(testName string, chainID string) *Config {
panic(err)
}
conf := DefaultConfig()
genesisFilePath := filepath.Join(rootDir, conf.Genesis)
privKeyFilePath := filepath.Join(rootDir, conf.PrivValidator.Key)
privStateFilePath := filepath.Join(rootDir, conf.PrivValidator.State)
baseConfig := DefaultBaseConfig()
genesisFilePath := filepath.Join(rootDir, baseConfig.Genesis)
privKeyFilePath := filepath.Join(rootDir, baseConfig.PrivValidatorKey)
privStateFilePath := filepath.Join(rootDir, baseConfig.PrivValidatorState)
// Write default config file if missing.
writeDefaultConfigFileIfNone(rootDir)

View File

@@ -63,8 +63,7 @@ func TestEnsureTestRoot(t *testing.T) {
// TODO: make sure the cfg returned and testconfig are the same!
baseConfig := DefaultBaseConfig()
pvConfig := DefaultPrivValidatorConfig()
ensureFiles(t, rootDir, defaultDataDir, baseConfig.Genesis, pvConfig.Key, pvConfig.State)
ensureFiles(t, rootDir, defaultDataDir, baseConfig.Genesis, baseConfig.PrivValidatorKey, baseConfig.PrivValidatorState)
}
func checkConfig(configFile string) bool {

View File

@@ -12,12 +12,12 @@ import (
"github.com/stretchr/testify/require"
abcicli "github.com/tendermint/tendermint/abci/client"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/internal/evidence"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
mempoolv0 "github.com/tendermint/tendermint/internal/mempool/v0"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/evidence"
"github.com/tendermint/tendermint/internal/test/factory"
"github.com/tendermint/tendermint/libs/log"
tmsync "github.com/tendermint/tendermint/libs/sync"
mempl "github.com/tendermint/tendermint/mempool"
"github.com/tendermint/tendermint/p2p"
tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
sm "github.com/tendermint/tendermint/state"
@@ -45,9 +45,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
logger := consensusLogger().With("test", "byzantine", "validator", i)
stateDB := dbm.NewMemDB() // each state needs its own db
stateStore := sm.NewStore(stateDB)
state, err := sm.MakeGenesisState(genDoc)
require.NoError(t, err)
require.NoError(t, stateStore.Save(state))
state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc)
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
defer os.RemoveAll(thisConfig.RootDir)
@@ -66,7 +64,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
proxyAppConnCon := abcicli.NewLocalClient(mtx, app)
// Make Mempool
mempool := mempoolv0.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0)
mempool := mempl.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0)
mempool.SetLogger(log.TestingLogger().With("module", "mempool"))
if thisConfig.Consensus.WaitForTxs() {
mempool.EnableTxsAvailable()
@@ -78,7 +76,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
require.NoError(t, err)
// Make State
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool, blockStore)
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool)
cs := NewState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool)
cs.SetLogger(cs.Logger)
// set private validator
@@ -100,7 +98,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
rts := setup(t, nValidators, states, 100) // buffer must be large enough to not deadlock
var bzNodeID types.NodeID
var bzNodeID p2p.NodeID
// Set the first state's reactor as the dedicated byzantine reactor and grab
// the NodeID that corresponds to the state so we can reference the reactor.

View File

@@ -12,6 +12,7 @@ import (
"testing"
"time"
"github.com/go-kit/kit/log/term"
"github.com/stretchr/testify/require"
"path"
@@ -23,20 +24,20 @@ import (
"github.com/tendermint/tendermint/abci/example/kvstore"
abci "github.com/tendermint/tendermint/abci/types"
cfg "github.com/tendermint/tendermint/config"
cstypes "github.com/tendermint/tendermint/internal/consensus/types"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
mempoolv0 "github.com/tendermint/tendermint/internal/mempool/v0"
cstypes "github.com/tendermint/tendermint/consensus/types"
"github.com/tendermint/tendermint/internal/test/factory"
tmbytes "github.com/tendermint/tendermint/libs/bytes"
"github.com/tendermint/tendermint/libs/log"
tmos "github.com/tendermint/tendermint/libs/os"
tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
tmtime "github.com/tendermint/tendermint/libs/time"
tmsync "github.com/tendermint/tendermint/libs/sync"
mempl "github.com/tendermint/tendermint/mempool"
"github.com/tendermint/tendermint/privval"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
tmtime "github.com/tendermint/tendermint/types/time"
)
const (
@@ -384,8 +385,8 @@ func newStateWithConfig(
pv types.PrivValidator,
app abci.Application,
) *State {
blockStore := store.NewBlockStore(dbm.NewMemDB())
return newStateWithConfigAndBlockStore(thisConfig, state, pv, app, blockStore)
blockDB := dbm.NewMemDB()
return newStateWithConfigAndBlockStore(thisConfig, state, pv, app, blockDB)
}
func newStateWithConfigAndBlockStore(
@@ -393,15 +394,18 @@ func newStateWithConfigAndBlockStore(
state sm.State,
pv types.PrivValidator,
app abci.Application,
blockStore *store.BlockStore,
blockDB dbm.DB,
) *State {
// Get BlockStore
blockStore := store.NewBlockStore(blockDB)
// one for mempool, one for consensus
mtx := new(tmsync.RWMutex)
proxyAppConnMem := abcicli.NewLocalClient(mtx, app)
proxyAppConnCon := abcicli.NewLocalClient(mtx, app)
// Make Mempool
mempool := mempoolv0.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0)
mempool := mempl.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0)
mempool.SetLogger(log.TestingLogger().With("module", "mempool"))
if thisConfig.Consensus.WaitForTxs() {
mempool.EnableTxsAvailable()
@@ -416,7 +420,7 @@ func newStateWithConfigAndBlockStore(
panic(err)
}
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool, blockStore)
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool)
cs := NewState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool)
cs.SetLogger(log.TestingLogger().With("module", "consensus"))
cs.SetPrivValidator(pv)
@@ -432,9 +436,9 @@ func newStateWithConfigAndBlockStore(
}
func loadPrivValidator(config *cfg.Config) *privval.FilePV {
privValidatorKeyFile := config.PrivValidator.KeyFile()
privValidatorKeyFile := config.PrivValidatorKeyFile()
ensureDir(filepath.Dir(privValidatorKeyFile), 0700)
privValidatorStateFile := config.PrivValidator.StateFile()
privValidatorStateFile := config.PrivValidatorStateFile()
privValidator, err := privval.LoadOrGenFilePV(privValidatorKeyFile, privValidatorStateFile)
if err != nil {
panic(err)
@@ -686,11 +690,17 @@ func ensureNewEventOnChannel(ch <-chan tmpubsub.Message) {
// consensusLogger is a TestingLogger which uses a different
// color for each validator ("validator" key must exist).
func consensusLogger() log.Logger {
return log.TestingLogger().With("module", "consensus")
return log.TestingLoggerWithColorFn(func(keyvals ...interface{}) term.FgBgColor {
for i := 0; i < len(keyvals)-1; i += 2 {
if keyvals[i] == "validator" {
return term.FgBgColor{Fg: term.Color(uint8(keyvals[i+1].(int) + 1))}
}
}
return term.FgBgColor{}
}).With("module", "consensus")
}
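With the color function above, any log line carrying a "validator" key is rendered in a color derived from the validator index, which keeps interleaved multi-validator test output readable. A small illustrative use:
logger := consensusLogger()
logger.With("validator", 0).Info("enterPropose") // one color per index
logger.With("validator", 1).Info("enterPrevote") // a different color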
func randConsensusState(
t *testing.T,
config *cfg.Config,
nValidators int,
testName string,
@@ -708,9 +718,9 @@ func randConsensusState(
configRootDirs := make([]string, 0, nValidators)
for i := 0; i < nValidators; i++ {
blockStore := store.NewBlockStore(dbm.NewMemDB()) // each state needs its own db
state, err := sm.MakeGenesisState(genDoc)
require.NoError(t, err)
stateDB := dbm.NewMemDB() // each state needs its own db
stateStore := sm.NewStore(stateDB)
state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc)
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
configRootDirs = append(configRootDirs, thisConfig.RootDir)
@@ -729,7 +739,7 @@ func randConsensusState(
vals := types.TM2PB.ValidatorUpdates(state.Validators)
app.InitChain(abci.RequestInitChain{Validators: vals})
css[i] = newStateWithConfigAndBlockStore(thisConfig, state, privVals[i], app, blockStore)
css[i] = newStateWithConfigAndBlockStore(thisConfig, state, privVals[i], app, stateDB)
css[i].SetTimeoutTicker(tickerFunc())
css[i].SetLogger(logger.With("validator", i, "module", "consensus"))
}
@@ -760,7 +770,9 @@ func randConsensusNetWithPeers(
var peer0Config *cfg.Config
configRootDirs := make([]string, 0, nPeers)
for i := 0; i < nPeers; i++ {
state, _ := sm.MakeGenesisState(genDoc)
stateDB := dbm.NewMemDB() // each state needs its own db
stateStore := sm.NewStore(stateDB)
state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc)
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
configRootDirs = append(configRootDirs, thisConfig.RootDir)
ensureDir(filepath.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal

View File

@@ -6,9 +6,9 @@ import (
"testing"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/libs/bytes"
tmrand "github.com/tendermint/tendermint/libs/rand"
"github.com/tendermint/tendermint/p2p"
tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
"github.com/tendermint/tendermint/types"
@@ -18,7 +18,7 @@ func TestReactorInvalidPrecommit(t *testing.T) {
config := configSetup(t)
n := 4
states, cleanup := randConsensusState(t, config, n, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
states, cleanup := randConsensusState(config, n, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
t.Cleanup(cleanup)
for i := 0; i < 4; i++ {

View File

@@ -1,7 +1,6 @@
package consensus
import (
"context"
"encoding/binary"
"fmt"
"os"
@@ -15,9 +14,8 @@ import (
"github.com/tendermint/tendermint/abci/example/code"
abci "github.com/tendermint/tendermint/abci/types"
mempl "github.com/tendermint/tendermint/internal/mempool"
mempl "github.com/tendermint/tendermint/mempool"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
)
@@ -113,7 +111,7 @@ func deliverTxsRange(cs *State, start, end int) {
for i := start; i < end; i++ {
txBytes := make([]byte, 8)
binary.BigEndian.PutUint64(txBytes, uint64(i))
err := assertMempool(cs.txNotifier).CheckTx(context.Background(), txBytes, nil, mempl.TxInfo{})
err := assertMempool(cs.txNotifier).CheckTx(txBytes, nil, mempl.TxInfo{})
if err != nil {
panic(fmt.Sprintf("Error after CheckTx: %v", err))
}
@@ -124,9 +122,9 @@ func TestMempoolTxConcurrentWithCommit(t *testing.T) {
config := configSetup(t)
state, privVals := randGenesisState(config, 1, false, 10)
stateStore := sm.NewStore(dbm.NewMemDB())
blockStore := store.NewBlockStore(dbm.NewMemDB())
cs := newStateWithConfigAndBlockStore(config, state, privVals[0], NewCounterApplication(), blockStore)
blockDB := dbm.NewMemDB()
stateStore := sm.NewStore(blockDB)
cs := newStateWithConfigAndBlockStore(config, state, privVals[0], NewCounterApplication(), blockDB)
err := stateStore.Save(state)
require.NoError(t, err)
newBlockHeaderCh := subscribe(cs.eventBus, types.EventQueryNewBlockHeader)
@@ -151,9 +149,9 @@ func TestMempoolRmBadTx(t *testing.T) {
state, privVals := randGenesisState(config, 1, false, 10)
app := NewCounterApplication()
stateStore := sm.NewStore(dbm.NewMemDB())
blockStore := store.NewBlockStore(dbm.NewMemDB())
cs := newStateWithConfigAndBlockStore(config, state, privVals[0], app, blockStore)
blockDB := dbm.NewMemDB()
stateStore := sm.NewStore(blockDB)
cs := newStateWithConfigAndBlockStore(config, state, privVals[0], app, blockDB)
err := stateStore.Save(state)
require.NoError(t, err)
@@ -161,8 +159,8 @@ func TestMempoolRmBadTx(t *testing.T) {
txBytes := make([]byte, 8)
binary.BigEndian.PutUint64(txBytes, uint64(0))
resDeliver := app.FinalizeBlock(abci.RequestFinalizeBlock{Txs: [][]byte{txBytes}})
assert.False(t, resDeliver.Txs[0].IsErr(), fmt.Sprintf("expected no error. got %v", resDeliver))
resDeliver := app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes})
assert.False(t, resDeliver.IsErr(), fmt.Sprintf("expected no error. got %v", resDeliver))
resCommit := app.Commit()
assert.True(t, len(resCommit.Data) > 0)
@@ -173,7 +171,7 @@ func TestMempoolRmBadTx(t *testing.T) {
// Try to send the tx through the mempool.
// CheckTx should not err, but the app should return a bad abci code
// and the tx should get removed from the pool
err := assertMempool(cs.txNotifier).CheckTx(context.Background(), txBytes, func(r *abci.Response) {
err := assertMempool(cs.txNotifier).CheckTx(txBytes, func(r *abci.Response) {
if r.GetCheckTx().Code != code.CodeTypeBadNonce {
t.Errorf("expected checktx to return bad nonce, got %v", r)
return
@@ -233,16 +231,15 @@ func (app *CounterApplication) Info(req abci.RequestInfo) abci.ResponseInfo {
return abci.ResponseInfo{Data: fmt.Sprintf("txs:%v", app.txCount)}
}
func (app *CounterApplication) FinalizeBlock(req abci.RequestFinalizeBlock) abci.ResponseFinalizeBlock {
txValue := txAsUint64(req.Txs[0])
func (app *CounterApplication) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx {
txValue := txAsUint64(req.Tx)
if txValue != uint64(app.txCount) {
return abci.ResponseFinalizeBlock{Txs: []*abci.ResponseDeliverTx{{
return abci.ResponseDeliverTx{
Code: code.CodeTypeBadNonce,
Log: fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.txCount, txValue)}},
}
Log: fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.txCount, txValue)}
}
app.txCount++
return abci.ResponseFinalizeBlock{Txs: []*abci.ResponseDeliverTx{{Code: code.CodeTypeOK}}}
return abci.ResponseDeliverTx{Code: code.CodeTypeOK}
}
func (app *CounterApplication) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx {

View File

@@ -3,7 +3,6 @@ package consensus
import (
"github.com/go-kit/kit/metrics"
"github.com/go-kit/kit/metrics/discard"
"github.com/tendermint/tendermint/types"
prometheus "github.com/go-kit/kit/metrics/prometheus"
stdprometheus "github.com/prometheus/client_golang/prometheus"
@@ -49,7 +48,7 @@ type Metrics struct {
// Number of transactions.
NumTxs metrics.Gauge
// Size of the block.
BlockSizeBytes metrics.Histogram
BlockSizeBytes metrics.Gauge
// Total number of transactions.
TotalTxs metrics.Gauge
// The latest block height.
@@ -151,7 +150,7 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
Name: "num_txs",
Help: "Number of transactions.",
}, labels).With(labelsAndValues...),
BlockSizeBytes: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{
BlockSizeBytes: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: namespace,
Subsystem: MetricsSubsystem,
Name: "block_size_bytes",
@@ -211,7 +210,7 @@ func NopMetrics() *Metrics {
BlockIntervalSeconds: discard.NewHistogram(),
NumTxs: discard.NewGauge(),
BlockSizeBytes: discard.NewHistogram(),
BlockSizeBytes: discard.NewGauge(),
TotalTxs: discard.NewGauge(),
CommittedHeight: discard.NewGauge(),
FastSyncing: discard.NewGauge(),
@@ -219,11 +218,3 @@ func NopMetrics() *Metrics {
BlockParts: discard.NewCounter(),
}
}
// RecordConsMetrics is used to record block-related metrics during fast-sync.
func (m *Metrics) RecordConsMetrics(block *types.Block) {
m.NumTxs.Set(float64(len(block.Data.Txs)))
m.TotalTxs.Add(float64(len(block.Data.Txs)))
m.BlockSizeBytes.Observe(float64(block.Size()))
m.CommittedHeight.Set(float64(block.Height))
}
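Where RecordConsMetrics is kept, the expectation is one call per block applied during fast-sync; an illustrative (hypothetical) call site:
// Hypothetical fast-sync loop body; committedBlocks is illustrative.
for _, block := range committedBlocks {
	metrics.RecordConsMetrics(block) // updates NumTxs, TotalTxs, BlockSizeBytes, CommittedHeight
}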

View File

@@ -4,10 +4,11 @@ import (
"errors"
"fmt"
cstypes "github.com/tendermint/tendermint/internal/consensus/types"
cstypes "github.com/tendermint/tendermint/consensus/types"
"github.com/tendermint/tendermint/libs/bits"
tmjson "github.com/tendermint/tendermint/libs/json"
tmmath "github.com/tendermint/tendermint/libs/math"
"github.com/tendermint/tendermint/p2p"
tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
"github.com/tendermint/tendermint/types"
@@ -650,7 +651,7 @@ func WALFromProto(msg *tmcons.WALMessage) (WALMessage, error) {
}
pb = msgInfo{
Msg: walMsg,
PeerID: types.NodeID(msg.MsgInfo.PeerID),
PeerID: p2p.NodeID(msg.MsgInfo.PeerID),
}
case *tmcons.WALMessage_TimeoutInfo:

View File

@@ -11,13 +11,14 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
cstypes "github.com/tendermint/tendermint/consensus/types"
"github.com/tendermint/tendermint/crypto/merkle"
"github.com/tendermint/tendermint/crypto/tmhash"
cstypes "github.com/tendermint/tendermint/internal/consensus/types"
"github.com/tendermint/tendermint/internal/test/factory"
"github.com/tendermint/tendermint/libs/bits"
"github.com/tendermint/tendermint/libs/bytes"
tmrand "github.com/tendermint/tendermint/libs/rand"
"github.com/tendermint/tendermint/p2p"
tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
"github.com/tendermint/tendermint/types"
@@ -248,7 +249,7 @@ func TestWALMsgProto(t *testing.T) {
Round: 1,
Part: &parts,
},
PeerID: types.NodeID("string"),
PeerID: p2p.NodeID("string"),
}, &tmcons.WALMessage{
Sum: &tmcons.WALMessage_MsgInfo{
MsgInfo: &tmcons.MsgInfo{

View File

@@ -6,14 +6,15 @@ import (
"sync"
"time"
cstypes "github.com/tendermint/tendermint/internal/consensus/types"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
cstypes "github.com/tendermint/tendermint/consensus/types"
"github.com/tendermint/tendermint/libs/bits"
tmjson "github.com/tendermint/tendermint/libs/json"
"github.com/tendermint/tendermint/libs/log"
tmtime "github.com/tendermint/tendermint/libs/time"
tmsync "github.com/tendermint/tendermint/libs/sync"
"github.com/tendermint/tendermint/p2p"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
"github.com/tendermint/tendermint/types"
tmtime "github.com/tendermint/tendermint/types/time"
)
var (
@@ -36,7 +37,7 @@ func (pss peerStateStats) String() string {
// NOTE: THIS GETS DUMPED WITH rpc/core/consensus.go.
// Be mindful of what you Expose.
type PeerState struct {
peerID types.NodeID
peerID p2p.NodeID
logger log.Logger
// NOTE: Modify below using setters, never directly.
@@ -50,7 +51,7 @@ type PeerState struct {
}
// NewPeerState returns a new PeerState for the given node ID.
func NewPeerState(logger log.Logger, peerID types.NodeID) *PeerState {
func NewPeerState(logger log.Logger, peerID p2p.NodeID) *PeerState {
return &PeerState{
peerID: peerID,
logger: logger,

View File

@@ -4,13 +4,13 @@ import (
"fmt"
"time"
cstypes "github.com/tendermint/tendermint/internal/consensus/types"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
"github.com/tendermint/tendermint/internal/p2p"
cstypes "github.com/tendermint/tendermint/consensus/types"
"github.com/tendermint/tendermint/libs/bits"
tmevents "github.com/tendermint/tendermint/libs/events"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/service"
tmsync "github.com/tendermint/tendermint/libs/sync"
"github.com/tendermint/tendermint/p2p"
tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
sm "github.com/tendermint/tendermint/state"
@@ -33,11 +33,11 @@ var (
MsgType: new(tmcons.Message),
Descriptor: &p2p.ChannelDescriptor{
ID: byte(StateChannel),
Priority: 8,
SendQueueCapacity: 64,
Priority: 6,
SendQueueCapacity: 100,
RecvMessageCapacity: maxMsgSize,
RecvBufferCapacity: 128,
MaxSendBytes: 12000,
MaxSendBytes: 12000,
},
},
DataChannel: {
@@ -47,33 +47,36 @@ var (
// stuff. Once we gossip the whole block there is nothing left to send
// until next height or round.
ID: byte(DataChannel),
Priority: 12,
SendQueueCapacity: 64,
RecvBufferCapacity: 512,
Priority: 10,
SendQueueCapacity: 100,
RecvBufferCapacity: 50 * 4096,
RecvMessageCapacity: maxMsgSize,
MaxSendBytes: 40000,
MaxSendBytes: 40000,
},
},
VoteChannel: {
MsgType: new(tmcons.Message),
Descriptor: &p2p.ChannelDescriptor{
ID: byte(VoteChannel),
Priority: 10,
SendQueueCapacity: 64,
RecvBufferCapacity: 128,
Priority: 7,
SendQueueCapacity: 100,
RecvBufferCapacity: 100 * 100,
RecvMessageCapacity: maxMsgSize,
MaxSendBytes: 150,
MaxSendBytes: 150,
},
},
VoteSetBitsChannel: {
MsgType: new(tmcons.Message),
Descriptor: &p2p.ChannelDescriptor{
ID: byte(VoteSetBitsChannel),
Priority: 5,
SendQueueCapacity: 8,
RecvBufferCapacity: 128,
Priority: 1,
SendQueueCapacity: 2,
RecvBufferCapacity: 1024,
RecvMessageCapacity: maxMsgSize,
MaxSendBytes: 50,
MaxSendBytes: 50,
},
},
}
@@ -95,14 +98,6 @@ const (
type ReactorOption func(*Reactor)
// Temporary interface for switching to fast sync; we should get rid of v0.
// See: https://github.com/tendermint/tendermint/issues/4595
type FastSyncReactor interface {
SwitchToFastSync(sm.State) error
GetMaxPeerBlockHeight() int64
}
// Reactor defines a reactor for the consensus service.
type Reactor struct {
service.BaseService
@@ -112,7 +107,7 @@ type Reactor struct {
Metrics *Metrics
mtx tmsync.RWMutex
peers map[types.NodeID]*PeerState
peers map[p2p.NodeID]*PeerState
waitSync bool
stateCh *p2p.Channel
@@ -148,7 +143,7 @@ func NewReactor(
r := &Reactor{
state: cs,
waitSync: waitSync,
peers: make(map[types.NodeID]*PeerState),
peers: make(map[p2p.NodeID]*PeerState),
Metrics: NopMetrics(),
stateCh: stateCh,
dataCh: dataCh,
@@ -324,7 +319,7 @@ func (r *Reactor) StringIndented(indent string) string {
}
// GetPeerState returns PeerState for a given NodeID.
func (r *Reactor) GetPeerState(peerID types.NodeID) (*PeerState, bool) {
func (r *Reactor) GetPeerState(peerID p2p.NodeID) (*PeerState, bool) {
r.mtx.RLock()
defer r.mtx.RUnlock()
@@ -371,7 +366,7 @@ func (r *Reactor) broadcastHasVoteMessage(vote *types.Vote) {
func (r *Reactor) subscribeToBroadcastEvents() {
err := r.state.evsw.AddListenerForEvent(
listenerIDConsensus,
types.EventNewRoundStepValue,
types.EventNewRoundStep,
func(data tmevents.EventData) {
r.broadcastNewRoundStepMessage(data.(*cstypes.RoundState))
select {
@@ -386,7 +381,7 @@ func (r *Reactor) subscribeToBroadcastEvents() {
err = r.state.evsw.AddListenerForEvent(
listenerIDConsensus,
types.EventValidBlockValue,
types.EventValidBlock,
func(data tmevents.EventData) {
r.broadcastNewValidBlockMessage(data.(*cstypes.RoundState))
},
@@ -397,7 +392,7 @@ func (r *Reactor) subscribeToBroadcastEvents() {
err = r.state.evsw.AddListenerForEvent(
listenerIDConsensus,
types.EventVoteValue,
types.EventVote,
func(data tmevents.EventData) {
r.broadcastHasVoteMessage(data.(*types.Vote))
},
@@ -421,7 +416,7 @@ func makeRoundStepMessage(rs *cstypes.RoundState) *tmcons.NewRoundStep {
}
}
func (r *Reactor) sendNewRoundStepMessage(peerID types.NodeID) {
func (r *Reactor) sendNewRoundStepMessage(peerID p2p.NodeID) {
rs := r.state.GetRoundState()
msg := makeRoundStepMessage(rs)
r.stateCh.Out <- p2p.Envelope{
@@ -1197,6 +1192,11 @@ func (r *Reactor) handleVoteSetBitsMessage(envelope p2p.Envelope, msgI Message)
// It will handle errors and any possible panics gracefully. A caller can handle
// any error returned by sending a PeerError on the respective channel.
//
// NOTE: We process these messages even when we're fast_syncing. Messages affect
// either a peer state or the consensus state. Peer state updates can happen in
// parallel, but processing of proposals, block parts, and votes are ordered by
// the p2p channel.
//
// NOTE: We block on consensus state for proposals, block parts, and votes.
func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err error) {
defer func() {
@@ -1206,12 +1206,6 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err
}
}()
// Just skip the entire message during syncing so that we can
// process fewer messages.
if r.WaitSync() {
return
}
// We wrap the envelope's message in a Proto wire type so we can convert back
// the domain type that individual channel message handlers can work with. We
// do this here once to avoid having to do it for each individual message type.
@@ -1408,7 +1402,3 @@ func (r *Reactor) peerStatsRoutine() {
}
}
}
func (r *Reactor) GetConsensusState() *State {
return r.state
}

View File

@@ -18,13 +18,12 @@ import (
abci "github.com/tendermint/tendermint/abci/types"
cfg "github.com/tendermint/tendermint/config"
cryptoenc "github.com/tendermint/tendermint/crypto/encoding"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
"github.com/tendermint/tendermint/internal/mempool"
mempoolv0 "github.com/tendermint/tendermint/internal/mempool/v0"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/internal/p2p/p2ptest"
"github.com/tendermint/tendermint/internal/test/factory"
"github.com/tendermint/tendermint/libs/log"
tmsync "github.com/tendermint/tendermint/libs/sync"
mempl "github.com/tendermint/tendermint/mempool"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/p2p/p2ptest"
tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus"
sm "github.com/tendermint/tendermint/state"
statemocks "github.com/tendermint/tendermint/state/mocks"
@@ -39,13 +38,13 @@ var (
type reactorTestSuite struct {
network *p2ptest.Network
states map[types.NodeID]*State
reactors map[types.NodeID]*Reactor
subs map[types.NodeID]types.Subscription
stateChannels map[types.NodeID]*p2p.Channel
dataChannels map[types.NodeID]*p2p.Channel
voteChannels map[types.NodeID]*p2p.Channel
voteSetBitsChannels map[types.NodeID]*p2p.Channel
states map[p2p.NodeID]*State
reactors map[p2p.NodeID]*Reactor
subs map[p2p.NodeID]types.Subscription
stateChannels map[p2p.NodeID]*p2p.Channel
dataChannels map[p2p.NodeID]*p2p.Channel
voteChannels map[p2p.NodeID]*p2p.Channel
voteSetBitsChannels map[p2p.NodeID]*p2p.Channel
}
func chDesc(chID p2p.ChannelID) p2p.ChannelDescriptor {
@@ -59,9 +58,9 @@ func setup(t *testing.T, numNodes int, states []*State, size int) *reactorTestSu
rts := &reactorTestSuite{
network: p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numNodes}),
states: make(map[types.NodeID]*State),
reactors: make(map[types.NodeID]*Reactor, numNodes),
subs: make(map[types.NodeID]types.Subscription, numNodes),
states: make(map[p2p.NodeID]*State),
reactors: make(map[p2p.NodeID]*Reactor, numNodes),
subs: make(map[p2p.NodeID]types.Subscription, numNodes),
}
rts.stateChannels = rts.network.MakeChannelsNoCleanup(t, chDesc(StateChannel), new(tmcons.Message), size)
@@ -155,7 +154,7 @@ func waitForAndValidateBlock(
require.NoError(t, validateBlock(newBlock, activeVals))
for _, tx := range txs {
require.NoError(t, assertMempool(states[j].txNotifier).CheckTx(context.Background(), tx, nil, mempool.TxInfo{}))
require.NoError(t, assertMempool(states[j].txNotifier).CheckTx(tx, nil, mempl.TxInfo{}))
}
}
@@ -257,7 +256,7 @@ func TestReactorBasic(t *testing.T) {
config := configSetup(t)
n := 4
states, cleanup := randConsensusState(t, config, n, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
states, cleanup := randConsensusState(config, n, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
t.Cleanup(cleanup)
rts := setup(t, n, states, 100) // buffer must be large enough to not deadlock
@@ -296,8 +295,7 @@ func TestReactorWithEvidence(t *testing.T) {
for i := 0; i < n; i++ {
stateDB := dbm.NewMemDB() // each state needs its own db
stateStore := sm.NewStore(stateDB)
state, err := sm.MakeGenesisState(genDoc)
require.NoError(t, err)
state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc)
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
defer os.RemoveAll(thisConfig.RootDir)
@@ -316,7 +314,7 @@ func TestReactorWithEvidence(t *testing.T) {
proxyAppConnMem := abcicli.NewLocalClient(mtx, app)
proxyAppConnCon := abcicli.NewLocalClient(mtx, app)
mempool := mempoolv0.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0)
mempool := mempl.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0)
mempool.SetLogger(log.TestingLogger().With("module", "mempool"))
if thisConfig.Consensus.WaitForTxs() {
mempool.EnableTxsAvailable()
@@ -335,14 +333,14 @@ func TestReactorWithEvidence(t *testing.T) {
evpool2 := sm.EmptyEvidencePool{}
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool, blockStore)
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool)
cs := NewState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool2)
cs.SetLogger(log.TestingLogger().With("module", "consensus"))
cs.SetPrivValidator(pv)
eventBus := types.NewEventBus()
eventBus.SetLogger(log.TestingLogger().With("module", "events"))
err = eventBus.Start()
err := eventBus.Start()
require.NoError(t, err)
cs.SetEventBus(eventBus)
@@ -382,7 +380,6 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {
n := 4
states, cleanup := randConsensusState(
t,
config,
n,
"consensus_reactor_test",
@@ -403,15 +400,7 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {
}
// send a tx
require.NoError(
t,
assertMempool(states[3].txNotifier).CheckTx(
context.Background(),
[]byte{1, 2, 3},
nil,
mempool.TxInfo{},
),
)
require.NoError(t, assertMempool(states[3].txNotifier).CheckTx([]byte{1, 2, 3}, nil, mempl.TxInfo{}))
var wg sync.WaitGroup
for _, sub := range rts.subs {
@@ -431,7 +420,7 @@ func TestReactorRecordsVotesAndBlockParts(t *testing.T) {
config := configSetup(t)
n := 4
states, cleanup := randConsensusState(t, config, n, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
states, cleanup := randConsensusState(config, n, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
t.Cleanup(cleanup)
rts := setup(t, n, states, 100) // buffer must be large enough to not deadlock
@@ -489,7 +478,6 @@ func TestReactorVotingPowerChange(t *testing.T) {
n := 4
states, cleanup := randConsensusState(
t,
config,
n,
"consensus_voting_power_changes_test",

View File

@@ -473,7 +473,7 @@ func (h *Handshaker) replayBlocks(
// We emit events for the index services at the final block due to the sync issue when
// the node shuts down during the block-committing stage.
blockExec := sm.NewBlockExecutor(
h.stateStore, h.logger, proxyApp.Consensus(), emptyMempool{}, sm.EmptyEvidencePool{}, h.store)
h.stateStore, h.logger, proxyApp.Consensus(), emptyMempool{}, sm.EmptyEvidencePool{})
blockExec.SetEventBus(h.eventBus)
appHash, err = sm.ExecCommitBlock(
blockExec, proxyApp.Consensus(), block, h.logger, h.stateStore, h.genDoc.InitialHeight, state)
@@ -511,11 +511,11 @@ func (h *Handshaker) replayBlock(state sm.State, height int64, proxyApp proxy.Ap
// Use stubs for both mempool and evidence pool since neither transactions nor
// evidence are needed here - the block already exists.
blockExec := sm.NewBlockExecutor(h.stateStore, h.logger, proxyApp, emptyMempool{}, sm.EmptyEvidencePool{}, h.store)
blockExec := sm.NewBlockExecutor(h.stateStore, h.logger, proxyApp, emptyMempool{}, sm.EmptyEvidencePool{})
blockExec.SetEventBus(h.eventBus)
var err error
state, err = blockExec.ApplyBlock(state, meta.BlockID, block)
state, _, err = blockExec.ApplyBlock(state, meta.BlockID, block)
if err != nil {
return sm.State{}, err
}

View File

@@ -15,7 +15,6 @@ import (
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/libs/log"
tmos "github.com/tendermint/tendermint/libs/os"
tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/store"
@@ -59,8 +58,7 @@ func (cs *State) ReplayFile(file string, console bool) error {
return fmt.Errorf("failed to subscribe %s to %v", subscriber, types.EventQueryNewRoundStep)
}
defer func() {
args := tmpubsub.UnsubscribeArgs{Subscriber: subscriber, Query: types.EventQueryNewRoundStep}
if err := cs.eventBus.Unsubscribe(ctx, args); err != nil {
if err := cs.eventBus.Unsubscribe(ctx, subscriber, types.EventQueryNewRoundStep); err != nil {
cs.Logger.Error("Error unsubscribing to event bus", "err", err)
}
}()
@@ -227,8 +225,7 @@ func (pb *playback) replayConsoleLoop() int {
tmos.Exit(fmt.Sprintf("failed to subscribe %s to %v", subscriber, types.EventQueryNewRoundStep))
}
defer func() {
args := tmpubsub.UnsubscribeArgs{Subscriber: subscriber, Query: types.EventQueryNewRoundStep}
if err := pb.cs.eventBus.Unsubscribe(ctx, args); err != nil {
if err := pb.cs.eventBus.Unsubscribe(ctx, subscriber, types.EventQueryNewRoundStep); err != nil {
pb.cs.Logger.Error("Error unsubscribing from eventBus", "err", err)
}
}()
@@ -331,7 +328,7 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo
}
mempool, evpool := emptyMempool{}, sm.EmptyEvidencePool{}
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool, blockStore)
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool)
consensusState := NewState(csConfig, state.Copy(), blockExec,
blockStore, mempool, evpool)

View File

@@ -1,11 +1,9 @@
package consensus
import (
"context"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/internal/libs/clist"
mempl "github.com/tendermint/tendermint/internal/mempool"
"github.com/tendermint/tendermint/libs/clist"
mempl "github.com/tendermint/tendermint/mempool"
tmstate "github.com/tendermint/tendermint/proto/tendermint/state"
"github.com/tendermint/tendermint/proxy"
"github.com/tendermint/tendermint/types"
@@ -20,7 +18,7 @@ var _ mempl.Mempool = emptyMempool{}
func (emptyMempool) Lock() {}
func (emptyMempool) Unlock() {}
func (emptyMempool) Size() int { return 0 }
func (emptyMempool) CheckTx(_ context.Context, _ types.Tx, _ func(*abci.Response), _ mempl.TxInfo) error {
func (emptyMempool) CheckTx(_ types.Tx, _ func(*abci.Response), _ mempl.TxInfo) error {
return nil
}
func (emptyMempool) ReapMaxBytesMaxGas(_, _ int64) types.Txs { return types.Txs{} }
@@ -38,7 +36,7 @@ func (emptyMempool) Flush() {}
func (emptyMempool) FlushAppConn() error { return nil }
func (emptyMempool) TxsAvailable() <-chan struct{} { return make(chan struct{}) }
func (emptyMempool) EnableTxsAvailable() {}
func (emptyMempool) SizeBytes() int64 { return 0 }
func (emptyMempool) TxsBytes() int64 { return 0 }
func (emptyMempool) TxsFront() *clist.CElement { return nil }
func (emptyMempool) TxsWaitChan() <-chan struct{} { return nil }
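For context, emptyMempool above is a no-op stub: every method either does nothing or returns a zero value, which is enough to satisfy the mempool interface in replay code where transactions are never added. A minimal sketch of that pattern against a pared-down, hypothetical interface (the real one has many more methods, all stubbed the same way):

package main

// Mempool is an illustrative subset of the interface being stubbed.
type Mempool interface {
	Lock()
	Unlock()
	Size() int
	TxsBytes() int64
}

// emptyMempool satisfies Mempool but never holds transactions, so replay
// code can pass it anywhere a mempool is required.
type emptyMempool struct{}

var _ Mempool = emptyMempool{}

func (emptyMempool) Lock()           {}
func (emptyMempool) Unlock()         {}
func (emptyMempool) Size() int       { return 0 }
func (emptyMempool) TxsBytes() int64 { return 0 }

func main() {}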
@@ -73,15 +71,20 @@ type mockProxyApp struct {
abciResponses *tmstate.ABCIResponses
}
func (mock *mockProxyApp) FinalizeBlock(req abci.RequestFinalizeBlock) abci.ResponseFinalizeBlock {
r := mock.abciResponses.FinalizeBlock
func (mock *mockProxyApp) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx {
r := mock.abciResponses.DeliverTxs[mock.txCount]
mock.txCount++
if r == nil {
return abci.ResponseFinalizeBlock{}
return abci.ResponseDeliverTx{}
}
return *r
}
func (mock *mockProxyApp) EndBlock(req abci.RequestEndBlock) abci.ResponseEndBlock {
mock.txCount = 0
return *mock.abciResponses.EndBlock
}
func (mock *mockProxyApp) Commit() abci.ResponseCommit {
return abci.ResponseCommit{Data: mock.appHash}
}

View File

@@ -24,17 +24,15 @@ import (
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/crypto"
cryptoenc "github.com/tendermint/tendermint/crypto/encoding"
mempl "github.com/tendermint/tendermint/internal/mempool"
"github.com/tendermint/tendermint/internal/test/factory"
"github.com/tendermint/tendermint/libs/log"
tmrand "github.com/tendermint/tendermint/libs/rand"
mempl "github.com/tendermint/tendermint/mempool"
"github.com/tendermint/tendermint/privval"
tmstate "github.com/tendermint/tendermint/proto/tendermint/state"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
sf "github.com/tendermint/tendermint/state/test/factory"
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
)
@@ -56,23 +54,21 @@ import (
func startNewStateAndWaitForBlock(t *testing.T, consensusReplayConfig *cfg.Config,
lastBlockHeight int64, blockDB dbm.DB, stateStore sm.Store) {
logger := log.TestingLogger()
state, err := sm.MakeGenesisStateFromFile(consensusReplayConfig.GenesisFile())
require.NoError(t, err)
state, _ := stateStore.LoadFromDBOrGenesisFile(consensusReplayConfig.GenesisFile())
privValidator := loadPrivValidator(consensusReplayConfig)
blockStore := store.NewBlockStore(dbm.NewMemDB())
cs := newStateWithConfigAndBlockStore(
consensusReplayConfig,
state,
privValidator,
kvstore.NewApplication(),
blockStore,
blockDB,
)
cs.SetLogger(logger)
bytes, _ := ioutil.ReadFile(cs.config.WalFile())
t.Logf("====== WAL: \n\r%X\n", bytes)
err = cs.Start()
err := cs.Start()
require.NoError(t, err)
defer func() {
if err := cs.Stop(); err != nil {
@@ -102,7 +98,7 @@ func sendTxs(ctx context.Context, cs *State) {
return
default:
tx := []byte{byte(i)}
if err := assertMempool(cs.txNotifier).CheckTx(context.Background(), tx, nil, mempl.TxInfo{}); err != nil {
if err := assertMempool(cs.txNotifier).CheckTx(tx, nil, mempl.TxInfo{}); err != nil {
panic(err)
}
i++
@@ -151,7 +147,6 @@ LOOP:
blockDB := dbm.NewMemDB()
stateDB := dbm.NewMemDB()
stateStore := sm.NewStore(stateDB)
blockStore := store.NewBlockStore(blockDB)
state, err := sm.MakeGenesisStateFromFile(consensusReplayConfig.GenesisFile())
require.NoError(t, err)
privValidator := loadPrivValidator(consensusReplayConfig)
@@ -160,7 +155,7 @@ LOOP:
state,
privValidator,
kvstore.NewApplication(),
blockStore,
blockDB,
)
cs.SetLogger(logger)
@@ -363,7 +358,7 @@ func setupSimulator(t *testing.T) *simulatorTestSuite {
valPubKey1ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey1)
require.NoError(t, err)
newValidatorTx1 := kvstore.MakeValSetChangeTx(valPubKey1ABCI, testMinPower)
err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), newValidatorTx1, nil, mempl.TxInfo{})
err = assertMempool(css[0].txNotifier).CheckTx(newValidatorTx1, nil, mempl.TxInfo{})
assert.Nil(t, err)
propBlock, _ := css[0].createProposalBlock() // changeProposer(t, cs1, vs2)
propBlockParts := propBlock.MakePartSet(partSize)
@@ -395,7 +390,7 @@ func setupSimulator(t *testing.T) *simulatorTestSuite {
updatePubKey1ABCI, err := cryptoenc.PubKeyToProto(updateValidatorPubKey1)
require.NoError(t, err)
updateValidatorTx1 := kvstore.MakeValSetChangeTx(updatePubKey1ABCI, 25)
err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), updateValidatorTx1, nil, mempl.TxInfo{})
err = assertMempool(css[0].txNotifier).CheckTx(updateValidatorTx1, nil, mempl.TxInfo{})
assert.Nil(t, err)
propBlock, _ = css[0].createProposalBlock() // changeProposer(t, cs1, vs2)
propBlockParts = propBlock.MakePartSet(partSize)
@@ -427,14 +422,14 @@ func setupSimulator(t *testing.T) *simulatorTestSuite {
newVal2ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey2)
require.NoError(t, err)
newValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, testMinPower)
err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), newValidatorTx2, nil, mempl.TxInfo{})
err = assertMempool(css[0].txNotifier).CheckTx(newValidatorTx2, nil, mempl.TxInfo{})
assert.Nil(t, err)
newValidatorPubKey3, err := css[nVals+2].privValidator.GetPubKey(context.Background())
require.NoError(t, err)
newVal3ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey3)
require.NoError(t, err)
newValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, testMinPower)
err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), newValidatorTx3, nil, mempl.TxInfo{})
err = assertMempool(css[0].txNotifier).CheckTx(newValidatorTx3, nil, mempl.TxInfo{})
assert.Nil(t, err)
propBlock, _ = css[0].createProposalBlock() // changeProposer(t, cs1, vs2)
propBlockParts = propBlock.MakePartSet(partSize)
@@ -474,7 +469,7 @@ func setupSimulator(t *testing.T) *simulatorTestSuite {
ensureNewProposal(proposalCh, height, round)
removeValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, 0)
err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), removeValidatorTx2, nil, mempl.TxInfo{})
err = assertMempool(css[0].txNotifier).CheckTx(removeValidatorTx2, nil, mempl.TxInfo{})
assert.Nil(t, err)
rs = css[0].GetRoundState()
@@ -513,7 +508,7 @@ func setupSimulator(t *testing.T) *simulatorTestSuite {
height++
incrementHeight(vss...)
removeValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, 0)
err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), removeValidatorTx3, nil, mempl.TxInfo{})
err = assertMempool(css[0].txNotifier).CheckTx(removeValidatorTx3, nil, mempl.TxInfo{})
assert.Nil(t, err)
propBlock, _ = css[0].createProposalBlock() // changeProposer(t, cs1, vs2)
propBlockParts = propBlock.MakePartSet(partSize)
@@ -619,8 +614,8 @@ func TestMockProxyApp(t *testing.T) {
assert.NotPanics(t, func() {
abciResWithEmptyDeliverTx := new(tmstate.ABCIResponses)
abciResWithEmptyDeliverTx.FinalizeBlock.Txs = make([]*abci.ResponseDeliverTx, 0)
abciResWithEmptyDeliverTx.FinalizeBlock.Txs = append(abciResWithEmptyDeliverTx.FinalizeBlock.Txs, &abci.ResponseDeliverTx{})
abciResWithEmptyDeliverTx.DeliverTxs = make([]*abci.ResponseDeliverTx, 0)
abciResWithEmptyDeliverTx.DeliverTxs = append(abciResWithEmptyDeliverTx.DeliverTxs, &abci.ResponseDeliverTx{})
// called when saveABCIResponses:
bytes, err := proto.Marshal(abciResWithEmptyDeliverTx)
@@ -634,30 +629,28 @@ func TestMockProxyApp(t *testing.T) {
mock := newMockProxyApp([]byte("mock_hash"), loadedAbciRes)
abciRes := new(tmstate.ABCIResponses)
abciRes.FinalizeBlock.Txs = make([]*abci.ResponseDeliverTx, len(loadedAbciRes.FinalizeBlock.Txs))
abciRes.DeliverTxs = make([]*abci.ResponseDeliverTx, len(loadedAbciRes.DeliverTxs))
// Execute transactions and get hash.
proxyCb := func(req *abci.Request, res *abci.Response) {
if r, ok := res.Value.(*abci.Response_FinalizeBlock); ok {
for i, tx := range r.FinalizeBlock.Txs {
// TODO: make use of res.Log
// TODO: make use of this info
// Blocks may include invalid txs.
txRes := tx
if txRes.Code == abci.CodeTypeOK {
validTxs++
} else {
logger.Debug("Invalid tx", "code", txRes.Code, "log", txRes.Log)
invalidTxs++
}
abciRes.FinalizeBlock.Txs[i] = txRes
txIndex++
if r, ok := res.Value.(*abci.Response_DeliverTx); ok {
// TODO: make use of res.Log
// TODO: make use of this info
// Blocks may include invalid txs.
txRes := r.DeliverTx
if txRes.Code == abci.CodeTypeOK {
validTxs++
} else {
logger.Debug("Invalid tx", "code", txRes.Code, "log", txRes.Log)
invalidTxs++
}
abciRes.DeliverTxs[txIndex] = txRes
txIndex++
}
}
mock.SetResponseCallback(proxyCb)
someTx := []byte("tx")
_, err = mock.FinalizeBlockSync(context.Background(), abci.RequestFinalizeBlock{Txs: [][]byte{someTx}})
_, err = mock.DeliverTxAsync(context.Background(), abci.RequestDeliverTx{Tx: someTx})
assert.NoError(t, err)
})
assert.True(t, validTxs == 1)
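For context, the response callback above tallies transaction results by response code. A minimal sketch of the same tally, assuming codeTypeOK is zero (mirroring abci.CodeTypeOK) and using illustrative result data:

package main

import "fmt"

const codeTypeOK uint32 = 0

type txResult struct {
	Code uint32
	Log  string
}

func main() {
	results := []txResult{{Code: 0}, {Code: 1, Log: "bad tx"}, {Code: 0}}
	var validTxs, invalidTxs int
	for _, r := range results {
		// Blocks may include invalid txs; count them separately.
		if r.Code == codeTypeOK {
			validTxs++
		} else {
			invalidTxs++
		}
	}
	fmt.Println(validTxs, invalidTxs) // 2 1
}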
@@ -708,7 +701,7 @@ func testHandshakeReplay(t *testing.T, sim *simulatorTestSuite, nBlocks int, mod
walFile := tempWALWithData(walBody)
config.Consensus.SetWalFile(walFile)
privVal, err := privval.LoadFilePV(config.PrivValidator.KeyFile(), config.PrivValidator.StateFile())
privVal, err := privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile())
require.NoError(t, err)
wal, err := NewWAL(walFile)
@@ -734,7 +727,7 @@ func testHandshakeReplay(t *testing.T, sim *simulatorTestSuite, nBlocks int, mod
state := genesisState.Copy()
// run the chain through state.ApplyBlock to build up the tendermint state
state = buildTMStateFromChain(config, sim.Mempool, sim.Evpool, stateStore, state, chain, nBlocks, mode, store)
state = buildTMStateFromChain(config, sim.Mempool, sim.Evpool, stateStore, state, chain, nBlocks, mode)
latestAppHash := state.AppHash
// make a new client creator
@@ -751,7 +744,7 @@ func testHandshakeReplay(t *testing.T, sim *simulatorTestSuite, nBlocks int, mod
stateStore := sm.NewStore(stateDB1)
err := stateStore.Save(genesisState)
require.NoError(t, err)
buildAppStateFromChain(proxyApp, stateStore, sim.Mempool, sim.Evpool, genesisState, chain, nBlocks, mode, store)
buildAppStateFromChain(proxyApp, stateStore, sim.Mempool, sim.Evpool, genesisState, chain, nBlocks, mode)
}
// Prune block store if requested
@@ -816,13 +809,12 @@ func applyBlock(stateStore sm.Store,
evpool sm.EvidencePool,
st sm.State,
blk *types.Block,
proxyApp proxy.AppConns,
blockStore *mockBlockStore) sm.State {
proxyApp proxy.AppConns) sm.State {
testPartSize := types.BlockPartSizeBytes
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool, blockStore)
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool)
blkID := types.BlockID{Hash: blk.Hash(), PartSetHeader: blk.MakePartSet(testPartSize).Header()}
newState, err := blockExec.ApplyBlock(st, blkID, blk)
newState, _, err := blockExec.ApplyBlock(st, blkID, blk)
if err != nil {
panic(err)
}
@@ -836,9 +828,7 @@ func buildAppStateFromChain(
evpool sm.EvidencePool,
state sm.State,
chain []*types.Block,
nBlocks int,
mode uint,
blockStore *mockBlockStore) {
nBlocks int, mode uint) {
// start a new app without handshake, play nBlocks blocks
if err := proxyApp.Start(); err != nil {
panic(err)
@@ -859,18 +849,18 @@ func buildAppStateFromChain(
case 0:
for i := 0; i < nBlocks; i++ {
block := chain[i]
state = applyBlock(stateStore, mempool, evpool, state, block, proxyApp, blockStore)
state = applyBlock(stateStore, mempool, evpool, state, block, proxyApp)
}
case 1, 2, 3:
for i := 0; i < nBlocks-1; i++ {
block := chain[i]
state = applyBlock(stateStore, mempool, evpool, state, block, proxyApp, blockStore)
state = applyBlock(stateStore, mempool, evpool, state, block, proxyApp)
}
if mode == 2 || mode == 3 {
// update the kvstore height and apphash
// as if we ran commit but not
state = applyBlock(stateStore, mempool, evpool, state, chain[nBlocks-1], proxyApp, blockStore)
state = applyBlock(stateStore, mempool, evpool, state, chain[nBlocks-1], proxyApp)
}
default:
panic(fmt.Sprintf("unknown mode %v", mode))
@@ -886,8 +876,7 @@ func buildTMStateFromChain(
state sm.State,
chain []*types.Block,
nBlocks int,
mode uint,
blockStore *mockBlockStore) sm.State {
mode uint) sm.State {
// run the whole chain against this client to build up the tendermint state
kvstoreApp := kvstore.NewPersistentKVStoreApplication(
filepath.Join(config.DBDir(), fmt.Sprintf("replay_test_%d_%d_t", nBlocks, mode)))
@@ -914,19 +903,19 @@ func buildTMStateFromChain(
case 0:
// sync right up
for _, block := range chain {
state = applyBlock(stateStore, mempool, evpool, state, block, proxyApp, blockStore)
state = applyBlock(stateStore, mempool, evpool, state, block, proxyApp)
}
case 1, 2, 3:
// sync up to the penultimate as if we stored the block.
// whether we commit or not depends on the appHash
for _, block := range chain[:len(chain)-1] {
state = applyBlock(stateStore, mempool, evpool, state, block, proxyApp, blockStore)
state = applyBlock(stateStore, mempool, evpool, state, block, proxyApp)
}
// apply the final block to a state copy so we can
// get the right next appHash but keep the state back
applyBlock(stateStore, mempool, evpool, state, chain[len(chain)-1], proxyApp, blockStore)
applyBlock(stateStore, mempool, evpool, state, chain[len(chain)-1], proxyApp)
default:
panic(fmt.Sprintf("unknown mode %v", mode))
}
@@ -941,7 +930,7 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) {
// - 0x03
config := ResetConfig("handshake_test_")
t.Cleanup(func() { os.RemoveAll(config.RootDir) })
privVal, err := privval.LoadFilePV(config.PrivValidator.KeyFile(), config.PrivValidator.StateFile())
privVal, err := privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile())
require.NoError(t, err)
const appVersion = 0x0
pubKey, err := privVal.GetPubKey(context.Background())
@@ -951,7 +940,7 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) {
genDoc, _ := sm.MakeGenesisDocFromFile(config.GenesisFile())
state.LastValidators = state.Validators.Copy()
// mode = 0 for committing all the blocks
blocks := sf.MakeBlocks(3, &state, privVal)
blocks := makeBlocks(3, &state, privVal)
store.chain = blocks
// 2. Tendermint must panic if app returns wrong hash for the first block
@@ -1003,6 +992,51 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) {
}
}
func makeBlocks(n int, state *sm.State, privVal types.PrivValidator) []*types.Block {
blocks := make([]*types.Block, 0)
var (
prevBlock *types.Block
prevBlockMeta *types.BlockMeta
)
appHeight := byte(0x01)
for i := 0; i < n; i++ {
height := int64(i + 1)
block, parts := makeBlock(*state, prevBlock, prevBlockMeta, privVal, height)
blocks = append(blocks, block)
prevBlock = block
prevBlockMeta = types.NewBlockMeta(block, parts)
// update state
state.AppHash = []byte{appHeight}
appHeight++
state.LastBlockHeight = height
}
return blocks
}
func makeBlock(state sm.State, lastBlock *types.Block, lastBlockMeta *types.BlockMeta,
privVal types.PrivValidator, height int64) (*types.Block, *types.PartSet) {
lastCommit := types.NewCommit(height-1, 0, types.BlockID{}, nil)
if height > 1 {
vote, _ := factory.MakeVote(
privVal,
lastBlock.Header.ChainID,
1, lastBlock.Header.Height, 0, 2,
lastBlockMeta.BlockID,
time.Now())
lastCommit = types.NewCommit(vote.Height, vote.Round,
lastBlockMeta.BlockID, []types.CommitSig{vote.CommitSig()})
}
return state.MakeBlock(height, []types.Tx{}, lastCommit, nil, state.Validators.GetProposer().Address)
}
type badApp struct {
abci.BaseApplication
numBlocks byte
@@ -1232,7 +1266,7 @@ func TestHandshakeUpdatesValidators(t *testing.T) {
config := ResetConfig("handshake_test_")
t.Cleanup(func() { _ = os.RemoveAll(config.RootDir) })
privVal, err := privval.LoadFilePV(config.PrivValidator.KeyFile(), config.PrivValidator.StateFile())
privVal, err := privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile())
require.NoError(t, err)
pubKey, err := privVal.GetPubKey(context.Background())
require.NoError(t, err)

View File

@@ -13,22 +13,21 @@ import (
"github.com/gogo/protobuf/proto"
cfg "github.com/tendermint/tendermint/config"
cstypes "github.com/tendermint/tendermint/consensus/types"
"github.com/tendermint/tendermint/crypto"
cstypes "github.com/tendermint/tendermint/internal/consensus/types"
"github.com/tendermint/tendermint/internal/libs/fail"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
tmevents "github.com/tendermint/tendermint/libs/events"
"github.com/tendermint/tendermint/libs/fail"
tmjson "github.com/tendermint/tendermint/libs/json"
"github.com/tendermint/tendermint/libs/log"
tmmath "github.com/tendermint/tendermint/libs/math"
tmos "github.com/tendermint/tendermint/libs/os"
"github.com/tendermint/tendermint/libs/service"
tmtime "github.com/tendermint/tendermint/libs/time"
"github.com/tendermint/tendermint/privval"
tmgrpc "github.com/tendermint/tendermint/privval/grpc"
tmsync "github.com/tendermint/tendermint/libs/sync"
"github.com/tendermint/tendermint/p2p"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
tmtime "github.com/tendermint/tendermint/types/time"
)
// Consensus sentinel errors
@@ -45,8 +44,8 @@ var msgQueueSize = 1000
// msgs from the reactor which may update the state
type msgInfo struct {
Msg Message `json:"msg"`
PeerID types.NodeID `json:"peer_key"`
Msg Message `json:"msg"`
PeerID p2p.NodeID `json:"peer_key"`
}
// internally generated messages which may update the state
@@ -80,9 +79,8 @@ type State struct {
service.BaseService
// config details
config *cfg.ConsensusConfig
privValidator types.PrivValidator // for signing votes
privValidatorType types.PrivValidatorType
config *cfg.ConsensusConfig
privValidator types.PrivValidator // for signing votes
// store blocks and commits
blockStore sm.BlockStore
@@ -275,26 +273,6 @@ func (cs *State) SetPrivValidator(priv types.PrivValidator) {
cs.privValidator = priv
if priv != nil {
switch t := priv.(type) {
case *privval.RetrySignerClient:
cs.privValidatorType = types.RetrySignerClient
case *privval.FilePV:
cs.privValidatorType = types.FileSignerClient
case *privval.SignerClient:
cs.privValidatorType = types.SignerSocketClient
case *tmgrpc.SignerClient:
cs.privValidatorType = types.SignerGRPCClient
case types.MockPV:
cs.privValidatorType = types.MockSignerClient
case *types.ErroringMockPV:
cs.privValidatorType = types.ErrorMockSignerClient
default:
cs.Logger.Error("unsupported priv validator type", "err",
fmt.Errorf("error privValidatorType %s", t))
}
}
if err := cs.updatePrivValidatorPubKey(); err != nil {
cs.Logger.Error("failed to get private validator pubkey", "err", err)
}
@@ -493,7 +471,7 @@ func (cs *State) OpenWAL(walFile string) (WAL, error) {
// TODO: should these return anything or let callers just use events?
// AddVote inputs a vote.
func (cs *State) AddVote(vote *types.Vote, peerID types.NodeID) (added bool, err error) {
func (cs *State) AddVote(vote *types.Vote, peerID p2p.NodeID) (added bool, err error) {
if peerID == "" {
cs.internalMsgQueue <- msgInfo{&VoteMessage{vote}, ""}
} else {
@@ -505,7 +483,7 @@ func (cs *State) AddVote(vote *types.Vote, peerID types.NodeID) (added bool, err
}
// SetProposal inputs a proposal.
func (cs *State) SetProposal(proposal *types.Proposal, peerID types.NodeID) error {
func (cs *State) SetProposal(proposal *types.Proposal, peerID p2p.NodeID) error {
if peerID == "" {
cs.internalMsgQueue <- msgInfo{&ProposalMessage{proposal}, ""}
@@ -518,7 +496,7 @@ func (cs *State) SetProposal(proposal *types.Proposal, peerID types.NodeID) erro
}
// AddProposalBlockPart inputs a part of the proposal block.
func (cs *State) AddProposalBlockPart(height int64, round int32, part *types.Part, peerID types.NodeID) error {
func (cs *State) AddProposalBlockPart(height int64, round int32, part *types.Part, peerID p2p.NodeID) error {
if peerID == "" {
cs.internalMsgQueue <- msgInfo{&BlockPartMessage{height, round, part}, ""}
@@ -535,7 +513,7 @@ func (cs *State) SetProposalAndBlock(
proposal *types.Proposal,
block *types.Block,
parts *types.PartSet,
peerID types.NodeID,
peerID p2p.NodeID,
) error {
if err := cs.SetProposal(proposal, peerID); err != nil {
@@ -733,7 +711,7 @@ func (cs *State) newStep() {
cs.Logger.Error("failed publishing new round step", "err", err)
}
cs.evsw.FireEvent(types.EventNewRoundStepValue, &cs.RoundState)
cs.evsw.FireEvent(types.EventNewRoundStep, &cs.RoundState)
}
}
@@ -1563,7 +1541,7 @@ func (cs *State) enterCommit(height int64, commitRound int32) {
logger.Error("failed publishing valid block", "err", err)
}
cs.evsw.FireEvent(types.EventValidBlockValue, &cs.RoundState)
cs.evsw.FireEvent(types.EventValidBlock, &cs.RoundState)
}
}
}
@@ -1677,7 +1655,12 @@ func (cs *State) finalizeCommit(height int64) {
// Execute and commit the block, update and save the state, and update the mempool.
// NOTE The block.AppHash won't reflect these txs until the next block.
stateCopy, err := cs.blockExec.ApplyBlock(
var (
err error
retainHeight int64
)
stateCopy, retainHeight, err = cs.blockExec.ApplyBlock(
stateCopy,
types.BlockID{
Hash: block.Hash(),
@@ -1692,8 +1675,18 @@ func (cs *State) finalizeCommit(height int64) {
fail.Fail() // XXX
// Prune old heights, if requested by ABCI app.
if retainHeight > 0 {
pruned, err := cs.pruneBlocks(retainHeight)
if err != nil {
logger.Error("failed to prune blocks", "retain_height", retainHeight, "err", err)
} else {
logger.Debug("pruned blocks", "pruned", pruned, "retain_height", retainHeight)
}
}
// must be called before we update state
cs.RecordMetrics(height, block)
cs.recordMetrics(height, block)
// NewHeightStep!
cs.updateToState(stateCopy)
@@ -1715,7 +1708,24 @@ func (cs *State) finalizeCommit(height int64) {
// * cs.StartTime is set to when we will start round0.
}
func (cs *State) RecordMetrics(height int64, block *types.Block) {
func (cs *State) pruneBlocks(retainHeight int64) (uint64, error) {
base := cs.blockStore.Base()
if retainHeight <= base {
return 0, nil
}
pruned, err := cs.blockStore.PruneBlocks(retainHeight)
if err != nil {
return 0, fmt.Errorf("failed to prune block store: %w", err)
}
err = cs.blockExec.Store().PruneStates(retainHeight)
if err != nil {
return 0, fmt.Errorf("failed to prune state store: %w", err)
}
return pruned, nil
}
func (cs *State) recordMetrics(height int64, block *types.Block) {
cs.metrics.Validators.Set(float64(cs.Validators.Size()))
cs.metrics.ValidatorsPower.Set(float64(cs.Validators.TotalVotingPower()))
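For context, pruneBlocks above removes old heights from both the block store and the state store up to the retain height requested by the ABCI app, returning how many blocks were pruned. A minimal sketch of the retain-height arithmetic with a hypothetical in-memory block store (state-store pruning omitted):

package main

import "fmt"

type blockStore struct {
	base, height int64
}

func (s *blockStore) Base() int64 { return s.base }

// PruneBlocks drops all blocks below retainHeight and returns the count
// of removed blocks; pruning at or below the base is a no-op.
func (s *blockStore) PruneBlocks(retainHeight int64) (uint64, error) {
	if retainHeight <= s.base {
		return 0, nil
	}
	pruned := uint64(retainHeight - s.base)
	s.base = retainHeight
	return pruned, nil
}

func main() {
	s := &blockStore{base: 1, height: 100}
	pruned, err := s.PruneBlocks(50)
	fmt.Println(pruned, err, s.Base()) // 49 <nil> 50
}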
@@ -1735,9 +1745,8 @@ func (cs *State) RecordMetrics(height int64, block *types.Block) {
address types.Address
)
if commitSize != valSetLen {
cs.Logger.Error(fmt.Sprintf("commit size (%d) doesn't match valset length (%d) at height %d\n\n%v\n\n%v",
panic(fmt.Sprintf("commit size (%d) doesn't match valset length (%d) at height %d\n\n%v\n\n%v",
commitSize, valSetLen, block.Height, block.LastCommit.Signatures, cs.LastValidators.Validators))
return
}
if cs.privValidator != nil {
@@ -1775,10 +1784,9 @@ func (cs *State) RecordMetrics(height int64, block *types.Block) {
// NOTE: byzantine validators power and count is only for consensus evidence i.e. duplicate vote
var (
byzantineValidatorsPower int64
byzantineValidatorsCount int64
byzantineValidatorsPower = int64(0)
byzantineValidatorsCount = int64(0)
)
for _, ev := range block.Evidence.Evidence {
if dve, ok := ev.(*types.DuplicateVoteEvidence); ok {
if _, val := cs.Validators.GetByAddress(dve.VoteA.ValidatorAddress); val != nil {
@@ -1801,7 +1809,7 @@ func (cs *State) RecordMetrics(height int64, block *types.Block) {
cs.metrics.NumTxs.Set(float64(len(block.Data.Txs)))
cs.metrics.TotalTxs.Add(float64(len(block.Data.Txs)))
cs.metrics.BlockSizeBytes.Observe(float64(block.Size()))
cs.metrics.BlockSizeBytes.Set(float64(block.Size()))
cs.metrics.CommittedHeight.Set(float64(block.Height))
}
@@ -1849,7 +1857,7 @@ func (cs *State) defaultSetProposal(proposal *types.Proposal) error {
// NOTE: block is not necessarily valid.
// Asynchronously triggers either enterPrevote (before we time out of propose) or tryFinalizeCommit,
// once we have the full block.
func (cs *State) addProposalBlockPart(msg *BlockPartMessage, peerID types.NodeID) (added bool, err error) {
func (cs *State) addProposalBlockPart(msg *BlockPartMessage, peerID p2p.NodeID) (added bool, err error) {
height, round, part := msg.Height, msg.Round, msg.Part
// Blocks might be reused, so round mismatch is OK
@@ -1947,7 +1955,7 @@ func (cs *State) addProposalBlockPart(msg *BlockPartMessage, peerID types.NodeID
}
// Attempt to add the vote. If it's a duplicate signature, dupe out the validator
func (cs *State) tryAddVote(vote *types.Vote, peerID types.NodeID) (bool, error) {
func (cs *State) tryAddVote(vote *types.Vote, peerID p2p.NodeID) (bool, error) {
added, err := cs.addVote(vote, peerID)
if err != nil {
// If the vote height is off, we'll just ignore it,
@@ -1995,7 +2003,7 @@ func (cs *State) tryAddVote(vote *types.Vote, peerID types.NodeID) (bool, error)
return added, nil
}
func (cs *State) addVote(vote *types.Vote, peerID types.NodeID) (added bool, err error) {
func (cs *State) addVote(vote *types.Vote, peerID p2p.NodeID) (added bool, err error) {
cs.Logger.Debug(
"adding vote",
"vote_height", vote.Height,
@@ -2023,7 +2031,7 @@ func (cs *State) addVote(vote *types.Vote, peerID types.NodeID) (added bool, err
return added, err
}
cs.evsw.FireEvent(types.EventVoteValue, vote)
cs.evsw.FireEvent(types.EventVote, vote)
// if we can skip timeoutCommit and have all the votes now,
if cs.config.SkipTimeoutCommit && cs.LastCommit.HasAll() {
@@ -2052,7 +2060,7 @@ func (cs *State) addVote(vote *types.Vote, peerID types.NodeID) (added bool, err
if err := cs.eventBus.PublishEventVote(types.EventDataVote{Vote: vote}); err != nil {
return added, err
}
cs.evsw.FireEvent(types.EventVoteValue, vote)
cs.evsw.FireEvent(types.EventVote, vote)
switch vote.Type {
case tmproto.PrevoteType:
@@ -2106,7 +2114,7 @@ func (cs *State) addVote(vote *types.Vote, peerID types.NodeID) (added bool, err
cs.ProposalBlockParts = types.NewPartSetFromHeader(blockID.PartSetHeader)
}
cs.evsw.FireEvent(types.EventValidBlockValue, &cs.RoundState)
cs.evsw.FireEvent(types.EventValidBlock, &cs.RoundState)
if err := cs.eventBus.PublishEventValidBlock(cs.RoundStateEvent()); err != nil {
return added, err
}
@@ -2290,13 +2298,7 @@ func (cs *State) updatePrivValidatorPubKey() error {
timeout = cs.config.TimeoutPrevote
}
// no GetPubKey retry beyond the proposal/voting in RetrySignerClient
if cs.Step >= cstypes.RoundStepPrecommit && cs.privValidatorType == types.RetrySignerClient {
timeout = 0
}
// set context timeout depending on the configuration and the State step,
// this helps in avoiding blocking of the remote signer connection.
// set a hard timeout for 2 seconds. This helps in avoiding blocking of the remote signer connection
ctx, cancel := context.WithTimeout(context.TODO(), timeout)
defer cancel()
pubKey, err := cs.privValidator.GetPubKey(ctx)
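For context, the hunk above bounds the signer's GetPubKey call with a context deadline so that a slow or stuck remote signer cannot block consensus indefinitely. A minimal sketch of that timeout pattern, with an illustrative stand-in for the signer call (the 2-second value and the delay are illustrative):

package main

import (
	"context"
	"fmt"
	"time"
)

// getPubKey stands in for a remote signer call that may block.
func getPubKey(ctx context.Context) (string, error) {
	select {
	case <-time.After(5 * time.Second): // simulate a slow remote signer
		return "pubkey", nil
	case <-ctx.Done():
		return "", ctx.Err()
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	_, err := getPubKey(ctx)
	fmt.Println(err) // context deadline exceeded
}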

View File

@@ -11,12 +11,12 @@ import (
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/abci/example/counter"
cstypes "github.com/tendermint/tendermint/consensus/types"
"github.com/tendermint/tendermint/crypto/tmhash"
cstypes "github.com/tendermint/tendermint/internal/consensus/types"
p2pmock "github.com/tendermint/tendermint/internal/p2p/mock"
"github.com/tendermint/tendermint/libs/log"
tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
tmrand "github.com/tendermint/tendermint/libs/rand"
p2pmock "github.com/tendermint/tendermint/p2p/mock"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
"github.com/tendermint/tendermint/types"
)

View File

@@ -8,6 +8,7 @@ import (
tmjson "github.com/tendermint/tendermint/libs/json"
tmmath "github.com/tendermint/tendermint/libs/math"
"github.com/tendermint/tendermint/p2p"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
"github.com/tendermint/tendermint/types"
)
@@ -43,9 +44,9 @@ type HeightVoteSet struct {
valSet *types.ValidatorSet
mtx sync.Mutex
round int32 // max tracked round
roundVoteSets map[int32]RoundVoteSet // keys: [0...round]
peerCatchupRounds map[types.NodeID][]int32 // keys: peer.ID; values: at most 2 rounds
round int32 // max tracked round
roundVoteSets map[int32]RoundVoteSet // keys: [0...round]
peerCatchupRounds map[p2p.NodeID][]int32 // keys: peer.ID; values: at most 2 rounds
}
func NewHeightVoteSet(chainID string, height int64, valSet *types.ValidatorSet) *HeightVoteSet {
@@ -63,7 +64,7 @@ func (hvs *HeightVoteSet) Reset(height int64, valSet *types.ValidatorSet) {
hvs.height = height
hvs.valSet = valSet
hvs.roundVoteSets = make(map[int32]RoundVoteSet)
hvs.peerCatchupRounds = make(map[types.NodeID][]int32)
hvs.peerCatchupRounds = make(map[p2p.NodeID][]int32)
hvs.addRound(0)
hvs.round = 0
@@ -113,7 +114,7 @@ func (hvs *HeightVoteSet) addRound(round int32) {
// Duplicate votes return added=false, err=nil.
// By convention, peerID is "" if origin is self.
func (hvs *HeightVoteSet) AddVote(vote *types.Vote, peerID types.NodeID) (added bool, err error) {
func (hvs *HeightVoteSet) AddVote(vote *types.Vote, peerID p2p.NodeID) (added bool, err error) {
hvs.mtx.Lock()
defer hvs.mtx.Unlock()
if !types.IsVoteTypeValid(vote.Type) {
@@ -184,7 +185,7 @@ func (hvs *HeightVoteSet) getVoteSet(round int32, voteType tmproto.SignedMsgType
func (hvs *HeightVoteSet) SetPeerMaj23(
round int32,
voteType tmproto.SignedMsgType,
peerID types.NodeID,
peerID p2p.NodeID,
blockID types.BlockID) error {
hvs.mtx.Lock()
defer hvs.mtx.Unlock()
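For context, peerCatchupRounds above allows each peer at most two catch-up rounds, as its comment notes. A minimal sketch of that bookkeeping, with an illustrative NodeID string type and a hypothetical addCatchupRound helper:

package main

import "fmt"

type NodeID string

type heightVoteSet struct {
	peerCatchupRounds map[NodeID][]int32
}

// addCatchupRound records a catch-up round for a peer, capped at two.
func (hvs *heightVoteSet) addCatchupRound(peer NodeID, round int32) bool {
	rounds := hvs.peerCatchupRounds[peer]
	if len(rounds) >= 2 {
		return false // peer already used both catch-up rounds
	}
	hvs.peerCatchupRounds[peer] = append(rounds, round)
	return true
}

func main() {
	hvs := &heightVoteSet{peerCatchupRounds: make(map[NodeID][]int32)}
	fmt.Println(hvs.addCatchupRound("peer1", 1)) // true
	fmt.Println(hvs.addCatchupRound("peer1", 2)) // true
	fmt.Println(hvs.addCatchupRound("peer1", 3)) // false
}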

Some files were not shown because too many files have changed in this diff.