Mirror of https://github.com/tendermint/tendermint.git, synced 2026-01-26 22:52:04 +00:00

Compare commits: finalizeBl...bez/consen (5 commits)
| Author | SHA1 | Date |
|--------|------|------|
|        | b61f7270b1 |  |
|        | ab476e89b3 |  |
|        | 58f0a87015 |  |
|        | f8775952c3 |  |
|        | faea2f20aa |  |
2 .github/workflows/coverage.yml vendored

@@ -121,7 +121,7 @@ jobs:
      - run: |
          cat ./*profile.out | grep -v "mode: atomic" >> coverage.txt
        if: env.GIT_DIFF
      - uses: codecov/codecov-action@v1.5.2
      - uses: codecov/codecov-action@v1.5.0
        with:
          file: ./coverage.txt
        if: env.GIT_DIFF
6 .github/workflows/docker.yml vendored

@@ -40,17 +40,17 @@ jobs:
          platforms: all

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1.5.0
        uses: docker/setup-buildx-action@v1.3.0

      - name: Login to DockerHub
        if: ${{ github.event_name != 'pull_request' }}
        uses: docker/login-action@v1.10.0
        uses: docker/login-action@v1.9.0
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Publish to Docker Hub
        uses: docker/build-push-action@v2.6.1
        uses: docker/build-push-action@v2.5.0
        with:
          context: .
          file: ./DOCKER/Dockerfile
4 .github/workflows/e2e-nightly-34x.yml vendored

@@ -49,7 +49,7 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Notify Slack on failure
        uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
        uses: rtCamp/action-slack-notify@f565a63638bd3615e76249bffab00fcb9dab90f7
        env:
          SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
          SLACK_CHANNEL: tendermint-internal

@@ -65,7 +65,7 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Notify Slack on success
        uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
        uses: rtCamp/action-slack-notify@f565a63638bd3615e76249bffab00fcb9dab90f7
        env:
          SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
          SLACK_CHANNEL: tendermint-internal
4 .github/workflows/e2e-nightly-master.yml vendored

@@ -47,7 +47,7 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Notify Slack on failure
        uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
        uses: rtCamp/action-slack-notify@f565a63638bd3615e76249bffab00fcb9dab90f7
        env:
          SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
          SLACK_CHANNEL: tendermint-internal

@@ -63,7 +63,7 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Notify Slack on success
        uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
        uses: rtCamp/action-slack-notify@f565a63638bd3615e76249bffab00fcb9dab90f7
        env:
          SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
          SLACK_CHANNEL: tendermint-internal
2 .github/workflows/fuzz-nightly.yml vendored

@@ -76,7 +76,7 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Notify Slack if any crashers
        uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
        uses: rtCamp/action-slack-notify@f565a63638bd3615e76249bffab00fcb9dab90f7
        env:
          SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
          SLACK_CHANNEL: tendermint-internal
6 .github/workflows/proto-docker.yml vendored

@@ -34,16 +34,16 @@ jobs:
          echo ::set-output name=tags::${TAGS}

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1.5.0
        uses: docker/setup-buildx-action@v1.3.0

      - name: Login to DockerHub
        uses: docker/login-action@v1.10.0
        uses: docker/login-action@v1.9.0
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Publish to Docker Hub
        uses: docker/build-push-action@v2.6.1
        uses: docker/build-push-action@v2.5.0
        with:
          context: ./tools/proto
          file: ./tools/proto/Dockerfile
@@ -39,7 +39,6 @@ linters:
    # - wsl
    # - gocognit
    - nolintlint
    - asciicheck

issues:
  exclude-rules:
121 CHANGELOG.md

@@ -1,29 +1,5 @@
# Changelog

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

## v0.34.11

*June 18, 2021*

This release improves the robustness of statesync; tweaking channel priorities and timeouts and
adding two new parameters to the state sync config.

### BREAKING CHANGES

- Apps
  - [Version] \#6494 `TMCoreSemVer` is not required to be set as a ldflag any longer.

### IMPROVEMENTS

- [statesync] \#6566 Allow state sync fetchers and request timeout to be configurable. (@alexanderbez)
- [statesync] \#6378 Retry requests for snapshots and add a minimum discovery time (5s) for new snapshots. (@tychoish)
- [statesync] \#6582 Increase chunk priority and add multiple retry chunk requests (@cmwaters)

### BUG FIXES

- [evidence] \#6375 Fix bug with inconsistent LightClientAttackEvidence hashing (@cmwaters)

## v0.34.10

*April 14, 2021*

@@ -32,6 +8,8 @@ This release fixes a bug where peers would sometimes try to send messages
on incorrect channels. Special thanks to our friends at Oasis Labs for surfacing
this issue!

Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermint).

- [p2p/node] [\#6339](https://github.com/tendermint/tendermint/issues/6339) Fix bug with using custom channels (@cmwaters)
- [light] [\#6346](https://github.com/tendermint/tendermint/issues/6346) Correctly handle too high errors to improve client robustness (@cmwaters)

@@ -47,6 +25,8 @@ This release also includes a small Go API-breaking change, to reduce panics in t
Special thanks to our external contributors on this release: @gchaincl

Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES

- Go API

@@ -69,6 +49,8 @@ Special thanks to our external contributors on this release: @gchaincl
This release, in conjunction with [a fix in the Cosmos SDK](https://github.com/cosmos/cosmos-sdk/pull/8641),
introduces changes that should mean the logs are much, much quieter. 🎉

Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermint).

### IMPROVEMENTS

- [libs/log] [\#6174](https://github.com/tendermint/tendermint/issues/6174) Include timestamp (`ts` field; `time.RFC3339Nano` format) in JSON logger output (@melekes)

@@ -106,6 +88,8 @@ use remote signer implementations instead of `FilePV` in production.
Thank you to @joe-bowman for his assistance with this vulnerability and a particular
shout-out to @marbar3778 for diagnosing it quickly.

Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermint).

### BUG FIXES

- [consensus] [\#6128](https://github.com/tendermint/tendermint/pull/6128) Remove privValidator from log call (@tessr)

@@ -126,6 +110,8 @@ Thank you to our friends at Crypto.com for the initial report of this memory lea
Special thanks to other external contributors on this release: @yayajacky, @odidev, @laniehei, and @c29r3!

Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermint).

### BUG FIXES

- [light] [\#6022](https://github.com/tendermint/tendermint/pull/6022) Fix a bug when the number of validators equals 100 (@melekes)

@@ -144,6 +130,8 @@ or https://nvd.nist.gov/vuln/detail/CVE-2021-21271.
Tendermint Core v0.34.3 also updates GoGo Protobuf to 1.3.2 in order to pick up the fix for
https://nvd.nist.gov/vuln/detail/CVE-2021-3121.

Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermint).

### BUG FIXES

- [evidence] [[security fix]](https://github.com/tendermint/tendermint/security/advisories/GHSA-p658-8693-mhvg) Use correct source of evidence time (@cmwaters)

@@ -157,6 +145,8 @@ This release fixes a substantial bug in evidence handling where evidence could
sometimes be broadcast before the block containing that evidence was fully committed,
resulting in some nodes panicking when trying to verify said evidence.

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES

- Go API

@@ -180,6 +170,8 @@ disconnecting from this node. As a temporary remedy (until the mempool package
is refactored), the `max-batch-bytes` was disabled. Transactions will be sent
one by one without batching.

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES

- CLI/RPC/Config

@@ -208,6 +200,8 @@ Holy smokes, this is a big one! For a more reader-friendly overview of the chang
Special thanks to external contributors on this release: @james-ray, @fedekunze, @favadi, @alessio,
@joe-bowman, @cuonglm, @SadPencil and @dongsam.

And as always, friendly reminder, that we have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES

- CLI/RPC/Config

@@ -448,6 +442,9 @@ as 2/3+ of the signatures are checked._
Special thanks to @njmurarka at Bluzelle Networks for reporting this.

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### SECURITY:

- [consensus] Do not allow signatures for a wrong block in commits (@ebuchman)

@@ -463,6 +460,8 @@ need to update your code.**
Special thanks to external contributors on this release: @tau3,

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

- Go API

@@ -522,6 +521,8 @@ Special thanks to external contributors on this release: @tau3,
Special thanks to external contributors on this release: @whylee259, @greg-szabo

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

- Go API

@@ -608,6 +609,9 @@ Notes:
Special thanks to [fudongbai](https://hackerone.com/fudongbai) for finding
and reporting this.

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### SECURITY:

- [mempool] Reserve IDs in InitPeer instead of AddPeer (@tessr)

@@ -620,6 +624,8 @@ and reporting this.
Special thanks to external contributors on this release:
@antho1404, @michaelfig, @gterzian, @tau3, @Shivani912

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

- CLI/RPC/Config

@@ -670,6 +676,9 @@ Special thanks to external contributors on this release:
Special thanks to external contributors on this release:
@princesinha19

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### FEATURES:

- [rpc] [\#3333](https://github.com/tendermint/tendermint/issues/3333) Add `order_by` to `/tx_search` endpoint, allowing to change default ordering from asc to desc (@princesinha19)

@@ -688,6 +697,9 @@ Special thanks to external contributors on this release:
Special thanks to external contributors on this release: @mrekucci, @PSalant726, @princesinha19, @greg-szabo, @dongsam, @cuonglm, @jgimeno, @yenkhoon

Friendly reminder, we have a [bug bounty
program.](https://hackerone.com/tendermint).

*January 14, 2020*

This release contains breaking changes to the `Block#Header`, specifically

@@ -916,6 +928,9 @@ Notes:
Special thanks to [fudongbai](https://hackerone.com/fudongbai) for finding
and reporting this.

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### SECURITY:

- [mempool] Reserve IDs in InitPeer instead of AddPeer (@tessr)

@@ -927,6 +942,9 @@ _January, 9, 2020_
Special thanks to external contributors on this release: @greg-szabo, @gregzaitsev, @yenkhoon

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### FEATURES:

- [rpc/lib] [\#4248](https://github.com/tendermint/tendermint/issues/4248) RPC client basic authentication support (@greg-szabo)

@@ -948,6 +966,9 @@ Special thanks to external contributors on this release: @greg-szabo, @gregzaits
Special thanks to external contributors on this release: @erikgrinaker, @guagualvcha, @hsyis, @cosmostuba, @whunmr, @austinabell

Friendly reminder, we have a [bug bounty
program.](https://hackerone.com/tendermint).

### BREAKING CHANGES:

@@ -987,6 +1008,9 @@ identified and fixed here.
Special thanks to [elvishacker](https://hackerone.com/elvishacker) for finding
and reporting this.

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

- Go API

@@ -1013,6 +1037,9 @@ accepting new peers and only allowing `ed25519` pubkeys.
Special thanks to [fudongbai](https://hackerone.com/fudongbai) for pointing
this out.

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### SECURITY:

- [p2p] [\#4030](https://github.com/tendermint/tendermint/issues/4030) Only allow ed25519 pubkeys when connecting

@@ -1028,6 +1055,9 @@ All clients are recommended to upgrade. See
Special thanks to [fudongbai](https://hackerone.com/fudongbai) for discovering
and reporting this issue.

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### SECURITY:

- [p2p] [\#4030](https://github.com/tendermint/tendermint/issues/4030) Fix for panic on nil public key send to a peer

@@ -1038,6 +1068,9 @@ and reporting this issue.
Special thanks to external contributors on this release: @jon-certik, @gracenoah, @PSalant726, @gchaincl

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

- CLI/RPC/Config

@@ -1073,6 +1106,9 @@ guide.
Special thanks to external contributors on this release:
@gchaincl, @bluele, @climber73

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### IMPROVEMENTS:

- [consensus] [\#3839](https://github.com/tendermint/tendermint/issues/3839) Reduce "Error attempting to add vote" message severity (Error -> Info)

@@ -1093,6 +1129,9 @@ Special thanks to external contributors on this release:
Special thanks to external contributors on this release:
@ruseinov, @bluele, @guagualvcha

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

- Go API

@@ -1132,6 +1171,9 @@ This release contains a minor enhancement to the ABCI and some breaking changes
- CheckTx requests include a `CheckTxType` enum that can be set to `Recheck` to indicate to the application that this transaction was already checked/validated and certain expensive operations (like checking signatures) can be skipped
- Removed various functions from `libs` pkgs

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

- Go API

@@ -1177,6 +1219,9 @@ and the RPC, namely:
  [docs](https://github.com/tendermint/tendermint/blob/60827f75623b92eff132dc0eff5b49d2025c591e/docs/spec/abci/abci.md#events)
- Bind RPC to localhost by default, not to the public interface [UPGRADING/RPC_Changes](./UPGRADING.md#rpc_changes)

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

* CLI/RPC/Config

@@ -1277,6 +1322,8 @@ Notes:
Special thanks to [fudongbai](https://hackerone.com/fudongbai) for finding
and reporting this.

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### SECURITY:

@@ -1297,6 +1344,8 @@ identified and fixed here.
Special thanks to [elvishacker](https://hackerone.com/elvishacker) for finding
and reporting this.

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

@@ -1324,6 +1373,8 @@ accepting new peers and only allowing `ed25519` pubkeys.
Special thanks to [fudongbai](https://hackerone.com/fudongbai) for pointing
this out.

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### SECURITY:

@@ -1340,6 +1391,8 @@ All clients are recommended to upgrade. See
Special thanks to [fudongbai](https://hackerone.com/fudongbai) for discovering
and reporting this issue.

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### SECURITY:

@@ -1635,6 +1688,8 @@ See the [v0.31.0
Milestone](https://github.com/tendermint/tendermint/milestone/19?closed=1) for
more details.

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

@@ -1855,6 +1910,8 @@ This release contains two important fixes: one for p2p layer where we sometimes
were not closing connections and one for consensus layer where consensus with
no empty blocks (`create_empty_blocks = false`) could halt.

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### IMPROVEMENTS:
- [pex] [\#3037](https://github.com/tendermint/tendermint/issues/3037) Only log "Reached max attempts to dial" once

@@ -1894,6 +1951,8 @@ While we are trying to stabilize the Block protocol to preserve compatibility
with old chains, there may be some final changes yet to come before Cosmos
launch as we continue to audit and test the software.

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

@@ -1942,6 +2001,8 @@ launch as we continue to audit and test the software.
Special thanks to external contributors on this release:
@HaoyangLiu

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### BUG FIXES:
- [consensus] Fix consensus halt from proposing blocks with too much evidence

@@ -2070,6 +2131,8 @@ Special thanks to @dlguddus for discovering a [major
issue](https://github.com/tendermint/tendermint/issues/2718#issuecomment-440888677)
in the proposer selection algorithm.

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

This release is primarily about fixes to the proposer selection algorithm
in preparation for the [Cosmos Game of

@@ -2132,6 +2195,8 @@ Special thanks to external contributors on this release:
@ackratos, @goolAdapter, @james-ray, @joe-bowman, @kostko,
@nagarajmanjunath, @tomtau

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### FEATURES:

@@ -2171,6 +2236,8 @@ Special thanks to external contributors on this release:
Special thanks to external contributors on this release:
@danil-lashin, @kevlubkcm, @krhubert, @srmo

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

@@ -2215,6 +2282,8 @@ Special thanks to external contributors on this release:
Special thanks to external contributors on this release: @hleb-albau, @zhuzeyu

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### FEATURES:

- [rpc] [\#2582](https://github.com/tendermint/tendermint/issues/2582) Enable CORS on RPC API (@hleb-albau)

@@ -2232,6 +2301,8 @@ Special thanks to external contributors on this release: @hleb-albau, @zhuzeyu
Special thanks to external contributors on this release: @katakonst

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### IMPROVEMENTS:

- [consensus] [\#2704](https://github.com/tendermint/tendermint/issues/2704) Simplify valid POL round logic

@@ -2405,6 +2476,8 @@ It also addresses some issues found via security audit, removes various unused
functions from `libs/common`, and implements
[ADR-012](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-012-peer-transport.md).

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

BREAKING CHANGES:

* CLI/RPC/Config
@@ -9,7 +9,6 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi
### BREAKING CHANGES

- CLI/RPC/Config
  - [pubsub/events] \#6634 The `ResultEvent.Events` field is now of type `[]abci.Event` preserving event order instead of `map[string][]string`. (@alexanderbez)
  - [config] \#5598 The `test_fuzz` and `test_fuzz_config` P2P settings have been removed. (@erikgrinaker)
  - [config] \#5728 `fast_sync = "v1"` is no longer supported (@melekes)
  - [cli] \#5772 `gen_node_key` prints JSON-encoded `NodeKey` rather than ID and does not save it to `node_key.json` (@melekes)

@@ -18,10 +17,9 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi
  - [rpc] \#6168 Change default sorting to desc for `/tx_search` results (@melekes)
  - [cli] \#6282 User must specify the node mode when using `tendermint init` (@cmwaters)
  - [state/indexer] \#6382 reconstruct indexer, move txindex into the indexer package (@JayT106)
  - [cli] \#6372 Introduce `BootstrapPeers` as part of the new p2p stack. Peers to be connected on startup (@cmwaters)
  - [config] \#6462 Move `PrivValidator` configuration out of `BaseConfig` into its own section. (@tychoish)
  - [rpc] \#6610 Add MaxPeerBlockHeight into /status rpc call (@JayT106)
  - [libs/CList] \#6626 Automatically detach the prev/next elements in Remove function (@JayT106)
  - [cli] \#6372 Introduce `BootstrapPeers` as part of the new p2p stack. Peers to be connected on
    startup (@cmwaters)
  - [config] \#6462 Move `PrivValidator` configuration out of `BaseConfig` into its own section.

- Apps
  - [ABCI] \#6408 Change the `key` and `value` fields from `[]byte` to `string` in the `EventAttribute` type. (@alexanderbez)

@@ -34,18 +32,14 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi
- P2P Protocol

- Go API
  - [pubsub] \#6634 The `Query#Matches` method along with other pubsub methods, now accepts a `[]abci.Event` instead of `map[string][]string`. (@alexanderbez)
  - [p2p] \#6618 Move `p2p.NodeInfo` into `types` to support use of the SDK. (@tychoish)
  - [p2p] \#6583 Make `p2p.NodeID` and `p2p.NetAddress` exported types to support their use in the RPC layer. (@tychoish)
  - [node] \#6540 Reduce surface area of the `node` package by making most of the implementation details private. (@tychoish)
  - [p2p] \#6547 Move the entire `p2p` package and all reactor implementations into `internal`. (@tychoish)
  - [libs/log] \#6534 Remove the existing custom Tendermint logger backed by go-kit. The logging interface, `Logger`, remains. Tendermint still provides a default logger backed by the performant zerolog logger. (@alexanderbez)
  - [libs/time] \#6495 Move types/time to libs/time to improve consistency. (@tychoish)
  - [consensus] \#6532 Move the `consensus` package to the `internal` package. (@alexanderbez)
    - Remove `ConsensusState()` and `ConsensusReactor()` on the `Node` type.
    - Move `consensus` metrics to the root `metrics` package.
  - [mempool] \#6529 The `Context` field has been removed from the `TxInfo` type. `CheckTx` now requires a `Context` argument. (@alexanderbez)
  - [abci/client, proxy] \#5673 `Async` funcs return an error, `Sync` and `Async` funcs accept `context.Context` (@melekes)
  - [p2p] Remove unused function `MakePoWTarget`. (@erikgrinaker)
  - [p2p] Removed unused function `MakePoWTarget`. (@erikgrinaker)
  - [libs/bits] \#5720 Validate `BitArray` in `FromProto`, which now returns an error (@melekes)
  - [proto/p2p] Rename `DefaultNodeInfo` and `DefaultNodeInfoOther` to `NodeInfo` and `NodeInfoOther` (@erikgrinaker)
  - [proto/p2p] Renamed `DefaultNodeInfo` and `DefaultNodeInfoOther` to `NodeInfo` and `NodeInfoOther` (@erikgrinaker)
  - [proto/p2p] Rename `NodeInfo.default_node_id` to `node_id` (@erikgrinaker)
  - [libs/os] Kill() and {Must,}{Read,Write}File() functions have been removed. (@alessio)
  - [store] \#5848 Remove block store state in favor of using the db iterators directly (@cmwaters)

@@ -61,20 +55,16 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi
  - [rpc/client/http] \#6176 Unexpose `WSEvents` (@melekes)
  - [rpc/jsonrpc/client/ws_client] \#6176 `NewWS` no longer accepts options (use `NewWSWithOptions` and `OnReconnect` funcs to configure the client) (@melekes)
  - [internal/libs] \#6366 Move `autofile`, `clist`,`fail`,`flowrate`, `protoio`, `sync`, `tempfile`, `test` and `timer` lib packages to an internal folder
  - [libs/rand] \#6364 Remove most of libs/rand in favour of standard lib's `math/rand` (@liamsi)
  - [libs/rand] \#6364 Removed most of libs/rand in favour of standard lib's `math/rand` (@liamsi)
  - [mempool] \#6466 The original mempool reactor has been versioned as `v0` and moved to a sub-package under the root `mempool` package.
    Some core types have been kept in the `mempool` package such as `TxCache` and it's implementations, the `Mempool` interface itself
    and `TxInfo`. (@alexanderbez)
  - [crypto/sr25519] \#6526 Do not re-execute the Ed25519-style key derivation step when doing signing and verification. The derivation is now done once and only once. This breaks `sr25519.GenPrivKeyFromSecret` output compatibility. (@Yawning)
  - [types] \#6627 Move `NodeKey` to types to make the type public.
  - [config] \#6627 Extend `config` to contain methods `LoadNodeKeyID` and `LoadorGenNodeKeyID`

- Blockchain Protocol

- Data Storage
  - [store/state/evidence/light] \#5771 Use an order-preserving varint key encoding (@cmwaters)
  - [mempool] \#6396 Remove mempool's write ahead log (WAL), (previously unused by the tendermint code). (@tychoish)
  - [state] \#6541 Move pruneBlocks from consensus/state to state/execution. (@JayT106)

- Tooling
  - [tools] \#6498 Set OS home dir to instead of the hardcoded PATH. (@JayT106)

@@ -97,12 +87,9 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi
  - [config/indexer] \#6411 Introduce support for custom event indexing data sources, specifically PostgreSQL. (@JayT106)

### IMPROVEMENTS
- [libs/log] Console log formatting changes as a result of \#6534 and \#6589. (@tychoish)
- [statesync] \#6566 Allow state sync fetchers and request timeout to be configurable. (@alexanderbez)

- [types] \#6478 Add `block_id` to `newblock` event (@jeebster)
- [crypto/ed25519] \#5632 Adopt zip215 `ed25519` verification. (@marbar3778)
- [crypto/ed25519] \#6526 Use [curve25519-voi](https://github.com/oasisprotocol/curve25519-voi) for `ed25519` signing and verification. (@Yawning)
- [crypto/sr25519] \#6526 Use [curve25519-voi](https://github.com/oasisprotocol/curve25519-voi) for `sr25519` signing and verification. (@Yawning)
- [privval] \#5603 Add `--key` to `init`, `gen_validator`, `testnet` & `unsafe_reset_priv_validator` for use in generating `secp256k1` keys.
- [privval] \#5725 Add gRPC support to private validator.
- [privval] \#5876 `tendermint show-validator` will query the remote signer if gRPC is being used (@marbar3778)

@@ -132,18 +119,11 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi
- [crypto/merkle] \#6443 Improve HashAlternatives performance (@cuonglm)
- [crypto/merkle] \#6513 Optimize HashAlternatives (@marbar3778)
- [p2p/pex] \#6509 Improve addrBook.hash performance (@cuonglm)
- [consensus/metrics] \#6549 Change block_size gauge to a histogram for better observability over time (@marbar3778)
- [statesync] \#6587 Increase chunk priority and re-request chunks that don't arrive (@cmwaters)
- [state/privval] \#6578 No GetPubKey retry beyond the proposal/voting window (@JayT106)
- [rpc] \#6615 Add TotalGasUsed to block_results response (@crypto-facs)
- [cmd/tendermint/commands] \#6623 replace `$HOME/.some/test/dir` with `t.TempDir` (@tanyabouman)

### BUG FIXES

- [types] \#5523 Change json naming of `PartSetHeader` within `BlockID` from `parts` to `part_set_header` (@marbar3778)
- [privval] \#5638 Increase read/write timeout to 5s and calculate ping interval based on it (@JoeKash)
- [blockchain/v1] [\#5701](https://github.com/tendermint/tendermint/pull/5701) Handle peers without blocks (@melekes)
- [blockchain/v1] \#5711 Fix deadlock (@melekes)
- [evidence] \#6375 Fix bug with inconsistent LightClientAttackEvidence hashing (cmwaters)
- [rpc] \#6507 fix RPC client doesn't handle url's without ports (@JayT106)
- [statesync] \#6463 Adds Reverse Sync feature to fetch historical light blocks after state sync in order to verify any evidence (@cmwaters)
- [fastsync] \#6590 Update the metrics during fast-sync (@JayT106)
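To illustrate the \#6634/\#6408 breaking changes listed above (event order preserved via `[]abci.Event`, and string `key`/`value` attributes), here is a minimal sketch; the iteration pattern is an assumption about typical caller code, not something taken from this diff:

```go
package example

import (
	"fmt"

	abci "github.com/tendermint/tendermint/abci/types"
)

// printEvents walks events in their original order, something the old
// map[string][]string representation could not guarantee.
func printEvents(events []abci.Event) {
	for _, ev := range events {
		for _, attr := range ev.Attributes {
			// Per #6408, Key and Value are strings rather than []byte.
			fmt.Printf("%s.%s = %s\n", ev.Type, attr.Key, attr.Value)
		}
	}
}
```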
@@ -1,5 +1,5 @@
# stage 1 Generate Tendermint Binary
FROM golang:1.16-alpine as builder
FROM golang:1.15-alpine as builder
RUN apk update && \
    apk upgrade && \
    apk --no-cache add make
@@ -9,7 +9,7 @@ RUN wget http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm &
RUN yum -y groupinstall "Development Tools"
RUN yum -y install leveldb-devel which

ENV GOVERSION=1.16.5
ENV GOVERSION=1.12.9

RUN cd /tmp && \
    wget https://dl.google.com/go/go${GOVERSION}.linux-amd64.tar.gz && \
@@ -8,7 +8,7 @@ Or [Blockchain](<https://en.wikipedia.org/wiki/Blockchain_(database)>), for shor

[](https://github.com/tendermint/tendermint/releases/latest)
[](https://pkg.go.dev/github.com/tendermint/tendermint)
[](https://github.com/moovweb/gvm)
[](https://github.com/moovweb/gvm)
[](https://discord.gg/vcExX9T)
[](https://github.com/tendermint/tendermint/blob/master/LICENSE)
[](https://github.com/tendermint/tendermint)

@@ -48,7 +48,7 @@ to notify you of vulnerabilities and fixes in Tendermint Core. You can subscribe

| Requirement | Notes            |
|-------------|------------------|
| Go version  | Go1.16 or higher |
| Go version  | Go1.15 or higher |

## Documentation
29 UPGRADING.md

@@ -47,40 +47,13 @@ This guide provides instructions for upgrading to specific versions of Tendermin

* CLI commands and flags are all now hyphen-case instead of snake_case.
  Make sure to adjust any scripts that call a CLI command with snake_casing.

### API Changes

The p2p layer was reimplemented as part of the 0.35 release cycle, and
all reactors were refactored. As part of that work these
implementations moved into the `internal` package and are no longer
considered part of the public Go API of tendermint. These packages
are:

- `p2p`
- `mempool`
- `consensus`
- `statesync`
- `blockchain`
- `evidence`

Accordingly, the `node` package was changed to reduce access to
tendermint internals: applications that use tendermint as a library
will need to change to accommodate these changes. Most notably:

- The `Node` type has become internal, and all constructors return a
  `service.Service` implementation.

- The `node.DefaultNewNode` and `node.NewNode` constructors are no
  longer exported and have been replaced with `node.New` and
  `node.NewDefault`, which provide more functional interfaces.
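As a rough illustration of the new shape, application code now drives the node purely through the `service.Service` interface. The sketch below is an assumption-heavy sketch, not the library's documented usage: it takes the node value as a parameter (the exact `node.New`/`node.NewDefault` signatures are not shown here) and uses only `Start`, `Stop`, and `Wait` from `service.Service`.

```go
package example

import "github.com/tendermint/tendermint/libs/service"

// runNode drives a node obtained from node.New or node.NewDefault.
// Only the service.Service surface described above is relied upon.
func runNode(n service.Service) error {
	if err := n.Start(); err != nil {
		return err
	}
	defer func() { _ = n.Stop() }()

	n.Wait() // blocks until the node has shut down
	return nil
}
```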

## v0.34.0

**Upgrading to Tendermint 0.34 requires a blockchain restart.**
This release is not compatible with previous blockchains due to changes to
the encoding format (see "Protocol Buffers," below) and the block header (see "Blockchain Protocol").

Note also that Tendermint 0.34 also requires Go 1.16 or higher.
Note also that Tendermint 0.34 also requires Go 1.15 or higher.

### ABCI Changes
@@ -35,29 +35,33 @@ type Client interface {
	FlushAsync(context.Context) (*ReqRes, error)
	EchoAsync(ctx context.Context, msg string) (*ReqRes, error)
	InfoAsync(context.Context, types.RequestInfo) (*ReqRes, error)
	DeliverTxAsync(context.Context, types.RequestDeliverTx) (*ReqRes, error)
	CheckTxAsync(context.Context, types.RequestCheckTx) (*ReqRes, error)
	QueryAsync(context.Context, types.RequestQuery) (*ReqRes, error)
	CommitAsync(context.Context) (*ReqRes, error)
	InitChainAsync(context.Context, types.RequestInitChain) (*ReqRes, error)
	BeginBlockAsync(context.Context, types.RequestBeginBlock) (*ReqRes, error)
	EndBlockAsync(context.Context, types.RequestEndBlock) (*ReqRes, error)
	ListSnapshotsAsync(context.Context, types.RequestListSnapshots) (*ReqRes, error)
	OfferSnapshotAsync(context.Context, types.RequestOfferSnapshot) (*ReqRes, error)
	LoadSnapshotChunkAsync(context.Context, types.RequestLoadSnapshotChunk) (*ReqRes, error)
	ApplySnapshotChunkAsync(context.Context, types.RequestApplySnapshotChunk) (*ReqRes, error)
	FinalizeBlockAsync(context.Context, types.RequestFinalizeBlock) (*ReqRes, error)

	// Synchronous requests
	FlushSync(context.Context) error
	EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error)
	InfoSync(context.Context, types.RequestInfo) (*types.ResponseInfo, error)
	DeliverTxSync(context.Context, types.RequestDeliverTx) (*types.ResponseDeliverTx, error)
	CheckTxSync(context.Context, types.RequestCheckTx) (*types.ResponseCheckTx, error)
	QuerySync(context.Context, types.RequestQuery) (*types.ResponseQuery, error)
	CommitSync(context.Context) (*types.ResponseCommit, error)
	InitChainSync(context.Context, types.RequestInitChain) (*types.ResponseInitChain, error)
	BeginBlockSync(context.Context, types.RequestBeginBlock) (*types.ResponseBeginBlock, error)
	EndBlockSync(context.Context, types.RequestEndBlock) (*types.ResponseEndBlock, error)
	ListSnapshotsSync(context.Context, types.RequestListSnapshots) (*types.ResponseListSnapshots, error)
	OfferSnapshotSync(context.Context, types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error)
	LoadSnapshotChunkSync(context.Context, types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error)
	ApplySnapshotChunkSync(context.Context, types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error)
	FinalizeBlockSync(context.Context, types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error)
}

//----------------------------------------
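For orientation, a minimal sketch of driving this interface from caller code; constructing and starting the client is out of scope here, so it is taken as a parameter, and only methods listed in the interface above are used.

```go
package example

import (
	"context"
	"fmt"
	"time"

	abcicli "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/types"
)

func ping(cli abcicli.Client) error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Synchronous call: blocks until the response arrives or ctx expires.
	echo, err := cli.EchoSync(ctx, "ping")
	if err != nil {
		return err
	}
	fmt.Println(echo.Message)

	// Asynchronous call: returns a *ReqRes handle right away; the response
	// is populated later, once the request has been ordered and answered.
	reqres, err := cli.CheckTxAsync(ctx, types.RequestCheckTx{Tx: []byte("raw tx")})
	if err != nil {
		return err
	}
	_ = reqres // a real caller would attach a callback or wait on the handle
	return nil
}
```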

@@ -194,6 +194,16 @@ func (cli *grpcClient) InfoAsync(ctx context.Context, params types.RequestInfo)
	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Info{Info: res}})
}

// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) DeliverTxAsync(ctx context.Context, params types.RequestDeliverTx) (*ReqRes, error) {
	req := types.ToRequestDeliverTx(params)
	res, err := cli.client.DeliverTx(ctx, req.GetDeliverTx(), grpc.WaitForReady(true))
	if err != nil {
		return nil, err
	}
	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_DeliverTx{DeliverTx: res}})
}

// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) CheckTxAsync(ctx context.Context, params types.RequestCheckTx) (*ReqRes, error) {
	req := types.ToRequestCheckTx(params)

@@ -234,6 +244,26 @@ func (cli *grpcClient) InitChainAsync(ctx context.Context, params types.RequestI
	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_InitChain{InitChain: res}})
}

// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) BeginBlockAsync(ctx context.Context, params types.RequestBeginBlock) (*ReqRes, error) {
	req := types.ToRequestBeginBlock(params)
	res, err := cli.client.BeginBlock(ctx, req.GetBeginBlock(), grpc.WaitForReady(true))
	if err != nil {
		return nil, err
	}
	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_BeginBlock{BeginBlock: res}})
}

// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) EndBlockAsync(ctx context.Context, params types.RequestEndBlock) (*ReqRes, error) {
	req := types.ToRequestEndBlock(params)
	res, err := cli.client.EndBlock(ctx, req.GetEndBlock(), grpc.WaitForReady(true))
	if err != nil {
		return nil, err
	}
	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_EndBlock{EndBlock: res}})
}

// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) ListSnapshotsAsync(ctx context.Context, params types.RequestListSnapshots) (*ReqRes, error) {
	req := types.ToRequestListSnapshots(params)

@@ -284,22 +314,6 @@ func (cli *grpcClient) ApplySnapshotChunkAsync(
	)
}

func (cli *grpcClient) FinalizeBlockAsync(
	ctx context.Context,
	params types.RequestFinalizeBlock,
) (*ReqRes, error) {
	req := types.ToRequestFinalizeBlock(params)
	res, err := cli.client.FinalizeBlock(ctx, req.GetFinalizeBlock(), grpc.WaitForReady(true))
	if err != nil {
		return nil, err
	}
	return cli.finishAsyncCall(
		ctx,
		req,
		&types.Response{Value: &types.Response_FinalizeBlock{FinalizeBlock: res}},
	)
}

// finishAsyncCall creates a ReqRes for an async call, and immediately populates it
// with the response. We don't complete it until it's been ordered via the channel.
func (cli *grpcClient) finishAsyncCall(ctx context.Context, req *types.Request, res *types.Response) (*ReqRes, error) {

@@ -366,6 +380,18 @@ func (cli *grpcClient) InfoSync(
	return cli.finishSyncCall(reqres).GetInfo(), cli.Error()
}

func (cli *grpcClient) DeliverTxSync(
	ctx context.Context,
	params types.RequestDeliverTx,
) (*types.ResponseDeliverTx, error) {

	reqres, err := cli.DeliverTxAsync(ctx, params)
	if err != nil {
		return nil, err
	}
	return cli.finishSyncCall(reqres).GetDeliverTx(), cli.Error()
}

func (cli *grpcClient) CheckTxSync(
	ctx context.Context,
	params types.RequestCheckTx,

@@ -409,6 +435,30 @@ func (cli *grpcClient) InitChainSync(
	return cli.finishSyncCall(reqres).GetInitChain(), cli.Error()
}

func (cli *grpcClient) BeginBlockSync(
	ctx context.Context,
	params types.RequestBeginBlock,
) (*types.ResponseBeginBlock, error) {

	reqres, err := cli.BeginBlockAsync(ctx, params)
	if err != nil {
		return nil, err
	}
	return cli.finishSyncCall(reqres).GetBeginBlock(), cli.Error()
}

func (cli *grpcClient) EndBlockSync(
	ctx context.Context,
	params types.RequestEndBlock,
) (*types.ResponseEndBlock, error) {

	reqres, err := cli.EndBlockAsync(ctx, params)
	if err != nil {
		return nil, err
	}
	return cli.finishSyncCall(reqres).GetEndBlock(), cli.Error()
}

func (cli *grpcClient) ListSnapshotsSync(
	ctx context.Context,
	params types.RequestListSnapshots,

@@ -454,14 +504,3 @@ func (cli *grpcClient) ApplySnapshotChunkSync(
	}
	return cli.finishSyncCall(reqres).GetApplySnapshotChunk(), cli.Error()
}

func (cli *grpcClient) FinalizeBlockSync(
	ctx context.Context,
	params types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {

	reqres, err := cli.FinalizeBlockAsync(ctx, params)
	if err != nil {
		return nil, err
	}
	return cli.finishSyncCall(reqres).GetFinalizeBlock(), cli.Error()
}
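Per the NOTE comments above, the gRPC client's `*Async` methods are synchronous under the hood (each performs a blocking gRPC call with `grpc.WaitForReady(true)`), so a context deadline is how a caller bounds them. A minimal sketch, assuming an already-constructed client:

```go
package example

import (
	"context"
	"time"

	abcicli "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/types"
)

// beginBlockWithTimeout bounds the nominally-async call, which for the gRPC
// client blocks until the server answers (or the connection comes back up).
func beginBlockWithTimeout(cli abcicli.Client, req types.RequestBeginBlock) error {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	_, err := cli.BeginBlockAsync(ctx, req)
	return err
}
```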

@@ -77,6 +77,17 @@ func (app *localClient) InfoAsync(ctx context.Context, req types.RequestInfo) (*
	), nil
}

func (app *localClient) DeliverTxAsync(ctx context.Context, params types.RequestDeliverTx) (*ReqRes, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.DeliverTx(params)
	return app.callback(
		types.ToRequestDeliverTx(params),
		types.ToResponseDeliverTx(res),
	), nil
}

func (app *localClient) CheckTxAsync(ctx context.Context, req types.RequestCheckTx) (*ReqRes, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

@@ -121,6 +132,28 @@ func (app *localClient) InitChainAsync(ctx context.Context, req types.RequestIni
	), nil
}

func (app *localClient) BeginBlockAsync(ctx context.Context, req types.RequestBeginBlock) (*ReqRes, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.BeginBlock(req)
	return app.callback(
		types.ToRequestBeginBlock(req),
		types.ToResponseBeginBlock(res),
	), nil
}

func (app *localClient) EndBlockAsync(ctx context.Context, req types.RequestEndBlock) (*ReqRes, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.EndBlock(req)
	return app.callback(
		types.ToRequestEndBlock(req),
		types.ToResponseEndBlock(res),
	), nil
}

func (app *localClient) ListSnapshotsAsync(ctx context.Context, req types.RequestListSnapshots) (*ReqRes, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

@@ -171,20 +204,6 @@ func (app *localClient) ApplySnapshotChunkAsync(
	), nil
}

func (app *localClient) FinalizeBlockAsync(
	ctx context.Context,
	req types.RequestFinalizeBlock,
) (*ReqRes, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.FinalizeBlock(req)
	return app.callback(
		types.ToRequestFinalizeBlock(req),
		types.ToResponseFinalizeBlock(res),
	), nil
}

//-------------------------------------------------------

func (app *localClient) FlushSync(ctx context.Context) error {

@@ -203,6 +222,18 @@ func (app *localClient) InfoSync(ctx context.Context, req types.RequestInfo) (*t
	return &res, nil
}

func (app *localClient) DeliverTxSync(
	ctx context.Context,
	req types.RequestDeliverTx,
) (*types.ResponseDeliverTx, error) {

	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.DeliverTx(req)
	return &res, nil
}

func (app *localClient) CheckTxSync(
	ctx context.Context,
	req types.RequestCheckTx,

@@ -245,6 +276,30 @@ func (app *localClient) InitChainSync(
	return &res, nil
}

func (app *localClient) BeginBlockSync(
	ctx context.Context,
	req types.RequestBeginBlock,
) (*types.ResponseBeginBlock, error) {

	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.BeginBlock(req)
	return &res, nil
}

func (app *localClient) EndBlockSync(
	ctx context.Context,
	req types.RequestEndBlock,
) (*types.ResponseEndBlock, error) {

	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.EndBlock(req)
	return &res, nil
}

func (app *localClient) ListSnapshotsSync(
	ctx context.Context,
	req types.RequestListSnapshots,

@@ -291,17 +346,6 @@ func (app *localClient) ApplySnapshotChunkSync(
	return &res, nil
}

func (app *localClient) FinalizeBlockSync(
	ctx context.Context,
	req types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {

	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.FinalizeBlock(req)
	return &res, nil
}

//-------------------------------------------------------

func (app *localClient) callback(req *types.Request, res *types.Response) *ReqRes {
@@ -1,4 +1,4 @@
// Code generated by mockery 2.9.0. DO NOT EDIT.
// Code generated by mockery v2.5.1. DO NOT EDIT.

package mocks

@@ -65,6 +65,52 @@ func (_m *Client) ApplySnapshotChunkSync(_a0 context.Context, _a1 types.RequestA
	return r0, r1
}

// BeginBlockAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) BeginBlockAsync(_a0 context.Context, _a1 types.RequestBeginBlock) (*abcicli.ReqRes, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *abcicli.ReqRes
	if rf, ok := ret.Get(0).(func(context.Context, types.RequestBeginBlock) *abcicli.ReqRes); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*abcicli.ReqRes)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, types.RequestBeginBlock) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// BeginBlockSync provides a mock function with given fields: _a0, _a1
func (_m *Client) BeginBlockSync(_a0 context.Context, _a1 types.RequestBeginBlock) (*types.ResponseBeginBlock, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *types.ResponseBeginBlock
	if rf, ok := ret.Get(0).(func(context.Context, types.RequestBeginBlock) *types.ResponseBeginBlock); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseBeginBlock)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, types.RequestBeginBlock) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// CheckTxAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) CheckTxAsync(_a0 context.Context, _a1 types.RequestCheckTx) (*abcicli.ReqRes, error) {
	ret := _m.Called(_a0, _a1)

@@ -157,6 +203,52 @@ func (_m *Client) CommitSync(_a0 context.Context) (*types.ResponseCommit, error)
	return r0, r1
}

// DeliverTxAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) DeliverTxAsync(_a0 context.Context, _a1 types.RequestDeliverTx) (*abcicli.ReqRes, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *abcicli.ReqRes
	if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *abcicli.ReqRes); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*abcicli.ReqRes)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, types.RequestDeliverTx) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// DeliverTxSync provides a mock function with given fields: _a0, _a1
func (_m *Client) DeliverTxSync(_a0 context.Context, _a1 types.RequestDeliverTx) (*types.ResponseDeliverTx, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *types.ResponseDeliverTx
	if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *types.ResponseDeliverTx); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseDeliverTx)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, types.RequestDeliverTx) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// EchoAsync provides a mock function with given fields: ctx, msg
func (_m *Client) EchoAsync(ctx context.Context, msg string) (*abcicli.ReqRes, error) {
	ret := _m.Called(ctx, msg)

@@ -203,6 +295,52 @@ func (_m *Client) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho
	return r0, r1
}

// EndBlockAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) EndBlockAsync(_a0 context.Context, _a1 types.RequestEndBlock) (*abcicli.ReqRes, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *abcicli.ReqRes
	if rf, ok := ret.Get(0).(func(context.Context, types.RequestEndBlock) *abcicli.ReqRes); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*abcicli.ReqRes)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, types.RequestEndBlock) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// EndBlockSync provides a mock function with given fields: _a0, _a1
func (_m *Client) EndBlockSync(_a0 context.Context, _a1 types.RequestEndBlock) (*types.ResponseEndBlock, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *types.ResponseEndBlock
	if rf, ok := ret.Get(0).(func(context.Context, types.RequestEndBlock) *types.ResponseEndBlock); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseEndBlock)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, types.RequestEndBlock) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// Error provides a mock function with given fields:
func (_m *Client) Error() error {
	ret := _m.Called()

@@ -217,52 +355,6 @@ func (_m *Client) Error() error {
	return r0
}

// FinalizeBlockAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) FinalizeBlockAsync(_a0 context.Context, _a1 types.RequestFinalizeBlock) (*abcicli.ReqRes, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *abcicli.ReqRes
	if rf, ok := ret.Get(0).(func(context.Context, types.RequestFinalizeBlock) *abcicli.ReqRes); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*abcicli.ReqRes)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, types.RequestFinalizeBlock) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// FinalizeBlockSync provides a mock function with given fields: _a0, _a1
func (_m *Client) FinalizeBlockSync(_a0 context.Context, _a1 types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *types.ResponseFinalizeBlock
	if rf, ok := ret.Get(0).(func(context.Context, types.RequestFinalizeBlock) *types.ResponseFinalizeBlock); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseFinalizeBlock)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, types.RequestFinalizeBlock) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// FlushAsync provides a mock function with given fields: _a0
func (_m *Client) FlushAsync(_a0 context.Context) (*abcicli.ReqRes, error) {
	ret := _m.Called(_a0)

@@ -704,8 +796,3 @@ func (_m *Client) String() string {

	return r0
}

// Wait provides a mock function with given fields:
func (_m *Client) Wait() {
	_m.Called()
}
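Because the generated mock embeds testify's `mock.Mock`, a test can stub any of the methods above in the usual testify way. A minimal sketch (the test name, canned values, and the `mocks` import path are illustrative):

```go
package mocks_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	"github.com/tendermint/tendermint/abci/client/mocks"
	"github.com/tendermint/tendermint/abci/types"
)

func TestStubbedBeginBlockSync(t *testing.T) {
	cli := new(mocks.Client)

	// Stub: any context and request, return an empty response and no error.
	cli.On("BeginBlockSync", mock.Anything, mock.Anything).
		Return(&types.ResponseBeginBlock{}, nil)

	res, err := cli.BeginBlockSync(context.Background(), types.RequestBeginBlock{})
	require.NoError(t, err)
	require.NotNil(t, res)
	cli.AssertExpectations(t)
}
```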
|
||||
|
||||
@@ -245,6 +245,10 @@ func (cli *socketClient) InfoAsync(ctx context.Context, req types.RequestInfo) (
	return cli.queueRequestAsync(ctx, types.ToRequestInfo(req))
}

+func (cli *socketClient) DeliverTxAsync(ctx context.Context, req types.RequestDeliverTx) (*ReqRes, error) {
+	return cli.queueRequestAsync(ctx, types.ToRequestDeliverTx(req))
+}
+
func (cli *socketClient) CheckTxAsync(ctx context.Context, req types.RequestCheckTx) (*ReqRes, error) {
	return cli.queueRequestAsync(ctx, types.ToRequestCheckTx(req))
}
@@ -261,6 +265,14 @@ func (cli *socketClient) InitChainAsync(ctx context.Context, req types.RequestIn
	return cli.queueRequestAsync(ctx, types.ToRequestInitChain(req))
}

+func (cli *socketClient) BeginBlockAsync(ctx context.Context, req types.RequestBeginBlock) (*ReqRes, error) {
+	return cli.queueRequestAsync(ctx, types.ToRequestBeginBlock(req))
+}
+
+func (cli *socketClient) EndBlockAsync(ctx context.Context, req types.RequestEndBlock) (*ReqRes, error) {
+	return cli.queueRequestAsync(ctx, types.ToRequestEndBlock(req))
+}
+
func (cli *socketClient) ListSnapshotsAsync(ctx context.Context, req types.RequestListSnapshots) (*ReqRes, error) {
	return cli.queueRequestAsync(ctx, types.ToRequestListSnapshots(req))
}
@@ -283,13 +295,6 @@ func (cli *socketClient) ApplySnapshotChunkAsync(
	return cli.queueRequestAsync(ctx, types.ToRequestApplySnapshotChunk(req))
}

-func (cli *socketClient) FinalizeBlockAsync(
-	ctx context.Context,
-	req types.RequestFinalizeBlock,
-) (*ReqRes, error) {
-	return cli.queueRequestAsync(ctx, types.ToRequestFinalizeBlock(req))
-}
-
//----------------------------------------

func (cli *socketClient) FlushSync(ctx context.Context) error {
@@ -336,6 +341,18 @@ func (cli *socketClient) InfoSync(
	return reqres.Response.GetInfo(), nil
}

+func (cli *socketClient) DeliverTxSync(
+	ctx context.Context,
+	req types.RequestDeliverTx,
+) (*types.ResponseDeliverTx, error) {
+
+	reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestDeliverTx(req))
+	if err != nil {
+		return nil, err
+	}
+	return reqres.Response.GetDeliverTx(), nil
+}
+
func (cli *socketClient) CheckTxSync(
	ctx context.Context,
	req types.RequestCheckTx,
@@ -378,6 +395,30 @@ func (cli *socketClient) InitChainSync(
	return reqres.Response.GetInitChain(), nil
}

+func (cli *socketClient) BeginBlockSync(
+	ctx context.Context,
+	req types.RequestBeginBlock,
+) (*types.ResponseBeginBlock, error) {
+
+	reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestBeginBlock(req))
+	if err != nil {
+		return nil, err
+	}
+	return reqres.Response.GetBeginBlock(), nil
+}
+
+func (cli *socketClient) EndBlockSync(
+	ctx context.Context,
+	req types.RequestEndBlock,
+) (*types.ResponseEndBlock, error) {
+
+	reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestEndBlock(req))
+	if err != nil {
+		return nil, err
+	}
+	return reqres.Response.GetEndBlock(), nil
+}
+
func (cli *socketClient) ListSnapshotsSync(
	ctx context.Context,
	req types.RequestListSnapshots,
@@ -424,17 +465,6 @@ func (cli *socketClient) ApplySnapshotChunkSync(
	return reqres.Response.GetApplySnapshotChunk(), nil
}

-func (cli *socketClient) FinalizeBlockSync(
-	ctx context.Context,
-	req types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
-
-	reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestFinalizeBlock(req))
-	if err != nil {
-		return nil, err
-	}
-	return reqres.Response.GetFinalizeBlock(), nil
-}
-
//----------------------------------------

// queueRequest enqueues req onto the queue. If the queue is full, it either
@@ -539,6 +569,8 @@ func resMatchesReq(req *types.Request, res *types.Response) (ok bool) {
		_, ok = res.Value.(*types.Response_Flush)
	case *types.Request_Info:
		_, ok = res.Value.(*types.Response_Info)
+	case *types.Request_DeliverTx:
+		_, ok = res.Value.(*types.Response_DeliverTx)
	case *types.Request_CheckTx:
		_, ok = res.Value.(*types.Response_CheckTx)
	case *types.Request_Commit:
@@ -547,6 +579,10 @@ func resMatchesReq(req *types.Request, res *types.Response) (ok bool) {
		_, ok = res.Value.(*types.Response_Query)
	case *types.Request_InitChain:
		_, ok = res.Value.(*types.Response_InitChain)
+	case *types.Request_BeginBlock:
+		_, ok = res.Value.(*types.Response_BeginBlock)
+	case *types.Request_EndBlock:
+		_, ok = res.Value.(*types.Response_EndBlock)
	case *types.Request_ApplySnapshotChunk:
		_, ok = res.Value.(*types.Response_ApplySnapshotChunk)
	case *types.Request_LoadSnapshotChunk:
@@ -555,8 +591,6 @@ func resMatchesReq(req *types.Request, res *types.Response) (ok bool) {
		_, ok = res.Value.(*types.Response_ListSnapshots)
	case *types.Request_OfferSnapshot:
		_, ok = res.Value.(*types.Response_OfferSnapshot)
-	case *types.Request_FinalizeBlock:
-		_, ok = res.Value.(*types.Response_FinalizeBlock)
	}
	return ok
}
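The socket client exposes the two calling conventions restored above: Async methods enqueue a request and return a *ReqRes immediately, while Sync methods enqueue, flush, and block until resMatchesReq pairs the response with the request. A short sketch of both, assuming a reachable ABCI socket server at the given address (address and tx bytes are illustrative):

package main

import (
	"context"
	"fmt"

	abcicli "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/types"
)

func main() {
	client := abcicli.NewSocketClient("tcp://127.0.0.1:26658", true)
	if err := client.Start(); err != nil {
		panic(err)
	}
	defer client.Stop() //nolint:errcheck

	ctx := context.Background()

	// Async: returns at once; the response arrives via the ReqRes.
	reqres, err := client.DeliverTxAsync(ctx, types.RequestDeliverTx{Tx: []byte("k=v")})
	if err != nil {
		panic(err)
	}
	if err := client.FlushSync(ctx); err != nil { // push the queue out
		panic(err)
	}
	fmt.Println("async code:", reqres.Response.GetDeliverTx().Code)

	// Sync: queueRequestAndFlushSync under the hood; blocks until matched.
	res, err := client.CheckTxSync(ctx, types.RequestCheckTx{Tx: []byte("k=v")})
	if err != nil {
		panic(err)
	}
	fmt.Println("sync code:", res.Code)
}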
@@ -37,11 +37,11 @@ func TestProperSyncCalls(t *testing.T) {
	resp := make(chan error, 1)
	go func() {
		// This is BeginBlockSync unrolled....
-		reqres, err := c.FinalizeBlockAsync(ctx, types.RequestFinalizeBlock{})
+		reqres, err := c.BeginBlockAsync(ctx, types.RequestBeginBlock{})
		assert.NoError(t, err)
		err = c.FlushSync(context.Background())
		assert.NoError(t, err)
-		res := reqres.Response.GetFinalizeBlock()
+		res := reqres.Response.GetBeginBlock()
		assert.NotNil(t, res)
		resp <- c.Error()
	}()
@@ -73,7 +73,7 @@ func TestHangingSyncCalls(t *testing.T) {
	resp := make(chan error, 1)
	go func() {
		// Start BeginBlock and flush it
-		reqres, err := c.FinalizeBlockAsync(ctx, types.RequestFinalizeBlock{})
+		reqres, err := c.BeginBlockAsync(ctx, types.RequestBeginBlock{})
		assert.NoError(t, err)
		flush, err := c.FlushAsync(ctx)
		assert.NoError(t, err)
@@ -84,7 +84,7 @@ func TestHangingSyncCalls(t *testing.T) {
	err = s.Stop()
	assert.NoError(t, err)

-	// wait for the response from FinalizeBlock
+	// wait for the response from BeginBlock
	reqres.Wait()
	flush.Wait()
	resp <- c.Error()
@@ -121,7 +121,7 @@ type slowApp struct {
	types.BaseApplication
}

-func (slowApp) FinalizeBlock(req types.RequestFinalizeBlock) types.ResponseFinalizeBlock {
+func (slowApp) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock {
	time.Sleep(200 * time.Millisecond)
-	return types.ResponseFinalizeBlock{}
+	return types.ResponseBeginBlock{}
}
@@ -68,9 +68,12 @@ var RootCmd = &cobra.Command{
	}

	if logger == nil {
-		logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
+		allowLevel, err := log.AllowLevel(flagLogLevel)
+		if err != nil {
+			return err
+		}
+		logger = log.NewFilter(log.NewTMLogger(log.NewSyncWriter(os.Stdout)), allowLevel)
	}

	if client == nil {
		var err error
		client, err = abcicli.NewClient(flagAddress, flagAbci, false)
@@ -504,18 +507,16 @@ func cmdDeliverTx(cmd *cobra.Command, args []string) error {
	if err != nil {
		return err
	}
-	res, err := client.FinalizeBlockSync(ctx, types.RequestFinalizeBlock{Txs: [][]byte{txBytes}})
+	res, err := client.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: txBytes})
	if err != nil {
		return err
	}
-	for _, tx := range res.Txs {
-		printResponse(cmd, args, response{
-			Code: tx.Code,
-			Data: tx.Data,
-			Info: tx.Info,
-			Log:  tx.Log,
-		})
-	}
+	printResponse(cmd, args, response{
+		Code: res.Code,
+		Data: res.Data,
+		Info: res.Info,
+		Log:  res.Log,
+	})
	return nil
}
@@ -597,7 +598,7 @@ func cmdQuery(cmd *cobra.Command, args []string) error {

func cmdCounter(cmd *cobra.Command, args []string) error {
	app := counter.NewApplication(flagSerial)
-	logger := log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
+	logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))

	// Start the listener
	srv, err := server.NewServer(flagAddress, flagAbci, app)
@@ -622,7 +623,7 @@ func cmdCounter(cmd *cobra.Command, args []string) error {
}

func cmdKVStore(cmd *cobra.Command, args []string) error {
-	logger := log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
+	logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))

	// Create the application - in memory or persisted to disk
	var app types.Application
@@ -24,30 +24,24 @@ func (app *Application) Info(req types.RequestInfo) types.ResponseInfo {
	return types.ResponseInfo{Data: fmt.Sprintf("{\"hashes\":%v,\"txs\":%v}", app.hashCount, app.txCount)}
}

-func (app *Application) FinalizeBlock(req types.RequestFinalizeBlock) types.ResponseFinalizeBlock {
+func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
	if app.serial {
-		for _, tx := range req.Txs {
-			if len(tx) > 8 {
-				return types.ResponseFinalizeBlock{Txs: []*types.ResponseDeliverTx{{
-					Code: code.CodeTypeEncodingError,
-					Log:  fmt.Sprintf("Max tx size is 8 bytes, got %d", len(tx))}},
-				}
-			}
-			tx8 := make([]byte, 8)
-			copy(tx8[len(tx8)-len(tx):], tx)
-			txValue := binary.BigEndian.Uint64(tx8)
-			if txValue != uint64(app.txCount) {
-				return types.ResponseFinalizeBlock{
-					Txs: []*types.ResponseDeliverTx{{
-						Code: code.CodeTypeBadNonce,
-						Log:  fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.txCount, txValue)}},
-				}
-			}
-		}
+		if len(req.Tx) > 8 {
+			return types.ResponseDeliverTx{
+				Code: code.CodeTypeEncodingError,
+				Log:  fmt.Sprintf("Max tx size is 8 bytes, got %d", len(req.Tx))}
+		}
+		tx8 := make([]byte, 8)
+		copy(tx8[len(tx8)-len(req.Tx):], req.Tx)
+		txValue := binary.BigEndian.Uint64(tx8)
+		if txValue != uint64(app.txCount) {
+			return types.ResponseDeliverTx{
+				Code: code.CodeTypeBadNonce,
+				Log:  fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.txCount, txValue)}
+		}
	}
	app.txCount++
-	return types.ResponseFinalizeBlock{Txs: []*types.ResponseDeliverTx{{Code: code.CodeTypeOK}}}
+	return types.ResponseDeliverTx{Code: code.CodeTypeOK}
}

func (app *Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
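The serial counter's nonce check above left-pads each tx to 8 bytes and reads it as a big-endian uint64, which must equal the running txCount. A small standalone illustration of that arithmetic (not from the diff):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	tx := []byte{0x00, 0x02} // the third tx sent in the tests above
	tx8 := make([]byte, 8)
	copy(tx8[len(tx8)-len(tx):], tx) // -> 00 00 00 00 00 00 00 02

	// Prints 2: accepted exactly when the app's txCount is 2.
	fmt.Println(binary.BigEndian.Uint64(tx8))
}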
@@ -76,22 +76,20 @@ func testStream(t *testing.T, app types.Application) {
	client.SetResponseCallback(func(req *types.Request, res *types.Response) {
		// Process response
		switch r := res.Value.(type) {
-		case *types.Response_FinalizeBlock:
-			for _, tx := range r.FinalizeBlock.Txs {
-				counter++
-				if tx.Code != code.CodeTypeOK {
-					t.Error("DeliverTx failed with ret_code", tx.Code)
-				}
-				if counter > numDeliverTxs {
-					t.Fatalf("Too many DeliverTx responses. Got %d, expected %d", counter, numDeliverTxs)
-				}
-				if counter == numDeliverTxs {
-					go func() {
-						time.Sleep(time.Second * 1) // Wait for a bit to allow counter overflow
-						close(done)
-					}()
-					return
-				}
+		case *types.Response_DeliverTx:
+			counter++
+			if r.DeliverTx.Code != code.CodeTypeOK {
+				t.Error("DeliverTx failed with ret_code", r.DeliverTx.Code)
+			}
+			if counter > numDeliverTxs {
+				t.Fatalf("Too many DeliverTx responses. Got %d, expected %d", counter, numDeliverTxs)
+			}
+			if counter == numDeliverTxs {
+				go func() {
+					time.Sleep(time.Second * 1) // Wait for a bit to allow counter overflow
+					close(done)
+				}()
+				return
			}
		case *types.Response_Flush:
			// ignore
@@ -105,8 +103,7 @@ func testStream(t *testing.T, app types.Application) {
	// Write requests
	for counter := 0; counter < numDeliverTxs; counter++ {
		// Send request
-		tx := []byte("test")
-		_, err = client.FinalizeBlockAsync(ctx, types.RequestFinalizeBlock{Txs: [][]byte{tx}})
+		_, err = client.DeliverTxAsync(ctx, types.RequestDeliverTx{Tx: []byte("test")})
		require.NoError(t, err)

		// Sometimes send flush messages
@@ -166,25 +163,22 @@ func testGRPCSync(t *testing.T, app types.ABCIApplicationServer) {
	// Write requests
	for counter := 0; counter < numDeliverTxs; counter++ {
		// Send request
-		txt := []byte("test")
-		response, err := client.FinalizeBlock(context.Background(), &types.RequestFinalizeBlock{Txs: [][]byte{txt}})
+		response, err := client.DeliverTx(context.Background(), &types.RequestDeliverTx{Tx: []byte("test")})
		if err != nil {
			t.Fatalf("Error in GRPC DeliverTx: %v", err.Error())
		}
		counter++
-		for _, tx := range response.Txs {
-			if tx.Code != code.CodeTypeOK {
-				t.Error("DeliverTx failed with ret_code", tx.Code)
-			}
-			if counter > numDeliverTxs {
-				t.Fatal("Too many DeliverTx responses")
-			}
-			t.Log("response", counter)
-			if counter == numDeliverTxs {
-				go func() {
-					time.Sleep(time.Second * 1) // Wait for a bit to allow counter overflow
-				}()
-			}
+		if response.Code != code.CodeTypeOK {
+			t.Error("DeliverTx failed with ret_code", response.Code)
+		}
+		if counter > numDeliverTxs {
+			t.Fatal("Too many DeliverTx responses")
+		}
+		t.Log("response", counter)
+		if counter == numDeliverTxs {
+			go func() {
+				time.Sleep(time.Second * 1) // Wait for a bit to allow counter overflow
+			}()
+		}

	}
@@ -86,40 +86,35 @@ func (app *Application) Info(req types.RequestInfo) (resInfo types.ResponseInfo)
}

// tx is either "key=value" or just arbitrary bytes
-func (app *Application) FinalizeBlock(req types.RequestFinalizeBlock) types.ResponseFinalizeBlock {
+func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
	var key, value string
-	var txs = make([]*types.ResponseDeliverTx, len(req.Txs))
-
-	for i, tx := range req.Txs {
-		parts := bytes.Split(tx, []byte("="))
-		if len(parts) == 2 {
-			key, value = string(parts[0]), string(parts[1])
-		} else {
-			key, value = string(tx), string(tx)
-		}
-
-		err := app.state.db.Set(prefixKey([]byte(key)), []byte(value))
-		if err != nil {
-			panic(err)
-		}
-		app.state.Size++
-
-		events := []types.Event{
-			{
-				Type: "app",
-				Attributes: []types.EventAttribute{
-					{Key: "creator", Value: "Cosmoshi Netowoko", Index: true},
-					{Key: "key", Value: key, Index: true},
-					{Key: "index_key", Value: "index is working", Index: true},
-					{Key: "noindex_key", Value: "index is working", Index: false},
-				},
-			},
-		}
-
-		txs[i] = &types.ResponseDeliverTx{Code: code.CodeTypeOK, Events: events}
+	parts := bytes.Split(req.Tx, []byte("="))
+	if len(parts) == 2 {
+		key, value = string(parts[0]), string(parts[1])
+	} else {
+		key, value = string(req.Tx), string(req.Tx)
	}

-	return types.ResponseFinalizeBlock{Txs: txs}
+	err := app.state.db.Set(prefixKey([]byte(key)), []byte(value))
+	if err != nil {
+		panic(err)
+	}
+	app.state.Size++
+
+	events := []types.Event{
+		{
+			Type: "app",
+			Attributes: []types.EventAttribute{
+				{Key: "creator", Value: "Cosmoshi Netowoko", Index: true},
+				{Key: "key", Value: key, Index: true},
+				{Key: "index_key", Value: "index is working", Index: true},
+				{Key: "noindex_key", Value: "index is working", Index: false},
+			},
+		},
+	}
+
+	return types.ResponseDeliverTx{Code: code.CodeTypeOK, Events: events}
}

func (app *Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
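The kvstore treats a tx containing "=" as a key/value pair and anything else as both key and value. A standalone illustration of that split, mirroring the bytes.Split logic in DeliverTx above (sample tx bytes are illustrative):

package main

import (
	"bytes"
	"fmt"
)

func main() {
	for _, tx := range [][]byte{[]byte("name=satoshi"), []byte("plainbytes")} {
		key, value := string(tx), string(tx) // default: raw bytes for both
		if parts := bytes.Split(tx, []byte("=")); len(parts) == 2 {
			key, value = string(parts[0]), string(parts[1])
		}
		fmt.Printf("key=%q value=%q\n", key, value)
	}
	// Output:
	// key="name" value="satoshi"
	// key="plainbytes" value="plainbytes"
}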
@@ -27,16 +27,12 @@ const (
var ctx = context.Background()

func testKVStore(t *testing.T, app types.Application, tx []byte, key, value string) {
-	req := types.RequestFinalizeBlock{Txs: [][]byte{tx}}
-	ar := app.FinalizeBlock(req)
-	for _, tx := range ar.Txs {
-		require.False(t, tx.IsErr(), ar)
-	}
+	req := types.RequestDeliverTx{Tx: tx}
+	ar := app.DeliverTx(req)
+	require.False(t, ar.IsErr(), ar)
	// repeating tx doesn't raise error
-	ar = app.FinalizeBlock(req)
-	for _, tx := range ar.Txs {
-		require.False(t, tx.IsErr(), ar)
-	}
+	ar = app.DeliverTx(req)
+	require.False(t, ar.IsErr(), ar)
	// commit
	app.Commit()

@@ -113,7 +109,8 @@ func TestPersistentKVStoreInfo(t *testing.T) {
	header := tmproto.Header{
		Height: height,
	}
-	kvstore.FinalizeBlock(types.RequestFinalizeBlock{Hash: hash, Header: header})
+	kvstore.BeginBlock(types.RequestBeginBlock{Hash: hash, Header: header})
+	kvstore.EndBlock(types.RequestEndBlock{Height: header.Height})
	kvstore.Commit()

	resInfo = kvstore.Info(types.RequestInfo{})
@@ -203,15 +200,16 @@ func makeApplyBlock(
		Height: height,
	}

-	resFinalizeBlock := kvstore.FinalizeBlock(types.RequestFinalizeBlock{
-		Hash:   hash,
-		Header: header,
-		Txs:    txs,
-	})
-
+	kvstore.BeginBlock(types.RequestBeginBlock{Hash: hash, Header: header})
+	for _, tx := range txs {
+		if r := kvstore.DeliverTx(types.RequestDeliverTx{Tx: tx}); r.IsErr() {
+			t.Fatal(r)
+		}
+	}
+	resEndBlock := kvstore.EndBlock(types.RequestEndBlock{Height: header.Height})
	kvstore.Commit()

-	valsEqual(t, diff, resFinalizeBlock.ValidatorUpdates)
+	valsEqual(t, diff, resEndBlock.ValidatorUpdates)

}
@@ -328,16 +326,13 @@ func runClientTests(t *testing.T, client abcicli.Client) {
}

func testClient(t *testing.T, app abcicli.Client, tx []byte, key, value string) {
-	ar, err := app.FinalizeBlockSync(ctx, types.RequestFinalizeBlock{Txs: [][]byte{tx}})
-	for _, tx := range ar.Txs {
-		require.False(t, tx.IsErr(), ar)
-	}
-
-	ar, err = app.FinalizeBlockSync(ctx, types.RequestFinalizeBlock{Txs: [][]byte{tx}}) // repeating tx doesn't raise error
+	ar, err := app.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: tx})
	require.NoError(t, err)
-	for _, tx := range ar.Txs {
-		require.False(t, tx.IsErr(), ar)
-	}
+	require.False(t, ar.IsErr(), ar)
+	// repeating tx doesn't raise error
+	ar, err = app.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: tx})
+	require.NoError(t, err)
+	require.False(t, ar.IsErr(), ar)
	// commit
	_, err = app.CommitSync(ctx)
	require.NoError(t, err)
@@ -66,19 +66,19 @@ func (app *PersistentKVStoreApplication) Info(req types.RequestInfo) types.Respo
	return res
}

-// // tx is either "val:pubkey!power" or "key=value" or just arbitrary bytes
-// func (app *PersistentKVStoreApplication) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
-// 	// if it starts with "val:", update the validator set
-// 	// format is "val:pubkey!power"
-// 	if isValidatorTx(req.Tx) {
-// 		// update validators in the merkle tree
-// 		// and in app.ValUpdates
-// 		return app.execValidatorTx(req.Tx)
-// 	}
+// tx is either "val:pubkey!power" or "key=value" or just arbitrary bytes
+func (app *PersistentKVStoreApplication) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
+	// if it starts with "val:", update the validator set
+	// format is "val:pubkey!power"
+	if isValidatorTx(req.Tx) {
+		// update validators in the merkle tree
+		// and in app.ValUpdates
+		return app.execValidatorTx(req.Tx)
+	}

-// 	// otherwise, update the key-value store
-// 	return app.app.DeliverTx(req)
-// }
+	// otherwise, update the key-value store
+	return app.app.DeliverTx(req)
+}

func (app *PersistentKVStoreApplication) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
	return app.app.CheckTx(req)
@@ -119,40 +119,8 @@ func (app *PersistentKVStoreApplication) InitChain(req types.RequestInitChain) t
	return types.ResponseInitChain{}
}

-func (app *PersistentKVStoreApplication) ListSnapshots(
-	req types.RequestListSnapshots) types.ResponseListSnapshots {
-	return types.ResponseListSnapshots{}
-}
-
-func (app *PersistentKVStoreApplication) LoadSnapshotChunk(
-	req types.RequestLoadSnapshotChunk) types.ResponseLoadSnapshotChunk {
-	return types.ResponseLoadSnapshotChunk{}
-}
-
-func (app *PersistentKVStoreApplication) OfferSnapshot(
-	req types.RequestOfferSnapshot) types.ResponseOfferSnapshot {
-	return types.ResponseOfferSnapshot{Result: types.ResponseOfferSnapshot_ABORT}
-}
-
-func (app *PersistentKVStoreApplication) ApplySnapshotChunk(
-	req types.RequestApplySnapshotChunk) types.ResponseApplySnapshotChunk {
-	return types.ResponseApplySnapshotChunk{Result: types.ResponseApplySnapshotChunk_ABORT}
-}
-
-func (app *PersistentKVStoreApplication) FinalizeBlock(
-	req types.RequestFinalizeBlock) types.ResponseFinalizeBlock {
-	// for i, tx := range req.Txs {
-	// 	// if it starts with "val:", update the validator set
-	// 	// format is "val:pubkey!power"
-	// 	if isValidatorTx(tx) {
-	// 		// update validators in the merkle tree
-	// 		// and in app.ValUpdates
-	// 		return app.execValidatorTx(req.Tx)
-	// 	}
-
-	// 	// otherwise, update the key-value store
-	// 	return app.app.DeliverTx(tx)
-	// }
+// Track the block hash and header information
+func (app *PersistentKVStoreApplication) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock {
	// reset valset changes
	app.ValUpdates = make([]types.ValidatorUpdate, 0)

@@ -174,7 +142,32 @@ func (app *PersistentKVStoreApplication) FinalizeBlock(
		}
	}

-	return types.ResponseFinalizeBlock{ValidatorUpdates: app.ValUpdates}
+	return types.ResponseBeginBlock{}
}

+// Update the validator set
+func (app *PersistentKVStoreApplication) EndBlock(req types.RequestEndBlock) types.ResponseEndBlock {
+	return types.ResponseEndBlock{ValidatorUpdates: app.ValUpdates}
+}
+
+func (app *PersistentKVStoreApplication) ListSnapshots(
+	req types.RequestListSnapshots) types.ResponseListSnapshots {
+	return types.ResponseListSnapshots{}
+}
+
+func (app *PersistentKVStoreApplication) LoadSnapshotChunk(
+	req types.RequestLoadSnapshotChunk) types.ResponseLoadSnapshotChunk {
+	return types.ResponseLoadSnapshotChunk{}
+}
+
+func (app *PersistentKVStoreApplication) OfferSnapshot(
+	req types.RequestOfferSnapshot) types.ResponseOfferSnapshot {
+	return types.ResponseOfferSnapshot{Result: types.ResponseOfferSnapshot_ABORT}
+}
+
+func (app *PersistentKVStoreApplication) ApplySnapshotChunk(
+	req types.RequestApplySnapshotChunk) types.ResponseApplySnapshotChunk {
+	return types.ResponseApplySnapshotChunk{Result: types.ResponseApplySnapshotChunk_ABORT}
+}

//---------------------------------------------
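The restored methods re-establish the classic consensus-connection lifecycle: BeginBlock resets app.ValUpdates, DeliverTx (via execValidatorTx for "val:pubkey!power" txs) accumulates validator changes, and EndBlock returns them. A sketch of that call order, driven directly instead of by Tendermint (the db path and tx bytes are illustrative):

package main

import (
	"github.com/tendermint/tendermint/abci/example/kvstore"
	"github.com/tendermint/tendermint/abci/types"
)

func main() {
	app := kvstore.NewPersistentKVStoreApplication("/tmp/kvstore-demo")

	app.InitChain(types.RequestInitChain{})
	app.BeginBlock(types.RequestBeginBlock{})                // resets app.ValUpdates
	app.DeliverTx(types.RequestDeliverTx{Tx: []byte("k=v")}) // ordinary key-value tx
	res := app.EndBlock(types.RequestEndBlock{Height: 1})    // returns accumulated ValidatorUpdates
	app.Commit()

	_ = res.ValidatorUpdates // empty here; populated when "val:..." txs were delivered
}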
@@ -200,6 +200,9 @@ func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types
	case *types.Request_Info:
		res := s.app.Info(*r.Info)
		responses <- types.ToResponseInfo(res)
+	case *types.Request_DeliverTx:
+		res := s.app.DeliverTx(*r.DeliverTx)
+		responses <- types.ToResponseDeliverTx(res)
	case *types.Request_CheckTx:
		res := s.app.CheckTx(*r.CheckTx)
		responses <- types.ToResponseCheckTx(res)
@@ -212,6 +215,12 @@ func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types
	case *types.Request_InitChain:
		res := s.app.InitChain(*r.InitChain)
		responses <- types.ToResponseInitChain(res)
+	case *types.Request_BeginBlock:
+		res := s.app.BeginBlock(*r.BeginBlock)
+		responses <- types.ToResponseBeginBlock(res)
+	case *types.Request_EndBlock:
+		res := s.app.EndBlock(*r.EndBlock)
+		responses <- types.ToResponseEndBlock(res)
	case *types.Request_ListSnapshots:
		res := s.app.ListSnapshots(*r.ListSnapshots)
		responses <- types.ToResponseListSnapshots(res)
@@ -224,9 +233,6 @@ func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types
	case *types.Request_ApplySnapshotChunk:
		res := s.app.ApplySnapshotChunk(*r.ApplySnapshotChunk)
		responses <- types.ToResponseApplySnapshotChunk(res)
-	case *types.Request_FinalizeBlock:
-		res := s.app.FinalizeBlock(*r.FinalizeBlock)
-		responses <- types.ToResponseFinalizeBlock(res)
	default:
		responses <- types.ToResponseException("Unknown request")
	}
@@ -51,22 +51,20 @@ func Commit(client abcicli.Client, hashExp []byte) error {
	return nil
}

-func FinalizeBlock(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
-	res, _ := client.FinalizeBlockSync(ctx, types.RequestFinalizeBlock{Txs: [][]byte{txBytes}})
-	for _, tx := range res.Txs {
-		code, data, log := tx.Code, tx.Data, tx.Log
-		if code != codeExp {
-			fmt.Println("Failed test: DeliverTx")
-			fmt.Printf("DeliverTx response code was unexpected. Got %v expected %v. Log: %v\n",
-				code, codeExp, log)
-			return errors.New("deliverTx error")
-		}
-		if !bytes.Equal(data, dataExp) {
-			fmt.Println("Failed test: DeliverTx")
-			fmt.Printf("DeliverTx response data was unexpected. Got %X expected %X\n",
-				data, dataExp)
-			return errors.New("deliverTx error")
-		}
+func DeliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
+	res, _ := client.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: txBytes})
+	code, data, log := res.Code, res.Data, res.Log
+	if code != codeExp {
+		fmt.Println("Failed test: DeliverTx")
+		fmt.Printf("DeliverTx response code was unexpected. Got %v expected %v. Log: %v\n",
+			code, codeExp, log)
+		return errors.New("deliverTx error")
+	}
+	if !bytes.Equal(data, dataExp) {
+		fmt.Println("Failed test: DeliverTx")
+		fmt.Printf("DeliverTx response data was unexpected. Got %X expected %X\n",
+			data, dataExp)
+		return errors.New("deliverTx error")
+	}
	fmt.Println("Passed test: DeliverTx")
	return nil
@@ -4,6 +4,7 @@ import (
	"bytes"
	"context"
	"fmt"
+	"os"

	abcicli "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/types"
@@ -18,7 +19,7 @@ func startClient(abciType string) abcicli.Client {
	if err != nil {
		panic(err.Error())
	}
-	logger := log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
+	logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
	client.SetLogger(logger.With("module", "abcicli"))
	if err := client.Start(); err != nil {
		panicf("connecting to abci_app: %v", err.Error())
@@ -37,29 +38,16 @@ func commit(client abcicli.Client, hashExp []byte) {
	}
}

-type tx struct {
-	Data    []byte
-	CodeExp uint32
-	DataExp []byte
-}
-
-func finalizeBlock(client abcicli.Client, txs []tx) {
-	var txsData = make([][]byte, len(txs))
-	for i, tx := range txs {
-		txsData[i] = tx.Data
-	}
-
-	res, err := client.FinalizeBlockSync(ctx, types.RequestFinalizeBlock{Txs: txsData})
+func deliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) {
+	res, err := client.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: txBytes})
	if err != nil {
		panicf("client error: %v", err)
	}
-	for i, tx := range res.Txs {
-		if tx.Code != txs[i].CodeExp {
-			panicf("DeliverTx response code was unexpected. Got %v expected %v. Log: %v", tx.Code, txs[i].CodeExp, tx.Log)
-		}
-		if !bytes.Equal(tx.Data, txs[i].DataExp) {
-			panicf("DeliverTx response data was unexpected. Got %X expected %X", tx.Data, txs[i].DataExp)
-		}
+	if res.Code != codeExp {
+		panicf("DeliverTx response code was unexpected. Got %v expected %v. Log: %v", res.Code, codeExp, res.Log)
+	}
+	if !bytes.Equal(res.Data, dataExp) {
+		panicf("DeliverTx response data was unexpected. Got %X expected %X", res.Data, dataExp)
+	}
}

@@ -81,15 +81,13 @@ func testCounter() {
	// commit(client, nil)
	// deliverTx(client, []byte("abc"), code.CodeTypeBadNonce, nil)
	commit(client, nil)
-	finalizeBlock(client, []tx{{Data: []byte{0x00}, CodeExp: types.CodeTypeOK, DataExp: nil}})
+	deliverTx(client, []byte{0x00}, types.CodeTypeOK, nil)
	commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 1})
	// deliverTx(client, []byte{0x00}, code.CodeTypeBadNonce, nil)
-	txs := []tx{
-		{Data: []byte{0x01}, DataExp: nil, CodeExp: types.CodeTypeOK},
-		{Data: []byte{0x00, 0x02}, DataExp: nil, CodeExp: types.CodeTypeOK},
-		{Data: []byte{0x00, 0x03}, DataExp: nil, CodeExp: types.CodeTypeOK},
-		{Data: []byte{0x00, 0x00, 0x04}, DataExp: nil, CodeExp: types.CodeTypeOK}}
-	finalizeBlock(client, txs)
+	deliverTx(client, []byte{0x01}, types.CodeTypeOK, nil)
+	deliverTx(client, []byte{0x00, 0x02}, types.CodeTypeOK, nil)
+	deliverTx(client, []byte{0x00, 0x03}, types.CodeTypeOK, nil)
+	deliverTx(client, []byte{0x00, 0x00, 0x04}, types.CodeTypeOK, nil)
	// deliverTx(client, []byte{0x00, 0x00, 0x06}, code.CodeTypeBadNonce, nil)
	commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 5})
}
@@ -17,9 +17,11 @@ type Application interface {
	CheckTx(RequestCheckTx) ResponseCheckTx // Validate a tx for the mempool

	// Consensus Connection
-	InitChain(RequestInitChain) ResponseInitChain // Initialize blockchain w validators/other info from TendermintCore
-	FinalizeBlock(RequestFinalizeBlock) ResponseFinalizeBlock
-	Commit() ResponseCommit // Commit the state and return the application Merkle root hash
+	InitChain(RequestInitChain) ResponseInitChain    // Initialize blockchain w validators/other info from TendermintCore
+	BeginBlock(RequestBeginBlock) ResponseBeginBlock // Signals the beginning of a block
+	DeliverTx(RequestDeliverTx) ResponseDeliverTx    // Deliver a tx for full processing
+	EndBlock(RequestEndBlock) ResponseEndBlock       // Signals the end of a block, returns changes to the validator set
+	Commit() ResponseCommit                          // Commit the state and return the application Merkle root hash

	// State Sync Connection
	ListSnapshots(RequestListSnapshots) ResponseListSnapshots // List available snapshots
@@ -44,6 +46,10 @@ func (BaseApplication) Info(req RequestInfo) ResponseInfo {
	return ResponseInfo{}
}

+func (BaseApplication) DeliverTx(req RequestDeliverTx) ResponseDeliverTx {
+	return ResponseDeliverTx{Code: CodeTypeOK}
+}
+
func (BaseApplication) CheckTx(req RequestCheckTx) ResponseCheckTx {
	return ResponseCheckTx{Code: CodeTypeOK}
}
@@ -60,6 +66,14 @@ func (BaseApplication) InitChain(req RequestInitChain) ResponseInitChain {
	return ResponseInitChain{}
}

+func (BaseApplication) BeginBlock(req RequestBeginBlock) ResponseBeginBlock {
+	return ResponseBeginBlock{}
+}
+
+func (BaseApplication) EndBlock(req RequestEndBlock) ResponseEndBlock {
+	return ResponseEndBlock{}
+}
+
func (BaseApplication) ListSnapshots(req RequestListSnapshots) ResponseListSnapshots {
	return ResponseListSnapshots{}
}
@@ -76,10 +90,6 @@ func (BaseApplication) ApplySnapshotChunk(req RequestApplySnapshotChunk) Respons
	return ResponseApplySnapshotChunk{}
}

-func (BaseApplication) FinalizeBlock(req RequestFinalizeBlock) ResponseFinalizeBlock {
-	return ResponseFinalizeBlock{}
-}
-
//-------------------------------------------------------

// GRPCApplication is a GRPC wrapper for Application
@@ -104,6 +114,11 @@ func (app *GRPCApplication) Info(ctx context.Context, req *RequestInfo) (*Respon
	return &res, nil
}

+func (app *GRPCApplication) DeliverTx(ctx context.Context, req *RequestDeliverTx) (*ResponseDeliverTx, error) {
+	res := app.app.DeliverTx(*req)
+	return &res, nil
+}
+
func (app *GRPCApplication) CheckTx(ctx context.Context, req *RequestCheckTx) (*ResponseCheckTx, error) {
	res := app.app.CheckTx(*req)
	return &res, nil
@@ -124,6 +139,16 @@ func (app *GRPCApplication) InitChain(ctx context.Context, req *RequestInitChain
	return &res, nil
}

+func (app *GRPCApplication) BeginBlock(ctx context.Context, req *RequestBeginBlock) (*ResponseBeginBlock, error) {
+	res := app.app.BeginBlock(*req)
+	return &res, nil
+}
+
+func (app *GRPCApplication) EndBlock(ctx context.Context, req *RequestEndBlock) (*ResponseEndBlock, error) {
+	res := app.app.EndBlock(*req)
+	return &res, nil
+}
+
func (app *GRPCApplication) ListSnapshots(
	ctx context.Context, req *RequestListSnapshots) (*ResponseListSnapshots, error) {
	res := app.app.ListSnapshots(*req)
@@ -147,9 +172,3 @@ func (app *GRPCApplication) ApplySnapshotChunk(
	res := app.app.ApplySnapshotChunk(*req)
	return &res, nil
}

-func (app *GRPCApplication) FinalizeBlock(
-	ctx context.Context, req *RequestFinalizeBlock) (*ResponseFinalizeBlock, error) {
-	res := app.app.FinalizeBlock(*req)
-	return &res, nil
-}
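Because BaseApplication supplies no-op defaults for every interface method, an application only overrides what it needs. A minimal sketch of implementing the restored interface and serving it over a socket; the CountingApp type and listen address are illustrative:

package main

import (
	"github.com/tendermint/tendermint/abci/example/code"
	"github.com/tendermint/tendermint/abci/server"
	"github.com/tendermint/tendermint/abci/types"
)

type CountingApp struct {
	types.BaseApplication // BeginBlock, EndBlock, etc. fall back to no-ops

	txCount int64
}

// DeliverTx is the only method overridden here; everything else keeps
// the BaseApplication defaults shown in the diff above.
func (app *CountingApp) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
	app.txCount++
	return types.ResponseDeliverTx{Code: code.CodeTypeOK}
}

func main() {
	srv, err := server.NewServer("tcp://127.0.0.1:26658", "socket", &CountingApp{})
	if err != nil {
		panic(err)
	}
	if err := srv.Start(); err != nil {
		panic(err)
	}
	defer srv.Stop() //nolint:errcheck
	select {} // serve until killed
}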
@@ -48,6 +48,12 @@ func ToRequestInfo(req RequestInfo) *Request {
	}
}

+func ToRequestDeliverTx(req RequestDeliverTx) *Request {
+	return &Request{
+		Value: &Request_DeliverTx{&req},
+	}
+}
+
func ToRequestCheckTx(req RequestCheckTx) *Request {
	return &Request{
		Value: &Request_CheckTx{&req},
@@ -72,6 +78,18 @@ func ToRequestInitChain(req RequestInitChain) *Request {
	}
}

+func ToRequestBeginBlock(req RequestBeginBlock) *Request {
+	return &Request{
+		Value: &Request_BeginBlock{&req},
+	}
+}
+
+func ToRequestEndBlock(req RequestEndBlock) *Request {
+	return &Request{
+		Value: &Request_EndBlock{&req},
+	}
+}
+
func ToRequestListSnapshots(req RequestListSnapshots) *Request {
	return &Request{
		Value: &Request_ListSnapshots{&req},
@@ -96,12 +114,6 @@ func ToRequestApplySnapshotChunk(req RequestApplySnapshotChunk) *Request {
	}
}

-func ToRequestFinalizeBlock(req RequestFinalizeBlock) *Request {
-	return &Request{
-		Value: &Request_FinalizeBlock{&req},
-	}
-}
-
//----------------------------------------

func ToResponseException(errStr string) *Response {
@@ -127,6 +139,11 @@ func ToResponseInfo(res ResponseInfo) *Response {
		Value: &Response_Info{&res},
	}
}
+func ToResponseDeliverTx(res ResponseDeliverTx) *Response {
+	return &Response{
+		Value: &Response_DeliverTx{&res},
+	}
+}

func ToResponseCheckTx(res ResponseCheckTx) *Response {
	return &Response{
@@ -152,6 +169,18 @@ func ToResponseInitChain(res ResponseInitChain) *Response {
	}
}

+func ToResponseBeginBlock(res ResponseBeginBlock) *Response {
+	return &Response{
+		Value: &Response_BeginBlock{&res},
+	}
+}
+
+func ToResponseEndBlock(res ResponseEndBlock) *Response {
+	return &Response{
+		Value: &Response_EndBlock{&res},
+	}
+}
+
func ToResponseListSnapshots(res ResponseListSnapshots) *Response {
	return &Response{
		Value: &Response_ListSnapshots{&res},
@@ -175,9 +204,3 @@ func ToResponseApplySnapshotChunk(res ResponseApplySnapshotChunk) *Response {
		Value: &Response_ApplySnapshotChunk{&res},
	}
}

-func ToResponseFinalizeBlock(res ResponseFinalizeBlock) *Response {
-	return &Response{
-		Value: &Response_FinalizeBlock{&res},
-	}
-}
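The To* helpers above wrap each typed message in the Request/Response oneof envelope that travels over the wire; type switches like resMatchesReq unwrap it on the other side. A small round-trip sketch (the tx bytes are illustrative):

package main

import (
	"fmt"

	"github.com/tendermint/tendermint/abci/types"
)

func main() {
	// Wrap a typed request in the oneof envelope.
	req := types.ToRequestDeliverTx(types.RequestDeliverTx{Tx: []byte("k=v")})

	// Unwrap it the same way the socket client and server do.
	switch v := req.Value.(type) {
	case *types.Request_DeliverTx:
		fmt.Printf("DeliverTx with %d-byte tx\n", len(v.DeliverTx.Tx))
	default:
		fmt.Println("unexpected request type")
	}
}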
File diff suppressed because it is too large
@@ -11,6 +11,7 @@ import (
	tmsync "github.com/tendermint/tendermint/internal/libs/sync"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/libs/service"
+	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/types"
)

@@ -62,7 +63,7 @@ var peerTimeout = 15 * time.Second // not const so we can override with tests
// PeerID responsible for delivering the block.
type BlockRequest struct {
	Height int64
-	PeerID types.NodeID
+	PeerID p2p.NodeID
}

// BlockPool keeps track of the fast sync peers, block requests and block responses.
@@ -75,7 +76,7 @@ type BlockPool struct {
	requesters map[int64]*bpRequester
	height     int64 // the lowest key in requesters.
	// peers
-	peers map[types.NodeID]*bpPeer
+	peers map[p2p.NodeID]*bpPeer
	maxPeerHeight int64 // the biggest reported height

	// atomic
@@ -89,7 +90,7 @@ type BlockPool struct {
// requests and errors will be sent to requestsCh and errorsCh accordingly.
func NewBlockPool(start int64, requestsCh chan<- BlockRequest, errorsCh chan<- peerError) *BlockPool {
	bp := &BlockPool{
-		peers: make(map[types.NodeID]*bpPeer),
+		peers: make(map[p2p.NodeID]*bpPeer),

		requesters: make(map[int64]*bpRequester),
		height:     start,
@@ -224,13 +225,13 @@ func (pool *BlockPool) PopRequest() {
// RedoRequest invalidates the block at pool.height,
// Remove the peer and redo request from others.
// Returns the ID of the removed peer.
-func (pool *BlockPool) RedoRequest(height int64) types.NodeID {
+func (pool *BlockPool) RedoRequest(height int64) p2p.NodeID {
	pool.mtx.Lock()
	defer pool.mtx.Unlock()

	request := pool.requesters[height]
	peerID := request.getPeerID()
-	if peerID != types.NodeID("") {
+	if peerID != p2p.NodeID("") {
		// RemovePeer will redo all requesters associated with this peer.
		pool.removePeer(peerID)
	}
@@ -239,7 +240,7 @@ func (pool *BlockPool) RedoRequest(height int64) types.NodeID {

// AddBlock validates that the block comes from the peer it was expected from and calls the requester to store it.
// TODO: ensure that blocks come in order for each peer.
-func (pool *BlockPool) AddBlock(peerID types.NodeID, block *types.Block, blockSize int) {
+func (pool *BlockPool) AddBlock(peerID p2p.NodeID, block *types.Block, blockSize int) {
	pool.mtx.Lock()
	defer pool.mtx.Unlock()

@@ -286,7 +287,7 @@ func (pool *BlockPool) LastAdvance() time.Time {
}

// SetPeerRange sets the peer's alleged blockchain base and height.
-func (pool *BlockPool) SetPeerRange(peerID types.NodeID, base int64, height int64) {
+func (pool *BlockPool) SetPeerRange(peerID p2p.NodeID, base int64, height int64) {
	pool.mtx.Lock()
	defer pool.mtx.Unlock()

@@ -307,14 +308,14 @@ func (pool *BlockPool) SetPeerRange(peerID types.NodeID, base int64, height int6

// RemovePeer removes the peer with peerID from the pool. If there's no peer
// with peerID, function is a no-op.
-func (pool *BlockPool) RemovePeer(peerID types.NodeID) {
+func (pool *BlockPool) RemovePeer(peerID p2p.NodeID) {
	pool.mtx.Lock()
	defer pool.mtx.Unlock()

	pool.removePeer(peerID)
}

-func (pool *BlockPool) removePeer(peerID types.NodeID) {
+func (pool *BlockPool) removePeer(peerID p2p.NodeID) {
	for _, requester := range pool.requesters {
		if requester.getPeerID() == peerID {
			requester.redo(peerID)
@@ -395,14 +396,14 @@ func (pool *BlockPool) requestersLen() int64 {
	return int64(len(pool.requesters))
}

-func (pool *BlockPool) sendRequest(height int64, peerID types.NodeID) {
+func (pool *BlockPool) sendRequest(height int64, peerID p2p.NodeID) {
	if !pool.IsRunning() {
		return
	}
	pool.requestsCh <- BlockRequest{height, peerID}
}

-func (pool *BlockPool) sendError(err error, peerID types.NodeID) {
+func (pool *BlockPool) sendError(err error, peerID p2p.NodeID) {
	if !pool.IsRunning() {
		return
	}
@@ -436,7 +437,7 @@ type bpPeer struct {
	height int64
	base   int64
	pool   *BlockPool
-	id     types.NodeID
+	id     p2p.NodeID
	recvMonitor *flow.Monitor

	timeout *time.Timer
@@ -444,7 +445,7 @@ type bpPeer struct {
	logger log.Logger
}

-func newBPPeer(pool *BlockPool, peerID types.NodeID, base int64, height int64) *bpPeer {
+func newBPPeer(pool *BlockPool, peerID p2p.NodeID, base int64, height int64) *bpPeer {
	peer := &bpPeer{
		pool: pool,
		id:   peerID,
@@ -509,10 +510,10 @@ type bpRequester struct {
	pool       *BlockPool
	height     int64
	gotBlockCh chan struct{}
-	redoCh     chan types.NodeID // redo may send multitime, add peerId to identify repeat
+	redoCh     chan p2p.NodeID // redo may send multitime, add peerId to identify repeat

	mtx    tmsync.Mutex
-	peerID types.NodeID
+	peerID p2p.NodeID
	block  *types.Block
}

@@ -521,7 +522,7 @@ func newBPRequester(pool *BlockPool, height int64) *bpRequester {
		pool:       pool,
		height:     height,
		gotBlockCh: make(chan struct{}, 1),
-		redoCh:     make(chan types.NodeID, 1),
+		redoCh:     make(chan p2p.NodeID, 1),

		peerID: "",
		block:  nil,
@@ -536,7 +537,7 @@ func (bpr *bpRequester) OnStart() error {
}

// Returns true if the peer matches and block doesn't already exist.
-func (bpr *bpRequester) setBlock(block *types.Block, peerID types.NodeID) bool {
+func (bpr *bpRequester) setBlock(block *types.Block, peerID p2p.NodeID) bool {
	bpr.mtx.Lock()
	if bpr.block != nil || bpr.peerID != peerID {
		bpr.mtx.Unlock()
@@ -558,7 +559,7 @@ func (bpr *bpRequester) getBlock() *types.Block {
	return bpr.block
}

-func (bpr *bpRequester) getPeerID() types.NodeID {
+func (bpr *bpRequester) getPeerID() p2p.NodeID {
	bpr.mtx.Lock()
	defer bpr.mtx.Unlock()
	return bpr.peerID
@@ -580,7 +581,7 @@ func (bpr *bpRequester) reset() {
// Tells bpRequester to pick another peer and try again.
// NOTE: Nonblocking, and does nothing if another redo
// was already requested.
-func (bpr *bpRequester) redo(peerID types.NodeID) {
+func (bpr *bpRequester) redo(peerID p2p.NodeID) {
	select {
	case bpr.redoCh <- peerID:
	default:
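How the pool is meant to be driven: the reactor reads BlockRequest values off requestsCh, fetches each block from the named peer, and feeds results back with AddBlock. A package-internal wiring sketch, assuming this revision's string-typed p2p.NodeID and the NewBlockPool signature shown above (the demo function and peer ID are illustrative):

package v0

import "github.com/tendermint/tendermint/p2p"

// demoPoolWiring sketches the request/response loop around BlockPool.
func demoPoolWiring() {
	requestsCh := make(chan BlockRequest, 100)
	errorsCh := make(chan peerError, 100)

	pool := NewBlockPool(1, requestsCh, errorsCh) // start syncing from height 1
	if err := pool.Start(); err != nil {
		panic(err)
	}
	defer pool.Stop() //nolint:errcheck

	// A peer advertises that it holds blocks 1..100.
	pool.SetPeerRange(p2p.NodeID("peer-1"), 1, 100)

	req := <-requestsCh // the pool asks for a height from that peer
	_ = req
	// Send a bcproto.BlockRequest to req.PeerID, and when the block comes back:
	//   pool.AddBlock(req.PeerID, block, len(blockBytes))
	// Misbehaving peers surface on errorsCh and are removed via RemovePeer.
}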
@@ -11,6 +11,7 @@ import (

	"github.com/tendermint/tendermint/libs/log"
	tmrand "github.com/tendermint/tendermint/libs/rand"
+	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/types"
)

@@ -19,7 +20,7 @@ func init() {
}

type testPeer struct {
-	id     types.NodeID
+	id     p2p.NodeID
	base   int64
	height int64
	inputChan chan inputData // make sure each peer's data is sequential
@@ -49,7 +50,7 @@ func (p testPeer) simulateInput(input inputData) {
	// input.t.Logf("Added block from peer %v (height: %v)", input.request.PeerID, input.request.Height)
}

-type testPeers map[types.NodeID]testPeer
+type testPeers map[p2p.NodeID]testPeer

func (ps testPeers) start() {
	for _, v := range ps {
@@ -66,7 +67,7 @@ func (ps testPeers) stop() {
func makePeers(numPeers int, minHeight, maxHeight int64) testPeers {
	peers := make(testPeers, numPeers)
	for i := 0; i < numPeers; i++ {
-		peerID := types.NodeID(tmrand.Str(12))
+		peerID := p2p.NodeID(tmrand.Str(12))
		height := minHeight + mrand.Int63n(maxHeight-minHeight)
		base := minHeight + int64(i)
		if base > height {
@@ -182,7 +183,7 @@ func TestBlockPoolTimeout(t *testing.T) {

	// Pull from channels
	counter := 0
-	timedOut := map[types.NodeID]struct{}{}
+	timedOut := map[p2p.NodeID]struct{}{}
	for {
		select {
		case err := <-errorsCh:
@@ -203,7 +204,7 @@ func TestBlockPoolTimeout(t *testing.T) {
func TestBlockPoolRemovePeer(t *testing.T) {
	peers := make(testPeers, 10)
	for i := 0; i < 10; i++ {
-		peerID := types.NodeID(fmt.Sprintf("%d", i+1))
+		peerID := p2p.NodeID(fmt.Sprintf("%d", i+1))
		height := int64(i + 1)
		peers[peerID] = testPeer{peerID, 0, height, make(chan inputData)}
	}
@@ -227,10 +228,10 @@ func TestBlockPoolRemovePeer(t *testing.T) {
	assert.EqualValues(t, 10, pool.MaxPeerHeight())

	// remove not-existing peer
-	assert.NotPanics(t, func() { pool.RemovePeer(types.NodeID("Superman")) })
+	assert.NotPanics(t, func() { pool.RemovePeer(p2p.NodeID("Superman")) })

	// remove peer with biggest height
-	pool.RemovePeer(types.NodeID("10"))
+	pool.RemovePeer(p2p.NodeID("10"))
	assert.EqualValues(t, 9, pool.MaxPeerHeight())

	// remove all peers
@@ -5,11 +5,10 @@ import (
	"sync"
	"time"

-	bc "github.com/tendermint/tendermint/internal/blockchain"
-	cons "github.com/tendermint/tendermint/internal/consensus"
-	"github.com/tendermint/tendermint/internal/p2p"
+	bc "github.com/tendermint/tendermint/blockchain"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/libs/service"
+	"github.com/tendermint/tendermint/p2p"
	bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
	sm "github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/store"
@@ -33,9 +32,10 @@ var (
			ID: byte(BlockchainChannel),
			Priority: 5,
			SendQueueCapacity: 1000,
-			RecvBufferCapacity: 1024,
+			RecvBufferCapacity: 50 * 4096,
			RecvMessageCapacity: bc.MaxMsgSize,
-			MaxSendBytes: 100,
+
+			MaxSendBytes: 100,
		},
	},
}
@@ -65,7 +65,7 @@ type consensusReactor interface {

type peerError struct {
	err    error
-	peerID types.NodeID
+	peerID p2p.NodeID
}

func (e peerError) Error() string {
@@ -85,10 +85,9 @@ type Reactor struct {
	consReactor consensusReactor
	fastSync    bool

-	blockchainCh  *p2p.Channel
-	peerUpdates   *p2p.PeerUpdates
-	peerUpdatesCh chan p2p.Envelope
-	closeCh       chan struct{}
+	blockchainCh *p2p.Channel
+	peerUpdates  *p2p.PeerUpdates
+	closeCh      chan struct{}

	requestsCh <-chan BlockRequest
	errorsCh   <-chan peerError
@@ -97,8 +96,6 @@ type Reactor struct {
	// requestRoutine spawned goroutines when stopping the reactor and before
	// stopping the p2p Channel(s).
	poolWG sync.WaitGroup
-
-	metrics *cons.Metrics
}

// NewReactor returns new reactor instance.
@@ -111,7 +108,6 @@ func NewReactor(
	blockchainCh *p2p.Channel,
	peerUpdates *p2p.PeerUpdates,
	fastSync bool,
-	metrics *cons.Metrics,
) (*Reactor, error) {
	if state.LastBlockHeight != store.Height() {
		return nil, fmt.Errorf("state (%v) and store (%v) height mismatch", state.LastBlockHeight, store.Height())
@@ -126,19 +122,17 @@ func NewReactor(
	errorsCh := make(chan peerError, maxPeerErrBuffer) // NOTE: The capacity should be larger than the peer count.

	r := &Reactor{
-		initialState:  state,
-		blockExec:     blockExec,
-		store:         store,
-		pool:          NewBlockPool(startHeight, requestsCh, errorsCh),
-		consReactor:   consReactor,
-		fastSync:      fastSync,
-		requestsCh:    requestsCh,
-		errorsCh:      errorsCh,
-		blockchainCh:  blockchainCh,
-		peerUpdates:   peerUpdates,
-		peerUpdatesCh: make(chan p2p.Envelope),
-		closeCh:       make(chan struct{}),
-		metrics:       metrics,
+		initialState: state,
+		blockExec:    blockExec,
+		store:        store,
+		pool:         NewBlockPool(startHeight, requestsCh, errorsCh),
+		consReactor:  consReactor,
+		fastSync:     fastSync,
+		requestsCh:   requestsCh,
+		errorsCh:     errorsCh,
+		blockchainCh: blockchainCh,
+		peerUpdates:  peerUpdates,
+		closeCh:      make(chan struct{}),
	}

	r.BaseService = *service.NewBaseService(logger, "Blockchain", r)
@@ -193,7 +187,7 @@ func (r *Reactor) OnStop() {

// respondToPeer loads a block and sends it to the requesting peer, if we have it.
// Otherwise, we'll respond saying we do not have it.
-func (r *Reactor) respondToPeer(msg *bcproto.BlockRequest, peerID types.NodeID) {
+func (r *Reactor) respondToPeer(msg *bcproto.BlockRequest, peerID p2p.NodeID) {
	block := r.store.LoadBlock(msg.Height)
	if block != nil {
		blockProto, err := block.ToProto()
@@ -283,9 +277,9 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err
}

// processBlockchainCh initiates a blocking process where we listen for and handle
-// envelopes on the BlockchainChannel and peerUpdatesCh. Any error encountered during
-// message execution will result in a PeerError being sent on the BlockchainChannel.
-// When the reactor is stopped, we will catch the signal and close the p2p Channel
+// envelopes on the BlockchainChannel. Any error encountered during message
+// execution will result in a PeerError being sent on the BlockchainChannel. When
+// the reactor is stopped, we will catch the signal and close the p2p Channel
// gracefully.
func (r *Reactor) processBlockchainCh() {
	defer r.blockchainCh.Close()
@@ -301,13 +295,9 @@ func (r *Reactor) processBlockchainCh() {
				}
			}

-		case envelop := <-r.peerUpdatesCh:
-			r.blockchainCh.Out <- envelop
-
		case <-r.closeCh:
			r.Logger.Debug("stopped listening on blockchain channel; closing...")
			return
-
		}
	}
}
@@ -324,7 +314,7 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) {
	switch peerUpdate.Status {
	case p2p.PeerStatusUp:
		// send a status update the newly added peer
-		r.peerUpdatesCh <- p2p.Envelope{
+		r.blockchainCh.Out <- p2p.Envelope{
			To: peerUpdate.NodeID,
			Message: &bcproto.StatusResponse{
				Base:   r.store.Base(),
@@ -558,14 +548,12 @@ FOR_LOOP:

			// TODO: Same thing for app - but we would need a way to get the hash
			// without persisting the state.
-			state, err = r.blockExec.ApplyBlock(state, firstID, first)
+			state, _, err = r.blockExec.ApplyBlock(state, firstID, first)
			if err != nil {
				// TODO: This is bad, are we zombie?
				panic(fmt.Sprintf("failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
			}
-
-			r.metrics.RecordConsMetrics(first)

			blocksSynced++

			if blocksSynced%100 == 0 {
@@ -588,7 +576,3 @@ FOR_LOOP:
		}
	}
}
-
-func (r *Reactor) GetMaxPeerBlockHeight() int64 {
-	return r.pool.MaxPeerHeight()
-}
@@ -9,16 +9,14 @@ import (

	abci "github.com/tendermint/tendermint/abci/types"
	cfg "github.com/tendermint/tendermint/config"
-	cons "github.com/tendermint/tendermint/internal/consensus"
-	"github.com/tendermint/tendermint/internal/mempool/mock"
-	"github.com/tendermint/tendermint/internal/p2p"
-	"github.com/tendermint/tendermint/internal/p2p/p2ptest"
-	"github.com/tendermint/tendermint/internal/test/factory"
	"github.com/tendermint/tendermint/libs/log"
+	"github.com/tendermint/tendermint/mempool/mock"
+	"github.com/tendermint/tendermint/p2p"
+	"github.com/tendermint/tendermint/p2p/p2ptest"
	bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
	"github.com/tendermint/tendermint/proxy"
	sm "github.com/tendermint/tendermint/state"
-	sf "github.com/tendermint/tendermint/state/test/factory"
	"github.com/tendermint/tendermint/store"
	"github.com/tendermint/tendermint/types"
	dbm "github.com/tendermint/tm-db"
@@ -27,14 +25,14 @@ import (
type reactorTestSuite struct {
	network *p2ptest.Network
	logger  log.Logger
-	nodes   []types.NodeID
+	nodes   []p2p.NodeID

-	reactors map[types.NodeID]*Reactor
-	app      map[types.NodeID]proxy.AppConns
+	reactors map[p2p.NodeID]*Reactor
+	app      map[p2p.NodeID]proxy.AppConns

-	blockchainChannels map[types.NodeID]*p2p.Channel
-	peerChans          map[types.NodeID]chan p2p.PeerUpdate
-	peerUpdates        map[types.NodeID]*p2p.PeerUpdates
+	blockchainChannels map[p2p.NodeID]*p2p.Channel
+	peerChans          map[p2p.NodeID]chan p2p.PeerUpdate
+	peerUpdates        map[p2p.NodeID]*p2p.PeerUpdates

	fastSync bool
}
@@ -55,12 +53,12 @@ func setup(
	rts := &reactorTestSuite{
		logger:  log.TestingLogger().With("module", "blockchain", "testCase", t.Name()),
		network: p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numNodes}),
-		nodes:              make([]types.NodeID, 0, numNodes),
-		reactors:           make(map[types.NodeID]*Reactor, numNodes),
-		app:                make(map[types.NodeID]proxy.AppConns, numNodes),
-		blockchainChannels: make(map[types.NodeID]*p2p.Channel, numNodes),
-		peerChans:          make(map[types.NodeID]chan p2p.PeerUpdate, numNodes),
-		peerUpdates:        make(map[types.NodeID]*p2p.PeerUpdates, numNodes),
+		nodes:              make([]p2p.NodeID, 0, numNodes),
+		reactors:           make(map[p2p.NodeID]*Reactor, numNodes),
+		app:                make(map[p2p.NodeID]proxy.AppConns, numNodes),
+		blockchainChannels: make(map[p2p.NodeID]*p2p.Channel, numNodes),
+		peerChans:          make(map[p2p.NodeID]chan p2p.PeerUpdate, numNodes),
+		peerUpdates:        make(map[p2p.NodeID]*p2p.PeerUpdates, numNodes),
		fastSync:           true,
	}

@@ -89,7 +87,7 @@ func setup(
}

func (rts *reactorTestSuite) addNode(t *testing.T,
-	nodeID types.NodeID,
+	nodeID p2p.NodeID,
	genDoc *types.GenesisDoc,
	privVal types.PrivValidator,
	maxBlockHeight int64,
@@ -105,9 +103,11 @@ func (rts *reactorTestSuite) addNode(t *testing.T,
	stateStore := sm.NewStore(stateDB)
	blockStore := store.NewBlockStore(blockDB)

-	state, err := sm.MakeGenesisState(genDoc)
+	state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
	require.NoError(t, err)
+	require.NoError(t, stateStore.Save(state))

	db := dbm.NewMemDB()
	stateStore = sm.NewStore(db)

	blockExec := sm.NewBlockExecutor(
		stateStore,
@@ -115,8 +115,8 @@ func (rts *reactorTestSuite) addNode(t *testing.T,
		rts.app[nodeID].Consensus(),
		mock.Mempool{},
		sm.EmptyEvidencePool{},
-		blockStore,
	)
+	require.NoError(t, stateStore.Save(state))

	for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ {
		lastCommit := types.NewCommit(blockHeight-1, 0, types.BlockID{}, nil)
@@ -142,11 +142,11 @@ func (rts *reactorTestSuite) addNode(t *testing.T,
			)
		}

-		thisBlock := sf.MakeBlock(state, blockHeight, lastCommit)
+		thisBlock := makeBlock(blockHeight, state, lastCommit)
		thisParts := thisBlock.MakePartSet(types.BlockPartSizeBytes)
		blockID := types.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()}

-		state, err = blockExec.ApplyBlock(state, blockID, thisBlock)
+		state, _, err = blockExec.ApplyBlock(state, blockID, thisBlock)
		require.NoError(t, err)

		blockStore.SaveBlock(thisBlock, thisParts, lastCommit)
@@ -163,8 +163,7 @@ func (rts *reactorTestSuite) addNode(t *testing.T,
		nil,
		rts.blockchainChannels[nodeID],
		rts.peerUpdates[nodeID],
-		rts.fastSync,
-		cons.NopMetrics())
+		rts.fastSync)
	require.NoError(t, err)

	require.NoError(t, rts.reactors[nodeID].Start())
18
blockchain/v0/test_util.go
Normal file
@@ -0,0 +1,18 @@
package v0

import (
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
)

func makeTxs(height int64) (txs []types.Tx) {
for i := 0; i < 10; i++ {
txs = append(txs, types.Tx([]byte{byte(height), byte(i)}))
}
return txs
}

func makeBlock(height int64, state sm.State, lastCommit *types.Commit) *types.Block {
block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, nil, state.Validators.GetProposer().Address)
return block
}
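For orientation, the new v0 test helpers compose like this — a minimal sketch, assuming a sm.State value built from a genesis doc elsewhere in the test setup:

    // Sketch: build a block at height 1 carrying the ten dummy txs from makeTxs.
    // `state` is assumed to come from stateStore.LoadFromDBOrGenesisDoc(genDoc).
    lastCommit := types.NewCommit(0, 0, types.BlockID{}, nil)
    block := makeBlock(1, state, lastCommit)
    parts := block.MakePartSet(types.BlockPartSizeBytes)
    _ = parts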
@@ -1,12 +1,14 @@
package behavior

import "github.com/tendermint/tendermint/types"
import (
"github.com/tendermint/tendermint/p2p"
)

// PeerBehavior is a struct describing a behavior a peer performed.
// `peerID` identifies the peer and reason characterizes the specific
// behavior performed by the peer.
type PeerBehavior struct {
peerID types.NodeID
peerID p2p.NodeID
reason interface{}
}

@@ -15,7 +17,7 @@ type badMessage struct {
}

// BadMessage returns a badMessage PeerBehavior.
func BadMessage(peerID types.NodeID, explanation string) PeerBehavior {
func BadMessage(peerID p2p.NodeID, explanation string) PeerBehavior {
return PeerBehavior{peerID: peerID, reason: badMessage{explanation}}
}

@@ -24,7 +26,7 @@ type messageOutOfOrder struct {
}

// MessageOutOfOrder returns a messageOutOfOrder PeerBehavior.
func MessageOutOfOrder(peerID types.NodeID, explanation string) PeerBehavior {
func MessageOutOfOrder(peerID p2p.NodeID, explanation string) PeerBehavior {
return PeerBehavior{peerID: peerID, reason: messageOutOfOrder{explanation}}
}

@@ -33,7 +35,7 @@ type consensusVote struct {
}

// ConsensusVote returns a consensusVote PeerBehavior.
func ConsensusVote(peerID types.NodeID, explanation string) PeerBehavior {
func ConsensusVote(peerID p2p.NodeID, explanation string) PeerBehavior {
return PeerBehavior{peerID: peerID, reason: consensusVote{explanation}}
}

@@ -42,6 +44,6 @@ type blockPart struct {
}

// BlockPart returns a blockPart PeerBehavior.
func BlockPart(peerID types.NodeID, explanation string) PeerBehavior {
func BlockPart(peerID p2p.NodeID, explanation string) PeerBehavior {
return PeerBehavior{peerID: peerID, reason: blockPart{explanation}}
}
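Taken together, these constructors give reactors a uniform way to describe misbehavior. A sketch of the intended call pattern (the `reporter` value is assumed here, not part of this diff):

    // Sketch: build a behavior for a peer and hand it to a Reporter.
    pb := behavior.BadMessage(p2p.NodeID("P1"), "malformed block response")
    if err := reporter.Report(pb); err != nil {
        // the switch (or mock) could not record the behavior
    }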
@@ -4,8 +4,7 @@ import (
"errors"

tmsync "github.com/tendermint/tendermint/internal/libs/sync"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tendermint/p2p"
)

// Reporter provides an interface for reactors to report the behavior
@@ -52,14 +51,14 @@ func (spbr *SwitchReporter) Report(behavior PeerBehavior) error {
// behavior in manufactured scenarios.
type MockReporter struct {
mtx tmsync.RWMutex
pb map[types.NodeID][]PeerBehavior
pb map[p2p.NodeID][]PeerBehavior
}

// NewMockReporter returns a Reporter which records all reported
// behaviors in memory.
func NewMockReporter() *MockReporter {
return &MockReporter{
pb: map[types.NodeID][]PeerBehavior{},
pb: map[p2p.NodeID][]PeerBehavior{},
}
}

@@ -73,7 +72,7 @@ func (mpbr *MockReporter) Report(behavior PeerBehavior) error {
}

// GetBehaviors returns all behaviors reported on the peer identified by peerID.
func (mpbr *MockReporter) GetBehaviors(peerID types.NodeID) []PeerBehavior {
func (mpbr *MockReporter) GetBehaviors(peerID p2p.NodeID) []PeerBehavior {
mpbr.mtx.RLock()
defer mpbr.mtx.RUnlock()
if items, ok := mpbr.pb[peerID]; ok {
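The mock keeps everything in memory keyed by peer ID, so a test can round-trip a report directly, for example:

    // Record one behavior and read it back for the same peer.
    pr := behavior.NewMockReporter()
    _ = pr.Report(behavior.ConsensusVote("P1", "voted twice"))
    behaviors := pr.GetBehaviors("P1") // one recorded behavior for P1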
@@ -4,14 +4,14 @@ import (
"sync"
"testing"

bh "github.com/tendermint/tendermint/internal/blockchain/v2/internal/behavior"
"github.com/tendermint/tendermint/types"
bh "github.com/tendermint/tendermint/blockchain/v2/internal/behavior"
"github.com/tendermint/tendermint/p2p"
)

// TestMockReporter tests the MockReporter's ability to store reported
// peer behavior in memory indexed by the peerID.
func TestMockReporter(t *testing.T) {
var peerID types.NodeID = "MockPeer"
var peerID p2p.NodeID = "MockPeer"
pr := bh.NewMockReporter()

behaviors := pr.GetBehaviors(peerID)
@@ -34,7 +34,7 @@ func TestMockReporter(t *testing.T) {
}

type scriptItem struct {
peerID types.NodeID
peerID p2p.NodeID
behavior bh.PeerBehavior
}

@@ -76,10 +76,10 @@ func equalBehaviors(a []bh.PeerBehavior, b []bh.PeerBehavior) bool {
// frequencies that those behaviors occur.
func TestEqualPeerBehaviors(t *testing.T) {
var (
peerID types.NodeID = "MockPeer"
consensusVote = bh.ConsensusVote(peerID, "voted")
blockPart = bh.BlockPart(peerID, "blocked")
equals = []struct {
peerID p2p.NodeID = "MockPeer"
consensusVote = bh.ConsensusVote(peerID, "voted")
blockPart = bh.BlockPart(peerID, "blocked")
equals = []struct {
left []bh.PeerBehavior
right []bh.PeerBehavior
}{
@@ -128,7 +128,7 @@ func TestEqualPeerBehaviors(t *testing.T) {
func TestMockPeerBehaviorReporterConcurrency(t *testing.T) {
var (
behaviorScript = []struct {
peerID types.NodeID
peerID p2p.NodeID
behaviors []bh.PeerBehavior
}{
{"1", []bh.PeerBehavior{bh.ConsensusVote("1", "")}},
@@ -4,7 +4,7 @@ import (
"errors"

"github.com/gogo/protobuf/proto"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/p2p"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
"github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
@@ -3,6 +3,7 @@ package v2
import (
"fmt"

"github.com/tendermint/tendermint/p2p"
tmState "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
)
@@ -12,8 +13,8 @@ import (
type pcBlockVerificationFailure struct {
priorityNormal
height int64
firstPeerID types.NodeID
secondPeerID types.NodeID
firstPeerID p2p.NodeID
secondPeerID p2p.NodeID
}

func (e pcBlockVerificationFailure) String() string {
@@ -25,7 +26,7 @@ func (e pcBlockVerificationFailure) String() string {
type pcBlockProcessed struct {
priorityNormal
height int64
peerID types.NodeID
peerID p2p.NodeID
}

func (e pcBlockProcessed) String() string {
@@ -45,7 +46,7 @@ func (p pcFinished) Error() string {

type queueItem struct {
block *types.Block
peerID types.NodeID
peerID p2p.NodeID
}

type blockQueue map[int64]queueItem
@@ -94,7 +95,7 @@ func (state *pcState) synced() bool {
return len(state.queue) <= 1
}

func (state *pcState) enqueue(peerID types.NodeID, block *types.Block, height int64) {
func (state *pcState) enqueue(peerID p2p.NodeID, block *types.Block, height int64) {
if item, ok := state.queue[height]; ok {
panic(fmt.Sprintf(
"duplicate block %d (%X) enqueued by processor (sent by %v; existing block %X from %v)",
@@ -109,7 +110,7 @@ func (state *pcState) height() int64 {
}

// purgePeer moves all unprocessed blocks from the queue
func (state *pcState) purgePeer(peerID types.NodeID) {
func (state *pcState) purgePeer(peerID p2p.NodeID) {
// what if height is less than state.height?
for height, item := range state.queue {
if item.peerID == peerID {
@@ -181,8 +182,6 @@ func (state *pcState) handle(event Event) (Event, error) {
panic(fmt.Sprintf("failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
}

state.context.recordConsMetrics(first)

delete(state.queue, first.Height)
state.blocksSynced++

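To make the queue bookkeeping above concrete, a small sketch of what purging a peer effectively does (these are the unexported types from this file; values invented):

    // Sketch: purgePeer drops only the given peer's unprocessed entries.
    queue := blockQueue{
        5: queueItem{peerID: "P1"},
        6: queueItem{peerID: "P2"},
    }
    for height, item := range queue {
        if item.peerID == "P1" {
            delete(queue, height) // height 6 from P2 survives
        }
    }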
@@ -3,7 +3,6 @@ package v2
import (
"fmt"

cons "github.com/tendermint/tendermint/internal/consensus"
"github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
)
@@ -14,27 +13,24 @@ type processorContext interface {
saveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit)
tmState() state.State
setState(state.State)
recordConsMetrics(block *types.Block)
}

type pContext struct {
store blockStore
applier blockApplier
state state.State
metrics *cons.Metrics
}

func newProcessorContext(st blockStore, ex blockApplier, s state.State, m *cons.Metrics) *pContext {
func newProcessorContext(st blockStore, ex blockApplier, s state.State) *pContext {
return &pContext{
store: st,
applier: ex,
state: s,
metrics: m,
}
}

func (pc *pContext) applyBlock(blockID types.BlockID, block *types.Block) error {
newState, err := pc.applier.ApplyBlock(pc.state, blockID, block)
newState, _, err := pc.applier.ApplyBlock(pc.state, blockID, block)
pc.state = newState
return err
}
@@ -55,10 +51,6 @@ func (pc *pContext) saveBlock(block *types.Block, blockParts *types.PartSet, see
pc.store.SaveBlock(block, blockParts, seenCommit)
}

func (pc *pContext) recordConsMetrics(block *types.Block) {
pc.metrics.RecordConsMetrics(block)
}

type mockPContext struct {
applicationBL []int64
verificationBL []int64
@@ -106,7 +98,3 @@ func (mpc *mockPContext) setState(state state.State) {
func (mpc *mockPContext) tmState() state.State {
return mpc.state
}

func (mpc *mockPContext) recordConsMetrics(block *types.Block) {

}
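Note the shape change in the applier call above: ApplyBlock now returns three values, and pContext discards the middle one. A sketch of an equivalent caller (the int64 looks like a block retain height, but that is an inference, not something this diff states):

    // Sketch: apply a block and ignore the assumed retain-height value.
    newState, _, err := applier.ApplyBlock(curState, blockID, block)
    if err != nil {
        return err
    }
    curState = newState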
@@ -5,6 +5,7 @@ import (

"github.com/stretchr/testify/assert"

"github.com/tendermint/tendermint/p2p"
tmState "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
)
@@ -39,7 +40,7 @@ func makeState(p *params) *pcState {
state := newPcState(context)

for _, item := range p.items {
state.enqueue(types.NodeID(item.pid), makePcBlock(item.height), item.height)
state.enqueue(p2p.NodeID(item.pid), makePcBlock(item.height), item.height)
}

state.blocksSynced = p.blocksSynced
@@ -47,7 +48,7 @@ func makeState(p *params) *pcState {
return state
}

func mBlockResponse(peerID types.NodeID, height int64) scBlockReceived {
func mBlockResponse(peerID p2p.NodeID, height int64) scBlockReceived {
return scBlockReceived{
peerID: peerID,
block: makePcBlock(height),
@@ -7,12 +7,11 @@ import (

proto "github.com/gogo/protobuf/proto"

bc "github.com/tendermint/tendermint/internal/blockchain"
"github.com/tendermint/tendermint/internal/blockchain/v2/internal/behavior"
cons "github.com/tendermint/tendermint/internal/consensus"
bc "github.com/tendermint/tendermint/blockchain"
"github.com/tendermint/tendermint/blockchain/v2/internal/behavior"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/p2p"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
"github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
@@ -51,18 +50,18 @@ type BlockchainReactor struct {
}

type blockApplier interface {
ApplyBlock(state state.State, blockID types.BlockID, block *types.Block) (state.State, error)
ApplyBlock(state state.State, blockID types.BlockID, block *types.Block) (state.State, int64, error)
}

// XXX: unify naming in this package around tmState
func newReactor(state state.State, store blockStore, reporter behavior.Reporter,
blockApplier blockApplier, fastSync bool, metrics *cons.Metrics) *BlockchainReactor {
blockApplier blockApplier, fastSync bool) *BlockchainReactor {
initHeight := state.LastBlockHeight + 1
if initHeight == 1 {
initHeight = state.InitialHeight
}
scheduler := newScheduler(initHeight, time.Now())
pContext := newProcessorContext(store, blockApplier, state, metrics)
pContext := newProcessorContext(store, blockApplier, state)
// TODO: Fix naming to just newProcessor
// newPcState requires a processorContext
processor := newPcState(pContext)
@@ -82,10 +81,9 @@ func NewBlockchainReactor(
state state.State,
blockApplier blockApplier,
store blockStore,
fastSync bool,
metrics *cons.Metrics) *BlockchainReactor {
fastSync bool) *BlockchainReactor {
reporter := behavior.NewMockReporter()
return newReactor(state, store, reporter, blockApplier, fastSync, metrics)
return newReactor(state, store, reporter, blockApplier, fastSync)
}

// SetSwitch implements Reactor interface.
@@ -213,7 +211,7 @@ func (e rProcessBlock) String() string {
type bcBlockResponse struct {
priorityNormal
time time.Time
peerID types.NodeID
peerID p2p.NodeID
size int64
block *types.Block
}
@@ -227,7 +225,7 @@ func (resp bcBlockResponse) String() string {
type bcNoBlockResponse struct {
priorityNormal
time time.Time
peerID types.NodeID
peerID p2p.NodeID
height int64
}

@@ -240,7 +238,7 @@ func (resp bcNoBlockResponse) String() string {
type bcStatusResponse struct {
priorityNormal
time time.Time
peerID types.NodeID
peerID p2p.NodeID
base int64
height int64
}
@@ -253,7 +251,7 @@ func (resp bcStatusResponse) String() string {
// new peer is connected
type bcAddNewPeer struct {
priorityNormal
peerID types.NodeID
peerID p2p.NodeID
}

func (resp bcAddNewPeer) String() string {
@@ -263,7 +261,7 @@ func (resp bcAddNewPeer) String() string {
// existing peer is removed
type bcRemovePeer struct {
priorityHigh
peerID types.NodeID
peerID p2p.NodeID
reason interface{}
}

@@ -585,14 +583,8 @@ func (r *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
ID: BlockchainChannel,
Priority: 5,
SendQueueCapacity: 2000,
RecvBufferCapacity: 1024,
RecvBufferCapacity: 50 * 4096,
RecvMessageCapacity: bc.MaxMsgSize,
},
}
}

func (r *BlockchainReactor) GetMaxPeerBlockHeight() int64 {
r.mtx.RLock()
defer r.mtx.RUnlock()
return r.maxPeerHeight
}
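Any test double has to track the widened blockApplier interface above. A minimal conforming stub might look like this — a sketch, with 0 standing in for the assumed retain-height value:

    // Sketch: a no-op implementation of the updated blockApplier interface.
    type noopApplier struct{}

    func (noopApplier) ApplyBlock(
        s state.State, id types.BlockID, b *types.Block,
    ) (state.State, int64, error) {
        s.LastBlockHeight++
        return s, 0, nil
    }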
@@ -14,30 +14,28 @@ import (
dbm "github.com/tendermint/tm-db"

abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/blockchain/v2/internal/behavior"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/internal/blockchain/v2/internal/behavior"
cons "github.com/tendermint/tendermint/internal/consensus"
"github.com/tendermint/tendermint/internal/mempool/mock"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/internal/p2p/conn"
"github.com/tendermint/tendermint/internal/test/factory"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/service"
"github.com/tendermint/tendermint/mempool/mock"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/p2p/conn"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
sf "github.com/tendermint/tendermint/state/test/factory"
tmstore "github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
)

type mockPeer struct {
service.Service
id types.NodeID
id p2p.NodeID
}

func (mp mockPeer) FlushStop() {}
func (mp mockPeer) ID() types.NodeID { return mp.id }
func (mp mockPeer) ID() p2p.NodeID { return mp.id }
func (mp mockPeer) RemoteIP() net.IP { return net.IP{} }
func (mp mockPeer) RemoteAddr() net.Addr { return &net.TCPAddr{IP: mp.RemoteIP(), Port: 8800} }

@@ -45,8 +43,8 @@ func (mp mockPeer) IsOutbound() bool { return true }
func (mp mockPeer) IsPersistent() bool { return true }
func (mp mockPeer) CloseConn() error { return nil }

func (mp mockPeer) NodeInfo() types.NodeInfo {
return types.NodeInfo{
func (mp mockPeer) NodeInfo() p2p.NodeInfo {
return p2p.NodeInfo{
NodeID: "",
ListenAddr: "",
}
@@ -86,9 +84,9 @@ type mockBlockApplier struct {
// XXX: Add whitelist/blacklist?
func (mba *mockBlockApplier) ApplyBlock(
state sm.State, blockID types.BlockID, block *types.Block,
) (sm.State, error) {
) (sm.State, int64, error) {
state.LastBlockHeight++
return state, nil
return state, 0, nil
}

type mockSwitchIo struct {
@@ -153,8 +151,8 @@ type testReactorParams struct {
mockA bool
}

func newTestReactor(t *testing.T, p testReactorParams) *BlockchainReactor {
store, state, _ := newReactorStore(t, p.genDoc, p.privVals, p.startHeight)
func newTestReactor(p testReactorParams) *BlockchainReactor {
store, state, _ := newReactorStore(p.genDoc, p.privVals, p.startHeight)
reporter := behavior.NewMockReporter()

var appl blockApplier
@@ -166,17 +164,18 @@ func newTestReactor(t *testing.T, p testReactorParams) *BlockchainReactor {
cc := proxy.NewLocalClientCreator(app)
proxyApp := proxy.NewAppConns(cc)
err := proxyApp.Start()
require.NoError(t, err)
if err != nil {
panic(fmt.Errorf("error start app: %w", err))
}
db := dbm.NewMemDB()
stateStore := sm.NewStore(db)
blockStore := tmstore.NewBlockStore(dbm.NewMemDB())
appl = sm.NewBlockExecutor(
stateStore, p.logger, proxyApp.Consensus(), mock.Mempool{}, sm.EmptyEvidencePool{}, blockStore)
err = stateStore.Save(state)
require.NoError(t, err)
appl = sm.NewBlockExecutor(stateStore, p.logger, proxyApp.Consensus(), mock.Mempool{}, sm.EmptyEvidencePool{})
if err = stateStore.Save(state); err != nil {
panic(err)
}
}

r := newReactor(state, store, reporter, appl, true, cons.NopMetrics())
r := newReactor(state, store, reporter, appl, true)
logger := log.TestingLogger()
r.SetLogger(logger.With("module", "blockchain"))

@@ -401,7 +400,7 @@ func TestReactorHelperMode(t *testing.T) {
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
reactor := newTestReactor(t, params)
reactor := newTestReactor(params)
mockSwitch := &mockSwitchIo{switchedToConsensus: false}
reactor.io = mockSwitch
err := reactor.Start()
@@ -419,7 +418,7 @@ func TestReactorHelperMode(t *testing.T) {
msgBz, err := proto.Marshal(msgProto)
require.NoError(t, err)

reactor.Receive(channelID, mockPeer{id: types.NodeID(step.peer)}, msgBz)
reactor.Receive(channelID, mockPeer{id: p2p.NodeID(step.peer)}, msgBz)
assert.Equal(t, old+1, mockSwitch.numStatusResponse)
case bcproto.BlockRequest:
if ev.Height > params.startHeight {
@@ -431,7 +430,7 @@ func TestReactorHelperMode(t *testing.T) {
msgBz, err := proto.Marshal(msgProto)
require.NoError(t, err)

reactor.Receive(channelID, mockPeer{id: types.NodeID(step.peer)}, msgBz)
reactor.Receive(channelID, mockPeer{id: p2p.NodeID(step.peer)}, msgBz)
assert.Equal(t, old+1, mockSwitch.numNoBlockResponse)
} else {
old := mockSwitch.numBlockResponse
@@ -442,7 +441,7 @@ func TestReactorHelperMode(t *testing.T) {
msgBz, err := proto.Marshal(msgProto)
require.NoError(t, err)

reactor.Receive(channelID, mockPeer{id: types.NodeID(step.peer)}, msgBz)
reactor.Receive(channelID, mockPeer{id: p2p.NodeID(step.peer)}, msgBz)
assert.Equal(t, old+1, mockSwitch.numBlockResponse)
}
}
@@ -458,7 +457,7 @@ func TestReactorSetSwitchNil(t *testing.T) {
defer os.RemoveAll(config.RootDir)
genDoc, privVals := factory.RandGenesisDoc(config, 1, false, 30)

reactor := newTestReactor(t, testReactorParams{
reactor := newTestReactor(testReactorParams{
logger: log.TestingLogger(),
genDoc: genDoc,
privVals: privVals,
@@ -469,18 +468,34 @@ func TestReactorSetSwitchNil(t *testing.T) {
assert.Nil(t, reactor.io)
}

//----------------------------------------------
// utility funcs

func makeTxs(height int64) (txs []types.Tx) {
for i := 0; i < 10; i++ {
txs = append(txs, types.Tx([]byte{byte(height), byte(i)}))
}
return txs
}

func makeBlock(height int64, state sm.State, lastCommit *types.Commit) *types.Block {
block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, nil, state.Validators.GetProposer().Address)
return block
}

type testApp struct {
abci.BaseApplication
}

// Why are we importing the entire blockExecutor dependency graph here
// when we have the facilities to
func newReactorStore(
t *testing.T,
genDoc *types.GenesisDoc,
privVals []types.PrivValidator,
maxBlockHeight int64) (*tmstore.BlockStore, sm.State, *sm.BlockExecutor) {
t.Helper()

require.Len(t, privVals, 1)
maxBlockHeight int64) (*store.BlockStore, sm.State, *sm.BlockExecutor) {
if len(privVals) != 1 {
panic("only support one validator")
}
app := &testApp{}
cc := proxy.NewLocalClientCreator(app)
proxyApp := proxy.NewAppConns(cc)
@@ -490,15 +505,20 @@ func newReactorStore(
}

stateDB := dbm.NewMemDB()
blockStore := tmstore.NewBlockStore(dbm.NewMemDB())
blockStore := store.NewBlockStore(dbm.NewMemDB())
stateStore := sm.NewStore(stateDB)
state, err := sm.MakeGenesisState(genDoc)
require.NoError(t, err)
state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
if err != nil {
panic(fmt.Errorf("error constructing state from genesis file: %w", err))
}

db := dbm.NewMemDB()
stateStore = sm.NewStore(db)
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(),
mock.Mempool{}, sm.EmptyEvidencePool{}, blockStore)
err = stateStore.Save(state)
require.NoError(t, err)
mock.Mempool{}, sm.EmptyEvidencePool{})
if err = stateStore.Save(state); err != nil {
panic(err)
}

// add blocks in
for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ {
@@ -513,18 +533,22 @@ func newReactorStore(
lastBlockMeta.BlockID,
time.Now(),
)
require.NoError(t, err)
if err != nil {
panic(err)
}
lastCommit = types.NewCommit(vote.Height, vote.Round,
lastBlockMeta.BlockID, []types.CommitSig{vote.CommitSig()})
}

thisBlock := sf.MakeBlock(state, blockHeight, lastCommit)
thisBlock := makeBlock(blockHeight, state, lastCommit)

thisParts := thisBlock.MakePartSet(types.BlockPartSizeBytes)
blockID := types.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()}

state, err = blockExec.ApplyBlock(state, blockID, thisBlock)
require.NoError(t, err)
state, _, err = blockExec.ApplyBlock(state, blockID, thisBlock)
if err != nil {
panic(fmt.Errorf("error apply block: %w", err))
}

blockStore.SaveBlock(thisBlock, thisParts, lastCommit)
}
@@ -8,6 +8,7 @@ import (
"sort"
"time"

"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/types"
)

@@ -25,7 +26,7 @@ func (e scFinishedEv) String() string {
// send a blockRequest message
type scBlockRequest struct {
priorityNormal
peerID types.NodeID
peerID p2p.NodeID
height int64
}

@@ -36,7 +37,7 @@ func (e scBlockRequest) String() string {
// a block has been received and validated by the scheduler
type scBlockReceived struct {
priorityNormal
peerID types.NodeID
peerID p2p.NodeID
block *types.Block
}

@@ -47,7 +48,7 @@ func (e scBlockReceived) String() string {
// scheduler detected a peer error
type scPeerError struct {
priorityHigh
peerID types.NodeID
peerID p2p.NodeID
reason error
}

@@ -58,7 +59,7 @@ func (e scPeerError) String() string {
// scheduler removed a set of peers (timed out or slow peer)
type scPeersPruned struct {
priorityHigh
peers []types.NodeID
peers []p2p.NodeID
}

func (e scPeersPruned) String() string {
@@ -125,7 +126,7 @@ func (e peerState) String() string {
}

type scPeer struct {
peerID types.NodeID
peerID p2p.NodeID

// initialized as New when peer is added, updated to Ready when statusUpdate is received,
// updated to Removed when peer is removed
@@ -142,7 +143,7 @@ func (p scPeer) String() string {
p.state, p.base, p.height, p.lastTouched, p.lastRate, p.peerID)
}

func newScPeer(peerID types.NodeID) *scPeer {
func newScPeer(peerID p2p.NodeID) *scPeer {
return &scPeer{
peerID: peerID,
state: peerStateNew,
@@ -170,7 +171,7 @@ type scheduler struct {

// a map of peerID to scheduler specific peer struct `scPeer` used to keep
// track of peer specific state
peers map[types.NodeID]*scPeer
peers map[p2p.NodeID]*scPeer
peerTimeout time.Duration // maximum response time from a peer otherwise prune
minRecvRate int64 // minimum receive rate from peer otherwise prune

@@ -182,13 +183,13 @@ type scheduler struct {
blockStates map[int64]blockState

// a map of heights to the peer we are waiting a response from
pendingBlocks map[int64]types.NodeID
pendingBlocks map[int64]p2p.NodeID

// the time at which a block was put in blockStatePending
pendingTime map[int64]time.Time

// a map of heights to the peers that put the block in blockStateReceived
receivedBlocks map[int64]types.NodeID
receivedBlocks map[int64]p2p.NodeID
}

func (sc scheduler) String() string {
@@ -203,10 +204,10 @@ func newScheduler(initHeight int64, startTime time.Time) *scheduler {
syncTimeout: 60 * time.Second,
height: initHeight,
blockStates: make(map[int64]blockState),
peers: make(map[types.NodeID]*scPeer),
pendingBlocks: make(map[int64]types.NodeID),
peers: make(map[p2p.NodeID]*scPeer),
pendingBlocks: make(map[int64]p2p.NodeID),
pendingTime: make(map[int64]time.Time),
receivedBlocks: make(map[int64]types.NodeID),
receivedBlocks: make(map[int64]p2p.NodeID),
targetPending: 10, // TODO - pass as param
peerTimeout: 15 * time.Second, // TODO - pass as param
minRecvRate: 0, // int64(7680), TODO - pass as param
@@ -215,14 +216,14 @@ func newScheduler(initHeight int64, startTime time.Time) *scheduler {
return &sc
}

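A rough sketch of driving the scheduler directly, under the assumption that setPeerRange registers the peer and seeds its block states (heights invented):

    // Sketch: start scheduling from height 1 and register one peer.
    sc := newScheduler(1, time.Now())
    if err := sc.setPeerRange("P1", 1, 10); err != nil {
        // the peer reported an invalid base/height range
    }
    next := sc.nextHeightToSchedule() // expected to be 1 here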
func (sc *scheduler) ensurePeer(peerID types.NodeID) *scPeer {
func (sc *scheduler) ensurePeer(peerID p2p.NodeID) *scPeer {
if _, ok := sc.peers[peerID]; !ok {
sc.peers[peerID] = newScPeer(peerID)
}
return sc.peers[peerID]
}

func (sc *scheduler) touchPeer(peerID types.NodeID, time time.Time) error {
func (sc *scheduler) touchPeer(peerID p2p.NodeID, time time.Time) error {
peer, ok := sc.peers[peerID]
if !ok {
return fmt.Errorf("couldn't find peer %s", peerID)
@@ -237,7 +238,7 @@ func (sc *scheduler) touchPeer(peerID types.NodeID, time time.Time) error {
return nil
}

func (sc *scheduler) removePeer(peerID types.NodeID) {
func (sc *scheduler) removePeer(peerID p2p.NodeID) {
peer, ok := sc.peers[peerID]
if !ok {
return
@@ -297,7 +298,7 @@ func (sc *scheduler) addNewBlocks() {
}
}

func (sc *scheduler) setPeerRange(peerID types.NodeID, base int64, height int64) error {
func (sc *scheduler) setPeerRange(peerID p2p.NodeID, base int64, height int64) error {
peer := sc.ensurePeer(peerID)

if peer.state == peerStateRemoved {
@@ -332,8 +333,8 @@ func (sc *scheduler) getStateAtHeight(height int64) blockState {
}
}

func (sc *scheduler) getPeersWithHeight(height int64) []types.NodeID {
peers := make([]types.NodeID, 0)
func (sc *scheduler) getPeersWithHeight(height int64) []p2p.NodeID {
peers := make([]p2p.NodeID, 0)
for _, peer := range sc.peers {
if peer.state != peerStateReady {
continue
@@ -345,8 +346,8 @@ func (sc *scheduler) getPeersWithHeight(height int64) []types.NodeID {
return peers
}

func (sc *scheduler) prunablePeers(peerTimout time.Duration, minRecvRate int64, now time.Time) []types.NodeID {
prunable := make([]types.NodeID, 0)
func (sc *scheduler) prunablePeers(peerTimout time.Duration, minRecvRate int64, now time.Time) []p2p.NodeID {
prunable := make([]p2p.NodeID, 0)
for peerID, peer := range sc.peers {
if peer.state != peerStateReady {
continue
@@ -365,7 +366,7 @@ func (sc *scheduler) setStateAtHeight(height int64, state blockState) {
}

// CONTRACT: peer exists and in Ready state.
func (sc *scheduler) markReceived(peerID types.NodeID, height int64, size int64, now time.Time) error {
func (sc *scheduler) markReceived(peerID p2p.NodeID, height int64, size int64, now time.Time) error {
peer := sc.peers[peerID]

if state := sc.getStateAtHeight(height); state != blockStatePending || sc.pendingBlocks[height] != peerID {
@@ -389,7 +390,7 @@ func (sc *scheduler) markReceived(peerID types.NodeID, height int64, size int64,
return nil
}

func (sc *scheduler) markPending(peerID types.NodeID, height int64, time time.Time) error {
func (sc *scheduler) markPending(peerID p2p.NodeID, height int64, time time.Time) error {
state := sc.getStateAtHeight(height)
if state != blockStateNew {
return fmt.Errorf("block %d should be in blockStateNew but is %s", height, state)
@@ -471,7 +472,7 @@ func (sc *scheduler) nextHeightToSchedule() int64 {
return min
}

func (sc *scheduler) pendingFrom(peerID types.NodeID) []int64 {
func (sc *scheduler) pendingFrom(peerID p2p.NodeID) []int64 {
var heights []int64
for height, pendingPeerID := range sc.pendingBlocks {
if pendingPeerID == peerID {
@@ -481,7 +482,7 @@ func (sc *scheduler) pendingFrom(peerID types.NodeID) []int64 {
return heights
}

func (sc *scheduler) selectPeer(height int64) (types.NodeID, error) {
func (sc *scheduler) selectPeer(height int64) (p2p.NodeID, error) {
peers := sc.getPeersWithHeight(height)
if len(peers) == 0 {
return "", fmt.Errorf("cannot find peer for height %d", height)
@@ -489,7 +490,7 @@ func (sc *scheduler) selectPeer(height int64) (types.NodeID, error) {

// create a map from number of pending requests to a list
// of peers having that number of pending requests.
pendingFrom := make(map[int][]types.NodeID)
pendingFrom := make(map[int][]p2p.NodeID)
for _, peerID := range peers {
numPending := len(sc.pendingFrom(peerID))
pendingFrom[numPending] = append(pendingFrom[numPending], peerID)
@@ -508,7 +509,7 @@ func (sc *scheduler) selectPeer(height int64) (types.NodeID, error) {
}

// PeerByID is a list of peers sorted by peerID.
type PeerByID []types.NodeID
type PeerByID []p2p.NodeID

func (peers PeerByID) Len() int {
return len(peers)
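Because PeerByID satisfies sort.Interface (Len is shown here; Less and Swap are assumed to be defined alongside it in this file), callers can order peers deterministically, roughly:

    // Sketch: deterministic ordering of peer IDs.
    ids := PeerByID{"P3", "P1", "P2"}
    sort.Sort(ids) // ids is now P1, P2, P3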
@@ -10,6 +10,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"

"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
)

@@ -19,9 +20,9 @@ type scTestParams struct {
initHeight int64
height int64
allB []int64
pending map[int64]types.NodeID
pending map[int64]p2p.NodeID
pendingTime map[int64]time.Time
received map[int64]types.NodeID
received map[int64]p2p.NodeID
peerTimeout time.Duration
minRecvRate int64
targetPending int
@@ -40,7 +41,7 @@ func verifyScheduler(sc *scheduler) {
}

func newTestScheduler(params scTestParams) *scheduler {
peers := make(map[types.NodeID]*scPeer)
peers := make(map[p2p.NodeID]*scPeer)
var maxHeight int64

initHeight := params.initHeight
@@ -53,8 +54,8 @@ func newTestScheduler(params scTestParams) *scheduler {
}

for id, peer := range params.peers {
peer.peerID = types.NodeID(id)
peers[types.NodeID(id)] = peer
peer.peerID = p2p.NodeID(id)
peers[p2p.NodeID(id)] = peer
if maxHeight < peer.height {
maxHeight = peer.height
}
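These fixtures drive the table tests that follow; constructing one looks roughly like this (values invented for illustration):

    // Sketch: a test scheduler with one Ready peer serving heights 1-4.
    sc := newTestScheduler(scTestParams{
        peers:   map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
        allB:    []int64{1, 2, 3, 4},
        pending: map[int64]p2p.NodeID{1: "P1"},
    })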
@@ -121,7 +122,7 @@ func TestScMaxHeights(t *testing.T) {
name: "one ready peer",
sc: scheduler{
height: 3,
peers: map[types.NodeID]*scPeer{"P1": {height: 6, state: peerStateReady}},
peers: map[p2p.NodeID]*scPeer{"P1": {height: 6, state: peerStateReady}},
},
wantMax: 6,
},
@@ -129,7 +130,7 @@ func TestScMaxHeights(t *testing.T) {
name: "ready and removed peers",
sc: scheduler{
height: 1,
peers: map[types.NodeID]*scPeer{
peers: map[p2p.NodeID]*scPeer{
"P1": {height: 4, state: peerStateReady},
"P2": {height: 10, state: peerStateRemoved}},
},
@@ -139,7 +140,7 @@ func TestScMaxHeights(t *testing.T) {
name: "removed peers",
sc: scheduler{
height: 1,
peers: map[types.NodeID]*scPeer{
peers: map[p2p.NodeID]*scPeer{
"P1": {height: 4, state: peerStateRemoved},
"P2": {height: 10, state: peerStateRemoved}},
},
@@ -149,7 +150,7 @@ func TestScMaxHeights(t *testing.T) {
name: "new peers",
sc: scheduler{
height: 1,
peers: map[types.NodeID]*scPeer{
peers: map[p2p.NodeID]*scPeer{
"P1": {base: -1, height: -1, state: peerStateNew},
"P2": {base: -1, height: -1, state: peerStateNew}},
},
@@ -159,7 +160,7 @@ func TestScMaxHeights(t *testing.T) {
name: "mixed peers",
sc: scheduler{
height: 1,
peers: map[types.NodeID]*scPeer{
peers: map[p2p.NodeID]*scPeer{
"P1": {height: -1, state: peerStateNew},
"P2": {height: 10, state: peerStateReady},
"P3": {height: 20, state: peerStateRemoved},
@@ -186,7 +187,7 @@ func TestScMaxHeights(t *testing.T) {
func TestScEnsurePeer(t *testing.T) {

type args struct {
peerID types.NodeID
peerID p2p.NodeID
}
tests := []struct {
name string
@@ -243,7 +244,7 @@ func TestScTouchPeer(t *testing.T) {
now := time.Now()

type args struct {
peerID types.NodeID
peerID p2p.NodeID
time time.Time
}

@@ -315,13 +316,13 @@ func TestScPrunablePeers(t *testing.T) {
name string
fields scTestParams
args args
wantResult []types.NodeID
wantResult []p2p.NodeID
}{
{
name: "no peers",
fields: scTestParams{peers: map[string]*scPeer{}},
args: args{threshold: time.Second, time: now.Add(time.Second + time.Millisecond), minSpeed: 100},
wantResult: []types.NodeID{},
wantResult: []p2p.NodeID{},
},
{
name: "mixed peers",
@@ -340,7 +341,7 @@ func TestScPrunablePeers(t *testing.T) {
"P6": {state: peerStateReady, lastTouched: now.Add(time.Second), lastRate: 90},
}},
args: args{threshold: time.Second, time: now.Add(time.Second + time.Millisecond), minSpeed: 100},
wantResult: []types.NodeID{"P4", "P5", "P6"},
wantResult: []p2p.NodeID{"P4", "P5", "P6"},
},
}

@@ -360,7 +361,7 @@ func TestScPrunablePeers(t *testing.T) {
func TestScRemovePeer(t *testing.T) {

type args struct {
peerID types.NodeID
peerID p2p.NodeID
}
tests := []struct {
name string
@@ -423,13 +424,13 @@ func TestScRemovePeer(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}},
allB: []int64{1, 2, 3},
pending: map[int64]types.NodeID{1: "P1"},
pending: map[int64]p2p.NodeID{1: "P1"},
},
args: args{peerID: "P1"},
wantFields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateRemoved}},
allB: []int64{},
pending: map[int64]types.NodeID{},
pending: map[int64]p2p.NodeID{},
},
},
{
@@ -437,13 +438,13 @@ func TestScRemovePeer(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}},
allB: []int64{1, 2, 3},
received: map[int64]types.NodeID{1: "P1"},
received: map[int64]p2p.NodeID{1: "P1"},
},
args: args{peerID: "P1"},
wantFields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateRemoved}},
allB: []int64{},
received: map[int64]types.NodeID{},
received: map[int64]p2p.NodeID{},
},
},
{
@@ -451,15 +452,15 @@ func TestScRemovePeer(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{1, 2, 3, 4},
pending: map[int64]types.NodeID{1: "P1", 3: "P1"},
received: map[int64]types.NodeID{2: "P1", 4: "P1"},
pending: map[int64]p2p.NodeID{1: "P1", 3: "P1"},
received: map[int64]p2p.NodeID{2: "P1", 4: "P1"},
},
args: args{peerID: "P1"},
wantFields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateRemoved}},
allB: []int64{},
pending: map[int64]types.NodeID{},
received: map[int64]types.NodeID{},
pending: map[int64]p2p.NodeID{},
received: map[int64]p2p.NodeID{},
},
},
{
@@ -470,8 +471,8 @@ func TestScRemovePeer(t *testing.T) {
"P2": {height: 6, state: peerStateReady},
},
allB: []int64{1, 2, 3, 4, 5, 6},
pending: map[int64]types.NodeID{1: "P1", 3: "P2", 6: "P1"},
received: map[int64]types.NodeID{2: "P1", 4: "P2", 5: "P2"},
pending: map[int64]p2p.NodeID{1: "P1", 3: "P2", 6: "P1"},
received: map[int64]p2p.NodeID{2: "P1", 4: "P2", 5: "P2"},
},
args: args{peerID: "P1"},
wantFields: scTestParams{
@@ -480,8 +481,8 @@ func TestScRemovePeer(t *testing.T) {
"P2": {height: 6, state: peerStateReady},
},
allB: []int64{1, 2, 3, 4, 5, 6},
pending: map[int64]types.NodeID{3: "P2"},
received: map[int64]types.NodeID{4: "P2", 5: "P2"},
pending: map[int64]p2p.NodeID{3: "P2"},
received: map[int64]p2p.NodeID{4: "P2", 5: "P2"},
},
},
}
@@ -500,7 +501,7 @@ func TestScRemovePeer(t *testing.T) {
func TestScSetPeerRange(t *testing.T) {

type args struct {
peerID types.NodeID
peerID p2p.NodeID
base int64
height int64
}
@@ -621,25 +622,25 @@ func TestScGetPeersWithHeight(t *testing.T) {
name string
fields scTestParams
args args
wantResult []types.NodeID
wantResult []p2p.NodeID
}{
{
name: "no peers",
fields: scTestParams{peers: map[string]*scPeer{}},
args: args{height: 10},
wantResult: []types.NodeID{},
wantResult: []p2p.NodeID{},
},
{
name: "only new peers",
fields: scTestParams{peers: map[string]*scPeer{"P1": {height: -1, state: peerStateNew}}},
args: args{height: 10},
wantResult: []types.NodeID{},
wantResult: []p2p.NodeID{},
},
{
name: "only Removed peers",
fields: scTestParams{peers: map[string]*scPeer{"P1": {height: 4, state: peerStateRemoved}}},
args: args{height: 2},
wantResult: []types.NodeID{},
wantResult: []p2p.NodeID{},
},
{
name: "one Ready shorter peer",
@@ -648,7 +649,7 @@ func TestScGetPeersWithHeight(t *testing.T) {
allB: []int64{1, 2, 3, 4},
},
args: args{height: 5},
wantResult: []types.NodeID{},
wantResult: []p2p.NodeID{},
},
{
name: "one Ready equal peer",
@@ -657,7 +658,7 @@ func TestScGetPeersWithHeight(t *testing.T) {
allB: []int64{1, 2, 3, 4},
},
args: args{height: 4},
wantResult: []types.NodeID{"P1"},
wantResult: []p2p.NodeID{"P1"},
},
{
name: "one Ready higher peer",
@@ -667,7 +668,7 @@ func TestScGetPeersWithHeight(t *testing.T) {
allB: []int64{1, 2, 3, 4},
},
args: args{height: 4},
wantResult: []types.NodeID{"P1"},
wantResult: []p2p.NodeID{"P1"},
},
{
name: "one Ready higher peer at base",
@@ -677,7 +678,7 @@ func TestScGetPeersWithHeight(t *testing.T) {
allB: []int64{1, 2, 3, 4},
},
args: args{height: 4},
wantResult: []types.NodeID{"P1"},
wantResult: []p2p.NodeID{"P1"},
},
{
name: "one Ready higher peer with higher base",
@@ -687,7 +688,7 @@ func TestScGetPeersWithHeight(t *testing.T) {
allB: []int64{1, 2, 3, 4},
},
args: args{height: 4},
wantResult: []types.NodeID{},
wantResult: []p2p.NodeID{},
},
{
name: "multiple mixed peers",
@@ -702,7 +703,7 @@ func TestScGetPeersWithHeight(t *testing.T) {
allB: []int64{8, 9, 10, 11},
},
args: args{height: 8},
wantResult: []types.NodeID{"P2", "P5"},
wantResult: []p2p.NodeID{"P2", "P5"},
},
}

@@ -724,7 +725,7 @@ func TestScMarkPending(t *testing.T) {
now := time.Now()

type args struct {
peerID types.NodeID
peerID p2p.NodeID
height int64
tm time.Time
}
@@ -820,14 +821,14 @@ func TestScMarkPending(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
allB: []int64{1, 2},
pending: map[int64]types.NodeID{1: "P1"},
pending: map[int64]p2p.NodeID{1: "P1"},
pendingTime: map[int64]time.Time{1: now},
},
args: args{peerID: "P1", height: 2, tm: now.Add(time.Millisecond)},
wantFields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
allB: []int64{1, 2},
pending: map[int64]types.NodeID{1: "P1", 2: "P1"},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P1"},
pendingTime: map[int64]time.Time{1: now, 2: now.Add(time.Millisecond)},
},
},
@@ -850,7 +851,7 @@ func TestScMarkReceived(t *testing.T) {
now := time.Now()

type args struct {
peerID types.NodeID
peerID p2p.NodeID
height int64
size int64
tm time.Time
@@ -890,7 +891,7 @@ func TestScMarkReceived(t *testing.T) {
"P2": {height: 4, state: peerStateReady},
},
allB: []int64{1, 2, 3, 4},
pending: map[int64]types.NodeID{1: "P1", 2: "P2", 3: "P2", 4: "P1"},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P2", 3: "P2", 4: "P1"},
},
args: args{peerID: "P1", height: 2, size: 1000, tm: now},
wantFields: scTestParams{
@@ -899,7 +900,7 @@ func TestScMarkReceived(t *testing.T) {
"P2": {height: 4, state: peerStateReady},
},
allB: []int64{1, 2, 3, 4},
pending: map[int64]types.NodeID{1: "P1", 2: "P2", 3: "P2", 4: "P1"},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P2", 3: "P2", 4: "P1"},
},
wantErr: true,
},
@@ -908,13 +909,13 @@ func TestScMarkReceived(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{1, 2, 3, 4},
pending: map[int64]types.NodeID{},
pending: map[int64]p2p.NodeID{},
},
args: args{peerID: "P1", height: 2, size: 1000, tm: now},
wantFields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{1, 2, 3, 4},
pending: map[int64]types.NodeID{},
pending: map[int64]p2p.NodeID{},
},
wantErr: true,
},
@@ -923,14 +924,14 @@ func TestScMarkReceived(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
allB: []int64{1, 2},
pending: map[int64]types.NodeID{1: "P1", 2: "P1"},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P1"},
pendingTime: map[int64]time.Time{1: now, 2: now.Add(time.Second)},
},
args: args{peerID: "P1", height: 2, size: 1000, tm: now},
wantFields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
allB: []int64{1, 2},
pending: map[int64]types.NodeID{1: "P1", 2: "P1"},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P1"},
pendingTime: map[int64]time.Time{1: now, 2: now.Add(time.Second)},
},
wantErr: true,
@@ -940,16 +941,16 @@ func TestScMarkReceived(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
allB: []int64{1, 2},
pending: map[int64]types.NodeID{1: "P1", 2: "P1"},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P1"},
pendingTime: map[int64]time.Time{1: now, 2: now},
},
args: args{peerID: "P1", height: 2, size: 1000, tm: now.Add(time.Millisecond)},
wantFields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
allB: []int64{1, 2},
pending: map[int64]types.NodeID{1: "P1"},
pending: map[int64]p2p.NodeID{1: "P1"},
pendingTime: map[int64]time.Time{1: now},
received: map[int64]types.NodeID{2: "P1"},
received: map[int64]p2p.NodeID{2: "P1"},
},
},
}
@@ -990,7 +991,7 @@ func TestScMarkProcessed(t *testing.T) {
height: 2,
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{2},
pending: map[int64]types.NodeID{2: "P1"},
pending: map[int64]p2p.NodeID{2: "P1"},
pendingTime: map[int64]time.Time{2: now},
targetPending: 1,
},
@@ -1008,15 +1009,15 @@ func TestScMarkProcessed(t *testing.T) {
height: 1,
peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
allB: []int64{1, 2},
pending: map[int64]types.NodeID{2: "P1"},
pending: map[int64]p2p.NodeID{2: "P1"},
pendingTime: map[int64]time.Time{2: now},
received: map[int64]types.NodeID{1: "P1"}},
received: map[int64]p2p.NodeID{1: "P1"}},
args: args{height: 1},
wantFields: scTestParams{
height: 2,
peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
allB: []int64{2},
pending: map[int64]types.NodeID{2: "P1"},
pending: map[int64]p2p.NodeID{2: "P1"},
pendingTime: map[int64]time.Time{2: now}},
},
}
@@ -1100,7 +1101,7 @@ func TestScAllBlocksProcessed(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{1, 2, 3, 4},
pending: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
pendingTime: map[int64]time.Time{1: now, 2: now, 3: now, 4: now},
},
wantResult: false,
@@ -1110,7 +1111,7 @@ func TestScAllBlocksProcessed(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{1, 2, 3, 4},
received: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
received: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
},
wantResult: false,
},
@@ -1121,7 +1122,7 @@ func TestScAllBlocksProcessed(t *testing.T) {
peers: map[string]*scPeer{
"P1": {height: 4, state: peerStateReady}},
allB: []int64{4},
received: map[int64]types.NodeID{4: "P1"},
received: map[int64]p2p.NodeID{4: "P1"},
},
wantResult: true,
},
@@ -1130,7 +1131,7 @@ func TestScAllBlocksProcessed(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{1, 2, 3, 4},
pending: map[int64]types.NodeID{2: "P1", 4: "P1"},
pending: map[int64]p2p.NodeID{2: "P1", 4: "P1"},
pendingTime: map[int64]time.Time{2: now, 4: now},
},
wantResult: false,
@@ -1178,7 +1179,7 @@ func TestScNextHeightToSchedule(t *testing.T) {
initHeight: 1,
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{1, 2, 3, 4},
pending: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
pendingTime: map[int64]time.Time{1: now, 2: now, 3: now, 4: now},
},
wantHeight: -1,
@@ -1189,7 +1190,7 @@ func TestScNextHeightToSchedule(t *testing.T) {
initHeight: 1,
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{1, 2, 3, 4},
received: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
received: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
},
wantHeight: -1,
},
@@ -1208,7 +1209,7 @@ func TestScNextHeightToSchedule(t *testing.T) {
initHeight: 1,
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{1, 2, 3, 4},
pending: map[int64]types.NodeID{2: "P1"},
pending: map[int64]p2p.NodeID{2: "P1"},
pendingTime: map[int64]time.Time{2: now},
},
wantHeight: 1,
@@ -1238,7 +1239,7 @@ func TestScSelectPeer(t *testing.T) {
name string
fields scTestParams
args args
wantResult types.NodeID
wantResult p2p.NodeID
wantError bool
}{
{
@@ -1306,7 +1307,7 @@ func TestScSelectPeer(t *testing.T) {
"P1": {height: 8, state: peerStateReady},
"P2": {height: 9, state: peerStateReady}},
allB: []int64{4, 5, 6, 7, 8, 9},
pending: map[int64]types.NodeID{
pending: map[int64]p2p.NodeID{
4: "P1", 6: "P1",
5: "P2",
},
@@ -1322,7 +1323,7 @@ func TestScSelectPeer(t *testing.T) {
"P1": {height: 15, state: peerStateReady},
"P3": {height: 15, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
pending: map[int64]types.NodeID{
pending: map[int64]p2p.NodeID{
1: "P1", 2: "P1",
3: "P3", 4: "P3",
5: "P2", 6: "P2",
@@ -1391,7 +1392,7 @@ func TestScHandleBlockResponse(t *testing.T) {
now := time.Now()
block6FromP1 := bcBlockResponse{
time: now.Add(time.Millisecond),
peerID: types.NodeID("P1"),
peerID: p2p.NodeID("P1"),
size: 100,
block: makeScBlock(6),
}
@@ -1432,7 +1433,7 @@ func TestScHandleBlockResponse(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P2": {height: 8, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8},
pending: map[int64]types.NodeID{6: "P2"},
pending: map[int64]p2p.NodeID{6: "P2"},
pendingTime: map[int64]time.Time{6: now},
},
args: args{event: block6FromP1},
@@ -1443,7 +1444,7 @@ func TestScHandleBlockResponse(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8},
pending: map[int64]types.NodeID{6: "P1"},
pending: map[int64]p2p.NodeID{6: "P1"},
pendingTime: map[int64]time.Time{6: now.Add(time.Second)},
},
args: args{event: block6FromP1},
@@ -1454,7 +1455,7 @@ func TestScHandleBlockResponse(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8},
pending: map[int64]types.NodeID{6: "P1"},
pending: map[int64]p2p.NodeID{6: "P1"},
pendingTime: map[int64]time.Time{6: now},
},
args: args{event: block6FromP1},
@@ -1476,7 +1477,7 @@ func TestScHandleNoBlockResponse(t *testing.T) {
now := time.Now()
noBlock6FromP1 := bcNoBlockResponse{
time: now.Add(time.Millisecond),
peerID: types.NodeID("P1"),
peerID: p2p.NodeID("P1"),
height: 6,
}

@@ -1512,14 +1513,14 @@ func TestScHandleNoBlockResponse(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P2": {height: 8, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8},
pending: map[int64]types.NodeID{6: "P2"},
pending: map[int64]p2p.NodeID{6: "P2"},
pendingTime: map[int64]time.Time{6: now},
},
wantEvent: noOpEvent{},
wantFields: scTestParams{
peers: map[string]*scPeer{"P2": {height: 8, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8},
pending: map[int64]types.NodeID{6: "P2"},
pending: map[int64]p2p.NodeID{6: "P2"},
pendingTime: map[int64]time.Time{6: now},
},
},
@@ -1528,7 +1529,7 @@ func TestScHandleNoBlockResponse(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8},
pending: map[int64]types.NodeID{6: "P1"},
pending: map[int64]p2p.NodeID{6: "P1"},
pendingTime: map[int64]time.Time{6: now},
},
wantEvent: scPeerError{peerID: "P1", reason: fmt.Errorf("some error")},
@@ -1551,7 +1552,7 @@ func TestScHandleNoBlockResponse(t *testing.T) {
func TestScHandleBlockProcessed(t *testing.T) {
now := time.Now()
processed6FromP1 := pcBlockProcessed{
peerID: types.NodeID("P1"),
peerID: p2p.NodeID("P1"),
height: 6,
}

@@ -1578,7 +1579,7 @@ func TestScHandleBlockProcessed(t *testing.T) {
initHeight: 6,
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
allB: []int64{6, 7, 8},
pending: map[int64]types.NodeID{6: "P1"},
pending: map[int64]p2p.NodeID{6: "P1"},
pendingTime: map[int64]time.Time{6: now},
},
args: args{event: processed6FromP1},
@@ -1590,7 +1591,7 @@ func TestScHandleBlockProcessed(t *testing.T) {
initHeight: 6,
peers: map[string]*scPeer{"P1": {height: 7, state: peerStateReady}},
allB: []int64{6, 7},
received: map[int64]types.NodeID{6: "P1", 7: "P1"},
received: map[int64]p2p.NodeID{6: "P1", 7: "P1"},
},
args: args{event: processed6FromP1},
wantEvent: scFinishedEv{},
@@ -1601,8 +1602,8 @@ func TestScHandleBlockProcessed(t *testing.T) {
initHeight: 6,
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
allB: []int64{6, 7, 8},
pending: map[int64]types.NodeID{7: "P1", 8: "P1"},
received: map[int64]types.NodeID{6: "P1"},
pending: map[int64]p2p.NodeID{7: "P1", 8: "P1"},
received: map[int64]p2p.NodeID{6: "P1"},
},
args: args{event: processed6FromP1},
wantEvent: noOpEvent{},
@@ -1645,7 +1646,7 @@ func TestScHandleBlockVerificationFailure(t *testing.T) {
initHeight: 6,
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
allB: []int64{6, 7, 8},
pending: map[int64]types.NodeID{6: "P1"},
pending: map[int64]p2p.NodeID{6: "P1"},
pendingTime: map[int64]time.Time{6: now},
},
args: args{event: pcBlockVerificationFailure{height: 10, firstPeerID: "P1", secondPeerID: "P1"}},
@@ -1657,7 +1658,7 @@ func TestScHandleBlockVerificationFailure(t *testing.T) {
initHeight: 6,
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}, "P2": {height: 8, state: peerStateReady}},
allB: []int64{6, 7, 8},
pending: map[int64]types.NodeID{6: "P1"},
pending: map[int64]p2p.NodeID{6: "P1"},
pendingTime: map[int64]time.Time{6: now},
},
args: args{event: pcBlockVerificationFailure{height: 10, firstPeerID: "P1", secondPeerID: "P1"}},
@@ -1669,7 +1670,7 @@ func TestScHandleBlockVerificationFailure(t *testing.T) {
initHeight: 6,
peers: map[string]*scPeer{"P1": {height: 7, state: peerStateReady}},
allB: []int64{6, 7},
received: map[int64]types.NodeID{6: "P1", 7: "P1"},
received: map[int64]p2p.NodeID{6: "P1", 7: "P1"},
},
args: args{event: pcBlockVerificationFailure{height: 7, firstPeerID: "P1", secondPeerID: "P1"}},
wantEvent: scFinishedEv{},
@@ -1680,8 +1681,8 @@ func TestScHandleBlockVerificationFailure(t *testing.T) {
|
||||
initHeight: 5,
|
||||
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}, "P2": {height: 8, state: peerStateReady}},
|
||||
allB: []int64{5, 6, 7, 8},
|
||||
pending: map[int64]types.NodeID{7: "P1", 8: "P1"},
|
||||
received: map[int64]types.NodeID{5: "P1", 6: "P1"},
|
||||
pending: map[int64]p2p.NodeID{7: "P1", 8: "P1"},
|
||||
received: map[int64]p2p.NodeID{5: "P1", 6: "P1"},
|
||||
},
|
||||
args: args{event: pcBlockVerificationFailure{height: 5, firstPeerID: "P1", secondPeerID: "P1"}},
|
||||
wantEvent: noOpEvent{},
|
||||
@@ -1696,8 +1697,8 @@ func TestScHandleBlockVerificationFailure(t *testing.T) {
|
||||
"P3": {height: 8, state: peerStateReady},
|
||||
},
|
||||
allB: []int64{5, 6, 7, 8},
|
||||
pending: map[int64]types.NodeID{7: "P1", 8: "P1"},
|
||||
received: map[int64]types.NodeID{5: "P1", 6: "P1"},
|
||||
pending: map[int64]p2p.NodeID{7: "P1", 8: "P1"},
|
||||
received: map[int64]p2p.NodeID{5: "P1", 6: "P1"},
|
||||
},
|
||||
args: args{event: pcBlockVerificationFailure{height: 5, firstPeerID: "P1", secondPeerID: "P2"}},
|
||||
wantEvent: noOpEvent{},
|
||||
@@ -1716,7 +1717,7 @@ func TestScHandleBlockVerificationFailure(t *testing.T) {
|
||||
|
||||
func TestScHandleAddNewPeer(t *testing.T) {
|
||||
addP1 := bcAddNewPeer{
|
||||
peerID: types.NodeID("P1"),
|
||||
peerID: p2p.NodeID("P1"),
|
||||
}
|
||||
type args struct {
|
||||
event bcAddNewPeer
|
||||
@@ -1827,7 +1828,7 @@ func TestScHandleTryPrunePeer(t *testing.T) {
|
||||
allB: []int64{1, 2, 3, 4, 5, 6, 7},
|
||||
peerTimeout: time.Second},
|
||||
args: args{event: pruneEv},
|
||||
wantEvent: scPeersPruned{peers: []types.NodeID{"P4", "P5", "P6"}},
|
||||
wantEvent: scPeersPruned{peers: []p2p.NodeID{"P4", "P5", "P6"}},
|
||||
},
|
||||
{
|
||||
name: "mixed peers, finish after pruning",
|
||||
@@ -1925,7 +1926,7 @@ func TestScHandleTrySchedule(t *testing.T) {
|
||||
"P1": {height: 4, state: peerStateReady},
|
||||
"P2": {height: 5, state: peerStateReady}},
|
||||
allB: []int64{1, 2, 3, 4, 5},
|
||||
pending: map[int64]types.NodeID{
|
||||
pending: map[int64]p2p.NodeID{
|
||||
1: "P1", 2: "P1",
|
||||
3: "P2",
|
||||
},
|
||||
@@ -1943,7 +1944,7 @@ func TestScHandleTrySchedule(t *testing.T) {
|
||||
"P1": {height: 8, state: peerStateReady},
|
||||
"P3": {height: 8, state: peerStateReady}},
|
||||
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8},
|
||||
pending: map[int64]types.NodeID{
|
||||
pending: map[int64]p2p.NodeID{
|
||||
1: "P1", 2: "P1",
|
||||
3: "P3", 4: "P3",
|
||||
5: "P2", 6: "P2",
|
||||
@@ -2105,7 +2106,7 @@ func TestScHandle(t *testing.T) {
|
||||
startTime: now,
|
||||
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}},
|
||||
allB: []int64{1, 2, 3},
|
||||
pending: map[int64]types.NodeID{1: "P1"},
|
||||
pending: map[int64]p2p.NodeID{1: "P1"},
|
||||
pendingTime: map[int64]time.Time{1: tick[1]},
|
||||
height: 1,
|
||||
},
|
||||
@@ -2117,7 +2118,7 @@ func TestScHandle(t *testing.T) {
|
||||
startTime: now,
|
||||
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}},
|
||||
allB: []int64{1, 2, 3},
|
||||
pending: map[int64]types.NodeID{1: "P1", 2: "P1"},
|
||||
pending: map[int64]p2p.NodeID{1: "P1", 2: "P1"},
|
||||
pendingTime: map[int64]time.Time{1: tick[1], 2: tick[2]},
|
||||
height: 1,
|
||||
},
|
||||
@@ -2129,7 +2130,7 @@ func TestScHandle(t *testing.T) {
|
||||
startTime: now,
|
||||
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}},
|
||||
allB: []int64{1, 2, 3},
|
||||
pending: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1"},
|
||||
pending: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1"},
|
||||
pendingTime: map[int64]time.Time{1: tick[1], 2: tick[2], 3: tick[3]},
|
||||
height: 1,
|
||||
},
|
||||
@@ -2141,9 +2142,9 @@ func TestScHandle(t *testing.T) {
|
||||
startTime: now,
|
||||
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[4]}},
|
||||
allB: []int64{1, 2, 3},
|
||||
pending: map[int64]types.NodeID{2: "P1", 3: "P1"},
|
||||
pending: map[int64]p2p.NodeID{2: "P1", 3: "P1"},
|
||||
pendingTime: map[int64]time.Time{2: tick[2], 3: tick[3]},
|
||||
received: map[int64]types.NodeID{1: "P1"},
|
||||
received: map[int64]p2p.NodeID{1: "P1"},
|
||||
height: 1,
|
||||
},
|
||||
},
|
||||
@@ -2154,9 +2155,9 @@ func TestScHandle(t *testing.T) {
|
||||
startTime: now,
|
||||
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[5]}},
|
||||
allB: []int64{1, 2, 3},
|
||||
pending: map[int64]types.NodeID{3: "P1"},
|
||||
pending: map[int64]p2p.NodeID{3: "P1"},
|
||||
pendingTime: map[int64]time.Time{3: tick[3]},
|
||||
received: map[int64]types.NodeID{1: "P1", 2: "P1"},
|
||||
received: map[int64]p2p.NodeID{1: "P1", 2: "P1"},
|
||||
height: 1,
|
||||
},
|
||||
},
|
||||
@@ -2167,29 +2168,29 @@ func TestScHandle(t *testing.T) {
|
||||
startTime: now,
|
||||
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[6]}},
|
||||
allB: []int64{1, 2, 3},
|
||||
received: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1"},
|
||||
received: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1"},
|
||||
height: 1,
|
||||
},
|
||||
},
|
||||
{ // processed block 1
|
||||
args: args{event: pcBlockProcessed{peerID: types.NodeID("P1"), height: 1}},
|
||||
args: args{event: pcBlockProcessed{peerID: p2p.NodeID("P1"), height: 1}},
|
||||
wantEvent: noOpEvent{},
|
||||
wantSc: &scTestParams{
|
||||
startTime: now,
|
||||
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[6]}},
|
||||
allB: []int64{2, 3},
|
||||
received: map[int64]types.NodeID{2: "P1", 3: "P1"},
|
||||
received: map[int64]p2p.NodeID{2: "P1", 3: "P1"},
|
||||
height: 2,
|
||||
},
|
||||
},
|
||||
{ // processed block 2
|
||||
args: args{event: pcBlockProcessed{peerID: types.NodeID("P1"), height: 2}},
|
||||
args: args{event: pcBlockProcessed{peerID: p2p.NodeID("P1"), height: 2}},
|
||||
wantEvent: scFinishedEv{},
|
||||
wantSc: &scTestParams{
|
||||
startTime: now,
|
||||
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[6]}},
|
||||
allB: []int64{3},
|
||||
received: map[int64]types.NodeID{3: "P1"},
|
||||
received: map[int64]p2p.NodeID{3: "P1"},
|
||||
height: 3,
|
||||
},
|
||||
},
|
||||
@@ -2205,7 +2206,7 @@ func TestScHandle(t *testing.T) {
|
||||
"P1": {height: 4, state: peerStateReady, lastTouched: tick[6]},
|
||||
"P2": {height: 3, state: peerStateReady, lastTouched: tick[6]}},
|
||||
allB: []int64{1, 2, 3, 4},
|
||||
received: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1"},
|
||||
received: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1"},
|
||||
height: 1,
|
||||
},
|
||||
args: args{event: pcBlockVerificationFailure{height: 1, firstPeerID: "P1", secondPeerID: "P1"}},
|
||||
@@ -2216,7 +2217,7 @@ func TestScHandle(t *testing.T) {
|
||||
"P1": {height: 4, state: peerStateRemoved, lastTouched: tick[6]},
|
||||
"P2": {height: 3, state: peerStateReady, lastTouched: tick[6]}},
|
||||
allB: []int64{1, 2, 3},
|
||||
received: map[int64]types.NodeID{},
|
||||
received: map[int64]p2p.NodeID{},
|
||||
height: 1,
|
||||
},
|
||||
},
|
||||
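The churn above is a mechanical rename from types.NodeID to p2p.NodeID in the blockchain scheduler tests. Both names refer to a string-backed peer identifier, so call sites change only in the package qualifier; a minimal sketch of the pattern (the "P1" value is a placeholder from the tests, and the string-based definition is an assumption carried over from the surrounding code, not something this diff spells out):

package main

import "fmt"

// NodeID is the string-backed peer identifier that this diff moves between
// packages; call sites change only in the package qualifier.
type NodeID string

func main() {
	// A pending-block map keyed by height, as used by scTestParams above.
	pending := map[int64]NodeID{6: NodeID("P1")}
	fmt.Println(pending[6])
}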
@@ -46,8 +46,9 @@ func main() {
rootCA = flag.String("rootcafile", "", "absolute path to root CA")
prometheusAddr = flag.String("prometheus-addr", "", "address for prometheus endpoint (host:port)")

logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false).
With("module", "priv_val")
logger = log.NewTMLogger(
log.NewSyncWriter(os.Stdout),
).With("module", "priv_val")
)
flag.Parse()


@@ -1,6 +1,8 @@
package debug

import (
"os"

"github.com/spf13/cobra"

"github.com/tendermint/tendermint/libs/log"
@@ -15,7 +17,7 @@ var (
flagProfAddr = "pprof-laddr"
flagFrequency = "frequency"

logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
logger = log.NewTMLogger(log.NewSyncWriter(os.Stdout))
)

// DebugCmd defines the root command containing subcommands that assist in

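Both call sites above swap log.MustNewDefaultLogger for the older TMLogger constructor. A minimal sketch of the older construction, assuming the pre-refactor libs/log API shown in this diff:

package main

import (
	"os"

	"github.com/tendermint/tendermint/libs/log"
)

func main() {
	// Plain-text logger over a synchronized stdout writer, tagged with a
	// module key, as in the priv_val and debug commands above.
	logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout)).With("module", "priv_val")
	logger.Info("remote signer starting")
}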
@@ -6,7 +6,7 @@ import (
"github.com/spf13/cobra"

tmjson "github.com/tendermint/tendermint/libs/json"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tendermint/p2p"
)

// GenNodeKeyCmd allows the generation of a node key. It prints JSON-encoded
@@ -20,7 +20,7 @@ var GenNodeKeyCmd = &cobra.Command{
}

func genNodeKey(cmd *cobra.Command, args []string) error {
nodeKey := types.GenNodeKey()
nodeKey := p2p.GenNodeKey()

bz, err := tmjson.Marshal(nodeKey)
if err != nil {

@@ -10,9 +10,10 @@ import (
cfg "github.com/tendermint/tendermint/config"
tmos "github.com/tendermint/tendermint/libs/os"
tmrand "github.com/tendermint/tendermint/libs/rand"
tmtime "github.com/tendermint/tendermint/libs/time"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/privval"
"github.com/tendermint/tendermint/types"
tmtime "github.com/tendermint/tendermint/types/time"
)

// InitFilesCmd initializes a fresh Tendermint Core instance.
@@ -50,8 +51,8 @@ func initFilesWithConfig(config *cfg.Config) error {

if config.Mode == cfg.ModeValidator {
// private validator
privValKeyFile := config.PrivValidator.KeyFile()
privValStateFile := config.PrivValidator.StateFile()
privValKeyFile := config.PrivValidatorKeyFile()
privValStateFile := config.PrivValidatorStateFile()
if tmos.FileExists(privValKeyFile) {
pv, err = privval.LoadFilePV(privValKeyFile, privValStateFile)
if err != nil {
@@ -75,7 +76,7 @@ func initFilesWithConfig(config *cfg.Config) error {
if tmos.FileExists(nodeKeyFile) {
logger.Info("Found node key", "path", nodeKeyFile)
} else {
if _, err := types.LoadOrGenNodeKey(nodeKeyFile); err != nil {
if _, err := p2p.LoadOrGenNodeKey(nodeKeyFile); err != nil {
return err
}
logger.Info("Generated node key", "path", nodeKeyFile)

@@ -66,8 +66,7 @@ var (
trustedHash []byte
trustLevelStr string

logLevel string
logFormat string
verbose bool

primaryKey = []byte("primary")
witnessesKey = []byte("witnesses")
@@ -91,8 +90,7 @@ func init() {
"trusting period that headers can be verified within. Should be significantly less than the unbonding period")
LightCmd.Flags().Int64Var(&trustedHeight, "height", 1, "Trusted header's height")
LightCmd.Flags().BytesHexVar(&trustedHash, "hash", []byte{}, "Trusted header's hash")
LightCmd.Flags().StringVar(&logLevel, "log-level", log.LogLevelInfo, "The logging level (debug|info|warn|error|fatal)")
LightCmd.Flags().StringVar(&logFormat, "log-format", log.LogFormatPlain, "The logging format (text|json)")
LightCmd.Flags().BoolVar(&verbose, "verbose", false, "Verbose output")
LightCmd.Flags().StringVar(&trustLevelStr, "trust-level", "1/3",
"trust level. Must be between 1/3 and 3/3",
)
@@ -102,10 +100,15 @@ func init() {
}

func runProxy(cmd *cobra.Command, args []string) error {
logger, err := log.NewDefaultLogger(logFormat, logLevel, false)
if err != nil {
return err
// Initialize logger.
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
var option log.Option
if verbose {
option, _ = log.AllowLevel("debug")
} else {
option, _ = log.AllowLevel("info")
}
logger = log.NewFilter(logger, option)

chainID = args[0]
logger.Info("Creating client...", "chainID", chainID)

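runProxy now derives the log level from the --verbose flag and wraps the base logger in a filter, instead of validating a level string up front. A sketch of that filtering pattern, assuming the log.AllowLevel and log.NewFilter API used above:

package main

import (
	"os"

	"github.com/tendermint/tendermint/libs/log"
)

// buildLogger mirrors the verbose-flag handling in runProxy above.
func buildLogger(verbose bool) log.Logger {
	logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
	level := "info"
	if verbose {
		level = "debug"
	}
	option, _ := log.AllowLevel(level) // the two levels used here always parse
	return log.NewFilter(logger, option)
}

func main() {
	buildLogger(true).Debug("visible only in verbose mode")
}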
@@ -5,8 +5,8 @@ import (

"github.com/spf13/cobra"

"github.com/tendermint/tendermint/internal/p2p/upnp"
tmjson "github.com/tendermint/tendermint/libs/json"
"github.com/tendermint/tendermint/p2p/upnp"
)

// ProbeUpnpCmd adds capabilities to test the UPnP functionality.

@@ -2,6 +2,7 @@ package commands

import (
"github.com/spf13/cobra"

"github.com/tendermint/tendermint/internal/consensus"
)


@@ -41,14 +41,14 @@ var ResetPrivValidatorCmd = &cobra.Command{
// XXX: this is totally unsafe.
// it's only suitable for testnets.
func resetAll(cmd *cobra.Command, args []string) error {
return ResetAll(config.DBDir(), config.P2P.AddrBookFile(), config.PrivValidator.KeyFile(),
config.PrivValidator.StateFile(), logger)
return ResetAll(config.DBDir(), config.P2P.AddrBookFile(), config.PrivValidatorKeyFile(),
config.PrivValidatorStateFile(), logger)
}

// XXX: this is totally unsafe.
// it's only suitable for testnets.
func resetPrivValidator(cmd *cobra.Command, args []string) error {
return resetFilePV(config.PrivValidator.KeyFile(), config.PrivValidator.StateFile(), logger)
return resetFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile(), logger)
}

// ResetAll removes address book files plus all data, and resets the privValidator data.

@@ -2,6 +2,7 @@ package commands

import (
"fmt"
"os"
"strings"
"time"

@@ -9,12 +10,14 @@ import (
"github.com/spf13/viper"

cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/libs/cli"
tmflags "github.com/tendermint/tendermint/libs/cli/flags"
"github.com/tendermint/tendermint/libs/log"
)

var (
config = cfg.DefaultConfig()
logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
logger = log.NewTMLogger(log.NewSyncWriter(os.Stdout))
ctxTimeout = 4 * time.Second
)

@@ -56,11 +59,19 @@ var RootCmd = &cobra.Command{
return err
}

logger, err = log.NewDefaultLogger(config.LogFormat, config.LogLevel, false)
if config.LogFormat == cfg.LogFormatJSON {
logger = log.NewTMJSONLogger(log.NewSyncWriter(os.Stdout))
}

logger, err = tmflags.ParseLogLevel(config.LogLevel, logger, cfg.DefaultLogLevel)
if err != nil {
return err
}

if viper.GetBool(cli.TraceFlag) {
logger = log.NewTracingLogger(logger)
}

logger = logger.With("module", "main")
return nil
},

@@ -18,6 +18,10 @@ import (
tmos "github.com/tendermint/tendermint/libs/os"
)

var (
defaultRoot = os.ExpandEnv("$HOME/.some/test/dir")
)

// clearConfig clears env vars, the given root dir, and resets viper.
func clearConfig(dir string) {
if err := os.Unsetenv("TMHOME"); err != nil {
@@ -48,10 +52,10 @@ func testRootCmd() *cobra.Command {
}

func testSetup(rootDir string, args []string, env map[string]string) error {
clearConfig(rootDir)
clearConfig(defaultRoot)

rootCmd := testRootCmd()
cmd := cli.PrepareBaseCmd(rootCmd, "TM", rootDir)
cmd := cli.PrepareBaseCmd(rootCmd, "TM", defaultRoot)

// run with the args and env
args = append([]string{rootCmd.Use}, args...)
@@ -59,7 +63,6 @@ func testSetup(rootDir string, args []string, env map[string]string) error {
}

func TestRootHome(t *testing.T) {
defaultRoot := t.TempDir()
newRoot := filepath.Join(defaultRoot, "something-else")
cases := []struct {
args []string
@@ -102,7 +105,6 @@ func TestRootFlagsEnv(t *testing.T) {
{nil, map[string]string{"TM_LOG_LEVEL": "debug"}, "debug"}, // right env
}

defaultRoot := t.TempDir()
for i, tc := range cases {
idxString := strconv.Itoa(i)

@@ -116,7 +118,7 @@ func TestRootFlagsEnv(t *testing.T) {
func TestRootConfig(t *testing.T) {

// write non-default config
nonDefaultLogLvl := "debug"
nonDefaultLogLvl := "abc:debug"
cvals := map[string]string{
"log-level": nonDefaultLogLvl,
}
@@ -127,13 +129,12 @@ func TestRootConfig(t *testing.T) {

logLvl string
}{
{nil, nil, nonDefaultLogLvl}, // should load config
{[]string{"--log-level=info"}, nil, "info"}, // flag over rides
{nil, map[string]string{"TM_LOG_LEVEL": "info"}, "info"}, // env over rides
{nil, nil, nonDefaultLogLvl}, // should load config
{[]string{"--log-level=abc:info"}, nil, "abc:info"}, // flag over rides
{nil, map[string]string{"TM_LOG_LEVEL": "abc:info"}, "abc:info"}, // env over rides
}

for i, tc := range cases {
defaultRoot := t.TempDir()
idxString := strconv.Itoa(i)
clearConfig(defaultRoot)


@@ -11,6 +11,7 @@ import (

cfg "github.com/tendermint/tendermint/config"
tmos "github.com/tendermint/tendermint/libs/os"
nm "github.com/tendermint/tendermint/node"
)

var (
@@ -98,7 +99,7 @@ func AddNodeFlags(cmd *cobra.Command) {

// NewRunNodeCmd returns the command that allows the CLI to start a node.
// It can be used with a custom PrivValidator and in-process ABCI application.
func NewRunNodeCmd(nodeProvider cfg.ServiceProvider) *cobra.Command {
func NewRunNodeCmd(nodeProvider nm.Provider) *cobra.Command {
cmd := &cobra.Command{
Use: "start",
Aliases: []string{"node", "run"},
@@ -117,7 +118,7 @@ func NewRunNodeCmd(nodeProvider cfg.ServiceProvider) *cobra.Command {
return fmt.Errorf("failed to start node: %w", err)
}

logger.Info("started node", "node", n.String())
logger.Info("Started node", "nodeInfo", n.NodeInfo())

// Stop upon receiving SIGTERM or CTRL-C.
tmos.TrapSignal(logger, func() {

@@ -4,6 +4,8 @@ import (
"fmt"

"github.com/spf13/cobra"

"github.com/tendermint/tendermint/p2p"
)

// ShowNodeIDCmd dumps node's ID to the standard output.
@@ -16,11 +18,11 @@ var ShowNodeIDCmd = &cobra.Command{
}

func showNodeID(cmd *cobra.Command, args []string) error {
nodeKeyID, err := config.LoadNodeKeyID()
nodeKey, err := p2p.LoadNodeKey(config.NodeKeyFile())
if err != nil {
return err
}

fmt.Println(nodeKeyID)
fmt.Println(nodeKey.ID)
return nil
}

@@ -33,12 +33,7 @@ func showValidator(cmd *cobra.Command, args []string) error {
protocol, _ := tmnet.ProtocolAndAddress(config.PrivValidator.ListenAddr)
switch protocol {
case "grpc":
pvsc, err := tmgrpc.DialRemoteSigner(
config.PrivValidator,
config.ChainID(),
logger,
config.Instrumentation.Prometheus,
)
pvsc, err := tmgrpc.DialRemoteSigner(config, config.ChainID(), logger)
if err != nil {
return fmt.Errorf("can't connect to remote validator %w", err)
}
@@ -52,12 +47,12 @@ func showValidator(cmd *cobra.Command, args []string) error {
}
default:

keyFilePath := config.PrivValidator.KeyFile()
keyFilePath := config.PrivValidatorKeyFile()
if !tmos.FileExists(keyFilePath) {
return fmt.Errorf("private validator file %s does not exist", keyFilePath)
}

pv, err := privval.LoadFilePV(keyFilePath, config.PrivValidator.StateFile())
pv, err := privval.LoadFilePV(keyFilePath, config.PrivValidatorStateFile())
if err != nil {
return err
}

@@ -14,9 +14,10 @@ import (
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/libs/bytes"
tmrand "github.com/tendermint/tendermint/libs/rand"
tmtime "github.com/tendermint/tendermint/libs/time"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/privval"
"github.com/tendermint/tendermint/types"
tmtime "github.com/tendermint/tendermint/types/time"
)

var (
@@ -273,11 +274,11 @@ func persistentPeersArray(config *cfg.Config) ([]string, error) {
for i := 0; i < nValidators+nNonValidators; i++ {
nodeDir := filepath.Join(outputDir, fmt.Sprintf("%s%d", nodeDirPrefix, i))
config.SetRoot(nodeDir)
nodeKey, err := config.LoadNodeKeyID()
nodeKey, err := p2p.LoadNodeKey(config.NodeKeyFile())
if err != nil {
return []string{}, err
}
peers[i] = nodeKey.AddressString(fmt.Sprintf("%s:%d", hostnameOrIP(i), p2pPort))
peers[i] = p2p.IDAddressString(nodeKey.ID, fmt.Sprintf("%s:%d", hostnameOrIP(i), p2pPort))
}
return peers, nil
}

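Both sides of persistentPeersArray format a peer entry as a node ID joined to a host:port. A sketch of that id@host:port convention (the helper below is a hypothetical local stand-in for p2p.IDAddressString / NodeKey.AddressString, whose bodies are not part of this diff):

package main

import "fmt"

// idAddressString joins a node ID and a host:port into the persistent-peer
// format used above (hypothetical stand-in, not the library function itself).
func idAddressString(id, hostPort string) string {
	return fmt.Sprintf("%s@%s", id, hostPort)
}

func main() {
	fmt.Println(idAddressString("f9baeaa15fedf5e1ef7448dd60f46c01f1a9e9c4", "10.0.0.1:26656"))
}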
@@ -38,8 +38,8 @@ func main() {
// * Supply a genesis doc file from another source
// * Provide their own DB implementation
// can copy this file and use something other than the
// node.NewDefault function
nodeFunc := nm.NewDefault
// DefaultNewNode function
nodeFunc := nm.DefaultNewNode

// Create & start node
rootCmd.AddCommand(cmd.NewRunNodeCmd(nodeFunc))

171
config/config.go
@@ -4,16 +4,10 @@ import (
"encoding/hex"
"errors"
"fmt"
"io/ioutil"
"net/http"
"os"
"path/filepath"
"time"

tmjson "github.com/tendermint/tendermint/libs/json"
"github.com/tendermint/tendermint/libs/log"
tmos "github.com/tendermint/tendermint/libs/os"
"github.com/tendermint/tendermint/types"
)

const (
@@ -22,6 +16,11 @@ const (
// FuzzModeDelay is a mode in which we randomly sleep
FuzzModeDelay

// LogFormatPlain is a format for colored text
LogFormatPlain = "plain"
// LogFormatJSON is a format for json output
LogFormatJSON = "json"

// DefaultLogLevel defines a default log level as INFO.
DefaultLogLevel = "info"

@@ -129,10 +128,34 @@ func (cfg *Config) SetRoot(root string) *Config {
cfg.P2P.RootDir = root
cfg.Mempool.RootDir = root
cfg.Consensus.RootDir = root
cfg.PrivValidator.RootDir = root
return cfg
}

// PrivValidatorClientKeyFile returns the full path to the validator client key file
func (cfg Config) PrivValidatorClientKeyFile() string {
return rootify(cfg.PrivValidator.ClientKey, cfg.RootDir)
}

// PrivValidatorClientCertificateFile returns the full path to the validator client certificate file
func (cfg Config) PrivValidatorClientCertificateFile() string {
return rootify(cfg.PrivValidator.ClientCertificate, cfg.RootDir)
}

// PrivValidatorRootCAFile returns the full path to the root CA certificate file
func (cfg Config) PrivValidatorRootCAFile() string {
return rootify(cfg.PrivValidator.RootCA, cfg.RootDir)
}

// PrivValidatorKeyFile returns the full path to the priv_validator_key.json file
func (cfg Config) PrivValidatorKeyFile() string {
return rootify(cfg.PrivValidator.Key, cfg.RootDir)
}

// PrivValidatorStateFile returns the full path to the priv_validator_state.json file
func (cfg Config) PrivValidatorStateFile() string {
return rootify(cfg.PrivValidator.State, cfg.RootDir)
}

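All of the PrivValidator*File helpers above resolve through rootify, which joins a relative path onto the configured root directory. A minimal sketch, assuming the conventional filepath-based implementation (the helper's body is not shown in this diff):

package main

import (
	"fmt"
	"path/filepath"
)

// rootify leaves absolute paths untouched and prefixes relative ones
// with the root directory.
func rootify(path, root string) string {
	if filepath.IsAbs(path) {
		return path
	}
	return filepath.Join(root, path)
}

func main() {
	fmt.Println(rootify("config/priv_validator_key.json", "/home/node/.tendermint"))
}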
// ValidateBasic performs basic validation (checking param bounds, etc.) and
// returns an error if any check fails.
func (cfg *Config) ValidateBasic() error {
@@ -253,7 +276,7 @@ func DefaultBaseConfig() BaseConfig {
ProxyApp: "tcp://127.0.0.1:26658",
ABCI: "socket",
LogLevel: DefaultLogLevel,
LogFormat: log.LogFormatPlain,
LogFormat: LogFormatPlain,
FastSyncMode: true,
FilterPeers: false,
DBBackend: "goleveldb",
@@ -286,64 +309,39 @@ func (cfg BaseConfig) NodeKeyFile() string {
return rootify(cfg.NodeKey, cfg.RootDir)
}

// LoadNodeKeyID loads the node key ID from the configured NodeKey file.
func (cfg BaseConfig) LoadNodeKeyID() (types.NodeID, error) {
jsonBytes, err := ioutil.ReadFile(cfg.NodeKeyFile())
if err != nil {
return "", err
}
nodeKey := types.NodeKey{}
err = tmjson.Unmarshal(jsonBytes, &nodeKey)
if err != nil {
return "", err
}
nodeKey.ID = types.NodeIDFromPubKey(nodeKey.PubKey())
return nodeKey.ID, nil
}

// LoadOrGenNodeKey attempts to load the NodeKey from the given filePath. If
// the file does not exist, it generates and saves a new NodeKey.
func (cfg BaseConfig) LoadOrGenNodeKeyID() (types.NodeID, error) {
if tmos.FileExists(cfg.NodeKeyFile()) {
nodeKey, err := cfg.LoadNodeKeyID()
if err != nil {
return "", err
}
return nodeKey, nil
}

nodeKey := types.GenNodeKey()

if err := nodeKey.SaveAs(cfg.NodeKeyFile()); err != nil {
return "", err
}

return nodeKey.ID, nil
}

// DBDir returns the full path to the database directory
func (cfg BaseConfig) DBDir() string {
return rootify(cfg.DBPath, cfg.RootDir)
}

func (cfg Config) ArePrivValidatorClientSecurityOptionsPresent() bool {
switch {
case cfg.PrivValidator.RootCA == "":
return false
case cfg.PrivValidator.ClientKey == "":
return false
case cfg.PrivValidator.ClientCertificate == "":
return false
default:
return true
}
}

// ValidateBasic performs basic validation (checking param bounds, etc.) and
// returns an error if any check fails.
func (cfg BaseConfig) ValidateBasic() error {
switch cfg.LogFormat {
case log.LogFormatJSON, log.LogFormatText, log.LogFormatPlain:
case LogFormatPlain, LogFormatJSON:
default:
return errors.New("unknown log format (must be 'plain', 'text' or 'json')")
return errors.New("unknown log format (must be 'plain' or 'json')")
}

switch cfg.Mode {
case ModeFull, ModeValidator, ModeSeed:
case "":
return errors.New("no mode has been set")

default:
return fmt.Errorf("unknown mode: %v", cfg.Mode)
}

return nil
}

@@ -352,8 +350,6 @@ func (cfg BaseConfig) ValidateBasic() error {

// PrivValidatorConfig defines the configuration parameters for running a validator
type PrivValidatorConfig struct {
RootDir string `mapstructure:"home"`

// Path to the JSON file containing the private key to use as a validator in the consensus protocol
Key string `mapstructure:"key-file"`

@@ -384,44 +380,6 @@ func DefaultPrivValidatorConfig() *PrivValidatorConfig {
}
}

// ClientKeyFile returns the full path to the validator client key file
func (cfg *PrivValidatorConfig) ClientKeyFile() string {
return rootify(cfg.ClientKey, cfg.RootDir)
}

// ClientCertificateFile returns the full path to the validator client certificate file
func (cfg *PrivValidatorConfig) ClientCertificateFile() string {
return rootify(cfg.ClientCertificate, cfg.RootDir)
}

// RootCAFile returns the full path to the root CA certificate file
func (cfg *PrivValidatorConfig) RootCAFile() string {
return rootify(cfg.RootCA, cfg.RootDir)
}

// KeyFile returns the full path to the priv_validator_key.json file
func (cfg *PrivValidatorConfig) KeyFile() string {
return rootify(cfg.Key, cfg.RootDir)
}

// StateFile returns the full path to the priv_validator_state.json file
func (cfg *PrivValidatorConfig) StateFile() string {
return rootify(cfg.State, cfg.RootDir)
}

func (cfg *PrivValidatorConfig) AreSecurityOptionsPresent() bool {
switch {
case cfg.RootCA == "":
return false
case cfg.ClientKey == "":
return false
case cfg.ClientCertificate == "":
return false
default:
return true
}
}

//-----------------------------------------------------------------------------
// RPCConfig

@@ -848,15 +806,13 @@ func (cfg *MempoolConfig) ValidateBasic() error {

// StateSyncConfig defines the configuration for the Tendermint state sync service
type StateSyncConfig struct {
Enable bool `mapstructure:"enable"`
TempDir string `mapstructure:"temp-dir"`
RPCServers []string `mapstructure:"rpc-servers"`
TrustPeriod time.Duration `mapstructure:"trust-period"`
TrustHeight int64 `mapstructure:"trust-height"`
TrustHash string `mapstructure:"trust-hash"`
DiscoveryTime time.Duration `mapstructure:"discovery-time"`
ChunkRequestTimeout time.Duration `mapstructure:"chunk-request-timeout"`
Fetchers int32 `mapstructure:"fetchers"`
Enable bool `mapstructure:"enable"`
TempDir string `mapstructure:"temp-dir"`
RPCServers []string `mapstructure:"rpc-servers"`
TrustPeriod time.Duration `mapstructure:"trust-period"`
TrustHeight int64 `mapstructure:"trust-height"`
TrustHash string `mapstructure:"trust-hash"`
DiscoveryTime time.Duration `mapstructure:"discovery-time"`
}

func (cfg *StateSyncConfig) TrustHashBytes() []byte {
@@ -871,10 +827,8 @@ func (cfg *StateSyncConfig) TrustHashBytes() []byte {
// DefaultStateSyncConfig returns a default configuration for the state sync service
func DefaultStateSyncConfig() *StateSyncConfig {
return &StateSyncConfig{
TrustPeriod: 168 * time.Hour,
DiscoveryTime: 15 * time.Second,
ChunkRequestTimeout: 15 * time.Second,
Fetchers: 4,
TrustPeriod: 168 * time.Hour,
DiscoveryTime: 15 * time.Second,
}
}

@@ -889,17 +843,14 @@ func (cfg *StateSyncConfig) ValidateBasic() error {
if len(cfg.RPCServers) == 0 {
return errors.New("rpc-servers is required")
}

if len(cfg.RPCServers) < 2 {
return errors.New("at least two rpc-servers entries is required")
|
||||
}

for _, server := range cfg.RPCServers {
if len(server) == 0 {
return errors.New("found empty rpc-servers entry")
}
}

if cfg.DiscoveryTime != 0 && cfg.DiscoveryTime < 5*time.Second {
return errors.New("discovery time must be 0s or greater than five seconds")
}
@@ -907,29 +858,17 @@ func (cfg *StateSyncConfig) ValidateBasic() error {
if cfg.TrustPeriod <= 0 {
return errors.New("trusted-period is required")
}

if cfg.TrustHeight <= 0 {
return errors.New("trusted-height is required")
}

if len(cfg.TrustHash) == 0 {
return errors.New("trusted-hash is required")
}

_, err := hex.DecodeString(cfg.TrustHash)
if err != nil {
return fmt.Errorf("invalid trusted-hash: %w", err)
}

if cfg.ChunkRequestTimeout < 5*time.Second {
return errors.New("chunk-request-timeout must be at least 5 seconds")
}

if cfg.Fetchers <= 0 {
return errors.New("fetchers is required")
}
}

return nil
}

26
config/db.go
@@ -1,26 +0,0 @@
package config

import (
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/service"
db "github.com/tendermint/tm-db"
)

// ServiceProvider takes a config and a logger and returns a ready to go Node.
type ServiceProvider func(*Config, log.Logger) (service.Service, error)

// DBContext specifies config information for loading a new DB.
type DBContext struct {
ID string
Config *Config
}

// DBProvider takes a DBContext and returns an instantiated DB.
type DBProvider func(*DBContext) (db.DB, error)

// DefaultDBProvider returns a database using the DBBackend and DBDir
// specified in the Config.
func DefaultDBProvider(ctx *DBContext) (db.DB, error) {
dbType := db.BackendType(ctx.Config.DBBackend)
return db.NewDB(ctx.ID, dbType, ctx.Config.DBDir())
}
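On the side of the diff that still ships config/db.go, this provider is the hook a node uses to open its stores. A usage sketch, assuming the ServiceProvider/DBProvider definitions above and the tm-db backends they reference:

package main

import (
	"fmt"

	cfg "github.com/tendermint/tendermint/config"
)

func main() {
	// Open the block store database with the backend and directory from config.
	conf := cfg.DefaultConfig()
	db, err := cfg.DefaultDBProvider(&cfg.DBContext{ID: "blockstore", Config: conf})
	if err != nil {
		panic(err)
	}
	defer db.Close()
	fmt.Println("opened", conf.DBBackend, "database")
}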
@@ -280,8 +280,7 @@ laddr = "{{ .P2P.ListenAddress }}"
# Address to advertise to peers for them to dial
# If empty, will use the same port as the laddr,
# and will introspect on the listener or use UPnP
# to figure out the address. ip and port are required
# example: 159.89.10.97:26656
# to figure out the address.
external-address = "{{ .P2P.ExternalAddress }}"

# Comma separated list of seed nodes to connect to
@@ -426,13 +425,6 @@ discovery-time = "{{ .StateSync.DiscoveryTime }}"
# Will create a new, randomly named directory within, and remove it when done.
temp-dir = "{{ .StateSync.TempDir }}"

# The timeout duration before re-requesting a chunk, possibly from a different
# peer (default: 15 seconds).
chunk-request-timeout = "{{ .StateSync.ChunkRequestTimeout }}"

# The number of concurrent chunk and block fetchers to run (default: 4).
fetchers = "{{ .StateSync.Fetchers }}"

#######################################################
### Fast Sync Configuration Connections ###
#######################################################

@@ -20,14 +20,3 @@ func CreateBatchVerifier(pk crypto.PubKey) (crypto.BatchVerifier, bool) {
// case where the key does not support batch verification
return nil, false
}

// SupportsBatchVerifier checks if a key type implements the batch verifier
// interface.
func SupportsBatchVerifier(pk crypto.PubKey) bool {
switch pk.Type() {
case ed25519.KeyType, sr25519.KeyType:
return true
}

return false
}

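CreateBatchVerifier and SupportsBatchVerifier let callers opt into batching only when the key type supports it. A usage sketch, assuming the hunk above lives in the crypto/batch package and using the single-bool Verify signature from this side of the change:

package main

import (
	"fmt"

	"github.com/tendermint/tendermint/crypto/batch"
	"github.com/tendermint/tendermint/crypto/ed25519"
)

func main() {
	priv := ed25519.GenPrivKey()
	pub := priv.PubKey()
	msg := []byte("batch me")
	sig, _ := priv.Sign(msg)

	verifier, ok := batch.CreateBatchVerifier(pub)
	if !ok {
		// Key type without batch support: fall back to single verification.
		fmt.Println(pub.VerifySignature(msg, sig))
		return
	}
	if err := verifier.Add(pub, msg, sig); err != nil {
		panic(err)
	}
	fmt.Println(verifier.Verify())
}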
@@ -46,9 +46,7 @@ type Symmetric interface {
type BatchVerifier interface {
// Add appends an entry into the BatchVerifier.
Add(key PubKey, message, signature []byte) error
// Verify verifies all the entries in the BatchVerifier, and returns
// if every signature in the batch is valid, and a vector of bools
// indicating the verification status of each signature (in the order
// that signatures were added to the batch).
Verify() (bool, []bool)
// Verify verifies all the entries in the BatchVerifier.
// If the verification fails it is unknown which entry failed and each entry will need to be verified individually.
Verify() bool
}

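Dropping the per-entry validity vector means a failed batch no longer says which entry was bad, so callers that need that information must re-verify individually. A sketch of adapting a call site to the single-bool signature:

package main

import (
	"fmt"

	"github.com/tendermint/tendermint/crypto"
	"github.com/tendermint/tendermint/crypto/ed25519"
)

// verifyBatch re-checks each signature when the batch fails, recovering the
// per-entry results that Verify() (bool, []bool) used to return directly.
func verifyBatch(pubs []crypto.PubKey, msgs, sigs [][]byte) []bool {
	v := ed25519.NewBatchVerifier()
	for i := range pubs {
		if err := v.Add(pubs[i], msgs[i], sigs[i]); err != nil {
			panic(err)
		}
	}
	valid := make([]bool, len(pubs))
	if v.Verify() {
		for i := range valid {
			valid[i] = true
		}
		return valid
	}
	for i := range pubs {
		valid[i] = pubs[i].VerifySignature(msgs[i], sigs[i])
	}
	return valid
}

func main() {
	priv := ed25519.GenPrivKey()
	msg := []byte("hello")
	sig, _ := priv.Sign(msg)
	fmt.Println(verifyBatch([]crypto.PubKey{priv.PubKey()}, [][]byte{msg}, [][]byte{sig}))
}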
@@ -28,37 +28,22 @@ func BenchmarkVerification(b *testing.B) {
}

func BenchmarkVerifyBatch(b *testing.B) {
msg := []byte("BatchVerifyTest")

for _, sigsCount := range []int{1, 8, 64, 1024} {
sigsCount := sigsCount
b.Run(fmt.Sprintf("sig-count-%d", sigsCount), func(b *testing.B) {
// Pre-generate all of the keys, and signatures, but do not
// benchmark key-generation and signing.
pubs := make([]crypto.PubKey, 0, sigsCount)
sigs := make([][]byte, 0, sigsCount)
b.ReportAllocs()
v := NewBatchVerifier()
for i := 0; i < sigsCount; i++ {
priv := GenPrivKey()
pub := priv.PubKey()
msg := []byte("BatchVerifyTest")
sig, _ := priv.Sign(msg)
pubs = append(pubs, priv.PubKey().(PubKey))
sigs = append(sigs, sig)
err := v.Add(pub, msg, sig)
require.NoError(b, err)
}
b.ResetTimer()

b.ReportAllocs()
// NOTE: dividing by n so that metrics are per-signature
for i := 0; i < b.N/sigsCount; i++ {
// The benchmark could just benchmark the Verify()
// routine, but there is non-trivial overhead associated
// with BatchVerifier.Add(), which should be included
// in the benchmark.
v := NewBatchVerifier()
for i := 0; i < sigsCount; i++ {
err := v.Add(pubs[i], msg, sigs[i])
require.NoError(b, err)
}

if ok, _ := v.Verify(); !ok {
if !v.Verify() {
b.Fatal("signature set failed batch verification")
}
}

@@ -2,13 +2,13 @@ package ed25519

import (
"bytes"
"crypto/ed25519"
"crypto/subtle"
"errors"
"fmt"
"io"

"github.com/oasisprotocol/curve25519-voi/primitives/ed25519"
"github.com/oasisprotocol/curve25519-voi/primitives/ed25519/extra/cache"
"github.com/hdevalence/ed25519consensus"

"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/tmhash"
@@ -17,18 +17,7 @@ import (

//-------------------------------------

var (
_ crypto.PrivKey = PrivKey{}

// curve25519-voi's Ed25519 implementation supports configurable
// verification behavior, and tendermint uses the ZIP-215 verification
// semantics.
verifyOptions = &ed25519.Options{
Verify: ed25519.VerifyOptionsZIP_215,
}

cachingVerifier = cache.NewVerifier(cache.NewLRUCache(cacheSize))
)
var _ crypto.PrivKey = PrivKey{}

const (
PrivKeyName = "tendermint/PrivKeyEd25519"
@@ -45,14 +34,6 @@ const (
SeedSize = 32

KeyType = "ed25519"

// cacheSize is the number of public keys that will be cached in
// an expanded format for repeated signature verification.
//
// TODO/perf: Either this should exclude single verification, or be
// tuned to `> validatorSize + maxTxnsPerBlock` to avoid cache
// thrashing.
cacheSize = 4096
)

func init() {
@@ -126,12 +107,14 @@ func GenPrivKey() PrivKey {

// genPrivKey generates a new ed25519 private key using the provided reader.
func genPrivKey(rand io.Reader) PrivKey {
_, priv, err := ed25519.GenerateKey(rand)
seed := make([]byte, SeedSize)

_, err := io.ReadFull(rand, seed)
if err != nil {
panic(err)
}

return PrivKey(priv)
return PrivKey(ed25519.NewKeyFromSeed(seed))
}

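The rewritten genPrivKey reads a 32-byte seed and expands it, which is also what crypto/ed25519's GenerateKey does internally in current Go releases, so the two paths should yield the same key for the same reader. A sketch of that equivalence using the standard library (deterministic math/rand readers for illustration only, never for real key generation):

package main

import (
	"bytes"
	"crypto/ed25519"
	"fmt"
	"io"
	mrand "math/rand"
)

func main() {
	r1 := mrand.New(mrand.NewSource(1))
	r2 := mrand.New(mrand.NewSource(1))

	seed := make([]byte, ed25519.SeedSize)
	if _, err := io.ReadFull(r1, seed); err != nil {
		panic(err)
	}
	fromSeed := ed25519.NewKeyFromSeed(seed)

	_, generated, err := ed25519.GenerateKey(r2)
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(fromSeed, generated)) // true: GenerateKey expands a read seed
}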
// GenPrivKeyFromSecret hashes the secret with SHA2, and uses
@@ -170,7 +153,7 @@ func (pubKey PubKey) VerifySignature(msg []byte, sig []byte) bool {
return false
}

return cachingVerifier.VerifyWithOptions(ed25519.PublicKey(pubKey), msg, sig, verifyOptions)
return ed25519consensus.Verify(ed25519.PublicKey(pubKey), msg, sig)
}

func (pubKey PubKey) String() string {
@@ -192,36 +175,30 @@ func (pubKey PubKey) Equals(other crypto.PubKey) bool {
var _ crypto.BatchVerifier = &BatchVerifier{}

// BatchVerifier implements batch verification for ed25519.
// https://github.com/hdevalence/ed25519consensus is used for batch verification
type BatchVerifier struct {
*ed25519.BatchVerifier
ed25519consensus.BatchVerifier
}

func NewBatchVerifier() crypto.BatchVerifier {
return &BatchVerifier{ed25519.NewBatchVerifier()}
return &BatchVerifier{ed25519consensus.NewBatchVerifier()}
}

func (b *BatchVerifier) Add(key crypto.PubKey, msg, signature []byte) error {
pkEd, ok := key.(PubKey)
if !ok {
return fmt.Errorf("pubkey is not Ed25519")
}

pkBytes := pkEd.Bytes()

if l := len(pkBytes); l != PubKeySize {
if l := len(key.Bytes()); l != PubKeySize {
return fmt.Errorf("pubkey size is incorrect; expected: %d, got %d", PubKeySize, l)
}

// check that the signature is the correct length
if len(signature) != SignatureSize {
// check that the signature is the correct length & the last byte is set correctly
if len(signature) != SignatureSize || signature[63]&224 != 0 {
return errors.New("invalid signature")
}

cachingVerifier.AddWithOptions(b.BatchVerifier, ed25519.PublicKey(pkBytes), msg, signature, verifyOptions)
b.BatchVerifier.Add(ed25519.PublicKey(key.Bytes()), msg, signature)

return nil
}

func (b *BatchVerifier) Verify() (bool, []bool) {
return b.BatchVerifier.Verify(crypto.CReader())
func (b *BatchVerifier) Verify() bool {
return b.BatchVerifier.Verify()
}

@@ -50,6 +50,5 @@ func TestBatchSafe(t *testing.T) {
require.NoError(t, err)
}

ok, _ := v.Verify()
require.True(t, ok)
require.True(t, v.Verify())
}

@@ -9,11 +9,10 @@ import (
"math/big"

secp256k1 "github.com/btcsuite/btcd/btcec"
"golang.org/x/crypto/ripemd160" // nolint: staticcheck // necessary for Bitcoin address format

"github.com/tendermint/tendermint/crypto"
tmjson "github.com/tendermint/tendermint/libs/json"

// necessary for Bitcoin address format
"golang.org/x/crypto/ripemd160" // nolint: staticcheck
)

//-------------------------------------

@@ -3,44 +3,40 @@ package sr25519
import (
"fmt"

"github.com/oasisprotocol/curve25519-voi/primitives/sr25519"
schnorrkel "github.com/ChainSafe/go-schnorrkel"

"github.com/tendermint/tendermint/crypto"
)

var _ crypto.BatchVerifier = &BatchVerifier{}
var _ crypto.BatchVerifier = BatchVerifier{}

// BatchVerifier implements batch verification for sr25519.
// https://github.com/ChainSafe/go-schnorrkel is used for batch verification
type BatchVerifier struct {
*sr25519.BatchVerifier
*schnorrkel.BatchVerifier
}

func NewBatchVerifier() crypto.BatchVerifier {
return &BatchVerifier{sr25519.NewBatchVerifier()}
return BatchVerifier{schnorrkel.NewBatchVerifier()}
}

func (b *BatchVerifier) Add(key crypto.PubKey, msg, signature []byte) error {
pk, ok := key.(PubKey)
if !ok {
return fmt.Errorf("sr25519: pubkey is not sr25519")
func (b BatchVerifier) Add(key crypto.PubKey, msg, sig []byte) error {
var sig64 [SignatureSize]byte
copy(sig64[:], sig)
signature := new(schnorrkel.Signature)
err := signature.Decode(sig64)
if err != nil {
return fmt.Errorf("unable to decode signature: %w", err)
}

var srpk sr25519.PublicKey
if err := srpk.UnmarshalBinary(pk); err != nil {
return fmt.Errorf("sr25519: invalid public key: %w", err)
}
signingContext := schnorrkel.NewSigningContext([]byte{}, msg)

var sig sr25519.Signature
if err := sig.UnmarshalBinary(signature); err != nil {
return fmt.Errorf("sr25519: unable to decode signature: %w", err)
}
var pk [PubKeySize]byte
copy(pk[:], key.Bytes())

st := signingCtx.NewTranscriptBytes(msg)
b.BatchVerifier.Add(&srpk, st, &sig)

return nil
return b.BatchVerifier.Add(signingContext, signature, schnorrkel.NewPublicKey(pk))
}

func (b *BatchVerifier) Verify() (bool, []bool) {
return b.BatchVerifier.Verify(crypto.CReader())
func (b BatchVerifier) Verify() bool {
return b.BatchVerifier.Verify()
}

@@ -28,37 +28,22 @@ func BenchmarkVerification(b *testing.B) {
}

func BenchmarkVerifyBatch(b *testing.B) {
msg := []byte("BatchVerifyTest")

for _, sigsCount := range []int{1, 8, 64, 1024} {
sigsCount := sigsCount
b.Run(fmt.Sprintf("sig-count-%d", sigsCount), func(b *testing.B) {
// Pre-generate all of the keys, and signatures, but do not
// benchmark key-generation and signing.
pubs := make([]crypto.PubKey, 0, sigsCount)
sigs := make([][]byte, 0, sigsCount)
for i := 0; i < sigsCount; i++ {
priv := GenPrivKey()
sig, _ := priv.Sign(msg)
pubs = append(pubs, priv.PubKey().(PubKey))
sigs = append(sigs, sig)
}
b.ResetTimer()

for _, n := range []int{1, 8, 64, 1024} {
n := n
b.Run(fmt.Sprintf("sig-count-%d", n), func(b *testing.B) {
b.ReportAllocs()
v := NewBatchVerifier()
for i := 0; i < n; i++ {
priv := GenPrivKey()
pub := priv.PubKey()
msg := []byte("BatchVerifyTest")
sig, _ := priv.Sign(msg)
err := v.Add(pub, msg, sig)
require.NoError(b, err)
}
// NOTE: dividing by n so that metrics are per-signature
for i := 0; i < b.N/sigsCount; i++ {
// The benchmark could just benchmark the Verify()
// routine, but there is non-trivial overhead associated
// with BatchVerifier.Add(), which should be included
// in the benchmark.
v := NewBatchVerifier()
for i := 0; i < sigsCount; i++ {
err := v.Add(pubs[i], msg, sigs[i])
require.NoError(b, err)
}

if ok, _ := v.Verify(); !ok {
for i := 0; i < b.N/n; i++ {
if !v.Verify() {
b.Fatal("signature set failed batch verification")
}
}

@@ -1,13 +1,23 @@
package sr25519

import tmjson "github.com/tendermint/tendermint/libs/json"
import (
"github.com/tendermint/tendermint/crypto"
tmjson "github.com/tendermint/tendermint/libs/json"
)

var _ crypto.PrivKey = PrivKey{}

const (
PrivKeyName = "tendermint/PrivKeySr25519"
PubKeyName = "tendermint/PubKeySr25519"

// SignatureSize is the size of an Sr25519 signature. Namely the size of a compressed
// Sr25519 point, and a field element. Both of which are 32 bytes.
SignatureSize = 64
)

func init() {

tmjson.RegisterType(PubKey{}, PubKeyName)
tmjson.RegisterType(PrivKey{}, PrivKeyName)
}

@@ -1,84 +1,70 @@
package sr25519

import (
"encoding/json"
"crypto/subtle"
"fmt"
"io"

"github.com/oasisprotocol/curve25519-voi/primitives/sr25519"

"github.com/tendermint/tendermint/crypto"

schnorrkel "github.com/ChainSafe/go-schnorrkel"
)

var (
_ crypto.PrivKey = PrivKey{}
// PrivKeySize is the number of bytes in an Sr25519 private key.
const PrivKeySize = 32

signingCtx = sr25519.NewSigningContext([]byte{})
)
// PrivKeySr25519 implements crypto.PrivKey.
type PrivKey []byte

const (
// PrivKeySize is the size of an sr25519 private key in bytes.
PrivKeySize = sr25519.MiniSecretKeySize
|
||||
|
||||
KeyType = "sr25519"
|
||||
)
|
||||
|
||||
// PrivKey implements crypto.PrivKey.
|
||||
type PrivKey struct {
|
||||
msk sr25519.MiniSecretKey
|
||||
kp *sr25519.KeyPair
|
||||
}
|
||||
|
||||
// Bytes returns the byte-encoded PrivKey.
|
||||
// Bytes returns the byte representation of the PrivKey.
|
||||
func (privKey PrivKey) Bytes() []byte {
|
||||
if privKey.kp == nil {
|
||||
return nil
|
||||
}
|
||||
return privKey.msk[:]
|
||||
return []byte(privKey)
|
||||
}
|
||||
|
||||
// Sign produces a signature on the provided message.
|
||||
func (privKey PrivKey) Sign(msg []byte) ([]byte, error) {
|
||||
if privKey.kp == nil {
|
||||
return nil, fmt.Errorf("sr25519: uninitialized private key")
|
||||
}
|
||||
|
||||
st := signingCtx.NewTranscriptBytes(msg)
|
||||
|
||||
sig, err := privKey.kp.Sign(crypto.CReader(), st)
|
||||
var p [PrivKeySize]byte
|
||||
copy(p[:], privKey)
|
||||
miniSecretKey, err := schnorrkel.NewMiniSecretKeyFromRaw(p)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("sr25519: failed to sign message: %w", err)
|
||||
return []byte{}, err
|
||||
}
|
||||
secretKey := miniSecretKey.ExpandEd25519()
|
||||
|
||||
sigBytes, err := sig.MarshalBinary()
|
||||
signingContext := schnorrkel.NewSigningContext([]byte{}, msg)
|
||||
|
||||
sig, err := secretKey.Sign(signingContext)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("sr25519: failed to serialize signature: %w", err)
|
||||
return []byte{}, err
|
||||
}
|
||||
|
||||
return sigBytes, nil
|
||||
sigBytes := sig.Encode()
|
||||
return sigBytes[:], nil
|
||||
}
|
||||
|
||||
// PubKey gets the corresponding public key from the private key.
|
||||
//
|
||||
// Panics if the private key is not initialized.
|
||||
func (privKey PrivKey) PubKey() crypto.PubKey {
	if privKey.kp == nil {
		panic("sr25519: uninitialized private key")
	}

	b, err := privKey.kp.PublicKey().MarshalBinary()
	var p [PrivKeySize]byte
	copy(p[:], privKey)
	miniSecretKey, err := schnorrkel.NewMiniSecretKeyFromRaw(p)
	if err != nil {
		panic("sr25519: failed to serialize public key: " + err.Error())
		panic(fmt.Sprintf("Invalid private key: %v", err))
	}
	secretKey := miniSecretKey.ExpandEd25519()

	return PubKey(b)
	pubkey, err := secretKey.Public()
	if err != nil {
		panic(fmt.Sprintf("Could not generate public key: %v", err))
	}
	key := pubkey.Encode()
	return PubKey(key[:])
}

// Equals - you probably don't need to use this.
// Runs in constant time based on length of the keys.
func (privKey PrivKey) Equals(other crypto.PrivKey) bool {
	if otherSr, ok := other.(PrivKey); ok {
		return privKey.msk.Equal(&otherSr.msk)
	if otherEd, ok := other.(PrivKey); ok {
		return subtle.ConstantTimeCompare(privKey[:], otherEd[:]) == 1
	}
	return false
}
@@ -87,44 +73,6 @@ func (privKey PrivKey) Type() string {
	return KeyType
}

func (privKey PrivKey) MarshalJSON() ([]byte, error) {
	var b []byte

	// Handle uninitialized private keys gracefully.
	if privKey.kp != nil {
		b = privKey.Bytes()
	}

	return json.Marshal(b)
}

func (privKey *PrivKey) UnmarshalJSON(data []byte) error {
	for i := range privKey.msk {
		privKey.msk[i] = 0
	}
	privKey.kp = nil

	var b []byte
	if err := json.Unmarshal(data, &b); err != nil {
		return fmt.Errorf("sr25519: failed to deserialize JSON: %w", err)
	}
	if len(b) == 0 {
		return nil
	}

	msk, err := sr25519.NewMiniSecretKeyFromBytes(b)
	if err != nil {
		return err
	}

	sk := msk.ExpandEd25519()

	privKey.msk = *msk
	privKey.kp = sk.KeyPair()

	return nil
}

// GenPrivKey generates a new sr25519 private key.
// It uses OS randomness in conjunction with the current global random seed
// in tendermint/libs/common to generate the private key.
@@ -132,18 +80,20 @@ func GenPrivKey() PrivKey {
	return genPrivKey(crypto.CReader())
}

func genPrivKey(rng io.Reader) PrivKey {
	msk, err := sr25519.GenerateMiniSecretKey(rng)
// genPrivKey generates a new sr25519 private key using the provided reader.
func genPrivKey(rand io.Reader) PrivKey {
	var seed [64]byte

	out := make([]byte, 64)
	_, err := io.ReadFull(rand, out)
	if err != nil {
		panic("sr25519: failed to generate MiniSecretKey: " + err.Error())
		panic(err)
	}

	sk := msk.ExpandEd25519()
	copy(seed[:], out)

	return PrivKey{
		msk: *msk,
		kp:  sk.KeyPair(),
	}
	key := schnorrkel.NewMiniSecretKey(seed).ExpandEd25519().Encode()
	return key[:]
}

// GenPrivKeyFromSecret hashes the secret with SHA2, and uses
@@ -152,14 +102,9 @@ func genPrivKey(rng io.Reader) PrivKey {
// if it's derived from user input.
func GenPrivKeyFromSecret(secret []byte) PrivKey {
	seed := crypto.Sha256(secret) // Not Ripemd160 because we want 32 bytes.

	var privKey PrivKey
	if err := privKey.msk.UnmarshalBinary(seed); err != nil {
		panic("sr25519: failed to deserialize MiniSecretKey: " + err.Error())
	}

	sk := privKey.msk.ExpandEd25519()
	privKey.kp = sk.KeyPair()

	return privKey
	var bz [PrivKeySize]byte
	copy(bz[:], seed)
	privKey, _ := schnorrkel.NewMiniSecretKeyFromRaw(bz)
	key := privKey.ExpandEd25519().Encode()
	return key[:]
}
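For orientation, here is a minimal usage sketch (not part of the diff) of deterministic derivation with `GenPrivKeyFromSecret`: the same secret always yields the same key pair. The secret literal is illustrative.

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/crypto/sr25519"
)

func main() {
	// Deterministic derivation: equal secrets yield equal keys.
	a := sr25519.GenPrivKeyFromSecret([]byte("example secret"))
	b := sr25519.GenPrivKeyFromSecret([]byte("example secret"))
	fmt.Println(a.Equals(b))                   // true
	fmt.Println(a.PubKey().Equals(b.PubKey())) // true
}
```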
@@ -4,65 +4,74 @@ import (
	"bytes"
	"fmt"

	"github.com/oasisprotocol/curve25519-voi/primitives/sr25519"

	"github.com/tendermint/tendermint/crypto"
	"github.com/tendermint/tendermint/crypto/tmhash"

	schnorrkel "github.com/ChainSafe/go-schnorrkel"
)

var _ crypto.PubKey = PubKey{}

// PubKeySize is the number of bytes in an Sr25519 public key.
const (
	// PubKeySize is the size of a sr25519 public key in bytes.
	PubKeySize = sr25519.PublicKeySize

	// SignatureSize is the size of a sr25519 signature in bytes.
	SignatureSize = sr25519.SignatureSize
	PubKeySize = 32
	KeyType    = "sr25519"
)

// PubKey implements crypto.PubKey.
// PubKeySr25519 implements crypto.PubKey for the Sr25519 signature scheme.
type PubKey []byte

// Address is the SHA256-20 of the raw pubkey bytes.
func (pubKey PubKey) Address() crypto.Address {
	if len(pubKey) != PubKeySize {
		panic("pubkey is incorrect size")
	}
	return crypto.Address(tmhash.SumTruncated(pubKey))
	return crypto.Address(tmhash.SumTruncated(pubKey[:]))
}

// Bytes returns the PubKey byte format.
// Bytes returns the byte representation of the PubKey.
func (pubKey PubKey) Bytes() []byte {
	return []byte(pubKey)
}

func (pubKey PubKey) Equals(other crypto.PubKey) bool {
	if otherSr, ok := other.(PubKey); ok {
		return bytes.Equal(pubKey[:], otherSr[:])
func (pubKey PubKey) VerifySignature(msg []byte, sig []byte) bool {
	// make sure we use the same algorithm to sign
	if len(sig) != SignatureSize {
		return false
	}
	var sig64 [SignatureSize]byte
	copy(sig64[:], sig)

	return false
}

func (pubKey PubKey) VerifySignature(msg []byte, sigBytes []byte) bool {
	var srpk sr25519.PublicKey
	if err := srpk.UnmarshalBinary(pubKey); err != nil {
	publicKey := &(schnorrkel.PublicKey{})
	var p [PubKeySize]byte
	copy(p[:], pubKey)
	err := publicKey.Decode(p)
	if err != nil {
		return false
	}

	var sig sr25519.Signature
	if err := sig.UnmarshalBinary(sigBytes); err != nil {
	signingContext := schnorrkel.NewSigningContext([]byte{}, msg)

	signature := &(schnorrkel.Signature{})
	err = signature.Decode(sig64)
	if err != nil {
		return false
	}

	st := signingCtx.NewTranscriptBytes(msg)
	return srpk.Verify(st, &sig)
}

func (pubKey PubKey) Type() string {
	return KeyType
	return publicKey.Verify(signature, signingContext)
}

func (pubKey PubKey) String() string {
	return fmt.Sprintf("PubKeySr25519{%X}", []byte(pubKey))
}

// Equals - checks that two public keys are the same.
// Runs in constant time based on length of the keys.
func (pubKey PubKey) Equals(other crypto.PubKey) bool {
	if otherEd, ok := other.(PubKey); ok {
		return bytes.Equal(pubKey[:], otherEd[:])
	}
	return false
}

func (pubKey PubKey) Type() string {
	return KeyType

}
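As a usage sketch of the surface both `VerifySignature` implementations above serve (an assumed example, not taken from the diff): sign with the private key, verify with the derived public key.

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/crypto/sr25519"
)

func main() {
	privKey := sr25519.GenPrivKey()
	pubKey := privKey.PubKey()

	msg := []byte("sign me")
	sig, err := privKey.Sign(msg)
	if err != nil {
		panic(err)
	}

	// A matching message verifies; a wrong-length signature is rejected early.
	fmt.Println(pubKey.VerifySignature(msg, sig))      // true
	fmt.Println(pubKey.VerifySignature(msg, sig[:16])) // false
}
```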
@@ -1,8 +1,6 @@
package sr25519_test

import (
	"encoding/base64"
	"encoding/json"
	"testing"

	"github.com/stretchr/testify/assert"
@@ -13,6 +11,7 @@ import (
)

func TestSignAndValidateSr25519(t *testing.T) {

	privKey := sr25519.GenPrivKey()
	pubKey := privKey.PubKey()

@@ -33,7 +32,6 @@ func TestSignAndValidateSr25519(t *testing.T) {

func TestBatchSafe(t *testing.T) {
	v := sr25519.NewBatchVerifier()
	vFail := sr25519.NewBatchVerifier()
	for i := 0; i <= 38; i++ {
		priv := sr25519.GenPrivKey()
		pub := priv.PubKey()
@@ -50,49 +48,9 @@ func TestBatchSafe(t *testing.T) {

		err = v.Add(pub, msg, sig)
		require.NoError(t, err)

		switch i % 2 {
		case 0:
			err = vFail.Add(pub, msg, sig)
		case 1:
			msg[2] ^= byte(0x01)
			err = vFail.Add(pub, msg, sig)
		}
		require.NoError(t, err)
	}

	ok, valid := v.Verify()
	require.True(t, ok, "failed batch verification")
	for i, ok := range valid {
		require.Truef(t, ok, "sig[%d] should be marked valid", i)
	}

	ok, valid = vFail.Verify()
	require.False(t, ok, "succeeded batch verification (invalid batch)")
	for i, ok := range valid {
		expected := (i % 2) == 0
		require.Equalf(t, expected, ok, "sig[%d] should be %v", i, expected)
	if !v.Verify() {
		t.Error("failed batch verification")
	}
}

func TestJSON(t *testing.T) {
	privKey := sr25519.GenPrivKey()

	t.Run("PrivKey", func(t *testing.T) {
		b, err := json.Marshal(privKey)
		require.NoError(t, err)

		// b should be the base64 encoded MiniSecretKey, enclosed by doublequotes.
		b64 := base64.StdEncoding.EncodeToString(privKey.Bytes())
		b64 = "\"" + b64 + "\""
		require.Equal(t, []byte(b64), b)

		var privKey2 sr25519.PrivKey
		err = json.Unmarshal(b, &privKey2)
		require.NoError(t, err)
		require.Len(t, privKey2.Bytes(), sr25519.PrivKeySize)
		require.EqualValues(t, privKey.Bytes(), privKey2.Bytes())
	})

	// PubKeys are just []byte, so there is no special handling.
}
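For reference, a compact sketch of the batch-verification flow the test above exercises, using the two-value `Verify` of the new API; the loop bound and messages are illustrative.

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/crypto/sr25519"
)

func main() {
	v := sr25519.NewBatchVerifier()
	for i := 0; i < 4; i++ {
		priv := sr25519.GenPrivKey()
		msg := []byte{byte(i)} // illustrative message
		sig, err := priv.Sign(msg)
		if err != nil {
			panic(err)
		}
		if err := v.Add(priv.PubKey(), msg, sig); err != nil {
			panic(err)
		}
	}

	// Verify reports overall success plus per-signature validity.
	ok, valid := v.Verify()
	fmt.Println(ok)    // true
	fmt.Println(valid) // [true true true true]
}
```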
@@ -4,8 +4,7 @@

- 04/09/2020: Initial Draft (Unabridged)
- 07/09/2020: First Version
- 13/03/2021: Amendment to accommodate forward lunatic attack
- 29/06/2021: Add information about ABCI specific fields
- 13.03.21: Amendment to accommodate forward lunatic attack

## Scope

@@ -112,11 +111,6 @@ We have two concrete types of evidence that fulfil this interface

```go
type LightClientAttackEvidence struct {
	ConflictingBlock *LightBlock
	CommonHeight     int64 // the last height at which the primary provider and witness provider had the same header

	// abci specific information
	ByzantineValidators []*Validator // validators in the validator set that misbehaved in creating the conflicting block
	TotalVotingPower    int64        // total voting power of the validator set at the common height
	Timestamp           time.Time    // timestamp of the block at the common height
}
```

where the `Hash()` is the hash of the header and commonHeight.

@@ -127,11 +121,6 @@ Note: It was also discussed whether to include the commit hash which captures th

```go
type DuplicateVoteEvidence struct {
	VoteA *Vote
	VoteB *Vote

	// abci specific information
	TotalVotingPower int64
	ValidatorPower   int64
	Timestamp        time.Time
}
```

where the `Hash()` is the hash of the two votes

@@ -185,11 +174,19 @@ For `LightClientAttack`

- Lastly, for each validator, check the lookup table to make sure there already isn't evidence against this validator

After verification we persist the evidence with the key `height/hash` to the pending evidence database in the evidence pool.
After verification we persist the evidence with the key `height/hash` to the pending evidence database in the evidence pool with the following format:

#### ABCI Evidence

```go
type EvidenceInfo struct {
	ev               Evidence
	time             time.Time
	validators       []Validator
	totalVotingPower int64
}
```

`time`, `validators` and `totalVotingPower` are needed to form the `abci.Evidence` that we send to the application layer. More on this to come later.

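As an illustration of how those fields could be fanned out to the application, a hypothetical sketch follows; the exact `abci.Evidence` field layout is an assumption here, not taken from this document.

```go
// Hypothetical sketch: fan a stored EvidenceInfo out into one abci.Evidence
// per misbehaving validator. The abci.Evidence field names are assumptions.
func toABCIEvidence(info EvidenceInfo) []abci.Evidence {
	out := make([]abci.Evidence, 0, len(info.validators))
	for _, val := range info.validators {
		out = append(out, abci.Evidence{
			Validator:        abci.Validator{Address: val.Address, Power: val.VotingPower},
			Height:           info.ev.Height(),
			Time:             info.time,
			TotalVotingPower: info.totalVotingPower,
		})
	}
	return out
}
```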
Both evidence structures contain data (such as the timestamp) that must be passed to the application but does not strictly constitute evidence of misbehaviour. As such, these fields are verified last. If any of these fields are invalid from a node's perspective, i.e. they don't correspond with its state, the node will reconstruct a new evidence struct from the existing fields and repopulate the abci specific fields with its own state data, as sketched below.

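A minimal sketch of that repopulation step for `DuplicateVoteEvidence`, assuming access to the node's own validator set at the evidence height (`valSet` and `blockTime` are illustrative inputs):

```go
// Rebuild the evidence, keeping the votes but trusting only local state for
// the abci specific fields.
func repopulate(ev *DuplicateVoteEvidence, valSet *ValidatorSet, blockTime time.Time) *DuplicateVoteEvidence {
	_, val := valSet.GetByAddress(ev.VoteA.ValidatorAddress)
	return &DuplicateVoteEvidence{
		VoteA:            ev.VoteA,
		VoteB:            ev.VoteB,
		TotalVotingPower: valSet.TotalVotingPower(),
		ValidatorPower:   val.VotingPower,
		Timestamp:        blockTime,
	}
}
```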
#### Broadcasting and receiving evidence

@@ -59,7 +59,7 @@ When preparing our public API for 1.0, we should keep these principles in mind:
The following is the minimum set of public APIs that will be included in 1.0, in some form:

- `abci`
- packages used for constructing nodes `config`, `libs/log`, and `version`
- `node` and related packages (e.g. possibly `config`, `libs/log`, and `version`)
- Client APIs, i.e. `rpc/client`, `light`, and `privval`.
- `crypto` (possibly as a separate repo)

@@ -91,9 +91,9 @@ For comparison, the following are the number of Tendermint imports in the Cosmos
     4 github.com/tendermint/tendermint/libs/rand
     1 github.com/tendermint/tendermint/libs/strings
     5 github.com/tendermint/tendermint/light
     1 github.com/tendermint/tendermint/internal/mempool
     1 github.com/tendermint/tendermint/mempool
     3 github.com/tendermint/tendermint/node
     5 github.com/tendermint/tendermint/internal/p2p
     5 github.com/tendermint/tendermint/p2p
     4 github.com/tendermint/tendermint/privval
    10 github.com/tendermint/tendermint/proto/tendermint/crypto
     1 github.com/tendermint/tendermint/proto/tendermint/libs/bits

@@ -78,7 +78,7 @@ db-backend = "goleveldb"
db-dir = "data"

# Output level for logging, including package level options
log-level = "info"
log-level = "main:info,state:info,statesync:info,*:error"

# Output format: 'plain' (colored text) or 'json'
log-format = "plain"

@@ -4,27 +4,33 @@ order: 7

## Logging

Logging adds detail and allows the node operator to better identify what they are looking for. Tendermint supports log levels on a global and per-module basis. This allows the node operator to see only the information they need and the developer to home in on specific changes they are working on.

## Configuring Log Levels

There are three log levels, `info`, `debug` and `error`. These can be configured either through the command line via `tendermint start --log-level ""` or within the `config.toml` file.

- `info` Info represents an informational message. It is used to show that modules have started, stopped and how they are functioning.
- `debug` Debug is used to trace various calls or problems. Debug is used widely throughout a codebase and can lead to overly verbose logging.
- `error` Error represents something that has gone wrong. An error log can represent a potential problem that can lead to a node halt.

The default setting is a global `main:info,state:info,statesync:info,*:error` level. If you would like to set the log level for a specific module, it can be done in the following format:

> We are setting all modules to log level `info` and the mempool to `error`. This will log all errors within the mempool module.

Within the `config.toml`:

```toml
# Output level for logging, including package level options
log-level = "info"
log-level = "*:info,mempool:error"
```

Via the command line:

```sh
tendermint start --log-level "info"
tendermint start --log-level "*:info,mempool:error"
```

## List of modules

@@ -57,6 +63,7 @@ little overview what they do.
  executes blocks against the application.
- `statesync` Provides a way to quickly sync a node with pruned history.

### Walkabout example

We first create three connections (mempool, consensus and query) to the

@@ -36,14 +36,14 @@ Applications can use [state sync](state-sync.md) to help nodes bootstrap quickly

## Logging

Default logging level (`log-level = "info"`) should suffice for
Default logging level (`log-level = "main:info,state:info,statesync:info,*:error"`) should suffice for
normal operation mode. Read [this
post](https://blog.cosmos.network/one-of-the-exciting-new-features-in-0-10-0-release-is-smart-log-level-flag-e2506b4ab756)
for details on how to configure the `log-level` config variable. Some of the
modules can be found [here](../nodes/logging#list-of-modules). If
you're trying to debug Tendermint or asked to provide logs with debug
logging level, you can do so by running Tendermint with
`--log-level="debug"`.
`--log-level="*:debug"`.

### Consensus WAL

@@ -40,7 +40,7 @@ Verify that you have the latest version of Go installed:

```bash
$ go version
go version go1.16.x darwin/amd64
go version go1.15.x darwin/amd64
```

## 1.2 Creating a new Go project
@@ -351,7 +351,7 @@ import (
	tmflags "github.com/tendermint/tendermint/libs/cli/flags"
	"github.com/tendermint/tendermint/libs/log"
	nm "github.com/tendermint/tendermint/node"
	"github.com/tendermint/tendermint/internal/p2p"
	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/privval"
	"github.com/tendermint/tendermint/proxy"
)

@@ -43,7 +43,7 @@ Verify that you have the latest version of Go installed:

```bash
$ go version
go version go1.16.x darwin/amd64
go version go1.15.x darwin/amd64
```

## 1.2 Creating a new Go project
@@ -446,7 +446,7 @@ This will populate the `go.mod` with a release number followed by a hash for Ten

```go
module github.com/me/example

go 1.16
go 1.15

require (
	github.com/dgraph-io/badger v1.6.2

@@ -1,4 +1,4 @@
// Code generated by mockery 2.9.0. DO NOT EDIT.
// Code generated by mockery v0.0.0-dev. DO NOT EDIT.

package mocks

@@ -489,14 +489,12 @@ func (evpool *Pool) batchExpiredPendingEvidence(batch dbm.Batch) (int64, time.Ti
func (evpool *Pool) removeEvidenceFromList(
	blockEvidenceMap map[string]struct{}) {

	el := evpool.evidenceList
	var nextE *clist.CElement
	for e := el.Front(); e != nil; e = nextE {
		nextE = e.Next()
	for e := evpool.evidenceList.Front(); e != nil; e = e.Next() {
		// Remove from clist
		ev := e.Value.(types.Evidence)
		if _, ok := blockEvidenceMap[evMapKey(ev)]; ok {
			evpool.evidenceList.Remove(e)
			e.DetachPrev()
		}
	}
}
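The replacement loop captures the successor before any removal, since advancing via `e.Next()` after `Remove`/`DetachPrev` would break the traversal. In isolation, the idiom looks like this (a sketch; `list` and `shouldRemove` are illustrative):

```go
// Capture next before possibly removing e: removal detaches the element's
// links, so e.Next() would no longer be safe afterwards.
var next *clist.CElement
for e := list.Front(); e != nil; e = next {
	next = e.Next()
	if shouldRemove(e) { // shouldRemove is a hypothetical predicate
		list.Remove(e)
		e.DetachPrev()
	}
}
```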
@@ -11,13 +11,12 @@ import (

	dbm "github.com/tendermint/tm-db"

	"github.com/tendermint/tendermint/internal/evidence"
	"github.com/tendermint/tendermint/internal/evidence/mocks"
	"github.com/tendermint/tendermint/evidence"
	"github.com/tendermint/tendermint/evidence/mocks"
	"github.com/tendermint/tendermint/internal/test/factory"
	"github.com/tendermint/tendermint/libs/log"
	sm "github.com/tendermint/tendermint/state"
	smmocks "github.com/tendermint/tendermint/state/mocks"
	sf "github.com/tendermint/tendermint/state/test/factory"
	"github.com/tendermint/tendermint/store"
	"github.com/tendermint/tendermint/types"
	"github.com/tendermint/tendermint/version"
@@ -439,7 +438,8 @@ func initializeBlockStore(db dbm.DB, state sm.State, valAddr []byte) *store.Bloc

	for i := int64(1); i <= state.LastBlockHeight; i++ {
		lastCommit := makeCommit(i-1, valAddr)
		block := sf.MakeBlock(state, i, lastCommit)
		block, _ := state.MakeBlock(i, []types.Tx{}, lastCommit, nil,
			state.Validators.GetProposer().Address)
		block.Header.Time = defaultEvidenceTime.Add(time.Duration(i) * time.Minute)
		block.Header.Version = version.Consensus{Block: version.BlockProtocol, App: 1}
		const parts = 1
@@ -7,9 +7,9 @@ import (

	clist "github.com/tendermint/tendermint/internal/libs/clist"
	tmsync "github.com/tendermint/tendermint/internal/libs/sync"
	"github.com/tendermint/tendermint/internal/p2p"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/libs/service"
	"github.com/tendermint/tendermint/p2p"
	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
	"github.com/tendermint/tendermint/types"
)
@@ -31,8 +31,8 @@ var (
			ID:                  byte(EvidenceChannel),
			Priority:            6,
			RecvMessageCapacity: maxMsgSize,
			RecvBufferCapacity:  32,
			MaxSendBytes:        400,

			MaxSendBytes: 400,
		},
	},
}
@@ -62,7 +62,7 @@ type Reactor struct {
	peerWG sync.WaitGroup

	mtx          tmsync.Mutex
	peerRoutines map[types.NodeID]*tmsync.Closer
	peerRoutines map[p2p.NodeID]*tmsync.Closer
}

// NewReactor returns a reference to a new evidence reactor, which implements the
@@ -79,7 +79,7 @@ func NewReactor(
		evidenceCh:   evidenceCh,
		peerUpdates:  peerUpdates,
		closeCh:      make(chan struct{}),
		peerRoutines: make(map[types.NodeID]*tmsync.Closer),
		peerRoutines: make(map[p2p.NodeID]*tmsync.Closer),
	}

	r.BaseService = *service.NewBaseService(logger, "Evidence", r)
@@ -285,7 +285,7 @@ func (r *Reactor) processPeerUpdates() {
// that the peer has already received or may not be ready for.
//
// REF: https://github.com/tendermint/tendermint/issues/4727
func (r *Reactor) broadcastEvidenceLoop(peerID types.NodeID, closer *tmsync.Closer) {
func (r *Reactor) broadcastEvidenceLoop(peerID p2p.NodeID, closer *tmsync.Closer) {
	var next *clist.CElement

	defer func() {
@@ -16,11 +16,11 @@ import (

	"github.com/tendermint/tendermint/crypto"
	"github.com/tendermint/tendermint/crypto/tmhash"
	"github.com/tendermint/tendermint/internal/evidence"
	"github.com/tendermint/tendermint/internal/evidence/mocks"
	"github.com/tendermint/tendermint/internal/p2p"
	"github.com/tendermint/tendermint/internal/p2p/p2ptest"
	"github.com/tendermint/tendermint/evidence"
	"github.com/tendermint/tendermint/evidence/mocks"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/p2p/p2ptest"
	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
	sm "github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/types"
@@ -35,11 +35,11 @@ var (
type reactorTestSuite struct {
	network          *p2ptest.Network
	logger           log.Logger
	reactors         map[types.NodeID]*evidence.Reactor
	pools            map[types.NodeID]*evidence.Pool
	evidenceChannels map[types.NodeID]*p2p.Channel
	peerUpdates      map[types.NodeID]*p2p.PeerUpdates
	peerChans        map[types.NodeID]chan p2p.PeerUpdate
	reactors         map[p2p.NodeID]*evidence.Reactor
	pools            map[p2p.NodeID]*evidence.Pool
	evidenceChannels map[p2p.NodeID]*p2p.Channel
	peerUpdates      map[p2p.NodeID]*p2p.PeerUpdates
	peerChans        map[p2p.NodeID]chan p2p.PeerUpdate
	nodes            []*p2ptest.Node
	numStateStores   int
}
@@ -56,10 +56,10 @@ func setup(t *testing.T, stateStores []sm.Store, chBuf uint) *reactorTestSuite {
		numStateStores: numStateStores,
		logger:         log.TestingLogger().With("testCase", t.Name()),
		network:        p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numStateStores}),
		reactors:       make(map[types.NodeID]*evidence.Reactor, numStateStores),
		pools:          make(map[types.NodeID]*evidence.Pool, numStateStores),
		peerUpdates:    make(map[types.NodeID]*p2p.PeerUpdates, numStateStores),
		peerChans:      make(map[types.NodeID]chan p2p.PeerUpdate, numStateStores),
		reactors:       make(map[p2p.NodeID]*evidence.Reactor, numStateStores),
		pools:          make(map[p2p.NodeID]*evidence.Pool, numStateStores),
		peerUpdates:    make(map[p2p.NodeID]*p2p.PeerUpdates, numStateStores),
		peerChans:      make(map[p2p.NodeID]chan p2p.PeerUpdate, numStateStores),
	}

	chDesc := p2p.ChannelDescriptor{ID: byte(evidence.EvidenceChannel)}
@@ -124,7 +124,7 @@ func (rts *reactorTestSuite) start(t *testing.T) {
		"network does not have expected number of nodes")
}

func (rts *reactorTestSuite) waitForEvidence(t *testing.T, evList types.EvidenceList, ids ...types.NodeID) {
func (rts *reactorTestSuite) waitForEvidence(t *testing.T, evList types.EvidenceList, ids ...p2p.NodeID) {
	t.Helper()

	fn := func(pool *evidence.Pool) {
@@ -188,7 +188,7 @@ func (rts *reactorTestSuite) waitForEvidence(t *testing.T, evList types.Evidence
		}

		wg.Add(1)
		go func(id types.NodeID) { defer wg.Done(); fn(rts.pools[id]) }(id)
		go func(id p2p.NodeID) { defer wg.Done(); fn(rts.pools[id]) }(id)
	}
	wg.Wait()
}
@@ -293,7 +293,7 @@ func TestReactorBroadcastEvidence(t *testing.T) {
	// primary. As a result, the primary will gossip all evidence to each secondary.
	primary := rts.network.RandomNode()
	secondaries := make([]*p2ptest.Node, 0, len(rts.network.NodeIDs())-1)
	secondaryIDs := make([]types.NodeID, 0, cap(secondaries))
	secondaryIDs := make([]p2p.NodeID, 0, cap(secondaries))
	for id := range rts.network.Nodes {
		if id == primary.NodeID {
			continue