Mirror of https://github.com/tendermint/tendermint.git
Synced 2026-01-14 08:42:48 +00:00

Compare commits: 3 commits (wb/impleme... -> wb/issue-9...)

| Author | SHA1 | Date |
|---|---|---|
| | d59c586d4d | |
| | b3a2c1d9bb | |
| | 80c8b86553 | |

.github/dependabot.yml (vendored): 4 changed lines
@@ -55,9 +55,7 @@ updates:
    schedule:
      interval: weekly
    target-branch: "v0.37.x"
-   # Only allow automated security-related dependency updates until we cut the
-   # final v0.37.0 release.
-   open-pull-requests-limit: 0
+   open-pull-requests-limit: 10
    labels:
      - T:dependencies
      - S:automerge
.github/workflows/e2e-nightly-34x.yml (vendored): 27 changed lines

@@ -57,7 +57,7 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Notify Slack on failure
-       uses: slackapi/slack-github-action@v1.22.0
+       uses: slackapi/slack-github-action@v1.21.0
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
          SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
@@ -77,3 +77,28 @@ jobs:
            }
          ]
        }
+
+ e2e-nightly-success:  # may turn this off once they seem to pass consistently
+   needs: e2e-nightly-test
+   if: ${{ success() }}
+   runs-on: ubuntu-latest
+   steps:
+     - name: Notify Slack on success
+       uses: slackapi/slack-github-action@v1.21.0
+       env:
+         SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
+         SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
+         BRANCH: ${{ needs.e2e-nightly-test.outputs.git-branch }}
+       with:
+         payload: |
+           {
+             "blocks": [
+               {
+                 "type": "section",
+                 "text": {
+                   "type": "mrkdwn",
+                   "text": ":white_check_mark: Nightly E2E tests for `${{ env.BRANCH }}` passed."
+                 }
+               }
+             ]
+           }
.github/workflows/e2e-nightly-37x.yml (vendored): 27 changed lines

@@ -57,7 +57,7 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Notify Slack on failure
-       uses: slackapi/slack-github-action@v1.22.0
+       uses: slackapi/slack-github-action@v1.21.0
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
          SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
@@ -77,3 +77,28 @@ jobs:
            }
          ]
        }
+
+ e2e-nightly-success:  # may turn this off once they seem to pass consistently
+   needs: e2e-nightly-test
+   if: ${{ success() }}
+   runs-on: ubuntu-latest
+   steps:
+     - name: Notify Slack on success
+       uses: slackapi/slack-github-action@v1.21.0
+       env:
+         SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
+         SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
+         BRANCH: ${{ needs.e2e-nightly-test.outputs.git-branch }}
+       with:
+         payload: |
+           {
+             "blocks": [
+               {
+                 "type": "section",
+                 "text": {
+                   "type": "mrkdwn",
+                   "text": ":white_check_mark: Nightly E2E tests for `${{ env.BRANCH }}` passed."
+                 }
+               }
+             ]
+           }
.github/workflows/e2e-nightly-main.yml (vendored): 27 changed lines

@@ -46,7 +46,7 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Notify Slack on failure
-       uses: slackapi/slack-github-action@v1.22.0
+       uses: slackapi/slack-github-action@v1.21.0
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
          SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
@@ -66,3 +66,28 @@ jobs:
            }
          ]
        }
+
+ e2e-nightly-success:  # may turn this off once they seem to pass consistently
+   needs: e2e-nightly-test
+   if: ${{ success() }}
+   runs-on: ubuntu-latest
+   steps:
+     - name: Notify Slack on success
+       uses: slackapi/slack-github-action@v1.21.0
+       env:
+         SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
+         SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
+         BRANCH: ${{ github.ref_name }}
+       with:
+         payload: |
+           {
+             "blocks": [
+               {
+                 "type": "section",
+                 "text": {
+                   "type": "mrkdwn",
+                   "text": ":white_check_mark: Nightly E2E tests for `${{ env.BRANCH }}` passed."
+                 }
+               }
+             ]
+           }
.github/workflows/fuzz-nightly.yml (vendored): 2 changed lines

@@ -76,7 +76,7 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Notify Slack on failure
-       uses: slackapi/slack-github-action@v1.22.0
+       uses: slackapi/slack-github-action@v1.21.0
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
          SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
.github/workflows/gosec.yml (vendored): 41 changed lines (file removed)

@@ -1,41 +0,0 @@
- name: Run Gosec
- on:
-   pull_request:
-     paths:
-       - '**/*.go'
-       - 'go.mod'
-       - 'go.sum'
-   push:
-     branches:
-       - main
-       - 'feature/*'
-       - 'v0.37.x'
-       - 'v0.34.x'
-     paths:
-       - '**/*.go'
-       - 'go.mod'
-       - 'go.sum'
-
- jobs:
-   Gosec:
-     permissions:
-       security-events: write
-
-     runs-on: ubuntu-latest
-     env:
-       GO111MODULE: on
-     steps:
-       - name: Checkout Source
-         uses: actions/checkout@v3
-
-       - name: Run Gosec Security Scanner
-         uses: cosmos/gosec@master
-         with:
-           # Let the report trigger a failure with the Github Security scanner features.
-           args: "-no-fail -fmt sarif -out results.sarif ./..."
-
-       - name: Upload SARIF file
-         uses: github/codeql-action/upload-sarif@v2
-         with:
-           # Path to SARIF file relative to the root of the repository
-           sarif_file: results.sarif
.github/workflows/janitor.yml (vendored): 2 changed lines

@@ -10,7 +10,7 @@ jobs:
    runs-on: ubuntu-latest
    timeout-minutes: 3
    steps:
-     - uses: styfle/cancel-workflow-action@0.10.1
+     - uses: styfle/cancel-workflow-action@0.10.0
        with:
          workflow_id: 1041851,1401230,2837803
          access_token: ${{ github.token }}
.github/workflows/proto-lint.yml (vendored): 2 changed lines

@@ -15,7 +15,7 @@ jobs:
    timeout-minutes: 5
    steps:
      - uses: actions/checkout@v3
-     - uses: bufbuild/buf-setup-action@v1.8.0
+     - uses: bufbuild/buf-setup-action@v1.7.0
      - uses: bufbuild/buf-lint-action@v1
        with:
          input: 'proto'
.github/workflows/stale.yml (vendored): 2 changed lines

@@ -7,7 +7,7 @@ jobs:
  stale:
    runs-on: ubuntu-latest
    steps:
-     - uses: actions/stale@v6
+     - uses: actions/stale@v5
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
          stale-pr-message: "This pull request has been automatically marked as stale because it has not had
CHANGELOG.md: 46 changed lines

@@ -2,38 +2,6 @@

Friendly reminder, we have a [bug bounty program](https://hackerone.com/cosmos).

## v0.34.21

Release highlights include:

- A new `[storage]` configuration section and flag `discard_abci_responses`,
  which, if enabled, discards all ABCI responses except the latest one in order
  to reduce disk space usage in the state store. When enabled, the
  `block_results` RPC endpoint can no longer function and will return an error.
- A new CLI command, `reindex-event`, to re-index block and tx events to the
  event sinks. You can run this command when the event store backend
  dropped/disconnected or you want to replace the backend. When
  `discard_abci_responses` is enabled, you will not be able to use this command.

Special thanks to external contributors on this release: @rootwarp & @animart

### FEATURES

- [cli] [\#9083](https://github.com/tendermint/tendermint/issues/9083) Backport command to reindex missed events (@cmwaters)
- [cli] [\#9107](https://github.com/tendermint/tendermint/issues/9107) Add the `p2p.external-address` argument to set the node P2P external address (@amimart)

### IMPROVEMENTS

- [config] [\#9054](https://github.com/tendermint/tendermint/issues/9054) `discard_abci_responses` flag added to discard all ABCI
  responses except the last in order to save on storage space in the state
  store (@samricotta)

### BUG FIXES

- [mempool] [\#9033](https://github.com/tendermint/tendermint/issues/9033) Rework lock discipline to mitigate callback deadlocks in the
  priority mempool
- [cli] [\#9103](https://github.com/tendermint/tendermint/issues/9103) fix unsafe-reset-all for working with home path (@rootwarp)

## v0.34.20

Special thanks to external contributors on this release: @joeabbey @yihuang
@@ -863,7 +831,7 @@ and a validator address plus a timestamp. Note we may remove the validator
address & timestamp fields in the future (see ADR-25).

`lite2` package has been added to solve `lite` issues and introduce weak
subjectivity interface. Refer to the [spec](https://github.com/tendermint/tendermint/tree/main/spec/consensus/light-client) for complete details.
subjectivity interface. Refer to the [spec](https://github.com/tendermint/tendermint/blob/main/spec/consensus/light-client.md) for complete details.
`lite` package is now deprecated and will be removed in v0.34 release.

### BREAKING CHANGES:
@@ -1223,8 +1191,8 @@ Special thanks to external contributors on this release: @jon-certik, @gracenoah

*August 28, 2019*

@climber73 wrote the [Writing a Tendermint Core application in Java
(gRPC)](https://docs.tendermint.com/v0.34/tutorials/java.html)
@climber73 wrote the [Writing a Tendermint Core application in Java
(gRPC)](https://github.com/tendermint/tendermint/blob/main/docs/guides/java.md)
guide.

Special thanks to external contributors on this release:
@@ -2614,7 +2582,7 @@ are affected by a change.

A few more breaking changes are in the works - each will come with a clear
Architecture Decision Record (ADR) explaining the change. You can review ADRs
[here](https://github.com/tendermint/tendermint/tree/main/docs/architecture)
[here](https://github.com/tendermint/tendermint/tree/develop/docs/architecture)
or in the [open Pull Requests](https://github.com/tendermint/tendermint/pulls).
You can also check in on the [issues marked as
breaking](https://github.com/tendermint/tendermint/issues?q=is%3Aopen+is%3Aissue+label%3Abreaking).
@@ -2893,7 +2861,7 @@ BREAKING CHANGES:

FEATURES
- [cmd] Added metrics (served under `/metrics` using a Prometheus client;
  disabled by default). See the new `instrumentation` section in the config and
  [metrics](https://github.com/tendermint/tendermint/blob/main/docs/tendermint-core/metrics.md)
  [metrics](https://tendermint.readthedocs.io/projects/tools/en/develop/metrics.html)
  guide.
- [p2p] Add IPv6 support to peering.
- [p2p] Add `external_address` to config to allow specifying the address for
@@ -3007,7 +2975,7 @@ BREAKING:

FEATURES

- [rpc] the RPC documentation is now published to https://github.com/tendermint/tendermint/tree/main/spec/rpc
- [rpc] the RPC documentation is now published to https://tendermint.github.io/slate
- [p2p] AllowDuplicateIP config option to refuse connections from same IP.
  - true by default for now, false by default in next breaking release
- [docs] Add docs for query, tx indexing, events, pubsub
@@ -3486,7 +3454,7 @@ containing substructs: `BaseConfig`, `P2PConfig`, `MempoolConfig`, `ConsensusCon

- Logger
  - Replace static `log15` logger with a simple interface, and provide a new implementation using `go-kit`.
    See our new [logging library](https://github.com/tendermint/tendermint/blob/main/libs/log/logger.go) and [blog post](https://blog.cosmos.network/abstracting-the-logger-interface-in-go-4cf96bf90bb7) for more details
    See our new [logging library](https://github.com/tendermint/tmlibs/log) and [blog post](https://tendermint.com/blog/abstracting-the-logger-interface-in-go) for more details
  - Levels `warn` and `notice` are removed (you may need to change them in your `config.toml`!)
  - Change some [function and method signatures](https://gist.github.com/ebuchman/640d5fc6c2605f73497992fe107ebe0b) to accept a logger
@@ -1,37 +1,5 @@
# Unreleased Changes

## v0.38.0

### BREAKING CHANGES

- CLI/RPC/Config

- Apps

- P2P Protocol

- Go API

- Blockchain Protocol

- Data Storage
  - [state] \#6541 Move pruneBlocks from consensus/state to state/execution. (@JayT106)

- Tooling
  - [tools/tm-signer-harness] \#6498 Set OS home dir to instead of the hardcoded PATH. (@JayT106)

### FEATURES

### IMPROVEMENTS

- [pubsub] \#7319 Performance improvements for the event query API (@creachadair)
- [p2p/pex] \#6509 Improve addrBook.hash performance (@cuonglm)
- [crypto/merkle] \#6443 & \#6513 Improve HashAlternatives performance (@cuonglm, @marbar3778)

### BUG FIXES

- [docker] \#9462 ensure Docker image uses consistent version of Go

## v0.37.0

Special thanks to external contributors on this release:
@@ -50,13 +18,7 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi
- [abci/params] \#9287 Deduplicate `ConsensusParams` and `BlockParams` so only `types` proto definitions are used (@cmwaters)
  - Remove `TimeIotaMs` and use a hard-coded 1 millisecond value to ensure monotonically increasing block times.
  - Rename `AppVersion` to `App` so as to not stutter.
- [types] \#9287 Reduce the use of protobuf types in core logic. (@cmwaters)
  - `ConsensusParams`, `BlockParams`, `ValidatorParams`, `EvidenceParams`, `VersionParams` have become native types.
    They still utilize protobuf when being sent over the wire or written to disk.
  - Moved `ValidateConsensusParams` inside (now native type) `ConsensusParams`, and renamed it to `ValidateBasic`.
- [abci] \#9301 New ABCI methods `PrepareProposal` and `ProcessProposal` which give the app control over transactions proposed and allows for verification of proposed blocks.
- [abci] \#8216 Renamed `EvidenceType` to `MisbehaviorType` and `Evidence` to `Misbehavior` as a more accurate label of their contents. (@williambanfield, @sergio-mena)
- [abci] \#9122 Renamed `LastCommitInfo` to `CommitInfo` in preparation for vote extensions. (@cmwaters)
- [abci] \#8656, \#8901 Added cli commands for `PrepareProposal` and `ProcessProposal`. (@jmalicevic, @hvanz)
- [abci] \#6403 Change the `key` and `value` fields from `[]byte` to `string` in the `EventAttribute` type. (@alexanderbez)

@@ -65,7 +27,6 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi
- Go API
  - [all] \#9144 Change spelling from British English to American (@cmwaters)
    - Rename "Subscription.Cancelled()" to "Subscription.Canceled()" in libs/pubsub
  - [crypto/sr25519] \#6526 Do not re-execute the Ed25519-style key derivation step when doing signing and verification. The derivation is now done once and only once. This breaks `sr25519.GenPrivKeyFromSecret` output compatibility. (@Yawning)

- Blockchain Protocol

@@ -74,26 +35,12 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi
- [abci] \#9301 New ABCI methods `PrepareProposal` and `ProcessProposal` which give the app control over transactions proposed and allows for verification of proposed blocks.

### IMPROVEMENTS
- [crypto] \#9250 Update to use btcec v2 and the latest btcutil. (@wcsiu)

- [cli] \#9171 add `--hard` flag to rollback command (and a boolean to the `RollbackState` method). This will rollback
  state and remove the last block. This command can be triggered multiple times. The application must also rollback
  state to the same height. (@tsutsu, @cmwaters)
- [proto] \#9356 Migrate from `gogo/protobuf` to `cosmos/gogoproto` (@julienrbrt)
- [rpc] \#9276 Added `header` and `header_by_hash` queries to the RPC client (@samricotta)
- [abci] \#5706 Added `AbciVersion` to `RequestInfo` allowing applications to check ABCI version when connecting to Tendermint. (@marbar3778)
- [node] \#6059 Validate and complete genesis doc before saving to state store (@silasdavis)

- [crypto/ed25519] \#5632 Adopt zip215 `ed25519` verification. (@marbar3778)
- [crypto/ed25519] \#6526 Use [curve25519-voi](https://github.com/oasisprotocol/curve25519-voi) for `ed25519` signing and verification. (@Yawning)
- [crypto/sr25519] \#6526 Use [curve25519-voi](https://github.com/oasisprotocol/curve25519-voi) for `sr25519` signing and verification. (@Yawning)
- [crypto] \#6120 Implement batch verification interface for ed25519 and sr25519. (@marbar3778 & @Yawning)
- [types] \#6120 use batch verification for verifying commits signatures. (@marbar3778 & @cmwaters & @Yawning)
  - If the key type supports the batch verification API it will try to batch verify. If the verification fails we will single verify each signature.
- [state] \#9505 Added logic so when pruning, the evidence period is taken into consideration and only deletes unecessary data (@samricotta)

### BUG FIXES

- [consensus] \#9229 fix round number of `enterPropose` when handling `RoundStepNewRound` timeout. (@fatcat22)
- [docker] \#9073 enable cross platform build using docker buildx
- [blocksync] \#9518 handle the case when the sending queue is full: retry block request after a timeout
@@ -12,7 +12,7 @@ and hence to Tendermint.
* We are committed to providing a friendly, safe and welcoming environment for
  all, regardless of level of experience, gender, gender identity and
  expression, sexual orientation, disability, personal appearance, body size,
- race, ethnicity, age, religion, nationality, or other similar characteristics.
+ race, ethnicity, age, religion, nationality, or other similar characteristic.

* On Slack, please avoid using overtly sexual nicknames or other nicknames that
  might detract from a friendly, safe and welcoming environment for all.

@@ -12,7 +12,7 @@ landing changes in `main`.
All work on the code base should be motivated by a [Github
Issue](https://github.com/tendermint/tendermint/issues).
[Search](https://github.com/tendermint/tendermint/issues?q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22)
- is a good place to start when looking for places to contribute. If you
+ is a good place start when looking for places to contribute. If you
would like to work on an issue which already exists, please indicate so
by leaving a comment.

@@ -213,7 +213,7 @@ Changes with multiple classifications should be doubly included (eg. a bug fix
that is also a breaking change should be recorded under both).

Breaking changes are further subdivided according to the APIs/users they impact.
- Any change that affects multiple APIs/users should be recorded multiply - for
+ Any change that effects multiple APIs/users should be recorded multiply - for
instance, a change to the `Blockchain Protocol` that removes a field from the
header should also be recorded under `CLI/RPC/Config` since the field will be
removed from the header in RPC responses as well.

@@ -247,7 +247,7 @@ To begin contributing, create a development branch either on `github.com/tenderm
Make changes, and before submitting a pull request, update the `CHANGELOG_PENDING.md` to record your change. Also, run either `git rebase` or `git merge` on top of the latest `main`. (Since pull requests are squash-merged, either is fine!)

Update the `UPGRADING.md` if the change you've made is breaking and the
- instructions should be in place for a user on how he/she can upgrade its
+ instructions should be in place for a user on how he/she can upgrade it's
software (ABCI application, Tendermint-based blockchain, light client, wallet).

Once you have submitted a pull request label the pull request with either `R:minor`, if the change should be included in the next minor release, or `R:major`, if the change is meant for a major release.
@@ -1,9 +1,5 @@
- # Use a build arg to ensure that both stages use the same,
- # hopefully current, go version.
- ARG GOLANG_BASE_IMAGE=golang:1.18-alpine
-
# stage 1 Generate Tendermint Binary
- FROM --platform=$BUILDPLATFORM $GOLANG_BASE_IMAGE as builder
+ FROM --platform=$BUILDPLATFORM golang:1.18-alpine as builder
RUN apk update && \
    apk upgrade && \
    apk --no-cache add make
@@ -12,7 +8,7 @@ WORKDIR /tendermint
RUN TARGETPLATFORM=$TARGETPLATFORM make build-linux

# stage 2
- FROM $GOLANG_BASE_IMAGE
+ FROM golang:1.15-alpine
LABEL maintainer="hello@tendermint.com"

# Tendermint will be looking for the genesis file in /tendermint/config/genesis.json
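For context, a hypothetical build invocation for each Dockerfile variant in this hunk; the image tag and the build directory are placeholders, not taken from the diff. The ARG-based variant lets the shared Go base image be overridden at build time, while the other variant pins the builder and runtime images directly in the `FROM` lines.

```sh
# ARG-based variant: override the shared base image at build time (tag is a placeholder).
docker build --build-arg GOLANG_BASE_IMAGE=golang:1.18-alpine -t tendermint/tendermint:local .

# Variant without the ARG: base images are fixed in the Dockerfile
# (golang:1.18-alpine for the builder stage, golang:1.15-alpine for the runtime stage).
docker build -t tendermint/tendermint:local .
```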
@@ -8,7 +8,7 @@ Official releases can be found [here](https://github.com/tendermint/tendermint/r

The Dockerfile for Tendermint is not expected to change in the near future. The main file used for all builds can be found [here](https://raw.githubusercontent.com/tendermint/tendermint/main/DOCKER/Dockerfile).

- Respective versioned files can be found at `https://raw.githubusercontent.com/tendermint/tendermint/vX.XX.XX/DOCKER/Dockerfile` (replace the Xs with the version number).
+ Respective versioned files can be found <https://raw.githubusercontent.com/tendermint/tendermint/vX.XX.XX/DOCKER/Dockerfile> (replace the Xs with the version number).

## Quick reference
Makefile: 12 changed lines

@@ -4,8 +4,14 @@ OUTPUT?=$(BUILDDIR)/tendermint

BUILD_TAGS?=tendermint

- COMMIT_HASH := $(shell git rev-parse --short HEAD)
- LD_FLAGS = -X github.com/tendermint/tendermint/version.TMGitCommitHash=$(COMMIT_HASH)
+ # If building a release, please checkout the version tag to get the correct version setting
+ ifneq ($(shell git symbolic-ref -q --short HEAD),)
+ VERSION := unreleased-$(shell git symbolic-ref -q --short HEAD)-$(shell git rev-parse HEAD)
+ else
+ VERSION := $(shell git describe)
+ endif
+
+ LD_FLAGS = -X github.com/tendermint/tendermint/version.TMCoreSemVer=$(VERSION)
BUILD_FLAGS = -mod=readonly -ldflags "$(LD_FLAGS)"
HTTPS_GIT := https://github.com/tendermint/tendermint.git
CGO_ENABLED ?= 0
@@ -400,4 +406,4 @@ $(BUILDDIR)/packages.txt:$(GO_TEST_FILES) $(BUILDDIR)
split-test-packages:$(BUILDDIR)/packages.txt
	split -d -n l/$(NUM_SPLIT) $< $<.
test-group-%:split-test-packages
-	cat $(BUILDDIR)/packages.txt.$* | xargs go test -mod=readonly -timeout=15m -race -coverprofile=$(BUILDDIR)/$*.profile.out
+	cat $(BUILDDIR)/packages.txt.$* | xargs go test -mod=readonly -timeout=5m -race -coverprofile=$(BUILDDIR)/$*.profile.out
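As a rough illustration of the version logic on the git-describe side of this hunk (not part of the diff itself): when HEAD is on a branch, `git symbolic-ref` succeeds and VERSION becomes an `unreleased-<branch>-<commit>` string; on a detached checkout of a release tag it falls back to `git describe`. The value is then injected into the binary through `-ldflags`. The manual build command and output path below are assumptions based on the Makefile variables shown above, not lines from the diff.

```sh
# Sketch only; mirrors the Makefile logic above rather than reproducing it exactly.
if BRANCH=$(git symbolic-ref -q --short HEAD); then
  VERSION="unreleased-${BRANCH}-$(git rev-parse HEAD)"
else
  VERSION=$(git describe)
fi

# Hypothetical manual build with the same ldflags injection (output path assumed):
go build -mod=readonly \
  -ldflags "-X github.com/tendermint/tendermint/version.TMCoreSemVer=${VERSION}" \
  -o build/tendermint ./cmd/tendermint
```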
@@ -70,8 +70,8 @@ See the [install instructions](./docs/introduction/install.md).
### Quick Start

- [Single node](./docs/introduction/quick-start.md)
- - [Local cluster using docker-compose](./docs/networks/docker-compose.md)
- - [Remote cluster using Terraform and Ansible](./docs/networks/terraform-and-ansible.md)
+ - [Local cluster using docker-compose](./docs/tools/docker-compose.md)
+ - [Remote cluster using Terraform and Ansible](./docs/tools/terraform-and-ansible.md)

## Contributing

@@ -145,7 +145,7 @@ Upgrading instructions can be found in [UPGRADING.md](./UPGRADING.md).

## Join us!

- Tendermint Core is maintained by [Interchain GmbH](https://interchain.io).
+ Tendermint Core is maintained by [Interchain GmbH](https://interchain.berlin).
If you'd like to work full-time on Tendermint Core,
[we're hiring](https://interchain-gmbh.breezy.hr/)!

@@ -157,7 +157,7 @@ for-profit entity that also maintains [tendermint.com](https://tendermint.com).
[bft]: https://en.wikipedia.org/wiki/Byzantine_fault_tolerance
[smr]: https://en.wikipedia.org/wiki/State_machine_replication
[Blockchain]: https://en.wikipedia.org/wiki/Blockchain
- [version-badge]: https://img.shields.io/github/v/release/tendermint/tendermint.svg
+ [version-badge]: https://img.shields.io/github/tag/tendermint/tendermint.svg
[version-url]: https://github.com/tendermint/tendermint/releases/latest
[api-badge]: https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667
[api-url]: https://pkg.go.dev/github.com/tendermint/tendermint
RELEASES.md: 16 changed lines

@@ -45,7 +45,7 @@ the 0.38.x line.
1. Start on `main`

2. Ensure that there is a [branch protection
-    rule](https://docs.github.com/en/repositories/configuring-branches-and-merges-in-your-repository/defining-the-mergeability-of-pull-requests/managing-a-branch-protection-rule) for the
+    rule](https://github.com/tendermint/tendermint/settings/branches) for the
   branch you are about to create (you will need admin access to the repository
   in order to do this).

@@ -93,14 +93,22 @@ the 0.38.x line.

After doing these steps, go back to `main` and do the following:

- 1. Create a new workflow to run e2e nightlies for the new backport branch. (See
+ 1. Tag `main` as the dev branch for the _next_ minor version release and push
+    it up to GitHub.
+    For example:
+    ```sh
+    git tag -a v0.39.0-dev -m "Development base for Tendermint v0.39."
+    git push origin v0.39.0-dev
+    ```
+
+ 2. Create a new workflow to run e2e nightlies for the new backport branch. (See
   [e2e-nightly-main.yml][e2e] for an example.)

- 2. Add a new section to the Mergify config (`.github/mergify.yml`) to enable the
+ 3. Add a new section to the Mergify config (`.github/mergify.yml`) to enable the
   backport bot to work on this branch, and add a corresponding `S:backport-to-v0.38.x`
   [label](https://github.com/tendermint/tendermint/labels) so the bot can be triggered.

- 3. Add a new section to the Dependabot config (`.github/dependabot.yml`) to
+ 4. Add a new section to the Dependabot config (`.github/dependabot.yml`) to
   enable automatic update of Go dependencies on this branch. Copy and edit one
   of the existing branch configurations to set the correct `target-branch`.
@@ -12,7 +12,7 @@ Tendermint Core.
* Added new ABCI methods `PrepareProposal` and `ProcessProposal`. For details,
  please see the [spec](spec/abci/README.md). Applications upgrading to
  v0.37.0 must implement these methods, at the very minimum, as described
- [here](./spec/abci/abci++_app_requirements.md)
+ [here](spec/abci/apps.md)
* Deduplicated `ConsensusParams` and `BlockParams`.
  In the v0.34 branch they are defined both in `abci/types.proto` and `types/params.proto`.
  The definitions in `abci/types.proto` have been removed.
@@ -19,8 +19,8 @@ To get up and running quickly, see the [getting started guide](../docs/app-dev/g

A detailed description of the ABCI methods and message types is contained in:

- - [The main spec](https://github.com/tendermint/tendermint/blob/main/spec/abci/README.md)
- - [A protobuf file](../proto/tendermint/types/types.proto)
+ - [The main spec](https://github.com/tendermint/tendermint/blob/main/spec/abci/abci.md)
+ - [A protobuf file](./types/types.proto)
- [A Go interface](./types/application.go)

## Protocol Buffers
@@ -79,7 +79,7 @@ func NewApplication() *Application {
func (app *Application) Info(req types.RequestInfo) (resInfo types.ResponseInfo) {
	return types.ResponseInfo{
		Data: fmt.Sprintf("{\"size\":%v}", app.state.Size),
-		Version: version.ABCISemVer,
+		Version: version.ABCIVersion,
		AppVersion: ProtocolVersion,
		LastBlockHeight: app.state.Height,
		LastBlockAppHash: app.state.AppHash,
@@ -6,4 +6,4 @@ import (

// TODO: eliminate this after some version refactor

- const Version = version.ABCISemVer
+ const Version = version.ABCIVersion
@@ -19,6 +19,58 @@ const (
	BlockResponseMessageFieldKeySize
)

+ // EncodeMsg encodes a Protobuf message
+ func EncodeMsg(pb proto.Message) ([]byte, error) {
+ 	msg := bcproto.Message{}
+
+ 	switch pb := pb.(type) {
+ 	case *bcproto.BlockRequest:
+ 		msg.Sum = &bcproto.Message_BlockRequest{BlockRequest: pb}
+ 	case *bcproto.BlockResponse:
+ 		msg.Sum = &bcproto.Message_BlockResponse{BlockResponse: pb}
+ 	case *bcproto.NoBlockResponse:
+ 		msg.Sum = &bcproto.Message_NoBlockResponse{NoBlockResponse: pb}
+ 	case *bcproto.StatusRequest:
+ 		msg.Sum = &bcproto.Message_StatusRequest{StatusRequest: pb}
+ 	case *bcproto.StatusResponse:
+ 		msg.Sum = &bcproto.Message_StatusResponse{StatusResponse: pb}
+ 	default:
+ 		return nil, fmt.Errorf("unknown message type %T", pb)
+ 	}
+
+ 	bz, err := proto.Marshal(&msg)
+ 	if err != nil {
+ 		return nil, fmt.Errorf("unable to marshal %T: %w", pb, err)
+ 	}
+
+ 	return bz, nil
+ }
+
+ // DecodeMsg decodes a Protobuf message.
+ func DecodeMsg(bz []byte) (proto.Message, error) {
+ 	pb := &bcproto.Message{}
+
+ 	err := proto.Unmarshal(bz, pb)
+ 	if err != nil {
+ 		return nil, err
+ 	}
+
+ 	switch msg := pb.Sum.(type) {
+ 	case *bcproto.Message_BlockRequest:
+ 		return msg.BlockRequest, nil
+ 	case *bcproto.Message_BlockResponse:
+ 		return msg.BlockResponse, nil
+ 	case *bcproto.Message_NoBlockResponse:
+ 		return msg.NoBlockResponse, nil
+ 	case *bcproto.Message_StatusRequest:
+ 		return msg.StatusRequest, nil
+ 	case *bcproto.Message_StatusResponse:
+ 		return msg.StatusResponse, nil
+ 	default:
+ 		return nil, fmt.Errorf("unknown message type %T", msg)
+ 	}
+ }
+
// ValidateMsg validates a message.
func ValidateMsg(pb proto.Message) error {
	if pb == nil {
@@ -80,7 +80,7 @@ func TestBcStatusResponseMessageValidateBasic(t *testing.T) {
}

//nolint:lll // ignore line length in tests
- func TestBlocksyncMessageVectors(t *testing.T) {
+ func TestBlockchainMessageVectors(t *testing.T) {
	block := types.MakeBlock(int64(3), []types.Tx{types.Tx("Hello World")}, nil, nil)
	block.Version.Block = 11 // overwrite updated protocol version
@@ -32,7 +32,6 @@ const (
	maxTotalRequesters = 600
	maxPendingRequests = maxTotalRequesters
	maxPendingRequestsPerPeer = 20
-	requestRetrySeconds = 30

	// Minimum recv rate to ensure we're receiving blocks from a peer fast
	// enough. If a peer is not sending us data at at least that rate, we
@@ -603,7 +602,7 @@ OUTER_LOOP:
		}
		peer = bpr.pool.pickIncrAvailablePeer(bpr.height)
		if peer == nil {
-			bpr.Logger.Debug("No peers currently available; will retry shortly", "height", bpr.height)
+			// log.Info("No peers available", "height", height)
			time.Sleep(requestIntervalMS * time.Millisecond)
			continue PICK_PEER_LOOP
		}
@@ -613,7 +612,6 @@ OUTER_LOOP:
		bpr.peerID = peer.id
		bpr.mtx.Unlock()

-		to := time.NewTimer(requestRetrySeconds * time.Second)
		// Send request and wait.
		bpr.pool.sendRequest(bpr.height, peer.id)
	WAIT_LOOP:
@@ -626,11 +624,6 @@ OUTER_LOOP:
			return
		case <-bpr.Quit():
			return
-		case <-to.C:
-			bpr.Logger.Debug("Retrying block request after timeout", "height", bpr.height, "peer", bpr.peerID)
-			// Simulate a redo
-			bpr.reset()
-			continue OUTER_LOOP
		case peerID := <-bpr.redoCh:
			if peerID == bpr.peerID {
				bpr.reset()
@@ -30,7 +30,7 @@ const (
)

type consensusReactor interface {
-	// for when we switch from blocksync reactor and block sync to
+	// for when we switch from blockchain reactor and block sync to
	// the consensus machine
	SwitchToConsensus(state sm.State, skipWAL bool)
}
@@ -143,20 +143,21 @@ func (bcR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
			SendQueueCapacity: 1000,
			RecvBufferCapacity: 50 * 4096,
			RecvMessageCapacity: MaxMsgSize,
			MessageType: &bcproto.Message{},
		},
	}
}

// AddPeer implements Reactor by sending our state to peer.
func (bcR *Reactor) AddPeer(peer p2p.Peer) {
	peer.Send(p2p.Envelope{
		ChannelID: BlocksyncChannel,
		Message: &bcproto.StatusResponse{
			Base: bcR.store.Base(),
			Height: bcR.store.Height(),
		},
	})
	msgBytes, err := EncodeMsg(&bcproto.StatusResponse{
		Base: bcR.store.Base(),
		Height: bcR.store.Height()})
	if err != nil {
		bcR.Logger.Error("could not convert msg to protobuf", "err", err)
		return
	}

	peer.Send(BlocksyncChannel, msgBytes)
	// it's OK if send fails. will try later in poolRoutine

	// peer is added to the pool once we receive the first
@@ -181,53 +182,69 @@ func (bcR *Reactor) respondToPeer(msg *bcproto.BlockRequest,
		return false
	}

	return src.TrySend(p2p.Envelope{
		ChannelID: BlocksyncChannel,
		Message: &bcproto.BlockResponse{Block: bl},
	})
	msgBytes, err := EncodeMsg(&bcproto.BlockResponse{Block: bl})
	if err != nil {
		bcR.Logger.Error("could not marshal msg", "err", err)
		return false
	}

	return src.TrySend(BlocksyncChannel, msgBytes)
}

bcR.Logger.Info("Peer asking for a block we don't have", "src", src, "height", msg.Height)
	return src.TrySend(p2p.Envelope{
		ChannelID: BlocksyncChannel,
		Message: &bcproto.NoBlockResponse{Height: msg.Height},
	})

	msgBytes, err := EncodeMsg(&bcproto.NoBlockResponse{Height: msg.Height})
	if err != nil {
		bcR.Logger.Error("could not convert msg to protobuf", "err", err)
		return false
	}

	return src.TrySend(BlocksyncChannel, msgBytes)
}

// Receive implements Reactor by handling 4 types of messages (look below).
func (bcR *Reactor) Receive(e p2p.Envelope) {
	if err := ValidateMsg(e.Message); err != nil {
		bcR.Logger.Error("Peer sent us invalid msg", "peer", e.Src, "msg", e.Message, "err", err)
		bcR.Switch.StopPeerForError(e.Src, err)
func (bcR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
	msg, err := DecodeMsg(msgBytes)
	if err != nil {
		bcR.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err)
		bcR.Switch.StopPeerForError(src, err)
		return
	}

	bcR.Logger.Debug("Receive", "e.Src", e.Src, "chID", e.ChannelID, "msg", e.Message)
	if err = ValidateMsg(msg); err != nil {
		bcR.Logger.Error("Peer sent us invalid msg", "peer", src, "msg", msg, "err", err)
		bcR.Switch.StopPeerForError(src, err)
		return
	}

	switch msg := e.Message.(type) {
	bcR.Logger.Debug("Receive", "src", src, "chID", chID, "msg", msg)

	switch msg := msg.(type) {
	case *bcproto.BlockRequest:
		bcR.respondToPeer(msg, e.Src)
		bcR.respondToPeer(msg, src)
	case *bcproto.BlockResponse:
		bi, err := types.BlockFromProto(msg.Block)
		if err != nil {
			bcR.Logger.Error("Block content is invalid", "err", err)
			return
		}
		bcR.pool.AddBlock(e.Src.ID(), bi, msg.Block.Size())
		bcR.pool.AddBlock(src.ID(), bi, len(msgBytes))
	case *bcproto.StatusRequest:
		// Send peer our state.
		e.Src.TrySend(p2p.Envelope{
			ChannelID: BlocksyncChannel,
			Message: &bcproto.StatusResponse{
				Height: bcR.store.Height(),
				Base: bcR.store.Base(),
			},
		msgBytes, err := EncodeMsg(&bcproto.StatusResponse{
			Height: bcR.store.Height(),
			Base: bcR.store.Base(),
		})
		if err != nil {
			bcR.Logger.Error("could not convert msg to protobut", "err", err)
			return
		}
		src.TrySend(BlocksyncChannel, msgBytes)
	case *bcproto.StatusResponse:
		// Got a peer status. Unverified.
		bcR.pool.SetPeerRange(e.Src.ID(), msg.Base, msg.Height)
		bcR.pool.SetPeerRange(src.ID(), msg.Base, msg.Height)
	case *bcproto.NoBlockResponse:
		bcR.Logger.Debug("Peer does not have requested block", "peer", e.Src, "height", msg.Height)
		bcR.Logger.Debug("Peer does not have requested block", "peer", src, "height", msg.Height)
	default:
		bcR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
	}
@@ -268,10 +285,13 @@ func (bcR *Reactor) poolRoutine(stateSynced bool) {
			if peer == nil {
				continue
			}
			queued := peer.TrySend(p2p.Envelope{
				ChannelID: BlocksyncChannel,
				Message: &bcproto.BlockRequest{Height: request.Height},
			})
			msgBytes, err := EncodeMsg(&bcproto.BlockRequest{Height: request.Height})
			if err != nil {
				bcR.Logger.Error("could not convert msg to proto", "err", err)
				continue
			}

			queued := peer.TrySend(BlocksyncChannel, msgBytes)
			if !queued {
				bcR.Logger.Debug("Send queue is full, drop block request", "peer", peer.ID(), "height", request.Height)
			}
@@ -386,7 +406,7 @@ FOR_LOOP:

		// TODO: same thing for app - but we would need a way to
		// get the hash without persisting the state
-		state, err = bcR.blockExec.ApplyBlock(state, firstID, first)
+		state, _, err = bcR.blockExec.ApplyBlock(state, firstID, first)
		if err != nil {
			// TODO This is bad, are we zombie?
			panic(fmt.Sprintf("Failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
@@ -410,9 +430,13 @@ FOR_LOOP:

// BroadcastStatusRequest broadcasts `BlockStore` base and height.
func (bcR *Reactor) BroadcastStatusRequest() error {
	bcR.Switch.NewBroadcast(p2p.Envelope{
		ChannelID: BlocksyncChannel,
		Message: &bcproto.StatusRequest{},
	})
	bm, err := EncodeMsg(&bcproto.StatusRequest{})
	if err != nil {
		bcR.Logger.Error("could not convert msg to proto", "err", err)
		return fmt.Errorf("could not convert msg to proto: %w", err)
	}

	bcR.Switch.Broadcast(BlocksyncChannel, bm)

	return nil
}
@@ -15,7 +15,6 @@ import (

	abci "github.com/tendermint/tendermint/abci/types"
	cfg "github.com/tendermint/tendermint/config"
-	"github.com/tendermint/tendermint/internal/test"
	"github.com/tendermint/tendermint/libs/log"
	mpmocks "github.com/tendermint/tendermint/mempool/mocks"
	"github.com/tendermint/tendermint/p2p"
@@ -43,7 +42,7 @@ func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.G

	return &types.GenesisDoc{
		GenesisTime: tmtime.Now(),
-		ChainID: test.DefaultTestChainID,
+		ChainID: config.ChainID(),
		Validators: validators,
	}, privValidators
}
@@ -104,7 +103,7 @@ func newReactor(
		DiscardABCIResponses: false,
	})
	blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(),
-		mp, sm.EmptyEvidencePool{}, blockStore)
+		mp, sm.EmptyEvidencePool{})
	if err = stateStore.Save(state); err != nil {
		panic(err)
	}
@@ -137,7 +136,7 @@ func newReactor(
	require.NoError(t, err)
	blockID := types.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()}

-	state, err = blockExec.ApplyBlock(state, blockID, thisBlock)
+	state, _, err = blockExec.ApplyBlock(state, blockID, thisBlock)
	if err != nil {
		panic(fmt.Errorf("error apply block: %w", err))
	}
@@ -146,13 +145,13 @@ func newReactor(
	}

	bcReactor := NewReactor(state.Copy(), blockExec, blockStore, fastSync)
-	bcReactor.SetLogger(logger.With("module", "blocksync"))
+	bcReactor.SetLogger(logger.With("module", "blockchain"))

	return ReactorPair{bcReactor, proxyApp}
}

func TestNoBlockResponse(t *testing.T) {
-	config = test.ResetTestRoot("blocksync_reactor_test")
+	config = cfg.ResetTestRoot("blockchain_reactor_test")
	defer os.RemoveAll(config.RootDir)
	genDoc, privVals := randGenesisDoc(1, false, 30)

@@ -164,7 +163,7 @@ func TestNoBlockResponse(t *testing.T) {
	reactorPairs[1] = newReactor(t, log.TestingLogger(), genDoc, privVals, 0)

	p2p.MakeConnectedSwitches(config.P2P, 2, func(i int, s *p2p.Switch) *p2p.Switch {
-		s.AddReactor("BLOCKSYNC", reactorPairs[i].reactor)
+		s.AddReactor("BLOCKCHAIN", reactorPairs[i].reactor)
		return s

	}, p2p.Connect2Switches)
@@ -214,7 +213,7 @@ func TestNoBlockResponse(t *testing.T) {
// Alternatively we could actually dial a TCP conn but
// that seems extreme.
func TestBadBlockStopsPeer(t *testing.T) {
-	config = test.ResetTestRoot("blocksync_reactor_test")
+	config = cfg.ResetTestRoot("blockchain_reactor_test")
	defer os.RemoveAll(config.RootDir)
	genDoc, privVals := randGenesisDoc(1, false, 30)

@@ -239,7 +238,7 @@ func TestBadBlockStopsPeer(t *testing.T) {
	reactorPairs[3] = newReactor(t, log.TestingLogger(), genDoc, privVals, 0)

	switches := p2p.MakeConnectedSwitches(config.P2P, 4, func(i int, s *p2p.Switch) *p2p.Switch {
-		s.AddReactor("BLOCKSYNC", reactorPairs[i].reactor)
+		s.AddReactor("BLOCKCHAIN", reactorPairs[i].reactor)
		return s

	}, p2p.Connect2Switches)
@@ -278,7 +277,7 @@ func TestBadBlockStopsPeer(t *testing.T) {
	reactorPairs = append(reactorPairs, lastReactorPair)

	switches = append(switches, p2p.MakeConnectedSwitches(config.P2P, 1, func(i int, s *p2p.Switch) *p2p.Switch {
-		s.AddReactor("BLOCKSYNC", reactorPairs[len(reactorPairs)-1].reactor)
+		s.AddReactor("BLOCKCHAIN", reactorPairs[len(reactorPairs)-1].reactor)
		return s

	}, p2p.Connect2Switches)...)
@@ -57,18 +57,12 @@ want to use this command.
		return
	}

-	state, err := ss.Load()
-	if err != nil {
-		fmt.Println(reindexFailed, err)
-		return
-	}
-
	if err := checkValidHeight(bs); err != nil {
		fmt.Println(reindexFailed, err)
		return
	}

-	bi, ti, err := loadEventSinks(config, state.ChainID)
+	bi, ti, err := loadEventSinks(config)
	if err != nil {
		fmt.Println(reindexFailed, err)
		return
@@ -100,7 +94,7 @@ func init() {
	ReIndexEventCmd.Flags().Int64Var(&endHeight, "end-height", 0, "the block height would like to finish for re-index")
}

- func loadEventSinks(cfg *tmcfg.Config, chainID string) (indexer.BlockIndexer, txindex.TxIndexer, error) {
+ func loadEventSinks(cfg *tmcfg.Config) (indexer.BlockIndexer, txindex.TxIndexer, error) {
	switch strings.ToLower(cfg.TxIndex.Indexer) {
	case "null":
		return nil, nil, errors.New("found null event sink, please check the tx-index section in the config.toml")
@@ -109,7 +103,7 @@ func loadEventSinks(cfg *tmcfg.Config, chainID string) (indexer.BlockIndexer, tx
		if conn == "" {
			return nil, nil, errors.New("the psql connection settings cannot be empty")
		}
-		es, err := psql.NewEventSink(conn, chainID)
+		es, err := psql.NewEventSink(conn, cfg.ChainID())
		if err != nil {
			return nil, nil, err
		}
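A hypothetical invocation of the command this file implements, under two assumptions: a `--start-height` flag is registered alongside the `--end-height` flag shown above, and a non-null event sink (kv or psql) is configured in the `tx-index` section of `config.toml`. Per the changelog text earlier in this compare, the command cannot be used when `discard_abci_responses` is enabled.

```sh
# Sketch only: --end-height comes from the flag registration visible in this diff;
# --start-height is assumed to be its counterpart.
tendermint reindex-event --start-height 1 --end-height 100
```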
@@ -13,7 +13,6 @@ import (

	abcitypes "github.com/tendermint/tendermint/abci/types"
	tmcfg "github.com/tendermint/tendermint/config"
-	"github.com/tendermint/tendermint/internal/test"
	prototmstate "github.com/tendermint/tendermint/proto/tendermint/state"
	blockmocks "github.com/tendermint/tendermint/state/indexer/mocks"
	"github.com/tendermint/tendermint/state/mocks"
@@ -99,7 +98,7 @@ func TestLoadEventSink(t *testing.T) {
	cfg := tmcfg.TestConfig()
	cfg.TxIndex.Indexer = tc.sinks
	cfg.TxIndex.PsqlConn = tc.connURL
-	_, _, err := loadEventSinks(cfg, test.DefaultTestChainID)
+	_, _, err := loadEventSinks(cfg)
	if tc.loadErr {
		require.Error(t, err, idx)
	} else {
@@ -14,12 +14,6 @@ import (
	"github.com/tendermint/tendermint/store"
)

- var removeBlock bool = false
-
- func init() {
- 	RollbackStateCmd.Flags().BoolVar(&removeBlock, "hard", false, "remove last block as well as state")
- }
-
var RollbackStateCmd = &cobra.Command{
	Use: "rollback",
	Short: "rollback tendermint state by one height",
@@ -27,23 +21,17 @@ var RollbackStateCmd = &cobra.Command{
A state rollback is performed to recover from an incorrect application state transition,
when Tendermint has persisted an incorrect app hash and is thus unable to make
progress. Rollback overwrites a state at height n with the state at height n - 1.
- The application should also roll back to height n - 1. If the --hard flag is not used,
- no blocks will be removed so upon restarting Tendermint the transactions in block n will be
- re-executed against the application. Using --hard will also remove block n. This can
- be done multiple times.
+ The application should also roll back to height n - 1. No blocks are removed, so upon
+ restarting Tendermint the transactions in block n will be re-executed against the
+ application.
`,
	RunE: func(cmd *cobra.Command, args []string) error {
-		height, hash, err := RollbackState(config, removeBlock)
+		height, hash, err := RollbackState(config)
		if err != nil {
			return fmt.Errorf("failed to rollback state: %w", err)
		}

-		if removeBlock {
-			fmt.Printf("Rolled back both state and block to height %d and hash %X\n", height, hash)
-		} else {
-			fmt.Printf("Rolled back state to height %d and hash %X\n", height, hash)
-		}
-
+		fmt.Printf("Rolled back state to height %d and hash %v", height, hash)
		return nil
	},
}
@@ -51,7 +39,7 @@ be done multiple times.
// RollbackState takes the state at the current height n and overwrites it with the state
// at height n - 1. Note state here refers to tendermint state not application state.
// Returns the latest state height and app hash alongside an error if there was one.
- func RollbackState(config *cfg.Config, removeBlock bool) (int64, []byte, error) {
+ func RollbackState(config *cfg.Config) (int64, []byte, error) {
	// use the parsed config to load the block and state store
	blockStore, stateStore, err := loadStateAndBlockStore(config)
	if err != nil {
@@ -63,7 +51,7 @@ func RollbackState(config *cfg.Config, removeBlock bool) (int64, []byte, error)
	}()

	// rollback the last state
-	return state.Rollback(blockStore, stateStore, removeBlock)
+	return state.Rollback(blockStore, stateStore)
}

func loadStateAndBlockStore(config *cfg.Config) (*store.BlockStore, state.Store, error) {
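A hypothetical invocation of the two behaviours described in this hunk, assuming the default binary name and home directory: without `--hard` only the tendermint state at height n is overwritten with the state at height n - 1, while the variant of the command that registers the `--hard` flag also removes block n and can be run repeatedly. In both cases the application must roll back to the same height.

```sh
# Roll back state only.
tendermint rollback

# Variant with the --hard flag from this diff: also remove the last block.
tendermint rollback --hard
```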
@@ -1,7 +1,6 @@
package commands

import (
-	"encoding/json"
	"fmt"

	"github.com/spf13/cobra"
@@ -14,30 +13,6 @@ var VersionCmd = &cobra.Command{
	Use: "version",
	Short: "Show version info",
	Run: func(cmd *cobra.Command, args []string) {
-		tmVersion := version.TMCoreSemVer
-		if version.TMGitCommitHash != "" {
-			tmVersion += "+" + version.TMGitCommitHash
-		}
-
-		if verbose {
-			values, _ := json.MarshalIndent(struct {
-				Tendermint string `json:"tendermint"`
-				ABCI string `json:"abci"`
-				BlockProtocol uint64 `json:"block_protocol"`
-				P2PProtocol uint64 `json:"p2p_protocol"`
-			}{
-				Tendermint: tmVersion,
-				ABCI: version.ABCISemVer,
-				BlockProtocol: version.BlockProtocol,
-				P2PProtocol: version.P2PProtocol,
-			}, "", " ")
-			fmt.Println(string(values))
-		} else {
-			fmt.Println(tmVersion)
-		}
+		fmt.Println(version.TMCoreSemVer)
	},
}
-
- func init() {
- 	VersionCmd.Flags().BoolVarP(&verbose, "verbose", "v", false, "Show protocol and library versions")
- }
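For reference, a hypothetical run of the two variants of this command; the JSON field names follow the struct tags in the hunk above, but the exact values are placeholders, and the `--verbose`/`-v` flag exists only on the side of the diff that registers it.

```sh
# Plain output: just the core semantic version.
tendermint version

# Verbose variant; expected shape of the output, per the struct tags above:
tendermint version --verbose
# {
#   "tendermint": "<TMCoreSemVer, with +<hash> appended when TMGitCommitHash is set>",
#   "abci": "<ABCISemVer>",
#   "block_protocol": <BlockProtocol>,
#   "p2p_protocol": <P2PProtocol>
# }
```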
102
config/config.go
102
config/config.go
@@ -7,10 +7,7 @@ import (
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"time"
|
||||
|
||||
"github.com/tendermint/tendermint/version"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -31,19 +28,6 @@ const (
|
||||
// Default is v0.
|
||||
MempoolV0 = "v0"
|
||||
MempoolV1 = "v1"
|
||||
|
||||
DefaultTendermintDir = ".tendermint"
|
||||
DefaultConfigDir = "config"
|
||||
DefaultDataDir = "data"
|
||||
|
||||
DefaultConfigFileName = "config.toml"
|
||||
DefaultGenesisJSONName = "genesis.json"
|
||||
|
||||
DefaultPrivValKeyName = "priv_validator_key.json"
|
||||
DefaultPrivValStateName = "priv_validator_state.json"
|
||||
|
||||
DefaultNodeKeyName = "node_key.json"
|
||||
DefaultAddrBookName = "addrbook.json"
|
||||
)
|
||||
|
||||
// NOTE: Most of the structs & relevant comments + the
|
||||
@@ -53,19 +37,29 @@ const (
|
||||
// config/toml.go
|
||||
// NOTE: libs/cli must know to look in the config dir!
|
||||
var (
|
||||
defaultConfigFilePath = filepath.Join(DefaultConfigDir, DefaultConfigFileName)
|
||||
defaultGenesisJSONPath = filepath.Join(DefaultConfigDir, DefaultGenesisJSONName)
|
||||
defaultPrivValKeyPath = filepath.Join(DefaultConfigDir, DefaultPrivValKeyName)
|
||||
defaultPrivValStatePath = filepath.Join(DefaultDataDir, DefaultPrivValStateName)
|
||||
DefaultTendermintDir = ".tendermint"
|
||||
defaultConfigDir = "config"
|
||||
defaultDataDir = "data"
|
||||
|
||||
defaultNodeKeyPath = filepath.Join(DefaultConfigDir, DefaultNodeKeyName)
|
||||
defaultAddrBookPath = filepath.Join(DefaultConfigDir, DefaultAddrBookName)
|
||||
defaultConfigFileName = "config.toml"
|
||||
defaultGenesisJSONName = "genesis.json"
|
||||
|
||||
defaultPrivValKeyName = "priv_validator_key.json"
|
||||
defaultPrivValStateName = "priv_validator_state.json"
|
||||
|
||||
defaultNodeKeyName = "node_key.json"
|
||||
defaultAddrBookName = "addrbook.json"
|
||||
|
||||
defaultConfigFilePath = filepath.Join(defaultConfigDir, defaultConfigFileName)
|
||||
defaultGenesisJSONPath = filepath.Join(defaultConfigDir, defaultGenesisJSONName)
|
||||
defaultPrivValKeyPath = filepath.Join(defaultConfigDir, defaultPrivValKeyName)
|
||||
defaultPrivValStatePath = filepath.Join(defaultDataDir, defaultPrivValStateName)
|
||||
|
||||
defaultNodeKeyPath = filepath.Join(defaultConfigDir, defaultNodeKeyName)
|
||||
defaultAddrBookPath = filepath.Join(defaultConfigDir, defaultAddrBookName)
|
||||
|
||||
minSubscriptionBufferSize = 100
|
||||
defaultSubscriptionBufferSize = 200
|
||||
|
||||
// taken from https://semver.org/
|
||||
semverRegexp = regexp.MustCompile(`^(?P<major>0|[1-9]\d*)\.(?P<minor>0|[1-9]\d*)\.(?P<patch>0|[1-9]\d*)(?:-(?P<prerelease>(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+(?P<buildmetadata>[0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`)
|
||||
)
|
||||
|
||||
// Config defines the top level configuration for a Tendermint node
|
||||
@@ -74,15 +68,18 @@ type Config struct {
|
||||
BaseConfig `mapstructure:",squash"`
|
||||
|
||||
// Options for services
|
||||
RPC *RPCConfig `mapstructure:"rpc"`
|
||||
P2P *P2PConfig `mapstructure:"p2p"`
|
||||
Mempool *MempoolConfig `mapstructure:"mempool"`
|
||||
StateSync *StateSyncConfig `mapstructure:"statesync"`
|
||||
BlockSync *BlockSyncConfig `mapstructure:"blocksync"`
|
||||
Consensus *ConsensusConfig `mapstructure:"consensus"`
|
||||
Storage *StorageConfig `mapstructure:"storage"`
|
||||
TxIndex *TxIndexConfig `mapstructure:"tx_index"`
|
||||
Instrumentation *InstrumentationConfig `mapstructure:"instrumentation"`
|
||||
RPC *RPCConfig `mapstructure:"rpc"`
|
||||
P2P *P2PConfig `mapstructure:"p2p"`
|
||||
Mempool *MempoolConfig `mapstructure:"mempool"`
|
||||
StateSync *StateSyncConfig `mapstructure:"statesync"`
|
||||
BlockSync *BlockSyncConfig `mapstructure:"blocksync"`
|
||||
//TODO(williambanfield): remove this field once v0.37 is released.
|
||||
// https://github.com/tendermint/tendermint/issues/9279
|
||||
DeprecatedFastSyncConfig map[interface{}]interface{} `mapstructure:"fastsync"`
|
||||
Consensus *ConsensusConfig `mapstructure:"consensus"`
|
||||
Storage *StorageConfig `mapstructure:"storage"`
|
||||
TxIndex *TxIndexConfig `mapstructure:"tx_index"`
|
||||
Instrumentation *InstrumentationConfig `mapstructure:"instrumentation"`
|
||||
}
|
||||
|
||||
// DefaultConfig returns a default configuration for a Tendermint node
|
||||
@@ -157,9 +154,14 @@ func (cfg *Config) ValidateBasic() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// CheckDeprecated returns any deprecation warnings. These are printed to the operator on startup
|
||||
func (cfg *Config) CheckDeprecated() []string {
|
||||
var warnings []string
|
||||
if cfg.DeprecatedFastSyncConfig != nil {
|
||||
warnings = append(warnings, "[fastsync] table detected. This section has been renamed to [blocksync]. The values in this deprecated section will be disregarded.")
|
||||
}
|
||||
if cfg.BaseConfig.DeprecatedFastSyncMode != nil {
|
||||
warnings = append(warnings, "fast_sync key detected. This key has been renamed to block_sync. The value of this deprecated key will be disregarded.")
|
||||
}
|
||||
return warnings
|
||||
}

@@ -168,10 +170,8 @@ func (cfg *Config) CheckDeprecated() []string {

// BaseConfig defines the base configuration for a Tendermint node
type BaseConfig struct { //nolint: maligned

    // The version of the Tendermint binary that created
    // or last modified the config file
    Version string `mapstructure:"version"`
    // chainID is unexposed and immutable but here for convenience
    chainID string

    // The root directory for all data.
    // This should be set in viper so it can unmarshal into this struct
@@ -189,6 +189,10 @@ type BaseConfig struct { //nolint: maligned
    // and verifying their commits
    BlockSyncMode bool `mapstructure:"block_sync"`

    //TODO(williambanfield): remove this field once v0.37 is released.
    // https://github.com/tendermint/tendermint/issues/9279
    DeprecatedFastSyncMode interface{} `mapstructure:"fast_sync"`

    // Database backend: goleveldb | cleveldb | boltdb | rocksdb
    // * goleveldb (github.com/syndtr/goleveldb - most popular implementation)
    //   - pure go
@@ -246,7 +250,6 @@ type BaseConfig struct { //nolint: maligned
// DefaultBaseConfig returns a default base configuration for a Tendermint node
func DefaultBaseConfig() BaseConfig {
    return BaseConfig{
        Version: version.TMCoreSemVer,
        Genesis: defaultGenesisJSONPath,
        PrivValidatorKey: defaultPrivValKeyPath,
        PrivValidatorState: defaultPrivValStatePath,
@@ -259,19 +262,24 @@ func DefaultBaseConfig() BaseConfig {
        BlockSyncMode: true,
        FilterPeers: false,
        DBBackend: "goleveldb",
        DBPath: DefaultDataDir,
        DBPath: "data",
    }
}

// TestBaseConfig returns a base configuration for testing a Tendermint node
func TestBaseConfig() BaseConfig {
    cfg := DefaultBaseConfig()
    cfg.chainID = "tendermint_test"
    cfg.ProxyApp = "kvstore"
    cfg.BlockSyncMode = false
    cfg.DBBackend = "memdb"
    return cfg
}

func (cfg BaseConfig) ChainID() string {
    return cfg.chainID
}

// GenesisFile returns the full path to the genesis.json file
func (cfg BaseConfig) GenesisFile() string {
    return rootify(cfg.Genesis, cfg.RootDir)
@@ -300,12 +308,6 @@ func (cfg BaseConfig) DBDir() string {
// ValidateBasic performs basic validation (checking param bounds, etc.) and
// returns an error if any check fails.
func (cfg BaseConfig) ValidateBasic() error {
    // version on old config files aren't set so we can't expect it
    // always to exist
    if cfg.Version != "" && !semverRegexp.MatchString(cfg.Version) {
        return fmt.Errorf("invalid version string: %s", cfg.Version)
    }

    switch cfg.LogFormat {
    case LogFormatPlain, LogFormatJSON:
    default:
@@ -511,7 +513,7 @@ func (cfg RPCConfig) KeyFile() string {
    if filepath.IsAbs(path) {
        return path
    }
    return rootify(filepath.Join(DefaultConfigDir, path), cfg.RootDir)
    return rootify(filepath.Join(defaultConfigDir, path), cfg.RootDir)
}

func (cfg RPCConfig) CertFile() string {
@@ -519,7 +521,7 @@ func (cfg RPCConfig) CertFile() string {
    if filepath.IsAbs(path) {
        return path
    }
    return rootify(filepath.Join(DefaultConfigDir, path), cfg.RootDir)
    return rootify(filepath.Join(defaultConfigDir, path), cfg.RootDir)
}

func (cfg RPCConfig) IsTLSEnabled() bool {
@@ -968,7 +970,7 @@ type ConsensusConfig struct {
// DefaultConsensusConfig returns a default configuration for the consensus service
func DefaultConsensusConfig() *ConsensusConfig {
    return &ConsensusConfig{
        WalPath: filepath.Join(DefaultDataDir, "cs.wal", "wal"),
        WalPath: filepath.Join(defaultDataDir, "cs.wal", "wal"),
        TimeoutPropose: 3000 * time.Millisecond,
        TimeoutProposeDelta: 500 * time.Millisecond,
        TimeoutPrevote: 1000 * time.Millisecond,

@@ -1,4 +1,4 @@
package config_test
package config

import (
    "reflect"
@@ -7,15 +7,13 @@ import (

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"

    "github.com/tendermint/tendermint/config"
)

func TestDefaultConfig(t *testing.T) {
    assert := assert.New(t)

    // set up some defaults
    cfg := config.DefaultConfig()
    cfg := DefaultConfig()
    assert.NotNil(cfg.P2P)
    assert.NotNil(cfg.Mempool)
    assert.NotNil(cfg.Consensus)
@@ -33,7 +31,7 @@ func TestDefaultConfig(t *testing.T) {
}

func TestConfigValidateBasic(t *testing.T) {
    cfg := config.DefaultConfig()
    cfg := DefaultConfig()
    assert.NoError(t, cfg.ValidateBasic())

    // tamper with timeout_propose
@@ -43,7 +41,7 @@ func TestConfigValidateBasic(t *testing.T) {

func TestTLSConfiguration(t *testing.T) {
    assert := assert.New(t)
    cfg := config.DefaultConfig()
    cfg := DefaultConfig()
    cfg.SetRoot("/home/user")

    cfg.RPC.TLSCertFile = "file.crt"
@@ -58,7 +56,7 @@ func TestTLSConfiguration(t *testing.T) {
}

func TestBaseConfigValidateBasic(t *testing.T) {
    cfg := config.TestBaseConfig()
    cfg := TestBaseConfig()
    assert.NoError(t, cfg.ValidateBasic())

    // tamper with log format
@@ -67,7 +65,7 @@ func TestBaseConfigValidateBasic(t *testing.T) {
}

func TestRPCConfigValidateBasic(t *testing.T) {
    cfg := config.TestRPCConfig()
    cfg := TestRPCConfig()
    assert.NoError(t, cfg.ValidateBasic())

    fieldsToTest := []string{
@@ -88,7 +86,7 @@ func TestRPCConfigValidateBasic(t *testing.T) {
}

func TestP2PConfigValidateBasic(t *testing.T) {
    cfg := config.TestP2PConfig()
    cfg := TestP2PConfig()
    assert.NoError(t, cfg.ValidateBasic())

    fieldsToTest := []string{
@@ -108,7 +106,7 @@ func TestP2PConfigValidateBasic(t *testing.T) {
}

func TestMempoolConfigValidateBasic(t *testing.T) {
    cfg := config.TestMempoolConfig()
    cfg := TestMempoolConfig()
    assert.NoError(t, cfg.ValidateBasic())

    fieldsToTest := []string{
@@ -126,12 +124,12 @@ func TestMempoolConfigValidateBasic(t *testing.T) {
}

func TestStateSyncConfigValidateBasic(t *testing.T) {
    cfg := config.TestStateSyncConfig()
    cfg := TestStateSyncConfig()
    require.NoError(t, cfg.ValidateBasic())
}

func TestBlockSyncConfigValidateBasic(t *testing.T) {
    cfg := config.TestBlockSyncConfig()
    cfg := TestBlockSyncConfig()
    assert.NoError(t, cfg.ValidateBasic())

    // tamper with version
@@ -145,33 +143,33 @@ func TestBlockSyncConfigValidateBasic(t *testing.T) {
func TestConsensusConfig_ValidateBasic(t *testing.T) {
    //nolint: lll
    testcases := map[string]struct {
        modify func(*config.ConsensusConfig)
        modify func(*ConsensusConfig)
        expectErr bool
    }{
        "TimeoutPropose": {func(c *config.ConsensusConfig) { c.TimeoutPropose = time.Second }, false},
        "TimeoutPropose negative": {func(c *config.ConsensusConfig) { c.TimeoutPropose = -1 }, true},
        "TimeoutProposeDelta": {func(c *config.ConsensusConfig) { c.TimeoutProposeDelta = time.Second }, false},
        "TimeoutProposeDelta negative": {func(c *config.ConsensusConfig) { c.TimeoutProposeDelta = -1 }, true},
        "TimeoutPrevote": {func(c *config.ConsensusConfig) { c.TimeoutPrevote = time.Second }, false},
        "TimeoutPrevote negative": {func(c *config.ConsensusConfig) { c.TimeoutPrevote = -1 }, true},
        "TimeoutPrevoteDelta": {func(c *config.ConsensusConfig) { c.TimeoutPrevoteDelta = time.Second }, false},
        "TimeoutPrevoteDelta negative": {func(c *config.ConsensusConfig) { c.TimeoutPrevoteDelta = -1 }, true},
        "TimeoutPrecommit": {func(c *config.ConsensusConfig) { c.TimeoutPrecommit = time.Second }, false},
        "TimeoutPrecommit negative": {func(c *config.ConsensusConfig) { c.TimeoutPrecommit = -1 }, true},
        "TimeoutPrecommitDelta": {func(c *config.ConsensusConfig) { c.TimeoutPrecommitDelta = time.Second }, false},
        "TimeoutPrecommitDelta negative": {func(c *config.ConsensusConfig) { c.TimeoutPrecommitDelta = -1 }, true},
        "TimeoutCommit": {func(c *config.ConsensusConfig) { c.TimeoutCommit = time.Second }, false},
        "TimeoutCommit negative": {func(c *config.ConsensusConfig) { c.TimeoutCommit = -1 }, true},
        "PeerGossipSleepDuration": {func(c *config.ConsensusConfig) { c.PeerGossipSleepDuration = time.Second }, false},
        "PeerGossipSleepDuration negative": {func(c *config.ConsensusConfig) { c.PeerGossipSleepDuration = -1 }, true},
        "PeerQueryMaj23SleepDuration": {func(c *config.ConsensusConfig) { c.PeerQueryMaj23SleepDuration = time.Second }, false},
        "PeerQueryMaj23SleepDuration negative": {func(c *config.ConsensusConfig) { c.PeerQueryMaj23SleepDuration = -1 }, true},
        "DoubleSignCheckHeight negative": {func(c *config.ConsensusConfig) { c.DoubleSignCheckHeight = -1 }, true},
        "TimeoutPropose": {func(c *ConsensusConfig) { c.TimeoutPropose = time.Second }, false},
        "TimeoutPropose negative": {func(c *ConsensusConfig) { c.TimeoutPropose = -1 }, true},
        "TimeoutProposeDelta": {func(c *ConsensusConfig) { c.TimeoutProposeDelta = time.Second }, false},
        "TimeoutProposeDelta negative": {func(c *ConsensusConfig) { c.TimeoutProposeDelta = -1 }, true},
        "TimeoutPrevote": {func(c *ConsensusConfig) { c.TimeoutPrevote = time.Second }, false},
        "TimeoutPrevote negative": {func(c *ConsensusConfig) { c.TimeoutPrevote = -1 }, true},
        "TimeoutPrevoteDelta": {func(c *ConsensusConfig) { c.TimeoutPrevoteDelta = time.Second }, false},
        "TimeoutPrevoteDelta negative": {func(c *ConsensusConfig) { c.TimeoutPrevoteDelta = -1 }, true},
        "TimeoutPrecommit": {func(c *ConsensusConfig) { c.TimeoutPrecommit = time.Second }, false},
        "TimeoutPrecommit negative": {func(c *ConsensusConfig) { c.TimeoutPrecommit = -1 }, true},
        "TimeoutPrecommitDelta": {func(c *ConsensusConfig) { c.TimeoutPrecommitDelta = time.Second }, false},
        "TimeoutPrecommitDelta negative": {func(c *ConsensusConfig) { c.TimeoutPrecommitDelta = -1 }, true},
        "TimeoutCommit": {func(c *ConsensusConfig) { c.TimeoutCommit = time.Second }, false},
        "TimeoutCommit negative": {func(c *ConsensusConfig) { c.TimeoutCommit = -1 }, true},
        "PeerGossipSleepDuration": {func(c *ConsensusConfig) { c.PeerGossipSleepDuration = time.Second }, false},
        "PeerGossipSleepDuration negative": {func(c *ConsensusConfig) { c.PeerGossipSleepDuration = -1 }, true},
        "PeerQueryMaj23SleepDuration": {func(c *ConsensusConfig) { c.PeerQueryMaj23SleepDuration = time.Second }, false},
        "PeerQueryMaj23SleepDuration negative": {func(c *ConsensusConfig) { c.PeerQueryMaj23SleepDuration = -1 }, true},
        "DoubleSignCheckHeight negative": {func(c *ConsensusConfig) { c.DoubleSignCheckHeight = -1 }, true},
    }
    for desc, tc := range testcases {
        tc := tc // appease linter
        t.Run(desc, func(t *testing.T) {
            cfg := config.DefaultConsensusConfig()
            cfg := DefaultConsensusConfig()
            tc.modify(cfg)

            err := cfg.ValidateBasic()
@@ -185,7 +183,7 @@ func TestConsensusConfig_ValidateBasic(t *testing.T) {
}

func TestInstrumentationConfigValidateBasic(t *testing.T) {
    cfg := config.TestInstrumentationConfig()
    cfg := TestInstrumentationConfig()
    assert.NoError(t, cfg.ValidateBasic())

    // tamper with maximum open connections

109
config/toml.go
@@ -2,6 +2,8 @@ package config

import (
    "bytes"
    "fmt"
    "os"
    "path/filepath"
    "strings"
    "text/template"
@@ -32,10 +34,10 @@ func EnsureRoot(rootDir string) {
    if err := tmos.EnsureDir(rootDir, DefaultDirPerm); err != nil {
        panic(err.Error())
    }
    if err := tmos.EnsureDir(filepath.Join(rootDir, DefaultConfigDir), DefaultDirPerm); err != nil {
    if err := tmos.EnsureDir(filepath.Join(rootDir, defaultConfigDir), DefaultDirPerm); err != nil {
        panic(err.Error())
    }
    if err := tmos.EnsureDir(filepath.Join(rootDir, DefaultDataDir), DefaultDirPerm); err != nil {
    if err := tmos.EnsureDir(filepath.Join(rootDir, defaultDataDir), DefaultDirPerm); err != nil {
        panic(err.Error())
    }

@@ -74,10 +76,6 @@ const defaultConfigTemplate = `# This is a TOML config file.
# "$HOME/.tendermint" by default, but could be changed via $TMHOME env variable
# or --home cmd flag.

# The version of the Tendermint binary that created or
# last modified the config file. Do not modify this.
version = "{{ .BaseConfig.Version }}"

#######################################################################
### Main Base Config Options ###
#######################################################################
@@ -487,7 +485,6 @@ peer_query_maj23_sleep_duration = "{{ .Consensus.PeerQueryMaj23SleepDuration }}"
#######################################################
### Storage Configuration Options ###
#######################################################
[storage]

# Set to true to discard ABCI responses from the state store, which can save a
# considerable amount of disk space. Set to false to ensure ABCI responses are
@@ -539,3 +536,101 @@ max_open_connections = {{ .Instrumentation.MaxOpenConnections }}
# Instrumentation namespace
namespace = "{{ .Instrumentation.Namespace }}"
`

/****** these are for test settings ***********/

func ResetTestRoot(testName string) *Config {
    return ResetTestRootWithChainID(testName, "")
}

func ResetTestRootWithChainID(testName string, chainID string) *Config {
    // create a unique, concurrency-safe test directory under os.TempDir()
    rootDir, err := os.MkdirTemp("", fmt.Sprintf("%s-%s_", chainID, testName))
    if err != nil {
        panic(err)
    }
    // ensure config and data subdirs are created
    if err := tmos.EnsureDir(filepath.Join(rootDir, defaultConfigDir), DefaultDirPerm); err != nil {
        panic(err)
    }
    if err := tmos.EnsureDir(filepath.Join(rootDir, defaultDataDir), DefaultDirPerm); err != nil {
        panic(err)
    }

    baseConfig := DefaultBaseConfig()
    configFilePath := filepath.Join(rootDir, defaultConfigFilePath)
    genesisFilePath := filepath.Join(rootDir, baseConfig.Genesis)
    privKeyFilePath := filepath.Join(rootDir, baseConfig.PrivValidatorKey)
    privStateFilePath := filepath.Join(rootDir, baseConfig.PrivValidatorState)

    // Write default config file if missing.
    if !tmos.FileExists(configFilePath) {
        writeDefaultConfigFile(configFilePath)
    }
    if !tmos.FileExists(genesisFilePath) {
        if chainID == "" {
            chainID = "tendermint_test"
        }
        testGenesis := fmt.Sprintf(testGenesisFmt, chainID)
        tmos.MustWriteFile(genesisFilePath, []byte(testGenesis), 0644)
    }
    // we always overwrite the priv val
    tmos.MustWriteFile(privKeyFilePath, []byte(testPrivValidatorKey), 0644)
    tmos.MustWriteFile(privStateFilePath, []byte(testPrivValidatorState), 0644)

    config := TestConfig().SetRoot(rootDir)
    return config
}
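For reference, a hedged sketch of how a test might consume this helper once it lives in the exported `config` package; the test name and assertion are illustrative assumptions, not part of this compare.

```go
package config_test

import (
	"os"
	"testing"

	"github.com/tendermint/tendermint/config"
)

// Illustrative only: each call creates an isolated root under os.TempDir(),
// so parallel tests do not clobber each other's config, genesis, or privval files.
func TestResetTestRootIsIsolated(t *testing.T) {
	cfg := config.ResetTestRoot("reset_test_root_example")
	defer os.RemoveAll(cfg.RootDir)

	if _, err := os.Stat(cfg.GenesisFile()); err != nil {
		t.Fatalf("expected genesis file to exist: %v", err)
	}
}
```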

var testGenesisFmt = `{
  "genesis_time": "2018-10-10T08:20:13.695936996Z",
  "chain_id": "%s",
  "initial_height": "1",
  "consensus_params": {
    "block": {
      "max_bytes": "22020096",
      "max_gas": "-1",
      "time_iota_ms": "10"
    },
    "evidence": {
      "max_age_num_blocks": "100000",
      "max_age_duration": "172800000000000",
      "max_bytes": "1048576"
    },
    "validator": {
      "pub_key_types": [
        "ed25519"
      ]
    },
    "version": {}
  },
  "validators": [
    {
      "pub_key": {
        "type": "tendermint/PubKeyEd25519",
        "value":"AT/+aaL1eB0477Mud9JMm8Sh8BIvOYlPGC9KkIUmFaE="
      },
      "power": "10",
      "name": ""
    }
  ],
  "app_hash": ""
}`

var testPrivValidatorKey = `{
  "address": "A3258DCBF45DCA0DF052981870F2D1441A36D145",
  "pub_key": {
    "type": "tendermint/PubKeyEd25519",
    "value": "AT/+aaL1eB0477Mud9JMm8Sh8BIvOYlPGC9KkIUmFaE="
  },
  "priv_key": {
    "type": "tendermint/PrivKeyEd25519",
    "value": "EVkqJO/jIXp3rkASXfh9YnyToYXRXhBr6g9cQVxPFnQBP/5povV4HTjvsy530kybxKHwEi85iU8YL0qQhSYVoQ=="
  }
}`

var testPrivValidatorState = `{
  "height": "0",
  "round": 0,
  "step": 0
}`
@@ -1,22 +1,20 @@
package config_test
package config

import (
    "os"
    "path/filepath"
    "strings"
    "testing"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"

    "github.com/tendermint/tendermint/config"
    "github.com/tendermint/tendermint/internal/test"
)

func ensureFiles(t *testing.T, rootDir string, files ...string) {
    for _, f := range files {
        p := filepath.Join(rootDir, f)
        p := rootify(rootDir, f)
        _, err := os.Stat(p)
        assert.NoError(t, err, p)
        assert.Nil(t, err, p)
    }
}

@@ -29,13 +27,15 @@ func TestEnsureRoot(t *testing.T) {
    defer os.RemoveAll(tmpDir)

    // create root dir
    config.EnsureRoot(tmpDir)
    EnsureRoot(tmpDir)

    // make sure config is set properly
    data, err := os.ReadFile(filepath.Join(tmpDir, config.DefaultConfigDir, config.DefaultConfigFileName))
    data, err := os.ReadFile(filepath.Join(tmpDir, defaultConfigFilePath))
    require.Nil(err)

    assertValidConfig(t, string(data))
    if !checkConfig(string(data)) {
        t.Fatalf("config file missing some information")
    }

    ensureFiles(t, tmpDir, "data")
}
@@ -43,30 +43,35 @@ func TestEnsureRoot(t *testing.T) {
func TestEnsureTestRoot(t *testing.T) {
    require := require.New(t)

    testName := "ensureTestRoot"

    // create root dir
    cfg := test.ResetTestRoot("ensureTestRoot")
    cfg := ResetTestRoot(testName)
    defer os.RemoveAll(cfg.RootDir)
    rootDir := cfg.RootDir

    // make sure config is set properly
    data, err := os.ReadFile(filepath.Join(rootDir, config.DefaultConfigDir, config.DefaultConfigFileName))
    data, err := os.ReadFile(filepath.Join(rootDir, defaultConfigFilePath))
    require.Nil(err)

    assertValidConfig(t, string(data))
    if !checkConfig(string(data)) {
        t.Fatalf("config file missing some information")
    }

    // TODO: make sure the cfg returned and testconfig are the same!
    baseConfig := config.DefaultBaseConfig()
    ensureFiles(t, rootDir, config.DefaultDataDir, baseConfig.Genesis, baseConfig.PrivValidatorKey, baseConfig.PrivValidatorState)
    baseConfig := DefaultBaseConfig()
    ensureFiles(t, rootDir, defaultDataDir, baseConfig.Genesis, baseConfig.PrivValidatorKey, baseConfig.PrivValidatorState)
}

func assertValidConfig(t *testing.T, configFile string) {
    t.Helper()
func checkConfig(configFile string) bool {
    var valid bool

    // list of words we expect in the config
    var elems = []string{
        "moniker",
        "seeds",
        "proxy_app",
        "block_sync",
        "fast_sync",
        "create_empty_blocks",
        "peer",
        "timeout",
@@ -79,6 +84,11 @@ func assertValidConfig(t *testing.T, configFile string) {
        "genesis",
    }
    for _, e := range elems {
        assert.Contains(t, configFile, e)
        if !strings.Contains(configFile, e) {
            valid = false
        } else {
            valid = true
        }
    }
    return valid
}
@@ -1,3 +1,3 @@
# Consensus

See the [consensus spec](https://github.com/tendermint/tendermint/tree/main/spec/consensus) for more information.
See the [consensus spec](https://github.com/tendermint/tendermint/tree/main/spec/consensus) and the [reactor consensus spec](https://github.com/tendermint/tendermint/tree/main/spec/reactors/consensus) for more information.
@@ -26,7 +26,6 @@ import (
    mempoolv0 "github.com/tendermint/tendermint/mempool/v0"
    mempoolv1 "github.com/tendermint/tendermint/mempool/v1"
    "github.com/tendermint/tendermint/p2p"
    tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus"
    tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
    sm "github.com/tendermint/tendermint/state"
    "github.com/tendermint/tendermint/store"
@@ -101,7 +100,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
    evpool.SetLogger(logger.With("module", "evidence"))

    // Make State
    blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool, blockStore)
    blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool)
    cs := NewState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool)
    cs.SetLogger(cs.Logger)
    // set private validator
@@ -166,16 +165,10 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
    for i, peer := range peerList {
        if i < len(peerList)/2 {
            bcs.Logger.Info("Signed and pushed vote", "vote", prevote1, "peer", peer)
            peer.Send(p2p.Envelope{
                Message: &tmcons.Vote{prevote1.ToProto()},
                ChannelID: VoteChannel,
            })
            peer.Send(VoteChannel, MustEncode(&VoteMessage{prevote1}))
        } else {
            bcs.Logger.Info("Signed and pushed vote", "vote", prevote2, "peer", peer)
            peer.Send(p2p.Envelope{
                Message: &tmcons.Vote{prevote2.ToProto()},
                ChannelID: VoteChannel,
            })
            peer.Send(VoteChannel, MustEncode(&VoteMessage{prevote2}))
        }
    }
} else {
@@ -527,26 +520,18 @@ func sendProposalAndParts(
    parts *types.PartSet,
) {
    // proposal
    peer.Send(p2p.Envelope{
        ChannelID: DataChannel,
        Message: &tmcons.Proposal{Proposal: *proposal.ToProto()},
    })
    msg := &ProposalMessage{Proposal: proposal}
    peer.Send(DataChannel, MustEncode(msg))

    // parts
    for i := 0; i < int(parts.Total()); i++ {
        part := parts.GetPart(i)
        pp, err := part.ToProto()
        if err != nil {
            panic(err) // TODO: wbanfield better error handling
        msg := &BlockPartMessage{
            Height: height, // This tells peer that this part applies to us.
            Round: round, // This tells peer that this part applies to us.
            Part: part,
        }
        peer.Send(p2p.Envelope{
            ChannelID: DataChannel,
            Message: &tmcons.BlockPart{
                Height: height, // This tells peer that this part applies to us.
                Round: round, // This tells peer that this part applies to us.
                Part: *pp,
            },
        })
        peer.Send(DataChannel, MustEncode(msg))
    }

    // votes
@@ -554,14 +539,9 @@ func sendProposalAndParts(
    prevote, _ := cs.signVote(tmproto.PrevoteType, blockHash, parts.Header())
    precommit, _ := cs.signVote(tmproto.PrecommitType, blockHash, parts.Header())
    cs.mtx.Unlock()
    peer.Send(p2p.Envelope{
        ChannelID: VoteChannel,
        Message: &tmcons.Vote{prevote.ToProto()},
    })
    peer.Send(p2p.Envelope{
        ChannelID: VoteChannel,
        Message: &tmcons.Vote{precommit.ToProto()},
    })

    peer.Send(VoteChannel, MustEncode(&VoteMessage{prevote}))
    peer.Send(VoteChannel, MustEncode(&VoteMessage{precommit}))
}

//----------------------------------------
@@ -599,7 +579,7 @@ func (br *ByzantineReactor) AddPeer(peer p2p.Peer) {
func (br *ByzantineReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
    br.reactor.RemovePeer(peer, reason)
}
func (br *ByzantineReactor) Receive(e p2p.Envelope) {
    br.reactor.Receive(e)
func (br *ByzantineReactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {
    br.reactor.Receive(chID, peer, msgBytes)
}
func (br *ByzantineReactor) InitPeer(peer p2p.Peer) p2p.Peer { return peer }

@@ -24,7 +24,6 @@ import (
    abci "github.com/tendermint/tendermint/abci/types"
    cfg "github.com/tendermint/tendermint/config"
    cstypes "github.com/tendermint/tendermint/consensus/types"
    "github.com/tendermint/tendermint/internal/test"
    tmbytes "github.com/tendermint/tendermint/libs/bytes"
    "github.com/tendermint/tendermint/libs/log"
    tmos "github.com/tendermint/tendermint/libs/os"
@@ -64,7 +63,7 @@ func ensureDir(dir string, mode os.FileMode) {
}

func ResetConfig(name string) *cfg.Config {
    return test.ResetTestRoot(name)
    return cfg.ResetTestRoot(name)
}

//-------------------------------------------------------------------------------
@@ -109,7 +108,7 @@ func (vs *validatorStub) signVote(
    BlockID: types.BlockID{Hash: hash, PartSetHeader: header},
}
v := vote.ToProto()
if err := vs.PrivValidator.SignVote(test.DefaultTestChainID, v); err != nil {
if err := vs.PrivValidator.SignVote(config.ChainID(), v); err != nil {
    return nil, fmt.Errorf("sign vote failed: %w", err)
}

@@ -370,7 +369,7 @@ func subscribeToVoter(cs *State, addr []byte) <-chan tmpubsub.Message {
// consensus states

func newState(state sm.State, pv types.PrivValidator, app abci.Application) *State {
    config := test.ResetTestRoot("consensus_state_test")
    config := cfg.ResetTestRoot("consensus_state_test")
    return newStateWithConfig(config, state, pv, app)
}

@@ -439,7 +438,7 @@ func newStateWithConfigAndBlockStore(
    panic(err)
}

blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool, blockStore)
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool)
cs := NewState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool)
cs.SetLogger(log.TestingLogger().With("module", "consensus"))
cs.SetPrivValidator(pv)
@@ -869,7 +868,7 @@ func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.G
    return &types.GenesisDoc{
        GenesisTime: tmtime.Now(),
        InitialHeight: 1,
        ChainID: test.DefaultTestChainID,
        ChainID: config.ChainID(),
        Validators: validators,
    }, privValidators
}
@@ -7,7 +7,6 @@ import (
    "github.com/tendermint/tendermint/libs/log"
    tmrand "github.com/tendermint/tendermint/libs/rand"
    "github.com/tendermint/tendermint/p2p"
    tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus"
    tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
    "github.com/tendermint/tendermint/types"
)
@@ -95,10 +94,7 @@ func invalidDoPrevoteFunc(t *testing.T, height int64, round int32, cs *State, sw
    peers := sw.Peers().List()
    for _, peer := range peers {
        cs.Logger.Info("Sending bad vote", "block", blockHash, "peer", peer)
        peer.Send(p2p.Envelope{
            Message: &tmcons.Vote{precommit.ToProto()},
            ChannelID: VoteChannel,
        })
        peer.Send(VoteChannel, MustEncode(&VoteMessage{precommit}))
    }
}()
}

@@ -179,13 +179,13 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
    Subsystem: MetricsSubsystem,
    Name: "round_voting_power_percent",
    Help: "RoundVotingPowerPercent is the percentage of the total voting power received with a round. The value begins at 0 for each round and approaches 1.0 as additional voting power is observed. The metric is labeled by vote type.",
}, append(labels, "vote_type")).With(labelsAndValues...),
}, labels).With(labelsAndValues...),
LateVotes: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
    Namespace: namespace,
    Subsystem: MetricsSubsystem,
    Name: "late_votes",
    Help: "LateVotes stores the number of votes that were received by this node that correspond to earlier heights and rounds than this node is currently in.",
}, append(labels, "vote_type")).With(labelsAndValues...),
}, labels).With(labelsAndValues...),
}
}

@@ -108,12 +108,12 @@ type Metrics struct {
    // RoundVotingPowerPercent is the percentage of the total voting power received
    // with a round. The value begins at 0 for each round and approaches 1.0 as
    // additional voting power is observed. The metric is labeled by vote type.
    RoundVotingPowerPercent metrics.Gauge `metrics_labels:"vote_type"`
    RoundVotingPowerPercent metrics.Gauge

    // LateVotes stores the number of votes that were received by this node that
    // correspond to earlier heights and rounds than this node is currently
    // in.
    LateVotes metrics.Counter `metrics_labels:"vote_type"`
    LateVotes metrics.Counter
}
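As an aside, a minimal sketch of how a caller records a value on the vote-type-labeled variant of this gauge through the go-kit metrics interface used here; the `"prevote"` label value and the helper name are illustrative assumptions and apply only to the side of this compare that keeps the label.

```go
// Sketch: assumes it lives in the consensus package next to the Metrics type
// and that RoundVotingPowerPercent carries a "vote_type" label.
func recordRoundPower(m *Metrics, fraction float64) {
	m.RoundVotingPowerPercent.With("vote_type", "prevote").Set(fraction)
}
```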

// RecordConsMetrics uses for recording the block related metrics during fast-sync.

@@ -4,6 +4,8 @@ import (
    "errors"
    "fmt"

    "github.com/cosmos/gogoproto/proto"

    cstypes "github.com/tendermint/tendermint/consensus/types"
    "github.com/tendermint/tendermint/libs/bits"
    tmmath "github.com/tendermint/tendermint/libs/math"
@@ -13,9 +15,7 @@ import (
    "github.com/tendermint/tendermint/types"
)

// MsgToProto takes a consensus message type and returns the proto defined consensus message.
//
// TODO: This needs to be removed, but WALToProto depends on this.
// MsgToProto takes a consensus message type and returns the proto defined consensus message
func MsgToProto(msg Message) (*tmcons.Message, error) {
    if msg == nil {
        return nil, errors.New("consensus: message is nil")
@@ -260,6 +260,20 @@ func MsgFromProto(msg *tmcons.Message) (Message, error) {
    return pb, nil
}

// MustEncode takes the reactors msg, makes it proto and marshals it
// this mimics `MustMarshalBinaryBare` in that is panics on error
func MustEncode(msg Message) []byte {
    pb, err := MsgToProto(msg)
    if err != nil {
        panic(err)
    }
    enc, err := proto.Marshal(pb)
    if err != nil {
        panic(err)
    }
    return enc
}
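For orientation, a hedged sketch of the wire round trip this helper enables. `decodeMsg` is the package-internal counterpart that appears later in this compare, and the concrete message values are assumptions for illustration.

```go
// Sketch: encode a consensus message to protobuf bytes and decode it back.
// Assumes it runs inside the consensus package; values are illustrative.
func roundTripExample() {
	original := &HasVoteMessage{Height: 1, Round: 0, Index: 0, Type: tmproto.PrevoteType}

	bz := MustEncode(original) // Message -> tmcons.Message -> protobuf bytes

	decoded, err := decodeMsg(bz) // protobuf bytes -> tmcons.Message -> Message
	if err != nil {
		panic(err)
	}
	_ = decoded.(*HasVoteMessage) // the same concrete type comes back out
}
```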

// WALToProto takes a WAL message and return a proto walMessage and error
func WALToProto(msg WALMessage) (*tmcons.WALMessage, error) {
    var pb tmcons.WALMessage

@@ -7,6 +7,8 @@ import (
    "sync"
    "time"

    "github.com/cosmos/gogoproto/proto"

    cstypes "github.com/tendermint/tendermint/consensus/types"
    "github.com/tendermint/tendermint/libs/bits"
    tmevents "github.com/tendermint/tendermint/libs/events"
@@ -146,7 +148,6 @@ func (conR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
    Priority: 6,
    SendQueueCapacity: 100,
    RecvMessageCapacity: maxMsgSize,
    MessageType: &tmcons.Message{},
},
{
    ID: DataChannel, // maybe split between gossiping current block and catchup stuff
@@ -155,7 +156,6 @@ func (conR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
    SendQueueCapacity: 100,
    RecvBufferCapacity: 50 * 4096,
    RecvMessageCapacity: maxMsgSize,
    MessageType: &tmcons.Message{},
},
{
    ID: VoteChannel,
@@ -163,7 +163,6 @@ func (conR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
    SendQueueCapacity: 100,
    RecvBufferCapacity: 100 * 100,
    RecvMessageCapacity: maxMsgSize,
    MessageType: &tmcons.Message{},
},
{
    ID: VoteSetBitsChannel,
@@ -171,7 +170,6 @@ func (conR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
    SendQueueCapacity: 2,
    RecvBufferCapacity: 1024,
    RecvMessageCapacity: maxMsgSize,
    MessageType: &tmcons.Message{},
},
}
}
@@ -225,44 +223,34 @@ func (conR *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) {
// Peer state updates can happen in parallel, but processing of
// proposals, block parts, and votes are ordered by the receiveRoutine
// NOTE: blocks on consensus state for proposals, block parts, and votes
func (conR *Reactor) Receive(e p2p.Envelope) {
func (conR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
    if !conR.IsRunning() {
        conR.Logger.Debug("Receive", "src", e.Src, "chId", e.ChannelID)
        conR.Logger.Debug("Receive", "src", src, "chId", chID, "bytes", msgBytes)
        return
    }

    if w, ok := e.Message.(p2p.Wrapper); ok {
        var err error
        e.Message, err = w.Wrap()
        if err != nil {
            conR.Logger.Error("Error wrapping message", "src", e.Src, "chId", e.ChannelID, "err", err)
            conR.Switch.StopPeerForError(e.Src, err)
            return
        }
    }

    msg, err := MsgFromProto(e.Message.(*tmcons.Message))
    msg, err := decodeMsg(msgBytes)
    if err != nil {
        conR.Logger.Error("Error decoding message", "src", e.Src, "chId", e.ChannelID, "err", err)
        conR.Switch.StopPeerForError(e.Src, err)
        conR.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err)
        conR.Switch.StopPeerForError(src, err)
        return
    }

    if err = msg.ValidateBasic(); err != nil {
        conR.Logger.Error("Peer sent us invalid msg", "peer", e.Src, "msg", e.Message, "err", err)
        conR.Switch.StopPeerForError(e.Src, err)
        conR.Logger.Error("Peer sent us invalid msg", "peer", src, "msg", msg, "err", err)
        conR.Switch.StopPeerForError(src, err)
        return
    }

    conR.Logger.Debug("Receive", "src", e.Src, "chId", e.ChannelID, "msg", msg)
    conR.Logger.Debug("Receive", "src", src, "chId", chID, "msg", msg)

    // Get peer states
    ps, ok := e.Src.Get(types.PeerStateKey).(*PeerState)
    ps, ok := src.Get(types.PeerStateKey).(*PeerState)
    if !ok {
        panic(fmt.Sprintf("Peer %v has no state", e.Src))
        panic(fmt.Sprintf("Peer %v has no state", src))
    }

    switch e.ChannelID {
    switch chID {
    case StateChannel:
        switch msg := msg.(type) {
        case *NewRoundStepMessage:
@@ -270,8 +258,8 @@ func (conR *Reactor) Receive(e p2p.Envelope) {
            initialHeight := conR.conS.state.InitialHeight
            conR.conS.mtx.Unlock()
            if err = msg.ValidateHeight(initialHeight); err != nil {
                conR.Logger.Error("Peer sent us invalid msg", "peer", e.Src, "msg", msg, "err", err)
                conR.Switch.StopPeerForError(e.Src, err)
                conR.Logger.Error("Peer sent us invalid msg", "peer", src, "msg", msg, "err", err)
                conR.Switch.StopPeerForError(src, err)
                return
            }
            ps.ApplyNewRoundStepMessage(msg)
@@ -290,7 +278,7 @@ func (conR *Reactor) Receive(e p2p.Envelope) {
            // Peer claims to have a maj23 for some BlockID at H,R,S,
            err := votes.SetPeerMaj23(msg.Round, msg.Type, ps.peer.ID(), msg.BlockID)
            if err != nil {
                conR.Switch.StopPeerForError(e.Src, err)
                conR.Switch.StopPeerForError(src, err)
                return
            }
            // Respond with a VoteSetBitsMessage showing which votes we have.
@@ -304,19 +292,13 @@ func (conR *Reactor) Receive(e p2p.Envelope) {
            default:
                panic("Bad VoteSetBitsMessage field Type. Forgot to add a check in ValidateBasic?")
            }
            eMsg := &tmcons.VoteSetBits{
            src.TrySend(VoteSetBitsChannel, MustEncode(&VoteSetBitsMessage{
                Height: msg.Height,
                Round: msg.Round,
                Type: msg.Type,
                BlockID: msg.BlockID.ToProto(),
            }
            if votes := ourVotes.ToProto(); votes != nil {
                eMsg.Votes = *votes
            }
            e.Src.TrySend(p2p.Envelope{
                ChannelID: VoteSetBitsChannel,
                Message: eMsg,
            })
                BlockID: msg.BlockID,
                Votes: ourVotes,
            }))
        default:
            conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
        }
@@ -329,13 +311,13 @@ func (conR *Reactor) Receive(e p2p.Envelope) {
        switch msg := msg.(type) {
        case *ProposalMessage:
            ps.SetHasProposal(msg.Proposal)
            conR.conS.peerMsgQueue <- msgInfo{msg, e.Src.ID()}
            conR.conS.peerMsgQueue <- msgInfo{msg, src.ID()}
        case *ProposalPOLMessage:
            ps.ApplyProposalPOLMessage(msg)
        case *BlockPartMessage:
            ps.SetHasProposalBlockPart(msg.Height, msg.Round, int(msg.Part.Index))
            conR.Metrics.BlockParts.With("peer_id", string(e.Src.ID())).Add(1)
            conR.conS.peerMsgQueue <- msgInfo{msg, e.Src.ID()}
            conR.Metrics.BlockParts.With("peer_id", string(src.ID())).Add(1)
            conR.conS.peerMsgQueue <- msgInfo{msg, src.ID()}
        default:
            conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
        }
@@ -355,7 +337,7 @@ func (conR *Reactor) Receive(e p2p.Envelope) {
        ps.EnsureVoteBitArrays(height-1, lastCommitSize)
        ps.SetHasVote(msg.Vote)

        cs.peerMsgQueue <- msgInfo{msg, e.Src.ID()}
        cs.peerMsgQueue <- msgInfo{msg, src.ID()}

    default:
        // don't punish (leave room for soft upgrades)
@@ -394,7 +376,7 @@ func (conR *Reactor) Receive(e p2p.Envelope) {
    }

    default:
        conR.Logger.Error(fmt.Sprintf("Unknown chId %X", e.ChannelID))
        conR.Logger.Error(fmt.Sprintf("Unknown chId %X", chID))
    }
}
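To summarize the shape of the byte-oriented receive path restored in the hunk above, a compressed sketch: decode the raw bytes, validate, then dispatch by channel ID. The function name is an assumption; error handling and peer-state lookup are deliberately elided.

```go
// Condensed sketch of the receive flow; not the full handler shown above.
func receiveSketch(conR *Reactor, chID byte, src p2p.Peer, msgBytes []byte) {
	msg, err := decodeMsg(msgBytes) // protobuf bytes -> consensus Message
	if err != nil {
		conR.Switch.StopPeerForError(src, err)
		return
	}
	if err := msg.ValidateBasic(); err != nil {
		conR.Switch.StopPeerForError(src, err)
		return
	}
	switch chID {
	case StateChannel, DataChannel, VoteChannel, VoteSetBitsChannel:
		// route to the matching handler or the consensus peerMsgQueue
	}
}
```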

@@ -448,39 +430,29 @@ func (conR *Reactor) unsubscribeFromBroadcastEvents() {

func (conR *Reactor) broadcastNewRoundStepMessage(rs *cstypes.RoundState) {
    nrsMsg := makeRoundStepMessage(rs)
    conR.Switch.NewBroadcast(p2p.Envelope{
        ChannelID: StateChannel,
        Message: nrsMsg,
    })
    conR.Switch.Broadcast(StateChannel, MustEncode(nrsMsg))
}

func (conR *Reactor) broadcastNewValidBlockMessage(rs *cstypes.RoundState) {
    psh := rs.ProposalBlockParts.Header()
    csMsg := &tmcons.NewValidBlock{
    csMsg := &NewValidBlockMessage{
        Height: rs.Height,
        Round: rs.Round,
        BlockPartSetHeader: psh.ToProto(),
        BlockParts: rs.ProposalBlockParts.BitArray().ToProto(),
        BlockPartSetHeader: rs.ProposalBlockParts.Header(),
        BlockParts: rs.ProposalBlockParts.BitArray(),
        IsCommit: rs.Step == cstypes.RoundStepCommit,
    }
    conR.Switch.NewBroadcast(p2p.Envelope{
        ChannelID: StateChannel,
        Message: csMsg,
    })
    conR.Switch.Broadcast(StateChannel, MustEncode(csMsg))
}

// Broadcasts HasVoteMessage to peers that care.
func (conR *Reactor) broadcastHasVoteMessage(vote *types.Vote) {
    msg := &tmcons.HasVote{
    msg := &HasVoteMessage{
        Height: vote.Height,
        Round: vote.Round,
        Type: vote.Type,
        Index: vote.ValidatorIndex,
    }
    conR.Switch.NewBroadcast(p2p.Envelope{
        ChannelID: StateChannel,
        Message: msg,
    })
    conR.Switch.Broadcast(StateChannel, MustEncode(msg))
    /*
        // TODO: Make this broadcast more selective.
        for _, peer := range conR.Switch.Peers().List() {
@@ -491,11 +463,7 @@ func (conR *Reactor) broadcastHasVoteMessage(vote *types.Vote) {
            prs := ps.GetRoundState()
            if prs.Height == vote.Height {
                // TODO: Also filter on round?
                e := p2p.Envelope{
                    ChannelID: StateChannel, struct{ ConsensusMessage }{msg},
                    Message: p,
                }
                peer.TrySend(e)
                peer.TrySend(StateChannel, struct{ ConsensusMessage }{msg})
            } else {
                // Height doesn't match
                // TODO: check a field, maybe CatchupCommitRound?
@@ -505,11 +473,11 @@ func (conR *Reactor) broadcastHasVoteMessage(vote *types.Vote) {
    */
}

func makeRoundStepMessage(rs *cstypes.RoundState) (nrsMsg *tmcons.NewRoundStep) {
    nrsMsg = &tmcons.NewRoundStep{
func makeRoundStepMessage(rs *cstypes.RoundState) (nrsMsg *NewRoundStepMessage) {
    nrsMsg = &NewRoundStepMessage{
        Height: rs.Height,
        Round: rs.Round,
        Step: uint32(rs.Step),
        Step: rs.Step,
        SecondsSinceStartTime: int64(time.Since(rs.StartTime).Seconds()),
        LastCommitRound: rs.LastCommit.GetRound(),
    }
@@ -519,10 +487,7 @@ func makeRoundStepMessage(rs *cstypes.RoundState) (nrsMsg *tmcons.NewRoundStep)
func (conR *Reactor) sendNewRoundStepMessage(peer p2p.Peer) {
    rs := conR.getRoundState()
    nrsMsg := makeRoundStepMessage(rs)
    peer.Send(p2p.Envelope{
        ChannelID: StateChannel,
        Message: nrsMsg,
    })
    peer.Send(StateChannel, MustEncode(nrsMsg))
}

func (conR *Reactor) updateRoundStateRoutine() {
@@ -561,19 +526,13 @@ OUTER_LOOP:
    if rs.ProposalBlockParts.HasHeader(prs.ProposalBlockPartSetHeader) {
        if index, ok := rs.ProposalBlockParts.BitArray().Sub(prs.ProposalBlockParts.Copy()).PickRandom(); ok {
            part := rs.ProposalBlockParts.GetPart(index)
            parts, err := part.ToProto()
            if err != nil {
                panic(err)
            msg := &BlockPartMessage{
                Height: rs.Height, // This tells peer that this part applies to us.
                Round: rs.Round, // This tells peer that this part applies to us.
                Part: part,
            }
            logger.Debug("Sending block part", "height", prs.Height, "round", prs.Round)
            if peer.Send(p2p.Envelope{
                ChannelID: DataChannel,
                Message: &tmcons.BlockPart{
                    Height: rs.Height, // This tells peer that this part applies to us.
                    Round: rs.Round, // This tells peer that this part applies to us.
                    Part: *parts,
                },
            }) {
            if peer.Send(DataChannel, MustEncode(msg)) {
                ps.SetHasProposalBlockPart(prs.Height, prs.Round, index)
            }
            continue OUTER_LOOP
@@ -619,11 +578,9 @@ OUTER_LOOP:
    if rs.Proposal != nil && !prs.Proposal {
        // Proposal: share the proposal metadata with peer.
        {
            msg := &ProposalMessage{Proposal: rs.Proposal}
            logger.Debug("Sending proposal", "height", prs.Height, "round", prs.Round)
            if peer.Send(p2p.Envelope{
                ChannelID: DataChannel,
                Message: &tmcons.Proposal{Proposal: *rs.Proposal.ToProto()},
            }) {
            if peer.Send(DataChannel, MustEncode(msg)) {
                // NOTE[ZM]: A peer might have received different proposal msg so this Proposal msg will be rejected!
                ps.SetHasProposal(rs.Proposal)
            }
@@ -633,15 +590,13 @@ OUTER_LOOP:
        // rs.Proposal was validated, so rs.Proposal.POLRound <= rs.Round,
        // so we definitely have rs.Votes.Prevotes(rs.Proposal.POLRound).
        if 0 <= rs.Proposal.POLRound {
            msg := &ProposalPOLMessage{
                Height: rs.Height,
                ProposalPOLRound: rs.Proposal.POLRound,
                ProposalPOL: rs.Votes.Prevotes(rs.Proposal.POLRound).BitArray(),
            }
            logger.Debug("Sending POL", "height", prs.Height, "round", prs.Round)
            peer.Send(p2p.Envelope{
                ChannelID: DataChannel,
                Message: &tmcons.ProposalPOL{
                    Height: rs.Height,
                    ProposalPolRound: rs.Proposal.POLRound,
                    ProposalPol: *rs.Votes.Prevotes(rs.Proposal.POLRound).BitArray().ToProto(),
                },
            })
            peer.Send(DataChannel, MustEncode(msg))
        }
        continue OUTER_LOOP
    }
@@ -678,20 +633,13 @@ func (conR *Reactor) gossipDataForCatchup(logger log.Logger, rs *cstypes.RoundSt
        return
    }
    // Send the part
    logger.Debug("Sending block part for catchup", "round", prs.Round, "index", index)
    pp, err := part.ToProto()
    if err != nil {
        logger.Error("Could not convert part to proto", "index", index, "error", err)
        return
    msg := &BlockPartMessage{
        Height: prs.Height, // Not our height, so it doesn't matter.
        Round: prs.Round, // Not our height, so it doesn't matter.
        Part: part,
    }
    if peer.Send(p2p.Envelope{
        ChannelID: DataChannel,
        Message: &tmcons.BlockPart{
            Height: prs.Height, // Not our height, so it doesn't matter.
            Round: prs.Round, // Not our height, so it doesn't matter.
            Part: *pp,
        },
    }) {
    logger.Debug("Sending block part for catchup", "round", prs.Round, "index", index)
    if peer.Send(DataChannel, MustEncode(msg)) {
        ps.SetHasProposalBlockPart(prs.Height, prs.Round, index)
    } else {
        logger.Debug("Sending block part for catchup failed")
@@ -850,16 +798,12 @@ OUTER_LOOP:
    prs := ps.GetRoundState()
    if rs.Height == prs.Height {
        if maj23, ok := rs.Votes.Prevotes(prs.Round).TwoThirdsMajority(); ok {

            peer.TrySend(p2p.Envelope{
                ChannelID: StateChannel,
                Message: &tmcons.VoteSetMaj23{
                    Height: prs.Height,
                    Round: prs.Round,
                    Type: tmproto.PrevoteType,
                    BlockID: maj23.ToProto(),
                },
            })
            peer.TrySend(StateChannel, MustEncode(&VoteSetMaj23Message{
                Height: prs.Height,
                Round: prs.Round,
                Type: tmproto.PrevoteType,
                BlockID: maj23,
            }))
            time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
        }
    }
@@ -871,15 +815,12 @@ OUTER_LOOP:
    prs := ps.GetRoundState()
    if rs.Height == prs.Height {
        if maj23, ok := rs.Votes.Precommits(prs.Round).TwoThirdsMajority(); ok {
            peer.TrySend(p2p.Envelope{
                ChannelID: StateChannel,
                Message: &tmcons.VoteSetMaj23{
                    Height: prs.Height,
                    Round: prs.Round,
                    Type: tmproto.PrecommitType,
                    BlockID: maj23.ToProto(),
                },
            })
            peer.TrySend(StateChannel, MustEncode(&VoteSetMaj23Message{
                Height: prs.Height,
                Round: prs.Round,
                Type: tmproto.PrecommitType,
                BlockID: maj23,
            }))
            time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
        }
    }
@@ -891,16 +832,12 @@ OUTER_LOOP:
    prs := ps.GetRoundState()
    if rs.Height == prs.Height && prs.ProposalPOLRound >= 0 {
        if maj23, ok := rs.Votes.Prevotes(prs.ProposalPOLRound).TwoThirdsMajority(); ok {

            peer.TrySend(p2p.Envelope{
                ChannelID: StateChannel,
                Message: &tmcons.VoteSetMaj23{
                    Height: prs.Height,
                    Round: prs.ProposalPOLRound,
                    Type: tmproto.PrevoteType,
                    BlockID: maj23.ToProto(),
                },
            })
            peer.TrySend(StateChannel, MustEncode(&VoteSetMaj23Message{
                Height: prs.Height,
                Round: prs.ProposalPOLRound,
                Type: tmproto.PrevoteType,
                BlockID: maj23,
            }))
            time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
        }
    }
@@ -915,15 +852,12 @@ OUTER_LOOP:
    if prs.CatchupCommitRound != -1 && prs.Height > 0 && prs.Height <= conR.conS.blockStore.Height() &&
        prs.Height >= conR.conS.blockStore.Base() {
        if commit := conR.conS.LoadCommit(prs.Height); commit != nil {
            peer.TrySend(p2p.Envelope{
                ChannelID: StateChannel,
                Message: &tmcons.VoteSetMaj23{
                    Height: prs.Height,
                    Round: commit.Round,
                    Type: tmproto.PrecommitType,
                    BlockID: commit.BlockID.ToProto(),
                },
            })
            peer.TrySend(StateChannel, MustEncode(&VoteSetMaj23Message{
                Height: prs.Height,
                Round: commit.Round,
                Type: tmproto.PrecommitType,
                BlockID: commit.BlockID,
            }))
            time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
        }
    }
@@ -1137,13 +1071,9 @@ func (ps *PeerState) SetHasProposalBlockPart(height int64, round int32, index in
// Returns true if vote was sent.
func (ps *PeerState) PickSendVote(votes types.VoteSetReader) bool {
    if vote, ok := ps.PickVoteToSend(votes); ok {
        msg := &VoteMessage{vote}
        ps.logger.Debug("Sending vote message", "ps", ps, "vote", vote)
        if ps.peer.Send(p2p.Envelope{
            ChannelID: VoteChannel,
            Message: &tmcons.Vote{
                Vote: vote.ToProto(),
            },
        }) {
        if ps.peer.Send(VoteChannel, MustEncode(msg)) {
            ps.SetHasVote(vote)
            return true
        }
@@ -1509,6 +1439,15 @@ func init() {
    tmjson.RegisterType(&VoteSetBitsMessage{}, "tendermint/VoteSetBits")
}

func decodeMsg(bz []byte) (msg Message, err error) {
    pb := &tmcons.Message{}
    if err = proto.Unmarshal(bz, pb); err != nil {
        return msg, err
    }

    return MsgFromProto(pb)
}

//-------------------------------------

// NewRoundStepMessage is sent for every step taken in the ConsensusState.

@@ -33,7 +33,6 @@ import (
    mempoolv1 "github.com/tendermint/tendermint/mempool/v1"
    "github.com/tendermint/tendermint/p2p"
    p2pmock "github.com/tendermint/tendermint/p2p/mock"
    tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus"
    tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
    sm "github.com/tendermint/tendermint/state"
    statemocks "github.com/tendermint/tendermint/state/mocks"
@@ -191,7 +190,7 @@ func TestReactorWithEvidence(t *testing.T) {
    // mock the evidence pool
    // everyone includes evidence of another double signing
    vIdx := (i + 1) % nValidators
    ev, err := types.NewMockDuplicateVoteEvidenceWithValidator(1, defaultTestTime, privVals[vIdx], genDoc.ChainID)
    ev, err := types.NewMockDuplicateVoteEvidenceWithValidator(1, defaultTestTime, privVals[vIdx], config.ChainID())
    require.NoError(t, err)
    evpool := &statemocks.EvidencePool{}
    evpool.On("CheckEvidence", mock.AnythingOfType("types.EvidenceList")).Return(nil)
@@ -202,7 +201,7 @@ func TestReactorWithEvidence(t *testing.T) {
    evpool2 := sm.EmptyEvidencePool{}

    // Make State
    blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool, blockStore)
    blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool)
    cs := NewState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool2)
    cs.SetLogger(log.TestingLogger().With("module", "consensus"))
    cs.SetPrivValidator(pv)
@@ -266,18 +265,15 @@ func TestReactorReceiveDoesNotPanicIfAddPeerHasntBeenCalledYet(t *testing.T) {
    var (
        reactor = reactors[0]
        peer = p2pmock.NewPeer(nil)
        msg = MustEncode(&HasVoteMessage{Height: 1,
            Round: 1, Index: 1, Type: tmproto.PrevoteType})
    )

    reactor.InitPeer(peer)

    // simulate switch calling Receive before AddPeer
    assert.NotPanics(t, func() {
        reactor.Receive(p2p.Envelope{
            ChannelID: StateChannel,
            Src: peer,
            Message: &tmcons.HasVote{Height: 1,
                Round: 1, Index: 1, Type: tmproto.PrevoteType},
        })
        reactor.Receive(StateChannel, peer, msg)
        reactor.AddPeer(peer)
    })
}
@@ -292,18 +288,15 @@ func TestReactorReceivePanicsIfInitPeerHasntBeenCalledYet(t *testing.T) {
    var (
        reactor = reactors[0]
        peer = p2pmock.NewPeer(nil)
        msg = MustEncode(&HasVoteMessage{Height: 1,
            Round: 1, Index: 1, Type: tmproto.PrevoteType})
    )

    // we should call InitPeer here

    // simulate switch calling Receive before AddPeer
    assert.Panics(t, func() {
        reactor.Receive(p2p.Envelope{
            ChannelID: StateChannel,
            Src: peer,
            Message: &tmcons.HasVote{Height: 1,
                Round: 1, Index: 1, Type: tmproto.PrevoteType},
        })
        reactor.Receive(StateChannel, peer, msg)
    })
}

@@ -496,11 +496,11 @@ func (h *Handshaker) replayBlock(state sm.State, height int64, proxyApp proxy.Ap

// Use stubs for both mempool and evidence pool since no transactions nor
// evidence are needed here - block already exists.
blockExec := sm.NewBlockExecutor(h.stateStore, h.logger, proxyApp, emptyMempool{}, sm.EmptyEvidencePool{}, h.store)
blockExec := sm.NewBlockExecutor(h.stateStore, h.logger, proxyApp, emptyMempool{}, sm.EmptyEvidencePool{})
blockExec.SetEventBus(h.eventBus)

var err error
state, err = blockExec.ApplyBlock(state, meta.BlockID, block)
state, _, err = blockExec.ApplyBlock(state, meta.BlockID, block)
if err != nil {
    return sm.State{}, err
}
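For context on the two BlockExecutor shapes that appear throughout this compare, a hedged sketch of how a caller differs between them: in one branch the executor is constructed with the block store and prunes internally, so `ApplyBlock` returns `(State, error)`; in the other the caller receives a retain height and prunes the store itself. The helper name and package are illustrative assumptions.

```go
package replaysketch

import (
	sm "github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/types"
)

// applyAndMaybePrune shows the older three-value ApplyBlock shape, where the
// returned retain height drives pruning of the block store by the caller.
func applyAndMaybePrune(blockExec *sm.BlockExecutor, store sm.BlockStore,
	st sm.State, id types.BlockID, blk *types.Block) (sm.State, error) {

	newState, retainHeight, err := blockExec.ApplyBlock(st, id, blk)
	if err != nil {
		return sm.State{}, err
	}
	if retainHeight > 0 {
		// Prune blocks below retainHeight (caller-side pruning in this API shape).
		if _, err := store.PruneBlocks(retainHeight); err != nil {
			return sm.State{}, err
		}
	}
	return newState, nil
}
```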

@@ -330,7 +330,7 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo
}

mempool, evpool := emptyMempool{}, sm.EmptyEvidencePool{}
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool, blockStore)
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool)

consensusState := NewState(csConfig, state.Copy(), blockExec,
    blockStore, mempool, evpool)

@@ -373,7 +373,7 @@ func TestSimulateValidatorsChange(t *testing.T) {

proposal := types.NewProposal(vss[1].Height, round, -1, blockID)
p := proposal.ToProto()
if err := vss[1].SignProposal(genDoc.ChainID, p); err != nil {
if err := vss[1].SignProposal(config.ChainID(), p); err != nil {
    t.Fatal("failed to sign bad proposal", err)
}
proposal.Signature = p.Signature
@@ -405,7 +405,7 @@ func TestSimulateValidatorsChange(t *testing.T) {

proposal = types.NewProposal(vss[2].Height, round, -1, blockID)
p = proposal.ToProto()
if err := vss[2].SignProposal(genDoc.ChainID, p); err != nil {
if err := vss[2].SignProposal(config.ChainID(), p); err != nil {
    t.Fatal("failed to sign bad proposal", err)
}
proposal.Signature = p.Signature
@@ -464,7 +464,7 @@ func TestSimulateValidatorsChange(t *testing.T) {

proposal = types.NewProposal(vss[3].Height, round, -1, blockID)
p = proposal.ToProto()
if err := vss[3].SignProposal(genDoc.ChainID, p); err != nil {
if err := vss[3].SignProposal(config.ChainID(), p); err != nil {
    t.Fatal("failed to sign bad proposal", err)
}
proposal.Signature = p.Signature
@@ -525,7 +525,7 @@ func TestSimulateValidatorsChange(t *testing.T) {
selfIndex = valIndexFn(0)
proposal = types.NewProposal(vss[1].Height, round, -1, blockID)
p = proposal.ToProto()
if err := vss[1].SignProposal(genDoc.ChainID, p); err != nil {
if err := vss[1].SignProposal(config.ChainID(), p); err != nil {
    t.Fatal("failed to sign bad proposal", err)
}
proposal.Signature = p.Signature
@@ -711,7 +711,7 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin

state := genesisState.Copy()
// run the chain through state.ApplyBlock to build up the tendermint state
state = buildTMStateFromChain(t, config, stateStore, state, chain, nBlocks, mode, store)
state = buildTMStateFromChain(t, config, stateStore, state, chain, nBlocks, mode)
latestAppHash := state.AppHash

// make a new client creator
@@ -729,13 +729,13 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin
})
err := stateStore.Save(genesisState)
require.NoError(t, err)
buildAppStateFromChain(t, proxyApp, stateStore, genesisState, chain, nBlocks, mode, store)
buildAppStateFromChain(t, proxyApp, stateStore, genesisState, chain, nBlocks, mode)
}

// Prune block store if requested
expectError := false
if mode == 3 {
    pruned, _, err := store.PruneBlocks(2, state)
    pruned, err := store.PruneBlocks(2)
    require.NoError(t, err)
    require.EqualValues(t, 1, pruned)
    expectError = int64(nBlocks) < 2
@@ -789,20 +789,20 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin
}
}

func applyBlock(t *testing.T, stateStore sm.Store, st sm.State, blk *types.Block, proxyApp proxy.AppConns, bs *mockBlockStore) sm.State {
func applyBlock(t *testing.T, stateStore sm.Store, st sm.State, blk *types.Block, proxyApp proxy.AppConns) sm.State {
    testPartSize := types.BlockPartSizeBytes
    blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool, bs)
    blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool)

    bps, err := blk.MakePartSet(testPartSize)
    require.NoError(t, err)
    blkID := types.BlockID{Hash: blk.Hash(), PartSetHeader: bps.Header()}
    newState, err := blockExec.ApplyBlock(st, blkID, blk)
    newState, _, err := blockExec.ApplyBlock(st, blkID, blk)
    require.NoError(t, err)
    return newState
}

func buildAppStateFromChain(t *testing.T, proxyApp proxy.AppConns, stateStore sm.Store,
    state sm.State, chain []*types.Block, nBlocks int, mode uint, blockStore *mockBlockStore) {
    state sm.State, chain []*types.Block, nBlocks int, mode uint) {
    // start a new app without handshake, play nBlocks blocks
    if err := proxyApp.Start(); err != nil {
        panic(err)
@@ -823,18 +823,18 @@ func buildAppStateFromChain(t *testing.T, proxyApp proxy.AppConns, stateStore sm
    case 0:
        for i := 0; i < nBlocks; i++ {
            block := chain[i]
            state = applyBlock(t, stateStore, state, block, proxyApp, blockStore)
            state = applyBlock(t, stateStore, state, block, proxyApp)
        }
    case 1, 2, 3:
        for i := 0; i < nBlocks-1; i++ {
            block := chain[i]
            state = applyBlock(t, stateStore, state, block, proxyApp, blockStore)
            state = applyBlock(t, stateStore, state, block, proxyApp)
        }

        if mode == 2 || mode == 3 {
            // update the kvstore height and apphash
            // as if we ran commit but not
            state = applyBlock(t, stateStore, state, chain[nBlocks-1], proxyApp, blockStore)
            state = applyBlock(t, stateStore, state, chain[nBlocks-1], proxyApp)
        }
    default:
        panic(fmt.Sprintf("unknown mode %v", mode))
@@ -849,8 +849,7 @@ func buildTMStateFromChain(
    state sm.State,
    chain []*types.Block,
    nBlocks int,
    mode uint,
    blockStore *mockBlockStore) sm.State {
    mode uint) sm.State {
    // run the whole chain against this client to build up the tendermint state
    clientCreator := proxy.NewLocalClientCreator(
        kvstore.NewPersistentKVStoreApplication(
@@ -875,19 +874,19 @@ func buildTMStateFromChain(
    case 0:
        // sync right up
        for _, block := range chain {
            state = applyBlock(t, stateStore, state, block, proxyApp, blockStore)
            state = applyBlock(t, stateStore, state, block, proxyApp)
        }

    case 1, 2, 3:
        // sync up to the penultimate as if we stored the block.
        // whether we commit or not depends on the appHash
        for _, block := range chain[:len(chain)-1] {
            state = applyBlock(t, stateStore, state, block, proxyApp, blockStore)
            state = applyBlock(t, stateStore, state, block, proxyApp)
        }

        // apply the final block to a state copy so we can
        // get the right next appHash but keep the state back
        applyBlock(t, stateStore, state, chain[len(chain)-1], proxyApp, blockStore)
        applyBlock(t, stateStore, state, chain[len(chain)-1], proxyApp)
    default:
        panic(fmt.Sprintf("unknown mode %v", mode))
    }
@@ -1185,8 +1184,7 @@ func (bs *mockBlockStore) LoadSeenCommit(height int64) *types.Commit {
    return bs.commits[height-1]
}

func (bs *mockBlockStore) PruneBlocks(height int64, state sm.State) (uint64, int64, error) {
    evidencePoint := height
func (bs *mockBlockStore) PruneBlocks(height int64) (uint64, error) {
    pruned := uint64(0)
    for i := int64(0); i < height-1; i++ {
        bs.chain[i] = nil
||||
@@ -1194,11 +1192,9 @@ func (bs *mockBlockStore) PruneBlocks(height int64, state sm.State) (uint64, int
|
||||
pruned++
|
||||
}
|
||||
bs.base = height
|
||||
return pruned, evidencePoint, nil
|
||||
return pruned, nil
|
||||
}
|
||||
|
||||
func (bs *mockBlockStore) DeleteLatestBlock() error { return nil }
|
||||
|
||||
//---------------------------------------
|
||||
// Test handshake/init chain
|
||||
|
||||
|
||||
@@ -1694,7 +1694,12 @@ func (cs *State) finalizeCommit(height int64) {

	// Execute and commit the block, update and save the state, and update the mempool.
	// NOTE The block.AppHash wont reflect these txs until the next block.
	stateCopy, err := cs.blockExec.ApplyBlock(
	var (
		err          error
		retainHeight int64
	)

	stateCopy, retainHeight, err = cs.blockExec.ApplyBlock(
		stateCopy,
		types.BlockID{
			Hash: block.Hash(),
@@ -1709,6 +1714,16 @@ func (cs *State) finalizeCommit(height int64) {

	fail.Fail() // XXX

	// Prune old heights, if requested by ABCI app.
	if retainHeight > 0 {
		pruned, err := cs.pruneBlocks(retainHeight)
		if err != nil {
			logger.Error("failed to prune blocks", "retain_height", retainHeight, "err", err)
		} else {
			logger.Debug("pruned blocks", "pruned", pruned, "retain_height", retainHeight)
		}
	}

	// must be called before we update state
	cs.recordMetrics(height, block)

@@ -1732,6 +1747,22 @@ func (cs *State) finalizeCommit(height int64) {
	// * cs.StartTime is set to when we will start round0.
}

func (cs *State) pruneBlocks(retainHeight int64) (uint64, error) {
	base := cs.blockStore.Base()
	if retainHeight <= base {
		return 0, nil
	}
	pruned, err := cs.blockStore.PruneBlocks(retainHeight)
	if err != nil {
		return 0, fmt.Errorf("failed to prune block store: %w", err)
	}
	err = cs.blockExec.Store().PruneStates(base, retainHeight)
	if err != nil {
		return 0, fmt.Errorf("failed to prune state database: %w", err)
	}
	return pruned, nil
}

func (cs *State) recordMetrics(height int64, block *types.Block) {
	cs.metrics.Validators.Set(float64(cs.Validators.Size()))
	cs.metrics.ValidatorsPower.Set(float64(cs.Validators.TotalVotingPower()))

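Note (not part of the diff): the retain height that drives the new pruneBlocks path above comes from the ABCI application's Commit response. A minimal sketch of an application opting in, assuming the ResponseCommit.RetainHeight field introduced by ADR-077; the App type and the keepRecent policy below are hypothetical.

package main

import (
	"fmt"

	abci "github.com/tendermint/tendermint/abci/types"
)

// keepRecent is a hypothetical application policy: how many recent heights
// the application asks Tendermint to keep in its block store.
const keepRecent = 100_000

type App struct {
	abci.BaseApplication
	height int64
}

// Commit reports a retain height alongside the usual commit data; on seeing a
// non-zero value, consensus calls pruneBlocks as in the hunk above, trimming
// the block store and the state store up to (but not including) that height.
func (app *App) Commit() abci.ResponseCommit {
	app.height++
	retain := app.height - keepRecent
	if retain < 0 {
		retain = 0 // zero means "keep everything"
	}
	return abci.ResponseCommit{RetainHeight: retain}
}

func main() {
	app := &App{height: 250_000}
	fmt.Println(app.Commit().RetainHeight) // prints 150001
}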
@@ -214,7 +214,7 @@ func TestStateBadProposal(t *testing.T) {
|
||||
blockID := types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()}
|
||||
proposal := types.NewProposal(vs2.Height, round, -1, blockID)
|
||||
p := proposal.ToProto()
|
||||
if err := vs2.SignProposal(cs1.state.ChainID, p); err != nil {
|
||||
if err := vs2.SignProposal(config.ChainID(), p); err != nil {
|
||||
t.Fatal("failed to sign bad proposal", err)
|
||||
}
|
||||
|
||||
@@ -276,7 +276,7 @@ func TestStateOversizedBlock(t *testing.T) {
|
||||
blockID := types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()}
|
||||
proposal := types.NewProposal(height, round, -1, blockID)
|
||||
p := proposal.ToProto()
|
||||
if err := vs2.SignProposal(cs1.state.ChainID, p); err != nil {
|
||||
if err := vs2.SignProposal(config.ChainID(), p); err != nil {
|
||||
t.Fatal("failed to sign bad proposal", err)
|
||||
}
|
||||
proposal.Signature = p.Signature
|
||||
@@ -1132,7 +1132,7 @@ func TestStateLockPOLSafety2(t *testing.T) {
|
||||
// in round 2 we see the polkad block from round 0
|
||||
newProp := types.NewProposal(height, round, 0, propBlockID0)
|
||||
p := newProp.ToProto()
|
||||
if err := vs3.SignProposal(cs1.state.ChainID, p); err != nil {
|
||||
if err := vs3.SignProposal(config.ChainID(), p); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
||||
@@ -7,7 +7,6 @@ import (
|
||||
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/crypto/tmhash"
|
||||
"github.com/tendermint/tendermint/internal/test"
|
||||
tmrand "github.com/tendermint/tendermint/libs/rand"
|
||||
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
@@ -17,7 +16,7 @@ import (
|
||||
var config *cfg.Config // NOTE: must be reset for each _test.go file
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
config = test.ResetTestRoot("consensus_height_vote_set_test")
|
||||
config = cfg.ResetTestRoot("consensus_height_vote_set_test")
|
||||
code := m.Run()
|
||||
os.RemoveAll(config.RootDir)
|
||||
os.Exit(code)
|
||||
@@ -26,7 +25,7 @@ func TestMain(m *testing.M) {
|
||||
func TestPeerCatchupRounds(t *testing.T) {
|
||||
valSet, privVals := types.RandValidatorSet(10, 1)
|
||||
|
||||
hvs := NewHeightVoteSet(test.DefaultTestChainID, 1, valSet)
|
||||
hvs := NewHeightVoteSet(config.ChainID(), 1, valSet)
|
||||
|
||||
vote999_0 := makeVoteHR(t, 1, 0, 999, privVals)
|
||||
added, err := hvs.AddVote(vote999_0, "peer1")
|
||||
@@ -74,9 +73,10 @@ func makeVoteHR(t *testing.T, height int64, valIndex, round int32, privVals []ty
|
||||
Type: tmproto.PrecommitType,
|
||||
BlockID: types.BlockID{Hash: randBytes, PartSetHeader: types.PartSetHeader{}},
|
||||
}
|
||||
chainID := config.ChainID()
|
||||
|
||||
v := vote.ToProto()
|
||||
err = privVal.SignVote(test.DefaultTestChainID, v)
|
||||
err = privVal.SignVote(chainID, v)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Error signing vote: %v", err))
|
||||
}
|
||||
|
||||
@@ -13,7 +13,6 @@ import (
|
||||
|
||||
"github.com/tendermint/tendermint/abci/example/kvstore"
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/internal/test"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmrand "github.com/tendermint/tendermint/libs/rand"
|
||||
"github.com/tendermint/tendermint/privval"
|
||||
@@ -85,7 +84,7 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) {
|
||||
})
|
||||
mempool := emptyMempool{}
|
||||
evpool := sm.EmptyEvidencePool{}
|
||||
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool, blockStore)
|
||||
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool)
|
||||
consensusState := NewState(config.Consensus, state.Copy(), blockExec, blockStore, mempool, evpool)
|
||||
consensusState.SetLogger(logger)
|
||||
consensusState.SetEventBus(eventBus)
|
||||
@@ -150,7 +149,7 @@ func makeAddrs() (string, string, string) {
|
||||
|
||||
// getConfig returns a config for test cases
|
||||
func getConfig(t *testing.T) *cfg.Config {
|
||||
c := test.ResetTestRoot(t.Name())
|
||||
c := cfg.ResetTestRoot(t.Name())
|
||||
|
||||
// and we use random ports to run in parallel
|
||||
tm, rpc, grpc := makeAddrs()
|
||||
|
||||
@@ -1,32 +0,0 @@
|
||||
package batch
|
||||
|
||||
import (
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
"github.com/tendermint/tendermint/crypto/ed25519"
|
||||
"github.com/tendermint/tendermint/crypto/sr25519"
|
||||
)
|
||||
|
||||
// CreateBatchVerifier checks if a key type implements the batch verifier interface.
|
||||
// Currently only ed25519 & sr25519 supports batch verification.
|
||||
func CreateBatchVerifier(pk crypto.PubKey) (crypto.BatchVerifier, bool) {
|
||||
switch pk.Type() {
|
||||
case ed25519.KeyType:
|
||||
return ed25519.NewBatchVerifier(), true
|
||||
case sr25519.KeyType:
|
||||
return sr25519.NewBatchVerifier(), true
|
||||
}
|
||||
|
||||
// case where the key does not support batch verification
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// SupportsBatchVerifier checks if a key type implements the batch verifier
|
||||
// interface.
|
||||
func SupportsBatchVerifier(pk crypto.PubKey) bool {
|
||||
switch pk.Type() {
|
||||
case ed25519.KeyType, sr25519.KeyType:
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
@@ -40,15 +40,3 @@ type Symmetric interface {
	Encrypt(plaintext []byte, secret []byte) (ciphertext []byte)
	Decrypt(ciphertext []byte, secret []byte) (plaintext []byte, err error)
}

// If a new key type implements batch verification,
// the key type must be registered in github.com/tendermint/tendermint/crypto/batch
type BatchVerifier interface {
	// Add appends an entry into the BatchVerifier.
	Add(key PubKey, message, signature []byte) error
	// Verify verifies all the entries in the BatchVerifier, and returns
	// if every signature in the batch is valid, and a vector of bools
	// indicating the verification status of each signature (in the order
	// that signatures were added to the batch).
	Verify() (bool, []bool)
}

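Note (not part of the diff): the BatchVerifier interface above is consumed through the crypto/batch helpers shown earlier in this comparison. A minimal usage sketch with ed25519 keys, assuming the side of the comparison that still ships the batch package:

package main

import (
	"fmt"

	"github.com/tendermint/tendermint/crypto"
	"github.com/tendermint/tendermint/crypto/batch"
	"github.com/tendermint/tendermint/crypto/ed25519"
)

func main() {
	msgs := [][]byte{[]byte("vote-1"), []byte("vote-2"), []byte("vote-3")}

	pubs := make([]crypto.PubKey, len(msgs))
	sigs := make([][]byte, len(msgs))
	for i, msg := range msgs {
		priv := ed25519.GenPrivKey()
		sig, err := priv.Sign(msg)
		if err != nil {
			panic(err)
		}
		pubs[i], sigs[i] = priv.PubKey(), sig
	}

	// CreateBatchVerifier returns false for key types without batch support,
	// in which case callers fall back to verifying signatures one by one.
	bv, ok := batch.CreateBatchVerifier(pubs[0])
	if !ok {
		panic("key type does not support batch verification")
	}
	for i := range msgs {
		if err := bv.Add(pubs[i], msgs[i], sigs[i]); err != nil {
			panic(err)
		}
	}
	allValid, each := bv.Verify()
	fmt.Println(allValid, each)
}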
@@ -1,12 +1,9 @@
|
||||
package ed25519
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
"github.com/tendermint/tendermint/crypto/internal/benchmarking"
|
||||
)
|
||||
@@ -27,42 +24,3 @@ func BenchmarkVerification(b *testing.B) {
|
||||
priv := GenPrivKey()
|
||||
benchmarking.BenchmarkVerification(b, priv)
|
||||
}
|
||||
|
||||
func BenchmarkVerifyBatch(b *testing.B) {
|
||||
msg := []byte("BatchVerifyTest")
|
||||
|
||||
for _, sigsCount := range []int{1, 8, 64, 1024} {
|
||||
sigsCount := sigsCount
|
||||
b.Run(fmt.Sprintf("sig-count-%d", sigsCount), func(b *testing.B) {
|
||||
// Pre-generate all of the keys, and signatures, but do not
|
||||
// benchmark key-generation and signing.
|
||||
pubs := make([]crypto.PubKey, 0, sigsCount)
|
||||
sigs := make([][]byte, 0, sigsCount)
|
||||
for i := 0; i < sigsCount; i++ {
|
||||
priv := GenPrivKey()
|
||||
sig, _ := priv.Sign(msg)
|
||||
pubs = append(pubs, priv.PubKey().(PubKey))
|
||||
sigs = append(sigs, sig)
|
||||
}
|
||||
b.ResetTimer()
|
||||
|
||||
b.ReportAllocs()
|
||||
// NOTE: dividing by n so that metrics are per-signature
|
||||
for i := 0; i < b.N/sigsCount; i++ {
|
||||
// The benchmark could just benchmark the Verify()
|
||||
// routine, but there is non-trivial overhead associated
|
||||
// with BatchVerifier.Add(), which should be included
|
||||
// in the benchmark.
|
||||
v := NewBatchVerifier()
|
||||
for i := 0; i < sigsCount; i++ {
|
||||
err := v.Add(pubs[i], msg, sigs[i])
|
||||
require.NoError(b, err)
|
||||
}
|
||||
|
||||
if ok, _ := v.Verify(); !ok {
|
||||
b.Fatal("signature set failed batch verification")
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,12 +3,10 @@ package ed25519
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/subtle"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/oasisprotocol/curve25519-voi/primitives/ed25519"
|
||||
"github.com/oasisprotocol/curve25519-voi/primitives/ed25519/extra/cache"
|
||||
"golang.org/x/crypto/ed25519"
|
||||
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
"github.com/tendermint/tendermint/crypto/tmhash"
|
||||
@@ -17,19 +15,7 @@ import (
|
||||
|
||||
//-------------------------------------
|
||||
|
||||
var (
|
||||
_ crypto.PrivKey = PrivKey{}
|
||||
_ crypto.BatchVerifier = &BatchVerifier{}
|
||||
|
||||
// curve25519-voi's Ed25519 implementation supports configurable
|
||||
// verification behavior, and tendermint uses the ZIP-215 verification
|
||||
// semantics.
|
||||
verifyOptions = &ed25519.Options{
|
||||
Verify: ed25519.VerifyOptionsZIP_215,
|
||||
}
|
||||
|
||||
cachingVerifier = cache.NewVerifier(cache.NewLRUCache(cacheSize))
|
||||
)
|
||||
var _ crypto.PrivKey = PrivKey{}
|
||||
|
||||
const (
|
||||
PrivKeyName = "tendermint/PrivKeyEd25519"
|
||||
@@ -46,14 +32,6 @@ const (
|
||||
SeedSize = 32
|
||||
|
||||
KeyType = "ed25519"
|
||||
|
||||
// cacheSize is the number of public keys that will be cached in
|
||||
// an expanded format for repeated signature verification.
|
||||
//
|
||||
// TODO/perf: Either this should exclude single verification, or be
|
||||
// tuned to `> validatorSize + maxTxnsPerBlock` to avoid cache
|
||||
// thrashing.
|
||||
cacheSize = 4096
|
||||
)
|
||||
|
||||
func init() {
|
||||
@@ -127,12 +105,14 @@ func GenPrivKey() PrivKey {
|
||||
|
||||
// genPrivKey generates a new ed25519 private key using the provided reader.
|
||||
func genPrivKey(rand io.Reader) PrivKey {
|
||||
_, priv, err := ed25519.GenerateKey(rand)
|
||||
seed := make([]byte, SeedSize)
|
||||
|
||||
_, err := io.ReadFull(rand, seed)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return PrivKey(priv)
|
||||
return PrivKey(ed25519.NewKeyFromSeed(seed))
|
||||
}
|
||||
|
||||
// GenPrivKeyFromSecret hashes the secret with SHA2, and uses
|
||||
@@ -149,7 +129,7 @@ func GenPrivKeyFromSecret(secret []byte) PrivKey {
|
||||
|
||||
var _ crypto.PubKey = PubKey{}
|
||||
|
||||
// PubKey implements crypto.PubKey for the Ed25519 signature scheme.
|
||||
// PubKeyEd25519 implements crypto.PubKey for the Ed25519 signature scheme.
|
||||
type PubKey []byte
|
||||
|
||||
// Address is the SHA256-20 of the raw pubkey bytes.
|
||||
@@ -171,7 +151,7 @@ func (pubKey PubKey) VerifySignature(msg []byte, sig []byte) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
return cachingVerifier.VerifyWithOptions(ed25519.PublicKey(pubKey), msg, sig, verifyOptions)
|
||||
return ed25519.Verify(ed25519.PublicKey(pubKey), msg, sig)
|
||||
}
|
||||
|
||||
func (pubKey PubKey) String() string {
|
||||
@@ -189,40 +169,3 @@ func (pubKey PubKey) Equals(other crypto.PubKey) bool {
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
//-------------------------------------
|
||||
|
||||
// BatchVerifier implements batch verification for ed25519.
|
||||
type BatchVerifier struct {
|
||||
*ed25519.BatchVerifier
|
||||
}
|
||||
|
||||
func NewBatchVerifier() crypto.BatchVerifier {
|
||||
return &BatchVerifier{ed25519.NewBatchVerifier()}
|
||||
}
|
||||
|
||||
func (b *BatchVerifier) Add(key crypto.PubKey, msg, signature []byte) error {
|
||||
pkEd, ok := key.(PubKey)
|
||||
if !ok {
|
||||
return fmt.Errorf("pubkey is not Ed25519")
|
||||
}
|
||||
|
||||
pkBytes := pkEd.Bytes()
|
||||
|
||||
if l := len(pkBytes); l != PubKeySize {
|
||||
return fmt.Errorf("pubkey size is incorrect; expected: %d, got %d", PubKeySize, l)
|
||||
}
|
||||
|
||||
// check that the signature is the correct length
|
||||
if len(signature) != SignatureSize {
|
||||
return errors.New("invalid signature")
|
||||
}
|
||||
|
||||
cachingVerifier.AddWithOptions(b.BatchVerifier, ed25519.PublicKey(pkBytes), msg, signature, verifyOptions)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *BatchVerifier) Verify() (bool, []bool) {
|
||||
return b.BatchVerifier.Verify(crypto.CReader())
|
||||
}
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
)
|
||||
|
||||
func TestSignAndValidateEd25519(t *testing.T) {
|
||||
|
||||
privKey := ed25519.GenPrivKey()
|
||||
pubKey := privKey.PubKey()
|
||||
|
||||
@@ -27,28 +28,3 @@ func TestSignAndValidateEd25519(t *testing.T) {
|
||||
|
||||
assert.False(t, pubKey.VerifySignature(msg, sig))
|
||||
}
|
||||
|
||||
func TestBatchSafe(t *testing.T) {
|
||||
v := ed25519.NewBatchVerifier()
|
||||
|
||||
for i := 0; i <= 38; i++ {
|
||||
priv := ed25519.GenPrivKey()
|
||||
pub := priv.PubKey()
|
||||
|
||||
var msg []byte
|
||||
if i%2 == 0 {
|
||||
msg = []byte("easter")
|
||||
} else {
|
||||
msg = []byte("egg")
|
||||
}
|
||||
|
||||
sig, err := priv.Sign(msg)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = v.Add(pub, msg, sig)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
ok, _ := v.Verify()
|
||||
require.True(t, ok)
|
||||
}
|
||||
|
||||
@@ -1,8 +1,6 @@
|
||||
package merkle
|
||||
|
||||
import (
|
||||
"hash"
|
||||
|
||||
"github.com/tendermint/tendermint/crypto/tmhash"
|
||||
)
|
||||
|
||||
@@ -22,27 +20,7 @@ func leafHash(leaf []byte) []byte {
|
||||
return tmhash.Sum(append(leafPrefix, leaf...))
|
||||
}
|
||||
|
||||
// returns tmhash(0x00 || leaf)
|
||||
func leafHashOpt(s hash.Hash, leaf []byte) []byte {
|
||||
s.Reset()
|
||||
s.Write(leafPrefix)
|
||||
s.Write(leaf)
|
||||
return s.Sum(nil)
|
||||
}
|
||||
|
||||
// returns tmhash(0x01 || left || right)
|
||||
func innerHash(left []byte, right []byte) []byte {
|
||||
data := make([]byte, len(innerPrefix)+len(left)+len(right))
|
||||
n := copy(data, innerPrefix)
|
||||
n += copy(data[n:], left)
|
||||
copy(data[n:], right)
|
||||
return tmhash.Sum(data)
|
||||
}
|
||||
|
||||
func innerHashOpt(s hash.Hash, left []byte, right []byte) []byte {
|
||||
s.Reset()
|
||||
s.Write(innerPrefix)
|
||||
s.Write(left)
|
||||
s.Write(right)
|
||||
return s.Sum(nil)
|
||||
return tmhash.Sum(append(innerPrefix, append(left, right...)...))
|
||||
}
|
||||
|
||||
@@ -50,13 +50,13 @@ func ProofsFromByteSlices(items [][]byte) (rootHash []byte, proofs []*Proof) {
|
||||
// Verify that the Proof proves the root hash.
|
||||
// Check sp.Index/sp.Total manually if needed
|
||||
func (sp *Proof) Verify(rootHash []byte, leaf []byte) error {
|
||||
leafHash := leafHash(leaf)
|
||||
if sp.Total < 0 {
|
||||
return errors.New("proof total must be positive")
|
||||
}
|
||||
if sp.Index < 0 {
|
||||
return errors.New("proof index cannot be negative")
|
||||
}
|
||||
leafHash := leafHash(leaf)
|
||||
if !bytes.Equal(sp.LeafHash, leafHash) {
|
||||
return fmt.Errorf("invalid leaf hash: wanted %X got %X", leafHash, sp.LeafHash)
|
||||
}
|
||||
|
||||
@@ -35,7 +35,6 @@ func TestKeyPath(t *testing.T) {
|
||||
|
||||
res, err := KeyPathToKeys(path.String())
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, len(keys), len(res))
|
||||
|
||||
for i, key := range keys {
|
||||
require.Equal(t, key, res[i])
|
||||
|
||||
@@ -171,12 +171,12 @@ func TestProofValidateBasic(t *testing.T) {
|
||||
}
|
||||
}
|
||||
func TestVoteProtobuf(t *testing.T) {
|
||||
|
||||
_, proofs := ProofsFromByteSlices([][]byte{
|
||||
[]byte("apple"),
|
||||
[]byte("watermelon"),
|
||||
[]byte("kiwi"),
|
||||
})
|
||||
|
||||
testCases := []struct {
|
||||
testName string
|
||||
v1 *Proof
|
||||
|
||||
@@ -1,28 +1,22 @@
package merkle

import (
	"crypto/sha256"
	"hash"
	"math/bits"
)

// HashFromByteSlices computes a Merkle tree where the leaves are the byte slice,
// in the provided order. It follows RFC-6962.
func HashFromByteSlices(items [][]byte) []byte {
	return hashFromByteSlices(sha256.New(), items)
}

func hashFromByteSlices(sha hash.Hash, items [][]byte) []byte {
	switch len(items) {
	case 0:
		return emptyHash()
	case 1:
		return leafHashOpt(sha, items[0])
		return leafHash(items[0])
	default:
		k := getSplitPoint(int64(len(items)))
		left := hashFromByteSlices(sha, items[:k])
		right := hashFromByteSlices(sha, items[k:])
		return innerHashOpt(sha, left, right)
		left := HashFromByteSlices(items[:k])
		right := HashFromByteSlices(items[k:])
		return innerHash(left, right)
	}
}

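Note (not part of the diff): both versions of hashFromByteSlices rely on getSplitPoint, which falls outside this hunk. A self-contained sketch of the split rule, assuming the RFC 6962 convention the comment cites (the left subtree takes the largest power of two strictly less than the leaf count):

package main

import (
	"fmt"
	"math/bits"
)

// getSplitPoint (sketch): the largest power of two strictly less than length,
// so an n-leaf RFC 6962 tree splits into subtrees of k and n-k leaves.
func getSplitPoint(length int64) int64 {
	if length < 1 {
		panic("trying to split a tree with size < 1")
	}
	bitlen := bits.Len(uint(length))
	k := int64(1) << uint(bitlen-1)
	if k == length {
		k >>= 1 // exact powers of two still split strictly below n
	}
	return k
}

func main() {
	for _, n := range []int64{2, 5, 8, 100} {
		fmt.Println(n, getSplitPoint(n)) // 2→1, 5→4, 8→4, 100→64
	}
}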
@@ -67,7 +61,7 @@ func hashFromByteSlices(sha hash.Hash, items [][]byte) []byte {
|
||||
// implementation for so little benefit.
|
||||
func HashFromByteSlicesIterative(input [][]byte) []byte {
|
||||
items := make([][]byte, len(input))
|
||||
sha := sha256.New()
|
||||
|
||||
for i, leaf := range input {
|
||||
items[i] = leafHash(leaf)
|
||||
}
|
||||
@@ -84,7 +78,7 @@ func HashFromByteSlicesIterative(input [][]byte) []byte {
|
||||
wp := 0 // write position
|
||||
for rp < size {
|
||||
if rp+1 < size {
|
||||
items[wp] = innerHashOpt(sha, items[rp], items[rp+1])
|
||||
items[wp] = innerHash(items[rp], items[rp+1])
|
||||
rp += 2
|
||||
} else {
|
||||
items[wp] = items[rp]
|
||||
|
||||
@@ -8,8 +8,7 @@ import (
|
||||
"io"
|
||||
"math/big"
|
||||
|
||||
secp256k1 "github.com/btcsuite/btcd/btcec/v2"
|
||||
"github.com/btcsuite/btcd/btcec/v2/ecdsa"
|
||||
secp256k1 "github.com/btcsuite/btcd/btcec"
|
||||
"golang.org/x/crypto/ripemd160" //nolint: staticcheck // necessary for Bitcoin address format
|
||||
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
@@ -43,7 +42,7 @@ func (privKey PrivKey) Bytes() []byte {
|
||||
// PubKey performs the point-scalar multiplication from the privKey on the
|
||||
// generator point to get the pubkey.
|
||||
func (privKey PrivKey) PubKey() crypto.PubKey {
|
||||
_, pubkeyObject := secp256k1.PrivKeyFromBytes(privKey)
|
||||
_, pubkeyObject := secp256k1.PrivKeyFromBytes(secp256k1.S256(), privKey)
|
||||
|
||||
pk := pubkeyObject.SerializeCompressed()
|
||||
|
||||
@@ -123,18 +122,24 @@ func GenPrivKeySecp256k1(secret []byte) PrivKey {
|
||||
return PrivKey(privKey32)
|
||||
}
|
||||
|
||||
// used to reject malleable signatures
|
||||
// see:
|
||||
// - https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/signature_nocgo.go#L90-L93
|
||||
// - https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/crypto.go#L39
|
||||
var secp256k1halfN = new(big.Int).Rsh(secp256k1.S256().N, 1)
|
||||
|
||||
// Sign creates an ECDSA signature on curve Secp256k1, using SHA256 on the msg.
|
||||
// The returned signature will be of the form R || S (in lower-S form).
|
||||
func (privKey PrivKey) Sign(msg []byte) ([]byte, error) {
|
||||
priv, _ := secp256k1.PrivKeyFromBytes(privKey)
|
||||
priv, _ := secp256k1.PrivKeyFromBytes(secp256k1.S256(), privKey)
|
||||
|
||||
sig, err := ecdsa.SignCompact(priv, crypto.Sha256(msg), false)
|
||||
sig, err := priv.Sign(crypto.Sha256(msg))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// remove the first byte which is compactSigRecoveryCode
|
||||
return sig[1:], nil
|
||||
sigBytes := serializeSig(sig)
|
||||
return sigBytes, nil
|
||||
}
|
||||
|
||||
//-------------------------------------
|
||||
@@ -194,7 +199,7 @@ func (pubKey PubKey) VerifySignature(msg []byte, sigStr []byte) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
pub, err := secp256k1.ParsePubKey(pubKey)
|
||||
pub, err := secp256k1.ParsePubKey(pubKey, secp256k1.S256())
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
@@ -203,13 +208,7 @@ func (pubKey PubKey) VerifySignature(msg []byte, sigStr []byte) bool {
|
||||
signature := signatureFromBytes(sigStr)
|
||||
// Reject malleable signatures. libsecp256k1 does this check but btcec doesn't.
|
||||
// see: https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/signature_nocgo.go#L90-L93
|
||||
// Serialize() would negate S value if it is over half order.
|
||||
// Hence, if the signature is different after Serialize() if should be rejected.
|
||||
var modifiedSignature, parseErr = ecdsa.ParseDERSignature(signature.Serialize())
|
||||
if parseErr != nil {
|
||||
return false
|
||||
}
|
||||
if !signature.IsEqual(modifiedSignature) {
|
||||
if signature.S.Cmp(secp256k1halfN) > 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
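Note (not part of the diff): both sides of the hunk above enforce the same low-S malleability rule, just against different btcec APIs. A standalone sketch of the rule itself; the group order N below is the secp256k1 constant, stated here as an assumption rather than taken from either library:

package main

import (
	"fmt"
	"math/big"
)

// N is the order of the secp256k1 group (assumed constant); a signature with
// S > N/2 is rejected because (R, N-S) would be a second valid encoding of
// the same signature.
var curveN, _ = new(big.Int).SetString(
	"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141", 16)

var halfN = new(big.Int).Rsh(curveN, 1)

func isLowS(s *big.Int) bool {
	return s.Cmp(halfN) <= 0
}

func main() {
	s := new(big.Int).Sub(curveN, big.NewInt(1)) // a deliberately high S
	fmt.Println(isLowS(s))                       // false: would be rejected
}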
@@ -218,10 +217,21 @@ func (pubKey PubKey) VerifySignature(msg []byte, sigStr []byte) bool {
|
||||
|
||||
// Read Signature struct from R || S. Caller needs to ensure
|
||||
// that len(sigStr) == 64.
|
||||
func signatureFromBytes(sigStr []byte) *ecdsa.Signature {
|
||||
var r secp256k1.ModNScalar
|
||||
r.SetByteSlice(sigStr[:32])
|
||||
var s secp256k1.ModNScalar
|
||||
s.SetByteSlice(sigStr[32:64])
|
||||
return ecdsa.NewSignature(&r, &s)
|
||||
func signatureFromBytes(sigStr []byte) *secp256k1.Signature {
|
||||
return &secp256k1.Signature{
|
||||
R: new(big.Int).SetBytes(sigStr[:32]),
|
||||
S: new(big.Int).SetBytes(sigStr[32:64]),
|
||||
}
|
||||
}
|
||||
|
||||
// Serialize signature to R || S.
|
||||
// R, S are padded to 32 bytes respectively.
|
||||
func serializeSig(sig *secp256k1.Signature) []byte {
|
||||
rBytes := sig.R.Bytes()
|
||||
sBytes := sig.S.Bytes()
|
||||
sigBytes := make([]byte, 64)
|
||||
// 0 pad the byte arrays from the left if they aren't big enough.
|
||||
copy(sigBytes[32-len(rBytes):32], rBytes)
|
||||
copy(sigBytes[64-len(sBytes):64], sBytes)
|
||||
return sigBytes
|
||||
}
|
||||
|
||||
@@ -7,7 +7,7 @@ import (
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
secp256k1 "github.com/btcsuite/btcd/btcec/v2"
|
||||
secp256k1 "github.com/btcsuite/btcd/btcec"
|
||||
)
|
||||
|
||||
func Test_genPrivKey(t *testing.T) {
|
||||
@@ -54,30 +54,20 @@ func TestSignatureVerificationAndRejectUpperS(t *testing.T) {
|
||||
priv := GenPrivKey()
|
||||
sigStr, err := priv.Sign(msg)
|
||||
require.NoError(t, err)
|
||||
var r secp256k1.ModNScalar
|
||||
r.SetByteSlice(sigStr[:32])
|
||||
var s secp256k1.ModNScalar
|
||||
s.SetByteSlice(sigStr[32:64])
|
||||
require.False(t, s.IsOverHalfOrder())
|
||||
sig := signatureFromBytes(sigStr)
|
||||
require.False(t, sig.S.Cmp(secp256k1halfN) > 0)
|
||||
|
||||
pub := priv.PubKey()
|
||||
require.True(t, pub.VerifySignature(msg, sigStr))
|
||||
|
||||
// malleate:
|
||||
var S256 secp256k1.ModNScalar
|
||||
S256.SetByteSlice(secp256k1.S256().N.Bytes())
|
||||
s.Negate().Add(&S256)
|
||||
require.True(t, s.IsOverHalfOrder())
|
||||
|
||||
rBytes := r.Bytes()
|
||||
sBytes := s.Bytes()
|
||||
malSigStr := make([]byte, 64)
|
||||
copy(malSigStr[32-len(rBytes):32], rBytes[:])
|
||||
copy(malSigStr[64-len(sBytes):64], sBytes[:])
|
||||
sig.S.Sub(secp256k1.S256().CurveParams.N, sig.S)
|
||||
require.True(t, sig.S.Cmp(secp256k1halfN) > 0)
|
||||
malSigStr := serializeSig(sig)
|
||||
|
||||
require.False(t, pub.VerifySignature(msg, malSigStr),
|
||||
"VerifyBytes incorrect with malleated & invalid S. sig=%v, key=%v",
|
||||
malSigStr,
|
||||
sig,
|
||||
priv,
|
||||
)
|
||||
}
|
||||
|
||||
@@ -5,14 +5,14 @@ import (
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"github.com/btcsuite/btcd/btcutil/base58"
|
||||
"github.com/btcsuite/btcutil/base58"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
"github.com/tendermint/tendermint/crypto/secp256k1"
|
||||
|
||||
underlyingSecp256k1 "github.com/btcsuite/btcd/btcec/v2"
|
||||
underlyingSecp256k1 "github.com/btcsuite/btcd/btcec"
|
||||
)
|
||||
|
||||
type keyData struct {
|
||||
@@ -75,7 +75,7 @@ func TestSecp256k1LoadPrivkeyAndSerializeIsIdentity(t *testing.T) {
|
||||
|
||||
// This function creates a private and public key in the underlying libraries format.
|
||||
// The private key is basically calling new(big.Int).SetBytes(pk), which removes leading zero bytes
|
||||
priv, _ := underlyingSecp256k1.PrivKeyFromBytes(privKeyBytes[:])
|
||||
priv, _ := underlyingSecp256k1.PrivKeyFromBytes(underlyingSecp256k1.S256(), privKeyBytes[:])
|
||||
// this takes the bytes returned by `(big int).Bytes()`, and if the length is less than 32 bytes,
|
||||
// pads the bytes from the left with zero bytes. Therefore these two functions composed
|
||||
// result in the identity function on privKeyBytes, hence the following equality check
|
||||
|
||||
@@ -1,46 +0,0 @@
|
||||
package sr25519
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/oasisprotocol/curve25519-voi/primitives/sr25519"
|
||||
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
)
|
||||
|
||||
var _ crypto.BatchVerifier = &BatchVerifier{}
|
||||
|
||||
// BatchVerifier implements batch verification for sr25519.
|
||||
type BatchVerifier struct {
|
||||
*sr25519.BatchVerifier
|
||||
}
|
||||
|
||||
func NewBatchVerifier() crypto.BatchVerifier {
|
||||
return &BatchVerifier{sr25519.NewBatchVerifier()}
|
||||
}
|
||||
|
||||
func (b *BatchVerifier) Add(key crypto.PubKey, msg, signature []byte) error {
|
||||
pk, ok := key.(PubKey)
|
||||
if !ok {
|
||||
return fmt.Errorf("sr25519: pubkey is not sr25519")
|
||||
}
|
||||
|
||||
var srpk sr25519.PublicKey
|
||||
if err := srpk.UnmarshalBinary(pk); err != nil {
|
||||
return fmt.Errorf("sr25519: invalid public key: %w", err)
|
||||
}
|
||||
|
||||
var sig sr25519.Signature
|
||||
if err := sig.UnmarshalBinary(signature); err != nil {
|
||||
return fmt.Errorf("sr25519: unable to decode signature: %w", err)
|
||||
}
|
||||
|
||||
st := signingCtx.NewTranscriptBytes(msg)
|
||||
b.BatchVerifier.Add(&srpk, st, &sig)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *BatchVerifier) Verify() (bool, []bool) {
|
||||
return b.BatchVerifier.Verify(crypto.CReader())
|
||||
}
|
||||
@@ -1,12 +1,9 @@
|
||||
package sr25519
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
"github.com/tendermint/tendermint/crypto/internal/benchmarking"
|
||||
)
|
||||
@@ -27,42 +24,3 @@ func BenchmarkVerification(b *testing.B) {
|
||||
priv := GenPrivKey()
|
||||
benchmarking.BenchmarkVerification(b, priv)
|
||||
}
|
||||
|
||||
func BenchmarkVerifyBatch(b *testing.B) {
|
||||
msg := []byte("BatchVerifyTest")
|
||||
|
||||
for _, sigsCount := range []int{1, 8, 64, 1024} {
|
||||
sigsCount := sigsCount
|
||||
b.Run(fmt.Sprintf("sig-count-%d", sigsCount), func(b *testing.B) {
|
||||
// Pre-generate all of the keys, and signatures, but do not
|
||||
// benchmark key-generation and signing.
|
||||
pubs := make([]crypto.PubKey, 0, sigsCount)
|
||||
sigs := make([][]byte, 0, sigsCount)
|
||||
for i := 0; i < sigsCount; i++ {
|
||||
priv := GenPrivKey()
|
||||
sig, _ := priv.Sign(msg)
|
||||
pubs = append(pubs, priv.PubKey().(PubKey))
|
||||
sigs = append(sigs, sig)
|
||||
}
|
||||
b.ResetTimer()
|
||||
|
||||
b.ReportAllocs()
|
||||
// NOTE: dividing by n so that metrics are per-signature
|
||||
for i := 0; i < b.N/sigsCount; i++ {
|
||||
// The benchmark could just benchmark the Verify()
|
||||
// routine, but there is non-trivial overhead associated
|
||||
// with BatchVerifier.Add(), which should be included
|
||||
// in the benchmark.
|
||||
v := NewBatchVerifier()
|
||||
for i := 0; i < sigsCount; i++ {
|
||||
err := v.Add(pubs[i], msg, sigs[i])
|
||||
require.NoError(b, err)
|
||||
}
|
||||
|
||||
if ok, _ := v.Verify(); !ok {
|
||||
b.Fatal("signature set failed batch verification")
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,13 +1,23 @@
|
||||
package sr25519
|
||||
|
||||
import tmjson "github.com/tendermint/tendermint/libs/json"
|
||||
import (
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
tmjson "github.com/tendermint/tendermint/libs/json"
|
||||
)
|
||||
|
||||
var _ crypto.PrivKey = PrivKey{}
|
||||
|
||||
const (
|
||||
PrivKeyName = "tendermint/PrivKeySr25519"
|
||||
PubKeyName = "tendermint/PubKeySr25519"
|
||||
|
||||
// SignatureSize is the size of an Edwards25519 signature. Namely the size of a compressed
|
||||
// Sr25519 point, and a field element. Both of which are 32 bytes.
|
||||
SignatureSize = 64
|
||||
)
|
||||
|
||||
func init() {
|
||||
|
||||
tmjson.RegisterType(PubKey{}, PubKeyName)
|
||||
tmjson.RegisterType(PrivKey{}, PrivKeyName)
|
||||
}
|
||||
|
||||
@@ -1,126 +1,76 @@
|
||||
package sr25519
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"crypto/subtle"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/oasisprotocol/curve25519-voi/primitives/sr25519"
|
||||
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
|
||||
schnorrkel "github.com/ChainSafe/go-schnorrkel"
|
||||
)
|
||||
|
||||
var (
|
||||
_ crypto.PrivKey = PrivKey{}
|
||||
// PrivKeySize is the number of bytes in an Sr25519 private key.
|
||||
const PrivKeySize = 32
|
||||
|
||||
signingCtx = sr25519.NewSigningContext([]byte{})
|
||||
)
|
||||
|
||||
const (
|
||||
// PrivKeySize is the number of bytes in an Sr25519 private key.
|
||||
PrivKeySize = 32
|
||||
|
||||
KeyType = "sr25519"
|
||||
)
|
||||
|
||||
// PrivKey implements crypto.PrivKey.
|
||||
type PrivKey struct {
|
||||
msk sr25519.MiniSecretKey
|
||||
kp *sr25519.KeyPair
|
||||
}
|
||||
// PrivKeySr25519 implements crypto.PrivKey.
|
||||
type PrivKey []byte
|
||||
|
||||
// Bytes returns the byte representation of the PrivKey.
|
||||
func (privKey PrivKey) Bytes() []byte {
|
||||
if privKey.kp == nil {
|
||||
return nil
|
||||
}
|
||||
return privKey.msk[:]
|
||||
return []byte(privKey)
|
||||
}
|
||||
|
||||
// Sign produces a signature on the provided message.
|
||||
func (privKey PrivKey) Sign(msg []byte) ([]byte, error) {
|
||||
if privKey.kp == nil {
|
||||
return nil, fmt.Errorf("sr25519: uninitialized private key")
|
||||
}
|
||||
|
||||
st := signingCtx.NewTranscriptBytes(msg)
|
||||
|
||||
sig, err := privKey.kp.Sign(crypto.CReader(), st)
|
||||
var p [PrivKeySize]byte
|
||||
copy(p[:], privKey)
|
||||
miniSecretKey, err := schnorrkel.NewMiniSecretKeyFromRaw(p)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("sr25519: failed to sign message: %w", err)
|
||||
return []byte{}, err
|
||||
}
|
||||
secretKey := miniSecretKey.ExpandEd25519()
|
||||
|
||||
sigBytes, err := sig.MarshalBinary()
|
||||
signingContext := schnorrkel.NewSigningContext([]byte{}, msg)
|
||||
|
||||
sig, err := secretKey.Sign(signingContext)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("sr25519: failed to serialize signature: %w", err)
|
||||
return []byte{}, err
|
||||
}
|
||||
|
||||
return sigBytes, nil
|
||||
sigBytes := sig.Encode()
|
||||
return sigBytes[:], nil
|
||||
}
|
||||
|
||||
// PubKey gets the corresponding public key from the private key.
|
||||
func (privKey PrivKey) PubKey() crypto.PubKey {
|
||||
if privKey.kp == nil {
|
||||
panic("sr25519: uninitialized private key")
|
||||
}
|
||||
|
||||
b, err := privKey.kp.PublicKey().MarshalBinary()
|
||||
var p [PrivKeySize]byte
|
||||
copy(p[:], privKey)
|
||||
miniSecretKey, err := schnorrkel.NewMiniSecretKeyFromRaw(p)
|
||||
if err != nil {
|
||||
panic("sr25519: failed to serialize public key: " + err.Error())
|
||||
panic(fmt.Sprintf("Invalid private key: %v", err))
|
||||
}
|
||||
secretKey := miniSecretKey.ExpandEd25519()
|
||||
|
||||
return PubKey(b)
|
||||
pubkey, err := secretKey.Public()
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Could not generate public key: %v", err))
|
||||
}
|
||||
key := pubkey.Encode()
|
||||
return PubKey(key[:])
|
||||
}
|
||||
|
||||
// Equals - you probably don't need to use this.
|
||||
// Runs in constant time based on length of the keys.
|
||||
func (privKey PrivKey) Equals(other crypto.PrivKey) bool {
|
||||
if otherSr, ok := other.(PrivKey); ok {
|
||||
return privKey.msk.Equal(&otherSr.msk)
|
||||
if otherEd, ok := other.(PrivKey); ok {
|
||||
return subtle.ConstantTimeCompare(privKey[:], otherEd[:]) == 1
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (privKey PrivKey) Type() string {
|
||||
return KeyType
|
||||
}
|
||||
|
||||
func (privKey PrivKey) MarshalJSON() ([]byte, error) {
|
||||
var b []byte
|
||||
|
||||
// Handle uninitialized private keys gracefully.
|
||||
if privKey.kp != nil {
|
||||
b = privKey.Bytes()
|
||||
}
|
||||
|
||||
return json.Marshal(b)
|
||||
}
|
||||
|
||||
func (privKey *PrivKey) UnmarshalJSON(data []byte) error {
|
||||
for i := range privKey.msk {
|
||||
privKey.msk[i] = 0
|
||||
}
|
||||
privKey.kp = nil
|
||||
|
||||
var b []byte
|
||||
if err := json.Unmarshal(data, &b); err != nil {
|
||||
return fmt.Errorf("sr25519: failed to deserialize JSON: %w", err)
|
||||
}
|
||||
if len(b) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
msk, err := sr25519.NewMiniSecretKeyFromBytes(b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sk := msk.ExpandEd25519()
|
||||
|
||||
privKey.msk = *msk
|
||||
privKey.kp = sk.KeyPair()
|
||||
|
||||
return nil
|
||||
return keyType
|
||||
}
|
||||
|
||||
// GenPrivKey generates a new sr25519 private key.
|
||||
@@ -131,18 +81,19 @@ func GenPrivKey() PrivKey {
|
||||
}
|
||||
|
||||
// genPrivKey generates a new sr25519 private key using the provided reader.
|
||||
func genPrivKey(rng io.Reader) PrivKey {
|
||||
msk, err := sr25519.GenerateMiniSecretKey(rng)
|
||||
func genPrivKey(rand io.Reader) PrivKey {
|
||||
var seed [64]byte
|
||||
|
||||
out := make([]byte, 64)
|
||||
_, err := io.ReadFull(rand, out)
|
||||
if err != nil {
|
||||
panic("sr25519: failed to generate MiniSecretKey: " + err.Error())
|
||||
panic(err)
|
||||
}
|
||||
|
||||
sk := msk.ExpandEd25519()
|
||||
copy(seed[:], out)
|
||||
|
||||
return PrivKey{
|
||||
msk: *msk,
|
||||
kp: sk.KeyPair(),
|
||||
}
|
||||
key := schnorrkel.NewMiniSecretKey(seed).ExpandEd25519().Encode()
|
||||
return key[:]
|
||||
}
|
||||
|
||||
// GenPrivKeyFromSecret hashes the secret with SHA2, and uses
|
||||
@@ -151,14 +102,9 @@ func genPrivKey(rng io.Reader) PrivKey {
|
||||
// if it's derived from user input.
|
||||
func GenPrivKeyFromSecret(secret []byte) PrivKey {
|
||||
seed := crypto.Sha256(secret) // Not Ripemd160 because we want 32 bytes.
|
||||
|
||||
var privKey PrivKey
|
||||
if err := privKey.msk.UnmarshalBinary(seed); err != nil {
|
||||
panic("sr25519: failed to deserialize MiniSecretKey: " + err.Error())
|
||||
}
|
||||
|
||||
sk := privKey.msk.ExpandEd25519()
|
||||
privKey.kp = sk.KeyPair()
|
||||
|
||||
return privKey
|
||||
var bz [PrivKeySize]byte
|
||||
copy(bz[:], seed)
|
||||
privKey, _ := schnorrkel.NewMiniSecretKeyFromRaw(bz)
|
||||
key := privKey.ExpandEd25519().Encode()
|
||||
return key[:]
|
||||
}
|
||||
|
||||
@@ -4,30 +4,25 @@ import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
|
||||
"github.com/oasisprotocol/curve25519-voi/primitives/sr25519"
|
||||
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
"github.com/tendermint/tendermint/crypto/tmhash"
|
||||
|
||||
schnorrkel "github.com/ChainSafe/go-schnorrkel"
|
||||
)
|
||||
|
||||
var _ crypto.PubKey = PubKey{}
|
||||
|
||||
// PubKeySize is the number of bytes in an Sr25519 public key.
|
||||
const (
|
||||
// PubKeySize is the number of bytes in an Sr25519 public key.
|
||||
PubKeySize = 32
|
||||
|
||||
// SignatureSize is the size of a Sr25519 signature in bytes.
|
||||
SignatureSize = 64
|
||||
keyType = "sr25519"
|
||||
)
|
||||
|
||||
// PubKey implements crypto.PubKey for the Sr25519 signature scheme.
|
||||
// PubKeySr25519 implements crypto.PubKey for the Sr25519 signature scheme.
|
||||
type PubKey []byte
|
||||
|
||||
// Address is the SHA256-20 of the raw pubkey bytes.
|
||||
func (pubKey PubKey) Address() crypto.Address {
|
||||
if len(pubKey) != PubKeySize {
|
||||
panic("pubkey is incorrect size")
|
||||
}
|
||||
return crypto.Address(tmhash.SumTruncated(pubKey[:]))
|
||||
}
|
||||
|
||||
@@ -36,35 +31,47 @@ func (pubKey PubKey) Bytes() []byte {
|
||||
return []byte(pubKey)
|
||||
}
|
||||
|
||||
// Equals - checks that two public keys are the same time
|
||||
// Runs in constant time based on length of the keys.
|
||||
func (pubKey PubKey) Equals(other crypto.PubKey) bool {
|
||||
if otherSr, ok := other.(PubKey); ok {
|
||||
return bytes.Equal(pubKey[:], otherSr[:])
|
||||
func (pubKey PubKey) VerifySignature(msg []byte, sig []byte) bool {
|
||||
// make sure we use the same algorithm to sign
|
||||
if len(sig) != SignatureSize {
|
||||
return false
|
||||
}
|
||||
var sig64 [SignatureSize]byte
|
||||
copy(sig64[:], sig)
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func (pubKey PubKey) VerifySignature(msg []byte, sigBytes []byte) bool {
|
||||
var srpk sr25519.PublicKey
|
||||
if err := srpk.UnmarshalBinary(pubKey); err != nil {
|
||||
publicKey := &(schnorrkel.PublicKey{})
|
||||
var p [PubKeySize]byte
|
||||
copy(p[:], pubKey)
|
||||
err := publicKey.Decode(p)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
var sig sr25519.Signature
|
||||
if err := sig.UnmarshalBinary(sigBytes); err != nil {
|
||||
signingContext := schnorrkel.NewSigningContext([]byte{}, msg)
|
||||
|
||||
signature := &(schnorrkel.Signature{})
|
||||
err = signature.Decode(sig64)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
st := signingCtx.NewTranscriptBytes(msg)
|
||||
return srpk.Verify(st, &sig)
|
||||
return publicKey.Verify(signature, signingContext)
|
||||
}
|
||||
|
||||
func (pubKey PubKey) String() string {
|
||||
return fmt.Sprintf("PubKeySr25519{%X}", []byte(pubKey))
|
||||
}
|
||||
|
||||
func (pubKey PubKey) Type() string {
|
||||
return KeyType
|
||||
// Equals - checks that two public keys are the same time
|
||||
// Runs in constant time based on length of the keys.
|
||||
func (pubKey PubKey) Equals(other crypto.PubKey) bool {
|
||||
if otherEd, ok := other.(PubKey); ok {
|
||||
return bytes.Equal(pubKey[:], otherEd[:])
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (pubKey PubKey) Type() string {
|
||||
return keyType
|
||||
|
||||
}
|
||||
|
||||
@@ -1,8 +1,6 @@
|
||||
package sr25519_test
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
@@ -13,6 +11,7 @@ import (
|
||||
)
|
||||
|
||||
func TestSignAndValidateSr25519(t *testing.T) {
|
||||
|
||||
privKey := sr25519.GenPrivKey()
|
||||
pubKey := privKey.PubKey()
|
||||
|
||||
@@ -30,69 +29,3 @@ func TestSignAndValidateSr25519(t *testing.T) {
|
||||
|
||||
assert.False(t, pubKey.VerifySignature(msg, sig))
|
||||
}
|
||||
|
||||
func TestBatchSafe(t *testing.T) {
|
||||
v := sr25519.NewBatchVerifier()
|
||||
vFail := sr25519.NewBatchVerifier()
|
||||
for i := 0; i <= 38; i++ {
|
||||
priv := sr25519.GenPrivKey()
|
||||
pub := priv.PubKey()
|
||||
|
||||
var msg []byte
|
||||
if i%2 == 0 {
|
||||
msg = []byte("easter")
|
||||
} else {
|
||||
msg = []byte("egg")
|
||||
}
|
||||
|
||||
sig, err := priv.Sign(msg)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = v.Add(pub, msg, sig)
|
||||
require.NoError(t, err)
|
||||
|
||||
switch i % 2 {
|
||||
case 0:
|
||||
err = vFail.Add(pub, msg, sig)
|
||||
case 1:
|
||||
msg[2] ^= byte(0x01)
|
||||
err = vFail.Add(pub, msg, sig)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
ok, valid := v.Verify()
|
||||
require.True(t, ok, "failed batch verification")
|
||||
for i, ok := range valid {
|
||||
require.Truef(t, ok, "sig[%d] should be marked valid", i)
|
||||
}
|
||||
|
||||
ok, valid = vFail.Verify()
|
||||
require.False(t, ok, "succeeded batch verification (invalid batch)")
|
||||
for i, ok := range valid {
|
||||
expected := (i % 2) == 0
|
||||
require.Equalf(t, expected, ok, "sig[%d] should be %v", i, expected)
|
||||
}
|
||||
}
|
||||
|
||||
func TestJSON(t *testing.T) {
|
||||
privKey := sr25519.GenPrivKey()
|
||||
|
||||
t.Run("PrivKey", func(t *testing.T) {
|
||||
b, err := json.Marshal(privKey)
|
||||
require.NoError(t, err)
|
||||
|
||||
// b should be the base64 encoded MiniSecretKey, enclosed by doublequotes.
|
||||
b64 := base64.StdEncoding.EncodeToString(privKey.Bytes())
|
||||
b64 = "\"" + b64 + "\""
|
||||
require.Equal(t, []byte(b64), b)
|
||||
|
||||
var privKey2 sr25519.PrivKey
|
||||
err = json.Unmarshal(b, &privKey2)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, privKey2.Bytes(), sr25519.PrivKeySize)
|
||||
require.EqualValues(t, privKey.Bytes(), privKey2.Bytes())
|
||||
})
|
||||
|
||||
// PubKeys are just []byte, so there is no special handling.
|
||||
}
|
||||
|
||||
@@ -63,4 +63,3 @@
|
||||
/redirects/master/spec/blockchain/encoding.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/blockchain/encoding.md
|
||||
/redirects/master/spec/blockchain/blockchain.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/blockchain/blockchain.md
|
||||
/redirects/master/spec/blockchain/index.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/blockchain/readme.md
|
||||
/redirects/master/tutorials/go.html /v0.34/tutorials/go.html
|
||||
@@ -253,13 +253,14 @@ Similarly, you could put the commands in a file and run
Want to write an app in your favorite language?! We'd be happy
to add you to our [ecosystem](https://github.com/tendermint/awesome#ecosystem)!
See [funding](https://github.com/interchainio/funding) opportunities from the
[Interchain Foundation](https://interchain.io) for implementations in new languages and more.
[Interchain Foundation](https://interchain.io/) for implementations in new languages and more.

The `abci-cli` is designed strictly for testing and debugging. In a real
deployment, the role of sending messages is taken by Tendermint, which
connects to the app using three separate connections, each with its own
pattern of messages.

For examples of running an ABCI app with
For more information, see the [application developers
guide](./app-development.md). For examples of running an ABCI app with
Tendermint, see the [getting started guide](./getting-started.md).
Next is the ABCI specification.

@@ -15,7 +15,7 @@ the block itself is never stored.
Each event contains a type and a list of attributes, which are key-value pairs
denoting something about what happened during the method's execution. For more
details on `Events`, see the
[ABCI](https://github.com/tendermint/tendermint/blob/main/spec/abci/abci++_basic_concepts.md#events)
[ABCI](https://github.com/tendermint/tendermint/blob/main/spec/abci/abci.md#events)
documentation.

An `Event` has a composite key associated with it. A `compositeKey` is
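Note (not part of the diff): to ground the paragraph above, a minimal sketch of an event as an application might emit it. The transfer event and its attributes are hypothetical, and the attribute field types differ between Tendermint versions (byte slices in older releases, strings more recently):

package main

import (
	"fmt"

	abci "github.com/tendermint/tendermint/abci/types"
)

func main() {
	// Hypothetical event emitted by an application while executing a transaction.
	ev := abci.Event{
		Type: "transfer",
		Attributes: []abci.EventAttribute{
			{Key: "sender", Value: "addr1", Index: true},
			{Key: "recipient", Value: "addr2", Index: true},
			{Key: "amount", Value: "100uatom"},
		},
	}
	// The composite keys for this event would be transfer.sender,
	// transfer.recipient, and transfer.amount.
	fmt.Println(ev.Type, len(ev.Attributes))
}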
@@ -63,12 +63,14 @@ Note the context/background should be written in the present tense.
- [ADR-055: Protobuf-Design](./adr-055-protobuf-design.md)
- [ADR-056: Light-Client-Amnesia-Attacks](./adr-056-light-client-amnesia-attacks.md)
- [ADR-059: Evidence-Composition-and-Lifecycle](./adr-059-evidence-composition-and-lifecycle.md)
- [ADR-065: Custom Event Indexing](./adr-065-custom-event-indexing.md)
- [ADR-062: P2P-Architecture](./adr-062-p2p-architecture.md)
- [ADR-063: Privval-gRPC](./adr-063-privval-grpc.md)
- [ADR-066: E2E-Testing](./adr-066-e2e-testing.md)
- [ADR-072: Restore Requests for Comments](./adr-072-request-for-comments.md)
- [ADR-076: Combine Spec and Tendermint Repositories](./adr-076-combine-spec-repo.md)
- [ADR-077: Configurable Block Retention](./adr-077-block-retention.md)
- [ADR-077: Block Retention](./adr-077-block-retention.md)
- [ADR-078: Non-zero Genesis](./adr-078-nonzero-genesis.md)
- [ADR-079: ED25519 Verification](./adr-079-ed25519-verification.md)
- [ADR-080: Reverse Sync](./adr-080-reverse-sync.md)

### Accepted

@@ -76,10 +78,13 @@ Note the context/background should be written in the present tense.
- [ADR-024: Sign-Bytes](./adr-024-sign-bytes.md)
- [ADR-035: Documentation](./adr-035-documentation.md)
- [ADR-039: Peer-Behaviour](./adr-039-peer-behaviour.md)
- [ADR-063: Privval-gRPC](./adr-063-privval-grpc.md)
- [ADR-060: Go-API-Stability](./adr-060-go-api-stability.md)
- [ADR-061: P2P-Refactor-Scope](./adr-061-p2p-refactor-scope.md)
- [ADR-065: Custom Event Indexing](./adr-065-custom-event-indexing.md)
- [ADR-068: Reverse-Sync](./adr-068-reverse-sync.md)
- [ADR-067: Mempool Refactor](./adr-067-mempool-refactor.md)
- [ADR-075: RPC Event Subscription Interface](./adr-075-rpc-subscription.md)
- [ADR-079: Ed25519 Verification](./adr-079-ed25519-verification.md)
- [ADR-076: Combine Spec and Tendermint Repositories](./adr-076-combine-spec-repo.md)
- [ADR-081: Protocol Buffers Management](./adr-081-protobuf-mgmt.md)

### Deprecated

@@ -108,13 +113,8 @@ None
- [ADR-045: ABCI-Evidence](./adr-045-abci-evidence.md)
- [ADR-050: Improved Trusted Peering](./adr-050-improved-trusted-peering.md)
- [ADR-057: RPC](./adr-057-RPC.md)
- [ADR-060: Go-API-Stability](./adr-060-go-api-stability.md)
- [ADR-061: P2P-Refactor-Scope](./adr-061-p2p-refactor-scope.md)
- [ADR-062: P2P-Architecture](./adr-062-p2p-architecture.md)
- [ADR-064: Batch Verification](./adr-064-batch-verification.md)
- [ADR-068: Reverse-Sync](./adr-068-reverse-sync.md)
- [ADR-069: Node Initialization](./adr-069-flexible-node-initialization.md)
- [ADR-071: Proposer-Based Timestamps](./adr-071-proposer-based-timestamps.md)
- [ADR-073: Adopt LibP2P](./adr-073-libp2p.md)
- [ADR-074: Migrate Timeout Parameters to Consensus Parameters](./adr-074-timeout-params.md)
- [ADR-080: Reverse Sync](./adr-080-reverse-sync.md)

@@ -63,7 +63,7 @@ Light clients where all witnesses are faulty can be subject to an amnesia attack
|
||||
|
||||
|
||||
## References
|
||||
<!-- markdown-link-check-disable-next-line -->
|
||||
|
||||
- [Fork accountability algorithm](https://docs.google.com/document/d/11ZhMsCj3y7zIZz4udO9l25xqb0kl7gmWqNpGVRzOeyY/edit)
|
||||
- [Fork accountability spec](https://github.com/tendermint/tendermint/blob/main/spec/consensus/light-client/accountability.md)
|
||||
|
||||
|
||||
@@ -162,7 +162,7 @@ The API guarantees above can be fairly constraining, but are unavoidable given t
|
||||
|
||||
## Status
|
||||
|
||||
Proposed
|
||||
Accepted
|
||||
|
||||
## Consequences
|
||||
|
||||
|
||||
@@ -78,7 +78,7 @@ This phase covers speculative, wide-reaching proposals that are poorly defined a
|
||||
|
||||
## Status
|
||||
|
||||
Proposed
|
||||
Accepted
|
||||
|
||||
## Consequences
|
||||
|
||||
|
||||
@@ -577,9 +577,7 @@ func EchoReactor(ctx context.Context, channel *p2p.Channel, peerUpdates *p2p.Pee
|
||||
|
||||
## Status
|
||||
|
||||
Proposed
|
||||
|
||||
Was partially implemented in v0.35 ([#5670](https://github.com/tendermint/tendermint/issues/5670))
|
||||
Partially implemented ([#5670](https://github.com/tendermint/tendermint/issues/5670))
|
||||
|
||||
## Consequences
|
||||
|
||||
|
||||
@@ -91,8 +91,8 @@ The upgrade of [tmkms](https://github.com/iqlusioninc/tmkms) will be coordinated
|
||||
|
||||
## Status
|
||||
|
||||
Accepted (tracked in
|
||||
[\#9256](https://github.com/tendermint/tendermint/issues/9256))
|
||||
|
||||
Implemented
|
||||
|
||||
### Positive
|
||||
|
||||
|
||||
@@ -29,7 +29,7 @@
|
||||
|
||||
## Status
|
||||
|
||||
Implemented
|
||||
Accepted
|
||||
|
||||
## Context
|
||||
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
|
||||
## Status
|
||||
|
||||
Proposed
|
||||
Accepted
|
||||
|
||||
## Context
|
||||
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
|
||||
## Status
|
||||
|
||||
Implemented
|
||||
Accepted.
|
||||
|
||||
## Context
|
||||
|
||||
@@ -108,5 +108,5 @@ N/A
|
||||
|
||||
## References
|
||||
|
||||
- https://github.com/tendermint/tendermint/tree/main/spec
|
||||
- https://github.com/tendermint/spec
|
||||
- https://github.com/tendermint/tendermint
|
||||
|
||||
@@ -78,7 +78,7 @@ The returned `retain_height` would be the lowest height that satisfies:
|
||||
|
||||
## Status
|
||||
|
||||
Implemented
|
||||
Accepted
|
||||
|
||||
## Consequences
|
||||
|
||||
|
||||
@@ -59,7 +59,7 @@ block may fail.
|
||||
|
||||
## Status
|
||||
|
||||
Implemented
|
||||
Accepted
|
||||
|
||||
## Consequences
|
||||
|
||||
|
||||
@@ -31,8 +31,7 @@ As part of the acceptance of this proposal it would be best to contract or discu
|
||||
|
||||
## Status
|
||||
|
||||
Accepted (implicitly tracked in
|
||||
[\#9186](https://github.com/tendermint/tendermint/issues/9186))
|
||||
Proposed
|
||||
|
||||
## Consequences
|
||||
|
||||
|
||||
@@ -106,7 +106,7 @@ docker containers, modules it calls "chaincode". It uses an
|
||||
implementation of [PBFT](http://pmg.csail.mit.edu/papers/osdi99.pdf).
|
||||
from a team at IBM that is [augmented to handle potentially
|
||||
non-deterministic
|
||||
chaincode](https://drops.dagstuhl.de/opus/volltexte/2017/7093/pdf/LIPIcs-OPODIS-2016-24.pdf) It is
|
||||
chaincode](https://www.zurich.ibm.com/~cca/papers/sieve.pdf) It is
|
||||
possible to implement this docker-based behavior as a ABCI app in
|
||||
Tendermint, though extending Tendermint to handle non-determinism
|
||||
remains for future work.
|
||||
|
||||
@@ -61,6 +61,5 @@ sections.
|
||||
- [RFC-021: The Future of the Socket Protocol](./rfc-021-socket-protocol.md)
|
||||
- [RFC-023: Semi-permanent Testnet](./rfc-023-semi-permanent-testnet.md)
|
||||
- [RFC-024: Block Structure Consolidation](./rfc-024-block-structure-consolidation.md)
|
||||
- [RFC-025: Application Defined Transaction Storage](./rfc-025-support-app-side-mempool.md)
|
||||
|
||||
<!-- - [RFC-NNN: Title](./rfc-NNN-title.md) -->
|
||||
|
||||
@@ -416,5 +416,5 @@ discussed.

[socket-server]: https://github.com/tendermint/tendermint/blob/main/abci/server/socket_server.go
[sdk-grpc]: https://pkg.go.dev/github.com/cosmos/cosmos-sdk/types/tx#ServiceServer
[json-rpc]: https://www.jsonrpc.org/specification
[abci-conn]: https://github.com/tendermint/tendermint/blob/main/spec/abci/abci++_basic_concepts.md
[abci-conn]: https://github.com/tendermint/tendermint/blob/main/spec/abci/apps.md#state
[adr-57]: https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-057-RPC.md

@@ -272,7 +272,7 @@ event sends. The following metrics would be a good start for tracking this perfo

[rfc-002]: https://github.com/tendermint/tendermint/pull/6913
[adr-57]: https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-057-RPC.md
[issue-1319]: https://github.com/tendermint/tendermint/issues/1319
[abci-commit-description]: https://github.com/tendermint/tendermint/blob/main/spec/abci/abci++_methods.md#commit
[abci-commit-description]: https://github.com/tendermint/tendermint/blob/main/spec/abci/apps.md#commit
[abci-local-client-code]: https://github.com/tendermint/tendermint/blob/511bd3eb7f037855a793a27ff4c53c12f085b570/abci/client/local_client.go#L84
[hub-signature]: https://github.com/cosmos/gaia/blob/0ecb6ed8a244d835807f1ced49217d54a9ca2070/docs/resources/genesis.md#consensus-parameters
[ed25519-bench]: https://github.com/oasisprotocol/curve25519-voi/blob/d2e7fc59fe38c18ca990c84c4186cba2cc45b1f9/PERFORMANCE.md

@@ -195,7 +195,7 @@ mutually exclusive.

- [JSON-RPC 2.0 Specification](https://www.jsonrpc.org/specification)

[rpc-service]: https://docs.tendermint.com/v0.34/rpc/
[rpc-methods]: https://github.com/tendermint/tendermint/blob/main/rpc/core/routes.go#L12
[rpc-methods]: https://github.com/tendermint/tendermint/blob/main/internal/rpc/core/routes.go#L12
[events]: ./rfc-005-event-system.rst
[rpc-transport]: ./rfc-002-ipc-ecosystem.md#rpc-transport
[ws]: https://datatracker.ietf.org/doc/html/rfc6455

@@ -343,10 +343,10 @@ something like this (subject to refinement):

[i1161]: https://github.com/tendermint/tendermint/issues/1161
[i7135]: https://github.com/tendermint/tendermint/issues/7135
[i7247]: https://github.com/tendermint/tendermint/issues/7247
[kv-index]: https://github.com/tendermint/tendermint/blob/main/state/indexer/block/kv
[kv-index]: https://github.com/tendermint/tendermint/blob/main/internal/state/indexer/tx/kv
[postgres]: https://postgresql.org/
[psql]: https://github.com/tendermint/tendermint/tree/main/state/indexer/sink/psql
[psql]: https://github.com/tendermint/tendermint/tree/main/state/indexer/sink/psql
[psql]: https://github.com/tendermint/tendermint/blob/main/internal/state/indexer/sink/psql
[psql]: https://github.com/tendermint/tendermint/blob/main/internal/state/indexer/sink/psql
[query]: https://pkg.go.dev/github.com/tendermint/tendermint/internal/pubsub/query/syntax
[sdk]: https://github.com/cosmos/cosmos-sdk
[tmdb]: https://pkg.go.dev/github.com/tendermint/tm-db#DB
@@ -1,4 +1,3 @@

<!-- markdown-link-check-disable -->
# RFC 019: Configuration File Versioning

## Changelog
@@ -1,301 +0,0 @@

# RFC 25: Support Application Defined Transaction Storage (app-side mempools)

## Changelog

- Aug 17, 2022: initial draft (@williambanfield)
- Aug 19, 2022: updated draft (@williambanfield)
## Abstract

With the release of ABCI++, specifically the `PrepareProposal` call, the utility
of the Tendermint mempool becomes much less clear. This RFC discusses possible
changes that should be considered to Tendermint to better support applications
that intend to use `PrepareProposal` to implement much more powerful transaction
ordering and filtering functionality than Tendermint can provide. It proposes
scoping down the responsibilities of Tendermint to suit this new use case.

## Background

Tendermint currently ships with a data structure it calls the
[mempool][mempool-link]. The mempool's primary function is to store pending
valid transactions. Tendermint uses the contents of the mempool in two main
ways: 1) to gossip these pending transactions to other nodes on a Tendermint
network and 2) to select transactions to be included in a proposed block. Before
ABCI++, when proposing a block, Tendermint selects the next set of transactions
from the mempool that fit within the block and proposes them.
There are a few issues with this data structure. These include issues of how
transaction validity is defined, how transactions should be ordered and selected
for inclusion in the next block, and when a transaction should start or stop
being gossiped. The creation of `PrepareProposal` in ABCI++ adds the additional
issue of unclear ownership over which entity, Tendermint or the ABCI
application, is responsible for selecting the transactions to be included in
a proposed block.

None of these issues of validity, ordering, and gossiping has a simple,
one-size-fits-all solution. Different applications will have different
preferences and needs for each of them. The current Tendermint mempool attempts
to strike a balance but is quite prescriptive about these questions. We can
better support a varied range of applications by simplifying the current mempool
and by reducing and clarifying its scope of responsibilities.

## Discussion

### The mempool is a leaky abstraction and handles too many concerns

The current mempool is a leaky abstraction. Presently, Tendermint's mempool keeps
track of a multitude of details that primarily service concerns of the application.
#### Gas

The mempool keeps track of Gas, a proxy for how computationally expensive it
will be to execute a transaction. As discussed in [RFC-011](https://github.com/tendermint/tendermint/blob/2313f358003d0c4d9d0e7705b4632d819dfb0d92/docs/rfc/rfc-011-delete-gas.md), this metadata is
not a concern of Tendermint's. Tendermint does not execute transactions. This
data is stored within Tendermint's mempool along with the maximum gas the application
will permit to be used in a block so that Tendermint's mempool can enforce
transaction validity using it: transactions that exceed the configured maximum
are rejected from the mempool. How much 'Gas' a transaction consumes and whether that
precludes it from execution by the application is a validity condition imposed
by the application, not Tendermint. It is an application abstraction that leaks
into the Tendermint mempool.
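
For concreteness, a minimal sketch of the kind of gas-based admission check described above follows. This is not Tendermint's actual mempool code: the function, parameter names, and error value are illustrative stand-ins; only the rule — reject transactions whose reported gas exceeds the application's per-block maximum — comes from the text above.

```go
package mempool

import "errors"

// ErrTxTooMuchGas is a hypothetical sentinel error used only in this sketch.
var ErrTxTooMuchGas = errors.New("transaction exceeds the application's max gas for a block")

// checkGasLimit sketches the gas-based validity rule described above: the
// mempool rejects any transaction whose reported gas requirement exceeds the
// per-block maximum supplied by the application. gasWanted would come from
// the application's CheckTx response; maxGas from the consensus parameters
// the application configured.
func checkGasLimit(gasWanted, maxGas int64) error {
	// A negative maxGas conventionally means "no limit"; the sketch mirrors
	// that convention.
	if maxGas < 0 {
		return nil
	}
	if gasWanted > maxGas {
		return ErrTxTooMuchGas
	}
	return nil
}
```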
#### Sender

The Tendermint mempool stores a `sender` string metadata for each transaction
it receives. The mempool only stores one transaction per sender at any time.
The `sender` metadata is populated by the application during `CheckTx`.
`Sender` uniqueness is enforced separately on each node's mempool. Nothing
prevents multiple transactions with the same `sender` from existing in separate
mempools on the network.

While allowing multiple transactions from the same sender on a network is a shortcoming
of the `sender` abstraction, the issue posed by `sender` to the mempool is that
`sender` uniqueness is a condition of transaction validity that is otherwise
meaningless to Tendermint. The `sender` field allows the application to
influence which transactions Tendermint will include next in a block. However,
with the advent of `PrepareProposal`, the application can select directly and
this `sender` field is of marginal benefit. Additionally, applications require
much more expressive validity conditions than just `sender` uniqueness.
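
The per-node bookkeeping described above amounts to something like the sketch below. The type and method names are invented for this document, not Tendermint's implementation; the point is that the one-transaction-per-sender rule is enforced independently on each node.

```go
package mempool

// senderIndex sketches the per-node `sender` bookkeeping: at most one pending
// transaction is kept per sender string. Nothing here prevents two different
// nodes from each holding a different transaction for the same sender.
type senderIndex struct {
	bySender map[string][]byte // sender -> the single pending tx held for it
}

// add records tx for sender and reports whether it was accepted. A second
// transaction from a sender that already has one pending is rejected, which
// is exactly the validity rule the RFC argues Tendermint should not own.
func (s *senderIndex) add(sender string, tx []byte) bool {
	if _, exists := s.bySender[sender]; exists {
		return false
	}
	if s.bySender == nil {
		s.bySender = make(map[string][]byte)
	}
	s.bySender[sender] = tx
	return true
}
```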
#### Adding additional semantics to the mempool

The Tendermint mempool is relied upon by every ABCI application. Changing its
code to incorporate new features or update its behavior affects all of
Tendermint's downstream consumers. New applications frequently need ways of
sorting pending transactions and imposing transaction validity conditions. This
data structure cannot change quickly to meet the shifting needs of new
consumers while also maintaining a stable API for the applications that are
already successfully running on top of Tendermint. New strategies for sorting
and validating pending transactions would be best implemented outside of
Tendermint, where creating new semantics does not risk disrupting the existing
users.

### Tendermint's scope of responsibility

#### What should Tendermint be responsible for?

Tendermint's responsibilities should be as narrowly scoped as possible to allow
the code base to be useful for many developers and maintainable by the core
team.
The Tendermint node maintains a P2P network over which pending transactions,
proposed blocks, votes and other messages are sent. Tendermint, using these
messages, comes to consensus on the proposed blocks and delivers their contents
to the application in an orderly fashion.

In this description of Tendermint, its only responsibility, in terms of pending
transactions, is to _gossip_ them over its P2P network. Any additional logic
surrounding validity, ordering etc. requires an understanding of the meaning of
the transaction that Tendermint does not and _should not_ have.

#### What should the application be responsible for?

Transaction contents have semantic meaning to the ABCI application. Pending
transactions are valid and have execution priority in relation to the
current state of the application. While Tendermint is clearly responsible for the
action of gossiping the transaction, it cannot decide when to start or stop
gossiping any given transaction. While only valid transactions should be
gossiped, as stated, it cannot appropriately make decisions about transaction
validity beyond simple heuristics. The application therefore should be
responsible for defining pending transaction validity, determining when to start
or stop gossiping a transaction, and for selecting which transactions should be
contained within a block.
### How can Tendermint best be designed for this responsibility?

With the understanding that Tendermint's responsibility is to gossip the set of
transactions that the application currently considers valid and high priority,
we can update its API and data structures accordingly. With the creation of
`PrepareProposal`, the mempool may be able to drop its responsibility to select
transactions for a block; it can be primarily responsible for gossiping and
nothing else.

#### Goodbye mempool, hello GossipList

The mempool contains many structures to retain, order, and select the set of
transactions to gossip and to propose. These mempool structures could be
completely replaced with a single list that allows Tendermint to fulfill the
previously stated responsibility. This proposed list, the `GossipList`, would
simply contain the set of transactions that Tendermint is responsible for
gossiping at the moment. This `GossipList` would be updated by the application
at a set of defined junctures and Tendermint would never add to it or remove
from it without input from the application. Tendermint would impose _no_
validity constraints on the contents of this list and would not attempt to
remove items unless instructed to.
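
A minimal sketch of what such a structure might look like appears below. It is illustrative only: the type name, locking strategy, and method set are assumptions made for this document, not a proposed implementation.

```go
package gossiplist

import "sync"

// GossipList sketches the single structure described above: a set of raw
// transactions that Tendermint gossips verbatim. Tendermint never adds to or
// removes from it on its own; every mutation is driven by the application's
// response to OfferPendingTransaction or UpdateTransactionGossipList.
type GossipList struct {
	mtx sync.RWMutex
	txs [][]byte
}

// Replace swaps in the full list provided by the application, for example in
// response to UpdateTransactionGossipList after a block commits.
func (g *GossipList) Replace(txs [][]byte) {
	g.mtx.Lock()
	defer g.mtx.Unlock()
	g.txs = txs
}

// Snapshot returns a copy of the transactions currently being gossiped.
func (g *GossipList) Snapshot() [][]byte {
	g.mtx.RLock()
	defer g.mtx.RUnlock()
	out := make([][]byte, len(g.txs))
	copy(out, g.txs)
	return out
}
```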
### Mock API of the GossipList

Outlined below is a proposed API for this data structure. These calls would be
added to the ABCI API and would come to replace the current `CheckTx` call.

#### `OfferPendingTransaction`

`OfferPendingTransaction` replaces the `CheckTx` call that is invoked when
Tendermint receives a submitted or gossiped transaction. The `GossipList` will
invoke `OfferPendingTransaction` on _every_ transaction sent to Tendermint that
does not match one of the transactions already in the `GossipList`. The mempool
currently drops gossiped transactions before `CheckTx` is called if the
transaction is considered invalid for a Tendermint-defined reason, such as
inclusion in the mempool 'cache' or because it overflows the max transaction size.

The application can indicate if the transaction should be added to the
`GossipList` via `ResponseOfferPendingTransaction`'s `GossipStatus` field. If
the `GossipList` is full, the application must list a transaction to remove from
the `GossipList`; otherwise the transaction will not be added. In this way,
a transaction will _never_ leave the list unless the application removes it from
the list explicitly.
```proto
message RequestOfferPendingTransaction {
  bytes tx = 1;
  int64 gossip_list_max_size = 2;
  int64 gossip_list_current_size = 3;
}

message ResponseOfferPendingTransaction {
  GossipStatus gossip_status = 1;
  enum GossipStatus {
    UNKNOWN = 0;
    GOSSIP = 1;
    NO_GOSSIP = 2;
  }
  repeated bytes removals = 2;
}
```
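
To illustrate how a node might consume this response, here is a hedged Go sketch. The `ResponseOfferPendingTransaction` struct used here is a hand-written stand-in for whatever code would be generated from the proto above; the flow it shows — honor `GossipStatus`, and apply the requested removals only when the list is full — is the point, not the exact types.

```go
package gossiplist

import "bytes"

// ResponseOfferPendingTransaction is a hypothetical mirror of the proto
// response above; in practice this would be generated code.
type ResponseOfferPendingTransaction struct {
	Gossip   bool     // true when GossipStatus == GOSSIP
	Removals [][]byte // transactions the application wants evicted to make room
}

// offerResult applies the application's decision to an in-memory list of
// transactions and returns the updated list. Tendermint itself imposes no
// validity judgment here.
func offerResult(list [][]byte, tx []byte, maxSize int, resp ResponseOfferPendingTransaction) [][]byte {
	if !resp.Gossip {
		return list // application declined; the list is untouched
	}
	if len(list) >= maxSize {
		if len(resp.Removals) == 0 {
			return list // full and nothing to evict: the new tx is not added
		}
		list = remove(list, resp.Removals)
	}
	return append(list, tx)
}

// remove filters out every transaction listed in removals.
func remove(list [][]byte, removals [][]byte) [][]byte {
	out := make([][]byte, 0, len(list))
	for _, tx := range list {
		evict := false
		for _, r := range removals {
			if bytes.Equal(tx, r) {
				evict = true
				break
			}
		}
		if !evict {
			out = append(out, tx)
		}
	}
	return out
}
```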
#### `UpdateTransactionGossipList`

`UpdateTransactionGossipList` would be a simple method that allows the
application to exactly set the contents of the `GossipList`. Tendermint would
call `UpdateTransactionGossipList` on the application, which would respond with
the list of all transactions to gossip. The contents of the `GossipList` would
be completely replaced with the contents provided by the application in
`UpdateTransactionGossipList`.
```proto
message UpdateTransactionGossipListRequest {
  int64 max_size = 1; // application cannot provide more than `max_size` transactions.
}

message UpdateTransactionGossipListResponse {
  repeated bytes txs = 1; // field name omitted in the original draft; "txs" is assumed here
}
```
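
As a rough illustration of the application side of this call, the sketch below shows a handler that returns however many currently-valid transactions fit under `max_size`. The `pending` argument stands in for whatever application-defined pool of valid transactions the application maintains; only the contract — the response fully defines the new `GossipList` — comes from the RFC.

```go
package app

// updateGossipList sketches an application-side handler for
// UpdateTransactionGossipList. pending is the application's own pool of
// currently-valid transactions (an app-side mempool); maxSize mirrors the
// max_size field of the request.
func updateGossipList(pending [][]byte, maxSize int64) [][]byte {
	if int64(len(pending)) > maxSize {
		pending = pending[:maxSize]
	}
	// The returned slice completely replaces Tendermint's GossipList; anything
	// omitted here simply stops being gossiped.
	return pending
}
```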
This new `ABCI` method would serve multiple functions. First, it would replace
the re-`CheckTx` calls made by Tendermint after each block is committed. After
each block is committed, Tendermint currently passes the entire contents of the
mempool to the application one-by-one via `CheckTx` calls with `CheckTxType` set
to `RECHECK`. The application, in this way, can then inspect the entire mempool
and remove any transactions that became invalid as a result of the most recent
block being committed.

`UpdateTransactionGossipList` would completely replace this set of re-`CheckTx`
calls. After each block is committed, Tendermint would call
`UpdateTransactionGossipList` and the application would be responsible for
exactly providing the set of transactions for Tendermint to maintain. The IPC
overhead here would be roughly equivalent to the re-`CheckTx` overhead, as the
entire contents of the gossip structure are communicated, but, in the
`UpdateTransactionGossipList` call, the application sends transactions instead
of Tendermint.
This new method would _also_ replace the mempool's `Update` API. The `Update`
method on the mempool receives the list of transactions that were just executed
as part of the most recent height and removes them from the mempool. The
`GossipList` would have no such method and instead, the application would become
responsible for setting the contents after each block via
`UpdateTransactionGossipList`. This gives the application more control over when
to start and stop gossiping transactions than it has at the moment. In this
call, the application can completely replace the `GossipList`.

This also complements the `PrepareProposal` call nicely, because a transaction
introduced via `PrepareProposal` may be semantically equivalent to a transaction
present in Tendermint's mempool in a way that Tendermint cannot detect. The
mempool `Update` call only compares transaction hashes;
`UpdateTransactionGossipList` allows the application to easily compare on
transaction contents as well.

As a nice benefit, it also allows the application to easily continue gossiping
a transaction that was just executed in the block. Applications may wish to
execute the same transaction multiple times, which the mempool `Update` call
makes very cumbersome by clearing transactions that have the same contents as
those that were just executed.
### Tendermint startup

On Tendermint startup, the `GossipList` would be completely empty. It does not
persist transactions and is an in-memory-only data structure. To populate the
`GossipList` on startup, Tendermint will issue an `UpdateTransactionGossipList`
call to the application, requesting that the application provide it with a list
of transactions to fill the gossip list.
### Additional benefits of this API

#### No more confusing mempool cache

The current Tendermint mempool stores a [cache][cache-when-clear] of transaction
hashes that should not be accepted into the mempool. When a transaction is sent
to the mempool but is present in the cache, the transaction is dropped without
ever being sent to the application via `CheckTx`. This cache is intended to help
the application guard against receiving the same invalid transaction over and
over. However, this means that presence in or absence from the mempool cache
becomes a condition of validity for pending transactions.

Being placed in this cache has serious consequences for a proposed transaction,
but the rules for when a transaction should be placed in this cache are unclear.
So unclear, in fact, that conditions for when to include a transaction in this
cache have been completely reversed by different commits
([1][update-remove-from-cache], [2][update-keep-in-cache]) on the Tendermint
project. Additional GitHub issues have noted that it's very ambiguous as to
[when the cache should be cleared][cache-when-clear] and whether or not the
cache should allow [previously invalid transactions][later-valid] to later
become valid. There is no one-size-fits-all solution to these problems.
Different applications need very different behavior, so this should ultimately
not be the responsibility of Tendermint. Implementing the `GossipList` clears
Tendermint of this responsibility.
#### Improved guarantees about the set of transactions being gossiped

As discussed in the [Mock API](#mock-api-of-the-gossiplist) section, transactions
are only added to, removed from, or replaced in the `GossipList`
when the application says to. Under this design, the contents of this list are
never ambiguous. The list contains exactly what the application most recently
told Tendermint to gossip, nothing more, nothing less.

### Additional considerations

This document leaves a few aspects unconsidered that should be understood before
future designs are made in this area:

1. Impact of duplicating transactions in both the `GossipList` and within the
   application.
2. Transition plan and feasibility of migrating applications to the new API.
## References

[mempool-cache]: https://github.com/tendermint/tendermint/blob/c8302c5fcb7f1ffafdefc5014a26047df1d27c99/mempool/v1/mempool.go#L41
[cache-when-clear]: https://github.com/tendermint/tendermint/issues/7723
[update-remove-from-cache]: https://github.com/tendermint/tendermint/pull/233
[update-keep-in-cache]: https://github.com/tendermint/tendermint/issues/2855
[later-valid]: https://github.com/tendermint/tendermint/issues/458
[mempool-link]: https://github.com/tendermint/tendermint/blob/c8302c5fcb7f1ffafdefc5014a26047df1d27c99/mempool/mempool.go#L30
@@ -11,6 +11,6 @@ nodes. This blockchain is accessible via various RPC endpoints, mainly

`/blockchain?minHeight=_&maxHeight=_` to get a list of headers. But what
exactly is stored in these blocks?

The [specification](https://github.com/tendermint/tendermint/blob/main/spec/core/data_structures.md) contains a detailed description of each component - that's the best place to get started.
The [specification](https://github.com/tendermint/spec/blob/8dd2ed4c6fe12459edeb9b783bdaaaeb590ec15c/spec/core/data_structures.md) contains a detailed description of each component - that's the best place to get started.

To dig deeper, check out the [types package documentation](https://godoc.org/github.com/tendermint/tendermint/types).
@@ -115,7 +115,7 @@ I[10-04|13:54:30.410] Recheck txs module=mempoo

Here is the list of modules you may encounter in Tendermint's log and a
little overview what they do.

- `abci-client` As mentioned in [Application Development Guide](../app-dev/abci-cli.md), Tendermint acts as an ABCI
- `abci-client` As mentioned in [Application Development Guide](../app-dev/app-development.md), Tendermint acts as an ABCI
  client with respect to the application and maintains 3 connections:
  mempool, consensus and query. The code used by Tendermint Core can
  be found [here](https://github.com/tendermint/tendermint/blob/v0.34.x/abci/client).
@@ -101,7 +101,7 @@ for more information.

Rate-limiting and authentication are another key aspects to help protect
against DOS attacks. Validators are supposed to use external tools like
[NGINX](https://www.nginx.com/blog/rate-limiting-nginx/) or
[traefik](https://doc.traefik.io/traefik/middlewares/http/ratelimit/)
[traefik](https://docs.traefik.io/middlewares/ratelimit/)
to achieve the same things.

## Debugging Tendermint
@@ -332,7 +332,7 @@ We want `skip_timeout_commit=false` when there is economics on the line

because proposers should wait to hear for more votes. But if you don't
care about that and want the fastest consensus, you can skip it. It will
be kept false by default for public deployments (e.g. [Cosmos
Hub](https://hub.cosmos.network/)) while for enterprise
Hub](https://cosmos.network/intro/hub)) while for enterprise
applications, setting it to true is not a problem.

- `consensus.peer_gossip_sleep_duration`
@@ -36,14 +36,14 @@ more information on query syntax and other options.

You can also use tags, given you had included them into DeliverTx
response, to query transaction results. See [Indexing
transactions](../app-dev/indexing-transactions.md) for details.
transactions](./indexing-transactions.md) for details.

## ValidatorSetUpdates

When validator set changes, ValidatorSetUpdates event is published. The
event carries a list of pubkey/power pairs. The list is the same
Tendermint receives from ABCI application (see [EndBlock
section](https://github.com/tendermint/tendermint/blob/main/spec/abci/abci++_methods.md#endblock) in
section](https://github.com/tendermint/tendermint/blob/main/spec/abci/abci.md#endblock) in
the ABCI spec).

Response:
@@ -555,7 +555,7 @@ failing, you need at least four validator nodes (e.g., 2/3).

Updating validators in a live network is supported but must be
explicitly programmed by the application developer. See the [application
developers guide](../app-dev/abci-cli.md) for more details.
developers guide](../app-dev/app-development.md) for more details.

### Local Network
@@ -97,7 +97,7 @@ More Information can be found at these links:

### Validator keys

Protecting a validator's consensus key is the most important factor to take in when designing your setup. The key that a validator is given upon creation of the node is called a consensus key, it has to be online at all times in order to vote on blocks. It is **not recommended** to merely hold your private key in the default json file (`priv_validator_key.json`). Fortunately, the [Interchain Foundation](https://interchain.io) has worked with a team to build a key management server for validators. You can find documentation on how to use it [here](https://github.com/iqlusioninc/tmkms), it is used extensively in production. You are not limited to using this tool, there are also [HSMs](https://safenet.gemalto.com/data-encryption/hardware-security-modules-hsms/), there is not a recommended HSM.
Protecting a validator's consensus key is the most important factor to take in when designing your setup. The key that a validator is given upon creation of the node is called a consensus key, it has to be online at all times in order to vote on blocks. It is **not recommended** to merely hold your private key in the default json file (`priv_validator_key.json`). Fortunately, the [Interchain Foundation](https://interchain.io/) has worked with a team to build a key management server for validators. You can find documentation on how to use it [here](https://github.com/iqlusioninc/tmkms), it is used extensively in production. You are not limited to using this tool, there are also [HSMs](https://safenet.gemalto.com/data-encryption/hardware-security-modules-hsms/), there is not a recommended HSM.

Currently Tendermint uses [Ed25519](https://ed25519.cr.yp.to/) keys which are widely supported across the security sector and HSMs.
@@ -214,7 +214,7 @@ etc.) by Tendermint Core.

Valid transactions will eventually be committed given they are not too big and
have enough gas. To learn more about gas, check out ["the
specification"](https://github.com/tendermint/tendermint/blob/main/spec/abci/abci++_app_requirements.md#gas).
specification"](https://github.com/tendermint/tendermint/blob/main/spec/abci/apps.md#gas).

For the underlying key-value store we'll use
[badger](https://github.com/dgraph-io/badger), which is an embeddable,
Some files were not shown because too many files have changed in this diff.