Compare commits


1 Commit

| Author | SHA1 | Message | Date |
|--------|------|---------|------|
| William Banfield | 00dd3a6801 | allow e2e to work for out of process app | 2022-07-15 16:57:42 -04:00 |
536 changed files with 3855 additions and 48600 deletions

.github/CODEOWNERS

@@ -7,5 +7,4 @@
# global owners are only requested if there isn't a more specific
# codeowner specified below. For this reason, the global codeowners
# are often repeated in package-level definitions.
* @ebuchman @tendermint/tendermint-engineering @adizere @lasarojc
* @ebuchman @cmwaters @tychoish @williambanfield @creachadair


@@ -1,37 +0,0 @@
---
name: Protocol Change Proposal
about: Create a proposal to request a change to the protocol
---
<!-- < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < ☺
v ✰ Thanks for opening an issue! ✰
v Before smashing the submit button please review the template.
v Word of caution: Under-specified proposals may be rejected summarily
☺ > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > -->
# Protocol Change Proposal
## Summary
<!-- Short, concise description of the proposed change -->
## Problem Definition
<!-- Why do we need this change?
What problems may be addressed by introducing this change?
What benefits does Tendermint stand to gain by including this change?
Are there any disadvantages of including this change? -->
## Proposal
<!-- Detailed description of requirements of implementation -->
____
#### For Admin Use
- [ ] Not duplicate issue
- [ ] Appropriate labels applied
- [ ] Appropriate contributors tagged
- [ ] Contributor assigned/self-assigned


@@ -7,7 +7,7 @@ name: Check generated code
on:
pull_request:
branches:
- main
- v0.34.x
permissions:
contents: read
@@ -18,7 +18,7 @@ jobs:
steps:
- uses: actions/setup-go@v3
with:
go-version: '1.18'
go-version: '1.17'
- uses: actions/checkout@v3
@@ -44,7 +44,7 @@ jobs:
steps:
- uses: actions/setup-go@v3
with:
go-version: '1.18'
go-version: '1.17'
- uses: actions/checkout@v3
with:


@@ -44,7 +44,7 @@ jobs:
steps:
- uses: actions/setup-go@v3
with:
go-version: "1.18"
go-version: "^1.15.4"
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
with:
@@ -66,7 +66,7 @@ jobs:
steps:
- uses: actions/setup-go@v3
with:
go-version: "1.18"
go-version: "^1.15.4"
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
with:


@@ -16,7 +16,7 @@ jobs:
steps:
- uses: actions/setup-go@v3
with:
go-version: '1.18'
go-version: '1.17'
- uses: actions/checkout@v3


@@ -23,7 +23,7 @@ jobs:
steps:
- uses: actions/setup-go@v3
with:
go-version: '1.18'
go-version: '^1.15.4'
- uses: actions/checkout@v3
with:


@@ -22,7 +22,7 @@ jobs:
steps:
- uses: actions/setup-go@v3
with:
go-version: '1.18'
go-version: '1.15'
- uses: actions/checkout@v3


@@ -15,7 +15,7 @@ jobs:
steps:
- uses: actions/setup-go@v3
with:
go-version: '1.18'
go-version: '^1.15.4'
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
with:


@@ -11,7 +11,7 @@ jobs:
steps:
- uses: actions/setup-go@v3
with:
go-version: '1.18'
go-version: '1.15'
- uses: actions/checkout@v3

.github/workflows/linkchecker.yml (new file)

@@ -0,0 +1,12 @@
name: Check Markdown links
on:
schedule:
- cron: '* */24 * * *'
jobs:
markdown-link-check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: creachadair/github-action-markdown-link-check@master
with:
folder-path: "docs"


@@ -16,7 +16,7 @@ jobs:
- uses: actions/checkout@v3
- uses: actions/setup-go@v3
with:
go-version: '1.18'
go-version: '^1.16'
- uses: technote-space/get-diff-action@v6
with:
PATTERNS: |
@@ -25,10 +25,8 @@ jobs:
go.sum
- uses: golangci/golangci-lint-action@v3
with:
# Required: the version of golangci-lint is required and
# must be specified without patch version: we always use the
# latest patch version.
version: v1.50.1
# Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version.
version: v1.45
args: --timeout 10m
github-token: ${{ secrets.github_token }}
if: env.GIT_DIFF


@@ -1,65 +0,0 @@
name: "Pre-release"
on:
push:
tags:
- "v[0-9]+.[0-9]+.[0-9]+-alpha.[0-9]+" # e.g. v0.37.0-alpha.1, v0.38.0-alpha.10
- "v[0-9]+.[0-9]+.[0-9]+-beta.[0-9]+" # e.g. v0.37.0-beta.1, v0.38.0-beta.10
- "v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+" # e.g. v0.37.0-rc1, v0.38.0-rc10
jobs:
prerelease:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 0
- uses: actions/setup-go@v3
with:
go-version: '1.18'
- name: Build
uses: goreleaser/goreleaser-action@v3
if: ${{ github.event_name == 'pull_request' }}
with:
version: latest
args: build --skip-validate # skip validate skips initial sanity checks in order to be able to fully run
# Link to CHANGELOG_PENDING.md as release notes.
- run: echo https://github.com/tendermint/tendermint/blob/${GITHUB_REF#refs/tags/}/CHANGELOG_PENDING.md > ../release_notes.md
- name: Release
uses: goreleaser/goreleaser-action@v3
if: startsWith(github.ref, 'refs/tags/')
with:
version: latest
args: release --rm-dist --release-notes=../release_notes.md
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
prerelease-success:
needs: prerelease
if: ${{ success() }}
runs-on: ubuntu-latest
steps:
- name: Notify Slack upon pre-release
uses: slackapi/slack-github-action@v1.23.0
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
RELEASE_URL: "${{ github.server_url }}/${{ github.repository }}/releases/tag/${{ github.ref_name }}"
with:
payload: |
{
"blocks": [
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": ":sparkles: New Tendermint pre-release: <${{ env.RELEASE_URL }}|${{ github.ref_name }}>"
}
}
]
}


@@ -15,7 +15,7 @@ jobs:
timeout-minutes: 5
steps:
- uses: actions/checkout@v3
- uses: bufbuild/buf-setup-action@v1.8.0
- uses: bufbuild/buf-setup-action@v1.6.0
- uses: bufbuild/buf-lint-action@v1
with:
input: 'proto'

.github/workflows/proto.yml (new file)

@@ -0,0 +1,22 @@
name: Protobuf
# Protobuf runs buf (https://buf.build/) lint and check-breakage
# This workflow is only run when a .proto file has been modified
on:
pull_request:
paths:
- "**.proto"
jobs:
proto-lint:
runs-on: ubuntu-latest
timeout-minutes: 4
steps:
- uses: actions/checkout@v3
- name: lint
run: make proto-lint
proto-breakage:
runs-on: ubuntu-latest
timeout-minutes: 4
steps:
- uses: actions/checkout@v3
- name: check-breakage
run: make proto-check-breaking-ci


@@ -3,10 +3,10 @@ name: "Release"
on:
push:
tags:
- "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10
- "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10
jobs:
release:
goreleaser:
runs-on: ubuntu-latest
steps:
- name: Checkout
@@ -16,47 +16,14 @@ jobs:
- uses: actions/setup-go@v3
with:
go-version: '1.18'
go-version: '^1.15.4'
- name: Build
- run: echo https://github.com/tendermint/tendermint/blob/${GITHUB_REF#refs/tags/}/CHANGELOG.md#${GITHUB_REF#refs/tags/} > ../release_notes.md
- name: Run GoReleaser
uses: goreleaser/goreleaser-action@v3
if: ${{ github.event_name == 'pull_request' }}
with:
version: latest
args: build --skip-validate # skip validate skips initial sanity checks in order to be able to fully run
- run: echo https://github.com/tendermint/tendermint/blob/${GITHUB_REF#refs/tags/}/CHANGELOG.md#${GITHUB_REF#refs/tags/} > ../release_notes.md
- name: Release
uses: goreleaser/goreleaser-action@v3
if: startsWith(github.ref, 'refs/tags/')
with:
version: latest
args: release --rm-dist --release-notes=../release_notes.md
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
release-success:
needs: release
if: ${{ success() }}
runs-on: ubuntu-latest
steps:
- name: Notify Slack upon release
uses: slackapi/slack-github-action@v1.23.0
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
RELEASE_URL: "${{ github.server_url }}/${{ github.repository }}/releases/tag/${{ github.ref_name }}"
with:
payload: |
{
"blocks": [
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": ":rocket: New Tendermint release: <${{ env.RELEASE_URL }}|${{ github.ref_name }}>"
}
}
]
}


@@ -7,7 +7,7 @@ jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v6
- uses: actions/stale@v5
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
stale-pr-message: "This pull request has been automatically marked as stale because it has not had


@@ -25,7 +25,7 @@ jobs:
steps:
- uses: actions/setup-go@v3
with:
go-version: "1.18"
go-version: "^1.15.4"
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
with:
@@ -57,7 +57,7 @@ jobs:
steps:
- uses: actions/setup-go@v3
with:
go-version: "^1.18"
go-version: "^1.15.4"
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
with:
@@ -89,7 +89,7 @@ jobs:
steps:
- uses: actions/setup-go@v3
with:
go-version: "^1.18"
go-version: "^1.15.4"
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
with:
@@ -120,7 +120,7 @@ jobs:
steps:
- uses: actions/setup-go@v3
with:
go-version: "1.18"
go-version: "^1.15.4"
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
with:

.gitignore

@@ -22,7 +22,6 @@ docs/.vuepress/dist
*.log
abci-cli
docs/node_modules/
docs/.vuepress/public/rpc
index.html.md
scripts/wal2json/wal2json
@@ -47,8 +46,6 @@ terraform.tfstate
terraform.tfstate.backup
terraform.tfstate.d
profile\.out
test/app/grpc_client
test/loadtime/build
test/e2e/build
test/e2e/networks/*/
test/logs
@@ -59,11 +56,3 @@ test/fuzz/**/corpus
test/fuzz/**/crashers
test/fuzz/**/suppressions
test/fuzz/**/*.zip
*.aux
*.bbl
*.blg
*.pdf
*.gz
*.dvi
# Python virtual environments
.venv


@@ -2,12 +2,20 @@ linters:
enable:
- asciicheck
- bodyclose
- deadcode
- depguard
- dogsled
- dupl
- errcheck
- exportloopref
# - funlen
# - gochecknoglobals
# - gochecknoinits
# - gocognit
- goconst
# - gocritic
# - gocyclo
# - godox
- gofmt
- goimports
- revive
@@ -15,32 +23,48 @@ linters:
- gosimple
- govet
- ineffassign
- misspell
# - interfacer
- lll
# - maligned
# - misspell
- nakedret
- nolintlint
- prealloc
- staticcheck
# - structcheck // to be fixed by golangci-lint
- structcheck
- stylecheck
- typecheck
# - typecheck
- unconvert
# - unparam
- unused
- varcheck
# - whitespace
# - wsl
issues:
exclude-rules:
- path: _test\.go
linters:
- gosec
- linters:
- lll
source: "https://"
max-same-issues: 50
linters-settings:
dogsled:
max-blank-identifiers: 3
golint:
maligned:
suggest-new: true
# govet:
# check-shadowing: true
revive:
min-confidence: 0
maligned:
suggest-new: true
misspell:
locale: US
ignore-words:
- behaviour
run:
skip-files:
- libs/pubsub/query/query.peg.go


@@ -25,13 +25,4 @@ checksum:
algorithm: sha256
release:
prerelease: auto
name_template: "{{.Version}}"
archives:
- files:
- LICENSE
- README.md
- UPGRADING.md
- SECURITY.md
- CHANGELOG.md
name_template: "{{.Version}} (WARNING: BETA SOFTWARE)"


@@ -1,11 +1,8 @@
default: true
MD001: false
MD007: {indent: 4}
default: true,
MD007: { "indent": 4 }
MD013: false
MD024: {siblings_only: true}
MD024: { siblings_only: true }
MD025: false
MD033: false
MD036: false
MD010: false
MD012: false
MD028: false
MD033: { no-inline-html: false }
no-hard-tabs: false
whitespace: false


@@ -5,5 +5,4 @@
"--proto_path=${workspaceRoot}/third_party/proto"
]
}
}


@@ -2,140 +2,21 @@
Friendly reminder, we have a [bug bounty program](https://hackerone.com/cosmos).
## v0.34.24
*Nov 21, 2022*
Apart from one minor bug fix, this release aims to optimize the output of the
RPC (both HTTP and WebSocket endpoints). See our [upgrading
guidelines](./UPGRADING.md#v03424) for more details.
### IMPROVEMENTS
- `[rpc]` [\#9724](https://github.com/tendermint/tendermint/issues/9724) Remove
useless whitespace in RPC output (@adizere, @thanethomson)
### BUG FIXES
- `[rpc]` [\#9692](https://github.com/tendermint/tendermint/issues/9692) Remove
`Cache-Control` header response from `/check_tx` endpoint (@JayT106)
## v0.34.23
*Nov 9, 2022*
This release introduces some new Prometheus metrics to help in determining what
kinds of messages are consuming the most P2P bandwidth. This builds towards our
broader goal of optimizing Tendermint bandwidth consumption, and will give us
meaningful insights once we can establish these metrics for a number of chains.
We now also return `Cache-Control` headers for select RPC endpoints to help
facilitate caching.
Special thanks to external contributors on this release: @JayT106
### IMPROVEMENTS
- `[p2p]` [\#9641](https://github.com/tendermint/tendermint/issues/9641) Add new
Envelope type and associated methods for sending and receiving Envelopes
instead of raw bytes. This also adds new metrics,
`tendermint_p2p_message_send_bytes_total` and
`tendermint_p2p_message_receive_bytes_total`, that expose how many bytes of
each message type have been sent.
- `[rpc]` [\#9666](https://github.com/tendermint/tendermint/issues/9666) Enable
caching of RPC responses (@JayT106)
The following RPC endpoints will return `Cache-Control` headers with a maximum
age of 1 day:
- `/abci_info`
- `/block`, if `height` is supplied
- `/block_by_hash`
- `/block_results`, if `height` is supplied
- `/blockchain`
- `/check_tx`
- `/commit`, if `height` is supplied
- `/consensus_params`, if `height` is supplied
- `/genesis`
- `/genesis_chunked`
- `/tx`
- `/validators`, if `height` is supplied
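As an illustration of the two improvements above, the following sketch queries a locally running node and prints the `Cache-Control` header of a cached endpoint along with the new P2P byte metrics. The RPC address (`localhost:26657`) and the Prometheus listen address (`localhost:26660`, which must be enabled in the node's configuration) are assumed defaults, not part of this release's changes.

```go
package main

import (
	"bufio"
	"fmt"
	"net/http"
	"strings"
)

func main() {
	// Cached endpoint: /abci_info should now return a Cache-Control header
	// (assumed default RPC listen address).
	resp, err := http.Get("http://localhost:26657/abci_info")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("Cache-Control:", resp.Header.Get("Cache-Control"))

	// New P2P bandwidth metrics (assumed default Prometheus listen address;
	// Prometheus must be enabled in the node's config).
	metrics, err := http.Get("http://localhost:26660/metrics")
	if err != nil {
		panic(err)
	}
	defer metrics.Body.Close()
	scanner := bufio.NewScanner(metrics.Body)
	for scanner.Scan() {
		line := scanner.Text()
		if strings.Contains(line, "tendermint_p2p_message_send_bytes_total") ||
			strings.Contains(line, "tendermint_p2p_message_receive_bytes_total") {
			fmt.Println(line)
		}
	}
}
```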
## v0.34.22
This release includes several bug fixes, [one of
which](https://github.com/tendermint/tendermint/pull/9518) we discovered while
building up a baseline for v0.34 against which to compare our upcoming v0.37
release during our [QA process](./docs/qa/).
Special thanks to external contributors on this release: @RiccardoM
### FEATURES
- [rpc] [\#9423](https://github.com/tendermint/tendermint/pull/9423) Support
HTTPS URLs from the WebSocket client (@RiccardoM, @cmwaters)
### BUG FIXES
- [config] [\#9483](https://github.com/tendermint/tendermint/issues/9483)
Calling `tendermint init` would incorrectly leave out the new `[storage]`
section delimiter in the generated configuration file - this has now been
fixed
- [p2p] [\#9500](https://github.com/tendermint/tendermint/issues/9500) Prevent
peers who have errored being added to the peer set (@jmalicevic)
- [indexer] [\#9473](https://github.com/tendermint/tendermint/issues/9473) Fix
bug that caused the psql indexer to index empty blocks whenever one of the
transactions returned a non zero code. The relevant deduplication logic has
been moved within the kv indexer only (@cmwaters)
- [blocksync] [\#9518](https://github.com/tendermint/tendermint/issues/9518) A
block sync stall was observed during our QA process whereby the node was
unable to make progress. Retrying block requests after a timeout fixes this.
## v0.34.21
Release highlights include:
- A new `[storage]` configuration section and flag `discard_abci_responses`,
which, if enabled, discards all ABCI responses except the latest one in order
to reduce disk space usage in the state store. When enabled, the
`block_results` RPC endpoint can no longer function and will return an error.
- A new CLI command, `reindex-event`, to re-index block and tx events to the
event sinks. You can run this command when the event store backend
dropped/disconnected or you want to replace the backend. When
`discard_abci_responses` is enabled, you will not be able to use this command.
Special thanks to external contributors on this release: @rootwarp & @animart
### FEATURES
- [cli] [\#9083](https://github.com/tendermint/tendermint/issues/9083) Backport command to reindex missed events (@cmwaters)
- [cli] [\#9107](https://github.com/tendermint/tendermint/issues/9107) Add the `p2p.external-address` argument to set the node P2P external address (@amimart)
### IMPROVEMENTS
- [config] [\#9054](https://github.com/tendermint/tendermint/issues/9054) `discard_abci_responses` flag added to discard all ABCI
responses except the last in order to save on storage space in the state
store (@samricotta)
### BUG FIXES
- [mempool] [\#9033](https://github.com/tendermint/tendermint/issues/9033) Rework lock discipline to mitigate callback deadlocks in the
priority mempool
- [cli] [\#9103](https://github.com/tendermint/tendermint/issues/9103) fix unsafe-reset-all for working with home path (@rootwarp)
## v0.34.20
## v0.34.20-rc1
Special thanks to external contributors on this release: @joeabbey @yihuang
This release introduces a prioritized mempool. Further notes can be found in UPGRADING.md.
NOTE: There's a known issue when combining the prioritized mempool with the ABCI socket client that the team is currently working to resolve. Read more about the issue [here](https://github.com/tendermint/tendermint/pull/9030).
### BUG FIXES
- [blocksync] [\#8496](https://github.com/tendermint/tendermint/pull/8496) validate block against state before persisting it to disk (@cmwaters)
- [indexer] [#8625](https://github.com/tendermint/tendermint/pull/8625) Fix overriding tx index of duplicated txs. (@yihuang)
- [mempool] [\#8962](https://github.com/tendermint/tendermint/issues/8962) Backport priority mempool fixes from v0.35.x to v0.34.x (@creachadair).
## v0.34.20-rc0
This RC introduces the prioritized mempool.
NOTE: There's a known memory leak with the prioritized mempool that the team are currently working on resolving. We will cut v0.34.20 when this has been resolved. This release candidate is to provide the SDK with the new APIs. Read more about the issue [here](https://github.com/tendermint/tendermint/issues/8775)
### FEATURES
- [cli] [\#8674] Add command to force compact goleveldb databases (@cmwaters)
@@ -145,6 +26,10 @@ NOTE: There's a known issue when combining the prioritized mempool with the ABCI
- [logging] [\#8845](https://github.com/tendermint/tendermint/issues/8845) Add "Lazy" Stringers to defer Sprintf and Hash until logs print. (@joeabbey)
### BUG FIXES
- [blocksync] [\#8496](https://github.com/tendermint/tendermint/pull/8496) validate block against state before persisting it to disk (@cmwaters)
## v0.34.19
### BUG FIXES
@@ -558,7 +443,7 @@ Special thanks to external contributors on this release: @james-ray, @fedekunze,
- [abci] [\#5174](https://github.com/tendermint/tendermint/pull/5174) Remove `MockEvidence` in favor of testing with actual evidence types (`DuplicateVoteEvidence` & `LightClientAttackEvidence`) (@cmwaters)
- [abci] [\#5191](https://github.com/tendermint/tendermint/pull/5191) Add `InitChain.InitialHeight` field giving the initial block height (@erikgrinaker)
- [abci] [\#5227](https://github.com/tendermint/tendermint/pull/5227) Add `ResponseInitChain.app_hash` which is recorded in genesis block (@erikgrinaker)
- [config] [\#5147](https://github.com/tendermint/tendermint/pull/5147) Add `--consensus.double_sign_check_height` flag and `DoubleSignCheckHeight` config variable. See [ADR-51](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-051-double-signing-risk-reduction.md) (@dongsam)
- [config] [\#5147](https://github.com/tendermint/tendermint/pull/5147) Add `--consensus.double_sign_check_height` flag and `DoubleSignCheckHeight` config variable. See [ADR-51](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-051-double-signing-risk-reduction.md) (@dongsam)
- [db] [\#5233](https://github.com/tendermint/tendermint/pull/5233) Add support for `badgerdb` database backend (@erikgrinaker)
- [evidence] [\#4532](https://github.com/tendermint/tendermint/pull/4532) Handle evidence from light clients (@melekes)
- [evidence] [#4821](https://github.com/tendermint/tendermint/pull/4821) Amnesia (light client attack) evidence can be detected, verified and committed (@cmwaters)
@@ -572,7 +457,7 @@ Special thanks to external contributors on this release: @james-ray, @fedekunze,
- [rpc] [\#5017](https://github.com/tendermint/tendermint/pull/5017) Add `/check_tx` endpoint to check transactions without executing them or adding them to the mempool (@melekes)
- [rpc] [\#5108](https://github.com/tendermint/tendermint/pull/5108) Subscribe using the websocket for new evidence events (@cmwaters)
- [statesync] Add state sync support, where a new node can be rapidly bootstrapped by fetching state snapshots from peers instead of replaying blocks. See the `[statesync]` config section.
- [evidence] [\#5361](https://github.com/tendermint/tendermint/pull/5361) Add LightClientAttackEvidence and refactor evidence lifecycle - for more information see [ADR-059](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-059-evidence-composition-and-lifecycle.md) (@cmwaters)
- [evidence] [\#5361](https://github.com/tendermint/tendermint/pull/5361) Add LightClientAttackEvidence and refactor evidence lifecycle - for more information see [ADR-059](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-059-evidence-composition-and-lifecycle.md) (@cmwaters)
### IMPROVEMENTS
@@ -652,7 +537,7 @@ This security release fixes:
Tendermint 0.33.0 and above allow block proposers to include signatures for the
wrong block. This may happen naturally if you start a network, have it run for
some time and restart it **without changing the chainID**. (It is a
[misconfiguration](https://docs.tendermint.com/v0.33/tendermint-core/using-tendermint.html)
[misconfiguration](https://docs.tendermint.com/master/tendermint-core/using-tendermint.html)
to reuse chainIDs.) Correct block proposers will accidentally include signatures
for the wrong block if they see these signatures, and then commits won't validate,
making all proposed blocks invalid. A malicious validator (even with a minimal
@@ -951,7 +836,7 @@ and a validator address plus a timestamp. Note we may remove the validator
address & timestamp fields in the future (see ADR-25).
`lite2` package has been added to solve `lite` issues and introduce weak
subjectivity interface. Refer to the [spec](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/consensus/light-client.md) for complete details.
subjectivity interface. Refer to the [spec](https://github.com/tendermint/spec/blob/master/spec/consensus/light-client.md) for complete details.
`lite` package is now deprecated and will be removed in v0.34 release.
### BREAKING CHANGES:
@@ -1312,7 +1197,7 @@ Special thanks to external contributors on this release: @jon-certik, @gracenoah
*August 28, 2019*
@climber73 wrote the [Writing a Tendermint Core application in Java
(gRPC)](https://github.com/tendermint/tendermint/blob/v0.32.x/docs/guides/java.md)
(gRPC)](https://github.com/tendermint/tendermint/blob/master/docs/guides/java.md)
guide.
Special thanks to external contributors on this release:
@@ -1345,7 +1230,7 @@ Special thanks to external contributors on this release:
### FEATURES:
- [blockchain] [\#3561](https://github.com/tendermint/tendermint/issues/3561) Add early version of the new blockchain reactor, which is supposed to be more modular and testable compared to the old version. To try it, you'll have to change `version` in the config file, [here](https://github.com/tendermint/tendermint/blob/v0.34.x/config/toml.go#L303) NOTE: It's not ready for a production yet. For further information, see [ADR-40](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-040-blockchain-reactor-refactor.md) & [ADR-43](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-043-blockchain-riri-org.md)
- [blockchain] [\#3561](https://github.com/tendermint/tendermint/issues/3561) Add early version of the new blockchain reactor, which is supposed to be more modular and testable compared to the old version. To try it, you'll have to change `version` in the config file, [here](https://github.com/tendermint/tendermint/blob/master/config/toml.go#L303) NOTE: It's not ready for a production yet. For further information, see [ADR-40](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-040-blockchain-reactor-refactor.md) & [ADR-43](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-043-blockchain-riri-org.md)
- [mempool] [\#3826](https://github.com/tendermint/tendermint/issues/3826) Make `max_msg_bytes` configurable(@bluele)
- [node] [\#3846](https://github.com/tendermint/tendermint/pull/3846) Allow replacing existing p2p.Reactor(s) using [`CustomReactors`
option](https://godoc.org/github.com/tendermint/tendermint/node#CustomReactors).
@@ -1662,7 +1547,7 @@ Special thanks to external contributors on this release:
- [libs/db] [\#3611](https://github.com/tendermint/tendermint/issues/3611) Conditional compilation
* Use `cleveldb` tag instead of `gcc` to compile Tendermint with CLevelDB or
use `make build_c` / `make install_c` (full instructions can be found at
<https://docs.tendermint.com/>)
https://docs.tendermint.com/master/introduction/install.html#compile-with-cleveldb-support)
* Use `boltdb` tag to compile Tendermint with bolt db
- [node] [\#3362](https://github.com/tendermint/tendermint/issues/3362) Return an error if `persistent_peers` list is invalid (except
when IP lookup fails)
@@ -1886,7 +1771,7 @@ more details.
- [rpc] [\#3269](https://github.com/tendermint/tendermint/issues/2826) Limit number of unique clientIDs with open subscriptions. Configurable via `rpc.max_subscription_clients`
- [rpc] [\#3269](https://github.com/tendermint/tendermint/issues/2826) Limit number of unique queries a given client can subscribe to at once. Configurable via `rpc.max_subscriptions_per_client`.
- [rpc] [\#3435](https://github.com/tendermint/tendermint/issues/3435) Default ReadTimeout and WriteTimeout changed to 10s. WriteTimeout can increased by setting `rpc.timeout_broadcast_tx_commit` in the config.
- [rpc/client] [\#3269](https://github.com/tendermint/tendermint/issues/3269) Update `EventsClient` interface to reflect new pubsub/eventBus API [ADR-33](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-033-pubsub.md). This includes `Subscribe`, `Unsubscribe`, and `UnsubscribeAll` methods.
- [rpc/client] [\#3269](https://github.com/tendermint/tendermint/issues/3269) Update `EventsClient` interface to reflect new pubsub/eventBus API [ADR-33](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-033-pubsub.md). This includes `Subscribe`, `Unsubscribe`, and `UnsubscribeAll` methods.
* Apps
- [abci] [\#3403](https://github.com/tendermint/tendermint/issues/3403) Remove `time_iota_ms` from BlockParams. This is a
@@ -1939,7 +1824,7 @@ more details.
- [blockchain] [\#3358](https://github.com/tendermint/tendermint/pull/3358) Fix timer leak in `BlockPool` (@guagualvcha)
- [cmd] [\#3408](https://github.com/tendermint/tendermint/issues/3408) Fix `testnet` command's panic when creating non-validator configs (using `--n` flag) (@srmo)
- [libs/db/remotedb/grpcdb] [\#3402](https://github.com/tendermint/tendermint/issues/3402) Close Iterator/ReverseIterator after use
- [libs/pubsub] [\#951](https://github.com/tendermint/tendermint/issues/951), [\#1880](https://github.com/tendermint/tendermint/issues/1880) Use non-blocking send when dispatching messages [ADR-33](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-033-pubsub.md)
- [libs/pubsub] [\#951](https://github.com/tendermint/tendermint/issues/951), [\#1880](https://github.com/tendermint/tendermint/issues/1880) Use non-blocking send when dispatching messages [ADR-33](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-033-pubsub.md)
- [lite] [\#3364](https://github.com/tendermint/tendermint/issues/3364) Fix `/validators` and `/abci_query` proxy endpoints
(@guagualvcha)
- [p2p/conn] [\#3347](https://github.com/tendermint/tendermint/issues/3347) Reject all-zero shared secrets in the Diffie-Hellman step of secret-connection
@@ -2637,7 +2522,7 @@ Special thanks to external contributors on this release:
This release is mostly about the ConsensusParams - removing fields and enforcing MaxGas.
It also addresses some issues found via security audit, removes various unused
functions from `libs/common`, and implements
[ADR-012](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-012-peer-transport.md).
[ADR-012](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-012-peer-transport.md).
BREAKING CHANGES:
@@ -2718,7 +2603,7 @@ BREAKING CHANGES:
- [abci] Added address of the original proposer of the block to Header
- [abci] Change ABCI Header to match Tendermint exactly
- [abci] [\#2159](https://github.com/tendermint/tendermint/issues/2159) Update use of `Validator` (see
[ADR-018](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-018-ABCI-Validators.md)):
[ADR-018](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-018-ABCI-Validators.md)):
- Remove PubKey from `Validator` (so it's just Address and Power)
- Introduce `ValidatorUpdate` (with just PubKey and Power)
- InitChain and EndBlock use ValidatorUpdate
@@ -2740,7 +2625,7 @@ BREAKING CHANGES:
- [state] [\#1815](https://github.com/tendermint/tendermint/issues/1815) Validator set changes are now delayed by one block (!)
- Add NextValidatorSet to State, changes on-disk representation of state
- [state] [\#2184](https://github.com/tendermint/tendermint/issues/2184) Enforce ConsensusParams.BlockSize.MaxBytes (See
[ADR-020](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-020-block-size.md)).
[ADR-020](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-020-block-size.md)).
- Remove ConsensusParams.BlockSize.MaxTxs
- Introduce maximum sizes for all components of a block, including ChainID
- [types] Updates to the block Header:
@@ -2751,7 +2636,7 @@ BREAKING CHANGES:
- [consensus] [\#2203](https://github.com/tendermint/tendermint/issues/2203) Implement BFT time
- Timestamp in block must be monotonic and equal the median of timestamps in block's LastCommit
- [crypto] [\#2239](https://github.com/tendermint/tendermint/issues/2239) Secp256k1 signature changes (See
[ADR-014](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-014-secp-malleability.md)):
[ADR-014](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-014-secp-malleability.md)):
- format changed from DER to `r || s`, both little endian encoded as 32 bytes.
- malleability removed by requiring `s` to be in canonical form.
@@ -3551,7 +3436,7 @@ Also includes the Grand Repo-Merge of 2017.
BREAKING CHANGES:
- Config and Flags:
- The `config` map is replaced with a [`Config` struct](https://github.com/tendermint/tendermint/blob/v0.10.0/config/config.go#L11),
- The `config` map is replaced with a [`Config` struct](https://github.com/tendermint/tendermint/blob/master/config/config.go#L11),
containing substructs: `BaseConfig`, `P2PConfig`, `MempoolConfig`, `ConsensusConfig`, `RPCConfig`
- This affects the following flags:
- `--seeds` is now `--p2p.seeds`


@@ -1,6 +1,10 @@
# Unreleased Changes
## v0.34.25
## v0.34.20
Special thanks to external contributors on this release:
Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).
### BREAKING CHANGES
@@ -19,4 +23,3 @@
### IMPROVEMENTS
### BUG FIXES


@@ -26,7 +26,7 @@ will indicate their support with a heartfelt emoji.
If the issue would benefit from thorough discussion, maintainers may
request that you create a [Request For
Comment](https://github.com/tendermint/tendermint/tree/main/rfc). Discussion
Comment](https://github.com/tendermint/spec/tree/master/rfc). Discussion
at the RFC stage will build collective understanding of the dimensions
of the problems and help structure conversations around trade-offs.


@@ -1,5 +1,5 @@
# stage 1 Generate Tendermint Binary
FROM golang:1.18-alpine as builder
FROM golang:1.15-alpine as builder
RUN apk update && \
apk upgrade && \
apk --no-cache add make
@@ -8,7 +8,7 @@ WORKDIR /tendermint
RUN make build-linux
# stage 2
FROM golang:1.18-alpine
FROM golang:1.15-alpine
LABEL maintainer="hello@tendermint.com"
# Tendermint will be looking for the genesis file in /tendermint/config/genesis.json


@@ -6,7 +6,7 @@ DockerHub tags for official releases are [here](https://hub.docker.com/r/tenderm
Official releases can be found [here](https://github.com/tendermint/tendermint/releases).
The Dockerfile for tendermint is not expected to change in the near future. The master file used for all builds can be found [here](https://raw.githubusercontent.com/tendermint/tendermint/main/DOCKER/Dockerfile).
The Dockerfile for tendermint is not expected to change in the near future. The master file used for all builds can be found [here](https://raw.githubusercontent.com/tendermint/tendermint/master/DOCKER/Dockerfile).
Respective versioned files can be found <https://raw.githubusercontent.com/tendermint/tendermint/vX.XX.XX/DOCKER/Dockerfile> (replace the Xs with the version number).
@@ -20,9 +20,9 @@ Respective versioned files can be found <https://raw.githubusercontent.com/tende
Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine, written in any programming language, and securely replicates it on many machines.
For more background, see [the docs](https://docs.tendermint.com/v0.34/introduction/#quick-start).
For more background, see [the docs](https://docs.tendermint.com/master/introduction/#quick-start).
To get started developing applications, see the [application developers guide](https://docs.tendermint.com/v0.34/introduction/quick-start.html).
To get started developing applications, see the [application developers guide](https://docs.tendermint.com/master/introduction/quick-start.html).
## How to use this image
@@ -37,7 +37,7 @@ docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint node --proxy_app
## Local cluster
To run a 4-node network, see the `Makefile` in the root of [the repo](https://github.com/tendermint/tendermint/blob/v0.34.x/Makefile) and run:
To run a 4-node network, see the `Makefile` in the root of [the repo](https://github.com/tendermint/tendermint/blob/master/Makefile) and run:
```sh
make build-linux
@@ -49,8 +49,8 @@ Note that this will build and use a different image than the ones provided here.
## License
- Tendermint's license is [Apache 2.0](https://github.com/tendermint/tendermint/blob/main/LICENSE).
- Tendermint's license is [Apache 2.0](https://github.com/tendermint/tendermint/blob/master/LICENSE).
## Contributing
Contributions are most welcome! See the [contributing file](https://github.com/tendermint/tendermint/blob/main/CONTRIBUTING.md) for more information.
Contributions are most welcome! See the [contributing file](https://github.com/tendermint/tendermint/blob/master/CONTRIBUTING.md) for more information.


@@ -1,3 +1,5 @@
Tendermint Core
License: Apache2.0
Apache License
Version 2.0, January 2004
@@ -179,7 +181,7 @@
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
@@ -187,7 +189,7 @@
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Copyright 2016 All in Bits, Inc
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.


@@ -74,9 +74,9 @@ install:
### Mocks ###
###############################################################################
mockery:
mockery:
go generate -run="./scripts/mockery_generate.sh" ./...
.PHONY: mockery
.PHONY: mockery
###############################################################################
### Protobuf ###
@@ -200,7 +200,7 @@ format:
lint:
@echo "--> Running linter"
@go run github.com/golangci/golangci-lint/cmd/golangci-lint run
@golangci-lint run
.PHONY: lint
DESTINATION = ./index.html.md

README.md

@@ -1,170 +1,161 @@
# Tendermint
_UPDATE: The Tendermint Core feature set is frozen for LTS; see issue https://github.com/tendermint/tendermint/issues/9972_<br/>
_This is the latest stable release used by cosmoshub-4, version 0.34.24_<br/>
_The previous main branch (v0.38.xx) can now be found under "main_backup"_<br/>
![banner](docs/tendermint-core-image.jpg)
[Byzantine-Fault Tolerant][bft] [State Machine Replication][smr]. Or
[Blockchain], for short.
[Byzantine-Fault Tolerant](https://en.wikipedia.org/wiki/Byzantine_fault_tolerance)
[State Machines](https://en.wikipedia.org/wiki/State_machine_replication).
Or [Blockchain](<https://en.wikipedia.org/wiki/Blockchain_(database)>), for short.
[![Version][version-badge]][version-url]
[![API Reference][api-badge]][api-url]
[![Go version][go-badge]][go-url]
[![Discord chat][discord-badge]][discord-url]
[![License][license-badge]][license-url]
[![Sourcegraph][sg-badge]][sg-url]
[![version](https://img.shields.io/github/tag/tendermint/tendermint.svg)](https://github.com/tendermint/tendermint/releases/latest)
[![API Reference](https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667)](https://pkg.go.dev/github.com/tendermint/tendermint)
[![Go version](https://img.shields.io/badge/go-1.15-blue.svg)](https://github.com/moovweb/gvm)
[![Discord chat](https://img.shields.io/discord/669268347736686612.svg)](https://discord.gg/AzefAFd)
[![license](https://img.shields.io/github/license/tendermint/tendermint.svg)](https://github.com/tendermint/tendermint/blob/master/LICENSE)
[![tendermint/tendermint](https://tokei.rs/b1/github/tendermint/tendermint?category=lines)](https://github.com/tendermint/tendermint)
[![Sourcegraph](https://sourcegraph.com/github.com/tendermint/tendermint/-/badge.svg)](https://sourcegraph.com/github.com/tendermint/tendermint?badge)
| Branch | Tests | Linting |
|--------|------------------------------------|---------------------------------|
| main | [![Tests][tests-badge]][tests-url] | [![Lint][lint-badge]][lint-url] |
| Branch | Tests | Coverage | Linting |
| ------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------ | -------------------------------------------------------------------------- |
| master | [![CircleCI](https://circleci.com/gh/tendermint/tendermint/tree/master.svg?style=shield)](https://circleci.com/gh/tendermint/tendermint/tree/master) </br> ![Tests](https://github.com/tendermint/tendermint/workflows/Tests/badge.svg?branch=master) | [![codecov](https://codecov.io/gh/tendermint/tendermint/branch/master/graph/badge.svg)](https://codecov.io/gh/tendermint/tendermint) | ![Lint](https://github.com/tendermint/tendermint/workflows/Lint/badge.svg) |
Tendermint Core is a Byzantine Fault Tolerant (BFT) middleware that takes a
state transition machine - written in any programming language - and securely
replicates it on many machines.
Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine - written in any programming language -
and securely replicates it on many machines.
For protocol details, refer to the [Tendermint Specification](./spec/README.md).
For protocol details, see [the specification](https://github.com/tendermint/spec).
For detailed analysis of the consensus protocol, including safety and liveness
proofs, read our paper, "[The latest gossip on BFT
consensus](https://arxiv.org/abs/1807.04938)".
## Documentation
Complete documentation can be found on the
[website](https://docs.tendermint.com/).
For detailed analysis of the consensus protocol, including safety and liveness proofs,
see our recent paper, "[The latest gossip on BFT consensus](https://arxiv.org/abs/1807.04938)".
## Releases
Please do not depend on `main` as your production branch. Use
[releases](https://github.com/tendermint/tendermint/releases) instead.
Please do not depend on master as your production branch. Use [releases](https://github.com/tendermint/tendermint/releases) instead.
Tendermint has been used in production in private and public environments, most
notably the blockchains of the Cosmos Network. We haven't released v1.0 yet,
since we are still making breaking changes to the protocol and the APIs. See
below for more details about [versioning](#versioning).
Tendermint is being used in production in both private and public environments,
most notably the blockchains of the [Cosmos Network](https://cosmos.network/).
However, we are still making breaking changes to the protocol and the APIs and have not yet released v1.0.
See below for more details about [versioning](#versioning).
In any case, if you intend to run Tendermint in production, we're happy to help.
You can contact us [over email](mailto:hello@newtendermint.org) or [join the
chat](https://discord.gg/gnoland).
More on how releases are conducted can be found [here](./RELEASES.md).
In any case, if you intend to run Tendermint in production, we're happy to help. You can
contact us [over email](mailto:hello@interchain.berlin) or [join the chat](https://discord.gg/AzefAFd).
## Security
To report a security vulnerability, please [email us](mailto:security@newtendermint.org).
For examples of the kinds of bugs we're looking for, see [our security policy](SECURITY.md).
To report a security vulnerability, see our [bug bounty
program](https://hackerone.com/tendermint).
For examples of the kinds of bugs we're looking for, see [our security policy](SECURITY.md)
We also maintain a dedicated mailing list for security updates. We will only ever use this mailing list
to notify you of vulnerabilities and fixes in Tendermint Core. You can subscribe [here](http://eepurl.com/gZ5hQD).
## Minimum requirements
| Requirement | Notes |
|-------------|-------------------|
| Go version | Go 1.18 or higher |
| Requirement | Notes |
| ----------- | ---------------- |
| Go version | Go1.15 or higher |
## Documentation
Complete documentation can be found on the [website](https://docs.tendermint.com/master/).
### Install
See the [install instructions](./docs/introduction/install.md).
See the [install instructions](/docs/introduction/install.md).
### Quick Start
- [Single node](./docs/introduction/quick-start.md)
- [Local cluster using docker-compose](./docs/tools/docker-compose.md)
- [Remote cluster using Terraform and Ansible](./docs/tools/terraform-and-ansible.md)
- [Single node](/docs/introduction/quick-start.md)
- [Local cluster using docker-compose](/docs/networks/docker-compose.md)
- [Remote cluster using Terraform and Ansible](/docs/networks/terraform-and-ansible.md)
- [Join the Cosmos testnet](https://cosmos.network/testnet)
## Contributing
Please abide by the [Code of Conduct](CODE_OF_CONDUCT.md) in all interactions.
Before contributing to the project, please take a look at the [contributing
guidelines](CONTRIBUTING.md) and the [style guide](STYLE_GUIDE.md). You may also
find it helpful to read the [specifications](./spec/README.md), and familiarize
yourself with our [Architectural Decision Records
(ADRs)](./docs/architecture/README.md) and
[Request For Comments (RFCs)](./docs/rfc/README.md).
Before contributing to the project, please take a look at the [contributing guidelines](CONTRIBUTING.md)
and the [style guide](STYLE_GUIDE.md). You may also find it helpful to read the
[specifications](https://github.com/tendermint/spec), watch the [Developer Sessions](/docs/DEV_SESSIONS.md),
and familiarize yourself with our
[Architectural Decision Records](https://github.com/tendermint/tendermint/tree/master/docs/architecture).
## Versioning
### Semantic Versioning
Tendermint uses [Semantic Versioning](http://semver.org/) to determine when and
how the version changes. According to SemVer, anything in the public API can
change at any time before version 1.0.0
Tendermint uses [Semantic Versioning](http://semver.org/) to determine when and how the version changes.
According to SemVer, anything in the public API can change at any time before version 1.0.0
To provide some stability to users of 0.X.X versions of Tendermint, the MINOR
version is used to signal breaking changes across Tendermint's API. This API
includes all publicly exposed types, functions, and methods in non-internal Go
packages as well as the types and methods accessible via the Tendermint RPC
interface.
To provide some stability to Tendermint users in these 0.X.X days, the MINOR version is used
to signal breaking changes across a subset of the total public API. This subset includes all
interfaces exposed to other processes (cli, rpc, p2p, etc.), but does not
include the Go APIs.
Breaking changes to these public APIs will be documented in the CHANGELOG.
That said, breaking changes in the following packages will be documented in the
CHANGELOG even if they don't lead to MINOR version bumps:
- crypto
- config
- libs
- bech32
- bits
- bytes
- json
- log
- math
- net
- os
- protoio
- rand
- sync
- strings
- service
- node
- rpc/client
- types
### Upgrades
In an effort to avoid accumulating technical debt prior to 1.0.0, we do not
guarantee that breaking changes (ie. bumps in the MINOR version) will work with
existing Tendermint blockchains. In these cases you will have to start a new
blockchain, or write something custom to get the old data into the new chain.
However, any bump in the PATCH version should be compatible with existing
blockchain histories.
In an effort to avoid accumulating technical debt prior to 1.0.0,
we do not guarantee that breaking changes (ie. bumps in the MINOR version)
will work with existing Tendermint blockchains. In these cases you will
have to start a new blockchain, or write something custom to get the old
data into the new chain. However, any bump in the PATCH version should be
compatible with existing blockchain histories.
For more information on upgrading, see [UPGRADING.md](./UPGRADING.md).
### Supported Versions
Because we are a small core team, we only ship patch updates, including security
updates, to the most recent minor release and the second-most recent minor
release. Consequently, we strongly recommend keeping Tendermint up-to-date.
Upgrading instructions can be found in [UPGRADING.md](./UPGRADING.md).
Because we are a small core team, we only ship patch updates, including security updates,
to the most recent minor release and the second-most recent minor release. Consequently,
we strongly recommend keeping Tendermint up-to-date. Upgrading instructions can be found
in [UPGRADING.md](./UPGRADING.md).
## Resources
### Libraries
### Tendermint Core
- [Cosmos SDK](http://github.com/cosmos/cosmos-sdk); A framework for building
applications in Golang
- [Tendermint in Rust](https://github.com/informalsystems/tendermint-rs)
- [ABCI Tower](https://github.com/penumbra-zone/tower-abci)
For details about the blockchain data structures and the p2p protocols, see the
[Tendermint specification](https://docs.tendermint.com/master/spec/).
For details on using the software, see the [documentation](/docs/) which is also
hosted at: <https://docs.tendermint.com/master/>
### Tools
Benchmarking is provided by [`tm-load-test`](https://github.com/informalsystems/tm-load-test).
Additional tooling can be found in [/docs/tools](/docs/tools).
### Applications
- [Cosmos Hub](https://hub.cosmos.network/)
- [Terra](https://www.terra.money/)
- [Celestia](https://celestia.org/)
- [Anoma](https://anoma.network/)
- [Vocdoni](https://docs.vocdoni.io/)
- [Cosmos SDK](http://github.com/cosmos/cosmos-sdk); a cryptocurrency application framework
- [Ethermint](http://github.com/cosmos/ethermint); Ethereum on Tendermint
- [Many more](https://tendermint.com/ecosystem)
### Research
- [The latest gossip on BFT consensus](https://arxiv.org/abs/1807.04938)
- [Master's Thesis on Tendermint](https://atrium.lib.uoguelph.ca/xmlui/handle/10214/9769)
- [Original Whitepaper: "Tendermint: Consensus Without Mining"](https://tendermint.com/static/docs/tendermint.pdf)
- [Tendermint Core Blog](https://medium.com/tendermint/tagged/tendermint-core)
- [Cosmos Blog](https://blog.cosmos.network/tendermint/home)
## Join us!
The development of Tendermint Core was led primarily by All in Bits, Inc. The
Tendermint trademark is owned by New Tendermint, LLC. If you'd like to work
full-time on Tendermint2 or [gno.land](https://gno.land), [we're
hiring](mailto:hiring@newtendermint.org)!
[bft]: https://en.wikipedia.org/wiki/Byzantine_fault_tolerance
[smr]: https://en.wikipedia.org/wiki/State_machine_replication
[Blockchain]: https://en.wikipedia.org/wiki/Blockchain
[version-badge]: https://img.shields.io/github/tag/tendermint/tendermint.svg
[version-url]: https://github.com/tendermint/tendermint/releases/latest
[api-badge]: https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667
[api-url]: https://pkg.go.dev/github.com/tendermint/tendermint
[go-badge]: https://img.shields.io/badge/go-1.18-blue.svg
[go-url]: https://github.com/moovweb/gvm
[discord-badge]: https://img.shields.io/discord/669268347736686612.svg
[discord-url]: https://discord.gg/cosmosnetwork
[license-badge]: https://img.shields.io/github/license/tendermint/tendermint.svg
[license-url]: https://github.com/tendermint/tendermint/blob/main/LICENSE
[sg-badge]: https://sourcegraph.com/github.com/tendermint/tendermint/-/badge.svg
[sg-url]: https://sourcegraph.com/github.com/tendermint/tendermint?badge
[tests-url]: https://github.com/tendermint/tendermint/actions/workflows/tests.yml
[tests-badge]: https://github.com/tendermint/tendermint/actions/workflows/tests.yml/badge.svg?branch=main
[lint-badge]: https://github.com/tendermint/tendermint/actions/workflows/lint.yml/badge.svg
[lint-url]: https://github.com/tendermint/tendermint/actions/workflows/lint.yml
- [Blog](https://blog.cosmos.network/tendermint/home)


@@ -2,146 +2,98 @@
## Reporting a Bug
As part of our [Coordinated Vulnerability Disclosure Policy](https://tendermint.com/security),
we operate a [bug bounty][hackerone]. See the policy for more
details on submissions and rewards, and see "Example Vulnerabilities" (below)
for examples of the kinds of bugs we're most interested in.
As part of our [Coordinated Vulnerability Disclosure
Policy](https://tendermint.com/security), we operate a [bug
bounty](https://hackerone.com/tendermint).
See the policy for more details on submissions and rewards, and see "Example Vulnerabilities" (below) for examples of the kinds of bugs we're most interested in.
### Guidelines
### Guidelines
We require that all researchers:
* Use the bug bounty to disclose all vulnerabilities, and avoid posting
vulnerability information in public places, including Github Issues, Discord
channels, and Telegram groups
* Make every effort to avoid privacy violations, degradation of user experience,
disruption to production systems (including but not limited to the Cosmos
Hub), and destruction of data
* Keep any information about vulnerabilities that you've discovered confidential
between yourself and the Tendermint Core engineering team until the issue has
been resolved and disclosed
* Use the bug bounty to disclose all vulnerabilities, and avoid posting vulnerability information in public places, including Github Issues, Discord channels, and Telegram groups
* Make every effort to avoid privacy violations, degradation of user experience, disruption to production systems (including but not limited to the Cosmos Hub), and destruction of data
* Keep any information about vulnerabilities that you've discovered confidential between yourself and the Tendermint Core engineering team until the issue has been resolved and disclosed
* Avoid posting personally identifiable information, privately or publicly
If you follow these guidelines when reporting an issue to us, we commit to:
* Not pursue or support any legal action related to your research on this
vulnerability
* Work with you to understand, resolve and ultimately disclose the issue in a
timely fashion
* Not pursue or support any legal action related to your research on this vulnerability
* Work with you to understand, resolve and ultimately disclose the issue in a timely fashion
## Disclosure Process
## Disclosure Process
Tendermint Core uses the following disclosure process:
1. Once a security report is received, the Tendermint Core team works to verify
the issue and confirm its severity level using CVSS.
2. The Tendermint Core team collaborates with the Gaia team to determine the
vulnerability's potential impact on the Cosmos Hub.
3. Patches are prepared for eligible releases of Tendermint in private
repositories. See “Supported Releases” below for more information on which
releases are considered eligible.
4. If it is determined that a CVE-ID is required, we request a CVE through a CVE
Numbering Authority.
5. We notify the community that a security release is coming, to give users time
to prepare their systems for the update. Notifications can include forum
posts, tweets, and emails to partners and validators, including emails sent
to the [Tendermint Security Mailing List][tmsec-mailing].
6. 24 hours following this notification, the fixes are applied publicly and new
releases are issued.
7. Cosmos SDK and Gaia update their Tendermint Core dependencies to use these
releases, and then themselves issue new releases.
8. Once releases are available for Tendermint Core, Cosmos SDK and Gaia, we
notify the community, again, through the same channels as above. We also
publish a Security Advisory on Github and publish the CVE, as long as neither
the Security Advisory nor the CVE include any information on how to exploit
these vulnerabilities beyond what information is already available in the
patch itself.
9. Once the community is notified, we will pay out any relevant bug bounties to
submitters.
10. One week after the releases go out, we will publish a post with further
details on the vulnerability as well as our response to it.
1. Once a security report is received, the Tendermint Core team works to verify the issue and confirm its severity level using CVSS.
2. The Tendermint Core team collaborates with the Gaia team to determine the vulnerability's potential impact on the Cosmos Hub.
3. Patches are prepared for eligible releases of Tendermint in private repositories. See “Supported Releases” below for more information on which releases are considered eligible.
4. If it is determined that a CVE-ID is required, we request a CVE through a CVE Numbering Authority.
5. We notify the community that a security release is coming, to give users time to prepare their systems for the update. Notifications can include forum posts, tweets, and emails to partners and validators, including emails sent to the [Tendermint Security Mailing List](https://berlin.us4.list-manage.com/subscribe?u=431b35421ff7edcc77df5df10&id=3fe93307bc).
6. 24 hours following this notification, the fixes are applied publicly and new releases are issued.
7. Cosmos SDK and Gaia update their Tendermint Core dependencies to use these releases, and then themselves issue new releases.
8. Once releases are available for Tendermint Core, Cosmos SDK and Gaia, we notify the community, again, through the same channels as above. We also publish a Security Advisory on Github and publish the CVE, as long as neither the Security Advisory nor the CVE include any information on how to exploit these vulnerabilities beyond what information is already available in the patch itself.
9. Once the community is notified, we will pay out any relevant bug bounties to submitters.
10. One week after the releases go out, we will publish a post with further details on the vulnerability as well as our response to it.
This process can take some time. Every effort will be made to handle the bug in
as timely a manner as possible, however it's important that we follow the
process described above to ensure that disclosures are handled consistently and
to keep Tendermint Core and its downstream dependent projects--including but not
limited to Gaia and the Cosmos Hub--as secure as possible.
This process can take some time. Every effort will be made to handle the bug in as timely a manner as possible, however it's important that we follow the process described above to ensure that disclosures are handled consistently and to keep Tendermint Core and its downstream dependent projects--including but not limited to Gaia and the Cosmos Hub--as secure as possible.
### Example Timeline
### Example Timeline
The following is an example timeline for the triage and response. The required
roles and team members are described in parentheses after each task; however,
multiple people can play each role and each person may play multiple roles.
The following is an example timeline for the triage and response. The required roles and team members are described in parentheses after each task; however, multiple people can play each role and each person may play multiple roles.
#### 24+ Hours Before Release Time
#### > 24 Hours Before Release Time
1. Request CVE number (ADMIN)
2. Gather emails and other contact info for validators (COMMS LEAD)
3. Create patches in a private security repo, and ensure that PRs are open
targeting all relevant release branches (TENDERMINT ENG, TENDERMINT LEAD)
4. Test fixes on a testnet (TENDERMINT ENG, COSMOS SDK ENG)
5. Write “Security Advisory” for forum (TENDERMINT LEAD)
1. Request CVE number (ADMIN)
2. Gather emails and other contact info for validators (COMMS LEAD)
3. Test fixes on a testnet (TENDERMINT ENG, COSMOS ENG)
4. Write “Security Advisory” for forum (TENDERMINT LEAD)
#### 24 Hours Before Release Time
1. Post “Security Advisory” pre-notification on forum (TENDERMINT LEAD)
2. Post Tweet linking to forum post (COMMS LEAD)
3. Announce security advisory/link to post in various other social channels
(Telegram, Discord) (COMMS LEAD)
4. Send emails to validators or other users (PARTNERSHIPS LEAD)
1. Post “Security Advisory” pre-notification on forum (TENDERMINT LEAD)
2. Post Tweet linking to forum post (COMMS LEAD)
3. Announce security advisory/link to post in various other social channels (Telegram, Discord) (COMMS LEAD)
4. Send emails to validators or other users (PARTNERSHIPS LEAD)
#### Release Time
1. Cut Tendermint releases for eligible versions (TENDERMINT ENG, TENDERMINT
LEAD)
1. Cut Tendermint releases for eligible versions (TENDERMINT ENG, TENDERMINT LEAD)
2. Cut Cosmos SDK release for eligible versions (COSMOS ENG)
3. Cut Gaia release for eligible versions (GAIA ENG)
4. Post “Security releases” on forum (TENDERMINT LEAD)
5. Post new Tweet linking to forum post (COMMS LEAD)
6. Remind everyone via social channels (Telegram, Discord) that the release is
out (COMMS LEAD)
7. Send emails to validators or other users (COMMS LEAD)
8. Publish Security Advisory and CVE, if CVE has no sensitive information
(ADMIN)
6. Remind everyone via social channels (Telegram, Discord) that the release is out (COMMS LEAD)
7. Send emails to validators or other users (COMMS LEAD)
8. Publish Security Advisory and CVE, if CVE has no sensitive information (ADMIN)
#### After Release Time
1. Write forum post with exploit details (TENDERMINT LEAD)
2. Approve pay-out on HackerOne for submitter (ADMIN)
2. Approve pay-out on HackerOne for submitter (ADMIN)
#### 7 Days After Release Time
1. Publish CVE if it has not yet been published (ADMIN)
1. Publish CVE if it has not yet been published (ADMIN)
2. Publish forum post with exploit details (TENDERMINT ENG, TENDERMINT LEAD)
## Supported Releases
The Tendermint Core team commits to releasing security patch releases for both
the latest minor release as well for the major/minor release that the Cosmos Hub
is running.
The Tendermint Core team commits to releasing security patch releases for both the latest minor release as well for the major/minor release that the Cosmos Hub is running.
If you are running older versions of Tendermint Core, we encourage you to
upgrade at your earliest opportunity so that you can receive security patches
directly from the Tendermint repo. While you are welcome to backport security
patches to older versions for your own use, we will not publish or promote these
backports.
If you are running older versions of Tendermint Core, we encourage you to upgrade at your earliest opportunity so that you can receive security patches directly from the Tendermint repo. While you are welcome to backport security patches to older versions for your own use, we will not publish or promote these backports.
## Scope
The full scope of our bug bounty program is outlined on our
[Hacker One program page][hackerone]. Please also note that, in the interest of
the safety of our users and staff, a few things are explicitly excluded from
scope:
The full scope of our bug bounty program is outlined on our [Hacker One program page](https://hackerone.com/tendermint). Please also note that, in the interest of the safety of our users and staff, a few things are explicitly excluded from scope:
* Any third-party services
* Findings from physical testing, such as office access
* Any third-party services
* Findings from physical testing, such as office access
* Findings derived from social engineering (e.g., phishing)
## Example Vulnerabilities
## Example Vulnerabilities
The following is a list of examples of the kinds of vulnerabilities that we're
most interested in. It is not exhaustive: there are other kinds of issues we may
also be interested in!
The following is a list of examples of the kinds of vulnerabilities that we're most interested in. It is not exhaustive: there are other kinds of issues we may also be interested in!
### Specification
@@ -153,8 +105,7 @@ also be interested in!
Assuming less than 1/3 of the voting power is Byzantine (malicious):
* Validation of blockchain data structures, including blocks, block parts,
votes, and so on
* Validation of blockchain data structures, including blocks, block parts, votes, and so on
* Execution of blocks
* Validator set changes
* Proposer round robin
@@ -163,9 +114,6 @@ Assuming less than 1/3 of the voting power is Byzantine (malicious):
* A node halting (liveness failure)
* Syncing new and old nodes
Assuming more than 1/3 the voting power is Byzantine:
* Attacks that go unpunished (unhandled evidence)
### Networking
@@ -191,7 +139,7 @@ Attacks may come through the P2P network or the RPC layer:
### Libraries
* Serialization
* Serialization (Amino)
* Reading/Writing files and databases
### Cryptography
@@ -202,8 +150,5 @@ Attacks may come through the P2P network or the RPC layer:
### Light Client
* Core verification
* Core verification
* Bisection/sequential algorithms
[hackerone]: https://hackerone.com/cosmos
[tmsec-mailing]: https://berlin.us4.list-manage.com/subscribe?u=431b35421ff7edcc77df5df10&id=3fe93307bc
View File
@@ -98,7 +98,7 @@ Sometimes it's necessary to rename libraries to avoid naming collisions or ambig
* Make use of table driven testing where possible and not cumbersome (see the sketch after this list)
* [Inspiration](https://dave.cheney.net/2013/06/09/writing-table-driven-tests-in-go)
* Make use of [assert](https://godoc.org/github.com/stretchr/testify/assert) and [require](https://godoc.org/github.com/stretchr/testify/require)
* When using mocks, it is recommended to use Testify [mock](<https://pkg.go.dev/github.com/stretchr/testify/mock>
* When using mocks, it is recommended to use Testify [mock] (<https://pkg.go.dev/github.com/stretchr/testify/mock>
) along with [Mockery](https://github.com/vektra/mockery) for autogeneration
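A minimal sketch of the table-driven style with `require` (the `abs` function here is just a stand-in):

```
package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func abs(x int) int {
	if x < 0 {
		return -x
	}
	return x
}

func TestAbs(t *testing.T) {
	cases := []struct {
		name string
		in   int
		want int
	}{
		{"positive", 3, 3},
		{"negative", -3, 3},
		{"zero", 0, 0},
	}
	for _, tc := range cases {
		tc := tc // capture range variable for the subtest closure
		t.Run(tc.name, func(t *testing.T) {
			require.Equal(t, tc.want, abs(tc.in))
		})
	}
}
```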
## Errors
View File
@@ -1,30 +1,6 @@
# Upgrading Tendermint Core
This guide provides instructions for upgrading to specific versions of
Tendermint Core.
## v0.34.24
Note that in [\#9724](https://github.com/tendermint/tendermint/pull/9724) we
un-prettified the JSON output (i.e. removed all indentation) of the HTTP and
WebSocket RPC for performance and subscription stability reasons. We recommend
using a tool such as [jq](https://github.com/stedolan/jq) to obtain prettified
output if you rely on that prettified output in some way.
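If you would rather re-indent responses in code than pipe them through `jq`, here is a minimal sketch using only the Go standard library (the response bytes below are a stand-in for whatever the RPC returned):

```
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	// Stand-in for a compact JSON body returned by the HTTP or WebSocket RPC.
	rawResponse := []byte(`{"jsonrpc":"2.0","id":-1,"result":{"ok":true}}`)

	var pretty bytes.Buffer
	if err := json.Indent(&pretty, rawResponse, "", "  "); err != nil {
		panic(err)
	}
	fmt.Println(pretty.String())
}
```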
## v0.34.20
### Feature: Priority Mempool
This release backports an implementation of the Priority Mempool from the v0.35
branch. This implementation of the mempool permits the application to set a
priority on each transaction during CheckTx, and during block selection the
highest-priority transactions are chosen (subject to the constraints on size
and gas cost).
Operators can enable the priority mempool by setting `mempool.version` to
`"v1"` in the `config.toml`. For more technical details about the priority
mempool, see [ADR 067: Mempool
Refactor](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-067-mempool-refactor.md).
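As a rough sketch of the application side (this assumes the `Priority` and `Sender` fields that the priority mempool adds to `ResponseCheckTx`; the application type and priority rule below are made up):

```
package app

import (
	abcitypes "github.com/tendermint/tendermint/abci/types"
)

// PriorityApp is a hypothetical ABCI application that embeds the no-op
// BaseApplication and only overrides CheckTx.
type PriorityApp struct {
	abcitypes.BaseApplication
}

// CheckTx assigns a priority to every transaction. With mempool.version set to
// "v1", higher-priority transactions are selected first when building a block.
func (app *PriorityApp) CheckTx(req abcitypes.RequestCheckTx) abcitypes.ResponseCheckTx {
	return abcitypes.ResponseCheckTx{
		Code:      0,
		GasWanted: 1,
		Priority:  int64(len(req.Tx)), // toy rule: larger transactions first
		Sender:    "",                 // optionally set to order/limit per sender
	}
}
```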
This guide provides instructions for upgrading to specific versions of Tendermint Core.
## v0.34.0
@@ -32,7 +8,7 @@ Refactor](https://github.com/tendermint/tendermint/blob/main/docs/architecture/a
This release is not compatible with previous blockchains due to changes to
the encoding format (see "Protocol Buffers," below) and the block header (see "Blockchain Protocol").
Note also that Tendermint 0.34 requires Go 1.16 or higher.
Note also that Tendermint 0.34 requires Go 1.15 or higher.
### ABCI Changes
@@ -42,7 +18,7 @@ Note also that Tendermint 0.34 also requires Go 1.16 or higher.
were added to support the new State Sync feature.
Previously, syncing a new node to a preexisting network could take days; but with State Sync,
new nodes are able to join a network in a matter of seconds.
Read [the spec](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/abci/apps.md#state-sync)
Read [the spec](https://docs.tendermint.com/master/spec/abci/apps.html#state-sync)
if you want to learn more about State Sync, or if you'd like your application to use it.
(If you don't want to support State Sync in your application, you can just implement these new
ABCI methods as no-ops, leaving them empty.)
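For example, a minimal no-op sketch (method signatures as in the v0.34 `abci/types` package; `MyApp` is a hypothetical application type):

```
package app

import (
	abcitypes "github.com/tendermint/tendermint/abci/types"
)

// MyApp is a hypothetical application that does not support State Sync, so the
// four new snapshot methods simply return empty responses.
type MyApp struct{}

func (app *MyApp) ListSnapshots(req abcitypes.RequestListSnapshots) abcitypes.ResponseListSnapshots {
	return abcitypes.ResponseListSnapshots{}
}

func (app *MyApp) OfferSnapshot(req abcitypes.RequestOfferSnapshot) abcitypes.ResponseOfferSnapshot {
	return abcitypes.ResponseOfferSnapshot{}
}

func (app *MyApp) LoadSnapshotChunk(req abcitypes.RequestLoadSnapshotChunk) abcitypes.ResponseLoadSnapshotChunk {
	return abcitypes.ResponseLoadSnapshotChunk{}
}

func (app *MyApp) ApplySnapshotChunk(req abcitypes.RequestApplySnapshotChunk) abcitypes.ResponseApplySnapshotChunk {
	return abcitypes.ResponseApplySnapshotChunk{}
}
```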
@@ -58,7 +34,7 @@ Note also that Tendermint 0.34 also requires Go 1.16 or higher.
Applications should be able to handle these evidence types
(i.e., through slashing or other accountability measures).
* The [`PublicKey` type](https://github.com/tendermint/tendermint/blob/v0.34.x/proto/tendermint/crypto/keys.proto#L13-L15)
* The [`PublicKey` type](https://github.com/tendermint/tendermint/blob/master/proto/tendermint/crypto/keys.proto#L13-L15)
(used in ABCI as part of `ValidatorUpdate`) now uses a `oneof` protobuf type.
Note that since Tendermint only supports ed25519 validator keys, there's only one
option in the `oneof`. For more, see "Protocol Buffers," below.
@@ -73,9 +49,12 @@ directory. For more, see "Protobuf," below.
### Blockchain Protocol
* `Header#LastResultsHash`, which is the root hash of a Merkle tree built from
`ResponseDeliverTx(Code, Data)`, as of v0.34 also includes the `GasWanted` and `GasUsed`
fields.
* `Header#LastResultsHash` previously was the root hash of a Merkle tree built from `ResponseDeliverTx(Code, Data)` responses.
As of 0.34,`Header#LastResultsHash` is now the root hash of a Merkle tree built from:
* `BeginBlock#Events`
* Root hash of a Merkle tree built from `ResponseDeliverTx(Code, Data,
GasWanted, GasUsed, Events)` responses
* `BeginBlock#Events`
* Merkle hashes of empty trees previously returned nothing, but now return the hash of an empty input,
to conform with [RFC-6962](https://tools.ietf.org/html/rfc6962).
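A quick way to observe the new behaviour is a small sketch using the `crypto/merkle` and `crypto/tmhash` packages; as of v0.34 both values printed below should be equal:

```
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/crypto/merkle"
	"github.com/tendermint/tendermint/crypto/tmhash"
)

func main() {
	// The Merkle root of an empty tree is now the hash of empty input
	// (per RFC 6962) instead of an empty byte slice.
	fmt.Printf("empty tree root:       %X\n", merkle.HashFromByteSlices(nil))
	fmt.Printf("tmhash of empty input: %X\n", tmhash.Sum([]byte{}))
}
```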
@@ -133,7 +112,7 @@ Tendermint 0.34 includes new and updated consensus parameters.
#### Evidence Parameters
* `MaxBytes`, which caps the total amount of evidence. The default is 1048576 (1 MB).
* `MaxBytes`, which caps the total amount of evidence. The default is 1048576 (1 MB).
### Crypto
@@ -165,7 +144,7 @@ The `bech32` package has moved to the Cosmos SDK:
### CLI
The `tendermint lite` command has been renamed to `tendermint light` and has a slightly different API.
See [the docs](https://docs.tendermint.com/v0.33/tendermint-core/light-client-protocol.html#http-proxy) for details.
See [the docs](https://docs.tendermint.com/master/tendermint-core/light-client-protocol.html#http-proxy) for details.
### Light Client
@@ -179,7 +158,6 @@ Other user-relevant changes include:
* The `Verifier` was broken up into two pieces:
* Core verification logic (pure `VerifyX` functions)
* `Client` object, which represents the complete light client
* The new light client stores headers and validator sets as `LightBlock`s
* The RPC client can be found in the `/rpc` directory.
* The HTTP(S) proxy is located in the `/proxy` directory.
@@ -210,7 +188,7 @@ blockchains, we recommend that you check the chain ID.
### Version
Version is now set through Go linker flags `ld_flags`. Applications that are using tendermint as a library should set this at compile time.
Version is now set through Go linker flags `ld_flags`. Applications that are using tendermint as a library should set this at compile time.
Example:
@@ -218,7 +196,7 @@ Example:
go install -mod=readonly -ldflags "-X github.com/tendermint/tendermint/version.TMCoreSemVer=$(go list -m github.com/tendermint/tendermint | sed 's/ /\@/g') -s -w " -trimpath ./cmd
```
Additionally, the exported constant `version.Version` is now `version.TMCoreSemVer`.
Additionally, the exported constant `version.Version` is now `version.TMCoreSemVer`.
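For example, code that previously read `version.Version` would now look roughly like this (a sketch; the import path is unchanged):

```
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/version"
)

func main() {
	// TMCoreSemVer replaces the old version.Version and is populated at
	// build time via the -ldflags "-X ..." invocation shown above.
	fmt.Println("tendermint core version:", version.TMCoreSemVer)
}
```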
## v0.33.4
@@ -321,7 +299,7 @@ Evidence Params has been changed to include duration.
### RPC Changes
* `/validators` is now paginated (default: 30 vals per page)
* `/block_results` response format updated [see RPC docs for details](https://docs.tendermint.com/v0.33/rpc/#/Info/block_results)
* `/block_results` response format updated [see RPC docs for details](https://docs.tendermint.com/master/rpc/#/Info/block_results)
* Event suffix has been removed from the ID in event responses
* IDs are now integers not `json-client-XYZ`
@@ -440,11 +418,11 @@ the compilation tag:
Use `cleveldb` tag instead of `gcc` to compile Tendermint with CLevelDB or
use `make build_c` / `make install_c` (full instructions can be found at
<https://docs.tendermint.com/v0.33/introduction/install.html#compile-with-cleveldb-support>)
<https://tendermint.com/docs/introduction/install.html#compile-with-cleveldb-support>)
## v0.31.0
This release contains a breaking change to the behavior of the pubsub system.
This release contains a breaking change to the behaviour of the pubsub system.
It also contains some minor breaking changes in the Go API and ABCI.
There are no changes to the block or p2p protocols, so v0.31.0 should work fine
with blockchains created from the v0.30 series.
@@ -462,7 +440,7 @@ In this case, the WS client will receive an error with description:
"error": {
"code": -32000,
"msg": "Server error",
"data": "subscription was canceled (reason: client is not pulling messages fast enough)" // or "subscription was canceled (reason: Tendermint exited)"
"data": "subscription was cancelled (reason: client is not pulling messages fast enough)" // or "subscription was cancelled (reason: Tendermint exited)"
}
}
@@ -515,14 +493,14 @@ due to changes in how various data structures are hashed.
Any implementations of Tendermint blockchain verification, including lite clients,
will need to be updated. For specific details:
* [Merkle tree](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/blockchain/encoding.md#merkle-trees)
* [ConsensusParams](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/blockchain/state.md#consensusparams)
* [Merkle tree](https://github.com/tendermint/spec/blob/master/spec/blockchain/encoding.md#merkle-trees)
* [ConsensusParams](https://github.com/tendermint/spec/blob/master/spec/blockchain/state.md#consensusparams)
There was also a small change to field ordering in the vote struct. Any
implementations of an out-of-process validator (like a Key-Management Server)
will need to be updated. For specific details:
* [Vote](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/consensus/signing.md#votes)
* [Vote](https://github.com/tendermint/spec/blob/master/spec/consensus/signing.md#votes)
Finally, the proposer selection algorithm continues to evolve. See the
[work-in-progress
@@ -643,7 +621,7 @@ to `timeout_propose = "3s"`.
### RPC Changes
The default behavior of `/abci_query` has been changed to not return a proof,
The default behaviour of `/abci_query` has been changed to not return a proof,
and the name of the parameter that controls this has been changed from `trusted`
to `prove`. To get proofs with your queries, ensure you set `prove=true`.
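With the Go RPC client this looks roughly as follows (a sketch against the v0.34-era `rpc/client` API; the endpoint, query path, and key are placeholders):

```
package main

import (
	"context"
	"fmt"

	rpcclient "github.com/tendermint/tendermint/rpc/client"
	rpchttp "github.com/tendermint/tendermint/rpc/client/http"
)

func main() {
	c, err := rpchttp.New("tcp://127.0.0.1:26657", "/websocket")
	if err != nil {
		panic(err)
	}
	// Proofs are no longer returned by default; opt in with Prove: true.
	res, err := c.ABCIQueryWithOptions(
		context.Background(),
		"/store/acc/key",      // placeholder query path
		[]byte("example-key"), // placeholder query data
		rpcclient.ABCIQueryOptions{Prove: true},
	)
	if err != nil {
		panic(err)
	}
	fmt.Printf("value=%X hasProof=%v\n", res.Response.Value, res.Response.ProofOps != nil)
}
```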
View File
@@ -19,7 +19,7 @@ To get up and running quickly, see the [getting started guide](../docs/app-dev/g
A detailed description of the ABCI methods and message types is contained in:
- [The main spec](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/abci/abci.md)
- [The main spec](https://github.com/tendermint/spec/blob/master/spec/abci/abci.md)
- [A protobuf file](./types/types.proto)
- [A Go interface](./types/application.go)
View File
@@ -2,7 +2,7 @@ package kvstore
import (
"fmt"
"os"
"io/ioutil"
"sort"
"testing"
@@ -71,7 +71,7 @@ func TestKVStoreKV(t *testing.T) {
}
func TestPersistentKVStoreKV(t *testing.T) {
dir, err := os.MkdirTemp("/tmp", "abci-kvstore-test") // TODO
dir, err := ioutil.TempDir("/tmp", "abci-kvstore-test") // TODO
if err != nil {
t.Fatal(err)
}
@@ -87,7 +87,7 @@ func TestPersistentKVStoreKV(t *testing.T) {
}
func TestPersistentKVStoreInfo(t *testing.T) {
dir, err := os.MkdirTemp("/tmp", "abci-kvstore-test") // TODO
dir, err := ioutil.TempDir("/tmp", "abci-kvstore-test") // TODO
if err != nil {
t.Fatal(err)
}
@@ -114,11 +114,12 @@ func TestPersistentKVStoreInfo(t *testing.T) {
if resInfo.LastBlockHeight != height {
t.Fatalf("expected height of %d, got %d", height, resInfo.LastBlockHeight)
}
}
// add a validator, remove a validator, update a validator
func TestValUpdates(t *testing.T) {
dir, err := os.MkdirTemp("/tmp", "abci-kvstore-test") // TODO
dir, err := ioutil.TempDir("/tmp", "abci-kvstore-test") // TODO
if err != nil {
t.Fatal(err)
}
@@ -161,7 +162,7 @@ func TestValUpdates(t *testing.T) {
makeApplyBlock(t, kvstore, 2, diff, tx1, tx2, tx3)
vals1 = append(vals[:nInit-2], vals[nInit+1]) //nolint: gocritic
vals1 = append(vals[:nInit-2], vals[nInit+1]) // nolint: gocritic
vals2 = kvstore.Validators()
valsEqual(t, vals1, vals2)
@@ -180,6 +181,7 @@ func TestValUpdates(t *testing.T) {
vals1 = append([]types.ValidatorUpdate{v1}, vals1[1:]...)
vals2 = kvstore.Validators()
valsEqual(t, vals1, vals2)
}
func makeApplyBlock(
@@ -187,8 +189,7 @@ func makeApplyBlock(
kvstore types.Application,
heightInt int,
diff []types.ValidatorUpdate,
txs ...[]byte,
) {
txs ...[]byte) {
// make and apply block
height := int64(heightInt)
hash := []byte("foo")
@@ -206,6 +207,7 @@ func makeApplyBlock(
kvstore.Commit()
valsEqual(t, diff, resEndBlock.ValidatorUpdates)
}
// order doesn't matter
View File
@@ -2,8 +2,9 @@
Package server is used to start a new ABCI server.
It contains two server implementations:
- gRPC server
- socket server
* gRPC server
* socket server
*/
package server
View File
@@ -8,34 +8,35 @@ There are four different behaviours a reactor can report.
1. bad message
type badMessage struct {
explanation string
}
type badMessage struct {
explanation string
}
# This message will request the peer be stopped for an error
This message will request the peer be stopped for an error
2. message out of order
type messageOutOfOrder struct {
explanation string
}
type messageOutOfOrder struct {
explanation string
}
# This message will request the peer be stopped for an error
This message will request the peer be stopped for an error
3. consensus Vote
type consensusVote struct {
explanation string
}
type consensusVote struct {
explanation string
}
# This message will request the peer be marked as good
This message will request the peer be marked as good
4. block part
type blockPart struct {
explanation string
}
type blockPart struct {
explanation string
}
This message will request the peer be marked as good
*/
package behaviour
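A rough sketch of how a reactor reports these behaviours through the exported API (the peer ID and explanation strings are made up):

```
package example

import (
	"github.com/tendermint/tendermint/behaviour"
	"github.com/tendermint/tendermint/p2p"
)

// reportExamples shows the two directions a reactor can report in:
// asking for a peer to be stopped, or asking for it to be marked as good.
func reportExamples(reporter behaviour.Reporter, peerID p2p.ID) error {
	// Bad message / message out of order: request the peer be stopped.
	if err := reporter.Report(behaviour.BadMessage(peerID, "malformed block part")); err != nil {
		return err
	}
	// Consensus vote / block part: request the peer be marked as good.
	return reporter.Report(behaviour.ConsensusVote(peerID, "useful vote"))
}
```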
View File
@@ -6,7 +6,6 @@ import (
"github.com/gogo/protobuf/proto"
"github.com/tendermint/tendermint/p2p"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
"github.com/tendermint/tendermint/types"
)
@@ -20,6 +19,58 @@ const (
BlockResponseMessageFieldKeySize
)
// EncodeMsg encodes a Protobuf message
func EncodeMsg(pb proto.Message) ([]byte, error) {
msg := bcproto.Message{}
switch pb := pb.(type) {
case *bcproto.BlockRequest:
msg.Sum = &bcproto.Message_BlockRequest{BlockRequest: pb}
case *bcproto.BlockResponse:
msg.Sum = &bcproto.Message_BlockResponse{BlockResponse: pb}
case *bcproto.NoBlockResponse:
msg.Sum = &bcproto.Message_NoBlockResponse{NoBlockResponse: pb}
case *bcproto.StatusRequest:
msg.Sum = &bcproto.Message_StatusRequest{StatusRequest: pb}
case *bcproto.StatusResponse:
msg.Sum = &bcproto.Message_StatusResponse{StatusResponse: pb}
default:
return nil, fmt.Errorf("unknown message type %T", pb)
}
bz, err := proto.Marshal(&msg)
if err != nil {
return nil, fmt.Errorf("unable to marshal %T: %w", pb, err)
}
return bz, nil
}
// DecodeMsg decodes a Protobuf message.
func DecodeMsg(bz []byte) (proto.Message, error) {
pb := &bcproto.Message{}
err := proto.Unmarshal(bz, pb)
if err != nil {
return nil, err
}
switch msg := pb.Sum.(type) {
case *bcproto.Message_BlockRequest:
return msg.BlockRequest, nil
case *bcproto.Message_BlockResponse:
return msg.BlockResponse, nil
case *bcproto.Message_NoBlockResponse:
return msg.NoBlockResponse, nil
case *bcproto.Message_StatusRequest:
return msg.StatusRequest, nil
case *bcproto.Message_StatusResponse:
return msg.StatusResponse, nil
default:
return nil, fmt.Errorf("unknown message type %T", msg)
}
}
// ValidateMsg validates a message.
func ValidateMsg(pb proto.Message) error {
if pb == nil {
@@ -57,31 +108,3 @@ func ValidateMsg(pb proto.Message) error {
}
return nil
}
// EncodeMsg encodes a Protobuf message
//
// Deprecated: Will be removed in v0.37.
func EncodeMsg(pb proto.Message) ([]byte, error) {
if um, ok := pb.(p2p.Wrapper); ok {
pb = um.Wrap()
}
bz, err := proto.Marshal(pb)
if err != nil {
return nil, fmt.Errorf("unable to marshal %T: %w", pb, err)
}
return bz, nil
}
// DecodeMsg decodes a Protobuf message.
//
// Deprecated: Will be removed in v0.37.
func DecodeMsg(bz []byte) (proto.Message, error) {
pb := &bcproto.Message{}
err := proto.Unmarshal(bz, pb)
if err != nil {
return nil, err
}
return pb.Unwrap()
}
View File
@@ -78,7 +78,7 @@ func TestBcStatusResponseMessageValidateBasic(t *testing.T) {
}
}
//nolint:lll // ignore line length in tests
// nolint:lll // ignore line length in tests
func TestBlockchainMessageVectors(t *testing.T) {
block := types.MakeBlock(int64(3), []types.Tx{types.Tx("Hello World")}, nil, nil)
block.Version.Block = 11 // overwrite updated protocol version
View File
@@ -32,7 +32,6 @@ const (
maxTotalRequesters = 600
maxPendingRequests = maxTotalRequesters
maxPendingRequestsPerPeer = 20
requestRetrySeconds = 30
// Minimum recv rate to ensure we're receiving blocks from a peer fast
// enough. If a peer is not sending us data at at least that rate, we
@@ -411,7 +410,6 @@ func (pool *BlockPool) sendError(err error, peerID p2p.ID) {
}
// for debugging purposes
//
//nolint:unused
func (pool *BlockPool) debug() string {
pool.mtx.Lock()
@@ -603,7 +601,7 @@ OUTER_LOOP:
}
peer = bpr.pool.pickIncrAvailablePeer(bpr.height)
if peer == nil {
bpr.Logger.Debug("No peers currently available; will retry shortly", "height", bpr.height)
// log.Info("No peers available", "height", height)
time.Sleep(requestIntervalMS * time.Millisecond)
continue PICK_PEER_LOOP
}
@@ -613,7 +611,6 @@ OUTER_LOOP:
bpr.peerID = peer.id
bpr.mtx.Unlock()
to := time.NewTimer(requestRetrySeconds * time.Second)
// Send request and wait.
bpr.pool.sendRequest(bpr.height, peer.id)
WAIT_LOOP:
@@ -626,11 +623,6 @@ OUTER_LOOP:
return
case <-bpr.Quit():
return
case <-to.C:
bpr.Logger.Debug("Retrying block request after timeout", "height", bpr.height, "peer", bpr.peerID)
// Simulate a redo
bpr.reset()
continue OUTER_LOOP
case peerID := <-bpr.redoCh:
if peerID == bpr.peerID {
bpr.reset()
View File
@@ -5,8 +5,6 @@ import (
"reflect"
"time"
"github.com/gogo/protobuf/proto"
bc "github.com/tendermint/tendermint/blockchain"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/p2p"
@@ -146,20 +144,21 @@ func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
SendQueueCapacity: 1000,
RecvBufferCapacity: 50 * 4096,
RecvMessageCapacity: bc.MaxMsgSize,
MessageType: &bcproto.Message{},
},
}
}
// AddPeer implements Reactor by sending our state to peer.
func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) {
p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
ChannelID: BlockchainChannel,
Message: &bcproto.StatusResponse{
Base: bcR.store.Base(),
Height: bcR.store.Height(),
},
}, bcR.Logger)
msgBytes, err := bc.EncodeMsg(&bcproto.StatusResponse{
Base: bcR.store.Base(),
Height: bcR.store.Height()})
if err != nil {
bcR.Logger.Error("could not convert msg to protobuf", "err", err)
return
}
peer.Send(BlockchainChannel, msgBytes)
// it's OK if send fails. will try later in poolRoutine
// peer is added to the pool once we receive the first
@@ -183,73 +182,75 @@ func (bcR *BlockchainReactor) respondToPeer(msg *bcproto.BlockRequest,
bcR.Logger.Error("could not convert msg to protobuf", "err", err)
return false
}
return p2p.TrySendEnvelopeShim(src, p2p.Envelope{ //nolint: staticcheck
ChannelID: BlockchainChannel,
Message: &bcproto.BlockResponse{Block: bl},
}, bcR.Logger)
msgBytes, err := bc.EncodeMsg(&bcproto.BlockResponse{Block: bl})
if err != nil {
bcR.Logger.Error("could not marshal msg", "err", err)
return false
}
return src.TrySend(BlockchainChannel, msgBytes)
}
return p2p.TrySendEnvelopeShim(src, p2p.Envelope{ //nolint: staticcheck
ChannelID: BlockchainChannel,
Message: &bcproto.NoBlockResponse{Height: msg.Height},
}, bcR.Logger)
bcR.Logger.Info("Peer asking for a block we don't have", "src", src, "height", msg.Height)
msgBytes, err := bc.EncodeMsg(&bcproto.NoBlockResponse{Height: msg.Height})
if err != nil {
bcR.Logger.Error("could not convert msg to protobuf", "err", err)
return false
}
return src.TrySend(BlockchainChannel, msgBytes)
}
func (bcR *BlockchainReactor) ReceiveEnvelope(e p2p.Envelope) {
if err := bc.ValidateMsg(e.Message); err != nil {
bcR.Logger.Error("Peer sent us invalid msg", "peer", e.Src, "msg", e.Message, "err", err)
bcR.Switch.StopPeerForError(e.Src, err)
// Receive implements Reactor by handling 4 types of messages (look below).
func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
msg, err := bc.DecodeMsg(msgBytes)
if err != nil {
bcR.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err)
bcR.Switch.StopPeerForError(src, err)
return
}
bcR.Logger.Debug("Receive", "e.Src", e.Src, "chID", e.ChannelID, "msg", e.Message)
if err = bc.ValidateMsg(msg); err != nil {
bcR.Logger.Error("Peer sent us invalid msg", "peer", src, "msg", msg, "err", err)
bcR.Switch.StopPeerForError(src, err)
return
}
switch msg := e.Message.(type) {
bcR.Logger.Debug("Receive", "src", src, "chID", chID, "msg", msg)
switch msg := msg.(type) {
case *bcproto.BlockRequest:
bcR.respondToPeer(msg, e.Src)
bcR.respondToPeer(msg, src)
case *bcproto.BlockResponse:
bi, err := types.BlockFromProto(msg.Block)
if err != nil {
bcR.Logger.Error("Block content is invalid", "err", err)
return
}
bcR.pool.AddBlock(e.Src.ID(), bi, msg.Block.Size())
bcR.pool.AddBlock(src.ID(), bi, len(msgBytes))
case *bcproto.StatusRequest:
// Send peer our state.
p2p.TrySendEnvelopeShim(e.Src, p2p.Envelope{ //nolint: staticcheck
ChannelID: BlockchainChannel,
Message: &bcproto.StatusResponse{
Height: bcR.store.Height(),
Base: bcR.store.Base(),
},
}, bcR.Logger)
msgBytes, err := bc.EncodeMsg(&bcproto.StatusResponse{
Height: bcR.store.Height(),
Base: bcR.store.Base(),
})
if err != nil {
bcR.Logger.Error("could not convert msg to protobuf", "err", err)
return
}
src.TrySend(BlockchainChannel, msgBytes)
case *bcproto.StatusResponse:
// Got a peer status. Unverified.
bcR.pool.SetPeerRange(e.Src.ID(), msg.Base, msg.Height)
bcR.pool.SetPeerRange(src.ID(), msg.Base, msg.Height)
case *bcproto.NoBlockResponse:
bcR.Logger.Debug("Peer does not have requested block", "peer", e.Src, "height", msg.Height)
bcR.Logger.Debug("Peer does not have requested block", "peer", src, "height", msg.Height)
default:
bcR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
}
}
func (bcR *BlockchainReactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {
msg := &bcproto.Message{}
err := proto.Unmarshal(msgBytes, msg)
if err != nil {
panic(err)
}
uw, err := msg.Unwrap()
if err != nil {
panic(err)
}
bcR.ReceiveEnvelope(p2p.Envelope{
ChannelID: chID,
Src: peer,
Message: uw,
})
}
// Handle messages from the poolReactor telling the reactor what to do.
// NOTE: Don't sleep in the FOR_LOOP or otherwise slow it down!
func (bcR *BlockchainReactor) poolRoutine(stateSynced bool) {
@@ -285,10 +286,13 @@ func (bcR *BlockchainReactor) poolRoutine(stateSynced bool) {
if peer == nil {
continue
}
queued := p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
ChannelID: BlockchainChannel,
Message: &bcproto.BlockRequest{Height: request.Height},
}, bcR.Logger)
msgBytes, err := bc.EncodeMsg(&bcproto.BlockRequest{Height: request.Height})
if err != nil {
bcR.Logger.Error("could not convert msg to proto", "err", err)
continue
}
queued := peer.TrySend(BlockchainChannel, msgBytes)
if !queued {
bcR.Logger.Debug("Send queue is full, drop block request", "peer", peer.ID(), "height", request.Height)
}
@@ -300,7 +304,7 @@ func (bcR *BlockchainReactor) poolRoutine(stateSynced bool) {
case <-statusUpdateTicker.C:
// ask for status updates
go bcR.BroadcastStatusRequest() //nolint: errcheck
go bcR.BroadcastStatusRequest() // nolint: errcheck
}
}
@@ -421,9 +425,13 @@ FOR_LOOP:
// BroadcastStatusRequest broadcasts `BlockStore` base and height.
func (bcR *BlockchainReactor) BroadcastStatusRequest() error {
bcR.Switch.BroadcastEnvelope(p2p.Envelope{
ChannelID: BlockchainChannel,
Message: &bcproto.StatusRequest{},
})
bm, err := bc.EncodeMsg(&bcproto.StatusRequest{})
if err != nil {
bcR.Logger.Error("could not convert msg to proto", "err", err)
return fmt.Errorf("could not convert msg to proto: %w", err)
}
bcR.Switch.Broadcast(BlockchainChannel, bm)
return nil
}
View File
@@ -7,7 +7,6 @@ import (
"testing"
"time"
"github.com/gogo/protobuf/proto"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -18,7 +17,6 @@ import (
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/mempool/mock"
"github.com/tendermint/tendermint/p2p"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/store"
@@ -72,9 +70,7 @@ func newBlockchainReactor(
blockDB := dbm.NewMemDB()
stateDB := dbm.NewMemDB()
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
stateStore := sm.NewStore(stateDB)
blockStore := store.NewBlockStore(blockDB)
state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
@@ -87,9 +83,7 @@ func newBlockchainReactor(
// pool.height is determined from the store.
fastSync := true
db := dbm.NewMemDB()
stateStore = sm.NewStore(db, sm.StoreOptions{
DiscardABCIResponses: false,
})
stateStore = sm.NewStore(db)
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(),
mock.Mempool{}, sm.EmptyEvidencePool{})
if err = stateStore.Save(state); err != nil {
@@ -194,25 +188,6 @@ func TestNoBlockResponse(t *testing.T) {
}
}
func TestLegacyReactorReceiveBasic(t *testing.T) {
config = cfg.ResetTestRoot("blockchain_reactor_test")
defer os.RemoveAll(config.RootDir)
genDoc, privVals := randGenesisDoc(1, false, 30)
reactor := newBlockchainReactor(log.TestingLogger(), genDoc, privVals, 10).reactor
peer := p2p.CreateRandomPeer(false)
reactor.InitPeer(peer)
reactor.AddPeer(peer)
m := &bcproto.StatusRequest{}
wm := m.Wrap()
msg, err := proto.Marshal(wm)
assert.NoError(t, err)
assert.NotPanics(t, func() {
reactor.Receive(BlockchainChannel, peer, msg)
})
}
// NOTE: This is too hard to test without
// an easy way to add test peer to switch
// or without significant refactoring of the module.
View File
@@ -2,10 +2,9 @@ package v1
import (
"fmt"
"reflect"
"time"
"github.com/gogo/protobuf/proto"
"github.com/tendermint/tendermint/behaviour"
bc "github.com/tendermint/tendermint/blockchain"
"github.com/tendermint/tendermint/libs/log"
@@ -173,20 +172,21 @@ func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
SendQueueCapacity: 2000,
RecvBufferCapacity: 50 * 4096,
RecvMessageCapacity: bc.MaxMsgSize,
MessageType: &bcproto.Message{},
},
}
}
// AddPeer implements Reactor by sending our state to peer.
func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) {
p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
ChannelID: BlockchainChannel,
Message: &bcproto.StatusResponse{
Base: bcR.store.Base(),
Height: bcR.store.Height(),
},
}, bcR.Logger)
msgBytes, err := bc.EncodeMsg(&bcproto.StatusResponse{
Base: bcR.store.Base(),
Height: bcR.store.Height(),
})
if err != nil {
bcR.Logger.Error("could not convert msg to protobuf", "err", err)
return
}
peer.Send(BlockchainChannel, msgBytes)
// it's OK if send fails. will try later in poolRoutine
// peer is added to the pool once we receive the first
@@ -206,28 +206,35 @@ func (bcR *BlockchainReactor) sendBlockToPeer(msg *bcproto.BlockRequest,
bcR.Logger.Error("Could not send block message to peer", "err", err)
return false
}
return p2p.TrySendEnvelopeShim(src, p2p.Envelope{ //nolint: staticcheck
ChannelID: BlockchainChannel,
Message: &bcproto.BlockResponse{Block: pbbi},
}, bcR.Logger)
msgBytes, err := bc.EncodeMsg(&bcproto.BlockResponse{Block: pbbi})
if err != nil {
bcR.Logger.Error("unable to marshal msg", "err", err)
return false
}
return src.TrySend(BlockchainChannel, msgBytes)
}
bcR.Logger.Info("peer asking for a block we don't have", "src", src, "height", msg.Height)
return p2p.TrySendEnvelopeShim(src, p2p.Envelope{ //nolint: staticcheck
ChannelID: BlockchainChannel,
Message: &bcproto.NoBlockResponse{Height: msg.Height},
}, bcR.Logger)
msgBytes, err := bc.EncodeMsg(&bcproto.NoBlockResponse{Height: msg.Height})
if err != nil {
bcR.Logger.Error("unable to marshal msg", "err", err)
return false
}
return src.TrySend(BlockchainChannel, msgBytes)
}
func (bcR *BlockchainReactor) sendStatusResponseToPeer(msg *bcproto.StatusRequest, src p2p.Peer) (queued bool) {
return p2p.TrySendEnvelopeShim(src, p2p.Envelope{ //nolint: staticcheck
ChannelID: BlockchainChannel,
Message: &bcproto.StatusResponse{
Base: bcR.store.Base(),
Height: bcR.store.Height(),
},
}, bcR.Logger)
msgBytes, err := bc.EncodeMsg(&bcproto.StatusResponse{
Base: bcR.store.Base(),
Height: bcR.store.Height(),
})
if err != nil {
bcR.Logger.Error("unable to marshal msg", "err", err)
return false
}
return src.TrySend(BlockchainChannel, msgBytes)
}
// RemovePeer implements Reactor by removing peer from the pool.
@@ -243,27 +250,34 @@ func (bcR *BlockchainReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
}
// Receive implements Reactor by handling 4 types of messages (look below).
func (bcR *BlockchainReactor) ReceiveEnvelope(e p2p.Envelope) {
if err := bc.ValidateMsg(e.Message); err != nil {
bcR.Logger.Error("peer sent us invalid msg", "peer", e.Src, "msg", e.Message, "err", err)
_ = bcR.swReporter.Report(behaviour.BadMessage(e.Src.ID(), err.Error()))
func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
msg, err := bc.DecodeMsg(msgBytes)
if err != nil {
bcR.Logger.Error("error decoding message", "src", src, "chId", chID, "err", err)
_ = bcR.swReporter.Report(behaviour.BadMessage(src.ID(), err.Error()))
return
}
bcR.Logger.Debug("Receive", "src", e.Src, "chID", e.ChannelID, "msg", e.Message)
if err = bc.ValidateMsg(msg); err != nil {
bcR.Logger.Error("peer sent us invalid msg", "peer", src, "msg", msg, "err", err)
_ = bcR.swReporter.Report(behaviour.BadMessage(src.ID(), err.Error()))
return
}
switch msg := e.Message.(type) {
bcR.Logger.Debug("Receive", "src", src, "chID", chID, "msg", msg)
switch msg := msg.(type) {
case *bcproto.BlockRequest:
if queued := bcR.sendBlockToPeer(msg, e.Src); !queued {
if queued := bcR.sendBlockToPeer(msg, src); !queued {
// Unfortunately not queued since the queue is full.
bcR.Logger.Error("Could not send block message to peer", "src", e.Src, "height", msg.Height)
bcR.Logger.Error("Could not send block message to peer", "src", src, "height", msg.Height)
}
case *bcproto.StatusRequest:
// Send peer our state.
if queued := bcR.sendStatusResponseToPeer(msg, e.Src); !queued {
if queued := bcR.sendStatusResponseToPeer(msg, src); !queued {
// Unfortunately not queued since the queue is full.
bcR.Logger.Error("Could not send status message to peer", "src", e.Src)
bcR.Logger.Error("Could not send status message to peer", "src", src)
}
case *bcproto.BlockResponse:
@@ -275,23 +289,23 @@ func (bcR *BlockchainReactor) ReceiveEnvelope(e p2p.Envelope) {
msgForFSM := bcReactorMessage{
event: blockResponseEv,
data: bReactorEventData{
peerID: e.Src.ID(),
peerID: src.ID(),
height: bi.Height,
block: bi,
length: msg.Size(),
length: len(msgBytes),
},
}
bcR.Logger.Info("Received", "src", e.Src, "height", bi.Height)
bcR.Logger.Info("Received", "src", src, "height", bi.Height)
bcR.messagesForFSMCh <- msgForFSM
case *bcproto.NoBlockResponse:
msgForFSM := bcReactorMessage{
event: noBlockResponseEv,
data: bReactorEventData{
peerID: e.Src.ID(),
peerID: src.ID(),
height: msg.Height,
},
}
bcR.Logger.Debug("Peer does not have requested block", "peer", e.Src, "height", msg.Height)
bcR.Logger.Debug("Peer does not have requested block", "peer", src, "height", msg.Height)
bcR.messagesForFSMCh <- msgForFSM
case *bcproto.StatusResponse:
@@ -299,35 +313,18 @@ func (bcR *BlockchainReactor) ReceiveEnvelope(e p2p.Envelope) {
msgForFSM := bcReactorMessage{
event: statusResponseEv,
data: bReactorEventData{
peerID: e.Src.ID(),
peerID: src.ID(),
height: msg.Height,
length: msg.Size(),
length: len(msgBytes),
},
}
bcR.messagesForFSMCh <- msgForFSM
default:
bcR.Logger.Error(fmt.Sprintf("unknown message type %T", msg))
bcR.Logger.Error(fmt.Sprintf("unknown message type %v", reflect.TypeOf(msg)))
}
}
func (bcR *BlockchainReactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {
msg := &bcproto.Message{}
err := proto.Unmarshal(msgBytes, msg)
if err != nil {
panic(err)
}
uw, err := msg.Unwrap()
if err != nil {
panic(err)
}
bcR.ReceiveEnvelope(p2p.Envelope{
ChannelID: chID,
Src: peer,
Message: uw,
})
}
// processBlocksRoutine processes blocks until signaled to stop over the stopProcessing channel
func (bcR *BlockchainReactor) processBlocksRoutine(stopProcessing chan struct{}) {
@@ -495,10 +492,11 @@ func (bcR *BlockchainReactor) processBlock() error {
// Implements bcRNotifier
// sendStatusRequest broadcasts `BlockStore` height.
func (bcR *BlockchainReactor) sendStatusRequest() {
bcR.Switch.BroadcastEnvelope(p2p.Envelope{
ChannelID: BlockchainChannel,
Message: &bcproto.StatusRequest{},
})
msgBytes, err := bc.EncodeMsg(&bcproto.StatusRequest{})
if err != nil {
panic(err)
}
bcR.Switch.Broadcast(BlockchainChannel, msgBytes)
}
// Implements bcRNotifier
@@ -509,10 +507,11 @@ func (bcR *BlockchainReactor) sendBlockRequest(peerID p2p.ID, height int64) erro
return errNilPeerForBlockRequest
}
queued := p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
ChannelID: BlockchainChannel,
Message: &bcproto.BlockRequest{Height: height},
}, bcR.Logger)
msgBytes, err := bc.EncodeMsg(&bcproto.BlockRequest{Height: height})
if err != nil {
return err
}
queued := peer.TrySend(BlockchainChannel, msgBytes)
if !queued {
return errSendQueueFull
}
@@ -535,8 +534,8 @@ func (bcR *BlockchainReactor) switchToConsensus() {
// Called by FSM and pool:
// - pool calls when it detects slow peer or when peer times out
// - FSM calls when:
// - adding a block (addBlock) fails
// - reactor processing of a block reports failure and FSM sends back the peers of first and second blocks
// - adding a block (addBlock) fails
// - reactor processing of a block reports failure and FSM sends back the peers of first and second blocks
func (bcR *BlockchainReactor) sendPeerError(err error, peerID p2p.ID) {
bcR.Logger.Info("sendPeerError:", "peer", peerID, "error", err)
msgData := bcFsmMessage{
View File
@@ -8,7 +8,6 @@ import (
"testing"
"time"
"github.com/gogo/protobuf/proto"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -19,7 +18,6 @@ import (
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/mempool/mock"
"github.com/tendermint/tendermint/p2p"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
@@ -104,9 +102,7 @@ func newBlockchainReactor(
blockDB := dbm.NewMemDB()
stateDB := dbm.NewMemDB()
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
stateStore := sm.NewStore(stateDB)
blockStore := store.NewBlockStore(blockDB)
state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
@@ -119,9 +115,7 @@ func newBlockchainReactor(
// pool.height is determined from the store.
fastSync := true
db := dbm.NewMemDB()
stateStore = sm.NewStore(db, sm.StoreOptions{
DiscardABCIResponses: false,
})
stateStore = sm.NewStore(db)
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(),
mock.Mempool{}, sm.EmptyEvidencePool{})
if err = stateStore.Save(state); err != nil {
@@ -351,25 +345,6 @@ outerFor:
assert.True(t, lastReactorPair.bcR.Switch.Peers().Size() < len(reactorPairs)-1)
}
func TestLegacyReactorReceiveBasic(t *testing.T) {
config = cfg.ResetTestRoot("blockchain_reactor_test")
defer os.RemoveAll(config.RootDir)
genDoc, privVals := randGenesisDoc(1, false, 30)
reactor := newBlockchainReactor(t, log.TestingLogger(), genDoc, privVals, 10)
peer := p2p.CreateRandomPeer(false)
reactor.InitPeer(peer)
reactor.AddPeer(peer)
m := &bcproto.StatusRequest{}
wm := m.Wrap()
msg, err := proto.Marshal(wm)
assert.NoError(t, err)
assert.NotPanics(t, func() {
reactor.Receive(BlockchainChannel, peer, msg)
})
}
//----------------------------------------------
// utility funcs
View File
@@ -3,6 +3,7 @@ package v2
import (
"fmt"
bc "github.com/tendermint/tendermint/blockchain"
"github.com/tendermint/tendermint/p2p"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
"github.com/tendermint/tendermint/state"
@@ -15,7 +16,7 @@ type iIO interface {
sendBlockNotFound(height int64, peerID p2p.ID) error
sendStatusResponse(base, height int64, peerID p2p.ID) error
broadcastStatusRequest()
broadcastStatusRequest() error
trySwitchToConsensus(state state.State, skipWAL bool) bool
}
@@ -46,10 +47,13 @@ func (sio *switchIO) sendBlockRequest(peerID p2p.ID, height int64) error {
if peer == nil {
return fmt.Errorf("peer not found")
}
if queued := p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
ChannelID: BlockchainChannel,
Message: &bcproto.BlockRequest{Height: height},
}, sio.sw.Logger); !queued {
msgBytes, err := bc.EncodeMsg(&bcproto.BlockRequest{Height: height})
if err != nil {
return err
}
queued := peer.TrySend(BlockchainChannel, msgBytes)
if !queued {
return fmt.Errorf("send queue full")
}
return nil
@@ -61,10 +65,12 @@ func (sio *switchIO) sendStatusResponse(base int64, height int64, peerID p2p.ID)
return fmt.Errorf("peer not found")
}
if queued := p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
ChannelID: BlockchainChannel,
Message: &bcproto.StatusRequest{},
}, sio.sw.Logger); !queued {
msgBytes, err := bc.EncodeMsg(&bcproto.StatusResponse{Height: height, Base: base})
if err != nil {
return err
}
if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued {
return fmt.Errorf("peer queue full")
}
@@ -85,10 +91,11 @@ func (sio *switchIO) sendBlockToPeer(block *types.Block, peerID p2p.ID) error {
return err
}
if queued := p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
ChannelID: BlockchainChannel,
Message: &bcproto.BlockResponse{Block: bpb},
}, sio.sw.Logger); !queued {
msgBytes, err := bc.EncodeMsg(&bcproto.BlockResponse{Block: bpb})
if err != nil {
return err
}
if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued {
return fmt.Errorf("peer queue full")
}
@@ -100,10 +107,12 @@ func (sio *switchIO) sendBlockNotFound(height int64, peerID p2p.ID) error {
if peer == nil {
return fmt.Errorf("peer not found")
}
if queued := p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
ChannelID: BlockchainChannel,
Message: &bcproto.NoBlockResponse{Height: height},
}, sio.sw.Logger); !queued {
msgBytes, err := bc.EncodeMsg(&bcproto.NoBlockResponse{Height: height})
if err != nil {
return err
}
if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued {
return fmt.Errorf("peer queue full")
}
@@ -118,10 +127,14 @@ func (sio *switchIO) trySwitchToConsensus(state state.State, skipWAL bool) bool
return ok
}
func (sio *switchIO) broadcastStatusRequest() {
func (sio *switchIO) broadcastStatusRequest() error {
msgBytes, err := bc.EncodeMsg(&bcproto.StatusRequest{})
if err != nil {
return err
}
// XXX: maybe we should use an io specific peer list here
sio.sw.BroadcastEnvelope(p2p.Envelope{
ChannelID: BlockchainChannel,
Message: &bcproto.StatusRequest{},
})
sio.sw.Broadcast(BlockchainChannel, msgBytes)
return nil
}
View File
@@ -5,8 +5,6 @@ import (
"fmt"
"time"
"github.com/gogo/protobuf/proto"
"github.com/tendermint/tendermint/behaviour"
bc "github.com/tendermint/tendermint/blockchain"
"github.com/tendermint/tendermint/libs/log"
@@ -217,7 +215,7 @@ type bcBlockResponse struct {
priorityNormal
time time.Time
peerID p2p.ID
size int
size int64
block *types.Block
}
@@ -351,7 +349,9 @@ func (r *BlockchainReactor) demux(events <-chan Event) {
case <-doProcessBlockCh:
r.processor.send(rProcessBlock{})
case <-doStatusCh:
r.io.broadcastStatusRequest()
if err := r.io.broadcastStatusRequest(); err != nil {
r.logger.Error("Error broadcasting status request", "err", err)
}
// Events from peers. Closing the channel signals event loop termination.
case event, ok := <-events:
@@ -455,31 +455,39 @@ func (r *BlockchainReactor) Stop() error {
}
// Receive implements Reactor by handling different message types.
func (r *BlockchainReactor) ReceiveEnvelope(e p2p.Envelope) {
if err := bc.ValidateMsg(e.Message); err != nil {
r.logger.Error("peer sent us invalid msg", "peer", e.Src, "msg", e.Message, "err", err)
_ = r.reporter.Report(behaviour.BadMessage(e.Src.ID(), err.Error()))
func (r *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
msg, err := bc.DecodeMsg(msgBytes)
if err != nil {
r.logger.Error("error decoding message",
"src", src.ID(), "chId", chID, "msg", msg, "err", err)
_ = r.reporter.Report(behaviour.BadMessage(src.ID(), err.Error()))
return
}
r.logger.Debug("Receive", "src", e.Src.ID(), "chID", e.ChannelID, "msg", e.Message)
if err = bc.ValidateMsg(msg); err != nil {
r.logger.Error("peer sent us invalid msg", "peer", src, "msg", msg, "err", err)
_ = r.reporter.Report(behaviour.BadMessage(src.ID(), err.Error()))
return
}
switch msg := e.Message.(type) {
r.logger.Debug("Receive", "src", src.ID(), "chID", chID, "msg", msg)
switch msg := msg.(type) {
case *bcproto.StatusRequest:
if err := r.io.sendStatusResponse(r.store.Base(), r.store.Height(), e.Src.ID()); err != nil {
r.logger.Error("Could not send status message to peer", "src", e.Src)
if err := r.io.sendStatusResponse(r.store.Base(), r.store.Height(), src.ID()); err != nil {
r.logger.Error("Could not send status message to peer", "src", src)
}
case *bcproto.BlockRequest:
block := r.store.LoadBlock(msg.Height)
if block != nil {
if err := r.io.sendBlockToPeer(block, e.Src.ID()); err != nil {
if err = r.io.sendBlockToPeer(block, src.ID()); err != nil {
r.logger.Error("Could not send block message to peer: ", err)
}
} else {
r.logger.Info("peer asking for a block we don't have", "src", e.Src, "height", msg.Height)
peerID := e.Src.ID()
if err := r.io.sendBlockNotFound(msg.Height, peerID); err != nil {
r.logger.Info("peer asking for a block we don't have", "src", src, "height", msg.Height)
peerID := src.ID()
if err = r.io.sendBlockNotFound(msg.Height, peerID); err != nil {
r.logger.Error("Couldn't send block not found: ", err)
}
}
@@ -487,7 +495,7 @@ func (r *BlockchainReactor) ReceiveEnvelope(e p2p.Envelope) {
case *bcproto.StatusResponse:
r.mtx.RLock()
if r.events != nil {
r.events <- bcStatusResponse{peerID: e.Src.ID(), base: msg.Base, height: msg.Height}
r.events <- bcStatusResponse{peerID: src.ID(), base: msg.Base, height: msg.Height}
}
r.mtx.RUnlock()
@@ -500,10 +508,10 @@ func (r *BlockchainReactor) ReceiveEnvelope(e p2p.Envelope) {
r.mtx.RLock()
if r.events != nil {
r.events <- bcBlockResponse{
peerID: e.Src.ID(),
peerID: src.ID(),
block: bi,
size: int64(len(msgBytes)),
time: time.Now(),
size: msg.Size(),
}
}
r.mtx.RUnlock()
@@ -511,29 +519,12 @@ func (r *BlockchainReactor) ReceiveEnvelope(e p2p.Envelope) {
case *bcproto.NoBlockResponse:
r.mtx.RLock()
if r.events != nil {
r.events <- bcNoBlockResponse{peerID: e.Src.ID(), height: msg.Height, time: time.Now()}
r.events <- bcNoBlockResponse{peerID: src.ID(), height: msg.Height, time: time.Now()}
}
r.mtx.RUnlock()
}
}
func (r *BlockchainReactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {
msg := &bcproto.Message{}
err := proto.Unmarshal(msgBytes, msg)
if err != nil {
panic(err)
}
uw, err := msg.Unwrap()
if err != nil {
panic(err)
}
r.ReceiveEnvelope(p2p.Envelope{
ChannelID: chID,
Src: peer,
Message: uw,
})
}
// AddPeer implements Reactor interface
func (r *BlockchainReactor) AddPeer(peer p2p.Peer) {
err := r.io.sendStatusResponse(r.store.Base(), r.store.Height(), peer.ID())
@@ -568,7 +559,6 @@ func (r *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
SendQueueCapacity: 2000,
RecvBufferCapacity: 50 * 4096,
RecvMessageCapacity: bc.MaxMsgSize,
MessageType: &bcproto.Message{},
},
}
}
View File
@@ -9,13 +9,13 @@ import (
"testing"
"time"
"github.com/gogo/protobuf/proto"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
dbm "github.com/tendermint/tm-db"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/behaviour"
bc "github.com/tendermint/tendermint/blockchain"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/service"
@@ -53,19 +53,34 @@ func (mp mockPeer) NodeInfo() p2p.NodeInfo {
func (mp mockPeer) Status() conn.ConnectionStatus { return conn.ConnectionStatus{} }
func (mp mockPeer) SocketAddr() *p2p.NetAddress { return &p2p.NetAddress{} }
func (mp mockPeer) SendEnvelope(e p2p.Envelope) bool { return true }
func (mp mockPeer) TrySendEnvelope(e p2p.Envelope) bool { return true }
func (mp mockPeer) Send(byte, []byte) bool { return true }
func (mp mockPeer) TrySend(byte, []byte) bool { return true }
func (mp mockPeer) Set(string, interface{}) {}
func (mp mockPeer) Get(string) interface{} { return struct{}{} }
func (mp mockPeer) SetRemovalFailed() {}
func (mp mockPeer) GetRemovalFailed() bool { return false }
// nolint:unused // ignore
type mockBlockStore struct {
blocks map[int64]*types.Block
}
type mockBlockApplier struct{}
// nolint:unused // ignore
func (ml *mockBlockStore) Height() int64 {
return int64(len(ml.blocks))
}
// nolint:unused // ignore
func (ml *mockBlockStore) LoadBlock(height int64) *types.Block {
return ml.blocks[height]
}
// nolint:unused // ignore
func (ml *mockBlockStore) SaveBlock(block *types.Block, part *types.PartSet, commit *types.Commit) {
ml.blocks[block.Height] = block
}
type mockBlockApplier struct {
}
// XXX: Add whitelist/blacklist?
func (mba *mockBlockApplier) ApplyBlock(
@@ -115,7 +130,8 @@ func (sio *mockSwitchIo) trySwitchToConsensus(state sm.State, skipWAL bool) bool
return true
}
func (sio *mockSwitchIo) broadcastStatusRequest() {
func (sio *mockSwitchIo) broadcastStatusRequest() error {
return nil
}
type testReactorParams struct {
@@ -143,9 +159,7 @@ func newTestReactor(p testReactorParams) *BlockchainReactor {
panic(fmt.Errorf("error start app: %w", err))
}
db := dbm.NewMemDB()
stateStore := sm.NewStore(db, sm.StoreOptions{
DiscardABCIResponses: false,
})
stateStore := sm.NewStore(db)
appl = sm.NewBlockExecutor(stateStore, p.logger, proxyApp.Consensus(), mock.Mempool{}, sm.EmptyEvidencePool{})
if err = stateStore.Save(state); err != nil {
panic(err)
@@ -336,7 +350,9 @@ func newTestReactor(p testReactorParams) *BlockchainReactor {
// }
func TestReactorHelperMode(t *testing.T) {
channelID := byte(0x40)
var (
channelID = byte(0x40)
)
config := cfg.ResetTestRoot("blockchain_reactor_v2_test")
defer os.RemoveAll(config.RootDir)
@@ -352,7 +368,7 @@ func TestReactorHelperMode(t *testing.T) {
type testEvent struct {
peer string
event proto.Message
event interface{}
}
tests := []struct {
@@ -364,10 +380,10 @@ func TestReactorHelperMode(t *testing.T) {
name: "status request",
params: params,
msgs: []testEvent{
{"P1", &bcproto.StatusRequest{}},
{"P1", &bcproto.BlockRequest{Height: 13}},
{"P1", &bcproto.BlockRequest{Height: 20}},
{"P1", &bcproto.BlockRequest{Height: 22}},
{"P1", bcproto.StatusRequest{}},
{"P1", bcproto.BlockRequest{Height: 13}},
{"P1", bcproto.BlockRequest{Height: 20}},
{"P1", bcproto.BlockRequest{Height: 22}},
},
},
}
@@ -384,27 +400,25 @@ func TestReactorHelperMode(t *testing.T) {
for i := 0; i < len(tt.msgs); i++ {
step := tt.msgs[i]
switch ev := step.event.(type) {
case *bcproto.StatusRequest:
case bcproto.StatusRequest:
old := mockSwitch.numStatusResponse
reactor.ReceiveEnvelope(p2p.Envelope{
ChannelID: channelID,
Src: mockPeer{id: p2p.ID(step.peer)},
Message: ev})
msg, err := bc.EncodeMsg(&ev)
assert.NoError(t, err)
reactor.Receive(channelID, mockPeer{id: p2p.ID(step.peer)}, msg)
assert.Equal(t, old+1, mockSwitch.numStatusResponse)
case *bcproto.BlockRequest:
case bcproto.BlockRequest:
if ev.Height > params.startHeight {
old := mockSwitch.numNoBlockResponse
reactor.ReceiveEnvelope(p2p.Envelope{
ChannelID: channelID,
Src: mockPeer{id: p2p.ID(step.peer)},
Message: ev})
msg, err := bc.EncodeMsg(&ev)
assert.NoError(t, err)
reactor.Receive(channelID, mockPeer{id: p2p.ID(step.peer)}, msg)
assert.Equal(t, old+1, mockSwitch.numNoBlockResponse)
} else {
old := mockSwitch.numBlockResponse
reactor.ReceiveEnvelope(p2p.Envelope{
ChannelID: channelID,
Src: mockPeer{id: p2p.ID(step.peer)},
Message: ev})
msg, err := bc.EncodeMsg(&ev)
assert.NoError(t, err)
assert.NoError(t, err)
reactor.Receive(channelID, mockPeer{id: p2p.ID(step.peer)}, msg)
assert.Equal(t, old+1, mockSwitch.numBlockResponse)
}
}
@@ -415,34 +429,6 @@ func TestReactorHelperMode(t *testing.T) {
}
}
func TestLegacyReactorReceiveBasic(t *testing.T) {
config := cfg.ResetTestRoot("blockchain_reactor_v2_test")
defer os.RemoveAll(config.RootDir)
genDoc, privVals := randGenesisDoc(config.ChainID(), 1, false, 30)
params := testReactorParams{
logger: log.TestingLogger(),
genDoc: genDoc,
privVals: privVals,
startHeight: 20,
mockA: true,
}
reactor := newTestReactor(params)
mockSwitch := &mockSwitchIo{switchedToConsensus: false}
reactor.io = mockSwitch
peer := p2p.CreateRandomPeer(false)
reactor.InitPeer(peer)
reactor.AddPeer(peer)
m := &bcproto.StatusRequest{}
wm := m.Wrap()
msg, err := proto.Marshal(wm)
assert.NoError(t, err)
assert.NotPanics(t, func() {
reactor.Receive(BlockchainChannel, peer, msg)
})
}
func TestReactorSetSwitchNil(t *testing.T) {
config := cfg.ResetTestRoot("blockchain_reactor_v2_test")
defer os.RemoveAll(config.RootDir)
@@ -479,8 +465,7 @@ type testApp struct {
}
func randGenesisDoc(chainID string, numValidators int, randPower bool, minPower int64) (
*types.GenesisDoc, []types.PrivValidator,
) {
*types.GenesisDoc, []types.PrivValidator) {
validators := make([]types.GenesisValidator, numValidators)
privValidators := make([]types.PrivValidator, numValidators)
for i := 0; i < numValidators; i++ {
@@ -505,8 +490,7 @@ func randGenesisDoc(chainID string, numValidators int, randPower bool, minPower
func newReactorStore(
genDoc *types.GenesisDoc,
privVals []types.PrivValidator,
maxBlockHeight int64,
) (*store.BlockStore, sm.State, *sm.BlockExecutor) {
maxBlockHeight int64) (*store.BlockStore, sm.State, *sm.BlockExecutor) {
if len(privVals) != 1 {
panic("only support one validator")
}
@@ -520,19 +504,14 @@ func newReactorStore(
stateDB := dbm.NewMemDB()
blockStore := store.NewBlockStore(dbm.NewMemDB())
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
stateStore := sm.NewStore(stateDB)
state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
if err != nil {
panic(fmt.Errorf("error constructing state from genesis file: %w", err))
}
db := dbm.NewMemDB()
stateStore = sm.NewStore(db, sm.StoreOptions{
DiscardABCIResponses: false,
},
)
stateStore = sm.NewStore(db)
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(),
mock.Mempool{}, sm.EmptyEvidencePool{})
if err = stateStore.Save(state); err != nil {
View File
@@ -52,6 +52,11 @@ func (rt *Routine) setLogger(logger log.Logger) {
rt.logger = logger
}
// nolint:unused
func (rt *Routine) setMetrics(metrics *Metrics) {
rt.metrics = metrics
}
func (rt *Routine) start() {
rt.logger.Info("routine start", "msg", log.NewLazySprintf("%s: run", rt.name))
running := atomic.CompareAndSwapUint32(rt.running, uint32(0), uint32(1))

View File

@@ -366,7 +366,7 @@ func (sc *scheduler) setStateAtHeight(height int64, state blockState) {
}
// CONTRACT: peer exists and in Ready state.
func (sc *scheduler) markReceived(peerID p2p.ID, height int64, size int, now time.Time) error {
func (sc *scheduler) markReceived(peerID p2p.ID, height int64, size int64, now time.Time) error {
peer := sc.peers[peerID]
if state := sc.getStateAtHeight(height); state != blockStatePending || sc.pendingBlocks[height] != peerID {
@@ -379,7 +379,7 @@ func (sc *scheduler) markReceived(peerID p2p.ID, height int64, size int, now tim
height, pendingTime, now)
}
peer.lastRate = int64(size) / now.Sub(pendingTime).Nanoseconds()
peer.lastRate = size / now.Sub(pendingTime).Nanoseconds()
sc.setStateAtHeight(height, blockStateReceived)
delete(sc.pendingBlocks, height)
@@ -532,7 +532,7 @@ func (sc *scheduler) handleBlockResponse(event bcBlockResponse) (Event, error) {
return noOp, nil
}
err = sc.markReceived(event.peerID, event.block.Height, event.block.Size(), event.time)
err = sc.markReceived(event.peerID, event.block.Height, event.size, event.time)
if err != nil {
sc.removePeer(event.peerID)
return scPeerError{peerID: event.peerID, reason: err}, nil

View File

@@ -853,7 +853,7 @@ func TestScMarkReceived(t *testing.T) {
type args struct {
peerID p2p.ID
height int64
size int
size int64
tm time.Time
}
tests := []struct {

View File

@@ -3,6 +3,7 @@ package debug
import (
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"time"
@@ -81,7 +82,7 @@ func dumpCmdHandler(_ *cobra.Command, args []string) error {
func dumpDebugData(outDir string, conf *cfg.Config, rpc *rpchttp.HTTP) {
start := time.Now().UTC()
tmpDir, err := os.MkdirTemp(outDir, "tendermint_debug_tmp")
tmpDir, err := ioutil.TempDir(outDir, "tendermint_debug_tmp")
if err != nil {
logger.Error("failed to create temporary directory", "dir", tmpDir, "error", err)
return

View File

@@ -5,6 +5,7 @@ import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
@@ -67,6 +68,7 @@ func zipDir(src, dest string) error {
_, err = io.Copy(headerWriter, file)
return err
})
}
// copyFile copies a file from src to dest and returns an error upon failure. The
@@ -109,5 +111,5 @@ func writeStateJSONToFile(state interface{}, dir, filename string) error {
return fmt.Errorf("failed to encode state dump: %w", err)
}
return os.WriteFile(path.Join(dir, filename), stateJSON, os.ModePerm)
return ioutil.WriteFile(path.Join(dir, filename), stateJSON, os.ModePerm)
}

View File

@@ -3,6 +3,7 @@ package debug
import (
"errors"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
@@ -55,7 +56,7 @@ func killCmdHandler(cmd *cobra.Command, args []string) error {
// Create a temporary directory which will contain all the state dumps and
// relevant files and directories that will be compressed into a file.
tmpDir, err := os.MkdirTemp(os.TempDir(), "tendermint_debug_tmp")
tmpDir, err := ioutil.TempDir(os.TempDir(), "tendermint_debug_tmp")
if err != nil {
return fmt.Errorf("failed to create temporary directory: %w", err)
}
@@ -104,7 +105,7 @@ func killProc(pid uint64, dir string) error {
// pipe STDERR output from tailing the Tendermint process to a file
//
// NOTE: This will only work on UNIX systems.
cmd := exec.Command("tail", "-f", fmt.Sprintf("/proc/%d/fd/2", pid)) //nolint: gosec
cmd := exec.Command("tail", "-f", fmt.Sprintf("/proc/%d/fd/2", pid)) // nolint: gosec
outFile, err := os.Create(filepath.Join(dir, "stacktrace.out"))
if err != nil {

View File

@@ -3,7 +3,7 @@ package debug
import (
"context"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"path"
@@ -67,17 +67,16 @@ func copyConfig(home, dir string) error {
func dumpProfile(dir, addr, profile string, debug int) error {
endpoint := fmt.Sprintf("%s/debug/pprof/%s?debug=%d", addr, profile, debug)
//nolint:gosec,nolintlint
resp, err := http.Get(endpoint)
resp, err := http.Get(endpoint) // nolint: gosec
if err != nil {
return fmt.Errorf("failed to query for %s profile: %w", profile, err)
}
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return fmt.Errorf("failed to read %s profile response body: %w", profile, err)
}
return os.WriteFile(path.Join(dir, fmt.Sprintf("%s.out", profile)), body, os.ModePerm)
return ioutil.WriteFile(path.Join(dir, fmt.Sprintf("%s.out", profile)), body, os.ModePerm)
}

View File

@@ -1,232 +0,0 @@
package commands
import (
"errors"
"fmt"
"strings"
"github.com/spf13/cobra"
dbm "github.com/tendermint/tm-db"
abcitypes "github.com/tendermint/tendermint/abci/types"
tmcfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/libs/progressbar"
"github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/state/indexer"
blockidxkv "github.com/tendermint/tendermint/state/indexer/block/kv"
"github.com/tendermint/tendermint/state/indexer/sink/psql"
"github.com/tendermint/tendermint/state/txindex"
"github.com/tendermint/tendermint/state/txindex/kv"
"github.com/tendermint/tendermint/types"
)
const (
reindexFailed = "event re-index failed: "
)
var (
ErrHeightNotAvailable = errors.New("height is not available")
ErrInvalidRequest = errors.New("invalid request")
)
// ReIndexEventCmd constructs a command to re-index events in a block height interval.
var ReIndexEventCmd = &cobra.Command{
Use: "reindex-event",
Short: "Re-index events to the event store backends",
Long: `
reindex-event is an offline tool to re-index block and tx events to the event sinks.
You can run this command when the event store backend has been dropped/disconnected or you
want to replace the backend. The default start-height is 0, meaning the tool will start
reindexing from the base block height (inclusive); the default end-height is 0, meaning
the tool will reindex up to the latest block height (inclusive). You can omit
either or both arguments.
Note: This operation requires ABCIResponses. Do not set DiscardABCIResponses to true if you
want to use this command.
`,
Example: `
tendermint reindex-event
tendermint reindex-event --start-height 2
tendermint reindex-event --end-height 10
tendermint reindex-event --start-height 2 --end-height 10
`,
Run: func(cmd *cobra.Command, args []string) {
bs, ss, err := loadStateAndBlockStore(config)
if err != nil {
fmt.Println(reindexFailed, err)
return
}
if err := checkValidHeight(bs); err != nil {
fmt.Println(reindexFailed, err)
return
}
bi, ti, err := loadEventSinks(config)
if err != nil {
fmt.Println(reindexFailed, err)
return
}
riArgs := eventReIndexArgs{
startHeight: startHeight,
endHeight: endHeight,
blockIndexer: bi,
txIndexer: ti,
blockStore: bs,
stateStore: ss,
}
if err := eventReIndex(cmd, riArgs); err != nil {
panic(fmt.Errorf("%s: %w", reindexFailed, err))
}
fmt.Println("event re-index finished")
},
}
var (
startHeight int64
endHeight int64
)
func init() {
ReIndexEventCmd.Flags().Int64Var(&startHeight, "start-height", 0, "the block height to start the re-index from (inclusive)")
ReIndexEventCmd.Flags().Int64Var(&endHeight, "end-height", 0, "the block height to end the re-index at (inclusive)")
}
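// loadEventSinks returns the block and tx indexers for the configured tx-index
// backend: "kv" opens the local tx_index database, "psql" connects to the
// configured PostgreSQL sink, and "null" is rejected because there is nothing
// to re-index into.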
func loadEventSinks(cfg *tmcfg.Config) (indexer.BlockIndexer, txindex.TxIndexer, error) {
switch strings.ToLower(cfg.TxIndex.Indexer) {
case "null":
return nil, nil, errors.New("found null event sink, please check the tx-index section in the config.toml")
case "psql":
conn := cfg.TxIndex.PsqlConn
if conn == "" {
return nil, nil, errors.New("the psql connection settings cannot be empty")
}
es, err := psql.NewEventSink(conn, cfg.ChainID())
if err != nil {
return nil, nil, err
}
return es.BlockIndexer(), es.TxIndexer(), nil
case "kv":
store, err := dbm.NewDB("tx_index", dbm.BackendType(cfg.DBBackend), cfg.DBDir())
if err != nil {
return nil, nil, err
}
txIndexer := kv.NewTxIndex(store)
blockIndexer := blockidxkv.New(dbm.NewPrefixDB(store, []byte("block_events")))
return blockIndexer, txIndexer, nil
default:
return nil, nil, fmt.Errorf("unsupported event sink type: %s", cfg.TxIndex.Indexer)
}
}
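// eventReIndexArgs bundles the height range, indexers, and stores that
// eventReIndex operates on.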
type eventReIndexArgs struct {
startHeight int64
endHeight int64
blockIndexer indexer.BlockIndexer
txIndexer txindex.TxIndexer
blockStore state.BlockStore
stateStore state.Store
}
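// eventReIndex walks the blocks in [startHeight, endHeight], loading each block
// and its ABCI responses, and re-indexes the resulting block and tx events into
// the given sinks. It aborts early if the command context is cancelled.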
func eventReIndex(cmd *cobra.Command, args eventReIndexArgs) error {
var bar progressbar.Bar
bar.NewOption(args.startHeight-1, args.endHeight)
fmt.Println("start re-indexing events:")
defer bar.Finish()
for i := args.startHeight; i <= args.endHeight; i++ {
select {
case <-cmd.Context().Done():
return fmt.Errorf("event re-index terminated at height %d: %w", i, cmd.Context().Err())
default:
b := args.blockStore.LoadBlock(i)
if b == nil {
return fmt.Errorf("not able to load block at height %d from the blockstore", i)
}
r, err := args.stateStore.LoadABCIResponses(i)
if err != nil {
return fmt.Errorf("not able to load ABCI Response at height %d from the statestore", i)
}
e := types.EventDataNewBlockHeader{
Header: b.Header,
NumTxs: int64(len(b.Txs)),
ResultBeginBlock: *r.BeginBlock,
ResultEndBlock: *r.EndBlock,
}
var batch *txindex.Batch
if e.NumTxs > 0 {
batch = txindex.NewBatch(e.NumTxs)
for i := range b.Data.Txs {
tr := abcitypes.TxResult{
Height: b.Height,
Index: uint32(i),
Tx: b.Data.Txs[i],
Result: *(r.DeliverTxs[i]),
}
if err = batch.Add(&tr); err != nil {
return fmt.Errorf("adding tx to batch: %w", err)
}
}
if err := args.txIndexer.AddBatch(batch); err != nil {
return fmt.Errorf("tx event re-index at height %d failed: %w", i, err)
}
}
if err := args.blockIndexer.Index(e); err != nil {
return fmt.Errorf("block event re-index at height %d failed: %w", i, err)
}
}
bar.Play(i)
}
return nil
}
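// checkValidHeight defaults startHeight/endHeight to the blockstore's base and
// latest heights when they are zero, and rejects ranges that fall outside the
// stored blocks.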
func checkValidHeight(bs state.BlockStore) error {
base := bs.Base()
if startHeight == 0 {
startHeight = base
fmt.Printf("set the start block height to the base height of the blockstore %d \n", base)
}
if startHeight < base {
return fmt.Errorf("%s (requested start height: %d, base height: %d)",
ErrHeightNotAvailable, startHeight, base)
}
height := bs.Height()
if startHeight > height {
return fmt.Errorf(
"%s (requested start height: %d, store height: %d)", ErrHeightNotAvailable, startHeight, height)
}
if endHeight == 0 || endHeight > height {
endHeight = height
fmt.Printf("set the end block height to the latest height of the blockstore %d \n", height)
}
if endHeight < base {
return fmt.Errorf(
"%s (requested end height: %d, base height: %d)", ErrHeightNotAvailable, endHeight, base)
}
if endHeight < startHeight {
return fmt.Errorf(
"%s (requested the end height: %d is less than the start height: %d)",
ErrInvalidRequest, startHeight, endHeight)
}
return nil
}

View File

@@ -1,193 +0,0 @@
package commands
import (
"context"
"errors"
"testing"
"github.com/spf13/cobra"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
dbm "github.com/tendermint/tm-db"
abcitypes "github.com/tendermint/tendermint/abci/types"
tmcfg "github.com/tendermint/tendermint/config"
prototmstate "github.com/tendermint/tendermint/proto/tendermint/state"
blockmocks "github.com/tendermint/tendermint/state/indexer/mocks"
"github.com/tendermint/tendermint/state/mocks"
txmocks "github.com/tendermint/tendermint/state/txindex/mocks"
"github.com/tendermint/tendermint/types"
)
const (
height int64 = 10
base int64 = 2
)
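// setupReIndexEventCmd returns a stub command with an initialized context so
// eventReIndex can be driven directly in tests.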
func setupReIndexEventCmd() *cobra.Command {
reIndexEventCmd := &cobra.Command{
Use: ReIndexEventCmd.Use,
Run: func(cmd *cobra.Command, args []string) {},
}
_ = reIndexEventCmd.ExecuteContext(context.Background())
return reIndexEventCmd
}
func TestReIndexEventCheckHeight(t *testing.T) {
mockBlockStore := &mocks.BlockStore{}
mockBlockStore.
On("Base").Return(base).
On("Height").Return(height)
testCases := []struct {
startHeight int64
endHeight int64
validHeight bool
}{
{0, 0, true},
{0, base, true},
{0, base - 1, false},
{0, height, true},
{0, height + 1, true},
{0, 0, true},
{base - 1, 0, false},
{base, 0, true},
{base, base, true},
{base, base - 1, false},
{base, height, true},
{base, height + 1, true},
{height, 0, true},
{height, base, false},
{height, height - 1, false},
{height, height, true},
{height, height + 1, true},
{height + 1, 0, false},
}
for _, tc := range testCases {
startHeight = tc.startHeight
endHeight = tc.endHeight
err := checkValidHeight(mockBlockStore)
if tc.validHeight {
require.NoError(t, err)
} else {
require.Error(t, err)
}
}
}
func TestLoadEventSink(t *testing.T) {
testCases := []struct {
sinks string
connURL string
loadErr bool
}{
{"", "", true},
{"NULL", "", true},
{"KV", "", false},
{"PSQL", "", true}, // true because empty connect url
// skip to test PSQL connect with correct url
{"UnsupportedSinkType", "wrongUrl", true},
}
for idx, tc := range testCases {
cfg := tmcfg.TestConfig()
cfg.TxIndex.Indexer = tc.sinks
cfg.TxIndex.PsqlConn = tc.connURL
_, _, err := loadEventSinks(cfg)
if tc.loadErr {
require.Error(t, err, idx)
} else {
require.NoError(t, err, idx)
}
}
}
func TestLoadBlockStore(t *testing.T) {
cfg := tmcfg.TestConfig()
cfg.DBPath = t.TempDir()
_, _, err := loadStateAndBlockStore(cfg)
require.Error(t, err)
_, err = dbm.NewDB("blockstore", dbm.GoLevelDBBackend, cfg.DBDir())
require.NoError(t, err)
// Get StateStore
_, err = dbm.NewDB("state", dbm.GoLevelDBBackend, cfg.DBDir())
require.NoError(t, err)
bs, ss, err := loadStateAndBlockStore(cfg)
require.NoError(t, err)
require.NotNil(t, bs)
require.NotNil(t, ss)
}
func TestReIndexEvent(t *testing.T) {
mockBlockStore := &mocks.BlockStore{}
mockStateStore := &mocks.Store{}
mockBlockIndexer := &blockmocks.BlockIndexer{}
mockTxIndexer := &txmocks.TxIndexer{}
mockBlockStore.
On("Base").Return(base).
On("Height").Return(height).
On("LoadBlock", base).Return(nil).Once().
On("LoadBlock", base).Return(&types.Block{Data: types.Data{Txs: types.Txs{make(types.Tx, 1)}}}).
On("LoadBlock", height).Return(&types.Block{Data: types.Data{Txs: types.Txs{make(types.Tx, 1)}}})
dtx := abcitypes.ResponseDeliverTx{}
abciResp := &prototmstate.ABCIResponses{
DeliverTxs: []*abcitypes.ResponseDeliverTx{&dtx},
EndBlock: &abcitypes.ResponseEndBlock{},
BeginBlock: &abcitypes.ResponseBeginBlock{},
}
mockBlockIndexer.
On("Index", mock.AnythingOfType("types.EventDataNewBlockHeader")).Return(errors.New("")).Once().
On("Index", mock.AnythingOfType("types.EventDataNewBlockHeader")).Return(nil)
mockTxIndexer.
On("AddBatch", mock.AnythingOfType("*txindex.Batch")).Return(errors.New("")).Once().
On("AddBatch", mock.AnythingOfType("*txindex.Batch")).Return(nil)
mockStateStore.
On("LoadABCIResponses", base).Return(nil, errors.New("")).Once().
On("LoadABCIResponses", base).Return(abciResp, nil).
On("LoadABCIResponses", height).Return(abciResp, nil)
testCases := []struct {
startHeight int64
endHeight int64
reIndexErr bool
}{
{base, height, true}, // LoadBlock error
{base, height, true}, // LoadABCIResponses error
{base, height, true}, // index block event error
{base, height, true}, // index tx event error
{base, base, false},
{height, height, false},
}
for _, tc := range testCases {
args := eventReIndexArgs{
startHeight: tc.startHeight,
endHeight: tc.endHeight,
blockIndexer: mockBlockIndexer,
txIndexer: mockTxIndexer,
blockStore: mockBlockStore,
stateStore: mockStateStore,
}
err := eventReIndex(setupReIndexEventCmd(), args)
if tc.reIndexErr {
require.Error(t, err)
} else {
require.NoError(t, err)
}
}
}

View File

@@ -29,7 +29,7 @@ var ResetStateCmd = &cobra.Command{
Short: "Remove all the data and WAL",
PreRun: deprecateSnakeCase,
RunE: func(cmd *cobra.Command, args []string) (err error) {
config, err = ParseConfig(cmd)
config, err = ParseConfig()
if err != nil {
return err
}
@@ -54,7 +54,7 @@ var ResetPrivValidatorCmd = &cobra.Command{
// XXX: this is totally unsafe.
// it's only suitable for testnets.
func resetAllCmd(cmd *cobra.Command, args []string) (err error) {
config, err = ParseConfig(cmd)
config, err = ParseConfig()
if err != nil {
return err
}
@@ -71,7 +71,7 @@ func resetAllCmd(cmd *cobra.Command, args []string) (err error) {
// XXX: this is totally unsafe.
// it's only suitable for testnets.
func resetPrivValidator(cmd *cobra.Command, args []string) (err error) {
config, err = ParseConfig(cmd)
config, err = ParseConfig()
if err != nil {
return err
}

View File

@@ -2,14 +2,12 @@ package commands
import (
"fmt"
"path/filepath"
"github.com/spf13/cobra"
dbm "github.com/tendermint/tm-db"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/libs/os"
"github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/store"
)
@@ -57,10 +55,6 @@ func RollbackState(config *cfg.Config) (int64, []byte, error) {
func loadStateAndBlockStore(config *cfg.Config) (*store.BlockStore, state.Store, error) {
dbType := dbm.BackendType(config.DBBackend)
if !os.FileExists(filepath.Join(config.DBDir(), "blockstore.db")) {
return nil, nil, fmt.Errorf("no blockstore found in %v", config.DBDir())
}
// Get BlockStore
blockStoreDB, err := dbm.NewDB("blockstore", dbType, config.DBDir())
if err != nil {
@@ -68,18 +62,12 @@ func loadStateAndBlockStore(config *cfg.Config) (*store.BlockStore, state.Store,
}
blockStore := store.NewBlockStore(blockStoreDB)
if !os.FileExists(filepath.Join(config.DBDir(), "state.db")) {
return nil, nil, fmt.Errorf("no statestore found in %v", config.DBDir())
}
// Get StateStore
stateDB, err := dbm.NewDB("state", dbType, config.DBDir())
if err != nil {
return nil, nil, err
}
stateStore := state.NewStore(stateDB, state.StoreOptions{
DiscardABCIResponses: config.Storage.DiscardABCIResponses,
})
stateStore := state.NewStore(stateDB)
return blockStore, stateStore, nil
}

View File

@@ -29,25 +29,12 @@ func registerFlagsRootCmd(cmd *cobra.Command) {
// ParseConfig retrieves the default environment configuration,
// sets up the Tendermint root and ensures that the root exists
func ParseConfig(cmd *cobra.Command) (*cfg.Config, error) {
func ParseConfig() (*cfg.Config, error) {
conf := cfg.DefaultConfig()
err := viper.Unmarshal(conf)
if err != nil {
return nil, err
}
var home string
if os.Getenv("TMHOME") != "" {
home = os.Getenv("TMHOME")
} else {
home, err = cmd.Flags().GetString(cli.HomeFlag)
if err != nil {
return nil, err
}
}
conf.RootDir = home
conf.SetRoot(conf.RootDir)
cfg.EnsureRoot(conf.RootDir)
if err := conf.ValidateBasic(); err != nil {
@@ -65,7 +52,7 @@ var RootCmd = &cobra.Command{
return nil
}
config, err = ParseConfig(cmd)
config, err = ParseConfig()
if err != nil {
return err
}

View File

@@ -2,6 +2,7 @@ package commands
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strconv"
@@ -17,7 +18,9 @@ import (
tmos "github.com/tendermint/tendermint/libs/os"
)
var defaultRoot = os.ExpandEnv("$HOME/.some/test/dir")
var (
defaultRoot = os.ExpandEnv("$HOME/.some/test/dir")
)
// clearConfig clears env vars, the given root dir, and resets viper.
func clearConfig(dir string) {
@@ -85,6 +88,7 @@ func TestRootHome(t *testing.T) {
}
func TestRootFlagsEnv(t *testing.T) {
// defaults
defaults := cfg.DefaultConfig()
defaultLogLvl := defaults.LogLevel
@@ -112,6 +116,7 @@ func TestRootFlagsEnv(t *testing.T) {
}
func TestRootConfig(t *testing.T) {
// write non-default config
nonDefaultLogLvl := "abc:debug"
cvals := map[string]string{
@@ -135,7 +140,7 @@ func TestRootConfig(t *testing.T) {
// XXX: path must match cfg.defaultConfigPath
configFilePath := filepath.Join(defaultRoot, "config")
err := tmos.EnsureDir(configFilePath, 0o700)
err := tmos.EnsureDir(configFilePath, 0700)
require.Nil(t, err)
// write the non-defaults to a different path
@@ -163,5 +168,5 @@ func WriteConfigVals(dir string, vals map[string]string) error {
data += fmt.Sprintf("%s = \"%s\"\n", k, v)
}
cfile := filepath.Join(dir, "config.toml")
return os.WriteFile(cfile, []byte(data), 0o600)
return ioutil.WriteFile(cfile, []byte(data), 0600)
}

View File

@@ -63,8 +63,6 @@ func AddNodeFlags(cmd *cobra.Command) {
"p2p.laddr",
config.P2P.ListenAddress,
"node listen address. (0.0.0.0:0 means any interface, any port)")
cmd.Flags().String("p2p.external-address",
config.P2P.ExternalAddress, "ip:port address to advertise to peers for them to dial")
cmd.Flags().String("p2p.seeds", config.P2P.Seeds, "comma-delimited ID@host:port seed nodes")
cmd.Flags().String("p2p.persistent_peers", config.P2P.PersistentPeers, "comma-delimited ID@host:port persistent peers")
cmd.Flags().String("p2p.unconditional_peer_ids",

View File

@@ -1,7 +1,6 @@
package commands
import (
"encoding/json"
"fmt"
"github.com/spf13/cobra"
@@ -14,25 +13,6 @@ var VersionCmd = &cobra.Command{
Use: "version",
Short: "Show version info",
Run: func(cmd *cobra.Command, args []string) {
if verbose {
values, _ := json.MarshalIndent(struct {
Tendermint string `json:"tendermint"`
ABCI string `json:"abci"`
BlockProtocol uint64 `json:"block_protocol"`
P2PProtocol uint64 `json:"p2p_protocol"`
}{
Tendermint: version.TMCoreSemVer,
ABCI: version.ABCIVersion,
BlockProtocol: version.BlockProtocol,
P2PProtocol: version.P2PProtocol,
}, "", " ")
fmt.Println(string(values))
} else {
fmt.Println(version.TMCoreSemVer)
}
fmt.Println(version.TMCoreSemVer)
},
}
func init() {
VersionCmd.Flags().BoolVarP(&verbose, "verbose", "v", false, "Show protocol and library versions")
}

View File

@@ -18,7 +18,6 @@ func main() {
cmd.InitFilesCmd,
cmd.ProbeUpnpCmd,
cmd.LightCmd,
cmd.ReIndexEventCmd,
cmd.ReplayCmd,
cmd.ReplayConsoleCmd,
cmd.ResetAllCmd,

View File

@@ -74,7 +74,6 @@ type Config struct {
StateSync *StateSyncConfig `mapstructure:"statesync"`
FastSync *FastSyncConfig `mapstructure:"fastsync"`
Consensus *ConsensusConfig `mapstructure:"consensus"`
Storage *StorageConfig `mapstructure:"storage"`
TxIndex *TxIndexConfig `mapstructure:"tx_index"`
Instrumentation *InstrumentationConfig `mapstructure:"instrumentation"`
}
@@ -89,7 +88,6 @@ func DefaultConfig() *Config {
StateSync: DefaultStateSyncConfig(),
FastSync: DefaultFastSyncConfig(),
Consensus: DefaultConsensusConfig(),
Storage: DefaultStorageConfig(),
TxIndex: DefaultTxIndexConfig(),
Instrumentation: DefaultInstrumentationConfig(),
}
@@ -105,7 +103,6 @@ func TestConfig() *Config {
StateSync: TestStateSyncConfig(),
FastSync: TestFastSyncConfig(),
Consensus: TestConsensusConfig(),
Storage: TestStorageConfig(),
TxIndex: TestTxIndexConfig(),
Instrumentation: TestInstrumentationConfig(),
}
@@ -1074,41 +1071,11 @@ func (cfg *ConsensusConfig) ValidateBasic() error {
}
//-----------------------------------------------------------------------------
// StorageConfig
// StorageConfig allows more fine-grained control over certain storage-related
// behavior.
type StorageConfig struct {
// Set to false to ensure ABCI responses are persisted. ABCI responses are
// required for `/block_results` RPC queries, and to reindex events in the
// command-line tool.
DiscardABCIResponses bool `mapstructure:"discard_abci_responses"`
}
// DefaultStorageConfig returns the default configuration options relating to
// Tendermint storage optimization.
func DefaultStorageConfig() *StorageConfig {
return &StorageConfig{
DiscardABCIResponses: false,
}
}
// TestStorageConfig returns storage configuration that can be used for
// testing.
func TestStorageConfig() *StorageConfig {
return &StorageConfig{
DiscardABCIResponses: false,
}
}
// -----------------------------------------------------------------------------
// TxIndexConfig
// Remember that Event has the following structure:
// type: [
//
// key: value,
// ...
//
// key: value,
// ...
// ]
//
// CompositeKeys are constructed by `type.key`

View File

@@ -27,6 +27,7 @@ func TestDefaultConfig(t *testing.T) {
assert.Equal("/foo/bar", cfg.GenesisFile())
assert.Equal("/opt/data", cfg.DBDir())
assert.Equal("/foo/wal/mem", cfg.Mempool.WalDir())
}
func TestConfigValidateBasic(t *testing.T) {
@@ -139,8 +140,8 @@ func TestFastSyncConfigValidateBasic(t *testing.T) {
assert.Error(t, cfg.ValidateBasic())
}
//nolint:lll
func TestConsensusConfig_ValidateBasic(t *testing.T) {
// nolint: lll
testcases := map[string]struct {
modify func(*ConsensusConfig)
expectErr bool
@@ -165,7 +166,6 @@ func TestConsensusConfig_ValidateBasic(t *testing.T) {
"PeerQueryMaj23SleepDuration negative": {func(c *ConsensusConfig) { c.PeerQueryMaj23SleepDuration = -1 }, true},
"DoubleSignCheckHeight negative": {func(c *ConsensusConfig) { c.DoubleSignCheckHeight = -1 }, true},
}
for desc, tc := range testcases {
tc := tc // appease linter
t.Run(desc, func(t *testing.T) {

View File

@@ -3,7 +3,7 @@ package config
import (
"bytes"
"fmt"
"os"
"io/ioutil"
"path/filepath"
"strings"
"text/template"
@@ -12,7 +12,7 @@ import (
)
// DefaultDirPerm is the default permissions used when creating directories.
const DefaultDirPerm = 0o700
const DefaultDirPerm = 0700
var configTemplate *template.Template
@@ -63,7 +63,7 @@ func WriteConfigFile(configFilePath string, config *Config) {
panic(err)
}
tmos.MustWriteFile(configFilePath, buffer.Bytes(), 0o644)
tmos.MustWriteFile(configFilePath, buffer.Bytes(), 0644)
}
// Note: any changes to the comments/variables/mapstructure
@@ -480,17 +480,6 @@ create_empty_blocks_interval = "{{ .Consensus.CreateEmptyBlocksInterval }}"
peer_gossip_sleep_duration = "{{ .Consensus.PeerGossipSleepDuration }}"
peer_query_maj23_sleep_duration = "{{ .Consensus.PeerQueryMaj23SleepDuration }}"
#######################################################
### Storage Configuration Options ###
#######################################################
[storage]
# Set to true to discard ABCI responses from the state store, which can save a
# considerable amount of disk space. Set to false to ensure ABCI responses are
# persisted. ABCI responses are required for /block_results RPC queries, and to
# reindex events in the command-line tool.
discard_abci_responses = {{ .Storage.DiscardABCIResponses}}
#######################################################
### Transaction Indexer Configuration Options ###
#######################################################
@@ -544,7 +533,7 @@ func ResetTestRoot(testName string) *Config {
func ResetTestRootWithChainID(testName string, chainID string) *Config {
// create a unique, concurrency-safe test directory under os.TempDir()
rootDir, err := os.MkdirTemp("", fmt.Sprintf("%s-%s_", chainID, testName))
rootDir, err := ioutil.TempDir("", fmt.Sprintf("%s-%s_", chainID, testName))
if err != nil {
panic(err)
}
@@ -571,11 +560,11 @@ func ResetTestRootWithChainID(testName string, chainID string) *Config {
chainID = "tendermint_test"
}
testGenesis := fmt.Sprintf(testGenesisFmt, chainID)
tmos.MustWriteFile(genesisFilePath, []byte(testGenesis), 0o644)
tmos.MustWriteFile(genesisFilePath, []byte(testGenesis), 0644)
}
// we always overwrite the priv val
tmos.MustWriteFile(privKeyFilePath, []byte(testPrivValidatorKey), 0o644)
tmos.MustWriteFile(privStateFilePath, []byte(testPrivValidatorState), 0o644)
tmos.MustWriteFile(privKeyFilePath, []byte(testPrivValidatorKey), 0644)
tmos.MustWriteFile(privStateFilePath, []byte(testPrivValidatorState), 0644)
config := TestConfig().SetRoot(rootDir)
return config

View File

@@ -1,6 +1,7 @@
package config
import (
"io/ioutil"
"os"
"path/filepath"
"strings"
@@ -22,7 +23,7 @@ func TestEnsureRoot(t *testing.T) {
require := require.New(t)
// setup temp dir for test
tmpDir, err := os.MkdirTemp("", "config-test")
tmpDir, err := ioutil.TempDir("", "config-test")
require.Nil(err)
defer os.RemoveAll(tmpDir)
@@ -30,7 +31,7 @@ func TestEnsureRoot(t *testing.T) {
EnsureRoot(tmpDir)
// make sure config is set properly
data, err := os.ReadFile(filepath.Join(tmpDir, defaultConfigFilePath))
data, err := ioutil.ReadFile(filepath.Join(tmpDir, defaultConfigFilePath))
require.Nil(err)
if !checkConfig(string(data)) {
@@ -51,7 +52,7 @@ func TestEnsureTestRoot(t *testing.T) {
rootDir := cfg.RootDir
// make sure config is set properly
data, err := os.ReadFile(filepath.Join(rootDir, defaultConfigFilePath))
data, err := ioutil.ReadFile(filepath.Join(rootDir, defaultConfigFilePath))
require.Nil(err)
if !checkConfig(string(data)) {
@@ -67,7 +68,7 @@ func checkConfig(configFile string) bool {
var valid bool
// list of words we expect in the config
elems := []string{
var elems = []string{
"moniker",
"seeds",
"proxy_app",

View File

@@ -1,3 +1,3 @@
# Consensus
See the [consensus spec](https://github.com/tendermint/tendermint/tree/v0.34.x/spec/consensus) and the [reactor consensus spec](https://github.com/tendermint/tendermint/tree/v0.34.x/spec/reactors/consensus) for more information.
See the [consensus spec](https://github.com/tendermint/spec/tree/master/spec/consensus) and the [reactor consensus spec](https://github.com/tendermint/spec/tree/master/spec/reactors/consensus) for more information.

View File

@@ -26,7 +26,6 @@ import (
mempoolv0 "github.com/tendermint/tendermint/mempool/v0"
mempoolv1 "github.com/tendermint/tendermint/mempool/v1"
"github.com/tendermint/tendermint/p2p"
tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/store"
@@ -51,9 +50,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
for i := 0; i < nValidators; i++ {
logger := consensusLogger().With("test", "byzantine", "validator", i)
stateDB := dbm.NewMemDB() // each state needs its own db
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
stateStore := sm.NewStore(stateDB)
state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc)
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
defer os.RemoveAll(thisConfig.RootDir)
@@ -166,16 +163,10 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
for i, peer := range peerList {
if i < len(peerList)/2 {
bcs.Logger.Info("Signed and pushed vote", "vote", prevote1, "peer", peer)
p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
Message: &tmcons.Vote{Vote: prevote1.ToProto()},
ChannelID: VoteChannel,
}, bcs.Logger)
peer.Send(VoteChannel, MustEncode(&VoteMessage{prevote1}))
} else {
bcs.Logger.Info("Signed and pushed vote", "vote", prevote2, "peer", peer)
p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
Message: &tmcons.Vote{Vote: prevote2.ToProto()},
ChannelID: VoteChannel,
}, bcs.Logger)
peer.Send(VoteChannel, MustEncode(&VoteMessage{prevote2}))
}
}
} else {
@@ -429,7 +420,7 @@ func TestByzantineConflictingProposalsWithPartition(t *testing.T) {
// wait for someone in the big partition (B) to make a block
<-blocksSubs[ind2].Out()
t.Logf("A block has been committed. Healing partition")
t.Log("A block has been committed. Healing partition")
p2p.Connect2Switches(switches, ind0, ind1)
p2p.Connect2Switches(switches, ind0, ind2)
@@ -455,8 +446,8 @@ func TestByzantineConflictingProposalsWithPartition(t *testing.T) {
case <-done:
case <-tick.C:
for i, reactor := range reactors {
t.Logf(fmt.Sprintf("Consensus Reactor %v", i))
t.Logf(fmt.Sprintf("%v", reactor))
t.Log(fmt.Sprintf("Consensus Reactor %v", i))
t.Log(fmt.Sprintf("%v", reactor))
}
t.Fatalf("Timed out waiting for all validators to commit first block")
}
@@ -519,26 +510,18 @@ func sendProposalAndParts(
parts *types.PartSet,
) {
// proposal
p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
ChannelID: DataChannel,
Message: &tmcons.Proposal{Proposal: *proposal.ToProto()},
}, cs.Logger)
msg := &ProposalMessage{Proposal: proposal}
peer.Send(DataChannel, MustEncode(msg))
// parts
for i := 0; i < int(parts.Total()); i++ {
part := parts.GetPart(i)
pp, err := part.ToProto()
if err != nil {
panic(err) // TODO: wbanfield better error handling
msg := &BlockPartMessage{
Height: height, // This tells peer that this part applies to us.
Round: round, // This tells peer that this part applies to us.
Part: part,
}
p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
ChannelID: DataChannel,
Message: &tmcons.BlockPart{
Height: height, // This tells peer that this part applies to us.
Round: round, // This tells peer that this part applies to us.
Part: *pp,
},
}, cs.Logger)
peer.Send(DataChannel, MustEncode(msg))
}
// votes
@@ -546,14 +529,9 @@ func sendProposalAndParts(
prevote, _ := cs.signVote(tmproto.PrevoteType, blockHash, parts.Header())
precommit, _ := cs.signVote(tmproto.PrecommitType, blockHash, parts.Header())
cs.mtx.Unlock()
p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
ChannelID: VoteChannel,
Message: &tmcons.Vote{Vote: prevote.ToProto()},
}, cs.Logger)
p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
ChannelID: VoteChannel,
Message: &tmcons.Vote{Vote: precommit.ToProto()},
}, cs.Logger)
peer.Send(VoteChannel, MustEncode(&VoteMessage{prevote}))
peer.Send(VoteChannel, MustEncode(&VoteMessage{precommit}))
}
//----------------------------------------
@@ -591,10 +569,7 @@ func (br *ByzantineReactor) AddPeer(peer p2p.Peer) {
func (br *ByzantineReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
br.reactor.RemovePeer(peer, reason)
}
func (br *ByzantineReactor) ReceiveEnvelope(e p2p.Envelope) {
br.reactor.ReceiveEnvelope(e)
}
func (br *ByzantineReactor) Receive(chID byte, p p2p.Peer, m []byte) {
br.reactor.Receive(chID, p, m)
func (br *ByzantineReactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {
br.reactor.Receive(chID, peer, msgBytes)
}
func (br *ByzantineReactor) InitPeer(peer p2p.Peer) p2p.Peer { return peer }

View File

@@ -4,8 +4,8 @@ import (
"bytes"
"context"
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"sort"
"sync"
@@ -15,6 +15,8 @@ import (
"github.com/go-kit/log/term"
"github.com/stretchr/testify/require"
"path"
dbm "github.com/tendermint/tm-db"
abcicli "github.com/tendermint/tendermint/abci/client"
@@ -90,8 +92,8 @@ func newValidatorStub(privValidator types.PrivValidator, valIndex int32) *valida
func (vs *validatorStub) signVote(
voteType tmproto.SignedMsgType,
hash []byte,
header types.PartSetHeader,
) (*types.Vote, error) {
header types.PartSetHeader) (*types.Vote, error) {
pubKey, err := vs.PrivValidator.GetPubKey()
if err != nil {
return nil, fmt.Errorf("can't get pubkey: %w", err)
@@ -139,8 +141,7 @@ func signVotes(
voteType tmproto.SignedMsgType,
hash []byte,
header types.PartSetHeader,
vss ...*validatorStub,
) []*types.Vote {
vss ...*validatorStub) []*types.Vote {
votes := make([]*types.Vote, len(vss))
for i, vs := range vss {
votes[i] = signVote(vs, voteType, hash, header)
@@ -427,9 +428,7 @@ func newStateWithConfigAndBlockStore(
// Make State
stateDB := blockDB
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
stateStore := sm.NewStore(stateDB)
if err := stateStore.Save(state); err != nil { // for save height 1's validators info
panic(err)
}
@@ -451,7 +450,7 @@ func newStateWithConfigAndBlockStore(
func loadPrivValidator(config *cfg.Config) *privval.FilePV {
privValidatorKeyFile := config.PrivValidatorKeyFile()
ensureDir(filepath.Dir(privValidatorKeyFile), 0o700)
ensureDir(filepath.Dir(privValidatorKeyFile), 0700)
privValidatorStateFile := config.PrivValidatorStateFile()
privValidator := privval.LoadOrGenFilePV(privValidatorKeyFile, privValidatorStateFile)
privValidator.Reset()
@@ -478,8 +477,7 @@ func randState(nValidators int) (*State, []*validatorStub) {
//-------------------------------------------------------------------------------
func ensureNoNewEvent(ch <-chan tmpubsub.Message, timeout time.Duration,
errorMessage string,
) {
errorMessage string) {
select {
case <-time.After(timeout):
break
@@ -657,8 +655,7 @@ func ensurePrevote(voteCh <-chan tmpubsub.Message, height int64, round int32) {
}
func ensureVote(voteCh <-chan tmpubsub.Message, height int64, round int32,
voteType tmproto.SignedMsgType,
) {
voteType tmproto.SignedMsgType) {
select {
case <-time.After(ensureTimeout):
panic("Timeout expired while waiting for NewVote event")
@@ -714,24 +711,21 @@ func consensusLogger() log.Logger {
}
func randConsensusNet(nValidators int, testName string, tickerFunc func() TimeoutTicker,
appFunc func() abci.Application, configOpts ...func(*cfg.Config),
) ([]*State, cleanupFunc) {
appFunc func() abci.Application, configOpts ...func(*cfg.Config)) ([]*State, cleanupFunc) {
genDoc, privVals := randGenesisDoc(nValidators, false, 30)
css := make([]*State, nValidators)
logger := consensusLogger()
configRootDirs := make([]string, 0, nValidators)
for i := 0; i < nValidators; i++ {
stateDB := dbm.NewMemDB() // each state needs its own db
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
stateStore := sm.NewStore(stateDB)
state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc)
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
configRootDirs = append(configRootDirs, thisConfig.RootDir)
for _, opt := range configOpts {
opt(thisConfig)
}
ensureDir(filepath.Dir(thisConfig.Consensus.WalFile()), 0o700) // dir for wal
ensureDir(filepath.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
app := appFunc()
vals := types.TM2PB.ValidatorUpdates(state.Validators)
app.InitChain(abci.RequestInitChain{Validators: vals})
@@ -762,13 +756,11 @@ func randConsensusNetWithPeers(
configRootDirs := make([]string, 0, nPeers)
for i := 0; i < nPeers; i++ {
stateDB := dbm.NewMemDB() // each state needs its own db
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
stateStore := sm.NewStore(stateDB)
state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc)
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
configRootDirs = append(configRootDirs, thisConfig.RootDir)
ensureDir(filepath.Dir(thisConfig.Consensus.WalFile()), 0o700) // dir for wal
ensureDir(filepath.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
if i == 0 {
peer0Config = thisConfig
}
@@ -776,11 +768,11 @@ func randConsensusNetWithPeers(
if i < nValidators {
privVal = privVals[i]
} else {
tempKeyFile, err := os.CreateTemp("", "priv_validator_key_")
tempKeyFile, err := ioutil.TempFile("", "priv_validator_key_")
if err != nil {
panic(err)
}
tempStateFile, err := os.CreateTemp("", "priv_validator_state_")
tempStateFile, err := ioutil.TempFile("", "priv_validator_state_")
if err != nil {
panic(err)
}
@@ -902,7 +894,7 @@ func newCounter() abci.Application {
}
func newPersistentKVStore() abci.Application {
dir, err := os.MkdirTemp("", "persistent-kvstore")
dir, err := ioutil.TempDir("", "persistent-kvstore")
if err != nil {
panic(err)
}

View File

@@ -7,7 +7,6 @@ import (
"github.com/tendermint/tendermint/libs/log"
tmrand "github.com/tendermint/tendermint/libs/rand"
"github.com/tendermint/tendermint/p2p"
tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
"github.com/tendermint/tendermint/types"
)
@@ -95,10 +94,7 @@ func invalidDoPrevoteFunc(t *testing.T, height int64, round int32, cs *State, sw
peers := sw.Peers().List()
for _, peer := range peers {
cs.Logger.Info("Sending bad vote", "block", blockHash, "peer", peer)
p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
Message: &tmcons.Vote{Vote: precommit.ToProto()},
ChannelID: VoteChannel,
}, cs.Logger)
peer.Send(VoteChannel, MustEncode(&VoteMessage{precommit}))
}
}()
}

View File

@@ -113,7 +113,7 @@ func deliverTxsRange(cs *State, start, end int) {
func TestMempoolTxConcurrentWithCommit(t *testing.T) {
state, privVals := randGenesisState(1, false, 10)
blockDB := dbm.NewMemDB()
stateStore := sm.NewStore(blockDB, sm.StoreOptions{DiscardABCIResponses: false})
stateStore := sm.NewStore(blockDB)
cs := newStateWithConfigAndBlockStore(config, state, privVals[0], NewCounterApplication(), blockDB)
err := stateStore.Save(state)
require.NoError(t, err)
@@ -138,7 +138,7 @@ func TestMempoolRmBadTx(t *testing.T) {
state, privVals := randGenesisState(1, false, 10)
app := NewCounterApplication()
blockDB := dbm.NewMemDB()
stateStore := sm.NewStore(blockDB, sm.StoreOptions{DiscardABCIResponses: false})
stateStore := sm.NewStore(blockDB)
cs := newStateWithConfigAndBlockStore(config, state, privVals[0], app, blockDB)
err := stateStore.Save(state)
require.NoError(t, err)

View File

@@ -5,6 +5,7 @@ import (
"fmt"
"github.com/gogo/protobuf/proto"
cstypes "github.com/tendermint/tendermint/consensus/types"
"github.com/tendermint/tendermint/libs/bits"
tmmath "github.com/tendermint/tendermint/libs/math"
@@ -14,155 +15,173 @@ import (
"github.com/tendermint/tendermint/types"
)
// MsgToProto takes a consensus message type and returns the proto defined consensus message.
//
// TODO: This needs to be removed, but WALToProto depends on this.
// MsgToProto takes a consensus message type and returns the proto defined consensus message
func MsgToProto(msg Message) (*tmcons.Message, error) {
if msg == nil {
return nil, errors.New("consensus: message is nil")
}
var pb tmcons.Message
switch msg := msg.(type) {
case *NewRoundStepMessage:
m := &tmcons.NewRoundStep{
Height: msg.Height,
Round: msg.Round,
Step: uint32(msg.Step),
SecondsSinceStartTime: msg.SecondsSinceStartTime,
LastCommitRound: msg.LastCommitRound,
pb = tmcons.Message{
Sum: &tmcons.Message_NewRoundStep{
NewRoundStep: &tmcons.NewRoundStep{
Height: msg.Height,
Round: msg.Round,
Step: uint32(msg.Step),
SecondsSinceStartTime: msg.SecondsSinceStartTime,
LastCommitRound: msg.LastCommitRound,
},
},
}
return m.Wrap().(*tmcons.Message), nil
case *NewValidBlockMessage:
pbPartSetHeader := msg.BlockPartSetHeader.ToProto()
pbBits := msg.BlockParts.ToProto()
m := &tmcons.NewValidBlock{
Height: msg.Height,
Round: msg.Round,
BlockPartSetHeader: pbPartSetHeader,
BlockParts: pbBits,
IsCommit: msg.IsCommit,
pb = tmcons.Message{
Sum: &tmcons.Message_NewValidBlock{
NewValidBlock: &tmcons.NewValidBlock{
Height: msg.Height,
Round: msg.Round,
BlockPartSetHeader: pbPartSetHeader,
BlockParts: pbBits,
IsCommit: msg.IsCommit,
},
},
}
return m.Wrap().(*tmcons.Message), nil
case *ProposalMessage:
pbP := msg.Proposal.ToProto()
m := &tmcons.Proposal{
Proposal: *pbP,
pb = tmcons.Message{
Sum: &tmcons.Message_Proposal{
Proposal: &tmcons.Proposal{
Proposal: *pbP,
},
},
}
return m.Wrap().(*tmcons.Message), nil
case *ProposalPOLMessage:
pbBits := msg.ProposalPOL.ToProto()
m := &tmcons.ProposalPOL{
Height: msg.Height,
ProposalPolRound: msg.ProposalPOLRound,
ProposalPol: *pbBits,
pb = tmcons.Message{
Sum: &tmcons.Message_ProposalPol{
ProposalPol: &tmcons.ProposalPOL{
Height: msg.Height,
ProposalPolRound: msg.ProposalPOLRound,
ProposalPol: *pbBits,
},
},
}
return m.Wrap().(*tmcons.Message), nil
case *BlockPartMessage:
parts, err := msg.Part.ToProto()
if err != nil {
return nil, fmt.Errorf("msg to proto error: %w", err)
}
m := &tmcons.BlockPart{
Height: msg.Height,
Round: msg.Round,
Part: *parts,
pb = tmcons.Message{
Sum: &tmcons.Message_BlockPart{
BlockPart: &tmcons.BlockPart{
Height: msg.Height,
Round: msg.Round,
Part: *parts,
},
},
}
return m.Wrap().(*tmcons.Message), nil
case *VoteMessage:
vote := msg.Vote.ToProto()
m := &tmcons.Vote{
Vote: vote,
pb = tmcons.Message{
Sum: &tmcons.Message_Vote{
Vote: &tmcons.Vote{
Vote: vote,
},
},
}
return m.Wrap().(*tmcons.Message), nil
case *HasVoteMessage:
m := &tmcons.HasVote{
Height: msg.Height,
Round: msg.Round,
Type: msg.Type,
Index: msg.Index,
pb = tmcons.Message{
Sum: &tmcons.Message_HasVote{
HasVote: &tmcons.HasVote{
Height: msg.Height,
Round: msg.Round,
Type: msg.Type,
Index: msg.Index,
},
},
}
return m.Wrap().(*tmcons.Message), nil
case *VoteSetMaj23Message:
bi := msg.BlockID.ToProto()
m := &tmcons.VoteSetMaj23{
Height: msg.Height,
Round: msg.Round,
Type: msg.Type,
BlockID: bi,
pb = tmcons.Message{
Sum: &tmcons.Message_VoteSetMaj23{
VoteSetMaj23: &tmcons.VoteSetMaj23{
Height: msg.Height,
Round: msg.Round,
Type: msg.Type,
BlockID: bi,
},
},
}
return m.Wrap().(*tmcons.Message), nil
case *VoteSetBitsMessage:
bi := msg.BlockID.ToProto()
bits := msg.Votes.ToProto()
m := &tmcons.VoteSetBits{
Height: msg.Height,
Round: msg.Round,
Type: msg.Type,
BlockID: bi,
vsb := &tmcons.Message_VoteSetBits{
VoteSetBits: &tmcons.VoteSetBits{
Height: msg.Height,
Round: msg.Round,
Type: msg.Type,
BlockID: bi,
},
}
if bits != nil {
m.Votes = *bits
vsb.VoteSetBits.Votes = *bits
}
return m.Wrap().(*tmcons.Message), nil
pb = tmcons.Message{
Sum: vsb,
}
default:
return nil, fmt.Errorf("consensus: message not recognized: %T", msg)
}
return &pb, nil
}
// MsgFromProto takes a consensus proto message and returns the native go type
func MsgFromProto(p *tmcons.Message) (Message, error) {
if p == nil {
func MsgFromProto(msg *tmcons.Message) (Message, error) {
if msg == nil {
return nil, errors.New("consensus: nil message")
}
var pb Message
um, err := p.Unwrap()
if err != nil {
return nil, err
}
switch msg := um.(type) {
case *tmcons.NewRoundStep:
rs, err := tmmath.SafeConvertUint8(int64(msg.Step))
switch msg := msg.Sum.(type) {
case *tmcons.Message_NewRoundStep:
rs, err := tmmath.SafeConvertUint8(int64(msg.NewRoundStep.Step))
// deny message based on possible overflow
if err != nil {
return nil, fmt.Errorf("denying message due to possible overflow: %w", err)
}
pb = &NewRoundStepMessage{
Height: msg.Height,
Round: msg.Round,
Height: msg.NewRoundStep.Height,
Round: msg.NewRoundStep.Round,
Step: cstypes.RoundStepType(rs),
SecondsSinceStartTime: msg.SecondsSinceStartTime,
LastCommitRound: msg.LastCommitRound,
SecondsSinceStartTime: msg.NewRoundStep.SecondsSinceStartTime,
LastCommitRound: msg.NewRoundStep.LastCommitRound,
}
case *tmcons.NewValidBlock:
pbPartSetHeader, err := types.PartSetHeaderFromProto(&msg.BlockPartSetHeader)
case *tmcons.Message_NewValidBlock:
pbPartSetHeader, err := types.PartSetHeaderFromProto(&msg.NewValidBlock.BlockPartSetHeader)
if err != nil {
return nil, fmt.Errorf("parts to proto error: %w", err)
}
pbBits := new(bits.BitArray)
pbBits.FromProto(msg.BlockParts)
pbBits.FromProto(msg.NewValidBlock.BlockParts)
pb = &NewValidBlockMessage{
Height: msg.Height,
Round: msg.Round,
Height: msg.NewValidBlock.Height,
Round: msg.NewValidBlock.Round,
BlockPartSetHeader: *pbPartSetHeader,
BlockParts: pbBits,
IsCommit: msg.IsCommit,
IsCommit: msg.NewValidBlock.IsCommit,
}
case *tmcons.Proposal:
pbP, err := types.ProposalFromProto(&msg.Proposal)
case *tmcons.Message_Proposal:
pbP, err := types.ProposalFromProto(&msg.Proposal.Proposal)
if err != nil {
return nil, fmt.Errorf("proposal msg to proto error: %w", err)
}
@@ -170,26 +189,26 @@ func MsgFromProto(p *tmcons.Message) (Message, error) {
pb = &ProposalMessage{
Proposal: pbP,
}
case *tmcons.ProposalPOL:
case *tmcons.Message_ProposalPol:
pbBits := new(bits.BitArray)
pbBits.FromProto(&msg.ProposalPol)
pbBits.FromProto(&msg.ProposalPol.ProposalPol)
pb = &ProposalPOLMessage{
Height: msg.Height,
ProposalPOLRound: msg.ProposalPolRound,
Height: msg.ProposalPol.Height,
ProposalPOLRound: msg.ProposalPol.ProposalPolRound,
ProposalPOL: pbBits,
}
case *tmcons.BlockPart:
parts, err := types.PartFromProto(&msg.Part)
case *tmcons.Message_BlockPart:
parts, err := types.PartFromProto(&msg.BlockPart.Part)
if err != nil {
return nil, fmt.Errorf("blockpart msg to proto error: %w", err)
}
pb = &BlockPartMessage{
Height: msg.Height,
Round: msg.Round,
Height: msg.BlockPart.Height,
Round: msg.BlockPart.Round,
Part: parts,
}
case *tmcons.Vote:
vote, err := types.VoteFromProto(msg.Vote)
case *tmcons.Message_Vote:
vote, err := types.VoteFromProto(msg.Vote.Vote)
if err != nil {
return nil, fmt.Errorf("vote msg to proto error: %w", err)
}
@@ -197,36 +216,36 @@ func MsgFromProto(p *tmcons.Message) (Message, error) {
pb = &VoteMessage{
Vote: vote,
}
case *tmcons.HasVote:
case *tmcons.Message_HasVote:
pb = &HasVoteMessage{
Height: msg.Height,
Round: msg.Round,
Type: msg.Type,
Index: msg.Index,
Height: msg.HasVote.Height,
Round: msg.HasVote.Round,
Type: msg.HasVote.Type,
Index: msg.HasVote.Index,
}
case *tmcons.VoteSetMaj23:
bi, err := types.BlockIDFromProto(&msg.BlockID)
case *tmcons.Message_VoteSetMaj23:
bi, err := types.BlockIDFromProto(&msg.VoteSetMaj23.BlockID)
if err != nil {
return nil, fmt.Errorf("voteSetMaj23 msg to proto error: %w", err)
}
pb = &VoteSetMaj23Message{
Height: msg.Height,
Round: msg.Round,
Type: msg.Type,
Height: msg.VoteSetMaj23.Height,
Round: msg.VoteSetMaj23.Round,
Type: msg.VoteSetMaj23.Type,
BlockID: *bi,
}
case *tmcons.VoteSetBits:
bi, err := types.BlockIDFromProto(&msg.BlockID)
case *tmcons.Message_VoteSetBits:
bi, err := types.BlockIDFromProto(&msg.VoteSetBits.BlockID)
if err != nil {
return nil, fmt.Errorf("voteSetBits msg to proto error: %w", err)
}
bits := new(bits.BitArray)
bits.FromProto(&msg.Votes)
bits.FromProto(&msg.VoteSetBits.Votes)
pb = &VoteSetBitsMessage{
Height: msg.Height,
Round: msg.Round,
Type: msg.Type,
Height: msg.VoteSetBits.Height,
Round: msg.VoteSetBits.Round,
Type: msg.VoteSetBits.Type,
BlockID: *bi,
Votes: bits,
}
@@ -243,8 +262,6 @@ func MsgFromProto(p *tmcons.Message) (Message, error) {
// MustEncode takes the reactor's msg, makes it proto and marshals it;
// this mimics `MustMarshalBinaryBare` in that it panics on error
//
// Deprecated: Will be removed in v0.37.
func MustEncode(msg Message) []byte {
pb, err := MsgToProto(msg)
if err != nil {

View File

@@ -80,15 +80,17 @@ func TestMsgToProto(t *testing.T) {
Step: 1,
SecondsSinceStartTime: 1,
LastCommitRound: 2,
}, (&tmcons.NewRoundStep{
Height: 2,
Round: 1,
Step: 1,
SecondsSinceStartTime: 1,
LastCommitRound: 2,
}).Wrap().(*tmcons.Message),
false},
}, &tmcons.Message{
Sum: &tmcons.Message_NewRoundStep{
NewRoundStep: &tmcons.NewRoundStep{
Height: 2,
Round: 1,
Step: 1,
SecondsSinceStartTime: 1,
LastCommitRound: 2,
},
},
}, false},
{"successful NewValidBlockMessage", &NewValidBlockMessage{
Height: 1,
@@ -96,78 +98,92 @@ func TestMsgToProto(t *testing.T) {
BlockPartSetHeader: psh,
BlockParts: bits,
IsCommit: false,
}, (&tmcons.NewValidBlock{
Height: 1,
Round: 1,
BlockPartSetHeader: pbPsh,
BlockParts: pbBits,
IsCommit: false,
}).Wrap().(*tmcons.Message),
false},
}, &tmcons.Message{
Sum: &tmcons.Message_NewValidBlock{
NewValidBlock: &tmcons.NewValidBlock{
Height: 1,
Round: 1,
BlockPartSetHeader: pbPsh,
BlockParts: pbBits,
IsCommit: false,
},
},
}, false},
{"successful BlockPartMessage", &BlockPartMessage{
Height: 100,
Round: 1,
Part: &parts,
}, (&tmcons.BlockPart{
Height: 100,
Round: 1,
Part: *pbParts,
}).Wrap().(*tmcons.Message),
false},
}, &tmcons.Message{
Sum: &tmcons.Message_BlockPart{
BlockPart: &tmcons.BlockPart{
Height: 100,
Round: 1,
Part: *pbParts,
},
},
}, false},
{"successful ProposalPOLMessage", &ProposalPOLMessage{
Height: 1,
ProposalPOLRound: 1,
ProposalPOL: bits,
}, (&tmcons.ProposalPOL{
Height: 1,
ProposalPolRound: 1,
ProposalPol: *pbBits,
}).Wrap().(*tmcons.Message),
false},
}, &tmcons.Message{
Sum: &tmcons.Message_ProposalPol{
ProposalPol: &tmcons.ProposalPOL{
Height: 1,
ProposalPolRound: 1,
ProposalPol: *pbBits,
},
}}, false},
{"successful ProposalMessage", &ProposalMessage{
Proposal: &proposal,
}, (&tmcons.Proposal{
Proposal: *pbProposal,
}).Wrap().(*tmcons.Message),
false},
}, &tmcons.Message{
Sum: &tmcons.Message_Proposal{
Proposal: &tmcons.Proposal{
Proposal: *pbProposal,
},
},
}, false},
{"successful VoteMessage", &VoteMessage{
Vote: vote,
}, (&tmcons.Vote{
Vote: pbVote,
}).Wrap().(*tmcons.Message),
false},
}, &tmcons.Message{
Sum: &tmcons.Message_Vote{
Vote: &tmcons.Vote{
Vote: pbVote,
},
},
}, false},
{"successful VoteSetMaj23", &VoteSetMaj23Message{
Height: 1,
Round: 1,
Type: 1,
BlockID: bi,
}, (&tmcons.VoteSetMaj23{
Height: 1,
Round: 1,
Type: 1,
BlockID: pbBi,
}).Wrap().(*tmcons.Message),
false},
}, &tmcons.Message{
Sum: &tmcons.Message_VoteSetMaj23{
VoteSetMaj23: &tmcons.VoteSetMaj23{
Height: 1,
Round: 1,
Type: 1,
BlockID: pbBi,
},
},
}, false},
{"successful VoteSetBits", &VoteSetBitsMessage{
Height: 1,
Round: 1,
Type: 1,
BlockID: bi,
Votes: bits,
}, (&tmcons.VoteSetBits{
Height: 1,
Round: 1,
Type: 1,
BlockID: pbBi,
Votes: *pbBits,
}).Wrap().(*tmcons.Message),
false},
}, &tmcons.Message{
Sum: &tmcons.Message_VoteSetBits{
VoteSetBits: &tmcons.VoteSetBits{
Height: 1,
Round: 1,
Type: 1,
BlockID: pbBi,
Votes: *pbBits,
},
},
}, false},
{"failure", nil, &tmcons.Message{}, true},
}
for _, tt := range testsCases {
@@ -298,7 +314,7 @@ func TestWALMsgProto(t *testing.T) {
}
}
//nolint:lll //ignore line length for tests
// nolint:lll //ignore line length for tests
func TestConsMsgsVectors(t *testing.T) {
date := time.Date(2018, 8, 30, 12, 0, 0, 0, time.UTC)
psh := types.PartSetHeader{

View File

@@ -8,6 +8,7 @@ import (
"time"
"github.com/gogo/protobuf/proto"
cstypes "github.com/tendermint/tendermint/consensus/types"
"github.com/tendermint/tendermint/libs/bits"
tmevents "github.com/tendermint/tendermint/libs/events"
@@ -147,7 +148,6 @@ func (conR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
Priority: 6,
SendQueueCapacity: 100,
RecvMessageCapacity: maxMsgSize,
MessageType: &tmcons.Message{},
},
{
ID: DataChannel, // maybe split between gossiping current block and catchup stuff
@@ -156,7 +156,6 @@ func (conR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
SendQueueCapacity: 100,
RecvBufferCapacity: 50 * 4096,
RecvMessageCapacity: maxMsgSize,
MessageType: &tmcons.Message{},
},
{
ID: VoteChannel,
@@ -164,7 +163,6 @@ func (conR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
SendQueueCapacity: 100,
RecvBufferCapacity: 100 * 100,
RecvMessageCapacity: maxMsgSize,
MessageType: &tmcons.Message{},
},
{
ID: VoteSetBitsChannel,
@@ -172,7 +170,6 @@ func (conR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
SendQueueCapacity: 2,
RecvBufferCapacity: 1024,
RecvMessageCapacity: maxMsgSize,
MessageType: &tmcons.Message{},
},
}
}
@@ -226,37 +223,34 @@ func (conR *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) {
// Peer state updates can happen in parallel, but processing of
// proposals, block parts, and votes are ordered by the receiveRoutine
// NOTE: blocks on consensus state for proposals, block parts, and votes
func (conR *Reactor) ReceiveEnvelope(e p2p.Envelope) {
func (conR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
if !conR.IsRunning() {
conR.Logger.Debug("Receive", "src", e.Src, "chId", e.ChannelID)
conR.Logger.Debug("Receive", "src", src, "chId", chID, "bytes", msgBytes)
return
}
m := e.Message
if wm, ok := m.(p2p.Wrapper); ok {
m = wm.Wrap()
}
msg, err := MsgFromProto(m.(*tmcons.Message))
msg, err := decodeMsg(msgBytes)
if err != nil {
conR.Logger.Error("Error decoding message", "src", e.Src, "chId", e.ChannelID, "err", err)
conR.Switch.StopPeerForError(e.Src, err)
conR.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err)
conR.Switch.StopPeerForError(src, err)
return
}
if err = msg.ValidateBasic(); err != nil {
conR.Logger.Error("Peer sent us invalid msg", "peer", e.Src, "msg", e.Message, "err", err)
conR.Switch.StopPeerForError(e.Src, err)
conR.Logger.Error("Peer sent us invalid msg", "peer", src, "msg", msg, "err", err)
conR.Switch.StopPeerForError(src, err)
return
}
conR.Logger.Debug("Receive", "src", e.Src, "chId", e.ChannelID, "msg", msg)
conR.Logger.Debug("Receive", "src", src, "chId", chID, "msg", msg)
// Get peer states
ps, ok := e.Src.Get(types.PeerStateKey).(*PeerState)
ps, ok := src.Get(types.PeerStateKey).(*PeerState)
if !ok {
panic(fmt.Sprintf("Peer %v has no state", e.Src))
panic(fmt.Sprintf("Peer %v has no state", src))
}
switch e.ChannelID {
switch chID {
case StateChannel:
switch msg := msg.(type) {
case *NewRoundStepMessage:
@@ -264,8 +258,8 @@ func (conR *Reactor) ReceiveEnvelope(e p2p.Envelope) {
initialHeight := conR.conS.state.InitialHeight
conR.conS.mtx.Unlock()
if err = msg.ValidateHeight(initialHeight); err != nil {
conR.Logger.Error("Peer sent us invalid msg", "peer", e.Src, "msg", msg, "err", err)
conR.Switch.StopPeerForError(e.Src, err)
conR.Logger.Error("Peer sent us invalid msg", "peer", src, "msg", msg, "err", err)
conR.Switch.StopPeerForError(src, err)
return
}
ps.ApplyNewRoundStepMessage(msg)
@@ -284,7 +278,7 @@ func (conR *Reactor) ReceiveEnvelope(e p2p.Envelope) {
// Peer claims to have a maj23 for some BlockID at H,R,S,
err := votes.SetPeerMaj23(msg.Round, msg.Type, ps.peer.ID(), msg.BlockID)
if err != nil {
conR.Switch.StopPeerForError(e.Src, err)
conR.Switch.StopPeerForError(src, err)
return
}
// Respond with a VoteSetBitsMessage showing which votes we have.
@@ -298,19 +292,13 @@ func (conR *Reactor) ReceiveEnvelope(e p2p.Envelope) {
default:
panic("Bad VoteSetBitsMessage field Type. Forgot to add a check in ValidateBasic?")
}
eMsg := &tmcons.VoteSetBits{
src.TrySend(VoteSetBitsChannel, MustEncode(&VoteSetBitsMessage{
Height: msg.Height,
Round: msg.Round,
Type: msg.Type,
BlockID: msg.BlockID.ToProto(),
}
if votes := ourVotes.ToProto(); votes != nil {
eMsg.Votes = *votes
}
p2p.TrySendEnvelopeShim(e.Src, p2p.Envelope{ //nolint: staticcheck
ChannelID: VoteSetBitsChannel,
Message: eMsg,
}, conR.Logger)
BlockID: msg.BlockID,
Votes: ourVotes,
}))
default:
conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
}
@@ -323,13 +311,13 @@ func (conR *Reactor) ReceiveEnvelope(e p2p.Envelope) {
switch msg := msg.(type) {
case *ProposalMessage:
ps.SetHasProposal(msg.Proposal)
conR.conS.peerMsgQueue <- msgInfo{msg, e.Src.ID()}
conR.conS.peerMsgQueue <- msgInfo{msg, src.ID()}
case *ProposalPOLMessage:
ps.ApplyProposalPOLMessage(msg)
case *BlockPartMessage:
ps.SetHasProposalBlockPart(msg.Height, msg.Round, int(msg.Part.Index))
conR.Metrics.BlockParts.With("peer_id", string(e.Src.ID())).Add(1)
conR.conS.peerMsgQueue <- msgInfo{msg, e.Src.ID()}
conR.Metrics.BlockParts.With("peer_id", string(src.ID())).Add(1)
conR.conS.peerMsgQueue <- msgInfo{msg, src.ID()}
default:
conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
}
@@ -349,7 +337,7 @@ func (conR *Reactor) ReceiveEnvelope(e p2p.Envelope) {
ps.EnsureVoteBitArrays(height-1, lastCommitSize)
ps.SetHasVote(msg.Vote)
cs.peerMsgQueue <- msgInfo{msg, e.Src.ID()}
cs.peerMsgQueue <- msgInfo{msg, src.ID()}
default:
// don't punish (leave room for soft upgrades)
@@ -388,27 +376,10 @@ func (conR *Reactor) ReceiveEnvelope(e p2p.Envelope) {
}
default:
conR.Logger.Error(fmt.Sprintf("Unknown chId %X", e.ChannelID))
conR.Logger.Error(fmt.Sprintf("Unknown chId %X", chID))
}
}
func (conR *Reactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {
msg := &tmcons.Message{}
err := proto.Unmarshal(msgBytes, msg)
if err != nil {
panic(err)
}
uw, err := msg.Unwrap()
if err != nil {
panic(err)
}
conR.ReceiveEnvelope(p2p.Envelope{
ChannelID: chID,
Src: peer,
Message: uw,
})
}
// SetEventBus sets event bus.
func (conR *Reactor) SetEventBus(b *types.EventBus) {
conR.eventBus = b
@@ -459,39 +430,29 @@ func (conR *Reactor) unsubscribeFromBroadcastEvents() {
func (conR *Reactor) broadcastNewRoundStepMessage(rs *cstypes.RoundState) {
nrsMsg := makeRoundStepMessage(rs)
conR.Switch.BroadcastEnvelope(p2p.Envelope{
ChannelID: StateChannel,
Message: nrsMsg,
})
conR.Switch.Broadcast(StateChannel, MustEncode(nrsMsg))
}
func (conR *Reactor) broadcastNewValidBlockMessage(rs *cstypes.RoundState) {
psh := rs.ProposalBlockParts.Header()
csMsg := &tmcons.NewValidBlock{
csMsg := &NewValidBlockMessage{
Height: rs.Height,
Round: rs.Round,
BlockPartSetHeader: psh.ToProto(),
BlockParts: rs.ProposalBlockParts.BitArray().ToProto(),
BlockPartSetHeader: rs.ProposalBlockParts.Header(),
BlockParts: rs.ProposalBlockParts.BitArray(),
IsCommit: rs.Step == cstypes.RoundStepCommit,
}
conR.Switch.BroadcastEnvelope(p2p.Envelope{
ChannelID: StateChannel,
Message: csMsg,
})
conR.Switch.Broadcast(StateChannel, MustEncode(csMsg))
}
// Broadcasts HasVoteMessage to peers that care.
func (conR *Reactor) broadcastHasVoteMessage(vote *types.Vote) {
msg := &tmcons.HasVote{
msg := &HasVoteMessage{
Height: vote.Height,
Round: vote.Round,
Type: vote.Type,
Index: vote.ValidatorIndex,
}
conR.Switch.BroadcastEnvelope(p2p.Envelope{
ChannelID: StateChannel,
Message: msg,
})
conR.Switch.Broadcast(StateChannel, MustEncode(msg))
/*
// TODO: Make this broadcast more selective.
for _, peer := range conR.Switch.Peers().List() {
@@ -502,11 +463,7 @@ func (conR *Reactor) broadcastHasVoteMessage(vote *types.Vote) {
prs := ps.GetRoundState()
if prs.Height == vote.Height {
// TODO: Also filter on round?
e := p2p.Envelope{
ChannelID: StateChannel, struct{ ConsensusMessage }{msg},
Message: p,
}
p2p.TrySendEnvelopeShim(peer, e) //nolint: staticcheck
peer.TrySend(StateChannel, struct{ ConsensusMessage }{msg})
} else {
// Height doesn't match
// TODO: check a field, maybe CatchupCommitRound?
@@ -516,11 +473,11 @@ func (conR *Reactor) broadcastHasVoteMessage(vote *types.Vote) {
*/
}
func makeRoundStepMessage(rs *cstypes.RoundState) (nrsMsg *tmcons.NewRoundStep) {
nrsMsg = &tmcons.NewRoundStep{
func makeRoundStepMessage(rs *cstypes.RoundState) (nrsMsg *NewRoundStepMessage) {
nrsMsg = &NewRoundStepMessage{
Height: rs.Height,
Round: rs.Round,
Step: uint32(rs.Step),
Step: rs.Step,
SecondsSinceStartTime: int64(time.Since(rs.StartTime).Seconds()),
LastCommitRound: rs.LastCommit.GetRound(),
}
@@ -530,10 +487,7 @@ func makeRoundStepMessage(rs *cstypes.RoundState) (nrsMsg *tmcons.NewRoundStep)
func (conR *Reactor) sendNewRoundStepMessage(peer p2p.Peer) {
rs := conR.getRoundState()
nrsMsg := makeRoundStepMessage(rs)
p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
ChannelID: StateChannel,
Message: nrsMsg,
}, conR.Logger)
peer.Send(StateChannel, MustEncode(nrsMsg))
}
func (conR *Reactor) updateRoundStateRoutine() {
@@ -572,19 +526,13 @@ OUTER_LOOP:
if rs.ProposalBlockParts.HasHeader(prs.ProposalBlockPartSetHeader) {
if index, ok := rs.ProposalBlockParts.BitArray().Sub(prs.ProposalBlockParts.Copy()).PickRandom(); ok {
part := rs.ProposalBlockParts.GetPart(index)
parts, err := part.ToProto()
if err != nil {
panic(err)
msg := &BlockPartMessage{
Height: rs.Height, // This tells peer that this part applies to us.
Round: rs.Round, // This tells peer that this part applies to us.
Part: part,
}
logger.Debug("Sending block part", "height", prs.Height, "round", prs.Round)
if p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
ChannelID: DataChannel,
Message: &tmcons.BlockPart{
Height: rs.Height, // This tells peer that this part applies to us.
Round: rs.Round, // This tells peer that this part applies to us.
Part: *parts,
},
}, logger) {
if peer.Send(DataChannel, MustEncode(msg)) {
ps.SetHasProposalBlockPart(prs.Height, prs.Round, index)
}
continue OUTER_LOOP
@@ -630,11 +578,9 @@ OUTER_LOOP:
if rs.Proposal != nil && !prs.Proposal {
// Proposal: share the proposal metadata with peer.
{
msg := &ProposalMessage{Proposal: rs.Proposal}
logger.Debug("Sending proposal", "height", prs.Height, "round", prs.Round)
if p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
ChannelID: DataChannel,
Message: &tmcons.Proposal{Proposal: *rs.Proposal.ToProto()},
}, logger) {
if peer.Send(DataChannel, MustEncode(msg)) {
// NOTE[ZM]: A peer might have received different proposal msg so this Proposal msg will be rejected!
ps.SetHasProposal(rs.Proposal)
}
@@ -644,15 +590,13 @@ OUTER_LOOP:
// rs.Proposal was validated, so rs.Proposal.POLRound <= rs.Round,
// so we definitely have rs.Votes.Prevotes(rs.Proposal.POLRound).
if 0 <= rs.Proposal.POLRound {
msg := &ProposalPOLMessage{
Height: rs.Height,
ProposalPOLRound: rs.Proposal.POLRound,
ProposalPOL: rs.Votes.Prevotes(rs.Proposal.POLRound).BitArray(),
}
logger.Debug("Sending POL", "height", prs.Height, "round", prs.Round)
p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
ChannelID: DataChannel,
Message: &tmcons.ProposalPOL{
Height: rs.Height,
ProposalPolRound: rs.Proposal.POLRound,
ProposalPol: *rs.Votes.Prevotes(rs.Proposal.POLRound).BitArray().ToProto(),
},
}, logger)
peer.Send(DataChannel, MustEncode(msg))
}
continue OUTER_LOOP
}
@@ -689,20 +633,13 @@ func (conR *Reactor) gossipDataForCatchup(logger log.Logger, rs *cstypes.RoundSt
return
}
// Send the part
logger.Debug("Sending block part for catchup", "round", prs.Round, "index", index)
pp, err := part.ToProto()
if err != nil {
logger.Error("Could not convert part to proto", "index", index, "error", err)
return
msg := &BlockPartMessage{
Height: prs.Height, // Not our height, so it doesn't matter.
Round: prs.Round, // Not our height, so it doesn't matter.
Part: part,
}
if p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
ChannelID: DataChannel,
Message: &tmcons.BlockPart{
Height: prs.Height, // Not our height, so it doesn't matter.
Round: prs.Round, // Not our height, so it doesn't matter.
Part: *pp,
},
}, logger) {
logger.Debug("Sending block part for catchup", "round", prs.Round, "index", index)
if peer.Send(DataChannel, MustEncode(msg)) {
ps.SetHasProposalBlockPart(prs.Height, prs.Round, index)
} else {
logger.Debug("Sending block part for catchup failed")
@@ -861,16 +798,12 @@ OUTER_LOOP:
prs := ps.GetRoundState()
if rs.Height == prs.Height {
if maj23, ok := rs.Votes.Prevotes(prs.Round).TwoThirdsMajority(); ok {
p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
ChannelID: StateChannel,
Message: &tmcons.VoteSetMaj23{
Height: prs.Height,
Round: prs.Round,
Type: tmproto.PrevoteType,
BlockID: maj23.ToProto(),
},
}, ps.logger)
peer.TrySend(StateChannel, MustEncode(&VoteSetMaj23Message{
Height: prs.Height,
Round: prs.Round,
Type: tmproto.PrevoteType,
BlockID: maj23,
}))
time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
}
}
@@ -882,15 +815,12 @@ OUTER_LOOP:
prs := ps.GetRoundState()
if rs.Height == prs.Height {
if maj23, ok := rs.Votes.Precommits(prs.Round).TwoThirdsMajority(); ok {
p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
ChannelID: StateChannel,
Message: &tmcons.VoteSetMaj23{
Height: prs.Height,
Round: prs.Round,
Type: tmproto.PrecommitType,
BlockID: maj23.ToProto(),
},
}, ps.logger)
peer.TrySend(StateChannel, MustEncode(&VoteSetMaj23Message{
Height: prs.Height,
Round: prs.Round,
Type: tmproto.PrecommitType,
BlockID: maj23,
}))
time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
}
}
@@ -902,16 +832,12 @@ OUTER_LOOP:
prs := ps.GetRoundState()
if rs.Height == prs.Height && prs.ProposalPOLRound >= 0 {
if maj23, ok := rs.Votes.Prevotes(prs.ProposalPOLRound).TwoThirdsMajority(); ok {
p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
ChannelID: StateChannel,
Message: &tmcons.VoteSetMaj23{
Height: prs.Height,
Round: prs.ProposalPOLRound,
Type: tmproto.PrevoteType,
BlockID: maj23.ToProto(),
},
}, ps.logger)
peer.TrySend(StateChannel, MustEncode(&VoteSetMaj23Message{
Height: prs.Height,
Round: prs.ProposalPOLRound,
Type: tmproto.PrevoteType,
BlockID: maj23,
}))
time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
}
}
@@ -926,15 +852,12 @@ OUTER_LOOP:
if prs.CatchupCommitRound != -1 && prs.Height > 0 && prs.Height <= conR.conS.blockStore.Height() &&
prs.Height >= conR.conS.blockStore.Base() {
if commit := conR.conS.LoadCommit(prs.Height); commit != nil {
p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
ChannelID: StateChannel,
Message: &tmcons.VoteSetMaj23{
Height: prs.Height,
Round: commit.Round,
Type: tmproto.PrecommitType,
BlockID: commit.BlockID.ToProto(),
},
}, ps.logger)
peer.TrySend(StateChannel, MustEncode(&VoteSetMaj23Message{
Height: prs.Height,
Round: commit.Round,
Type: tmproto.PrecommitType,
BlockID: commit.BlockID,
}))
time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
}
}
@@ -1148,13 +1071,9 @@ func (ps *PeerState) SetHasProposalBlockPart(height int64, round int32, index in
// Returns true if vote was sent.
func (ps *PeerState) PickSendVote(votes types.VoteSetReader) bool {
if vote, ok := ps.PickVoteToSend(votes); ok {
msg := &VoteMessage{vote}
ps.logger.Debug("Sending vote message", "ps", ps, "vote", vote)
if p2p.SendEnvelopeShim(ps.peer, p2p.Envelope{ //nolint: staticcheck
ChannelID: VoteChannel,
Message: &tmcons.Vote{
Vote: vote.ToProto(),
},
}, ps.logger) {
if ps.peer.Send(VoteChannel, MustEncode(msg)) {
ps.SetHasVote(vote)
return true
}
@@ -1520,6 +1439,15 @@ func init() {
tmjson.RegisterType(&VoteSetBitsMessage{}, "tendermint/VoteSetBits")
}
func decodeMsg(bz []byte) (msg Message, err error) {
pb := &tmcons.Message{}
if err = proto.Unmarshal(bz, pb); err != nil {
return msg, err
}
return MsgFromProto(pb)
}
//-------------------------------------
// NewRoundStepMessage is sent for every step taken in the ConsensusState.
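The hunks above switch the consensus reactor between two wire APIs: an Envelope-based one (`ReceiveEnvelope`, `BroadcastEnvelope`, `SendEnvelopeShim`) and a byte-oriented one (`Receive`, `MustEncode`, `decodeMsg`) in which every consensus message is converted to its protobuf wrapper and marshalled before `peer.Send`, then unmarshalled and converted back on receipt. A minimal sketch of that byte-level round trip, assuming the exported `MustEncode`/`MsgFromProto` helpers of this package and the gogo proto codec (not the reactor's exact code):

```go
package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	cs "github.com/tendermint/tendermint/consensus"
	tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus"
	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
)

func main() {
	// Encode side, what MustEncode does before peer.Send:
	// concrete Message -> *tmcons.Message wrapper -> bytes.
	msg := &cs.HasVoteMessage{Height: 1, Round: 0, Type: tmproto.PrevoteType, Index: 0}
	bz := cs.MustEncode(msg)

	// Decode side, mirroring decodeMsg in the hunk above:
	// bytes -> *tmcons.Message wrapper -> concrete Message.
	pb := &tmcons.Message{}
	if err := proto.Unmarshal(bz, pb); err != nil {
		panic(err)
	}
	decoded, err := cs.MsgFromProto(pb)
	if err != nil {
		panic(err)
	}
	fmt.Printf("decoded %T\n", decoded) // *consensus.HasVoteMessage
}
```

The Envelope path skips this explicit marshal/unmarshal at the reactor level and hands typed messages to the switch instead, which is the other side of this diff.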

View File

@@ -11,7 +11,6 @@ import (
"testing"
"time"
"github.com/gogo/protobuf/proto"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
@@ -34,7 +33,6 @@ import (
mempoolv1 "github.com/tendermint/tendermint/mempool/v1"
"github.com/tendermint/tendermint/p2p"
p2pmock "github.com/tendermint/tendermint/p2p/mock"
tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
sm "github.com/tendermint/tendermint/state"
statemocks "github.com/tendermint/tendermint/state/mocks"
@@ -140,9 +138,7 @@ func TestReactorWithEvidence(t *testing.T) {
logger := consensusLogger()
for i := 0; i < nValidators; i++ {
stateDB := dbm.NewMemDB() // each state needs its own db
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
stateStore := sm.NewStore(stateDB)
state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc)
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
defer os.RemoveAll(thisConfig.RootDir)
@@ -256,7 +252,7 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {
}, css)
}
func TestLegacyReactorReceiveBasicIfAddPeerHasntBeenCalledYet(t *testing.T) {
func TestReactorReceiveDoesNotPanicIfAddPeerHasntBeenCalledYet(t *testing.T) {
N := 1
css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
defer cleanup()
@@ -266,45 +262,13 @@ func TestLegacyReactorReceiveBasicIfAddPeerHasntBeenCalledYet(t *testing.T) {
var (
reactor = reactors[0]
peer = p2pmock.NewPeer(nil)
msg = MustEncode(&HasVoteMessage{Height: 1,
Round: 1, Index: 1, Type: tmproto.PrevoteType})
)
reactor.InitPeer(peer)
// simulate switch calling Receive before AddPeer
assert.NotPanics(t, func() {
reactor.ReceiveEnvelope(p2p.Envelope{
ChannelID: StateChannel,
Src: peer,
Message: &tmcons.HasVote{Height: 1,
Round: 1, Index: 1, Type: tmproto.PrevoteType},
})
reactor.AddPeer(peer)
})
}
func TestLegacyReactorReceiveBasic(t *testing.T) {
N := 1
css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
defer cleanup()
reactors, _, eventBuses := startConsensusNet(t, css, N)
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
var (
reactor = reactors[0]
peer = p2pmock.NewPeer(nil)
)
reactor.InitPeer(peer)
v := &tmcons.HasVote{
Height: 1,
Round: 1,
Index: 1,
Type: tmproto.PrevoteType,
}
w := v.Wrap()
msg, err := proto.Marshal(w)
assert.NoError(t, err)
assert.NotPanics(t, func() {
reactor.Receive(StateChannel, peer, msg)
reactor.AddPeer(peer)
@@ -321,18 +285,15 @@ func TestReactorReceivePanicsIfInitPeerHasntBeenCalledYet(t *testing.T) {
var (
reactor = reactors[0]
peer = p2pmock.NewPeer(nil)
msg = MustEncode(&HasVoteMessage{Height: 1,
Round: 1, Index: 1, Type: tmproto.PrevoteType})
)
// we should call InitPeer here
// simulate switch calling Receive before AddPeer
assert.Panics(t, func() {
reactor.ReceiveEnvelope(p2p.Envelope{
ChannelID: StateChannel,
Src: peer,
Message: &tmcons.HasVote{Height: 1,
Round: 1, Index: 1, Type: tmproto.PrevoteType},
})
reactor.Receive(StateChannel, peer, msg)
})
}
@@ -728,7 +689,7 @@ func capture() {
// Ensure basic validation of structs is functioning
func TestNewRoundStepMessageValidateBasic(t *testing.T) {
testCases := []struct {
testCases := []struct { // nolint: maligned
expectErr bool
messageRound int32
messageLastCommitRound int32
@@ -767,7 +728,7 @@ func TestNewRoundStepMessageValidateBasic(t *testing.T) {
func TestNewRoundStepMessageValidateHeight(t *testing.T) {
initialHeight := int64(10)
testCases := []struct { //nolint: maligned
testCases := []struct { // nolint: maligned
expectErr bool
messageLastCommitRound int32
messageHeight int64
@@ -917,7 +878,7 @@ func TestHasVoteMessageValidateBasic(t *testing.T) {
invalidSignedMsgType tmproto.SignedMsgType = 0x03
)
testCases := []struct { //nolint: maligned
testCases := []struct { // nolint: maligned
expectErr bool
messageRound int32
messageIndex int32
@@ -962,7 +923,7 @@ func TestVoteSetMaj23MessageValidateBasic(t *testing.T) {
},
}
testCases := []struct { //nolint: maligned
testCases := []struct { // nolint: maligned
expectErr bool
messageRound int32
messageHeight int64

View File

@@ -418,7 +418,7 @@ func (h *Handshaker) ReplayBlocks(
case appBlockHeight == storeBlockHeight:
// We ran Commit, but didn't save the state, so replayBlock with mock app.
abciResponses, err := h.stateStore.LoadLastABCIResponse(storeBlockHeight)
abciResponses, err := h.stateStore.LoadABCIResponses(storeBlockHeight)
if err != nil {
return nil, err
}

View File

@@ -297,9 +297,7 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo
if err != nil {
tmos.Exit(err.Error())
}
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
stateStore := sm.NewStore(stateDB)
gdoc, err := sm.MakeGenesisDocFromFile(config.GenesisFile())
if err != nil {
tmos.Exit(err.Error())

View File

@@ -5,6 +5,7 @@ import (
"context"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"runtime"
@@ -64,8 +65,7 @@ func TestMain(m *testing.M) {
// wal writer when we need to, instead of with every message.
func startNewStateAndWaitForBlock(t *testing.T, consensusReplayConfig *cfg.Config,
lastBlockHeight int64, blockDB dbm.DB, stateStore sm.Store,
) {
lastBlockHeight int64, blockDB dbm.DB, stateStore sm.Store) {
logger := log.TestingLogger()
state, _ := stateStore.LoadFromDBOrGenesisFile(consensusReplayConfig.GenesisFile())
privValidator := loadPrivValidator(consensusReplayConfig)
@@ -78,7 +78,7 @@ func startNewStateAndWaitForBlock(t *testing.T, consensusReplayConfig *cfg.Confi
)
cs.SetLogger(logger)
bytes, _ := os.ReadFile(cs.config.WalFile())
bytes, _ := ioutil.ReadFile(cs.config.WalFile())
t.Logf("====== WAL: \n\r%X\n", bytes)
err := cs.Start()
@@ -126,18 +126,14 @@ func TestWALCrash(t *testing.T) {
initFn func(dbm.DB, *State, context.Context)
heightToStop int64
}{
{
"empty block",
{"empty block",
func(stateDB dbm.DB, cs *State, ctx context.Context) {},
1,
},
{
"many non-empty blocks",
1},
{"many non-empty blocks",
func(stateDB dbm.DB, cs *State, ctx context.Context) {
go sendTxs(ctx, cs)
},
3,
},
3},
}
for i, tc := range testCases {
@@ -150,8 +146,7 @@ func TestWALCrash(t *testing.T) {
}
func crashWALandCheckLiveness(t *testing.T, consensusReplayConfig *cfg.Config,
initFn func(dbm.DB, *State, context.Context), heightToStop int64,
) {
initFn func(dbm.DB, *State, context.Context), heightToStop int64) {
walPanicked := make(chan error)
crashingWal := &crashingWAL{panicCh: walPanicked, heightToStop: heightToStop}
@@ -164,9 +159,7 @@ LOOP:
logger := log.NewNopLogger()
blockDB := dbm.NewMemDB()
stateDB := blockDB
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
stateStore := sm.NewStore(stateDB)
state, err := sm.MakeGenesisStateFromFile(consensusReplayConfig.GenesisFile())
require.NoError(t, err)
privValidator := loadPrivValidator(consensusReplayConfig)
@@ -289,8 +282,7 @@ func (w *crashingWAL) FlushAndSync() error { return w.next.FlushAndSync() }
func (w *crashingWAL) SearchForEndHeight(
height int64,
options *WALSearchOptions,
) (rd io.ReadCloser, found bool, err error) {
options *WALSearchOptions) (rd io.ReadCloser, found bool, err error) {
return w.next.SearchForEndHeight(height, options)
}
@@ -298,7 +290,7 @@ func (w *crashingWAL) Start() error { return w.next.Start() }
func (w *crashingWAL) Stop() error { return w.next.Stop() }
func (w *crashingWAL) Wait() { w.next.Wait() }
// ------------------------------------------------------------------------------------------
//------------------------------------------------------------------------------------------
type testSim struct {
GenesisState sm.State
Config *cfg.Config
@@ -594,7 +586,7 @@ func TestHandshakeReplayNone(t *testing.T) {
func TestMockProxyApp(t *testing.T) {
sim.CleanupFunc() // clean the test env created in TestSimulateValidatorsChange
logger := log.TestingLogger()
validTxs, invalidTxs := 0, 0
var validTxs, invalidTxs = 0, 0
txIndex := 0
assert.NotPanics(t, func() {
@@ -642,7 +634,7 @@ func TestMockProxyApp(t *testing.T) {
}
func tempWALWithData(data []byte) string {
walFile, err := os.CreateTemp("", "wal")
walFile, err := ioutil.TempFile("", "wal")
if err != nil {
panic(fmt.Sprintf("failed to create temp WAL file: %v", err))
}
@@ -701,9 +693,7 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin
stateDB, genesisState, store = stateAndStore(config, pubKey, kvstore.ProtocolVersion)
}
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
stateStore := sm.NewStore(stateDB)
store.chain = chain
store.commits = commits
@@ -722,9 +712,7 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin
// use a throwaway tendermint state
proxyApp := proxy.NewAppConns(clientCreator2)
stateDB1 := dbm.NewMemDB()
stateStore := sm.NewStore(stateDB1, sm.StoreOptions{
DiscardABCIResponses: false,
})
stateStore := sm.NewStore(stateDB1)
err := stateStore.Save(genesisState)
require.NoError(t, err)
buildAppStateFromChain(proxyApp, stateStore, genesisState, chain, nBlocks, mode)
@@ -800,8 +788,7 @@ func applyBlock(stateStore sm.Store, st sm.State, blk *types.Block, proxyApp pro
}
func buildAppStateFromChain(proxyApp proxy.AppConns, stateStore sm.Store,
state sm.State, chain []*types.Block, nBlocks int, mode uint,
) {
state sm.State, chain []*types.Block, nBlocks int, mode uint) {
// start a new app without handshake, play nBlocks blocks
if err := proxyApp.Start(); err != nil {
panic(err)
@@ -838,6 +825,7 @@ func buildAppStateFromChain(proxyApp proxy.AppConns, stateStore sm.Store,
default:
panic(fmt.Sprintf("unknown mode %v", mode))
}
}
func buildTMStateFromChain(
@@ -846,8 +834,7 @@ func buildTMStateFromChain(
state sm.State,
chain []*types.Block,
nBlocks int,
mode uint,
) sm.State {
mode uint) sm.State {
// run the whole chain against this client to build up the tendermint state
clientCreator := proxy.NewLocalClientCreator(
kvstore.NewPersistentKVStoreApplication(
@@ -904,9 +891,7 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) {
pubKey, err := privVal.GetPubKey()
require.NoError(t, err)
stateDB, state, store := stateAndStore(config, pubKey, appVersion)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
stateStore := sm.NewStore(stateDB)
genDoc, _ := sm.MakeGenesisDocFromFile(config.GenesisFile())
state.LastValidators = state.Validators.Copy()
// mode = 0 for committing all the blocks
@@ -990,8 +975,8 @@ func makeBlocks(n int, state *sm.State, privVal types.PrivValidator) []*types.Bl
}
func makeBlock(state sm.State, lastBlock *types.Block, lastBlockMeta *types.BlockMeta,
privVal types.PrivValidator, height int64,
) (*types.Block, *types.PartSet) {
privVal types.PrivValidator, height int64) (*types.Block, *types.PartSet) {
lastCommit := types.NewCommit(height-1, 0, types.BlockID{}, nil)
if height > 1 {
vote, _ := types.MakeVote(
@@ -1073,8 +1058,8 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) {
case EndHeightMessage:
// if its not the first one, we have a full block
if thisBlockParts != nil {
pbb := new(tmproto.Block)
bz, err := io.ReadAll(thisBlockParts.GetReader())
var pbb = new(tmproto.Block)
bz, err := ioutil.ReadAll(thisBlockParts.GetReader())
if err != nil {
panic(err)
}
@@ -1113,11 +1098,11 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) {
}
}
// grab the last block too
bz, err := io.ReadAll(thisBlockParts.GetReader())
bz, err := ioutil.ReadAll(thisBlockParts.GetReader())
if err != nil {
panic(err)
}
pbb := new(tmproto.Block)
var pbb = new(tmproto.Block)
err = proto.Unmarshal(bz, pbb)
if err != nil {
panic(err)
@@ -1161,12 +1146,9 @@ func readPieceFromWAL(msg *TimedWALMessage) interface{} {
func stateAndStore(
config *cfg.Config,
pubKey crypto.PubKey,
appVersion uint64,
) (dbm.DB, sm.State, *mockBlockStore) {
appVersion uint64) (dbm.DB, sm.State, *mockBlockStore) {
stateDB := dbm.NewMemDB()
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
stateStore := sm.NewStore(stateDB)
state, _ := sm.MakeGenesisStateFromFile(config.GenesisFile())
state.Version.Consensus.App = appVersion
store := newMockBlockStore(config, state.ConsensusParams)
@@ -1200,7 +1182,6 @@ func (bs *mockBlockStore) LoadBlock(height int64) *types.Block { return bs.chain
func (bs *mockBlockStore) LoadBlockByHash(hash []byte) *types.Block {
return bs.chain[int64(len(bs.chain))-1]
}
func (bs *mockBlockStore) LoadBlockMeta(height int64) *types.BlockMeta {
block := bs.chain[height-1]
return &types.BlockMeta{
@@ -1211,11 +1192,9 @@ func (bs *mockBlockStore) LoadBlockMeta(height int64) *types.BlockMeta {
func (bs *mockBlockStore) LoadBlockPart(height int64, index int) *types.Part { return nil }
func (bs *mockBlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) {
}
func (bs *mockBlockStore) LoadBlockCommit(height int64) *types.Commit {
return bs.commits[height-1]
}
func (bs *mockBlockStore) LoadSeenCommit(height int64) *types.Commit {
return bs.commits[height-1]
}
@@ -1246,9 +1225,7 @@ func TestHandshakeUpdatesValidators(t *testing.T) {
pubKey, err := privVal.GetPubKey()
require.NoError(t, err)
stateDB, state, store := stateAndStore(config, pubKey, 0x0)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
stateStore := sm.NewStore(stateDB)
oldValAddr := state.Validators.Validators[0].Address

View File

@@ -4,7 +4,7 @@ import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"runtime/debug"
"sort"
@@ -468,6 +468,7 @@ func (cs *State) AddVote(vote *types.Vote, peerID p2p.ID) (added bool, err error
// SetProposal inputs a proposal.
func (cs *State) SetProposal(proposal *types.Proposal, peerID p2p.ID) error {
if peerID == "" {
cs.internalMsgQueue <- msgInfo{&ProposalMessage{proposal}, ""}
} else {
@@ -480,6 +481,7 @@ func (cs *State) SetProposal(proposal *types.Proposal, peerID p2p.ID) error {
// AddProposalBlockPart inputs a part of the proposal block.
func (cs *State) AddProposalBlockPart(height int64, round int32, part *types.Part, peerID p2p.ID) error {
if peerID == "" {
cs.internalMsgQueue <- msgInfo{&BlockPartMessage{height, round, part}, ""}
} else {
@@ -497,6 +499,7 @@ func (cs *State) SetProposalAndBlock(
parts *types.PartSet,
peerID p2p.ID,
) error {
if err := cs.SetProposal(proposal, peerID); err != nil {
return err
}
@@ -818,7 +821,7 @@ func (cs *State) handleMsg(mi msgInfo) {
// We unlock here to yield to any routines that need to read the RoundState.
// Previously, this code held the lock from the point at which the final block
// part was received until the block executed against the application.
// part was recieved until the block executed against the application.
// This prevented the reactor from being able to retrieve the most updated
// version of the RoundState. The reactor needs the updated RoundState to
// gossip the now completed block.
@@ -934,6 +937,7 @@ func (cs *State) handleTimeout(ti timeoutInfo, rs cstypes.RoundState) {
default:
panic(fmt.Sprintf("invalid timeout step: %v", ti.Step))
}
}
func (cs *State) handleTxsAvailable() {
@@ -966,9 +970,7 @@ func (cs *State) handleTxsAvailable() {
// Used internally by handleTimeout and handleMsg to make state transitions
// Enter: `timeoutNewHeight` by startTime (commitTime+timeoutCommit),
//
// or, if SkipTimeoutCommit==true, after receiving all precommits from (height,round-1)
//
// or, if SkipTimeoutCommit==true, after receiving all precommits from (height,round-1)
// Enter: `timeoutPrecommits` after any +2/3 precommits from (height,round-1)
// Enter: +2/3 precommits for nil at (height,round-1)
// Enter: +2/3 prevotes any or +2/3 precommits for block or any from (height, round)
@@ -1053,9 +1055,7 @@ func (cs *State) needProofBlock(height int64) bool {
// Enter (CreateEmptyBlocks): from enterNewRound(height,round)
// Enter (CreateEmptyBlocks, CreateEmptyBlocksInterval > 0 ):
//
// after enterNewRound(height,round), after timeout of CreateEmptyBlocksInterval
//
// after enterNewRound(height,round), after timeout of CreateEmptyBlocksInterval
// Enter (!CreateEmptyBlocks) : after enterNewRound(height,round), once txs are in the mempool
func (cs *State) enterPropose(height int64, round int32) {
logger := cs.Logger.With("height", height, "round", round)
@@ -1177,6 +1177,7 @@ func (cs *State) isProposalComplete() bool {
}
// if this is false the proposer is lying or we haven't received the POL yet
return cs.Votes.Prevotes(cs.Proposal.POLRound).HasTwoThirdsMajority()
}
// Create the next block to propose and return it. Returns nil block upon error.
@@ -1880,12 +1881,12 @@ func (cs *State) addProposalBlockPart(msg *BlockPartMessage, peerID p2p.ID) (add
)
}
if added && cs.ProposalBlockParts.IsComplete() {
bz, err := io.ReadAll(cs.ProposalBlockParts.GetReader())
bz, err := ioutil.ReadAll(cs.ProposalBlockParts.GetReader())
if err != nil {
return added, err
}
pbb := new(tmproto.Block)
var pbb = new(tmproto.Block)
err = proto.Unmarshal(bz, pbb)
if err != nil {
return added, err
@@ -1950,7 +1951,7 @@ func (cs *State) tryAddVote(vote *types.Vote, peerID p2p.ID) (bool, error) {
// If the vote height is off, we'll just ignore it,
// But if it's a conflicting sig, add it to the cs.evpool.
// If it's otherwise invalid, punish peer.
//nolint: gocritic
// nolint: gocritic
if voteErr, ok := err.(*types.ErrVoteConflictingVotes); ok {
if cs.privValidatorPubKey == nil {
return false, errPubKeyIsNotSet
@@ -2207,11 +2208,10 @@ func (cs *State) voteTime() time.Time {
now := tmtime.Now()
minVoteTime := now
// TODO: We should remove next line in case we don't vote for v in case cs.ProposalBlock == nil,
// even if cs.LockedBlock != nil. See https://github.com/tendermint/tendermint/tree/v0.34.x/spec/.
// even if cs.LockedBlock != nil. See https://docs.tendermint.com/master/spec/.
timeIota := time.Duration(cs.state.ConsensusParams.Block.TimeIotaMs) * time.Millisecond
if cs.LockedBlock != nil {
// See the BFT time spec
// https://github.com/tendermint/tendermint/blob/v0.34.x/spec/consensus/bft-time.md
// See the BFT time spec https://docs.tendermint.com/master/spec/consensus/bft-time.html
minVoteTime = cs.LockedBlock.Time.Add(timeIota)
} else if cs.ProposalBlock != nil {
minVoteTime = cs.ProposalBlock.Time.Add(timeIota)

View File

@@ -47,9 +47,7 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) {
}
blockStoreDB := db.NewMemDB()
stateDB := blockStoreDB
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
stateStore := sm.NewStore(stateDB)
state, err := sm.MakeGenesisState(genDoc)
if err != nil {
return fmt.Errorf("failed to make genesis state: %w", err)

View File

@@ -3,6 +3,7 @@ package consensus
import (
"bytes"
"crypto/rand"
"io/ioutil"
"os"
"path/filepath"
@@ -26,7 +27,7 @@ const (
)
func TestWALTruncate(t *testing.T) {
walDir, err := os.MkdirTemp("", "wal")
walDir, err := ioutil.TempDir("", "wal")
require.NoError(t, err)
defer os.RemoveAll(walDir)
@@ -108,7 +109,7 @@ func TestWALEncoderDecoder(t *testing.T) {
}
func TestWALWrite(t *testing.T) {
walDir, err := os.MkdirTemp("", "wal")
walDir, err := ioutil.TempDir("", "wal")
require.NoError(t, err)
defer os.RemoveAll(walDir)
walFile := filepath.Join(walDir, "wal")
@@ -176,7 +177,7 @@ func TestWALSearchForEndHeight(t *testing.T) {
}
func TestWALPeriodicSync(t *testing.T) {
walDir, err := os.MkdirTemp("", "wal")
walDir, err := ioutil.TempDir("", "wal")
require.NoError(t, err)
defer os.RemoveAll(walDir)
@@ -268,23 +269,18 @@ func BenchmarkWalDecode512B(b *testing.B) {
func BenchmarkWalDecode10KB(b *testing.B) {
benchmarkWalDecode(b, 10*1024)
}
func BenchmarkWalDecode100KB(b *testing.B) {
benchmarkWalDecode(b, 100*1024)
}
func BenchmarkWalDecode1MB(b *testing.B) {
benchmarkWalDecode(b, 1024*1024)
}
func BenchmarkWalDecode10MB(b *testing.B) {
benchmarkWalDecode(b, 10*1024*1024)
}
func BenchmarkWalDecode100MB(b *testing.B) {
benchmarkWalDecode(b, 100*1024*1024)
}
func BenchmarkWalDecode1GB(b *testing.B) {
benchmarkWalDecode(b, 1024*1024*1024)
}

View File

@@ -12,7 +12,7 @@ For any specific algorithm, use its specific module e.g.
## Binary encoding
For Binary encoding, please refer to the [Tendermint encoding specification](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/core/encoding.md).
For Binary encoding, please refer to the [Tendermint encoding specification](https://docs.tendermint.com/master/spec/blockchain/encoding.html).
## JSON Encoding

View File

@@ -3,9 +3,9 @@ package armor
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"golang.org/x/crypto/openpgp/armor" //nolint: staticcheck
"golang.org/x/crypto/openpgp/armor" // nolint: staticcheck
)
func EncodeArmor(blockType string, headers map[string]string, data []byte) string {
@@ -31,7 +31,7 @@ func DecodeArmor(armorStr string) (blockType string, headers map[string]string,
if err != nil {
return "", nil, nil, err
}
data, err = io.ReadAll(block.Body)
data, err = ioutil.ReadAll(block.Body)
if err != nil {
return "", nil, nil, err
}
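For reference, a small usage sketch of the two helpers in the hunk above, round-tripping data through ASCII armor; the block type and headers here are arbitrary example values, not ones mandated by the package:

```go
armored := EncodeArmor("TENDERMINT PRIVATE KEY",
	map[string]string{"kdf": "bcrypt"}, []byte("secret bytes"))

blockType, headers, data, err := DecodeArmor(armored)
if err != nil {
	// malformed armor string
	panic(err)
}
_ = blockType // "TENDERMINT PRIVATE KEY"
_ = headers   // map[string]string{"kdf": "bcrypt"}
_ = data      // []byte("secret bytes")
```

The only behavioral difference in the diff itself is whether `block.Body` is drained with `io.ReadAll` or `ioutil.ReadAll`; the armor format is unchanged.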

View File

@@ -12,19 +12,20 @@ second pre-image attacks. Hence, use this library with caution.
Otherwise you might run into similar issues as, e.g., in early Bitcoin:
https://bitcointalk.org/?topic=102395
*
/ \
/ \
/ \
/ \
* *
/ \ / \
/ \ / \
/ \ / \
* * * h6
/ \ / \ / \
h0 h1 h2 h3 h4 h5
*
/ \
/ \
/ \
/ \
* *
/ \ / \
/ \ / \
/ \ / \
* * * h6
/ \ / \ / \
h0 h1 h2 h3 h4 h5
TODO(ismail): add 2nd pre-image protection or clarify further on how we use this and why this secure.
*/
package merkle

View File

@@ -85,8 +85,8 @@ func (op ValueOp) Run(args [][]byte) ([][]byte, error) {
bz := new(bytes.Buffer)
// Wrap <op.Key, vhash> to hash the KVPair.
encodeByteSlice(bz, op.key) //nolint: errcheck // does not error
encodeByteSlice(bz, vhash) //nolint: errcheck // does not error
encodeByteSlice(bz, op.key) // nolint: errcheck // does not error
encodeByteSlice(bz, vhash) // nolint: errcheck // does not error
kvhash := leafHash(bz.Bytes())
if !bytes.Equal(kvhash, op.Proof.LeafHash) {

View File

@@ -47,10 +47,10 @@ func HashFromByteSlices(items [][]byte) []byte {
//
// These preliminary results suggest:
//
// 1. The performance of the HashFromByteSlice is pretty good
// 2. Go has low overhead for recursive functions
// 3. The performance of the HashFromByteSlice routine is dominated
// by the actual hashing of data
// 1. The performance of the HashFromByteSlice is pretty good
// 2. Go has low overhead for recursive functions
// 3. The performance of the HashFromByteSlice routine is dominated
// by the actual hashing of data
//
// Although this work is in no way exhaustive, point #3 suggests that
// optimization of this routine would need to take an alternative
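As a quick usage sketch of the routine the comment above benchmarks (assuming the exported `HashFromByteSlices` in `crypto/merkle`):

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/crypto/merkle"
)

func main() {
	// Merkle root over three leaves, e.g. serialized transactions.
	leaves := [][]byte{[]byte("tx1"), []byte("tx2"), []byte("tx3")}
	root := merkle.HashFromByteSlices(leaves)
	fmt.Printf("root: %X\n", root) // 32-byte SHA-256 based root
}
```

Per the comment, most of the cost of this call is the hashing of the data itself rather than the recursion.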

View File

@@ -9,13 +9,13 @@ import (
"math/big"
secp256k1 "github.com/btcsuite/btcd/btcec"
"golang.org/x/crypto/ripemd160" //nolint: staticcheck // necessary for Bitcoin address format
"golang.org/x/crypto/ripemd160" // nolint: staticcheck // necessary for Bitcoin address format
"github.com/tendermint/tendermint/crypto"
tmjson "github.com/tendermint/tendermint/libs/json"
)
// -------------------------------------
//-------------------------------------
const (
PrivKeyName = "tendermint/PrivKeySecp256k1"
PubKeyName = "tendermint/PubKeySecp256k1"
@@ -124,8 +124,8 @@ func GenPrivKeySecp256k1(secret []byte) PrivKey {
// used to reject malleable signatures
// see:
// - https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/signature_nocgo.go#L90-L93
// - https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/crypto.go#L39
// - https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/signature_nocgo.go#L90-L93
// - https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/crypto.go#L39
var secp256k1halfN = new(big.Int).Rsh(secp256k1.S256().N, 1)
// Sign creates an ECDSA signature on curve Secp256k1, using SHA256 on the msg.
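To make the comment above concrete: `secp256k1halfN` is N/2 for the curve order N, and a signature whose S component lies above that threshold is treated as malleable (its canonical twin uses N - S instead). A hypothetical helper expressing the check, not the package's literal code:

```go
// isLowS reports whether s is in the canonical, non-malleable half of the
// range, i.e. s <= N/2. The package's signing and verification code folds
// this rule into signature creation and rejection respectively.
func isLowS(s *big.Int) bool {
	return s.Cmp(secp256k1halfN) <= 0
}
```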

View File

@@ -1,6 +1,14 @@
module.exports = {
theme: 'cosmos',
title: 'Tendermint Core',
// locales: {
// "/": {
// lang: "en-US"
// },
// "/ru/": {
// lang: "ru"
// }
// },
base: process.env.VUEPRESS_BASE,
themeConfig: {
repo: 'tendermint/tendermint',
@@ -15,12 +23,16 @@ module.exports = {
},
versions: [
{
"label": "v0.34 (latest)",
"label": "v0.33",
"key": "v0.33"
},
{
"label": "v0.34",
"key": "v0.34"
},
{
"label": "v0.33",
"key": "v0.33"
"label": "v0.35",
"key": "v0.35"
}
],
topbar: {
@@ -33,8 +45,10 @@ module.exports = {
title: 'Resources',
children: [
{
// TODO(creachadair): Figure out how to make this per-branch.
// See: https://github.com/tendermint/tendermint/issues/7908
title: 'RPC',
path: (process.env.VUEPRESS_BASE ? process.env.VUEPRESS_BASE : '/')+'rpc/',
path: 'https://docs.tendermint.com/v0.35/rpc/',
static: true
},
]
@@ -45,9 +59,9 @@ module.exports = {
title: 'Help & Support',
editLink: true,
forum: {
title: 'Tendermint Discussions',
text: 'Join the Tendermint discussions to learn more',
url: 'https://github.com/tendermint/tendermint/discussions',
title: 'Tendermint Forum',
text: 'Join the Tendermint forum to learn more',
url: 'https://forum.cosmos.network/c/tendermint',
bg: '#0B7E0B',
logo: 'tendermint'
},
@@ -58,7 +72,7 @@ module.exports = {
},
footer: {
question: {
text: 'Chat with Tendermint developers in <a href=\'https://discord.gg/vcExX9T\' target=\'_blank\'>Discord</a> or reach out on <a href=\'https://github.com/tendermint/tendermint/discussions\' target=\'_blank\'>GitHub</a> to learn more.'
text: 'Chat with Tendermint developers in <a href=\'https://discord.gg/vcExX9T\' target=\'_blank\'>Discord</a> or reach out on the <a href=\'https://forum.cosmos.network/c/tendermint\' target=\'_blank\'>Tendermint Forum</a> to learn more.'
},
logo: '/logo-bw.svg',
textLink: {
@@ -92,7 +106,7 @@ module.exports = {
}
],
smallprint:
'The development of Tendermint Core was led primarily by All in Bits, Inc. The Tendermint trademark is owned by New Tendermint, LLC.'
'The development of Tendermint Core is led primarily by [Interchain GmbH](https://interchain.berlin/). Funding for this development comes primarily from the Interchain Foundation, a Swiss non-profit. The Tendermint trademark is owned by Tendermint Inc, the for-profit entity that also maintains this website.',
links: [
{
title: 'Documentation',
@@ -115,8 +129,8 @@ module.exports = {
url: 'https://medium.com/@tendermint'
},
{
title: 'GitHub Discussions',
url: 'https://github.com/tendermint/tendermint/discussions'
title: 'Forum',
url: 'https://forum.cosmos.network/c/tendermint'
}
]
},

View File

@@ -1,66 +1 @@
/redirects/master/ /main/
/redirects/master/spec/core/state.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/core/state.md
/redirects/master/spec/core/encoding.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/core/encoding.md
/redirects/master/spec/core/genesis.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/core/genesis.md
/redirects/master/spec/core/data_structures.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/core/data_structures.md
/redirects/master/spec/core/index.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/core/readme.md
/redirects/master/spec/p2p/messages/pex.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/p2p/messages/pex.md
/redirects/master/spec/p2p/messages/mempool.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/p2p/messages/mempool.md
/redirects/master/spec/p2p/messages/index.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/p2p/messages/README.md
/redirects/master/spec/p2p/messages/block-sync.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/p2p/messages/block-sync.md
/redirects/master/spec/p2p/messages/state-sync.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/p2p/messages/state-sync.md
/redirects/master/spec/p2p/messages/consensus.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/p2p/messages/consensus.md
/redirects/master/spec/p2p/messages/evidence.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/p2p/messages/evidence.md
/redirects/master/spec/p2p/peer.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/p2p/peer.md
/redirects/master/spec/p2p/connection.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/p2p/connection.md
/redirects/master/spec/p2p/config.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/p2p/config.md
/redirects/master/spec/p2p/node.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/p2p/node.md
/redirects/master/spec/p2p/index.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/p2p/readme.md
/redirects/master/spec/index.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/README.md
/redirects/master/spec/ivy-proofs/index.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/ivy-proofs/README.md
/redirects/master/spec/consensus/proposer-selection.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/consensus/proposer-selection.md
/redirects/master/spec/consensus/creating-proposal.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/consensus/creating-proposal.md
/redirects/master/spec/consensus/proposer-based-timestamp/pbts-sysmodel_001_draft.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/consensus/proposer-based-timestamp/pbts-sysmodel_001_draft.md
/redirects/master/spec/consensus/proposer-based-timestamp/index.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/consensus/proposer-based-timestamp/README.md
/redirects/master/spec/consensus/proposer-based-timestamp/pbts-algorithm_001_draft.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/consensus/proposer-based-timestamp/pbts-algorithm_001_draft.md
/redirects/master/spec/consensus/proposer-based-timestamp/pbts_001_draft.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/consensus/proposer-based-timestamp/pbts_001_draft.md
/redirects/master/spec/consensus/light-client/index.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/consensus/light-client/README.md
/redirects/master/spec/consensus/light-client/accountability.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/consensus/light-client/accountability.md
/redirects/master/spec/consensus/light-client/detection.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/consensus/light-client/detection.md
/redirects/master/spec/consensus/light-client/verification.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/consensus/light-client/verification.md
/redirects/master/spec/consensus/consensus-paper/index.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/consensus/consensus-paper/README.md
/redirects/master/spec/consensus/signing.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/consensus/signing.md
/redirects/master/spec/consensus/consensus.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/consensus/consensus.md
/redirects/master/spec/consensus/evidence.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/consensus/evidence.md
/redirects/master/spec/consensus/bft-time.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/consensus/bft-time.md
/redirects/master/spec/consensus/wal.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/consensus/wal.md
/redirects/master/spec/consensus/index.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/consensus/readme.md
/redirects/master/spec/light-client/index.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/light-client/README.md
/redirects/master/spec/light-client/detection/index.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/light-client/detection/README.md
/redirects/master/spec/light-client/detection/req-ibc-detection.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/light-client/detection/req-ibc-detection.md
/redirects/master/spec/light-client/detection/detection_001_reviewed.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/light-client/detection/detection_001_reviewed.md
/redirects/master/spec/light-client/detection/draft-functions.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/light-client/detection/draft-functions.md
/redirects/master/spec/light-client/detection/discussions.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/light-client/detection/discussions.md
/redirects/master/spec/light-client/detection/detection_003_reviewed.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/light-client/detection/detection_003_reviewed.md
/redirects/master/spec/light-client/accountability/index.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/light-client/accountability/README.md
/redirects/master/spec/light-client/accountability/results/001indinv-apalache-report.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/light-client/accountability/results/001indinv-apalache-report.md
/redirects/master/spec/light-client/accountability/Synopsis.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/light-client/accountability/Synopsis.md
/redirects/master/spec/light-client/verification/verification_003_draft.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/light-client/verification/verification_003_draft.md
/redirects/master/spec/light-client/verification/index.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/light-client/verification/README.md
/redirects/master/spec/light-client/verification/verification_001_published.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/light-client/verification/verification_001_published.md
/redirects/master/spec/light-client/verification/verification_002_draft.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/light-client/verification/verification_002_draft.md
/redirects/master/spec/light-client/supervisor/supervisor_002_draft.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/light-client/supervisor/supervisor_002_draft.md
/redirects/master/spec/light-client/supervisor/supervisor_001_draft.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/light-client/supervisor/supervisor_001_draft.md
/redirects/master/spec/light-client/attacks/isolate-attackers_001_draft.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/light-client/attacks/isolate-attackers_001_draft.md
/redirects/master/spec/light-client/attacks/notes-on-evidence-handling.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/light-client/attacks/notes-on-evidence-handling.md
/redirects/master/spec/light-client/attacks/isolate-attackers_002_reviewed.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/light-client/attacks/isolate-attackers_002_reviewed.md
/redirects/master/spec/abci/index.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/abci/README.md
/redirects/master/spec/abci/client-server.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/abci/client-server.md
/redirects/master/spec/abci/apps.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/abci/apps.md
/redirects/master/spec/abci/abci.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/abci/abci.md
/redirects/master/spec/rpc/index.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/rpc/README.md
/redirects/master/spec/blockchain/state.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/blockchain/state.md
/redirects/master/spec/blockchain/encoding.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/blockchain/encoding.md
/redirects/master/spec/blockchain/blockchain.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/blockchain/blockchain.md
/redirects/master/spec/blockchain/index.html https://github.com/tendermint/tendermint/blob/v0.34.x/spec/blockchain/readme.md
/redirects/master/tutorials/go.html /v0.34/tutorials/go.html
/master/ /v0.35/

View File

@@ -2,38 +2,39 @@
The documentation for Tendermint Core is hosted at:
- <https://docs.tendermint.com/>
- <https://docs.tendermint.com/master/>
built from the files in this (`/docs`) directory.
built from the files in this (`/docs`) directory for
[master](https://github.com/tendermint/tendermint/tree/master/docs) respectively.
## How It Works
There is a [GitHub Action](../.github/workflows/docs-deployment.yml) that is
triggered by changes in the `/docs` directory on `main` as well as the branch of
each major supported version (e.g. `v0.34.x`). Any updates to files in this
directory on those branches will automatically trigger a website deployment.
There is a CircleCI job listening for changes in the `/docs` directory, on both
the `master` branch. Any updates to files in this directory
on those branches will automatically trigger a website deployment. Under the hood,
the private website repository has a `make build-docs` target consumed by a CircleCI job in that repo.
## README
The [README.md](./README.md) is also the landing page for the documentation on
the website.
The [README.md](./README.md) is also the landing page for the documentation
on the website. During the Jenkins build, the current commit is added to the bottom
of the README.
## Config.js
The [config.js](./.vuepress/config.js) generates the sidebar and Table of
Contents on the website docs. Note the use of relative links and the omission of
file extensions. Additional features are available to improve the look of the
sidebar.
The [config.js](./.vuepress/config.js) generates the sidebar and Table of Contents
on the website docs. Note the use of relative links and the omission of
file extensions. Additional features are available to improve the look
of the sidebar.
## Links
**NOTE:** Strongly consider the existing links - both within this directory and
to the website docs - when moving or deleting files.
**NOTE:** Strongly consider the existing links - both within this directory
and to the website docs - when moving or deleting files.
Links to directories _MUST_ end in a `/`.
Relative links should be used nearly everywhere, having discovered and weighed
the following:
Relative links should be used nearly everywhere, having discovered and weighed the following:
### Relative
@@ -64,8 +65,7 @@ Make sure you are in the `docs` directory and run the following commands:
rm -rf node_modules
```
This command will remove old version of the visual theme and required packages.
This step is optional.
This command will remove old version of the visual theme and required packages. This step is optional.
```bash
npm install
@@ -79,24 +79,17 @@ npm run serve
<!-- markdown-link-check-disable -->
Run `pre` and `post` hooks and start a hot-reloading web-server. See output of
this command for the URL (it is often <https://localhost:8080>).
Run `pre` and `post` hooks and start a hot-reloading web-server. See output of this command for the URL (it is often <https://localhost:8080>).
<!-- markdown-link-check-enable -->
To build documentation as a static website run `npm run build`. You will find
the website in `.vuepress/dist` directory.
To build documentation as a static website run `npm run build`. You will find the website in `.vuepress/dist` directory.
## Search
We are using [Algolia](https://www.algolia.com) to power full-text search. This
uses a public API search-only key in the `config.js` as well as a
[tendermint.json](https://github.com/algolia/docsearch-configs/blob/master/configs/tendermint.json)
configuration file that we can update with PRs.
We are using [Algolia](https://www.algolia.com) to power full-text search. This uses a public API search-only key in the `config.js` as well as a [tendermint.json](https://github.com/algolia/docsearch-configs/blob/master/configs/tendermint.json) configuration file that we can update with PRs.
## Consistency
Because the build processes are identical (as is the information contained
herein), this file should be kept in sync as much as possible with its
[counterpart in the Cosmos SDK
repo](https://github.com/cosmos/cosmos-sdk/blob/master/docs/DOCS_README.md).
Because the build processes are identical (as is the information contained herein), this file should be kept in sync as
much as possible with its [counterpart in the Cosmos SDK repo](https://github.com/cosmos/cosmos-sdk/blob/master/docs/DOCS_README.md).

View File

@@ -14,29 +14,20 @@ of a web-server, database, and supporting libraries for blockchain applications
written in any programming language. Like a web-server serving web applications,
Tendermint serves blockchain applications.
More formally, Tendermint Core performs Byzantine Fault Tolerant (BFT) State
Machine Replication (SMR) for arbitrary deterministic, finite state machines.
More formally, Tendermint Core performs Byzantine Fault Tolerant (BFT)
State Machine Replication (SMR) for arbitrary deterministic, finite state machines.
For more background, see [What is
Tendermint?](introduction/what-is-tendermint.md).
To get started quickly with an example application, see the [quick start
guide](introduction/quick-start.md).
To get started quickly with an example application, see the [quick start guide](introduction/quick-start.md).
To learn about application development on Tendermint, see the [Application
Blockchain
Interface](https://github.com/tendermint/tendermint/tree/v0.34.x/spec/abci).
To learn about application development on Tendermint, see the [Application Blockchain Interface](https://github.com/tendermint/spec/tree/master/spec/abci).
For more details on using Tendermint, see the respective documentation for
[Tendermint Core](tendermint-core/), [benchmarking and monitoring](tools/), and
[network deployments](networks/).
[Tendermint Core](tendermint-core/), [benchmarking and monitoring](tools/), and [network deployments](networks/).
To find out about the Tendermint ecosystem you can go
[here](https://github.com/tendermint/awesome#ecosystem). If you are a project
that is using Tendermint you are welcome to make a PR to add your project to the
list.
To find out about the Tendermint ecosystem you can go [here](https://github.com/tendermint/awesome#ecosystem). If you are a project that is using Tendermint you are welcome to make a PR to add your project to the list.
## Contribute
To contribute to the documentation, see [this
file](https://github.com/tendermint/tendermint/blob/main/docs/DOCS_README.md)
for details of the build process and considerations when making changes.
To contribute to the documentation, see [this file](https://github.com/tendermint/tendermint/blob/master/docs/DOCS_README.md) for details of the build process and considerations when making changes.

View File

@@ -138,7 +138,7 @@ response.
The server may be generic for a particular language, and we provide a
[reference implementation in
Golang](https://github.com/tendermint/tendermint/tree/v0.34.x/abci/server). See the
Golang](https://github.com/tendermint/tendermint/tree/master/abci/server). See the
[list of other ABCI implementations](https://github.com/tendermint/awesome#ecosystem) for servers in
other languages.
@@ -325,7 +325,7 @@ But the ultimate flexibility comes from being able to write the
application easily in any language.
We have implemented the counter in a number of languages [see the
example directory](https://github.com/tendermint/tendermint/tree/v0.34.x/abci/example).
example directory](https://github.com/tendermint/tendermint/tree/master/abci/example).
To run the Node.js version, first download & install [the Javascript ABCI server](https://github.com/tendermint/js-abci):
@@ -349,8 +349,8 @@ the same results as for the Go version.
Want to write the counter app in your favorite language?! We'd be happy
to add you to our [ecosystem](https://github.com/tendermint/awesome#ecosystem)!
TODO link to bounties page.
See [funding](https://github.com/interchainio/funding) opportunities from the
[Interchain Foundation](https://interchain.io/) for implementations in new languages and more.
The `abci-cli` is designed strictly for testing and debugging. In a real
deployment, the role of sending messages is taken by Tendermint, which

View File

@@ -55,6 +55,6 @@ Tendermint.
See the following for more extensive documentation:
- [Interchain Standard for the Light-Client REST API](https://github.com/cosmos/cosmos-sdk/pull/1028)
- [Tendermint RPC Docs](https://docs.tendermint.com/v0.34/rpc/)
- [Tendermint RPC Docs](https://docs.tendermint.com/master/rpc/)
- [Tendermint in Production](../tendermint-core/running-in-production.md)
- [ABCI spec](https://github.com/tendermint/spec/tree/95cf253b6df623066ff7cd4074a94e7a3f147c7a/spec/abci)

View File

@@ -15,7 +15,7 @@ the block itself is never stored.
Each event contains a type and a list of attributes, which are key-value pairs
denoting something about what happened during the method's execution. For more
details on `Events`, see the
[ABCI](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/abci/abci.md#events)
[ABCI](https://github.com/tendermint/spec/blob/master/spec/abci/abci.md#events)
documentation.
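To illustrate the shape just described, here is a hedged sketch of an application attaching such an event to a `DeliverTx` response, assuming the v0.34 `abci/types` field layout (byte-slice keys and values with an `Index` flag):

```go
import abcitypes "github.com/tendermint/tendermint/abci/types"

res := abcitypes.ResponseDeliverTx{
	Code: 0,
	Events: []abcitypes.Event{{
		Type: "transfer",
		Attributes: []abcitypes.EventAttribute{
			// Indexed attribute, queryable under the composite key transfer.sender.
			{Key: []byte("sender"), Value: []byte("cosmos1..."), Index: true},
		},
	}},
}
```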
An `Event` has a composite key associated with it. A `compositeKey` is
@@ -146,7 +146,7 @@ You can query for a paginated set of transaction by their events by calling the
curl "localhost:26657/tx_search?query=\"message.sender='cosmos1...'\"&prove=true"
```
Check out [API docs](https://docs.tendermint.com/v0.34/rpc/#/Info/tx_search)
Check out [API docs](https://docs.tendermint.com/master/rpc/#/Info/tx_search)
for more information on query syntax and other options.
## Subscribing to Transactions
@@ -165,7 +165,7 @@ a query to `/subscribe` RPC endpoint.
}
```
Check out [API docs](https://docs.tendermint.com/v0.34/rpc/#subscribe) for more information
Check out [API docs](https://docs.tendermint.com/master/rpc/#subscribe) for more information
on query syntax and other options.
## Querying Blocks Events
@@ -177,5 +177,5 @@ You can query for a paginated set of blocks by their events by calling the
curl "localhost:26657/block_search?query=\"block.height > 10 AND val_set.num_changed > 0\""
```
Check out [API docs](https://docs.tendermint.com/v0.34/rpc/#/Info/block_search)
Check out [API docs](https://docs.tendermint.com/master/rpc/#/Info/block_search)
for more information on query syntax and other options.
View File
@@ -120,7 +120,7 @@ consensus engine, and provides a particular application state.
## ABCI Overview
The [Application BlockChain Interface
(ABCI)](https://github.com/tendermint/tendermint/tree/v0.34.x/abci)
(ABCI)](https://github.com/tendermint/tendermint/tree/master/abci)
allows for Byzantine Fault Tolerant replication of applications
written in any programming language.
@@ -180,15 +180,15 @@ The application will be responsible for
- Allowing clients to query the UTXO database.
Tendermint is able to decompose the blockchain design by offering a very
simple API (i.e. the ABCI) between the application process and consensus
simple API (ie. the ABCI) between the application process and consensus
process.
The ABCI consists of 3 primary message types that get delivered from the
core to the application. The application replies with corresponding
response messages.
The messages are specified in the [ABCI
specification](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/abci/abci.md).
The messages are specified here: [ABCI Message
Types](https://github.com/tendermint/tendermint/blob/master/abci/README.md#message-types).
The **DeliverTx** message is the work horse of the application. Each
transaction in the blockchain is delivered with this message. The

View File
## Install
NOTE: see the [integration bash
script](https://github.com/tendermint/tendermint/blob/v0.34.x/networks/remote/integration.sh)
script](https://github.com/tendermint/tendermint/blob/master/networks/remote/integration.sh)
that can be run on a fresh DO droplet and will automatically spin up a 4
node testnet. The script more or less does everything described below.
@@ -58,7 +58,7 @@ With the droplets created and running, let's setup Ansible.
## Ansible
The playbooks in [the ansible
directory](https://github.com/tendermint/tendermint/tree/v0.34.x/networks/remote/ansible)
directory](https://github.com/tendermint/tendermint/tree/master/networks/remote/ansible)
run ansible roles to configure the sentry node architecture. You must
switch to this directory to run ansible
(`cd $GOPATH/src/github.com/tendermint/tendermint/networks/remote/ansible`).
24
docs/package-lock.json generated
View File
@@ -8876,9 +8876,9 @@
}
},
"node_modules/minimist": {
"version": "1.2.6",
"resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.6.tgz",
"integrity": "sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q=="
"version": "1.2.5",
"resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.5.tgz",
"integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw=="
},
"node_modules/mississippi": {
"version": "3.0.0",
@@ -13045,9 +13045,9 @@
}
},
"node_modules/url-parse": {
"version": "1.5.10",
"resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.5.10.tgz",
"integrity": "sha512-WypcfiRhfeUP9vvF0j6rw0J3hrWrw6iZv3+22h6iRMJ/8z1Tj6XfLP4DsUix5MhMPnXpiHDoKyoZ/bdCkwBCiQ==",
"version": "1.5.7",
"resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.5.7.tgz",
"integrity": "sha512-HxWkieX+STA38EDk7CE9MEryFeHCKzgagxlGvsdS7WBImq9Mk+PGwiT56w82WI3aicwJA8REp42Cxo98c8FZMA==",
"dependencies": {
"querystringify": "^2.1.1",
"requires-port": "^1.0.0"
@@ -21113,9 +21113,9 @@
}
},
"minimist": {
"version": "1.2.6",
"resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.6.tgz",
"integrity": "sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q=="
"version": "1.2.5",
"resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.5.tgz",
"integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw=="
},
"mississippi": {
"version": "3.0.0",
@@ -24536,9 +24536,9 @@
}
},
"url-parse": {
"version": "1.5.10",
"resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.5.10.tgz",
"integrity": "sha512-WypcfiRhfeUP9vvF0j6rw0J3hrWrw6iZv3+22h6iRMJ/8z1Tj6XfLP4DsUix5MhMPnXpiHDoKyoZ/bdCkwBCiQ==",
"version": "1.5.7",
"resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.5.7.tgz",
"integrity": "sha512-HxWkieX+STA38EDk7CE9MEryFeHCKzgagxlGvsdS7WBImq9Mk+PGwiT56w82WI3aicwJA8REp42Cxo98c8FZMA==",
"requires": {
"querystringify": "^2.1.1",
"requires-port": "^1.0.0"
View File
@@ -1,22 +0,0 @@
---
order: 1
parent:
title: Tendermint Quality Assurance
description: This is a report on the process followed and results obtained when running v0.34.x on testnets
order: 2
---
# Tendermint Quality Assurance
This directory keeps track of the process followed by the Tendermint Core team
for Quality Assurance before cutting a release.
This directory is to live in multiple branches. On each release branch,
the contents of this directory reflect the status of the process
at the time the Quality Assurance process was applied for that release.
File [method](./method.md) keeps track of the process followed to obtain the results
used to decide if a release is passing the Quality Assurance process.
The results obtained in each release are stored in their own directory.
The following releases have undergone the Quality Assurance process:
* [v0.34.x](./v034/), which was tested just before releasing v0.34.22
View File
@@ -1,214 +0,0 @@
---
order: 1
title: Method
---
# Method
This document provides a detailed description of the QA process.
It is intended to be used by engineers reproducing the experimental setup for future tests of Tendermint.
The (first iteration of the) QA process as described [in the RELEASES.md document][releases]
was applied to version v0.34.x in order to have a set of results acting as a benchmarking baseline.
This baseline is then compared with results obtained in later versions.
Out of the testnet-based test cases described in [the releases document][releases] we focused on two of them:
_200 Node Test_, and _Rotating Nodes Test_.
[releases]: https://github.com/tendermint/tendermint/blob/v0.37.x/RELEASES.md#large-scale-testnets
## Software Dependencies
### Infrastructure Requirements to Run the Tests
* An account at Digital Ocean (DO), with a high droplet limit (>202)
* The machine to orchestrate the tests should have the following installed:
* A clone of the [testnet repository][testnet-repo]
* This repository contains all the scripts mentioned in the remainder of this section
* [Digital Ocean CLI][doctl]
* [Terraform CLI][Terraform]
* [Ansible CLI][Ansible]
[testnet-repo]: https://github.com/interchainio/tendermint-testnet
[Ansible]: https://docs.ansible.com/ansible/latest/index.html
[Terraform]: https://www.terraform.io/docs
[doctl]: https://docs.digitalocean.com/reference/doctl/how-to/install/
### Requirements for Result Extraction
* Matlab or Octave
* [Prometheus][prometheus] server installed
* blockstore DB of one of the full nodes in the testnet
* Prometheus DB
[prometheus]: https://prometheus.io/
## 200 Node Testnet
### Running the test
This section explains how the tests were carried out for reproducibility purposes.
1. [If you haven't done it before]
Follow steps 1-4 of the `README.md` at the top of the testnet repository to configure Terraform, and `doctl`.
2. Copy file `testnets/testnet200.toml` onto `testnet.toml` (do NOT commit this change)
3. Set the variable `VERSION_TAG` in the `Makefile` to the git hash that is to be tested.
4. Follow steps 5-10 of the `README.md` to configure and start the 200 node testnet
* WARNING: Do NOT forget to run `make terraform-destroy` as soon as you are done with the tests (see step 9)
5. As a sanity check, connect to the Prometheus node's web interface and check the graph for the `tendermint_consensus_height` metric.
All nodes should be increasing their heights.
6. `ssh` into the `testnet-load-runner`, then copy script `script/200-node-loadscript.sh` and run it from the load runner node.
* Before running it, you need to edit the script to provide the IP address of a full node.
This node will receive all transactions from the load runner node.
* This script will take about 40 mins to run
* It is running 90-seconds-long experiments in a loop with different loads
7. Run `make retrieve-data` to gather all relevant data from the testnet into the orchestrating machine
8. Verify that the data was collected without errors
* at least one blockstore DB for a Tendermint validator
* the Prometheus database from the Prometheus node
* for extra care, you can run `zip -T` on the `prometheus.zip` file and (one of) the `blockstore.db.zip` file(s)
9. **Run `make terraform-destroy`**
* Don't forget to type `yes`! Otherwise you're in trouble.
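For orientation, the shell-level commands named in the steps above can be strung together as follows. This is only a sketch: the `make` targets come from the testnet repository's `Makefile`, and the repository's `README.md` remains the authoritative source for the steps it covers.

```bash
# Sketch of the shell side of a 200 node run, using only commands named in the steps above.
cp testnets/testnet200.toml testnet.toml   # step 2 -- do NOT commit this change
# step 3: edit VERSION_TAG in the Makefile to the git hash under test
# steps 4-6: follow steps 5-10 of the repository README to start the testnet,
#            then check tendermint_consensus_height on the Prometheus node
make retrieve-data                         # step 7: pull the blockstore and Prometheus DBs
zip -T prometheus.zip                      # step 8 (extra care): test the retrieved archive
make terraform-destroy                     # step 9: tear everything down (type 'yes')
```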
### Result Extraction
The method for extracting the results described here is highly manual (and exploratory) at this stage.
The Core team should improve it at every iteration to increase the amount of automation.
#### Steps
1. Unzip the blockstore into a directory
2. Extract the latency report and the raw latencies for all the experiments. Run these commands from the directory containing the blockstore
* `go run github.com/tendermint/tendermint/test/loadtime/cmd/report@3ec6e424d --database-type goleveldb --data-dir ./ > results/report.txt`
* `go run github.com/tendermint/tendermint/test/loadtime/cmd/report@3ec6e424d --database-type goleveldb --data-dir ./ --csv results/raw.csv`
3. File `report.txt` contains an unordered list of experiments with varying concurrent connections and transaction rate
* Create files `report01.txt`, `report02.txt`, `report04.txt` and, for each experiment in file `report.txt`,
copy its related lines to the filename that matches the number of connections.
* Sort the experiments in `report01.txt` in ascending tx rate order. Likewise for `report02.txt` and `report04.txt`.
4. Generate file `report_tabbed.txt` by showing the contents of `report01.txt`, `report02.txt`, `report04.txt` side by side (one possible way to do this is sketched below)
* This effectively creates a table where rows are a particular tx rate and columns are a particular number of websocket connections.
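One possible way to produce `report_tabbed.txt` is to merge the three per-connection reports column-wise with `pr`. The snippet below is only a sketch; it assumes `report01.txt`, `report02.txt` and `report04.txt` exist in the current directory, are already sorted as described above, and that a 240-column width is enough to fit the three reports side by side.

```bash
# Merge the per-connection reports into parallel columns
# (-m merges the files side by side, -t omits page headers, -w sets the output width).
pr -m -t -w 240 report01.txt report02.txt report04.txt > report_tabbed.txt
```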
5. Extract the raw latencies from file `raw.csv` using the following bash loop. This creates a `.csv` file and a `.dat` file per experiment.
The format of the `.dat` files is amenable to loading them as matrices in Octave
```bash
uuids=($(cat report01.txt report02.txt report04.txt | grep '^Experiment ID: ' | awk '{ print $3 }'))
c=0 # bash arrays are zero-indexed, so start at 0 to line up with the first experiment UUID
for i in 01 02 04; do
for j in 0025 0050 0100 0200; do
echo $i $j $c "${uuids[$c]}"
filename=c${i}_r${j}
grep ${uuids[$c]} raw.csv > ${filename}.csv
cat ${filename}.csv | tr , ' ' | awk '{ print $2, $3 }' > ${filename}.dat
c=$(expr $c + 1)
done
done
```
6. Enter Octave
7. Load all `.dat` files generated in step 5 into matrices using this Octave code snippet
```octave
conns = { "01"; "02"; "04" };
rates = { "0025"; "0050"; "0100"; "0200" };
for i = 1:length(conns)
for j = 1:length(rates)
filename = strcat("c", conns{i}, "_r", rates{j}, ".dat");
load("-ascii", filename);
endfor
endfor
```
8. Set variable release to the current release undergoing QA
```octave
release = "v0.34.x";
```
9. Generate a plot with all (or some) experiments, where the x axis is the experiment time,
and the y axis is the latency of transactions.
The following snippet plots all experiments.
```octave
legends = {};
hold off;
for i = 1:length(conns)
for j = 1:length(rates)
data_name = strcat("c", conns{i}, "_r", rates{j});
l = strcat("c=", conns{i}, " r=", rates{j});
m = eval(data_name); plot((m(:,1) - min(m(:,1))) / 1e+9, m(:,2) / 1e+9, ".");
hold on;
legends(1, end+1) = l;
endfor
endfor
legend(legends, "location", "northeastoutside");
xlabel("experiment time (s)");
ylabel("latency (s)");
t = sprintf("200-node testnet - %s", release);
title(t);
```
10. Consider adjusting the axis, in case you want to compare your results to the baseline, for instance
```octave
axis([0, 100, 0, 30], "tic");
```
11. Use Octave's GUI menu to save the plot (e.g. as `.png`)
12. Repeat steps 9 and 10 to obtain as many plots as deemed necessary.
13. To generate a latency vs throughput plot, using the raw CSV file generated
in step 2, follow the instructions for the [`latency_throughput.py`] script.
[`latency_throughput.py`]: ../../scripts/qa/reporting/README.md
#### Extracting Prometheus Metrics
1. Stop the Prometheus server if it is running as a service (e.g. a `systemd` unit).
2. Unzip the Prometheus database retrieved from the testnet, and move it to replace the
local Prometheus database.
3. Start the Prometheus server and make sure no error logs appear at start up.
4. Introduce the metrics you want to gather or plot.
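The restored database can also be inspected from the command line through Prometheus' HTTP API. The snippet below is a sketch that assumes the server listens on the default `localhost:9090`, that `jq` is installed, and uses the `tendermint_consensus_height` metric mentioned above; the time range is a placeholder to be replaced with the experiment's actual window.

```bash
# Instant query: confirm the metric is present in the restored database.
curl -sG 'http://localhost:9090/api/v1/query' \
  --data-urlencode 'query=tendermint_consensus_height' | jq '.data.result | length'

# Range query over a placeholder window, sampled every 15 seconds.
curl -sG 'http://localhost:9090/api/v1/query_range' \
  --data-urlencode 'query=tendermint_consensus_height' \
  --data-urlencode 'start=2022-10-01T00:00:00Z' \
  --data-urlencode 'end=2022-10-01T01:00:00Z' \
  --data-urlencode 'step=15s' | jq '.data.result | length'
```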
## Rotating Node Testnet
### Running the test
This section explains how the tests were carried out for reproducibility purposes.
1. [If you haven't done it before]
Follow steps 1-4 of the `README.md` at the top of the testnet repository to configure Terraform, and `doctl`.
2. Copy file `testnet_rotating.toml` onto `testnet.toml` (do NOT commit this change)
3. Set variable `VERSION_TAG` to the git hash that is to be tested.
4. Run `make terraform-apply EPHEMERAL_SIZE=25`
* WARNING: Do NOT forget to run `make terraform-destroy` as soon as you are done with the tests
5. Follow steps 6-10 of the `README.md` to configure and start the "stable" part of the rotating node testnet
6. As a sanity check, connect to the Prometheus node's web interface and check the graph for the `tendermint_consensus_height` metric.
All nodes should be increasing their heights.
7. On a different shell,
* run `make runload ROTATE_CONNECTIONS=X ROTATE_TX_RATE=Y`
* `X` and `Y` should reflect a load below the saturation point (see, e.g.,
[this paragraph](./v034/README.md#finding-the-saturation-point) for further info)
8. Run `make rotate` to start the script that creates the ephemeral nodes, and kills them when they are caught up.
* WARNING: If you run this command from your laptop, the laptop needs to be up and connected for full length
of the experiment.
9. When the height of the chain reaches 3000, stop the `make runload` script
10. When the rotate script has made two iterations (i.e., all ephemeral nodes have caught up twice)
after height 3000 was reached, stop `make rotate`
11. Run `make retrieve-data` to gather all relevant data from the testnet into the orchestrating machine
12. Verify that the data was collected without errors
* at least one blockstore DB for a Tendermint validator
* the Prometheus database from the Prometheus node
* for extra care, you can run `zip -T` on the `prometheus.zip` file and (one of) the `blockstore.db.zip` file(s)
13. **Run `make terraform-destroy`**
Steps 8 to 10 are highly manual at the moment and will be improved in next iterations.
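As with the 200 node testnet, the shell-level commands named in the steps above can be summarized as follows. This is only a sketch: the `make` targets and variables come from the testnet repository, and `X`/`Y` are placeholders for a load below the saturation point.

```bash
# Sketch of a rotating node run, using only commands named in the steps above.
cp testnet_rotating.toml testnet.toml                  # step 2 -- do NOT commit this change
# step 3: set VERSION_TAG to the git hash under test
make terraform-apply EPHEMERAL_SIZE=25                 # step 4
# steps 5-6: start the "stable" part per the repository README and sanity-check Prometheus
make runload ROTATE_CONNECTIONS=X ROTATE_TX_RATE=Y     # step 7: run from a different shell
make rotate                                            # step 8: create and kill ephemeral nodes
# steps 9-10: stop runload at height 3000, stop rotate after two further full iterations
make retrieve-data                                     # step 11
make terraform-destroy                                 # step 13
```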
### Result Extraction
In order to obtain a latency plot, follow the instructions above for the 200 node experiment, but:
* The `report.txt` file contains only one experiment
* Therefore, there is no need for any `for` loops (a simplified extraction is sketched below)
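A minimal sketch of the simplified extraction, reusing the report commands from the 200 node section (the tool revision and file locations are the ones used above; the output name `rotating.dat` is only an example):

```bash
mkdir -p results
# Generate the latency report and raw CSV exactly as in the 200 node procedure.
go run github.com/tendermint/tendermint/test/loadtime/cmd/report@3ec6e424d \
  --database-type goleveldb --data-dir ./ > results/report.txt
go run github.com/tendermint/tendermint/test/loadtime/cmd/report@3ec6e424d \
  --database-type goleveldb --data-dir ./ --csv results/raw.csv

# With a single experiment there is only one UUID, so no loop is needed.
uuid=$(grep '^Experiment ID: ' results/report.txt | awk '{ print $3 }')
grep "$uuid" results/raw.csv | tr , ' ' | awk '{ print $2, $3 }' > rotating.dat
```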
As for Prometheus, the same method as for the 200 node experiment can be applied.
Some files were not shown because too many files have changed in this diff.