Compare commits
56 Commits
cal/val-to ... cal/node-c
| Author | SHA1 | Date |
|---|---|---|
|  | 7018c73baf |  |
|  | 4021df503b |  |
|  | 11976a4863 |  |
|  | 296ec7e113 |  |
|  | 933256c862 |  |
|  | c8f9f061fd |  |
|  | f138cb9c0c |  |
|  | 83b7f4ad5b |  |
|  | 09b8708314 |  |
|  | d95e423756 |  |
|  | 95bd4b6701 |  |
|  | 98ad5f1bf3 |  |
|  | 82c29db2bc |  |
|  | 160a33fdb1 |  |
|  | 13bd4b63f8 |  |
|  | a2610a9998 |  |
|  | da8810f480 |  |
|  | 10dca45e8c |  |
|  | 67bc51ad72 |  |
|  | 3faf580a0d |  |
|  | bc15531e1d |  |
|  | 716a624d57 |  |
|  | 58b9e4f03d |  |
|  | 6a46a76163 |  |
|  | 0d4db7aae6 |  |
|  | 40a59d1e10 |  |
|  | f6709208b0 |  |
|  | 2c40ca52c1 |  |
|  | 3136b7a084 |  |
|  | af2981a2f7 |  |
|  | 3bd2153136 |  |
|  | 301211c2cb |  |
|  | 58ee42ca52 |  |
|  | 6e38fff9ed |  |
|  | 93ab364abc |  |
|  | 1c60efc0bc |  |
|  | 6768b98568 |  |
|  | 3cdfbda2eb |  |
|  | 4552cfc271 |  |
|  | 91fba07e49 |  |
|  | 5df9c410ff |  |
|  | c8f203293d |  |
|  | b06e1cea54 |  |
|  | 6ea968d576 |  |
|  | b42c439776 |  |
|  | 387bf6795a |  |
|  | 4f3e87b2e4 |  |
|  | a371b1e3a8 |  |
|  | 9dd99e9294 |  |
|  | 4fd19a275e |  |
|  | 8d26460f9d |  |
|  | c0bdb2423a |  |
|  | cdd3479f20 |  |
|  | b1dc5a6def |  |
|  | abbeb919df |  |
|  | a02cc30e41 |  |
.github/dependabot.yml (12 changes, vendored)

@@ -53,10 +53,10 @@ updates:
   - package-ecosystem: gomod
     directory: "/"
     schedule:
-      interval: weekly
+      interval: daily
     target-branch: "v0.37.x"
-    # Only allow automated security-related dependency updates until we cut the
-    # final v0.37.0 release.
+    # Only allow automated security-related dependency updates on release
+    # branches.
     open-pull-requests-limit: 0
     labels:
       - T:dependencies
@@ -65,9 +65,11 @@ updates:
   - package-ecosystem: gomod
     directory: "/"
     schedule:
-      interval: weekly
+      interval: daily
     target-branch: "v0.34.x"
-    open-pull-requests-limit: 10
+    # Only allow automated security-related dependency updates on release
+    # branches.
+    open-pull-requests-limit: 0
     labels:
       - T:dependencies
       - S:automerge
.github/workflows/docker.yml (6 changes, vendored)

@@ -41,17 +41,17 @@ jobs:
           platforms: all

       - name: Set up Docker Build
-        uses: docker/setup-buildx-action@v2.0.0
+        uses: docker/setup-buildx-action@v2.2.1

       - name: Login to DockerHub
         if: ${{ github.event_name != 'pull_request' }}
-        uses: docker/login-action@v2.0.0
+        uses: docker/login-action@v2.1.0
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}

       - name: Publish to Docker Hub
-        uses: docker/build-push-action@v3.1.1
+        uses: docker/build-push-action@v3.2.0
         with:
           context: .
           file: ./DOCKER/Dockerfile
.github/workflows/e2e-nightly-34x.yml (4 changes, vendored)

@@ -57,7 +57,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Notify Slack on failure
-        uses: slackapi/slack-github-action@v1.22.0
+        uses: slackapi/slack-github-action@v1.23.0
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
          SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
@@ -72,7 +72,7 @@ jobs:
               "type": "section",
               "text": {
                 "type": "mrkdwn",
-                "text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMIT_URL }}|commit> that caused the failure."
+                "text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMIT_URL }}|commit> related to the failure."
               }
             }
           ]
.github/workflows/e2e-nightly-37x.yml (4 changes, vendored)

@@ -57,7 +57,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Notify Slack on failure
-        uses: slackapi/slack-github-action@v1.22.0
+        uses: slackapi/slack-github-action@v1.23.0
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
          SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
@@ -72,7 +72,7 @@ jobs:
               "type": "section",
               "text": {
                 "type": "mrkdwn",
-                "text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMIT_URL }}|commit> that caused the failure."
+                "text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMIT_URL }}|commit> related to the failure."
               }
             }
           ]
.github/workflows/e2e-nightly-main.yml (4 changes, vendored)

@@ -46,7 +46,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Notify Slack on failure
-        uses: slackapi/slack-github-action@v1.22.0
+        uses: slackapi/slack-github-action@v1.23.0
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
          SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
@@ -61,7 +61,7 @@ jobs:
               "type": "section",
               "text": {
                 "type": "mrkdwn",
-                "text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMIT_URL }}|commit> that caused the failure."
+                "text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMIT_URL }}|commit> related to the failure."
               }
             }
           ]
.github/workflows/fuzz-nightly.yml (2 changes, vendored)

@@ -76,7 +76,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Notify Slack on failure
-        uses: slackapi/slack-github-action@v1.22.0
+        uses: slackapi/slack-github-action@v1.23.0
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
          SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
.github/workflows/gosec.yml (41 changes, vendored, file deleted)

@@ -1,41 +0,0 @@
-name: Run Gosec
-on:
-  pull_request:
-    paths:
-      - '**/*.go'
-      - 'go.mod'
-      - 'go.sum'
-  push:
-    branches:
-      - main
-      - 'feature/*'
-      - 'v0.37.x'
-      - 'v0.34.x'
-    paths:
-      - '**/*.go'
-      - 'go.mod'
-      - 'go.sum'
-
-jobs:
-  Gosec:
-    permissions:
-      security-events: write
-
-    runs-on: ubuntu-latest
-    env:
-      GO111MODULE: on
-    steps:
-      - name: Checkout Source
-        uses: actions/checkout@v3
-
-      - name: Run Gosec Security Scanner
-        uses: cosmos/gosec@master
-        with:
-          # Let the report trigger a failure with the Github Security scanner features.
-          args: "-no-fail -fmt sarif -out results.sarif ./..."
-
-      - name: Upload SARIF file
-        uses: github/codeql-action/upload-sarif@v2
-        with:
-          # Path to SARIF file relative to the root of the repository
-          sarif_file: results.sarif
.github/workflows/janitor.yml (2 changes, vendored)

@@ -10,7 +10,7 @@ jobs:
     runs-on: ubuntu-latest
     timeout-minutes: 3
     steps:
-      - uses: styfle/cancel-workflow-action@0.10.1
+      - uses: styfle/cancel-workflow-action@0.11.0
        with:
          workflow_id: 1041851,1401230,2837803
          access_token: ${{ github.token }}
.github/workflows/lint.yml (5 changes, vendored)

@@ -31,10 +31,7 @@ jobs:
             go.sum
       - uses: golangci/golangci-lint-action@v3
         with:
-          # Required: the version of golangci-lint is required and
-          # must be specified without patch version: we always use the
-          # latest patch version.
-          version: v1.47.3
+          version: v1.50.1
          args: --timeout 10m
          github-token: ${{ secrets.github_token }}
        if: env.GIT_DIFF
.github/workflows/markdown-links.yml (19 changes, vendored)

@@ -1,23 +1,20 @@
 name: Check Markdown links

 on:
-  push:
-    branches:
-      - main
-  pull_request:
-    branches: [main]
+  schedule:
+    # 2am UTC daily
+    - cron: '0 2 * * *'

 jobs:
   markdown-link-check:
+    strategy:
+      matrix:
+        branch: ['main', 'v0.37.x', 'v0.34.x']
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v3
-      - uses: technote-space/get-diff-action@v6
         with:
-          PATTERNS: |
-            **/**.md
-      - uses: creachadair/github-action-markdown-link-check@master
+          ref: ${{ matrix.branch }}
+      - uses: informalsystems/github-action-markdown-link-check@main
         with:
-          check-modified-files-only: 'yes'
           config-file: '.md-link-check.json'
-        if: env.GIT_DIFF
.github/workflows/pre-release.yml (27 changes, vendored)

@@ -8,7 +8,7 @@ on:
       - "v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+" # e.g. v0.37.0-rc1, v0.38.0-rc10

 jobs:
-  goreleaser:
+  prerelease:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
@@ -38,3 +38,28 @@ jobs:
           args: release --rm-dist --release-notes=../release_notes.md
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+
+  prerelease-success:
+    needs: prerelease
+    if: ${{ success() }}
+    runs-on: ubuntu-latest
+    steps:
+      - name: Notify Slack upon pre-release
+        uses: slackapi/slack-github-action@v1.22.0
+        env:
+          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
+          SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
+          RELEASE_URL: "${{ github.server_url }}/${{ github.repository }}/releases/tag/${{ github.ref_name }}"
+        with:
+          payload: |
+            {
+              "blocks": [
+                {
+                  "type": "section",
+                  "text": {
+                    "type": "mrkdwn",
+                    "text": ":sparkles: New Tendermint pre-release: <${{ env.RELEASE_URL }}|${{ github.ref_name }}>"
+                  }
+                }
+              ]
+            }
.github/workflows/proto-lint.yml (2 changes, vendored)

@@ -15,7 +15,7 @@ jobs:
     timeout-minutes: 5
     steps:
       - uses: actions/checkout@v3
-      - uses: bufbuild/buf-setup-action@v1.8.0
+      - uses: bufbuild/buf-setup-action@v1.9.0
       - uses: bufbuild/buf-lint-action@v1
         with:
           input: 'proto'
.github/workflows/release.yml (27 changes, vendored)

@@ -6,7 +6,7 @@ on:
       - "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10

 jobs:
-  goreleaser:
+  release:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
@@ -35,3 +35,28 @@ jobs:
           args: release --rm-dist --release-notes=../release_notes.md
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+
+  release-success:
+    needs: release
+    if: ${{ success() }}
+    runs-on: ubuntu-latest
+    steps:
+      - name: Notify Slack upon release
+        uses: slackapi/slack-github-action@v1.22.0
+        env:
+          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
+          SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
+          RELEASE_URL: "${{ github.server_url }}/${{ github.repository }}/releases/tag/${{ github.ref_name }}"
+        with:
+          payload: |
+            {
+              "blocks": [
+                {
+                  "type": "section",
+                  "text": {
+                    "type": "mrkdwn",
+                    "text": ":rocket: New Tendermint release: <${{ env.RELEASE_URL }}|${{ github.ref_name }}>"
+                  }
+                }
+              ]
+            }
.gitignore (2 changes, vendored)

@@ -55,3 +55,5 @@ proto/spec/**/*.pb.go
 *.pdf
 *.gz
 *.dvi
+# Python virtual environments
+.venv
@@ -2,7 +2,6 @@ linters:
   enable:
     - asciicheck
    - bodyclose
-    - deadcode
     - depguard
     - dogsled
     - dupl
@@ -26,7 +25,6 @@ linters:
     - typecheck
     - unconvert
     - unused
-    - varcheck

 issues:
   exclude-rules:
@@ -2,5 +2,16 @@
   "retryOn429": true,
   "retryCount": 5,
   "fallbackRetryDelay": "30s",
-  "aliveStatusCodes": [200, 206, 503]
+  "aliveStatusCodes": [200, 206, 503],
+  "httpHeaders": [
+    {
+      "urls": [
+        "https://docs.github.com/",
+        "https://help.github.com/"
+      ],
+      "headers": {
+        "Accept-Encoding": "zstd, br, gzip, deflate"
+      }
+    }
+  ]
 }
CHANGELOG.md (30 changes)

@@ -2,6 +2,36 @@

 Friendly reminder, we have a [bug bounty program](https://hackerone.com/cosmos).

+## v0.34.22
+
+This release includes several bug fixes, [one of
+which](https://github.com/tendermint/tendermint/pull/9518) we discovered while
+building up a baseline for v0.34 against which to compare our upcoming v0.37
+release during our [QA process](./docs/qa/).
+
+Special thanks to external contributors on this release: @RiccardoM
+
+### FEATURES
+
+- [rpc] [\#9423](https://github.com/tendermint/tendermint/pull/9423) Support
+  HTTPS URLs from the WebSocket client (@RiccardoM, @cmwaters)
+
+### BUG FIXES
+
+- [config] [\#9483](https://github.com/tendermint/tendermint/issues/9483)
+  Calling `tendermint init` would incorrectly leave out the new `[storage]`
+  section delimiter in the generated configuration file - this has now been
+  fixed
+- [p2p] [\#9500](https://github.com/tendermint/tendermint/issues/9500) Prevent
+  peers who have errored being added to the peer set (@jmalicevic)
+- [indexer] [\#9473](https://github.com/tendermint/tendermint/issues/9473) Fix
+  bug that caused the psql indexer to index empty blocks whenever one of the
+  transactions returned a non zero code. The relevant deduplication logic has
+  been moved within the kv indexer only (@cmwaters)
+- [blocksync] [\#9518](https://github.com/tendermint/tendermint/issues/9518) A
+  block sync stall was observed during our QA process whereby the node was
+  unable to make progress. Retrying block requests after a timeout fixes this.
+
 ## v0.34.21

 Release highlights include:
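A note on the [rpc] feature above: with \#9423 an https:// remote is accepted by the WebSocket client and upgraded to a wss:// connection. A minimal sketch using the standard rpc/client/http package; the endpoint URL is a placeholder, not taken from the changelog:

package main

import (
	"fmt"

	rpchttp "github.com/tendermint/tendermint/rpc/client/http"
)

func main() {
	// Before this fix only tcp:// and http:// style addresses worked for the
	// WebSocket client; an https:// remote now maps to a wss:// connection.
	client, err := rpchttp.New("https://rpc.example.com:443", "/websocket")
	if err != nil {
		panic(err)
	}
	fmt.Println("RPC client constructed:", client != nil)
}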
@@ -11,6 +11,7 @@
 - P2P Protocol

 - Go API
+  - [p2p] \#9625 Remove unused p2p/trust package (@cmwaters)

 - Blockchain Protocol

@@ -90,8 +91,10 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi
 - [crypto] \#6120 Implement batch verification interface for ed25519 and sr25519. (@marbar3778 & @Yawning)
 - [types] \#6120 use batch verification for verifying commits signatures. (@marbar3778 & @cmwaters & @Yawning)
   - If the key type supports the batch verification API it will try to batch verify. If the verification fails we will single verify each signature.
+- [state] \#9505 Added logic so when pruning, the evidence period is taken into consideration and only deletes unecessary data (@samricotta)

 ### BUG FIXES

 - [consensus] \#9229 fix round number of `enterPropose` when handling `RoundStepNewRound` timeout. (@fatcat22)
 - [docker] \#9073 enable cross platform build using docker buildx
+- [blocksync] \#9518 handle the case when the sending queue is full: retry block request after a timeout
@@ -12,7 +12,7 @@ and hence to Tendermint.
 * We are committed to providing a friendly, safe and welcoming environment for
   all, regardless of level of experience, gender, gender identity and
   expression, sexual orientation, disability, personal appearance, body size,
-  race, ethnicity, age, religion, nationality, or other similar characteristic.
+  race, ethnicity, age, religion, nationality, or other similar characteristics.

 * On Slack, please avoid using overtly sexual nicknames or other nicknames that
   might detract from a friendly, safe and welcoming environment for all.
@@ -12,7 +12,7 @@ landing changes in `main`.
 All work on the code base should be motivated by a [Github
 Issue](https://github.com/tendermint/tendermint/issues).
 [Search](https://github.com/tendermint/tendermint/issues?q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22)
-is a good place start when looking for places to contribute. If you
+is a good place to start when looking for places to contribute. If you
 would like to work on an issue which already exists, please indicate so
 by leaving a comment.

@@ -213,7 +213,7 @@ Changes with multiple classifications should be doubly included (eg. a bug fix
 that is also a breaking change should be recorded under both).

 Breaking changes are further subdivided according to the APIs/users they impact.
-Any change that effects multiple APIs/users should be recorded multiply - for
+Any change that affects multiple APIs/users should be recorded multiply - for
 instance, a change to the `Blockchain Protocol` that removes a field from the
 header should also be recorded under `CLI/RPC/Config` since the field will be
 removed from the header in RPC responses as well.

@@ -247,7 +247,7 @@ To begin contributing, create a development branch either on `github.com/tenderm
 Make changes, and before submitting a pull request, update the `CHANGELOG_PENDING.md` to record your change. Also, run either `git rebase` or `git merge` on top of the latest `main`. (Since pull requests are squash-merged, either is fine!)

 Update the `UPGRADING.md` if the change you've made is breaking and the
-instructions should be in place for a user on how he/she can upgrade it's
+instructions should be in place for a user on how he/she can upgrade its
 software (ABCI application, Tendermint-based blockchain, light client, wallet).

 Once you have submitted a pull request label the pull request with either `R:minor`, if the change should be included in the next minor release, or `R:major`, if the change is meant for a major release.
Makefile (12 changes)

@@ -4,14 +4,8 @@ OUTPUT?=$(BUILDDIR)/tendermint

 BUILD_TAGS?=tendermint

-# If building a release, please checkout the version tag to get the correct version setting
-ifneq ($(shell git symbolic-ref -q --short HEAD),)
-VERSION := unreleased-$(shell git symbolic-ref -q --short HEAD)-$(shell git rev-parse HEAD)
-else
-VERSION := $(shell git describe)
-endif
-
-LD_FLAGS = -X github.com/tendermint/tendermint/version.TMCoreSemVer=$(VERSION)
+COMMIT_HASH := $(shell git rev-parse --short HEAD)
+LD_FLAGS = -X github.com/tendermint/tendermint/version.TMGitCommitHash=$(COMMIT_HASH)
 BUILD_FLAGS = -mod=readonly -ldflags "$(LD_FLAGS)"
 HTTPS_GIT := https://github.com/tendermint/tendermint.git
 CGO_ENABLED ?= 0

@@ -277,7 +271,7 @@ format:

 lint:
	@echo "--> Running linter"
-	@golangci-lint run
+	@go run github.com/golangci/golangci-lint/cmd/golangci-lint run
 .PHONY: lint

 DESTINATION = ./index.html.md
@@ -79,7 +79,7 @@ func NewApplication() *Application {
 func (app *Application) Info(req types.RequestInfo) (resInfo types.ResponseInfo) {
	return types.ResponseInfo{
		Data:             fmt.Sprintf("{\"size\":%v}", app.state.Size),
-		Version:          version.ABCIVersion,
+		Version:          version.ABCISemVer,
		AppVersion:       ProtocolVersion,
		LastBlockHeight:  app.state.Height,
		LastBlockAppHash: app.state.AppHash,

@@ -6,4 +6,4 @@ import (

 // TODO: eliminate this after some version refactor

-const Version = version.ABCIVersion
+const Version = version.ABCISemVer
blocksync/metrics.gen.go (30 changes, new file)

@@ -0,0 +1,30 @@
+// Code generated by metricsgen. DO NOT EDIT.
+
+package blocksync
+
+import (
+	"github.com/go-kit/kit/metrics/discard"
+	prometheus "github.com/go-kit/kit/metrics/prometheus"
+	stdprometheus "github.com/prometheus/client_golang/prometheus"
+)
+
+func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
+	labels := []string{}
+	for i := 0; i < len(labelsAndValues); i += 2 {
+		labels = append(labels, labelsAndValues[i])
+	}
+	return &Metrics{
+		Syncing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
+			Namespace: namespace,
+			Subsystem: MetricsSubsystem,
+			Name:      "syncing",
+			Help:      "Whether or not a node is block syncing. 1 if yes, 0 if no.",
+		}, labels).With(labelsAndValues...),
+	}
+}
+
+func NopMetrics() *Metrics {
+	return &Metrics{
+		Syncing: discard.NewGauge(),
+	}
+}
blocksync/metrics.go (19 changes, new file)

@@ -0,0 +1,19 @@
+package blocksync
+
+import (
+	"github.com/go-kit/kit/metrics"
+)
+
+const (
+	// MetricsSubsystem is a subsystem shared by all metrics exposed by this
+	// package.
+	MetricsSubsystem = "blocksync"
+)
+
+//go:generate go run ../scripts/metricsgen -struct=Metrics
+
+// Metrics contains metrics exposed by this package.
+type Metrics struct {
+	// Whether or not a node is block syncing. 1 if yes, 0 if no.
+	Syncing metrics.Gauge
+}
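For orientation, the two constructors in the generated file pair with this struct the same way other Tendermint metrics packages do. A sketch of the selection logic at node startup, assuming this changeset's config helper; the wrapper function name and the label values are ours, not the repo's:

package node

import (
	"github.com/tendermint/tendermint/blocksync"
	cfg "github.com/tendermint/tendermint/config"
)

// newBlocksyncMetrics picks the Prometheus-backed metrics when instrumentation
// is on, and the no-op implementation otherwise.
func newBlocksyncMetrics(config *cfg.Config, chainID string) *blocksync.Metrics {
	// IsPrometheusEnabled is the helper added to config/config.go later in
	// this same diff.
	if config.Instrumentation.IsPrometheusEnabled() {
		// "chain_id" becomes a label on the gauge, e.g.
		// tendermint_blocksync_syncing{chain_id="..."}.
		return blocksync.PrometheusMetrics("tendermint", "chain_id", chainID)
	}
	return blocksync.NopMetrics()
}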
@@ -19,58 +19,6 @@ const (
 	BlockResponseMessageFieldKeySize
 )

-// EncodeMsg encodes a Protobuf message
-func EncodeMsg(pb proto.Message) ([]byte, error) {
-	msg := bcproto.Message{}
-
-	switch pb := pb.(type) {
-	case *bcproto.BlockRequest:
-		msg.Sum = &bcproto.Message_BlockRequest{BlockRequest: pb}
-	case *bcproto.BlockResponse:
-		msg.Sum = &bcproto.Message_BlockResponse{BlockResponse: pb}
-	case *bcproto.NoBlockResponse:
-		msg.Sum = &bcproto.Message_NoBlockResponse{NoBlockResponse: pb}
-	case *bcproto.StatusRequest:
-		msg.Sum = &bcproto.Message_StatusRequest{StatusRequest: pb}
-	case *bcproto.StatusResponse:
-		msg.Sum = &bcproto.Message_StatusResponse{StatusResponse: pb}
-	default:
-		return nil, fmt.Errorf("unknown message type %T", pb)
-	}
-
-	bz, err := proto.Marshal(&msg)
-	if err != nil {
-		return nil, fmt.Errorf("unable to marshal %T: %w", pb, err)
-	}
-
-	return bz, nil
-}
-
-// DecodeMsg decodes a Protobuf message.
-func DecodeMsg(bz []byte) (proto.Message, error) {
-	pb := &bcproto.Message{}
-
-	err := proto.Unmarshal(bz, pb)
-	if err != nil {
-		return nil, err
-	}
-
-	switch msg := pb.Sum.(type) {
-	case *bcproto.Message_BlockRequest:
-		return msg.BlockRequest, nil
-	case *bcproto.Message_BlockResponse:
-		return msg.BlockResponse, nil
-	case *bcproto.Message_NoBlockResponse:
-		return msg.NoBlockResponse, nil
-	case *bcproto.Message_StatusRequest:
-		return msg.StatusRequest, nil
-	case *bcproto.Message_StatusResponse:
-		return msg.StatusResponse, nil
-	default:
-		return nil, fmt.Errorf("unknown message type %T", msg)
-	}
-}
-
 // ValidateMsg validates a message.
 func ValidateMsg(pb proto.Message) error {
 	if pb == nil {
@@ -80,7 +80,7 @@ func TestBcStatusResponseMessageValidateBasic(t *testing.T) {
 }

 //nolint:lll // ignore line length in tests
-func TestBlockchainMessageVectors(t *testing.T) {
+func TestBlocksyncMessageVectors(t *testing.T) {
 	block := types.MakeBlock(int64(3), []types.Tx{types.Tx("Hello World")}, nil, nil)
 	block.Version.Block = 11 // overwrite updated protocol version
@@ -32,6 +32,7 @@ const (
 	maxTotalRequesters        = 600
 	maxPendingRequests        = maxTotalRequesters
 	maxPendingRequestsPerPeer = 20
+	requestRetrySeconds       = 30

 	// Minimum recv rate to ensure we're receiving blocks from a peer fast
 	// enough. If a peer is not sending us data at at least that rate, we

@@ -98,6 +99,9 @@ func NewBlockPool(start int64, requestsCh chan<- BlockRequest, errorsCh chan<- p
 // OnStart implements service.Service by spawning requesters routine and recording
 // pool's start time.
 func (pool *BlockPool) OnStart() error {
+	if pool.height == 0 {
+		return errors.New("height not set")
+	}
 	go pool.makeRequestersRoutine()
 	pool.startTime = time.Now()
 	return nil

@@ -110,22 +114,19 @@ func (pool *BlockPool) makeRequestersRoutine() {
 			break
 		}

-		_, numPending, lenRequesters := pool.GetStatus()
-		switch {
-		case numPending >= maxPendingRequests:
+		height, maxPeerHeight, numPending, lenRequesters := pool.GetStatus()
+		if height >= maxPeerHeight ||
+			numPending >= maxPendingRequests ||
+			lenRequesters >= maxTotalRequesters {
 			// sleep for a bit.
 			time.Sleep(requestIntervalMS * time.Millisecond)
 			// check for timed out peers
 			pool.removeTimedoutPeers()
-		case lenRequesters >= maxTotalRequesters:
-			// sleep for a bit.
-			time.Sleep(requestIntervalMS * time.Millisecond)
-			// check for timed out peers
-			pool.removeTimedoutPeers()
-		default:
-			// request for more blocks.
-			pool.makeNextRequester()
+			continue
 		}
+
+		// request for more blocks.
+		pool.makeNextRequester()
 	}
 }

@@ -155,11 +156,11 @@ func (pool *BlockPool) removeTimedoutPeers() {

 // GetStatus returns pool's height, numPending requests and the number of
 // requesters.
-func (pool *BlockPool) GetStatus() (height int64, numPending int32, lenRequesters int) {
+func (pool *BlockPool) GetStatus() (height, maxPeerHeight int64, numPending int32, lenRequesters int) {
 	pool.mtx.Lock()
 	defer pool.mtx.Unlock()

-	return pool.height, atomic.LoadInt32(&pool.numPending), len(pool.requesters)
+	return pool.height, pool.maxPeerHeight, atomic.LoadInt32(&pool.numPending), len(pool.requesters)
 }

 // IsCaughtUp returns true if this node is caught up, false - otherwise.

@@ -301,6 +302,7 @@ func (pool *BlockPool) SetPeerRange(peerID p2p.ID, base int64, height int64) {
 	}

 	if height > pool.maxPeerHeight {
+		pool.Logger.Info("new max peer height", "height", height)
 		pool.maxPeerHeight = height
 	}
 }

@@ -387,7 +389,7 @@ func (pool *BlockPool) makeNextRequester() {

 	err := request.Start()
 	if err != nil {
-		request.Logger.Error("Error starting request", "err", err)
+		pool.Logger.Error("Error starting request", "err", err)
 	}
 }

@@ -602,7 +604,7 @@ OUTER_LOOP:
 		}
 		peer = bpr.pool.pickIncrAvailablePeer(bpr.height)
 		if peer == nil {
-			// log.Info("No peers available", "height", height)
+			bpr.Logger.Debug("No peers currently available; will retry shortly", "height", bpr.height)
 			time.Sleep(requestIntervalMS * time.Millisecond)
 			continue PICK_PEER_LOOP
 		}

@@ -612,6 +614,7 @@ OUTER_LOOP:
 		bpr.peerID = peer.id
 		bpr.mtx.Unlock()

+		to := time.NewTimer(requestRetrySeconds * time.Second)
 		// Send request and wait.
 		bpr.pool.sendRequest(bpr.height, peer.id)
 	WAIT_LOOP:

@@ -624,6 +627,11 @@ OUTER_LOOP:
 				return
 			case <-bpr.Quit():
 				return
+			case <-to.C:
+				bpr.Logger.Debug("Retrying block request after timeout", "height", bpr.height, "peer", bpr.peerID)
+				// Simulate a redo
+				bpr.reset()
+				continue OUTER_LOOP
 			case peerID := <-bpr.redoCh:
 				if peerID == bpr.peerID {
 					bpr.reset()
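The pool changes above boil down to one pattern: arm a timer before each block request and treat its expiry as another case in the requester's wait loop, so a request stuck behind a full send queue is eventually re-issued instead of stalling sync. A self-contained sketch of that pattern (all names are ours, not the reactor's):

package main

import (
	"fmt"
	"time"
)

// requestWithRetry re-sends whenever the retry timer fires before a reply
// arrives — the same shape as the requester's WAIT_LOOP with <-to.C above.
func requestWithRetry(send func(), reply <-chan string, retryEvery time.Duration, maxTries int) (string, bool) {
	for try := 1; try <= maxTries; try++ {
		send()
		timer := time.NewTimer(retryEvery)
		select {
		case r := <-reply:
			timer.Stop()
			return r, true
		case <-timer.C:
			// Timed out: fall through and re-send, like `continue OUTER_LOOP`.
			fmt.Println("retrying after timeout, attempt", try)
		}
	}
	return "", false
}

func main() {
	reply := make(chan string, 1)
	go func() {
		time.Sleep(150 * time.Millisecond) // the reply arrives "late" once
		reply <- "block 42"
	}()
	r, ok := requestWithRetry(func() {}, reply, 100*time.Millisecond, 5)
	fmt.Println(r, ok)
}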
@@ -30,9 +30,9 @@
 )

 type consensusReactor interface {
-	// for when we switch from blockchain reactor and block sync to
+	// for when we switch from blocksync reactor and block sync to
 	// the consensus machine
-	SwitchToConsensus(state sm.State, skipWAL bool)
+	SwitchToConsensus(state sm.State, skipWAL bool) error
 }

 type peerError struct {

@@ -54,16 +54,15 @@ type Reactor struct {
 	blockExec *sm.BlockExecutor
 	store     *store.BlockStore
 	pool      *BlockPool
-	blockSync bool

 	requestsCh <-chan BlockRequest
 	errorsCh   <-chan peerError
+
+	metrics *Metrics
 }

 // NewReactor returns new reactor instance.
-func NewReactor(state sm.State, blockExec *sm.BlockExecutor, store *store.BlockStore,
-	blockSync bool) *Reactor {
-
+func NewReactor(state sm.State, blockExec *sm.BlockExecutor, store *store.BlockStore, metrics *Metrics) *Reactor {
 	if state.LastBlockHeight != store.Height() {
 		panic(fmt.Sprintf("state (%v) and store (%v) height mismatch", state.LastBlockHeight,
 			store.Height()))

@@ -85,9 +84,9 @@ func NewReactor(state sm.State, blockExec *sm.BlockExecutor, store *store.BlockS
 		blockExec:  blockExec,
 		store:      store,
 		pool:       pool,
-		blockSync:  blockSync,
 		requestsCh: requestsCh,
 		errorsCh:   errorsCh,
+		metrics:    metrics,
 	}
 	bcR.BaseReactor = *p2p.NewBaseReactor("Reactor", bcR)
 	return bcR

@@ -101,22 +100,22 @@ func (bcR *Reactor) SetLogger(l log.Logger) {

 // OnStart implements service.Service.
 func (bcR *Reactor) OnStart() error {
-	if bcR.blockSync {
-		err := bcR.pool.Start()
-		if err != nil {
-			return err
-		}
-		go bcR.poolRoutine(false)
-	}
 	return nil
 }

+// IsSyncing returns whether the node is using blocksync to advance heights
+func (bcR *Reactor) IsSyncing() bool {
+	return bcR.pool.IsRunning()
+}
+
 // SwitchToBlockSync is called by the state sync reactor when switching to block sync.
 func (bcR *Reactor) SwitchToBlockSync(state sm.State) error {
-	bcR.blockSync = true
 	bcR.initialState = state
-	bcR.pool.height = state.LastBlockHeight + 1
+	if state.LastBlockHeight == 0 {
+		bcR.pool.height = state.InitialHeight
+	} else {
+		bcR.pool.height = state.LastBlockHeight + 1
+	}
 	err := bcR.pool.Start()
 	if err != nil {
 		return err

@@ -127,7 +126,7 @@ func (bcR *Reactor) SwitchToBlockSync(state sm.State) error {

 // OnStop implements service.Service.
 func (bcR *Reactor) OnStop() {
-	if bcR.blockSync {
+	if bcR.pool.IsRunning() {
 		if err := bcR.pool.Stop(); err != nil {
 			bcR.Logger.Error("Error stopping pool", "err", err)
 		}

@@ -143,21 +142,20 @@ func (bcR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
 			SendQueueCapacity:   1000,
 			RecvBufferCapacity:  50 * 4096,
 			RecvMessageCapacity: MaxMsgSize,
+			MessageType:         &bcproto.Message{},
 		},
 	}
 }

 // AddPeer implements Reactor by sending our state to peer.
 func (bcR *Reactor) AddPeer(peer p2p.Peer) {
-	msgBytes, err := EncodeMsg(&bcproto.StatusResponse{
-		Base:   bcR.store.Base(),
-		Height: bcR.store.Height()})
-	if err != nil {
-		bcR.Logger.Error("could not convert msg to protobuf", "err", err)
-		return
-	}
-
-	peer.Send(BlocksyncChannel, msgBytes)
+	peer.Send(p2p.Envelope{
+		ChannelID: BlocksyncChannel,
+		Message: &bcproto.StatusResponse{
+			Base:   bcR.store.Base(),
+			Height: bcR.store.Height(),
+		},
+	})
 	// it's OK if send fails. will try later in poolRoutine

 	// peer is added to the pool once we receive the first

@@ -182,69 +180,53 @@ func (bcR *Reactor) respondToPeer(msg *bcproto.BlockRequest,
 			return false
 		}

-		msgBytes, err := EncodeMsg(&bcproto.BlockResponse{Block: bl})
-		if err != nil {
-			bcR.Logger.Error("could not marshal msg", "err", err)
-			return false
-		}
-
-		return src.TrySend(BlocksyncChannel, msgBytes)
+		return src.TrySend(p2p.Envelope{
+			ChannelID: BlocksyncChannel,
+			Message:   &bcproto.BlockResponse{Block: bl},
+		})
 	}

 	bcR.Logger.Info("Peer asking for a block we don't have", "src", src, "height", msg.Height)

-	msgBytes, err := EncodeMsg(&bcproto.NoBlockResponse{Height: msg.Height})
-	if err != nil {
-		bcR.Logger.Error("could not convert msg to protobuf", "err", err)
-		return false
-	}
-
-	return src.TrySend(BlocksyncChannel, msgBytes)
+	return src.TrySend(p2p.Envelope{
+		ChannelID: BlocksyncChannel,
+		Message:   &bcproto.NoBlockResponse{Height: msg.Height},
+	})
 }

 // Receive implements Reactor by handling 4 types of messages (look below).
-func (bcR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
-	msg, err := DecodeMsg(msgBytes)
-	if err != nil {
-		bcR.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err)
-		bcR.Switch.StopPeerForError(src, err)
-		return
-	}
-
-	if err = ValidateMsg(msg); err != nil {
-		bcR.Logger.Error("Peer sent us invalid msg", "peer", src, "msg", msg, "err", err)
-		bcR.Switch.StopPeerForError(src, err)
+func (bcR *Reactor) Receive(e p2p.Envelope) {
+	if err := ValidateMsg(e.Message); err != nil {
+		bcR.Logger.Error("Peer sent us invalid msg", "peer", e.Src, "msg", e.Message, "err", err)
+		bcR.Switch.StopPeerForError(e.Src, err)
 		return
 	}

-	bcR.Logger.Debug("Receive", "src", src, "chID", chID, "msg", msg)
+	bcR.Logger.Debug("Receive", "e.Src", e.Src, "chID", e.ChannelID, "msg", e.Message)

-	switch msg := msg.(type) {
+	switch msg := e.Message.(type) {
 	case *bcproto.BlockRequest:
-		bcR.respondToPeer(msg, src)
+		bcR.respondToPeer(msg, e.Src)
 	case *bcproto.BlockResponse:
 		bi, err := types.BlockFromProto(msg.Block)
 		if err != nil {
 			bcR.Logger.Error("Block content is invalid", "err", err)
 			return
 		}
-		bcR.pool.AddBlock(src.ID(), bi, len(msgBytes))
+		bcR.pool.AddBlock(e.Src.ID(), bi, msg.Block.Size())
 	case *bcproto.StatusRequest:
 		// Send peer our state.
-		msgBytes, err := EncodeMsg(&bcproto.StatusResponse{
-			Height: bcR.store.Height(),
-			Base:   bcR.store.Base(),
-		})
-		if err != nil {
-			bcR.Logger.Error("could not convert msg to protobut", "err", err)
-			return
-		}
-		src.TrySend(BlocksyncChannel, msgBytes)
+		e.Src.TrySend(p2p.Envelope{
+			ChannelID: BlocksyncChannel,
+			Message: &bcproto.StatusResponse{
+				Height: bcR.store.Height(),
+				Base:   bcR.store.Base(),
+			},
+		})
 	case *bcproto.StatusResponse:
 		// Got a peer status. Unverified.
-		bcR.pool.SetPeerRange(src.ID(), msg.Base, msg.Height)
+		bcR.pool.SetPeerRange(e.Src.ID(), msg.Base, msg.Height)
 	case *bcproto.NoBlockResponse:
-		bcR.Logger.Debug("Peer does not have requested block", "peer", src, "height", msg.Height)
+		bcR.Logger.Debug("Peer does not have requested block", "peer", e.Src, "height", msg.Height)
 	default:
 		bcR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
 	}

@@ -253,6 +235,8 @@ func (bcR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
 // Handle messages from the poolReactor telling the reactor what to do.
 // NOTE: Don't sleep in the FOR_LOOP or otherwise slow it down!
 func (bcR *Reactor) poolRoutine(stateSynced bool) {
+	bcR.metrics.Syncing.Set(1)
+	defer bcR.metrics.Syncing.Set(0)

 	trySyncTicker := time.NewTicker(trySyncIntervalMS * time.Millisecond)
 	defer trySyncTicker.Stop()

@@ -285,13 +269,10 @@ func (bcR *Reactor) poolRoutine(stateSynced bool) {
 				if peer == nil {
 					continue
 				}
-				msgBytes, err := EncodeMsg(&bcproto.BlockRequest{Height: request.Height})
-				if err != nil {
-					bcR.Logger.Error("could not convert msg to proto", "err", err)
-					continue
-				}
-
-				queued := peer.TrySend(BlocksyncChannel, msgBytes)
+				queued := peer.TrySend(p2p.Envelope{
+					ChannelID: BlocksyncChannel,
+					Message:   &bcproto.BlockRequest{Height: request.Height},
+				})
 				if !queued {
 					bcR.Logger.Debug("Send queue is full, drop block request", "peer", peer.ID(), "height", request.Height)
 				}

@@ -303,7 +284,7 @@ func (bcR *Reactor) poolRoutine(stateSynced bool) {

 			case <-statusUpdateTicker.C:
 				// ask for status updates
-				go bcR.BroadcastStatusRequest() //nolint: errcheck
+				go bcR.BroadcastStatusRequest()

 			}
 		}

@@ -313,24 +294,28 @@ FOR_LOOP:
 	for {
 		select {
 		case <-switchToConsensusTicker.C:
-			height, numPending, lenRequesters := bcR.pool.GetStatus()
+			height, peerHeight, numPending, lenRequesters := bcR.pool.GetStatus()
 			outbound, inbound, _ := bcR.Switch.NumPeers()
 			bcR.Logger.Debug("Consensus ticker", "numPending", numPending, "total", lenRequesters,
-				"outbound", outbound, "inbound", inbound)
+				"outbound", outbound, "inbound", inbound, "peerHeight", peerHeight)
 			if bcR.pool.IsCaughtUp() {
 				bcR.Logger.Info("Time to switch to consensus reactor!", "height", height)
 				if err := bcR.pool.Stop(); err != nil {
 					bcR.Logger.Error("Error stopping pool", "err", err)
 				}
+
+				// TODO: node struct should be responsible for switching from block sync to
+				// consensus. It's messy to have to grab the consensus reactor from the switch
 				conR, ok := bcR.Switch.Reactor("CONSENSUS").(consensusReactor)
 				if ok {
-					conR.SwitchToConsensus(state, blocksSynced > 0 || stateSynced)
+					err := conR.SwitchToConsensus(state, blocksSynced > 0 || stateSynced)
+					bcR.Logger.Error("failed to switch to consensus", "err", err)
 				}
 				// else {
 				// should only happen during testing
 				// }

-				break FOR_LOOP
+				return
 			}

 		case <-trySyncTicker.C: // chan time

@@ -429,14 +414,9 @@ FOR_LOOP:
 }

 // BroadcastStatusRequest broadcasts `BlockStore` base and height.
-func (bcR *Reactor) BroadcastStatusRequest() error {
-	bm, err := EncodeMsg(&bcproto.StatusRequest{})
-	if err != nil {
-		bcR.Logger.Error("could not convert msg to proto", "err", err)
-		return fmt.Errorf("could not convert msg to proto: %w", err)
-	}
-
-	bcR.Switch.Broadcast(BlocksyncChannel, bm)
-
-	return nil
+func (bcR *Reactor) BroadcastStatusRequest() {
+	bcR.Switch.Broadcast(p2p.Envelope{
+		ChannelID: BlocksyncChannel,
+		Message:   &bcproto.StatusRequest{},
+	})
 }
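The dominant mechanical change in this reactor (and in the consensus tests below) is the switch from hand-marshalled byte slices to typed envelopes: Send, TrySend and Broadcast now take a p2p.Envelope holding a channel ID plus the unmarshalled proto message, and the p2p layer does the encoding. A toy model of why the call sites shrink, using local stand-in types rather than the real p2p package:

package main

import "fmt"

// Envelope mirrors the shape introduced by this changeset: a channel plus a
// typed message. In the real code Message is a proto.Message and the
// transport marshals it internally.
type Envelope struct {
	ChannelID byte
	Message   any
}

type peer struct{}

// Send replaces the old Send(chID byte, msgBytes []byte): there is no
// EncodeMsg call and no marshalling error to handle at every call site.
func (peer) Send(e Envelope) bool {
	fmt.Printf("channel 0x%x: %v\n", e.ChannelID, e.Message)
	return true
}

func main() {
	var p peer
	p.Send(Envelope{ChannelID: 0x40, Message: "StatusRequest{}"})
}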
@@ -95,14 +95,6 @@ func newReactor(
 		mock.Anything,
 		mock.Anything).Return(nil)

-	// Make the Reactor itself.
-	// NOTE we have to create and commit the blocks first because
-	// pool.height is determined from the store.
-	fastSync := true
 	db := dbm.NewMemDB()
 	stateStore = sm.NewStore(db, sm.StoreOptions{
 		DiscardABCIResponses: false,
 	})
 	blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(),
 		mp, sm.EmptyEvidencePool{}, blockStore)
 	if err = stateStore.Save(state); err != nil {

@@ -145,17 +137,20 @@ func newReactor(
 		blockStore.SaveBlock(thisBlock, thisParts, lastCommit)
 	}

-	bcReactor := NewReactor(state.Copy(), blockExec, blockStore, fastSync)
-	bcReactor.SetLogger(logger.With("module", "blockchain"))
+	bcReactor := NewReactor(state.Copy(), blockExec, blockStore, NopMetrics())
+	bcReactor.SetLogger(logger.With("module", "blocksync"))

 	return ReactorPair{bcReactor, proxyApp}
 }

 func TestNoBlockResponse(t *testing.T) {
-	config = test.ResetTestRoot("blockchain_reactor_test")
+	config = test.ResetTestRoot("blocksync_reactor_test")
 	defer os.RemoveAll(config.RootDir)
 	genDoc, privVals := randGenesisDoc(1, false, 30)

+	state, err := sm.MakeGenesisState(genDoc)
+	require.NoError(t, err)
+
 	maxBlockHeight := int64(65)

 	reactorPairs := make([]ReactorPair, 2)

@@ -164,11 +159,17 @@ func TestNoBlockResponse(t *testing.T) {
 	reactorPairs[1] = newReactor(t, log.TestingLogger(), genDoc, privVals, 0)

 	p2p.MakeConnectedSwitches(config.P2P, 2, func(i int, s *p2p.Switch) *p2p.Switch {
-		s.AddReactor("BLOCKCHAIN", reactorPairs[i].reactor)
+		s.AddReactor("BLOCKSYNC", reactorPairs[i].reactor)
 		return s

 	}, p2p.Connect2Switches)

+	for _, reactor := range reactorPairs {
+		// turn on the syncing algorithm
+		err := reactor.reactor.SwitchToBlockSync(state)
+		require.NoError(t, err)
+	}
+
 	defer func() {
 		for _, r := range reactorPairs {
 			err := r.reactor.Stop()

@@ -214,10 +215,13 @@ func TestNoBlockResponse(t *testing.T) {
 // Alternatively we could actually dial a TCP conn but
 // that seems extreme.
 func TestBadBlockStopsPeer(t *testing.T) {
-	config = test.ResetTestRoot("blockchain_reactor_test")
+	config = test.ResetTestRoot("blocksync_reactor_test")
 	defer os.RemoveAll(config.RootDir)
 	genDoc, privVals := randGenesisDoc(1, false, 30)

+	state, err := sm.MakeGenesisState(genDoc)
+	require.NoError(t, err)
+
 	maxBlockHeight := int64(148)

 	// Other chain needs a different validator set

@@ -239,11 +243,17 @@ func TestBadBlockStopsPeer(t *testing.T) {
 	reactorPairs[3] = newReactor(t, log.TestingLogger(), genDoc, privVals, 0)

 	switches := p2p.MakeConnectedSwitches(config.P2P, 4, func(i int, s *p2p.Switch) *p2p.Switch {
-		s.AddReactor("BLOCKCHAIN", reactorPairs[i].reactor)
+		s.AddReactor("BLOCKSYNC", reactorPairs[i].reactor)
 		return s

 	}, p2p.Connect2Switches)

+	for _, reactor := range reactorPairs {
+		// turn on the syncing algorithm
+		err := reactor.reactor.SwitchToBlockSync(state)
+		require.NoError(t, err)
+	}
+
 	defer func() {
 		for _, r := range reactorPairs {
 			err := r.reactor.Stop()

@@ -278,7 +288,7 @@ func TestBadBlockStopsPeer(t *testing.T) {
 	reactorPairs = append(reactorPairs, lastReactorPair)

 	switches = append(switches, p2p.MakeConnectedSwitches(config.P2P, 1, func(i int, s *p2p.Switch) *p2p.Switch {
-		s.AddReactor("BLOCKCHAIN", reactorPairs[len(reactorPairs)-1].reactor)
+		s.AddReactor("BLOCKSYNC", reactorPairs[len(reactorPairs)-1].reactor)
 		return s

 	}, p2p.Connect2Switches)...)

@@ -287,6 +297,11 @@ func TestBadBlockStopsPeer(t *testing.T) {
 		p2p.Connect2Switches(switches, i, len(reactorPairs)-1)
 	}

+	otherState, err := sm.MakeGenesisState(otherGenDoc)
+	require.NoError(t, err)
+	err = lastReactorPair.reactor.SwitchToBlockSync(otherState)
+	require.NoError(t, err)
+
 	for {
 		if lastReactorPair.reactor.pool.IsCaughtUp() || lastReactorPair.reactor.Switch.Peers().Size() == 0 {
 			break
@@ -67,7 +67,8 @@ func copyConfig(home, dir string) error {
 func dumpProfile(dir, addr, profile string, debug int) error {
 	endpoint := fmt.Sprintf("%s/debug/pprof/%s?debug=%d", addr, profile, debug)

-	resp, err := http.Get(endpoint) //nolint: gosec
+	//nolint:all
+	resp, err := http.Get(endpoint)
 	if err != nil {
 		return fmt.Errorf("failed to query for %s profile: %w", profile, err)
 	}
@@ -14,7 +14,7 @@ import (
 	"github.com/tendermint/tendermint/store"
 )

-var removeBlock bool = false
+var removeBlock = false

 func init() {
 	RollbackStateCmd.Flags().BoolVar(&removeBlock, "hard", false, "remove last block as well as state")
@@ -14,6 +14,11 @@ var VersionCmd = &cobra.Command{
 	Use:   "version",
 	Short: "Show version info",
 	Run: func(cmd *cobra.Command, args []string) {
+		tmVersion := version.TMCoreSemVer
+		if version.TMGitCommitHash != "" {
+			tmVersion += "+" + version.TMGitCommitHash
+		}
+
 		if verbose {
 			values, _ := json.MarshalIndent(struct {
 				Tendermint    string `json:"tendermint"`
@@ -21,14 +26,14 @@ var VersionCmd = &cobra.Command{
 				BlockProtocol uint64 `json:"block_protocol"`
 				P2PProtocol   uint64 `json:"p2p_protocol"`
 			}{
-				Tendermint:    version.TMCoreSemVer,
-				ABCI:          version.ABCIVersion,
+				Tendermint:    tmVersion,
+				ABCI:          version.ABCISemVer,
 				BlockProtocol: version.BlockProtocol,
 				P2PProtocol:   version.P2PProtocol,
 			}, "", " ")
 			fmt.Println(string(values))
 		} else {
-			fmt.Println(version.TMCoreSemVer)
+			fmt.Println(tmVersion)
 		}
 	},
 }
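Taken together with the Makefile change earlier in this diff, `tendermint version` would now print something like 0.34.22+a02cc30e41 when a commit hash is baked in via the new TMGitCommitHash ldflag (the exact values here are illustrative, not output captured from this build).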
@@ -423,6 +423,7 @@ type RPCConfig struct {
 	TLSKeyFile string `mapstructure:"tls_key_file"`

 	// pprof listen address (https://golang.org/pkg/net/http/pprof)
+	// FIXME: This should be moved under the instrumentation section
 	PprofListenAddress string `mapstructure:"pprof_laddr"`
 }

@@ -506,6 +507,10 @@ func (cfg *RPCConfig) IsCorsEnabled() bool {
 	return len(cfg.CORSAllowedOrigins) != 0
 }

+func (cfg *RPCConfig) IsPprofEnabled() bool {
+	return len(cfg.PprofListenAddress) != 0
+}
+
 func (cfg RPCConfig) KeyFile() string {
 	path := cfg.TLSKeyFile
 	if filepath.IsAbs(path) {

@@ -703,9 +708,6 @@ type MempoolConfig struct {
 	// Mempool version to use:
 	//  1) "v0" - (default) FIFO mempool.
 	//  2) "v1" - prioritized mempool.
-	// WARNING: There's a known memory leak with the prioritized mempool
-	// that the team are working on. Read more here:
-	// https://github.com/tendermint/tendermint/issues/8775
 	Version string `mapstructure:"version"`
 	RootDir string `mapstructure:"home"`
 	Recheck bool   `mapstructure:"recheck"`

@@ -1204,6 +1206,10 @@ func (cfg *InstrumentationConfig) ValidateBasic() error {
 	return nil
 }

+func (cfg *InstrumentationConfig) IsPrometheusEnabled() bool {
+	return cfg.Prometheus && cfg.PrometheusListenAddr != ""
+}
+
 //-----------------------------------------------------------------------------
 // Utils
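The two predicates added here (RPCConfig.IsPprofEnabled and InstrumentationConfig.IsPrometheusEnabled) let callers guard optional servers with a single check. A sketch of the kind of call site they enable; the wrapper function is ours, and the node's actual wiring is not part of this diff:

package main

import (
	"log"
	"net/http"
	_ "net/http/pprof" // registers /debug/pprof/* on http.DefaultServeMux

	cfg "github.com/tendermint/tendermint/config"
)

// startPprofIfEnabled starts the profiling server only when pprof_laddr is
// set; IsPprofEnabled is simply a non-empty check on that address.
func startPprofIfEnabled(config *cfg.Config) {
	if config.RPC.IsPprofEnabled() {
		go func() {
			log.Println(http.ListenAndServe(config.RPC.PprofListenAddress, nil))
		}()
	}
}

func main() {
	startPprofIfEnabled(cfg.DefaultConfig())
}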
@@ -26,6 +26,7 @@ import (
 	mempoolv0 "github.com/tendermint/tendermint/mempool/v0"
 	mempoolv1 "github.com/tendermint/tendermint/mempool/v1"
 	"github.com/tendermint/tendermint/p2p"
+	tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus"
 	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
 	sm "github.com/tendermint/tendermint/state"
 	"github.com/tendermint/tendermint/store"

@@ -45,6 +46,8 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
 	appFunc := newKVStore

 	genDoc, privVals := randGenesisDoc(nValidators, false, 30)
+	state, err := sm.MakeGenesisState(genDoc)
+	require.NoError(t, err)
 	css := make([]*State, nValidators)

 	for i := 0; i < nValidators; i++ {

@@ -53,7 +56,6 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
 		stateStore := sm.NewStore(stateDB, sm.StoreOptions{
 			DiscardABCIResponses: false,
 		})
-		state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc)
 		thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
 		defer os.RemoveAll(thisConfig.RootDir)
 		ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal

@@ -101,7 +103,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {

 		// Make State
 		blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool, blockStore)
-		cs := NewState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool)
+		cs := NewState(thisConfig.Consensus, blockExec, blockStore, mempool, evpool, WithState(state.Copy()))
 		cs.SetLogger(cs.Logger)
 		// set private validator
 		pv := privVals[i]

@@ -124,7 +126,8 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
 	blocksSubs := make([]types.Subscription, 0)
 	eventBuses := make([]*types.EventBus, nValidators)
 	for i := 0; i < nValidators; i++ {
-		reactors[i] = NewReactor(css[i], true) // so we dont start the consensus states
+		// Note, we dont start the consensus states
+		reactors[i] = NewReactor(css[i])
 		reactors[i].SetLogger(css[i].Logger)

 		// eventBus is already started with the cs

@@ -165,10 +168,16 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
 			for i, peer := range peerList {
 				if i < len(peerList)/2 {
 					bcs.Logger.Info("Signed and pushed vote", "vote", prevote1, "peer", peer)
-					peer.Send(VoteChannel, MustEncode(&VoteMessage{prevote1}))
+					peer.Send(p2p.Envelope{
+						Message:   &tmcons.Vote{Vote: prevote1.ToProto()},
+						ChannelID: VoteChannel,
+					})
 				} else {
 					bcs.Logger.Info("Signed and pushed vote", "vote", prevote2, "peer", peer)
-					peer.Send(VoteChannel, MustEncode(&VoteMessage{prevote2}))
+					peer.Send(p2p.Envelope{
+						Message:   &tmcons.Vote{Vote: prevote2.ToProto()},
+						ChannelID: VoteChannel,
+					})
 				}
 			}
 		} else {

@@ -247,8 +256,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {

 	// start the consensus reactors
 	for i := 0; i < nValidators; i++ {
-		s := reactors[i].conS.GetState()
-		reactors[i].SwitchToConsensus(s, false)
+		require.NoError(t, reactors[i].SwitchToConsensus(state.Copy(), false))
 	}
 	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

@@ -307,7 +315,7 @@ func TestByzantineConflictingProposalsWithPartition(t *testing.T) {
 	N := 4
 	logger := consensusLogger().With("test", "byzantine")
 	app := newKVStore
-	css, cleanup := randConsensusNet(N, "consensus_byzantine_test", newMockTickerFunc(false), app)
+	css, cleanup := randConsensusNet(t, N, "consensus_byzantine_test", newMockTickerFunc(false), app)
 	defer cleanup()

 	// give the byzantine validator a normal ticker

@@ -356,7 +364,8 @@ func TestByzantineConflictingProposalsWithPartition(t *testing.T) {
 		blocksSubs[i], err = eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock)
 		require.NoError(t, err)

-		conR := NewReactor(css[i], true) // so we don't start the consensus states
+		// Note, we don't start the consensus states
+		conR := NewReactor(css[i])
 		conR.SetLogger(logger.With("validator", i))
 		conR.SetEventBus(eventBus)

@@ -400,13 +409,13 @@ func TestByzantineConflictingProposalsWithPartition(t *testing.T) {
 	// note these must be started before the byz
 	for i := 1; i < N; i++ {
 		cr := reactors[i].(*Reactor)
-		cr.SwitchToConsensus(cr.conS.GetState(), false)
+		require.NoError(t, cr.SwitchToConsensus(cr.conS.GetState(), false))
 	}

 	// start the byzantine state machine
 	byzR := reactors[0].(*ByzantineReactor)
 	s := byzR.reactor.conS.GetState()
-	byzR.reactor.SwitchToConsensus(s, false)
+	require.NoError(t, byzR.reactor.SwitchToConsensus(s, false))

 	// byz proposer sends one block to peers[0]
 	// and the other block to peers[1] and peers[2].

@@ -520,18 +529,26 @@ func sendProposalAndParts(
 	parts *types.PartSet,
 ) {
 	// proposal
-	msg := &ProposalMessage{Proposal: proposal}
-	peer.Send(DataChannel, MustEncode(msg))
+	peer.Send(p2p.Envelope{
+		ChannelID: DataChannel,
+		Message:   &tmcons.Proposal{Proposal: *proposal.ToProto()},
+	})

 	// parts
 	for i := 0; i < int(parts.Total()); i++ {
 		part := parts.GetPart(i)
-		msg := &BlockPartMessage{
-			Height: height, // This tells peer that this part applies to us.
-			Round:  round,  // This tells peer that this part applies to us.
-			Part:   part,
+		pp, err := part.ToProto()
+		if err != nil {
+			panic(err) // TODO: wbanfield better error handling
 		}
-		peer.Send(DataChannel, MustEncode(msg))
+		peer.Send(p2p.Envelope{
+			ChannelID: DataChannel,
+			Message: &tmcons.BlockPart{
+				Height: height, // This tells peer that this part applies to us.
+				Round:  round,  // This tells peer that this part applies to us.
+				Part:   *pp,
+			},
+		})
 	}

 	// votes

@@ -539,9 +556,14 @@ func sendProposalAndParts(
 	prevote, _ := cs.signVote(tmproto.PrevoteType, blockHash, parts.Header())
 	precommit, _ := cs.signVote(tmproto.PrecommitType, blockHash, parts.Header())
 	cs.mtx.Unlock()

-	peer.Send(VoteChannel, MustEncode(&VoteMessage{prevote}))
-	peer.Send(VoteChannel, MustEncode(&VoteMessage{precommit}))
+	peer.Send(p2p.Envelope{
+		ChannelID: VoteChannel,
+		Message:   &tmcons.Vote{Vote: prevote.ToProto()},
+	})
+	peer.Send(p2p.Envelope{
+		ChannelID: VoteChannel,
+		Message:   &tmcons.Vote{Vote: precommit.ToProto()},
+	})
 }

 //----------------------------------------

@@ -572,14 +594,14 @@ func (br *ByzantineReactor) AddPeer(peer p2p.Peer) {

 	// Send our state to peer.
 	// If we're syncing, broadcast a RoundStepMessage later upon SwitchToConsensus().
-	if !br.reactor.waitSync {
+	if br.reactor.conS.IsRunning() {
 		br.reactor.sendNewRoundStepMessage(peer)
 	}
 }
 func (br *ByzantineReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
 	br.reactor.RemovePeer(peer, reason)
 }
-func (br *ByzantineReactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {
-	br.reactor.Receive(chID, peer, msgBytes)
+func (br *ByzantineReactor) Receive(e p2p.Envelope) {
+	br.reactor.Receive(e)
 }
 func (br *ByzantineReactor) InitPeer(peer p2p.Peer) p2p.Peer { return peer }
@@ -440,7 +440,7 @@ func newStateWithConfigAndBlockStore(
 	}

 	blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool, blockStore)
-	cs := NewState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool)
+	cs := NewState(thisConfig.Consensus, blockExec, blockStore, mempool, evpool, WithState(state.Copy()))
 	cs.SetLogger(log.TestingLogger().With("module", "consensus"))
 	cs.SetPrivValidator(pv)

@@ -747,18 +747,17 @@ func consensusLogger() log.Logger {
 	}).With("module", "consensus")
 }

-func randConsensusNet(nValidators int, testName string, tickerFunc func() TimeoutTicker,
+func randConsensusNet(t *testing.T, nValidators int, testName string, tickerFunc func() TimeoutTicker,
 	appFunc func() abci.Application, configOpts ...func(*cfg.Config)) ([]*State, cleanupFunc) {
+	t.Helper()
 	genDoc, privVals := randGenesisDoc(nValidators, false, 30)
+	state, err := sm.MakeGenesisState(genDoc)
+	require.NoError(t, err)
 	css := make([]*State, nValidators)
 	logger := consensusLogger()
 	configRootDirs := make([]string, 0, nValidators)
 	for i := 0; i < nValidators; i++ {
 		stateDB := dbm.NewMemDB() // each state needs its own db
 		stateStore := sm.NewStore(stateDB, sm.StoreOptions{
 			DiscardABCIResponses: false,
 		})
-		state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc)
 		thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
 		configRootDirs = append(configRootDirs, thisConfig.RootDir)
 		for _, opt := range configOpts {

@@ -772,6 +771,7 @@ func randConsensusNet(nValidators int, testName string, tickerFunc func() Timeou
 		css[i] = newStateWithConfigAndBlockStore(thisConfig, state, privVals[i], app, stateDB)
 		css[i].SetTimeoutTicker(tickerFunc())
 		css[i].SetLogger(logger.With("validator", i, "module", "consensus"))
+		css[i].updateToState(state.Copy())
 	}
 	return css, func() {
 		for _, dir := range configRootDirs {
@@ -7,6 +7,7 @@ import (
 	"github.com/tendermint/tendermint/libs/log"
 	tmrand "github.com/tendermint/tendermint/libs/rand"
 	"github.com/tendermint/tendermint/p2p"
+	tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus"
 	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
 	"github.com/tendermint/tendermint/types"
 )

@@ -18,7 +19,7 @@ import (
 // Ensure a testnet makes blocks
 func TestReactorInvalidPrecommit(t *testing.T) {
 	N := 4
-	css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore)
+	css, cleanup := randConsensusNet(t, N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore)
 	defer cleanup()

 	for i := 0; i < 4; i++ {

@@ -94,7 +95,10 @@ func invalidDoPrevoteFunc(t *testing.T, height int64, round int32, cs *State, sw
 		peers := sw.Peers().List()
 		for _, peer := range peers {
 			cs.Logger.Info("Sending bad vote", "block", blockHash, "peer", peer)
-			peer.Send(VoteChannel, MustEncode(&VoteMessage{precommit}))
+			peer.Send(p2p.Envelope{
+				Message:   &tmcons.Vote{Vote: precommit.ToProto()},
+				ChannelID: VoteChannel,
+			})
 		}
 	}()
 }
@@ -118,18 +118,6 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
Name: "latest_block_height",
Help: "The latest block height.",
}, labels).With(labelsAndValues...),
BlockSyncing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: namespace,
Subsystem: MetricsSubsystem,
Name: "block_syncing",
Help: "Whether or not a node is block syncing. 1 if yes, 0 if no.",
}, labels).With(labelsAndValues...),
StateSyncing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: namespace,
Subsystem: MetricsSubsystem,
Name: "state_syncing",
Help: "Whether or not a node is state syncing. 1 if yes, 0 if no.",
}, labels).With(labelsAndValues...),
BlockParts: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
Namespace: namespace,
Subsystem: MetricsSubsystem,
@@ -208,8 +196,6 @@ func NopMetrics() *Metrics {
BlockSizeBytes: discard.NewGauge(),
TotalTxs: discard.NewGauge(),
CommittedHeight: discard.NewGauge(),
BlockSyncing: discard.NewGauge(),
StateSyncing: discard.NewGauge(),
BlockParts: discard.NewCounter(),
StepDurationSeconds: discard.NewHistogram(),
BlockGossipPartsReceived: discard.NewCounter(),

@@ -61,10 +61,6 @@ type Metrics struct {
TotalTxs metrics.Gauge
// The latest block height.
CommittedHeight metrics.Gauge `metrics_name:"latest_block_height"`
// Whether or not a node is block syncing. 1 if yes, 0 if no.
BlockSyncing metrics.Gauge
// Whether or not a node is state syncing. 1 if yes, 0 if no.
StateSyncing metrics.Gauge

// Number of block parts transmitted by each peer.
BlockParts metrics.Counter `metrics_labels:"peer_id"`
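
The block_syncing and state_syncing gauges disappear from the consensus Metrics struct, its Prometheus constructor, and NopMetrics alike, matching the removal of the reactor's waitSync bookkeeping later in this compare. Note how CommittedHeight keeps its external name through a struct tag; a tiny, self-contained illustration of reading such a tag (the metrics generator that actually consumes it is not part of this diff):

    package main

    import (
        "fmt"
        "reflect"
    )

    type Metrics struct {
        CommittedHeight float64 `metrics_name:"latest_block_height"`
    }

    func main() {
        f, _ := reflect.TypeOf(Metrics{}).FieldByName("CommittedHeight")
        fmt.Println(f.Tag.Get("metrics_name")) // latest_block_height
    }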
@@ -5,7 +5,6 @@ import (
"fmt"

"github.com/cosmos/gogoproto/proto"

cstypes "github.com/tendermint/tendermint/consensus/types"
"github.com/tendermint/tendermint/libs/bits"
tmmath "github.com/tendermint/tendermint/libs/math"
@@ -15,173 +14,147 @@ import (
"github.com/tendermint/tendermint/types"
)

// MsgToProto takes a consensus message type and returns the proto defined consensus message
func MsgToProto(msg Message) (*tmcons.Message, error) {
// MsgToProto takes a consensus message type and returns the proto defined consensus message.
//
// TODO: This needs to be removed, but WALToProto depends on this.
func MsgToProto(msg Message) (proto.Message, error) {
if msg == nil {
return nil, errors.New("consensus: message is nil")
}
var pb tmcons.Message
var pb proto.Message

switch msg := msg.(type) {
case *NewRoundStepMessage:
pb = tmcons.Message{
Sum: &tmcons.Message_NewRoundStep{
NewRoundStep: &tmcons.NewRoundStep{
Height: msg.Height,
Round: msg.Round,
Step: uint32(msg.Step),
SecondsSinceStartTime: msg.SecondsSinceStartTime,
LastCommitRound: msg.LastCommitRound,
},
},
pb = &tmcons.NewRoundStep{
Height: msg.Height,
Round: msg.Round,
Step: uint32(msg.Step),
SecondsSinceStartTime: msg.SecondsSinceStartTime,
LastCommitRound: msg.LastCommitRound,
}

case *NewValidBlockMessage:
pbPartSetHeader := msg.BlockPartSetHeader.ToProto()
pbBits := msg.BlockParts.ToProto()
pb = tmcons.Message{
Sum: &tmcons.Message_NewValidBlock{
NewValidBlock: &tmcons.NewValidBlock{
Height: msg.Height,
Round: msg.Round,
BlockPartSetHeader: pbPartSetHeader,
BlockParts: pbBits,
IsCommit: msg.IsCommit,
},
},
pb = &tmcons.NewValidBlock{
Height: msg.Height,
Round: msg.Round,
BlockPartSetHeader: pbPartSetHeader,
BlockParts: pbBits,
IsCommit: msg.IsCommit,
}

case *ProposalMessage:
pbP := msg.Proposal.ToProto()
pb = tmcons.Message{
Sum: &tmcons.Message_Proposal{
Proposal: &tmcons.Proposal{
Proposal: *pbP,
},
},
pb = &tmcons.Proposal{
Proposal: *pbP,
}

case *ProposalPOLMessage:
pbBits := msg.ProposalPOL.ToProto()
pb = tmcons.Message{
Sum: &tmcons.Message_ProposalPol{
ProposalPol: &tmcons.ProposalPOL{
Height: msg.Height,
ProposalPolRound: msg.ProposalPOLRound,
ProposalPol: *pbBits,
},
},
pb = &tmcons.ProposalPOL{
Height: msg.Height,
ProposalPolRound: msg.ProposalPOLRound,
ProposalPol: *pbBits,
}

case *BlockPartMessage:
parts, err := msg.Part.ToProto()
if err != nil {
return nil, fmt.Errorf("msg to proto error: %w", err)
}
pb = tmcons.Message{
Sum: &tmcons.Message_BlockPart{
BlockPart: &tmcons.BlockPart{
Height: msg.Height,
Round: msg.Round,
Part: *parts,
},
},
pb = &tmcons.BlockPart{
Height: msg.Height,
Round: msg.Round,
Part: *parts,
}

case *VoteMessage:
vote := msg.Vote.ToProto()
pb = tmcons.Message{
Sum: &tmcons.Message_Vote{
Vote: &tmcons.Vote{
Vote: vote,
},
},
pb = &tmcons.Vote{
Vote: vote,
}

case *HasVoteMessage:
pb = tmcons.Message{
Sum: &tmcons.Message_HasVote{
HasVote: &tmcons.HasVote{
Height: msg.Height,
Round: msg.Round,
Type: msg.Type,
Index: msg.Index,
},
},
pb = &tmcons.HasVote{
Height: msg.Height,
Round: msg.Round,
Type: msg.Type,
Index: msg.Index,
}

case *VoteSetMaj23Message:
bi := msg.BlockID.ToProto()
pb = tmcons.Message{
Sum: &tmcons.Message_VoteSetMaj23{
VoteSetMaj23: &tmcons.VoteSetMaj23{
Height: msg.Height,
Round: msg.Round,
Type: msg.Type,
BlockID: bi,
},
},
pb = &tmcons.VoteSetMaj23{
Height: msg.Height,
Round: msg.Round,
Type: msg.Type,
BlockID: bi,
}

case *VoteSetBitsMessage:
bi := msg.BlockID.ToProto()
bits := msg.Votes.ToProto()

vsb := &tmcons.Message_VoteSetBits{
VoteSetBits: &tmcons.VoteSetBits{
Height: msg.Height,
Round: msg.Round,
Type: msg.Type,
BlockID: bi,
},
vsb := &tmcons.VoteSetBits{
Height: msg.Height,
Round: msg.Round,
Type: msg.Type,
BlockID: bi,
}

if bits != nil {
vsb.VoteSetBits.Votes = *bits
vsb.Votes = *bits
}

pb = tmcons.Message{
Sum: vsb,
}
pb = vsb

default:
return nil, fmt.Errorf("consensus: message not recognized: %T", msg)
}

return &pb, nil
return pb, nil
}
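
MsgToProto now returns the inner proto type (*tmcons.NewRoundStep, *tmcons.Vote, and so on) instead of pre-wrapping it in the tmcons.Message oneof; each case collapses by two nesting levels. Wrapping is deferred to callers that still need the container, via the p2p.Wrapper interface used by WALToProto below. Its shape, as implied by the w.Wrap() call in this diff:

    // Implied by the consMsg.(p2p.Wrapper) assertion in WALToProto:
    // a proto message that can wrap itself in its oneof container type.
    type Wrapper interface {
        proto.Message
        Wrap() proto.Message
    }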
// MsgFromProto takes a consensus proto message and returns the native go type
func MsgFromProto(msg *tmcons.Message) (Message, error) {
if msg == nil {
func MsgFromProto(p proto.Message) (Message, error) {
if p == nil {
return nil, errors.New("consensus: nil message")
}
var pb Message

switch msg := msg.Sum.(type) {
case *tmcons.Message_NewRoundStep:
rs, err := tmmath.SafeConvertUint8(int64(msg.NewRoundStep.Step))
switch msg := p.(type) {
case *tmcons.NewRoundStep:
rs, err := tmmath.SafeConvertUint8(int64(msg.Step))
// deny message based on possible overflow
if err != nil {
return nil, fmt.Errorf("denying message due to possible overflow: %w", err)
}
pb = &NewRoundStepMessage{
Height: msg.NewRoundStep.Height,
Round: msg.NewRoundStep.Round,
Height: msg.Height,
Round: msg.Round,
Step: cstypes.RoundStepType(rs),
SecondsSinceStartTime: msg.NewRoundStep.SecondsSinceStartTime,
LastCommitRound: msg.NewRoundStep.LastCommitRound,
SecondsSinceStartTime: msg.SecondsSinceStartTime,
LastCommitRound: msg.LastCommitRound,
}
case *tmcons.Message_NewValidBlock:
pbPartSetHeader, err := types.PartSetHeaderFromProto(&msg.NewValidBlock.BlockPartSetHeader)
case *tmcons.NewValidBlock:
pbPartSetHeader, err := types.PartSetHeaderFromProto(&msg.BlockPartSetHeader)
if err != nil {
return nil, fmt.Errorf("parts to proto error: %w", err)
}

pbBits := new(bits.BitArray)
pbBits.FromProto(msg.NewValidBlock.BlockParts)
pbBits.FromProto(msg.BlockParts)

pb = &NewValidBlockMessage{
Height: msg.NewValidBlock.Height,
Round: msg.NewValidBlock.Round,
Height: msg.Height,
Round: msg.Round,
BlockPartSetHeader: *pbPartSetHeader,
BlockParts: pbBits,
IsCommit: msg.NewValidBlock.IsCommit,
IsCommit: msg.IsCommit,
}
case *tmcons.Message_Proposal:
pbP, err := types.ProposalFromProto(&msg.Proposal.Proposal)
case *tmcons.Proposal:
pbP, err := types.ProposalFromProto(&msg.Proposal)
if err != nil {
return nil, fmt.Errorf("proposal msg to proto error: %w", err)
}
@@ -189,26 +162,26 @@ func MsgFromProto(msg *tmcons.Message) (Message, error) {
pb = &ProposalMessage{
Proposal: pbP,
}
case *tmcons.Message_ProposalPol:
case *tmcons.ProposalPOL:
pbBits := new(bits.BitArray)
pbBits.FromProto(&msg.ProposalPol.ProposalPol)
pbBits.FromProto(&msg.ProposalPol)
pb = &ProposalPOLMessage{
Height: msg.ProposalPol.Height,
ProposalPOLRound: msg.ProposalPol.ProposalPolRound,
Height: msg.Height,
ProposalPOLRound: msg.ProposalPolRound,
ProposalPOL: pbBits,
}
case *tmcons.Message_BlockPart:
parts, err := types.PartFromProto(&msg.BlockPart.Part)
case *tmcons.BlockPart:
parts, err := types.PartFromProto(&msg.Part)
if err != nil {
return nil, fmt.Errorf("blockpart msg to proto error: %w", err)
}
pb = &BlockPartMessage{
Height: msg.BlockPart.Height,
Round: msg.BlockPart.Round,
Height: msg.Height,
Round: msg.Round,
Part: parts,
}
case *tmcons.Message_Vote:
vote, err := types.VoteFromProto(msg.Vote.Vote)
case *tmcons.Vote:
vote, err := types.VoteFromProto(msg.Vote)
if err != nil {
return nil, fmt.Errorf("vote msg to proto error: %w", err)
}
@@ -216,36 +189,36 @@ func MsgFromProto(msg *tmcons.Message) (Message, error) {
pb = &VoteMessage{
Vote: vote,
}
case *tmcons.Message_HasVote:
case *tmcons.HasVote:
pb = &HasVoteMessage{
Height: msg.HasVote.Height,
Round: msg.HasVote.Round,
Type: msg.HasVote.Type,
Index: msg.HasVote.Index,
Height: msg.Height,
Round: msg.Round,
Type: msg.Type,
Index: msg.Index,
}
case *tmcons.Message_VoteSetMaj23:
bi, err := types.BlockIDFromProto(&msg.VoteSetMaj23.BlockID)
case *tmcons.VoteSetMaj23:
bi, err := types.BlockIDFromProto(&msg.BlockID)
if err != nil {
return nil, fmt.Errorf("voteSetMaj23 msg to proto error: %w", err)
}
pb = &VoteSetMaj23Message{
Height: msg.VoteSetMaj23.Height,
Round: msg.VoteSetMaj23.Round,
Type: msg.VoteSetMaj23.Type,
Height: msg.Height,
Round: msg.Round,
Type: msg.Type,
BlockID: *bi,
}
case *tmcons.Message_VoteSetBits:
bi, err := types.BlockIDFromProto(&msg.VoteSetBits.BlockID)
case *tmcons.VoteSetBits:
bi, err := types.BlockIDFromProto(&msg.BlockID)
if err != nil {
return nil, fmt.Errorf("voteSetBits msg to proto error: %w", err)
}
bits := new(bits.BitArray)
bits.FromProto(&msg.VoteSetBits.Votes)
bits.FromProto(&msg.Votes)

pb = &VoteSetBitsMessage{
Height: msg.VoteSetBits.Height,
Round: msg.VoteSetBits.Round,
Type: msg.VoteSetBits.Type,
Height: msg.Height,
Round: msg.Round,
Type: msg.Type,
BlockID: *bi,
Votes: bits,
}
@@ -260,20 +233,6 @@ func MsgFromProto(msg *tmcons.Message) (Message, error) {
return pb, nil
}
// MustEncode takes the reactors msg, makes it proto and marshals it
// this mimics `MustMarshalBinaryBare` in that is panics on error
func MustEncode(msg Message) []byte {
pb, err := MsgToProto(msg)
if err != nil {
panic(err)
}
enc, err := proto.Marshal(pb)
if err != nil {
panic(err)
}
return enc
}

// WALToProto takes a WAL message and return a proto walMessage and error
func WALToProto(msg WALMessage) (*tmcons.WALMessage, error) {
var pb tmcons.WALMessage
@@ -294,10 +253,14 @@ func WALToProto(msg WALMessage) (*tmcons.WALMessage, error) {
if err != nil {
return nil, err
}
if w, ok := consMsg.(p2p.Wrapper); ok {
consMsg = w.Wrap()
}
cm := consMsg.(*tmcons.Message)
pb = tmcons.WALMessage{
Sum: &tmcons.WALMessage_MsgInfo{
MsgInfo: &tmcons.MsgInfo{
Msg: *consMsg,
Msg: *cm,
PeerID: string(msg.PeerID),
},
},
@@ -343,7 +306,11 @@ func WALFromProto(msg *tmcons.WALMessage) (WALMessage, error) {
Step: msg.EventDataRoundState.Step,
}
case *tmcons.WALMessage_MsgInfo:
walMsg, err := MsgFromProto(&msg.MsgInfo.Msg)
um, err := msg.MsgInfo.Msg.Unwrap()
if err != nil {
return nil, fmt.Errorf("unwrap message: %w", err)
}
walMsg, err := MsgFromProto(um)
if err != nil {
return nil, fmt.Errorf("msgInfo from proto error: %w", err)
}
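
MustEncode goes away with the byte-oriented send API, but the WAL format is unchanged: it still stores the wrapped tmcons.Message. Hence WALToProto wraps the now-bare proto message before persisting, and WALFromProto unwraps it again before MsgFromProto. The round trip, restated from the hunks above:

    // Write path (WALToProto): bare message -> wrapped container.
    consMsg, err := MsgToProto(m.Msg) // e.g. *tmcons.Vote
    if err != nil {
        return nil, err
    }
    if w, ok := consMsg.(p2p.Wrapper); ok {
        consMsg = w.Wrap() // *tmcons.Message with the Sum oneof set
    }

    // Read path (WALFromProto): wrapped container -> bare message -> native type.
    um, err := msg.MsgInfo.Msg.Unwrap()
    if err != nil {
        return nil, fmt.Errorf("unwrap message: %w", err)
    }
    walMsg, err := MsgFromProto(um)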
@@ -71,7 +71,7 @@ func TestMsgToProto(t *testing.T) {
testsCases := []struct {
testName string
msg Message
want *tmcons.Message
want proto.Message
wantErr bool
}{
{"successful NewRoundStepMessage", &NewRoundStepMessage{
@@ -80,17 +80,15 @@ func TestMsgToProto(t *testing.T) {
Step: 1,
SecondsSinceStartTime: 1,
LastCommitRound: 2,
}, &tmcons.Message{
Sum: &tmcons.Message_NewRoundStep{
NewRoundStep: &tmcons.NewRoundStep{
Height: 2,
Round: 1,
Step: 1,
SecondsSinceStartTime: 1,
LastCommitRound: 2,
},
},
}, false},
}, &tmcons.NewRoundStep{
Height: 2,
Round: 1,
Step: 1,
SecondsSinceStartTime: 1,
LastCommitRound: 2,
},

false},

{"successful NewValidBlockMessage", &NewValidBlockMessage{
Height: 1,
@@ -98,92 +96,78 @@ func TestMsgToProto(t *testing.T) {
BlockPartSetHeader: psh,
BlockParts: bits,
IsCommit: false,
}, &tmcons.Message{
Sum: &tmcons.Message_NewValidBlock{
NewValidBlock: &tmcons.NewValidBlock{
Height: 1,
Round: 1,
BlockPartSetHeader: pbPsh,
BlockParts: pbBits,
IsCommit: false,
},
},
}, false},
}, &tmcons.NewValidBlock{
Height: 1,
Round: 1,
BlockPartSetHeader: pbPsh,
BlockParts: pbBits,
IsCommit: false,
},

false},
{"successful BlockPartMessage", &BlockPartMessage{
Height: 100,
Round: 1,
Part: &parts,
}, &tmcons.Message{
Sum: &tmcons.Message_BlockPart{
BlockPart: &tmcons.BlockPart{
Height: 100,
Round: 1,
Part: *pbParts,
},
},
}, false},
}, &tmcons.BlockPart{
Height: 100,
Round: 1,
Part: *pbParts,
},

false},
{"successful ProposalPOLMessage", &ProposalPOLMessage{
Height: 1,
ProposalPOLRound: 1,
ProposalPOL: bits,
}, &tmcons.Message{
Sum: &tmcons.Message_ProposalPol{
ProposalPol: &tmcons.ProposalPOL{
Height: 1,
ProposalPolRound: 1,
ProposalPol: *pbBits,
},
}}, false},
}, &tmcons.ProposalPOL{
Height: 1,
ProposalPolRound: 1,
ProposalPol: *pbBits,
},
false},
{"successful ProposalMessage", &ProposalMessage{
Proposal: &proposal,
}, &tmcons.Message{
Sum: &tmcons.Message_Proposal{
Proposal: &tmcons.Proposal{
Proposal: *pbProposal,
},
},
}, false},
}, &tmcons.Proposal{
Proposal: *pbProposal,
},

false},
{"successful VoteMessage", &VoteMessage{
Vote: vote,
}, &tmcons.Message{
Sum: &tmcons.Message_Vote{
Vote: &tmcons.Vote{
Vote: pbVote,
},
},
}, false},
}, &tmcons.Vote{
Vote: pbVote,
},

false},
{"successful VoteSetMaj23", &VoteSetMaj23Message{
Height: 1,
Round: 1,
Type: 1,
BlockID: bi,
}, &tmcons.Message{
Sum: &tmcons.Message_VoteSetMaj23{
VoteSetMaj23: &tmcons.VoteSetMaj23{
Height: 1,
Round: 1,
Type: 1,
BlockID: pbBi,
},
},
}, false},
}, &tmcons.VoteSetMaj23{
Height: 1,
Round: 1,
Type: 1,
BlockID: pbBi,
},

false},
{"successful VoteSetBits", &VoteSetBitsMessage{
Height: 1,
Round: 1,
Type: 1,
BlockID: bi,
Votes: bits,
}, &tmcons.Message{
Sum: &tmcons.Message_VoteSetBits{
VoteSetBits: &tmcons.VoteSetBits{
Height: 1,
Round: 1,
Type: 1,
BlockID: pbBi,
Votes: *pbBits,
},
},
}, false},
}, &tmcons.VoteSetBits{
Height: 1,
Round: 1,
Type: 1,
BlockID: pbBi,
Votes: *pbBits,
},

false},
{"failure", nil, &tmcons.Message{}, true},
}
for _, tt := range testsCases {
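
With want typed as proto.Message, each expected value shrinks to the inner proto struct. The assertion loop itself falls outside these hunks; a typical shape for it, assuming testify, would be:

    for _, tt := range testsCases {
        tt := tt
        t.Run(tt.testName, func(t *testing.T) {
            got, err := MsgToProto(tt.msg)
            if tt.wantErr {
                require.Error(t, err)
                return
            }
            require.NoError(t, err)
            require.Equal(t, tt.want, got)
        })
    }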
@@ -7,8 +7,6 @@ import (
"sync"
"time"

"github.com/cosmos/gogoproto/proto"

cstypes "github.com/tendermint/tendermint/consensus/types"
"github.com/tendermint/tendermint/libs/bits"
tmevents "github.com/tendermint/tendermint/libs/events"
@@ -44,7 +42,6 @@ type Reactor struct {
conS *State

mtx tmsync.RWMutex
waitSync bool
eventBus *types.EventBus
rs *cstypes.RoundState

@@ -55,12 +52,11 @@ type ReactorOption func(*Reactor)

// NewReactor returns a new Reactor with the given
// consensusState.
func NewReactor(consensusState *State, waitSync bool, options ...ReactorOption) *Reactor {
func NewReactor(consensusState *State, options ...ReactorOption) *Reactor {
conR := &Reactor{
conS: consensusState,
waitSync: waitSync,
rs: consensusState.GetRoundState(),
Metrics: NopMetrics(),
conS: consensusState,
rs: consensusState.GetRoundState(),
Metrics: NopMetrics(),
}
conR.BaseReactor = *p2p.NewBaseReactor("Consensus", conR)

@@ -74,21 +70,12 @@ func NewReactor(consensusState *State, waitSync bool, options ...ReactorOption)
// OnStart implements BaseService by subscribing to events, which later will be
// broadcasted to other peers and starting state if we're not in block sync.
func (conR *Reactor) OnStart() error {
conR.Logger.Info("Reactor ", "waitSync", conR.WaitSync())

// start routine that computes peer statistics for evaluating peer quality
go conR.peerStatsRoutine()

conR.subscribeToBroadcastEvents()
go conR.updateRoundStateRoutine()

if !conR.WaitSync() {
err := conR.conS.Start()
if err != nil {
return err
}
}

return nil
}

@@ -96,47 +83,34 @@ func (conR *Reactor) OnStart() error {
// state.
func (conR *Reactor) OnStop() {
conR.unsubscribeFromBroadcastEvents()
if err := conR.conS.Stop(); err != nil {
conR.Logger.Error("Error stopping consensus state", "err", err)
}
if !conR.WaitSync() {
if conR.conS.IsRunning() {
if err := conR.conS.Stop(); err != nil {
conR.Logger.Error("Error stopping consensus state", "err", err)
}
conR.conS.Wait()
}
}

func (conR *Reactor) IsConsensusRunning() bool {
return conR.conS.IsRunning()
}
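
The waitSync flag and its constructor parameter are gone; the reactor now derives its mode from whether the consensus state machine itself is running, exposed through the new IsConsensusRunning accessor. The gossip routines later in this compare all adopt the same gating pattern:

    for {
        if !peer.IsRunning() || !conR.IsRunning() {
            return
        }
        // Consensus not started yet (still block/state syncing):
        // idle briefly instead of gossiping, then re-check.
        if !conR.IsConsensusRunning() {
            time.Sleep(conR.conS.config.PeerGossipSleepDuration)
            continue
        }
        // ... gossip work ...
    }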
// SwitchToConsensus switches from block_sync mode to consensus mode.
// It resets the state, turns off block_sync, and starts the consensus state-machine
func (conR *Reactor) SwitchToConsensus(state sm.State, skipWAL bool) {
func (conR *Reactor) SwitchToConsensus(state sm.State, skipWAL bool) error {
conR.Logger.Info("SwitchToConsensus")

// We have no votes, so reconstruct LastCommit from SeenCommit.
if state.LastBlockHeight > 0 {
if state.LastBlockHeight > state.InitialHeight {
conR.conS.reconstructLastCommit(state)
}

// NOTE: The line below causes broadcastNewRoundStepRoutine() to broadcast a
// NewRoundStepMessage.
conR.conS.updateToState(state)

conR.mtx.Lock()
conR.waitSync = false
conR.mtx.Unlock()
conR.Metrics.BlockSyncing.Set(0)
conR.Metrics.StateSyncing.Set(0)

if skipWAL {
conR.conS.doWALCatchup = false
}
err := conR.conS.Start()
if err != nil {
panic(fmt.Sprintf(`Failed to start consensus state: %v

conS:
%+v

conR:
%+v`, err, conR.conS, conR))
}
return conR.conS.Start()
}

// GetChannels implements Reactor
@@ -148,6 +122,7 @@ func (conR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
Priority: 6,
SendQueueCapacity: 100,
RecvMessageCapacity: maxMsgSize,
MessageType: &tmcons.Message{},
},
{
ID: DataChannel, // maybe split between gossiping current block and catchup stuff
@@ -156,6 +131,7 @@ func (conR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
SendQueueCapacity: 100,
RecvBufferCapacity: 50 * 4096,
RecvMessageCapacity: maxMsgSize,
MessageType: &tmcons.Message{},
},
{
ID: VoteChannel,
@@ -163,6 +139,7 @@ func (conR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
SendQueueCapacity: 100,
RecvBufferCapacity: 100 * 100,
RecvMessageCapacity: maxMsgSize,
MessageType: &tmcons.Message{},
},
{
ID: VoteSetBitsChannel,
@@ -170,6 +147,7 @@ func (conR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
SendQueueCapacity: 2,
RecvBufferCapacity: 1024,
RecvMessageCapacity: maxMsgSize,
MessageType: &tmcons.Message{},
},
}
}
@@ -199,7 +177,7 @@ func (conR *Reactor) AddPeer(peer p2p.Peer) {

// Send our state to peer.
// If we're block_syncing, broadcast a RoundStepMessage later upon SwitchToConsensus().
if !conR.WaitSync() {
if conR.conS.IsRunning() {
conR.sendNewRoundStepMessage(peer)
}
}
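
Every channel descriptor gains a MessageType prototype, which is presumably what lets the switch unmarshal inbound bytes into e.Message before calling Receive. One descriptor from this diff in full:

    {
        ID:                  StateChannel,
        Priority:            6,
        SendQueueCapacity:   100,
        RecvMessageCapacity: maxMsgSize,
        // Prototype the p2p layer decodes inbound payloads into.
        MessageType: &tmcons.Message{},
    },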
@@ -223,34 +201,33 @@ func (conR *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) {
// Peer state updates can happen in parallel, but processing of
// proposals, block parts, and votes are ordered by the receiveRoutine
// NOTE: blocks on consensus state for proposals, block parts, and votes
func (conR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
func (conR *Reactor) Receive(e p2p.Envelope) {
if !conR.IsRunning() {
conR.Logger.Debug("Receive", "src", src, "chId", chID, "bytes", msgBytes)
conR.Logger.Debug("Receive", "src", e.Src, "chId", e.ChannelID)
return
}

msg, err := decodeMsg(msgBytes)
msg, err := MsgFromProto(e.Message)
if err != nil {
conR.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err)
conR.Switch.StopPeerForError(src, err)
conR.Logger.Error("Error decoding message", "src", e.Src, "chId", e.ChannelID, "err", err)
conR.Switch.StopPeerForError(e.Src, err)
return
}

if err = msg.ValidateBasic(); err != nil {
conR.Logger.Error("Peer sent us invalid msg", "peer", src, "msg", msg, "err", err)
conR.Switch.StopPeerForError(src, err)
conR.Logger.Error("Peer sent us invalid msg", "peer", e.Src, "msg", e.Message, "err", err)
conR.Switch.StopPeerForError(e.Src, err)
return
}

conR.Logger.Debug("Receive", "src", src, "chId", chID, "msg", msg)
conR.Logger.Debug("Receive", "src", e.Src, "chId", e.ChannelID, "msg", msg)

// Get peer states
ps, ok := src.Get(types.PeerStateKey).(*PeerState)
ps, ok := e.Src.Get(types.PeerStateKey).(*PeerState)
if !ok {
panic(fmt.Sprintf("Peer %v has no state", src))
panic(fmt.Sprintf("Peer %v has no state", e.Src))
}

switch chID {
switch e.ChannelID {
case StateChannel:
switch msg := msg.(type) {
case *NewRoundStepMessage:
@@ -258,8 +235,8 @@ func (conR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
initialHeight := conR.conS.state.InitialHeight
conR.conS.mtx.Unlock()
if err = msg.ValidateHeight(initialHeight); err != nil {
conR.Logger.Error("Peer sent us invalid msg", "peer", src, "msg", msg, "err", err)
conR.Switch.StopPeerForError(src, err)
conR.Logger.Error("Peer sent us invalid msg", "peer", e.Src, "msg", msg, "err", err)
conR.Switch.StopPeerForError(e.Src, err)
return
}
ps.ApplyNewRoundStepMessage(msg)
@@ -278,7 +255,7 @@ func (conR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
// Peer claims to have a maj23 for some BlockID at H,R,S,
err := votes.SetPeerMaj23(msg.Round, msg.Type, ps.peer.ID(), msg.BlockID)
if err != nil {
conR.Switch.StopPeerForError(src, err)
conR.Switch.StopPeerForError(e.Src, err)
return
}
// Respond with a VoteSetBitsMessage showing which votes we have.
@@ -292,38 +269,44 @@ func (conR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
default:
panic("Bad VoteSetBitsMessage field Type. Forgot to add a check in ValidateBasic?")
}
src.TrySend(VoteSetBitsChannel, MustEncode(&VoteSetBitsMessage{
eMsg := &tmcons.VoteSetBits{
Height: msg.Height,
Round: msg.Round,
Type: msg.Type,
BlockID: msg.BlockID,
Votes: ourVotes,
}))
BlockID: msg.BlockID.ToProto(),
}
if votes := ourVotes.ToProto(); votes != nil {
eMsg.Votes = *votes
}
e.Src.TrySend(p2p.Envelope{
ChannelID: VoteSetBitsChannel,
Message: eMsg,
})
default:
conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
}

case DataChannel:
if conR.WaitSync() {
if !conR.conS.IsRunning() {
conR.Logger.Info("Ignoring message received during sync", "msg", msg)
return
}
switch msg := msg.(type) {
case *ProposalMessage:
ps.SetHasProposal(msg.Proposal)
conR.conS.peerMsgQueue <- msgInfo{msg, src.ID()}
conR.conS.peerMsgQueue <- msgInfo{msg, e.Src.ID()}
case *ProposalPOLMessage:
ps.ApplyProposalPOLMessage(msg)
case *BlockPartMessage:
ps.SetHasProposalBlockPart(msg.Height, msg.Round, int(msg.Part.Index))
conR.Metrics.BlockParts.With("peer_id", string(src.ID())).Add(1)
conR.conS.peerMsgQueue <- msgInfo{msg, src.ID()}
conR.Metrics.BlockParts.With("peer_id", string(e.Src.ID())).Add(1)
conR.conS.peerMsgQueue <- msgInfo{msg, e.Src.ID()}
default:
conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
}

case VoteChannel:
if conR.WaitSync() {
if !conR.conS.IsRunning() {
conR.Logger.Info("Ignoring message received during sync", "msg", msg)
return
}
@@ -337,7 +320,7 @@ func (conR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
ps.EnsureVoteBitArrays(height-1, lastCommitSize)
ps.SetHasVote(msg.Vote)

cs.peerMsgQueue <- msgInfo{msg, src.ID()}
cs.peerMsgQueue <- msgInfo{msg, e.Src.ID()}

default:
// don't punish (leave room for soft upgrades)
@@ -345,7 +328,7 @@ func (conR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
}

case VoteSetBitsChannel:
if conR.WaitSync() {
if !conR.conS.IsRunning() {
conR.Logger.Info("Ignoring message received during sync", "msg", msg)
return
}
@@ -376,7 +359,7 @@ func (conR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
}

default:
conR.Logger.Error(fmt.Sprintf("Unknown chId %X", chID))
conR.Logger.Error(fmt.Sprintf("Unknown chId %X", e.ChannelID))
}
}

@@ -386,13 +369,6 @@ func (conR *Reactor) SetEventBus(b *types.EventBus) {
conR.conS.SetEventBus(b)
}

// WaitSync returns whether the consensus reactor is waiting for state/block sync.
func (conR *Reactor) WaitSync() bool {
conR.mtx.RLock()
defer conR.mtx.RUnlock()
return conR.waitSync
}

//--------------------------------------
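
Receive's new signature implies the envelope fields used at every call site: a channel ID, the source peer, and an already-decoded proto message. The shape consistent with this diff (see the p2p package for the authoritative definition):

    // Inferred from usage (e.ChannelID, e.Src, e.Message) in this compare.
    type Envelope struct {
        ChannelID byte          // e.g. StateChannel, DataChannel, VoteChannel
        Src       p2p.Peer      // sending peer, set on inbound envelopes
        Message   proto.Message // decoded message; Receive still runs ValidateBasic
    }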
// subscribeToBroadcastEvents subscribes for new round steps and votes
@@ -430,29 +406,39 @@ func (conR *Reactor) unsubscribeFromBroadcastEvents() {

func (conR *Reactor) broadcastNewRoundStepMessage(rs *cstypes.RoundState) {
nrsMsg := makeRoundStepMessage(rs)
conR.Switch.Broadcast(StateChannel, MustEncode(nrsMsg))
conR.Switch.Broadcast(p2p.Envelope{
ChannelID: StateChannel,
Message: nrsMsg,
})
}

func (conR *Reactor) broadcastNewValidBlockMessage(rs *cstypes.RoundState) {
csMsg := &NewValidBlockMessage{
psh := rs.ProposalBlockParts.Header()
csMsg := &tmcons.NewValidBlock{
Height: rs.Height,
Round: rs.Round,
BlockPartSetHeader: rs.ProposalBlockParts.Header(),
BlockParts: rs.ProposalBlockParts.BitArray(),
BlockPartSetHeader: psh.ToProto(),
BlockParts: rs.ProposalBlockParts.BitArray().ToProto(),
IsCommit: rs.Step == cstypes.RoundStepCommit,
}
conR.Switch.Broadcast(StateChannel, MustEncode(csMsg))
conR.Switch.Broadcast(p2p.Envelope{
ChannelID: StateChannel,
Message: csMsg,
})
}

// Broadcasts HasVoteMessage to peers that care.
func (conR *Reactor) broadcastHasVoteMessage(vote *types.Vote) {
msg := &HasVoteMessage{
msg := &tmcons.HasVote{
Height: vote.Height,
Round: vote.Round,
Type: vote.Type,
Index: vote.ValidatorIndex,
}
conR.Switch.Broadcast(StateChannel, MustEncode(msg))
conR.Switch.Broadcast(p2p.Envelope{
ChannelID: StateChannel,
Message: msg,
})
/*
// TODO: Make this broadcast more selective.
for _, peer := range conR.Switch.Peers().List() {
@@ -463,7 +449,11 @@ func (conR *Reactor) broadcastHasVoteMessage(vote *types.Vote) {
prs := ps.GetRoundState()
if prs.Height == vote.Height {
// TODO: Also filter on round?
peer.TrySend(StateChannel, struct{ ConsensusMessage }{msg})
e := p2p.Envelope{
ChannelID: StateChannel, struct{ ConsensusMessage }{msg},
Message: p,
}
peer.TrySend(e)
} else {
// Height doesn't match
// TODO: check a field, maybe CatchupCommitRound?
@@ -473,11 +463,11 @@ func (conR *Reactor) broadcastHasVoteMessage(vote *types.Vote) {
*/
}

func makeRoundStepMessage(rs *cstypes.RoundState) (nrsMsg *NewRoundStepMessage) {
nrsMsg = &NewRoundStepMessage{
func makeRoundStepMessage(rs *cstypes.RoundState) (nrsMsg *tmcons.NewRoundStep) {
nrsMsg = &tmcons.NewRoundStep{
Height: rs.Height,
Round: rs.Round,
Step: rs.Step,
Step: uint32(rs.Step),
SecondsSinceStartTime: int64(time.Since(rs.StartTime).Seconds()),
LastCommitRound: rs.LastCommit.GetRound(),
}
@@ -487,7 +477,10 @@ func makeRoundStepMessage(rs *cstypes.RoundState) (nrsMsg *NewRoundStepMessage)
func (conR *Reactor) sendNewRoundStepMessage(peer p2p.Peer) {
rs := conR.getRoundState()
nrsMsg := makeRoundStepMessage(rs)
peer.Send(StateChannel, MustEncode(nrsMsg))
peer.Send(p2p.Envelope{
ChannelID: StateChannel,
Message: nrsMsg,
})
}

func (conR *Reactor) updateRoundStateRoutine() {
@@ -519,6 +512,11 @@ OUTER_LOOP:
if !peer.IsRunning() || !conR.IsRunning() {
return
}
if !conR.IsConsensusRunning() {
time.Sleep(conR.conS.config.PeerGossipSleepDuration)
continue OUTER_LOOP
}

rs := conR.getRoundState()
prs := ps.GetRoundState()

@@ -526,13 +524,19 @@ OUTER_LOOP:
if rs.ProposalBlockParts.HasHeader(prs.ProposalBlockPartSetHeader) {
if index, ok := rs.ProposalBlockParts.BitArray().Sub(prs.ProposalBlockParts.Copy()).PickRandom(); ok {
part := rs.ProposalBlockParts.GetPart(index)
msg := &BlockPartMessage{
Height: rs.Height, // This tells peer that this part applies to us.
Round: rs.Round, // This tells peer that this part applies to us.
Part: part,
parts, err := part.ToProto()
if err != nil {
panic(err)
}
logger.Debug("Sending block part", "height", prs.Height, "round", prs.Round)
if peer.Send(DataChannel, MustEncode(msg)) {
if peer.Send(p2p.Envelope{
ChannelID: DataChannel,
Message: &tmcons.BlockPart{
Height: rs.Height, // This tells peer that this part applies to us.
Round: rs.Round, // This tells peer that this part applies to us.
Part: *parts,
},
}) {
ps.SetHasProposalBlockPart(prs.Height, prs.Round, index)
}
continue OUTER_LOOP
@@ -578,9 +582,11 @@ OUTER_LOOP:
if rs.Proposal != nil && !prs.Proposal {
// Proposal: share the proposal metadata with peer.
{
msg := &ProposalMessage{Proposal: rs.Proposal}
logger.Debug("Sending proposal", "height", prs.Height, "round", prs.Round)
if peer.Send(DataChannel, MustEncode(msg)) {
if peer.Send(p2p.Envelope{
ChannelID: DataChannel,
Message: &tmcons.Proposal{Proposal: *rs.Proposal.ToProto()},
}) {
// NOTE[ZM]: A peer might have received different proposal msg so this Proposal msg will be rejected!
ps.SetHasProposal(rs.Proposal)
}
@@ -590,13 +596,15 @@ OUTER_LOOP:
// rs.Proposal was validated, so rs.Proposal.POLRound <= rs.Round,
// so we definitely have rs.Votes.Prevotes(rs.Proposal.POLRound).
if 0 <= rs.Proposal.POLRound {
msg := &ProposalPOLMessage{
Height: rs.Height,
ProposalPOLRound: rs.Proposal.POLRound,
ProposalPOL: rs.Votes.Prevotes(rs.Proposal.POLRound).BitArray(),
}
logger.Debug("Sending POL", "height", prs.Height, "round", prs.Round)
peer.Send(DataChannel, MustEncode(msg))
peer.Send(p2p.Envelope{
ChannelID: DataChannel,
Message: &tmcons.ProposalPOL{
Height: rs.Height,
ProposalPolRound: rs.Proposal.POLRound,
ProposalPol: *rs.Votes.Prevotes(rs.Proposal.POLRound).BitArray().ToProto(),
},
})
}
continue OUTER_LOOP
}
@@ -633,20 +641,27 @@ func (conR *Reactor) gossipDataForCatchup(logger log.Logger, rs *cstypes.RoundSt
return
}
// Send the part
msg := &BlockPartMessage{
Height: prs.Height, // Not our height, so it doesn't matter.
Round: prs.Round, // Not our height, so it doesn't matter.
Part: part,
}
logger.Debug("Sending block part for catchup", "round", prs.Round, "index", index)
if peer.Send(DataChannel, MustEncode(msg)) {
pp, err := part.ToProto()
if err != nil {
logger.Error("Could not convert part to proto", "index", index, "error", err)
return
}
if peer.Send(p2p.Envelope{
ChannelID: DataChannel,
Message: &tmcons.BlockPart{
Height: prs.Height, // Not our height, so it doesn't matter.
Round: prs.Round, // Not our height, so it doesn't matter.
Part: *pp,
},
}) {
ps.SetHasProposalBlockPart(prs.Height, prs.Round, index)
} else {
logger.Debug("Sending block part for catchup failed")
}
return
}
// logger.Info("No parts to send in catch-up, sleeping")

time.Sleep(conR.conS.config.PeerGossipSleepDuration)
}

@@ -656,12 +671,16 @@ func (conR *Reactor) gossipVotesRoutine(peer p2p.Peer, ps *PeerState) {
// Simple hack to throttle logs upon sleep.
var sleeping = 0

OUTER_LOOP:
for {
// Manage disconnects from self or peer.
if !peer.IsRunning() || !conR.IsRunning() {
return
}
if !conR.IsConsensusRunning() {
time.Sleep(conR.conS.config.PeerGossipSleepDuration)
continue
}

rs := conR.getRoundState()
prs := ps.GetRoundState()

@@ -672,14 +691,11 @@ OUTER_LOOP:
sleeping = 0
}

// logger.Debug("gossipVotesRoutine", "rsHeight", rs.Height, "rsRound", rs.Round,
// "prsHeight", prs.Height, "prsRound", prs.Round, "prsStep", prs.Step)

// If height matches, then send LastCommit, Prevotes, Precommits.
if rs.Height == prs.Height {
heightLogger := logger.With("height", prs.Height)
if conR.gossipVotesForHeight(heightLogger, rs, prs, ps) {
continue OUTER_LOOP
continue
}
}

@@ -688,7 +704,7 @@ OUTER_LOOP:
if prs.Height != 0 && rs.Height == prs.Height+1 {
if ps.PickSendVote(rs.LastCommit) {
logger.Debug("Picked rs.LastCommit to send", "height", prs.Height)
continue OUTER_LOOP
continue
}
}

@@ -701,7 +717,7 @@ OUTER_LOOP:
if commit := conR.conS.blockStore.LoadBlockCommit(prs.Height); commit != nil {
if ps.PickSendVote(commit) {
logger.Debug("Picked Catchup commit to send", "height", prs.Height)
continue OUTER_LOOP
continue
}
}
}
@@ -718,7 +734,7 @@ OUTER_LOOP:
}

time.Sleep(conR.conS.config.PeerGossipSleepDuration)
continue OUTER_LOOP
continue
}
}

@@ -792,18 +808,27 @@ OUTER_LOOP:
return
}

if !conR.IsConsensusRunning() {
time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
continue OUTER_LOOP
}

// Maybe send Height/Round/Prevotes
{
rs := conR.getRoundState()
prs := ps.GetRoundState()
if rs.Height == prs.Height {
if maj23, ok := rs.Votes.Prevotes(prs.Round).TwoThirdsMajority(); ok {
peer.TrySend(StateChannel, MustEncode(&VoteSetMaj23Message{
Height: prs.Height,
Round: prs.Round,
Type: tmproto.PrevoteType,
BlockID: maj23,
}))

peer.TrySend(p2p.Envelope{
ChannelID: StateChannel,
Message: &tmcons.VoteSetMaj23{
Height: prs.Height,
Round: prs.Round,
Type: tmproto.PrevoteType,
BlockID: maj23.ToProto(),
},
})
time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
}
}
@@ -815,12 +840,15 @@ OUTER_LOOP:
prs := ps.GetRoundState()
if rs.Height == prs.Height {
if maj23, ok := rs.Votes.Precommits(prs.Round).TwoThirdsMajority(); ok {
peer.TrySend(StateChannel, MustEncode(&VoteSetMaj23Message{
Height: prs.Height,
Round: prs.Round,
Type: tmproto.PrecommitType,
BlockID: maj23,
}))
peer.TrySend(p2p.Envelope{
ChannelID: StateChannel,
Message: &tmcons.VoteSetMaj23{
Height: prs.Height,
Round: prs.Round,
Type: tmproto.PrecommitType,
BlockID: maj23.ToProto(),
},
})
time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
}
}
@@ -832,12 +860,16 @@ OUTER_LOOP:
prs := ps.GetRoundState()
if rs.Height == prs.Height && prs.ProposalPOLRound >= 0 {
if maj23, ok := rs.Votes.Prevotes(prs.ProposalPOLRound).TwoThirdsMajority(); ok {
peer.TrySend(StateChannel, MustEncode(&VoteSetMaj23Message{
Height: prs.Height,
Round: prs.ProposalPOLRound,
Type: tmproto.PrevoteType,
BlockID: maj23,
}))

peer.TrySend(p2p.Envelope{
ChannelID: StateChannel,
Message: &tmcons.VoteSetMaj23{
Height: prs.Height,
Round: prs.ProposalPOLRound,
Type: tmproto.PrevoteType,
BlockID: maj23.ToProto(),
},
})
time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
}
}
@@ -852,19 +884,20 @@ OUTER_LOOP:
if prs.CatchupCommitRound != -1 && prs.Height > 0 && prs.Height <= conR.conS.blockStore.Height() &&
prs.Height >= conR.conS.blockStore.Base() {
if commit := conR.conS.LoadCommit(prs.Height); commit != nil {
peer.TrySend(StateChannel, MustEncode(&VoteSetMaj23Message{
Height: prs.Height,
Round: commit.Round,
Type: tmproto.PrecommitType,
BlockID: commit.BlockID,
}))
peer.TrySend(p2p.Envelope{
ChannelID: StateChannel,
Message: &tmcons.VoteSetMaj23{
Height: prs.Height,
Round: commit.Round,
Type: tmproto.PrecommitType,
BlockID: commit.BlockID.ToProto(),
},
})
time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
}
}
}

time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)

continue OUTER_LOOP
}
}
@@ -1071,9 +1104,13 @@ func (ps *PeerState) SetHasProposalBlockPart(height int64, round int32, index in
// Returns true if vote was sent.
func (ps *PeerState) PickSendVote(votes types.VoteSetReader) bool {
if vote, ok := ps.PickVoteToSend(votes); ok {
msg := &VoteMessage{vote}
ps.logger.Debug("Sending vote message", "ps", ps, "vote", vote)
if ps.peer.Send(VoteChannel, MustEncode(msg)) {
if ps.peer.Send(p2p.Envelope{
ChannelID: VoteChannel,
Message: &tmcons.Vote{
Vote: vote.ToProto(),
},
}) {
ps.SetHasVote(vote)
return true
}
@@ -1439,15 +1476,6 @@ func init() {
tmjson.RegisterType(&VoteSetBitsMessage{}, "tendermint/VoteSetBits")
}

func decodeMsg(bz []byte) (msg Message, err error) {
pb := &tmcons.Message{}
if err = proto.Unmarshal(bz, pb); err != nil {
return msg, err
}

return MsgFromProto(pb)
}

//-------------------------------------

// NewRoundStepMessage is sent for every step taken in the ConsensusState.
@@ -33,6 +33,7 @@ import (
mempoolv1 "github.com/tendermint/tendermint/mempool/v1"
"github.com/tendermint/tendermint/p2p"
p2pmock "github.com/tendermint/tendermint/p2p/mock"
tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
sm "github.com/tendermint/tendermint/state"
statemocks "github.com/tendermint/tendermint/state/mocks"
@@ -54,9 +55,8 @@ func startConsensusNet(t *testing.T, css []*State, n int) (
blocksSubs := make([]types.Subscription, 0)
eventBuses := make([]*types.EventBus, n)
for i := 0; i < n; i++ {
/*logger, err := tmflags.ParseLogLevel("consensus:info,*:error", logger, "info")
if err != nil { t.Fatal(err)}*/
reactors[i] = NewReactor(css[i], true) // so we dont start the consensus states
// Note, we dont start the consensus states
reactors[i] = NewReactor(css[i])
reactors[i].SetLogger(css[i].Logger)

// eventBus is already started with the cs
@@ -87,7 +87,8 @@ func startConsensusNet(t *testing.T, css []*State, n int) (
// TODO: is this still true with new pubsub?
for i := 0; i < n; i++ {
s := reactors[i].conS.GetState()
reactors[i].SwitchToConsensus(s, false)
err := reactors[i].SwitchToConsensus(s, false)
require.NoError(t, err)
}
return reactors, blocksSubs, eventBuses
}
@@ -112,7 +113,7 @@ func stopConsensusNet(logger log.Logger, reactors []*Reactor, eventBuses []*type
// Ensure a testnet makes blocks
func TestReactorBasic(t *testing.T) {
N := 4
css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore)
css, cleanup := randConsensusNet(t, N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore)
defer cleanup()
reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N)
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
@@ -134,6 +135,8 @@ func TestReactorWithEvidence(t *testing.T) {
// css := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore)

genDoc, privVals := randGenesisDoc(nValidators, false, 30)
state, err := sm.MakeGenesisState(genDoc)
require.NoError(t, err)
css := make([]*State, nValidators)
logger := consensusLogger()
for i := 0; i < nValidators; i++ {
@@ -141,7 +144,6 @@ func TestReactorWithEvidence(t *testing.T) {
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc)
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
defer os.RemoveAll(thisConfig.RootDir)
ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
@@ -202,7 +204,7 @@ func TestReactorWithEvidence(t *testing.T) {

// Make State
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool, blockStore)
cs := NewState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool2)
cs := NewState(thisConfig.Consensus, blockExec, blockStore, mempool, evpool2, WithState(state.Copy()))
cs.SetLogger(log.TestingLogger().With("module", "consensus"))
cs.SetPrivValidator(pv)

@@ -236,7 +238,7 @@ func TestReactorWithEvidence(t *testing.T) {
// Ensure a testnet makes blocks when there are txs
func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {
N := 4
css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore,
css, cleanup := randConsensusNet(t, N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore,
func(c *cfg.Config) {
c.Consensus.CreateEmptyBlocks = false
})
@@ -257,7 +259,7 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {

func TestReactorReceiveDoesNotPanicIfAddPeerHasntBeenCalledYet(t *testing.T) {
N := 1
css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore)
css, cleanup := randConsensusNet(t, N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore)
defer cleanup()
reactors, _, eventBuses := startConsensusNet(t, css, N)
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
@@ -265,22 +267,25 @@ func TestReactorReceiveDoesNotPanicIfAddPeerHasntBeenCalledYet(t *testing.T) {
var (
reactor = reactors[0]
peer = p2pmock.NewPeer(nil)
msg = MustEncode(&HasVoteMessage{Height: 1,
Round: 1, Index: 1, Type: tmproto.PrevoteType})
)

reactor.InitPeer(peer)

// simulate switch calling Receive before AddPeer
assert.NotPanics(t, func() {
reactor.Receive(StateChannel, peer, msg)
reactor.Receive(p2p.Envelope{
ChannelID: StateChannel,
Src: peer,
Message: &tmcons.HasVote{Height: 1,
Round: 1, Index: 1, Type: tmproto.PrevoteType},
})
reactor.AddPeer(peer)
})
}

func TestReactorReceivePanicsIfInitPeerHasntBeenCalledYet(t *testing.T) {
N := 1
css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore)
css, cleanup := randConsensusNet(t, N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore)
defer cleanup()
reactors, _, eventBuses := startConsensusNet(t, css, N)
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
@@ -288,22 +293,25 @@ func TestReactorReceivePanicsIfInitPeerHasntBeenCalledYet(t *testing.T) {
var (
reactor = reactors[0]
peer = p2pmock.NewPeer(nil)
msg = MustEncode(&HasVoteMessage{Height: 1,
Round: 1, Index: 1, Type: tmproto.PrevoteType})
)

// we should call InitPeer here

// simulate switch calling Receive before AddPeer
assert.Panics(t, func() {
reactor.Receive(StateChannel, peer, msg)
reactor.Receive(p2p.Envelope{
ChannelID: StateChannel,
Src: peer,
Message: &tmcons.HasVote{Height: 1,
Round: 1, Index: 1, Type: tmproto.PrevoteType},
})
})
}

// Test we record stats about votes and block parts from other peers.
func TestReactorRecordsVotesAndBlockParts(t *testing.T) {
N := 4
css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore)
css, cleanup := randConsensusNet(t, N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore)
defer cleanup()
reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N)
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
@@ -329,6 +337,7 @@ func TestReactorVotingPowerChange(t *testing.T) {
nVals := 4
logger := log.TestingLogger()
css, cleanup := randConsensusNet(
t,
nVals,
"consensus_voting_power_changes_test",
newMockTickerFunc(true),
@@ -524,7 +533,7 @@ func TestReactorValidatorSetChanges(t *testing.T) {
// Check we can make blocks with skip_timeout_commit=false
func TestReactorWithTimeoutCommit(t *testing.T) {
N := 4
css, cleanup := randConsensusNet(N, "consensus_reactor_with_timeout_commit_test", newMockTickerFunc(false), newKVStore)
css, cleanup := randConsensusNet(t, N, "consensus_reactor_with_timeout_commit_test", newMockTickerFunc(false), newKVStore)
defer cleanup()
// override default SkipTimeoutCommit == true for tests
for i := 0; i < N; i++ {
@@ -129,8 +129,8 @@ func (pb *playback) replayReset(count int, newStepSub types.Subscription) error
}
pb.cs.Wait()

newCS := NewState(pb.cs.config, pb.genesisState.Copy(), pb.cs.blockExec,
pb.cs.blockStore, pb.cs.txNotifier, pb.cs.evpool)
newCS := NewState(pb.cs.config, pb.cs.blockExec,
pb.cs.blockStore, pb.cs.txNotifier, pb.cs.evpool, WithState(pb.genesisState.Copy()))
newCS.SetEventBus(pb.cs.eventBus)
newCS.startForReplay()

@@ -332,8 +332,8 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo
mempool, evpool := emptyMempool{}, sm.EmptyEvidencePool{}
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool, blockStore)

consensusState := NewState(csConfig, state.Copy(), blockExec,
blockStore, mempool, evpool)
consensusState := NewState(csConfig, blockExec,
blockStore, mempool, evpool, WithState(state.Copy()))

consensusState.SetEventBus(eventBus)
return consensusState
@@ -67,7 +67,8 @@ func TestMain(m *testing.M) {
|
||||
func startNewStateAndWaitForBlock(t *testing.T, consensusReplayConfig *cfg.Config,
|
||||
lastBlockHeight int64, blockDB dbm.DB, stateStore sm.Store) {
|
||||
logger := log.TestingLogger()
|
||||
state, _ := stateStore.LoadFromDBOrGenesisFile(consensusReplayConfig.GenesisFile())
|
||||
state, err := stateStore.LoadFromDBOrGenesisFile(consensusReplayConfig.GenesisFile())
|
||||
require.NoError(t, err)
|
||||
privValidator := loadPrivValidator(consensusReplayConfig)
|
||||
cs := newStateWithConfigAndBlockStore(
|
||||
consensusReplayConfig,
|
||||
@@ -81,7 +82,7 @@ func startNewStateAndWaitForBlock(t *testing.T, consensusReplayConfig *cfg.Confi
|
||||
bytes, _ := os.ReadFile(cs.config.WalFile())
|
||||
t.Logf("====== WAL: \n\r%X\n", bytes)
|
||||
|
||||
err := cs.Start()
|
||||
err = cs.Start()
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
if err := cs.Stop(); err != nil {
|
||||
@@ -555,40 +556,40 @@ func TestSimulateValidatorsChange(t *testing.T) {

// Sync from scratch
func TestHandshakeReplayAll(t *testing.T) {
	for _, m := range modes {
		testHandshakeReplay(t, config, 0, m, false)
		testHandshakeReplay(t, 0, m, false)
	}
	for _, m := range modes {
		testHandshakeReplay(t, config, 0, m, true)
		testHandshakeReplay(t, 0, m, true)
	}
}

// Sync many, not from scratch
func TestHandshakeReplaySome(t *testing.T) {
	for _, m := range modes {
		testHandshakeReplay(t, config, 2, m, false)
		testHandshakeReplay(t, 2, m, false)
	}
	for _, m := range modes {
		testHandshakeReplay(t, config, 2, m, true)
		testHandshakeReplay(t, 2, m, true)
	}
}

// Sync from lagging by one
func TestHandshakeReplayOne(t *testing.T) {
	for _, m := range modes {
		testHandshakeReplay(t, config, numBlocks-1, m, false)
		testHandshakeReplay(t, numBlocks-1, m, false)
	}
	for _, m := range modes {
		testHandshakeReplay(t, config, numBlocks-1, m, true)
		testHandshakeReplay(t, numBlocks-1, m, true)
	}
}

// Sync from caught up
func TestHandshakeReplayNone(t *testing.T) {
	for _, m := range modes {
		testHandshakeReplay(t, config, numBlocks, m, false)
		testHandshakeReplay(t, numBlocks, m, false)
	}
	for _, m := range modes {
		testHandshakeReplay(t, config, numBlocks, m, true)
		testHandshakeReplay(t, numBlocks, m, true)
	}
}

@@ -660,25 +661,27 @@ func tempWALWithData(data []byte) string {

// Make some blocks. Start a fresh app and apply nBlocks blocks.
// Then restart the app and sync it up with the remaining blocks
func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uint, testValidatorsChange bool) {
	var chain []*types.Block
	var commits []*types.Commit
	var store *mockBlockStore
	var stateDB dbm.DB
	var genesisState sm.State
func testHandshakeReplay(t *testing.T, nBlocks int, mode uint, testValidatorsChange bool) {
	var (
		chain        []*types.Block
		commits      []*types.Commit
		store        *mockBlockStore
		stateDB      dbm.DB
		genesisState sm.State
		config       *cfg.Config
	)
	if testValidatorsChange {
		testConfig := ResetConfig(fmt.Sprintf("%s_%v_m", t.Name(), mode))
		defer os.RemoveAll(testConfig.RootDir)
		config = ResetConfig(fmt.Sprintf("%s_%v_m", t.Name(), mode))
		defer os.RemoveAll(config.RootDir)
		stateDB = dbm.NewMemDB()

		genesisState = sim.GenesisState
		config = sim.Config
		chain = append([]*types.Block{}, sim.Chain...) // copy chain
		commits = sim.Commits
		store = newMockBlockStore(t, config, genesisState.ConsensusParams)
	} else { // test single node
		testConfig := ResetConfig(fmt.Sprintf("%s_%v_s", t.Name(), mode))
		defer os.RemoveAll(testConfig.RootDir)
		config = ResetConfig(fmt.Sprintf("%s_%v_s", t.Name(), mode))
		defer os.RemoveAll(config.RootDir)
		walBody, err := WALWithNBlocks(t, numBlocks)
		require.NoError(t, err)
		walFile := tempWALWithData(walBody)
@@ -735,7 +738,7 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin
	// Prune block store if requested
	expectError := false
	if mode == 3 {
		pruned, err := store.PruneBlocks(2)
		pruned, _, err := store.PruneBlocks(2, state)
		require.NoError(t, err)
		require.EqualValues(t, 1, pruned)
		expectError = int64(nBlocks) < 2
@@ -811,14 +814,11 @@ func buildAppStateFromChain(t *testing.T, proxyApp proxy.AppConns, stateStore sm

	state.Version.Consensus.App = kvstore.ProtocolVersion // simulate handshake, receive app version
	validators := types.TM2PB.ValidatorUpdates(state.Validators)
	if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{
	_, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{
		Validators: validators,
	}); err != nil {
		panic(err)
	}
	if err := stateStore.Save(state); err != nil { // save height 1's validatorsInfo
		panic(err)
	}
	})
	require.NoError(t, err)
	require.NoError(t, stateStore.Save(state))
	switch mode {
	case 0:
		for i := 0; i < nBlocks; i++ {
@@ -1185,7 +1185,8 @@ func (bs *mockBlockStore) LoadSeenCommit(height int64) *types.Commit {
	return bs.commits[height-1]
}

func (bs *mockBlockStore) PruneBlocks(height int64) (uint64, error) {
func (bs *mockBlockStore) PruneBlocks(height int64, state sm.State) (uint64, int64, error) {
	evidencePoint := height
	pruned := uint64(0)
	for i := int64(0); i < height-1; i++ {
		bs.chain[i] = nil
@@ -1193,7 +1194,7 @@ func (bs *mockBlockStore) PruneBlocks(height int64) (uint64, error) {
		pruned++
	}
	bs.base = height
	return pruned, nil
	return pruned, evidencePoint, nil
}

func (bs *mockBlockStore) DeleteLatestBlock() error { return nil }

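The hunks above track the block store interface change exercised earlier in `testHandshakeReplay`: `PruneBlocks` now also takes the latest `sm.State` and returns the height up to which the evidence pool may in turn prune (called `evidencePoint` in the mock). A minimal sketch of the resulting call shape (illustrative fragment, not code from this changeset):

```go
// Prune everything below height 2; the second return value tells the
// evidence pool how far it, in turn, is allowed to prune.
pruned, evidencePoint, err := store.PruneBlocks(2, state)
if err != nil {
	return err // pruning failed; nothing was removed
}
_ = pruned        // number of blocks actually removed
_ = evidencePoint // evidence retain height derived from state
```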
@@ -148,7 +148,6 @@ type StateOption func(*State)
// NewState returns a new State.
func NewState(
	config *cfg.ConsensusConfig,
	state sm.State,
	blockExec *sm.BlockExecutor,
	blockStore sm.BlockStore,
	txNotifier txNotifier,
@@ -177,13 +176,6 @@ func NewState(
	cs.doPrevote = cs.defaultDoPrevote
	cs.setProposal = cs.defaultSetProposal

	// We have no votes, so reconstruct LastCommit from SeenCommit.
	if state.LastBlockHeight > 0 {
		cs.reconstructLastCommit(state)
	}

	cs.updateToState(state)

	// NOTE: we do not call scheduleRound0 yet, we do that upon Start()

	cs.BaseService = *service.NewBaseService(nil, "State", cs)
@@ -207,10 +199,19 @@ func (cs *State) SetEventBus(b *types.EventBus) {
}

// StateMetrics sets the metrics.
func StateMetrics(metrics *Metrics) StateOption {
func WithMetrics(metrics *Metrics) StateOption {
	return func(cs *State) { cs.metrics = metrics }
}

func WithState(state sm.State) StateOption {
	return func(cs *State) {
		if state.LastBlockHeight > 0 {
			cs.reconstructLastCommit(state)
		}
		cs.updateToState(state)
	}
}

// String returns a string.
func (cs *State) String() string {
	// better not to access shared variables
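Taken together, the last few hunks move `State` construction to a functional-options style: the initial `sm.State` is no longer a positional argument of `NewState`, and `StateMetrics` is renamed `WithMetrics`. A minimal sketch of the resulting call shape (the variables are placeholders standing in for values initialized as in the tests above):

```go
// Hypothetical caller, shown only to illustrate the new option pattern.
cs := NewState(
	csConfig, blockExec, blockStore, txNotifier, evpool,
	WithState(state.Copy()),   // replaces the old positional state argument
	WithMetrics(NopMetrics()), // renamed from StateMetrics
)
```

Note that `WithState` carries over the behavior that previously ran unconditionally inside `NewState`: reconstructing `LastCommit` from the seen commit and calling `updateToState`.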
@@ -297,6 +298,10 @@ func (cs *State) LoadCommit(height int64) *types.Commit {
// OnStart loads the latest state via the WAL, and starts the timeout and
// receive routines.
func (cs *State) OnStart() error {
	if cs.state.IsEmpty() {
		return errors.New("no state to commence consensus on")
	}

	// We may set the WAL in testing before calling Start, so only OpenWAL if its
	// still the nilWAL.
	if _, ok := cs.wal.(nilWAL); ok {
@@ -612,7 +617,7 @@ func (cs *State) updateToState(state sm.State) {
	// signal the new round step, because other services (eg. txNotifier)
	// depend on having an up-to-date peer state!
	if state.LastBlockHeight <= cs.state.LastBlockHeight {
		cs.Logger.Debug(
		cs.Logger.Info(
			"ignoring updateToState()",
			"new_height", state.LastBlockHeight+1,
			"old_height", cs.state.LastBlockHeight+1,
@@ -2275,11 +2280,10 @@ func (cs *State) signAddVote(msgType tmproto.SignedMsgType, hash []byte, header
		return nil
	}

	// TODO: pass pubKey to signVote
	vote, err := cs.signVote(msgType, hash, header)
	if err == nil {
		cs.sendInternalMessage(msgInfo{&VoteMessage{vote}, ""})
		cs.Logger.Debug("signed and pushed vote", "height", cs.Height, "round", cs.Round, "vote", vote)
		cs.Logger.Info("signed and pushed vote", "height", cs.Height, "round", cs.Round, "vote", vote)
		return vote
	}

@@ -121,7 +121,7 @@ func (hvs *HeightVoteSet) AddVote(vote *types.Vote, peerID p2p.ID) (added bool,
		return
	}
	voteSet := hvs.getVoteSet(vote.Round, vote.Type)
	if voteSet == nil {
	if voteSet.IsEmpty() {
		if rndz := hvs.peerCatchupRounds[peerID]; len(rndz) < 2 {
			hvs.addRound(vote.Round)
			voteSet = hvs.getVoteSet(vote.Round, vote.Type)
@@ -166,7 +166,7 @@ func (hvs *HeightVoteSet) POLInfo() (polRound int32, polBlockID types.BlockID) {
func (hvs *HeightVoteSet) getVoteSet(round int32, voteType tmproto.SignedMsgType) *types.VoteSet {
	rvs, ok := hvs.roundVoteSets[round]
	if !ok {
		return nil
		return &types.VoteSet{}
	}
	switch voteType {
	case tmproto.PrevoteType:

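The second hunk changes the miss case of `getVoteSet` from `nil` to a usable empty `types.VoteSet`, which is why the first hunk replaces the `nil` check with `IsEmpty()`. Illustrative fragment (not from this changeset):

```go
// Before: callers had to nil-check the result.
// After: a round with no recorded votes yields an empty set instead.
vs := hvs.getVoteSet(vote.Round, vote.Type)
if vs.IsEmpty() { // replaces `if vs == nil`
	// no votes recorded yet for this round/type; allocate the round
}
```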
@@ -5,6 +5,7 @@ import (
	"os"
	"testing"

	"github.com/stretchr/testify/require"
	cfg "github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/crypto/tmhash"
	"github.com/tendermint/tendermint/internal/test"
@@ -30,30 +31,23 @@ func TestPeerCatchupRounds(t *testing.T) {

	vote999_0 := makeVoteHR(t, 1, 0, 999, privVals)
	added, err := hvs.AddVote(vote999_0, "peer1")
	if !added || err != nil {
		t.Error("Expected to successfully add vote from peer", added, err)
	}
	require.NoError(t, err)
	require.True(t, added)

	vote1000_0 := makeVoteHR(t, 1, 0, 1000, privVals)
	added, err = hvs.AddVote(vote1000_0, "peer1")
	if !added || err != nil {
		t.Error("Expected to successfully add vote from peer", added, err)
	}
	require.NoError(t, err)
	require.True(t, added)

	vote1001_0 := makeVoteHR(t, 1, 0, 1001, privVals)
	added, err = hvs.AddVote(vote1001_0, "peer1")
	if err != ErrGotVoteFromUnwantedRound {
		t.Errorf("expected GotVoteFromUnwantedRoundError, but got %v", err)
	}
	if added {
		t.Error("Expected to *not* add vote from peer, too many catchup rounds.")
	}
	require.Error(t, err)
	require.Equal(t, ErrGotVoteFromUnwantedRound, err)
	require.False(t, added)

	added, err = hvs.AddVote(vote1001_0, "peer2")
	if !added || err != nil {
		t.Error("Expected to successfully add vote from another peer")
	}

	require.NoError(t, err)
	require.True(t, added)
}

func makeVoteHR(t *testing.T, height int64, valIndex, round int32, privVals []types.PrivValidator) *types.Vote {

@@ -86,7 +86,7 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) {
	mempool := emptyMempool{}
	evpool := sm.EmptyEvidencePool{}
	blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool, blockStore)
	consensusState := NewState(config.Consensus, state.Copy(), blockExec, blockStore, mempool, evpool)
	consensusState := NewState(config.Consensus, blockExec, blockStore, mempool, evpool, WithState(state.Copy()))
	consensusState.SetLogger(logger)
	consensusState.SetEventBus(eventBus)
	if privValidator != nil {

@@ -99,4 +99,4 @@ configuration file that we can update with PRs.
Because the build processes are identical (as is the information contained
herein), this file should be kept in sync as much as possible with its
[counterpart in the Cosmos SDK
repo](https://github.com/cosmos/cosmos-sdk/blob/master/docs/DOCS_README.md).
repo](https://github.com/cosmos/cosmos-sdk/blob/main/docs/README.md).

@@ -61,7 +61,7 @@ The following protocols and application features require a reliable source of tim
* Tendermint Light Clients [rely on correspondence between their known time](https://github.com/tendermint/tendermint/blob/main/spec/light-client/verification/README.md#definitions-1) and the block time for block verification.
* Tendermint Evidence validity is determined [either in terms of heights or in terms of time](https://github.com/tendermint/tendermint/blob/8029cf7a0fcc89a5004e173ec065aa48ad5ba3c8/spec/consensus/evidence.md#verification).
* Unbonding of staked assets in the Cosmos Hub [occurs after a period of 21 days](https://github.com/cosmos/governance/blob/ce75de4019b0129f6efcbb0e752cd2cc9e6136d3/params-change/Staking.md#unbondingtime).
* IBC packets can use either a [timestamp or a height to timeout packet delivery](https://docs.cosmos.network/v0.44/ibc/overview.html#acknowledgements)
* IBC packets can use either a [timestamp or a height to timeout packet delivery](https://docs.cosmos.network/v0.45/ibc/overview.html#acknowledgements)

Finally, inflation distribution in the Cosmos Hub uses an approximation of time to calculate an annual percentage rate.
This approximation of time is calculated using [block heights with an estimated number of blocks produced in a year](https://github.com/cosmos/governance/blob/master/params-change/Mint.md#blocksperyear).

23  docs/qa/README.md  Normal file
@@ -0,0 +1,23 @@
---
order: 1
parent:
  title: Tendermint Quality Assurance
  description: This is a report on the process followed and results obtained when running v0.34.x on testnets
  order: 2
---

# Tendermint Quality Assurance

This directory keeps track of the process followed by the Tendermint Core team
for Quality Assurance before cutting a release.
This directory lives in multiple branches. On each release branch,
the contents of this directory reflect the status of the process
at the time the Quality Assurance process was applied for that release.

File [method](./method.md) keeps track of the process followed to obtain the results
used to decide if a release is passing the Quality Assurance process.
The results obtained in each release are stored in their own directory.
The following releases have undergone the Quality Assurance process:

* [v0.34.x](./v034/), which was tested just before releasing v0.34.22
* [v0.37.x](./v037/), with v0.34.x acting as a baseline

214  docs/qa/method.md  Normal file
@@ -0,0 +1,214 @@
---
order: 1
title: Method
---

# Method

This document provides a detailed description of the QA process.
It is intended to be used by engineers reproducing the experimental setup for future tests of Tendermint.

The (first iteration of the) QA process as described [in the RELEASES.md document][releases]
was applied to version v0.34.x in order to have a set of results acting as benchmarking baseline.
This baseline is then compared with results obtained in later versions.

Out of the testnet-based test cases described in [the releases document][releases] we focused on two of them:
_200 Node Test_, and _Rotating Nodes Test_.

[releases]: https://github.com/tendermint/tendermint/blob/v0.37.x/RELEASES.md#large-scale-testnets

## Software Dependencies

### Infrastructure Requirements to Run the Tests

* An account at Digital Ocean (DO), with a high droplet limit (>202)
* The machine to orchestrate the tests should have the following installed:
    * A clone of the [testnet repository][testnet-repo]
        * This repository contains all the scripts mentioned in the remainder of this section
    * [Digital Ocean CLI][doctl]
    * [Terraform CLI][Terraform]
    * [Ansible CLI][Ansible]

[testnet-repo]: https://github.com/interchainio/tendermint-testnet
[Ansible]: https://docs.ansible.com/ansible/latest/index.html
[Terraform]: https://www.terraform.io/docs
[doctl]: https://docs.digitalocean.com/reference/doctl/how-to/install/

### Requirements for Result Extraction

* MATLAB or Octave
* [Prometheus][prometheus] server installed
* blockstore DB of one of the full nodes in the testnet
* Prometheus DB

[prometheus]: https://prometheus.io/

## 200 Node Testnet

### Running the test

This section explains how the tests were carried out for reproducibility purposes.

1. [If you haven't done it before]
   Follow steps 1-4 of the `README.md` at the top of the testnet repository to configure Terraform, and `doctl`.
2. Copy file `testnets/testnet200.toml` onto `testnet.toml` (do NOT commit this change)
3. Set the variable `VERSION_TAG` in the `Makefile` to the git hash that is to be tested.
4. Follow steps 5-10 of the `README.md` to configure and start the 200 node testnet
    * WARNING: Do NOT forget to run `make terraform-destroy` as soon as you are done with the tests (see step 9)
5. As a sanity check, connect to the Prometheus node's web interface and check the graph for the `tendermint_consensus_height` metric.
   All nodes should be increasing their heights.
6. `ssh` into the `testnet-load-runner`, then copy script `script/200-node-loadscript.sh` and run it from the load runner node.
    * Before running it, you need to edit the script to provide the IP address of a full node.
      This node will receive all transactions from the load runner node.
    * This script will take about 40 mins to run
    * It is running 90-second-long experiments in a loop with different loads
7. Run `make retrieve-data` to gather all relevant data from the testnet into the orchestrating machine
8. Verify that the data was collected without errors
    * at least one blockstore DB for a Tendermint validator
    * the Prometheus database from the Prometheus node
    * for extra care, you can run `zip -T` on the `prometheus.zip` file and (one of) the `blockstore.db.zip` file(s)
9. **Run `make terraform-destroy`**
    * Don't forget to type `yes`! Otherwise you're in trouble.

### Result Extraction

The method for extracting the results described here is highly manual (and exploratory) at this stage.
The Core team should improve it at every iteration to increase the amount of automation.

#### Steps

1. Unzip the blockstore into a directory
2. Extract the latency report and the raw latencies for all the experiments. Run these commands from the directory containing the blockstore
    * `go run github.com/tendermint/tendermint/test/loadtime/cmd/report@3ec6e424d --database-type goleveldb --data-dir ./ > results/report.txt`
    * `go run github.com/tendermint/tendermint/test/loadtime/cmd/report@3ec6e424d --database-type goleveldb --data-dir ./ --csv results/raw.csv`
3. File `report.txt` contains an unordered list of experiments with varying concurrent connections and transaction rate
    * Create files `report01.txt`, `report02.txt`, `report04.txt` and, for each experiment in file `report.txt`,
      copy its related lines to the filename that matches the number of connections.
    * Sort the experiments in `report01.txt` in ascending tx rate order. Likewise for `report02.txt` and `report04.txt`.
4. Generate file `report_tabbed.txt` by showing the contents of `report01.txt`, `report02.txt`, `report04.txt` side by side
    * This effectively creates a table where rows are a particular tx rate and columns are a particular number of websocket connections.
5. Extract the raw latencies from file `raw.csv` using the following bash loop. This creates a `.csv` file and a `.dat` file per experiment.
   The format of the `.dat` files is amenable to loading them as matrices in Octave.

    ```bash
    uuids=($(cat report01.txt report02.txt report04.txt | grep '^Experiment ID: ' | awk '{ print $3 }'))
    c=1
    for i in 01 02 04; do
      for j in 0025 0050 0100 0200; do
        echo $i $j $c "${uuids[$c]}"
        filename=c${i}_r${j}
        grep ${uuids[$c]} raw.csv > ${filename}.csv
        cat ${filename}.csv | tr , ' ' | awk '{ print $2, $3 }' > ${filename}.dat
        c=$(expr $c + 1)
      done
    done
    ```

6. Enter Octave
7. Load all `.dat` files generated in step 5 into matrices using this Octave code snippet

    ```octave
    conns = { "01"; "02"; "04" };
    rates = { "0025"; "0050"; "0100"; "0200" };
    for i = 1:length(conns)
      for j = 1:length(rates)
        filename = strcat("c", conns{i}, "_r", rates{j}, ".dat");
        load("-ascii", filename);
      endfor
    endfor
    ```

8. Set variable `release` to the current release undergoing QA

    ```octave
    release = "v0.34.x";
    ```

9. Generate a plot with all (or some) experiments, where the X axis is the experiment time,
   and the Y axis is the latency of transactions.
   The following snippet plots all experiments.

    ```octave
    legends = {};
    hold off;
    for i = 1:length(conns)
      for j = 1:length(rates)
        data_name = strcat("c", conns{i}, "_r", rates{j});
        l = strcat("c=", conns{i}, " r=", rates{j});
        m = eval(data_name); plot((m(:,1) - min(m(:,1))) / 1e+9, m(:,2) / 1e+9, ".");
        hold on;
        legends(1, end+1) = l;
      endfor
    endfor
    legend(legends, "location", "northeastoutside");
    xlabel("experiment time (s)");
    ylabel("latency (s)");
    t = sprintf("200-node testnet - %s", release);
    title(t);
    ```

10. Consider adjusting the axis, in case you want to compare your results to the baseline, for instance

    ```octave
    axis([0, 100, 0, 30], "tic");
    ```

11. Use Octave's GUI menu to save the plot (e.g. as `.png`)

12. Repeat steps 9 and 10 to obtain as many plots as deemed necessary.

13. To generate a latency vs throughput plot, using the raw CSV file generated
    in step 2, follow the instructions for the [`latency_throughput.py`] script.

[`latency_throughput.py`]: ../../scripts/qa/reporting/README.md

#### Extracting Prometheus Metrics

1. Stop the prometheus server if it is running as a service (e.g. a `systemd` unit).
2. Unzip the prometheus database retrieved from the testnet, and move it to replace the
   local prometheus database.
3. Start the prometheus server and make sure no error logs appear at startup.
4. Introduce the metrics you want to gather or plot (a programmatic sketch follows below).
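As a sketch of what step 4 can look like done programmatically rather than through the web UI, the hypothetical Go snippet below queries the restored server through Prometheus's standard `query_range` HTTP API for the `tendermint_consensus_height` metric; the server address and time range are placeholder assumptions.

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	// Placeholder address of the local Prometheus server started in step 3.
	base := "http://localhost:9090/api/v1/query_range"

	params := url.Values{}
	params.Set("query", "tendermint_consensus_height")
	params.Set("start", "2022-10-14T00:00:00Z") // placeholder time range
	params.Set("end", "2022-10-14T01:00:00Z")
	params.Set("step", "15s")

	resp, err := http.Get(base + "?" + params.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// The response is a JSON "matrix": one time series of height samples
	// per node; feed it to whatever plotting tool you prefer.
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}
```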
## Rotating Node Testnet

### Running the test

This section explains how the tests were carried out for reproducibility purposes.

1. [If you haven't done it before]
   Follow steps 1-4 of the `README.md` at the top of the testnet repository to configure Terraform, and `doctl`.
2. Copy file `testnet_rotating.toml` onto `testnet.toml` (do NOT commit this change)
3. Set variable `VERSION_TAG` to the git hash that is to be tested.
4. Run `make terraform-apply EPHEMERAL_SIZE=25`
    * WARNING: Do NOT forget to run `make terraform-destroy` as soon as you are done with the tests
5. Follow steps 6-10 of the `README.md` to configure and start the "stable" part of the rotating node testnet
6. As a sanity check, connect to the Prometheus node's web interface and check the graph for the `tendermint_consensus_height` metric.
   All nodes should be increasing their heights.
7. On a different shell,
    * run `make runload ROTATE_CONNECTIONS=X ROTATE_TX_RATE=Y`
    * `X` and `Y` should reflect a load below the saturation point (see, e.g.,
      [this paragraph](./v034/README.md#finding-the-saturation-point) for further info)
8. Run `make rotate` to start the script that creates the ephemeral nodes, and kills them when they are caught up.
    * WARNING: If you run this command from your laptop, the laptop needs to be up and connected for the full length
      of the experiment.
9. When the height of the chain reaches 3000, stop the `make runload` script
10. When the rotate script has made two iterations (i.e., all ephemeral nodes have caught up twice)
    after height 3000 was reached, stop `make rotate`
11. Run `make retrieve-data` to gather all relevant data from the testnet into the orchestrating machine
12. Verify that the data was collected without errors
    * at least one blockstore DB for a Tendermint validator
    * the Prometheus database from the Prometheus node
    * for extra care, you can run `zip -T` on the `prometheus.zip` file and (one of) the `blockstore.db.zip` file(s)
13. **Run `make terraform-destroy`**

Steps 8 to 10 are highly manual at the moment and will be improved in future iterations.

### Result Extraction

In order to obtain a latency plot, follow the instructions above for the 200 node experiment, but:

* The `results.txt` file contains only one experiment
* Therefore, no need for any `for` loops

As for Prometheus, the same method as for the 200 node experiment can be applied.
278  docs/qa/v034/README.md  Normal file
@@ -0,0 +1,278 @@
---
order: 1
parent:
  title: Tendermint Quality Assurance Results for v0.34.x
  description: This is a report on the results obtained when running v0.34.x on testnets
  order: 2
---

# v0.34.x

## 200 Node Testnet

### Finding the Saturation Point

The first goal when examining the results of the tests is identifying the saturation point.
The saturation point is the configuration at which the transaction load is just high enough
that the testnet can no longer keep up: the load runner tries to produce slightly more
transactions than the testnet can process.

The following table summarizes the results for v0.34.x, for the different experiments
(extracted from file [`v034_report_tabbed.txt`](./img/v034_report_tabbed.txt)).

The X axis of this table is `c`, the number of connections created by the load runner process to the target node.
The Y axis of this table is `r`, the rate or number of transactions issued per second.

|       |  c=1  |  c=2  |  c=4  |
| :---  | ----: | ----: | ----: |
| r=25  |  2225 |  4450 |  8900 |
| r=50  |  4450 |  8900 | 17800 |
| r=100 |  8900 | 17800 | 35600 |
| r=200 | 17800 | 35600 | 38660 |

The table shows the number of 1024-byte-long transactions that were produced by the load runner,
and processed by Tendermint, during the 90 seconds of the experiment's duration.
Each cell in the table refers to an experiment with a particular number of websocket connections (`c`)
to a chosen validator, and the number of transactions per second that the load runner
tries to produce (`r`). Note that the overall load that the tool attempts to generate is $c \cdot r$.

We can see that the saturation point is beyond the diagonal that spans cells

* `r=200,c=2`
* `r=100,c=4`

given that the total transactions should be close to the product of the rate, the number of connections,
and the experiment time (89 seconds, since the last batch never gets sent).

All experiments below the saturation diagonal (`r=200,c=4`) have in common that the total
number of transactions processed is noticeably less than the product $c \cdot r \cdot 89$,
which is the expected number of transactions when the system is able to deal well with the
load.
With `r=200,c=4`, we obtained 38660 whereas the theoretical number of transactions should
have been $200 \cdot 4 \cdot 89 = 71200$.
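The arithmetic behind this reasoning can be checked mechanically. A small illustrative sketch in Go, using the numbers from the table above (the program is not part of the QA tooling):

```go
package main

import "fmt"

func main() {
	const seconds = 89 // effective load duration: the last batch never gets sent

	// (connections, rate, observed total) taken from the table above.
	cases := []struct{ c, r, observed int }{
		{2, 200, 35600}, // on the saturation diagonal
		{4, 100, 35600}, // on the saturation diagonal
		{4, 200, 38660}, // beyond the diagonal
	}

	for _, e := range cases {
		expected := e.c * e.r * seconds // what a non-saturated system should process
		fmt.Printf("c=%d r=%d: observed %d of %d expected (%.0f%%)\n",
			e.c, e.r, e.observed, expected,
			100*float64(e.observed)/float64(expected))
	}
}
```

The two diagonal cells come out at 100% of the expected count, while `r=200,c=4` reaches only about 54%, which is what marks it as past the saturation point.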
At this point, we chose an experiment at the limit of the saturation diagonal,
in order to further study the performance of this release.
**The chosen experiment is `r=200,c=2`**.

This is a plot of the CPU load (average over 1 minute, as output by `top`) of the load runner for `r=200,c=2`,
where we can see that the load stays close to 0 most of the time.

![load-load-runner](./img/v034_r200c2_load-runner.png)

### Examining latencies

The method described [here](../method.md) allows us to plot the latencies of transactions
for all experiments.

![all-latencies](./img/v034_200node_latencies.png)

As we can see, even the experiments beyond the saturation diagonal managed to keep
transaction latency stable (i.e. not constantly increasing).
Our interpretation for this is that contention within Tendermint was propagated,
via the websockets, to the load runner,
hence the load runner could not produce the target load, but a fraction of it.

Further examination of the Prometheus data (see below) showed that the mempool contained many transactions
at steady state, and whenever it grew beyond that level it quickly returned to it. This demonstrates
that the transactions were able to be processed by the Tendermint network at least as quickly as they
were submitted to the mempool. Finally, the test script made sure that, at the end of an experiment, the
mempool was empty so that all transactions submitted to the chain were processed.

The number of points present in the plot also appears to be far fewer than expected given the
number of transactions in each experiment, particularly close to or above the saturation diagonal.
This is a visual effect of the plot; what appear to be points in the plot are actually potentially huge
clusters of points. To corroborate this, we have zoomed into the plot above by setting (carefully chosen)
tiny axis intervals. The cluster shown below looks like a single point in the plot above.

![all-latencies-zoomed](./img/v034_200node_latencies_zoomed.png)

The plot of latencies can be used as a baseline to compare with other releases.

The following plot summarizes average latencies versus overall throughputs
across different numbers of WebSocket connections to the node into which
transactions are being loaded.

![latency-vs-throughput](./img/v034_latency_throughput.png)

### Prometheus Metrics on the Chosen Experiment

As mentioned [above](#finding-the-saturation-point), the chosen experiment is `r=200,c=2`.
This section further examines key metrics for this experiment extracted from Prometheus data.

#### Mempool Size

The mempool size, a count of the number of transactions in the mempool, was shown to be stable and homogeneous
at all full nodes. It did not exhibit any unconstrained growth.
The plot below shows the evolution over time of the cumulative number of transactions inside all full nodes' mempools
at a given time.
The two spikes that can be observed correspond to a period where consensus instances proceeded beyond the initial round
at some nodes.

![mempool-cumulative](./img/v034_r200c2_mempool_size.png)

The plot below shows the evolution of the average over all full nodes, which oscillates between 1500 and 2000
outstanding transactions.

![mempool-avg](./img/v034_r200c2_mempool_size_avg.png)

The peaks observed coincide with the moments when some nodes proceeded beyond the initial round of consensus (see below).

#### Peers

The number of peers was stable at all nodes.
It was higher for the seed nodes (around 140) than for the rest (between 21 and 74).
The fact that non-seed nodes reach more than 50 peers is due to #9548.

![peers](./img/v034_r200c2_peers.png)

#### Consensus Rounds per Height

Most heights took just one round, but some nodes needed to advance to round 1 at some point.

![rounds](./img/v034_r200c2_rounds.png)

#### Blocks Produced per Minute, Transactions Processed per Minute

The blocks produced per minute are the slope of this plot.

![heights](./img/v034_r200c2_heights.png)

Over a period of 2 minutes, the height goes from 530 to 569.
This results in an average of 19.5 blocks produced per minute.

The transactions processed per minute are the slope of this plot.

![total-txs](./img/v034_r200c2_total-txs.png)

Over a period of 2 minutes, the total goes from 64525 to 100125 transactions,
resulting in 17800 transactions per minute. However, we can see in the plot that
all transactions in the load are processed long before the two minutes elapse.
If we adjust the time window when transactions are processed (approx. 105 seconds),
we obtain 20343 transactions per minute.
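That adjustment is just a rescaling of the observed transaction count to the window in which transactions were actually processed; a one-line check in Go with the numbers read off the plot (illustrative only):

```go
package main

import "fmt"

func main() {
	start, end := 64525.0, 100125.0 // cumulative totals read off the plot
	window := 105.0                 // seconds during which transactions were processed

	rate := (end - start) / window * 60
	fmt.Printf("%.0f transactions per minute\n", rate) // prints 20343
}
```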
#### Memory Resident Set Size

Resident Set Size of all monitored processes is plotted below.

![rss](./img/v034_r200c2_rss.png)

The average over all processes oscillates around 1.2 GiB and does not demonstrate unconstrained growth.

![rss-avg](./img/v034_r200c2_rss_avg.png)

#### CPU utilization

The best metric from Prometheus to gauge CPU utilization in a Unix machine is `load1`,
as it usually appears in the
[output of `top`](https://www.digitalocean.com/community/tutorials/load-average-in-linux).

![load1](./img/v034_r200c2_load1.png)

It is contained in most cases below 5, which is generally considered acceptable load.

### Test Result

**Result: N/A** (v0.34.x is the baseline)

Date: 2022-10-14

Version: 3ec6e424d6ae4c96867c2dcf8310572156068bb6

## Rotating Node Testnet

For this testnet, we will use a load that can safely be considered below the saturation
point for the size of this testnet (between 13 and 38 full nodes): `c=4,r=800`.

N.B.: The version of Tendermint used for these tests is affected by #9539.
However, the reduced load that reaches the mempools is orthogonal to the functionality
we are focusing on here.

### Latencies

The plot of all latencies can be seen below.

![rotating-all-latencies](./img/v034_rotating_latencies.png)

We can observe some very high latencies towards the end of the test.
Upon suspicion that they were duplicate transactions, we examined the raw latencies
file and discovered there are more than 100K duplicate transactions.

The following plot shows the latencies file where all duplicate transactions have
been removed, i.e., only the first occurrence of a duplicate transaction is kept.

![rotating-all-latencies-uniq](./img/v034_rotating_latencies_uniq.png)

This problem, existing in `v0.34.x`, will need to be addressed, perhaps in the same way
we addressed it when running the 200 node test with high loads: increasing the `cache_size`
configuration parameter.

### Prometheus Metrics

The set of metrics shown here is smaller than for the 200 node experiment.
We are only interested in those for which the catch-up process (blocksync) may have an impact.

#### Blocks and Transactions per minute

Just as shown for the 200 node test, the blocks produced per minute are the gradient of this plot.

![rotating-heights](./img/v034_rotating_heights.png)

Over a period of 5229 seconds, the height goes from 2 to 3638.
This results in an average of 41 blocks produced per minute.

The following plot shows only the heights reported by ephemeral nodes
(which are also included in the plot above). Note that the _height_ metric
is only shown _once the node has switched to consensus_, hence the gaps
when nodes are killed, wiped out, started from scratch, and catching up.

![rotating-heights-ephe](./img/v034_rotating_heights_ephe.png)

The transactions processed per minute are the gradient of this plot.

![rotating-total-txs](./img/v034_rotating_total-txs.png)

The small lines we see periodically close to `y=0` are the transactions that
ephemeral nodes start processing when they are caught up.

Over a period of 5229 seconds, the total goes from 0 to 387697 transactions,
resulting in 4449 transactions per minute. We can see some abrupt changes in
the plot's gradient. This will need to be investigated.

#### Peers

The plot below shows the evolution of the number of peers throughout the experiment.
The periodic changes observed are due to the ephemeral nodes being stopped,
wiped out, and recreated.

![rotating-peers](./img/v034_rotating_peers.png)

The validators' plots are concentrated at the higher part of the graph, whereas the ephemeral nodes
are mostly at the lower part.

#### Memory Resident Set Size

The average Resident Set Size (RSS) over all processes seems stable, and slightly growing toward the end.
This might be related to the increase in transaction load observed above.

![rotating-rss-avg](./img/v034_rotating_rss_avg.png)

The memory taken by the validators and the ephemeral nodes (when they are up) is comparable.

#### CPU utilization

The plot shows metric `load1` for all nodes.

![rotating-load1](./img/v034_rotating_load1.png)

It is contained under 5 most of the time, which is considered normal load.
The purple line, which follows a different pattern, is the validator receiving all
transactions, via RPC, from the load runner process.

### Test Result

**Result: N/A**

Date: 2022-10-10

Version: a28c987f5a604ff66b515dd415270063e6fb069d
BIN  docs/qa/v034/img/v034_200node_latencies.png         Normal file  (image, 42 KiB)
BIN  docs/qa/v034/img/v034_200node_latencies_zoomed.png  Normal file  (image, 34 KiB)
BIN  docs/qa/v034/img/v034_latency_throughput.png        Normal file  (image, 35 KiB)
BIN  docs/qa/v034/img/v034_r200c2_heights.png            Normal file  (image, 378 KiB)
BIN  docs/qa/v034/img/v034_r200c2_load-runner.png        Normal file  (image, 150 KiB)
BIN  docs/qa/v034/img/v034_r200c2_load1.png              Normal file  (image, 759 KiB)
BIN  docs/qa/v034/img/v034_r200c2_mempool_size.png       Normal file  (image, 2.4 MiB)
BIN  docs/qa/v034/img/v034_r200c2_mempool_size_avg.png   Normal file  (image, 192 KiB)
BIN  docs/qa/v034/img/v034_r200c2_peers.png              Normal file  (image, 130 KiB)
BIN  docs/qa/v034/img/v034_r200c2_rounds.png             Normal file  (image, 1.0 MiB)
BIN  docs/qa/v034/img/v034_r200c2_rss.png                Normal file  (image, 926 KiB)
BIN  docs/qa/v034/img/v034_r200c2_rss_avg.png            Normal file  (image, 157 KiB)
BIN  docs/qa/v034/img/v034_r200c2_total-txs.png          Normal file  (image, 534 KiB)
52  docs/qa/v034/img/v034_report_tabbed.txt  Normal file
@@ -0,0 +1,52 @@
Experiment ID: 3d5cf4ef-1a1a-4b46-aa2d-da5643d2e81e │Experiment ID: 80e472ec-13a1-4772-a827-3b0c907fb51d │Experiment ID: 07aca6cf-c5a4-4696-988f-e3270fc6333b
                                                    │                                                    │
Connections: 1                   │ Connections: 2                   │ Connections: 4
Rate: 25                         │ Rate: 25                         │ Rate: 25
Size: 1024                       │ Size: 1024                       │ Size: 1024
                                 │                                  │
Total Valid Tx: 2225             │ Total Valid Tx: 4450             │ Total Valid Tx: 8900
Total Negative Latencies: 0      │ Total Negative Latencies: 0      │ Total Negative Latencies: 0
Minimum Latency: 599.404362ms    │ Minimum Latency: 448.145181ms    │ Minimum Latency: 412.485729ms
Maximum Latency: 3.539686885s    │ Maximum Latency: 3.237392049s    │ Maximum Latency: 12.026665368s
Average Latency: 1.441485349s    │ Average Latency: 1.441267946s    │ Average Latency: 2.150192457s
Standard Deviation: 541.049869ms │ Standard Deviation: 525.040007ms │ Standard Deviation: 2.233852478s
                                 │                                  │
Experiment ID: 953dc544-dd40-40e8-8712-20c34c3ce45e │Experiment ID: d31fc258-16e7-45cd-9dc8-13ab87bc0b0a │Experiment ID: 15d90a7e-b941-42f4-b411-2f15f857739e
                                                    │                                                    │
Connections: 1                   │ Connections: 2                   │ Connections: 4
Rate: 50                         │ Rate: 50                         │ Rate: 50
Size: 1024                       │ Size: 1024                       │ Size: 1024
                                 │                                  │
Total Valid Tx: 4450             │ Total Valid Tx: 8900             │ Total Valid Tx: 17800
Total Negative Latencies: 0      │ Total Negative Latencies: 0      │ Total Negative Latencies: 0
Minimum Latency: 482.046942ms    │ Minimum Latency: 435.458913ms    │ Minimum Latency: 510.746448ms
Maximum Latency: 3.761483455s    │ Maximum Latency: 7.175583584s    │ Maximum Latency: 6.551497882s
Average Latency: 1.450408183s    │ Average Latency: 1.681673116s    │ Average Latency: 1.738083875s
Standard Deviation: 587.560056ms │ Standard Deviation: 1.147902047s │ Standard Deviation: 943.46522ms
                                 │                                  │
Experiment ID: 9a0b9980-9ce6-4db5-a80a-65ca70294b87 │Experiment ID: df8fa4f4-80af-4ded-8a28-356d15018b43 │Experiment ID: d0e41c2c-89c0-4f38-8e34-ca07adae593a
                                                    │                                                    │
Connections: 1                   │ Connections: 2                   │ Connections: 4
Rate: 100                        │ Rate: 100                        │ Rate: 100
Size: 1024                       │ Size: 1024                       │ Size: 1024
                                 │                                  │
Total Valid Tx: 8900             │ Total Valid Tx: 17800            │ Total Valid Tx: 35600
Total Negative Latencies: 0      │ Total Negative Latencies: 0      │ Total Negative Latencies: 0
Minimum Latency: 477.417219ms    │ Minimum Latency: 564.29247ms     │ Minimum Latency: 840.71089ms
Maximum Latency: 6.63744785s     │ Maximum Latency: 6.988553219s    │ Maximum Latency: 9.555312398s
Average Latency: 1.561216103s    │ Average Latency: 1.76419063s     │ Average Latency: 3.200941683s
Standard Deviation: 1.011333552s │ Standard Deviation: 1.068459423s │ Standard Deviation: 1.732346601s
                                 │                                  │
Experiment ID: 493df3ee-4a36-4bce-80f8-6d65da66beda │Experiment ID: 13060525-f04f-46f6-8ade-286684b2fe50 │Experiment ID: 1777cbd2-8c96-42e4-9ec7-9b21f2225e4d
                                                    │                                                    │
Connections: 1                   │ Connections: 2                   │ Connections: 4
Rate: 200                        │ Rate: 200                        │ Rate: 200
Size: 1024                       │ Size: 1024                       │ Size: 1024
                                 │                                  │
Total Valid Tx: 17800            │ Total Valid Tx: 35600            │ Total Valid Tx: 38660
Total Negative Latencies: 0      │ Total Negative Latencies: 0      │ Total Negative Latencies: 0
Minimum Latency: 493.705261ms    │ Minimum Latency: 955.090573ms    │ Minimum Latency: 1.9485821s
Maximum Latency: 7.440921872s    │ Maximum Latency: 10.086673491s   │ Maximum Latency: 17.73103976s
Average Latency: 1.875510582s    │ Average Latency: 3.438130099s    │ Average Latency: 8.143862237s
Standard Deviation: 1.304336995s │ Standard Deviation: 1.966391574s │ Standard Deviation: 3.943140002s

BIN  docs/qa/v034/img/v034_rotating_heights.png         Normal file  (image, 157 KiB)
BIN  docs/qa/v034/img/v034_rotating_heights_ephe.png    Normal file  (image, 140 KiB)
BIN  docs/qa/v034/img/v034_rotating_latencies.png       Normal file  (image, 22 KiB)
BIN  docs/qa/v034/img/v034_rotating_latencies_uniq.png  Normal file  (image, 22 KiB)
BIN  docs/qa/v034/img/v034_rotating_load1.png           Normal file  (image, 1.5 MiB)
BIN  docs/qa/v034/img/v034_rotating_peers.png           Normal file  (image, 486 KiB)
BIN  docs/qa/v034/img/v034_rotating_rss_avg.png         Normal file  (image, 193 KiB)
BIN  docs/qa/v034/img/v034_rotating_total-txs.png       Normal file  (image, 197 KiB)
326  docs/qa/v037/README.md  Normal file
@@ -0,0 +1,326 @@
---
order: 1
parent:
  title: Tendermint Quality Assurance Results for v0.37.x
  description: This is a report on the results obtained when running v0.37.x on testnets
  order: 2
---

# v0.37.x

## Issues discovered

During this iteration of the QA process, the following issues were found:

* (critical, fixed) [\#9533] - This bug caused full nodes to sometimes get stuck
  when blocksyncing, requiring a manual restart to unblock them. Importantly,
  this bug was also present in v0.34.x and the fix was also backported in
  [\#9534].
* (critical, fixed) [\#9539] - `loadtime` is very likely to include more than
  one "=" character in transactions, which is rejected by the e2e application.
* (critical, fixed) [\#9581] - An absent Prometheus label made Tendermint crash
  when Prometheus metric collection was enabled.
* (non-critical, not fixed) [\#9548] - Full nodes can go over 50 connected
  peers, which is not intended by the default configuration.
* (non-critical, not fixed) [\#9537] - With the default mempool cache setting,
  duplicated transactions are not rejected when gossiped and eventually flood
  all mempools. The 200 node testnets were thus run with a value of 200000 (as
  opposed to the default 10000).

## 200 Node Testnet

### Finding the Saturation Point

The first goal is to identify the saturation point and compare it with the baseline (v0.34.x).
For further details, see [this paragraph](../v034/README.md#finding-the-saturation-point)
in the baseline version.

The following table summarizes the results for v0.37.x, for the different experiments
(extracted from file [`v037_report_tabbed.txt`](./img/v037_report_tabbed.txt)).

The X axis of this table is `c`, the number of connections created by the load runner process to the target node.
The Y axis of this table is `r`, the rate or number of transactions issued per second.

|       |  c=1  |  c=2  |  c=4  |
| :---  | ----: | ----: | ----: |
| r=25  |  2225 |  4450 |  8900 |
| r=50  |  4450 |  8900 | 17800 |
| r=100 |  8900 | 17800 | 35400 |
| r=200 | 17800 | 35600 | 37358 |

For comparison, this is the table for the baseline version.

|       |  c=1  |  c=2  |  c=4  |
| :---  | ----: | ----: | ----: |
| r=25  |  2225 |  4450 |  8900 |
| r=50  |  4450 |  8900 | 17800 |
| r=100 |  8900 | 17800 | 35600 |
| r=200 | 17800 | 35600 | 38660 |
The saturation point is beyond the diagonal:

* `r=200,c=2`
* `r=100,c=4`

which is at the same place as the baseline. For more details on the saturation point, see
[this paragraph](../v034/README.md#finding-the-saturation-point) in the baseline version.

The experiment chosen to examine Prometheus metrics is the same as in the baseline:
**`r=200,c=2`**.

The load runner's CPU load was negligible (near 0) when running `r=200,c=2`.

### Examining latencies

The method described [here](../method.md) allows us to plot the latencies of transactions
for all experiments.

![all-latencies](./img/v037_200node_latencies.png)

The data seen in the plot is similar to that of the baseline.

![all-latencies-bl](../v034/img/v034_200node_latencies.png)

Therefore, for further details on these plots,
see [this paragraph](../v034/README.md#examining-latencies) in the baseline version.

The following plot summarizes average latencies versus overall throughputs
across different numbers of WebSocket connections to the node into which
transactions are being loaded.

![latency-vs-throughput](./img/v037_latency_throughput.png)

This is similar to the baseline plot:

![latency-vs-throughput-bl](../v034/img/v034_latency_throughput.png)

### Prometheus Metrics on the Chosen Experiment

As mentioned [above](#finding-the-saturation-point), the chosen experiment is `r=200,c=2`.
This section further examines key metrics for this experiment extracted from Prometheus data.

#### Mempool Size

The mempool size, a count of the number of transactions in the mempool, was shown to be stable and homogeneous
at all full nodes. It did not exhibit any unconstrained growth.
The plot below shows the evolution over time of the cumulative number of transactions inside all full nodes' mempools
at a given time.

![mempool-cumulative](./img/v037_r200c2_mempool_size.png)

The plot below shows the evolution of the average over all full nodes, which oscillates between 1500 and 2000 outstanding transactions.

![mempool-avg](./img/v037_r200c2_mempool_size_avg.png)

The peaks observed coincide with the moments when some nodes reached round 1 of consensus (see below).

**These plots yield similar results to the baseline**:

![mempool-cumulative-bl](../v034/img/v034_r200c2_mempool_size.png)

![mempool-avg-bl](../v034/img/v034_r200c2_mempool_size_avg.png)

#### Peers

The number of peers was stable at all nodes.
It was higher for the seed nodes (around 140) than for the rest (between 16 and 78).

![peers](./img/v037_r200c2_peers.png)

Just as in the baseline, the fact that non-seed nodes reach more than 50 peers is due to #9548.

**This plot yields similar results to the baseline**:

![peers-bl](../v034/img/v034_r200c2_peers.png)

#### Consensus Rounds per Height

Most heights took just one round, but some nodes needed to advance to round 1 at some point.

![rounds](./img/v037_r200c2_rounds.png)

**This plot yields slightly better results than the baseline**:

![rounds-bl](../v034/img/v034_r200c2_rounds.png)

#### Blocks Produced per Minute, Transactions Processed per Minute

The blocks produced per minute are the gradient of this plot.

![heights](./img/v037_r200c2_heights.png)

Over a period of 2 minutes, the height goes from 477 to 524.
This results in an average of 23.5 blocks produced per minute.

The transactions processed per minute are the gradient of this plot.

![total-txs](./img/v037_r200c2_total-txs.png)

Over a period of 2 minutes, the total goes from 64525 to 100125 transactions,
resulting in 17800 transactions per minute. However, we can see in the plot that
all transactions in the load are processed long before the two minutes elapse.
If we adjust the time window when transactions are processed (approx. 90 seconds),
we obtain 23733 transactions per minute.

**These plots yield similar results to the baseline**:

![heights-bl](../v034/img/v034_r200c2_heights.png)

![total-txs-bl](../v034/img/v034_r200c2_total-txs.png)

#### Memory Resident Set Size

Resident Set Size of all monitored processes is plotted below.

![rss](./img/v037_r200c2_rss.png)

The average over all processes oscillates around 380 MiB and does not demonstrate unconstrained growth.

![rss-avg](./img/v037_r200c2_rss_avg.png)

**These plots yield similar results to the baseline**:

![rss-bl](../v034/img/v034_r200c2_rss.png)

![rss-avg-bl](../v034/img/v034_r200c2_rss_avg.png)

#### CPU utilization

The best metric from Prometheus to gauge CPU utilization in a Unix machine is `load1`,
as it usually appears in the
[output of `top`](https://www.digitalocean.com/community/tutorials/load-average-in-linux).

![load1](./img/v037_r200c2_load1.png)

It is contained below 5 on most nodes.

**This plot yields similar results to the baseline**:

![load1-bl](../v034/img/v034_r200c2_load1.png)

### Test Result

**Result: PASS**

Date: 2022-10-14

Version: 1cf9d8e276afe8595cba960b51cd056514965fd1

## Rotating Node Testnet

We use the same load as in the baseline: `c=4,r=800`.

Just as in the baseline tests, the version of Tendermint used for these tests is affected by #9539.
See this paragraph in the [baseline report](../v034/README.md#rotating-node-testnet) for further details.
Finally, note that this setup allows for a fairer comparison between this version and the baseline.

### Latencies

The plot of all latencies can be seen here.

![rotating-all-latencies](./img/v037_rotating_latencies.png)

It is similar to the baseline:

![rotating-all-latencies-bl](../v034/img/v034_rotating_latencies_uniq.png)

Note that we are comparing against the baseline plot with _unique_
transactions. This is because the problem with duplicate transactions
detected during the baseline experiment did not show up for `v0.37`,
which is _not_ proof that the problem is not present in `v0.37`.

### Prometheus Metrics

The set of metrics shown here matches those shown on the baseline (`v0.34`) for the same experiment.
We also show the baseline results for comparison.

#### Blocks and Transactions per minute

The blocks produced per minute are the gradient of this plot.

![rotating-heights](./img/v037_rotating_heights.png)

Over a period of 4446 seconds, the height goes from 5 to 3323.
This results in an average of 45 blocks produced per minute,
which is similar to the baseline, shown below.

![rotating-heights-bl](../v034/img/v034_rotating_heights.png)

The following two plots show only the heights reported by ephemeral nodes.
The second plot is the baseline plot for comparison.

![rotating-heights-ephe](./img/v037_rotating_heights_ephe.png)

![rotating-heights-ephe-bl](../v034/img/v034_rotating_heights_ephe.png)

By the length of the segments, we can see that ephemeral nodes in `v0.37`
catch up slightly faster.

The transactions processed per minute are the gradient of this plot.

![rotating-total-txs](./img/v037_rotating_total-txs.png)

Over a period of 3852 seconds, the total goes from 597 to 267298 transactions in one of the validators,
resulting in 4154 transactions per minute, which is slightly lower than the baseline,
although the baseline had to deal with duplicate transactions.

For comparison, this is the baseline plot.

![rotating-total-txs-bl](../v034/img/v034_rotating_total-txs.png)

#### Peers

The plot below shows the evolution of the number of peers throughout the experiment.

![rotating-peers](./img/v037_rotating_peers.png)

This is the baseline plot, for comparison.

![rotating-peers-bl](../v034/img/v034_rotating_peers.png)

The plotted values and their evolution are comparable in both plots.

For further details on these plots, see the baseline report.

#### Memory Resident Set Size

The average Resident Set Size (RSS) over all processes looks slightly more stable
on `v0.37` (first plot) than on the baseline (second plot).

![rotating-rss-avg](./img/v037_rotating_rss_avg.png)

![rotating-rss-avg-bl](../v034/img/v034_rotating_rss_avg.png)

The memory taken by the validators and the ephemeral nodes when they are up is comparable (not shown in the plots),
just as observed in the baseline.

#### CPU utilization

The plot shows metric `load1` for all nodes.

![rotating-load1](./img/v037_rotating_load1.png)

This is the baseline plot.

![rotating-load1-bl](../v034/img/v034_rotating_load1.png)

In both cases, it is contained under 5 most of the time, which is considered normal load.
The green line in the `v0.37` plot and the purple line in the baseline plot (`v0.34`)
correspond to the validators receiving all transactions, via RPC, from the load runner process.
In both cases, they oscillate around 5 (normal load). The main difference is that other
nodes are generally less loaded in `v0.37`.

### Test Result

**Result: PASS**

Date: 2022-10-10

Version: 155110007b9d8b83997a799016c1d0844c8efbaf

[\#9533]: https://github.com/tendermint/tendermint/pull/9533
[\#9534]: https://github.com/tendermint/tendermint/pull/9534
[\#9539]: https://github.com/tendermint/tendermint/issues/9539
[\#9548]: https://github.com/tendermint/tendermint/issues/9548
[\#9537]: https://github.com/tendermint/tendermint/issues/9537
[\#9581]: https://github.com/tendermint/tendermint/issues/9581

BIN  docs/qa/v037/img/v037_200node_latencies.png       Normal file  (image, 42 KiB)
BIN  docs/qa/v037/img/v037_latency_throughput.png      Normal file  (image, 35 KiB)
BIN  docs/qa/v037/img/v037_r200c2_heights.png          Normal file  (image, 411 KiB)
BIN  docs/qa/v037/img/v037_r200c2_load1.png            Normal file  (image, 887 KiB)
BIN  docs/qa/v037/img/v037_r200c2_mempool_size.png     Normal file  (image, 2.3 MiB)
BIN  docs/qa/v037/img/v037_r200c2_mempool_size_avg.png Normal file  (image, 183 KiB)
BIN  docs/qa/v037/img/v037_r200c2_peers.png            Normal file  (image, 133 KiB)
BIN  docs/qa/v037/img/v037_r200c2_rounds.png           Normal file  (image, 589 KiB)
BIN  docs/qa/v037/img/v037_r200c2_rss.png              Normal file  (image, 816 KiB)
BIN  docs/qa/v037/img/v037_r200c2_rss_avg.png          Normal file  (image, 154 KiB)
BIN  docs/qa/v037/img/v037_r200c2_total-txs.png        Normal file  (image, 538 KiB)
52
docs/qa/v037/img/v037_report_tabbed.txt
Normal file
@@ -0,0 +1,52 @@
Experiment ID: af129eae-7039-4c76-8c37-cff9ac636a84 │Experiment ID: 0f88bd33-9bf0-4197-8d1d-9a737c301ec6 │Experiment ID: 88227cad-2ba8-4eb6-b493-041d8120b46f
                                                    │                                                    │
 Connections: 1                                     │ Connections: 2                                     │ Connections: 4
 Rate: 25                                           │ Rate: 25                                           │ Rate: 25
 Size: 1024                                         │ Size: 1024                                         │ Size: 1024
                                                    │                                                    │
 Total Valid Tx: 2225                               │ Total Valid Tx: 4450                               │ Total Valid Tx: 8900
 Total Negative Latencies: 0                        │ Total Negative Latencies: 0                        │ Total Negative Latencies: 0
 Minimum Latency: 506.248587ms                      │ Minimum Latency: 469.53452ms                       │ Minimum Latency: 588.900721ms
 Maximum Latency: 3.032125789s                      │ Maximum Latency: 6.548830955s                      │ Maximum Latency: 6.533739843s
 Average Latency: 1.427767726s                      │ Average Latency: 1.448582257s                      │ Average Latency: 1.717432341s
 Standard Deviation: 524.11782ms                    │ Standard Deviation: 768.684133ms                   │ Standard Deviation: 1.000015768s
                                                    │                                                    │
Experiment ID: f03d39bd-0233-4b3c-b461-543445ae1d4b │Experiment ID: 46674f1c-e591-4e36-bb9b-f375c19fc475 │Experiment ID: 5385c159-8d4d-455b-bced-dcd4a3209988
                                                    │                                                    │
 Connections: 1                                     │ Connections: 2                                     │ Connections: 4
 Rate: 50                                           │ Rate: 50                                           │ Rate: 50
 Size: 1024                                         │ Size: 1024                                         │ Size: 1024
                                                    │                                                    │
 Total Valid Tx: 4450                               │ Total Valid Tx: 8900                               │ Total Valid Tx: 17800
 Total Negative Latencies: 0                        │ Total Negative Latencies: 0                        │ Total Negative Latencies: 0
 Minimum Latency: 477.46027ms                       │ Minimum Latency: 455.757111ms                      │ Minimum Latency: 594.749081ms
 Maximum Latency: 2.483895394s                      │ Maximum Latency: 2.904715695s                      │ Maximum Latency: 9.294950389s
 Average Latency: 1.407374662s                      │ Average Latency: 1.397385779s                      │ Average Latency: 2.621122536s
 Standard Deviation: 505.150067ms                   │ Standard Deviation: 551.67603ms                    │ Standard Deviation: 1.772725794s
                                                    │                                                    │
Experiment ID: 9161b4a7-d75c-455f-b82d-2b5235d533cf │Experiment ID: 993a13a8-9db1-4b2b-9c20-71a5b85e4bbf │Experiment ID: ad1eb9e1-f4d6-41fd-9ba7-0f1f7dde1e3e
                                                    │                                                    │
 Connections: 1                                     │ Connections: 2                                     │ Connections: 4
 Rate: 100                                          │ Rate: 100                                          │ Rate: 100
 Size: 1024                                         │ Size: 1024                                         │ Size: 1024
                                                    │                                                    │
 Total Valid Tx: 8900                               │ Total Valid Tx: 17800                              │ Total Valid Tx: 35400
 Total Negative Latencies: 0                        │ Total Negative Latencies: 0                        │ Total Negative Latencies: 0
 Minimum Latency: 448.050467ms                      │ Minimum Latency: 605.436195ms                      │ Minimum Latency: 1.16816912s
 Maximum Latency: 3.789711139s                      │ Maximum Latency: 7.292770222s                      │ Maximum Latency: 11.378681842s
 Average Latency: 1.451342158s                      │ Average Latency: 2.07457999s                       │ Average Latency: 3.918384209s
 Standard Deviation: 644.075973ms                   │ Standard Deviation: 1.230204022s                   │ Standard Deviation: 2.172400458s
                                                    │                                                    │
Experiment ID: 3cbe9c3d-9c43-4c9f-b5ca-b567d20bbd57 │Experiment ID: af836c5e-d9b6-4d5d-971c-2fc7f07aa2a0 │Experiment ID: 77606397-4989-41d4-b13b-f1f4d1af063f
                                                    │                                                    │
 Connections: 1                                     │ Connections: 2                                     │ Connections: 4
 Rate: 200                                          │ Rate: 200                                          │ Rate: 200
 Size: 1024                                         │ Size: 1024                                         │ Size: 1024
                                                    │                                                    │
 Total Valid Tx: 17800                              │ Total Valid Tx: 35600                              │ Total Valid Tx: 37358
 Total Negative Latencies: 0                        │ Total Negative Latencies: 0                        │ Total Negative Latencies: 0
 Minimum Latency: 519.984701ms                      │ Minimum Latency: 820.755087ms                      │ Minimum Latency: 1.712574804s
 Maximum Latency: 12.609056712s                     │ Maximum Latency: 9.260798095s                      │ Maximum Latency: 25.739223696s
 Average Latency: 2.717853101s                      │ Average Latency: 3.477731881s                      │ Average Latency: 8.547725264s
 Standard Deviation: 2.390778155s                   │ Standard Deviation: 1.675000913s                   │ Standard Deviation: 4.76961569s
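For reference, the four per-experiment statistics in the report above (minimum, maximum, average, and standard deviation of transaction latency) can be recomputed from raw latency samples. Below is a minimal Go sketch, not the actual reporting script that produced the table; the sample values in `main` are made up:

```go
package main

import (
	"fmt"
	"math"
	"time"
)

// summarize computes the four statistics shown per experiment above:
// minimum, maximum, mean, and (population) standard deviation.
// It assumes a non-empty sample set.
func summarize(latencies []time.Duration) (min, max, mean, stddev time.Duration) {
	min, max = latencies[0], latencies[0]
	var sum time.Duration
	for _, l := range latencies {
		if l < min {
			min = l
		}
		if l > max {
			max = l
		}
		sum += l
	}
	mean = sum / time.Duration(len(latencies))

	var sumSq float64
	for _, l := range latencies {
		d := float64(l - mean)
		sumSq += d * d
	}
	stddev = time.Duration(math.Sqrt(sumSq / float64(len(latencies))))
	return
}

func main() {
	// Three made-up samples; real runs aggregate thousands of latencies.
	samples := []time.Duration{
		506 * time.Millisecond,
		1428 * time.Millisecond,
		3032 * time.Millisecond,
	}
	min, max, mean, stddev := summarize(samples)
	fmt.Println(min, max, mean, stddev)
}
```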
BIN docs/qa/v037/img/v037_rotating_heights.png (new file; 167 KiB)
BIN docs/qa/v037/img/v037_rotating_heights_ephe.png (new file; 138 KiB)
BIN docs/qa/v037/img/v037_rotating_latencies.png (new file; 22 KiB)
BIN docs/qa/v037/img/v037_rotating_load1.png (new file; 1.3 MiB)
BIN docs/qa/v037/img/v037_rotating_peers.png (new file; 577 KiB)
BIN docs/qa/v037/img/v037_rotating_rss_avg.png (new file; 217 KiB)
BIN docs/qa/v037/img/v037_rotating_total-txs.png (new file; 181 KiB)
@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"time"
 
+	"github.com/cosmos/gogoproto/proto"
 	clist "github.com/tendermint/tendermint/libs/clist"
 	"github.com/tendermint/tendermint/libs/log"
 	"github.com/tendermint/tendermint/p2p"
@@ -55,6 +56,7 @@ func (evR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
 			ID:                  EvidenceChannel,
 			Priority:            6,
 			RecvMessageCapacity: maxMsgSize,
+			MessageType:         &tmproto.EvidenceList{},
 		},
 	}
 }
@@ -66,11 +68,11 @@ func (evR *Reactor) AddPeer(peer p2p.Peer) {
 
 // Receive implements Reactor.
 // It adds any received evidence to the evpool.
-func (evR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
-	evis, err := decodeMsg(msgBytes)
+func (evR *Reactor) Receive(e p2p.Envelope) {
+	evis, err := evidenceListFromProto(e.Message)
 	if err != nil {
-		evR.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err)
-		evR.Switch.StopPeerForError(src, err)
+		evR.Logger.Error("Error decoding message", "src", e.Src, "chId", e.ChannelID, "err", err)
+		evR.Switch.StopPeerForError(e.Src, err)
 		return
 	}
 
@@ -80,7 +82,7 @@ func (evR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
 	case *types.ErrInvalidEvidence:
 		evR.Logger.Error(err.Error())
 		// punish peer
-		evR.Switch.StopPeerForError(src, err)
+		evR.Switch.StopPeerForError(e.Src, err)
 		return
 	case nil:
 	default:
@@ -126,11 +128,15 @@ func (evR *Reactor) broadcastEvidenceRoutine(peer p2p.Peer) {
 		evis := evR.prepareEvidenceMessage(peer, ev)
 		if len(evis) > 0 {
 			evR.Logger.Debug("Gossiping evidence to peer", "ev", ev, "peer", peer)
-			msgBytes, err := encodeMsg(evis)
+			evp, err := evidenceListToProto(evis)
 			if err != nil {
 				panic(err)
 			}
-			success := peer.Send(EvidenceChannel, msgBytes)
+
+			success := peer.Send(p2p.Envelope{
+				ChannelID: EvidenceChannel,
+				Message:   evp,
+			})
 			if !success {
 				time.Sleep(peerRetryMessageIntervalMS * time.Millisecond)
 				continue
@@ -210,7 +216,7 @@ type PeerState interface {
 
 // encodemsg takes a array of evidence
 // returns the byte encoding of the List Message
-func encodeMsg(evis []types.Evidence) ([]byte, error) {
+func evidenceListToProto(evis []types.Evidence) (*tmproto.EvidenceList, error) {
 	evi := make([]tmproto.Evidence, len(evis))
 	for i := 0; i < len(evis); i++ {
 		ev, err := types.EvidenceToProto(evis[i])
@@ -222,19 +228,13 @@ func encodeMsg(evis []types.Evidence) ([]byte, error) {
 	epl := tmproto.EvidenceList{
 		Evidence: evi,
 	}
 
-	return epl.Marshal()
+	return &epl, nil
 }
 
-// decodemsg takes an array of bytes
-// returns an array of evidence
-func decodeMsg(bz []byte) (evis []types.Evidence, err error) {
-	lm := tmproto.EvidenceList{}
-	if err := lm.Unmarshal(bz); err != nil {
-		return nil, err
-	}
+func evidenceListFromProto(m proto.Message) ([]types.Evidence, error) {
+	lm := m.(*tmproto.EvidenceList)
-
-	evis = make([]types.Evidence, len(lm.Evidence))
+	evis := make([]types.Evidence, len(lm.Evidence))
 	for i := 0; i < len(lm.Evidence); i++ {
 		ev, err := types.EvidenceFromProto(&lm.Evidence[i])
 		if err != nil {
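The substance of this change to the evidence reactor: `Receive` now takes a `p2p.Envelope` carrying an already-decoded `proto.Message`, and `Send` wraps a message in an Envelope instead of taking raw bytes, with (un)marshalling moved into the p2p layer and driven by each channel's declared `MessageType`. A hedged sketch of what a reactor looks like under this API; the `EchoReactor` type and `EchoChannel` ID are invented for illustration, and only the Envelope and ChannelDescriptor fields visible in the diff above are assumed:

```go
package example

import (
	"fmt"

	"github.com/tendermint/tendermint/p2p"
	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
)

// EchoChannel is a made-up channel ID for this sketch.
const EchoChannel = byte(0x99)

// EchoReactor is an invented reactor showing the Envelope-based API.
type EchoReactor struct {
	p2p.BaseReactor
}

// GetChannels declares the concrete proto type for the channel; the p2p
// layer uses it to decode inbound bytes before calling Receive.
func (r *EchoReactor) GetChannels() []*p2p.ChannelDescriptor {
	return []*p2p.ChannelDescriptor{{
		ID:                  EchoChannel,
		Priority:            1,
		RecvMessageCapacity: 1024,
		MessageType:         &tmproto.EvidenceList{},
	}}
}

// Receive gets an Envelope whose Message is already decoded, so the
// reactor only type-asserts; no manual proto.Unmarshal remains.
func (r *EchoReactor) Receive(e p2p.Envelope) {
	list, ok := e.Message.(*tmproto.EvidenceList)
	if !ok {
		r.Switch.StopPeerForError(e.Src, fmt.Errorf("unexpected message type %T", e.Message))
		return
	}
	// Echo the list back; marshalling is again left to the p2p layer.
	_ = e.Src.Send(p2p.Envelope{ChannelID: EchoChannel, Message: list})
}
```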
@@ -208,7 +208,10 @@ func TestReactorBroadcastEvidenceMemoryLeak(t *testing.T) {
 	// i.e. broadcastEvidenceRoutine finishes when peer is stopped
 	defer leaktest.CheckTimeout(t, 10*time.Second)()
 
-	p.On("Send", evidence.EvidenceChannel, mock.AnythingOfType("[]uint8")).Return(false)
+	p.On("Send", mock.MatchedBy(func(i interface{}) bool {
+		e, ok := i.(p2p.Envelope)
+		return ok && e.ChannelID == evidence.EvidenceChannel
+	})).Return(false)
 	quitChan := make(<-chan struct{})
 	p.On("Quit").Return(quitChan)
 	ps := peerState{2}
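Because `Send` now receives a single `p2p.Envelope` argument, the old `mock.AnythingOfType("[]uint8")` expectation no longer applies; testify's `mock.MatchedBy` matches on a predicate instead. A self-contained sketch of the same pattern outside the repo, where the `Notifier` and `Event` types are hypothetical stand-ins for the mocked peer and the Envelope:

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/mock"
)

// Event and Notifier are hypothetical stand-ins for p2p.Envelope and the
// mocked Peer used in the test above.
type Event struct{ Channel byte }

type Notifier struct{ mock.Mock }

func (n *Notifier) Send(e Event) bool { return n.Called(e).Bool(0) }

func TestMatchedBy(t *testing.T) {
	n := new(Notifier)
	// The expectation fires for any Event on channel 0x38, whatever its
	// other fields contain.
	n.On("Send", mock.MatchedBy(func(e Event) bool {
		return e.Channel == 0x38
	})).Return(false)

	if n.Send(Event{Channel: 0x38}) {
		t.Fatal("mocked Send should return false")
	}
	n.AssertExpectations(t)
}
```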
@@ -21,7 +21,6 @@ func (evpool *Pool) verify(evidence types.Evidence) error {
 		state          = evpool.State()
 		height         = state.LastBlockHeight
 		evidenceParams = state.ConsensusParams.Evidence
-		ageNumBlocks   = height - evidence.Height()
 	)
 
 	// verify the time of the evidence
@@ -34,10 +33,9 @@ func (evpool *Pool) verify(evidence types.Evidence) error {
 		return fmt.Errorf("evidence has a different time to the block it is associated with (%v != %v)",
 			evidence.Time(), evTime)
 	}
-	ageDuration := state.LastBlockTime.Sub(evTime)
 
-	// check that the evidence hasn't expired
-	if ageDuration > evidenceParams.MaxAgeDuration && ageNumBlocks > evidenceParams.MaxAgeNumBlocks {
+	// checking if evidence is expired calculated using the block evidence time and height
+	if IsEvidenceExpired(height, state.LastBlockTime, evidence.Height(), evTime, evidenceParams) {
 		return fmt.Errorf(
 			"evidence from height %d (created at: %v) is too old; min height is %d and evidence can not be older than %v",
 			evidence.Height(),
@@ -284,3 +282,14 @@ func getSignedHeader(blockStore BlockStore, height int64) (*types.SignedHeader,
 		Commit: commit,
 	}, nil
 }
+
+// check that the evidence hasn't expired
+func IsEvidenceExpired(heightNow int64, timeNow time.Time, heightEv int64, timeEv time.Time, evidenceParams types.EvidenceParams) bool {
+	ageDuration := timeNow.Sub(timeEv)
+	ageNumBlocks := heightNow - heightEv
+
+	if ageDuration > evidenceParams.MaxAgeDuration && ageNumBlocks > evidenceParams.MaxAgeNumBlocks {
+		return true
+	}
+	return false
+}
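The extracted helper keeps the same semantics as the inline check it replaces: evidence counts as expired only when it exceeds *both* `MaxAgeDuration` and `MaxAgeNumBlocks`, so neither fast block times nor slow ones alone can expire recent evidence. A usage sketch of the newly exported function, with illustrative parameter values only:

```go
package main

import (
	"fmt"
	"time"

	"github.com/tendermint/tendermint/evidence"
	"github.com/tendermint/tendermint/types"
)

func main() {
	params := types.EvidenceParams{
		MaxAgeNumBlocks: 100_000,
		MaxAgeDuration:  48 * time.Hour,
	}
	now := time.Now()

	// Older than both limits -> expired.
	fmt.Println(evidence.IsEvidenceExpired(
		1_200_000, now, // current height and block time
		1_050_000, now.Add(-72*time.Hour), // evidence height and time
		params,
	)) // true

	// Old in blocks but recent in time -> NOT expired, since both
	// limits must be exceeded.
	fmt.Println(evidence.IsEvidenceExpired(
		1_200_000, now,
		1_050_000, now.Add(-1*time.Hour),
		params,
	)) // false
}
```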
108 go.mod
@@ -3,7 +3,7 @@ module github.com/tendermint/tendermint
 go 1.18
 
 require (
-	github.com/BurntSushi/toml v1.2.0
+	github.com/BurntSushi/toml v1.2.1
 	github.com/adlio/schema v1.3.3
 	github.com/cenkalti/backoff v2.2.1+incompatible // indirect
 	github.com/fortytw2/leaktest v1.3.0
@@ -11,7 +11,7 @@ require (
 	github.com/go-kit/log v0.2.1
 	github.com/go-logfmt/logfmt v0.5.1
 	github.com/golang/protobuf v1.5.2
-	github.com/golangci/golangci-lint v1.49.0
+	github.com/golangci/golangci-lint v1.50.1
 	github.com/google/orderedcode v0.0.1
 	github.com/gorilla/websocket v1.5.0
 	github.com/informalsystems/tm-load-test v1.0.0
@@ -22,41 +22,42 @@ require (
 	github.com/pkg/errors v0.9.1
 	github.com/pointlander/peg v1.0.1
 	github.com/prometheus/client_golang v1.13.0
-	github.com/prometheus/client_model v0.2.0
+	github.com/prometheus/client_model v0.3.0
 	github.com/prometheus/common v0.37.0
 	github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475
 	github.com/rs/cors v1.8.2
 	github.com/sasha-s/go-deadlock v0.3.1
 	github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa
-	github.com/spf13/cobra v1.5.0
+	github.com/spf13/cobra v1.6.1
 	github.com/spf13/viper v1.13.0
-	github.com/stretchr/testify v1.8.0
+	github.com/stretchr/testify v1.8.1
 	github.com/tendermint/tm-db v0.6.6
-	golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa
-	golang.org/x/net v0.0.0-20220812174116-3211cb980234
-	google.golang.org/grpc v1.49.0
+	golang.org/x/crypto v0.1.0
+	golang.org/x/net v0.1.0
+	google.golang.org/grpc v1.50.1
 )
 
 require (
-	github.com/bufbuild/buf v1.8.0
+	github.com/bufbuild/buf v1.9.0
 	github.com/creachadair/taskgroup v0.3.2
 	github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7
 )
 
 require (
-	github.com/btcsuite/btcd/btcec/v2 v2.2.1
+	github.com/btcsuite/btcd/btcec/v2 v2.3.0
 	github.com/btcsuite/btcd/btcutil v1.1.2
+	github.com/cosmos/gogoproto v1.4.2
 	github.com/gofrs/uuid v4.3.0+incompatible
 	github.com/google/uuid v1.3.0
 	github.com/oasisprotocol/curve25519-voi v0.0.0-20220708102147-0a8a51822cae
-	github.com/vektra/mockery/v2 v2.14.0
+	github.com/vektra/mockery/v2 v2.14.1
 	gonum.org/v1/gonum v0.12.0
-	google.golang.org/protobuf v1.28.1
+	google.golang.org/protobuf v1.28.2-0.20220831092852-f930b1dc76e8
 )
 
 require (
 	4d63.com/gochecknoglobals v0.1.0 // indirect
+	github.com/Abirdcfly/dupword v0.0.7 // indirect
 	github.com/Antonboom/errname v0.1.7 // indirect
 	github.com/Antonboom/nilnil v0.1.1 // indirect
 	github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
@@ -64,9 +65,9 @@ require (
 	github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 // indirect
 	github.com/GaijinEntertainment/go-exhaustruct/v2 v2.3.0 // indirect
 	github.com/Masterminds/semver v1.5.0 // indirect
-	github.com/Microsoft/go-winio v0.5.2 // indirect
+	github.com/Microsoft/go-winio v0.6.0 // indirect
 	github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect
-	github.com/OpenPeeDeeP/depguard v1.1.0 // indirect
+	github.com/OpenPeeDeeP/depguard v1.1.1 // indirect
 	github.com/alexkohler/prealloc v1.0.0 // indirect
 	github.com/alingse/asasalint v0.0.11 // indirect
 	github.com/ashanbrown/forbidigo v1.3.0 // indirect
@@ -77,7 +78,8 @@ require (
 	github.com/bombsimon/wsl/v3 v3.3.0 // indirect
 	github.com/breml/bidichk v0.2.3 // indirect
 	github.com/breml/errchkjson v0.3.0 // indirect
-	github.com/bufbuild/connect-go v0.4.0 // indirect
+	github.com/bufbuild/connect-go v1.0.0 // indirect
+	github.com/bufbuild/protocompile v0.1.0 // indirect
 	github.com/butuzov/ireturn v0.1.1 // indirect
 	github.com/cespare/xxhash v1.1.0 // indirect
 	github.com/cespare/xxhash/v2 v2.1.2 // indirect
@@ -87,8 +89,8 @@ require (
 	github.com/containerd/continuity v0.3.0 // indirect
 	github.com/containerd/typeurl v1.0.2 // indirect
 	github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
-	github.com/curioswitch/go-reassign v0.1.2 // indirect
-	github.com/daixiang0/gci v0.6.3 // indirect
+	github.com/curioswitch/go-reassign v0.2.0 // indirect
+	github.com/daixiang0/gci v0.8.1 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect
 	github.com/denis-tingaikin/go-header v0.4.3 // indirect
@@ -96,9 +98,9 @@ require (
 	github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de // indirect
 	github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 // indirect
 	github.com/docker/distribution v2.8.1+incompatible // indirect
-	github.com/docker/docker v20.10.17+incompatible // indirect
+	github.com/docker/docker v20.10.19+incompatible // indirect
 	github.com/docker/go-connections v0.4.0 // indirect
-	github.com/docker/go-units v0.4.0 // indirect
+	github.com/docker/go-units v0.5.0 // indirect
 	github.com/dustin/go-humanize v1.0.0 // indirect
 	github.com/esimonov/ifshort v1.0.4 // indirect
 	github.com/ettle/strcase v0.1.1 // indirect
@@ -108,12 +110,12 @@ require (
 	github.com/fsnotify/fsnotify v1.5.4 // indirect
 	github.com/fzipp/gocyclo v0.6.0 // indirect
 	github.com/go-chi/chi/v5 v5.0.7 // indirect
-	github.com/go-critic/go-critic v0.6.4 // indirect
+	github.com/go-critic/go-critic v0.6.5 // indirect
 	github.com/go-logr/logr v1.2.3 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/go-toolsmith/astcast v1.0.0 // indirect
-	github.com/go-toolsmith/astcopy v1.0.1 // indirect
-	github.com/go-toolsmith/astequal v1.0.2 // indirect
+	github.com/go-toolsmith/astcopy v1.0.2 // indirect
+	github.com/go-toolsmith/astequal v1.0.3 // indirect
 	github.com/go-toolsmith/astfmt v1.0.0 // indirect
 	github.com/go-toolsmith/astp v1.0.0 // indirect
 	github.com/go-toolsmith/strparse v1.0.0 // indirect
@@ -127,14 +129,14 @@ require (
 	github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 // indirect
 	github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a // indirect
 	github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe // indirect
-	github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a // indirect
+	github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2 // indirect
 	github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 // indirect
 	github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca // indirect
 	github.com/golangci/misspell v0.3.5 // indirect
 	github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6 // indirect
 	github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 // indirect
 	github.com/google/btree v1.0.0 // indirect
-	github.com/google/go-cmp v0.5.8 // indirect
+	github.com/google/go-cmp v0.5.9 // indirect
 	github.com/gordonklaus/ineffassign v0.0.0-20210914165742-4cc7213b9bc8 // indirect
 	github.com/gostaticanalysis/analysisutil v0.7.1 // indirect
 	github.com/gostaticanalysis/comment v1.4.2 // indirect
@@ -149,15 +151,14 @@ require (
 	github.com/inconshreveable/mousetrap v1.0.1 // indirect
 	github.com/jdxcode/netrc v0.0.0-20210204082910-926c7f70242a // indirect
 	github.com/jgautheron/goconst v1.5.1 // indirect
-	github.com/jhump/protocompile v0.0.0-20220812162104-d108583e055d // indirect
-	github.com/jhump/protoreflect v1.12.1-0.20220721211354-060cc04fc18b // indirect
 	github.com/jingyugao/rowserrcheck v1.1.1 // indirect
 	github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af // indirect
 	github.com/jmhodges/levigo v1.0.0 // indirect
 	github.com/julz/importas v0.1.0 // indirect
 	github.com/kisielk/errcheck v1.6.2 // indirect
 	github.com/kisielk/gotool v1.0.0 // indirect
-	github.com/klauspost/compress v1.15.9 // indirect
+	github.com/kkHAIKE/contextcheck v1.1.3 // indirect
+	github.com/klauspost/compress v1.15.11 // indirect
 	github.com/klauspost/pgzip v1.2.5 // indirect
 	github.com/kulti/thelper v0.6.3 // indirect
 	github.com/kunwardeep/paralleltest v1.0.6 // indirect
@@ -167,6 +168,7 @@ require (
 	github.com/leonklingele/grouper v1.1.0 // indirect
 	github.com/lufeee/execinquery v1.2.1 // indirect
 	github.com/magiconair/properties v1.8.6 // indirect
+	github.com/maratori/testableexamples v1.0.0 // indirect
 	github.com/maratori/testpackage v1.1.0 // indirect
 	github.com/matoous/godox v0.0.0-20210227103229-6504466cf951 // indirect
 	github.com/mattn/go-colorable v0.1.13 // indirect
@@ -174,20 +176,20 @@ require (
 	github.com/mattn/go-runewidth v0.0.9 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
 	github.com/mbilski/exhaustivestruct v1.2.0 // indirect
-	github.com/mgechev/revive v1.2.3 // indirect
+	github.com/mgechev/revive v1.2.4 // indirect
 	github.com/mitchellh/go-homedir v1.1.0 // indirect
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
-	github.com/moby/buildkit v0.10.3 // indirect
+	github.com/moby/buildkit v0.10.4 // indirect
 	github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae // indirect
 	github.com/moricho/tparallel v0.2.1 // indirect
 	github.com/morikuni/aec v1.0.0 // indirect
 	github.com/nakabonne/nestif v0.3.1 // indirect
 	github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 // indirect
-	github.com/nishanths/exhaustive v0.8.1 // indirect
+	github.com/nishanths/exhaustive v0.8.3 // indirect
 	github.com/nishanths/predeclared v0.2.2 // indirect
 	github.com/olekukonko/tablewriter v0.0.5 // indirect
 	github.com/opencontainers/go-digest v1.0.0 // indirect
-	github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect
+	github.com/opencontainers/image-spec v1.1.0-rc2 // indirect
 	github.com/opencontainers/runc v1.1.3 // indirect
 	github.com/pelletier/go-toml v1.9.5 // indirect
 	github.com/pelletier/go-toml/v2 v2.0.5 // indirect
@@ -198,10 +200,10 @@ require (
 	github.com/pmezard/go-difflib v1.0.0 // indirect
 	github.com/pointlander/compress v1.1.1-0.20190518213731-ff44bd196cc3 // indirect
 	github.com/pointlander/jetset v1.0.1-0.20190518214125-eee7eff80bd4 // indirect
-	github.com/polyfloyd/go-errorlint v1.0.2 // indirect
+	github.com/polyfloyd/go-errorlint v1.0.5 // indirect
 	github.com/prometheus/procfs v0.8.0 // indirect
-	github.com/quasilyte/go-ruleguard v0.3.17 // indirect
-	github.com/quasilyte/gogrep v0.0.0-20220120141003-628d8b3623b5 // indirect
+	github.com/quasilyte/go-ruleguard v0.3.18 // indirect
+	github.com/quasilyte/gogrep v0.0.0-20220828223005-86e4605de09f // indirect
 	github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95 // indirect
 	github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 // indirect
 	github.com/rs/zerolog v1.27.0 // indirect
@@ -210,7 +212,7 @@ require (
 	github.com/ryanrolds/sqlclosecheck v0.3.0 // indirect
 	github.com/sanposhiho/wastedassign/v2 v2.0.6 // indirect
 	github.com/sashamelentyev/interfacebloat v1.1.0 // indirect
-	github.com/sashamelentyev/usestdlibvars v1.13.0 // indirect
+	github.com/sashamelentyev/usestdlibvars v1.20.0 // indirect
 	github.com/satori/go.uuid v1.2.0 // indirect
 	github.com/securego/gosec/v2 v2.13.1 // indirect
 	github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c // indirect
@@ -226,16 +228,15 @@ require (
 	github.com/spf13/pflag v1.0.5 // indirect
 	github.com/ssgreg/nlreturn/v2 v2.2.1 // indirect
 	github.com/stbenjam/no-sprintf-host-port v0.1.1 // indirect
-	github.com/stretchr/objx v0.4.0 // indirect
+	github.com/stretchr/objx v0.5.0 // indirect
 	github.com/subosito/gotenv v1.4.1 // indirect
-	github.com/sylvia7788/contextcheck v1.0.6 // indirect
 	github.com/tdakkota/asciicheck v0.1.1 // indirect
 	github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c // indirect
 	github.com/tetafro/godot v1.4.11 // indirect
 	github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144 // indirect
-	github.com/timonwong/logrlint v0.1.0 // indirect
-	github.com/tomarrell/wrapcheck/v2 v2.6.2 // indirect
-	github.com/tommy-muehle/go-mnd/v2 v2.5.0 // indirect
+	github.com/timonwong/loggercheck v0.9.3 // indirect
+	github.com/tomarrell/wrapcheck/v2 v2.7.0 // indirect
+	github.com/tommy-muehle/go-mnd/v2 v2.5.1 // indirect
 	github.com/ultraware/funlen v0.0.3 // indirect
 	github.com/ultraware/whitespace v0.0.5 // indirect
 	github.com/uudashr/gocognit v1.0.6 // indirect
@@ -244,26 +245,27 @@ require (
 	gitlab.com/bosi/decorder v0.2.3 // indirect
 	go.etcd.io/bbolt v1.3.6 // indirect
 	go.opencensus.io v0.23.0 // indirect
-	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.34.0 // indirect
-	go.opentelemetry.io/otel v1.9.0 // indirect
-	go.opentelemetry.io/otel/trace v1.9.0 // indirect
+	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.36.3 // indirect
+	go.opentelemetry.io/otel v1.11.0 // indirect
+	go.opentelemetry.io/otel/metric v0.32.3 // indirect
+	go.opentelemetry.io/otel/trace v1.11.0 // indirect
 	go.uber.org/atomic v1.10.0 // indirect
 	go.uber.org/multierr v1.8.0 // indirect
-	go.uber.org/zap v1.22.0 // indirect
+	go.uber.org/zap v1.23.0 // indirect
 	golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect
-	golang.org/x/exp/typeparams v0.0.0-20220613132600-b0d781184e0d // indirect
-	golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
-	golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde // indirect
-	golang.org/x/sys v0.0.0-20220818161305-2296e01440c6 // indirect
-	golang.org/x/term v0.0.0-20220722155259-a9ba230a4035 // indirect
-	golang.org/x/text v0.3.7 // indirect
-	golang.org/x/tools v0.1.12 // indirect
-	google.golang.org/genproto v0.0.0-20220725144611-272f38e5d71b // indirect
+	golang.org/x/exp/typeparams v0.0.0-20220827204233-334a2380cb91 // indirect
+	golang.org/x/mod v0.6.0 // indirect
+	golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0 // indirect
+	golang.org/x/sys v0.1.0 // indirect
+	golang.org/x/term v0.1.0 // indirect
+	golang.org/x/text v0.4.0 // indirect
+	golang.org/x/tools v0.2.0 // indirect
+	google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a // indirect
 	gopkg.in/ini.v1 v1.67.0 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 	honnef.co/go/tools v0.3.3 // indirect
-	mvdan.cc/gofumpt v0.3.1 // indirect
+	mvdan.cc/gofumpt v0.4.0 // indirect
 	mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed // indirect
 	mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b // indirect
 	mvdan.cc/unparam v0.0.0-20220706161116-678bad134442 // indirect