mirror of https://github.com/tendermint/tendermint.git
synced 2026-01-14 00:32:52 +00:00

Compare commits: main...wb/exec-tx (6 commits)

| Author | SHA1 | Date |
|---|---|---|
|  | f861062ee2 |  |
|  | b2409b3345 |  |
|  | d268e56383 |  |
|  | 2164883501 |  |
|  | 61619ab072 |  |
|  | 58effdd8b3 |  |
168
.circleci/config.yml
@@ -1,168 +0,0 @@
|
||||
version: 2.1
|
||||
|
||||
executors:
|
||||
golang:
|
||||
docker:
|
||||
- image: tendermintdev/docker-tendermint-build
|
||||
working_directory: /go/src/github.com/tendermint/tendermint
|
||||
environment:
|
||||
GOBIN: /tmp/bin
|
||||
release:
|
||||
machine: true
|
||||
docs:
|
||||
docker:
|
||||
- image: tendermintdev/docker-website-deployment
|
||||
environment:
|
||||
AWS_REGION: us-east-1
|
||||
|
||||
commands:
|
||||
run_test:
|
||||
parameters:
|
||||
script_path:
|
||||
type: string
|
||||
steps:
|
||||
- attach_workspace:
|
||||
at: /tmp/bin
|
||||
- restore_cache:
|
||||
name: "Restore source code cache"
|
||||
keys:
|
||||
- go-src-v1-{{ .Revision }}
|
||||
- checkout
|
||||
- restore_cache:
|
||||
name: "Restore go modules cache"
|
||||
keys:
|
||||
- go-mod-v1-{{ checksum "go.sum" }}
|
||||
- run:
|
||||
name: "Running test"
|
||||
command: |
|
||||
bash << parameters.script_path >>
|
||||
jobs:
|
||||
setup_dependencies:
|
||||
executor: golang
|
||||
steps:
|
||||
- checkout
|
||||
- restore_cache:
|
||||
name: "Restore go modules cache"
|
||||
keys:
|
||||
- go-mod-v1-{{ checksum "go.sum" }}
|
||||
- run:
|
||||
command: |
|
||||
mkdir -p /tmp/bin
|
||||
- run:
|
||||
name: Cache go modules
|
||||
command: make go-mod-cache
|
||||
- run:
|
||||
name: tools
|
||||
command: make tools
|
||||
- run:
|
||||
name: "Build binaries"
|
||||
command: make install install_abci
|
||||
- save_cache:
|
||||
name: "Save go modules cache"
|
||||
key: go-mod-v1-{{ checksum "go.sum" }}
|
||||
paths:
|
||||
- "/go/pkg/mod"
|
||||
- save_cache:
|
||||
name: "Save source code cache"
|
||||
key: go-src-v1-{{ .Revision }}
|
||||
paths:
|
||||
- ".git"
|
||||
- persist_to_workspace:
|
||||
root: "/tmp/bin"
|
||||
paths:
|
||||
- "."
|
||||
|
||||
deploy_docs:
|
||||
executor: docs
|
||||
steps:
|
||||
- checkout
|
||||
- run:
|
||||
name: "Pull versions"
|
||||
command: git fetch origin v0.32 v0.33
|
||||
- run:
|
||||
name: "Build docs"
|
||||
command: make build-docs
|
||||
- run:
|
||||
name: "Sync to S3"
|
||||
command: make sync-docs
|
||||
|
||||
prepare_build:
|
||||
executor: golang
|
||||
steps:
|
||||
- restore_cache:
|
||||
name: "Restore source code cache"
|
||||
keys:
|
||||
- go-src-v1-{{ .Revision }}
|
||||
- checkout
|
||||
- run:
|
||||
name: Get next release number
|
||||
command: |
|
||||
export LAST_TAG="`git describe --tags --abbrev=0 --match "${CIRCLE_BRANCH}.*"`"
|
||||
echo "Last tag: ${LAST_TAG}"
|
||||
if [ -z "${LAST_TAG}" ]; then
|
||||
export LAST_TAG="${CIRCLE_BRANCH}"
|
||||
echo "Last tag not found. Possibly fresh branch or feature branch. Setting ${LAST_TAG} as tag."
|
||||
fi
|
||||
export NEXT_TAG="`python -u scripts/release_management/bump-semver.py --version "${LAST_TAG}"`"
|
||||
echo "Next tag: ${NEXT_TAG}"
|
||||
echo "export CIRCLE_TAG=\"${NEXT_TAG}\"" > release-version.source
|
||||
- run:
|
||||
name: Build dependencies
|
||||
command: make tools
|
||||
- persist_to_workspace:
|
||||
root: .
|
||||
paths:
|
||||
- "release-version.source"
|
||||
- save_cache:
|
||||
key: v2-release-deps-{{ checksum "go.sum" }}
|
||||
paths:
|
||||
- "/go/pkg/mod"
|
||||
|
||||
# # Test RPC implementation against the swagger documented specs
|
||||
# contract_tests:
|
||||
# working_directory: /home/circleci/.go_workspace/src/github.com/tendermint/tendermint
|
||||
# machine:
|
||||
# image: circleci/classic:latest
|
||||
# environment:
|
||||
# GOBIN: /home/circleci/.go_workspace/bin
|
||||
# GOPATH: /home/circleci/.go_workspace/
|
||||
# GOOS: linux
|
||||
# GOARCH: amd64
|
||||
# parallelism: 1
|
||||
# steps:
|
||||
# - checkout
|
||||
# - run:
|
||||
# name: Test RPC endpoints against swagger documentation
|
||||
# command: |
|
||||
# set -x
|
||||
# export PATH=~/.local/bin:$PATH
|
||||
# # install node and dredd
|
||||
# ./scripts/get_nodejs.sh
|
||||
# # build the binaries with a proper version of Go
|
||||
# docker run --rm -v "$PWD":/go/src/github.com/tendermint/tendermint -w /go/src/github.com/tendermint/tendermint golang make build-linux build-contract-tests-hooks
|
||||
# # This docker image works with go 1.7, we can install here the hook handler that contract-tests is going to use
|
||||
# go get github.com/snikch/goodman/cmd/goodman
|
||||
# make contract-tests
|
||||
|
||||
workflows:
|
||||
version: 2
|
||||
docs:
|
||||
jobs:
|
||||
- deploy_docs:
|
||||
context: tendermint-docs
|
||||
filters:
|
||||
branches:
|
||||
only:
|
||||
- master
|
||||
tags:
|
||||
only:
|
||||
- /^v.*/
|
||||
- deploy_docs:
|
||||
context: tendermint-docs-staging
|
||||
filters:
|
||||
branches:
|
||||
only:
|
||||
- docs-staging
|
||||
# - contract_tests:
|
||||
# requires:
|
||||
# - setup_dependencies
|
||||
3
.github/CODEOWNERS
vendored
@@ -7,5 +7,6 @@
# global owners are only requested if there isn't a more specific
# codeowner specified below. For this reason, the global codeowners
# are often repeated in package-level definitions.
* @ebuchman @tendermint/tendermint-engineering @adizere @lasarojc
* @ebuchman @tendermint/tendermint-engineering

/spec @ebuchman @tendermint/tendermint-research @tendermint/tendermint-engineering

11
.github/ISSUE_TEMPLATE/bug-report.md
vendored
@@ -1,13 +1,18 @@
---
name: Bug Report
name: Bug report
about: Create a report to help us squash bugs!

---
<!--

Please fill in as much of the template below as you can.

Be ready for followup questions, and please respond in a timely
manner. We might ask you to provide additional logs and data (tendermint & app).
If you have general questions, please create a new discussion:
https://github.com/tendermint/tendermint/discussions

Be ready for followup questions, and please respond in a timely manner. We might
ask you to provide additional logs and data (tendermint & app).

-->

**Tendermint version** (use `tendermint version` or `git rev-parse --verify HEAD` if installed from source):

5
.github/ISSUE_TEMPLATE/config.yml
vendored
Normal file
@@ -0,0 +1,5 @@
blank_issues_enabled: true
contact_links:
  - name: Ask a question
    url: https://github.com/tendermint/tendermint/discussions
    about: Please ask and answer questions here

11
.github/ISSUE_TEMPLATE/feature-request.md
vendored
@@ -1,5 +1,5 @@
---
name: Feature Request
name: Feature request
about: Create a proposal to request a feature

---
@@ -25,12 +25,3 @@ Are there any disadvantages of including this feature? -->
## Proposal

<!-- Detailed description of requirements of implementation -->

____

#### For Admin Use

- [ ] Not duplicate issue
- [ ] Appropriate labels applied
- [ ] Appropriate contributors tagged
- [ ] Contributor assigned/self-assigned

11
.github/ISSUE_TEMPLATE/proposal.md
vendored
@@ -1,5 +1,5 @@
---
name: Protocol Change Proposal
name: Protocol change proposal
about: Create a proposal to request a change to the protocol

---
@@ -26,12 +26,3 @@ Are there any disadvantages of including this change? -->
## Proposal

<!-- Detailed description of requirements of implementation -->

____

#### For Admin Use

- [ ] Not duplicate issue
- [ ] Appropriate labels applied
- [ ] Appropriate contributors tagged
- [ ] Contributor assigned/self-assigned

33
.github/PULL_REQUEST_TEMPLATE.md
vendored
@@ -1,7 +1,32 @@
## Description
<!--

_Please add a description of the changes that this PR introduces and the files that
are the most critical to review._
Please add a reference to the issue that this PR addresses and indicate which
files are most critical to review. If it fully addresses a particular issue,
please include "Closes #XXX" (where "XXX" is the issue number).

Closes: #XXX
If this PR is non-trivial/large/complex, please ensure that you have either
created an issue that the team's had a chance to respond to, or had some
discussion with the team prior to submitting substantial pull requests. The team
can be reached via GitHub Discussions or the Cosmos Network Discord server in
the #tendermint-core channel. GitHub Discussions is preferred over Discord as it
allows us to keep track of conversations topically.
https://github.com/tendermint/tendermint/discussions

If the work in this PR is not aligned with the team's current priorities, please
be advised that it may take some time before it is merged - especially if it has
not yet been discussed with the team.

See the project board for the team's current priorities:
https://github.com/orgs/tendermint/projects/15/views/5

-->

---

#### PR checklist

- [ ] Tests written/updated, or no tests needed
- [ ] `CHANGELOG_PENDING.md` updated, or no changelog entry needed
- [ ] Updated relevant documentation (`docs/`) and code comments, or no
  documentation updates needed

|
||||
16
.github/auto-comment.yml
vendored
16
.github/auto-comment.yml
vendored
@@ -1,16 +0,0 @@
|
||||
pullRequestOpened: |
|
||||
:wave: Thanks for creating a PR!
|
||||
|
||||
Before we can merge this PR, please make sure that all the following items have been
|
||||
checked off. If any of the checklist items are not applicable, please leave them but
|
||||
write a little note why.
|
||||
|
||||
- [ ] Wrote tests
|
||||
- [ ] Updated CHANGELOG_PENDING.md
|
||||
- [ ] Linked to Github issue with discussion and accepted design OR link to spec that describes this work.
|
||||
- [ ] Updated relevant documentation (`docs/`) and code comments
|
||||
- [ ] Re-reviewed `Files changed` in the Github PR explorer
|
||||
- [ ] Applied Appropriate Labels
|
||||
|
||||
|
||||
Thank you for your contribution to Tendermint! :rocket:
|
||||
25
.github/codecov.yml
vendored
Normal file
25
.github/codecov.yml
vendored
Normal file
@@ -0,0 +1,25 @@
|
||||
coverage:
|
||||
precision: 2
|
||||
round: down
|
||||
range: "70...100"
|
||||
status:
|
||||
project:
|
||||
default:
|
||||
threshold: 20%
|
||||
patch: off
|
||||
changes: off
|
||||
|
||||
github_checks:
|
||||
annotations: false
|
||||
|
||||
comment: false
|
||||
|
||||
ignore:
|
||||
- "docs"
|
||||
- "DOCKER"
|
||||
- "scripts"
|
||||
- "**/*.pb.go"
|
||||
- "libs/pubsub/query/query.peg.go"
|
||||
- "*.md"
|
||||
- "*.rst"
|
||||
- "*.yml"
|
||||
51
.github/dependabot.yml
vendored
Normal file
51
.github/dependabot.yml
vendored
Normal file
@@ -0,0 +1,51 @@
|
||||
version: 2
|
||||
updates:
|
||||
- package-ecosystem: github-actions
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: weekly
|
||||
target-branch: "main"
|
||||
open-pull-requests-limit: 10
|
||||
labels:
|
||||
- T:dependencies
|
||||
- S:automerge
|
||||
|
||||
- package-ecosystem: github-actions
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: weekly
|
||||
target-branch: "v0.34.x"
|
||||
open-pull-requests-limit: 10
|
||||
labels:
|
||||
- T:dependencies
|
||||
- S:automerge
|
||||
|
||||
- package-ecosystem: npm
|
||||
directory: "/docs"
|
||||
schedule:
|
||||
interval: weekly
|
||||
open-pull-requests-limit: 10
|
||||
|
||||
###################################
|
||||
##
|
||||
## Update All Go Dependencies
|
||||
|
||||
- package-ecosystem: gomod
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: weekly
|
||||
target-branch: "main"
|
||||
open-pull-requests-limit: 10
|
||||
labels:
|
||||
- T:dependencies
|
||||
- S:automerge
|
||||
|
||||
- package-ecosystem: gomod
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: weekly
|
||||
target-branch: "v0.34.x"
|
||||
open-pull-requests-limit: 10
|
||||
labels:
|
||||
- T:dependencies
|
||||
- S:automerge
|
||||
6
.github/issue_template.md
vendored
Normal file
@@ -0,0 +1,6 @@
<!--

If you want to ask a general question, please create a new discussion instead of
an issue: https://github.com/tendermint/tendermint/discussions

-->

15
.github/linters/markdownlint.yml
vendored
Normal file
15
.github/linters/markdownlint.yml
vendored
Normal file
@@ -0,0 +1,15 @@
|
||||
# markdownlint configuration for Super-Linter
|
||||
# - https://github.com/DavidAnson/markdownlint
|
||||
# - https://github.com/github/super-linter
|
||||
|
||||
# Default state for all rules
|
||||
default: true
|
||||
|
||||
# See https://github.com/DavidAnson/markdownlint#rules--aliases for rules
|
||||
MD007: {"indent": 4}
|
||||
MD013: false
|
||||
MD024: {siblings_only: true}
|
||||
MD025: false
|
||||
MD033: {no-inline-html: false}
|
||||
no-hard-tabs: false
|
||||
whitespace: false
|
||||
9
.github/linters/yaml-lint.yml
vendored
Normal file
9
.github/linters/yaml-lint.yml
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
---
|
||||
# Default rules for YAML linting from super-linter.
|
||||
# See: See https://yamllint.readthedocs.io/en/stable/rules.html
|
||||
extends: default
|
||||
rules:
|
||||
document-end: disable
|
||||
document-start: disable
|
||||
line-length: disable
|
||||
truthy: disable
|
||||
14
.github/mergify.yml
vendored
14
.github/mergify.yml
vendored
@@ -1,13 +1,13 @@
|
||||
queue_rules:
|
||||
- name: default
|
||||
conditions:
|
||||
- base=v0.34.x
|
||||
- base=main
|
||||
- label=S:automerge
|
||||
|
||||
pull_request_rules:
|
||||
- name: Automerge to v0.34.x
|
||||
- name: Automerge to main
|
||||
conditions:
|
||||
- base=v0.34.x
|
||||
- base=main
|
||||
- label=S:automerge
|
||||
actions:
|
||||
queue:
|
||||
@@ -17,3 +17,11 @@ pull_request_rules:
|
||||
{{ title }} (#{{ number }})
|
||||
|
||||
{{ body }}
|
||||
- name: backport patches to v0.34.x branch
|
||||
conditions:
|
||||
- base=main
|
||||
- label=S:backport-to-v0.34.x
|
||||
actions:
|
||||
backport:
|
||||
branches:
|
||||
- v0.34.x
|
||||
|
||||
82
.github/workflows/build.yml
vendored
Normal file
82
.github/workflows/build.yml
vendored
Normal file
@@ -0,0 +1,82 @@
|
||||
name: Build
|
||||
# Tests runs different tests (test_abci_apps, test_abci_cli, test_apps)
|
||||
# This workflow runs on every push to main or release branch and every pull requests
|
||||
# All jobs will pass without running if no *{.go, .mod, .sum} files have been modified
|
||||
on:
|
||||
pull_request:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
- release/**
|
||||
|
||||
jobs:
|
||||
build:
|
||||
name: Build
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
goarch: ["arm", "amd64"]
|
||||
goos: ["linux"]
|
||||
timeout-minutes: 5
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: "1.17"
|
||||
- uses: actions/checkout@v3
|
||||
- uses: technote-space/get-diff-action@v6
|
||||
with:
|
||||
PATTERNS: |
|
||||
**/*.go
|
||||
"!test/"
|
||||
go.mod
|
||||
go.sum
|
||||
Makefile
|
||||
- name: install
|
||||
run: GOOS=${{ matrix.goos }} GOARCH=${{ matrix.goarch }} make build
|
||||
if: "env.GIT_DIFF != ''"
|
||||
|
||||
test_abci_cli:
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
timeout-minutes: 5
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: "1.17"
|
||||
- uses: actions/checkout@v3
|
||||
- uses: technote-space/get-diff-action@v6
|
||||
with:
|
||||
PATTERNS: |
|
||||
**/*.go
|
||||
go.mod
|
||||
go.sum
|
||||
- name: install
|
||||
run: make install_abci
|
||||
if: "env.GIT_DIFF != ''"
|
||||
- run: abci/tests/test_cli/test.sh
|
||||
shell: bash
|
||||
if: "env.GIT_DIFF != ''"
|
||||
|
||||
test_apps:
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
timeout-minutes: 5
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: "1.17"
|
||||
- uses: actions/checkout@v3
|
||||
- uses: technote-space/get-diff-action@v6
|
||||
with:
|
||||
PATTERNS: |
|
||||
**/*.go
|
||||
go.mod
|
||||
go.sum
|
||||
- name: install
|
||||
run: make install install_abci
|
||||
if: "env.GIT_DIFF != ''"
|
||||
- name: test_apps
|
||||
run: test/app/test.sh
|
||||
shell: bash
|
||||
if: "env.GIT_DIFF != ''"
|
||||
6
.github/workflows/check-generated.yml
vendored
6
.github/workflows/check-generated.yml
vendored
@@ -7,7 +7,7 @@ name: Check generated code
|
||||
on:
|
||||
pull_request:
|
||||
branches:
|
||||
- main
|
||||
- v0.34.x
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
@@ -18,7 +18,7 @@ jobs:
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: '1.18'
|
||||
go-version: '1.17'
|
||||
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
@@ -44,7 +44,7 @@ jobs:
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: '1.18'
|
||||
go-version: '1.17'
|
||||
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
|
||||
123
.github/workflows/coverage.yml
vendored
123
.github/workflows/coverage.yml
vendored
@@ -1,123 +0,0 @@
|
||||
name: Test Coverage
|
||||
on:
|
||||
pull_request:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
- release/**
|
||||
|
||||
jobs:
|
||||
split-test-files:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: Create a file with all the pkgs
|
||||
run: go list ./... > pkgs.txt
|
||||
- name: Split pkgs into 4 files
|
||||
run: split -d -n l/4 pkgs.txt pkgs.txt.part.
|
||||
# cache multiple
|
||||
- uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: "${{ github.sha }}-00"
|
||||
path: ./pkgs.txt.part.00
|
||||
- uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: "${{ github.sha }}-01"
|
||||
path: ./pkgs.txt.part.01
|
||||
- uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: "${{ github.sha }}-02"
|
||||
path: ./pkgs.txt.part.02
|
||||
- uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: "${{ github.sha }}-03"
|
||||
path: ./pkgs.txt.part.03
|
||||
|
||||
build-linux:
|
||||
name: Build
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
goarch: ["arm", "amd64"]
|
||||
timeout-minutes: 5
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: "1.18"
|
||||
- uses: actions/checkout@v3
|
||||
- uses: technote-space/get-diff-action@v6
|
||||
with:
|
||||
PATTERNS: |
|
||||
**/**.go
|
||||
go.mod
|
||||
go.sum
|
||||
- name: install
|
||||
run: GOOS=linux GOARCH=${{ matrix.goarch }} make build
|
||||
if: "env.GIT_DIFF != ''"
|
||||
|
||||
tests:
|
||||
runs-on: ubuntu-latest
|
||||
needs: split-test-files
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
part: ["00", "01", "02", "03"]
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: "1.18"
|
||||
- uses: actions/checkout@v3
|
||||
- uses: technote-space/get-diff-action@v6
|
||||
with:
|
||||
PATTERNS: |
|
||||
**/**.go
|
||||
go.mod
|
||||
go.sum
|
||||
- uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: "${{ github.sha }}-${{ matrix.part }}"
|
||||
if: env.GIT_DIFF
|
||||
- name: test & coverage report creation
|
||||
run: |
|
||||
cat pkgs.txt.part.${{ matrix.part }} | xargs go test -mod=readonly -timeout 8m -race -coverprofile=${{ matrix.part }}profile.out -covermode=atomic
|
||||
if: env.GIT_DIFF
|
||||
- uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: "${{ github.sha }}-${{ matrix.part }}-coverage"
|
||||
path: ./${{ matrix.part }}profile.out
|
||||
|
||||
upload-coverage-report:
|
||||
runs-on: ubuntu-latest
|
||||
needs: tests
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: technote-space/get-diff-action@v6
|
||||
with:
|
||||
PATTERNS: |
|
||||
**/**.go
|
||||
go.mod
|
||||
go.sum
|
||||
- uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: "${{ github.sha }}-00-coverage"
|
||||
if: env.GIT_DIFF
|
||||
- uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: "${{ github.sha }}-01-coverage"
|
||||
if: env.GIT_DIFF
|
||||
- uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: "${{ github.sha }}-02-coverage"
|
||||
if: env.GIT_DIFF
|
||||
- uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: "${{ github.sha }}-03-coverage"
|
||||
if: env.GIT_DIFF
|
||||
- run: |
|
||||
cat ./*profile.out | grep -v "mode: atomic" >> coverage.txt
|
||||
if: env.GIT_DIFF
|
||||
- uses: codecov/codecov-action@v3
|
||||
with:
|
||||
file: ./coverage.txt
|
||||
if: env.GIT_DIFF
|
||||
21
.github/workflows/docker.yml
vendored
21
.github/workflows/docker.yml
vendored
@@ -1,14 +1,13 @@
|
||||
name: Build & Push
|
||||
# Build & Push rebuilds the tendermint docker image on every push to master and creation of tags
|
||||
# and pushes the image to https://hub.docker.com/r/interchainio/simapp/tags
|
||||
name: Docker
|
||||
# Build & Push rebuilds the Tendermint docker image on every push to main and creation of tags
|
||||
# and pushes the image to https://hub.docker.com/r/tendermint/tendermint
|
||||
on:
|
||||
pull_request:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
- main
|
||||
tags:
|
||||
- "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10
|
||||
- "v[0-9]+.[0-9]+.[0-9]+-rc*" # Push events to matching v*, i.e. v1.0-rc1, v20.15.10-rc5
|
||||
- "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10
|
||||
- "v[0-9]+.[0-9]+.[0-9]+-rc*" # Push events to matching v*, i.e. v1.0-rc1, v20.15.10-rc5
|
||||
|
||||
jobs:
|
||||
build:
|
||||
@@ -39,18 +38,18 @@ jobs:
|
||||
with:
|
||||
platforms: all
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
- name: Set up Docker Build
|
||||
uses: docker/setup-buildx-action@v2.0.0
|
||||
|
||||
- name: Login to DockerHub
|
||||
if: ${{ github.event_name != 'pull_request' }}
|
||||
uses: docker/login-action@v2
|
||||
uses: docker/login-action@v2.0.0
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
|
||||
- name: Publish to Docker Hub
|
||||
uses: docker/build-push-action@v3
|
||||
uses: docker/build-push-action@v3.1.0
|
||||
with:
|
||||
context: .
|
||||
file: ./DOCKER/Dockerfile
|
||||
|
||||
62
.github/workflows/docs-deployment.yml
vendored
Normal file
62
.github/workflows/docs-deployment.yml
vendored
Normal file
@@ -0,0 +1,62 @@
|
||||
# Build and deploy the docs.tendermint.com website content.
|
||||
# The static content is published to GitHub Pages.
|
||||
#
|
||||
# For documentation build info, see docs/DOCS_README.md.
|
||||
name: Build static documentation site
|
||||
on:
|
||||
workflow_dispatch: # allow manual updates
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
paths:
|
||||
- docs/**
|
||||
- spec/**
|
||||
|
||||
jobs:
|
||||
# This is split into two jobs so that the build, which runs npm, does not
|
||||
# have write access to anything. The deploy requires write access to publish
|
||||
# to the branch used by GitHub Pages, however, so we can't just make the
|
||||
# whole workflow read-only.
|
||||
build:
|
||||
name: VuePress build
|
||||
runs-on: ubuntu-latest
|
||||
container:
|
||||
image: alpine:latest
|
||||
permissions:
|
||||
contents: read
|
||||
steps:
|
||||
- name: Install generator dependencies
|
||||
run: |
|
||||
apk add --no-cache make bash git npm
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
# We need to fetch full history so the backport branches for previous
|
||||
# versions will be available for the build.
|
||||
fetch-depth: 0
|
||||
- name: Build documentation
|
||||
run: |
|
||||
git config --global --add safe.directory "$PWD"
|
||||
make build-docs
|
||||
- uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: build-output
|
||||
path: ~/output/
|
||||
|
||||
deploy:
|
||||
name: Deploy to GitHub Pages
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
permissions:
|
||||
contents: write
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: build-output
|
||||
path: ~/output
|
||||
- name: Deploy to GitHub Pages
|
||||
uses: JamesIves/github-pages-deploy-action@v4
|
||||
with:
|
||||
branch: 'docs-tendermint-com'
|
||||
folder: ~/output
|
||||
single-commit: true
|
||||
21
.github/workflows/docs-toc.yml
vendored
Normal file
21
.github/workflows/docs-toc.yml
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
# TODO(thane): Re-enable once we've pulled in the ADRs and RFCs from master.
|
||||
# Verify that important design docs have ToC entries.
|
||||
#name: Check documentation ToC
|
||||
#on:
|
||||
# pull_request:
|
||||
# push:
|
||||
# branches:
|
||||
# - main
|
||||
#
|
||||
#jobs:
|
||||
# check:
|
||||
# runs-on: ubuntu-latest
|
||||
# steps:
|
||||
# - uses: actions/checkout@v3
|
||||
# - uses: technote-space/get-diff-action@v6
|
||||
# with:
|
||||
# PATTERNS: |
|
||||
# docs/architecture/**
|
||||
# docs/rfc/**
|
||||
# - run: ./docs/presubmit.sh
|
||||
# if: env.GIT_DIFF
|
||||
7
.github/workflows/e2e-manual.yml
vendored
7
.github/workflows/e2e-manual.yml
vendored
@@ -1,4 +1,5 @@
|
||||
# Manually run randomly generated E2E testnets (as nightly).
|
||||
# Runs randomly generated E2E testnets nightly on main
|
||||
# manually run e2e tests
|
||||
name: e2e-manual
|
||||
on:
|
||||
workflow_dispatch:
|
||||
@@ -16,7 +17,7 @@ jobs:
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: '1.18'
|
||||
go-version: '1.17'
|
||||
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
@@ -28,7 +29,7 @@ jobs:
|
||||
- name: Generate testnets
|
||||
working-directory: test/e2e
|
||||
# When changing -g, also change the matrix groups above
|
||||
run: ./build/generator -g 4 -d networks/nightly/
|
||||
run: ./build/generator -g 5 -d networks/nightly/
|
||||
|
||||
- name: Run ${{ matrix.p2p }} p2p testnets
|
||||
working-directory: test/e2e
|
||||
|
||||
32
.github/workflows/e2e-nightly-34x.yml
vendored
32
.github/workflows/e2e-nightly-34x.yml
vendored
@@ -1,12 +1,10 @@
|
||||
# Runs randomly generated E2E testnets nightly
|
||||
# on the 0.34.x release branch
|
||||
# Runs randomly generated E2E testnets nightly on the 0.34.x branch.
|
||||
|
||||
# !! If you change something in this file, you probably want
|
||||
# to update the e2e-nightly-master workflow as well!
|
||||
# !! This file should be kept in sync with the e2e-nightly-main.yml file,
|
||||
# modulo changes to the version labels.
|
||||
|
||||
name: e2e-nightly-34x
|
||||
on:
|
||||
workflow_dispatch: # allow running workflow manually, in theory
|
||||
schedule:
|
||||
- cron: '0 2 * * *'
|
||||
|
||||
@@ -17,13 +15,13 @@ jobs:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
group: ['00', '01', '02', '03']
|
||||
group: ['00', '01', '02', '03', "04"]
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: '1.18'
|
||||
go-version: '1.17'
|
||||
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
@@ -37,7 +35,7 @@ jobs:
|
||||
- name: Generate testnets
|
||||
working-directory: test/e2e
|
||||
# When changing -g, also change the matrix groups above
|
||||
run: ./build/generator -g 4 -d networks/nightly
|
||||
run: ./build/generator -g 2 -d networks/nightly
|
||||
|
||||
- name: Run testnets in group ${{ matrix.group }}
|
||||
working-directory: test/e2e
|
||||
@@ -49,7 +47,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Notify Slack on failure
|
||||
uses: rtCamp/action-slack-notify@f565a63638bd3615e76249bffab00fcb9dab90f7
|
||||
uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
|
||||
env:
|
||||
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
|
||||
SLACK_CHANNEL: tendermint-internal
|
||||
@@ -58,19 +56,3 @@ jobs:
|
||||
SLACK_COLOR: danger
|
||||
SLACK_MESSAGE: Nightly E2E tests failed on v0.34.x
|
||||
SLACK_FOOTER: ''
|
||||
|
||||
e2e-nightly-success: # may turn this off once they seem to pass consistently
|
||||
needs: e2e-nightly-test
|
||||
if: ${{ success() }}
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Notify Slack on success
|
||||
uses: rtCamp/action-slack-notify@f565a63638bd3615e76249bffab00fcb9dab90f7
|
||||
env:
|
||||
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
|
||||
SLACK_CHANNEL: tendermint-internal
|
||||
SLACK_USERNAME: Nightly E2E Tests
|
||||
SLACK_ICON_EMOJI: ':white_check_mark:'
|
||||
SLACK_COLOR: good
|
||||
SLACK_MESSAGE: Nightly E2E tests passed on v0.34.x
|
||||
SLACK_FOOTER: ''
|
||||
|
||||
@@ -1,73 +1,73 @@
|
||||
# Runs randomly generated E2E testnets nightly on master
|
||||
# Runs randomly generated E2E testnets nightly on main
|
||||
|
||||
# !! If you change something in this file, you probably want
|
||||
# to update the e2e-nightly-34x workflow as well!
|
||||
# !! Relevant changes to this file should be propagated to the e2e-nightly-<V>x
|
||||
# files for the supported backport branches, when appropriate, modulo version
|
||||
# markers.
|
||||
|
||||
name: e2e-nightly-master
|
||||
name: e2e-nightly-main
|
||||
on:
|
||||
workflow_dispatch: # allow running workflow manually
|
||||
schedule:
|
||||
- cron: '0 2 * * *'
|
||||
|
||||
jobs:
|
||||
e2e-nightly-test-2:
|
||||
e2e-nightly-test:
|
||||
# Run parallel jobs for the listed testnet groups (must match the
|
||||
# ./build/generator -g flag)
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
group: ['00', '01', '02', '03']
|
||||
group: ['00', '01', '02', '03', "04"]
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: '1.18'
|
||||
go-version: '1.17'
|
||||
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: Build
|
||||
working-directory: test/e2e
|
||||
# Run make jobs in parallel, since we can't run steps in parallel.
|
||||
run: make -j2 docker generator runner
|
||||
run: make -j2 docker generator runner tests
|
||||
|
||||
- name: Generate testnets
|
||||
working-directory: test/e2e
|
||||
# When changing -g, also change the matrix groups above
|
||||
run: ./build/generator -g 4 -d networks/nightly
|
||||
run: ./build/generator -g 5 -d networks/nightly/
|
||||
|
||||
- name: Run testnets in group ${{ matrix.group }}
|
||||
- name: Run ${{ matrix.p2p }} p2p testnets
|
||||
working-directory: test/e2e
|
||||
run: ./run-multiple.sh networks/nightly/*-group${{ matrix.group }}-*.toml
|
||||
|
||||
e2e-nightly-fail-2:
|
||||
needs: e2e-nightly-test-2
|
||||
e2e-nightly-fail:
|
||||
needs: e2e-nightly-test
|
||||
if: ${{ failure() }}
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Notify Slack on failure
|
||||
uses: rtCamp/action-slack-notify@f565a63638bd3615e76249bffab00fcb9dab90f7
|
||||
uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
|
||||
env:
|
||||
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
|
||||
SLACK_CHANNEL: tendermint-internal
|
||||
SLACK_USERNAME: Nightly E2E Tests
|
||||
SLACK_ICON_EMOJI: ':skull:'
|
||||
SLACK_COLOR: danger
|
||||
SLACK_MESSAGE: Nightly E2E tests failed on master
|
||||
SLACK_MESSAGE: Nightly E2E tests failed on main
|
||||
SLACK_FOOTER: ''
|
||||
|
||||
e2e-nightly-success: # may turn this off once they seem to pass consistently
|
||||
needs: e2e-nightly-test-2
|
||||
e2e-nightly-success: # may turn this off once they seem to pass consistently
|
||||
needs: e2e-nightly-test
|
||||
if: ${{ success() }}
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Notify Slack on success
|
||||
uses: rtCamp/action-slack-notify@f565a63638bd3615e76249bffab00fcb9dab90f7
|
||||
uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
|
||||
env:
|
||||
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
|
||||
SLACK_CHANNEL: tendermint-internal
|
||||
SLACK_USERNAME: Nightly E2E Tests
|
||||
SLACK_ICON_EMOJI: ':white_check_mark:'
|
||||
SLACK_COLOR: good
|
||||
SLACK_MESSAGE: Nightly E2E tests passed on master
|
||||
SLACK_MESSAGE: Nightly E2E tests passed on main
|
||||
SLACK_FOOTER: ''
|
||||
16
.github/workflows/e2e.yml
vendored
16
.github/workflows/e2e.yml
vendored
@@ -1,11 +1,12 @@
|
||||
name: e2e
|
||||
# Runs the CI end-to-end test network on all pushes to master or release branches
|
||||
# Runs the CI end-to-end test network on all pushes to main or release branches
|
||||
# and every pull request, but only if any Go files have been changed.
|
||||
on:
|
||||
workflow_dispatch: # allow running workflow manually
|
||||
pull_request:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
- main
|
||||
- release/**
|
||||
|
||||
jobs:
|
||||
@@ -15,7 +16,7 @@ jobs:
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: '1.18'
|
||||
go-version: '1.17'
|
||||
- uses: actions/checkout@v3
|
||||
- uses: technote-space/get-diff-action@v6
|
||||
with:
|
||||
@@ -27,15 +28,10 @@ jobs:
|
||||
- name: Build
|
||||
working-directory: test/e2e
|
||||
# Run two make jobs in parallel, since we can't run steps in parallel.
|
||||
run: make -j2 docker runner
|
||||
run: make -j2 docker runner tests
|
||||
if: "env.GIT_DIFF != ''"
|
||||
|
||||
- name: Run CI testnet
|
||||
working-directory: test/e2e
|
||||
run: ./build/runner -f networks/ci.toml
|
||||
run: ./run-multiple.sh networks/ci.toml
|
||||
if: "env.GIT_DIFF != ''"
|
||||
|
||||
- name: Emit logs on failure
|
||||
if: ${{ failure() }}
|
||||
working-directory: test/e2e
|
||||
run: ./build/runner -f networks/ci.toml logs
|
||||
|
||||
18
.github/workflows/fuzz-nightly.yml
vendored
18
.github/workflows/fuzz-nightly.yml
vendored
@@ -1,9 +1,13 @@
|
||||
# Runs fuzzing nightly.
|
||||
name: fuzz-nightly
|
||||
name: Fuzz Tests
|
||||
on:
|
||||
workflow_dispatch: # allow running workflow manually
|
||||
workflow_dispatch: # allow running workflow manually
|
||||
schedule:
|
||||
- cron: '0 3 * * *'
|
||||
pull_request:
|
||||
branches: [main]
|
||||
paths:
|
||||
- "test/fuzz/**/*.go"
|
||||
|
||||
jobs:
|
||||
fuzz-nightly-test:
|
||||
@@ -11,13 +15,13 @@ jobs:
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: '1.18'
|
||||
go-version: '1.17'
|
||||
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: Install go-fuzz
|
||||
working-directory: test/fuzz
|
||||
run: go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-build
|
||||
run: go install github.com/dvyukov/go-fuzz/go-fuzz@latest github.com/dvyukov/go-fuzz/go-fuzz-build@latest
|
||||
|
||||
- name: Fuzz mempool
|
||||
working-directory: test/fuzz
|
||||
@@ -49,14 +53,14 @@ jobs:
|
||||
with:
|
||||
name: crashers
|
||||
path: test/fuzz/**/crashers
|
||||
retention-days: 1
|
||||
retention-days: 3
|
||||
|
||||
- name: Archive suppressions
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: suppressions
|
||||
path: test/fuzz/**/suppressions
|
||||
retention-days: 1
|
||||
retention-days: 3
|
||||
|
||||
- name: Set crashers count
|
||||
working-directory: test/fuzz
|
||||
@@ -72,7 +76,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Notify Slack if any crashers
|
||||
uses: rtCamp/action-slack-notify@f565a63638bd3615e76249bffab00fcb9dab90f7
|
||||
uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
|
||||
env:
|
||||
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
|
||||
SLACK_CHANNEL: tendermint-internal
|
||||
|
||||
16
.github/workflows/janitor.yml
vendored
Normal file
16
.github/workflows/janitor.yml
vendored
Normal file
@@ -0,0 +1,16 @@
|
||||
name: Janitor
|
||||
# Janitor cleans up previous runs of various workflows
|
||||
# To add more workflows to cancel visit https://api.github.com/repos/tendermint/tendermint/actions/workflows and find the actions name
|
||||
on:
|
||||
pull_request:
|
||||
|
||||
jobs:
|
||||
cancel:
|
||||
name: "Cancel Previous Runs"
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 3
|
||||
steps:
|
||||
- uses: styfle/cancel-workflow-action@0.10.0
|
||||
with:
|
||||
workflow_id: 1041851,1401230,2837803
|
||||
access_token: ${{ github.token }}
|
||||
20
.github/workflows/lint.yml
vendored
20
.github/workflows/lint.yml
vendored
@@ -1,12 +1,18 @@
|
||||
name: Lint
|
||||
# Lint runs golangci-lint over the entire Tendermint repository
|
||||
# This workflow is run on every pull request and push to master
|
||||
# The `golangci` job will pass without running if no *.{go, mod, sum} files have been modified.
|
||||
name: Golang Linter
|
||||
# Lint runs golangci-lint over the entire Tendermint repository.
|
||||
#
|
||||
# This workflow is run on every pull request and push to main.
|
||||
#
|
||||
# The `golangci` job will pass without running if no *.{go, mod, sum}
|
||||
# files have been modified.
|
||||
#
|
||||
# To run this locally, simply run `make lint` from the root of the repo.
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
- main
|
||||
jobs:
|
||||
golangci:
|
||||
name: golangci-lint
|
||||
@@ -16,7 +22,7 @@ jobs:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: '1.18'
|
||||
go-version: '1.17'
|
||||
- uses: technote-space/get-diff-action@v6
|
||||
with:
|
||||
PATTERNS: |
|
||||
@@ -28,7 +34,7 @@ jobs:
|
||||
# Required: the version of golangci-lint is required and
|
||||
# must be specified without patch version: we always use the
|
||||
# latest patch version.
|
||||
version: v1.50.1
|
||||
version: v1.45
|
||||
args: --timeout 10m
|
||||
github-token: ${{ secrets.github_token }}
|
||||
if: env.GIT_DIFF
|
||||
|
||||
12
.github/workflows/linter.yml
vendored
12
.github/workflows/linter.yml
vendored
@@ -1,14 +1,14 @@
|
||||
name: Lint
|
||||
name: Markdown Linter
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
- main
|
||||
paths:
|
||||
- "**.md"
|
||||
- "**.yml"
|
||||
- "**.yaml"
|
||||
pull_request:
|
||||
branches: [master]
|
||||
branches: [main]
|
||||
paths:
|
||||
- "**.md"
|
||||
- "**.yml"
|
||||
@@ -21,12 +21,12 @@ jobs:
|
||||
- name: Checkout Code
|
||||
uses: actions/checkout@v3
|
||||
- name: Lint Code Base
|
||||
uses: docker://github/super-linter:v3
|
||||
uses: docker://github/super-linter:v4
|
||||
env:
|
||||
LINTER_RULES_PATH: .
|
||||
VALIDATE_ALL_CODEBASE: true
|
||||
DEFAULT_BRANCH: master
|
||||
DEFAULT_BRANCH: main
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
VALIDATE_MD: true
|
||||
VALIDATE_OPENAPI: true
|
||||
VALIDATE_YAML: true
|
||||
YAML_CONFIG_FILE: yaml-lint.yml
|
||||
|
||||
23
.github/workflows/markdown-links.yml
vendored
Normal file
23
.github/workflows/markdown-links.yml
vendored
Normal file
@@ -0,0 +1,23 @@
|
||||
name: Check Markdown links
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
pull_request:
|
||||
branches: [main]
|
||||
|
||||
jobs:
|
||||
markdown-link-check:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: technote-space/get-diff-action@v6
|
||||
with:
|
||||
PATTERNS: |
|
||||
**/**.md
|
||||
- uses: creachadair/github-action-markdown-link-check@master
|
||||
with:
|
||||
check-modified-files-only: 'yes'
|
||||
config-file: '.md-link-check.json'
|
||||
if: env.GIT_DIFF
|
||||
65
.github/workflows/pre-release.yml
vendored
65
.github/workflows/pre-release.yml
vendored
@@ -1,65 +0,0 @@
|
||||
name: "Pre-release"
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- "v[0-9]+.[0-9]+.[0-9]+-alpha.[0-9]+" # e.g. v0.37.0-alpha.1, v0.38.0-alpha.10
|
||||
- "v[0-9]+.[0-9]+.[0-9]+-beta.[0-9]+" # e.g. v0.37.0-beta.1, v0.38.0-beta.10
|
||||
- "v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+" # e.g. v0.37.0-rc1, v0.38.0-rc10
|
||||
|
||||
jobs:
|
||||
prerelease:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: '1.18'
|
||||
|
||||
- name: Build
|
||||
uses: goreleaser/goreleaser-action@v3
|
||||
if: ${{ github.event_name == 'pull_request' }}
|
||||
with:
|
||||
version: latest
|
||||
args: build --skip-validate # skip validate skips initial sanity checks in order to be able to fully run
|
||||
|
||||
# Link to CHANGELOG_PENDING.md as release notes.
|
||||
- run: echo https://github.com/tendermint/tendermint/blob/${GITHUB_REF#refs/tags/}/CHANGELOG_PENDING.md > ../release_notes.md
|
||||
|
||||
- name: Release
|
||||
uses: goreleaser/goreleaser-action@v3
|
||||
if: startsWith(github.ref, 'refs/tags/')
|
||||
with:
|
||||
version: latest
|
||||
args: release --rm-dist --release-notes=../release_notes.md
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
prerelease-success:
|
||||
needs: prerelease
|
||||
if: ${{ success() }}
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Notify Slack upon pre-release
|
||||
uses: slackapi/slack-github-action@v1.23.0
|
||||
env:
|
||||
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
|
||||
SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
|
||||
RELEASE_URL: "${{ github.server_url }}/${{ github.repository }}/releases/tag/${{ github.ref_name }}"
|
||||
with:
|
||||
payload: |
|
||||
{
|
||||
"blocks": [
|
||||
{
|
||||
"type": "section",
|
||||
"text": {
|
||||
"type": "mrkdwn",
|
||||
"text": ":sparkles: New Tendermint pre-release: <${{ env.RELEASE_URL }}|${{ github.ref_name }}>"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
4
.github/workflows/proto-lint.yml
vendored
4
.github/workflows/proto-lint.yml
vendored
@@ -5,7 +5,7 @@ on:
|
||||
- 'proto/**'
|
||||
push:
|
||||
branches:
|
||||
- v0.34.x
|
||||
- main
|
||||
paths:
|
||||
- 'proto/**'
|
||||
|
||||
@@ -15,7 +15,7 @@ jobs:
|
||||
timeout-minutes: 5
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: bufbuild/buf-setup-action@v1.8.0
|
||||
- uses: bufbuild/buf-setup-action@v1.6.0
|
||||
- uses: bufbuild/buf-lint-action@v1
|
||||
with:
|
||||
input: 'proto'
|
||||
|
||||
29
.github/workflows/release.yml
vendored
29
.github/workflows/release.yml
vendored
@@ -6,7 +6,7 @@ on:
|
||||
- "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10
|
||||
|
||||
jobs:
|
||||
release:
|
||||
goreleaser:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
@@ -16,7 +16,7 @@ jobs:
|
||||
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: '1.18'
|
||||
go-version: '1.17'
|
||||
|
||||
- name: Build
|
||||
uses: goreleaser/goreleaser-action@v3
|
||||
@@ -35,28 +35,3 @@ jobs:
|
||||
args: release --rm-dist --release-notes=../release_notes.md
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
release-success:
|
||||
needs: release
|
||||
if: ${{ success() }}
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Notify Slack upon release
|
||||
uses: slackapi/slack-github-action@v1.23.0
|
||||
env:
|
||||
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
|
||||
SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
|
||||
RELEASE_URL: "${{ github.server_url }}/${{ github.repository }}/releases/tag/${{ github.ref_name }}"
|
||||
with:
|
||||
payload: |
|
||||
{
|
||||
"blocks": [
|
||||
{
|
||||
"type": "section",
|
||||
"text": {
|
||||
"type": "mrkdwn",
|
||||
"text": ":rocket: New Tendermint release: <${{ env.RELEASE_URL }}|${{ github.ref_name }}>"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
8
.github/workflows/stale.yml
vendored
8
.github/workflows/stale.yml
vendored
@@ -7,12 +7,14 @@ jobs:
|
||||
stale:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/stale@v6
|
||||
- uses: actions/stale@v5
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
stale-pr-message: "This pull request has been automatically marked as stale because it has not had
|
||||
recent activity. It will be closed if no further activity occurs. Thank you
|
||||
for your contributions."
|
||||
days-before-stale: 10
|
||||
days-before-close: 4
|
||||
days-before-stale: -1
|
||||
days-before-close: -1
|
||||
days-before-pr-stale: 10
|
||||
days-before-pr-close: 4
|
||||
exempt-pr-labels: "S:wip"
|
||||
|
||||
142
.github/workflows/tests.yml
vendored
142
.github/workflows/tests.yml
vendored
@@ -1,146 +1,34 @@
|
||||
name: Tests
|
||||
# Tests runs different tests (test_abci_apps, test_abci_cli, test_apps)
|
||||
# This workflow runs on every push to master or release branch and every pull requests
|
||||
# All jobs will pass without running if no *{.go, .mod, .sum} files have been modified
|
||||
name: Test
|
||||
on:
|
||||
pull_request:
|
||||
push:
|
||||
paths:
|
||||
- "**.go"
|
||||
branches:
|
||||
- master
|
||||
- main
|
||||
- release/**
|
||||
|
||||
jobs:
|
||||
cleanup-runs:
|
||||
tests:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: rokroskar/workflow-run-cleanup-action@master
|
||||
env:
|
||||
GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
|
||||
if: "!startsWith(github.ref, 'refs/tags/') && github.ref != 'refs/heads/master'"
|
||||
|
||||
build:
|
||||
name: Build
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 5
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
part: ["00", "01", "02", "03", "04", "05"]
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: "1.18"
|
||||
go-version: "1.17"
|
||||
- uses: actions/checkout@v3
|
||||
- uses: technote-space/get-diff-action@v6
|
||||
with:
|
||||
PATTERNS: |
|
||||
**/**.go
|
||||
"!test/"
|
||||
go.mod
|
||||
go.sum
|
||||
- name: install
|
||||
run: make install install_abci
|
||||
if: "env.GIT_DIFF != ''"
|
||||
- uses: actions/cache@v3
|
||||
with:
|
||||
path: ~/go/pkg/mod
|
||||
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-go-
|
||||
if: env.GIT_DIFF
|
||||
# Cache binaries for use by other jobs
|
||||
- uses: actions/cache@v3
|
||||
with:
|
||||
path: ~/go/bin
|
||||
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
|
||||
if: env.GIT_DIFF
|
||||
|
||||
test_abci_apps:
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
timeout-minutes: 5
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: "^1.18"
|
||||
- uses: actions/checkout@v3
|
||||
- uses: technote-space/get-diff-action@v6
|
||||
with:
|
||||
PATTERNS: |
|
||||
**/**.go
|
||||
go.mod
|
||||
go.sum
|
||||
- uses: actions/cache@v3
|
||||
with:
|
||||
path: ~/go/pkg/mod
|
||||
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-go-
|
||||
if: env.GIT_DIFF
|
||||
- uses: actions/cache@v3
|
||||
with:
|
||||
path: ~/go/bin
|
||||
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
|
||||
if: env.GIT_DIFF
|
||||
- name: test_abci_apps
|
||||
run: abci/tests/test_app/test.sh
|
||||
shell: bash
|
||||
if: env.GIT_DIFF
|
||||
|
||||
test_abci_cli:
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
timeout-minutes: 5
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: "^1.18"
|
||||
- uses: actions/checkout@v3
|
||||
- uses: technote-space/get-diff-action@v6
|
||||
with:
|
||||
PATTERNS: |
|
||||
**/**.go
|
||||
go.mod
|
||||
go.sum
|
||||
- uses: actions/cache@v3
|
||||
with:
|
||||
path: ~/go/pkg/mod
|
||||
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-go-
|
||||
if: env.GIT_DIFF
|
||||
- uses: actions/cache@v3
|
||||
with:
|
||||
path: ~/go/bin
|
||||
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
|
||||
if: env.GIT_DIFF
|
||||
- run: abci/tests/test_cli/test.sh
|
||||
shell: bash
|
||||
if: env.GIT_DIFF
|
||||
|
||||
test_apps:
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
timeout-minutes: 5
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: "1.18"
|
||||
- uses: actions/checkout@v3
|
||||
- uses: technote-space/get-diff-action@v6
|
||||
with:
|
||||
PATTERNS: |
|
||||
**/**.go
|
||||
go.mod
|
||||
go.sum
|
||||
- uses: actions/cache@v3
|
||||
with:
|
||||
path: ~/go/pkg/mod
|
||||
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-go-
|
||||
if: env.GIT_DIFF
|
||||
- uses: actions/cache@v3
|
||||
with:
|
||||
path: ~/go/bin
|
||||
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
|
||||
if: env.GIT_DIFF
|
||||
- name: test_apps
|
||||
run: test/app/test.sh
|
||||
shell: bash
|
||||
Makefile
|
||||
- name: Run Go Tests
|
||||
run: |
|
||||
make test-group-${{ matrix.part }} NUM_SPLIT=6
|
||||
if: env.GIT_DIFF
|
||||
|
||||
63
.gitignore
vendored
63
.gitignore
vendored
@@ -1,69 +1,56 @@
|
||||
*.swp
|
||||
*.swo
|
||||
.bak
|
||||
*.bak
|
||||
*.iml
|
||||
*.swo
|
||||
*.swp
|
||||
*/.glide
|
||||
*/vendor
|
||||
.DS_Store
|
||||
build/*
|
||||
rpc/test/.tendermint
|
||||
.tendermint
|
||||
remote_dump
|
||||
.bak
|
||||
.idea/
|
||||
.revision
|
||||
vendor
|
||||
.tendermint
|
||||
.tendermint-lite
|
||||
.terraform
|
||||
.vagrant
|
||||
test/e2e/build
|
||||
test/maverick/maverick
|
||||
test/e2e/networks/*/
|
||||
test/p2p/data/
|
||||
test/logs
|
||||
.vendor-new/
|
||||
.vscode/
|
||||
abci/abci-cli
|
||||
addrbook.json
|
||||
artifacts/*
|
||||
build/*
|
||||
coverage.txt
|
||||
docs/.vuepress/dist
|
||||
docs/_build
|
||||
docs/dist
|
||||
docs/.vuepress/dist
|
||||
*.log
|
||||
abci-cli
|
||||
docs/node_modules/
|
||||
docs/spec
|
||||
docs/.vuepress/public/rpc
|
||||
index.html.md
|
||||
|
||||
scripts/wal2json/wal2json
|
||||
scripts/cutWALUntil/cutWALUntil
|
||||
|
||||
.idea/
|
||||
*.iml
|
||||
|
||||
.vscode/
|
||||
|
||||
libs/pubsub/query/fuzz_test/output
|
||||
profile\.out
|
||||
remote_dump
|
||||
rpc/test/.tendermint
|
||||
scripts/cutWALUntil/cutWALUntil
|
||||
scripts/wal2json/wal2json
|
||||
shunit2
|
||||
|
||||
.tendermint-lite
|
||||
addrbook.json
|
||||
|
||||
*/vendor
|
||||
.vendor-new/
|
||||
*/.glide
|
||||
.terraform
|
||||
terraform.tfstate
|
||||
terraform.tfstate.backup
|
||||
terraform.tfstate.d
|
||||
profile\.out
|
||||
test/app/grpc_client
|
||||
test/loadtime/build
|
||||
test/e2e/build
|
||||
test/e2e/networks/*/
|
||||
test/logs
|
||||
test/maverick/maverick
|
||||
test/p2p/data/
|
||||
vendor
|
||||
test/fuzz/**/corpus
|
||||
test/fuzz/**/crashers
|
||||
test/fuzz/**/suppressions
|
||||
test/fuzz/**/*.zip
|
||||
proto/spec/**/*.pb.go
|
||||
*.aux
|
||||
*.bbl
|
||||
*.blg
|
||||
*.log
|
||||
*.pdf
|
||||
*.gz
|
||||
*.dvi
|
||||
# Python virtual environments
|
||||
.venv
|
||||
|
||||
.golangci.yml
@@ -2,6 +2,7 @@ linters:
enable:
- asciicheck
- bodyclose
- deadcode
- depguard
- dogsled
- dupl
@@ -20,11 +21,12 @@ linters:
- nolintlint
- prealloc
- staticcheck
# - structcheck // to be fixed by golangci-lint
- structcheck
- stylecheck
- typecheck
- unconvert
- unused
- varcheck

issues:
exclude-rules:
@@ -40,7 +42,5 @@ linters-settings:
min-confidence: 0
maligned:
suggest-new: true

run:
skip-files:
- libs/pubsub/query/query.peg.go
misspell:
locale: US

.goreleaser.yml
@@ -5,7 +5,7 @@ env:
- GO111MODULE=on

builds:
- id: "Tendermint"
- id: "tendermint"
main: ./cmd/tendermint/main.go
ldflags:
- -s -w -X github.com/tendermint/tendermint/version.TMCoreSemVer={{ .Version }}
@@ -25,8 +25,7 @@ checksum:
algorithm: sha256

release:
prerelease: auto
name_template: "{{.Version}}"
name_template: "{{.Version}} (WARNING: BETA SOFTWARE)"

archives:
- files:

.markdownlint.yml
@@ -1,11 +1,15 @@
# markdownlint configuration
# https://github.com/DavidAnson/markdownlint

# Default state for all rules
default: true

# See https://github.com/DavidAnson/markdownlint#rules--aliases for rules
MD001: false
MD007: {indent: 4}
MD013: false
MD024: {siblings_only: true}
MD024: { siblings_only: true }
MD025: false
MD033: false
MD036: false
MD010: false
MD012: false
MD028: false
MD033: { no-inline-html: false }
no-hard-tabs: false
whitespace: false

6
.md-link-check.json
Normal file
@@ -0,0 +1,6 @@
{
  "retryOn429": true,
  "retryCount": 5,
  "fallbackRetryDelay": "30s",
  "aliveStatusCodes": [200, 206, 503]
}

10
.mergify.yml
@@ -1,10 +0,0 @@
pull_request_rules:
  - name: Automerge to master
    conditions:
      - base=master
      - label=S:automerge
    actions:
      merge:
        method: squash
        strict: true
        commit_message: title+body

9
.vscode/settings.json
vendored
@@ -1,9 +0,0 @@
{
  "protoc": {
    "options": [
      "--proto_path=${workspaceRoot}/proto",
      "--proto_path=${workspaceRoot}/third_party/proto"
    ]
  }

}

148
CHANGELOG.md
@@ -2,126 +2,6 @@

Friendly reminder, we have a [bug bounty program](https://hackerone.com/cosmos).

## v0.34.24

*Nov 21, 2022*

Apart from one minor bug fix, this release aims to optimize the output of the
RPC (both HTTP and WebSocket endpoints). See our [upgrading
guidelines](./UPGRADING.md#v03424) for more details.

### IMPROVEMENTS

- `[rpc]` [\#9724](https://github.com/tendermint/tendermint/issues/9724) Remove
  useless whitespace in RPC output (@adizere, @thanethomson)

### BUG FIXES

- `[rpc]` [\#9692](https://github.com/tendermint/tendermint/issues/9692) Remove
  `Cache-Control` header response from `/check_tx` endpoint (@JayT106)

## v0.34.23

*Nov 9, 2022*

This release introduces some new Prometheus metrics to help in determining what
kinds of messages are consuming the most P2P bandwidth. This builds towards our
broader goal of optimizing Tendermint bandwidth consumption, and will give us
meaningful insights once we can establish these metrics for a number of chains.

We now also return `Cache-Control` headers for select RPC endpoints to help
facilitate caching.

Special thanks to external contributors on this release: @JayT106

### IMPROVEMENTS
- `[p2p]` [\#9641](https://github.com/tendermint/tendermint/issues/9641) Add new
  Envelope type and associated methods for sending and receiving Envelopes
  instead of raw bytes. This also adds new metrics,
  `tendermint_p2p_message_send_bytes_total` and
  `tendermint_p2p_message_receive_bytes_total`, that expose how many bytes of
  each message type have been sent.
- `[rpc]` [\#9666](https://github.com/tendermint/tendermint/issues/9666) Enable
  caching of RPC responses (@JayT106)

  The following RPC endpoints will return `Cache-Control` headers with a maximum
  age of 1 day:

  - `/abci_info`
  - `/block`, if `height` is supplied
  - `/block_by_hash`
  - `/block_results`, if `height` is supplied
  - `/blockchain`
  - `/check_tx`
  - `/commit`, if `height` is supplied
  - `/consensus_params`, if `height` is supplied
  - `/genesis`
  - `/genesis_chunked`
  - `/tx`
  - `/validators`, if `height` is supplied
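To see the new caching behaviour from a client's perspective, it is enough to request one of the cacheable endpoints listed above and inspect the response headers. The snippet below is only an illustrative sketch: it assumes a locally running node with the RPC server on the default `127.0.0.1:26657` address, and `/genesis` is just one of the cacheable routes.

```go
// Minimal sketch: check the Cache-Control header described in v0.34.23.
// Assumes a local Tendermint node with RPC listening on 127.0.0.1:26657.
package main

import (
	"fmt"
	"log"
	"net/http"
)

func main() {
	resp, err := http.Get("http://127.0.0.1:26657/genesis")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// For cacheable endpoints the node is expected to set Cache-Control
	// (e.g. a max-age of one day); an empty value means no caching hint.
	fmt.Println("status:       ", resp.Status)
	fmt.Println("Cache-Control:", resp.Header.Get("Cache-Control"))
}
```

The same check works for any of the endpoints above; for the height-dependent ones, remember to supply `height`, since without it no `Cache-Control` header is expected.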
## v0.34.22
|
||||
|
||||
This release includes several bug fixes, [one of
|
||||
which](https://github.com/tendermint/tendermint/pull/9518) we discovered while
|
||||
building up a baseline for v0.34 against which to compare our upcoming v0.37
|
||||
release during our [QA process](./docs/qa/).
|
||||
|
||||
Special thanks to external contributors on this release: @RiccardoM
|
||||
|
||||
### FEATURES
|
||||
|
||||
- [rpc] [\#9423](https://github.com/tendermint/tendermint/pull/9423) Support
|
||||
HTTPS URLs from the WebSocket client (@RiccardoM, @cmwaters)
|
||||
|
||||
### BUG FIXES
|
||||
|
||||
- [config] [\#9483](https://github.com/tendermint/tendermint/issues/9483)
|
||||
Calling `tendermint init` would incorrectly leave out the new `[storage]`
|
||||
section delimiter in the generated configuration file - this has now been
|
||||
fixed
|
||||
- [p2p] [\#9500](https://github.com/tendermint/tendermint/issues/9500) Prevent
|
||||
peers who have errored being added to the peer set (@jmalicevic)
|
||||
- [indexer] [\#9473](https://github.com/tendermint/tendermint/issues/9473) Fix
|
||||
bug that caused the psql indexer to index empty blocks whenever one of the
|
||||
transactions returned a non-zero code. The relevant deduplication logic has
|
||||
been moved within the kv indexer only (@cmwaters)
|
||||
- [blocksync] [\#9518](https://github.com/tendermint/tendermint/issues/9518) A
|
||||
block sync stall was observed during our QA process whereby the node was
|
||||
unable to make progress. Retrying block requests after a timeout fixes this.
|
||||
|
||||
## v0.34.21
|
||||
|
||||
Release highlights include:
|
||||
|
||||
- A new `[storage]` configuration section and flag `discard_abci_responses`,
|
||||
which, if enabled, discards all ABCI responses except the latest one in order
|
||||
to reduce disk space usage in the state store. When enabled, the
|
||||
`block_results` RPC endpoint can no longer function and will return an error.
|
||||
- A new CLI command, `reindex-event`, to re-index block and tx events to the
|
||||
event sinks. You can run this command when the event store backend
|
||||
dropped/disconnected or you want to replace the backend. When
|
||||
`discard_abci_responses` is enabled, you will not be able to use this command.
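A rough sketch of how an operator might use the two features is shown below; the config path and the exact `reindex-event` invocation are assumptions, not prescriptions, so check the command's help output on your build.

```sh
# Hedged sketch: trim the state store by discarding historical ABCI responses.
# In ~/.tendermint/config/config.toml (default path assumed):
#   [storage]
#   discard_abci_responses = true
#
# Re-index missed block/tx events into the configured sinks (subcommand name
# taken from the changelog entry above; flags may differ per build). Note that
# this cannot be combined with discard_abci_responses = true.
tendermint reindex-event --help
```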
|
||||
|
||||
Special thanks to external contributors on this release: @rootwarp & @animart
|
||||
|
||||
### FEATURES
|
||||
|
||||
- [cli] [\#9083](https://github.com/tendermint/tendermint/issues/9083) Backport command to reindex missed events (@cmwaters)
|
||||
- [cli] [\#9107](https://github.com/tendermint/tendermint/issues/9107) Add the `p2p.external-address` argument to set the node P2P external address (@amimart)
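As an illustration of the new flag, a node behind NAT might advertise its public endpoint as in the hedged sketch below; the address value is a placeholder and the `tcp://` prefix is an assumption based on the P2P listen-address convention.

```sh
# Hedged sketch: advertise a publicly reachable P2P endpoint; replace host
# and port with your own.
tendermint node --p2p.external-address "tcp://203.0.113.10:26656"
```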
|
||||
|
||||
### IMPROVEMENTS
|
||||
|
||||
- [config] [\#9054](https://github.com/tendermint/tendermint/issues/9054) `discard_abci_responses` flag added to discard all ABCI
|
||||
responses except the last in order to save on storage space in the state
|
||||
store (@samricotta)
|
||||
|
||||
### BUG FIXES
|
||||
|
||||
- [mempool] [\#9033](https://github.com/tendermint/tendermint/issues/9033) Rework lock discipline to mitigate callback deadlocks in the
|
||||
priority mempool
|
||||
- [cli] [\#9103](https://github.com/tendermint/tendermint/issues/9103) Fix `unsafe-reset-all` to work with the home path (@rootwarp)
|
||||
|
||||
## v0.34.20
|
||||
|
||||
Special thanks to external contributors on this release: @joeabbey @yihuang
|
||||
@@ -558,7 +438,7 @@ Special thanks to external contributors on this release: @james-ray, @fedekunze,
|
||||
- [abci] [\#5174](https://github.com/tendermint/tendermint/pull/5174) Remove `MockEvidence` in favor of testing with actual evidence types (`DuplicateVoteEvidence` & `LightClientAttackEvidence`) (@cmwaters)
|
||||
- [abci] [\#5191](https://github.com/tendermint/tendermint/pull/5191) Add `InitChain.InitialHeight` field giving the initial block height (@erikgrinaker)
|
||||
- [abci] [\#5227](https://github.com/tendermint/tendermint/pull/5227) Add `ResponseInitChain.app_hash` which is recorded in genesis block (@erikgrinaker)
|
||||
- [config] [\#5147](https://github.com/tendermint/tendermint/pull/5147) Add `--consensus.double_sign_check_height` flag and `DoubleSignCheckHeight` config variable. See [ADR-51](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-051-double-signing-risk-reduction.md) (@dongsam)
|
||||
- [config] [\#5147](https://github.com/tendermint/tendermint/pull/5147) Add `--consensus.double_sign_check_height` flag and `DoubleSignCheckHeight` config variable. See [ADR-51](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-051-double-signing-risk-reduction.md) (@dongsam)
|
||||
- [db] [\#5233](https://github.com/tendermint/tendermint/pull/5233) Add support for `badgerdb` database backend (@erikgrinaker)
|
||||
- [evidence] [\#4532](https://github.com/tendermint/tendermint/pull/4532) Handle evidence from light clients (@melekes)
|
||||
- [evidence] [#4821](https://github.com/tendermint/tendermint/pull/4821) Amnesia (light client attack) evidence can be detected, verified and committed (@cmwaters)
|
||||
@@ -572,7 +452,7 @@ Special thanks to external contributors on this release: @james-ray, @fedekunze,
|
||||
- [rpc] [\#5017](https://github.com/tendermint/tendermint/pull/5017) Add `/check_tx` endpoint to check transactions without executing them or adding them to the mempool (@melekes)
|
||||
- [rpc] [\#5108](https://github.com/tendermint/tendermint/pull/5108) Subscribe using the websocket for new evidence events (@cmwaters)
|
||||
- [statesync] Add state sync support, where a new node can be rapidly bootstrapped by fetching state snapshots from peers instead of replaying blocks. See the `[statesync]` config section.
|
||||
- [evidence] [\#5361](https://github.com/tendermint/tendermint/pull/5361) Add LightClientAttackEvidence and refactor evidence lifecycle - for more information see [ADR-059](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-059-evidence-composition-and-lifecycle.md) (@cmwaters)
|
||||
- [evidence] [\#5361](https://github.com/tendermint/tendermint/pull/5361) Add LightClientAttackEvidence and refactor evidence lifecycle - for more information see [ADR-059](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-059-evidence-composition-and-lifecycle.md) (@cmwaters)
|
||||
|
||||
### IMPROVEMENTS
|
||||
|
||||
@@ -652,7 +532,7 @@ This security release fixes:
|
||||
Tendermint 0.33.0 and above allow block proposers to include signatures for the
|
||||
wrong block. This may happen naturally if you start a network, have it run for
|
||||
some time and restart it **without changing the chainID**. (It is a
|
||||
[misconfiguration](https://docs.tendermint.com/v0.33/tendermint-core/using-tendermint.html)
|
||||
[misconfiguration](https://docs.tendermint.com/master/tendermint-core/using-tendermint.html)
|
||||
to reuse chainIDs.) Correct block proposers will accidentally include signatures
|
||||
for the wrong block if they see these signatures, and then commits won't validate,
|
||||
making all proposed blocks invalid. A malicious validator (even with a minimal
|
||||
@@ -951,7 +831,7 @@ and a validator address plus a timestamp. Note we may remove the validator
|
||||
address & timestamp fields in the future (see ADR-25).
|
||||
|
||||
`lite2` package has been added to solve `lite` issues and introduce weak
|
||||
subjectivity interface. Refer to the [spec](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/consensus/light-client.md) for complete details.
|
||||
subjectivity interface. Refer to the [spec](https://github.com/tendermint/spec/blob/master/spec/consensus/light-client.md) for complete details.
|
||||
`lite` package is now deprecated and will be removed in v0.34 release.
|
||||
|
||||
### BREAKING CHANGES:
|
||||
@@ -1312,7 +1192,7 @@ Special thanks to external contributors on this release: @jon-certik, @gracenoah
|
||||
*August 28, 2019*
|
||||
|
||||
@climber73 wrote the [Writing a Tendermint Core application in Java
|
||||
(gRPC)](https://github.com/tendermint/tendermint/blob/v0.32.x/docs/guides/java.md)
|
||||
(gRPC)](https://github.com/tendermint/tendermint/blob/master/docs/guides/java.md)
|
||||
guide.
|
||||
|
||||
Special thanks to external contributors on this release:
|
||||
@@ -1345,7 +1225,7 @@ Special thanks to external contributors on this release:
|
||||
|
||||
### FEATURES:
|
||||
|
||||
- [blockchain] [\#3561](https://github.com/tendermint/tendermint/issues/3561) Add early version of the new blockchain reactor, which is supposed to be more modular and testable compared to the old version. To try it, you'll have to change `version` in the config file, [here](https://github.com/tendermint/tendermint/blob/v0.34.x/config/toml.go#L303) NOTE: It's not ready for a production yet. For further information, see [ADR-40](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-040-blockchain-reactor-refactor.md) & [ADR-43](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-043-blockchain-riri-org.md)
|
||||
- [blockchain] [\#3561](https://github.com/tendermint/tendermint/issues/3561) Add early version of the new blockchain reactor, which is supposed to be more modular and testable compared to the old version. To try it, you'll have to change `version` in the config file, [here](https://github.com/tendermint/tendermint/blob/master/config/toml.go#L303) NOTE: It's not ready for a production yet. For further information, see [ADR-40](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-040-blockchain-reactor-refactor.md) & [ADR-43](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-043-blockchain-riri-org.md)
|
||||
- [mempool] [\#3826](https://github.com/tendermint/tendermint/issues/3826) Make `max_msg_bytes` configurable (@bluele)
|
||||
- [node] [\#3846](https://github.com/tendermint/tendermint/pull/3846) Allow replacing existing p2p.Reactor(s) using [`CustomReactors`
|
||||
option](https://godoc.org/github.com/tendermint/tendermint/node#CustomReactors).
|
||||
@@ -1662,7 +1542,7 @@ Special thanks to external contributors on this release:
|
||||
- [libs/db] [\#3611](https://github.com/tendermint/tendermint/issues/3611) Conditional compilation
|
||||
* Use `cleveldb` tag instead of `gcc` to compile Tendermint with CLevelDB or
|
||||
use `make build_c` / `make install_c` (full instructions can be found at
|
||||
<https://docs.tendermint.com/>)
|
||||
https://docs.tendermint.com/master/introduction/install.html#compile-with-cleveldb-support)
|
||||
* Use `boltdb` tag to compile Tendermint with bolt db
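For readers wanting to try these backends, a hedged sketch of the build invocations follows; the tag names come from the entry above, the make targets are the ones it references, and the `CGO_ENABLED` setting is an assumption since CLevelDB needs cgo.

```sh
# Build with CLevelDB support via the documented tag, or use the make targets.
CGO_ENABLED=1 go build -tags cleveldb ./cmd/tendermint
make build_c
# Build with BoltDB support instead.
go build -tags boltdb ./cmd/tendermint
```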
|
||||
- [node] [\#3362](https://github.com/tendermint/tendermint/issues/3362) Return an error if `persistent_peers` list is invalid (except
|
||||
when IP lookup fails)
|
||||
@@ -1886,7 +1766,7 @@ more details.
|
||||
- [rpc] [\#3269](https://github.com/tendermint/tendermint/issues/2826) Limit number of unique clientIDs with open subscriptions. Configurable via `rpc.max_subscription_clients`
|
||||
- [rpc] [\#3269](https://github.com/tendermint/tendermint/issues/2826) Limit number of unique queries a given client can subscribe to at once. Configurable via `rpc.max_subscriptions_per_client`.
|
||||
- [rpc] [\#3435](https://github.com/tendermint/tendermint/issues/3435) Default ReadTimeout and WriteTimeout changed to 10s. WriteTimeout can increased by setting `rpc.timeout_broadcast_tx_commit` in the config.
|
||||
- [rpc/client] [\#3269](https://github.com/tendermint/tendermint/issues/3269) Update `EventsClient` interface to reflect new pubsub/eventBus API [ADR-33](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-033-pubsub.md). This includes `Subscribe`, `Unsubscribe`, and `UnsubscribeAll` methods.
|
||||
- [rpc/client] [\#3269](https://github.com/tendermint/tendermint/issues/3269) Update `EventsClient` interface to reflect new pubsub/eventBus API [ADR-33](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-033-pubsub.md). This includes `Subscribe`, `Unsubscribe`, and `UnsubscribeAll` methods.
|
||||
|
||||
* Apps
|
||||
- [abci] [\#3403](https://github.com/tendermint/tendermint/issues/3403) Remove `time_iota_ms` from BlockParams. This is a
|
||||
@@ -1939,7 +1819,7 @@ more details.
|
||||
- [blockchain] [\#3358](https://github.com/tendermint/tendermint/pull/3358) Fix timer leak in `BlockPool` (@guagualvcha)
|
||||
- [cmd] [\#3408](https://github.com/tendermint/tendermint/issues/3408) Fix `testnet` command's panic when creating non-validator configs (using `--n` flag) (@srmo)
|
||||
- [libs/db/remotedb/grpcdb] [\#3402](https://github.com/tendermint/tendermint/issues/3402) Close Iterator/ReverseIterator after use
|
||||
- [libs/pubsub] [\#951](https://github.com/tendermint/tendermint/issues/951), [\#1880](https://github.com/tendermint/tendermint/issues/1880) Use non-blocking send when dispatching messages [ADR-33](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-033-pubsub.md)
|
||||
- [libs/pubsub] [\#951](https://github.com/tendermint/tendermint/issues/951), [\#1880](https://github.com/tendermint/tendermint/issues/1880) Use non-blocking send when dispatching messages [ADR-33](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-033-pubsub.md)
|
||||
- [lite] [\#3364](https://github.com/tendermint/tendermint/issues/3364) Fix `/validators` and `/abci_query` proxy endpoints
|
||||
(@guagualvcha)
|
||||
- [p2p/conn] [\#3347](https://github.com/tendermint/tendermint/issues/3347) Reject all-zero shared secrets in the Diffie-Hellman step of secret-connection
|
||||
@@ -2637,7 +2517,7 @@ Special thanks to external contributors on this release:
|
||||
This release is mostly about the ConsensusParams - removing fields and enforcing MaxGas.
|
||||
It also addresses some issues found via security audit, removes various unused
|
||||
functions from `libs/common`, and implements
|
||||
[ADR-012](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-012-peer-transport.md).
|
||||
[ADR-012](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-012-peer-transport.md).
|
||||
|
||||
BREAKING CHANGES:
|
||||
|
||||
@@ -2718,7 +2598,7 @@ BREAKING CHANGES:
|
||||
- [abci] Added address of the original proposer of the block to Header
|
||||
- [abci] Change ABCI Header to match Tendermint exactly
|
||||
- [abci] [\#2159](https://github.com/tendermint/tendermint/issues/2159) Update use of `Validator` (see
|
||||
[ADR-018](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-018-ABCI-Validators.md)):
|
||||
[ADR-018](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-018-ABCI-Validators.md)):
|
||||
- Remove PubKey from `Validator` (so it's just Address and Power)
|
||||
- Introduce `ValidatorUpdate` (with just PubKey and Power)
|
||||
- InitChain and EndBlock use ValidatorUpdate
|
||||
@@ -2740,7 +2620,7 @@ BREAKING CHANGES:
|
||||
- [state] [\#1815](https://github.com/tendermint/tendermint/issues/1815) Validator set changes are now delayed by one block (!)
|
||||
- Add NextValidatorSet to State, changes on-disk representation of state
|
||||
- [state] [\#2184](https://github.com/tendermint/tendermint/issues/2184) Enforce ConsensusParams.BlockSize.MaxBytes (See
|
||||
[ADR-020](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-020-block-size.md)).
|
||||
[ADR-020](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-020-block-size.md)).
|
||||
- Remove ConsensusParams.BlockSize.MaxTxs
|
||||
- Introduce maximum sizes for all components of a block, including ChainID
|
||||
- [types] Updates to the block Header:
|
||||
@@ -2751,7 +2631,7 @@ BREAKING CHANGES:
|
||||
- [consensus] [\#2203](https://github.com/tendermint/tendermint/issues/2203) Implement BFT time
|
||||
- Timestamp in block must be monotonic and equal the median of timestamps in block's LastCommit
|
||||
- [crypto] [\#2239](https://github.com/tendermint/tendermint/issues/2239) Secp256k1 signature changes (See
|
||||
[ADR-014](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-014-secp-malleability.md)):
|
||||
[ADR-014](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-014-secp-malleability.md)):
|
||||
- format changed from DER to `r || s`, both little endian encoded as 32 bytes.
|
||||
- malleability removed by requiring `s` to be in canonical form.
|
||||
|
||||
@@ -3551,7 +3431,7 @@ Also includes the Grand Repo-Merge of 2017.
|
||||
BREAKING CHANGES:
|
||||
|
||||
- Config and Flags:
|
||||
- The `config` map is replaced with a [`Config` struct](https://github.com/tendermint/tendermint/blob/v0.10.0/config/config.go#L11),
|
||||
- The `config` map is replaced with a [`Config` struct](https://github.com/tendermint/tendermint/blob/master/config/config.go#L11),
|
||||
containing substructs: `BaseConfig`, `P2PConfig`, `MempoolConfig`, `ConsensusConfig`, `RPCConfig`
|
||||
- This affects the following flags:
|
||||
- `--seeds` is now `--p2p.seeds`
|
||||
|
||||
@@ -1,6 +1,10 @@
|
||||
# Unreleased Changes
|
||||
|
||||
## v0.34.25
|
||||
## v0.34.21
|
||||
|
||||
Special thanks to external contributors on this release:
|
||||
|
||||
Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).
|
||||
|
||||
### BREAKING CHANGES
|
||||
|
||||
@@ -16,7 +20,8 @@
|
||||
|
||||
### FEATURES
|
||||
|
||||
- [#9083] Backport CLI command to reindex missed events (@cmwaters)
|
||||
|
||||
### IMPROVEMENTS
|
||||
|
||||
### BUG FIXES
|
||||
|
||||
|
||||
@@ -26,7 +26,7 @@ will indicate their support with a heartfelt emoji.
|
||||
|
||||
If the issue would benefit from thorough discussion, maintainers may
|
||||
request that you create a [Request For
|
||||
Comment](https://github.com/tendermint/tendermint/tree/main/rfc). Discussion
|
||||
Comment](https://github.com/tendermint/spec/tree/master/rfc). Discussion
|
||||
at the RFC stage will build collective understanding of the dimensions
|
||||
of the problems and help structure conversations around trade-offs.
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# stage 1 Generate Tendermint Binary
|
||||
FROM golang:1.18-alpine as builder
|
||||
FROM golang:1.15-alpine as builder
|
||||
RUN apk update && \
|
||||
apk upgrade && \
|
||||
apk --no-cache add make
|
||||
@@ -8,7 +8,7 @@ WORKDIR /tendermint
|
||||
RUN make build-linux
|
||||
|
||||
# stage 2
|
||||
FROM golang:1.18-alpine
|
||||
FROM golang:1.15-alpine
|
||||
LABEL maintainer="hello@tendermint.com"
|
||||
|
||||
# Tendermint will be looking for the genesis file in /tendermint/config/genesis.json
|
||||
|
||||
@@ -6,7 +6,7 @@ DockerHub tags for official releases are [here](https://hub.docker.com/r/tenderm
|
||||
|
||||
Official releases can be found [here](https://github.com/tendermint/tendermint/releases).
|
||||
|
||||
The Dockerfile for tendermint is not expected to change in the near future. The master file used for all builds can be found [here](https://raw.githubusercontent.com/tendermint/tendermint/main/DOCKER/Dockerfile).
|
||||
The Dockerfile for tendermint is not expected to change in the near future. The master file used for all builds can be found [here](https://raw.githubusercontent.com/tendermint/tendermint/master/DOCKER/Dockerfile).
|
||||
|
||||
Respective versioned files can be found <https://raw.githubusercontent.com/tendermint/tendermint/vX.XX.XX/DOCKER/Dockerfile> (replace the Xs with the version number).
|
||||
|
||||
@@ -20,9 +20,9 @@ Respective versioned files can be found <https://raw.githubusercontent.com/tende
|
||||
|
||||
Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine, written in any programming language, and securely replicates it on many machines.
|
||||
|
||||
For more background, see the [the docs](https://docs.tendermint.com/v0.34/introduction/#quick-start).
|
||||
For more background, see the [the docs](https://docs.tendermint.com/master/introduction/#quick-start).
|
||||
|
||||
To get started developing applications, see the [application developers guide](https://docs.tendermint.com/v0.34/introduction/quick-start.html).
|
||||
To get started developing applications, see the [application developers guide](https://docs.tendermint.com/master/introduction/quick-start.html).
|
||||
|
||||
## How to use this image
|
||||
|
||||
@@ -37,7 +37,7 @@ docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint node --proxy_app
|
||||
|
||||
## Local cluster
|
||||
|
||||
To run a 4-node network, see the `Makefile` in the root of [the repo](https://github.com/tendermint/tendermint/blob/v0.34.x/Makefile) and run:
|
||||
To run a 4-node network, see the `Makefile` in the root of [the repo](https://github.com/tendermint/tendermint/blob/master/Makefile) and run:
|
||||
|
||||
```sh
|
||||
make build-linux
|
||||
@@ -49,8 +49,8 @@ Note that this will build and use a different image than the ones provided here.
|
||||
|
||||
## License
|
||||
|
||||
- Tendermint's license is [Apache 2.0](https://github.com/tendermint/tendermint/blob/main/LICENSE).
|
||||
- Tendermint's license is [Apache 2.0](https://github.com/tendermint/tendermint/blob/master/LICENSE).
|
||||
|
||||
## Contributing
|
||||
|
||||
Contributions are most welcome! See the [contributing file](https://github.com/tendermint/tendermint/blob/main/CONTRIBUTING.md) for more information.
|
||||
Contributions are most welcome! See the [contributing file](https://github.com/tendermint/tendermint/blob/master/CONTRIBUTING.md) for more information.
|
||||
|
||||
LICENSE: 6 lines changed
@@ -1,3 +1,5 @@
|
||||
Tendermint Core
|
||||
License: Apache2.0
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
@@ -179,7 +181,7 @@
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
@@ -187,7 +189,7 @@
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
Copyright 2016 All in Bits, Inc
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
||||
Makefile: 31 lines changed
@@ -1,5 +1,6 @@
|
||||
PACKAGES=$(shell go list ./...)
|
||||
OUTPUT?=build/tendermint
|
||||
BUILDDIR?=$(CURDIR)/build
|
||||
OUTPUT?=$(BUILDDIR)/tendermint
|
||||
|
||||
BUILD_TAGS?=tendermint
|
||||
|
||||
@@ -74,9 +75,9 @@ install:
|
||||
### Mocks ###
|
||||
###############################################################################
|
||||
|
||||
mockery:
|
||||
mockery:
|
||||
go generate -run="./scripts/mockery_generate.sh" ./...
|
||||
.PHONY: mockery
|
||||
.PHONY: mockery
|
||||
|
||||
###############################################################################
|
||||
### Protobuf ###
|
||||
@@ -200,7 +201,7 @@ format:
|
||||
|
||||
lint:
|
||||
@echo "--> Running linter"
|
||||
@go run github.com/golangci/golangci-lint/cmd/golangci-lint run
|
||||
@golangci-lint run
|
||||
.PHONY: lint
|
||||
|
||||
DESTINATION = ./index.html.md
|
||||
@@ -286,3 +287,25 @@ endif
|
||||
contract-tests:
|
||||
dredd
|
||||
.PHONY: contract-tests
|
||||
|
||||
# Implements test splitting and running. This is pulled directly from
|
||||
# the github action workflows for better local reproducibility.
|
||||
|
||||
GO_TEST_FILES != find $(CURDIR) -name "*_test.go"
|
||||
|
||||
# default to four splits
|
||||
NUM_SPLIT ?= 4
|
||||
|
||||
$(BUILDDIR):
|
||||
mkdir -p $@
|
||||
|
||||
# The format statement filters out all packages that don't have tests.
|
||||
# Note we need to check for both in-package tests (.TestGoFiles) and
|
||||
# out-of-package tests (.XTestGoFiles).
|
||||
$(BUILDDIR)/packages.txt:$(GO_TEST_FILES) $(BUILDDIR)
|
||||
go list -f "{{ if (or .TestGoFiles .XTestGoFiles) }}{{ .ImportPath }}{{ end }}" ./... | sort > $@
|
||||
|
||||
split-test-packages:$(BUILDDIR)/packages.txt
|
||||
split -d -n l/$(NUM_SPLIT) $< $<.
|
||||
test-group-%:split-test-packages
|
||||
cat $(BUILDDIR)/packages.txt.$* | xargs go test -mod=readonly -timeout=5m -race -coverprofile=$(BUILDDIR)/$*.profile.out
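As a usage note, the targets above can be exercised locally as in this sketch; the two-digit group suffix is an assumption based on how `split -d` typically names its output pieces.

```sh
# Hedged sketch: build the package list, split it into 4 groups, and run the
# first group locally with the same flags CI would use.
make test-group-00 NUM_SPLIT=4
```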
|
||||
|
||||
README.md: 213 lines changed
@@ -1,170 +1,161 @@
|
||||
# Tendermint
|
||||
|
||||
_UPDATE: The Tendermint Core feature set is frozen for LTS; see issue https://github.com/tendermint/tendermint/issues/9972_<br/>
|
||||
_This is the latest stable release used by cosmoshub-4, version 0.34.24_<br/>
|
||||
_The previous main branch (v0.38.xx) can now be found under "main_backup"_<br/>
|
||||
|
||||

|
||||
|
||||
[Byzantine-Fault Tolerant][bft] [State Machine Replication][smr]. Or
|
||||
[Blockchain], for short.
|
||||
[Byzantine-Fault Tolerant](https://en.wikipedia.org/wiki/Byzantine_fault_tolerance)
|
||||
[State Machines](https://en.wikipedia.org/wiki/State_machine_replication).
|
||||
Or [Blockchain](<https://en.wikipedia.org/wiki/Blockchain_(database)>), for short.
|
||||
|
||||
[![Version][version-badge]][version-url]
|
||||
[![API Reference][api-badge]][api-url]
|
||||
[![Go version][go-badge]][go-url]
|
||||
[![Discord chat][discord-badge]][discord-url]
|
||||
[![License][license-badge]][license-url]
|
||||
[![Sourcegraph][sg-badge]][sg-url]
|
||||
[](https://github.com/tendermint/tendermint/releases/latest)
|
||||
[](https://pkg.go.dev/github.com/tendermint/tendermint)
|
||||
[](https://github.com/moovweb/gvm)
|
||||
[](https://discord.gg/AzefAFd)
|
||||
[](https://github.com/tendermint/tendermint/blob/master/LICENSE)
|
||||
[](https://github.com/tendermint/tendermint)
|
||||
[](https://sourcegraph.com/github.com/tendermint/tendermint?badge)
|
||||
|
||||
| Branch | Tests | Linting |
|
||||
|--------|------------------------------------|---------------------------------|
|
||||
| main | [![Tests][tests-badge]][tests-url] | [![Lint][lint-badge]][lint-url] |
|
||||
| Branch | Tests | Coverage | Linting |
|
||||
| ------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------ | -------------------------------------------------------------------------- |
|
||||
| master | [](https://circleci.com/gh/tendermint/tendermint/tree/master) </br>  | [](https://codecov.io/gh/tendermint/tendermint) |  |
|
||||
|
||||
Tendermint Core is a Byzantine Fault Tolerant (BFT) middleware that takes a
|
||||
state transition machine - written in any programming language - and securely
|
||||
replicates it on many machines.
|
||||
Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine - written in any programming language -
|
||||
and securely replicates it on many machines.
|
||||
|
||||
For protocol details, refer to the [Tendermint Specification](./spec/README.md).
|
||||
For protocol details, see [the specification](https://github.com/tendermint/spec).
|
||||
|
||||
For detailed analysis of the consensus protocol, including safety and liveness
|
||||
proofs, read our paper, "[The latest gossip on BFT
|
||||
consensus](https://arxiv.org/abs/1807.04938)".
|
||||
|
||||
## Documentation
|
||||
|
||||
Complete documentation can be found on the
|
||||
[website](https://docs.tendermint.com/).
|
||||
For detailed analysis of the consensus protocol, including safety and liveness proofs,
|
||||
see our recent paper, "[The latest gossip on BFT consensus](https://arxiv.org/abs/1807.04938)".
|
||||
|
||||
## Releases
|
||||
|
||||
Please do not depend on `main` as your production branch. Use
|
||||
[releases](https://github.com/tendermint/tendermint/releases) instead.
|
||||
Please do not depend on master as your production branch. Use [releases](https://github.com/tendermint/tendermint/releases) instead.
|
||||
|
||||
Tendermint has been used in production in private and public environments, most
|
||||
notably the blockchains of the Cosmos Network. We haven't released v1.0 yet
|
||||
since we are making breaking changes to the protocol and the APIs. See below for
|
||||
more details about [versioning](#versioning).
|
||||
Tendermint is being used in production in both private and public environments,
|
||||
most notably the blockchains of the [Cosmos Network](https://cosmos.network/).
|
||||
However, we are still making breaking changes to the protocol and the APIs and have not yet released v1.0.
|
||||
See below for more details about [versioning](#versioning).
|
||||
|
||||
In any case, if you intend to run Tendermint in production, we're happy to help.
|
||||
You can contact us [over email](mailto:hello@newtendermint.org) or [join the
|
||||
chat](https://discord.gg/gnoland).
|
||||
|
||||
More on how releases are conducted can be found [here](./RELEASES.md).
|
||||
In any case, if you intend to run Tendermint in production, we're happy to help. You can
|
||||
contact us [over email](mailto:hello@interchain.berlin) or [join the chat](https://discord.gg/AzefAFd).
|
||||
|
||||
## Security
|
||||
|
||||
To report a security vulnerability, please [email us](mailto:security@newtendermint.org).
|
||||
For examples of the kinds of bugs we're looking for, see [our security policy](SECURITY.md).
|
||||
To report a security vulnerability, see our [bug bounty
|
||||
program](https://hackerone.com/tendermint).
|
||||
For examples of the kinds of bugs we're looking for, see [our security policy](SECURITY.md)
|
||||
|
||||
We also maintain a dedicated mailing list for security updates. We will only ever use this mailing list
|
||||
to notify you of vulnerabilities and fixes in Tendermint Core. You can subscribe [here](http://eepurl.com/gZ5hQD).
|
||||
|
||||
## Minimum requirements
|
||||
|
||||
| Requirement | Notes |
|
||||
|-------------|-------------------|
|
||||
| Go version | Go 1.18 or higher |
|
||||
| Requirement | Notes |
|
||||
| ----------- | ---------------- |
|
||||
| Go version | Go1.15 or higher |
|
||||
|
||||
## Documentation
|
||||
|
||||
Complete documentation can be found on the [website](https://docs.tendermint.com/master/).
|
||||
|
||||
### Install
|
||||
|
||||
See the [install instructions](./docs/introduction/install.md).
|
||||
See the [install instructions](/docs/introduction/install.md).
|
||||
|
||||
### Quick Start
|
||||
|
||||
- [Single node](./docs/introduction/quick-start.md)
|
||||
- [Local cluster using docker-compose](./docs/tools/docker-compose.md)
|
||||
- [Remote cluster using Terraform and Ansible](./docs/tools/terraform-and-ansible.md)
|
||||
- [Single node](/docs/introduction/quick-start.md)
|
||||
- [Local cluster using docker-compose](/docs/networks/docker-compose.md)
|
||||
- [Remote cluster using Terraform and Ansible](/docs/networks/terraform-and-ansible.md)
|
||||
- [Join the Cosmos testnet](https://cosmos.network/testnet)
|
||||
|
||||
## Contributing
|
||||
|
||||
Please abide by the [Code of Conduct](CODE_OF_CONDUCT.md) in all interactions.
|
||||
|
||||
Before contributing to the project, please take a look at the [contributing
|
||||
guidelines](CONTRIBUTING.md) and the [style guide](STYLE_GUIDE.md). You may also
|
||||
find it helpful to read the [specifications](./spec/README.md), and familiarize
|
||||
yourself with our [Architectural Decision Records
|
||||
(ADRs)](./docs/architecture/README.md) and
|
||||
[Request For Comments (RFCs)](./docs/rfc/README.md).
|
||||
Before contributing to the project, please take a look at the [contributing guidelines](CONTRIBUTING.md)
|
||||
and the [style guide](STYLE_GUIDE.md). You may also find it helpful to read the
|
||||
[specifications](https://github.com/tendermint/spec), watch the [Developer Sessions](/docs/DEV_SESSIONS.md),
|
||||
and familiarize yourself with our
|
||||
[Architectural Decision Records](https://github.com/tendermint/tendermint/tree/master/docs/architecture).
|
||||
|
||||
## Versioning
|
||||
|
||||
### Semantic Versioning
|
||||
|
||||
Tendermint uses [Semantic Versioning](http://semver.org/) to determine when and
|
||||
how the version changes. According to SemVer, anything in the public API can
|
||||
change at any time before version 1.0.0
|
||||
Tendermint uses [Semantic Versioning](http://semver.org/) to determine when and how the version changes.
|
||||
According to SemVer, anything in the public API can change at any time before version 1.0.0
|
||||
|
||||
To provide some stability to users of 0.X.X versions of Tendermint, the MINOR
|
||||
version is used to signal breaking changes across Tendermint's API. This API
|
||||
includes all publicly exposed types, functions, and methods in non-internal Go
|
||||
packages as well as the types and methods accessible via the Tendermint RPC
|
||||
interface.
|
||||
To provide some stability to Tendermint users in these 0.X.X days, the MINOR version is used
|
||||
to signal breaking changes across a subset of the total public API. This subset includes all
|
||||
interfaces exposed to other processes (cli, rpc, p2p, etc.), but does not
|
||||
include the Go APIs.
|
||||
|
||||
Breaking changes to these public APIs will be documented in the CHANGELOG.
|
||||
That said, breaking changes in the following packages will be documented in the
|
||||
CHANGELOG even if they don't lead to MINOR version bumps:
|
||||
|
||||
- crypto
|
||||
- config
|
||||
- libs
|
||||
- bech32
|
||||
- bits
|
||||
- bytes
|
||||
- json
|
||||
- log
|
||||
- math
|
||||
- net
|
||||
- os
|
||||
- protoio
|
||||
- rand
|
||||
- sync
|
||||
- strings
|
||||
- service
|
||||
- node
|
||||
- rpc/client
|
||||
- types
|
||||
|
||||
### Upgrades
|
||||
|
||||
In an effort to avoid accumulating technical debt prior to 1.0.0, we do not
|
||||
guarantee that breaking changes (ie. bumps in the MINOR version) will work with
|
||||
existing Tendermint blockchains. In these cases you will have to start a new
|
||||
blockchain, or write something custom to get the old data into the new chain.
|
||||
However, any bump in the PATCH version should be compatible with existing
|
||||
blockchain histories.
|
||||
In an effort to avoid accumulating technical debt prior to 1.0.0,
|
||||
we do not guarantee that breaking changes (ie. bumps in the MINOR version)
|
||||
will work with existing Tendermint blockchains. In these cases you will
|
||||
have to start a new blockchain, or write something custom to get the old
|
||||
data into the new chain. However, any bump in the PATCH version should be
|
||||
compatible with existing blockchain histories.
|
||||
|
||||
|
||||
For more information on upgrading, see [UPGRADING.md](./UPGRADING.md).
|
||||
|
||||
### Supported Versions
|
||||
|
||||
Because we are a small core team, we only ship patch updates, including security
|
||||
updates, to the most recent minor release and the second-most recent minor
|
||||
release. Consequently, we strongly recommend keeping Tendermint up-to-date.
|
||||
Upgrading instructions can be found in [UPGRADING.md](./UPGRADING.md).
|
||||
Because we are a small core team, we only ship patch updates, including security updates,
|
||||
to the most recent minor release and the second-most recent minor release. Consequently,
|
||||
we strongly recommend keeping Tendermint up-to-date. Upgrading instructions can be found
|
||||
in [UPGRADING.md](./UPGRADING.md).
|
||||
|
||||
## Resources
|
||||
|
||||
### Libraries
|
||||
### Tendermint Core
|
||||
|
||||
- [Cosmos SDK](http://github.com/cosmos/cosmos-sdk); a framework for building
|
||||
applications in Golang
|
||||
- [Tendermint in Rust](https://github.com/informalsystems/tendermint-rs)
|
||||
- [ABCI Tower](https://github.com/penumbra-zone/tower-abci)
|
||||
For details about the blockchain data structures and the p2p protocols, see the
|
||||
[Tendermint specification](https://docs.tendermint.com/master/spec/).
|
||||
|
||||
For details on using the software, see the [documentation](/docs/) which is also
|
||||
hosted at: <https://docs.tendermint.com/master/>
|
||||
|
||||
### Tools
|
||||
|
||||
Benchmarking is provided by [`tm-load-test`](https://github.com/informalsystems/tm-load-test).
|
||||
Additional tooling can be found in [/docs/tools](/docs/tools).
|
||||
|
||||
### Applications
|
||||
|
||||
- [Cosmos Hub](https://hub.cosmos.network/)
|
||||
- [Terra](https://www.terra.money/)
|
||||
- [Celestia](https://celestia.org/)
|
||||
- [Anoma](https://anoma.network/)
|
||||
- [Vocdoni](https://docs.vocdoni.io/)
|
||||
- [Cosmos SDK](http://github.com/cosmos/cosmos-sdk); a cryptocurrency application framework
|
||||
- [Ethermint](http://github.com/cosmos/ethermint); Ethereum on Tendermint
|
||||
- [Many more](https://tendermint.com/ecosystem)
|
||||
|
||||
### Research
|
||||
|
||||
- [The latest gossip on BFT consensus](https://arxiv.org/abs/1807.04938)
|
||||
- [Master's Thesis on Tendermint](https://atrium.lib.uoguelph.ca/xmlui/handle/10214/9769)
|
||||
- [Original Whitepaper: "Tendermint: Consensus Without Mining"](https://tendermint.com/static/docs/tendermint.pdf)
|
||||
- [Tendermint Core Blog](https://medium.com/tendermint/tagged/tendermint-core)
|
||||
- [Cosmos Blog](https://blog.cosmos.network/tendermint/home)
|
||||
|
||||
## Join us!
|
||||
|
||||
The development of Tendermint Core was led primarily by All in Bits, Inc. The
|
||||
Tendermint trademark is owned by New Tendermint, LLC. If you'd like to work
|
||||
full-time on Tendermint2 or [gno.land](https://gno.land), [we're
|
||||
hiring](mailto:hiring@newtendermint.org)!
|
||||
|
||||
[bft]: https://en.wikipedia.org/wiki/Byzantine_fault_tolerance
|
||||
[smr]: https://en.wikipedia.org/wiki/State_machine_replication
|
||||
[Blockchain]: https://en.wikipedia.org/wiki/Blockchain
|
||||
[version-badge]: https://img.shields.io/github/tag/tendermint/tendermint.svg
|
||||
[version-url]: https://github.com/tendermint/tendermint/releases/latest
|
||||
[api-badge]: https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667
|
||||
[api-url]: https://pkg.go.dev/github.com/tendermint/tendermint
|
||||
[go-badge]: https://img.shields.io/badge/go-1.18-blue.svg
|
||||
[go-url]: https://github.com/moovweb/gvm
|
||||
[discord-badge]: https://img.shields.io/discord/669268347736686612.svg
|
||||
[discord-url]: https://discord.gg/cosmosnetwork
|
||||
[license-badge]: https://img.shields.io/github/license/tendermint/tendermint.svg
|
||||
[license-url]: https://github.com/tendermint/tendermint/blob/main/LICENSE
|
||||
[sg-badge]: https://sourcegraph.com/github.com/tendermint/tendermint/-/badge.svg
|
||||
[sg-url]: https://sourcegraph.com/github.com/tendermint/tendermint?badge
|
||||
[tests-url]: https://github.com/tendermint/tendermint/actions/workflows/tests.yml
|
||||
[tests-badge]: https://github.com/tendermint/tendermint/actions/workflows/tests.yml/badge.svg?branch=main
|
||||
[lint-badge]: https://github.com/tendermint/tendermint/actions/workflows/lint.yml/badge.svg
|
||||
[lint-url]: https://github.com/tendermint/tendermint/actions/workflows/lint.yml
|
||||
- [Blog](https://blog.cosmos.network/tendermint/home)
|
||||
|
||||
SECURITY.md: 153 lines changed
@@ -2,146 +2,98 @@
|
||||
|
||||
## Reporting a Bug
|
||||
|
||||
As part of our [Coordinated Vulnerability Disclosure Policy](https://tendermint.com/security),
|
||||
we operate a [bug bounty][hackerone]. See the policy for more
|
||||
details on submissions and rewards, and see "Example Vulnerabilities" (below)
|
||||
for examples of the kinds of bugs we're most interested in.
|
||||
As part of our [Coordinated Vulnerability Disclosure
|
||||
Policy](https://tendermint.com/security), we operate a [bug
|
||||
bounty](https://hackerone.com/tendermint).
|
||||
See the policy for more details on submissions and rewards, and see "Example Vulnerabilities" (below) for examples of the kinds of bugs we're most interested in.
|
||||
|
||||
### Guidelines
|
||||
### Guidelines
|
||||
|
||||
We require that all researchers:
|
||||
|
||||
* Use the bug bounty to disclose all vulnerabilities, and avoid posting
|
||||
vulnerability information in public places, including Github Issues, Discord
|
||||
channels, and Telegram groups
|
||||
* Make every effort to avoid privacy violations, degradation of user experience,
|
||||
disruption to production systems (including but not limited to the Cosmos
|
||||
Hub), and destruction of data
|
||||
* Keep any information about vulnerabilities that you’ve discovered confidential
|
||||
between yourself and the Tendermint Core engineering team until the issue has
|
||||
been resolved and disclosed
|
||||
* Use the bug bounty to disclose all vulnerabilities, and avoid posting vulnerability information in public places, including Github Issues, Discord channels, and Telegram groups
|
||||
* Make every effort to avoid privacy violations, degradation of user experience, disruption to production systems (including but not limited to the Cosmos Hub), and destruction of data
|
||||
* Keep any information about vulnerabilities that you’ve discovered confidential between yourself and the Tendermint Core engineering team until the issue has been resolved and disclosed
|
||||
* Avoid posting personally identifiable information, privately or publicly
|
||||
|
||||
If you follow these guidelines when reporting an issue to us, we commit to:
|
||||
|
||||
* Not pursue or support any legal action related to your research on this
|
||||
vulnerability
|
||||
* Work with you to understand, resolve and ultimately disclose the issue in a
|
||||
timely fashion
|
||||
* Not pursue or support any legal action related to your research on this vulnerability
|
||||
* Work with you to understand, resolve and ultimately disclose the issue in a timely fashion
|
||||
|
||||
## Disclosure Process
|
||||
## Disclosure Process
|
||||
|
||||
Tendermint Core uses the following disclosure process:
|
||||
|
||||
1. Once a security report is received, the Tendermint Core team works to verify
|
||||
the issue and confirm its severity level using CVSS.
|
||||
2. The Tendermint Core team collaborates with the Gaia team to determine the
|
||||
vulnerability’s potential impact on the Cosmos Hub.
|
||||
3. Patches are prepared for eligible releases of Tendermint in private
|
||||
repositories. See “Supported Releases” below for more information on which
|
||||
releases are considered eligible.
|
||||
4. If it is determined that a CVE-ID is required, we request a CVE through a CVE
|
||||
Numbering Authority.
|
||||
5. We notify the community that a security release is coming, to give users time
|
||||
to prepare their systems for the update. Notifications can include forum
|
||||
posts, tweets, and emails to partners and validators, including emails sent
|
||||
to the [Tendermint Security Mailing List][tmsec-mailing].
|
||||
6. 24 hours following this notification, the fixes are applied publicly and new
|
||||
releases are issued.
|
||||
7. Cosmos SDK and Gaia update their Tendermint Core dependencies to use these
|
||||
releases, and then themselves issue new releases.
|
||||
8. Once releases are available for Tendermint Core, Cosmos SDK and Gaia, we
|
||||
notify the community, again, through the same channels as above. We also
|
||||
publish a Security Advisory on Github and publish the CVE, as long as neither
|
||||
the Security Advisory nor the CVE include any information on how to exploit
|
||||
these vulnerabilities beyond what information is already available in the
|
||||
patch itself.
|
||||
9. Once the community is notified, we will pay out any relevant bug bounties to
|
||||
submitters.
|
||||
10. One week after the releases go out, we will publish a post with further
|
||||
details on the vulnerability as well as our response to it.
|
||||
1. Once a security report is received, the Tendermint Core team works to verify the issue and confirm its severity level using CVSS.
|
||||
2. The Tendermint Core team collaborates with the Gaia team to determine the vulnerability’s potential impact on the Cosmos Hub.
|
||||
3. Patches are prepared for eligible releases of Tendermint in private repositories. See “Supported Releases” below for more information on which releases are considered eligible.
|
||||
4. If it is determined that a CVE-ID is required, we request a CVE through a CVE Numbering Authority.
|
||||
5. We notify the community that a security release is coming, to give users time to prepare their systems for the update. Notifications can include forum posts, tweets, and emails to partners and validators, including emails sent to the [Tendermint Security Mailing List](https://berlin.us4.list-manage.com/subscribe?u=431b35421ff7edcc77df5df10&id=3fe93307bc).
|
||||
6. 24 hours following this notification, the fixes are applied publicly and new releases are issued.
|
||||
7. Cosmos SDK and Gaia update their Tendermint Core dependencies to use these releases, and then themselves issue new releases.
|
||||
8. Once releases are available for Tendermint Core, Cosmos SDK and Gaia, we notify the community, again, through the same channels as above. We also publish a Security Advisory on Github and publish the CVE, as long as neither the Security Advisory nor the CVE include any information on how to exploit these vulnerabilities beyond what information is already available in the patch itself.
|
||||
9. Once the community is notified, we will pay out any relevant bug bounties to submitters.
|
||||
10. One week after the releases go out, we will publish a post with further details on the vulnerability as well as our response to it.
|
||||
|
||||
This process can take some time. Every effort will be made to handle the bug in
|
||||
as timely a manner as possible, however it's important that we follow the
|
||||
process described above to ensure that disclosures are handled consistently and
|
||||
to keep Tendermint Core and its downstream dependent projects--including but not
|
||||
limited to Gaia and the Cosmos Hub--as secure as possible.
|
||||
This process can take some time. Every effort will be made to handle the bug in as timely a manner as possible, however it's important that we follow the process described above to ensure that disclosures are handled consistently and to keep Tendermint Core and its downstream dependent projects--including but not limited to Gaia and the Cosmos Hub--as secure as possible.
|
||||
|
||||
### Example Timeline
|
||||
### Example Timeline
|
||||
|
||||
The following is an example timeline for the triage and response. The required
|
||||
roles and team members are described in parentheses after each task; however,
|
||||
multiple people can play each role and each person may play multiple roles.
|
||||
The following is an example timeline for the triage and response. The required roles and team members are described in parentheses after each task; however, multiple people can play each role and each person may play multiple roles.
|
||||
|
||||
#### 24+ Hours Before Release Time
|
||||
#### > 24 Hours Before Release Time
|
||||
|
||||
1. Request CVE number (ADMIN)
|
||||
2. Gather emails and other contact info for validators (COMMS LEAD)
|
||||
3. Create patches in a private security repo, and ensure that PRs are open
|
||||
targeting all relevant release branches (TENDERMINT ENG, TENDERMINT LEAD)
|
||||
4. Test fixes on a testnet (TENDERMINT ENG, COSMOS SDK ENG)
|
||||
5. Write “Security Advisory” for forum (TENDERMINT LEAD)
|
||||
1. Request CVE number (ADMIN)
|
||||
2. Gather emails and other contact info for validators (COMMS LEAD)
|
||||
3. Test fixes on a testnet (TENDERMINT ENG, COSMOS ENG)
|
||||
4. Write “Security Advisory” for forum (TENDERMINT LEAD)
|
||||
|
||||
#### 24 Hours Before Release Time
|
||||
|
||||
1. Post “Security Advisory” pre-notification on forum (TENDERMINT LEAD)
|
||||
2. Post Tweet linking to forum post (COMMS LEAD)
|
||||
3. Announce security advisory/link to post in various other social channels
|
||||
(Telegram, Discord) (COMMS LEAD)
|
||||
4. Send emails to validators or other users (PARTNERSHIPS LEAD)
|
||||
1. Post “Security Advisory” pre-notification on forum (TENDERMINT LEAD)
|
||||
2. Post Tweet linking to forum post (COMMS LEAD)
|
||||
3. Announce security advisory/link to post in various other social channels (Telegram, Discord) (COMMS LEAD)
|
||||
4. Send emails to validators or other users (PARTNERSHIPS LEAD)
|
||||
|
||||
#### Release Time
|
||||
|
||||
1. Cut Tendermint releases for eligible versions (TENDERMINT ENG, TENDERMINT
|
||||
LEAD)
|
||||
1. Cut Tendermint releases for eligible versions (TENDERMINT ENG, TENDERMINT LEAD)
|
||||
2. Cut Cosmos SDK release for eligible versions (COSMOS ENG)
|
||||
3. Cut Gaia release for eligible versions (GAIA ENG)
|
||||
4. Post “Security releases” on forum (TENDERMINT LEAD)
|
||||
5. Post new Tweet linking to forum post (COMMS LEAD)
|
||||
6. Remind everyone via social channels (Telegram, Discord) that the release is
|
||||
out (COMMS LEAD)
|
||||
7. Send emails to validators or other users (COMMS LEAD)
|
||||
8. Publish Security Advisory and CVE, if CVE has no sensitive information
|
||||
(ADMIN)
|
||||
6. Remind everyone via social channels (Telegram, Discord) that the release is out (COMMS LEAD)
|
||||
7. Send emails to validators or other users (COMMS LEAD)
|
||||
8. Publish Security Advisory and CVE, if CVE has no sensitive information (ADMIN)
|
||||
|
||||
#### After Release Time
|
||||
|
||||
1. Write forum post with exploit details (TENDERMINT LEAD)
|
||||
2. Approve pay-out on HackerOne for submitter (ADMIN)
|
||||
2. Approve pay-out on HackerOne for submitter (ADMIN)
|
||||
|
||||
#### 7 Days After Release Time
|
||||
|
||||
1. Publish CVE if it has not yet been published (ADMIN)
|
||||
1. Publish CVE if it has not yet been published (ADMIN)
|
||||
2. Publish forum post with exploit details (TENDERMINT ENG, TENDERMINT LEAD)
|
||||
|
||||
## Supported Releases
|
||||
|
||||
The Tendermint Core team commits to releasing security patch releases for both
|
||||
the latest minor release as well for the major/minor release that the Cosmos Hub
|
||||
is running.
|
||||
The Tendermint Core team commits to releasing security patch releases for both the latest minor release as well for the major/minor release that the Cosmos Hub is running.
|
||||
|
||||
If you are running older versions of Tendermint Core, we encourage you to
|
||||
upgrade at your earliest opportunity so that you can receive security patches
|
||||
directly from the Tendermint repo. While you are welcome to backport security
|
||||
patches to older versions for your own use, we will not publish or promote these
|
||||
backports.
|
||||
If you are running older versions of Tendermint Core, we encourage you to upgrade at your earliest opportunity so that you can receive security patches directly from the Tendermint repo. While you are welcome to backport security patches to older versions for your own use, we will not publish or promote these backports.
|
||||
|
||||
## Scope
|
||||
|
||||
The full scope of our bug bounty program is outlined on our
|
||||
[Hacker One program page][hackerone]. Please also note that, in the interest of
|
||||
the safety of our users and staff, a few things are explicitly excluded from
|
||||
scope:
|
||||
The full scope of our bug bounty program is outlined on our [Hacker One program page](https://hackerone.com/tendermint). Please also note that, in the interest of the safety of our users and staff, a few things are explicitly excluded from scope:
|
||||
|
||||
* Any third-party services
|
||||
* Findings from physical testing, such as office access
|
||||
* Any third-party services
|
||||
* Findings from physical testing, such as office access
|
||||
* Findings derived from social engineering (e.g., phishing)
|
||||
|
||||
## Example Vulnerabilities
|
||||
## Example Vulnerabilities
|
||||
|
||||
The following is a list of examples of the kinds of vulnerabilities that we’re
|
||||
most interested in. It is not exhaustive: there are other kinds of issues we may
|
||||
also be interested in!
|
||||
The following is a list of examples of the kinds of vulnerabilities that we’re most interested in. It is not exhaustive: there are other kinds of issues we may also be interested in!
|
||||
|
||||
### Specification
|
||||
|
||||
@@ -153,8 +105,7 @@ also be interested in!
|
||||
|
||||
Assuming less than 1/3 of the voting power is Byzantine (malicious):
|
||||
|
||||
* Validation of blockchain data structures, including blocks, block parts,
|
||||
votes, and so on
|
||||
* Validation of blockchain data structures, including blocks, block parts, votes, and so on
|
||||
* Execution of blocks
|
||||
* Validator set changes
|
||||
* Proposer round robin
|
||||
@@ -163,9 +114,6 @@ Assuming less than 1/3 of the voting power is Byzantine (malicious):
|
||||
* A node halting (liveness failure)
|
||||
* Syncing new and old nodes
|
||||
|
||||
Assuming more than 1/3 of the voting power is Byzantine:
|
||||
|
||||
* Attacks that go unpunished (unhandled evidence)
|
||||
|
||||
### Networking
|
||||
|
||||
@@ -191,7 +139,7 @@ Attacks may come through the P2P network or the RPC layer:
|
||||
|
||||
### Libraries
|
||||
|
||||
* Serialization
|
||||
* Serialization (Amino)
|
||||
* Reading/Writing files and databases
|
||||
|
||||
### Cryptography
|
||||
@@ -202,8 +150,5 @@ Attacks may come through the P2P network or the RPC layer:
|
||||
|
||||
### Light Client
|
||||
|
||||
* Core verification
|
||||
* Core verification
|
||||
* Bisection/sequential algorithms
|
||||
|
||||
[hackerone]: https://hackerone.com/cosmos
|
||||
[tmsec-mailing]: https://berlin.us4.list-manage.com/subscribe?u=431b35421ff7edcc77df5df10&id=3fe93307bc
|
||||
|
||||
@@ -98,7 +98,7 @@ Sometimes it's necessary to rename libraries to avoid naming collisions or ambig
|
||||
* Make use of table driven testing where possible and not-cumbersome
|
||||
* [Inspiration](https://dave.cheney.net/2013/06/09/writing-table-driven-tests-in-go)
|
||||
* Make use of [assert](https://godoc.org/github.com/stretchr/testify/assert) and [require](https://godoc.org/github.com/stretchr/testify/require)
|
||||
* When using mocks, it is recommended to use Testify [mock](<https://pkg.go.dev/github.com/stretchr/testify/mock>
|
||||
* When using mocks, it is recommended to use Testify [mock] (<https://pkg.go.dev/github.com/stretchr/testify/mock>
|
||||
) along with [Mockery](https://github.com/vektra/mockery) for autogeneration
|
||||
|
||||
## Errors
|
||||
|
||||
UPGRADING.md: 47 lines changed
@@ -1,15 +1,6 @@
|
||||
# Upgrading Tendermint Core
|
||||
|
||||
This guide provides instructions for upgrading to specific versions of
|
||||
Tendermint Core.
|
||||
|
||||
## v0.34.24
|
||||
|
||||
Note that in [\#9724](https://github.com/tendermint/tendermint/pull/9724) we
|
||||
un-prettified the JSON output (i.e. removed all indentation) of the HTTP and
|
||||
WebSocket RPC for performance and subscription stability reasons. We recommend
|
||||
using a tool such as [jq](https://github.com/stedolan/jq) to obtain prettified
|
||||
output if you rely on that prettified output in some way.
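If you relied on the indented output, one way to restore readability on the client side is sketched below; it assumes a local node serving RPC on the default `127.0.0.1:26657`.

```sh
# Hedged sketch: RPC responses are now compact JSON; pipe them through jq to
# pretty-print them again.
curl -s http://127.0.0.1:26657/status | jq .
```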
|
||||
This guide provides instructions for upgrading to specific versions of Tendermint Core.
|
||||
|
||||
## v0.34.20
|
||||
|
||||
@@ -24,7 +15,7 @@ and gas cost).
|
||||
Operators can enable the priority mempool by setting `mempool.version` to
|
||||
`"v1"` in the `config.toml`. For more technical details about the priority
|
||||
mempool, see [ADR 067: Mempool
|
||||
Refactor](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-067-mempool-refactor.md).
|
||||
Refactor](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-067-mempool-refactor.md).
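A minimal sketch of the opt-in follows, assuming the default config location; the section and key names are inferred from the `mempool.version` setting named above.

```sh
# Hedged sketch: opt in to the priority mempool, then restart the node.
# In ~/.tendermint/config/config.toml:
#   [mempool]
#   version = "v1"
#
# Inspect the section to confirm the change took effect.
grep -A2 '^\[mempool\]' ~/.tendermint/config/config.toml
```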
|
||||
|
||||
## v0.34.0
|
||||
|
||||
@@ -32,7 +23,7 @@ Refactor](https://github.com/tendermint/tendermint/blob/main/docs/architecture/a
|
||||
This release is not compatible with previous blockchains due to changes to
|
||||
the encoding format (see "Protocol Buffers," below) and the block header (see "Blockchain Protocol").
|
||||
|
||||
Note also that Tendermint 0.34 also requires Go 1.16 or higher.
|
||||
Note also that Tendermint 0.34 also requires Go 1.15 or higher.
|
||||
|
||||
### ABCI Changes
|
||||
|
||||
@@ -42,7 +33,7 @@ Note also that Tendermint 0.34 also requires Go 1.16 or higher.
|
||||
were added to support the new State Sync feature.
|
||||
Previously, syncing a new node to a preexisting network could take days; but with State Sync,
|
||||
new nodes are able to join a network in a matter of seconds.
|
||||
Read [the spec](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/abci/apps.md#state-sync)
|
||||
Read [the spec](https://docs.tendermint.com/master/spec/abci/apps.html#state-sync)
|
||||
if you want to learn more about State Sync, or if you'd like your application to use it.
|
||||
(If you don't want to support State Sync in your application, you can just implement these new
|
||||
ABCI methods as no-ops, leaving them empty.)
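
A minimal sketch of the no-op approach, using the method signatures that appear in the `Application` interface later in this diff (the `MyApp` type itself is illustrative):

```go
package myapp

import (
	abci "github.com/tendermint/tendermint/abci/types"
)

// MyApp embeds BaseApplication for the rest of the ABCI surface and
// implements the State Sync methods as empty no-ops.
type MyApp struct {
	abci.BaseApplication
}

// ListSnapshots reports that no snapshots are available.
func (MyApp) ListSnapshots(req abci.RequestListSnapshots) abci.ResponseListSnapshots {
	return abci.ResponseListSnapshots{}
}

// OfferSnapshot returns an empty response for every offered snapshot.
func (MyApp) OfferSnapshot(req abci.RequestOfferSnapshot) abci.ResponseOfferSnapshot {
	return abci.ResponseOfferSnapshot{}
}

// LoadSnapshotChunk never returns chunk data.
func (MyApp) LoadSnapshotChunk(req abci.RequestLoadSnapshotChunk) abci.ResponseLoadSnapshotChunk {
	return abci.ResponseLoadSnapshotChunk{}
}

// ApplySnapshotChunk is not expected to be called for this application.
func (MyApp) ApplySnapshotChunk(req abci.RequestApplySnapshotChunk) abci.ResponseApplySnapshotChunk {
	return abci.ResponseApplySnapshotChunk{}
}
```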

@@ -58,7 +49,7 @@ Note also that Tendermint 0.34 also requires Go 1.16 or higher.

Applications should be able to handle these evidence types
(i.e., through slashing or other accountability measures).

* The [`PublicKey` type](https://github.com/tendermint/tendermint/blob/v0.34.x/proto/tendermint/crypto/keys.proto#L13-L15)
* The [`PublicKey` type](https://github.com/tendermint/tendermint/blob/master/proto/tendermint/crypto/keys.proto#L13-L15)
(used in ABCI as part of `ValidatorUpdate`) now uses a `oneof` protobuf type.
Note that since Tendermint only supports ed25519 validator keys, there's only one
option in the `oneof`. For more, see "Protocol Buffers," below.
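
For illustration, constructing a `ValidatorUpdate` whose `PublicKey` uses the ed25519 branch of the `oneof` might look roughly like this (a sketch only; import paths and field names are assumed from this repository's layout):

```go
package myapp

import (
	abci "github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/proto/tendermint/crypto"
)

// ed25519Update builds a ValidatorUpdate whose PublicKey uses the
// ed25519 branch of the oneof, the only branch Tendermint supports here.
func ed25519Update(pubKey []byte, power int64) abci.ValidatorUpdate {
	return abci.ValidatorUpdate{
		PubKey: crypto.PublicKey{
			Sum: &crypto.PublicKey_Ed25519{Ed25519: pubKey},
		},
		Power: power,
	}
}
```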

@@ -73,9 +64,12 @@ directory. For more, see "Protobuf," below.

### Blockchain Protocol

* `Header#LastResultsHash`, which is the root hash of a Merkle tree built from
`ResponseDeliverTx(Code, Data)` as of v0.34 also includes `GasWanted` and `GasUsed`
fields.
* `Header#LastResultsHash` previously was the root hash of a Merkle tree built from `ResponseDeliverTx(Code, Data)` responses.
As of 0.34, `Header#LastResultsHash` is now the root hash of a Merkle tree built from:
  * `BeginBlock#Events`
  * Root hash of a Merkle tree built from `ResponseDeliverTx(Code, Data,
    GasWanted, GasUsed, Events)` responses
  * `BeginBlock#Events`

* Merkle hashes of empty trees previously returned nothing, but now return the hash of an empty input,
to conform with [RFC-6962](https://tools.ietf.org/html/rfc6962).
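
Concretely, RFC 6962 defines the Merkle tree hash of an empty list as the hash of an empty input, so code that previously compared against `nil` should now expect:

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

func main() {
	// The empty-tree hash is SHA-256 of the empty string, not nil.
	empty := sha256.Sum256([]byte{})
	fmt.Printf("%x\n", empty)
	// e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
}
```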

@@ -165,7 +159,7 @@ The `bech32` package has moved to the Cosmos SDK:

### CLI

The `tendermint lite` command has been renamed to `tendermint light` and has a slightly different API.
See [the docs](https://docs.tendermint.com/v0.33/tendermint-core/light-client-protocol.html#http-proxy) for details.
See [the docs](https://docs.tendermint.com/master/tendermint-core/light-client-protocol.html#http-proxy) for details.

### Light Client

@@ -179,7 +173,6 @@ Other user-relevant changes include:

* The `Verifier` was broken up into two pieces:
  * Core verification logic (pure `VerifyX` functions)
  * `Client` object, which represents the complete light client
* The new light client stores headers and validator sets as `LightBlock`s
* The RPC client can be found in the `/rpc` directory.
* The HTTP(S) proxy is located in the `/proxy` directory.

@@ -321,7 +314,7 @@ Evidence Params has been changed to include duration.

### RPC Changes

* `/validators` is now paginated (default: 30 vals per page)
* `/block_results` response format updated [see RPC docs for details](https://docs.tendermint.com/v0.33/rpc/#/Info/block_results)
* `/block_results` response format updated [see RPC docs for details](https://docs.tendermint.com/master/rpc/#/Info/block_results)
* Event suffix has been removed from the ID in event responses
* IDs are now integers not `json-client-XYZ`

@@ -440,11 +433,11 @@ the compilation tag:

Use `cleveldb` tag instead of `gcc` to compile Tendermint with CLevelDB or
use `make build_c` / `make install_c` (full instructions can be found at
<https://docs.tendermint.com/v0.33/introduction/install.html#compile-with-cleveldb-support>)
<https://tendermint.com/docs/introduction/install.html#compile-with-cleveldb-support>)

## v0.31.0

This release contains a breaking change to the behavior of the pubsub system.
This release contains a breaking change to the behaviour of the pubsub system.
It also contains some minor breaking changes in the Go API and ABCI.
There are no changes to the block or p2p protocols, so v0.31.0 should work fine
with blockchains created from the v0.30 series.

@@ -462,7 +455,7 @@ In this case, the WS client will receive an error with description:

    "error": {
        "code": -32000,
        "msg": "Server error",
        "data": "subscription was canceled (reason: client is not pulling messages fast enough)" // or "subscription was canceled (reason: Tendermint exited)"
        "data": "subscription was cancelled (reason: client is not pulling messages fast enough)" // or "subscription was cancelled (reason: Tendermint exited)"
    }
}

@@ -515,14 +508,14 @@ due to changes in how various data structures are hashed.

Any implementations of Tendermint blockchain verification, including lite clients,
will need to be updated. For specific details:

* [Merkle tree](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/blockchain/encoding.md#merkle-trees)
* [ConsensusParams](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/blockchain/state.md#consensusparams)
* [Merkle tree](https://github.com/tendermint/spec/blob/master/spec/blockchain/encoding.md#merkle-trees)
* [ConsensusParams](https://github.com/tendermint/spec/blob/master/spec/blockchain/state.md#consensusparams)

There was also a small change to field ordering in the vote struct. Any
implementations of an out-of-process validator (like a Key-Management Server)
will need to be updated. For specific details:

* [Vote](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/consensus/signing.md#votes)
* [Vote](https://github.com/tendermint/spec/blob/master/spec/consensus/signing.md#votes)

Finally, the proposer selection algorithm continues to evolve. See the
[work-in-progress

@@ -643,7 +636,7 @@ to `timeout_propose = "3s"`.

### RPC Changes

The default behavior of `/abci_query` has been changed to not return a proof,
The default behaviour of `/abci_query` has been changed to not return a proof,
and the name of the parameter that controls this has been changed from `trusted`
to `prove`. To get proofs with your queries, ensure you set `prove=true`.
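
From Go, the equivalent is passing `Prove: true` in the query options; a hedged sketch assuming the v0.34-era `rpc/client/http` package (node address, query path, and data are placeholders):

```go
package main

import (
	"context"
	"fmt"

	rpcclient "github.com/tendermint/tendermint/rpc/client"
	rpchttp "github.com/tendermint/tendermint/rpc/client/http"
)

func main() {
	// Connect to a local node's RPC endpoint (address is an example).
	c, err := rpchttp.New("http://localhost:26657", "/websocket")
	if err != nil {
		panic(err)
	}

	// Ask the application for a value and request a proof alongside it.
	res, err := c.ABCIQueryWithOptions(
		context.Background(),
		"/key",
		[]byte("mykey"),
		rpcclient.ABCIQueryOptions{Prove: true},
	)
	if err != nil {
		panic(err)
	}
	fmt.Printf("value=%X proof=%v\n", res.Response.Value, res.Response.ProofOps)
}
```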

@@ -19,7 +19,7 @@ To get up and running quickly, see the [getting started guide](../docs/app-dev/g

A detailed description of the ABCI methods and message types is contained in:

- [The main spec](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/abci/abci.md)
- [The main spec](https://github.com/tendermint/spec/blob/master/spec/abci/abci.md)
- [A protobuf file](./types/types.proto)
- [A Go interface](./types/application.go)

@@ -14,6 +14,8 @@ const (
|
||||
echoRetryIntervalSeconds = 1
|
||||
)
|
||||
|
||||
//go:generate mockery --case underscore --name Client
|
||||
|
||||
// Client defines an interface for an ABCI client.
|
||||
// All `Async` methods return a `ReqRes` object.
|
||||
// All `Sync` methods return the appropriate protobuf ResponseXxx struct and an error.
|
||||
@@ -34,12 +36,14 @@ type Client interface {
|
||||
QueryAsync(types.RequestQuery) *ReqRes
|
||||
CommitAsync() *ReqRes
|
||||
InitChainAsync(types.RequestInitChain) *ReqRes
|
||||
PrepareProposalAsync(types.RequestPrepareProposal) *ReqRes
|
||||
BeginBlockAsync(types.RequestBeginBlock) *ReqRes
|
||||
EndBlockAsync(types.RequestEndBlock) *ReqRes
|
||||
ListSnapshotsAsync(types.RequestListSnapshots) *ReqRes
|
||||
OfferSnapshotAsync(types.RequestOfferSnapshot) *ReqRes
|
||||
LoadSnapshotChunkAsync(types.RequestLoadSnapshotChunk) *ReqRes
|
||||
ApplySnapshotChunkAsync(types.RequestApplySnapshotChunk) *ReqRes
|
||||
ProcessProposalAsync(types.RequestProcessProposal) *ReqRes
|
||||
|
||||
FlushSync() error
|
||||
EchoSync(msg string) (*types.ResponseEcho, error)
|
||||
@@ -50,12 +54,14 @@ type Client interface {
|
||||
QuerySync(types.RequestQuery) (*types.ResponseQuery, error)
|
||||
CommitSync() (*types.ResponseCommit, error)
|
||||
InitChainSync(types.RequestInitChain) (*types.ResponseInitChain, error)
|
||||
PrepareProposalSync(types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error)
|
||||
BeginBlockSync(types.RequestBeginBlock) (*types.ResponseBeginBlock, error)
|
||||
EndBlockSync(types.RequestEndBlock) (*types.ResponseEndBlock, error)
|
||||
ListSnapshotsSync(types.RequestListSnapshots) (*types.ResponseListSnapshots, error)
|
||||
OfferSnapshotSync(types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error)
|
||||
LoadSnapshotChunkSync(types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error)
|
||||
ApplySnapshotChunkSync(types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error)
|
||||
ProcessProposalSync(types.RequestProcessProposal) (*types.ResponseProcessProposal, error)
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
|
||||
@@ -300,6 +300,25 @@ func (cli *grpcClient) ApplySnapshotChunkAsync(params types.RequestApplySnapshot
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_ApplySnapshotChunk{ApplySnapshotChunk: res}})
|
||||
}
|
||||
|
||||
func (cli *grpcClient) PrepareProposalAsync(params types.RequestPrepareProposal) *ReqRes {
|
||||
req := types.ToRequestPrepareProposal(params)
|
||||
res, err := cli.client.PrepareProposal(context.Background(), req.GetPrepareProposal(), grpc.WaitForReady(true))
|
||||
if err != nil {
|
||||
cli.StopForError(err)
|
||||
}
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_PrepareProposal{PrepareProposal: res}})
|
||||
}
|
||||
|
||||
func (cli *grpcClient) ProcessProposalAsync(params types.RequestProcessProposal) *ReqRes {
|
||||
req := types.ToRequestProcessProposal(params)
|
||||
res, err := cli.client.ProcessProposal(context.Background(), req.GetProcessProposal(), grpc.WaitForReady(true))
|
||||
if err != nil {
|
||||
cli.StopForError(err)
|
||||
}
|
||||
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_ProcessProposal{ProcessProposal: res}})
|
||||
}
|
||||
|
||||
// finishAsyncCall creates a ReqRes for an async call, and immediately populates it
|
||||
// with the response. We don't complete it until it's been ordered via the channel.
|
||||
func (cli *grpcClient) finishAsyncCall(req *types.Request, res *types.Response) *ReqRes {
|
||||
@@ -417,3 +436,14 @@ func (cli *grpcClient) ApplySnapshotChunkSync(
|
||||
reqres := cli.ApplySnapshotChunkAsync(params)
|
||||
return cli.finishSyncCall(reqres).GetApplySnapshotChunk(), cli.Error()
|
||||
}
|
||||
|
||||
func (cli *grpcClient) PrepareProposalSync(
|
||||
params types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
|
||||
reqres := cli.PrepareProposalAsync(params)
|
||||
return cli.finishSyncCall(reqres).GetPrepareProposal(), cli.Error()
|
||||
}
|
||||
|
||||
func (cli *grpcClient) ProcessProposalSync(params types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
|
||||
reqres := cli.ProcessProposalAsync(params)
|
||||
return cli.finishSyncCall(reqres).GetProcessProposal(), cli.Error()
|
||||
}
|
||||
|
||||
@@ -207,6 +207,28 @@ func (app *localClient) ApplySnapshotChunkAsync(req types.RequestApplySnapshotCh
|
||||
)
|
||||
}
|
||||
|
||||
func (app *localClient) PrepareProposalAsync(req types.RequestPrepareProposal) *ReqRes {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
res := app.Application.PrepareProposal(req)
|
||||
return app.callback(
|
||||
types.ToRequestPrepareProposal(req),
|
||||
types.ToResponsePrepareProposal(res),
|
||||
)
|
||||
}
|
||||
|
||||
func (app *localClient) ProcessProposalAsync(req types.RequestProcessProposal) *ReqRes {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
res := app.Application.ProcessProposal(req)
|
||||
return app.callback(
|
||||
types.ToRequestProcessProposal(req),
|
||||
types.ToResponseProcessProposal(res),
|
||||
)
|
||||
}
|
||||
|
||||
//-------------------------------------------------------
|
||||
|
||||
func (app *localClient) FlushSync() error {
|
||||
@@ -323,6 +345,22 @@ func (app *localClient) ApplySnapshotChunkSync(
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *localClient) PrepareProposalSync(req types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
res := app.Application.PrepareProposal(req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *localClient) ProcessProposalSync(req types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
res := app.Application.ProcessProposal(req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
//-------------------------------------------------------
|
||||
|
||||
func (app *localClient) callback(req *types.Request, res *types.Response) *ReqRes {
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
// Code generated by mockery v1.1.1. DO NOT EDIT.
|
||||
// Code generated by mockery v2.14.0. DO NOT EDIT.
|
||||
|
||||
package mocks
|
||||
|
||||
@@ -575,6 +575,84 @@ func (_m *Client) OnStop() {
|
||||
_m.Called()
|
||||
}
|
||||
|
||||
// PrepareProposalAsync provides a mock function with given fields: _a0
|
||||
func (_m *Client) PrepareProposalAsync(_a0 types.RequestPrepareProposal) *abcicli.ReqRes {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 *abcicli.ReqRes
|
||||
if rf, ok := ret.Get(0).(func(types.RequestPrepareProposal) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*abcicli.ReqRes)
|
||||
}
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// PrepareProposalSync provides a mock function with given fields: _a0
|
||||
func (_m *Client) PrepareProposalSync(_a0 types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 *types.ResponsePrepareProposal
|
||||
if rf, ok := ret.Get(0).(func(types.RequestPrepareProposal) *types.ResponsePrepareProposal); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*types.ResponsePrepareProposal)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(types.RequestPrepareProposal) error); ok {
|
||||
r1 = rf(_a0)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// ProcessProposalAsync provides a mock function with given fields: _a0
|
||||
func (_m *Client) ProcessProposalAsync(_a0 types.RequestProcessProposal) *abcicli.ReqRes {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 *abcicli.ReqRes
|
||||
if rf, ok := ret.Get(0).(func(types.RequestProcessProposal) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*abcicli.ReqRes)
|
||||
}
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// ProcessProposalSync provides a mock function with given fields: _a0
|
||||
func (_m *Client) ProcessProposalSync(_a0 types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 *types.ResponseProcessProposal
|
||||
if rf, ok := ret.Get(0).(func(types.RequestProcessProposal) *types.ResponseProcessProposal); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*types.ResponseProcessProposal)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(types.RequestProcessProposal) error); ok {
|
||||
r1 = rf(_a0)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// QueryAsync provides a mock function with given fields: _a0
|
||||
func (_m *Client) QueryAsync(_a0 types.RequestQuery) *abcicli.ReqRes {
|
||||
ret := _m.Called(_a0)
|
||||
@@ -734,3 +812,18 @@ func (_m *Client) String() string {
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
type mockConstructorTestingTNewClient interface {
|
||||
mock.TestingT
|
||||
Cleanup(func())
|
||||
}
|
||||
|
||||
// NewClient creates a new instance of Client. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
|
||||
func NewClient(t mockConstructorTestingTNewClient) *Client {
|
||||
mock := &Client{}
|
||||
mock.Mock.Test(t)
|
||||
|
||||
t.Cleanup(func() { mock.AssertExpectations(t) })
|
||||
|
||||
return mock
|
||||
}
|
||||
|
||||
@@ -279,6 +279,14 @@ func (cli *socketClient) ApplySnapshotChunkAsync(req types.RequestApplySnapshotC
|
||||
return cli.queueRequest(types.ToRequestApplySnapshotChunk(req))
|
||||
}
|
||||
|
||||
func (cli *socketClient) PrepareProposalAsync(req types.RequestPrepareProposal) *ReqRes {
|
||||
return cli.queueRequest(types.ToRequestPrepareProposal(req))
|
||||
}
|
||||
|
||||
func (cli *socketClient) ProcessProposalAsync(req types.RequestProcessProposal) *ReqRes {
|
||||
return cli.queueRequest(types.ToRequestProcessProposal(req))
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
|
||||
func (cli *socketClient) FlushSync() error {
|
||||
@@ -417,6 +425,24 @@ func (cli *socketClient) ApplySnapshotChunkSync(
|
||||
return reqres.Response.GetApplySnapshotChunk(), cli.Error()
|
||||
}
|
||||
|
||||
func (cli *socketClient) PrepareProposalSync(req types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
|
||||
reqres := cli.queueRequest(types.ToRequestPrepareProposal(req))
|
||||
if err := cli.FlushSync(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return reqres.Response.GetPrepareProposal(), cli.Error()
|
||||
}
|
||||
|
||||
func (cli *socketClient) ProcessProposalSync(req types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
|
||||
reqres := cli.queueRequest(types.ToRequestProcessProposal(req))
|
||||
if err := cli.FlushSync(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return reqres.Response.GetProcessProposal(), cli.Error()
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
|
||||
func (cli *socketClient) queueRequest(req *types.Request) *ReqRes {
|
||||
@@ -492,6 +518,10 @@ func resMatchesReq(req *types.Request, res *types.Response) (ok bool) {
|
||||
_, ok = res.Value.(*types.Response_ListSnapshots)
|
||||
case *types.Request_OfferSnapshot:
|
||||
_, ok = res.Value.(*types.Response_OfferSnapshot)
|
||||
case *types.Request_PrepareProposal:
|
||||
_, ok = res.Value.(*types.Response_PrepareProposal)
|
||||
case *types.Request_ProcessProposal:
|
||||
_, ok = res.Value.(*types.Response_ProcessProposal)
|
||||
}
|
||||
return ok
|
||||
}
|
||||
|
||||
@@ -170,3 +170,13 @@ func (app *Application) Query(reqQuery types.RequestQuery) (resQuery types.Respo
|
||||
|
||||
return resQuery
|
||||
}
|
||||
|
||||
func (app *Application) ProcessProposal(
|
||||
req types.RequestProcessProposal) types.ResponseProcessProposal {
|
||||
for _, tx := range req.Txs {
|
||||
if len(tx) == 0 {
|
||||
return types.ResponseProcessProposal{Status: types.ResponseProcessProposal_REJECT}
|
||||
}
|
||||
}
|
||||
return types.ResponseProcessProposal{Status: types.ResponseProcessProposal_ACCEPT}
|
||||
}
|
||||
|
||||
@@ -2,7 +2,7 @@ package kvstore
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"io/ioutil"
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
@@ -71,7 +71,7 @@ func TestKVStoreKV(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestPersistentKVStoreKV(t *testing.T) {
|
||||
dir, err := os.MkdirTemp("/tmp", "abci-kvstore-test") // TODO
|
||||
dir, err := ioutil.TempDir("/tmp", "abci-kvstore-test") // TODO
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -87,7 +87,7 @@ func TestPersistentKVStoreKV(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestPersistentKVStoreInfo(t *testing.T) {
|
||||
dir, err := os.MkdirTemp("/tmp", "abci-kvstore-test") // TODO
|
||||
dir, err := ioutil.TempDir("/tmp", "abci-kvstore-test") // TODO
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -114,11 +114,12 @@ func TestPersistentKVStoreInfo(t *testing.T) {
|
||||
if resInfo.LastBlockHeight != height {
|
||||
t.Fatalf("expected height of %d, got %d", height, resInfo.LastBlockHeight)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// add a validator, remove a validator, update a validator
|
||||
func TestValUpdates(t *testing.T) {
|
||||
dir, err := os.MkdirTemp("/tmp", "abci-kvstore-test") // TODO
|
||||
dir, err := ioutil.TempDir("/tmp", "abci-kvstore-test") // TODO
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -161,7 +162,7 @@ func TestValUpdates(t *testing.T) {
|
||||
|
||||
makeApplyBlock(t, kvstore, 2, diff, tx1, tx2, tx3)
|
||||
|
||||
vals1 = append(vals[:nInit-2], vals[nInit+1]) //nolint: gocritic
|
||||
vals1 = append(vals[:nInit-2], vals[nInit+1]) // nolint: gocritic
|
||||
vals2 = kvstore.Validators()
|
||||
valsEqual(t, vals1, vals2)
|
||||
|
||||
@@ -180,6 +181,7 @@ func TestValUpdates(t *testing.T) {
|
||||
vals1 = append([]types.ValidatorUpdate{v1}, vals1[1:]...)
|
||||
vals2 = kvstore.Validators()
|
||||
valsEqual(t, vals1, vals2)
|
||||
|
||||
}
|
||||
|
||||
func makeApplyBlock(
|
||||
@@ -187,8 +189,7 @@ func makeApplyBlock(
|
||||
kvstore types.Application,
|
||||
heightInt int,
|
||||
diff []types.ValidatorUpdate,
|
||||
txs ...[]byte,
|
||||
) {
|
||||
txs ...[]byte) {
|
||||
// make and apply block
|
||||
height := int64(heightInt)
|
||||
hash := []byte("foo")
|
||||
@@ -206,6 +207,7 @@ func makeApplyBlock(
|
||||
kvstore.Commit()
|
||||
|
||||
valsEqual(t, diff, resEndBlock.ValidatorUpdates)
|
||||
|
||||
}
|
||||
|
||||
// order doesn't matter
|
||||
|
||||
@@ -76,6 +76,10 @@ func (app *PersistentKVStoreApplication) DeliverTx(req types.RequestDeliverTx) t
|
||||
return app.execValidatorTx(req.Tx)
|
||||
}
|
||||
|
||||
if isPrepareTx(req.Tx) {
|
||||
return app.execPrepareTx(req.Tx)
|
||||
}
|
||||
|
||||
// otherwise, update the key-value store
|
||||
return app.app.DeliverTx(req)
|
||||
}
|
||||
@@ -170,6 +174,21 @@ func (app *PersistentKVStoreApplication) ApplySnapshotChunk(
|
||||
return types.ResponseApplySnapshotChunk{Result: types.ResponseApplySnapshotChunk_ABORT}
|
||||
}
|
||||
|
||||
func (app *PersistentKVStoreApplication) PrepareProposal(
|
||||
req types.RequestPrepareProposal) types.ResponsePrepareProposal {
|
||||
return types.ResponsePrepareProposal{TxRecords: app.substPrepareTx(req.Txs, req.MaxTxBytes)}
|
||||
}
|
||||
|
||||
func (app *PersistentKVStoreApplication) ProcessProposal(
|
||||
req types.RequestProcessProposal) types.ResponseProcessProposal {
|
||||
for _, tx := range req.Txs {
|
||||
if len(tx) == 0 {
|
||||
return types.ResponseProcessProposal{Status: types.ResponseProcessProposal_REJECT}
|
||||
}
|
||||
}
|
||||
return types.ResponseProcessProposal{Status: types.ResponseProcessProposal_ACCEPT}
|
||||
}
|
||||
|
||||
//---------------------------------------------
|
||||
// update validators
|
||||
|
||||
@@ -284,3 +303,50 @@ func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate
|
||||
|
||||
return types.ResponseDeliverTx{Code: code.CodeTypeOK}
|
||||
}
|
||||
|
||||
// -----------------------------
|
||||
|
||||
const PreparePrefix = "prepare"
|
||||
|
||||
func isPrepareTx(tx []byte) bool {
|
||||
return bytes.HasPrefix(tx, []byte(PreparePrefix))
|
||||
}
|
||||
|
||||
// execPrepareTx is a no-op. The tx data is considered a placeholder
// and is substituted at PrepareProposal.
|
||||
func (app *PersistentKVStoreApplication) execPrepareTx(tx []byte) types.ResponseDeliverTx {
|
||||
// noop
|
||||
return types.ResponseDeliverTx{}
|
||||
}
|
||||
|
||||
// substPrepareTx substitutes all the transactions prefixed with 'prepare' in the
|
||||
// proposal for transactions with the prefix stripped.
|
||||
// It marks all of the original transactions as 'REMOVED' so that
|
||||
// Tendermint will remove them from its mempool.
|
||||
func (app *PersistentKVStoreApplication) substPrepareTx(blockData [][]byte, maxTxBytes int64) []*types.TxRecord {
|
||||
trs := make([]*types.TxRecord, 0, len(blockData))
|
||||
var removed []*types.TxRecord
|
||||
var totalBytes int64
|
||||
for _, tx := range blockData {
|
||||
txMod := tx
|
||||
action := types.TxRecord_UNMODIFIED
|
||||
if isPrepareTx(tx) {
|
||||
removed = append(removed, &types.TxRecord{
|
||||
Tx: tx,
|
||||
Action: types.TxRecord_REMOVED,
|
||||
})
|
||||
txMod = bytes.TrimPrefix(tx, []byte(PreparePrefix))
|
||||
action = types.TxRecord_ADDED
|
||||
}
|
||||
totalBytes += int64(len(txMod))
|
||||
if totalBytes > maxTxBytes {
|
||||
break
|
||||
}
|
||||
trs = append(trs, &types.TxRecord{
|
||||
Tx: txMod,
|
||||
Action: action,
|
||||
})
|
||||
}
|
||||
|
||||
return append(trs, removed...)
|
||||
}
|
||||
|
||||
@@ -2,8 +2,9 @@
|
||||
Package server is used to start a new ABCI server.
|
||||
|
||||
It contains two server implementations:
|
||||
- gRPC server
|
||||
- socket server
|
||||
* gRPC server
|
||||
* socket server
|
||||
|
||||
*/
|
||||
package server
|
||||
|
||||
|
||||
@@ -230,6 +230,12 @@ func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types
|
||||
case *types.Request_OfferSnapshot:
|
||||
res := s.app.OfferSnapshot(*r.OfferSnapshot)
|
||||
responses <- types.ToResponseOfferSnapshot(res)
|
||||
case *types.Request_PrepareProposal:
|
||||
res := s.app.PrepareProposal(*r.PrepareProposal)
|
||||
responses <- types.ToResponsePrepareProposal(res)
|
||||
case *types.Request_ProcessProposal:
|
||||
res := s.app.ProcessProposal(*r.ProcessProposal)
|
||||
responses <- types.ToResponseProcessProposal(res)
|
||||
case *types.Request_LoadSnapshotChunk:
|
||||
res := s.app.LoadSnapshotChunk(*r.LoadSnapshotChunk)
|
||||
responses <- types.ToResponseLoadSnapshotChunk(res)
|
||||
|
||||
@@ -4,6 +4,8 @@ import (
|
||||
context "golang.org/x/net/context"
|
||||
)
|
||||
|
||||
//go:generate ../../scripts/mockery_generate.sh Application
|
||||
|
||||
// Application is an interface that enables any finite, deterministic state machine
|
||||
// to be driven by a blockchain-based replication engine via the ABCI.
|
||||
// All methods take a RequestXxx argument and return a ResponseXxx argument,
|
||||
@@ -18,7 +20,9 @@ type Application interface {
|
||||
CheckTx(RequestCheckTx) ResponseCheckTx // Validate a tx for the mempool
|
||||
|
||||
// Consensus Connection
|
||||
InitChain(RequestInitChain) ResponseInitChain // Initialize blockchain w validators/other info from TendermintCore
|
||||
InitChain(RequestInitChain) ResponseInitChain // Initialize blockchain w validators/other info from TendermintCore
|
||||
PrepareProposal(RequestPrepareProposal) ResponsePrepareProposal
|
||||
ProcessProposal(RequestProcessProposal) ResponseProcessProposal
|
||||
BeginBlock(RequestBeginBlock) ResponseBeginBlock // Signals the beginning of a block
|
||||
DeliverTx(RequestDeliverTx) ResponseDeliverTx // Deliver a tx for full processing
|
||||
EndBlock(RequestEndBlock) ResponseEndBlock // Signals the end of a block, returns changes to the validator set
|
||||
@@ -95,6 +99,27 @@ func (BaseApplication) ApplySnapshotChunk(req RequestApplySnapshotChunk) Respons
|
||||
return ResponseApplySnapshotChunk{}
|
||||
}
|
||||
|
||||
func (BaseApplication) PrepareProposal(req RequestPrepareProposal) ResponsePrepareProposal {
|
||||
trs := make([]*TxRecord, 0, len(req.Txs))
|
||||
var totalBytes int64
|
||||
for _, tx := range req.Txs {
|
||||
totalBytes += int64(len(tx))
|
||||
if totalBytes > req.MaxTxBytes {
|
||||
break
|
||||
}
|
||||
trs = append(trs, &TxRecord{
|
||||
Action: TxRecord_UNMODIFIED,
|
||||
Tx: tx,
|
||||
})
|
||||
}
|
||||
return ResponsePrepareProposal{TxRecords: trs}
|
||||
}
|
||||
|
||||
func (BaseApplication) ProcessProposal(req RequestProcessProposal) ResponseProcessProposal {
|
||||
return ResponseProcessProposal{
|
||||
Status: ResponseProcessProposal_ACCEPT}
|
||||
}
|
||||
|
||||
//-------------------------------------------------------
|
||||
|
||||
// GRPCApplication is a GRPC wrapper for Application
|
||||
@@ -182,3 +207,15 @@ func (app *GRPCApplication) ApplySnapshotChunk(
|
||||
res := app.app.ApplySnapshotChunk(*req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *GRPCApplication) PrepareProposal(
|
||||
ctx context.Context, req *RequestPrepareProposal) (*ResponsePrepareProposal, error) {
|
||||
res := app.app.PrepareProposal(*req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *GRPCApplication) ProcessProposal(
|
||||
ctx context.Context, req *RequestProcessProposal) (*ResponseProcessProposal, error) {
|
||||
res := app.app.ProcessProposal(*req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
@@ -159,6 +159,18 @@ func ToRequestApplySnapshotChunk(req RequestApplySnapshotChunk) *Request {
|
||||
}
|
||||
}
|
||||
|
||||
func ToRequestPrepareProposal(req RequestPrepareProposal) *Request {
|
||||
return &Request{
|
||||
Value: &Request_PrepareProposal{&req},
|
||||
}
|
||||
}
|
||||
|
||||
func ToRequestProcessProposal(req RequestProcessProposal) *Request {
|
||||
return &Request{
|
||||
Value: &Request_ProcessProposal{&req},
|
||||
}
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
|
||||
func ToResponseException(errStr string) *Response {
|
||||
@@ -256,3 +268,15 @@ func ToResponseApplySnapshotChunk(res ResponseApplySnapshotChunk) *Response {
|
||||
Value: &Response_ApplySnapshotChunk{&res},
|
||||
}
|
||||
}
|
||||
|
||||
func ToResponsePrepareProposal(res ResponsePrepareProposal) *Response {
|
||||
return &Response{
|
||||
Value: &Response_PrepareProposal{&res},
|
||||
}
|
||||
}
|
||||
|
||||
func ToResponseProcessProposal(res ResponseProcessProposal) *Response {
|
||||
return &Response{
|
||||
Value: &Response_ProcessProposal{&res},
|
||||
}
|
||||
}
|
||||
|
||||
abci/types/mocks/application.go (new file)
@@ -0,0 +1,238 @@
|
||||
// Code generated by mockery. DO NOT EDIT.
|
||||
|
||||
package mocks
|
||||
|
||||
import (
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
types "github.com/tendermint/tendermint/abci/types"
|
||||
)
|
||||
|
||||
// Application is an autogenerated mock type for the Application type
|
||||
type Application struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
// ApplySnapshotChunk provides a mock function with given fields: _a0
|
||||
func (_m *Application) ApplySnapshotChunk(_a0 types.RequestApplySnapshotChunk) types.ResponseApplySnapshotChunk {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 types.ResponseApplySnapshotChunk
|
||||
if rf, ok := ret.Get(0).(func(types.RequestApplySnapshotChunk) types.ResponseApplySnapshotChunk); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
r0 = ret.Get(0).(types.ResponseApplySnapshotChunk)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// BeginBlock provides a mock function with given fields: _a0
|
||||
func (_m *Application) BeginBlock(_a0 types.RequestBeginBlock) types.ResponseBeginBlock {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 types.ResponseBeginBlock
|
||||
if rf, ok := ret.Get(0).(func(types.RequestBeginBlock) types.ResponseBeginBlock); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
r0 = ret.Get(0).(types.ResponseBeginBlock)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// CheckTx provides a mock function with given fields: _a0
|
||||
func (_m *Application) CheckTx(_a0 types.RequestCheckTx) types.ResponseCheckTx {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 types.ResponseCheckTx
|
||||
if rf, ok := ret.Get(0).(func(types.RequestCheckTx) types.ResponseCheckTx); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
r0 = ret.Get(0).(types.ResponseCheckTx)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// Commit provides a mock function with given fields:
|
||||
func (_m *Application) Commit() types.ResponseCommit {
|
||||
ret := _m.Called()
|
||||
|
||||
var r0 types.ResponseCommit
|
||||
if rf, ok := ret.Get(0).(func() types.ResponseCommit); ok {
|
||||
r0 = rf()
|
||||
} else {
|
||||
r0 = ret.Get(0).(types.ResponseCommit)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// DeliverTx provides a mock function with given fields: _a0
|
||||
func (_m *Application) DeliverTx(_a0 types.RequestDeliverTx) types.ResponseDeliverTx {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 types.ResponseDeliverTx
|
||||
if rf, ok := ret.Get(0).(func(types.RequestDeliverTx) types.ResponseDeliverTx); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
r0 = ret.Get(0).(types.ResponseDeliverTx)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// EndBlock provides a mock function with given fields: _a0
|
||||
func (_m *Application) EndBlock(_a0 types.RequestEndBlock) types.ResponseEndBlock {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 types.ResponseEndBlock
|
||||
if rf, ok := ret.Get(0).(func(types.RequestEndBlock) types.ResponseEndBlock); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
r0 = ret.Get(0).(types.ResponseEndBlock)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// Info provides a mock function with given fields: _a0
|
||||
func (_m *Application) Info(_a0 types.RequestInfo) types.ResponseInfo {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 types.ResponseInfo
|
||||
if rf, ok := ret.Get(0).(func(types.RequestInfo) types.ResponseInfo); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
r0 = ret.Get(0).(types.ResponseInfo)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// InitChain provides a mock function with given fields: _a0
|
||||
func (_m *Application) InitChain(_a0 types.RequestInitChain) types.ResponseInitChain {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 types.ResponseInitChain
|
||||
if rf, ok := ret.Get(0).(func(types.RequestInitChain) types.ResponseInitChain); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
r0 = ret.Get(0).(types.ResponseInitChain)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// ListSnapshots provides a mock function with given fields: _a0
|
||||
func (_m *Application) ListSnapshots(_a0 types.RequestListSnapshots) types.ResponseListSnapshots {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 types.ResponseListSnapshots
|
||||
if rf, ok := ret.Get(0).(func(types.RequestListSnapshots) types.ResponseListSnapshots); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
r0 = ret.Get(0).(types.ResponseListSnapshots)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// LoadSnapshotChunk provides a mock function with given fields: _a0
|
||||
func (_m *Application) LoadSnapshotChunk(_a0 types.RequestLoadSnapshotChunk) types.ResponseLoadSnapshotChunk {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 types.ResponseLoadSnapshotChunk
|
||||
if rf, ok := ret.Get(0).(func(types.RequestLoadSnapshotChunk) types.ResponseLoadSnapshotChunk); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
r0 = ret.Get(0).(types.ResponseLoadSnapshotChunk)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// OfferSnapshot provides a mock function with given fields: _a0
|
||||
func (_m *Application) OfferSnapshot(_a0 types.RequestOfferSnapshot) types.ResponseOfferSnapshot {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 types.ResponseOfferSnapshot
|
||||
if rf, ok := ret.Get(0).(func(types.RequestOfferSnapshot) types.ResponseOfferSnapshot); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
r0 = ret.Get(0).(types.ResponseOfferSnapshot)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// PrepareProposal provides a mock function with given fields: _a0
|
||||
func (_m *Application) PrepareProposal(_a0 types.RequestPrepareProposal) types.ResponsePrepareProposal {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 types.ResponsePrepareProposal
|
||||
if rf, ok := ret.Get(0).(func(types.RequestPrepareProposal) types.ResponsePrepareProposal); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
r0 = ret.Get(0).(types.ResponsePrepareProposal)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// ProcessProposal provides a mock function with given fields: _a0
|
||||
func (_m *Application) ProcessProposal(_a0 types.RequestProcessProposal) types.ResponseProcessProposal {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 types.ResponseProcessProposal
|
||||
if rf, ok := ret.Get(0).(func(types.RequestProcessProposal) types.ResponseProcessProposal); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
r0 = ret.Get(0).(types.ResponseProcessProposal)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// Query provides a mock function with given fields: _a0
|
||||
func (_m *Application) Query(_a0 types.RequestQuery) types.ResponseQuery {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 types.ResponseQuery
|
||||
if rf, ok := ret.Get(0).(func(types.RequestQuery) types.ResponseQuery); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
r0 = ret.Get(0).(types.ResponseQuery)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// SetOption provides a mock function with given fields: _a0
|
||||
func (_m *Application) SetOption(_a0 types.RequestSetOption) types.ResponseSetOption {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 types.ResponseSetOption
|
||||
if rf, ok := ret.Get(0).(func(types.RequestSetOption) types.ResponseSetOption); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
r0 = ret.Get(0).(types.ResponseSetOption)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
type mockConstructorTestingTNewApplication interface {
|
||||
mock.TestingT
|
||||
Cleanup(func())
|
||||
}
|
||||
|
||||
// NewApplication creates a new instance of Application. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
|
||||
func NewApplication(t mockConstructorTestingTNewApplication) *Application {
|
||||
mock := &Application{}
|
||||
mock.Mock.Test(t)
|
||||
|
||||
t.Cleanup(func() { mock.AssertExpectations(t) })
|
||||
|
||||
return mock
|
||||
}
|
||||
abci/types/mocks/base.go (new file)
@@ -0,0 +1,187 @@
|
||||
package mocks
|
||||
|
||||
import (
|
||||
types "github.com/tendermint/tendermint/abci/types"
|
||||
)
|
||||
|
||||
// BaseMock provides a wrapper around the generated Application mock and a BaseApplication.
|
||||
// BaseMock first tries to use the mock's implementation of the method.
|
||||
// If no functionality was provided for the mock by the user, BaseMock dispatches
|
||||
// to the BaseApplication and uses its functionality.
|
||||
// BaseMock allows users to provide mocked functionality for only the methods that matter
|
||||
// for their test while avoiding a panic if the code calls Application methods that are
|
||||
// not relevant to the test.
|
||||
type BaseMock struct {
|
||||
base *types.BaseApplication
|
||||
*Application
|
||||
}
|
||||
|
||||
func NewBaseMock() BaseMock {
|
||||
return BaseMock{
|
||||
base: types.NewBaseApplication(),
|
||||
Application: new(Application),
|
||||
}
|
||||
}
|
||||
|
||||
// Info/Query Connection
|
||||
// Return application info
|
||||
func (m BaseMock) Info(input types.RequestInfo) types.ResponseInfo {
|
||||
var ret types.ResponseInfo
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ret = m.base.Info(input)
|
||||
}
|
||||
}()
|
||||
ret = m.Application.Info(input)
|
||||
return ret
|
||||
}
|
||||
|
||||
func (m BaseMock) Query(input types.RequestQuery) types.ResponseQuery {
|
||||
var ret types.ResponseQuery
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ret = m.base.Query(input)
|
||||
}
|
||||
}()
|
||||
ret = m.Application.Query(input)
|
||||
return ret
|
||||
}
|
||||
|
||||
// Mempool Connection
|
||||
// Validate a tx for the mempool
|
||||
func (m BaseMock) CheckTx(input types.RequestCheckTx) types.ResponseCheckTx {
|
||||
var ret types.ResponseCheckTx
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ret = m.base.CheckTx(input)
|
||||
}
|
||||
}()
|
||||
ret = m.Application.CheckTx(input)
|
||||
return ret
|
||||
}
|
||||
|
||||
// Consensus Connection
|
||||
// Initialize blockchain w validators/other info from TendermintCore
|
||||
func (m BaseMock) InitChain(input types.RequestInitChain) types.ResponseInitChain {
|
||||
var ret types.ResponseInitChain
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ret = m.base.InitChain(input)
|
||||
}
|
||||
}()
|
||||
ret = m.Application.InitChain(input)
|
||||
return ret
|
||||
}
|
||||
|
||||
func (m BaseMock) PrepareProposal(input types.RequestPrepareProposal) types.ResponsePrepareProposal {
|
||||
var ret types.ResponsePrepareProposal
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ret = m.base.PrepareProposal(input)
|
||||
}
|
||||
}()
|
||||
ret = m.Application.PrepareProposal(input)
|
||||
return ret
|
||||
}
|
||||
|
||||
func (m BaseMock) ProcessProposal(input types.RequestProcessProposal) types.ResponseProcessProposal {
|
||||
var ret types.ResponseProcessProposal
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ret = m.base.ProcessProposal(input)
|
||||
}
|
||||
}()
|
||||
ret = m.Application.ProcessProposal(input)
|
||||
return ret
|
||||
}
|
||||
|
||||
// Commit the state and return the application Merkle root hash
|
||||
func (m BaseMock) Commit() types.ResponseCommit {
|
||||
var ret types.ResponseCommit
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ret = m.base.Commit()
|
||||
}
|
||||
}()
|
||||
ret = m.Application.Commit()
|
||||
return ret
|
||||
}
|
||||
|
||||
// State Sync Connection
|
||||
// List available snapshots
|
||||
func (m BaseMock) ListSnapshots(input types.RequestListSnapshots) types.ResponseListSnapshots {
|
||||
var ret types.ResponseListSnapshots
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ret = m.base.ListSnapshots(input)
|
||||
}
|
||||
}()
|
||||
ret = m.Application.ListSnapshots(input)
|
||||
return ret
|
||||
}
|
||||
|
||||
func (m BaseMock) OfferSnapshot(input types.RequestOfferSnapshot) types.ResponseOfferSnapshot {
|
||||
var ret types.ResponseOfferSnapshot
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ret = m.base.OfferSnapshot(input)
|
||||
}
|
||||
}()
|
||||
ret = m.Application.OfferSnapshot(input)
|
||||
return ret
|
||||
}
|
||||
|
||||
func (m BaseMock) LoadSnapshotChunk(input types.RequestLoadSnapshotChunk) types.ResponseLoadSnapshotChunk {
|
||||
var ret types.ResponseLoadSnapshotChunk
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ret = m.base.LoadSnapshotChunk(input)
|
||||
}
|
||||
}()
|
||||
ret = m.Application.LoadSnapshotChunk(input)
|
||||
return ret
|
||||
}
|
||||
|
||||
func (m BaseMock) ApplySnapshotChunk(input types.RequestApplySnapshotChunk) types.ResponseApplySnapshotChunk {
|
||||
var ret types.ResponseApplySnapshotChunk
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ret = m.base.ApplySnapshotChunk(input)
|
||||
}
|
||||
}()
|
||||
ret = m.Application.ApplySnapshotChunk(input)
|
||||
return ret
|
||||
}
|
||||
|
||||
func (m BaseMock) BeginBlock(input types.RequestBeginBlock) types.ResponseBeginBlock {
|
||||
var ret types.ResponseBeginBlock
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ret = m.base.BeginBlock(input)
|
||||
}
|
||||
}()
|
||||
ret = m.Application.BeginBlock(input)
|
||||
return ret
|
||||
}
|
||||
|
||||
func (m BaseMock) DeliverTx(input types.RequestDeliverTx) types.ResponseDeliverTx {
|
||||
var ret types.ResponseDeliverTx
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ret = m.base.DeliverTx(input)
|
||||
}
|
||||
}()
|
||||
ret = m.Application.DeliverTx(input)
|
||||
return ret
|
||||
}
|
||||
|
||||
func (m BaseMock) EndBlock(input types.RequestEndBlock) types.ResponseEndBlock {
|
||||
var ret types.ResponseEndBlock
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ret = m.base.EndBlock(input)
|
||||
}
|
||||
}()
|
||||
ret = m.Application.EndBlock(input)
|
||||
return ret
|
||||
}
|
||||
@@ -41,6 +41,16 @@ func (r ResponseQuery) IsErr() bool {
|
||||
return r.Code != CodeTypeOK
|
||||
}
|
||||
|
||||
// IsAccepted returns true if Code is ACCEPT
|
||||
func (r ResponseProcessProposal) IsAccepted() bool {
|
||||
return r.Status == ResponseProcessProposal_ACCEPT
|
||||
}
|
||||
|
||||
// IsStatusUnknown returns true if Code is UNKNOWN
|
||||
func (r ResponseProcessProposal) IsStatusUnknown() bool {
|
||||
return r.Status == ResponseProcessProposal_UNKNOWN
|
||||
}
|
||||
|
||||
//---------------------------------------------------------------------------
|
||||
// override JSON marshaling so we emit defaults (ie. disable omitempty)
|
||||
|
||||
File diff suppressed because it is too large

appveyor.yml
@@ -1,12 +1,12 @@
|
||||
version: 1.0.{build}
|
||||
configuration: Release
|
||||
platform:
|
||||
- x64
|
||||
- x86
|
||||
- x64
|
||||
- x86
|
||||
clone_folder: c:\go\path\src\github.com\tendermint\tendermint
|
||||
before_build:
|
||||
- cmd: set GOPATH=%GOROOT%\path
|
||||
- cmd: set PATH=%GOPATH%\bin;%PATH%
|
||||
- cmd: set GOPATH=%GOROOT%\path
|
||||
- cmd: set PATH=%GOPATH%\bin;%PATH%
|
||||
build_script:
|
||||
- cmd: make test
|
||||
- cmd: make test
|
||||
test: off
|
||||
|
||||
@@ -1,41 +1,42 @@
|
||||
/*
|
||||
Package Behaviour provides a mechanism for reactors to report behaviour of peers.
|
||||
Package Behavior provides a mechanism for reactors to report behavior of peers.
|
||||
|
||||
Instead of a reactor calling the switch directly it will call the behaviour module which will
|
||||
Instead of a reactor calling the switch directly it will call the behavior module which will
|
||||
handle the stopping and marking of the peer as good on behalf of the reactor.
|
||||
|
||||
There are four different behaviours a reactor can report.
|
||||
There are four different behaviors a reactor can report.
|
||||
|
||||
1. bad message
|
||||
|
||||
type badMessage struct {
|
||||
explanation string
|
||||
}
|
||||
type badMessage struct {
|
||||
explanation string
|
||||
}
|
||||
|
||||
# This message will request the peer be stopped for an error
|
||||
This message will request the peer be stopped for an error
|
||||
|
||||
2. message out of order
|
||||
|
||||
type messageOutOfOrder struct {
|
||||
explanation string
|
||||
}
|
||||
type messageOutOfOrder struct {
|
||||
explanation string
|
||||
}
|
||||
|
||||
# This message will request the peer be stopped for an error
|
||||
This message will request the peer be stopped for an error
|
||||
|
||||
3. consensus vote
|
||||
|
||||
type consensusVote struct {
|
||||
explanation string
|
||||
}
|
||||
type consensusVote struct {
|
||||
explanation string
|
||||
}
|
||||
|
||||
# This message will request the peer be marked as good
|
||||
This message will request the peer be marked as good
|
||||
|
||||
4. block part
|
||||
|
||||
type blockPart struct {
|
||||
explanation string
|
||||
}
|
||||
type blockPart struct {
|
||||
explanation string
|
||||
}
|
||||
|
||||
This message will request the peer be marked as good
|
||||
|
||||
*/
|
||||
package behaviour
|
||||
package behaviour //nolint:misspell
|
||||
|
||||
@@ -1,12 +1,12 @@
|
||||
package behaviour
|
||||
package behaviour //nolint:misspell
|
||||
|
||||
import (
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
)
|
||||
|
||||
// PeerBehaviour is a struct describing a behaviour a peer performed.
|
||||
// PeerBehaviour is a struct describing a behavior a peer performed.
|
||||
// `peerID` identifies the peer and reason characterizes the specific
|
||||
// behaviour performed by the peer.
|
||||
// behavior performed by the peer.
|
||||
type PeerBehaviour struct {
|
||||
peerID p2p.ID
|
||||
reason interface{}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
package behaviour
|
||||
package behaviour //nolint:misspell
|
||||
|
||||
import (
|
||||
"errors"
|
||||
@@ -7,13 +7,13 @@ import (
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
)
|
||||
|
||||
// Reporter provides an interface for reactors to report the behaviour
|
||||
// Reporter provides an interface for reactors to report the behavior
|
||||
// of peers synchronously to other components.
|
||||
type Reporter interface {
|
||||
Report(behaviour PeerBehaviour) error
|
||||
Report(behavior PeerBehaviour) error
|
||||
}
|
||||
|
||||
// SwitchReporter reports peer behaviour to an internal Switch.
|
||||
// SwitchReporter reports peer behavior to an internal Switch.
|
||||
type SwitchReporter struct {
|
||||
sw *p2p.Switch
|
||||
}
|
||||
@@ -25,14 +25,14 @@ func NewSwitchReporter(sw *p2p.Switch) *SwitchReporter {
|
||||
}
|
||||
}
|
||||
|
||||
// Report reports the behaviour of a peer to the Switch.
|
||||
func (spbr *SwitchReporter) Report(behaviour PeerBehaviour) error {
|
||||
peer := spbr.sw.Peers().Get(behaviour.peerID)
|
||||
// Report reports the behavior of a peer to the Switch.
|
||||
func (spbr *SwitchReporter) Report(behavior PeerBehaviour) error {
|
||||
peer := spbr.sw.Peers().Get(behavior.peerID)
|
||||
if peer == nil {
|
||||
return errors.New("peer not found")
|
||||
}
|
||||
|
||||
switch reason := behaviour.reason.(type) {
|
||||
switch reason := behavior.reason.(type) {
|
||||
case consensusVote, blockPart:
|
||||
spbr.sw.MarkPeerAsGood(peer)
|
||||
case badMessage:
|
||||
@@ -48,14 +48,14 @@ func (spbr *SwitchReporter) Report(behaviour PeerBehaviour) error {
|
||||
|
||||
// MockReporter is a concrete implementation of the Reporter
|
||||
// interface used in reactor tests to ensure reactors report the correct
|
||||
// behaviour in manufactured scenarios.
|
||||
// behavior in manufactured scenarios.
|
||||
type MockReporter struct {
|
||||
mtx tmsync.RWMutex
|
||||
pb map[p2p.ID][]PeerBehaviour
|
||||
}
|
||||
|
||||
// NewMockReporter returns a Reporter which records all reported
|
||||
// behaviours in memory.
|
||||
// behaviors in memory.
|
||||
func NewMockReporter() *MockReporter {
|
||||
return &MockReporter{
|
||||
pb: map[p2p.ID][]PeerBehaviour{},
|
||||
@@ -63,15 +63,15 @@ func NewMockReporter() *MockReporter {
|
||||
}
|
||||
|
||||
// Report stores the PeerBehaviour produced by the peer identified by peerID.
|
||||
func (mpbr *MockReporter) Report(behaviour PeerBehaviour) error {
|
||||
func (mpbr *MockReporter) Report(behavior PeerBehaviour) error {
|
||||
mpbr.mtx.Lock()
|
||||
defer mpbr.mtx.Unlock()
|
||||
mpbr.pb[behaviour.peerID] = append(mpbr.pb[behaviour.peerID], behaviour)
|
||||
mpbr.pb[behavior.peerID] = append(mpbr.pb[behavior.peerID], behavior)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetBehaviours returns all behaviours reported on the peer identified by peerID.
|
||||
// GetBehaviours returns all behaviors reported on the peer identified by peerID.
|
||||
func (mpbr *MockReporter) GetBehaviours(peerID p2p.ID) []PeerBehaviour {
|
||||
mpbr.mtx.RLock()
|
||||
defer mpbr.mtx.RUnlock()
|
||||
|
||||
@@ -1,41 +1,41 @@
|
||||
package behaviour_test
|
||||
package behaviour_test //nolint:misspell
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
bh "github.com/tendermint/tendermint/behaviour"
|
||||
bh "github.com/tendermint/tendermint/behaviour" //nolint:misspell
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
)
|
||||
|
||||
// TestMockReporter tests the MockReporter's ability to store reported
|
||||
// peer behaviour in memory indexed by the peerID.
|
||||
// peer behavior in memory indexed by the peerID.
|
||||
func TestMockReporter(t *testing.T) {
|
||||
var peerID p2p.ID = "MockPeer"
|
||||
pr := bh.NewMockReporter()
|
||||
|
||||
behaviours := pr.GetBehaviours(peerID)
|
||||
if len(behaviours) != 0 {
|
||||
t.Error("Expected to have no behaviours reported")
|
||||
behaviors := pr.GetBehaviours(peerID)
|
||||
if len(behaviors) != 0 {
|
||||
t.Error("Expected to have no behaviors reported")
|
||||
}
|
||||
|
||||
badMessage := bh.BadMessage(peerID, "bad message")
|
||||
if err := pr.Report(badMessage); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
behaviours = pr.GetBehaviours(peerID)
|
||||
if len(behaviours) != 1 {
|
||||
t.Error("Expected the peer have one reported behaviour")
|
||||
behaviors = pr.GetBehaviours(peerID)
|
||||
if len(behaviors) != 1 {
|
||||
t.Error("Expected the peer have one reported behavior")
|
||||
}
|
||||
|
||||
if behaviours[0] != badMessage {
|
||||
if behaviors[0] != badMessage {
|
||||
t.Error("Expected Bad Message to have been reported")
|
||||
}
|
||||
}
|
||||
|
||||
type scriptItem struct {
|
||||
peerID p2p.ID
|
||||
behaviour bh.PeerBehaviour
|
||||
peerID p2p.ID
|
||||
behavior bh.PeerBehaviour
|
||||
}
|
||||
|
||||
// equalBehaviours returns true if a and b contain the same PeerBehaviours with
|
||||
@@ -44,26 +44,26 @@ func equalBehaviours(a []bh.PeerBehaviour, b []bh.PeerBehaviour) bool {
|
||||
aHistogram := map[bh.PeerBehaviour]int{}
|
||||
bHistogram := map[bh.PeerBehaviour]int{}
|
||||
|
||||
for _, behaviour := range a {
|
||||
aHistogram[behaviour]++
|
||||
for _, behavior := range a {
|
||||
aHistogram[behavior]++
|
||||
}
|
||||
|
||||
for _, behaviour := range b {
|
||||
bHistogram[behaviour]++
|
||||
for _, behavior := range b {
|
||||
bHistogram[behavior]++
|
||||
}
|
||||
|
||||
if len(aHistogram) != len(bHistogram) {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, behaviour := range a {
|
||||
if aHistogram[behaviour] != bHistogram[behaviour] {
|
||||
for _, behavior := range a {
|
||||
if aHistogram[behavior] != bHistogram[behavior] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
for _, behaviour := range b {
|
||||
if bHistogram[behaviour] != aHistogram[behaviour] {
|
||||
for _, behavior := range b {
|
||||
if bHistogram[behavior] != aHistogram[behavior] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
@@ -72,8 +72,8 @@ func equalBehaviours(a []bh.PeerBehaviour, b []bh.PeerBehaviour) bool {
|
||||
}
|
||||
|
||||
// TestEqualPeerBehaviours tests that equalBehaviours can tell that two slices
|
||||
// of peer behaviours can be compared for the behaviours they contain and the
|
||||
// frequencies that those behaviours occur.
|
||||
// of peer behaviors can be compared for the behaviors they contain and the
|
||||
// frequencies that those behaviors occur.
|
||||
func TestEqualPeerBehaviours(t *testing.T) {
var (
peerID p2p.ID = "MockPeer"
@@ -85,7 +85,7 @@ func TestEqualPeerBehaviours(t *testing.T) {
}{
// Empty sets
{[]bh.PeerBehaviour{}, []bh.PeerBehaviour{}},
// Single behaviours
// Single behaviors
{[]bh.PeerBehaviour{consensusVote}, []bh.PeerBehaviour{consensusVote}},
// Equal Frequencies
{[]bh.PeerBehaviour{consensusVote, consensusVote},
@@ -100,9 +100,9 @@ func TestEqualPeerBehaviours(t *testing.T) {
}{
// Comparing empty sets to non empty sets
{[]bh.PeerBehaviour{}, []bh.PeerBehaviour{consensusVote}},
// Different behaviours
// Different behaviors
{[]bh.PeerBehaviour{consensusVote}, []bh.PeerBehaviour{blockPart}},
// Same behaviour with different frequencies
// Same behavior with different frequencies
{[]bh.PeerBehaviour{consensusVote},
[]bh.PeerBehaviour{consensusVote, consensusVote}},
}
@@ -128,8 +128,8 @@ func TestEqualPeerBehaviours(t *testing.T) {
func TestMockPeerBehaviourReporterConcurrency(t *testing.T) {
var (
behaviourScript = []struct {
peerID p2p.ID
behaviours []bh.PeerBehaviour
peerID p2p.ID
behaviors []bh.PeerBehaviour
}{
{"1", []bh.PeerBehaviour{bh.ConsensusVote("1", "")}},
{"2", []bh.PeerBehaviour{bh.ConsensusVote("2", ""), bh.ConsensusVote("2", ""), bh.ConsensusVote("2", "")}},
@@ -166,7 +166,7 @@ func TestMockPeerBehaviourReporterConcurrency(t *testing.T) {
for {
select {
case pb := <-scriptItems:
if err := pr.Report(pb.behaviour); err != nil {
if err := pr.Report(pb.behavior); err != nil {
t.Error(err)
}
case <-done:
@@ -181,7 +181,7 @@ func TestMockPeerBehaviourReporterConcurrency(t *testing.T) {
go func() {
defer sendingWg.Done()
for _, item := range behaviourScript {
for _, reason := range item.behaviours {
for _, reason := range item.behaviors {
scriptItems <- scriptItem{item.peerID, reason}
}
}
@@ -197,9 +197,9 @@ func TestMockPeerBehaviourReporterConcurrency(t *testing.T) {

for _, items := range behaviourScript {
reported := pr.GetBehaviours(items.peerID)
if !equalBehaviours(reported, items.behaviours) {
if !equalBehaviours(reported, items.behaviors) {
t.Errorf("expected peer %s to have behaved \nExpected: %#v \nGot %#v \n",
items.peerID, items.behaviours, reported)
items.peerID, items.behaviors, reported)
}
}
}

@@ -6,7 +6,6 @@ import (

"github.com/gogo/protobuf/proto"

"github.com/tendermint/tendermint/p2p"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
"github.com/tendermint/tendermint/types"
)
@@ -20,6 +19,58 @@ const (
BlockResponseMessageFieldKeySize
)

// EncodeMsg encodes a Protobuf message
func EncodeMsg(pb proto.Message) ([]byte, error) {
msg := bcproto.Message{}

switch pb := pb.(type) {
case *bcproto.BlockRequest:
msg.Sum = &bcproto.Message_BlockRequest{BlockRequest: pb}
case *bcproto.BlockResponse:
msg.Sum = &bcproto.Message_BlockResponse{BlockResponse: pb}
case *bcproto.NoBlockResponse:
msg.Sum = &bcproto.Message_NoBlockResponse{NoBlockResponse: pb}
case *bcproto.StatusRequest:
msg.Sum = &bcproto.Message_StatusRequest{StatusRequest: pb}
case *bcproto.StatusResponse:
msg.Sum = &bcproto.Message_StatusResponse{StatusResponse: pb}
default:
return nil, fmt.Errorf("unknown message type %T", pb)
}

bz, err := proto.Marshal(&msg)
if err != nil {
return nil, fmt.Errorf("unable to marshal %T: %w", pb, err)
}

return bz, nil
}

// DecodeMsg decodes a Protobuf message.
func DecodeMsg(bz []byte) (proto.Message, error) {
pb := &bcproto.Message{}

err := proto.Unmarshal(bz, pb)
if err != nil {
return nil, err
}

switch msg := pb.Sum.(type) {
case *bcproto.Message_BlockRequest:
return msg.BlockRequest, nil
case *bcproto.Message_BlockResponse:
return msg.BlockResponse, nil
case *bcproto.Message_NoBlockResponse:
return msg.NoBlockResponse, nil
case *bcproto.Message_StatusRequest:
return msg.StatusRequest, nil
case *bcproto.Message_StatusResponse:
return msg.StatusResponse, nil
default:
return nil, fmt.Errorf("unknown message type %T", msg)
}
}

// ValidateMsg validates a message.
func ValidateMsg(pb proto.Message) error {
if pb == nil {
@@ -57,31 +108,3 @@ func ValidateMsg(pb proto.Message) error {
}
return nil
}

// EncodeMsg encodes a Protobuf message
//
// Deprecated: Will be removed in v0.37.
func EncodeMsg(pb proto.Message) ([]byte, error) {
if um, ok := pb.(p2p.Wrapper); ok {
pb = um.Wrap()
}
bz, err := proto.Marshal(pb)
if err != nil {
return nil, fmt.Errorf("unable to marshal %T: %w", pb, err)
}

return bz, nil
}

// DecodeMsg decodes a Protobuf message.
//
// Deprecated: Will be removed in v0.37.
func DecodeMsg(bz []byte) (proto.Message, error) {
pb := &bcproto.Message{}

err := proto.Unmarshal(bz, pb)
if err != nil {
return nil, err
}
return pb.Unwrap()
}

@@ -78,7 +78,7 @@ func TestBcStatusResponseMessageValidateBasic(t *testing.T) {
}
}

//nolint:lll // ignore line length in tests
// nolint:lll // ignore line length in tests
func TestBlockchainMessageVectors(t *testing.T) {
block := types.MakeBlock(int64(3), []types.Tx{types.Tx("Hello World")}, nil, nil)
block.Version.Block = 11 // overwrite updated protocol version

@@ -32,7 +32,6 @@ const (
maxTotalRequesters = 600
maxPendingRequests = maxTotalRequesters
maxPendingRequestsPerPeer = 20
requestRetrySeconds = 30

// Minimum recv rate to ensure we're receiving blocks from a peer fast
// enough. If a peer is not sending us data at at least that rate, we
@@ -411,7 +410,6 @@ func (pool *BlockPool) sendError(err error, peerID p2p.ID) {
}

// for debugging purposes
//
//nolint:unused
func (pool *BlockPool) debug() string {
pool.mtx.Lock()
@@ -603,7 +601,7 @@ OUTER_LOOP:
}
peer = bpr.pool.pickIncrAvailablePeer(bpr.height)
if peer == nil {
bpr.Logger.Debug("No peers currently available; will retry shortly", "height", bpr.height)
// log.Info("No peers available", "height", height)
time.Sleep(requestIntervalMS * time.Millisecond)
continue PICK_PEER_LOOP
}
@@ -613,7 +611,6 @@ OUTER_LOOP:
bpr.peerID = peer.id
bpr.mtx.Unlock()

to := time.NewTimer(requestRetrySeconds * time.Second)
// Send request and wait.
bpr.pool.sendRequest(bpr.height, peer.id)
WAIT_LOOP:
@@ -626,11 +623,6 @@ OUTER_LOOP:
return
case <-bpr.Quit():
return
case <-to.C:
bpr.Logger.Debug("Retrying block request after timeout", "height", bpr.height, "peer", bpr.peerID)
// Simulate a redo
bpr.reset()
continue OUTER_LOOP
case peerID := <-bpr.redoCh:
if peerID == bpr.peerID {
bpr.reset()

@@ -5,8 +5,6 @@ import (
"reflect"
"time"

"github.com/gogo/protobuf/proto"

bc "github.com/tendermint/tendermint/blockchain"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/p2p"
@@ -146,20 +144,21 @@ func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
SendQueueCapacity: 1000,
RecvBufferCapacity: 50 * 4096,
RecvMessageCapacity: bc.MaxMsgSize,
MessageType: &bcproto.Message{},
},
}
}

// AddPeer implements Reactor by sending our state to peer.
func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) {
p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
ChannelID: BlockchainChannel,
Message: &bcproto.StatusResponse{
Base: bcR.store.Base(),
Height: bcR.store.Height(),
},
}, bcR.Logger)
msgBytes, err := bc.EncodeMsg(&bcproto.StatusResponse{
Base: bcR.store.Base(),
Height: bcR.store.Height()})
if err != nil {
bcR.Logger.Error("could not convert msg to protobuf", "err", err)
return
}

peer.Send(BlockchainChannel, msgBytes)
// it's OK if send fails. will try later in poolRoutine

// peer is added to the pool once we receive the first
|
||||
@@ -183,73 +182,75 @@ func (bcR *BlockchainReactor) respondToPeer(msg *bcproto.BlockRequest,
|
||||
bcR.Logger.Error("could not convert msg to protobuf", "err", err)
|
||||
return false
|
||||
}
|
||||
return p2p.TrySendEnvelopeShim(src, p2p.Envelope{ //nolint: staticcheck
|
||||
ChannelID: BlockchainChannel,
|
||||
Message: &bcproto.BlockResponse{Block: bl},
|
||||
}, bcR.Logger)
|
||||
|
||||
msgBytes, err := bc.EncodeMsg(&bcproto.BlockResponse{Block: bl})
|
||||
if err != nil {
|
||||
bcR.Logger.Error("could not marshal msg", "err", err)
|
||||
return false
|
||||
}
|
||||
|
||||
return src.TrySend(BlockchainChannel, msgBytes)
|
||||
}
|
||||
|
||||
return p2p.TrySendEnvelopeShim(src, p2p.Envelope{ //nolint: staticcheck
|
||||
ChannelID: BlockchainChannel,
|
||||
Message: &bcproto.NoBlockResponse{Height: msg.Height},
|
||||
}, bcR.Logger)
|
||||
bcR.Logger.Info("Peer asking for a block we don't have", "src", src, "height", msg.Height)
|
||||
|
||||
msgBytes, err := bc.EncodeMsg(&bcproto.NoBlockResponse{Height: msg.Height})
|
||||
if err != nil {
|
||||
bcR.Logger.Error("could not convert msg to protobuf", "err", err)
|
||||
return false
|
||||
}
|
||||
|
||||
return src.TrySend(BlockchainChannel, msgBytes)
|
||||
}
|
||||
|
||||
func (bcR *BlockchainReactor) ReceiveEnvelope(e p2p.Envelope) {
|
||||
if err := bc.ValidateMsg(e.Message); err != nil {
|
||||
bcR.Logger.Error("Peer sent us invalid msg", "peer", e.Src, "msg", e.Message, "err", err)
|
||||
bcR.Switch.StopPeerForError(e.Src, err)
|
||||
// Receive implements Reactor by handling 4 types of messages (look below).
|
||||
func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
|
||||
msg, err := bc.DecodeMsg(msgBytes)
|
||||
if err != nil {
|
||||
bcR.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err)
|
||||
bcR.Switch.StopPeerForError(src, err)
|
||||
return
|
||||
}
|
||||
|
||||
bcR.Logger.Debug("Receive", "e.Src", e.Src, "chID", e.ChannelID, "msg", e.Message)
|
||||
if err = bc.ValidateMsg(msg); err != nil {
|
||||
bcR.Logger.Error("Peer sent us invalid msg", "peer", src, "msg", msg, "err", err)
|
||||
bcR.Switch.StopPeerForError(src, err)
|
||||
return
|
||||
}
|
||||
|
||||
switch msg := e.Message.(type) {
|
||||
bcR.Logger.Debug("Receive", "src", src, "chID", chID, "msg", msg)
|
||||
|
||||
switch msg := msg.(type) {
|
||||
case *bcproto.BlockRequest:
|
||||
bcR.respondToPeer(msg, e.Src)
|
||||
bcR.respondToPeer(msg, src)
|
||||
case *bcproto.BlockResponse:
|
||||
bi, err := types.BlockFromProto(msg.Block)
|
||||
if err != nil {
|
||||
bcR.Logger.Error("Block content is invalid", "err", err)
|
||||
return
|
||||
}
|
||||
bcR.pool.AddBlock(e.Src.ID(), bi, msg.Block.Size())
|
||||
bcR.pool.AddBlock(src.ID(), bi, len(msgBytes))
|
||||
case *bcproto.StatusRequest:
|
||||
// Send peer our state.
|
||||
p2p.TrySendEnvelopeShim(e.Src, p2p.Envelope{ //nolint: staticcheck
|
||||
ChannelID: BlockchainChannel,
|
||||
Message: &bcproto.StatusResponse{
|
||||
Height: bcR.store.Height(),
|
||||
Base: bcR.store.Base(),
|
||||
},
|
||||
}, bcR.Logger)
|
||||
msgBytes, err := bc.EncodeMsg(&bcproto.StatusResponse{
|
||||
Height: bcR.store.Height(),
|
||||
Base: bcR.store.Base(),
|
||||
})
|
||||
if err != nil {
|
||||
bcR.Logger.Error("could not convert msg to protobut", "err", err)
|
||||
return
|
||||
}
|
||||
src.TrySend(BlockchainChannel, msgBytes)
|
||||
case *bcproto.StatusResponse:
|
||||
// Got a peer status. Unverified.
|
||||
bcR.pool.SetPeerRange(e.Src.ID(), msg.Base, msg.Height)
|
||||
bcR.pool.SetPeerRange(src.ID(), msg.Base, msg.Height)
|
||||
case *bcproto.NoBlockResponse:
|
||||
bcR.Logger.Debug("Peer does not have requested block", "peer", e.Src, "height", msg.Height)
|
||||
bcR.Logger.Debug("Peer does not have requested block", "peer", src, "height", msg.Height)
|
||||
default:
|
||||
bcR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
|
||||
}
|
||||
}
|
||||
|
||||
func (bcR *BlockchainReactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {
|
||||
msg := &bcproto.Message{}
|
||||
err := proto.Unmarshal(msgBytes, msg)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
uw, err := msg.Unwrap()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
bcR.ReceiveEnvelope(p2p.Envelope{
|
||||
ChannelID: chID,
|
||||
Src: peer,
|
||||
Message: uw,
|
||||
})
|
||||
}
|
||||
|
||||
// Handle messages from the poolReactor telling the reactor what to do.
|
||||
// NOTE: Don't sleep in the FOR_LOOP or otherwise slow it down!
|
||||
func (bcR *BlockchainReactor) poolRoutine(stateSynced bool) {
|
||||
@@ -285,10 +286,13 @@ func (bcR *BlockchainReactor) poolRoutine(stateSynced bool) {
|
||||
if peer == nil {
|
||||
continue
|
||||
}
|
||||
queued := p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
|
||||
ChannelID: BlockchainChannel,
|
||||
Message: &bcproto.BlockRequest{Height: request.Height},
|
||||
}, bcR.Logger)
|
||||
msgBytes, err := bc.EncodeMsg(&bcproto.BlockRequest{Height: request.Height})
|
||||
if err != nil {
|
||||
bcR.Logger.Error("could not convert msg to proto", "err", err)
|
||||
continue
|
||||
}
|
||||
|
||||
queued := peer.TrySend(BlockchainChannel, msgBytes)
|
||||
if !queued {
|
||||
bcR.Logger.Debug("Send queue is full, drop block request", "peer", peer.ID(), "height", request.Height)
|
||||
}
|
||||
@@ -300,7 +304,7 @@ func (bcR *BlockchainReactor) poolRoutine(stateSynced bool) {
|
||||
|
||||
case <-statusUpdateTicker.C:
|
||||
// ask for status updates
|
||||
go bcR.BroadcastStatusRequest() //nolint: errcheck
|
||||
go bcR.BroadcastStatusRequest() // nolint: errcheck
|
||||
|
||||
}
|
||||
}
|
||||
@@ -356,14 +360,20 @@ FOR_LOOP:
|
||||
didProcessCh <- struct{}{}
|
||||
}
|
||||
|
||||
firstParts := first.MakePartSet(types.BlockPartSizeBytes)
|
||||
firstParts, err := first.MakePartSet(types.BlockPartSizeBytes)
|
||||
if err != nil {
|
||||
bcR.Logger.Error("failed to make ",
|
||||
"height", first.Height,
|
||||
"err", err.Error())
|
||||
break FOR_LOOP
|
||||
}
|
||||
firstPartSetHeader := firstParts.Header()
|
||||
firstID := types.BlockID{Hash: first.Hash(), PartSetHeader: firstPartSetHeader}
|
||||
// Finally, verify the first block using the second's commit
|
||||
// NOTE: we can probably make this more efficient, but note that calling
|
||||
// first.Hash() doesn't verify the tx contents, so MakePartSet() is
|
||||
// currently necessary.
|
||||
err := state.Validators.VerifyCommitLight(
|
||||
err = state.Validators.VerifyCommitLight(
|
||||
chainID, firstID, first.Height, second.LastCommit)
|
||||
|
||||
if err == nil {
|
||||
@@ -421,9 +431,13 @@ FOR_LOOP:
|
||||
|
||||
// BroadcastStatusRequest broadcasts `BlockStore` base and height.
|
||||
func (bcR *BlockchainReactor) BroadcastStatusRequest() error {
|
||||
bcR.Switch.BroadcastEnvelope(p2p.Envelope{
|
||||
ChannelID: BlockchainChannel,
|
||||
Message: &bcproto.StatusRequest{},
|
||||
})
|
||||
bm, err := bc.EncodeMsg(&bcproto.StatusRequest{})
|
||||
if err != nil {
|
||||
bcR.Logger.Error("could not convert msg to proto", "err", err)
|
||||
return fmt.Errorf("could not convert msg to proto: %w", err)
|
||||
}
|
||||
|
||||
bcR.Switch.Broadcast(BlockchainChannel, bm)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -7,8 +7,8 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
dbm "github.com/tendermint/tm-db"
|
||||
@@ -16,11 +16,11 @@ import (
|
||||
abci "github.com/tendermint/tendermint/abci/types"
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/mempool/mock"
|
||||
mpmocks "github.com/tendermint/tendermint/mempool/mocks"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
|
||||
"github.com/tendermint/tendermint/proxy"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
sf "github.com/tendermint/tendermint/state/test/factory"
|
||||
"github.com/tendermint/tendermint/store"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
tmtime "github.com/tendermint/tendermint/types/time"
|
||||
@@ -54,6 +54,7 @@ type BlockchainReactorPair struct {
|
||||
}
|
||||
|
||||
func newBlockchainReactor(
|
||||
t *testing.T,
|
||||
logger log.Logger,
|
||||
genDoc *types.GenesisDoc,
|
||||
privVals []types.PrivValidator,
|
||||
@@ -72,9 +73,7 @@ func newBlockchainReactor(
|
||||
|
||||
blockDB := dbm.NewMemDB()
|
||||
stateDB := dbm.NewMemDB()
|
||||
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
|
||||
DiscardABCIResponses: false,
|
||||
})
|
||||
stateStore := sm.NewStore(stateDB)
|
||||
blockStore := store.NewBlockStore(blockDB)
|
||||
|
||||
state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
|
||||
@@ -82,16 +81,26 @@ func newBlockchainReactor(
|
||||
panic(fmt.Errorf("error constructing state from genesis file: %w", err))
|
||||
}
|
||||
|
||||
mp := &mpmocks.Mempool{}
|
||||
mp.On("Lock").Return()
|
||||
mp.On("Unlock").Return()
|
||||
mp.On("FlushAppConn", mock.Anything).Return(nil)
|
||||
mp.On("Update",
|
||||
mock.Anything,
|
||||
mock.Anything,
|
||||
mock.Anything,
|
||||
mock.Anything,
|
||||
mock.Anything,
|
||||
mock.Anything).Return(nil)
|
||||
|
||||
// Make the BlockchainReactor itself.
|
||||
// NOTE we have to create and commit the blocks first because
|
||||
// pool.height is determined from the store.
|
||||
fastSync := true
|
||||
db := dbm.NewMemDB()
|
||||
stateStore = sm.NewStore(db, sm.StoreOptions{
|
||||
DiscardABCIResponses: false,
|
||||
})
|
||||
stateStore = sm.NewStore(db)
|
||||
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(),
|
||||
mock.Mempool{}, sm.EmptyEvidencePool{})
|
||||
mp, sm.EmptyEvidencePool{})
|
||||
if err = stateStore.Save(state); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@@ -118,9 +127,10 @@ func newBlockchainReactor(
|
||||
lastBlockMeta.BlockID, []types.CommitSig{vote.CommitSig()})
|
||||
}
|
||||
|
||||
thisBlock := makeBlock(blockHeight, state, lastCommit)
|
||||
thisBlock := sf.MakeBlock(state, blockHeight, lastCommit)
|
||||
|
||||
thisParts := thisBlock.MakePartSet(types.BlockPartSizeBytes)
|
||||
thisParts, err := thisBlock.MakePartSet(types.BlockPartSizeBytes)
|
||||
require.NoError(t, err)
|
||||
blockID := types.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()}
|
||||
|
||||
state, _, err = blockExec.ApplyBlock(state, blockID, thisBlock)
|
||||
@@ -146,8 +156,8 @@ func TestNoBlockResponse(t *testing.T) {
|
||||
|
||||
reactorPairs := make([]BlockchainReactorPair, 2)
|
||||
|
||||
reactorPairs[0] = newBlockchainReactor(log.TestingLogger(), genDoc, privVals, maxBlockHeight)
|
||||
reactorPairs[1] = newBlockchainReactor(log.TestingLogger(), genDoc, privVals, 0)
|
||||
reactorPairs[0] = newBlockchainReactor(t, log.TestingLogger(), genDoc, privVals, maxBlockHeight)
|
||||
reactorPairs[1] = newBlockchainReactor(t, log.TestingLogger(), genDoc, privVals, 0)
|
||||
|
||||
p2p.MakeConnectedSwitches(config.P2P, 2, func(i int, s *p2p.Switch) *p2p.Switch {
|
||||
s.AddReactor("BLOCKCHAIN", reactorPairs[i].reactor)
|
||||
@@ -194,25 +204,6 @@ func TestNoBlockResponse(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestLegacyReactorReceiveBasic(t *testing.T) {
|
||||
config = cfg.ResetTestRoot("blockchain_reactor_test")
|
||||
defer os.RemoveAll(config.RootDir)
|
||||
genDoc, privVals := randGenesisDoc(1, false, 30)
|
||||
reactor := newBlockchainReactor(log.TestingLogger(), genDoc, privVals, 10).reactor
|
||||
peer := p2p.CreateRandomPeer(false)
|
||||
|
||||
reactor.InitPeer(peer)
|
||||
reactor.AddPeer(peer)
|
||||
m := &bcproto.StatusRequest{}
|
||||
wm := m.Wrap()
|
||||
msg, err := proto.Marshal(wm)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.NotPanics(t, func() {
|
||||
reactor.Receive(BlockchainChannel, peer, msg)
|
||||
})
|
||||
}
|
||||
|
||||
// NOTE: This is too hard to test without
|
||||
// an easy way to add test peer to switch
|
||||
// or without significant refactoring of the module.
|
||||
@@ -227,7 +218,7 @@ func TestBadBlockStopsPeer(t *testing.T) {
|
||||
|
||||
// Other chain needs a different validator set
|
||||
otherGenDoc, otherPrivVals := randGenesisDoc(1, false, 30)
|
||||
otherChain := newBlockchainReactor(log.TestingLogger(), otherGenDoc, otherPrivVals, maxBlockHeight)
|
||||
otherChain := newBlockchainReactor(t, log.TestingLogger(), otherGenDoc, otherPrivVals, maxBlockHeight)
|
||||
|
||||
defer func() {
|
||||
err := otherChain.reactor.Stop()
|
||||
@@ -238,10 +229,10 @@ func TestBadBlockStopsPeer(t *testing.T) {
|
||||
|
||||
reactorPairs := make([]BlockchainReactorPair, 4)
|
||||
|
||||
reactorPairs[0] = newBlockchainReactor(log.TestingLogger(), genDoc, privVals, maxBlockHeight)
|
||||
reactorPairs[1] = newBlockchainReactor(log.TestingLogger(), genDoc, privVals, 0)
|
||||
reactorPairs[2] = newBlockchainReactor(log.TestingLogger(), genDoc, privVals, 0)
|
||||
reactorPairs[3] = newBlockchainReactor(log.TestingLogger(), genDoc, privVals, 0)
|
||||
reactorPairs[0] = newBlockchainReactor(t, log.TestingLogger(), genDoc, privVals, maxBlockHeight)
|
||||
reactorPairs[1] = newBlockchainReactor(t, log.TestingLogger(), genDoc, privVals, 0)
|
||||
reactorPairs[2] = newBlockchainReactor(t, log.TestingLogger(), genDoc, privVals, 0)
|
||||
reactorPairs[3] = newBlockchainReactor(t, log.TestingLogger(), genDoc, privVals, 0)
|
||||
|
||||
switches := p2p.MakeConnectedSwitches(config.P2P, 4, func(i int, s *p2p.Switch) *p2p.Switch {
|
||||
s.AddReactor("BLOCKCHAIN", reactorPairs[i].reactor)
|
||||
@@ -279,7 +270,7 @@ func TestBadBlockStopsPeer(t *testing.T) {
|
||||
// race, but can't be easily avoided.
|
||||
reactorPairs[3].reactor.store = otherChain.reactor.store
|
||||
|
||||
lastReactorPair := newBlockchainReactor(log.TestingLogger(), genDoc, privVals, 0)
|
||||
lastReactorPair := newBlockchainReactor(t, log.TestingLogger(), genDoc, privVals, 0)
|
||||
reactorPairs = append(reactorPairs, lastReactorPair)
|
||||
|
||||
switches = append(switches, p2p.MakeConnectedSwitches(config.P2P, 1, func(i int, s *p2p.Switch) *p2p.Switch {
|
||||
@@ -303,21 +294,6 @@ func TestBadBlockStopsPeer(t *testing.T) {
|
||||
assert.True(t, lastReactorPair.reactor.Switch.Peers().Size() < len(reactorPairs)-1)
|
||||
}
|
||||
|
||||
//----------------------------------------------
|
||||
// utility funcs
|
||||
|
||||
func makeTxs(height int64) (txs []types.Tx) {
|
||||
for i := 0; i < 10; i++ {
|
||||
txs = append(txs, types.Tx([]byte{byte(height), byte(i)}))
|
||||
}
|
||||
return txs
|
||||
}
|
||||
|
||||
func makeBlock(height int64, state sm.State, lastCommit *types.Commit) *types.Block {
|
||||
block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, nil, state.Validators.GetProposer().Address)
|
||||
return block
|
||||
}
|
||||
|
||||
type testApp struct {
|
||||
abci.BaseApplication
|
||||
}
|
||||
|
||||
@@ -2,11 +2,10 @@ package v1
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
|
||||
"github.com/tendermint/tendermint/behaviour"
|
||||
"github.com/tendermint/tendermint/behaviour" //nolint:misspell
|
||||
bc "github.com/tendermint/tendermint/blockchain"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
@@ -101,7 +100,7 @@ func NewBlockchainReactor(state sm.State, blockExec *sm.BlockExecutor, store *st
|
||||
fsm := NewFSM(startHeight, bcR)
|
||||
bcR.fsm = fsm
|
||||
bcR.BaseReactor = *p2p.NewBaseReactor("BlockchainReactor", bcR)
|
||||
// bcR.swReporter = behaviour.NewSwitchReporter(bcR.BaseReactor.Switch)
|
||||
// bcR.swReporter = behavior.NewSwitchReporter(bcR.BaseReactor.Switch)
|
||||
|
||||
return bcR
|
||||
}
|
||||
@@ -173,20 +172,21 @@ func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
|
||||
SendQueueCapacity: 2000,
|
||||
RecvBufferCapacity: 50 * 4096,
|
||||
RecvMessageCapacity: bc.MaxMsgSize,
|
||||
MessageType: &bcproto.Message{},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// AddPeer implements Reactor by sending our state to peer.
|
||||
func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) {
|
||||
p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
|
||||
ChannelID: BlockchainChannel,
|
||||
Message: &bcproto.StatusResponse{
|
||||
Base: bcR.store.Base(),
|
||||
Height: bcR.store.Height(),
|
||||
},
|
||||
}, bcR.Logger)
|
||||
msgBytes, err := bc.EncodeMsg(&bcproto.StatusResponse{
|
||||
Base: bcR.store.Base(),
|
||||
Height: bcR.store.Height(),
|
||||
})
|
||||
if err != nil {
|
||||
bcR.Logger.Error("could not convert msg to protobuf", "err", err)
|
||||
return
|
||||
}
|
||||
peer.Send(BlockchainChannel, msgBytes)
|
||||
// it's OK if send fails. will try later in poolRoutine
|
||||
|
||||
// peer is added to the pool once we receive the first
|
||||
@@ -206,28 +206,35 @@ func (bcR *BlockchainReactor) sendBlockToPeer(msg *bcproto.BlockRequest,
|
||||
bcR.Logger.Error("Could not send block message to peer", "err", err)
|
||||
return false
|
||||
}
|
||||
return p2p.TrySendEnvelopeShim(src, p2p.Envelope{ //nolint: staticcheck
|
||||
ChannelID: BlockchainChannel,
|
||||
Message: &bcproto.BlockResponse{Block: pbbi},
|
||||
}, bcR.Logger)
|
||||
msgBytes, err := bc.EncodeMsg(&bcproto.BlockResponse{Block: pbbi})
|
||||
if err != nil {
|
||||
bcR.Logger.Error("unable to marshal msg", "err", err)
|
||||
return false
|
||||
}
|
||||
return src.TrySend(BlockchainChannel, msgBytes)
|
||||
}
|
||||
|
||||
bcR.Logger.Info("peer asking for a block we don't have", "src", src, "height", msg.Height)
|
||||
|
||||
return p2p.TrySendEnvelopeShim(src, p2p.Envelope{ //nolint: staticcheck
|
||||
ChannelID: BlockchainChannel,
|
||||
Message: &bcproto.NoBlockResponse{Height: msg.Height},
|
||||
}, bcR.Logger)
|
||||
msgBytes, err := bc.EncodeMsg(&bcproto.NoBlockResponse{Height: msg.Height})
|
||||
if err != nil {
|
||||
bcR.Logger.Error("unable to marshal msg", "err", err)
|
||||
return false
|
||||
}
|
||||
return src.TrySend(BlockchainChannel, msgBytes)
|
||||
}
|
||||
|
||||
func (bcR *BlockchainReactor) sendStatusResponseToPeer(msg *bcproto.StatusRequest, src p2p.Peer) (queued bool) {
|
||||
return p2p.TrySendEnvelopeShim(src, p2p.Envelope{ //nolint: staticcheck
|
||||
ChannelID: BlockchainChannel,
|
||||
Message: &bcproto.StatusResponse{
|
||||
Base: bcR.store.Base(),
|
||||
Height: bcR.store.Height(),
|
||||
},
|
||||
}, bcR.Logger)
|
||||
msgBytes, err := bc.EncodeMsg(&bcproto.StatusResponse{
|
||||
Base: bcR.store.Base(),
|
||||
Height: bcR.store.Height(),
|
||||
})
|
||||
if err != nil {
|
||||
bcR.Logger.Error("unable to marshal msg", "err", err)
|
||||
return false
|
||||
}
|
||||
|
||||
return src.TrySend(BlockchainChannel, msgBytes)
|
||||
}
|
||||
|
||||
// RemovePeer implements Reactor by removing peer from the pool.
|
||||
@@ -243,27 +250,34 @@ func (bcR *BlockchainReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
|
||||
}
|
||||
|
||||
// Receive implements Reactor by handling 4 types of messages (look below).
|
||||
func (bcR *BlockchainReactor) ReceiveEnvelope(e p2p.Envelope) {
|
||||
if err := bc.ValidateMsg(e.Message); err != nil {
|
||||
bcR.Logger.Error("peer sent us invalid msg", "peer", e.Src, "msg", e.Message, "err", err)
|
||||
_ = bcR.swReporter.Report(behaviour.BadMessage(e.Src.ID(), err.Error()))
|
||||
func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
|
||||
msg, err := bc.DecodeMsg(msgBytes)
|
||||
if err != nil {
|
||||
bcR.Logger.Error("error decoding message", "src", src, "chId", chID, "err", err)
|
||||
_ = bcR.swReporter.Report(behaviour.BadMessage(src.ID(), err.Error()))
|
||||
return
|
||||
}
|
||||
|
||||
bcR.Logger.Debug("Receive", "src", e.Src, "chID", e.ChannelID, "msg", e.Message)
|
||||
if err = bc.ValidateMsg(msg); err != nil {
|
||||
bcR.Logger.Error("peer sent us invalid msg", "peer", src, "msg", msg, "err", err)
|
||||
_ = bcR.swReporter.Report(behaviour.BadMessage(src.ID(), err.Error()))
|
||||
return
|
||||
}
|
||||
|
||||
switch msg := e.Message.(type) {
|
||||
bcR.Logger.Debug("Receive", "src", src, "chID", chID, "msg", msg)
|
||||
|
||||
switch msg := msg.(type) {
|
||||
case *bcproto.BlockRequest:
|
||||
if queued := bcR.sendBlockToPeer(msg, e.Src); !queued {
|
||||
if queued := bcR.sendBlockToPeer(msg, src); !queued {
|
||||
// Unfortunately not queued since the queue is full.
|
||||
bcR.Logger.Error("Could not send block message to peer", "src", e.Src, "height", msg.Height)
|
||||
bcR.Logger.Error("Could not send block message to peer", "src", src, "height", msg.Height)
|
||||
}
|
||||
|
||||
case *bcproto.StatusRequest:
|
||||
// Send peer our state.
|
||||
if queued := bcR.sendStatusResponseToPeer(msg, e.Src); !queued {
|
||||
if queued := bcR.sendStatusResponseToPeer(msg, src); !queued {
|
||||
// Unfortunately not queued since the queue is full.
|
||||
bcR.Logger.Error("Could not send status message to peer", "src", e.Src)
|
||||
bcR.Logger.Error("Could not send status message to peer", "src", src)
|
||||
}
|
||||
|
||||
case *bcproto.BlockResponse:
|
||||
@@ -275,23 +289,23 @@ func (bcR *BlockchainReactor) ReceiveEnvelope(e p2p.Envelope) {
|
||||
msgForFSM := bcReactorMessage{
|
||||
event: blockResponseEv,
|
||||
data: bReactorEventData{
|
||||
peerID: e.Src.ID(),
|
||||
peerID: src.ID(),
|
||||
height: bi.Height,
|
||||
block: bi,
|
||||
length: msg.Size(),
|
||||
length: len(msgBytes),
|
||||
},
|
||||
}
|
||||
bcR.Logger.Info("Received", "src", e.Src, "height", bi.Height)
|
||||
bcR.Logger.Info("Received", "src", src, "height", bi.Height)
|
||||
bcR.messagesForFSMCh <- msgForFSM
|
||||
case *bcproto.NoBlockResponse:
|
||||
msgForFSM := bcReactorMessage{
|
||||
event: noBlockResponseEv,
|
||||
data: bReactorEventData{
|
||||
peerID: e.Src.ID(),
|
||||
peerID: src.ID(),
|
||||
height: msg.Height,
|
||||
},
|
||||
}
|
||||
bcR.Logger.Debug("Peer does not have requested block", "peer", e.Src, "height", msg.Height)
|
||||
bcR.Logger.Debug("Peer does not have requested block", "peer", src, "height", msg.Height)
|
||||
bcR.messagesForFSMCh <- msgForFSM
|
||||
|
||||
case *bcproto.StatusResponse:
|
||||
@@ -299,35 +313,18 @@ func (bcR *BlockchainReactor) ReceiveEnvelope(e p2p.Envelope) {
|
||||
msgForFSM := bcReactorMessage{
|
||||
event: statusResponseEv,
|
||||
data: bReactorEventData{
|
||||
peerID: e.Src.ID(),
|
||||
peerID: src.ID(),
|
||||
height: msg.Height,
|
||||
length: msg.Size(),
|
||||
length: len(msgBytes),
|
||||
},
|
||||
}
|
||||
bcR.messagesForFSMCh <- msgForFSM
|
||||
|
||||
default:
|
||||
bcR.Logger.Error(fmt.Sprintf("unknown message type %T", msg))
|
||||
bcR.Logger.Error(fmt.Sprintf("unknown message type %v", reflect.TypeOf(msg)))
|
||||
}
|
||||
}
|
||||
|
||||
func (bcR *BlockchainReactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {
|
||||
msg := &bcproto.Message{}
|
||||
err := proto.Unmarshal(msgBytes, msg)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
uw, err := msg.Unwrap()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
bcR.ReceiveEnvelope(p2p.Envelope{
|
||||
ChannelID: chID,
|
||||
Src: peer,
|
||||
Message: uw,
|
||||
})
|
||||
}
|
||||
|
||||
// processBlocksRoutine processes blocks until signaled to stop over the stopProcessing channel
|
||||
func (bcR *BlockchainReactor) processBlocksRoutine(stopProcessing chan struct{}) {
|
||||
|
||||
@@ -468,7 +465,13 @@ func (bcR *BlockchainReactor) processBlock() error {
|
||||
|
||||
chainID := bcR.initialState.ChainID
|
||||
|
||||
firstParts := first.MakePartSet(types.BlockPartSizeBytes)
|
||||
firstParts, err := first.MakePartSet(types.BlockPartSizeBytes)
|
||||
if err != nil {
|
||||
bcR.Logger.Error("failed to make ",
|
||||
"height", first.Height,
|
||||
"err", err.Error())
|
||||
return err
|
||||
}
|
||||
firstPartSetHeader := firstParts.Header()
|
||||
firstID := types.BlockID{Hash: first.Hash(), PartSetHeader: firstPartSetHeader}
|
||||
// Finally, verify the first block using the second's commit
|
||||
@@ -495,10 +498,11 @@ func (bcR *BlockchainReactor) processBlock() error {
|
||||
// Implements bcRNotifier
|
||||
// sendStatusRequest broadcasts `BlockStore` height.
|
||||
func (bcR *BlockchainReactor) sendStatusRequest() {
|
||||
bcR.Switch.BroadcastEnvelope(p2p.Envelope{
|
||||
ChannelID: BlockchainChannel,
|
||||
Message: &bcproto.StatusRequest{},
|
||||
})
|
||||
msgBytes, err := bc.EncodeMsg(&bcproto.StatusRequest{})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
bcR.Switch.Broadcast(BlockchainChannel, msgBytes)
|
||||
}
|
||||
|
||||
// Implements bcRNotifier
|
||||
@@ -509,10 +513,11 @@ func (bcR *BlockchainReactor) sendBlockRequest(peerID p2p.ID, height int64) erro
|
||||
return errNilPeerForBlockRequest
|
||||
}
|
||||
|
||||
queued := p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
|
||||
ChannelID: BlockchainChannel,
|
||||
Message: &bcproto.BlockRequest{Height: height},
|
||||
}, bcR.Logger)
|
||||
msgBytes, err := bc.EncodeMsg(&bcproto.BlockRequest{Height: height})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
queued := peer.TrySend(BlockchainChannel, msgBytes)
|
||||
if !queued {
|
||||
return errSendQueueFull
|
||||
}
|
||||
@@ -535,8 +540,8 @@ func (bcR *BlockchainReactor) switchToConsensus() {
|
||||
// Called by FSM and pool:
|
||||
// - pool calls when it detects slow peer or when peer times out
|
||||
// - FSM calls when:
|
||||
// - adding a block (addBlock) fails
|
||||
// - reactor processing of a block reports failure and FSM sends back the peers of first and second blocks
|
||||
// - adding a block (addBlock) fails
|
||||
// - reactor processing of a block reports failure and FSM sends back the peers of first and second blocks
|
||||
func (bcR *BlockchainReactor) sendPeerError(err error, peerID p2p.ID) {
|
||||
bcR.Logger.Info("sendPeerError:", "peer", peerID, "error", err)
|
||||
msgData := bcFsmMessage{
|
||||
|
||||
@@ -8,8 +8,8 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
dbm "github.com/tendermint/tm-db"
|
||||
@@ -17,12 +17,12 @@ import (
|
||||
abci "github.com/tendermint/tendermint/abci/types"
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/mempool/mock"
|
||||
mpmocks "github.com/tendermint/tendermint/mempool/mocks"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
|
||||
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
|
||||
"github.com/tendermint/tendermint/proxy"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
sf "github.com/tendermint/tendermint/state/test/factory"
|
||||
"github.com/tendermint/tendermint/store"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
tmtime "github.com/tendermint/tendermint/types/time"
|
||||
@@ -104,9 +104,7 @@ func newBlockchainReactor(
|
||||
|
||||
blockDB := dbm.NewMemDB()
|
||||
stateDB := dbm.NewMemDB()
|
||||
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
|
||||
DiscardABCIResponses: false,
|
||||
})
|
||||
stateStore := sm.NewStore(stateDB)
|
||||
blockStore := store.NewBlockStore(blockDB)
|
||||
|
||||
state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
|
||||
@@ -114,16 +112,26 @@ func newBlockchainReactor(
|
||||
panic(fmt.Errorf("error constructing state from genesis file: %w", err))
|
||||
}
|
||||
|
||||
mp := &mpmocks.Mempool{}
|
||||
mp.On("Lock").Return()
|
||||
mp.On("Unlock").Return()
|
||||
mp.On("FlushAppConn", mock.Anything).Return(nil)
|
||||
mp.On("Update",
|
||||
mock.Anything,
|
||||
mock.Anything,
|
||||
mock.Anything,
|
||||
mock.Anything,
|
||||
mock.Anything,
|
||||
mock.Anything).Return(nil)
|
||||
|
||||
// Make the BlockchainReactor itself.
|
||||
// NOTE we have to create and commit the blocks first because
|
||||
// pool.height is determined from the store.
|
||||
fastSync := true
|
||||
db := dbm.NewMemDB()
|
||||
stateStore = sm.NewStore(db, sm.StoreOptions{
|
||||
DiscardABCIResponses: false,
|
||||
})
|
||||
stateStore = sm.NewStore(db)
|
||||
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(),
|
||||
mock.Mempool{}, sm.EmptyEvidencePool{})
|
||||
mp, sm.EmptyEvidencePool{})
|
||||
if err = stateStore.Save(state); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@@ -139,9 +147,10 @@ func newBlockchainReactor(
|
||||
lastCommit = types.NewCommit(vote.Height, vote.Round, lastBlockMeta.BlockID, []types.CommitSig{vote.CommitSig()})
|
||||
}
|
||||
|
||||
thisBlock := makeBlock(blockHeight, state, lastCommit)
|
||||
thisBlock := sf.MakeBlock(state, blockHeight, lastCommit)
|
||||
|
||||
thisParts := thisBlock.MakePartSet(types.BlockPartSizeBytes)
|
||||
thisParts, err := thisBlock.MakePartSet(types.BlockPartSizeBytes)
|
||||
require.NoError(t, err)
|
||||
blockID := types.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()}
|
||||
|
||||
state, _, err = blockExec.ApplyBlock(state, blockID, thisBlock)
|
||||
@@ -351,40 +360,6 @@ outerFor:
|
||||
assert.True(t, lastReactorPair.bcR.Switch.Peers().Size() < len(reactorPairs)-1)
|
||||
}
|
||||
|
||||
func TestLegacyReactorReceiveBasic(t *testing.T) {
|
||||
config = cfg.ResetTestRoot("blockchain_reactor_test")
|
||||
defer os.RemoveAll(config.RootDir)
|
||||
genDoc, privVals := randGenesisDoc(1, false, 30)
|
||||
reactor := newBlockchainReactor(t, log.TestingLogger(), genDoc, privVals, 10)
|
||||
peer := p2p.CreateRandomPeer(false)
|
||||
|
||||
reactor.InitPeer(peer)
|
||||
reactor.AddPeer(peer)
|
||||
m := &bcproto.StatusRequest{}
|
||||
wm := m.Wrap()
|
||||
msg, err := proto.Marshal(wm)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.NotPanics(t, func() {
|
||||
reactor.Receive(BlockchainChannel, peer, msg)
|
||||
})
|
||||
}
|
||||
|
||||
//----------------------------------------------
|
||||
// utility funcs
|
||||
|
||||
func makeTxs(height int64) (txs []types.Tx) {
|
||||
for i := 0; i < 10; i++ {
|
||||
txs = append(txs, types.Tx([]byte{byte(height), byte(i)}))
|
||||
}
|
||||
return txs
|
||||
}
|
||||
|
||||
func makeBlock(height int64, state sm.State, lastCommit *types.Commit) *types.Block {
|
||||
block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, nil, state.Validators.GetProposer().Address)
|
||||
return block
|
||||
}
|
||||
|
||||
type testApp struct {
|
||||
abci.BaseApplication
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@ package v2
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
bc "github.com/tendermint/tendermint/blockchain"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
|
||||
"github.com/tendermint/tendermint/state"
|
||||
@@ -15,7 +16,7 @@ type iIO interface {
|
||||
sendBlockNotFound(height int64, peerID p2p.ID) error
|
||||
sendStatusResponse(base, height int64, peerID p2p.ID) error
|
||||
|
||||
broadcastStatusRequest()
|
||||
broadcastStatusRequest() error
|
||||
|
||||
trySwitchToConsensus(state state.State, skipWAL bool) bool
|
||||
}
|
||||
@@ -46,10 +47,13 @@ func (sio *switchIO) sendBlockRequest(peerID p2p.ID, height int64) error {
|
||||
if peer == nil {
|
||||
return fmt.Errorf("peer not found")
|
||||
}
|
||||
if queued := p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
|
||||
ChannelID: BlockchainChannel,
|
||||
Message: &bcproto.BlockRequest{Height: height},
|
||||
}, sio.sw.Logger); !queued {
|
||||
msgBytes, err := bc.EncodeMsg(&bcproto.BlockRequest{Height: height})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
queued := peer.TrySend(BlockchainChannel, msgBytes)
|
||||
if !queued {
|
||||
return fmt.Errorf("send queue full")
|
||||
}
|
||||
return nil
|
||||
@@ -61,10 +65,12 @@ func (sio *switchIO) sendStatusResponse(base int64, height int64, peerID p2p.ID)
|
||||
return fmt.Errorf("peer not found")
|
||||
}
|
||||
|
||||
if queued := p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
|
||||
ChannelID: BlockchainChannel,
|
||||
Message: &bcproto.StatusRequest{},
|
||||
}, sio.sw.Logger); !queued {
|
||||
msgBytes, err := bc.EncodeMsg(&bcproto.StatusResponse{Height: height, Base: base})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued {
|
||||
return fmt.Errorf("peer queue full")
|
||||
}
|
||||
|
||||
@@ -85,10 +91,11 @@ func (sio *switchIO) sendBlockToPeer(block *types.Block, peerID p2p.ID) error {
|
||||
return err
|
||||
}
|
||||
|
||||
if queued := p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
|
||||
ChannelID: BlockchainChannel,
|
||||
Message: &bcproto.BlockResponse{Block: bpb},
|
||||
}, sio.sw.Logger); !queued {
|
||||
msgBytes, err := bc.EncodeMsg(&bcproto.BlockResponse{Block: bpb})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued {
|
||||
return fmt.Errorf("peer queue full")
|
||||
}
|
||||
|
||||
@@ -100,10 +107,12 @@ func (sio *switchIO) sendBlockNotFound(height int64, peerID p2p.ID) error {
|
||||
if peer == nil {
|
||||
return fmt.Errorf("peer not found")
|
||||
}
|
||||
if queued := p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
|
||||
ChannelID: BlockchainChannel,
|
||||
Message: &bcproto.NoBlockResponse{Height: height},
|
||||
}, sio.sw.Logger); !queued {
|
||||
msgBytes, err := bc.EncodeMsg(&bcproto.NoBlockResponse{Height: height})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued {
|
||||
return fmt.Errorf("peer queue full")
|
||||
}
|
||||
|
||||
@@ -118,10 +127,14 @@ func (sio *switchIO) trySwitchToConsensus(state state.State, skipWAL bool) bool
|
||||
return ok
|
||||
}
|
||||
|
||||
func (sio *switchIO) broadcastStatusRequest() {
|
||||
func (sio *switchIO) broadcastStatusRequest() error {
|
||||
msgBytes, err := bc.EncodeMsg(&bcproto.StatusRequest{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// XXX: maybe we should use an io specific peer list here
|
||||
sio.sw.BroadcastEnvelope(p2p.Envelope{
|
||||
ChannelID: BlockchainChannel,
|
||||
Message: &bcproto.StatusRequest{},
|
||||
})
|
||||
sio.sw.Broadcast(BlockchainChannel, msgBytes)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -158,11 +158,12 @@ func (state *pcState) handle(event Event) (Event, error) {
|
||||
return noOp, nil
|
||||
}
|
||||
|
||||
var (
|
||||
first, second = firstItem.block, secondItem.block
|
||||
firstParts = first.MakePartSet(types.BlockPartSizeBytes)
|
||||
firstID = types.BlockID{Hash: first.Hash(), PartSetHeader: firstParts.Header()}
|
||||
)
|
||||
first, second := firstItem.block, secondItem.block
|
||||
firstParts, err := first.MakePartSet(types.BlockPartSizeBytes)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("failed to make part set, height %d: %v ", first.Height, err.Error()))
|
||||
}
|
||||
firstID := types.BlockID{Hash: first.Hash(), PartSetHeader: firstParts.Header()}
|
||||
|
||||
// verify if +second+ last commit "confirms" +first+ block
|
||||
err = state.context.verifyCommit(tmState.ChainID, firstID, first.Height, second.LastCommit)
|
||||
|
||||
@@ -82,7 +82,7 @@ func executeProcessorTests(t *testing.T, tests []testFields) {
|
||||
}
|
||||
}()
|
||||
|
||||
// First step must always initialise the currentState as state.
|
||||
// First step must always initialize the currentState as state.
|
||||
if step.currentState != nil {
|
||||
state = makeState(step.currentState)
|
||||
}
|
||||
|
||||
@@ -5,9 +5,7 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
|
||||
"github.com/tendermint/tendermint/behaviour"
|
||||
"github.com/tendermint/tendermint/behaviour" //nolint:misspell
|
||||
bc "github.com/tendermint/tendermint/blockchain"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmsync "github.com/tendermint/tendermint/libs/sync"
|
||||
@@ -141,7 +139,7 @@ func (r *BlockchainReactor) Start() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// startSync begins a fast sync, signalled by r.events being non-nil. If state is non-nil,
|
||||
// startSync begins a fast sync, signaled by r.events being non-nil. If state is non-nil,
|
||||
// the scheduler and processor is updated with this state on startup.
|
||||
func (r *BlockchainReactor) startSync(state *state.State) error {
|
||||
r.mtx.Lock()
|
||||
@@ -217,7 +215,7 @@ type bcBlockResponse struct {
|
||||
priorityNormal
|
||||
time time.Time
|
||||
peerID p2p.ID
|
||||
size int
|
||||
size int64
|
||||
block *types.Block
|
||||
}
|
||||
|
||||
@@ -351,7 +349,9 @@ func (r *BlockchainReactor) demux(events <-chan Event) {
|
||||
case <-doProcessBlockCh:
|
||||
r.processor.send(rProcessBlock{})
|
||||
case <-doStatusCh:
|
||||
r.io.broadcastStatusRequest()
|
||||
if err := r.io.broadcastStatusRequest(); err != nil {
|
||||
r.logger.Error("Error broadcasting status request", "err", err)
|
||||
}
|
||||
|
||||
// Events from peers. Closing the channel signals event loop termination.
|
||||
case event, ok := <-events:
|
||||
@@ -455,31 +455,39 @@ func (r *BlockchainReactor) Stop() error {
|
||||
}
|
||||
|
||||
// Receive implements Reactor by handling different message types.
|
||||
func (r *BlockchainReactor) ReceiveEnvelope(e p2p.Envelope) {
|
||||
if err := bc.ValidateMsg(e.Message); err != nil {
|
||||
r.logger.Error("peer sent us invalid msg", "peer", e.Src, "msg", e.Message, "err", err)
|
||||
_ = r.reporter.Report(behaviour.BadMessage(e.Src.ID(), err.Error()))
|
||||
func (r *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
|
||||
msg, err := bc.DecodeMsg(msgBytes)
|
||||
if err != nil {
|
||||
r.logger.Error("error decoding message",
|
||||
"src", src.ID(), "chId", chID, "msg", msg, "err", err)
|
||||
_ = r.reporter.Report(behaviour.BadMessage(src.ID(), err.Error()))
|
||||
return
|
||||
}
|
||||
|
||||
r.logger.Debug("Receive", "src", e.Src.ID(), "chID", e.ChannelID, "msg", e.Message)
|
||||
if err = bc.ValidateMsg(msg); err != nil {
|
||||
r.logger.Error("peer sent us invalid msg", "peer", src, "msg", msg, "err", err)
|
||||
_ = r.reporter.Report(behaviour.BadMessage(src.ID(), err.Error()))
|
||||
return
|
||||
}
|
||||
|
||||
switch msg := e.Message.(type) {
|
||||
r.logger.Debug("Receive", "src", src.ID(), "chID", chID, "msg", msg)
|
||||
|
||||
switch msg := msg.(type) {
|
||||
case *bcproto.StatusRequest:
|
||||
if err := r.io.sendStatusResponse(r.store.Base(), r.store.Height(), e.Src.ID()); err != nil {
|
||||
r.logger.Error("Could not send status message to peer", "src", e.Src)
|
||||
if err := r.io.sendStatusResponse(r.store.Base(), r.store.Height(), src.ID()); err != nil {
|
||||
r.logger.Error("Could not send status message to peer", "src", src)
|
||||
}
|
||||
|
||||
case *bcproto.BlockRequest:
|
||||
block := r.store.LoadBlock(msg.Height)
|
||||
if block != nil {
|
||||
if err := r.io.sendBlockToPeer(block, e.Src.ID()); err != nil {
|
||||
if err = r.io.sendBlockToPeer(block, src.ID()); err != nil {
|
||||
r.logger.Error("Could not send block message to peer: ", err)
|
||||
}
|
||||
} else {
|
||||
r.logger.Info("peer asking for a block we don't have", "src", e.Src, "height", msg.Height)
|
||||
peerID := e.Src.ID()
|
||||
if err := r.io.sendBlockNotFound(msg.Height, peerID); err != nil {
|
||||
r.logger.Info("peer asking for a block we don't have", "src", src, "height", msg.Height)
|
||||
peerID := src.ID()
|
||||
if err = r.io.sendBlockNotFound(msg.Height, peerID); err != nil {
|
||||
r.logger.Error("Couldn't send block not found: ", err)
|
||||
}
|
||||
}
|
||||
@@ -487,7 +495,7 @@ func (r *BlockchainReactor) ReceiveEnvelope(e p2p.Envelope) {
|
||||
case *bcproto.StatusResponse:
|
||||
r.mtx.RLock()
|
||||
if r.events != nil {
|
||||
r.events <- bcStatusResponse{peerID: e.Src.ID(), base: msg.Base, height: msg.Height}
|
||||
r.events <- bcStatusResponse{peerID: src.ID(), base: msg.Base, height: msg.Height}
|
||||
}
|
||||
r.mtx.RUnlock()
|
||||
|
||||
@@ -500,10 +508,10 @@ func (r *BlockchainReactor) ReceiveEnvelope(e p2p.Envelope) {
|
||||
r.mtx.RLock()
|
||||
if r.events != nil {
|
||||
r.events <- bcBlockResponse{
|
||||
peerID: e.Src.ID(),
|
||||
peerID: src.ID(),
|
||||
block: bi,
|
||||
size: int64(len(msgBytes)),
|
||||
time: time.Now(),
|
||||
size: msg.Size(),
|
||||
}
|
||||
}
|
||||
r.mtx.RUnlock()
|
||||
@@ -511,29 +519,12 @@ func (r *BlockchainReactor) ReceiveEnvelope(e p2p.Envelope) {
|
||||
case *bcproto.NoBlockResponse:
|
||||
r.mtx.RLock()
|
||||
if r.events != nil {
|
||||
r.events <- bcNoBlockResponse{peerID: e.Src.ID(), height: msg.Height, time: time.Now()}
|
||||
r.events <- bcNoBlockResponse{peerID: src.ID(), height: msg.Height, time: time.Now()}
|
||||
}
|
||||
r.mtx.RUnlock()
|
||||
}
|
||||
}
|
||||
|
||||
func (r *BlockchainReactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {
|
||||
msg := &bcproto.Message{}
|
||||
err := proto.Unmarshal(msgBytes, msg)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
uw, err := msg.Unwrap()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
r.ReceiveEnvelope(p2p.Envelope{
|
||||
ChannelID: chID,
|
||||
Src: peer,
|
||||
Message: uw,
|
||||
})
|
||||
}
|
||||
|
||||
// AddPeer implements Reactor interface
|
||||
func (r *BlockchainReactor) AddPeer(peer p2p.Peer) {
|
||||
err := r.io.sendStatusResponse(r.store.Base(), r.store.Height(), peer.ID())
|
||||
@@ -568,7 +559,6 @@ func (r *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
|
||||
SendQueueCapacity: 2000,
|
||||
RecvBufferCapacity: 50 * 4096,
|
||||
RecvMessageCapacity: bc.MaxMsgSize,
|
||||
MessageType: &bcproto.Message{},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -9,22 +9,24 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
dbm "github.com/tendermint/tm-db"
|
||||
|
||||
abci "github.com/tendermint/tendermint/abci/types"
|
||||
"github.com/tendermint/tendermint/behaviour"
|
||||
"github.com/tendermint/tendermint/behaviour" //nolint:misspell
|
||||
bc "github.com/tendermint/tendermint/blockchain"
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/libs/service"
|
||||
"github.com/tendermint/tendermint/mempool/mock"
|
||||
mpmocks "github.com/tendermint/tendermint/mempool/mocks"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
"github.com/tendermint/tendermint/p2p/conn"
|
||||
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
|
||||
"github.com/tendermint/tendermint/proxy"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
sf "github.com/tendermint/tendermint/state/test/factory"
|
||||
"github.com/tendermint/tendermint/store"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
tmtime "github.com/tendermint/tendermint/types/time"
|
||||
@@ -53,19 +55,34 @@ func (mp mockPeer) NodeInfo() p2p.NodeInfo {
|
||||
func (mp mockPeer) Status() conn.ConnectionStatus { return conn.ConnectionStatus{} }
|
||||
func (mp mockPeer) SocketAddr() *p2p.NetAddress { return &p2p.NetAddress{} }
|
||||
|
||||
func (mp mockPeer) SendEnvelope(e p2p.Envelope) bool { return true }
|
||||
func (mp mockPeer) TrySendEnvelope(e p2p.Envelope) bool { return true }
|
||||
|
||||
func (mp mockPeer) Send(byte, []byte) bool { return true }
|
||||
func (mp mockPeer) TrySend(byte, []byte) bool { return true }
|
||||
|
||||
func (mp mockPeer) Set(string, interface{}) {}
|
||||
func (mp mockPeer) Get(string) interface{} { return struct{}{} }
|
||||
|
||||
func (mp mockPeer) SetRemovalFailed() {}
|
||||
func (mp mockPeer) GetRemovalFailed() bool { return false }
|
||||
// nolint:unused // ignore
|
||||
type mockBlockStore struct {
|
||||
blocks map[int64]*types.Block
|
||||
}
|
||||
|
||||
type mockBlockApplier struct{}
|
||||
// nolint:unused // ignore
|
||||
func (ml *mockBlockStore) Height() int64 {
|
||||
return int64(len(ml.blocks))
|
||||
}
|
||||
|
||||
// nolint:unused // ignore
|
||||
func (ml *mockBlockStore) LoadBlock(height int64) *types.Block {
|
||||
return ml.blocks[height]
|
||||
}
|
||||
|
||||
// nolint:unused // ignore
|
||||
func (ml *mockBlockStore) SaveBlock(block *types.Block, part *types.PartSet, commit *types.Commit) {
|
||||
ml.blocks[block.Height] = block
|
||||
}
|
||||
|
||||
type mockBlockApplier struct {
|
||||
}
|
||||
|
||||
// XXX: Add whitelist/blacklist?
|
||||
func (mba *mockBlockApplier) ApplyBlock(
|
||||
@@ -115,7 +132,8 @@ func (sio *mockSwitchIo) trySwitchToConsensus(state sm.State, skipWAL bool) bool
|
||||
return true
|
||||
}
|
||||
|
||||
func (sio *mockSwitchIo) broadcastStatusRequest() {
|
||||
func (sio *mockSwitchIo) broadcastStatusRequest() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
type testReactorParams struct {
|
||||
@@ -126,8 +144,8 @@ type testReactorParams struct {
|
||||
mockA bool
|
||||
}
|
||||
|
||||
func newTestReactor(p testReactorParams) *BlockchainReactor {
|
||||
store, state, _ := newReactorStore(p.genDoc, p.privVals, p.startHeight)
|
||||
func newTestReactor(t *testing.T, p testReactorParams) *BlockchainReactor {
|
||||
store, state, _ := newReactorStore(t, p.genDoc, p.privVals, p.startHeight)
|
||||
reporter := behaviour.NewMockReporter()
|
||||
|
||||
var appl blockApplier
|
||||
@@ -135,6 +153,18 @@ func newTestReactor(p testReactorParams) *BlockchainReactor {
|
||||
if p.mockA {
|
||||
appl = &mockBlockApplier{}
|
||||
} else {
|
||||
mp := &mpmocks.Mempool{}
|
||||
mp.On("Lock").Return()
|
||||
mp.On("Unlock").Return()
|
||||
mp.On("FlushAppConn", mock.Anything).Return(nil)
|
||||
mp.On("Update",
|
||||
mock.Anything,
|
||||
mock.Anything,
|
||||
mock.Anything,
|
||||
mock.Anything,
|
||||
mock.Anything,
|
||||
mock.Anything).Return(nil)
|
||||
|
||||
app := &testApp{}
|
||||
cc := proxy.NewLocalClientCreator(app)
|
||||
proxyApp := proxy.NewAppConns(cc)
|
||||
@@ -143,10 +173,8 @@ func newTestReactor(p testReactorParams) *BlockchainReactor {
|
||||
panic(fmt.Errorf("error start app: %w", err))
|
||||
}
|
||||
db := dbm.NewMemDB()
|
||||
stateStore := sm.NewStore(db, sm.StoreOptions{
|
||||
DiscardABCIResponses: false,
|
||||
})
|
||||
appl = sm.NewBlockExecutor(stateStore, p.logger, proxyApp.Consensus(), mock.Mempool{}, sm.EmptyEvidencePool{})
|
||||
stateStore := sm.NewStore(db)
|
||||
appl = sm.NewBlockExecutor(stateStore, p.logger, proxyApp.Consensus(), mp, sm.EmptyEvidencePool{})
|
||||
if err = stateStore.Save(state); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@@ -287,7 +315,7 @@ func newTestReactor(p testReactorParams) *BlockchainReactor {
|
||||
// t.Run(tt.name, func(t *testing.T) {
|
||||
// reactor := newTestReactor(params)
|
||||
// reactor.Start()
|
||||
// reactor.reporter = behaviour.NewMockReporter()
|
||||
// reactor.reporter = behavior.NewMockReporter()
|
||||
// mockSwitch := &mockSwitchIo{switchedToConsensus: false}
|
||||
// reactor.io = mockSwitch
|
||||
// // time for go routines to start
|
||||
@@ -336,7 +364,9 @@ func newTestReactor(p testReactorParams) *BlockchainReactor {
|
||||
// }
|
||||
|
||||
func TestReactorHelperMode(t *testing.T) {
|
||||
channelID := byte(0x40)
|
||||
var (
|
||||
channelID = byte(0x40)
|
||||
)
|
||||
|
||||
config := cfg.ResetTestRoot("blockchain_reactor_v2_test")
|
||||
defer os.RemoveAll(config.RootDir)
|
||||
@@ -352,7 +382,7 @@ func TestReactorHelperMode(t *testing.T) {
|
||||
|
||||
type testEvent struct {
|
||||
peer string
|
||||
event proto.Message
|
||||
event interface{}
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
@@ -364,10 +394,10 @@ func TestReactorHelperMode(t *testing.T) {
|
||||
name: "status request",
|
||||
params: params,
|
||||
msgs: []testEvent{
|
||||
{"P1", &bcproto.StatusRequest{}},
|
||||
{"P1", &bcproto.BlockRequest{Height: 13}},
|
||||
{"P1", &bcproto.BlockRequest{Height: 20}},
|
||||
{"P1", &bcproto.BlockRequest{Height: 22}},
|
||||
{"P1", bcproto.StatusRequest{}},
|
||||
{"P1", bcproto.BlockRequest{Height: 13}},
|
||||
{"P1", bcproto.BlockRequest{Height: 20}},
|
||||
{"P1", bcproto.BlockRequest{Height: 22}},
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -375,7 +405,7 @@ func TestReactorHelperMode(t *testing.T) {
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
reactor := newTestReactor(params)
reactor := newTestReactor(t, params)
mockSwitch := &mockSwitchIo{switchedToConsensus: false}
reactor.io = mockSwitch
err := reactor.Start()
@@ -384,27 +414,25 @@ func TestReactorHelperMode(t *testing.T) {
for i := 0; i < len(tt.msgs); i++ {
step := tt.msgs[i]
switch ev := step.event.(type) {
case *bcproto.StatusRequest:
case bcproto.StatusRequest:
old := mockSwitch.numStatusResponse
reactor.ReceiveEnvelope(p2p.Envelope{
ChannelID: channelID,
Src: mockPeer{id: p2p.ID(step.peer)},
Message: ev})
msg, err := bc.EncodeMsg(&ev)
assert.NoError(t, err)
reactor.Receive(channelID, mockPeer{id: p2p.ID(step.peer)}, msg)
assert.Equal(t, old+1, mockSwitch.numStatusResponse)
case *bcproto.BlockRequest:
case bcproto.BlockRequest:
if ev.Height > params.startHeight {
old := mockSwitch.numNoBlockResponse
reactor.ReceiveEnvelope(p2p.Envelope{
ChannelID: channelID,
Src: mockPeer{id: p2p.ID(step.peer)},
Message: ev})
msg, err := bc.EncodeMsg(&ev)
assert.NoError(t, err)
reactor.Receive(channelID, mockPeer{id: p2p.ID(step.peer)}, msg)
assert.Equal(t, old+1, mockSwitch.numNoBlockResponse)
} else {
old := mockSwitch.numBlockResponse
reactor.ReceiveEnvelope(p2p.Envelope{
ChannelID: channelID,
Src: mockPeer{id: p2p.ID(step.peer)},
Message: ev})
msg, err := bc.EncodeMsg(&ev)
assert.NoError(t, err)
reactor.Receive(channelID, mockPeer{id: p2p.ID(step.peer)}, msg)
assert.Equal(t, old+1, mockSwitch.numBlockResponse)
}
}
@@ -415,40 +443,12 @@ func TestReactorHelperMode(t *testing.T) {
}
}

func TestLegacyReactorReceiveBasic(t *testing.T) {
config := cfg.ResetTestRoot("blockchain_reactor_v2_test")
defer os.RemoveAll(config.RootDir)
genDoc, privVals := randGenesisDoc(config.ChainID(), 1, false, 30)
params := testReactorParams{
logger: log.TestingLogger(),
genDoc: genDoc,
privVals: privVals,
startHeight: 20,
mockA: true,
}
reactor := newTestReactor(params)
mockSwitch := &mockSwitchIo{switchedToConsensus: false}
reactor.io = mockSwitch
peer := p2p.CreateRandomPeer(false)

reactor.InitPeer(peer)
reactor.AddPeer(peer)
m := &bcproto.StatusRequest{}
wm := m.Wrap()
msg, err := proto.Marshal(wm)
assert.NoError(t, err)

assert.NotPanics(t, func() {
reactor.Receive(BlockchainChannel, peer, msg)
})
}

func TestReactorSetSwitchNil(t *testing.T) {
config := cfg.ResetTestRoot("blockchain_reactor_v2_test")
defer os.RemoveAll(config.RootDir)
genDoc, privVals := randGenesisDoc(config.ChainID(), 1, false, 30)

reactor := newTestReactor(testReactorParams{
reactor := newTestReactor(t, testReactorParams{
logger: log.TestingLogger(),
genDoc: genDoc,
privVals: privVals,
@@ -459,28 +459,12 @@ func TestReactorSetSwitchNil(t *testing.T) {
assert.Nil(t, reactor.io)
}

//----------------------------------------------
// utility funcs

func makeTxs(height int64) (txs []types.Tx) {
for i := 0; i < 10; i++ {
txs = append(txs, types.Tx([]byte{byte(height), byte(i)}))
}
return txs
}

func makeBlock(height int64, state sm.State, lastCommit *types.Commit) *types.Block {
block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, nil, state.Validators.GetProposer().Address)
return block
}

type testApp struct {
abci.BaseApplication
}

func randGenesisDoc(chainID string, numValidators int, randPower bool, minPower int64) (
*types.GenesisDoc, []types.PrivValidator,
) {
*types.GenesisDoc, []types.PrivValidator) {
validators := make([]types.GenesisValidator, numValidators)
privValidators := make([]types.PrivValidator, numValidators)
for i := 0; i < numValidators; i++ {
@@ -503,10 +487,10 @@ func randGenesisDoc(chainID string, numValidators int, randPower bool, minPower
// Why are we importing the entire blockExecutor dependency graph here
// when we have the facilities to
func newReactorStore(
t *testing.T,
genDoc *types.GenesisDoc,
privVals []types.PrivValidator,
maxBlockHeight int64,
) (*store.BlockStore, sm.State, *sm.BlockExecutor) {
maxBlockHeight int64) (*store.BlockStore, sm.State, *sm.BlockExecutor) {
if len(privVals) != 1 {
panic("only support one validator")
}
@@ -520,21 +504,28 @@ func newReactorStore(

stateDB := dbm.NewMemDB()
blockStore := store.NewBlockStore(dbm.NewMemDB())
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
stateStore := sm.NewStore(stateDB)
state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
if err != nil {
panic(fmt.Errorf("error constructing state from genesis file: %w", err))
}

mp := &mpmocks.Mempool{}
mp.On("Lock").Return()
mp.On("Unlock").Return()
mp.On("FlushAppConn", mock.Anything).Return(nil)
mp.On("Update",
mock.Anything,
mock.Anything,
mock.Anything,
mock.Anything,
mock.Anything,
mock.Anything).Return(nil)

db := dbm.NewMemDB()
stateStore = sm.NewStore(db, sm.StoreOptions{
DiscardABCIResponses: false,
},
)
stateStore = sm.NewStore(db)
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(),
mock.Mempool{}, sm.EmptyEvidencePool{})
mp, sm.EmptyEvidencePool{})
if err = stateStore.Save(state); err != nil {
panic(err)
}
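For readers comparing the two sides of this hunk: the branch toggles between the older single-argument state-store constructor and the newer variant that takes explicit options. The two calls from the hunk, shown side by side as a sketch (not additional diff content); per the reindex-event help text later in this diff, DiscardABCIResponses: false keeps ABCI responses available for that tooling:

    // Older constructor, no options:
    stateStore := sm.NewStore(stateDB)

    // Newer constructor with explicit options:
    stateStore = sm.NewStore(stateDB, sm.StoreOptions{
        DiscardABCIResponses: false, // retain ABCI responses in the state store
    })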
@@ -560,9 +551,11 @@ func newReactorStore(
lastBlockMeta.BlockID, []types.CommitSig{vote.CommitSig()})
}

thisBlock := makeBlock(blockHeight, state, lastCommit)
thisBlock := sf.MakeBlock(state, blockHeight, lastCommit)

thisParts, err := thisBlock.MakePartSet(types.BlockPartSizeBytes)
require.NoError(t, err)

thisParts := thisBlock.MakePartSet(types.BlockPartSizeBytes)
blockID := types.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()}

state, _, err = blockExec.ApplyBlock(state, blockID, thisBlock)

@@ -52,6 +52,11 @@ func (rt *Routine) setLogger(logger log.Logger) {
rt.logger = logger
}

// nolint:unused
func (rt *Routine) setMetrics(metrics *Metrics) {
rt.metrics = metrics
}

func (rt *Routine) start() {
rt.logger.Info("routine start", "msg", log.NewLazySprintf("%s: run", rt.name))
running := atomic.CompareAndSwapUint32(rt.running, uint32(0), uint32(1))

@@ -366,7 +366,7 @@ func (sc *scheduler) setStateAtHeight(height int64, state blockState) {
}

// CONTRACT: peer exists and in Ready state.
func (sc *scheduler) markReceived(peerID p2p.ID, height int64, size int, now time.Time) error {
func (sc *scheduler) markReceived(peerID p2p.ID, height int64, size int64, now time.Time) error {
peer := sc.peers[peerID]

if state := sc.getStateAtHeight(height); state != blockStatePending || sc.pendingBlocks[height] != peerID {
@@ -379,7 +379,7 @@ func (sc *scheduler) markReceived(peerID p2p.ID, height int64, size int, now tim
height, pendingTime, now)
}

peer.lastRate = int64(size) / now.Sub(pendingTime).Nanoseconds()
peer.lastRate = size / now.Sub(pendingTime).Nanoseconds()

sc.setStateAtHeight(height, blockStateReceived)
delete(sc.pendingBlocks, height)
@@ -532,7 +532,7 @@ func (sc *scheduler) handleBlockResponse(event bcBlockResponse) (Event, error) {
return noOp, nil
}

err = sc.markReceived(event.peerID, event.block.Height, event.block.Size(), event.time)
err = sc.markReceived(event.peerID, event.block.Height, event.size, event.time)
if err != nil {
sc.removePeer(event.peerID)
return scPeerError{peerID: event.peerID, reason: err}, nil

@@ -853,7 +853,7 @@ func TestScMarkReceived(t *testing.T) {
type args struct {
peerID p2p.ID
height int64
size int
size int64
tm time.Time
}
tests := []struct {
@@ -2225,7 +2225,7 @@ func TestScHandle(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
var sc *scheduler
for i, step := range tt.steps {
// First step must always initialise the currentState as state.
// First step must always initialize the currentState as state.
if step.currentSc != nil {
sc = newTestScheduler(*step.currentSc)
}

@@ -3,6 +3,7 @@ package debug
import (
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"time"
@@ -81,7 +82,7 @@ func dumpCmdHandler(_ *cobra.Command, args []string) error {
func dumpDebugData(outDir string, conf *cfg.Config, rpc *rpchttp.HTTP) {
start := time.Now().UTC()

tmpDir, err := os.MkdirTemp(outDir, "tendermint_debug_tmp")
tmpDir, err := ioutil.TempDir(outDir, "tendermint_debug_tmp")
if err != nil {
logger.Error("failed to create temporary directory", "dir", tmpDir, "error", err)
return

@@ -5,6 +5,7 @@ import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
@@ -67,6 +68,7 @@ func zipDir(src, dest string) error {
_, err = io.Copy(headerWriter, file)
return err
})

}

// copyFile copies a file from src to dest and returns an error upon failure. The
@@ -109,5 +111,5 @@ func writeStateJSONToFile(state interface{}, dir, filename string) error {
return fmt.Errorf("failed to encode state dump: %w", err)
}

return os.WriteFile(path.Join(dir, filename), stateJSON, os.ModePerm)
return ioutil.WriteFile(path.Join(dir, filename), stateJSON, os.ModePerm)
}

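The io/ioutil churn in these debug hunks is a pure API migration: since Go 1.16 the ioutil helpers are deprecated thin wrappers over their io and os counterparts, so the two sides of each hunk behave identically. A minimal, self-contained sketch of the correspondence (illustrative only, not part of the diff; file names are made up):

    package main

    import (
        "io"
        "os"
        "path/filepath"
        "strings"
    )

    func main() {
        // io.ReadAll replaces ioutil.ReadAll (errors ignored for brevity).
        body, _ := io.ReadAll(strings.NewReader("profile data"))

        // os.MkdirTemp replaces ioutil.TempDir.
        dir, _ := os.MkdirTemp("", "tendermint_debug_tmp")
        defer os.RemoveAll(dir)

        // os.WriteFile replaces ioutil.WriteFile.
        _ = os.WriteFile(filepath.Join(dir, "state.json"), body, 0o600)
    }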
@@ -3,6 +3,7 @@ package debug
import (
"errors"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
@@ -55,7 +56,7 @@ func killCmdHandler(cmd *cobra.Command, args []string) error {

// Create a temporary directory which will contain all the state dumps and
// relevant files and directories that will be compressed into a file.
tmpDir, err := os.MkdirTemp(os.TempDir(), "tendermint_debug_tmp")
tmpDir, err := ioutil.TempDir(os.TempDir(), "tendermint_debug_tmp")
if err != nil {
return fmt.Errorf("failed to create temporary directory: %w", err)
}
@@ -104,7 +105,7 @@ func killProc(pid uint64, dir string) error {
// pipe STDERR output from tailing the Tendermint process to a file
//
// NOTE: This will only work on UNIX systems.
cmd := exec.Command("tail", "-f", fmt.Sprintf("/proc/%d/fd/2", pid)) //nolint: gosec
cmd := exec.Command("tail", "-f", fmt.Sprintf("/proc/%d/fd/2", pid)) // nolint: gosec

outFile, err := os.Create(filepath.Join(dir, "stacktrace.out"))
if err != nil {

@@ -3,7 +3,7 @@ package debug
import (
"context"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"path"
@@ -67,17 +67,16 @@ func copyConfig(home, dir string) error {
func dumpProfile(dir, addr, profile string, debug int) error {
endpoint := fmt.Sprintf("%s/debug/pprof/%s?debug=%d", addr, profile, debug)

//nolint:gosec,nolintlint
resp, err := http.Get(endpoint)
resp, err := http.Get(endpoint) // nolint: gosec
if err != nil {
return fmt.Errorf("failed to query for %s profile: %w", profile, err)
}
defer resp.Body.Close()

body, err := io.ReadAll(resp.Body)
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return fmt.Errorf("failed to read %s profile response body: %w", profile, err)
}

return os.WriteFile(path.Join(dir, fmt.Sprintf("%s.out", profile)), body, os.ModePerm)
return ioutil.WriteFile(path.Join(dir, fmt.Sprintf("%s.out", profile)), body, os.ModePerm)
}

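A worked example of the endpoint string built in the dumpProfile hunk above: with an assumed addr of http://localhost:6060, profile "goroutine", and debug 2, the Sprintf call produces http://localhost:6060/debug/pprof/goroutine?debug=2, and the response body is written to goroutine.out in the dump directory. The host and port here are illustrative, not taken from this diff.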
@@ -14,7 +14,7 @@ import (
tmtime "github.com/tendermint/tendermint/types/time"
)

// InitFilesCmd initialises a fresh Tendermint Core instance.
// InitFilesCmd initializes a fresh Tendermint Core instance.
var InitFilesCmd = &cobra.Command{
Use: "init",
Short: "Initialize Tendermint",

@@ -101,7 +101,7 @@ func init() {
}

func runProxy(cmd *cobra.Command, args []string) error {
// Initialise logger.
// Initialize logger.
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
var option log.Option
if verbose {

@@ -32,17 +32,14 @@ var (
// ReIndexEventCmd constructs a command to re-index events in a block height interval.
var ReIndexEventCmd = &cobra.Command{
Use: "reindex-event",
Short: "Re-index events to the event store backends",
Short: "reindex events to the event store backends",
Long: `
reindex-event is an offline tooling to re-index block and tx events to the eventsinks.
You can run this command when the event store backend dropped/disconnected or you want to
reindex-event is an offline tooling to re-index block and tx events to the eventsinks,
you can run this command when the event store backend dropped/disconnected or you want to
replace the backend. The default start-height is 0, meaning the tooling will start
reindex from the base block height(inclusive); and the default end-height is 0, meaning
the tooling will reindex until the latest block height(inclusive). User can omit
either or both arguments.

Note: This operation requires ABCIResponses. Do not set DiscardABCIResponses to true if you
want to use this command.
`,
Example: `
tendermint reindex-event

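A hedged usage sketch for the help text above: to restrict the range one would pass the start and end heights explicitly, for example `tendermint reindex-event --start-height 100 --end-height 200`; the exact flag spelling is assumed from the start-height/end-height wording in the Long description and is not shown in this hunk.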
@@ -29,7 +29,7 @@ var ResetStateCmd = &cobra.Command{
Short: "Remove all the data and WAL",
PreRun: deprecateSnakeCase,
RunE: func(cmd *cobra.Command, args []string) (err error) {
config, err = ParseConfig(cmd)
config, err = ParseConfig()
if err != nil {
return err
}
@@ -54,7 +54,7 @@ var ResetPrivValidatorCmd = &cobra.Command{
// XXX: this is totally unsafe.
// it's only suitable for testnets.
func resetAllCmd(cmd *cobra.Command, args []string) (err error) {
config, err = ParseConfig(cmd)
config, err = ParseConfig()
if err != nil {
return err
}
@@ -71,7 +71,7 @@ func resetAllCmd(cmd *cobra.Command, args []string) (err error) {
// XXX: this is totally unsafe.
// it's only suitable for testnets.
func resetPrivValidator(cmd *cobra.Command, args []string) (err error) {
config, err = ParseConfig(cmd)
config, err = ParseConfig()
if err != nil {
return err
}

@@ -77,9 +77,7 @@ func loadStateAndBlockStore(config *cfg.Config) (*store.BlockStore, state.Store,
if err != nil {
return nil, nil, err
}
stateStore := state.NewStore(stateDB, state.StoreOptions{
DiscardABCIResponses: config.Storage.DiscardABCIResponses,
})
stateStore := state.NewStore(stateDB)

return blockStore, stateStore, nil
}

@@ -29,25 +29,12 @@ func registerFlagsRootCmd(cmd *cobra.Command) {

// ParseConfig retrieves the default environment configuration,
// sets up the Tendermint root and ensures that the root exists
func ParseConfig(cmd *cobra.Command) (*cfg.Config, error) {
func ParseConfig() (*cfg.Config, error) {
conf := cfg.DefaultConfig()
err := viper.Unmarshal(conf)
if err != nil {
return nil, err
}

var home string
if os.Getenv("TMHOME") != "" {
home = os.Getenv("TMHOME")
} else {
home, err = cmd.Flags().GetString(cli.HomeFlag)
if err != nil {
return nil, err
}
}

conf.RootDir = home

conf.SetRoot(conf.RootDir)
cfg.EnsureRoot(conf.RootDir)
if err := conf.ValidateBasic(); err != nil {
@@ -65,7 +52,7 @@ var RootCmd = &cobra.Command{
return nil
}

config, err = ParseConfig(cmd)
config, err = ParseConfig()
if err != nil {
return err
}

@@ -2,6 +2,7 @@ package commands

import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strconv"
@@ -17,7 +18,9 @@ import (
tmos "github.com/tendermint/tendermint/libs/os"
)

var defaultRoot = os.ExpandEnv("$HOME/.some/test/dir")
var (
defaultRoot = os.ExpandEnv("$HOME/.some/test/dir")
)

// clearConfig clears env vars, the given root dir, and resets viper.
func clearConfig(dir string) {
@@ -85,6 +88,7 @@ func TestRootHome(t *testing.T) {
}

func TestRootFlagsEnv(t *testing.T) {

// defaults
defaults := cfg.DefaultConfig()
defaultLogLvl := defaults.LogLevel
@@ -112,6 +116,7 @@ func TestRootFlagsEnv(t *testing.T) {
}

func TestRootConfig(t *testing.T) {

// write non-default config
nonDefaultLogLvl := "abc:debug"
cvals := map[string]string{
@@ -135,7 +140,7 @@ func TestRootConfig(t *testing.T) {

// XXX: path must match cfg.defaultConfigPath
configFilePath := filepath.Join(defaultRoot, "config")
err := tmos.EnsureDir(configFilePath, 0o700)
err := tmos.EnsureDir(configFilePath, 0700)
require.Nil(t, err)

// write the non-defaults to a different path
@@ -163,5 +168,5 @@ func WriteConfigVals(dir string, vals map[string]string) error {
data += fmt.Sprintf("%s = \"%s\"\n", k, v)
}
cfile := filepath.Join(dir, "config.toml")
return os.WriteFile(cfile, []byte(data), 0o600)
return ioutil.WriteFile(cfile, []byte(data), 0600)
}