Mirror of https://github.com/tendermint/tendermint.git, synced 2026-01-12 15:52:50 +00:00
Compare commits: wb/trigger ... docs-stagi (1 commit)

| Author | SHA1 | Date |
|---|---|---|
| | 5182ffee25 | |
23 .github/CODEOWNERS vendored

@@ -1,10 +1,25 @@
# CODEOWNERS: https://help.github.com/articles/about-codeowners/

# Everything goes through the following "global owners" by default.
# Unless a later match takes precedence, these three will be
# requested for review when someone opens a PR.
# Note that the last matching pattern takes precedence, so
# global owners are only requested if there isn't a more specific
# codeowner specified below. For this reason, the global codeowners
# are often repeated in package-level definitions.
* @ebuchman @cmwaters @tychoish @williambanfield @creachadair
* @alexanderbez @ebuchman @erikgrinaker @melekes @tessr

# Overrides for tooling packages
.github/ @marbar3778 @alexanderbez @ebuchman @erikgrinaker @melekes @tessr
DOCKER/ @marbar3778 @alexanderbez @ebuchman @erikgrinaker @melekes @tessr

# Overrides for core Tendermint packages
abci/ @marbar3778 @alexanderbez @ebuchman @erikgrinaker @melekes @tessr
evidence/ @cmwaters @ebuchman @melekes @tessr
light/ @cmwaters @melekes @ebuchman @tessr

# Overrides for docs
*.md @marbar3778 @alexanderbez @ebuchman @erikgrinaker @melekes @tessr
docs/ @marbar3778 @alexanderbez @ebuchman @erikgrinaker @melekes @tessr
8 .github/PULL_REQUEST_TEMPLATE.md vendored

@@ -1,7 +1,7 @@
Please add a description of the changes that this PR introduces and the files that
are the most critical to review.
## Description

If this PR fixes an open Issue, please include "Closes #XXX" (where "XXX" is the Issue number)
so that GitHub will automatically close the Issue when this PR is merged.
_Please add a description of the changes that this PR introduces and the files that
are the most critical to review._

Closes: #XXX
14 .github/codecov.yml vendored

@@ -5,14 +5,19 @@ coverage:
status:
project:
default:
threshold: 20%
patch: off
threshold: 1%
patch: on
changes: off

github_checks:
annotations: false

comment: false
comment:
layout: "diff, files"
behavior: default
require_changes: no
require_base: no
require_head: yes

ignore:
- "docs"
@@ -20,6 +25,3 @@ ignore:
- "scripts"
- "**/*.pb.go"
- "libs/pubsub/query/query.peg.go"
- "*.md"
- "*.rst"
- "*.yml"
1 .github/dependabot.yml vendored

@@ -23,5 +23,6 @@ updates:
reviewers:
- melekes
- tessr
- erikgrinaker
labels:
- T:dependencies
19 .github/mergify.yml vendored

@@ -6,22 +6,5 @@ pull_request_rules:
actions:
merge:
method: squash
strict: smart+fasttrack
strict: true
commit_message: title+body
- name: backport patches to v0.34.x branch
conditions:
- base=master
- label=S:backport-to-v0.34.x
actions:
backport:
branches:
- v0.34.x
- name: backport patches to v0.35.x branch
conditions:
- base=master
- label=S:backport-to-v0.35.x
actions:
backport:
branches:
- v0.35.x
31 .github/workflows/coverage.yml vendored

@@ -2,8 +2,6 @@ name: Test Coverage
on:
pull_request:
push:
paths:
- "**.go"
branches:
- master
- release/**
@@ -12,7 +10,7 @@ jobs:
split-test-files:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2.3.4
- uses: actions/checkout@v2
- name: Create a file with all the pkgs
run: go list ./... > pkgs.txt
- name: Split pkgs into 4 files
@@ -46,13 +44,12 @@ jobs:
steps:
- uses: actions/setup-go@v2
with:
go-version: "1.17"
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
go-version: "1.15"
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
with:
PATTERNS: |
**/**.go
"!test/"
go.mod
go.sum
- name: install
@@ -69,13 +66,12 @@ jobs:
steps:
- uses: actions/setup-go@v2
with:
go-version: "1.17"
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
go-version: "1.15"
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
with:
PATTERNS: |
**/**.go
"!test/"
go.mod
go.sum
- uses: actions/download-artifact@v2
@@ -85,10 +81,10 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v2
with:
go-version: "1.17"
go-version: 1.15
- name: test & coverage report creation
run: |
cat pkgs.txt.part.${{ matrix.part }} | xargs go test -mod=readonly -timeout 8m -race -coverprofile=${{ matrix.part }}profile.out
cat pkgs.txt.part.${{ matrix.part }} | xargs go test -mod=readonly -timeout 8m -race -coverprofile=${{ matrix.part }}profile.out -covermode=atomic
if: env.GIT_DIFF
- uses: actions/upload-artifact@v2
with:
@@ -99,12 +95,11 @@ jobs:
runs-on: ubuntu-latest
needs: tests
steps:
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
with:
PATTERNS: |
**/**.go
"!test/"
go.mod
go.sum
- uses: actions/download-artifact@v2
@@ -124,9 +119,9 @@ jobs:
name: "${{ github.sha }}-03-coverage"
if: env.GIT_DIFF
- run: |
cat ./*profile.out | grep -v "mode: set" >> coverage.txt
cat ./*profile.out | grep -v "mode: atomic" >> coverage.txt
if: env.GIT_DIFF
- uses: codecov/codecov-action@v2.1.0
- uses: codecov/codecov-action@v1.2.1
with:
file: ./coverage.txt
if: env.GIT_DIFF
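The body of the "Split pkgs into 4 files" step is not shown in the diff above. A minimal sketch of what such a step could look like, assuming GNU coreutils `split` and the `pkgs.txt.part.NN` naming that the test jobs above consume:

```yaml
- name: Split pkgs into 4 files
  # Hypothetical reconstruction: split pkgs.txt into four chunks without
  # breaking lines (GNU split), producing pkgs.txt.part.00 .. pkgs.txt.part.03.
  run: split -d -n l/4 pkgs.txt pkgs.txt.part.
```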
8 .github/workflows/docker.yml vendored

@@ -14,7 +14,7 @@ jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2.3.4
- uses: actions/checkout@master
- name: Prepare
id: prep
run: |
@@ -40,17 +40,17 @@ jobs:
platforms: all

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1.6.0
uses: docker/setup-buildx-action@v1

- name: Login to DockerHub
if: ${{ github.event_name != 'pull_request' }}
uses: docker/login-action@v1.10.0
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}

- name: Publish to Docker Hub
uses: docker/build-push-action@v2.7.0
uses: docker/build-push-action@v2
with:
context: .
file: ./DOCKER/Dockerfile
32 .github/workflows/docs.yml vendored Normal file

@@ -0,0 +1,32 @@
name: Documentation
# This job builds and deploys documentation to github pages.
# It runs on every push to master, and can be manually triggered.
on:
workflow_dispatch: # allow running workflow manually
push:
branches:
- master

jobs:
build-and-deploy:
runs-on: ubuntu-latest
container:
image: tendermintdev/docker-website-deployment
steps:
- name: Checkout 🛎️
uses: actions/checkout@v2.3.1
with:
persist-credentials: false
fetch-depth: 0

- name: Install and Build 🔧
run: |
apk add rsync
make build-docs

- name: Deploy 🚀
uses: JamesIves/github-pages-deploy-action@3.7.1
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
BRANCH: gh-pages
FOLDER: ~/output
76 .github/workflows/e2e-nightly-35x.yml vendored

@@ -1,76 +0,0 @@
# Runs randomly generated E2E testnets nightly on v0.35.x.

# !! If you change something in this file, you probably want
# to update the e2e-nightly-master workflow as well!

name: e2e-nightly-35x
on:
workflow_dispatch: # allow running workflow manually
schedule:
- cron: '0 2 * * *'

jobs:
e2e-nightly-test:
# Run parallel jobs for the listed testnet groups (must match the
# ./build/generator -g flag)
strategy:
fail-fast: false
matrix:
p2p: ['legacy', 'new', 'hybrid']
group: ['00', '01', '02', '03']
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/setup-go@v2
with:
go-version: '1.17'

- uses: actions/checkout@v2.3.4
with:
ref: 'v0.35.x'

- name: Build
working-directory: test/e2e
# Run make jobs in parallel, since we can't run steps in parallel.
run: make -j2 docker generator runner tests

- name: Generate testnets
working-directory: test/e2e
# When changing -g, also change the matrix groups above
run: ./build/generator -g 4 -d networks/nightly/${{ matrix.p2p }} -p ${{ matrix.p2p }}

- name: Run ${{ matrix.p2p }} p2p testnets in group ${{ matrix.group }}
working-directory: test/e2e
run: ./run-multiple.sh networks/nightly/${{ matrix.p2p }}/*-group${{ matrix.group }}-*.toml

e2e-nightly-fail-2:
needs: e2e-nightly-test
if: ${{ failure() }}
runs-on: ubuntu-latest
steps:
- name: Notify Slack on failure
uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
env:
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACK_CHANNEL: tendermint-internal
SLACK_USERNAME: Nightly E2E Tests
SLACK_ICON_EMOJI: ':skull:'
SLACK_COLOR: danger
SLACK_MESSAGE: Nightly E2E tests failed on v0.35.x
SLACK_FOOTER: ''

e2e-nightly-success: # may turn this off once they seem to pass consistently
needs: e2e-nightly-test
if: ${{ success() }}
runs-on: ubuntu-latest
steps:
- name: Notify Slack on success
uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
env:
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACK_CHANNEL: tendermint-internal
SLACK_USERNAME: Nightly E2E Tests
SLACK_ICON_EMOJI: ':white_check_mark:'
SLACK_COLOR: good
SLACK_MESSAGE: Nightly E2E tests passed on v0.35.x
SLACK_FOOTER: ''
73 .github/workflows/e2e-nightly-master.yml vendored

@@ -1,73 +0,0 @@
# Runs randomly generated E2E testnets nightly on master

# !! If you change something in this file, you probably want
# to update the e2e-nightly-34x workflow as well!

name: e2e-nightly-master
on:
workflow_dispatch: # allow running workflow manually
schedule:
- cron: '0 2 * * *'

jobs:
e2e-nightly-test:
# Run parallel jobs for the listed testnet groups (must match the
# ./build/generator -g flag)
strategy:
fail-fast: false
matrix:
group: ['00', '01', '02', '03']
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/setup-go@v2
with:
go-version: '1.17'

- uses: actions/checkout@v2.3.4

- name: Build
working-directory: test/e2e
# Run make jobs in parallel, since we can't run steps in parallel.
run: make -j2 docker generator runner tests

- name: Generate testnets
working-directory: test/e2e
# When changing -g, also change the matrix groups above
run: ./build/generator -g 4 -d networks/nightly/

- name: Run ${{ matrix.p2p }} p2p testnets
working-directory: test/e2e
run: ./run-multiple.sh networks/nightly/*-group${{ matrix.group }}-*.toml

e2e-nightly-fail-2:
needs: e2e-nightly-test
if: ${{ failure() }}
runs-on: ubuntu-latest
steps:
- name: Notify Slack on failure
uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
env:
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACK_CHANNEL: tendermint-internal
SLACK_USERNAME: Nightly E2E Tests
SLACK_ICON_EMOJI: ':skull:'
SLACK_COLOR: danger
SLACK_MESSAGE: Nightly E2E tests failed on master
SLACK_FOOTER: ''

e2e-nightly-success: # may turn this off once they seem to pass consistently
needs: e2e-nightly-test
if: ${{ success() }}
runs-on: ubuntu-latest
steps:
- name: Notify Slack on success
uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
env:
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACK_CHANNEL: tendermint-internal
SLACK_USERNAME: Nightly E2E Tests
SLACK_ICON_EMOJI: ':white_check_mark:'
SLACK_COLOR: good
SLACK_MESSAGE: Nightly E2E tests passed on master
SLACK_FOOTER: ''
@@ -1,12 +1,7 @@
# Runs randomly generated E2E testnets nightly
# on the 0.34.x release branch

# !! If you change something in this file, you probably want
# to update the e2e-nightly-master workflow as well!

name: e2e-nightly-34x
# Runs randomly generated E2E testnets nightly.
name: e2e-nightly
on:
workflow_dispatch: # allow running workflow manually, in theory
workflow_dispatch: # allow running workflow manually
schedule:
- cron: '0 2 * * *'

@@ -17,17 +12,19 @@ jobs:
strategy:
fail-fast: false
matrix:
group: ['00', '01']
group: ['00', '01', '02', '03']
# todo: expand to multiple versions after 0.35 release
branch: ['master', 'v0.34.x']
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/setup-go@v2
with:
go-version: '1.17'
go-version: '1.15'

- uses: actions/checkout@v2.3.4
- uses: actions/checkout@v2
with:
ref: 'v0.34.x'
ref: ${{ matrix.branch}}

- name: Build
working-directory: test/e2e
@@ -37,7 +34,7 @@ jobs:
- name: Generate testnets
working-directory: test/e2e
# When changing -g, also change the matrix groups above
run: ./build/generator -g 2 -d networks/nightly
run: ./build/generator -g 4 -d networks/nightly

- name: Run testnets in group ${{ matrix.group }}
working-directory: test/e2e
@@ -49,28 +46,12 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Notify Slack on failure
uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
uses: rtCamp/action-slack-notify@ae4223259071871559b6e9d08b24a63d71b3f0c0
env:
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACK_CHANNEL: tendermint-internal
SLACK_USERNAME: Nightly E2E Tests
SLACK_ICON_EMOJI: ':skull:'
SLACK_COLOR: danger
SLACK_MESSAGE: Nightly E2E tests failed on v0.34.x
SLACK_FOOTER: ''

e2e-nightly-success: # may turn this off once they seem to pass consistently
needs: e2e-nightly-test
if: ${{ success() }}
runs-on: ubuntu-latest
steps:
- name: Notify Slack on success
uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
env:
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACK_CHANNEL: tendermint-internal
SLACK_USERNAME: Nightly E2E Tests
SLACK_ICON_EMOJI: ':white_check_mark:'
SLACK_COLOR: good
SLACK_MESSAGE: Nightly E2E tests passed on v0.34.x
SLACK_MESSAGE: Nightly E2E tests failed
SLACK_FOOTER: ''
14 .github/workflows/e2e.yml vendored

@@ -16,9 +16,9 @@ jobs:
steps:
- uses: actions/setup-go@v2
with:
go-version: '1.17'
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
go-version: '1.15'
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
with:
PATTERNS: |
**/**.go
@@ -28,11 +28,15 @@ jobs:
- name: Build
working-directory: test/e2e
# Run two make jobs in parallel, since we can't run steps in parallel.
run: make -j2 docker runner tests
run: make -j2 docker runner
if: "env.GIT_DIFF != ''"

- name: Run CI testnet
working-directory: test/e2e
run: ./run-multiple.sh networks/ci.toml
run: ./build/runner -f networks/ci.toml
if: "env.GIT_DIFF != ''"

- name: Emit logs on failure
if: ${{ failure() }}
working-directory: test/e2e
run: ./build/runner -f networks/ci.toml logs
39 .github/workflows/fuzz-nightly.yml vendored

@@ -4,10 +4,6 @@ on:
workflow_dispatch: # allow running workflow manually
schedule:
- cron: '0 3 * * *'
pull_request:
branches: [master]
paths:
- "test/fuzz/**/*.go"

jobs:
fuzz-nightly-test:
@@ -15,22 +11,17 @@ jobs:
steps:
- uses: actions/setup-go@v2
with:
go-version: '1.17'
go-version: '1.15'

- uses: actions/checkout@v2.3.4
- uses: actions/checkout@v2

- name: Install go-fuzz
working-directory: test/fuzz
run: go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-build

- name: Fuzz mempool-v1
- name: Fuzz mempool
working-directory: test/fuzz
run: timeout -s SIGINT --preserve-status 10m make fuzz-mempool-v1
continue-on-error: true

- name: Fuzz mempool-v0
working-directory: test/fuzz
run: timeout -s SIGINT --preserve-status 10m make fuzz-mempool-v0
run: timeout -s SIGINT --preserve-status 10m make fuzz-mempool
continue-on-error: true

- name: Fuzz p2p-addrbook
@@ -53,35 +44,21 @@ jobs:
run: timeout -s SIGINT --preserve-status 10m make fuzz-rpc-server
continue-on-error: true

- name: Archive crashers
uses: actions/upload-artifact@v2
with:
name: crashers
path: test/fuzz/**/crashers
retention-days: 3

- name: Archive suppressions
uses: actions/upload-artifact@v2
with:
name: suppressions
path: test/fuzz/**/suppressions
retention-days: 3

- name: Set crashers count
working-directory: test/fuzz
run: echo "::set-output name=count::$(find . -type d -name 'crashers' | xargs -I % sh -c 'ls % | wc -l' | awk '{total += $1} END {print total}')"
run: echo "::set-output name=crashers-count::$(find . -type d -name "crashers" | xargs -I % sh -c 'ls % | wc -l' | awk '{total += $1} END {print total}')"
id: set-crashers-count

outputs:
crashers-count: ${{ steps.set-crashers-count.outputs.count }}
crashers_count: ${{ steps.set-crashers-count.outputs.crashers-count }}

fuzz-nightly-fail:
needs: fuzz-nightly-test
if: ${{ needs.fuzz-nightly-test.outputs.crashers-count != 0 }}
if: ${{ needs.set-crashers-count.outputs.crashers-count != 0 }}
runs-on: ubuntu-latest
steps:
- name: Notify Slack if any crashers
uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
uses: rtCamp/action-slack-notify@ae4223259071871559b6e9d08b24a63d71b3f0c0
env:
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACK_CHANNEL: tendermint-internal
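The two variants above disagree on how the crasher count is exported (`count` vs `crashers-count`, and `needs.fuzz-nightly-test` vs `needs.set-crashers-count`). As a reference point, a minimal self-contained sketch of the step-output-to-job-output pattern this workflow relies on (the echoed value is a placeholder):

```yaml
jobs:
  fuzz-nightly-test:
    runs-on: ubuntu-latest
    outputs:
      # The job output must reference the step by its id and the exact
      # name passed to ::set-output.
      crashers-count: ${{ steps.set-crashers-count.outputs.count }}
    steps:
      - name: Set crashers count
        id: set-crashers-count
        run: echo "::set-output name=count::0"   # placeholder value
  fuzz-nightly-fail:
    needs: fuzz-nightly-test
    # Downstream jobs read outputs via needs.<job-id>.outputs, not the step id.
    if: ${{ needs.fuzz-nightly-test.outputs.crashers-count != 0 }}
    runs-on: ubuntu-latest
    steps:
      - run: echo "crashers were found"
```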
16 .github/workflows/janitor.yml vendored

@@ -1,16 +0,0 @@
name: Janitor
# Janitor cleans up previous runs of various workflows
# To add more workflows to cancel visit https://api.github.com/repos/tendermint/tendermint/actions/workflows and find the actions name
on:
pull_request:

jobs:
cancel:
name: "Cancel Previous Runs"
runs-on: ubuntu-latest
timeout-minutes: 3
steps:
- uses: styfle/cancel-workflow-action@0.9.1
with:
workflow_id: 1041851,1401230,2837803
access_token: ${{ github.token }}
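The comment above points at the workflows API for discovering the numeric IDs in `workflow_id`. A hypothetical one-off step for listing them, assuming `curl` and `jq` are available on the runner:

```yaml
- name: List workflow IDs
  run: |
    # Query the public Actions API and print id/name pairs for each workflow.
    curl -s https://api.github.com/repos/tendermint/tendermint/actions/workflows \
      | jq '.workflows[] | {id, name}'
```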
65 .github/workflows/jepsen.yml vendored

@@ -1,65 +0,0 @@
# Runs a Jepsen test - cas-register (no nemesis) by default.
# See inputs for various options.
# Repo: https://github.com/tendermint/jepsen
#
# If you want to test a new breaking version of Tendermint, you'll need to
# update the Merkleeyes ABCI app and 'merkleeyesUrl' input accordingly. You can
# upload a new tarball to
# https://github.com/tendermint/jepsen/releases/tag/0.2.1.
#
# Manually triggered.
name: jepsen
on:
workflow_dispatch:
inputs:
workload:
description: 'Test workload to run: (cas-register | set)'
required: true
default: 'cas-register'
nemesis:
description: 'Nemesis to use: (none | clocks | single-partitions | half-partitions | ring-partitions | split-dup-validators | peekaboo-dup-validators | changing-validators | crash | truncate-tendermint | truncate-merkleeyes)'
required: true
default: 'none'
dupOrSuperByzValidators:
description: '"--dup-validators" (multiple validators share the same key) and(or) "--super-byzantine-validators" (byzantine validators have just shy of 2/3 the voting weight)'
required: false
default: ''
concurrency:
description: 'How many workers should we run? Must be an integer and >= 10, optionally followed by n (e.g. 3n) to multiply by the number of nodes.'
required: true
default: 10
timeLimit:
description: 'Excluding setup and teardown, how long should a test run for, in seconds?'
required: true
default: 60
tendermintUrl:
description: 'Where to grab the Tendermint tarball (w/ linux/amd64 binary)'
required: true
default: 'https://github.com/melekes/katas/releases/download/0.2.0/tendermint.tar.gz'
merkleeyesUrl:
description: 'Where to grab the Merkleeyes tarball (w/ linux/amd64 binary)'
required: true
default: 'https://github.com/tendermint/jepsen/releases/download/0.2.1/merkleeyes_0.1.7.tar.gz'

jobs:
jepsen-test:
runs-on: ubuntu-latest
steps:
- name: Checkout the Jepsen repository
uses: actions/checkout@v2.3.4
with:
repository: 'tendermint/jepsen'

- name: Start a Jepsen cluster in background
working-directory: docker
run: ./bin/up --daemon

- name: Run the test
run: docker exec -i jepsen-control bash -c 'source /root/.bashrc; cd /jepsen/tendermint; lein run test --nemesis ${{ github.event.inputs.nemesis }} --workload ${{ github.event.inputs.workload }} --concurrency ${{ github.event.inputs.concurrency }} --tendermint-url ${{ github.event.inputs.tendermintUrl }} --merkleeyes-url ${{ github.event.inputs.merkleeyesUrl }} --time-limit ${{ github.event.inputs.timeLimit }} ${{ github.event.inputs.dupOrSuperByzValidators }}'

- name: Archive results
uses: actions/upload-artifact@v2
with:
name: results
path: tendermint/store/latest
retention-days: 3
4 .github/workflows/linkchecker.yml vendored

@@ -6,7 +6,7 @@ jobs:
markdown-link-check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2.3.4
- uses: gaurav-nelson/github-action-markdown-link-check@1.0.13
- uses: actions/checkout@master
- uses: gaurav-nelson/github-action-markdown-link-check@1.0.12
with:
folder-path: "docs"
@@ -13,17 +13,17 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 8
steps:
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
with:
PATTERNS: |
**/**.go
go.mod
go.sum
- uses: golangci/golangci-lint-action@v2.5.2
- uses: golangci/golangci-lint-action@v2.3.0
with:
# Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version.
version: v1.42.1
version: v1.31
args: --timeout 10m
github-token: ${{ secrets.github_token }}
if: env.GIT_DIFF
5 .github/workflows/linter.yml vendored

@@ -11,7 +11,6 @@ on:
branches: [master]
paths:
- "**.md"
- "**.yml"

jobs:
build:
@@ -19,7 +18,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout Code
uses: actions/checkout@v2.3.4
uses: actions/checkout@v2
- name: Lint Code Base
uses: docker://github/super-linter:v3
env:
@@ -28,5 +27,5 @@ jobs:
DEFAULT_BRANCH: master
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
VALIDATE_MD: true
VALIDATE_OPENAPI: true
VALIDATE_OPAENAPI: true
VALIDATE_YAML: true
8 .github/workflows/proto-docker.yml vendored

@@ -16,7 +16,7 @@ jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2.3.4
- uses: actions/checkout@master
- name: Prepare
id: prep
run: |
@@ -34,16 +34,16 @@ jobs:
echo ::set-output name=tags::${TAGS}

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1.6.0
uses: docker/setup-buildx-action@v1

- name: Login to DockerHub
uses: docker/login-action@v1.10.0
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}

- name: Publish to Docker Hub
uses: docker/build-push-action@v2.7.0
uses: docker/build-push-action@v2
with:
context: ./tools/proto
file: ./tools/proto/Dockerfile
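The body of the `Prepare` step is elided above except for its final `set-output` line. A sketch of how such a step typically feeds the later build-push step; the tag derivation and image name here are assumed examples, not the repository's actual values:

```yaml
- name: Prepare
  id: prep
  run: |
    # Assumed example: derive an image tag from the branch name.
    TAGS="tendermintdev/docker-build-proto:${GITHUB_REF#refs/heads/}"
    echo ::set-output name=tags::${TAGS}
- name: Publish to Docker Hub
  uses: docker/build-push-action@v2
  with:
    context: ./tools/proto
    file: ./tools/proto/Dockerfile
    # Consumes the step output exported above.
    tags: ${{ steps.prep.outputs.tags }}
```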
4 .github/workflows/proto.yml vendored

@@ -11,13 +11,13 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 4
steps:
- uses: actions/checkout@v2.3.4
- uses: actions/checkout@master
- name: lint
run: make proto-lint
proto-breakage:
runs-on: ubuntu-latest
timeout-minutes: 4
steps:
- uses: actions/checkout@v2.3.4
- uses: actions/checkout@master
- name: check-breakage
run: make proto-check-breaking-ci
11 .github/workflows/release.yml vendored

@@ -2,7 +2,7 @@ name: "Release"

on:
push:
branches:
- "RC[0-9]/**"
tags:
- "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10
@@ -12,13 +12,16 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2.3.4
uses: actions/checkout@v2
with:
fetch-depth: 0

- uses: actions/setup-go@v2
with:
go-version: '1.17'
go-version: '1.15'

- run: echo https://github.com/tendermint/tendermint/blob/${GITHUB_REF#refs/tags/}/CHANGELOG.md#${GITHUB_REF#refs/tags/} > ../release_notes.md
if: startsWith(github.ref, 'refs/tags/')

- name: Build
uses: goreleaser/goreleaser-action@v2
@@ -32,6 +35,6 @@ jobs:
if: startsWith(github.ref, 'refs/tags/')
with:
version: latest
args: release --rm-dist
args: release --rm-dist --release-notes=../release_notes.md
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
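The release-notes line relies on shell parameter expansion: `${GITHUB_REF#refs/tags/}` strips the shortest leading match of `refs/tags/` from the ref. A standalone illustration; the tag value is an arbitrary example (in CI, `GITHUB_REF` is set by GitHub Actions):

```yaml
- run: |
    GITHUB_REF=refs/tags/v0.35.0        # example value for illustration
    echo "${GITHUB_REF#refs/tags/}"     # prints: v0.35.0
```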
8 .github/workflows/stale.yml vendored

@@ -7,14 +7,12 @@ jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v4
- uses: actions/stale@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
stale-pr-message: "This pull request has been automatically marked as stale because it has not had
recent activity. It will be closed if no further activity occurs. Thank you
for your contributions."
days-before-stale: -1
days-before-close: -1
days-before-pr-stale: 10
days-before-pr-close: 4
days-before-stale: 10
days-before-close: 4
exempt-pr-labels: "S:wip"
70 .github/workflows/tests.yml vendored

@@ -10,6 +10,14 @@ on:
- release/**

jobs:
cleanup-runs:
runs-on: ubuntu-latest
steps:
- uses: rokroskar/workflow-run-cleanup-action@master
env:
GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
if: "!startsWith(github.ref, 'refs/tags/') && github.ref != 'refs/heads/master'"

build:
name: Build
runs-on: ubuntu-latest
@@ -17,9 +25,9 @@ jobs:
steps:
- uses: actions/setup-go@v2
with:
go-version: "1.17"
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
go-version: "1.15"
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
with:
PATTERNS: |
**/**.go
@@ -28,7 +36,7 @@ jobs:
- name: install
run: make install install_abci
if: "env.GIT_DIFF != ''"
- uses: actions/cache@v2.1.6
- uses: actions/cache@v2.1.3
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
@@ -36,12 +44,44 @@ jobs:
${{ runner.os }}-go-
if: env.GIT_DIFF
# Cache binaries for use by other jobs
- uses: actions/cache@v2.1.6
- uses: actions/cache@v2.1.3
with:
path: ~/go/bin
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
if: env.GIT_DIFF

test_abci_apps:
runs-on: ubuntu-latest
needs: build
timeout-minutes: 5
steps:
- uses: actions/setup-go@v2
with:
go-version: "1.15"
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
with:
PATTERNS: |
**/**.go
go.mod
go.sum
- uses: actions/cache@v2.1.3
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
if: env.GIT_DIFF
- uses: actions/cache@v2.1.3
with:
path: ~/go/bin
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
if: env.GIT_DIFF
- name: test_abci_apps
run: abci/tests/test_app/test.sh
shell: bash
if: env.GIT_DIFF

test_abci_cli:
runs-on: ubuntu-latest
needs: build
@@ -49,22 +89,22 @@ jobs:
steps:
- uses: actions/setup-go@v2
with:
go-version: "1.17"
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
go-version: "1.15"
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
with:
PATTERNS: |
**/**.go
go.mod
go.sum
- uses: actions/cache@v2.1.6
- uses: actions/cache@v2.1.3
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
if: env.GIT_DIFF
- uses: actions/cache@v2.1.6
- uses: actions/cache@v2.1.3
with:
path: ~/go/bin
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
@@ -80,22 +120,22 @@ jobs:
steps:
- uses: actions/setup-go@v2
with:
go-version: "1.17"
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
go-version: "1.15"
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
with:
PATTERNS: |
**/**.go
go.mod
go.sum
- uses: actions/cache@v2.1.6
- uses: actions/cache@v2.1.3
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
if: env.GIT_DIFF
- uses: actions/cache@v2.1.6
- uses: actions/cache@v2.1.3
with:
path: ~/go/bin
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
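The `# Cache binaries for use by other jobs` comment describes a producer/consumer pattern: the `build` job stores `~/go/bin` under a commit-keyed cache, and dependent jobs restore it by reusing the same key. A minimal sketch of just that pattern; the final `abci-cli version` command is an assumed placeholder consumer:

```yaml
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - run: make install install_abci   # installs binaries into ~/go/bin
      # Cache binaries for use by other jobs.
      - uses: actions/cache@v2.1.3
        with:
          path: ~/go/bin
          key: ${{ runner.os }}-${{ github.sha }}-tm-binary
  test_abci_cli:
    needs: build
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      # Same key as above, so the binaries built in `build` are restored here.
      - uses: actions/cache@v2.1.3
        with:
          path: ~/go/bin
          key: ${{ runner.os }}-${{ github.sha }}-tm-binary
      - run: ~/go/bin/abci-cli version   # assumed placeholder consumer
```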
5 .gitignore vendored

@@ -15,7 +15,7 @@
.vagrant
.vendor-new/
.vscode/
abci/abci-cli
abci-cli
addrbook.json
artifacts/*
build/*
@@ -24,7 +24,6 @@ docs/.vuepress/dist
docs/_build
docs/dist
docs/node_modules/
docs/spec
index.html.md
libs/pubsub/query/fuzz_test/output
profile\.out
@@ -36,10 +35,10 @@ shunit2
terraform.tfstate
terraform.tfstate.backup
terraform.tfstate.d
test/app/grpc_client
test/e2e/build
test/e2e/networks/*/
test/logs
test/maverick/maverick
test/p2p/data/
vendor
test/fuzz/**/corpus
@@ -1,35 +1,32 @@
linters:
enable:
- asciicheck
- bodyclose
- deadcode
- depguard
- dogsled
- dupl
- errcheck
- exportloopref
# - funlen
# - gochecknoglobals
# - gochecknoinits
# - gocognit
- goconst
# - gocritic
- gocritic
# - gocyclo
# - godox
- gofmt
- goimports
- revive
- golint
- gosec
- gosimple
- govet
- ineffassign
# - interfacer
- lll
# - maligned
- misspell
# - maligned
- nakedret
- nolintlint
- prealloc
- scopelint
- staticcheck
- structcheck
- stylecheck
@@ -40,6 +37,8 @@ linters:
- varcheck
# - whitespace
# - wsl
# - gocognit
- nolintlint

issues:
exclude-rules:
@@ -54,9 +53,9 @@ issues:
linters-settings:
dogsled:
max-blank-identifiers: 3
golint:
min-confidence: 0
maligned:
suggest-new: true
misspell:
locale: US
# govet:
# check-shadowing: true
golint:
min-confidence: 0
@@ -1,4 +1,4 @@
project_name: tendermint
project_name: Tendermint

env:
# Require use of Go modules.
474
CHANGELOG.md
474
CHANGELOG.md
@@ -1,380 +1,21 @@
|
||||
# Changelog
|
||||
|
||||
Friendly reminder: We have a [bug bounty program](https://hackerone.com/cosmos).
|
||||
|
||||
## v0.35.0-rc2
|
||||
|
||||
September 27, 2021
|
||||
|
||||
### BREAKING CHANGES
|
||||
|
||||
- Go API
|
||||
|
||||
- [crypto/armor]: [\#6963](https://github.com/tendermint/tendermint/pull/6963) remove package which is unused, and based on
|
||||
deprecated fundamentals. Downstream users should maintain this
|
||||
library. (@tychoish)
|
||||
- [state] [store] [proxy] [rpc/core]: [\#6937](https://github.com/tendermint/tendermint/pull/6937) move packages to
|
||||
`internal` to prevent consumption of these internal APIs by
|
||||
external users. (@tychoish)
|
||||
|
||||
### FEATURES
|
||||
|
||||
- [\#6982](https://github.com/tendermint/tendermint/pull/6982) tendermint binary has built-in suppport for running the e2e application (with state sync support) (@cmwaters).
|
||||
|
||||
|
||||
## v0.35.0-rc1
|
||||
|
||||
September 8, 2021
|
||||
|
||||
Special thanks to external contributors on this release: @JayT106, @bipulprasad, @alessio, @Yawning, @silasdavis,
|
||||
@cuonglm, @tanyabouman, @JoeKash, @githubsands, @jeebster, @crypto-facs, @liamsi, and @gotjoshua
|
||||
|
||||
### BREAKING CHANGES
|
||||
|
||||
- CLI/RPC/Config
|
||||
- [pubsub/events] [\#6634](https://github.com/tendermint/tendermint/pull/6634) The `ResultEvent.Events` field is now of type `[]abci.Event` preserving event order instead of `map[string][]string`. (@alexanderbez)
|
||||
- [config] [\#5598](https://github.com/tendermint/tendermint/pull/5598) The `test_fuzz` and `test_fuzz_config` P2P settings have been removed. (@erikgrinaker)
|
||||
- [config] [\#5728](https://github.com/tendermint/tendermint/pull/5728) `fastsync.version = "v1"` is no longer supported (@melekes)
|
||||
- [cli] [\#5772](https://github.com/tendermint/tendermint/pull/5772) `gen_node_key` prints JSON-encoded `NodeKey` rather than ID and does not save it to `node_key.json` (@melekes)
|
||||
- [cli] [\#5777](https://github.com/tendermint/tendermint/pull/5777) use hyphen-case instead of snake_case for all cli commands and config parameters (@cmwaters)
|
||||
- [rpc] [\#6019](https://github.com/tendermint/tendermint/pull/6019) standardise RPC errors and return the correct status code (@bipulprasad & @cmwaters)
|
||||
- [rpc] [\#6168](https://github.com/tendermint/tendermint/pull/6168) Change default sorting to desc for `/tx_search` results (@melekes)
|
||||
- [cli] [\#6282](https://github.com/tendermint/tendermint/pull/6282) User must specify the node mode when using `tendermint init` (@cmwaters)
|
||||
- [state/indexer] [\#6382](https://github.com/tendermint/tendermint/pull/6382) reconstruct indexer, move txindex into the indexer package (@JayT106)
|
||||
- [cli] [\#6372](https://github.com/tendermint/tendermint/pull/6372) Introduce `BootstrapPeers` as part of the new p2p stack. Peers to be connected on startup (@cmwaters)
|
||||
- [config] [\#6462](https://github.com/tendermint/tendermint/pull/6462) Move `PrivValidator` configuration out of `BaseConfig` into its own section. (@tychoish)
|
||||
- [rpc] [\#6610](https://github.com/tendermint/tendermint/pull/6610) Add MaxPeerBlockHeight into /status rpc call (@JayT106)
|
||||
- [blocksync/rpc] [\#6620](https://github.com/tendermint/tendermint/pull/6620) Add TotalSyncedTime & RemainingTime to SyncInfo in /status RPC (@JayT106)
|
||||
- [rpc/grpc] [\#6725](https://github.com/tendermint/tendermint/pull/6725) Mark gRPC in the RPC layer as deprecated.
|
||||
- [blocksync/v2] [\#6730](https://github.com/tendermint/tendermint/pull/6730) Fast Sync v2 is deprecated, please use v0
|
||||
- [rpc] Add genesis_chunked method to support paginated and parallel fetching of large genesis documents.
|
||||
- [rpc/jsonrpc/server] [\#6785](https://github.com/tendermint/tendermint/pull/6785) `Listen` function updated to take an `int` argument, `maxOpenConnections`, instead of an entire config object. (@williambanfield)
|
||||
- [rpc] [\#6820](https://github.com/tendermint/tendermint/pull/6820) Update RPC methods to reflect changes in the p2p layer, disabling support for `UnsafeDialPeers` and `UnsafeDialPeers` when used with the new p2p layer, and changing the response format of the peer list in `NetInfo` for all users.
|
||||
- [cli] [\#6854](https://github.com/tendermint/tendermint/pull/6854) Remove deprecated snake case commands. (@tychoish)
|
||||
|
||||
- Apps
|
||||
- [ABCI] [\#6408](https://github.com/tendermint/tendermint/pull/6408) Change the `key` and `value` fields from `[]byte` to `string` in the `EventAttribute` type. (@alexanderbez)
|
||||
- [ABCI] [\#5447](https://github.com/tendermint/tendermint/pull/5447) Remove `SetOption` method from `ABCI.Client` interface
|
||||
- [ABCI] [\#5447](https://github.com/tendermint/tendermint/pull/5447) Reset `Oneof` indexes for `Request` and `Response`.
|
||||
- [ABCI] [\#5818](https://github.com/tendermint/tendermint/pull/5818) Use protoio for msg length delimitation. Migrates from int64 to uint64 length delimiters.
|
||||
- [ABCI] [\#3546](https://github.com/tendermint/tendermint/pull/3546) Add `mempool_error` field to `ResponseCheckTx`. This field will contain an error string if Tendermint encountered an error while adding a transaction to the mempool. (@williambanfield)
|
||||
- [Version] [\#6494](https://github.com/tendermint/tendermint/pull/6494) `TMCoreSemVer` has been renamed to `TMVersion`.
|
||||
- It is not required any longer to set ldflags to set version strings
|
||||
- [abci/counter] [\#6684](https://github.com/tendermint/tendermint/pull/6684) Delete counter example app
|
||||
|
||||
- Go API
|
||||
- [pubsub] [\#6634](https://github.com/tendermint/tendermint/pull/6634) The `Query#Matches` method along with other pubsub methods, now accepts a `[]abci.Event` instead of `map[string][]string`. (@alexanderbez)
|
||||
- [p2p] [\#6618](https://github.com/tendermint/tendermint/pull/6618) [\#6583](https://github.com/tendermint/tendermint/pull/6583) Move `p2p.NodeInfo`, `p2p.NodeID` and `p2p.NetAddress` into `types` to support use in external packages. (@tychoish)
|
||||
- [node] [\#6540](https://github.com/tendermint/tendermint/pull/6540) Reduce surface area of the `node` package by making most of the implementation details private. (@tychoish)
|
||||
- [p2p] [\#6547](https://github.com/tendermint/tendermint/pull/6547) Move the entire `p2p` package and all reactor implementations into `internal`. (@tychoish)
|
||||
- [libs/log] [\#6534](https://github.com/tendermint/tendermint/pull/6534) Remove the existing custom Tendermint logger backed by go-kit. The logging interface, `Logger`, remains. Tendermint still provides a default logger backed by the performant zerolog logger. (@alexanderbez)
|
||||
- [libs/time] [\#6495](https://github.com/tendermint/tendermint/pull/6495) Move types/time to libs/time to improve consistency. (@tychoish)
|
||||
- [mempool] [\#6529](https://github.com/tendermint/tendermint/pull/6529) The `Context` field has been removed from the `TxInfo` type. `CheckTx` now requires a `Context` argument. (@alexanderbez)
|
||||
- [abci/client, proxy] [\#5673](https://github.com/tendermint/tendermint/pull/5673) `Async` funcs return an error, `Sync` and `Async` funcs accept `context.Context` (@melekes)
|
||||
- [p2p] Remove unused function `MakePoWTarget`. (@erikgrinaker)
|
||||
- [libs/bits] [\#5720](https://github.com/tendermint/tendermint/pull/5720) Validate `BitArray` in `FromProto`, which now returns an error (@melekes)
|
||||
- [proto/p2p] Rename `DefaultNodeInfo` and `DefaultNodeInfoOther` to `NodeInfo` and `NodeInfoOther` (@erikgrinaker)
|
||||
- [proto/p2p] Rename `NodeInfo.default_node_id` to `node_id` (@erikgrinaker)
|
||||
- [libs/os] Kill() and {Must,}{Read,Write}File() functions have been removed. (@alessio)
|
||||
- [store] [\#5848](https://github.com/tendermint/tendermint/pull/5848) Remove block store state in favor of using the db iterators directly (@cmwaters)
|
||||
- [state] [\#5864](https://github.com/tendermint/tendermint/pull/5864) Use an iterator when pruning state (@cmwaters)
|
||||
- [types] [\#6023](https://github.com/tendermint/tendermint/pull/6023) Remove `tm2pb.Header`, `tm2pb.BlockID`, `tm2pb.PartSetHeader` and `tm2pb.NewValidatorUpdate`.
|
||||
- Each of the above types has a `ToProto` and `FromProto` method or function which replaced this logic.
|
||||
- [light] [\#6054](https://github.com/tendermint/tendermint/pull/6054) Move `MaxRetryAttempt` option from client to provider.
|
||||
- `NewWithOptions` now sets the max retry attempts and timeouts (@cmwaters)
|
||||
- [all] [\#6077](https://github.com/tendermint/tendermint/pull/6077) Change spelling from British English to American (@cmwaters)
|
||||
- Rename "Subscription.Cancelled()" to "Subscription.Canceled()" in libs/pubsub
|
||||
- Rename "behaviour" pkg to "behavior" and internalized it in blocksync v2
|
||||
- [rpc/client/http] [\#6176](https://github.com/tendermint/tendermint/pull/6176) Remove `endpoint` arg from `New`, `NewWithTimeout` and `NewWithClient` (@melekes)
|
||||
- [rpc/client/http] [\#6176](https://github.com/tendermint/tendermint/pull/6176) Unexpose `WSEvents` (@melekes)
|
||||
- [rpc/jsonrpc/client/ws_client] [\#6176](https://github.com/tendermint/tendermint/pull/6176) `NewWS` no longer accepts options (use `NewWSWithOptions` and `OnReconnect` funcs to configure the client) (@melekes)
|
||||
- [internal/libs] [\#6366](https://github.com/tendermint/tendermint/pull/6366) Move `autofile`, `clist`,`fail`,`flowrate`, `protoio`, `sync`, `tempfile`, `test` and `timer` lib packages to an internal folder
|
||||
- [libs/rand] [\#6364](https://github.com/tendermint/tendermint/pull/6364) Remove most of libs/rand in favour of standard lib's `math/rand` (@liamsi)
|
||||
- [mempool] [\#6466](https://github.com/tendermint/tendermint/pull/6466) The original mempool reactor has been versioned as `v0` and moved to a sub-package under the root `mempool` package.
|
||||
Some core types have been kept in the `mempool` package such as `TxCache` and it's implementations, the `Mempool` interface itself
|
||||
and `TxInfo`. (@alexanderbez)
|
||||
- [crypto/sr25519] [\#6526](https://github.com/tendermint/tendermint/pull/6526) Do not re-execute the Ed25519-style key derivation step when doing signing and verification. The derivation is now done once and only once. This breaks `sr25519.GenPrivKeyFromSecret` output compatibility. (@Yawning)
|
||||
- [types] [\#6627](https://github.com/tendermint/tendermint/pull/6627) Move `NodeKey` to types to make the type public.
|
||||
- [config] [\#6627](https://github.com/tendermint/tendermint/pull/6627) Extend `config` to contain methods `LoadNodeKeyID` and `LoadorGenNodeKeyID`
|
||||
- [blocksync] [\#6755](https://github.com/tendermint/tendermint/pull/6755) Rename `FastSync` and `Blockchain` package to `BlockSync` (@cmwaters)
|
||||
|
||||
- Data Storage
|
||||
- [store/state/evidence/light] [\#5771](https://github.com/tendermint/tendermint/pull/5771) Use an order-preserving varint key encoding (@cmwaters)
|
||||
- [mempool] [\#6396](https://github.com/tendermint/tendermint/pull/6396) Remove mempool's write ahead log (WAL), (previously unused by the tendermint code). (@tychoish)
|
||||
- [state] [\#6541](https://github.com/tendermint/tendermint/pull/6541) Move pruneBlocks from consensus/state to state/execution. (@JayT106)
|
||||
|
||||
- Tooling
|
||||
- [tools] [\#6498](https://github.com/tendermint/tendermint/pull/6498) Set OS home dir to instead of the hardcoded PATH. (@JayT106)
|
||||
- [cli/indexer] [\#6676](https://github.com/tendermint/tendermint/pull/6676) Reindex events command line tooling. (@JayT106)
|
||||
|
||||
### FEATURES
|
||||
|
||||
- [config] Add `--mode` flag and config variable. See [ADR-52](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-052-tendermint-mode.md) @dongsam
|
||||
- [rpc] [\#6329](https://github.com/tendermint/tendermint/pull/6329) Don't cap page size in unsafe mode (@gotjoshua, @cmwaters)
|
||||
- [pex] [\#6305](https://github.com/tendermint/tendermint/pull/6305) v2 pex reactor with backwards compatability. Introduces two new pex messages to
|
||||
accomodate for the new p2p stack. Removes the notion of seeds and crawling. All peer
|
||||
exchange reactors behave the same. (@cmwaters)
|
||||
- [crypto] [\#6376](https://github.com/tendermint/tendermint/pull/6376) Enable sr25519 as a validator key type
|
||||
- [mempool] [\#6466](https://github.com/tendermint/tendermint/pull/6466) Introduction of a prioritized mempool. (@alexanderbez)
|
||||
- `Priority` and `Sender` have been introduced into the `ResponseCheckTx` type, where the `priority` will determine the prioritization of
|
||||
the transaction when a proposer reaps transactions for a block proposal. The `sender` field acts as an index.
|
||||
- Operators may toggle between the legacy mempool reactor, `v0`, and the new prioritized reactor, `v1`, by setting the
|
||||
`mempool.version` configuration, where `v1` is the default configuration.
|
||||
- Applications that do not specify a priority, i.e. zero, will have transactions reaped by the order in which they are received by the node.
|
||||
- Transactions are gossiped in FIFO order as they are in `v0`.
|
||||
- [config/indexer] [\#6411](https://github.com/tendermint/tendermint/pull/6411) Introduce support for custom event indexing data sources, specifically PostgreSQL. (@JayT106)
|
||||
- [blocksync/event] [\#6619](https://github.com/tendermint/tendermint/pull/6619) Emit blocksync status event when switching consensus/blocksync (@JayT106)
|
||||
- [statesync/event] [\#6700](https://github.com/tendermint/tendermint/pull/6700) Emit statesync status start/end event (@JayT106)
|
||||
- [inspect] [\#6785](https://github.com/tendermint/tendermint/pull/6785) Add a new `inspect` command for introspecting the state and block store of a crashed tendermint node. (@williambanfield)
|
||||
|
||||
### IMPROVEMENTS
|
||||
|
||||
- [libs/log] Console log formatting changes as a result of [\#6534](https://github.com/tendermint/tendermint/pull/6534) and [\#6589](https://github.com/tendermint/tendermint/pull/6589). (@tychoish)
|
||||
- [statesync] [\#6566](https://github.com/tendermint/tendermint/pull/6566) Allow state sync fetchers and request timeout to be configurable. (@alexanderbez)
|
||||
- [types] [\#6478](https://github.com/tendermint/tendermint/pull/6478) Add `block_id` to `newblock` event (@jeebster)
|
||||
- [crypto/ed25519] [\#5632](https://github.com/tendermint/tendermint/pull/5632) Adopt zip215 `ed25519` verification. (@marbar3778)
|
||||
- [crypto/ed25519] [\#6526](https://github.com/tendermint/tendermint/pull/6526) Use [curve25519-voi](https://github.com/oasisprotocol/curve25519-voi) for `ed25519` signing and verification. (@Yawning)
|
||||
- [crypto/sr25519] [\#6526](https://github.com/tendermint/tendermint/pull/6526) Use [curve25519-voi](https://github.com/oasisprotocol/curve25519-voi) for `sr25519` signing and verification. (@Yawning)
|
||||
- [privval] [\#5603](https://github.com/tendermint/tendermint/pull/5603) Add `--key` to `init`, `gen_validator`, `testnet` & `unsafe_reset_priv_validator` for use in generating `secp256k1` keys.
|
||||
- [privval] [\#5725](https://github.com/tendermint/tendermint/pull/5725) Add gRPC support to private validator.
|
||||
- [privval] [\#5876](https://github.com/tendermint/tendermint/pull/5876) `tendermint show-validator` will query the remote signer if gRPC is being used (@marbar3778)
|
||||
- [abci/client] [\#5673](https://github.com/tendermint/tendermint/pull/5673) `Async` requests return an error if queue is full (@melekes)
|
||||
- [mempool] [\#5673](https://github.com/tendermint/tendermint/pull/5673) Cancel `CheckTx` requests if RPC client disconnects or times out (@melekes)
|
||||
- [abci] [\#5706](https://github.com/tendermint/tendermint/pull/5706) Added `AbciVersion` to `RequestInfo` allowing applications to check ABCI version when connecting to Tendermint. (@marbar3778)
|
||||
- [blocksync/v1] [\#5728](https://github.com/tendermint/tendermint/pull/5728) Remove blocksync v1 (@melekes)
|
||||
- [blocksync/v0] [\#5741](https://github.com/tendermint/tendermint/pull/5741) Relax termination conditions and increase sync timeout (@melekes)
|
||||
- [cli] [\#5772](https://github.com/tendermint/tendermint/pull/5772) `gen_node_key` output now contains node ID (`id` field) (@melekes)
|
||||
- [blocksync/v2] [\#5774](https://github.com/tendermint/tendermint/pull/5774) Send status request when new peer joins (@melekes)
|
||||
- [store] [\#5888](https://github.com/tendermint/tendermint/pull/5888) store.SaveBlock saves using batches instead of transactions for now to improve ACID properties. This is a quick fix for underlying issues around tm-db and ACID guarantees. (@githubsands)
|
||||
- [consensus] [\#5987](https://github.com/tendermint/tendermint/pull/5987) and [\#5792](https://github.com/tendermint/tendermint/pull/5792) Remove the `time_iota_ms` consensus parameter. Merge `tmproto.ConsensusParams` and `abci.ConsensusParams`. (@marbar3778, @valardragon)
|
||||
- [types] [\#5994](https://github.com/tendermint/tendermint/pull/5994) Reduce the use of protobuf types in core logic. (@marbar3778)
|
||||
- `ConsensusParams`, `BlockParams`, `ValidatorParams`, `EvidenceParams`, `VersionParams`, `sm.Version` and `version.Consensus` have become native types. They still utilize protobuf when being sent over the wire or written to disk.
|
||||
- [rpc/client/http] [\#6163](https://github.com/tendermint/tendermint/pull/6163) Do not drop events even if the `out` channel is full (@melekes)
|
||||
- [node] [\#6059](https://github.com/tendermint/tendermint/pull/6059) Validate and complete genesis doc before saving to state store (@silasdavis)
|
||||
- [state] [\#6067](https://github.com/tendermint/tendermint/pull/6067) Batch save state data (@githubsands & @cmwaters)
|
||||
- [crypto] [\#6120](https://github.com/tendermint/tendermint/pull/6120) Implement batch verification interface for ed25519 and sr25519. (@marbar3778)
|
||||
- [types] [\#6120](https://github.com/tendermint/tendermint/pull/6120) use batch verification for verifying commits signatures.
|
||||
- If the key type supports the batch verification API it will try to batch verify. If the verification fails we will single verify each signature.
|
||||
- [privval/file] [\#6185](https://github.com/tendermint/tendermint/pull/6185) Return error on `LoadFilePV`, `LoadFilePVEmptyState`. Allows for better programmatic control of Tendermint.
|
||||
- [privval] [\#6240](https://github.com/tendermint/tendermint/pull/6240) Add `context.Context` to privval interface.
|
||||
- [rpc] [\#6265](https://github.com/tendermint/tendermint/pull/6265) set cache control in http-rpc response header (@JayT106)
|
||||
- [statesync] [\#6378](https://github.com/tendermint/tendermint/pull/6378) Retry requests for snapshots and add a minimum discovery time (5s) for new snapshots.
|
||||
- [node/state] [\#6370](https://github.com/tendermint/tendermint/pull/6370) graceful shutdown in the consensus reactor (@JayT106)
|
||||
- [crypto/merkle] [\#6443](https://github.com/tendermint/tendermint/pull/6443) Improve HashAlternatives performance (@cuonglm)
|
||||
- [crypto/merkle] [\#6513](https://github.com/tendermint/tendermint/pull/6513) Optimize HashAlternatives (@marbar3778)
|
||||
- [p2p/pex] [\#6509](https://github.com/tendermint/tendermint/pull/6509) Improve addrBook.hash performance (@cuonglm)
|
||||
- [consensus/metrics] [\#6549](https://github.com/tendermint/tendermint/pull/6549) Change block_size gauge to a histogram for better observability over time (@marbar3778)
|
||||
- [statesync] [\#6587](https://github.com/tendermint/tendermint/pull/6587) Increase chunk priority and re-request chunks that don't arrive (@cmwaters)
|
||||
- [state/privval] [\#6578](https://github.com/tendermint/tendermint/pull/6578) No GetPubKey retry beyond the proposal/voting window (@JayT106)
|
||||
- [rpc] [\#6615](https://github.com/tendermint/tendermint/pull/6615) Add TotalGasUsed to block_results response (@crypto-facs)
|
||||
- [cmd/tendermint/commands] [\#6623](https://github.com/tendermint/tendermint/pull/6623) replace `$HOME/.some/test/dir` with `t.TempDir` (@tanyabouman)
|
||||
- [statesync] \6807 Implement P2P state provider as an alternative to RPC (@cmwaters)
|
||||
|
||||
### BUG FIXES

- [privval] [\#5638](https://github.com/tendermint/tendermint/pull/5638) Increase read/write timeout to 5s and calculate ping interval based on it (@JoeKash)
- [evidence] [\#6375](https://github.com/tendermint/tendermint/pull/6375) Fix bug with inconsistent LightClientAttackEvidence hashing (@cmwaters)
- [rpc] [\#6507](https://github.com/tendermint/tendermint/pull/6507) Ensure RPC client can handle URLs without ports (@JayT106)
- [statesync] [\#6463](https://github.com/tendermint/tendermint/pull/6463) Add Reverse Sync feature to fetch historical light blocks after state sync in order to verify any evidence (@cmwaters)
- [blocksync] [\#6590](https://github.com/tendermint/tendermint/pull/6590) Update the metrics during blocksync (@JayT106)
## v0.34.14

This release backports the `rollback` feature to allow recovery in the event of an incorrect app hash.

### FEATURES

- [\#6982](https://github.com/tendermint/tendermint/pull/6982) The tendermint binary now has built-in support for running the end-to-end test application (with state sync support) (@cmwaters).
- [cli] [#7033](https://github.com/tendermint/tendermint/pull/7033) Add a `rollback` command to roll back to the previous tendermint state. This may be useful in the event of a non-deterministic app hash or when reverting an upgrade. (@cmwaters) (see the usage sketch below)
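As a rough usage sketch of the backported command — flags and defaults may differ between releases, so check `tendermint rollback --help` on your build:

```sh
# Stop the node first, then roll back the latest tendermint state.
# --home is shown for clarity; it defaults to $HOME/.tendermint.
tendermint rollback --home "$HOME/.tendermint"

# Restart the node afterwards. Note the application state must also be
# rolled back one height for the app hashes to line up again.
tendermint start
```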
### IMPROVEMENTS

- [\#7103](https://github.com/tendermint/tendermint/pull/7104) Remove IAVL dependency (backport of #6550) (@cmwaters)

### BUG FIXES

- [\#7057](https://github.com/tendermint/tendermint/pull/7057) Import Postgres driver support for the psql indexer (@creachadair).
- [ABCI] [\#7110](https://github.com/tendermint/tendermint/issues/7110) Revert "change client to use multi-reader mutexes (#6873)" (@tychoish).
## v0.34.13

*September 6, 2021*

This release backports improvements to state synchronization and ABCI
performance under concurrent load, and the PostgreSQL event indexer.

### IMPROVEMENTS

- [statesync] [\#6881](https://github.com/tendermint/tendermint/issues/6881) Improvements to stateprovider logic (@cmwaters)
- [ABCI] [\#6873](https://github.com/tendermint/tendermint/issues/6873) Change client to use multi-reader mutexes (@tychoish)
- [indexing] [\#6906](https://github.com/tendermint/tendermint/issues/6906) Enable the PostgreSQL indexer sink (@creachadair)
## v0.34.12

*August 17, 2021*

Special thanks to external contributors on this release: @JayT106.

### FEATURES

- [rpc] [\#6717](https://github.com/tendermint/tendermint/pull/6717) Introduce the
  `/genesis_chunked` RPC endpoint for handling large genesis files by chunking them (@tychoish) (see the example below)
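As a rough usage sketch, assuming a node with the default RPC address; the chunk index parameter is named `chunk` on current releases:

```sh
# Fetch chunk 0 of the genesis document from a local node.
# The response reports the total number of chunks; iterate up to total-1
# and concatenate the base64-decoded "data" fields to rebuild genesis.json.
curl -s 'http://localhost:26657/genesis_chunked?chunk=0'
```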
### IMPROVEMENTS

- [rpc] [\#6825](https://github.com/tendermint/tendermint/issues/6825) Remove egregious INFO log from `ABCI#Query` RPC. (@alexanderbez)

### BUG FIXES

- [light] [\#6685](https://github.com/tendermint/tendermint/pull/6685) Fix a bug
  in context handling that would occasionally freeze state sync. (@cmwaters)
- [privval] [\#6748](https://github.com/tendermint/tendermint/issues/6748) Fix vote timestamp to prevent chain halt (@JayT106)
## v0.34.11

*June 18, 2021*

This release improves the robustness of state sync by tweaking channel priorities and timeouts and
adding two new parameters to the state sync config.

### BREAKING CHANGES

- Apps
  - [Version] [\#6494](https://github.com/tendermint/tendermint/pull/6494) `TMCoreSemVer` is no longer required to be set as an ldflag.

### IMPROVEMENTS

- [statesync] [\#6566](https://github.com/tendermint/tendermint/pull/6566) Allow state sync fetchers and request timeout to be configurable. (@alexanderbez)
- [statesync] [\#6378](https://github.com/tendermint/tendermint/pull/6378) Retry requests for snapshots and add a minimum discovery time (5s) for new snapshots. (@tychoish)
- [statesync] [\#6582](https://github.com/tendermint/tendermint/pull/6582) Increase chunk priority and add multiple retry chunk requests (@cmwaters)

### BUG FIXES

- [evidence] [\#6375](https://github.com/tendermint/tendermint/pull/6375) Fix bug with inconsistent LightClientAttackEvidence hashing (@cmwaters)
## v0.34.10

*April 14, 2021*

This release fixes a bug where peers would sometimes try to send messages
on incorrect channels. Special thanks to our friends at Oasis Labs for surfacing
this issue!

- [p2p/node] [\#6339](https://github.com/tendermint/tendermint/issues/6339) Fix bug with using custom channels (@cmwaters)
- [light] [\#6346](https://github.com/tendermint/tendermint/issues/6346) Correctly handle "height too high" errors to improve client robustness (@cmwaters)
## v0.34.9

*April 8, 2021*

This release fixes a moderate-severity security issue, Security Advisory Alderfly,
which impacts all networks that rely on Tendermint light clients.
Further details will be released once networks have upgraded.

This release also includes a small Go API-breaking change to reduce panics in the RPC layer.

Special thanks to our external contributors on this release: @gchaincl

### BREAKING CHANGES

- Go API
  - [rpc/jsonrpc/server] [\#6204](https://github.com/tendermint/tendermint/issues/6204) Modify `WriteRPCResponseHTTP(Error)` to return an error (@melekes)

### FEATURES

- [rpc] [\#6226](https://github.com/tendermint/tendermint/issues/6226) Index block events and expose a new RPC method, `/block_search`, to allow querying for blocks by `BeginBlock` and `EndBlock` events (@alexanderbez) (see the example query below)
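As a rough usage sketch — the predicate below is illustrative; any indexed block event or `block.height` condition can be used, with the same query language as `/tx_search`:

```sh
# Search for indexed blocks above height 100, ten results per page.
# Note the query string itself must be wrapped in double quotes.
curl -s 'http://localhost:26657/block_search?query="block.height>100"&page=1&per_page=10'
```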
### BUG FIXES

- [rpc/jsonrpc/server] [\#6191](https://github.com/tendermint/tendermint/issues/6191) Correctly unmarshal `RPCRequest` when data is `null` (@melekes)
- [p2p] [\#6289](https://github.com/tendermint/tendermint/issues/6289) Fix "unknown channels" bug on CustomReactors (@gchaincl)
- [light/evidence] Add logic to handle forward lunatic attacks (@cmwaters)
## v0.34.8

*February 25, 2021*

This release, in conjunction with [a fix in the Cosmos SDK](https://github.com/cosmos/cosmos-sdk/pull/8641),
introduces changes that should make the logs much, much quieter. 🎉

### IMPROVEMENTS

- [libs/log] [\#6174](https://github.com/tendermint/tendermint/issues/6174) Include timestamp (`ts` field; `time.RFC3339Nano` format) in JSON logger output (@melekes)

### BUG FIXES

- [abci] [\#6124](https://github.com/tendermint/tendermint/issues/6124) Fix a panic condition during callback execution in `ReCheckTx` under high tx load. (@alexanderbez)
## v0.34.7

*February 18, 2021*

This release fixes a downstream security issue which impacts Cosmos SDK
users who are:

* Using Cosmos SDK v0.40.0 or later, AND
* Running validator nodes, AND
* Using the file-based `FilePV` implementation for their consensus keys

Users who fulfill all the above criteria were susceptible to leaking
private key material in the logs. All other users are unaffected.

The root cause was a discrepancy
between the Tendermint Core (untyped) logger and the Cosmos SDK (typed) logger:
Tendermint Core's logger automatically stringifies Go interfaces whenever possible;
however, the Cosmos SDK's logger uses reflection to log the fields within a Go interface.

The introduction of the typed logger meant that previously un-logged fields within
interfaces were now sometimes logged, including the private key material inside the
`FilePV` struct.
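A minimal sketch of that failure mode, using made-up types and loggers rather than the actual Tendermint or Cosmos SDK implementations:

```go
package main

import (
	"fmt"
	"reflect"
)

// filePV stands in for a validator key holder. The real FilePV is more
// involved; this only illustrates the logging discrepancy.
type filePV struct {
	Address string
	PrivKey []byte // must never reach the logs
}

// String implements fmt.Stringer, so a stringifying logger prints
// only safe fields.
func (pv filePV) String() string {
	return fmt.Sprintf("FilePV{Address: %s}", pv.Address)
}

// logStringify mimics an untyped logger that prefers fmt.Stringer.
func logStringify(v interface{}) {
	fmt.Println("signer:", v) // uses String(): no key material
}

// logReflect mimics a typed logger that dumps fields via reflection,
// bypassing String() entirely.
func logReflect(v interface{}) {
	rv := reflect.ValueOf(v)
	rt := rv.Type()
	for i := 0; i < rv.NumField(); i++ {
		fmt.Printf("signer.%s=%v\n", rt.Field(i).Name, rv.Field(i))
	}
}

func main() {
	pv := filePV{Address: "B0A1", PrivKey: []byte("super-secret")}
	logStringify(pv) // signer: FilePV{Address: B0A1}
	logReflect(pv)   // signer.PrivKey=... -- the key material leaks!
}
```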
Tendermint Core v0.34.7 fixes this issue; however, we strongly recommend that all validators
use remote signer implementations instead of `FilePV` in production.

Thank you to @joe-bowman for his assistance with this vulnerability and a particular
shout-out to @marbar3778 for diagnosing it quickly.

### BUG FIXES

- [consensus] [\#6128](https://github.com/tendermint/tendermint/pull/6128) Remove privValidator from log call (@tessr)
## v0.34.6

*February 18, 2021*

_Tendermint Core v0.34.5 and v0.34.6 have been recalled due to release tooling problems._
## v0.34.4

*February 11, 2021*

This release includes a fix for a memory leak in the evidence reactor (see #6068, below).
All Tendermint clients are recommended to upgrade.
Thank you to our friends at Crypto.com for the initial report of this memory leak!

Special thanks to other external contributors on this release: @yayajacky, @odidev, @laniehei, and @c29r3!

### BUG FIXES

- [light] [\#6022](https://github.com/tendermint/tendermint/pull/6022) Fix a bug when the number of validators equals 100 (@melekes)
- [light] [\#6026](https://github.com/tendermint/tendermint/pull/6026) Fix a bug when height isn't provided for the RPC calls `/commit` and `/validators` (@cmwaters)
- [evidence] [\#6068](https://github.com/tendermint/tendermint/pull/6068) Terminate broadcastEvidenceRoutine when peer is stopped (@melekes)
## v0.34.3

*January 19, 2021*

This release includes a fix for a high-severity security vulnerability,
a DoS vector that impacted Tendermint Core v0.34.0-v0.34.2. For more details, see
[Security Advisory Mulberry](https://github.com/tendermint/tendermint/security/advisories/GHSA-p658-8693-mhvg)
or https://nvd.nist.gov/vuln/detail/CVE-2021-21271.

Tendermint Core v0.34.3 also updates GoGo Protobuf to 1.3.2 in order to pick up the fix for
https://nvd.nist.gov/vuln/detail/CVE-2021-3121.

Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermint).

### BUG FIXES

- [evidence] [[security fix]](https://github.com/tendermint/tendermint/security/advisories/GHSA-p658-8693-mhvg) Use correct source of evidence time (@cmwaters)
- [proto] [\#5889](https://github.com/tendermint/tendermint/pull/5889) Bump gogoproto to 1.3.2 (@marbar3778)
## v0.34.2

@@ -385,6 +26,8 @@ This release fixes a substantial bug in evidence handling where evidence could
sometimes be broadcast before the block containing that evidence was fully committed,
resulting in some nodes panicking when trying to verify said evidence.

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES

- Go API

@@ -408,6 +51,8 @@ disconnecting from this node. As a temporary remedy (until the mempool package
is refactored), the `max-batch-bytes` was disabled. Transactions will be sent
one by one without batching.

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES

- CLI/RPC/Config

@@ -436,6 +81,8 @@ Holy smokes, this is a big one! For a more reader-friendly overview of the chang
Special thanks to external contributors on this release: @james-ray, @fedekunze, @favadi, @alessio,
@joe-bowman, @cuonglm, @SadPencil and @dongsam.

And as always, a friendly reminder that we have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES

- CLI/RPC/Config

@@ -462,14 +109,14 @@ Special thanks to external contributors on this release: @james-ray, @fedekunze,
- [blockchain] [\#4637](https://github.com/tendermint/tendermint/pull/4637) Migrate blockchain reactor(s) to Protobuf encoding (@marbar3778)
- [evidence] [\#4949](https://github.com/tendermint/tendermint/pull/4949) Migrate evidence reactor to Protobuf encoding (@marbar3778)
- [mempool] [\#4940](https://github.com/tendermint/tendermint/pull/4940) Migrate mempool to Protobuf encoding (@marbar3778)
- [mempool] [\#5321](https://github.com/tendermint/tendermint/pull/5321) Batch transactions when broadcasting them to peers (@melekes)
  - `MaxBatchBytes` new config setting defines the max size of one batch.
- [p2p/pex] [\#4973](https://github.com/tendermint/tendermint/pull/4973) Migrate `p2p/pex` reactor to Protobuf encoding (@marbar3778)
- [statesync] [\#4943](https://github.com/tendermint/tendermint/pull/4943) Migrate state sync reactor to Protobuf encoding (@marbar3778)
- Blockchain Protocol

  - [evidence] [\#4725](https://github.com/tendermint/tendermint/pull/4725) Remove `Pubkey` from `DuplicateVoteEvidence` (@marbar3778)
  - [evidence] [\#5499](https://github.com/tendermint/tendermint/pull/5449) Cap evidence to a maximum number of bytes (supersedes [\#4780](https://github.com/tendermint/tendermint/pull/4780)) (@cmwaters)
  - [merkle] [\#5193](https://github.com/tendermint/tendermint/pull/5193) Header hashes are no longer empty for empty inputs, notably `DataHash`, `EvidenceHash`, and `LastResultsHash` (@erikgrinaker)
  - [state] [\#4845](https://github.com/tendermint/tendermint/pull/4845) Include `GasWanted` and `GasUsed` into `LastResultsHash` (@melekes)
@@ -528,7 +175,7 @@ Special thanks to external contributors on this release: @james-ray, @fedekunze,
- [types] [\#4852](https://github.com/tendermint/tendermint/pull/4852) Vote & Proposal `SignBytes` is now func `VoteSignBytes` & `ProposalSignBytes` (@marbar3778)
- [types] [\#4798](https://github.com/tendermint/tendermint/pull/4798) Simplify `VerifyCommitTrusting` func + remove extra validation (@melekes)
- [types] [\#4845](https://github.com/tendermint/tendermint/pull/4845) Remove `ABCIResult` (@melekes)
- [types] [\#5029](https://github.com/tendermint/tendermint/pull/5029) Rename all values from `PartsHeader` to `PartSetHeader` for consistency (@marbar3778)
- [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) `Total` in `Parts` & `PartSetHeader` has been changed from an `int` to a `uint32` (@marbar3778)
- [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) Vote: `ValidatorIndex` & `Round` are now `int32` (@marbar3778)
- [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) Proposal: `POLRound` & `Round` are now `int32` (@marbar3778)
@@ -566,7 +213,7 @@ Special thanks to external contributors on this release: @james-ray, @fedekunze,
- [evidence] [\#4722](https://github.com/tendermint/tendermint/pull/4722) Consolidate evidence store and pool types to improve evidence DB (@cmwaters)
- [evidence] [\#4839](https://github.com/tendermint/tendermint/pull/4839) Reject duplicate evidence from being proposed (@cmwaters)
- [evidence] [\#5219](https://github.com/tendermint/tendermint/pull/5219) Change the source of evidence time to block time (@cmwaters)
- [libs] [\#5126](https://github.com/tendermint/tendermint/pull/5126) Add a sync package which wraps sync.(RW)Mutex & deadlock.(RW)Mutex and use a build flag (deadlock) in order to enable deadlock checking (@marbar3778)
- [light] [\#4935](https://github.com/tendermint/tendermint/pull/4935) Fetch and compare a new header with witnesses in parallel (@melekes)
- [light] [\#4929](https://github.com/tendermint/tendermint/pull/4929) Compare header with witnesses only when doing bisection (@melekes)
- [light] [\#4916](https://github.com/tendermint/tendermint/pull/4916) Validate basic for inbound validator sets and headers before further processing them (@cmwaters)
@@ -676,6 +323,9 @@ as 2/3+ of the signatures are checked._

Special thanks to @njmurarka at Bluzelle Networks for reporting this.

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### SECURITY:

- [consensus] Do not allow signatures for a wrong block in commits (@ebuchman)
@@ -691,6 +341,8 @@ need to update your code.**

Special thanks to external contributors on this release: @tau3

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

- Go API

@@ -750,6 +402,8 @@ Special thanks to external contributors on this release: @tau3,

Special thanks to external contributors on this release: @whylee259, @greg-szabo

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

- Go API
@@ -836,6 +490,9 @@ Notes:

Special thanks to [fudongbai](https://hackerone.com/fudongbai) for finding
and reporting this.

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### SECURITY:

- [mempool] Reserve IDs in InitPeer instead of AddPeer (@tessr)

@@ -848,6 +505,8 @@ and reporting this.

Special thanks to external contributors on this release:
@antho1404, @michaelfig, @gterzian, @tau3, @Shivani912

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

- CLI/RPC/Config

@@ -898,6 +557,9 @@ Special thanks to external contributors on this release:

Special thanks to external contributors on this release:
@princesinha19

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### FEATURES:

- [rpc] [\#3333](https://github.com/tendermint/tendermint/issues/3333) Add `order_by` to `/tx_search` endpoint, allowing the default ordering to be changed from asc to desc (@princesinha19)
@@ -916,6 +578,9 @@ Special thanks to external contributors on this release:

Special thanks to external contributors on this release: @mrekucci, @PSalant726, @princesinha19, @greg-szabo, @dongsam, @cuonglm, @jgimeno, @yenkhoon

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

*January 14, 2020*

This release contains breaking changes to the `Block#Header`, specifically

@@ -1144,6 +809,9 @@ Notes:

Special thanks to [fudongbai](https://hackerone.com/fudongbai) for finding
and reporting this.

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### SECURITY:

- [mempool] Reserve IDs in InitPeer instead of AddPeer (@tessr)
@@ -1155,6 +823,9 @@ _January, 9, 2020_

Special thanks to external contributors on this release: @greg-szabo, @gregzaitsev, @yenkhoon

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### FEATURES:

- [rpc/lib] [\#4248](https://github.com/tendermint/tendermint/issues/4248) RPC client basic authentication support (@greg-szabo)

@@ -1176,6 +847,9 @@ Special thanks to external contributors on this release: @greg-szabo, @gregzaits

Special thanks to external contributors on this release: @erikgrinaker, @guagualvcha, @hsyis, @cosmostuba, @whunmr, @austinabell

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES:
@@ -1215,6 +889,9 @@ identified and fixed here.

Special thanks to [elvishacker](https://hackerone.com/elvishacker) for finding
and reporting this.

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

- Go API

@@ -1241,6 +918,9 @@ accepting new peers and only allowing `ed25519` pubkeys.

Special thanks to [fudongbai](https://hackerone.com/fudongbai) for pointing
this out.

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### SECURITY:

- [p2p] [\#4030](https://github.com/tendermint/tendermint/issues/4030) Only allow ed25519 pubkeys when connecting

@@ -1256,6 +936,9 @@ All clients are recommended to upgrade. See

Special thanks to [fudongbai](https://hackerone.com/fudongbai) for discovering
and reporting this issue.

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### SECURITY:

- [p2p] [\#4030](https://github.com/tendermint/tendermint/issues/4030) Fix for panic on a nil public key sent to a peer
@@ -1266,6 +949,9 @@ and reporting this issue.

Special thanks to external contributors on this release: @jon-certik, @gracenoah, @PSalant726, @gchaincl

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

- CLI/RPC/Config

@@ -1301,6 +987,9 @@ guide.

Special thanks to external contributors on this release:
@gchaincl, @bluele, @climber73

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### IMPROVEMENTS:

- [consensus] [\#3839](https://github.com/tendermint/tendermint/issues/3839) Reduce "Error attempting to add vote" message severity (Error -> Info)

@@ -1321,6 +1010,9 @@ Special thanks to external contributors on this release:

Special thanks to external contributors on this release:
@ruseinov, @bluele, @guagualvcha

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

- Go API

@@ -1360,6 +1052,9 @@ This release contains a minor enhancement to the ABCI and some breaking changes

- CheckTx requests include a `CheckTxType` enum that can be set to `Recheck` to indicate to the application that this transaction was already checked/validated and certain expensive operations (like checking signatures) can be skipped
- Removed various functions from `libs` pkgs
Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

- Go API

@@ -1405,6 +1100,9 @@ and the RPC, namely:
[docs](https://github.com/tendermint/tendermint/blob/60827f75623b92eff132dc0eff5b49d2025c591e/docs/spec/abci/abci.md#events)
- Bind RPC to localhost by default, not to the public interface [UPGRADING/RPC_Changes](./UPGRADING.md#rpc_changes)

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

* CLI/RPC/Config

@@ -1505,6 +1203,8 @@ Notes:

Special thanks to [fudongbai](https://hackerone.com/fudongbai) for finding
and reporting this.

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### SECURITY:
@@ -1525,6 +1225,8 @@ identified and fixed here.

Special thanks to [elvishacker](https://hackerone.com/elvishacker) for finding
and reporting this.

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

@@ -1552,6 +1254,8 @@ accepting new peers and only allowing `ed25519` pubkeys.

Special thanks to [fudongbai](https://hackerone.com/fudongbai) for pointing
this out.

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### SECURITY:
@@ -1568,6 +1272,8 @@ All clients are recommended to upgrade. See

Special thanks to [fudongbai](https://hackerone.com/fudongbai) for discovering
and reporting this issue.

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### SECURITY:

@@ -1863,6 +1569,8 @@ See the [v0.31.0
Milestone](https://github.com/tendermint/tendermint/milestone/19?closed=1) for
more details.

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES:
@@ -2083,6 +1791,8 @@ This release contains two important fixes: one for p2p layer where we sometimes
were not closing connections and one for consensus layer where consensus with
no empty blocks (`create_empty_blocks = false`) could halt.

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### IMPROVEMENTS:
- [pex] [\#3037](https://github.com/tendermint/tendermint/issues/3037) Only log "Reached max attempts to dial" once

@@ -2122,6 +1832,8 @@ While we are trying to stabilize the Block protocol to preserve compatibility
with old chains, there may be some final changes yet to come before Cosmos
launch as we continue to audit and test the software.

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

@@ -2170,6 +1882,8 @@ launch as we continue to audit and test the software.

Special thanks to external contributors on this release:
@HaoyangLiu

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### BUG FIXES:
- [consensus] Fix consensus halt from proposing blocks with too much evidence
@@ -2223,7 +1937,7 @@ See [UPGRADING.md](UPGRADING.md) for more details.

- [build] [\#3085](https://github.com/tendermint/tendermint/issues/3085) Fix `Version` field in build scripts (@husio)
- [crypto/multisig] [\#3102](https://github.com/tendermint/tendermint/issues/3102) Fix multisig keys address length
- [crypto/encoding] [\#3101](https://github.com/tendermint/tendermint/issues/3101) Fix `PubKeyMultisigThreshold` unmarshalling into `crypto.PubKey` interface
- [p2p/conn] [\#3111](https://github.com/tendermint/tendermint/issues/3111) Make SecretConnection thread safe
- [rpc] [\#3053](https://github.com/tendermint/tendermint/issues/3053) Fix internal error in `/tx_search` when results are empty
  (@gianfelipe93)

@@ -2298,6 +2012,8 @@ Special thanks to @dlguddus for discovering a [major
issue](https://github.com/tendermint/tendermint/issues/2718#issuecomment-440888677)
in the proposer selection algorithm.

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

This release is primarily about fixes to the proposer selection algorithm
in preparation for the [Cosmos Game of

@@ -2360,6 +2076,8 @@ Special thanks to external contributors on this release:
@ackratos, @goolAdapter, @james-ray, @joe-bowman, @kostko,
@nagarajmanjunath, @tomtau

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### FEATURES:

@@ -2399,6 +2117,8 @@ Special thanks to external contributors on this release:

Special thanks to external contributors on this release:
@danil-lashin, @kevlubkcm, @krhubert, @srmo

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

@@ -2443,6 +2163,8 @@ Special thanks to external contributors on this release:

Special thanks to external contributors on this release: @hleb-albau, @zhuzeyu

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### FEATURES:

- [rpc] [\#2582](https://github.com/tendermint/tendermint/issues/2582) Enable CORS on RPC API (@hleb-albau)

@@ -2460,6 +2182,8 @@ Special thanks to external contributors on this release: @hleb-albau, @zhuzeyu

Special thanks to external contributors on this release: @katakonst

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### IMPROVEMENTS:

- [consensus] [\#2704](https://github.com/tendermint/tendermint/issues/2704) Simplify valid POL round logic

@@ -2633,6 +2357,8 @@ It also addresses some issues found via security audit, removes various unused
functions from `libs/common`, and implements
[ADR-012](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-012-peer-transport.md).

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

BREAKING CHANGES:

* CLI/RPC/Config

@@ -2662,7 +2388,7 @@ FEATURES:
- [libs] [\#2286](https://github.com/tendermint/tendermint/issues/2286) Panic if `autofile` or `db/fsdb` permissions change from 0600.

IMPROVEMENTS:
- [libs/db] [\#2371](https://github.com/tendermint/tendermint/issues/2371) Output error instead of panic when the given `db_backend` is not initialized (@bradyjoestar)
- [mempool] [\#2399](https://github.com/tendermint/tendermint/issues/2399) Make mempool cache a proper LRU (@bradyjoestar)
- [p2p] [\#2126](https://github.com/tendermint/tendermint/issues/2126) Introduce PeerTransport interface to improve isolation of concerns
- [libs/common] [\#2326](https://github.com/tendermint/tendermint/issues/2326) Service returns ErrNotStarted
@@ -1,42 +1,61 @@

# Unreleased Changes

Friendly reminder: We have a [bug bounty program](https://hackerone.com/cosmos).

## vX.X

Month, DD, YYYY

Special thanks to external contributors on this release:

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES

- CLI/RPC/Config

  - [rpc] Remove the deprecated gRPC interface to the RPC service (@creachadair).
  - [config] \#5598 The `test_fuzz` and `test_fuzz_config` P2P settings have been removed. (@erikgrinaker)
  - [config] \#5728 `fast_sync = "v1"` is no longer supported (@melekes)
  - [cli] \#5772 `gen_node_key` prints JSON-encoded `NodeKey` rather than ID and does not save it to `node_key.json` (@melekes)
  - [cli] \#5777 Use hyphen-case instead of snake_case for all CLI commands and config parameters (@cmwaters)

- Apps
  - [ABCI] \#5447 Remove `SetOption` method from `ABCI.Client` interface
  - [ABCI] \#5447 Reset `Oneof` indexes for `Request` and `Response`.
  - [ABCI] \#5818 Use protoio for msg length delimitation. Migrates from int64 to uint64 length delimiters.

- P2P Protocol

  - [p2p] \#7035 Remove legacy P2P routing implementation and
    associated configuration options (@tychoish)

- Go API

  - [blocksync] \#7046 Remove v2 implementation of the blocksync
    service and reactor, which was disabled in the previous release
    (@tychoish)
  - [p2p] \#7064 Remove WDRR queue implementation. (@tychoish)
  - [abci/client, proxy] \#5673 `Async` funcs return an error, `Sync` and `Async` funcs accept `context.Context` (@melekes)
  - [p2p] Removed unused function `MakePoWTarget`. (@erikgrinaker)
  - [libs/bits] \#5720 Validate `BitArray` in `FromProto`, which now returns an error (@melekes)
  - [proto/p2p] Renamed `DefaultNodeInfo` and `DefaultNodeInfoOther` to `NodeInfo` and `NodeInfoOther` (@erikgrinaker)
  - [proto/p2p] Rename `NodeInfo.default_node_id` to `node_id` (@erikgrinaker)
  - [libs/os] Kill() and {Must,}{Read,Write}File() functions have been removed. (@alessio)
  - [store] \#5848 Remove block store state in favor of using the db iterators directly (@cmwaters)
  - [state] \#5864 Use an iterator when pruning state (@cmwaters)

- Blockchain Protocol

- Data Storage
  - [store/state/evidence/light] \#5771 Use an order-preserving varint key encoding (@cmwaters)

### FEATURES

- [cli] [#7033](https://github.com/tendermint/tendermint/pull/7033) Add a `rollback` command to roll back to the previous tendermint state in the event of a non-deterministic app hash or reverting an upgrade.
- [mempool, rpc] \#7041 Add removeTx operation to the RPC layer. (@tychoish)

### IMPROVEMENTS

- [crypto/ed25519] \#5632 Adopt zip215 `ed25519` verification. (@marbar3778)
- [privval] \#5603 Add `--key` to `init`, `gen_validator`, `testnet` & `unsafe_reset_priv_validator` for use in generating `secp256k1` keys.
- [privval] \#5725 Add gRPC support to private validator.
- [privval] \#5876 `tendermint show-validator` will query the remote signer if gRPC is being used (@marbar3778)
- [abci/client] \#5673 `Async` requests return an error if queue is full (@melekes)
- [mempool] \#5673 Cancel `CheckTx` requests if RPC client disconnects or times out (@melekes)
- [abci] \#5706 Added `AbciVersion` to `RequestInfo` allowing applications to check ABCI version when connecting to Tendermint. (@marbar3778)
- [blockchain/v1] \#5728 Remove in favor of v2 (@melekes)
- [blockchain/v0] \#5741 Relax termination conditions and increase sync timeout (@melekes)
- [cli] \#5772 `gen_node_key` output now contains node ID (`id` field) (@melekes)
- [blockchain/v2] \#5774 Send status request when new peer joins (@melekes)
- [consensus] \#5792 Deprecate the `time_iota_ms` consensus parameter, to reduce the bug surface. The parameter is no longer used. (@valardragon)

### BUG FIXES

- Fix: assignment copies lock value in `BitArray.UnmarshalJSON()` (@lklimek)
- [types] \#5523 Change JSON naming of `PartSetHeader` within `BlockID` from `parts` to `part_set_header` (@marbar3778)
- [privval] \#5638 Increase read/write timeout to 5s and calculate ping interval based on it (@JoeKash)
- [blockchain/v1] [\#5701](https://github.com/tendermint/tendermint/pull/5701) Handle peers without blocks (@melekes)
- [blockchain/v1] \#5711 Fix deadlock (@melekes)
231
CONTRIBUTING.md
@@ -26,8 +26,7 @@ will indicate their support with a heartfelt emoji.

If the issue would benefit from thorough discussion, maintainers may
request that you create a [Request For
Comment](https://github.com/tendermint/spec/tree/master/rfc)
in the Tendermint spec repo. Discussion
at the RFC stage will build collective understanding of the dimensions
of the problems and help structure conversations around trade-offs.
@@ -109,20 +108,37 @@ We use [Protocol Buffers](https://developers.google.com/protocol-buffers) along

For linting, checking breaking changes and generating proto stubs, we use [buf](https://buf.build/). If you would like to run linting and check whether the changes you have made are breaking, you will need to have Docker running locally. The linting command is `make proto-lint` and the breaking-changes check is `make proto-check-breaking`.

There are two ways to generate your proto stubs:

1. Use Docker: pull an image that will generate your proto stubs with no need to install anything. `make proto-gen-docker`
2. Run `make proto-gen` after installing `buf` and `gogoproto`; you can do this by running `make protobuf`.

### Installation Instructions

To install `protoc`, download an appropriate release (<https://github.com/protocolbuffers/protobuf>) and then move the provided binaries into your PATH (follow instructions in the README included with the download).

To install `gogoproto`, do the following:

```sh
go get github.com/gogo/protobuf/gogoproto
cd $GOPATH/pkg/mod/github.com/gogo/protobuf@v1.3.1 # or wherever go get installs things
make install
```

You should now be able to run `make proto-gen` from inside the root Tendermint directory to generate new files from proto files.
### Visual Studio Code

If you are a VS Code user, you may want to add the following to your `.vscode/settings.json`:

```json
{
  "protoc": {
    "options": [
      "--proto_path=${workspaceRoot}/proto",
      "--proto_path=${workspaceRoot}/third_party/proto"
    ]
  }
}
```
@@ -227,86 +243,125 @@ Fixes #nnnn

Each PR should have one commit once it lands on `master`; this can be accomplished by using the "squash and merge" button on GitHub. Be sure to edit your commit message, though!

### Release Procedure

#### Major Release

This major release process assumes that this release was preceded by release candidates.
If there were no release candidates, and you'd like to cut a major release directly from master, see below.

1. Start on the latest RC branch (`RCx/vX.X.0`).
2. Run integration tests.
3. Branch off of the RC branch (`git checkout -b release-prep`) and prepare the release:
   - "Squash" changes from the changelog entries for the RCs into a single entry,
     and add all changes included in `CHANGELOG_PENDING.md`.
     (Squashing includes both combining all entries, as well as removing or simplifying
     any intra-RC changes. It may also help to alphabetize the entries by package name.)
   - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
     all PRs
   - Ensure that UPGRADING.md is up-to-date and includes notes on any breaking changes
     or other upgrading flows.
   - Bump P2P and block protocol versions in `version.go`, if necessary
   - Bump ABCI protocol version in `version.go`, if necessary
   - Add any release notes you would like to be added to the body of the release to `release_notes.md`.
4. Open a PR with these changes against the RC branch (`RCx/vX.X.0`).
5. Once these changes are on the RC branch, branch off of the RC branch again to create a release branch:
   - `git checkout RCx/vX.X.0`
   - `git checkout -b release/vX.X.0`
6. Push a tag with prepared release details. This will trigger the actual release `vX.X.0`.
   - `git tag -a vX.X.0 -m 'Release vX.X.0'`
   - `git push origin vX.X.0`
7. Make sure that `master` is updated with the latest `CHANGELOG.md`, `CHANGELOG_PENDING.md`, and `UPGRADING.md`.
8. Create the long-lived minor release branch `RC0/vX.X.1` for the next point release on this
   new major release series.
##### Major Release (from `master`)

1. Start on `master`
2. Run integration tests (see `test_integrations` in Makefile)
3. Prepare release in a pull request against `master` (to be squash merged):
   - Copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md`; if this release
     had release candidates, squash all the RC updates into one
   - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
     all issues
   - Run `bash ./scripts/authors.sh` to get a list of authors since the latest
     release, and add the github aliases of external contributors to the top of
     the changelog. To lookup an alias from an email, try `bash ./scripts/authors.sh <email>`
   - Reset the `CHANGELOG_PENDING.md`
   - Bump P2P and block protocol versions in `version.go`, if necessary
   - Bump ABCI protocol version in `version.go`, if necessary
   - Make sure all significant breaking changes are covered in `UPGRADING.md`
   - Add any release notes you would like to be added to the body of the release to `release_notes.md`.
4. Push a tag with prepared release details (this will trigger the release `vX.X.0`)
   - `git tag -a vX.X.x -m 'Release vX.X.x'`
   - `git push origin vX.X.x`
5. Update the `CHANGELOG.md` file on master with the release's changelog.
6. Delete any RC branches and tags for this release (if applicable)
#### Minor Release (Point Releases)

Minor releases are done differently from major releases: they are built off of long-lived backport branches, rather than from master.
Each release "line" (e.g. 0.34 or 0.33) has its own long-lived backport branch, and
the backport branches have names like `v0.34.x` or `v0.33.x` (literally, `x`; it is not a placeholder in this case).

As non-breaking changes land on `master`, they should also be backported (cherry-picked) to these backport branches; a sketch of that flow follows.
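One reasonable backport flow is sketched below; the branch and commit names are placeholders:

```sh
# Start a backport branch from the tip of the release line.
git fetch origin
git checkout -b backport-some-fix origin/v0.34.x

# Cherry-pick the commit as it landed on master; -x records the
# original hash in the new commit message for traceability.
git cherry-pick -x <commit-hash-from-master>

# Push the branch and open a PR against v0.34.x as usual.
git push origin backport-some-fix
```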
Minor releases don't have release candidates by default, although any tricky changes may merit a release candidate.

To create a minor release:

1. Checkout the long-lived backport branch: `git checkout vX.X.x`
2. Run integration tests: `make test_integrations`
3. Check out a new branch and prepare the release:
   - Copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md`
   - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for all issues
   - Run `bash ./scripts/authors.sh` to get a list of authors since the latest release, and add the GitHub aliases of external contributors to the top of the CHANGELOG. To lookup an alias from an email, try `bash ./scripts/authors.sh <email>`
   - Reset the `CHANGELOG_PENDING.md`
   - Bump the ABCI version number, if necessary.
     (Note that ABCI follows semver, and that ABCI versions are the only versions
     which can change during minor releases, and only field additions are valid minor changes.)
   - Add any release notes you would like to be added to the body of the release to `release_notes.md`.
4. Open a PR with these changes that will land them back on `vX.X.x`
5. Once this change has landed on the backport branch, make sure to pull it locally, then push a tag.
   - `git tag -a vX.X.x -m 'Release vX.X.x'`
   - `git push origin vX.X.x`
6. Create a pull request back to master with the CHANGELOG & version changes from the latest release.
   - Remove all `R:minor` labels from the pull requests that were included in the release.
   - Do not merge the backport branch into master.
#### Release Candidates

Before creating an official release, especially a major release, we may want to create a
release candidate (RC) for our friends and partners to test out. We use git tags to
create RCs, and we build them off of RC branches. RC branches typically have names formatted
like `RCX/vX.X.X` (or, concretely, `RC0/v0.34.0`), while the tags themselves follow
the "standard" release naming conventions, with `-rcX` at the end (`vX.X.X-rcX`).

(Note that branches and tags _cannot_ have the same names, so it's important that these branches
have distinct names from the tags/release names.)

1. Start from the RC branch (e.g. `RC0/v0.34.0`).
2. Create the new tag, specifying a name and a tag "message":
   `git tag -a v0.34.0-rc0 -m "Release Candidate v0.34.0-rc0"`
3. Push the tag back up to origin:
   `git push origin v0.34.0-rc0`
   Now the tag should be available on the repo's releases page.
4. Create a new release candidate branch for any possible updates to the RC:
   `git checkout -b RC1/v0.34.0; git push origin RC1/v0.34.0`
## Testing

### Unit tests

All repos should be hooked up to [CircleCI](https://circleci.com/).

Unit tests are located in `_test.go` files as directed by [the Go testing
package](https://golang.org/pkg/testing/). If you're adding or removing a
function, please check there's a `TestType_Method` test for it; a sketch of the naming convention follows.

Run: `make test`
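For illustration, the naming convention looks like this; `Validator` and `Hash` are made-up stand-ins for a real type and method under test:

```go
package types

import "testing"

// Validator and Hash are illustrative stand-ins only.
type Validator struct{ Power int64 }

func (v Validator) Hash() []byte { return []byte{byte(v.Power)} }

// TestValidator_Hash follows the TestType_Method naming convention,
// which makes it easy to find the test for a given method.
func TestValidator_Hash(t *testing.T) {
	v := Validator{Power: 1}
	if len(v.Hash()) == 0 {
		t.Fatal("expected non-empty hash")
	}
}
```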
### Integration tests

Integration tests are also located in `_test.go` files. What differentiates
them is a more complicated setup, which usually involves setting up two or more
components.

Run: `make test_integrations`

### End-to-end tests

End-to-end tests are used to verify a fully integrated Tendermint network.

See [README](./test/e2e/README.md) for details.

Run:

```sh
cd test/e2e && \
make && \
./build/runner -f networks/ci.toml
```
### Model-based tests (ADVANCED)

*NOTE: if you're just submitting your first PR, you most probably (99.9%) won't need to touch these.*

For components that have been [formally
verified](https://en.wikipedia.org/wiki/Formal_verification) using
[TLA+](https://en.wikipedia.org/wiki/TLA%2B), it may be possible to generate
tests using a combination of the [Apalache Model
Checker](https://apalache.informal.systems/) and the [tendermint-rs testgen
util](https://github.com/informalsystems/tendermint-rs/tree/master/testgen).

There's a lot to take in here. If you want to learn more, check out [this
video](https://www.youtube.com/watch?v=aveoIMphzW8) by Andrey Kupriyanov
& Igor Konnov.

At the moment, we have model-based tests for the light client, located in the
`./light/mbt` directory.

Run: `cd light/mbt && go test`
### Fuzz tests (ADVANCED)

*NOTE: if you're just submitting your first PR, you most probably (99.9%) won't need to touch these.*

[Fuzz tests](https://en.wikipedia.org/wiki/Fuzzing) can be found inside the
`./test/fuzz` directory. See [README.md](./test/fuzz/README.md) for details.

Run: `cd test/fuzz && make fuzz-{PACKAGE-COMPONENT}`
### Jepsen tests (ADVANCED)

*NOTE: if you're just submitting your first PR, you most probably (99.9%) won't need to touch these.*

[Jepsen](http://jepsen.io/) tests are used to verify the
[linearizability](https://jepsen.io/consistency/models/linearizable) property
of the Tendermint consensus. They are located in a separate repository,
<https://github.com/tendermint/jepsen>. Please refer to its README for more
information.

If a repo has `.go` files in the root directory, they will be automatically
tested by circle using `go test -v -race ./...`. If not, it will need a
`circle.yml`. Ideally, every repo has a `Makefile` that defines `make test` and
includes its continuous integration status using a badge in the `README.md`.
### RPC Testing

**If you contribute to the RPC endpoints it's important to document your
changes in the [Openapi file](./rpc/openapi/openapi.yaml)**.

To test your changes you must install `nodejs` and run:

```bash
npm i -g dredd
@@ -314,8 +369,4 @@ make build-linux build-contract-tests-hooks
make contract-tests
```

**WARNING: these are currently broken due to <https://github.com/apiaryio/dredd>
not supporting complete OpenAPI 3**.

This command will spin up a network and check every endpoint against what has
been documented.
@@ -1,5 +1,5 @@
# stage 1 Generate Tendermint Binary
FROM golang:1.16-alpine as builder
RUN apk update && \
    apk upgrade && \
    apk --no-cache add make

@@ -9,7 +9,7 @@ RUN wget http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm &
RUN yum -y groupinstall "Development Tools"
RUN yum -y install leveldb-devel which

ENV GOVERSION=1.16.5

RUN cd /tmp && \
    wget https://dl.google.com/go/go${GOVERSION}.linux-amd64.tar.gz && \

@@ -31,8 +31,8 @@ To get started developing applications, see the [application developers guide](h
A quick example of a built-in app and Tendermint core in one container.

```sh
docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint init validator
docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint start --proxy-app=kvstore
```

## Local cluster

@@ -3,7 +3,7 @@ set -e

if [ ! -d "$TMHOME/config" ]; then
    echo "Running tendermint init to create (default) configuration for docker run."
    tendermint init validator

    sed -i \
        -e "s/^proxy-app\s*=.*/proxy-app = \"$PROXY_APP\"/" \
42
Makefile
@@ -12,7 +12,7 @@ else
VERSION := $(shell git describe)
endif

LD_FLAGS = -X github.com/tendermint/tendermint/version.TMVersion=$(VERSION)
BUILD_FLAGS = -mod=readonly -ldflags "$(LD_FLAGS)"
HTTPS_GIT := https://github.com/tendermint/tendermint.git
DOCKER_BUF := docker run -v $(shell pwd):/workspace --workdir /workspace bufbuild/buf

@@ -58,6 +58,8 @@ LD_FLAGS += $(LDFLAGS)
all: check build test install
.PHONY: all

# The below include contains the tools.
include tools/Makefile
include test/Makefile

###############################################################################

@@ -83,13 +85,22 @@ proto-all: proto-gen proto-lint proto-check-breaking
.PHONY: proto-all

proto-gen:
## If you get the following error,
## "error while loading shared libraries: libprotobuf.so.14: cannot open shared object file: No such file or directory"
## See https://stackoverflow.com/a/25518702
## Note the $< here is substituted for the %.proto
## Note the $@ here is substituted for the %.pb.go
	@sh scripts/protocgen.sh
.PHONY: proto-gen

proto-gen-docker:
	@docker pull -q tendermintdev/docker-build-proto
	@echo "Generating Protobuf files"
	@docker run -v $(shell pwd):/workspace --workdir /workspace tendermintdev/docker-build-proto sh ./scripts/protocgen.sh
.PHONY: proto-gen-docker

proto-lint:
	@$(DOCKER_BUF) lint --error-format=json
.PHONY: proto-lint

proto-format:

@@ -98,11 +109,11 @@ proto-format:
.PHONY: proto-format

proto-check-breaking:
	@$(DOCKER_BUF) breaking --against .git#branch=master
.PHONY: proto-check-breaking

proto-check-breaking-ci:
	@$(DOCKER_BUF) breaking --against $(HTTPS_GIT)#branch=master
.PHONY: proto-check-breaking-ci

###############################################################################

@@ -131,11 +142,11 @@ generate_test_cert:
	# generate server certificate
	@certstrap request-cert -cn server -ip 127.0.0.1
	# self-sign server certificate with rootCA
	@certstrap sign server --CA "root CA"
	# generate client certificate
	@certstrap request-cert -cn client -ip 127.0.0.1
	# self-sign client certificate with rootCA
	@certstrap sign client --CA "root CA"
.PHONY: generate_test_cert

###############################################################################

@@ -202,7 +213,7 @@ format:

lint:
	@echo "--> Running linter"
	go run github.com/golangci/golangci-lint/cmd/golangci-lint run
.PHONY: lint

DESTINATION = ./index.html.md

@@ -214,7 +225,7 @@ DESTINATION = ./index.html.md
build-docs:
	@cd docs && \
	while read -r branch path_prefix; do \
		(git checkout $${branch} && npm ci && VUEPRESS_BASE="/$${path_prefix}/" npm run build) ; \
		mkdir -p ~/output/$${path_prefix} ; \
		cp -r .vuepress/dist/* ~/output/$${path_prefix}/ ; \
		cp ~/output/$${path_prefix}/index.html ~/output ; \

@@ -227,25 +238,16 @@ build-docs:

build-docker: build-linux
	cp $(BUILDDIR)/tendermint DOCKER/tendermint
	docker build --label=tendermint --tag="tendermint/tendermint" -f DOCKER/Dockerfile .
	rm -rf DOCKER/tendermint
.PHONY: build-docker

###############################################################################
###                                Mocks                                    ###
###############################################################################

mockery:
	go generate -run="./scripts/mockery_generate.sh" ./...
.PHONY: mockery

###############################################################################
###                       Local testnet using docker                        ###
###############################################################################

# Build linux binary on other platforms
build-linux:
	GOOS=linux GOARCH=amd64 $(MAKE) build
.PHONY: build-linux
47 README.md
@@ -8,8 +8,8 @@ Or [Blockchain](<https://en.wikipedia.org/wiki/Blockchain_(database)>), for shor

[](https://github.com/tendermint/tendermint/releases/latest)
[](https://pkg.go.dev/github.com/tendermint/tendermint)
[](https://github.com/moovweb/gvm)
[](https://discord.gg/cosmosnetwork)
[](https://github.com/moovweb/gvm)
[](https://discord.gg/vcExX9T)
[](https://github.com/tendermint/tendermint/blob/master/LICENSE)
[](https://github.com/tendermint/tendermint)
[](https://sourcegraph.com/github.com/tendermint/tendermint?badge)

@@ -18,7 +18,8 @@ Or [Blockchain](<https://en.wikipedia.org/wiki/Blockchain_(database)>), for shor

|--------|--------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------|
| master |  | [](https://codecov.io/gh/tendermint/tendermint) |  |

Tendermint Core is a Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine - written in any programming language - and securely replicates it on many machines.
Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine - written in any programming language -
and securely replicates it on many machines.

For protocol details, see [the specification](https://github.com/tendermint/spec).

@@ -29,11 +30,13 @@ see our recent paper, "[The latest gossip on BFT consensus](https://arxiv.org/ab

Please do not depend on master as your production branch. Use [releases](https://github.com/tendermint/tendermint/releases) instead.

Tendermint has been in the production of private and public environments, most notably the blockchains of the Cosmos Network. we haven't released v1.0 yet since we are making breaking changes to the protocol and the APIs.
Tendermint is being used in production in both private and public environments,
most notably the blockchains of the [Cosmos Network](https://cosmos.network/).
However, we are still making breaking changes to the protocol and the APIs and have not yet released v1.0.
See below for more details about [versioning](#versioning).

In any case, if you intend to run Tendermint in production, we're happy to help. You can
contact us [over email](mailto:hello@interchain.berlin) or [join the chat](https://discord.gg/cosmosnetwork).
contact us [over email](mailto:hello@interchain.berlin) or [join the chat](https://discord.gg/vcExX9T).

## Security

@@ -48,7 +51,7 @@ to notify you of vulnerabilities and fixes in Tendermint Core. You can subscribe

| Requirement | Notes            |
|-------------|------------------|
| Go version  | Go1.16 or higher |
| Go version  | Go1.15 or higher |

## Documentation

@@ -82,12 +85,32 @@ and familiarize yourself with our

Tendermint uses [Semantic Versioning](http://semver.org/) to determine when and how the version changes.
According to SemVer, anything in the public API can change at any time before version 1.0.0.

To provide some stability to users of 0.X.X versions of Tendermint, the MINOR version is used
to signal breaking changes across Tendermint's API. This API includes all
publicly exposed types, functions, and methods in non-internal Go packages as well as
the types and methods accessible via the Tendermint RPC interface.
To provide some stability to Tendermint users in these 0.X.X days, the MINOR version is used
to signal breaking changes across a subset of the total public API. This subset includes all
interfaces exposed to other processes (cli, rpc, p2p, etc.), but does not
include the Go APIs.

Breaking changes to these public APIs will be documented in the CHANGELOG.
That said, breaking changes in the following packages will be documented in the
CHANGELOG even if they don't lead to MINOR version bumps:

- crypto
- config
- libs
  - bits
  - bytes
  - json
  - log
  - math
  - net
  - os
  - protoio
  - rand
  - sync
  - strings
  - service
- node
- rpc/client
- types

### Upgrades

@@ -144,4 +167,4 @@ If you'd like to work full-time on Tendermint Core, [we're hiring](https://inter

Funding for Tendermint Core development comes primarily from the [Interchain Foundation](https://interchain.io),
a Swiss non-profit. The Tendermint trademark is owned by [Tendermint Inc.](https://tendermint.com), the for-profit entity
that also maintains [tendermint.com](https://tendermint.com).
161 RELEASES.md
@@ -1,161 +0,0 @@
# Releases

Tendermint uses [semantic versioning](https://semver.org/) with each release following
a `vX.Y.Z` format. The `master` branch is used for active development and thus it's
advisable not to build against it.

The latest changes are always initially merged into `master`.
Releases are specified using tags and are built from long-lived "backport" branches
that are cut from `master` when the release process begins.
Each release "line" (e.g. 0.34 or 0.33) has its own long-lived backport branch,
and the backport branches have names like `v0.34.x` or `v0.33.x`
(literally, `x`; it is not a placeholder in this case). Tendermint only
maintains the last two releases at a time (the oldest release is predominantly
just security patches).

## Backporting

As non-breaking changes land on `master`, they should also be backported
to these backport branches.

We use Mergify's [backport feature](https://mergify.io/features/backports) to automatically backport
to the needed branch. There should be a label for any backport branch that you'll be targeting.
To notify the bot to backport a pull request, mark the pull request with the label corresponding
to the correct backport branch. For example, to backport to v0.35.x, add the label `S:backport-to-v0.35.x`.
Once the original pull request is merged, the bot will try to cherry-pick the pull request
to the backport branch. If the bot fails to backport, it will open a pull request.
The author of the original pull request is responsible for solving the conflicts and
merging the pull request.

### Creating a backport branch

If this is the first release candidate for a major release, you get to have the honor of creating
the backport branch!

Note that, after creating the backport branch, you'll also need to update the
tags on `master` so that `go mod` is able to order the branches correctly. You
should tag `master` with a "dev" tag that is "greater than" the backport
branch tags. See [#6072](https://github.com/tendermint/tendermint/pull/6072)
for more context.

In the following example, we'll assume that we're making a backport branch for
the 0.35.x line.

1. Start on `master`
2. Create and push the backport branch:
   `git checkout -b v0.35.x; git push origin v0.35.x`
3. Go back to master and tag it as the dev branch for the _next_ major release and push it back up:
   `git tag -a v0.36.0-dev -m "Development base for Tendermint v0.36."; git push origin v0.36.0-dev`
4. Create a new workflow (still on master) to run e2e nightlies for the new backport branch.
   (See https://github.com/tendermint/tendermint/blob/master/.github/workflows/e2e-nightly-master.yml
   for an example.)
5. Add a new section to the Mergify config (`.github/mergify.yml`) to enable the
   backport bot to work on this branch, and add a corresponding `S:backport-to-v0.35.x`
   [label](https://github.com/tendermint/tendermint/labels) so the bot can be triggered.

## Release candidates

Before creating an official release, especially a major release, we may want to create a
release candidate (RC) for our friends and partners to test out. We use git tags to
create RCs, and we build them off of backport branches.

Tags for RCs should follow the "standard" release naming conventions, with `-rcX` at the end
(for example, `v0.35.0-rc0`).

(Note that branches and tags _cannot_ have the same names, so it's important that these branches
have distinct names from the tags/release names.)

If this is the first RC for a major release, you'll have to make a new backport branch (see above).
Otherwise:

1. Start from the backport branch (e.g. `v0.35.x`).
2. Run the integration tests and the e2e nightlies
   (which can be triggered from the Github UI;
   e.g., https://github.com/tendermint/tendermint/actions/workflows/e2e-nightly-34x.yml).
3. Prepare the changelog:
   - Move the changes included in `CHANGELOG_PENDING.md` into `CHANGELOG.md`. Each RC should have
     its own changelog section. These will be squashed when the final candidate is released.
   - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
     all PRs
   - Ensure that `UPGRADING.md` is up-to-date and includes notes on any breaking changes
     or other upgrading flows.
   - Bump TMVersionDefault version in `version.go`
   - Bump P2P and block protocol versions in `version.go`, if necessary.
     Check the changelog for breaking changes in these components.
   - Bump ABCI protocol version in `version.go`, if necessary
4. Open a PR with these changes against the backport branch.
5. Once these changes have landed on the backport branch, be sure to pull them back down locally.
6. Once you have the changes locally, create the new tag, specifying a name and a tag "message":
   `git tag -a v0.35.0-rc0 -m "Release Candidate v0.35.0-rc0"`
7. Push the tag back up to origin:
   `git push origin v0.35.0-rc0`
   Now the tag should be available on the repo's releases page.
8. Future RCs will continue to be built off of this branch.

Note that this process should only be used for "true" RCs--
release candidates that, if successful, will be the next release.
For more experimental "RCs," create a new, short-lived branch and tag that instead.

## Major release

This major release process assumes that this release was preceded by release candidates.
If there were no release candidates, begin by creating a backport branch, as described above.

1. Start on the backport branch (e.g. `v0.35.x`)
2. Run integration tests (`make test_integrations`) and the e2e nightlies.
3. Prepare the release:
   - "Squash" changes from the changelog entries for the RCs into a single entry,
     and add all changes included in `CHANGELOG_PENDING.md`.
     (Squashing includes both combining all entries, as well as removing or simplifying
     any intra-RC changes. It may also help to alphabetize the entries by package name.)
   - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
     all PRs
   - Ensure that `UPGRADING.md` is up-to-date and includes notes on any breaking changes
     or other upgrading flows.
   - Bump TMVersionDefault version in `version.go`
   - Bump P2P and block protocol versions in `version.go`, if necessary
   - Bump ABCI protocol version in `version.go`, if necessary
4. Open a PR with these changes against the backport branch.
5. Once these changes are on the backport branch, push a tag with prepared release details.
   This will trigger the actual release `v0.35.0`.
   - `git tag -a v0.35.0 -m 'Release v0.35.0'`
   - `git push origin v0.35.0`
6. Make sure that `master` is updated with the latest `CHANGELOG.md`, `CHANGELOG_PENDING.md`, and `UPGRADING.md`.
7. Add the release to the documentation site generator config (see
   [DOCS_README.md](./docs/DOCS_README.md) for more details). In summary:
   - Start on branch `master`.
   - Add a new line at the bottom of [`docs/versions`](./docs/versions) to
     ensure the newest release is the default for the landing page.
   - Add a new entry to `themeConfig.versions` in
     [`docs/.vuepress/config.js`](./docs/.vuepress/config.js) to include the
     release in the dropdown versions menu.

## Minor release (point releases)

Minor releases are done differently from major releases: They are built off of
long-lived backport branches, rather than from master. As non-breaking changes
land on `master`, they should also be backported into these backport branches.

Minor releases don't have release candidates by default, although any tricky
changes may merit a release candidate.

To create a minor release:

1. Check out the long-lived backport branch: `git checkout v0.35.x`
2. Run integration tests (`make test_integrations`) and the nightlies.
3. Check out a new branch and prepare the release:
   - Copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md`
   - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for all issues
   - Run `bash ./scripts/authors.sh` to get a list of authors since the latest release, and add the GitHub aliases of external contributors to the top of the CHANGELOG. To look up an alias from an email, try `bash ./scripts/authors.sh <email>`
   - Reset the `CHANGELOG_PENDING.md`
   - Bump the TMDefaultVersion in `version.go`
   - Bump the ABCI version number, if necessary.
     (Note that ABCI follows semver, and that ABCI versions are the only versions
     which can change during minor releases, and only field additions are valid minor changes.)
4. Open a PR with these changes that will land them back on `v0.35.x`
5. Once this change has landed on the backport branch, make sure to pull it locally, then push a tag.
   - `git tag -a v0.35.1 -m 'Release v0.35.1'`
   - `git push origin v0.35.1`
6. Create a pull request back to master with the CHANGELOG & version changes from the latest release.
   - Remove all `R:minor` labels from the pull requests that were included in the release.
   - Do not merge the backport branch into master.
78 SECURITY.md
@@ -7,55 +7,54 @@ Policy](https://tendermint.com/security), we operate a [bug
bounty](https://hackerone.com/tendermint).
See the policy for more details on submissions and rewards, and see "Example Vulnerabilities" (below) for examples of the kinds of bugs we're most interested in.

### Guidelines

We require that all researchers:

* Use the bug bounty to disclose all vulnerabilities, and avoid posting vulnerability information in public places, including Github Issues, Discord channels, and Telegram groups
* Make every effort to avoid privacy violations, degradation of user experience, disruption to production systems (including but not limited to the Cosmos Hub), and destruction of data
* Keep any information about vulnerabilities that you’ve discovered confidential between yourself and the Tendermint Core engineering team until the issue has been resolved and disclosed
* Avoid posting personally identifiable information, privately or publicly

If you follow these guidelines when reporting an issue to us, we commit to:

* Not pursue or support any legal action related to your research on this vulnerability
* Work with you to understand, resolve and ultimately disclose the issue in a timely fashion

## Disclosure Process

Tendermint Core uses the following disclosure process:

1. Once a security report is received, the Tendermint Core team works to verify the issue and confirm its severity level using CVSS.
2. The Tendermint Core team collaborates with the Gaia team to determine the vulnerability’s potential impact on the Cosmos Hub.
3. Patches are prepared for eligible releases of Tendermint in private repositories. See “Supported Releases” below for more information on which releases are considered eligible.
4. If it is determined that a CVE-ID is required, we request a CVE through a CVE Numbering Authority.
5. We notify the community that a security release is coming, to give users time to prepare their systems for the update. Notifications can include forum posts, tweets, and emails to partners and validators, including emails sent to the [Tendermint Security Mailing List](https://berlin.us4.list-manage.com/subscribe?u=431b35421ff7edcc77df5df10&id=3fe93307bc).
6. 24 hours following this notification, the fixes are applied publicly and new releases are issued.
7. Cosmos SDK and Gaia update their Tendermint Core dependencies to use these releases, and then themselves issue new releases.
8. Once releases are available for Tendermint Core, Cosmos SDK and Gaia, we notify the community, again, through the same channels as above. We also publish a Security Advisory on Github and publish the CVE, as long as neither the Security Advisory nor the CVE include any information on how to exploit these vulnerabilities beyond what information is already available in the patch itself.
9. Once the community is notified, we will pay out any relevant bug bounties to submitters.
10. One week after the releases go out, we will publish a post with further details on the vulnerability as well as our response to it.

This process can take some time. Every effort will be made to handle the bug in as timely a manner as possible, however it's important that we follow the process described above to ensure that disclosures are handled consistently and to keep Tendermint Core and its downstream dependent projects--including but not limited to Gaia and the Cosmos Hub--as secure as possible.

### Example Timeline

The following is an example timeline for the triage and response. The required roles and team members are described in parentheses after each task; however, multiple people can play each role and each person may play multiple roles.

#### 24+ Hours Before Release Time
#### > 24 Hours Before Release Time

1. Request CVE number (ADMIN)
2. Gather emails and other contact info for validators (COMMS LEAD)
3. Create patches in a private security repo, and ensure that PRs are open targeting all relevant release branches (TENDERMINT ENG, TENDERMINT LEAD)
4. Test fixes on a testnet (TENDERMINT ENG, COSMOS SDK ENG)
5. Write “Security Advisory” for forum (TENDERMINT LEAD)
1. Request CVE number (ADMIN)
2. Gather emails and other contact info for validators (COMMS LEAD)
3. Test fixes on a testnet (TENDERMINT ENG, COSMOS ENG)
4. Write “Security Advisory” for forum (TENDERMINT LEAD)

#### 24 Hours Before Release Time

1. Post “Security Advisory” pre-notification on forum (TENDERMINT LEAD)
2. Post Tweet linking to forum post (COMMS LEAD)
3. Announce security advisory/link to post in various other social channels (Telegram, Discord) (COMMS LEAD)
4. Send emails to validators or other users (PARTNERSHIPS LEAD)

#### Release Time

@@ -65,36 +64,36 @@ The following is an example timeline for the triage and response. The required r
4. Post “Security releases” on forum (TENDERMINT LEAD)
5. Post new Tweet linking to forum post (COMMS LEAD)
6. Remind everyone via social channels (Telegram, Discord) that the release is out (COMMS LEAD)
7. Send emails to validators or other users (COMMS LEAD)
8. Publish Security Advisory and CVE, if CVE has no sensitive information (ADMIN)

#### After Release Time

1. Write forum post with exploit details (TENDERMINT LEAD)
2. Approve pay-out on HackerOne for submitter (ADMIN)

#### 7 Days After Release Time

1. Publish CVE if it has not yet been published (ADMIN)
2. Publish forum post with exploit details (TENDERMINT ENG, TENDERMINT LEAD)

## Supported Releases

The Tendermint Core team commits to releasing security patch releases for both the latest minor release as well as for the major/minor release that the Cosmos Hub is running.

If you are running older versions of Tendermint Core, we encourage you to upgrade at your earliest opportunity so that you can receive security patches directly from the Tendermint repo. While you are welcome to backport security patches to older versions for your own use, we will not publish or promote these backports.

## Scope

The full scope of our bug bounty program is outlined on our [Hacker One program page](https://hackerone.com/tendermint). Please also note that, in the interest of the safety of our users and staff, a few things are explicitly excluded from scope:

* Any third-party services
* Findings from physical testing, such as office access
* Findings derived from social engineering (e.g., phishing)

## Example Vulnerabilities

The following is a list of examples of the kinds of vulnerabilities that we’re most interested in. It is not exhaustive: there are other kinds of issues we may also be interested in!

### Specification

@@ -115,9 +114,6 @@ Assuming less than 1/3 of the voting power is Byzantine (malicious):
* A node halting (liveness failure)
* Syncing new and old nodes

Assuming more than 1/3 the voting power is Byzantine:

* Attacks that go unpunished (unhandled evidence)

### Networking

@@ -143,7 +139,7 @@ Attacks may come through the P2P network or the RPC layer:

### Libraries

* Serialization
* Serialization (Amino)
* Reading/Writing files and databases

### Cryptography
@@ -154,5 +150,5 @@ Attacks may come through the P2P network or the RPC layer:

### Light Client

* Core verification
* Bisection/sequential algorithms
221 UPGRADING.md
@@ -2,198 +2,43 @@

This guide provides instructions for upgrading to specific versions of Tendermint Core.

## v0.35
## Unreleased

### ABCI Changes

* Added `AbciVersion` to `RequestInfo`. Applications should check that the ABCI version they expect is being used in order to avoid errors from unimplemented changes.

* The method `SetOption` has been removed from the ABCI.Client interface. This feature was used in the early ABCI implementations.

* Messages are written to a byte stream using uint64 length delimiters instead of int64.
* When mempool `v1` is enabled, transactions broadcast via `sync` mode may return a successful
  response with a transaction hash indicating that the transaction was successfully inserted into
  the mempool. While this is true for `v0`, the `v1` mempool reactor may at a later point in time
  evict or even drop this transaction after a hash has been returned. Thus, the user or client must
  query for that transaction to check if it is still in the mempool.
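To illustrate the consequence of this change, here is a minimal sketch of re-checking a transaction after a `sync`-mode broadcast. It is not part of this diff, and it assumes the `rpc/client/http` package, whose constructor signature has varied between releases:

```go
package main

import (
	"context"
	"fmt"

	rpchttp "github.com/tendermint/tendermint/rpc/client/http"
)

func main() {
	// Constructor signature varies by release; some versions also take a
	// websocket endpoint argument.
	client, err := rpchttp.New("http://127.0.0.1:26657")
	if err != nil {
		panic(err)
	}
	ctx := context.Background()

	// BroadcastTxSync returns after CheckTx. With mempool v1, the returned
	// hash does not guarantee the transaction stays in the mempool.
	res, err := client.BroadcastTxSync(ctx, []byte("k=v"))
	if err != nil {
		panic(err)
	}

	// Re-query by hash to confirm the transaction was actually committed;
	// it may have been evicted before making it into a block.
	tx, err := client.Tx(ctx, res.Hash, false)
	if err != nil {
		fmt.Println("tx not found (possibly evicted):", err)
		return
	}
	fmt.Println("tx committed at height", tx.Height)
}
```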
### Config Changes

* The configuration file field `[fastsync]` has been renamed to `[blocksync]`.

* The top level configuration file field `fast-sync` has moved under the new `[blocksync]`
  field as `blocksync.enable`.

* `blocksync.version = "v1"` and `blocksync.version = "v2"` (previously `fastsync`)
  are no longer supported. Please use `v0` instead. During the v0.35 release cycle, `v0` was
  determined to suit the existing needs and the cost of maintaining the `v1` and `v2` modules
  was determined to be greater than necessary.

* `fast_sync = "v1"` is no longer supported. Please use `v2` instead.

* All config parameters are now hyphen-case (also known as kebab-case) instead of snake_case. Before restarting the node make sure
  you have updated all the variables in your `config.toml` file.

* Added `--mode` flag and `mode` config variable on `config.toml` for setting Mode of the Node: `full` | `validator` | `seed` (default: `full`)
  [ADR-52](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-052-tendermint-mode.md)

* `BootstrapPeers` has been added as part of the new p2p stack. This will eventually replace
  `Seeds`. Bootstrap peers are connected with on startup if needed for peer discovery. Unlike
  persistent peers, there's no guarantee that the node will remain connected with these peers.

* Configuration values starting with `priv-validator-` have moved to the new
  `priv-validator` section, without the `priv-validator-` prefix.

* The fast sync process, as well as the blockchain package and service, have all
  been renamed to block sync.

### Database Key Format Changes

The format of all tendermint on-disk database keys changes in
0.35. Upgrading nodes must either re-sync all data or run a migration
script provided in this release. The script located in
`github.com/tendermint/tendermint/scripts/keymigrate/migrate.go`
provides the function `Migrate(context.Context, db.DB)` which you can
operationalize as makes sense for your deployment.

For ease of use the `tendermint` command includes a CLI version of the
migration script, which you can invoke, as in:

    tendermint key-migrate

This reads the configuration file as normal and allows the
`--db-backend` and `--db-dir` flags to change database operations as
needed.

The migration operation is idempotent and can be run more than once,
if needed.
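For deployments that drive the migration from their own tooling, a minimal sketch of calling `Migrate` directly follows. This is not part of this diff; the database backend, name, and data directory are illustrative, and it assumes a tm-db backed store:

```go
package main

import (
	"context"
	"log"

	db "github.com/tendermint/tm-db"

	"github.com/tendermint/tendermint/scripts/keymigrate"
)

func main() {
	// Open one of the node's databases. The backend, name, and directory
	// here are illustrative and depend on your deployment.
	database, err := db.NewGoLevelDB("blockstore", "/var/lib/tendermint/data")
	if err != nil {
		log.Fatal(err)
	}
	defer database.Close()

	// Migrate rewrites every key into the new 0.35 format. The operation
	// is idempotent, so re-running after an interruption is safe.
	if err := keymigrate.Migrate(context.Background(), database); err != nil {
		log.Fatal(err)
	}
}
```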
### CLI Changes

* You must now specify the node mode (validator|full|seed) in `tendermint init [mode]`

* The `--fast-sync` command line option has been renamed to `--blocksync.enable`

* If you had previously used `tendermint gen_node_key` to generate a new node
  key, keep in mind that it no longer saves the output to a file. You can use
  `tendermint init validator` or pipe the output of `tendermint gen_node_key` to
  `tendermint init` or pipe the output of `tendermint gen_node_key` to
  `$TMHOME/config/node_key.json`:

  ```
  $ tendermint gen_node_key > $TMHOME/config/node_key.json
  ```

* CLI commands and flags are all now hyphen-case instead of snake_case.
  Make sure to adjust any scripts that call a CLI command using snake_case.

### API Changes

The p2p layer was reimplemented as part of the 0.35 release cycle and
all reactors were refactored to accommodate the change. As part of that work these
implementations moved into the `internal` package and are no longer
considered part of the public Go API of tendermint. These packages
are:

- `p2p`
- `mempool`
- `consensus`
- `statesync`
- `blockchain`
- `evidence`

Accordingly, the `node` package was changed to reduce access to
tendermint internals: applications that use tendermint as a library
will need to change to accommodate these changes. Most notably:

- The `Node` type has become internal, and all constructors return a
  `service.Service` implementation.

- The `node.DefaultNewNode` and `node.NewNode` constructors are no
  longer exported and have been replaced with `node.New` and
  `node.NewDefault` which provide more functional interfaces.
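For library users, the change looks roughly like the sketch below. This is an illustration only: the constructor signatures shown here are assumptions and have changed between releases, so consult the `node` package documentation for your version:

```go
package main

import (
	"log"

	"github.com/tendermint/tendermint/config"
	tmlog "github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/node"
)

func main() {
	conf := config.DefaultConfig()
	logger := tmlog.NewNopLogger()

	// node.NewDefault returns a service.Service rather than a *node.Node,
	// so callers interact with it only via Start/Stop/Wait.
	// (Signature assumed; check the node package for your release.)
	n, err := node.NewDefault(conf, logger)
	if err != nil {
		log.Fatal(err)
	}
	if err := n.Start(); err != nil {
		log.Fatal(err)
	}
	defer n.Stop() //nolint:errcheck // sketch only
	n.Wait()
}
```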
### gRPC Support

gRPC in the RPC layer is deprecated and will be removed in 0.36.

### Peer Management Interface

When running with the new P2P Layer, the `UnsafeDialSeeds` and
`UnsafeDialPeers` RPC methods will always return an error. They are
deprecated and will be removed in 0.36 when the legacy peer stack is
removed.

Additionally the format of the Peer list returned in the `NetInfo`
method changes in this release to accommodate the different way that
the new stack tracks data about peers. This change affects users of
both stacks.

### Using the updated p2p library

The P2P library was reimplemented in this release. The new implementation is
enabled by default in this version of Tendermint. The legacy implementation is still
included in this version of Tendermint as a backstop to work around unforeseen
production issues. The new and legacy versions are interoperable. If necessary,
you can enable the legacy implementation in the server configuration file.

To make use of the legacy P2P implementation add or update the following field of
your server's configuration file under the `[p2p]` section:

```toml
[p2p]
...
use-legacy = true
...
```

If you need to do this, please consider filing an issue in the Tendermint repository
to let us know why. We plan to remove the legacy P2P code in the next (v0.36) release.

#### New p2p queue types

The new p2p implementation enables selection of the queue type to be used for
passing messages between peers.

The following values may be used when selecting which queue type to use:

* `fifo`: (**default**) An unbuffered and lossless queue that passes messages through
  in the order in which they were received.

* `priority`: A priority queue of messages.

* `wdrr`: A queue implementing the Weighted Deficit Round Robin algorithm. A
  weighted deficit round robin queue is created per peer. Each queue contains a
  separate 'flow' for each of the channels of communication that exist between any two
  peers. Tendermint maintains a channel per message type between peers. Each WDRR
  queue maintains a shared buffer with a fixed capacity through which messages on different
  flows are passed.
  For more information on WDRR scheduling, see: https://en.wikipedia.org/wiki/Deficit_round_robin

To select a queue type, add or update the following field under the `[p2p]`
section of your server's configuration file.

```toml
[p2p]
...
queue-type = "wdrr"
...
```

### Support for Custom Reactor and Mempool Implementations

The changes to the p2p layer removed existing support for custom
reactors. Based on our understanding of how this functionality was
used, the introduction of the prioritized mempool covers nearly all of
the use cases for custom reactors. If you are currently running custom
reactors and mempools and are having trouble seeing the migration path
for your project please feel free to reach out to the Tendermint Core
development team directly.

## v0.34.0

**Upgrading to Tendermint 0.34 requires a blockchain restart.**
This release is not compatible with previous blockchains due to changes to
the encoding format (see "Protocol Buffers," below) and the block header (see "Blockchain Protocol").

Note also that Tendermint 0.34 requires Go 1.16 or higher.
Note also that Tendermint 0.34 requires Go 1.15 or higher.

### ABCI Changes

@@ -340,8 +185,8 @@ Other user-relevant changes include:

* The old `lite` package was removed; the new light client uses the `light` package.
* The `Verifier` was broken up into two pieces:
  * Core verification logic (pure `VerifyX` functions)
  * `Client` object, which represents the complete light client
* The new light client stores headers & validator sets as `LightBlock`s
* The RPC client can be found in the `/rpc` directory.
* The HTTP(S) proxy is located in the `/proxy` directory.
@@ -473,12 +318,12 @@ Evidence Params has been changed to include duration.
### Go API

* `libs/common` has been removed in favor of specific pkgs.
  * `async`
  * `service`
  * `rand`
  * `net`
  * `strings`
  * `cmap`
* removal of `errors` pkg

### RPC Changes
@@ -547,9 +392,9 @@ Prior to the update, suppose your `ResponseDeliverTx` look like:
```go
abci.ResponseDeliverTx{
	Tags: []kv.Pair{
		{Key: []byte("sender"), Value: []byte("foo")},
		{Key: []byte("recipient"), Value: []byte("bar")},
		{Key: []byte("amount"), Value: []byte("35")},
	}
}
```
@@ -568,14 +413,14 @@ the following `Events`:

```go
abci.ResponseDeliverTx{
	Events: []abci.Event{
		{
			Type: "transfer",
			Attributes: kv.Pairs{
				{Key: []byte("sender"), Value: []byte("foo")},
				{Key: []byte("recipient"), Value: []byte("bar")},
				{Key: []byte("amount"), Value: []byte("35")},
			},
		}
	}
}
```

@@ -623,9 +468,9 @@ In this case, the WS client will receive an error with description:
    "jsonrpc": "2.0",
    "id": "{ID}#event",
    "error": {
        "code": -32000,
        "msg": "Server error",
        "data": "subscription was canceled (reason: client is not pulling messages fast enough)" // or "subscription was canceled (reason: Tendermint exited)"
        "code": -32000,
        "msg": "Server error",
        "data": "subscription was cancelled (reason: client is not pulling messages fast enough)" // or "subscription was cancelled (reason: Tendermint exited)"
    }
}

@@ -831,9 +676,9 @@ just the `Data` field set:

```go
[]ProofOp{
	ProofOp{
		Data: <proof bytes>,
	}
}
```
@@ -1,4 +1,4 @@
package abciclient
package abcicli

import (
	"context"
@@ -6,8 +6,8 @@ import (
	"sync"

	"github.com/tendermint/tendermint/abci/types"
	tmsync "github.com/tendermint/tendermint/internal/libs/sync"
	"github.com/tendermint/tendermint/libs/service"
	tmsync "github.com/tendermint/tendermint/libs/sync"
)

const (
@@ -15,7 +15,7 @@ const (
	echoRetryIntervalSeconds = 1
)

//go:generate ../../scripts/mockery_generate.sh Client
//go:generate mockery --case underscore --name Client

// Client defines an interface for an ABCI client.
//
@@ -80,8 +80,12 @@ func NewClient(addr, transport string, mustConnect bool) (client Client, err err
	return
}

//----------------------------------------

type Callback func(*types.Request, *types.Response)

//----------------------------------------

type ReqRes struct {
	*types.Request
	*sync.WaitGroup
@@ -103,50 +107,34 @@ func NewReqRes(req *types.Request) *ReqRes {
	}
}

// SetCallback sets the callback. If reqRes is already done, it will call the cb
// immediately. Note, reqRes.cb should not change if reqRes.done and only one
// callback is supported.
func (r *ReqRes) SetCallback(cb func(res *types.Response)) {
	r.mtx.Lock()
// Sets the callback for this ReqRes atomically.
// If reqRes is already done, calls cb immediately.
// NOTE: reqRes.cb should not change if reqRes.done.
// NOTE: only one callback is supported.
func (reqRes *ReqRes) SetCallback(cb func(res *types.Response)) {
	reqRes.mtx.Lock()

	if r.done {
		r.mtx.Unlock()
		cb(r.Response)
	if reqRes.done {
		reqRes.mtx.Unlock()
		cb(reqRes.Response)
		return
	}

	r.cb = cb
	r.mtx.Unlock()
	reqRes.cb = cb
	reqRes.mtx.Unlock()
}

// InvokeCallback invokes a thread-safe execution of the configured callback
// if non-nil.
func (r *ReqRes) InvokeCallback() {
	r.mtx.Lock()
	defer r.mtx.Unlock()

	if r.cb != nil {
		r.cb(r.Response)
	}
func (reqRes *ReqRes) GetCallback() func(*types.Response) {
	reqRes.mtx.Lock()
	defer reqRes.mtx.Unlock()
	return reqRes.cb
}

// GetCallback returns the configured callback of the ReqRes object which may be
// nil. Note, it is not safe to concurrently call this in cases where it is
// marked done and SetCallback is called before calling GetCallback as that
// will invoke the callback twice and create a potential race condition.
//
// ref: https://github.com/tendermint/tendermint/issues/5439
func (r *ReqRes) GetCallback() func(*types.Response) {
	r.mtx.Lock()
	defer r.mtx.Unlock()
	return r.cb
}

// SetDone marks the ReqRes object as done.
func (r *ReqRes) SetDone() {
	r.mtx.Lock()
	r.done = true
	r.mtx.Unlock()
// NOTE: it should be safe to read reqRes.cb without locks after this.
func (reqRes *ReqRes) SetDone() {
	reqRes.mtx.Lock()
	reqRes.done = true
	reqRes.mtx.Unlock()
}

func waitGroup1() (wg *sync.WaitGroup) {
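To make the callback contract above concrete, a small usage sketch follows (hypothetical calling code, not part of this diff): if the response has already arrived, the callback fires synchronously; otherwise it fires once when the response is delivered.

```go
// Hypothetical calling code, not part of this diff.
reqRes := abciclient.NewReqRes(types.ToRequestEcho("ping"))

// If the response has already been delivered (reqRes is done), the callback
// runs synchronously on this goroutine; otherwise it runs when the response
// arrives. Only a single callback may be registered.
reqRes.SetCallback(func(res *types.Response) {
	fmt.Printf("received: %v\n", res)
})

// Reading the callback back is safe only once reqRes is marked done;
// see the race discussion in the GetCallback comment above.
cb := reqRes.GetCallback()
_ = cb
```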
@@ -1,35 +0,0 @@
package abciclient

import (
	"fmt"

	"github.com/tendermint/tendermint/abci/types"
	tmsync "github.com/tendermint/tendermint/internal/libs/sync"
)

// Creator creates new ABCI clients.
type Creator func() (Client, error)

// NewLocalCreator returns a Creator for the given app,
// which will be running locally.
func NewLocalCreator(app types.Application) Creator {
	mtx := new(tmsync.Mutex)

	return func() (Client, error) {
		return NewLocalClient(mtx, app), nil
	}
}

// NewRemoteCreator returns a Creator for the given address (e.g.
// "192.168.0.1") and transport (e.g. "tcp"). Set mustConnect to true if you
// want the client to connect before reporting success.
func NewRemoteCreator(addr, transport string, mustConnect bool) Creator {
	return func() (Client, error) {
		remoteApp, err := NewClient(addr, transport, mustConnect)
		if err != nil {
			return nil, fmt.Errorf("failed to connect to proxy: %w", err)
		}

		return remoteApp, nil
	}
}
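A typical use of these constructors looks like the sketch below (hypothetical calling code, not part of this diff; `kvstore.NewApplication` is the example application shipped under `abci/example/kvstore`, and the supported transports are `socket` and `grpc`):

```go
// Hypothetical calling code, not part of this diff.
// kvstore.NewApplication is the example app under abci/example/kvstore.
app := kvstore.NewApplication()
localCreator := abciclient.NewLocalCreator(app)

localClient, err := localCreator() // in-process client, no networking
if err != nil {
	log.Fatal(err)
}
_ = localClient

// Client for an application in a separate process; the supported
// transports are "socket" and "grpc".
remoteCreator := abciclient.NewRemoteCreator("127.0.0.1:26658", "socket", true)
remoteClient, err := remoteCreator()
if err != nil {
	log.Fatal(err)
}
_ = remoteClient
```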
@@ -1,4 +1,4 @@
// Package abciclient provides an ABCI implementation in Go.
// Package abcicli provides an ABCI implementation in Go.
//
// There are 3 clients available:
// 1. socket (unix or TCP)
@@ -26,4 +26,4 @@
//
// sync: waits for all Async calls to complete (essentially what Flush does in
// the socket client) and calls Sync method.
package abciclient
package abcicli

@@ -1,4 +1,4 @@
package abciclient
package abcicli

import (
	"context"
@@ -10,9 +10,9 @@ import (
	"google.golang.org/grpc"

	"github.com/tendermint/tendermint/abci/types"
	tmsync "github.com/tendermint/tendermint/internal/libs/sync"
	tmnet "github.com/tendermint/tendermint/libs/net"
	"github.com/tendermint/tendermint/libs/service"
	tmsync "github.com/tendermint/tendermint/libs/sync"
)

// A gRPC client.

@@ -1,11 +1,11 @@
package abciclient
package abcicli

import (
	"context"

	types "github.com/tendermint/tendermint/abci/types"
	tmsync "github.com/tendermint/tendermint/internal/libs/sync"
	"github.com/tendermint/tendermint/libs/service"
	tmsync "github.com/tendermint/tendermint/libs/sync"
)

// NOTE: use defer to unlock mutex because Application might panic (e.g., in

@@ -1,11 +1,11 @@
// Code generated by mockery. DO NOT EDIT.
// Code generated by mockery v2.3.0. DO NOT EDIT.

package mocks

import (
	context "context"

	abciclient "github.com/tendermint/tendermint/abci/client"
	abcicli "github.com/tendermint/tendermint/abci/client"

	log "github.com/tendermint/tendermint/libs/log"

@@ -20,15 +20,15 @@ type Client struct {
}

// ApplySnapshotChunkAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) ApplySnapshotChunkAsync(_a0 context.Context, _a1 types.RequestApplySnapshotChunk) (*abciclient.ReqRes, error) {
func (_m *Client) ApplySnapshotChunkAsync(_a0 context.Context, _a1 types.RequestApplySnapshotChunk) (*abcicli.ReqRes, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *abciclient.ReqRes
	if rf, ok := ret.Get(0).(func(context.Context, types.RequestApplySnapshotChunk) *abciclient.ReqRes); ok {
	var r0 *abcicli.ReqRes
	if rf, ok := ret.Get(0).(func(context.Context, types.RequestApplySnapshotChunk) *abcicli.ReqRes); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*abciclient.ReqRes)
			r0 = ret.Get(0).(*abcicli.ReqRes)
		}
	}

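Mocks generated this way are normally driven through testify's expectation API. A minimal sketch of stubbing one of these methods in a test (hypothetical test code, not part of this diff):

```go
package mocks_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	abciclient "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/client/mocks"
	"github.com/tendermint/tendermint/abci/types"
)

func TestApplySnapshotChunkAsync(t *testing.T) {
	m := new(mocks.Client)

	// Stub the call: any context, any request, return a canned ReqRes.
	m.On("ApplySnapshotChunkAsync", mock.Anything, mock.Anything).
		Return(&abciclient.ReqRes{}, nil)

	reqRes, err := m.ApplySnapshotChunkAsync(
		context.Background(), types.RequestApplySnapshotChunk{})
	require.NoError(t, err)
	require.NotNil(t, reqRes)

	m.AssertExpectations(t)
}
```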
@@ -66,15 +66,15 @@ func (_m *Client) ApplySnapshotChunkSync(_a0 context.Context, _a1 types.RequestA
|
||||
}
|
||||
|
||||
// BeginBlockAsync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) BeginBlockAsync(_a0 context.Context, _a1 types.RequestBeginBlock) (*abciclient.ReqRes, error) {
|
||||
func (_m *Client) BeginBlockAsync(_a0 context.Context, _a1 types.RequestBeginBlock) (*abcicli.ReqRes, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *abciclient.ReqRes
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestBeginBlock) *abciclient.ReqRes); ok {
|
||||
var r0 *abcicli.ReqRes
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestBeginBlock) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*abciclient.ReqRes)
|
||||
r0 = ret.Get(0).(*abcicli.ReqRes)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -112,15 +112,15 @@ func (_m *Client) BeginBlockSync(_a0 context.Context, _a1 types.RequestBeginBloc
|
||||
}
|
||||
|
||||
// CheckTxAsync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) CheckTxAsync(_a0 context.Context, _a1 types.RequestCheckTx) (*abciclient.ReqRes, error) {
|
||||
func (_m *Client) CheckTxAsync(_a0 context.Context, _a1 types.RequestCheckTx) (*abcicli.ReqRes, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *abciclient.ReqRes
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestCheckTx) *abciclient.ReqRes); ok {
|
||||
var r0 *abcicli.ReqRes
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestCheckTx) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*abciclient.ReqRes)
|
||||
r0 = ret.Get(0).(*abcicli.ReqRes)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -158,15 +158,15 @@ func (_m *Client) CheckTxSync(_a0 context.Context, _a1 types.RequestCheckTx) (*t
|
||||
}
|
||||
|
||||
// CommitAsync provides a mock function with given fields: _a0
|
||||
func (_m *Client) CommitAsync(_a0 context.Context) (*abciclient.ReqRes, error) {
|
||||
func (_m *Client) CommitAsync(_a0 context.Context) (*abcicli.ReqRes, error) {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 *abciclient.ReqRes
|
||||
if rf, ok := ret.Get(0).(func(context.Context) *abciclient.ReqRes); ok {
|
||||
var r0 *abcicli.ReqRes
|
||||
if rf, ok := ret.Get(0).(func(context.Context) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*abciclient.ReqRes)
|
||||
r0 = ret.Get(0).(*abcicli.ReqRes)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -204,15 +204,15 @@ func (_m *Client) CommitSync(_a0 context.Context) (*types.ResponseCommit, error)
|
||||
}
|
||||
|
||||
// DeliverTxAsync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) DeliverTxAsync(_a0 context.Context, _a1 types.RequestDeliverTx) (*abciclient.ReqRes, error) {
|
||||
func (_m *Client) DeliverTxAsync(_a0 context.Context, _a1 types.RequestDeliverTx) (*abcicli.ReqRes, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *abciclient.ReqRes
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *abciclient.ReqRes); ok {
|
||||
var r0 *abcicli.ReqRes
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*abciclient.ReqRes)
|
||||
r0 = ret.Get(0).(*abcicli.ReqRes)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -250,15 +250,15 @@ func (_m *Client) DeliverTxSync(_a0 context.Context, _a1 types.RequestDeliverTx)
|
||||
}
|
||||
|
||||
// EchoAsync provides a mock function with given fields: ctx, msg
|
||||
func (_m *Client) EchoAsync(ctx context.Context, msg string) (*abciclient.ReqRes, error) {
|
||||
func (_m *Client) EchoAsync(ctx context.Context, msg string) (*abcicli.ReqRes, error) {
|
||||
ret := _m.Called(ctx, msg)
|
||||
|
||||
var r0 *abciclient.ReqRes
|
||||
if rf, ok := ret.Get(0).(func(context.Context, string) *abciclient.ReqRes); ok {
|
||||
var r0 *abcicli.ReqRes
|
||||
if rf, ok := ret.Get(0).(func(context.Context, string) *abcicli.ReqRes); ok {
|
||||
r0 = rf(ctx, msg)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*abciclient.ReqRes)
|
||||
r0 = ret.Get(0).(*abcicli.ReqRes)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -296,15 +296,15 @@ func (_m *Client) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho
|
||||
}
|
||||
|
||||
// EndBlockAsync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) EndBlockAsync(_a0 context.Context, _a1 types.RequestEndBlock) (*abciclient.ReqRes, error) {
|
||||
func (_m *Client) EndBlockAsync(_a0 context.Context, _a1 types.RequestEndBlock) (*abcicli.ReqRes, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *abciclient.ReqRes
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestEndBlock) *abciclient.ReqRes); ok {
|
||||
var r0 *abcicli.ReqRes
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestEndBlock) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*abciclient.ReqRes)
|
||||
r0 = ret.Get(0).(*abcicli.ReqRes)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -356,15 +356,15 @@ func (_m *Client) Error() error {
|
||||
}
|
||||
|
||||
// FlushAsync provides a mock function with given fields: _a0
|
||||
func (_m *Client) FlushAsync(_a0 context.Context) (*abciclient.ReqRes, error) {
|
||||
func (_m *Client) FlushAsync(_a0 context.Context) (*abcicli.ReqRes, error) {
ret := _m.Called(_a0)

var r0 *abciclient.ReqRes
if rf, ok := ret.Get(0).(func(context.Context) *abciclient.ReqRes); ok {
var r0 *abcicli.ReqRes
if rf, ok := ret.Get(0).(func(context.Context) *abcicli.ReqRes); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*abciclient.ReqRes)
r0 = ret.Get(0).(*abcicli.ReqRes)
}
}

@@ -393,15 +393,15 @@ func (_m *Client) FlushSync(_a0 context.Context) error {
}

// InfoAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) InfoAsync(_a0 context.Context, _a1 types.RequestInfo) (*abciclient.ReqRes, error) {
func (_m *Client) InfoAsync(_a0 context.Context, _a1 types.RequestInfo) (*abcicli.ReqRes, error) {
ret := _m.Called(_a0, _a1)

var r0 *abciclient.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestInfo) *abciclient.ReqRes); ok {
var r0 *abcicli.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestInfo) *abcicli.ReqRes); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*abciclient.ReqRes)
r0 = ret.Get(0).(*abcicli.ReqRes)
}
}

@@ -439,15 +439,15 @@ func (_m *Client) InfoSync(_a0 context.Context, _a1 types.RequestInfo) (*types.R
}

// InitChainAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) InitChainAsync(_a0 context.Context, _a1 types.RequestInitChain) (*abciclient.ReqRes, error) {
func (_m *Client) InitChainAsync(_a0 context.Context, _a1 types.RequestInitChain) (*abcicli.ReqRes, error) {
ret := _m.Called(_a0, _a1)

var r0 *abciclient.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestInitChain) *abciclient.ReqRes); ok {
var r0 *abcicli.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestInitChain) *abcicli.ReqRes); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*abciclient.ReqRes)
r0 = ret.Get(0).(*abcicli.ReqRes)
}
}

@@ -499,15 +499,15 @@ func (_m *Client) IsRunning() bool {
}

// ListSnapshotsAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) ListSnapshotsAsync(_a0 context.Context, _a1 types.RequestListSnapshots) (*abciclient.ReqRes, error) {
func (_m *Client) ListSnapshotsAsync(_a0 context.Context, _a1 types.RequestListSnapshots) (*abcicli.ReqRes, error) {
ret := _m.Called(_a0, _a1)

var r0 *abciclient.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestListSnapshots) *abciclient.ReqRes); ok {
var r0 *abcicli.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestListSnapshots) *abcicli.ReqRes); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*abciclient.ReqRes)
r0 = ret.Get(0).(*abcicli.ReqRes)
}
}

@@ -545,15 +545,15 @@ func (_m *Client) ListSnapshotsSync(_a0 context.Context, _a1 types.RequestListSn
}

// LoadSnapshotChunkAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) LoadSnapshotChunkAsync(_a0 context.Context, _a1 types.RequestLoadSnapshotChunk) (*abciclient.ReqRes, error) {
func (_m *Client) LoadSnapshotChunkAsync(_a0 context.Context, _a1 types.RequestLoadSnapshotChunk) (*abcicli.ReqRes, error) {
ret := _m.Called(_a0, _a1)

var r0 *abciclient.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestLoadSnapshotChunk) *abciclient.ReqRes); ok {
var r0 *abcicli.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestLoadSnapshotChunk) *abcicli.ReqRes); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*abciclient.ReqRes)
r0 = ret.Get(0).(*abcicli.ReqRes)
}
}

@@ -591,15 +591,15 @@ func (_m *Client) LoadSnapshotChunkSync(_a0 context.Context, _a1 types.RequestLo
}

// OfferSnapshotAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) OfferSnapshotAsync(_a0 context.Context, _a1 types.RequestOfferSnapshot) (*abciclient.ReqRes, error) {
func (_m *Client) OfferSnapshotAsync(_a0 context.Context, _a1 types.RequestOfferSnapshot) (*abcicli.ReqRes, error) {
ret := _m.Called(_a0, _a1)

var r0 *abciclient.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestOfferSnapshot) *abciclient.ReqRes); ok {
var r0 *abcicli.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestOfferSnapshot) *abcicli.ReqRes); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*abciclient.ReqRes)
r0 = ret.Get(0).(*abcicli.ReqRes)
}
}

@@ -670,15 +670,15 @@ func (_m *Client) OnStop() {
}

// QueryAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) QueryAsync(_a0 context.Context, _a1 types.RequestQuery) (*abciclient.ReqRes, error) {
func (_m *Client) QueryAsync(_a0 context.Context, _a1 types.RequestQuery) (*abcicli.ReqRes, error) {
ret := _m.Called(_a0, _a1)

var r0 *abciclient.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestQuery) *abciclient.ReqRes); ok {
var r0 *abcicli.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestQuery) *abcicli.ReqRes); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*abciclient.ReqRes)
r0 = ret.Get(0).(*abcicli.ReqRes)
}
}

@@ -751,7 +751,7 @@ func (_m *Client) SetLogger(_a0 log.Logger) {
}

// SetResponseCallback provides a mock function with given fields: _a0
func (_m *Client) SetResponseCallback(_a0 abciclient.Callback) {
func (_m *Client) SetResponseCallback(_a0 abcicli.Callback) {
_m.Called(_a0)
}

@@ -796,8 +796,3 @@ func (_m *Client) String() string {

return r0
}

// Wait provides a mock function with given fields:
func (_m *Client) Wait() {
_m.Called()
}
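As an aside for readers unfamiliar with these generated mocks: _m.Called(...) records the invocation and returns whatever canned values were registered with On(...). A rough usage sketch under testify's mock semantics; the test itself and the mocks package location are assumptions, not part of this diff:

    package example_test

    import (
        "context"
        "testing"

        "github.com/stretchr/testify/mock"
        "github.com/stretchr/testify/require"

        abcicli "github.com/tendermint/tendermint/abci/client"
        "github.com/tendermint/tendermint/abci/client/mocks" // assumed location of the generated mock
    )

    func TestFlushAsyncStub(t *testing.T) {
        m := new(mocks.Client)
        reqres := new(abcicli.ReqRes) // canned return value for the stub

        // Register the return values; _m.Called(_a0) inside FlushAsync hands them back.
        m.On("FlushAsync", mock.Anything).Return(reqres, nil)

        got, err := m.FlushAsync(context.Background())
        require.NoError(t, err)
        require.Same(t, reqres, got)
        m.AssertExpectations(t)
    }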
@@ -1,4 +1,4 @@
package abciclient
package abcicli

import (
"bufio"
@@ -12,15 +12,18 @@ import (
"time"

"github.com/tendermint/tendermint/abci/types"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
tmnet "github.com/tendermint/tendermint/libs/net"
"github.com/tendermint/tendermint/libs/service"
tmsync "github.com/tendermint/tendermint/libs/sync"
"github.com/tendermint/tendermint/libs/timer"
)

const (
// reqQueueSize is the max number of queued async requests.
// (memory: 256MB max assuming 1MB transactions)
reqQueueSize = 256
// Don't wait longer than...
flushThrottleMS = 20
)

type reqResWithContext struct {
@@ -37,7 +40,8 @@ type socketClient struct {
mustConnect bool
conn net.Conn

reqQueue chan *reqResWithContext
reqQueue chan *reqResWithContext
flushTimer *timer.ThrottleTimer

mtx tmsync.Mutex
err error
@@ -53,6 +57,7 @@ var _ Client = (*socketClient)(nil)
func NewSocketClient(addr string, mustConnect bool) Client {
cli := &socketClient{
reqQueue: make(chan *reqResWithContext, reqQueueSize),
flushTimer: timer.NewThrottleTimer("socketClient", flushThrottleMS),
mustConnect: mustConnect,

addr: addr,
@@ -97,7 +102,8 @@ func (cli *socketClient) OnStop() {
cli.conn.Close()
}

cli.drainQueue()
cli.flushQueue()
cli.flushTimer.Stop()
}

// Error returns an error if the client was stopped abruptly.
@@ -120,25 +126,38 @@ func (cli *socketClient) SetResponseCallback(resCb Callback) {
//----------------------------------------

func (cli *socketClient) sendRequestsRoutine(conn io.Writer) {
bw := bufio.NewWriter(conn)
w := bufio.NewWriter(conn)
for {
select {
case reqres := <-cli.reqQueue:
// cli.Logger.Debug("Sent request", "requestType", reflect.TypeOf(reqres.Request), "request", reqres.Request)

if reqres.C.Err() != nil {
cli.Logger.Debug("Request's context is done", "req", reqres.R, "err", reqres.C.Err())
continue
}
cli.willSendReq(reqres.R)

if err := types.WriteMessage(reqres.R.Request, bw); err != nil {
cli.willSendReq(reqres.R)
err := types.WriteMessage(reqres.R.Request, w)
if err != nil {
cli.stopForError(fmt.Errorf("write to buffer: %w", err))
return
}
if err := bw.Flush(); err != nil {
cli.stopForError(fmt.Errorf("flush buffer: %w", err))
return
}

// If it's a flush request, flush the current buffer.
if _, ok := reqres.R.Request.Value.(*types.Request_Flush); ok {
err = w.Flush()
if err != nil {
cli.stopForError(fmt.Errorf("flush buffer: %w", err))
return
}
}
case <-cli.flushTimer.Ch: // flush queue
select {
case cli.reqQueue <- &reqResWithContext{R: NewReqRes(types.ToRequestFlush()), C: context.Background()}:
default:
// Probably will fill the buffer, or retry later.
}
case <-cli.Quit():
return
}
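The rewritten loop above stops flushing after every message: writes accumulate in the bufio.Writer, and only a Request_Flush (or the throttle timer, which enqueues one) forces them onto the wire. A minimal, self-contained sketch of that batching pattern, using a generic message type rather than the actual client:

    package main

    import (
        "bufio"
        "fmt"
        "os"
    )

    type msg struct {
        payload string
        flush   bool // models *types.Request_Flush
    }

    // sendLoop batches writes in a buffered writer and only flushes when a
    // flush message passes through, trading per-message latency for fewer
    // syscalls.
    func sendLoop(w *bufio.Writer, queue <-chan msg) error {
        for m := range queue {
            if _, err := w.WriteString(m.payload); err != nil {
                return fmt.Errorf("write to buffer: %w", err)
            }
            if m.flush {
                if err := w.Flush(); err != nil {
                    return fmt.Errorf("flush buffer: %w", err)
                }
            }
        }
        return w.Flush() // drain whatever is left once the queue closes
    }

    func main() {
        q := make(chan msg, 4)
        q <- msg{payload: "echo\n"}
        q <- msg{payload: "info\n", flush: true} // plays the role of ToRequestFlush()
        close(q)
        if err := sendLoop(bufio.NewWriter(os.Stdout), q); err != nil {
            fmt.Fprintln(os.Stderr, err)
        }
    }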
@@ -207,7 +226,9 @@ func (cli *socketClient) didRecvResponse(res *types.Response) error {
//
// NOTE: it is possible this callback isn't set on the reqres object at this
// point, in which case it will be called later, once it is set.
reqres.InvokeCallback()
if cb := reqres.GetCallback(); cb != nil {
cb(res)
}

return nil
}

@@ -473,6 +494,14 @@ func (cli *socketClient) queueRequest(ctx context.Context, req *types.Request, s
}
}

// Maybe auto-flush, or unset auto-flush
switch req.Value.(type) {
case *types.Request_Flush:
cli.flushTimer.Unset()
default:
cli.flushTimer.Set()
}

return reqres, nil
}

@@ -510,9 +539,7 @@ func queueErr(e error) error {
return fmt.Errorf("can't queue req: %w", e)
}

// drainQueue marks as complete and discards all remaining pending requests
// from the queue.
func (cli *socketClient) drainQueue() {
func (cli *socketClient) flushQueue() {
cli.mtx.Lock()
defer cli.mtx.Unlock()

@@ -522,17 +549,14 @@ func (cli *socketClient) drainQueue() {
reqres.Done()
}

// Mark all queued messages as resolved.
//
// TODO(creachadair): We can't simply range the channel, because it is never
// closed, and the writer continues to add work.
// See https://github.com/tendermint/tendermint/issues/6996.
// mark all queued messages as resolved
LOOP:
for {
select {
case reqres := <-cli.reqQueue:
reqres.R.Done()
default:
return
break LOOP
}
}
}
@@ -1,4 +1,4 @@
package abciclient_test
package abcicli_test

import (
"context"
@@ -6,14 +6,13 @@ import (
"testing"
"time"

"math/rand"

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"

abciclient "github.com/tendermint/tendermint/abci/client"
abcicli "github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/abci/server"
"github.com/tendermint/tendermint/abci/types"
tmrand "github.com/tendermint/tendermint/libs/rand"
"github.com/tendermint/tendermint/libs/service"
)

@@ -100,9 +99,9 @@ func TestHangingSyncCalls(t *testing.T) {
}

func setupClientServer(t *testing.T, app types.Application) (
service.Service, abciclient.Client) {
service.Service, abcicli.Client) {
// some port between 20k and 30k
port := 20000 + rand.Int31()%10000
port := 20000 + tmrand.Int32()%10000
addr := fmt.Sprintf("localhost:%d", port)

s, err := server.NewServer(addr, "socket", app)
@@ -110,7 +109,7 @@ func setupClientServer(t *testing.T, app types.Application) (
err = s.Start()
require.NoError(t, err)

c := abciclient.NewSocketClient(addr, true)
c := abcicli.NewSocketClient(addr, true)
err = c.Start()
require.NoError(t, err)
@@ -15,8 +15,9 @@ import (
"github.com/tendermint/tendermint/libs/log"
tmos "github.com/tendermint/tendermint/libs/os"

abciclient "github.com/tendermint/tendermint/abci/client"
abcicli "github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/abci/example/code"
"github.com/tendermint/tendermint/abci/example/counter"
"github.com/tendermint/tendermint/abci/example/kvstore"
"github.com/tendermint/tendermint/abci/server"
servertest "github.com/tendermint/tendermint/abci/tests/server"
@@ -27,7 +28,7 @@ import (

// client is a global variable so it can be reused by the console
var (
client abciclient.Client
client abcicli.Client
logger log.Logger

ctx = context.Background()
@@ -46,6 +47,9 @@ var (
flagHeight int
flagProve bool

// counter
flagSerial bool

// kvstore
flagPersist string
)
@@ -57,17 +61,22 @@ var RootCmd = &cobra.Command{
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {

switch cmd.Use {
case "kvstore", "version":
case "counter", "kvstore": // for the example apps, don't pre-run
return nil
case "version": // skip running for version command
return nil
}

if logger == nil {
logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
allowLevel, err := log.AllowLevel(flagLogLevel)
if err != nil {
return err
}
logger = log.NewFilter(log.NewTMLogger(log.NewSyncWriter(os.Stdout)), allowLevel)
}

if client == nil {
var err error
client, err = abciclient.NewClient(flagAddress, flagAbci, false)
client, err = abcicli.NewClient(flagAddress, flagAbci, false)
if err != nil {
return err
}
@@ -129,6 +138,10 @@ func addQueryFlags() {
"whether or not to return a merkle proof of the query result")
}

func addCounterFlags() {
counterCmd.PersistentFlags().BoolVarP(&flagSerial, "serial", "", false, "enforce incrementing (serial) transactions")
}

func addKVStoreFlags() {
kvstoreCmd.PersistentFlags().StringVarP(&flagPersist, "persist", "", "", "directory to use for a database")
}
@@ -147,6 +160,8 @@ func addCommands() {
RootCmd.AddCommand(queryCmd)

// examples
addCounterFlags()
RootCmd.AddCommand(counterCmd)
addKVStoreFlags()
RootCmd.AddCommand(kvstoreCmd)
}
@@ -246,6 +261,14 @@ var queryCmd = &cobra.Command{
RunE: cmdQuery,
}

var counterCmd = &cobra.Command{
Use: "counter",
Short: "ABCI demo example",
Long: "ABCI demo example",
Args: cobra.ExactArgs(0),
RunE: cmdCounter,
}

var kvstoreCmd = &cobra.Command{
Use: "kvstore",
Short: "ABCI demo example",
@@ -573,8 +596,34 @@ func cmdQuery(cmd *cobra.Command, args []string) error {
return nil
}

func cmdCounter(cmd *cobra.Command, args []string) error {
app := counter.NewApplication(flagSerial)
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))

// Start the listener
srv, err := server.NewServer(flagAddress, flagAbci, app)
if err != nil {
return err
}
srv.SetLogger(logger.With("module", "abci-server"))
if err := srv.Start(); err != nil {
return err
}

// Stop upon receiving SIGTERM or CTRL-C.
tmos.TrapSignal(logger, func() {
// Cleanup
if err := srv.Stop(); err != nil {
logger.Error("Error while stopping server", "err", err)
}
})

// Run forever.
select {}
}

func cmdKVStore(cmd *cobra.Command, args []string) error {
logger := log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))

// Create the application - in memory or persisted to disk
var app types.Application
86 abci/example/counter/counter.go Normal file
@@ -0,0 +1,86 @@
package counter

import (
    "encoding/binary"
    "fmt"

    "github.com/tendermint/tendermint/abci/example/code"
    "github.com/tendermint/tendermint/abci/types"
)

type Application struct {
    types.BaseApplication

    hashCount int
    txCount   int
    serial    bool
}

func NewApplication(serial bool) *Application {
    return &Application{serial: serial}
}

func (app *Application) Info(req types.RequestInfo) types.ResponseInfo {
    return types.ResponseInfo{Data: fmt.Sprintf("{\"hashes\":%v,\"txs\":%v}", app.hashCount, app.txCount)}
}

func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
    if app.serial {
        if len(req.Tx) > 8 {
            return types.ResponseDeliverTx{
                Code: code.CodeTypeEncodingError,
                Log:  fmt.Sprintf("Max tx size is 8 bytes, got %d", len(req.Tx))}
        }
        tx8 := make([]byte, 8)
        copy(tx8[len(tx8)-len(req.Tx):], req.Tx)
        txValue := binary.BigEndian.Uint64(tx8)
        if txValue != uint64(app.txCount) {
            return types.ResponseDeliverTx{
                Code: code.CodeTypeBadNonce,
                Log:  fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.txCount, txValue)}
        }
    }
    app.txCount++
    return types.ResponseDeliverTx{Code: code.CodeTypeOK}
}

func (app *Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
    if app.serial {
        if len(req.Tx) > 8 {
            return types.ResponseCheckTx{
                Code: code.CodeTypeEncodingError,
                Log:  fmt.Sprintf("Max tx size is 8 bytes, got %d", len(req.Tx))}
        }

        tx8 := make([]byte, 8)
        copy(tx8[len(tx8)-len(req.Tx):], req.Tx)
        txValue := binary.BigEndian.Uint64(tx8)
        if txValue < uint64(app.txCount) {
            return types.ResponseCheckTx{
                Code: code.CodeTypeBadNonce,
                Log:  fmt.Sprintf("Invalid nonce. Expected >= %v, got %v", app.txCount, txValue)}
        }
    }
    return types.ResponseCheckTx{Code: code.CodeTypeOK}
}

func (app *Application) Commit() (resp types.ResponseCommit) {
    app.hashCount++
    if app.txCount == 0 {
        return types.ResponseCommit{}
    }
    hash := make([]byte, 8)
    binary.BigEndian.PutUint64(hash, uint64(app.txCount))
    return types.ResponseCommit{Data: hash}
}

func (app *Application) Query(reqQuery types.RequestQuery) types.ResponseQuery {
    switch reqQuery.Path {
    case "hash":
        return types.ResponseQuery{Value: []byte(fmt.Sprintf("%v", app.hashCount))}
    case "tx":
        return types.ResponseQuery{Value: []byte(fmt.Sprintf("%v", app.txCount))}
    default:
        return types.ResponseQuery{Log: fmt.Sprintf("Invalid query path. Expected hash or tx, got %v", reqQuery.Path)}
    }
}
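In serial mode the counter app right-aligns the tx bytes into an 8-byte big-endian integer and requires each DeliverTx value to equal the running txCount exactly. A small illustrative helper (not part of the diff) that builds the next acceptable tx:

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    // nextSerialTx encodes the expected nonce the way the counter app decodes
    // it: the tx bytes are right-aligned into an 8-byte big-endian integer.
    func nextSerialTx(txCount int) []byte {
        tx := make([]byte, 8)
        binary.BigEndian.PutUint64(tx, uint64(txCount))
        return tx
    }

    func main() {
        // After 5 accepted txs, the only value DeliverTx will accept is 5.
        fmt.Printf("%x\n", nextSerialTx(5)) // 0000000000000005
    }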
@@ -17,7 +17,7 @@ import (
"github.com/tendermint/tendermint/libs/log"
tmnet "github.com/tendermint/tendermint/libs/net"

abciclient "github.com/tendermint/tendermint/abci/client"
abcicli "github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/abci/example/code"
"github.com/tendermint/tendermint/abci/example/kvstore"
abciserver "github.com/tendermint/tendermint/abci/server"
@@ -61,7 +61,7 @@ func testStream(t *testing.T, app types.Application) {
})

// Connect to the socket
client := abciclient.NewSocketClient(socket, false)
client := abcicli.NewSocketClient(socket, false)
client.SetLogger(log.TestingLogger().With("module", "abci-client"))
err = client.Start()
require.NoError(t, err)

@@ -1,8 +1,6 @@
package kvstore

import (
mrand "math/rand"

"github.com/tendermint/tendermint/abci/types"
tmrand "github.com/tendermint/tendermint/libs/rand"
)
@@ -11,8 +9,7 @@ import (
// from the input value
func RandVal(i int) types.ValidatorUpdate {
pubkey := tmrand.Bytes(32)
// Random value between [0, 2^16 - 1]
power := mrand.Uint32() & (1<<16 - 1) // nolint:gosec // G404: Use of weak random number generator
power := tmrand.Uint16() + 1
v := types.UpdateValidator(pubkey, int64(power), "")
return v
}

@@ -87,16 +87,15 @@ func (app *Application) Info(req types.RequestInfo) (resInfo types.ResponseInfo)

// tx is either "key=value" or just arbitrary bytes
func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
var key, value string

var key, value []byte
parts := bytes.Split(req.Tx, []byte("="))
if len(parts) == 2 {
key, value = string(parts[0]), string(parts[1])
key, value = parts[0], parts[1]
} else {
key, value = string(req.Tx), string(req.Tx)
key, value = req.Tx, req.Tx
}

err := app.state.db.Set(prefixKey([]byte(key)), []byte(value))
err := app.state.db.Set(prefixKey(key), value)
if err != nil {
panic(err)
}
@@ -106,10 +105,10 @@ func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeli
{
Type: "app",
Attributes: []types.EventAttribute{
{Key: "creator", Value: "Cosmoshi Netowoko", Index: true},
{Key: "key", Value: key, Index: true},
{Key: "index_key", Value: "index is working", Index: true},
{Key: "noindex_key", Value: "index is working", Index: false},
{Key: []byte("creator"), Value: []byte("Cosmoshi Netowoko"), Index: true},
{Key: []byte("key"), Value: key, Index: true},
{Key: []byte("index_key"), Value: []byte("index is working"), Index: true},
{Key: []byte("noindex_key"), Value: []byte("index is working"), Index: false},
},
},
}
@@ -12,7 +12,7 @@ import (
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/service"

abciclient "github.com/tendermint/tendermint/abci/client"
abcicli "github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/abci/example/code"
abciserver "github.com/tendermint/tendermint/abci/server"
"github.com/tendermint/tendermint/abci/types"
@@ -229,7 +229,7 @@ func valsEqual(t *testing.T, vals1, vals2 []types.ValidatorUpdate) {
}
}

func makeSocketClientServer(app types.Application, name string) (abciclient.Client, service.Service, error) {
func makeSocketClientServer(app types.Application, name string) (abcicli.Client, service.Service, error) {
// Start the listener
socket := fmt.Sprintf("unix://%s.sock", name)
logger := log.TestingLogger()
@@ -241,7 +241,7 @@ func makeSocketClientServer(app types.Application, name string) (abciclient.Clie
}

// Connect to the socket
client := abciclient.NewSocketClient(socket, false)
client := abcicli.NewSocketClient(socket, false)
client.SetLogger(logger.With("module", "abci-client"))
if err := client.Start(); err != nil {
if err = server.Stop(); err != nil {
@@ -253,7 +253,7 @@ func makeSocketClientServer(app types.Application, name string) (abciclient.Clie
return client, server, nil
}

func makeGRPCClientServer(app types.Application, name string) (abciclient.Client, service.Service, error) {
func makeGRPCClientServer(app types.Application, name string) (abcicli.Client, service.Service, error) {
// Start the listener
socket := fmt.Sprintf("unix://%s.sock", name)
logger := log.TestingLogger()
@@ -265,7 +265,7 @@ func makeGRPCClientServer(app types.Application, name string) (abciclient.Client
return nil, nil, err
}

client := abciclient.NewGRPCClient(socket, true)
client := abcicli.NewGRPCClient(socket, true)
client.SetLogger(logger.With("module", "abci-client"))
if err := client.Start(); err != nil {
if err := server.Stop(); err != nil {
@@ -313,7 +313,7 @@ func TestClientServer(t *testing.T) {
runClientTests(t, gclient)
}

func runClientTests(t *testing.T, client abciclient.Client) {
func runClientTests(t *testing.T, client abcicli.Client) {
// run some tests....
key := testKey
value := key
@@ -325,7 +325,7 @@ func runClientTests(t *testing.T, client abciclient.Client) {
testClient(t, client, tx, key, value)
}

func testClient(t *testing.T, app abciclient.Client, tx []byte, key, value string) {
func testClient(t *testing.T, app abcicli.Client, tx []byte, key, value string) {
ar, err := app.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: tx})
require.NoError(t, err)
require.False(t, ar.IsErr(), ar)
@@ -11,9 +11,9 @@ import (

"github.com/tendermint/tendermint/abci/example/code"
"github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/crypto/encoding"
cryptoenc "github.com/tendermint/tendermint/crypto/encoding"
"github.com/tendermint/tendermint/libs/log"
cryptoproto "github.com/tendermint/tendermint/proto/tendermint/crypto"
pc "github.com/tendermint/tendermint/proto/tendermint/crypto"
)

const (
@@ -30,7 +30,7 @@ type PersistentKVStoreApplication struct {
// validator set
ValUpdates []types.ValidatorUpdate

valAddrToPubKeyMap map[string]cryptoproto.PublicKey
valAddrToPubKeyMap map[string]pc.PublicKey

logger log.Logger
}
@@ -46,15 +46,11 @@ func NewPersistentKVStoreApplication(dbDir string) *PersistentKVStoreApplication

return &PersistentKVStoreApplication{
app: &Application{state: state},
valAddrToPubKeyMap: make(map[string]cryptoproto.PublicKey),
valAddrToPubKeyMap: make(map[string]pc.PublicKey),
logger: log.NewNopLogger(),
}
}

func (app *PersistentKVStoreApplication) Close() error {
return app.app.state.db.Close()
}

func (app *PersistentKVStoreApplication) SetLogger(l log.Logger) {
app.logger = l
}
@@ -194,8 +190,8 @@ func (app *PersistentKVStoreApplication) Validators() (validators []types.Valida
return
}

func MakeValSetChangeTx(pubkey cryptoproto.PublicKey, power int64) []byte {
pk, err := encoding.PubKeyFromProto(pubkey)
func MakeValSetChangeTx(pubkey pc.PublicKey, power int64) []byte {
pk, err := cryptoenc.PubKeyFromProto(pubkey)
if err != nil {
panic(err)
}
@@ -243,7 +239,7 @@ func (app *PersistentKVStoreApplication) execValidatorTx(tx []byte) types.Respon

// add, update, or remove a validator
func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate) types.ResponseDeliverTx {
pubkey, err := encoding.PubKeyFromProto(v.PubKey)
pubkey, err := cryptoenc.PubKeyFromProto(v.PubKey)
if err != nil {
panic(fmt.Errorf("can't decode public key: %w", err))
}
@@ -9,10 +9,10 @@ import (
"runtime"

"github.com/tendermint/tendermint/abci/types"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
tmlog "github.com/tendermint/tendermint/libs/log"
tmnet "github.com/tendermint/tendermint/libs/net"
"github.com/tendermint/tendermint/libs/service"
tmsync "github.com/tendermint/tendermint/libs/sync"
)

// var maxNumberConnections = 2
@@ -240,15 +240,22 @@ func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types

// Pull responses from 'responses' and write them to conn.
func (s *SocketServer) handleResponses(closeConn chan error, conn io.Writer, responses <-chan *types.Response) {
bw := bufio.NewWriter(conn)
for res := range responses {
if err := types.WriteMessage(res, bw); err != nil {
var count int
var bufWriter = bufio.NewWriter(conn)
for {
var res = <-responses
err := types.WriteMessage(res, bufWriter)
if err != nil {
closeConn <- fmt.Errorf("error writing message: %w", err)
return
}
if err := bw.Flush(); err != nil {
closeConn <- fmt.Errorf("error flushing write buffer: %w", err)
return
if _, ok := res.Value.(*types.Response_Flush); ok {
err = bufWriter.Flush()
if err != nil {
closeConn <- fmt.Errorf("error flushing write buffer: %w", err)
return
}
}
count++
}
}
@@ -5,7 +5,7 @@ import (

"github.com/stretchr/testify/assert"

abciclientent "github.com/tendermint/tendermint/abci/client"
abciclient "github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/abci/example/kvstore"
abciserver "github.com/tendermint/tendermint/abci/server"
)
@@ -20,7 +20,7 @@ func TestClientServerNoAddrPrefix(t *testing.T) {
err = server.Start()
assert.NoError(t, err, "expected no error on server.Start")

client, err := abciclientent.NewClient(addr, transport, true)
client, err := abciclient.NewClient(addr, transport, true)
assert.NoError(t, err, "expected no error on NewClient")
err = client.Start()
assert.NoError(t, err, "expected no error on client.Start")
@@ -5,22 +5,20 @@ import (
"context"
"errors"
"fmt"
mrand "math/rand"

abciclient "github.com/tendermint/tendermint/abci/client"
abcicli "github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/abci/types"
tmrand "github.com/tendermint/tendermint/libs/rand"
)

var ctx = context.Background()

func InitChain(client abciclient.Client) error {
func InitChain(client abcicli.Client) error {
total := 10
vals := make([]types.ValidatorUpdate, total)
for i := 0; i < total; i++ {
pubkey := tmrand.Bytes(33)
// nolint:gosec // G404: Use of weak random number generator
power := mrand.Int()
power := tmrand.Int()
vals[i] = types.UpdateValidator(pubkey, int64(power), "")
}
_, err := client.InitChainSync(ctx, types.RequestInitChain{
@@ -34,7 +32,7 @@ func InitChain(client abciclient.Client) error {
return nil
}

func Commit(client abciclient.Client, hashExp []byte) error {
func Commit(client abcicli.Client, hashExp []byte) error {
res, err := client.CommitSync(ctx)
data := res.Data
if err != nil {
@@ -51,7 +49,7 @@ func Commit(client abciclient.Client, hashExp []byte) error {
return nil
}

func DeliverTx(client abciclient.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
func DeliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
res, _ := client.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: txBytes})
code, data, log := res.Code, res.Data, res.Log
if code != codeExp {
@@ -70,7 +68,7 @@ func DeliverTx(client abciclient.Client, txBytes []byte, codeExp uint32, dataExp
return nil
}

func CheckTx(client abciclient.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
func CheckTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
res, _ := client.CheckTxSync(ctx, types.RequestCheckTx{Tx: txBytes})
code, data, log := res.Code, res.Data, res.Log
if code != codeExp {
56 abci/tests/test_app/app.go Normal file
@@ -0,0 +1,56 @@
package main

import (
    "bytes"
    "context"
    "fmt"
    "os"

    abcicli "github.com/tendermint/tendermint/abci/client"
    "github.com/tendermint/tendermint/abci/types"
    "github.com/tendermint/tendermint/libs/log"
)

var ctx = context.Background()

func startClient(abciType string) abcicli.Client {
    // Start client
    client, err := abcicli.NewClient("tcp://127.0.0.1:26658", abciType, true)
    if err != nil {
        panic(err.Error())
    }
    logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
    client.SetLogger(logger.With("module", "abcicli"))
    if err := client.Start(); err != nil {
        panicf("connecting to abci_app: %v", err.Error())
    }

    return client
}

func commit(client abcicli.Client, hashExp []byte) {
    res, err := client.CommitSync(ctx)
    if err != nil {
        panicf("client error: %v", err)
    }
    if !bytes.Equal(res.Data, hashExp) {
        panicf("Commit hash was unexpected. Got %X expected %X", res.Data, hashExp)
    }
}

func deliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) {
    res, err := client.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: txBytes})
    if err != nil {
        panicf("client error: %v", err)
    }
    if res.Code != codeExp {
        panicf("DeliverTx response code was unexpected. Got %v expected %v. Log: %v", res.Code, codeExp, res.Log)
    }
    if !bytes.Equal(res.Data, dataExp) {
        panicf("DeliverTx response data was unexpected. Got %X expected %X", res.Data, dataExp)
    }
}

func panicf(format string, a ...interface{}) {
    panic(fmt.Sprintf(format, a...))
}
93 abci/tests/test_app/main.go Normal file
@@ -0,0 +1,93 @@
package main

import (
    "fmt"
    "log"
    "os"
    "os/exec"
    "time"

    "github.com/tendermint/tendermint/abci/types"
)

var abciType string

func init() {
    abciType = os.Getenv("ABCI")
    if abciType == "" {
        abciType = "socket"
    }
}

func main() {
    testCounter()
}

const (
    maxABCIConnectTries = 10
)

func ensureABCIIsUp(typ string, n int) error {
    var err error
    cmdString := "abci-cli echo hello"
    if typ == "grpc" {
        cmdString = "abci-cli --abci grpc echo hello"
    }

    for i := 0; i < n; i++ {
        cmd := exec.Command("bash", "-c", cmdString)
        _, err = cmd.CombinedOutput()
        if err == nil {
            break
        }
        time.Sleep(500 * time.Millisecond)
    }
    return err
}

func testCounter() {
    abciApp := os.Getenv("ABCI_APP")
    if abciApp == "" {
        panic("No ABCI_APP specified")
    }

    fmt.Printf("Running %s test with abci=%s\n", abciApp, abciType)
    subCommand := fmt.Sprintf("abci-cli %s", abciApp)
    cmd := exec.Command("bash", "-c", subCommand)
    cmd.Stdout = os.Stdout
    if err := cmd.Start(); err != nil {
        log.Fatalf("starting %q err: %v", abciApp, err)
    }
    defer func() {
        if err := cmd.Process.Kill(); err != nil {
            log.Printf("error on process kill: %v", err)
        }
        if err := cmd.Wait(); err != nil {
            log.Printf("error while waiting for cmd to exit: %v", err)
        }
    }()

    if err := ensureABCIIsUp(abciType, maxABCIConnectTries); err != nil {
        log.Fatalf("echo failed: %v", err) //nolint:gocritic
    }

    client := startClient(abciType)
    defer func() {
        if err := client.Stop(); err != nil {
            log.Printf("error trying client stop: %v", err)
        }
    }()

    // commit(client, nil)
    // deliverTx(client, []byte("abc"), code.CodeTypeBadNonce, nil)
    commit(client, nil)
    deliverTx(client, []byte{0x00}, types.CodeTypeOK, nil)
    commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 1})
    // deliverTx(client, []byte{0x00}, code.CodeTypeBadNonce, nil)
    deliverTx(client, []byte{0x01}, types.CodeTypeOK, nil)
    deliverTx(client, []byte{0x00, 0x02}, types.CodeTypeOK, nil)
    deliverTx(client, []byte{0x00, 0x03}, types.CodeTypeOK, nil)
    deliverTx(client, []byte{0x00, 0x00, 0x04}, types.CodeTypeOK, nil)
    // deliverTx(client, []byte{0x00, 0x00, 0x06}, code.CodeTypeBadNonce, nil)
    commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 5})
}
28 abci/tests/test_app/test.sh Executable file
@@ -0,0 +1,28 @@
#! /bin/bash
set -e

# These tests spawn the counter app and server by execing the ABCI_APP command and run some simple client tests against it

# Get the directory where this script is.
export PATH="$GOBIN:$PATH"
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"

# Change into that dir because we expect that.
cd "$DIR"

echo "RUN COUNTER OVER SOCKET"
# test golang counter
ABCI_APP="counter" go run -mod=readonly ./*.go
echo "----------------------"

echo "RUN COUNTER OVER GRPC"
# test golang counter via grpc
ABCI_APP="counter --abci=grpc" ABCI="grpc" go run -mod=readonly ./*.go
echo "----------------------"

# test nodejs counter
# TODO: fix node app
#ABCI_APP="node $GOPATH/src/github.com/tendermint/js-abci/example/app.js" go test -test.run TestCounter
@@ -37,6 +37,7 @@ function testExample() {
}

testExample 1 tests/test_cli/ex1.abci abci-cli kvstore
testExample 2 tests/test_cli/ex2.abci abci-cli counter

echo ""
echo "PASS"

@@ -1 +0,0 @@
package types
@@ -4,7 +4,7 @@ import (
"io"

"github.com/gogo/protobuf/proto"
"github.com/tendermint/tendermint/internal/libs/protoio"
"github.com/tendermint/tendermint/libs/protoio"
)

const (
@@ -15,7 +15,11 @@ const (
func WriteMessage(msg proto.Message, w io.Writer) error {
protoWriter := protoio.NewDelimitedWriter(w)
_, err := protoWriter.WriteMsg(msg)
return err
if err != nil {
return err
}

return nil
}

// ReadMessage reads a varint length-delimited protobuf message.
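For context, WriteMessage and ReadMessage frame each protobuf message with a varint length prefix so that several messages can share one stream. A hedged round-trip sketch, assuming the abci/types helpers keep the signatures shown above:

    package main

    import (
        "bytes"
        "fmt"

        "github.com/tendermint/tendermint/abci/types"
    )

    func main() {
        var buf bytes.Buffer

        // Write a varint length-delimited request into the buffer...
        if err := types.WriteMessage(types.ToRequestEcho("hello"), &buf); err != nil {
            panic(err)
        }

        // ...and read it back into an empty Request.
        req := &types.Request{}
        if err := types.ReadMessage(&buf, req); err != nil {
            panic(err)
        }
        fmt.Println(req.GetEcho().Message) // hello
    }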
@@ -25,7 +25,7 @@ func TestMarshalJSON(t *testing.T) {
{
Type: "testEvent",
Attributes: []EventAttribute{
{Key: "pho", Value: "bo"},
{Key: []byte("pho"), Value: []byte("bo")},
},
},
},
@@ -92,7 +92,7 @@ func TestWriteReadMessage2(t *testing.T) {
{
Type: "testEvent",
Attributes: []EventAttribute{
{Key: "abc", Value: "def"},
{Key: []byte("abc"), Value: []byte("def")},
},
},
},

@@ -4,20 +4,20 @@ import (
fmt "fmt"

"github.com/tendermint/tendermint/crypto/ed25519"
"github.com/tendermint/tendermint/crypto/encoding"
cryptoenc "github.com/tendermint/tendermint/crypto/encoding"
"github.com/tendermint/tendermint/crypto/secp256k1"
"github.com/tendermint/tendermint/crypto/sr25519"
)

func Ed25519ValidatorUpdate(pk []byte, power int64) ValidatorUpdate {
pke := ed25519.PubKey(pk)

pkp, err := encoding.PubKeyToProto(pke)
pkp, err := cryptoenc.PubKeyToProto(pke)
if err != nil {
panic(err)
}

return ValidatorUpdate{
// Address:
PubKey: pkp,
Power: power,
}
@@ -29,21 +29,12 @@ func UpdateValidator(pk []byte, power int64, keyType string) ValidatorUpdate {
return Ed25519ValidatorUpdate(pk, power)
case secp256k1.KeyType:
pke := secp256k1.PubKey(pk)
pkp, err := encoding.PubKeyToProto(pke)
if err != nil {
panic(err)
}
return ValidatorUpdate{
PubKey: pkp,
Power: power,
}
case sr25519.KeyType:
pke := sr25519.PubKey(pk)
pkp, err := encoding.PubKeyToProto(pke)
pkp, err := cryptoenc.PubKeyToProto(pke)
if err != nil {
panic(err)
}
return ValidatorUpdate{
// Address:
PubKey: pkp,
Power: power,
}

@@ -42,7 +42,7 @@ func (r ResponseQuery) IsErr() bool {
}

//---------------------------------------------------------------------------
// override JSON marshaling so we emit defaults (ie. disable omitempty)
// override JSON marshalling so we emit defaults (ie. disable omitempty)

var (
jsonpbMarshaller = jsonpb.Marshaler{
(File diff suppressed because it is too large.)

42 behaviour/doc.go Normal file
@@ -0,0 +1,42 @@
/*
Package behaviour provides a mechanism for reactors to report behaviour of peers.

Instead of a reactor calling the switch directly it will call the behaviour module which will
handle the stopping and marking peer as good on behalf of the reactor.

There are four different behaviours a reactor can report.

1. bad message

    type badMessage struct {
        explanation string
    }

This message will request the peer be stopped for an error

2. message out of order

    type messageOutOfOrder struct {
        explanation string
    }

This message will request the peer be stopped for an error

3. consensus vote

    type consensusVote struct {
        explanation string
    }

This message will request the peer be marked as good

4. block part

    type blockPart struct {
        explanation string
    }

This message will request the peer be marked as good
*/
package behaviour
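To make the reporting flow concrete, here is a rough usage sketch; handleVote and its arguments are hypothetical, and only the behaviour API calls come from the files below:

    package main

    import (
        "fmt"

        bh "github.com/tendermint/tendermint/behaviour"
        "github.com/tendermint/tendermint/p2p"
    )

    // handleVote is a stand-in for a reactor's receive path: useful votes are
    // reported so the switch can mark the peer as good, garbage gets the peer
    // stopped.
    func handleVote(r bh.Reporter, peerID p2p.NodeID, valid bool) error {
        if valid {
            return r.Report(bh.ConsensusVote(peerID, "useful vote"))
        }
        return r.Report(bh.BadMessage(peerID, "malformed vote"))
    }

    func main() {
        r := bh.NewMockReporter() // in production this would be a SwitchReporter
        if err := handleVote(r, "peer-1", true); err != nil {
            fmt.Println("report failed:", err)
        }
        fmt.Println(len(r.GetBehaviours("peer-1")), "behaviour(s) recorded")
    }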
49 behaviour/peer_behaviour.go Normal file
@@ -0,0 +1,49 @@
package behaviour

import (
    "github.com/tendermint/tendermint/p2p"
)

// PeerBehaviour is a struct describing a behaviour a peer performed.
// `peerID` identifies the peer and reason characterizes the specific
// behaviour performed by the peer.
type PeerBehaviour struct {
    peerID p2p.NodeID
    reason interface{}
}

type badMessage struct {
    explanation string
}

// BadMessage returns a badMessage PeerBehaviour.
func BadMessage(peerID p2p.NodeID, explanation string) PeerBehaviour {
    return PeerBehaviour{peerID: peerID, reason: badMessage{explanation}}
}

type messageOutOfOrder struct {
    explanation string
}

// MessageOutOfOrder returns a messageOutOfOrder PeerBehaviour.
func MessageOutOfOrder(peerID p2p.NodeID, explanation string) PeerBehaviour {
    return PeerBehaviour{peerID: peerID, reason: messageOutOfOrder{explanation}}
}

type consensusVote struct {
    explanation string
}

// ConsensusVote returns a consensusVote PeerBehaviour.
func ConsensusVote(peerID p2p.NodeID, explanation string) PeerBehaviour {
    return PeerBehaviour{peerID: peerID, reason: consensusVote{explanation}}
}

type blockPart struct {
    explanation string
}

// BlockPart returns a blockPart PeerBehaviour.
func BlockPart(peerID p2p.NodeID, explanation string) PeerBehaviour {
    return PeerBehaviour{peerID: peerID, reason: blockPart{explanation}}
}
86 behaviour/reporter.go Normal file
@@ -0,0 +1,86 @@
package behaviour

import (
    "errors"

    tmsync "github.com/tendermint/tendermint/libs/sync"
    "github.com/tendermint/tendermint/p2p"
)

// Reporter provides an interface for reactors to report the behaviour
// of peers synchronously to other components.
type Reporter interface {
    Report(behaviour PeerBehaviour) error
}

// SwitchReporter reports peer behaviour to an internal Switch.
type SwitchReporter struct {
    sw *p2p.Switch
}

// NewSwitchReporter returns a new SwitchReporter instance which wraps the Switch.
func NewSwitchReporter(sw *p2p.Switch) *SwitchReporter {
    return &SwitchReporter{
        sw: sw,
    }
}

// Report reports the behaviour of a peer to the Switch.
func (spbr *SwitchReporter) Report(behaviour PeerBehaviour) error {
    peer := spbr.sw.Peers().Get(behaviour.peerID)
    if peer == nil {
        return errors.New("peer not found")
    }

    switch reason := behaviour.reason.(type) {
    case consensusVote, blockPart:
        spbr.sw.MarkPeerAsGood(peer)
    case badMessage:
        spbr.sw.StopPeerForError(peer, reason.explanation)
    case messageOutOfOrder:
        spbr.sw.StopPeerForError(peer, reason.explanation)
    default:
        return errors.New("unknown reason reported")
    }

    return nil
}

// MockReporter is a concrete implementation of the Reporter
// interface used in reactor tests to ensure reactors report the correct
// behaviour in manufactured scenarios.
type MockReporter struct {
    mtx tmsync.RWMutex
    pb  map[p2p.NodeID][]PeerBehaviour
}

// NewMockReporter returns a Reporter which records all reported
// behaviours in memory.
func NewMockReporter() *MockReporter {
    return &MockReporter{
        pb: map[p2p.NodeID][]PeerBehaviour{},
    }
}

// Report stores the PeerBehaviour produced by the peer identified by peerID.
func (mpbr *MockReporter) Report(behaviour PeerBehaviour) error {
    mpbr.mtx.Lock()
    defer mpbr.mtx.Unlock()
    mpbr.pb[behaviour.peerID] = append(mpbr.pb[behaviour.peerID], behaviour)

    return nil
}

// GetBehaviours returns all behaviours reported on the peer identified by peerID.
func (mpbr *MockReporter) GetBehaviours(peerID p2p.NodeID) []PeerBehaviour {
    mpbr.mtx.RLock()
    defer mpbr.mtx.RUnlock()
    if items, ok := mpbr.pb[peerID]; ok {
        result := make([]PeerBehaviour, len(items))
        copy(result, items)

        return result
    }

    return []PeerBehaviour{}
}
205 behaviour/reporter_test.go Normal file
@@ -0,0 +1,205 @@
package behaviour_test

import (
    "sync"
    "testing"

    bh "github.com/tendermint/tendermint/behaviour"
    "github.com/tendermint/tendermint/p2p"
)

// TestMockReporter tests the MockReporter's ability to store reported
// peer behaviour in memory indexed by the peerID.
func TestMockReporter(t *testing.T) {
    var peerID p2p.NodeID = "MockPeer"
    pr := bh.NewMockReporter()

    behaviours := pr.GetBehaviours(peerID)
    if len(behaviours) != 0 {
        t.Error("Expected to have no behaviours reported")
    }

    badMessage := bh.BadMessage(peerID, "bad message")
    if err := pr.Report(badMessage); err != nil {
        t.Error(err)
    }
    behaviours = pr.GetBehaviours(peerID)
    if len(behaviours) != 1 {
        t.Error("Expected the peer to have one reported behaviour")
    }

    if behaviours[0] != badMessage {
        t.Error("Expected Bad Message to have been reported")
    }
}

type scriptItem struct {
    peerID    p2p.NodeID
    behaviour bh.PeerBehaviour
}

// equalBehaviours returns true if a and b contain the same PeerBehaviours with
// the same frequencies and otherwise false.
func equalBehaviours(a []bh.PeerBehaviour, b []bh.PeerBehaviour) bool {
    aHistogram := map[bh.PeerBehaviour]int{}
    bHistogram := map[bh.PeerBehaviour]int{}

    for _, behaviour := range a {
        aHistogram[behaviour]++
    }

    for _, behaviour := range b {
        bHistogram[behaviour]++
    }

    if len(aHistogram) != len(bHistogram) {
        return false
    }

    for _, behaviour := range a {
        if aHistogram[behaviour] != bHistogram[behaviour] {
            return false
        }
    }

    for _, behaviour := range b {
        if bHistogram[behaviour] != aHistogram[behaviour] {
            return false
        }
    }

    return true
}

// TestEqualPeerBehaviours tests that equalBehaviours can tell that two slices
// of peer behaviours can be compared for the behaviours they contain and the
// frequencies that those behaviours occur.
func TestEqualPeerBehaviours(t *testing.T) {
    var (
        peerID        p2p.NodeID = "MockPeer"
        consensusVote            = bh.ConsensusVote(peerID, "voted")
        blockPart                = bh.BlockPart(peerID, "blocked")
        equals                   = []struct {
            left  []bh.PeerBehaviour
            right []bh.PeerBehaviour
        }{
            // Empty sets
            {[]bh.PeerBehaviour{}, []bh.PeerBehaviour{}},
            // Single behaviours
            {[]bh.PeerBehaviour{consensusVote}, []bh.PeerBehaviour{consensusVote}},
            // Equal Frequencies
            {[]bh.PeerBehaviour{consensusVote, consensusVote},
                []bh.PeerBehaviour{consensusVote, consensusVote}},
            // Equal frequencies different orders
            {[]bh.PeerBehaviour{consensusVote, blockPart},
                []bh.PeerBehaviour{blockPart, consensusVote}},
        }
        unequals = []struct {
            left  []bh.PeerBehaviour
            right []bh.PeerBehaviour
        }{
            // Comparing empty sets to non empty sets
            {[]bh.PeerBehaviour{}, []bh.PeerBehaviour{consensusVote}},
            // Different behaviours
            {[]bh.PeerBehaviour{consensusVote}, []bh.PeerBehaviour{blockPart}},
            // Same behaviour with different frequencies
            {[]bh.PeerBehaviour{consensusVote},
                []bh.PeerBehaviour{consensusVote, consensusVote}},
        }
    )

    for _, test := range equals {
        if !equalBehaviours(test.left, test.right) {
            t.Errorf("expected %#v and %#v to be equal", test.left, test.right)
        }
    }

    for _, test := range unequals {
        if equalBehaviours(test.left, test.right) {
            t.Errorf("expected %#v and %#v to be unequal", test.left, test.right)
        }
    }
}

// TestPeerBehaviourConcurrency constructs a scenario in which
// multiple goroutines are using the same MockReporter instance.
// This test reproduces the conditions in which MockReporter will
// be used within a Reactor's `Receive` method, to ensure thread safety.
func TestMockPeerBehaviourReporterConcurrency(t *testing.T) {
    var (
        behaviourScript = []struct {
            peerID     p2p.NodeID
            behaviours []bh.PeerBehaviour
        }{
            {"1", []bh.PeerBehaviour{bh.ConsensusVote("1", "")}},
            {"2", []bh.PeerBehaviour{bh.ConsensusVote("2", ""), bh.ConsensusVote("2", ""), bh.ConsensusVote("2", "")}},
            {
                "3",
                []bh.PeerBehaviour{bh.BlockPart("3", ""),
                    bh.ConsensusVote("3", ""),
                    bh.BlockPart("3", ""),
                    bh.ConsensusVote("3", "")}},
            {
                "4",
                []bh.PeerBehaviour{bh.ConsensusVote("4", ""),
                    bh.ConsensusVote("4", ""),
                    bh.ConsensusVote("4", ""),
                    bh.ConsensusVote("4", "")}},
            {
                "5",
                []bh.PeerBehaviour{bh.BlockPart("5", ""),
                    bh.ConsensusVote("5", ""),
                    bh.BlockPart("5", ""),
                    bh.ConsensusVote("5", "")}},
        }
    )

    var receiveWg sync.WaitGroup
    pr := bh.NewMockReporter()
    scriptItems := make(chan scriptItem)
    done := make(chan int)
    numConsumers := 3
    for i := 0; i < numConsumers; i++ {
        receiveWg.Add(1)
        go func() {
            defer receiveWg.Done()
            for {
                select {
                case pb := <-scriptItems:
                    if err := pr.Report(pb.behaviour); err != nil {
                        t.Error(err)
                    }
                case <-done:
                    return
                }
            }
        }()
    }

    var sendingWg sync.WaitGroup
    sendingWg.Add(1)
    go func() {
        defer sendingWg.Done()
        for _, item := range behaviourScript {
            for _, reason := range item.behaviours {
                scriptItems <- scriptItem{item.peerID, reason}
            }
        }
    }()

    sendingWg.Wait()

    for i := 0; i < numConsumers; i++ {
        done <- 1
    }

    receiveWg.Wait()

    for _, items := range behaviourScript {
        reported := pr.GetBehaviours(items.peerID)
        if !equalBehaviours(reported, items.behaviours) {
            t.Errorf("expected peer %s to have behaved \nExpected: %#v \nGot %#v \n",
                items.peerID, items.behaviours, reported)
        }
    }
}
17 blockchain/doc.go Normal file
@@ -0,0 +1,17 @@
/*
Package blockchain provides two implementations of the fast-sync protocol.

- v0 was the very first implementation. It's battle tested, but does not have a
lot of test coverage.
- v2 is the newest implementation, with a focus on testability and readability.

Check out ADR-40 for the formal model and requirements.

# Termination criteria

1. the maximum peer height is reached
2. termination timeout is triggered, which is set if the peer set is empty or
there are no pending requests.
*/
package blockchain
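Restated as code, those two criteria look roughly like the sketch below; this is illustrative only, the real scheduler tracks this state differently:

    package main

    import (
        "fmt"
        "time"
    )

    // syncDone mirrors the doc's termination criteria: either we have caught up
    // to the best height any peer reported, or nothing has advanced for longer
    // than the timeout (empty peer set / no pending requests).
    func syncDone(height, maxPeerHeight int64, lastAdvance time.Time, timeout time.Duration) bool {
        if maxPeerHeight > 0 && height >= maxPeerHeight {
            return true // criterion 1: maximum peer height reached
        }
        return time.Since(lastAdvance) > timeout // criterion 2: stalled
    }

    func main() {
        fmt.Println(syncDone(100, 100, time.Now(), time.Minute)) // true
    }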
@@ -1,7 +1,7 @@
package blocksync
package blockchain

import (
bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
"github.com/tendermint/tendermint/types"
)
@@ -7,10 +7,11 @@ import (
"sync/atomic"
"time"

flow "github.com/tendermint/tendermint/internal/libs/flowrate"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
flow "github.com/tendermint/tendermint/libs/flowrate"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/service"
tmsync "github.com/tendermint/tendermint/libs/sync"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/types"
)

@@ -62,10 +63,10 @@ var peerTimeout = 15 * time.Second // not const so we can override with tests
// PeerID responsible for delivering the block.
type BlockRequest struct {
Height int64
PeerID types.NodeID
PeerID p2p.NodeID
}

// BlockPool keeps track of the block sync peers, block requests and block responses.
// BlockPool keeps track of the fast sync peers, block requests and block responses.
type BlockPool struct {
service.BaseService
lastAdvance time.Time
@@ -75,7 +76,7 @@ type BlockPool struct {
requesters map[int64]*bpRequester
height int64 // the lowest key in requesters.
// peers
peers map[types.NodeID]*bpPeer
peers map[p2p.NodeID]*bpPeer
maxPeerHeight int64 // the biggest reported height

// atomic
@@ -83,26 +84,20 @@ type BlockPool struct {

requestsCh chan<- BlockRequest
errorsCh chan<- peerError

startHeight int64
lastHundredBlockTimeStamp time.Time
lastSyncRate float64
}

// NewBlockPool returns a new BlockPool with the height equal to start. Block
// requests and errors will be sent to requestsCh and errorsCh accordingly.
func NewBlockPool(start int64, requestsCh chan<- BlockRequest, errorsCh chan<- peerError) *BlockPool {
bp := &BlockPool{
peers: make(map[types.NodeID]*bpPeer),
peers: make(map[p2p.NodeID]*bpPeer),

requesters: make(map[int64]*bpRequester),
height: start,
startHeight: start,
numPending: 0,
requesters: make(map[int64]*bpRequester),
height: start,
numPending: 0,

requestsCh: requestsCh,
errorsCh: errorsCh,
lastSyncRate: 0,
requestsCh: requestsCh,
errorsCh: errorsCh,
}
bp.BaseService = *service.NewBaseService(nil, "BlockPool", bp)
return bp
@@ -112,7 +107,6 @@ func NewBlockPool(start int64, requestsCh chan<- BlockRequest, errorsCh chan<- p
// pool's start time.
func (pool *BlockPool) OnStart() error {
pool.lastAdvance = time.Now()
pool.lastHundredBlockTimeStamp = pool.lastAdvance
go pool.makeRequestersRoutine()
return nil
}
@@ -223,19 +217,6 @@ func (pool *BlockPool) PopRequest() {
delete(pool.requesters, pool.height)
pool.height++
pool.lastAdvance = time.Now()

// the lastSyncRate will be updated every 100 blocks, it uses the adaptive filter
// to smooth the block sync rate and the unit represents the number of blocks per second.
if (pool.height-pool.startHeight)%100 == 0 {
newSyncRate := 100 / time.Since(pool.lastHundredBlockTimeStamp).Seconds()
if pool.lastSyncRate == 0 {
pool.lastSyncRate = newSyncRate
} else {
pool.lastSyncRate = 0.9*pool.lastSyncRate + 0.1*newSyncRate
}
pool.lastHundredBlockTimeStamp = time.Now()
}

} else {
panic(fmt.Sprintf("Expected requester to pop, got nothing at height %v", pool.height))
}
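The adaptive filter referenced in the hunk above is a plain exponential moving average over 100-block windows. Isolated as a sketch with the same constants (the wrapper function is hypothetical):

    package main

    import "fmt"

    // updateSyncRate applies the pool's adaptive filter: the first sample is
    // taken as-is, afterwards each new 100-block rate only nudges the estimate
    // by 10%, smoothing out bursty windows.
    func updateSyncRate(last, observed float64) float64 {
        if last == 0 {
            return observed
        }
        return 0.9*last + 0.1*observed
    }

    func main() {
        rate := 0.0
        for _, obs := range []float64{50, 80, 20} { // blocks/s per window
            rate = updateSyncRate(rate, obs)
        }
        fmt.Printf("%.1f blocks/s\n", rate) // 50 -> 53 -> 49.7
    }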
@@ -244,13 +225,13 @@ func (pool *BlockPool) PopRequest() {
|
||||
// RedoRequest invalidates the block at pool.height,
|
||||
// Remove the peer and redo request from others.
|
||||
// Returns the ID of the removed peer.
|
||||
func (pool *BlockPool) RedoRequest(height int64) types.NodeID {
|
||||
func (pool *BlockPool) RedoRequest(height int64) p2p.NodeID {
|
||||
pool.mtx.Lock()
|
||||
defer pool.mtx.Unlock()
|
||||
|
||||
request := pool.requesters[height]
|
||||
peerID := request.getPeerID()
|
||||
if peerID != types.NodeID("") {
|
||||
if peerID != p2p.NodeID("") {
|
||||
// RemovePeer will redo all requesters associated with this peer.
|
||||
pool.removePeer(peerID)
|
||||
}
|
||||
@@ -259,7 +240,7 @@ func (pool *BlockPool) RedoRequest(height int64) types.NodeID {
|
||||
|
||||
// AddBlock validates that the block comes from the peer it was expected from and calls the requester to store it.
|
||||
// TODO: ensure that blocks come in order for each peer.
|
||||
func (pool *BlockPool) AddBlock(peerID types.NodeID, block *types.Block, blockSize int) {
|
||||
func (pool *BlockPool) AddBlock(peerID p2p.NodeID, block *types.Block, blockSize int) {
|
||||
pool.mtx.Lock()
|
||||
defer pool.mtx.Unlock()
|
||||
|
||||
@@ -306,7 +287,7 @@ func (pool *BlockPool) LastAdvance() time.Time {
|
||||
}
|
||||
|
||||
// SetPeerRange sets the peer's alleged blockchain base and height.
|
||||
func (pool *BlockPool) SetPeerRange(peerID types.NodeID, base int64, height int64) {
|
||||
func (pool *BlockPool) SetPeerRange(peerID p2p.NodeID, base int64, height int64) {
|
||||
pool.mtx.Lock()
|
||||
defer pool.mtx.Unlock()
|
||||
|
||||
@@ -327,14 +308,14 @@ func (pool *BlockPool) SetPeerRange(peerID types.NodeID, base int64, height int6
|
||||
|
||||
// RemovePeer removes the peer with peerID from the pool. If there's no peer
|
||||
// with peerID, function is a no-op.
|
||||
func (pool *BlockPool) RemovePeer(peerID types.NodeID) {
|
||||
func (pool *BlockPool) RemovePeer(peerID p2p.NodeID) {
|
||||
pool.mtx.Lock()
|
||||
defer pool.mtx.Unlock()
|
||||
|
||||
pool.removePeer(peerID)
|
||||
}
|
||||
|
||||
func (pool *BlockPool) removePeer(peerID types.NodeID) {
|
||||
func (pool *BlockPool) removePeer(peerID p2p.NodeID) {
|
||||
for _, requester := range pool.requesters {
|
||||
if requester.getPeerID() == peerID {
|
||||
requester.redo(peerID)
|
||||
@@ -415,14 +396,14 @@ func (pool *BlockPool) requestersLen() int64 {
|
||||
return int64(len(pool.requesters))
|
||||
}
|
||||
|
||||
func (pool *BlockPool) sendRequest(height int64, peerID types.NodeID) {
|
||||
func (pool *BlockPool) sendRequest(height int64, peerID p2p.NodeID) {
|
||||
if !pool.IsRunning() {
|
||||
return
|
||||
}
|
||||
pool.requestsCh <- BlockRequest{height, peerID}
|
||||
}
|
||||
|
||||
func (pool *BlockPool) sendError(err error, peerID types.NodeID) {
|
||||
func (pool *BlockPool) sendError(err error, peerID p2p.NodeID) {
|
||||
if !pool.IsRunning() {
|
||||
return
|
||||
}
|
||||
@@ -448,20 +429,6 @@ func (pool *BlockPool) debug() string {
|
||||
return str
|
||||
}
|
||||
|
||||
func (pool *BlockPool) targetSyncBlocks() int64 {
|
||||
pool.mtx.RLock()
|
||||
defer pool.mtx.RUnlock()
|
||||
|
||||
return pool.maxPeerHeight - pool.startHeight + 1
|
||||
}
|
||||
|
||||
func (pool *BlockPool) getLastSyncRate() float64 {
|
||||
pool.mtx.RLock()
|
||||
defer pool.mtx.RUnlock()
|
||||
|
||||
return pool.lastSyncRate
|
||||
}

//-------------------------------------

type bpPeer struct {
@@ -470,7 +437,7 @@ type bpPeer struct {
	height      int64
	base        int64
	pool        *BlockPool
	id          types.NodeID
	id          p2p.NodeID
	recvMonitor *flow.Monitor

	timeout *time.Timer
@@ -478,7 +445,7 @@ type bpPeer struct {
	logger log.Logger
}

func newBPPeer(pool *BlockPool, peerID types.NodeID, base int64, height int64) *bpPeer {
func newBPPeer(pool *BlockPool, peerID p2p.NodeID, base int64, height int64) *bpPeer {
	peer := &bpPeer{
		pool: pool,
		id:   peerID,
@@ -543,10 +510,10 @@ type bpRequester struct {
	pool       *BlockPool
	height     int64
	gotBlockCh chan struct{}
	redoCh     chan types.NodeID // redo may be sent multiple times; the peer ID identifies repeats
	redoCh     chan p2p.NodeID // redo may be sent multiple times; the peer ID identifies repeats

	mtx    tmsync.Mutex
	peerID types.NodeID
	peerID p2p.NodeID
	block  *types.Block
}

@@ -555,7 +522,7 @@ func newBPRequester(pool *BlockPool, height int64) *bpRequester {
		pool:       pool,
		height:     height,
		gotBlockCh: make(chan struct{}, 1),
		redoCh:     make(chan types.NodeID, 1),
		redoCh:     make(chan p2p.NodeID, 1),

		peerID: "",
		block:  nil,
@@ -570,7 +537,7 @@ func (bpr *bpRequester) OnStart() error {
}

// Returns true if the peer matches and block doesn't already exist.
func (bpr *bpRequester) setBlock(block *types.Block, peerID types.NodeID) bool {
func (bpr *bpRequester) setBlock(block *types.Block, peerID p2p.NodeID) bool {
	bpr.mtx.Lock()
	if bpr.block != nil || bpr.peerID != peerID {
		bpr.mtx.Unlock()
@@ -592,7 +559,7 @@ func (bpr *bpRequester) getBlock() *types.Block {
	return bpr.block
}

func (bpr *bpRequester) getPeerID() types.NodeID {
func (bpr *bpRequester) getPeerID() p2p.NodeID {
	bpr.mtx.Lock()
	defer bpr.mtx.Unlock()
	return bpr.peerID
@@ -614,7 +581,7 @@ func (bpr *bpRequester) reset() {
// Tells bpRequester to pick another peer and try again.
// NOTE: Nonblocking, and does nothing if another redo
// was already requested.
func (bpr *bpRequester) redo(peerID types.NodeID) {
func (bpr *bpRequester) redo(peerID p2p.NodeID) {
	select {
	case bpr.redoCh <- peerID:
	default:
@@ -2,7 +2,6 @@ package v0

import (
	"fmt"
	mrand "math/rand"
	"testing"
	"time"

@@ -11,6 +10,7 @@ import (

	"github.com/tendermint/tendermint/libs/log"
	tmrand "github.com/tendermint/tendermint/libs/rand"
	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/types"
)

@@ -19,7 +19,7 @@ func init() {
}

type testPeer struct {
	id        types.NodeID
	id        p2p.NodeID
	base      int64
	height    int64
	inputChan chan inputData // make sure each peer's data is sequential
@@ -49,7 +49,7 @@ func (p testPeer) simulateInput(input inputData) {
	// input.t.Logf("Added block from peer %v (height: %v)", input.request.PeerID, input.request.Height)
}

type testPeers map[types.NodeID]testPeer
type testPeers map[p2p.NodeID]testPeer

func (ps testPeers) start() {
	for _, v := range ps {
@@ -66,8 +66,8 @@ func (ps testPeers) stop() {
func makePeers(numPeers int, minHeight, maxHeight int64) testPeers {
	peers := make(testPeers, numPeers)
	for i := 0; i < numPeers; i++ {
		peerID := types.NodeID(tmrand.Str(12))
		height := minHeight + mrand.Int63n(maxHeight-minHeight)
		peerID := p2p.NodeID(tmrand.Str(12))
		height := minHeight + tmrand.Int63n(maxHeight-minHeight)
		base := minHeight + int64(i)
		if base > height {
			base = height
@@ -182,7 +182,7 @@ func TestBlockPoolTimeout(t *testing.T) {

	// Pull from channels
	counter := 0
	timedOut := map[types.NodeID]struct{}{}
	timedOut := map[p2p.NodeID]struct{}{}
	for {
		select {
		case err := <-errorsCh:
@@ -203,7 +203,7 @@ func TestBlockPoolTimeout(t *testing.T) {
func TestBlockPoolRemovePeer(t *testing.T) {
	peers := make(testPeers, 10)
	for i := 0; i < 10; i++ {
		peerID := types.NodeID(fmt.Sprintf("%d", i+1))
		peerID := p2p.NodeID(fmt.Sprintf("%d", i+1))
		height := int64(i + 1)
		peers[peerID] = testPeer{peerID, 0, height, make(chan inputData)}
	}
@@ -227,10 +227,10 @@ func TestBlockPoolRemovePeer(t *testing.T) {
	assert.EqualValues(t, 10, pool.MaxPeerHeight())

	// remove a non-existent peer
	assert.NotPanics(t, func() { pool.RemovePeer(types.NodeID("Superman")) })
	assert.NotPanics(t, func() { pool.RemovePeer(p2p.NodeID("Superman")) })

	// remove peer with biggest height
	pool.RemovePeer(types.NodeID("10"))
	pool.RemovePeer(p2p.NodeID("10"))
	assert.EqualValues(t, 9, pool.MaxPeerHeight())

	// remove all peers
@@ -2,19 +2,16 @@ package v0

import (
	"fmt"
	"runtime/debug"
	"sync"
	"time"

	"github.com/tendermint/tendermint/internal/blocksync"
	"github.com/tendermint/tendermint/internal/consensus"
	"github.com/tendermint/tendermint/internal/p2p"
	sm "github.com/tendermint/tendermint/internal/state"
	"github.com/tendermint/tendermint/internal/store"
	bc "github.com/tendermint/tendermint/blockchain"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/libs/service"
	tmsync "github.com/tendermint/tendermint/libs/sync"
	bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"
	"github.com/tendermint/tendermint/p2p"
	bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
	sm "github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/store"
	"github.com/tendermint/tendermint/types"
)

@@ -29,23 +26,22 @@ var (
	// TODO: Remove once p2p refactor is complete.
	// ref: https://github.com/tendermint/tendermint/issues/5670
	ChannelShims = map[p2p.ChannelID]*p2p.ChannelDescriptorShim{
		BlockSyncChannel: {
		BlockchainChannel: {
			MsgType: new(bcproto.Message),
			Descriptor: &p2p.ChannelDescriptor{
				ID:                  byte(BlockSyncChannel),
				ID:                  byte(BlockchainChannel),
				Priority:            5,
				SendQueueCapacity:   1000,
				RecvBufferCapacity:  1024,
				RecvMessageCapacity: blocksync.MaxMsgSize,
				MaxSendBytes:        100,
				RecvBufferCapacity:  50 * 4096,
				RecvMessageCapacity: bc.MaxMsgSize,
			},
		},
	}
)

const (
	// BlockSyncChannel is a channel for blocks and status updates
	BlockSyncChannel = p2p.ChannelID(0x40)
	// BlockchainChannel is a channel for blocks and status updates
	BlockchainChannel = p2p.ChannelID(0x40)

	trySyncIntervalMS = 10

@@ -60,21 +56,21 @@ const (
)

type consensusReactor interface {
	// For when we switch from the block sync reactor to the consensus
	// For when we switch from the blockchain reactor and fast sync to the consensus
	// machine.
	SwitchToConsensus(state sm.State, skipWAL bool)
}

type peerError struct {
	err    error
	peerID types.NodeID
	peerID p2p.NodeID
}

func (e peerError) Error() string {
	return fmt.Sprintf("error with peer %v: %s", e.peerID, e.err.Error())
}

// Reactor handles long-term catchup syncing.
// BlockchainReactor handles long-term catchup syncing.
type Reactor struct {
	service.BaseService

@@ -85,19 +81,11 @@ type Reactor struct {
	store       *store.BlockStore
	pool        *BlockPool
	consReactor consensusReactor
	blockSync   *tmsync.AtomicBool
	fastSync    bool

	blockSyncCh *p2p.Channel
	// blockSyncOutBridgeCh defines a channel that acts as a bridge between sending Envelope
	// messages that the reactor will consume in processBlockSyncCh and receiving messages
	// from the peer updates channel and other goroutines. We do this instead of directly
	// sending on blockSyncCh.Out to avoid race conditions in the case where other goroutines
	// send Envelopes directly to the blockSyncCh.Out channel, since processBlockSyncCh
	// may close the blockSyncCh.Out channel at the same time that other goroutines send to
	// blockSyncCh.Out.
	blockSyncOutBridgeCh chan p2p.Envelope
	peerUpdates          *p2p.PeerUpdates
	closeCh              chan struct{}
	blockchainCh *p2p.Channel
	peerUpdates  *p2p.PeerUpdatesCh
	closeCh      chan struct{}

	requestsCh <-chan BlockRequest
	errorsCh   <-chan peerError
@@ -106,10 +94,6 @@ type Reactor struct {
	// requestRoutine spawned goroutines when stopping the reactor and before
	// stopping the p2p Channel(s).
	poolWG sync.WaitGroup

	metrics *consensus.Metrics

	syncStartTime time.Time
}
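
The bridge-channel comment above describes a common Go pattern for avoiding send-on-closed-channel races: producers write to a long-lived bridge channel, and a single owner goroutine forwards to the real output channel and is the only one allowed to close it. A minimal, self-contained sketch of that pattern (names are illustrative, not the reactor's actual API):

package main

import "fmt"

func main() {
	bridge := make(chan string)
	out := make(chan string)
	done := make(chan struct{})

	// The owner goroutine is the only writer to (and closer of) out,
	// so producers can never race with its close.
	go func() {
		defer close(out)
		for {
			select {
			case msg := <-bridge:
				out <- msg
			case <-done:
				return
			}
		}
	}()

	go func() { bridge <- "status-request" }() // any number of producers

	fmt.Println(<-out) // status-request
	close(done)
}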

// NewReactor returns new reactor instance.
@@ -119,10 +103,9 @@ func NewReactor(
	blockExec *sm.BlockExecutor,
	store *store.BlockStore,
	consReactor consensusReactor,
	blockSyncCh *p2p.Channel,
	peerUpdates *p2p.PeerUpdates,
	blockSync bool,
	metrics *consensus.Metrics,
	blockchainCh *p2p.Channel,
	peerUpdates *p2p.PeerUpdatesCh,
	fastSync bool,
) (*Reactor, error) {
	if state.LastBlockHeight != store.Height() {
		return nil, fmt.Errorf("state (%v) and store (%v) height mismatch", state.LastBlockHeight, store.Height())
@@ -137,23 +120,20 @@ func NewReactor(
	errorsCh := make(chan peerError, maxPeerErrBuffer) // NOTE: The capacity should be larger than the peer count.

	r := &Reactor{
		initialState:         state,
		blockExec:            blockExec,
		store:                store,
		pool:                 NewBlockPool(startHeight, requestsCh, errorsCh),
		consReactor:          consReactor,
		blockSync:            tmsync.NewBool(blockSync),
		requestsCh:           requestsCh,
		errorsCh:             errorsCh,
		blockSyncCh:          blockSyncCh,
		blockSyncOutBridgeCh: make(chan p2p.Envelope),
		peerUpdates:          peerUpdates,
		closeCh:              make(chan struct{}),
		metrics:              metrics,
		syncStartTime:        time.Time{},
		initialState: state,
		blockExec:    blockExec,
		store:        store,
		pool:         NewBlockPool(startHeight, requestsCh, errorsCh),
		consReactor:  consReactor,
		fastSync:     fastSync,
		requestsCh:   requestsCh,
		errorsCh:     errorsCh,
		blockchainCh: blockchainCh,
		peerUpdates:  peerUpdates,
		closeCh:      make(chan struct{}),
	}

	r.BaseService = *service.NewBaseService(logger, "BlockSync", r)
	r.BaseService = *service.NewBaseService(logger, "Blockchain", r)
	return r, nil
}

@@ -162,21 +142,19 @@ func NewReactor(
// messages on that p2p channel accordingly. The caller must be sure to execute
// OnStop to ensure the outbound p2p Channels are closed.
//
// If blockSync is enabled, we also start the pool and the pool processing
// If fastSync is enabled, we also start the pool and the pool processing
// goroutine. If the pool fails to start, an error is returned.
func (r *Reactor) OnStart() error {
	if r.blockSync.IsSet() {
	if r.fastSync {
		if err := r.pool.Start(); err != nil {
			return err
		}
		r.poolWG.Add(1)
		go r.requestRoutine()

		r.poolWG.Add(1)
		go r.poolRoutine(false)
	}

	go r.processBlockSyncCh()
	go r.processBlockchainCh()
	go r.processPeerUpdates()

	return nil
@@ -185,7 +163,7 @@ func (r *Reactor) OnStart() error {
// OnStop stops the reactor by signaling to all spawned goroutines to exit and
// blocking until they all exit.
func (r *Reactor) OnStop() {
	if r.blockSync.IsSet() {
	if r.fastSync {
		if err := r.pool.Stop(); err != nil {
			r.Logger.Error("failed to stop pool", "err", err)
		}
@@ -201,13 +179,13 @@ func (r *Reactor) OnStop() {
	// Wait for all p2p Channels to be closed before returning. This ensures we
	// can easily reason about synchronization of all p2p Channels and ensure no
	// panics will occur.
	<-r.blockSyncCh.Done()
	<-r.blockchainCh.Done()
	<-r.peerUpdates.Done()
}

// respondToPeer loads a block and sends it to the requesting peer, if we have it.
// Otherwise, we'll respond saying we do not have it.
func (r *Reactor) respondToPeer(msg *bcproto.BlockRequest, peerID types.NodeID) {
func (r *Reactor) respondToPeer(msg *bcproto.BlockRequest, peerID p2p.NodeID) {
	block := r.store.LoadBlock(msg.Height)
	if block != nil {
		blockProto, err := block.ToProto()
@@ -216,7 +194,7 @@ func (r *Reactor) respondToPeer(msg *bcproto.BlockRequest, peerID types.NodeID)
			return
		}

		r.blockSyncCh.Out <- p2p.Envelope{
		r.blockchainCh.Out() <- p2p.Envelope{
			To:      peerID,
			Message: &bcproto.BlockResponse{Block: blockProto},
		}
@@ -225,16 +203,16 @@ func (r *Reactor) respondToPeer(msg *bcproto.BlockRequest, peerID types.NodeID)
	}

	r.Logger.Info("peer requesting a block we do not have", "peer", peerID, "height", msg.Height)
	r.blockSyncCh.Out <- p2p.Envelope{
	r.blockchainCh.Out() <- p2p.Envelope{
		To:      peerID,
		Message: &bcproto.NoBlockResponse{Height: msg.Height},
	}
}

// handleBlockSyncMessage handles envelopes sent from peers on the
// BlockSyncChannel. It returns an error only if the Envelope.Message is unknown
// handleBlockchainMessage handles envelopes sent from peers on the
// BlockchainChannel. It returns an error only if the Envelope.Message is unknown
// for this channel. This should never be called outside of handleMessage.
func (r *Reactor) handleBlockSyncMessage(envelope p2p.Envelope) error {
func (r *Reactor) handleBlockchainMessage(envelope p2p.Envelope) error {
	logger := r.Logger.With("peer", envelope.From)

	switch msg := envelope.Message.(type) {
@@ -251,7 +229,7 @@ func (r *Reactor) handleBlockSyncMessage(envelope p2p.Envelope) error {
		r.pool.AddBlock(envelope.From, block, block.Size())

	case *bcproto.StatusRequest:
		r.blockSyncCh.Out <- p2p.Envelope{
		r.blockchainCh.Out() <- p2p.Envelope{
			To: envelope.From,
			Message: &bcproto.StatusResponse{
				Height: r.store.Height(),
@@ -279,19 +257,15 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err
	defer func() {
		if e := recover(); e != nil {
			err = fmt.Errorf("panic in processing message: %v", e)
			r.Logger.Error(
				"recovering from processing message panic",
				"err", err,
				"stack", string(debug.Stack()),
			)
			r.Logger.Error("recovering from processing message panic", "err", err)
		}
	}()

	r.Logger.Debug("received message", "message", envelope.Message, "peer", envelope.From)

	switch chID {
	case BlockSyncChannel:
		err = r.handleBlockSyncMessage(envelope)
	case BlockchainChannel:
		err = r.handleBlockchainMessage(envelope)

	default:
		err = fmt.Errorf("unknown channel ID (%d) for envelope (%v)", chID, envelope)
@@ -300,58 +274,55 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err
	return err
}

// processBlockSyncCh initiates a blocking process where we listen for and handle
// envelopes on the BlockSyncChannel and blockSyncOutBridgeCh. Any error encountered during
// message execution will result in a PeerError being sent on the BlockSyncChannel.
// When the reactor is stopped, we will catch the signal and close the p2p Channel
// processBlockchainCh initiates a blocking process where we listen for and handle
// envelopes on the BlockchainChannel. Any error encountered during message
// execution will result in a PeerError being sent on the BlockchainChannel. When
// the reactor is stopped, we will catch the signal and close the p2p Channel
// gracefully.
func (r *Reactor) processBlockSyncCh() {
	defer r.blockSyncCh.Close()
func (r *Reactor) processBlockchainCh() {
	defer r.blockchainCh.Close()

	for {
		select {
		case envelope := <-r.blockSyncCh.In:
			if err := r.handleMessage(r.blockSyncCh.ID, envelope); err != nil {
				r.Logger.Error("failed to process message", "ch_id", r.blockSyncCh.ID, "envelope", envelope, "err", err)
				r.blockSyncCh.Error <- p2p.PeerError{
					NodeID: envelope.From,
					Err:    err,
		case envelope := <-r.blockchainCh.In():
			if err := r.handleMessage(r.blockchainCh.ID(), envelope); err != nil {
				r.Logger.Error("failed to process message", "ch_id", r.blockchainCh.ID(), "envelope", envelope, "err", err)
				r.blockchainCh.Error() <- p2p.PeerError{
					PeerID:   envelope.From,
					Err:      err,
					Severity: p2p.PeerErrorSeverityLow,
				}
			}

		case envelope := <-r.blockSyncOutBridgeCh:
			r.blockSyncCh.Out <- envelope

		case <-r.closeCh:
			r.Logger.Debug("stopped listening on block sync channel; closing...")
			r.Logger.Debug("stopped listening on blockchain channel; closing...")
			return

		}
	}
}

// processPeerUpdate processes a PeerUpdate.
func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) {
	r.Logger.Debug("received peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status)
	r.Logger.Debug("received peer update", "peer", peerUpdate.PeerID, "status", peerUpdate.Status)

	// XXX: Pool#RedoRequest can sometimes give us an empty peer.
	if len(peerUpdate.NodeID) == 0 {
	if len(peerUpdate.PeerID) == 0 {
		return
	}

	switch peerUpdate.Status {
	case p2p.PeerStatusUp:
	case p2p.PeerStatusNew, p2p.PeerStatusUp:
		// send a status update to the newly added peer
		r.blockSyncOutBridgeCh <- p2p.Envelope{
			To: peerUpdate.NodeID,
		r.blockchainCh.Out() <- p2p.Envelope{
			To: peerUpdate.PeerID,
			Message: &bcproto.StatusResponse{
				Base:   r.store.Base(),
				Height: r.store.Height(),
			},
		}

	case p2p.PeerStatusDown:
		r.pool.RemovePeer(peerUpdate.NodeID)
	case p2p.PeerStatusDown, p2p.PeerStatusRemoved, p2p.PeerStatusBanned:
		r.pool.RemovePeer(peerUpdate.PeerID)
	}
}
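
processPeerUpdate treats a new/up peer as a trigger to advertise our own status, and a down/removed/banned peer as a pool removal. A compact, self-contained illustration of that dispatch (toy types, not the real p2p API):

package main

import "fmt"

type status int

const (
	statusUp status = iota
	statusDown
)

// onPeerUpdate mirrors the shape of the handler above: ignore empty IDs,
// advertise our base/height to new peers, and drop departed peers.
func onPeerUpdate(id string, st status, advertise, remove func(string)) {
	if id == "" {
		return // RedoRequest can surface an empty peer ID; ignore it
	}
	switch st {
	case statusUp:
		advertise(id)
	case statusDown:
		remove(id)
	}
}

func main() {
	onPeerUpdate("peer-1", statusUp,
		func(id string) { fmt.Println("advertise to", id) },
		func(id string) { fmt.Println("remove", id) })
}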

@@ -373,10 +344,10 @@ func (r *Reactor) processPeerUpdates() {
	}
}

// SwitchToBlockSync is called by the state sync reactor when switching to fast
// SwitchToFastSync is called by the state sync reactor when switching to fast
// sync.
func (r *Reactor) SwitchToBlockSync(state sm.State) error {
	r.blockSync.Set()
func (r *Reactor) SwitchToFastSync(state sm.State) error {
	r.fastSync = true
	r.initialState = state
	r.pool.height = state.LastBlockHeight + 1

@@ -384,11 +355,6 @@ func (r *Reactor) SwitchToBlockSync(state sm.State) error {
		return err
	}

	r.syncStartTime = time.Now()

	r.poolWG.Add(1)
	go r.requestRoutine()

	r.poolWG.Add(1)
	go r.poolRoutine(true)

@@ -399,6 +365,7 @@ func (r *Reactor) requestRoutine() {
	statusUpdateTicker := time.NewTicker(statusUpdateIntervalSeconds * time.Second)
	defer statusUpdateTicker.Stop()

	r.poolWG.Add(1)
	defer r.poolWG.Done()

	for {
@@ -410,15 +377,16 @@ func (r *Reactor) requestRoutine() {
			return

		case request := <-r.requestsCh:
			r.blockSyncOutBridgeCh <- p2p.Envelope{
			r.blockchainCh.Out() <- p2p.Envelope{
				To:      request.PeerID,
				Message: &bcproto.BlockRequest{Height: request.Height},
			}

		case pErr := <-r.errorsCh:
			r.blockSyncCh.Error <- p2p.PeerError{
				NodeID: pErr.peerID,
				Err:    pErr.err,
			r.blockchainCh.Error() <- p2p.PeerError{
				PeerID:   pErr.peerID,
				Err:      pErr.err,
				Severity: p2p.PeerErrorSeverityLow,
			}

		case <-statusUpdateTicker.C:
@@ -427,7 +395,7 @@ func (r *Reactor) requestRoutine() {
			go func() {
				defer r.poolWG.Done()

				r.blockSyncOutBridgeCh <- p2p.Envelope{
				r.blockchainCh.Out() <- p2p.Envelope{
					Broadcast: true,
					Message:   &bcproto.StatusRequest{},
				}
@@ -459,6 +427,8 @@ func (r *Reactor) poolRoutine(stateSynced bool) {
	defer trySyncTicker.Stop()
	defer switchToConsensusTicker.Stop()

	go r.requestRoutine()

	defer r.poolWG.Done()

FOR_LOOP:
@@ -498,8 +468,6 @@ FOR_LOOP:
				r.Logger.Error("failed to stop pool", "err", err)
			}

			r.blockSync.UnSet()

			if r.consReactor != nil {
				r.consReactor.SwitchToConsensus(state, blocksSynced > 0 || stateSynced)
			}
@@ -556,16 +524,18 @@ FOR_LOOP:
				// NOTE: We've already removed the peer's request, but we still need
				// to clean up the rest.
				peerID := r.pool.RedoRequest(first.Height)
				r.blockSyncCh.Error <- p2p.PeerError{
					NodeID: peerID,
					Err:    err,
				r.blockchainCh.Error() <- p2p.PeerError{
					PeerID:   peerID,
					Err:      err,
					Severity: p2p.PeerErrorSeverityLow,
				}

				peerID2 := r.pool.RedoRequest(second.Height)
				if peerID2 != peerID {
					r.blockSyncCh.Error <- p2p.PeerError{
						NodeID: peerID2,
						Err:    err,
					r.blockchainCh.Error() <- p2p.PeerError{
						PeerID:   peerID2,
						Err:      err,
						Severity: p2p.PeerErrorSeverityLow,
					}
				}

@@ -580,20 +550,18 @@ FOR_LOOP:

				// TODO: Same thing for app - but we would need a way to get the hash
				// without persisting the state.
				state, err = r.blockExec.ApplyBlock(state, firstID, first)
				state, _, err = r.blockExec.ApplyBlock(state, firstID, first)
				if err != nil {
					// TODO: This is bad, are we zombie?
					panic(fmt.Sprintf("failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
				}

				r.metrics.RecordConsMetrics(first)

				blocksSynced++

				if blocksSynced%100 == 0 {
					lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
					r.Logger.Info(
						"block sync rate",
						"fast sync rate",
						"height", r.pool.height,
						"max_peer_height", r.pool.MaxPeerHeight(),
						"blocks/s", lastRate,
@@ -607,36 +575,6 @@ FOR_LOOP:

		case <-r.closeCh:
			break FOR_LOOP
		case <-r.pool.Quit():
			break FOR_LOOP
		}
	}
}

func (r *Reactor) GetMaxPeerBlockHeight() int64 {
	return r.pool.MaxPeerHeight()
}

func (r *Reactor) GetTotalSyncedTime() time.Duration {
	if !r.blockSync.IsSet() || r.syncStartTime.IsZero() {
		return time.Duration(0)
	}
	return time.Since(r.syncStartTime)
}

func (r *Reactor) GetRemainingSyncTime() time.Duration {
	if !r.blockSync.IsSet() {
		return time.Duration(0)
	}

	targetSyncs := r.pool.targetSyncBlocks()
	currentSyncs := r.store.Height() - r.pool.startHeight + 1
	lastSyncRate := r.pool.getLastSyncRate()
	if currentSyncs < 0 || lastSyncRate < 0.001 {
		return time.Duration(0)
	}

	remain := float64(targetSyncs-currentSyncs) / lastSyncRate

	return time.Duration(int64(remain * float64(time.Second)))
}
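
GetRemainingSyncTime is just the number of remaining blocks divided by the smoothed sync rate. A tiny self-contained check of that arithmetic (the numbers are made up):

package main

import (
	"fmt"
	"time"
)

func main() {
	// Hypothetical snapshot: 10,000 blocks to sync in total, 4,000 already
	// applied, and a smoothed rate of 120 blocks/s from the pool's filter.
	target, current, rate := int64(10000), int64(4000), 120.0

	remain := float64(target-current) / rate // seconds
	fmt.Println(time.Duration(int64(remain * float64(time.Second)))) // 50s
}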
405
blockchain/v0/reactor_test.go
Normal file
@@ -0,0 +1,405 @@
package v0

import (
	"fmt"
	"math/rand"
	"os"
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	abci "github.com/tendermint/tendermint/abci/types"
	cfg "github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/mempool/mock"
	"github.com/tendermint/tendermint/p2p"
	bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
	"github.com/tendermint/tendermint/proxy"
	sm "github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/store"
	"github.com/tendermint/tendermint/types"
	dbm "github.com/tendermint/tm-db"
)

var rng = rand.New(rand.NewSource(time.Now().UnixNano()))

type reactorTestSuite struct {
	reactor *Reactor
	app     proxy.AppConns

	peerID p2p.NodeID

	blockchainChannel   *p2p.Channel
	blockchainInCh      chan p2p.Envelope
	blockchainOutCh     chan p2p.Envelope
	blockchainPeerErrCh chan p2p.PeerError

	peerUpdatesCh chan p2p.PeerUpdate
	peerUpdates   *p2p.PeerUpdatesCh
}

func setup(
	t *testing.T,
	genDoc *types.GenesisDoc,
	privVals []types.PrivValidator,
	maxBlockHeight int64,
	chBuf uint,
) *reactorTestSuite {
	t.Helper()

	require.Len(t, privVals, 1, "only one validator can be supported")

	app := &abci.BaseApplication{}
	cc := proxy.NewLocalClientCreator(app)

	proxyApp := proxy.NewAppConns(cc)
	require.NoError(t, proxyApp.Start())

	blockDB := dbm.NewMemDB()
	stateDB := dbm.NewMemDB()
	stateStore := sm.NewStore(stateDB)
	blockStore := store.NewBlockStore(blockDB)

	state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
	require.NoError(t, err)

	fastSync := true
	db := dbm.NewMemDB()
	stateStore = sm.NewStore(db)

	blockExec := sm.NewBlockExecutor(
		stateStore,
		log.TestingLogger(),
		proxyApp.Consensus(),
		mock.Mempool{},
		sm.EmptyEvidencePool{},
	)
	require.NoError(t, stateStore.Save(state))

	for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ {
		lastCommit := types.NewCommit(blockHeight-1, 0, types.BlockID{}, nil)

		if blockHeight > 1 {
			lastBlockMeta := blockStore.LoadBlockMeta(blockHeight - 1)
			lastBlock := blockStore.LoadBlock(blockHeight - 1)

			vote, err := types.MakeVote(
				lastBlock.Header.Height,
				lastBlockMeta.BlockID,
				state.Validators,
				privVals[0],
				lastBlock.Header.ChainID,
				time.Now(),
			)
			require.NoError(t, err)

			lastCommit = types.NewCommit(
				vote.Height,
				vote.Round,
				lastBlockMeta.BlockID,
				[]types.CommitSig{vote.CommitSig()},
			)
		}

		thisBlock := makeBlock(blockHeight, state, lastCommit)
		thisParts := thisBlock.MakePartSet(types.BlockPartSizeBytes)
		blockID := types.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()}

		state, _, err = blockExec.ApplyBlock(state, blockID, thisBlock)
		require.NoError(t, err)

		blockStore.SaveBlock(thisBlock, thisParts, lastCommit)
	}

	pID := make([]byte, 16)
	_, err = rng.Read(pID)
	require.NoError(t, err)

	peerUpdatesCh := make(chan p2p.PeerUpdate, chBuf)

	rts := &reactorTestSuite{
		app:                 proxyApp,
		blockchainInCh:      make(chan p2p.Envelope, chBuf),
		blockchainOutCh:     make(chan p2p.Envelope, chBuf),
		blockchainPeerErrCh: make(chan p2p.PeerError, chBuf),
		peerUpdatesCh:       peerUpdatesCh,
		peerUpdates:         p2p.NewPeerUpdates(peerUpdatesCh),
		peerID:              p2p.NodeID(fmt.Sprintf("%x", pID)),
	}

	rts.blockchainChannel = p2p.NewChannel(
		BlockchainChannel,
		new(bcproto.Message),
		rts.blockchainInCh,
		rts.blockchainOutCh,
		rts.blockchainPeerErrCh,
	)

	reactor, err := NewReactor(
		log.TestingLogger().With("module", "blockchain", "node", rts.peerID),
		state.Copy(),
		blockExec,
		blockStore,
		nil,
		rts.blockchainChannel,
		rts.peerUpdates,
		fastSync,
	)

	require.NoError(t, err)
	rts.reactor = reactor

	require.NoError(t, rts.reactor.Start())
	require.True(t, rts.reactor.IsRunning())

	t.Cleanup(func() {
		require.NoError(t, rts.reactor.Stop())
		require.NoError(t, rts.app.Stop())
		require.False(t, rts.reactor.IsRunning())
	})

	return rts
}

func simulateRouter(primary *reactorTestSuite, suites []*reactorTestSuite, dropChErr bool) {
	// create a mapping for efficient suite lookup by peer ID
	suitesByPeerID := make(map[p2p.NodeID]*reactorTestSuite)
	for _, suite := range suites {
		suitesByPeerID[suite.peerID] = suite
	}

	// Simulate a router by listening for all outbound envelopes and proxying the
	// envelope to the respective peer (suite).
	go func() {
		for envelope := range primary.blockchainOutCh {
			if envelope.Broadcast {
				for _, s := range suites {
					// broadcast to everyone except source
					if s.peerID != primary.peerID {
						s.blockchainInCh <- p2p.Envelope{
							From:    primary.peerID,
							To:      s.peerID,
							Message: envelope.Message,
						}
					}
				}
			} else {
				suitesByPeerID[envelope.To].blockchainInCh <- p2p.Envelope{
					From:    primary.peerID,
					To:      envelope.To,
					Message: envelope.Message,
				}
			}
		}
	}()

	go func() {
		for pErr := range primary.blockchainPeerErrCh {
			if dropChErr {
				primary.reactor.Logger.Debug("dropped peer error", "err", pErr.Err)
			} else {
				primary.peerUpdatesCh <- p2p.PeerUpdate{
					PeerID: pErr.PeerID,
					Status: p2p.PeerStatusRemoved,
				}
			}
		}
	}()
}

func TestReactor_AbruptDisconnect(t *testing.T) {
	config := cfg.ResetTestRoot("blockchain_reactor_test")
	defer os.RemoveAll(config.RootDir)

	genDoc, privVals := randGenesisDoc(config, 1, false, 30)
	maxBlockHeight := int64(64)
	testSuites := []*reactorTestSuite{
		setup(t, genDoc, privVals, maxBlockHeight, 0),
		setup(t, genDoc, privVals, 0, 0),
	}

	require.Equal(t, maxBlockHeight, testSuites[0].reactor.store.Height())

	for _, s := range testSuites {
		simulateRouter(s, testSuites, true)

		// connect reactor to every other reactor
		for _, ss := range testSuites {
			if s.peerID != ss.peerID {
				s.peerUpdatesCh <- p2p.PeerUpdate{
					Status: p2p.PeerStatusUp,
					PeerID: ss.peerID,
				}
			}
		}
	}

	secondaryPool := testSuites[1].reactor.pool
	require.Eventually(
		t,
		func() bool {
			height, _, _ := secondaryPool.GetStatus()
			return secondaryPool.MaxPeerHeight() > 0 && height > 0 && height < 10
		},
		10*time.Second,
		10*time.Millisecond,
		"expected node to be partially synced",
	)

	// Remove the synced node from the syncing node, which should not result in
	// any deadlocks or race conditions within the context of poolRoutine.
	testSuites[1].peerUpdatesCh <- p2p.PeerUpdate{
		Status: p2p.PeerStatusDown,
		PeerID: testSuites[0].peerID,
	}
}

func TestReactor_NoBlockResponse(t *testing.T) {
	config := cfg.ResetTestRoot("blockchain_reactor_test")
	defer os.RemoveAll(config.RootDir)

	genDoc, privVals := randGenesisDoc(config, 1, false, 30)
	maxBlockHeight := int64(65)
	testSuites := []*reactorTestSuite{
		setup(t, genDoc, privVals, maxBlockHeight, 0),
		setup(t, genDoc, privVals, 0, 0),
	}

	require.Equal(t, maxBlockHeight, testSuites[0].reactor.store.Height())

	for _, s := range testSuites {
		simulateRouter(s, testSuites, true)

		// connect reactor to every other reactor
		for _, ss := range testSuites {
			if s.peerID != ss.peerID {
				s.peerUpdatesCh <- p2p.PeerUpdate{
					Status: p2p.PeerStatusUp,
					PeerID: ss.peerID,
				}
			}
		}
	}

	testCases := []struct {
		height   int64
		existent bool
	}{
		{maxBlockHeight + 2, false},
		{10, true},
		{1, true},
		{100, false},
	}

	secondaryPool := testSuites[1].reactor.pool
	require.Eventually(
		t,
		func() bool { return secondaryPool.MaxPeerHeight() > 0 && secondaryPool.IsCaughtUp() },
		10*time.Second,
		10*time.Millisecond,
		"expected node to be fully synced",
	)

	for _, tc := range testCases {
		block := testSuites[1].reactor.store.LoadBlock(tc.height)
		if tc.existent {
			require.True(t, block != nil)
		} else {
			require.Nil(t, block)
		}
	}
}

func TestReactor_BadBlockStopsPeer(t *testing.T) {
	config := cfg.ResetTestRoot("blockchain_reactor_test")
	defer os.RemoveAll(config.RootDir)

	maxBlockHeight := int64(48)
	genDoc, privVals := randGenesisDoc(config, 1, false, 30)

	testSuites := []*reactorTestSuite{
		setup(t, genDoc, privVals, maxBlockHeight, 1000), // fully synced node
		setup(t, genDoc, privVals, 0, 1000),
		setup(t, genDoc, privVals, 0, 1000),
		setup(t, genDoc, privVals, 0, 1000),
		setup(t, genDoc, privVals, 0, 1000), // new node
	}

	require.Equal(t, maxBlockHeight, testSuites[0].reactor.store.Height())

	for _, s := range testSuites[:len(testSuites)-1] {
		simulateRouter(s, testSuites, true)

		// connect reactor to every other reactor except the new node
		for _, ss := range testSuites[:len(testSuites)-1] {
			if s.peerID != ss.peerID {
				s.peerUpdatesCh <- p2p.PeerUpdate{
					Status: p2p.PeerStatusUp,
					PeerID: ss.peerID,
				}
			}
		}
	}

	require.Eventually(
		t,
		func() bool {
			caughtUp := true
			for _, s := range testSuites[1 : len(testSuites)-1] {
				if s.reactor.pool.MaxPeerHeight() == 0 || !s.reactor.pool.IsCaughtUp() {
					caughtUp = false
				}
			}

			return caughtUp
		},
		10*time.Minute,
		10*time.Millisecond,
		"expected all nodes to be fully synced",
	)

	for _, s := range testSuites[:len(testSuites)-1] {
		require.Len(t, s.reactor.pool.peers, 3)
	}

	// Mark testSuites[3] as an invalid peer, which will cause newSuite to
	// disconnect from this peer.
	otherGenDoc, otherPrivVals := randGenesisDoc(config, 1, false, 30)
	otherSuite := setup(t, otherGenDoc, otherPrivVals, maxBlockHeight, 0)
	testSuites[3].reactor.store = otherSuite.reactor.store

	// add a fake peer just so we do not wait for the consensus ticker to timeout
	otherSuite.reactor.pool.SetPeerRange("00ff", 10, 10)

	// start the new peer's faux router
	newSuite := testSuites[len(testSuites)-1]
	simulateRouter(newSuite, testSuites, false)

	// connect all nodes to the new peer
	for _, s := range testSuites[:len(testSuites)-1] {
		newSuite.peerUpdatesCh <- p2p.PeerUpdate{
			Status: p2p.PeerStatusUp,
			PeerID: s.peerID,
		}
	}

	// wait for the new peer to catch up and become fully synced
	require.Eventually(
		t,
		func() bool { return newSuite.reactor.pool.MaxPeerHeight() > 0 && newSuite.reactor.pool.IsCaughtUp() },
		10*time.Minute,
		10*time.Millisecond,
		"expected new node to be fully synced",
	)

	require.Eventuallyf(
		t,
		func() bool { return len(newSuite.reactor.pool.peers) < len(testSuites)-1 },
		10*time.Minute,
		10*time.Millisecond,
		"invalid number of peers; expected < %d, got: %d",
		len(testSuites)-1,
		len(newSuite.reactor.pool.peers),
	)
}
50
blockchain/v0/test_util.go
Normal file
@@ -0,0 +1,50 @@
package v0

import (
	"sort"

	cfg "github.com/tendermint/tendermint/config"
	sm "github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/types"
	tmtime "github.com/tendermint/tendermint/types/time"
)

func randGenesisDoc(
	config *cfg.Config,
	numValidators int,
	randPower bool,
	minPower int64,
) (*types.GenesisDoc, []types.PrivValidator) {
	validators := make([]types.GenesisValidator, numValidators)
	privValidators := make([]types.PrivValidator, numValidators)

	for i := 0; i < numValidators; i++ {
		val, privVal := types.RandValidator(randPower, minPower)
		validators[i] = types.GenesisValidator{
			PubKey: val.PubKey,
			Power:  val.VotingPower,
		}

		privValidators[i] = privVal
	}

	sort.Sort(types.PrivValidatorsByAddress(privValidators))

	return &types.GenesisDoc{
		GenesisTime: tmtime.Now(),
		ChainID:     config.ChainID(),
		Validators:  validators,
	}, privValidators
}

func makeTxs(height int64) (txs []types.Tx) {
	for i := 0; i < 10; i++ {
		txs = append(txs, types.Tx([]byte{byte(height), byte(i)}))
	}
	return txs
}

func makeBlock(height int64, state sm.State, lastCommit *types.Commit) *types.Block {
	block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, nil, state.Validators.GetProposer().Address)
	return block
}
187
blockchain/v2/io.go
Normal file
@@ -0,0 +1,187 @@
package v2

import (
	"errors"

	"github.com/gogo/protobuf/proto"
	"github.com/tendermint/tendermint/p2p"
	bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
	"github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/types"
)

var (
	errPeerQueueFull = errors.New("peer queue full")
)

type iIO interface {
	sendBlockRequest(peer p2p.Peer, height int64) error
	sendBlockToPeer(block *types.Block, peer p2p.Peer) error
	sendBlockNotFound(height int64, peer p2p.Peer) error
	sendStatusResponse(base, height int64, peer p2p.Peer) error

	sendStatusRequest(peer p2p.Peer) error
	broadcastStatusRequest() error

	trySwitchToConsensus(state state.State, skipWAL bool) bool
}
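
For testing, an iIO implementation need not touch the network at all. A hypothetical no-op stub (not part of the package) that satisfies the interface above, the kind of thing a scheduler or processor test might plug in:

package v2

import (
	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/types"
)

// nopIO satisfies iIO but performs no IO; useful for exercising the
// routines without a live switch. Hypothetical, for illustration only.
type nopIO struct{}

func (nopIO) sendBlockRequest(peer p2p.Peer, height int64) error         { return nil }
func (nopIO) sendBlockToPeer(block *types.Block, peer p2p.Peer) error    { return nil }
func (nopIO) sendBlockNotFound(height int64, peer p2p.Peer) error        { return nil }
func (nopIO) sendStatusResponse(base, height int64, peer p2p.Peer) error { return nil }
func (nopIO) sendStatusRequest(peer p2p.Peer) error                      { return nil }
func (nopIO) broadcastStatusRequest() error                              { return nil }
func (nopIO) trySwitchToConsensus(state state.State, skipWAL bool) bool  { return false }

var _ iIO = nopIO{}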

type switchIO struct {
	sw *p2p.Switch
}

func newSwitchIo(sw *p2p.Switch) *switchIO {
	return &switchIO{
		sw: sw,
	}
}

const (
	// BlockchainChannel is a channel for blocks and status updates (`BlockStore` height)
	BlockchainChannel = byte(0x40)
)

type consensusReactor interface {
	// for when we switch from blockchain reactor and fast sync to
	// the consensus machine
	SwitchToConsensus(state state.State, skipWAL bool)
}

func (sio *switchIO) sendBlockRequest(peer p2p.Peer, height int64) error {
	msgProto := &bcproto.Message{
		Sum: &bcproto.Message_BlockRequest{
			BlockRequest: &bcproto.BlockRequest{
				Height: height,
			},
		},
	}

	msgBytes, err := proto.Marshal(msgProto)
	if err != nil {
		return err
	}

	queued := peer.TrySend(BlockchainChannel, msgBytes)
	if !queued {
		return errPeerQueueFull
	}
	return nil
}
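
Every send method in this file repeats the same marshal-then-TrySend shape. A hedged, self-contained sketch of that shape in isolation (the Sender interface is a stand-in for p2p.Peer, not the real API):

package main

import (
	"errors"
	"fmt"
)

var errQueueFull = errors.New("peer queue full")

// Sender is a stand-in for the peer's non-blocking send surface.
type Sender interface {
	TrySend(chID byte, msg []byte) bool
}

// trySendBytes mirrors the pattern above: attempt a non-blocking enqueue
// and surface back-pressure as an error instead of blocking the caller.
func trySendBytes(peer Sender, chID byte, msg []byte) error {
	if !peer.TrySend(chID, msg) {
		return errQueueFull
	}
	return nil
}

type fullPeer struct{}

func (fullPeer) TrySend(byte, []byte) bool { return false }

func main() {
	fmt.Println(trySendBytes(fullPeer{}, 0x40, []byte("request"))) // peer queue full
}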

func (sio *switchIO) sendStatusResponse(base int64, height int64, peer p2p.Peer) error {
	msgProto := &bcproto.Message{
		Sum: &bcproto.Message_StatusResponse{
			StatusResponse: &bcproto.StatusResponse{
				Height: height,
				Base:   base,
			},
		},
	}

	msgBytes, err := proto.Marshal(msgProto)
	if err != nil {
		return err
	}

	if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued {
		return errPeerQueueFull
	}

	return nil
}

func (sio *switchIO) sendBlockToPeer(block *types.Block, peer p2p.Peer) error {
	if block == nil {
		panic("trying to send nil block")
	}

	bpb, err := block.ToProto()
	if err != nil {
		return err
	}

	msgProto := &bcproto.Message{
		Sum: &bcproto.Message_BlockResponse{
			BlockResponse: &bcproto.BlockResponse{
				Block: bpb,
			},
		},
	}

	msgBytes, err := proto.Marshal(msgProto)
	if err != nil {
		return err
	}

	if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued {
		return errPeerQueueFull
	}

	return nil
}

func (sio *switchIO) sendBlockNotFound(height int64, peer p2p.Peer) error {
	msgProto := &bcproto.Message{
		Sum: &bcproto.Message_NoBlockResponse{
			NoBlockResponse: &bcproto.NoBlockResponse{
				Height: height,
			},
		},
	}

	msgBytes, err := proto.Marshal(msgProto)
	if err != nil {
		return err
	}

	if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued {
		return errPeerQueueFull
	}

	return nil
}

func (sio *switchIO) trySwitchToConsensus(state state.State, skipWAL bool) bool {
	conR, ok := sio.sw.Reactor("CONSENSUS").(consensusReactor)
	if ok {
		conR.SwitchToConsensus(state, skipWAL)
	}
	return ok
}

func (sio *switchIO) sendStatusRequest(peer p2p.Peer) error {
	msgProto := &bcproto.Message{
		Sum: &bcproto.Message_StatusRequest{
			StatusRequest: &bcproto.StatusRequest{},
		},
	}

	msgBytes, err := proto.Marshal(msgProto)
	if err != nil {
		return err
	}

	if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued {
		return errPeerQueueFull
	}

	return nil
}

func (sio *switchIO) broadcastStatusRequest() error {
	msgProto := &bcproto.Message{
		Sum: &bcproto.Message_StatusRequest{
			StatusRequest: &bcproto.StatusRequest{},
		},
	}

	msgBytes, err := proto.Marshal(msgProto)
	if err != nil {
		return err
	}

	// XXX: maybe we should use an io specific peer list here
	sio.sw.Broadcast(BlockchainChannel, msgBytes)

	return nil
}
125
blockchain/v2/metrics.go
Normal file
@@ -0,0 +1,125 @@
package v2

import (
	"github.com/go-kit/kit/metrics"
	"github.com/go-kit/kit/metrics/discard"
	"github.com/go-kit/kit/metrics/prometheus"
	stdprometheus "github.com/prometheus/client_golang/prometheus"
)

const (
	// MetricsSubsystem is a subsystem shared by all metrics exposed by this
	// package.
	MetricsSubsystem = "blockchain"
)

// Metrics contains metrics exposed by this package.
type Metrics struct {
	// events_in
	EventsIn metrics.Counter
	// events_handled
	EventsHandled metrics.Counter
	// events_out
	EventsOut metrics.Counter
	// errors_in
	ErrorsIn metrics.Counter
	// errors_handled
	ErrorsHandled metrics.Counter
	// errors_out
	ErrorsOut metrics.Counter
	// events_shed
	EventsShed metrics.Counter
	// events_sent
	EventsSent metrics.Counter
	// errors_sent
	ErrorsSent metrics.Counter
	// errors_shed
	ErrorsShed metrics.Counter
}

// PrometheusMetrics returns metrics for in and out events, errors, etc. handled by routines.
// Can we burn in the routine name here?
func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
	labels := []string{}
	for i := 0; i < len(labelsAndValues); i += 2 {
		labels = append(labels, labelsAndValues[i])
	}
	return &Metrics{
		EventsIn: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: MetricsSubsystem,
			Name:      "events_in",
			Help:      "Events read from the channel.",
		}, labels).With(labelsAndValues...),
		EventsHandled: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: MetricsSubsystem,
			Name:      "events_handled",
			Help:      "Events handled.",
		}, labels).With(labelsAndValues...),
		EventsOut: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: MetricsSubsystem,
			Name:      "events_out",
			Help:      "Events output from routine.",
		}, labels).With(labelsAndValues...),
		ErrorsIn: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: MetricsSubsystem,
			Name:      "errors_in",
			Help:      "Errors read from the channel.",
		}, labels).With(labelsAndValues...),
		ErrorsHandled: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: MetricsSubsystem,
			Name:      "errors_handled",
			Help:      "Errors handled.",
		}, labels).With(labelsAndValues...),
		ErrorsOut: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: MetricsSubsystem,
			Name:      "errors_out",
			Help:      "Errors output from routine.",
		}, labels).With(labelsAndValues...),
		ErrorsSent: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: MetricsSubsystem,
			Name:      "errors_sent",
			Help:      "Errors sent to routine.",
		}, labels).With(labelsAndValues...),
		ErrorsShed: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: MetricsSubsystem,
			Name:      "errors_shed",
			Help:      "Errors dropped from sending.",
		}, labels).With(labelsAndValues...),
		EventsSent: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: MetricsSubsystem,
			Name:      "events_sent",
			Help:      "Events sent to routine.",
		}, labels).With(labelsAndValues...),
		EventsShed: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: MetricsSubsystem,
			Name:      "events_shed",
			Help:      "Events dropped from sending.",
		}, labels).With(labelsAndValues...),
	}
}
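
A short usage sketch for the constructor above. The function and field names come from this file; the label pair and the wrapping function are hypothetical illustrations:

package v2

// ExamplePrometheusMetrics is not part of the package: it shows building
// labeled counters once at startup and incrementing them from the routines
// that process events and errors.
func ExamplePrometheusMetrics() {
	m := PrometheusMetrics("tendermint", "chain_id", "test-chain")
	m.EventsIn.Add(1)
	m.EventsHandled.Add(1)
	m.ErrorsShed.Add(1)
}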

// NopMetrics returns no-op Metrics.
func NopMetrics() *Metrics {
	return &Metrics{
		EventsIn:      discard.NewCounter(),
		EventsHandled: discard.NewCounter(),
		EventsOut:     discard.NewCounter(),
		ErrorsIn:      discard.NewCounter(),
		ErrorsHandled: discard.NewCounter(),
		ErrorsOut:     discard.NewCounter(),
		EventsShed:    discard.NewCounter(),
		EventsSent:    discard.NewCounter(),
		ErrorsSent:    discard.NewCounter(),
		ErrorsShed:    discard.NewCounter(),
	}
}
192
blockchain/v2/processor.go
Normal file
@@ -0,0 +1,192 @@
|
||||
package v2
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
tmState "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
// Events generated by the processor:
|
||||
// block execution failure, event will indicate the peer(s) that caused the error
|
||||
type pcBlockVerificationFailure struct {
|
||||
priorityNormal
|
||||
height int64
|
||||
firstPeerID p2p.NodeID
|
||||
secondPeerID p2p.NodeID
|
||||
}
|
||||
|
||||
func (e pcBlockVerificationFailure) String() string {
|
||||
return fmt.Sprintf("pcBlockVerificationFailure{%d 1st peer: %v, 2nd peer: %v}",
|
||||
e.height, e.firstPeerID, e.secondPeerID)
|
||||
}
|
||||
|
||||
// successful block execution
|
||||
type pcBlockProcessed struct {
|
||||
priorityNormal
|
||||
height int64
|
||||
peerID p2p.NodeID
|
||||
}
|
||||
|
||||
func (e pcBlockProcessed) String() string {
|
||||
return fmt.Sprintf("pcBlockProcessed{%d peer: %v}", e.height, e.peerID)
|
||||
}
|
||||
|
||||
// processor has finished
|
||||
type pcFinished struct {
|
||||
priorityNormal
|
||||
blocksSynced int
|
||||
tmState tmState.State
|
||||
}
|
||||
|
||||
func (p pcFinished) Error() string {
|
||||
return "finished"
|
||||
}
|
||||
|
||||
type queueItem struct {
|
||||
block *types.Block
|
||||
peerID p2p.NodeID
|
||||
}
|
||||
|
||||
type blockQueue map[int64]queueItem
|
||||
|
||||
type pcState struct {
|
||||
// blocks waiting to be processed
|
||||
queue blockQueue
|
||||
|
||||
// draining indicates that the next rProcessBlock event with a queue miss constitutes completion
|
||||
draining bool
|
||||
|
||||
// the number of blocks successfully synced by the processor
|
||||
blocksSynced int
|
||||
|
||||
// the processorContext which contains the processor dependencies
|
||||
context processorContext
|
||||
}
|
||||
|
||||
func (state *pcState) String() string {
|
||||
return fmt.Sprintf("height: %d queue length: %d draining: %v blocks synced: %d",
|
||||
state.height(), len(state.queue), state.draining, state.blocksSynced)
|
||||
}
|
||||
|
||||
// newPcState returns a pcState initialized with the last verified block enqueued
|
||||
func newPcState(context processorContext) *pcState {
|
||||
return &pcState{
|
||||
queue: blockQueue{},
|
||||
draining: false,
|
||||
blocksSynced: 0,
|
||||
context: context,
|
||||
}
|
||||
}
|
||||
|
||||
// nextTwo returns the next two unverified blocks
|
||||
func (state *pcState) nextTwo() (queueItem, queueItem, error) {
|
||||
if first, ok := state.queue[state.height()+1]; ok {
|
||||
if second, ok := state.queue[state.height()+2]; ok {
|
||||
return first, second, nil
|
||||
}
|
||||
}
|
||||
return queueItem{}, queueItem{}, fmt.Errorf("not found")
|
||||
}
|
||||
|
||||
// synced returns true when at most the last verified block remains in the queue
|
||||
func (state *pcState) synced() bool {
|
||||
return len(state.queue) <= 1
|
||||
}
|
||||
|
||||
func (state *pcState) enqueue(peerID p2p.NodeID, block *types.Block, height int64) {
|
||||
if item, ok := state.queue[height]; ok {
|
||||
panic(fmt.Sprintf(
|
||||
"duplicate block %d (%X) enqueued by processor (sent by %v; existing block %X from %v)",
|
||||
height, block.Hash(), peerID, item.block.Hash(), item.peerID))
|
||||
}
|
||||
|
||||
state.queue[height] = queueItem{block: block, peerID: peerID}
|
||||
}
|
||||
|
||||
func (state *pcState) height() int64 {
|
||||
return state.context.tmState().LastBlockHeight
|
||||
}
|
||||
|
||||
// purgePeer moves all unprocessed blocks from the queue
|
||||
func (state *pcState) purgePeer(peerID p2p.NodeID) {
    // what if height is less than state.height?
    for height, item := range state.queue {
        if item.peerID == peerID {
            delete(state.queue, height)
        }
    }
}

// handle processes FSM events
func (state *pcState) handle(event Event) (Event, error) {
    switch event := event.(type) {
    case bcResetState:
        state.context.setState(event.state)
        return noOp, nil

    case scFinishedEv:
        if state.synced() {
            return pcFinished{tmState: state.context.tmState(), blocksSynced: state.blocksSynced}, nil
        }
        state.draining = true
        return noOp, nil

    case scPeerError:
        state.purgePeer(event.peerID)
        return noOp, nil

    case scBlockReceived:
        if event.block == nil {
            return noOp, nil
        }

        // enqueue block if height is higher than state height, else ignore it
        if event.block.Height > state.height() {
            state.enqueue(event.peerID, event.block, event.block.Height)
        }
        return noOp, nil

    case rProcessBlock:
        tmState := state.context.tmState()
        firstItem, secondItem, err := state.nextTwo()
        if err != nil {
            if state.draining {
                return pcFinished{tmState: tmState, blocksSynced: state.blocksSynced}, nil
            }
            return noOp, nil
        }

        var (
            first, second = firstItem.block, secondItem.block
            firstParts    = first.MakePartSet(types.BlockPartSizeBytes)
            firstID       = types.BlockID{Hash: first.Hash(), PartSetHeader: firstParts.Header()}
        )
        // verify that the last commit of +second+ "confirms" the +first+ block
        err = state.context.verifyCommit(tmState.ChainID, firstID, first.Height, second.LastCommit)
        if err != nil {
            state.purgePeer(firstItem.peerID)
            if firstItem.peerID != secondItem.peerID {
                state.purgePeer(secondItem.peerID)
            }
            return pcBlockVerificationFailure{
                    height: first.Height, firstPeerID: firstItem.peerID, secondPeerID: secondItem.peerID},
                nil
        }

        state.context.saveBlock(first, firstParts, second.LastCommit)

        if err := state.context.applyBlock(firstID, first); err != nil {
            panic(fmt.Sprintf("failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
        }

        delete(state.queue, first.Height)
        state.blocksSynced++

        return pcBlockProcessed{height: first.Height, peerID: firstItem.peerID}, nil
    }

    return noOp, nil
}
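
The processor above is deliberately I/O-free: handle consumes one event and returns the follow-up event, so the sync logic can be driven by hand. A minimal sketch of the happy path, using the newMockProcessorContext helper from processor_context.go below (the wiring is illustrative only, not part of this change):

    // Illustrative sketch: two blocks arrive, then a process tick fires.
    ctx := newMockProcessorContext(state.State{LastBlockHeight: 0}, nil, nil)
    pc := newPcState(ctx)

    for h := int64(1); h <= 2; h++ {
        block := &types.Block{Header: types.Header{Height: h}}
        // scBlockReceived enqueues blocks above the current height.
        if _, err := pc.handle(scBlockReceived{peerID: "P1", block: block}); err != nil {
            panic(err)
        }
    }

    // rProcessBlock verifies block 1 against block 2's LastCommit, applies it,
    // and reports the result upstream.
    next, _ := pc.handle(rProcessBlock{})
    fmt.Printf("%+v\n", next) // a pcBlockProcessed with height 1 and peer "P1"
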
100 blockchain/v2/processor_context.go (new file)
@@ -0,0 +1,100 @@
package v2

import (
    "fmt"

    "github.com/tendermint/tendermint/state"
    "github.com/tendermint/tendermint/types"
)

type processorContext interface {
    applyBlock(blockID types.BlockID, block *types.Block) error
    verifyCommit(chainID string, blockID types.BlockID, height int64, commit *types.Commit) error
    saveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit)
    tmState() state.State
    setState(state.State)
}

type pContext struct {
    store   blockStore
    applier blockApplier
    state   state.State
}

func newProcessorContext(st blockStore, ex blockApplier, s state.State) *pContext {
    return &pContext{
        store:   st,
        applier: ex,
        state:   s,
    }
}

func (pc *pContext) applyBlock(blockID types.BlockID, block *types.Block) error {
    newState, _, err := pc.applier.ApplyBlock(pc.state, blockID, block)
    pc.state = newState
    return err
}

func (pc pContext) tmState() state.State {
    return pc.state
}

func (pc *pContext) setState(state state.State) {
    pc.state = state
}

func (pc pContext) verifyCommit(chainID string, blockID types.BlockID, height int64, commit *types.Commit) error {
    return pc.state.Validators.VerifyCommitLight(chainID, blockID, height, commit)
}

func (pc *pContext) saveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) {
    pc.store.SaveBlock(block, blockParts, seenCommit)
}

type mockPContext struct {
    applicationBL  []int64
    verificationBL []int64
    state          state.State
}

func newMockProcessorContext(
    state state.State,
    verificationBlackList []int64,
    applicationBlackList []int64) *mockPContext {
    return &mockPContext{
        applicationBL:  applicationBlackList,
        verificationBL: verificationBlackList,
        state:          state,
    }
}

func (mpc *mockPContext) applyBlock(blockID types.BlockID, block *types.Block) error {
    for _, h := range mpc.applicationBL {
        if h == block.Height {
            return fmt.Errorf("generic application error")
        }
    }
    mpc.state.LastBlockHeight = block.Height
    return nil
}

func (mpc *mockPContext) verifyCommit(chainID string, blockID types.BlockID, height int64, commit *types.Commit) error {
    for _, h := range mpc.verificationBL {
        if h == height {
            return fmt.Errorf("generic verification error")
        }
    }
    return nil
}

func (mpc *mockPContext) saveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) {
}

func (mpc *mockPContext) setState(state state.State) {
    mpc.state = state
}

func (mpc *mockPContext) tmState() state.State {
    return mpc.state
}
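
The two blacklists on mockPContext are what make the processor's failure paths deterministic in tests: any height in verificationBL fails verifyCommit, and any height in applicationBL fails applyBlock. A sketch of forcing a verification failure (illustrative; the real coverage lives in processor_test.go below):

    // Illustrative sketch: fail verification at height 1.
    ctx := newMockProcessorContext(state.State{LastBlockHeight: 0}, []int64{1}, nil)
    pc := newPcState(ctx)
    pc.enqueue("P1", &types.Block{Header: types.Header{Height: 1}}, 1)
    pc.enqueue("P2", &types.Block{Header: types.Header{Height: 2}}, 2)

    next, _ := pc.handle(rProcessBlock{})
    // next is a pcBlockVerificationFailure naming both peers, and every block
    // from both peers has been purged from the queue.
    _ = next
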
306 blockchain/v2/processor_test.go (new file)
@@ -0,0 +1,306 @@
package v2

import (
    "testing"

    "github.com/stretchr/testify/assert"

    "github.com/tendermint/tendermint/p2p"
    tmState "github.com/tendermint/tendermint/state"
    "github.com/tendermint/tendermint/types"
)

// pcBlock is a test helper structure with simple types. Its purpose is to help with test readability.
type pcBlock struct {
    pid    string
    height int64
}

// params is a test structure used to create processor state.
type params struct {
    height       int64
    items        []pcBlock
    blocksSynced int
    verBL        []int64
    appBL        []int64
    draining     bool
}

// makePcBlock makes an empty block.
func makePcBlock(height int64) *types.Block {
    return &types.Block{Header: types.Header{Height: height}}
}

// makeState takes test parameters and creates a specific processor state.
func makeState(p *params) *pcState {
    var (
        tmState = tmState.State{LastBlockHeight: p.height}
        context = newMockProcessorContext(tmState, p.verBL, p.appBL)
    )
    state := newPcState(context)

    for _, item := range p.items {
        state.enqueue(p2p.NodeID(item.pid), makePcBlock(item.height), item.height)
    }

    state.blocksSynced = p.blocksSynced
    state.draining = p.draining
    return state
}

func mBlockResponse(peerID p2p.NodeID, height int64) scBlockReceived {
    return scBlockReceived{
        peerID: peerID,
        block:  makePcBlock(height),
    }
}

type pcFsmMakeStateValues struct {
    currentState  *params
    event         Event
    wantState     *params
    wantNextEvent Event
    wantErr       error
    wantPanic     bool
}

type testFields struct {
    name  string
    steps []pcFsmMakeStateValues
}

func executeProcessorTests(t *testing.T, tests []testFields) {
    for _, tt := range tests {
        tt := tt
        t.Run(tt.name, func(t *testing.T) {
            var state *pcState
            for _, step := range tt.steps {
                defer func() {
                    r := recover()
                    if (r != nil) != step.wantPanic {
                        t.Errorf("recover = %v, wantPanic = %v", r, step.wantPanic)
                    }
                }()

                // First step must always initialise the currentState as state.
                if step.currentState != nil {
                    state = makeState(step.currentState)
                }
                if state == nil {
                    panic("Bad (initial?) step")
                }

                nextEvent, err := state.handle(step.event)
                t.Log(state)
                assert.Equal(t, step.wantErr, err)
                assert.Equal(t, makeState(step.wantState), state)
                assert.Equal(t, step.wantNextEvent, nextEvent)
                // The next step may use the wantState as its currentState.
                state = makeState(step.wantState)
            }
        })
    }
}

func TestRProcessPeerError(t *testing.T) {
    tests := []testFields{
        {
            name: "error for existing peer",
            steps: []pcFsmMakeStateValues{
                {
                    currentState:  &params{items: []pcBlock{{"P1", 1}, {"P2", 2}}},
                    event:         scPeerError{peerID: "P2"},
                    wantState:     &params{items: []pcBlock{{"P1", 1}}},
                    wantNextEvent: noOp,
                },
            },
        },
        {
            name: "error for unknown peer",
            steps: []pcFsmMakeStateValues{
                {
                    currentState:  &params{items: []pcBlock{{"P1", 1}, {"P2", 2}}},
                    event:         scPeerError{peerID: "P3"},
                    wantState:     &params{items: []pcBlock{{"P1", 1}, {"P2", 2}}},
                    wantNextEvent: noOp,
                },
            },
        },
    }

    executeProcessorTests(t, tests)
}

func TestPcBlockResponse(t *testing.T) {
    tests := []testFields{
        {
            name: "add one block",
            steps: []pcFsmMakeStateValues{
                {
                    currentState: &params{}, event: mBlockResponse("P1", 1),
                    wantState: &params{items: []pcBlock{{"P1", 1}}}, wantNextEvent: noOp,
                },
            },
        },

        {
            name: "add two blocks",
            steps: []pcFsmMakeStateValues{
                {
                    currentState: &params{}, event: mBlockResponse("P1", 3),
                    wantState: &params{items: []pcBlock{{"P1", 3}}}, wantNextEvent: noOp,
                },
                { // use previous wantState as currentState,
                    event:     mBlockResponse("P1", 4),
                    wantState: &params{items: []pcBlock{{"P1", 3}, {"P1", 4}}}, wantNextEvent: noOp,
                },
            },
        },
    }

    executeProcessorTests(t, tests)
}

func TestRProcessBlockSuccess(t *testing.T) {
    tests := []testFields{
        {
            name: "noop - no blocks over current height",
            steps: []pcFsmMakeStateValues{
                {
                    currentState: &params{}, event: rProcessBlock{},
                    wantState: &params{}, wantNextEvent: noOp,
                },
            },
        },
        {
            name: "noop - high new blocks",
            steps: []pcFsmMakeStateValues{
                {
                    currentState: &params{height: 5, items: []pcBlock{{"P1", 30}, {"P2", 31}}}, event: rProcessBlock{},
                    wantState: &params{height: 5, items: []pcBlock{{"P1", 30}, {"P2", 31}}}, wantNextEvent: noOp,
                },
            },
        },
        {
            name: "blocks H+1 and H+2 present",
            steps: []pcFsmMakeStateValues{
                {
                    currentState:  &params{items: []pcBlock{{"P1", 1}, {"P2", 2}}}, event: rProcessBlock{},
                    wantState:     &params{height: 1, items: []pcBlock{{"P2", 2}}, blocksSynced: 1},
                    wantNextEvent: pcBlockProcessed{height: 1, peerID: "P1"},
                },
            },
        },
        {
            name: "blocks H+1 and H+2 present after draining",
            steps: []pcFsmMakeStateValues{
                { // some contiguous blocks - on stop check draining is set
                    currentState:  &params{items: []pcBlock{{"P1", 1}, {"P2", 2}, {"P1", 4}}},
                    event:         scFinishedEv{},
                    wantState:     &params{items: []pcBlock{{"P1", 1}, {"P2", 2}, {"P1", 4}}, draining: true},
                    wantNextEvent: noOp,
                },
                {
                    event:         rProcessBlock{},
                    wantState:     &params{height: 1, items: []pcBlock{{"P2", 2}, {"P1", 4}}, blocksSynced: 1, draining: true},
                    wantNextEvent: pcBlockProcessed{height: 1, peerID: "P1"},
                },
                { // finish when H+1 or/and H+2 are missing
                    event:         rProcessBlock{},
                    wantState:     &params{height: 1, items: []pcBlock{{"P2", 2}, {"P1", 4}}, blocksSynced: 1, draining: true},
                    wantNextEvent: pcFinished{tmState: tmState.State{LastBlockHeight: 1}, blocksSynced: 1},
                },
            },
        },
    }

    executeProcessorTests(t, tests)
}

func TestRProcessBlockFailures(t *testing.T) {
    tests := []testFields{
        {
            name: "blocks H+1 and H+2 present from different peers - H+1 verification fails",
            steps: []pcFsmMakeStateValues{
                {
                    currentState:  &params{items: []pcBlock{{"P1", 1}, {"P2", 2}}, verBL: []int64{1}}, event: rProcessBlock{},
                    wantState:     &params{items: []pcBlock{}, verBL: []int64{1}},
                    wantNextEvent: pcBlockVerificationFailure{height: 1, firstPeerID: "P1", secondPeerID: "P2"},
                },
            },
        },
        {
            name: "blocks H+1 and H+2 present from same peer - H+1 applyBlock fails",
            steps: []pcFsmMakeStateValues{
                {
                    currentState: &params{items: []pcBlock{{"P1", 1}, {"P2", 2}}, appBL: []int64{1}}, event: rProcessBlock{},
                    wantState: &params{items: []pcBlock{}, appBL: []int64{1}}, wantPanic: true,
                },
            },
        },
        {
            name: "blocks H+1 and H+2 present from same peers - H+1 verification fails",
            steps: []pcFsmMakeStateValues{
                {
                    currentState: &params{height: 0, items: []pcBlock{{"P1", 1}, {"P1", 2}, {"P2", 3}},
                        verBL: []int64{1}}, event: rProcessBlock{},
                    wantState:     &params{height: 0, items: []pcBlock{{"P2", 3}}, verBL: []int64{1}},
                    wantNextEvent: pcBlockVerificationFailure{height: 1, firstPeerID: "P1", secondPeerID: "P1"},
                },
            },
        },
        {
            name: "blocks H+1 and H+2 present from different peers - H+1 applyBlock fails",
            steps: []pcFsmMakeStateValues{
                {
                    currentState: &params{items: []pcBlock{{"P1", 1}, {"P2", 2}, {"P2", 3}}, appBL: []int64{1}},
                    event:        rProcessBlock{},
                    wantState:    &params{items: []pcBlock{{"P2", 3}}, appBL: []int64{1}}, wantPanic: true,
                },
            },
        },
    }

    executeProcessorTests(t, tests)
}

func TestScFinishedEv(t *testing.T) {
    tests := []testFields{
        {
            name: "no blocks",
            steps: []pcFsmMakeStateValues{
                {
                    currentState:  &params{height: 100, items: []pcBlock{}, blocksSynced: 100}, event: scFinishedEv{},
                    wantState:     &params{height: 100, items: []pcBlock{}, blocksSynced: 100},
                    wantNextEvent: pcFinished{tmState: tmState.State{LastBlockHeight: 100}, blocksSynced: 100},
                },
            },
        },
        {
            name: "maxHeight+1 block present",
            steps: []pcFsmMakeStateValues{
                {
                    currentState: &params{height: 100, items: []pcBlock{
                        {"P1", 101}}, blocksSynced: 100}, event: scFinishedEv{},
                    wantState:     &params{height: 100, items: []pcBlock{{"P1", 101}}, blocksSynced: 100},
                    wantNextEvent: pcFinished{tmState: tmState.State{LastBlockHeight: 100}, blocksSynced: 100},
                },
            },
        },
        {
            name: "more blocks present",
            steps: []pcFsmMakeStateValues{
                {
                    currentState: &params{height: 100, items: []pcBlock{
                        {"P1", 101}, {"P1", 102}}, blocksSynced: 100}, event: scFinishedEv{},
                    wantState: &params{height: 100, items: []pcBlock{
                        {"P1", 101}, {"P1", 102}}, blocksSynced: 100, draining: true},
                    wantNextEvent: noOp,
                    wantErr:       nil,
                },
            },
        },
    }

    executeProcessorTests(t, tests)
}
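
executeProcessorTests keeps the FSM coverage declarative: each step pairs an input event with the expected resulting state and emitted event, and steps after the first implicitly reuse the previous step's wantState. Adding a scenario is just another table entry, e.g. (hypothetical case, not part of this change):

    {
        name: "ignore block at current height",
        steps: []pcFsmMakeStateValues{
            {
                currentState: &params{height: 5}, event: mBlockResponse("P1", 5),
                wantState: &params{height: 5}, wantNextEvent: noOp,
            },
        },
    }
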
590 blockchain/v2/reactor.go (new file)
@@ -0,0 +1,590 @@
package v2

import (
    "errors"
    "fmt"
    "time"

    proto "github.com/gogo/protobuf/proto"

    "github.com/tendermint/tendermint/behaviour"
    bc "github.com/tendermint/tendermint/blockchain"
    "github.com/tendermint/tendermint/libs/log"
    tmsync "github.com/tendermint/tendermint/libs/sync"
    "github.com/tendermint/tendermint/p2p"
    bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
    "github.com/tendermint/tendermint/state"
    "github.com/tendermint/tendermint/types"
)

const (
    // chBufferSize is the buffer size of all event channels.
    chBufferSize int = 1000
)

type blockStore interface {
    LoadBlock(height int64) *types.Block
    SaveBlock(*types.Block, *types.PartSet, *types.Commit)
    Base() int64
    Height() int64
}

// BlockchainReactor handles fast sync protocol.
type BlockchainReactor struct {
    p2p.BaseReactor

    fastSync    bool // if true, enable fast sync on start
    stateSynced bool // set to true when SwitchToFastSync is called by state sync
    scheduler   *Routine
    processor   *Routine
    logger      log.Logger

    mtx           tmsync.RWMutex
    maxPeerHeight int64
    syncHeight    int64
    events        chan Event // non-nil during a fast sync

    reporter behaviour.Reporter
    io       iIO
    store    blockStore
}

type blockApplier interface {
    ApplyBlock(state state.State, blockID types.BlockID, block *types.Block) (state.State, int64, error)
}

// XXX: unify naming in this package around tmState
func newReactor(state state.State, store blockStore, reporter behaviour.Reporter,
    blockApplier blockApplier, fastSync bool) *BlockchainReactor {
    initHeight := state.LastBlockHeight + 1
    if initHeight == 1 {
        initHeight = state.InitialHeight
    }
    scheduler := newScheduler(initHeight, time.Now())
    pContext := newProcessorContext(store, blockApplier, state)
    // TODO: Fix naming to just newProcessor
    // newPcState requires a processorContext
    processor := newPcState(pContext)

    return &BlockchainReactor{
        scheduler: newRoutine("scheduler", scheduler.handle, chBufferSize),
        processor: newRoutine("processor", processor.handle, chBufferSize),
        store:     store,
        reporter:  reporter,
        logger:    log.NewNopLogger(),
        fastSync:  fastSync,
    }
}

// NewBlockchainReactor creates a new reactor instance.
func NewBlockchainReactor(
    state state.State,
    blockApplier blockApplier,
    store blockStore,
    fastSync bool) *BlockchainReactor {
    reporter := behaviour.NewMockReporter()
    return newReactor(state, store, reporter, blockApplier, fastSync)
}

// SetSwitch implements Reactor interface.
func (r *BlockchainReactor) SetSwitch(sw *p2p.Switch) {
    r.Switch = sw
    if sw != nil {
        r.io = newSwitchIo(sw)
    } else {
        r.io = nil
    }
}

func (r *BlockchainReactor) setMaxPeerHeight(height int64) {
    r.mtx.Lock()
    defer r.mtx.Unlock()
    if height > r.maxPeerHeight {
        r.maxPeerHeight = height
    }
}

func (r *BlockchainReactor) setSyncHeight(height int64) {
    r.mtx.Lock()
    defer r.mtx.Unlock()
    r.syncHeight = height
}

// SyncHeight returns the height to which the BlockchainReactor has synced.
func (r *BlockchainReactor) SyncHeight() int64 {
    r.mtx.RLock()
    defer r.mtx.RUnlock()
    return r.syncHeight
}

// SetLogger sets the logger of the reactor.
func (r *BlockchainReactor) SetLogger(logger log.Logger) {
    r.logger = logger
    r.scheduler.setLogger(logger)
    r.processor.setLogger(logger)
}

// Start implements cmn.Service interface
func (r *BlockchainReactor) Start() error {
    r.reporter = behaviour.NewSwitchReporter(r.BaseReactor.Switch)
    if r.fastSync {
        err := r.startSync(nil)
        if err != nil {
            return fmt.Errorf("failed to start fast sync: %w", err)
        }
    }
    return nil
}

// startSync begins a fast sync, signalled by r.events being non-nil. If state is non-nil,
// the scheduler and processor are updated with this state on startup.
func (r *BlockchainReactor) startSync(state *state.State) error {
    r.mtx.Lock()
    defer r.mtx.Unlock()
    if r.events != nil {
        return errors.New("fast sync already in progress")
    }
    r.events = make(chan Event, chBufferSize)
    go r.scheduler.start()
    go r.processor.start()
    if state != nil {
        <-r.scheduler.ready()
        <-r.processor.ready()
        r.scheduler.send(bcResetState{state: *state})
        r.processor.send(bcResetState{state: *state})
    }
    go r.demux(r.events)
    return nil
}

// endSync ends a fast sync
func (r *BlockchainReactor) endSync() {
    r.mtx.Lock()
    defer r.mtx.Unlock()
    if r.events != nil {
        close(r.events)
    }
    r.events = nil
    r.scheduler.stop()
    r.processor.stop()
}

// SwitchToFastSync is called by the state sync reactor when switching to fast sync.
func (r *BlockchainReactor) SwitchToFastSync(state state.State) error {
    r.stateSynced = true
    state = state.Copy()
    return r.startSync(&state)
}

// reactor generated ticker events:
// ticker for cleaning peers
type rTryPrunePeer struct {
    priorityHigh
    time time.Time
}

func (e rTryPrunePeer) String() string {
    return fmt.Sprintf("rTryPrunePeer{%v}", e.time)
}

// ticker event for scheduling block requests
type rTrySchedule struct {
    priorityHigh
    time time.Time
}

func (e rTrySchedule) String() string {
    return fmt.Sprintf("rTrySchedule{%v}", e.time)
}

// ticker for block processing
type rProcessBlock struct {
    priorityNormal
}

func (e rProcessBlock) String() string {
    return "rProcessBlock"
}

// reactor generated events based on blockchain related messages from peers:
// blockResponse message received from a peer
type bcBlockResponse struct {
    priorityNormal
    time   time.Time
    peerID p2p.NodeID
    size   int64
    block  *types.Block
}

func (resp bcBlockResponse) String() string {
    return fmt.Sprintf("bcBlockResponse{%d#%X (size: %d bytes) from %v at %v}",
        resp.block.Height, resp.block.Hash(), resp.size, resp.peerID, resp.time)
}

// noBlockResponse message received from a peer
type bcNoBlockResponse struct {
    priorityNormal
    time   time.Time
    peerID p2p.NodeID
    height int64
}

func (resp bcNoBlockResponse) String() string {
    return fmt.Sprintf("bcNoBlockResponse{%v has no block at height %d at %v}",
        resp.peerID, resp.height, resp.time)
}

// statusResponse message received from a peer
type bcStatusResponse struct {
    priorityNormal
    time   time.Time
    peerID p2p.NodeID
    base   int64
    height int64
}

func (resp bcStatusResponse) String() string {
    return fmt.Sprintf("bcStatusResponse{%v is at height %d (base: %d) at %v}",
        resp.peerID, resp.height, resp.base, resp.time)
}

// new peer is connected
type bcAddNewPeer struct {
    priorityNormal
    peerID p2p.NodeID
}

func (resp bcAddNewPeer) String() string {
    return fmt.Sprintf("bcAddNewPeer{%v}", resp.peerID)
}

// existing peer is removed
type bcRemovePeer struct {
    priorityHigh
    peerID p2p.NodeID
    reason interface{}
}

func (resp bcRemovePeer) String() string {
    return fmt.Sprintf("bcRemovePeer{%v due to %v}", resp.peerID, resp.reason)
}

// resets the scheduler and processor state, e.g. following a switch from state syncing
type bcResetState struct {
    priorityHigh
    state state.State
}

func (e bcResetState) String() string {
    return fmt.Sprintf("bcResetState{%v}", e.state)
}

// Takes the channel as a parameter to avoid race conditions on r.events.
func (r *BlockchainReactor) demux(events <-chan Event) {
    var lastRate = 0.0
    var lastHundred = time.Now()

    var (
        processBlockFreq = 20 * time.Millisecond
        doProcessBlockCh = make(chan struct{}, 1)
        doProcessBlockTk = time.NewTicker(processBlockFreq)
    )
    defer doProcessBlockTk.Stop()

    var (
        prunePeerFreq = 1 * time.Second
        doPrunePeerCh = make(chan struct{}, 1)
        doPrunePeerTk = time.NewTicker(prunePeerFreq)
    )
    defer doPrunePeerTk.Stop()

    var (
        scheduleFreq = 20 * time.Millisecond
        doScheduleCh = make(chan struct{}, 1)
        doScheduleTk = time.NewTicker(scheduleFreq)
    )
    defer doScheduleTk.Stop()

    var (
        statusFreq = 10 * time.Second
        doStatusCh = make(chan struct{}, 1)
        doStatusTk = time.NewTicker(statusFreq)
    )
    defer doStatusTk.Stop()
    doStatusCh <- struct{}{} // immediately broadcast to get status of existing peers

    // Memoize the scSchedulerFail error to avoid printing it every scheduleFreq.
    var scSchedulerFailErr error

    // XXX: Extract timers to make testing atemporal
    for {
        select {
        // Pacers: send at most per frequency but don't saturate
        case <-doProcessBlockTk.C:
            select {
            case doProcessBlockCh <- struct{}{}:
            default:
            }
        case <-doPrunePeerTk.C:
            select {
            case doPrunePeerCh <- struct{}{}:
            default:
            }
        case <-doScheduleTk.C:
            select {
            case doScheduleCh <- struct{}{}:
            default:
            }
        case <-doStatusTk.C:
            select {
            case doStatusCh <- struct{}{}:
            default:
            }

        // Tickers: perform tasks periodically
        case <-doScheduleCh:
            r.scheduler.send(rTrySchedule{time: time.Now()})
        case <-doPrunePeerCh:
            r.scheduler.send(rTryPrunePeer{time: time.Now()})
        case <-doProcessBlockCh:
            r.processor.send(rProcessBlock{})
        case <-doStatusCh:
            if err := r.io.broadcastStatusRequest(); err != nil {
                r.logger.Error("Error broadcasting status request", "err", err)
            }

        // Events from peers. Closing the channel signals event loop termination.
        case event, ok := <-events:
            if !ok {
                r.logger.Info("Stopping event processing")
                return
            }
            switch event := event.(type) {
            case bcStatusResponse:
                r.setMaxPeerHeight(event.height)
                r.scheduler.send(event)
            case bcAddNewPeer, bcRemovePeer, bcBlockResponse, bcNoBlockResponse:
                r.scheduler.send(event)
            default:
                r.logger.Error("Received unexpected event", "event", fmt.Sprintf("%T", event))
            }

        // Incremental events from scheduler
        case event := <-r.scheduler.next():
            switch event := event.(type) {
            case scBlockReceived:
                r.processor.send(event)
            case scPeerError:
                r.processor.send(event)
                if err := r.reporter.Report(behaviour.BadMessage(event.peerID, "scPeerError")); err != nil {
                    r.logger.Error("Error reporting peer", "err", err)
                }
            case scBlockRequest:
                peer := r.Switch.Peers().Get(event.peerID)
                if peer == nil {
                    r.logger.Error("Wanted to send block request, but no such peer", "peerID", event.peerID)
                    continue
                }
                if err := r.io.sendBlockRequest(peer, event.height); err != nil {
                    r.logger.Error("Error sending block request", "err", err)
                }
            case scFinishedEv:
                r.processor.send(event)
                r.scheduler.stop()
            case scSchedulerFail:
                if scSchedulerFailErr != event.reason {
                    r.logger.Error("Scheduler failure", "err", event.reason.Error())
                    scSchedulerFailErr = event.reason
                }
            case scPeersPruned:
                // Remove peers from the processor.
                for _, peerID := range event.peers {
                    r.processor.send(scPeerError{peerID: peerID, reason: errors.New("peer was pruned")})
                }
                r.logger.Debug("Pruned peers", "count", len(event.peers))
            case noOpEvent:
            default:
                r.logger.Error("Received unexpected scheduler event", "event", fmt.Sprintf("%T", event))
            }

        // Incremental events from processor
        case event := <-r.processor.next():
            switch event := event.(type) {
            case pcBlockProcessed:
                r.setSyncHeight(event.height)
                if r.syncHeight%100 == 0 {
                    lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
                    r.logger.Info("Fast Sync Rate", "height", r.syncHeight,
                        "max_peer_height", r.maxPeerHeight, "blocks/s", lastRate)
                    lastHundred = time.Now()
                }
                r.scheduler.send(event)
            case pcBlockVerificationFailure:
                r.scheduler.send(event)
            case pcFinished:
                r.logger.Info("Fast sync complete, switching to consensus")
                if !r.io.trySwitchToConsensus(event.tmState, event.blocksSynced > 0 || r.stateSynced) {
                    r.logger.Error("Failed to switch to consensus reactor")
                }
                r.endSync()
                return
            case noOpEvent:
            default:
                r.logger.Error("Received unexpected processor event", "event", fmt.Sprintf("%T", event))
            }

        // Terminal event from scheduler
        case err := <-r.scheduler.final():
            switch err {
            case nil:
                r.logger.Info("Scheduler stopped")
            default:
                r.logger.Error("Scheduler aborted with error", "err", err)
            }

        // Terminal event from processor
        case err := <-r.processor.final():
            switch err {
            case nil:
                r.logger.Info("Processor stopped")
            default:
                r.logger.Error("Processor aborted with error", "err", err)
            }
        }
    }
}

// Stop implements cmn.Service interface.
func (r *BlockchainReactor) Stop() error {
    r.logger.Info("reactor stopping")
    r.endSync()
    r.logger.Info("reactor stopped")
    return nil
}

// Receive implements Reactor by handling different message types.
// XXX: do not call any methods that can block or incur heavy processing.
// https://github.com/tendermint/tendermint/issues/2888
func (r *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
    logger := r.logger.With("src", src.ID(), "chID", chID)

    msgProto := new(bcproto.Message)

    if err := proto.Unmarshal(msgBytes, msgProto); err != nil {
        logger.Error("error decoding message", "err", err)
        _ = r.reporter.Report(behaviour.BadMessage(src.ID(), err.Error()))
        return
    }

    if err := msgProto.Validate(); err != nil {
        logger.Error("peer sent us an invalid msg", "msg", msgProto, "err", err)
        _ = r.reporter.Report(behaviour.BadMessage(src.ID(), err.Error()))
        return
    }

    r.logger.Debug("received", "msg", msgProto)

    switch msg := msgProto.Sum.(type) {
    case *bcproto.Message_StatusRequest:
        if err := r.io.sendStatusResponse(r.store.Base(), r.store.Height(), src); err != nil {
            logger.Error("Could not send status message to src peer")
        }

    case *bcproto.Message_BlockRequest:
        block := r.store.LoadBlock(msg.BlockRequest.Height)
        if block != nil {
            if err := r.io.sendBlockToPeer(block, src); err != nil {
                logger.Error("Could not send block message to src peer", "err", err)
            }
        } else {
            logger.Info("peer asking for a block we don't have", "height", msg.BlockRequest.Height)
            if err := r.io.sendBlockNotFound(msg.BlockRequest.Height, src); err != nil {
                logger.Error("Couldn't send block not found msg", "err", err)
            }
        }

    case *bcproto.Message_StatusResponse:
        r.mtx.RLock()
        if r.events != nil {
            r.events <- bcStatusResponse{
                peerID: src.ID(),
                base:   msg.StatusResponse.Base,
                height: msg.StatusResponse.Height,
            }
        }
        r.mtx.RUnlock()

    case *bcproto.Message_BlockResponse:
        bi, err := types.BlockFromProto(msg.BlockResponse.Block)
        if err != nil {
            logger.Error("error transitioning block from protobuf", "err", err)
            _ = r.reporter.Report(behaviour.BadMessage(src.ID(), err.Error()))
            return
        }
        r.mtx.RLock()
        if r.events != nil {
            r.events <- bcBlockResponse{
                peerID: src.ID(),
                block:  bi,
                size:   int64(len(msgBytes)),
                time:   time.Now(),
            }
        }
        r.mtx.RUnlock()

    case *bcproto.Message_NoBlockResponse:
        r.mtx.RLock()
        if r.events != nil {
            r.events <- bcNoBlockResponse{
                peerID: src.ID(),
                height: msg.NoBlockResponse.Height,
                time:   time.Now(),
            }
        }
        r.mtx.RUnlock()
    }
}

// AddPeer implements Reactor interface
func (r *BlockchainReactor) AddPeer(peer p2p.Peer) {
    err := r.io.sendStatusResponse(r.store.Base(), r.store.Height(), peer)
    if err != nil {
        r.logger.Error("could not send our status to the new peer", "peer", peer.ID, "err", err)
    }

    err = r.io.sendStatusRequest(peer)
    if err != nil {
        r.logger.Error("could not send status request to the new peer", "peer", peer.ID, "err", err)
    }

    r.mtx.RLock()
    defer r.mtx.RUnlock()
    if r.events != nil {
        r.events <- bcAddNewPeer{peerID: peer.ID()}
    }
}

// RemovePeer implements Reactor interface.
func (r *BlockchainReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
    r.mtx.RLock()
    defer r.mtx.RUnlock()
    if r.events != nil {
        r.events <- bcRemovePeer{
            peerID: peer.ID(),
            reason: reason,
        }
    }
}

// GetChannels implements Reactor
func (r *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
    return []*p2p.ChannelDescriptor{
        {
            ID:                  BlockchainChannel,
            Priority:            5,
            SendQueueCapacity:   2000,
            RecvBufferCapacity:  50 * 4096,
            RecvMessageCapacity: bc.MaxMsgSize,
        },
    }
}
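
A note on the pacer idiom in demux: each ticker feeds a buffered channel of capacity one through a non-blocking send, so when the event loop falls behind, missed ticks coalesce into a single pending unit of work instead of piling up. The pattern, stripped to a standalone sketch (doSlowWork is a hypothetical handler, not a name from this change):

    // Illustrative sketch of the "send at most per frequency but don't saturate" pacer.
    doCh := make(chan struct{}, 1) // capacity 1: at most one tick pending
    tk := time.NewTicker(20 * time.Millisecond)
    defer tk.Stop()

    for {
        select {
        case <-tk.C:
            select {
            case doCh <- struct{}{}: // record that work is due
            default: // work already pending; drop this tick
            }
        case <-doCh:
            doSlowWork() // hypothetical handler slower than the tick frequency
        }
    }
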
576 blockchain/v2/reactor_test.go (new file)
@@ -0,0 +1,576 @@
package v2

import (
    "fmt"
    "net"
    "os"
    "sort"
    "sync"
    "testing"
    "time"

    "github.com/gogo/protobuf/proto"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    dbm "github.com/tendermint/tm-db"

    abci "github.com/tendermint/tendermint/abci/types"
    "github.com/tendermint/tendermint/behaviour"
    cfg "github.com/tendermint/tendermint/config"
    "github.com/tendermint/tendermint/libs/log"
    "github.com/tendermint/tendermint/libs/service"
    "github.com/tendermint/tendermint/mempool/mock"
    "github.com/tendermint/tendermint/p2p"
    "github.com/tendermint/tendermint/p2p/conn"
    bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
    "github.com/tendermint/tendermint/proxy"
    sm "github.com/tendermint/tendermint/state"
    "github.com/tendermint/tendermint/store"
    "github.com/tendermint/tendermint/types"
    tmtime "github.com/tendermint/tendermint/types/time"
)

type mockPeer struct {
    service.Service
    id p2p.NodeID
}

func (mp mockPeer) FlushStop()           {}
func (mp mockPeer) ID() p2p.NodeID       { return mp.id }
func (mp mockPeer) RemoteIP() net.IP     { return net.IP{} }
func (mp mockPeer) RemoteAddr() net.Addr { return &net.TCPAddr{IP: mp.RemoteIP(), Port: 8800} }

func (mp mockPeer) IsOutbound() bool   { return true }
func (mp mockPeer) IsPersistent() bool { return true }
func (mp mockPeer) CloseConn() error   { return nil }

func (mp mockPeer) NodeInfo() p2p.NodeInfo {
    return p2p.NodeInfo{
        NodeID:     "",
        ListenAddr: "",
    }
}
func (mp mockPeer) Status() conn.ConnectionStatus { return conn.ConnectionStatus{} }
func (mp mockPeer) SocketAddr() *p2p.NetAddress   { return &p2p.NetAddress{} }

func (mp mockPeer) Send(byte, []byte) bool    { return true }
func (mp mockPeer) TrySend(byte, []byte) bool { return true }

func (mp mockPeer) Set(string, interface{}) {}
func (mp mockPeer) Get(string) interface{}  { return struct{}{} }

//nolint:unused
type mockBlockStore struct {
    blocks map[int64]*types.Block
}

func (ml *mockBlockStore) Height() int64 {
    return int64(len(ml.blocks))
}

func (ml *mockBlockStore) LoadBlock(height int64) *types.Block {
    return ml.blocks[height]
}

func (ml *mockBlockStore) SaveBlock(block *types.Block, part *types.PartSet, commit *types.Commit) {
    ml.blocks[block.Height] = block
}

type mockBlockApplier struct{}

// XXX: Add whitelist/blacklist?
func (mba *mockBlockApplier) ApplyBlock(
    state sm.State, blockID types.BlockID, block *types.Block,
) (sm.State, int64, error) {
    state.LastBlockHeight++
    return state, 0, nil
}

type mockSwitchIo struct {
    mtx                 sync.Mutex
    switchedToConsensus bool
    numStatusResponse   int
    numBlockResponse    int
    numNoBlockResponse  int
    numStatusRequest    int
}

var _ iIO = (*mockSwitchIo)(nil)

func (sio *mockSwitchIo) sendBlockRequest(_ p2p.Peer, _ int64) error {
    return nil
}

func (sio *mockSwitchIo) sendStatusResponse(_, _ int64, _ p2p.Peer) error {
    sio.mtx.Lock()
    defer sio.mtx.Unlock()
    sio.numStatusResponse++
    return nil
}

func (sio *mockSwitchIo) sendBlockToPeer(_ *types.Block, _ p2p.Peer) error {
    sio.mtx.Lock()
    defer sio.mtx.Unlock()
    sio.numBlockResponse++
    return nil
}

func (sio *mockSwitchIo) sendBlockNotFound(_ int64, _ p2p.Peer) error {
    sio.mtx.Lock()
    defer sio.mtx.Unlock()
    sio.numNoBlockResponse++
    return nil
}

func (sio *mockSwitchIo) trySwitchToConsensus(_ sm.State, _ bool) bool {
    sio.mtx.Lock()
    defer sio.mtx.Unlock()
    sio.switchedToConsensus = true
    return true
}

func (sio *mockSwitchIo) broadcastStatusRequest() error {
    return nil
}

func (sio *mockSwitchIo) sendStatusRequest(_ p2p.Peer) error {
    sio.mtx.Lock()
    defer sio.mtx.Unlock()
    sio.numStatusRequest++
    return nil
}

type testReactorParams struct {
    logger      log.Logger
    genDoc      *types.GenesisDoc
    privVals    []types.PrivValidator
    startHeight int64
    mockA       bool
}

func newTestReactor(p testReactorParams) *BlockchainReactor {
    store, state, _ := newReactorStore(p.genDoc, p.privVals, p.startHeight)
    reporter := behaviour.NewMockReporter()

    var appl blockApplier

    if p.mockA {
        appl = &mockBlockApplier{}
    } else {
        app := &testApp{}
        cc := proxy.NewLocalClientCreator(app)
        proxyApp := proxy.NewAppConns(cc)
        err := proxyApp.Start()
        if err != nil {
panic(fmt.Errorf("error start app: %w", err))
|
||||
        }
        db := dbm.NewMemDB()
        stateStore := sm.NewStore(db)
        appl = sm.NewBlockExecutor(stateStore, p.logger, proxyApp.Consensus(), mock.Mempool{}, sm.EmptyEvidencePool{})
        if err = stateStore.Save(state); err != nil {
            panic(err)
        }
    }

    r := newReactor(state, store, reporter, appl, true)
    logger := log.TestingLogger()
    r.SetLogger(logger.With("module", "blockchain"))

    return r
}

// This test is left here and not deleted to retain the termination cases for
// future improvement in [#4482](https://github.com/tendermint/tendermint/issues/4482).
// func TestReactorTerminationScenarios(t *testing.T) {

// 	config := cfg.ResetTestRoot("blockchain_reactor_v2_test")
// 	defer os.RemoveAll(config.RootDir)
// 	genDoc, privVals := randGenesisDoc(config.ChainID(), 1, false, 30)
// 	refStore, _, _ := newReactorStore(genDoc, privVals, 20)

// 	params := testReactorParams{
// 		logger:      log.TestingLogger(),
// 		genDoc:      genDoc,
// 		privVals:    privVals,
// 		startHeight: 10,
// 		bufferSize:  100,
// 		mockA:       true,
// 	}

// 	type testEvent struct {
// 		evType string
// 		peer   string
// 		height int64
// 	}

// 	tests := []struct {
// 		name   string
// 		params testReactorParams
// 		msgs   []testEvent
// 	}{
// 		{
// 			name:   "simple termination on max peer height - one peer",
// 			params: params,
// 			msgs: []testEvent{
// 				{evType: "AddPeer", peer: "P1"},
// 				{evType: "ReceiveS", peer: "P1", height: 13},
// 				{evType: "BlockReq"},
// 				{evType: "ReceiveB", peer: "P1", height: 11},
// 				{evType: "BlockReq"},
// 				{evType: "BlockReq"},
// 				{evType: "ReceiveB", peer: "P1", height: 12},
// 				{evType: "Process"},
// 				{evType: "ReceiveB", peer: "P1", height: 13},
// 				{evType: "Process"},
// 			},
// 		},
// 		{
// 			name:   "simple termination on max peer height - two peers",
// 			params: params,
// 			msgs: []testEvent{
// 				{evType: "AddPeer", peer: "P1"},
// 				{evType: "AddPeer", peer: "P2"},
// 				{evType: "ReceiveS", peer: "P1", height: 13},
// 				{evType: "ReceiveS", peer: "P2", height: 15},
// 				{evType: "BlockReq"},
// 				{evType: "BlockReq"},
// 				{evType: "ReceiveB", peer: "P1", height: 11},
// 				{evType: "ReceiveB", peer: "P2", height: 12},
// 				{evType: "Process"},
// 				{evType: "BlockReq"},
// 				{evType: "BlockReq"},
// 				{evType: "ReceiveB", peer: "P1", height: 13},
// 				{evType: "Process"},
// 				{evType: "ReceiveB", peer: "P2", height: 14},
// 				{evType: "Process"},
// 				{evType: "BlockReq"},
// 				{evType: "ReceiveB", peer: "P2", height: 15},
// 				{evType: "Process"},
// 			},
// 		},
// 		{
// 			name:   "termination on max peer height - two peers, noBlock error",
// 			params: params,
// 			msgs: []testEvent{
// 				{evType: "AddPeer", peer: "P1"},
// 				{evType: "AddPeer", peer: "P2"},
// 				{evType: "ReceiveS", peer: "P1", height: 13},
// 				{evType: "ReceiveS", peer: "P2", height: 15},
// 				{evType: "BlockReq"},
// 				{evType: "BlockReq"},
// 				{evType: "ReceiveNB", peer: "P1", height: 11},
// 				{evType: "BlockReq"},
// 				{evType: "ReceiveB", peer: "P2", height: 12},
// 				{evType: "ReceiveB", peer: "P2", height: 11},
// 				{evType: "Process"},
// 				{evType: "BlockReq"},
// 				{evType: "BlockReq"},
// 				{evType: "ReceiveB", peer: "P2", height: 13},
// 				{evType: "Process"},
// 				{evType: "ReceiveB", peer: "P2", height: 14},
// 				{evType: "Process"},
// 				{evType: "BlockReq"},
// 				{evType: "ReceiveB", peer: "P2", height: 15},
// 				{evType: "Process"},
// 			},
// 		},
// 		{
// 			name:   "termination on max peer height - two peers, remove one peer",
// 			params: params,
// 			msgs: []testEvent{
// 				{evType: "AddPeer", peer: "P1"},
// 				{evType: "AddPeer", peer: "P2"},
// 				{evType: "ReceiveS", peer: "P1", height: 13},
// 				{evType: "ReceiveS", peer: "P2", height: 15},
// 				{evType: "BlockReq"},
// 				{evType: "BlockReq"},
// 				{evType: "RemovePeer", peer: "P1"},
// 				{evType: "BlockReq"},
// 				{evType: "ReceiveB", peer: "P2", height: 12},
// 				{evType: "ReceiveB", peer: "P2", height: 11},
// 				{evType: "Process"},
// 				{evType: "BlockReq"},
// 				{evType: "BlockReq"},
// 				{evType: "ReceiveB", peer: "P2", height: 13},
// 				{evType: "Process"},
// 				{evType: "ReceiveB", peer: "P2", height: 14},
// 				{evType: "Process"},
// 				{evType: "BlockReq"},
// 				{evType: "ReceiveB", peer: "P2", height: 15},
// 				{evType: "Process"},
// 			},
// 		},
// 	}

// 	for _, tt := range tests {
// 		tt := tt
// 		t.Run(tt.name, func(t *testing.T) {
// 			reactor := newTestReactor(params)
// 			reactor.Start()
// 			reactor.reporter = behaviour.NewMockReporter()
// 			mockSwitch := &mockSwitchIo{switchedToConsensus: false}
// 			reactor.io = mockSwitch
// 			// time for go routines to start
// 			time.Sleep(time.Millisecond)

// 			for _, step := range tt.msgs {
// 				switch step.evType {
// 				case "AddPeer":
// 					reactor.scheduler.send(bcAddNewPeer{peerID: p2p.ID(step.peer)})
// 				case "RemovePeer":
// 					reactor.scheduler.send(bcRemovePeer{peerID: p2p.ID(step.peer)})
// 				case "ReceiveS":
// 					reactor.scheduler.send(bcStatusResponse{
// 						peerID: p2p.ID(step.peer),
// 						height: step.height,
// 						time:   time.Now(),
// 					})
// 				case "ReceiveB":
// 					reactor.scheduler.send(bcBlockResponse{
// 						peerID: p2p.ID(step.peer),
// 						block:  refStore.LoadBlock(step.height),
// 						size:   10,
// 						time:   time.Now(),
// 					})
// 				case "ReceiveNB":
// 					reactor.scheduler.send(bcNoBlockResponse{
// 						peerID: p2p.ID(step.peer),
// 						height: step.height,
// 						time:   time.Now(),
// 					})
// 				case "BlockReq":
// 					reactor.scheduler.send(rTrySchedule{time: time.Now()})
// 				case "Process":
// 					reactor.processor.send(rProcessBlock{})
// 				}
// 				// give time for messages to propagate between routines
// 				time.Sleep(time.Millisecond)
// 			}

// 			// time for processor to finish and reactor to switch to consensus
// 			time.Sleep(20 * time.Millisecond)
// 			assert.True(t, mockSwitch.hasSwitchedToConsensus())
// 			reactor.Stop()
// 		})
// 	}
// }

func TestReactorHelperMode(t *testing.T) {
    var (
        channelID = byte(0x40)
    )

    config := cfg.ResetTestRoot("blockchain_reactor_v2_test")
    defer os.RemoveAll(config.RootDir)
    genDoc, privVals := randGenesisDoc(config.ChainID(), 1, false, 30)

    params := testReactorParams{
        logger:      log.TestingLogger(),
        genDoc:      genDoc,
        privVals:    privVals,
        startHeight: 20,
        mockA:       true,
    }

    type testEvent struct {
        peer  string
        event interface{}
    }

    tests := []struct {
        name   string
        params testReactorParams
        msgs   []testEvent
    }{
        {
            name:   "status request",
            params: params,
            msgs: []testEvent{
                {"P1", bcproto.StatusRequest{}},
                {"P1", bcproto.BlockRequest{Height: 13}},
                {"P1", bcproto.BlockRequest{Height: 20}},
                {"P1", bcproto.BlockRequest{Height: 22}},
            },
        },
    }

    for _, tt := range tests {
        tt := tt
        t.Run(tt.name, func(t *testing.T) {
            reactor := newTestReactor(params)
            mockSwitch := &mockSwitchIo{switchedToConsensus: false}
            reactor.io = mockSwitch
            err := reactor.Start()
            require.NoError(t, err)

            for i := 0; i < len(tt.msgs); i++ {
                step := tt.msgs[i]
                switch ev := step.event.(type) {
                case bcproto.StatusRequest:
                    old := mockSwitch.numStatusResponse

                    msgProto := new(bcproto.Message)
                    require.NoError(t, msgProto.Wrap(&ev))

                    msgBz, err := proto.Marshal(msgProto)
                    require.NoError(t, err)

                    reactor.Receive(channelID, mockPeer{id: p2p.NodeID(step.peer)}, msgBz)
                    assert.Equal(t, old+1, mockSwitch.numStatusResponse)
                case bcproto.BlockRequest:
                    if ev.Height > params.startHeight {
                        old := mockSwitch.numNoBlockResponse

                        msgProto := new(bcproto.Message)
                        require.NoError(t, msgProto.Wrap(&ev))

                        msgBz, err := proto.Marshal(msgProto)
                        require.NoError(t, err)

                        reactor.Receive(channelID, mockPeer{id: p2p.NodeID(step.peer)}, msgBz)
                        assert.Equal(t, old+1, mockSwitch.numNoBlockResponse)
                    } else {
                        old := mockSwitch.numBlockResponse

                        msgProto := new(bcproto.Message)
                        require.NoError(t, msgProto.Wrap(&ev))

                        msgBz, err := proto.Marshal(msgProto)
                        require.NoError(t, err)

                        reactor.Receive(channelID, mockPeer{id: p2p.NodeID(step.peer)}, msgBz)
                        assert.Equal(t, old+1, mockSwitch.numBlockResponse)
                    }
                }
            }
            err = reactor.Stop()
            require.NoError(t, err)
        })
    }
}

func TestReactorSetSwitchNil(t *testing.T) {
    config := cfg.ResetTestRoot("blockchain_reactor_v2_test")
    defer os.RemoveAll(config.RootDir)
    genDoc, privVals := randGenesisDoc(config.ChainID(), 1, false, 30)

    reactor := newTestReactor(testReactorParams{
        logger:   log.TestingLogger(),
        genDoc:   genDoc,
        privVals: privVals,
    })
    reactor.SetSwitch(nil)

    assert.Nil(t, reactor.Switch)
    assert.Nil(t, reactor.io)
}

//----------------------------------------------
// utility funcs

func makeTxs(height int64) (txs []types.Tx) {
    for i := 0; i < 10; i++ {
        txs = append(txs, types.Tx([]byte{byte(height), byte(i)}))
    }
    return txs
}

func makeBlock(height int64, state sm.State, lastCommit *types.Commit) *types.Block {
    block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, nil, state.Validators.GetProposer().Address)
    return block
}

type testApp struct {
    abci.BaseApplication
}

func randGenesisDoc(chainID string, numValidators int, randPower bool, minPower int64) (
    *types.GenesisDoc, []types.PrivValidator) {
    validators := make([]types.GenesisValidator, numValidators)
    privValidators := make([]types.PrivValidator, numValidators)
    for i := 0; i < numValidators; i++ {
        val, privVal := types.RandValidator(randPower, minPower)
        validators[i] = types.GenesisValidator{
            PubKey: val.PubKey,
            Power:  val.VotingPower,
        }
        privValidators[i] = privVal
    }
    sort.Sort(types.PrivValidatorsByAddress(privValidators))

    return &types.GenesisDoc{
        GenesisTime: tmtime.Now(),
        ChainID:     chainID,
        Validators:  validators,
    }, privValidators
}

// Why are we importing the entire blockExecutor dependency graph here
// when we have the facilities to
func newReactorStore(
    genDoc *types.GenesisDoc,
    privVals []types.PrivValidator,
    maxBlockHeight int64) (*store.BlockStore, sm.State, *sm.BlockExecutor) {
    if len(privVals) != 1 {
        panic("only support one validator")
    }
    app := &testApp{}
    cc := proxy.NewLocalClientCreator(app)
    proxyApp := proxy.NewAppConns(cc)
    err := proxyApp.Start()
    if err != nil {
panic(fmt.Errorf("error start app: %w", err))
|
||||
    }

    stateDB := dbm.NewMemDB()
    blockStore := store.NewBlockStore(dbm.NewMemDB())
    stateStore := sm.NewStore(stateDB)
    state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
    if err != nil {
        panic(fmt.Errorf("error constructing state from genesis file: %w", err))
    }

    db := dbm.NewMemDB()
    stateStore = sm.NewStore(db)
    blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(),
        mock.Mempool{}, sm.EmptyEvidencePool{})
    if err = stateStore.Save(state); err != nil {
        panic(err)
    }

    // add blocks in
    for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ {
        lastCommit := types.NewCommit(blockHeight-1, 0, types.BlockID{}, nil)
        if blockHeight > 1 {
            lastBlockMeta := blockStore.LoadBlockMeta(blockHeight - 1)
            lastBlock := blockStore.LoadBlock(blockHeight - 1)
            vote, err := types.MakeVote(
                lastBlock.Header.Height,
                lastBlockMeta.BlockID,
                state.Validators,
                privVals[0],
                lastBlock.Header.ChainID,
                time.Now(),
            )
            if err != nil {
                panic(err)
            }
            lastCommit = types.NewCommit(vote.Height, vote.Round,
                lastBlockMeta.BlockID, []types.CommitSig{vote.CommitSig()})
        }

        thisBlock := makeBlock(blockHeight, state, lastCommit)

        thisParts := thisBlock.MakePartSet(types.BlockPartSizeBytes)
        blockID := types.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()}

        state, _, err = blockExec.ApplyBlock(state, blockID, thisBlock)
        if err != nil {
            panic(fmt.Errorf("error applying block: %w", err))
        }

        blockStore.SaveBlock(thisBlock, thisParts, lastCommit)
    }
    return blockStore, state, blockExec
}
166 blockchain/v2/routine.go (new file)
@@ -0,0 +1,166 @@
package v2

import (
    "fmt"
    "strings"
    "sync/atomic"

    "github.com/Workiva/go-datastructures/queue"

    "github.com/tendermint/tendermint/libs/log"
)

type handleFunc = func(event Event) (Event, error)

const historySize = 25

// Routine is a structure that models a finite state machine as a serialized
// stream of events processed by a handle function. This Routine structure
// handles the concurrency and messaging guarantees. Events sent via
// `send` are handled by the `handle` function to produce an iterator
// `next()`. Calling `stop()` on a routine will conclude processing of all
// sent events and produce a `final()` event representing the terminal state.
type Routine struct {
    name    string
    handle  handleFunc
    queue   *queue.PriorityQueue
    history []Event
    out     chan Event
    fin     chan error
    rdy     chan struct{}
    running *uint32
    logger  log.Logger
    metrics *Metrics
}

func newRoutine(name string, handleFunc handleFunc, bufferSize int) *Routine {
    return &Routine{
        name:    name,
        handle:  handleFunc,
        queue:   queue.NewPriorityQueue(bufferSize, true),
        history: make([]Event, 0, historySize),
        out:     make(chan Event, bufferSize),
        rdy:     make(chan struct{}, 1),
        fin:     make(chan error, 1),
        running: new(uint32),
        logger:  log.NewNopLogger(),
        metrics: NopMetrics(),
    }
}

func (rt *Routine) setLogger(logger log.Logger) {
    rt.logger = logger
}

// nolint:unused
func (rt *Routine) setMetrics(metrics *Metrics) {
    rt.metrics = metrics
}

func (rt *Routine) start() {
    rt.logger.Info(fmt.Sprintf("%s: run", rt.name))
    running := atomic.CompareAndSwapUint32(rt.running, uint32(0), uint32(1))
    if !running {
        panic(fmt.Sprintf("%s is already running", rt.name))
    }
    close(rt.rdy)
    defer func() {
        if r := recover(); r != nil {
            var (
                b strings.Builder
                j int
            )
            for i := len(rt.history) - 1; i >= 0; i-- {
                fmt.Fprintf(&b, "%d: %+v\n", j, rt.history[i])
                j++
            }
            panic(fmt.Sprintf("%v\nlast events:\n%v", r, b.String()))
        }
        stopped := atomic.CompareAndSwapUint32(rt.running, uint32(1), uint32(0))
        if !stopped {
panic(fmt.Sprintf("%s is failed to stop", rt.name))
|
||||
}
|
||||
}()
|
||||
|
||||
for {
|
||||
events, err := rt.queue.Get(1)
|
||||
if err == queue.ErrDisposed {
|
||||
rt.terminate(nil)
|
||||
return
|
||||
} else if err != nil {
|
||||
rt.terminate(err)
|
||||
return
|
||||
}
|
||||
oEvent, err := rt.handle(events[0].(Event))
|
||||
rt.metrics.EventsHandled.With("routine", rt.name).Add(1)
|
||||
if err != nil {
|
||||
rt.terminate(err)
|
||||
return
|
||||
}
|
||||
rt.metrics.EventsOut.With("routine", rt.name).Add(1)
|
||||
rt.logger.Debug(fmt.Sprintf("%s: produced %T %+v", rt.name, oEvent, oEvent))
|
||||
|
||||
// Skip rTrySchedule and rProcessBlock events as they clutter the history
|
||||
// due to their frequency.
|
||||
switch events[0].(type) {
|
||||
case rTrySchedule:
|
||||
case rProcessBlock:
|
||||
default:
|
||||
rt.history = append(rt.history, events[0].(Event))
|
||||
if len(rt.history) > historySize {
|
||||
rt.history = rt.history[1:]
|
||||
}
|
||||
}
|
||||
|
||||
rt.out <- oEvent
|
||||
}
|
||||
}
|
||||
|
||||
// XXX: look into returning OpError in the net package
|
||||
func (rt *Routine) send(event Event) bool {
|
||||
rt.logger.Debug(fmt.Sprintf("%s: received %T %+v", rt.name, event, event))
|
||||
if !rt.isRunning() {
|
||||
return false
|
||||
}
|
||||
err := rt.queue.Put(event)
|
||||
if err != nil {
|
||||
rt.metrics.EventsShed.With("routine", rt.name).Add(1)
|
||||
rt.logger.Error(fmt.Sprintf("%s: send failed, queue was full/stopped", rt.name))
|
||||
return false
|
||||
}
|
||||
|
||||
rt.metrics.EventsSent.With("routine", rt.name).Add(1)
|
||||
return true
|
||||
}
|
||||
|
||||
func (rt *Routine) isRunning() bool {
|
||||
return atomic.LoadUint32(rt.running) == 1
|
||||
}
|
||||
|
||||
func (rt *Routine) next() chan Event {
|
||||
return rt.out
|
||||
}
|
||||
|
||||
func (rt *Routine) ready() chan struct{} {
|
||||
return rt.rdy
|
||||
}
|
||||
|
||||
func (rt *Routine) stop() {
|
||||
if !rt.isRunning() { // XXX: this should check rt.queue.Disposed()
|
||||
return
|
||||
}
|
||||
|
||||
rt.logger.Info(fmt.Sprintf("%s: stop", rt.name))
|
||||
rt.queue.Dispose() // this should block until all queue items are free?
|
||||
}
|
||||
|
||||
func (rt *Routine) final() chan error {
|
||||
return rt.fin
|
||||
}
|
||||
|
||||
// XXX: Maybe get rid of this
|
||||
func (rt *Routine) terminate(reason error) {
|
||||
// We don't close the rt.out channel here, to avoid spinning on the closed channel
|
||||
// in the event loop.
|
||||
rt.fin <- reason
|
||||
}
|
||||
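The lifecycle above can be exercised end to end. The following is a minimal
sketch (not part of the diff) that drives a Routine using only the methods
defined in this file, assuming a handler and an event type like the ones in
the tests below:

	rt := newRoutine("demo", simpleHandler, 10)
	go rt.start()
	<-rt.ready()        // closed once start() has flipped the running flag
	rt.send(eventA{})   // returns false if the routine is stopped or the queue is full
	out := <-rt.next()  // events produced by the handler
	rt.stop()           // disposes the queue; the event loop then terminates
	err := <-rt.final() // nil on clean disposal, otherwise the handler's error
	_, _ = out, err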
163
blockchain/v2/routine_test.go
Normal file
163
blockchain/v2/routine_test.go
Normal file
@@ -0,0 +1,163 @@
package v2

import (
	"fmt"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

type eventA struct {
	priorityNormal
}

var errDone = fmt.Errorf("done")

func simpleHandler(event Event) (Event, error) {
	if _, ok := event.(eventA); ok {
		return noOp, errDone
	}
	return noOp, nil
}

func TestRoutineFinal(t *testing.T) {
	var (
		bufferSize = 10
		routine    = newRoutine("simpleRoutine", simpleHandler, bufferSize)
	)

	assert.False(t, routine.isRunning(),
		"expected an initialized routine to not be running")
	go routine.start()
	<-routine.ready()
	assert.True(t, routine.isRunning(),
		"expected a started routine to be running")

	assert.True(t, routine.send(eventA{}),
		"expected sending to a ready routine to succeed")

	assert.Equal(t, errDone, <-routine.final(),
		"expected the final event to be done")

	assert.False(t, routine.isRunning(),
		"expected a completed routine to no longer be running")
}

func TestRoutineStop(t *testing.T) {
	var (
		bufferSize = 10
		routine    = newRoutine("simpleRoutine", simpleHandler, bufferSize)
	)

	assert.False(t, routine.send(eventA{}),
		"expected sending to an unstarted routine to fail")

	go routine.start()
	<-routine.ready()

	assert.True(t, routine.send(eventA{}),
		"expected sending to a running routine to succeed")

	routine.stop()

	assert.False(t, routine.send(eventA{}),
		"expected sending to a stopped routine to fail")
}

type finalCount struct {
	count int
}

func (f finalCount) Error() string {
	return "end"
}

func genStatefulHandler(maxCount int) handleFunc {
	counter := 0
	return func(event Event) (Event, error) {
		if _, ok := event.(eventA); ok {
			counter++
			if counter >= maxCount {
				return noOp, finalCount{counter}
			}

			return eventA{}, nil
		}
		return noOp, nil
	}
}

func feedback(r *Routine) {
	for event := range r.next() {
		r.send(event)
	}
}

func TestStatefulRoutine(t *testing.T) {
	var (
		count      = 10
		handler    = genStatefulHandler(count)
		bufferSize = 20
		routine    = newRoutine("statefulRoutine", handler, bufferSize)
	)

	go routine.start()
	go feedback(routine)
	<-routine.ready()

	assert.True(t, routine.send(eventA{}),
		"expected sending to a started routine to succeed")

	final := <-routine.final()
	if fnl, ok := final.(finalCount); ok {
		assert.Equal(t, count, fnl.count,
			"expected the routine to count to 10")
	} else {
		t.Fail()
	}
}

type lowPriorityEvent struct {
	priorityLow
}

type highPriorityEvent struct {
	priorityHigh
}

func handleWithPriority(event Event) (Event, error) {
	switch event.(type) {
	case lowPriorityEvent:
		return noOp, nil
	case highPriorityEvent:
		return noOp, errDone
	}
	return noOp, nil
}

func TestPriority(t *testing.T) {
	var (
		bufferSize = 20
		routine    = newRoutine("priorityRoutine", handleWithPriority, bufferSize)
	)

	go routine.start()
	<-routine.ready()
	go func() {
		for {
			routine.send(lowPriorityEvent{})
			time.Sleep(1 * time.Millisecond)
		}
	}()
	time.Sleep(10 * time.Millisecond)

	assert.True(t, routine.isRunning(),
		"expected a started routine to be running")
	assert.True(t, routine.send(highPriorityEvent{}),
		"expected send to succeed even when saturated")

	assert.Equal(t, errDone, <-routine.final())
	assert.False(t, routine.isRunning(),
		"expected the routine to no longer be running")
}
712
blockchain/v2/scheduler.go
Normal file
712
blockchain/v2/scheduler.go
Normal file
@@ -0,0 +1,712 @@
package v2

import (
	"bytes"
	"errors"
	"fmt"
	"math"
	"sort"
	"time"

	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/types"
)

// Events generated by the scheduler:
// all blocks have been processed
type scFinishedEv struct {
	priorityNormal
	reason string
}

func (e scFinishedEv) String() string {
	return fmt.Sprintf("scFinishedEv{%v}", e.reason)
}

// send a blockRequest message
type scBlockRequest struct {
	priorityNormal
	peerID p2p.NodeID
	height int64
}

func (e scBlockRequest) String() string {
	return fmt.Sprintf("scBlockRequest{%d from %v}", e.height, e.peerID)
}

// a block has been received and validated by the scheduler
type scBlockReceived struct {
	priorityNormal
	peerID p2p.NodeID
	block  *types.Block
}

func (e scBlockReceived) String() string {
	return fmt.Sprintf("scBlockReceived{%d#%X from %v}", e.block.Height, e.block.Hash(), e.peerID)
}

// scheduler detected a peer error
type scPeerError struct {
	priorityHigh
	peerID p2p.NodeID
	reason error
}

func (e scPeerError) String() string {
	return fmt.Sprintf("scPeerError{%v errored with %v}", e.peerID, e.reason)
}

// scheduler removed a set of peers (timed out or slow peer)
type scPeersPruned struct {
	priorityHigh
	peers []p2p.NodeID
}

func (e scPeersPruned) String() string {
	return fmt.Sprintf("scPeersPruned{%v}", e.peers)
}

// XXX: make this fatal?
// scheduler encountered a fatal error
type scSchedulerFail struct {
	priorityHigh
	reason error
}

func (e scSchedulerFail) String() string {
	return fmt.Sprintf("scSchedulerFail{%v}", e.reason)
}

type blockState int

const (
	blockStateUnknown   blockState = iota + 1 // no known peer has this block
	blockStateNew                             // indicates that a peer has reported having this block
	blockStatePending                         // indicates that this block has been requested from a peer
	blockStateReceived                        // indicates that this block has been received from a peer
	blockStateProcessed                       // indicates that this block has been applied
)

func (e blockState) String() string {
	switch e {
	case blockStateUnknown:
		return "Unknown"
	case blockStateNew:
		return "New"
	case blockStatePending:
		return "Pending"
	case blockStateReceived:
		return "Received"
	case blockStateProcessed:
		return "Processed"
	default:
		return fmt.Sprintf("invalid blockState: %d", e)
	}
}

type peerState int

const (
	peerStateNew = iota + 1
	peerStateReady
	peerStateRemoved
)

func (e peerState) String() string {
	switch e {
	case peerStateNew:
		return "New"
	case peerStateReady:
		return "Ready"
	case peerStateRemoved:
		return "Removed"
	default:
		panic(fmt.Sprintf("unknown peerState: %d", e))
	}
}

type scPeer struct {
	peerID p2p.NodeID

	// initialized as New when peer is added, updated to Ready when statusUpdate is received,
	// updated to Removed when peer is removed
	state peerState

	base        int64 // updated when statusResponse is received
	height      int64 // updated when statusResponse is received
	lastTouched time.Time
	lastRate    int64 // last receive rate in bytes
}

func (p scPeer) String() string {
	return fmt.Sprintf("{state %v, base %d, height %d, lastTouched %v, lastRate %d, id %v}",
		p.state, p.base, p.height, p.lastTouched, p.lastRate, p.peerID)
}

func newScPeer(peerID p2p.NodeID) *scPeer {
	return &scPeer{
		peerID:      peerID,
		state:       peerStateNew,
		base:        -1,
		height:      -1,
		lastTouched: time.Time{},
	}
}

// The scheduler keeps track of the state of each block and each peer. The
// scheduler will attempt to schedule new block requests with `trySchedule`
// events and remove slow peers with `tryPrune` events.
type scheduler struct {
	initHeight int64

	// next block that needs to be processed. All blocks with smaller height are
	// in Processed state.
	height int64

	// lastAdvance tracks the last time a block execution happened.
	// syncTimeout is the maximum time the scheduler waits to advance in the fast sync process before finishing.
	// This covers the cases where there are no peers or all peers have a lower height.
	lastAdvance time.Time
	syncTimeout time.Duration

	// a map of peerID to scheduler specific peer struct `scPeer` used to keep
	// track of peer specific state
	peers       map[p2p.NodeID]*scPeer
	peerTimeout time.Duration // maximum response time from a peer otherwise prune
	minRecvRate int64         // minimum receive rate from peer otherwise prune

	// the maximum number of blocks that should be New, Received or Pending at any point
	// in time. This is used to enforce a limit on the blockStates map.
	targetPending int
	// a list of blocks to be scheduled (New), Pending or Received. Its length should be
	// smaller than targetPending.
	blockStates map[int64]blockState

	// a map of heights to the peer we are waiting a response from
	pendingBlocks map[int64]p2p.NodeID

	// the time at which a block was put in blockStatePending
	pendingTime map[int64]time.Time

	// a map of heights to the peers that put the block in blockStateReceived
	receivedBlocks map[int64]p2p.NodeID
}

func (sc scheduler) String() string {
	return fmt.Sprintf("ih: %d, bst: %v, peers: %v, pblks: %v, ptm %v, rblks: %v",
		sc.initHeight, sc.blockStates, sc.peers, sc.pendingBlocks, sc.pendingTime, sc.receivedBlocks)
}

func newScheduler(initHeight int64, startTime time.Time) *scheduler {
	sc := scheduler{
		initHeight:     initHeight,
		lastAdvance:    startTime,
		syncTimeout:    60 * time.Second,
		height:         initHeight,
		blockStates:    make(map[int64]blockState),
		peers:          make(map[p2p.NodeID]*scPeer),
		pendingBlocks:  make(map[int64]p2p.NodeID),
		pendingTime:    make(map[int64]time.Time),
		receivedBlocks: make(map[int64]p2p.NodeID),
		targetPending:  10,               // TODO - pass as param
		peerTimeout:    15 * time.Second, // TODO - pass as param
		minRecvRate:    0,                // int64(7680), TODO - pass as param
	}

	return &sc
}

func (sc *scheduler) ensurePeer(peerID p2p.NodeID) *scPeer {
	if _, ok := sc.peers[peerID]; !ok {
		sc.peers[peerID] = newScPeer(peerID)
	}
	return sc.peers[peerID]
}

func (sc *scheduler) touchPeer(peerID p2p.NodeID, time time.Time) error {
	peer, ok := sc.peers[peerID]
	if !ok {
		return fmt.Errorf("couldn't find peer %s", peerID)
	}

	if peer.state != peerStateReady {
		return fmt.Errorf("tried to touch peer in state %s, must be Ready", peer.state)
	}

	peer.lastTouched = time

	return nil
}

func (sc *scheduler) removePeer(peerID p2p.NodeID) {
	peer, ok := sc.peers[peerID]
	if !ok {
		return
	}
	if peer.state == peerStateRemoved {
		return
	}

	for height, pendingPeerID := range sc.pendingBlocks {
		if pendingPeerID == peerID {
			sc.setStateAtHeight(height, blockStateNew)
			delete(sc.pendingTime, height)
			delete(sc.pendingBlocks, height)
		}
	}

	for height, rcvPeerID := range sc.receivedBlocks {
		if rcvPeerID == peerID {
			sc.setStateAtHeight(height, blockStateNew)
			delete(sc.receivedBlocks, height)
		}
	}

	// remove the blocks from blockStates if the peer removal causes the max peer height to be lower.
	peer.state = peerStateRemoved
	maxPeerHeight := int64(0)
	for _, otherPeer := range sc.peers {
		if otherPeer.state != peerStateReady {
			continue
		}
		if otherPeer.peerID != peer.peerID && otherPeer.height > maxPeerHeight {
			maxPeerHeight = otherPeer.height
		}
	}
	for h := range sc.blockStates {
		if h > maxPeerHeight {
			delete(sc.blockStates, h)
		}
	}
}

// check if the blockPool is running low and add new blocks in New state to be requested.
// This function is called when there is an increase in the maximum peer height or when
// blocks are processed.
func (sc *scheduler) addNewBlocks() {
	if len(sc.blockStates) >= sc.targetPending {
		return
	}

	for i := sc.height; i < int64(sc.targetPending)+sc.height; i++ {
		if i > sc.maxHeight() {
			break
		}
		if sc.getStateAtHeight(i) == blockStateUnknown {
			sc.setStateAtHeight(i, blockStateNew)
		}
	}
}

func (sc *scheduler) setPeerRange(peerID p2p.NodeID, base int64, height int64) error {
	peer := sc.ensurePeer(peerID)

	if peer.state == peerStateRemoved {
		return nil // noop
	}

	if height < peer.height {
		sc.removePeer(peerID)
		return fmt.Errorf("cannot move peer height lower, from %d to %d", peer.height, height)
	}

	if base > height {
		sc.removePeer(peerID)
		return fmt.Errorf("cannot set peer base higher than its height")
	}

	peer.base = base
	peer.height = height
	peer.state = peerStateReady

	sc.addNewBlocks()
	return nil
}

func (sc *scheduler) getStateAtHeight(height int64) blockState {
	if height < sc.height {
		return blockStateProcessed
	} else if state, ok := sc.blockStates[height]; ok {
		return state
	} else {
		return blockStateUnknown
	}
}

func (sc *scheduler) getPeersWithHeight(height int64) []p2p.NodeID {
	peers := make([]p2p.NodeID, 0)
	for _, peer := range sc.peers {
		if peer.state != peerStateReady {
			continue
		}
		if peer.base <= height && peer.height >= height {
			peers = append(peers, peer.peerID)
		}
	}
	return peers
}

func (sc *scheduler) prunablePeers(peerTimeout time.Duration, minRecvRate int64, now time.Time) []p2p.NodeID {
	prunable := make([]p2p.NodeID, 0)
	for peerID, peer := range sc.peers {
		if peer.state != peerStateReady {
			continue
		}
		if now.Sub(peer.lastTouched) > peerTimeout || peer.lastRate < minRecvRate {
			prunable = append(prunable, peerID)
		}
	}
	// Tests for handleTryPrunePeer() may fail without sort due to range non-determinism
	sort.Sort(PeerByID(prunable))
	return prunable
}

func (sc *scheduler) setStateAtHeight(height int64, state blockState) {
	sc.blockStates[height] = state
}

// CONTRACT: peer exists and is in Ready state.
func (sc *scheduler) markReceived(peerID p2p.NodeID, height int64, size int64, now time.Time) error {
	peer := sc.peers[peerID]

	if state := sc.getStateAtHeight(height); state != blockStatePending || sc.pendingBlocks[height] != peerID {
		return fmt.Errorf("received block %d from peer %s without being requested", height, peerID)
	}

	pendingTime, ok := sc.pendingTime[height]
	if !ok || now.Sub(pendingTime) <= 0 {
		return fmt.Errorf("clock error: block %d received at %s but requested at %s",
			height, now, pendingTime)
	}

	peer.lastRate = size / now.Sub(pendingTime).Nanoseconds()

	sc.setStateAtHeight(height, blockStateReceived)
	delete(sc.pendingBlocks, height)
	delete(sc.pendingTime, height)

	sc.receivedBlocks[height] = peerID

	return nil
}

func (sc *scheduler) markPending(peerID p2p.NodeID, height int64, time time.Time) error {
	state := sc.getStateAtHeight(height)
	if state != blockStateNew {
		return fmt.Errorf("block %d should be in blockStateNew but is %s", height, state)
	}

	peer, ok := sc.peers[peerID]
	if !ok {
		return fmt.Errorf("cannot find peer %s", peerID)
	}

	if peer.state != peerStateReady {
		return fmt.Errorf("cannot schedule %d from %s in %s", height, peerID, peer.state)
	}

	if height > peer.height {
		return fmt.Errorf("cannot request height %d from peer %s that is at height %d",
			height, peerID, peer.height)
	}

	if height < peer.base {
		return fmt.Errorf("cannot request height %d for peer %s with base %d",
			height, peerID, peer.base)
	}

	sc.setStateAtHeight(height, blockStatePending)
	sc.pendingBlocks[height] = peerID
	sc.pendingTime[height] = time

	return nil
}

func (sc *scheduler) markProcessed(height int64) error {
	// It is possible that a peer error or timeout is handled after the processor
	// has processed the block but before the scheduler received this event, so
	// when pcBlockProcessed event is received, the block had been requested
	// again => don't check the block state.
	sc.lastAdvance = time.Now()
	sc.height = height + 1
	delete(sc.pendingBlocks, height)
	delete(sc.pendingTime, height)
	delete(sc.receivedBlocks, height)
	delete(sc.blockStates, height)
	sc.addNewBlocks()
	return nil
}

func (sc *scheduler) allBlocksProcessed() bool {
	if len(sc.peers) == 0 {
		return false
	}
	return sc.height >= sc.maxHeight()
}

// maxHeight returns the maximum height of a ready peer, or the height of the
// last processed block (sc.height - 1) if there are no ready peers.
func (sc *scheduler) maxHeight() int64 {
	max := sc.height - 1
	for _, peer := range sc.peers {
		if peer.state != peerStateReady {
			continue
		}
		if max < peer.height {
			max = peer.height
		}
	}
	return max
}

// nextHeightToSchedule returns the lowest block in sc.blockStates with state
// == blockStateNew, or -1 if there are no new blocks.
func (sc *scheduler) nextHeightToSchedule() int64 {
	var min int64 = math.MaxInt64
	for height, state := range sc.blockStates {
		if state == blockStateNew && height < min {
			min = height
		}
	}
	if min == math.MaxInt64 {
		min = -1
	}
	return min
}

func (sc *scheduler) pendingFrom(peerID p2p.NodeID) []int64 {
	var heights []int64
	for height, pendingPeerID := range sc.pendingBlocks {
		if pendingPeerID == peerID {
			heights = append(heights, height)
		}
	}
	return heights
}

func (sc *scheduler) selectPeer(height int64) (p2p.NodeID, error) {
	peers := sc.getPeersWithHeight(height)
	if len(peers) == 0 {
		return "", fmt.Errorf("cannot find peer for height %d", height)
	}

	// create a map from number of pending requests to a list
	// of peers having that number of pending requests.
	pendingFrom := make(map[int][]p2p.NodeID)
	for _, peerID := range peers {
		numPending := len(sc.pendingFrom(peerID))
		pendingFrom[numPending] = append(pendingFrom[numPending], peerID)
	}

	// find the set of peers with minimum number of pending requests.
	var minPending int64 = math.MaxInt64
	for mp := range pendingFrom {
		if int64(mp) < minPending {
			minPending = int64(mp)
		}
	}

	sort.Sort(PeerByID(pendingFrom[int(minPending)]))
	return pendingFrom[int(minPending)][0], nil
}

// PeerByID is a list of peers sorted by peerID.
type PeerByID []p2p.NodeID

func (peers PeerByID) Len() int {
	return len(peers)
}

func (peers PeerByID) Less(i, j int) bool {
	return bytes.Compare([]byte(peers[i]), []byte(peers[j])) == -1
}

func (peers PeerByID) Swap(i, j int) {
	peers[i], peers[j] = peers[j], peers[i]
}

// Handlers

// This handler gets the block, performs some validation and then passes it on to the processor.
func (sc *scheduler) handleBlockResponse(event bcBlockResponse) (Event, error) {
	err := sc.touchPeer(event.peerID, event.time)
	if err != nil {
		// peer does not exist OR is not ready
		return noOp, nil
	}

	err = sc.markReceived(event.peerID, event.block.Height, event.size, event.time)
	if err != nil {
		sc.removePeer(event.peerID)
		return scPeerError{peerID: event.peerID, reason: err}, nil
	}

	return scBlockReceived{peerID: event.peerID, block: event.block}, nil
}

func (sc *scheduler) handleNoBlockResponse(event bcNoBlockResponse) (Event, error) {
	// No such peer or peer was removed.
	peer, ok := sc.peers[event.peerID]
	if !ok || peer.state == peerStateRemoved {
		return noOp, nil
	}

	// The peer may have been just removed due to errors, low speed or timeouts.
	sc.removePeer(event.peerID)

	return scPeerError{peerID: event.peerID,
		reason: fmt.Errorf("peer %v with base %d height %d claims no block for %d",
			event.peerID, peer.base, peer.height, event.height)}, nil
}

func (sc *scheduler) handleBlockProcessed(event pcBlockProcessed) (Event, error) {
	if event.height != sc.height {
		panic(fmt.Sprintf("processed height %d, but expected height %d", event.height, sc.height))
	}

	err := sc.markProcessed(event.height)
	if err != nil {
		return scSchedulerFail{reason: err}, nil
	}

	if sc.allBlocksProcessed() {
		return scFinishedEv{reason: "processed all blocks"}, nil
	}

	return noOp, nil
}

// Handles an error from the processor. The processor has already cleaned the blocks from
// the peers included in this event. Just attempt to remove the peers.
func (sc *scheduler) handleBlockProcessError(event pcBlockVerificationFailure) (Event, error) {
	// The peers may have been just removed due to errors, low speed or timeouts.
	sc.removePeer(event.firstPeerID)
	if event.firstPeerID != event.secondPeerID {
		sc.removePeer(event.secondPeerID)
	}

	if sc.allBlocksProcessed() {
		return scFinishedEv{reason: "error on last block"}, nil
	}

	return noOp, nil
}

func (sc *scheduler) handleAddNewPeer(event bcAddNewPeer) (Event, error) {
	sc.ensurePeer(event.peerID)
	return noOp, nil
}

func (sc *scheduler) handleRemovePeer(event bcRemovePeer) (Event, error) {
	sc.removePeer(event.peerID)

	if sc.allBlocksProcessed() {
		return scFinishedEv{reason: "removed peer"}, nil
	}

	// Return scPeerError so the peer (and all associated blocks) is removed from
	// the processor.
	return scPeerError{peerID: event.peerID, reason: errors.New("peer was stopped")}, nil
}

func (sc *scheduler) handleTryPrunePeer(event rTryPrunePeer) (Event, error) {
	// Check behavior of the peer responsible for delivering the block at sc.height.
	timeHeightAsked, ok := sc.pendingTime[sc.height]
	if ok && time.Since(timeHeightAsked) > sc.peerTimeout {
		// A request was sent to a peer for the block at sc.height but a response was not received
		// from that peer within sc.peerTimeout. Remove the peer. This is to ensure that a peer
		// will be timed out even if it sends blocks at higher heights but prevents progress by
		// not sending the block at the current height.
		sc.removePeer(sc.pendingBlocks[sc.height])
	}

	prunablePeers := sc.prunablePeers(sc.peerTimeout, sc.minRecvRate, event.time)
	if len(prunablePeers) == 0 {
		return noOp, nil
	}
	for _, peerID := range prunablePeers {
		sc.removePeer(peerID)
	}

	// If all blocks are processed we should finish.
	if sc.allBlocksProcessed() {
		return scFinishedEv{reason: "after try prune"}, nil
	}

	return scPeersPruned{peers: prunablePeers}, nil
}

func (sc *scheduler) handleResetState(event bcResetState) (Event, error) {
	initHeight := event.state.LastBlockHeight + 1
	if initHeight == 1 {
		initHeight = event.state.InitialHeight
	}
	sc.initHeight = initHeight
	sc.height = initHeight
	sc.lastAdvance = time.Now()
	sc.addNewBlocks()
	return noOp, nil
}

func (sc *scheduler) handleTrySchedule(event rTrySchedule) (Event, error) {
	if time.Since(sc.lastAdvance) > sc.syncTimeout {
		return scFinishedEv{reason: "timeout, no advance"}, nil
	}

	nextHeight := sc.nextHeightToSchedule()
	if nextHeight == -1 {
		return noOp, nil
	}

	bestPeerID, err := sc.selectPeer(nextHeight)
	if err != nil {
		return scSchedulerFail{reason: err}, nil
	}
	if err := sc.markPending(bestPeerID, nextHeight, event.time); err != nil {
		return scSchedulerFail{reason: err}, nil // XXX: peerError might be more appropriate
	}
	return scBlockRequest{peerID: bestPeerID, height: nextHeight}, nil
}

func (sc *scheduler) handleStatusResponse(event bcStatusResponse) (Event, error) {
	err := sc.setPeerRange(event.peerID, event.base, event.height)
	if err != nil {
		return scPeerError{peerID: event.peerID, reason: err}, nil
	}
	return noOp, nil
}

func (sc *scheduler) handle(event Event) (Event, error) {
	switch event := event.(type) {
	case bcResetState:
		return sc.handleResetState(event)
	case bcStatusResponse:
		return sc.handleStatusResponse(event)
	case bcBlockResponse:
		return sc.handleBlockResponse(event)
	case bcNoBlockResponse:
		return sc.handleNoBlockResponse(event)
	case rTrySchedule:
		return sc.handleTrySchedule(event)
	case bcAddNewPeer:
		return sc.handleAddNewPeer(event)
	case bcRemovePeer:
		return sc.handleRemovePeer(event)
	case rTryPrunePeer:
		return sc.handleTryPrunePeer(event)
	case pcBlockProcessed:
		return sc.handleBlockProcessed(event)
	case pcBlockVerificationFailure:
		return sc.handleBlockProcessError(event)
	default:
		return scSchedulerFail{reason: fmt.Errorf("unknown event %v", event)}, nil
	}
}
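Since scheduler.handle has the handleFunc signature from routine.go, it can be
mounted directly on a Routine. A hedged sketch (not part of the diff) of that
wiring; initHeight and somePeer are placeholders, and the bcStatusResponse
literal only sets the fields the handlers above read:

	sc := newScheduler(initHeight, time.Now())
	rt := newRoutine("scheduler", sc.handle, 100)
	go rt.start()
	<-rt.ready()
	// Feed peer status in; scBlockRequest / scPeerError / scFinishedEv events
	// come back out of rt.next().
	rt.send(bcStatusResponse{peerID: somePeer, base: 0, height: 50})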
2254
blockchain/v2/scheduler_test.go
Normal file
2254
blockchain/v2/scheduler_test.go
Normal file
File diff suppressed because it is too large
65
blockchain/v2/types.go
Normal file
65
blockchain/v2/types.go
Normal file
@@ -0,0 +1,65 @@
package v2

import (
	"github.com/Workiva/go-datastructures/queue"
)

// Event is the type that can be added to the priority queue.
type Event queue.Item

type priority interface {
	Compare(other queue.Item) int
	Priority() int
}

type priorityLow struct{}
type priorityNormal struct{}
type priorityHigh struct{}

func (p priorityLow) Priority() int {
	return 1
}

func (p priorityNormal) Priority() int {
	return 2
}

func (p priorityHigh) Priority() int {
	return 3
}

func (p priorityLow) Compare(other queue.Item) int {
	op := other.(priority)
	if p.Priority() > op.Priority() {
		return 1
	} else if p.Priority() == op.Priority() {
		return 0
	}
	return -1
}

func (p priorityNormal) Compare(other queue.Item) int {
	op := other.(priority)
	if p.Priority() > op.Priority() {
		return 1
	} else if p.Priority() == op.Priority() {
		return 0
	}
	return -1
}

func (p priorityHigh) Compare(other queue.Item) int {
	op := other.(priority)
	if p.Priority() > op.Priority() {
		return 1
	} else if p.Priority() == op.Priority() {
		return 0
	}
	return -1
}

type noOpEvent struct {
	priorityLow
}

var noOp = noOpEvent{}
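The Compare methods above order events strictly by Priority(), which is what
routine.go relies on when it Puts events into its Workiva priority queue. A
small sketch (not part of the diff) of the ordering they define, reusing the
test event types from routine_test.go above:

	low, high := lowPriorityEvent{}, highPriorityEvent{}
	fmt.Println(low.Compare(high)) // -1: low has the smaller Priority()
	fmt.Println(high.Compare(low)) // 1
	fmt.Println(low.Compare(low))  // 0: equal priority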
@@ -46,8 +46,9 @@ func main() {
		rootCA         = flag.String("rootcafile", "", "absolute path to root CA")
		prometheusAddr = flag.String("prometheus-addr", "", "address for prometheus endpoint (host:port)")

		logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false).
			With("module", "priv_val")
		logger = log.NewTMLogger(
			log.NewSyncWriter(os.Stdout),
		).With("module", "priv_val")
	)
	flag.Parse()

@@ -63,11 +64,7 @@ func main() {
		"rootCA", *rootCA,
	)

	pv, err := privval.LoadFilePV(*privValKeyPath, *privValStatePath)
	if err != nil {
		fmt.Fprint(os.Stderr, err)
		os.Exit(1)
	}
	pv := privval.LoadFilePV(*privValKeyPath, *privValStatePath)

	opts := []grpc.ServerOption{}
	if !*insecure {
@@ -1,6 +1,8 @@
package debug

import (
	"os"

	"github.com/spf13/cobra"

	"github.com/tendermint/tendermint/libs/log"
@@ -15,7 +17,7 @@ var (
	flagProfAddr  = "pprof-laddr"
	flagFrequency = "frequency"

	logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
	logger = log.NewTMLogger(log.NewSyncWriter(os.Stdout))
)

// DebugCmd defines the root command containing subcommands that assist in
@@ -11,7 +11,7 @@ import (
	"github.com/spf13/cobra"
	"github.com/spf13/viper"

	"github.com/tendermint/tendermint/config"
	cfg "github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/libs/cli"
	rpchttp "github.com/tendermint/tendermint/rpc/client/http"
)
@@ -59,15 +59,15 @@ func dumpCmdHandler(_ *cobra.Command, args []string) error {
		}
	}

	rpc, err := rpchttp.New(nodeRPCAddr)
	rpc, err := rpchttp.New(nodeRPCAddr, "/websocket")
	if err != nil {
		return fmt.Errorf("failed to create new http client: %w", err)
	}

	home := viper.GetString(cli.HomeFlag)
	conf := config.DefaultConfig()
	conf := cfg.DefaultConfig()
	conf = conf.SetRoot(home)
	config.EnsureRoot(conf.RootDir)
	cfg.EnsureRoot(conf.RootDir)

	dumpDebugData(outDir, conf, rpc)

@@ -79,7 +79,7 @@ func dumpCmdHandler(_ *cobra.Command, args []string) error {
	return nil
}

func dumpDebugData(outDir string, conf *config.Config, rpc *rpchttp.HTTP) {
func dumpDebugData(outDir string, conf *cfg.Config, rpc *rpchttp.HTTP) {
	start := time.Now().UTC()

	tmpDir, err := ioutil.TempDir(outDir, "tendermint_debug_tmp")
@@ -14,7 +14,7 @@ import (
	"github.com/spf13/cobra"
	"github.com/spf13/viper"

	"github.com/tendermint/tendermint/config"
	cfg "github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/libs/cli"
	rpchttp "github.com/tendermint/tendermint/rpc/client/http"
)
@@ -28,7 +28,7 @@ go-routine state, and the node's WAL and config information. This aggregated data
is packaged into a compressed archive.

Example:
	$ tendermint debug kill 34255 /path/to/tm-debug.zip`,
	$ tendermint debug 34255 /path/to/tm-debug.zip`,
	Args: cobra.ExactArgs(2),
	RunE: killCmdHandler,
}
@@ -44,15 +44,15 @@ func killCmdHandler(cmd *cobra.Command, args []string) error {
		return errors.New("invalid output file")
	}

	rpc, err := rpchttp.New(nodeRPCAddr)
	rpc, err := rpchttp.New(nodeRPCAddr, "/websocket")
	if err != nil {
		return fmt.Errorf("failed to create new http client: %w", err)
	}

	home := viper.GetString(cli.HomeFlag)
	conf := config.DefaultConfig()
	conf := cfg.DefaultConfig()
	conf = conf.SetRoot(home)
	config.EnsureRoot(conf.RootDir)
	cfg.EnsureRoot(conf.RootDir)

	// Create a temporary directory which will contain all the state dumps and
	// relevant files and directories that will be compressed into a file.
@@ -79,11 +79,7 @@ func killCmdHandler(cmd *cobra.Command, args []string) error {

	logger.Info("copying node WAL...")
	if err := copyWAL(conf, tmpDir); err != nil {
		if !os.IsNotExist(err) {
			return err
		}

		logger.Info("node WAL does not exist; continuing...")
		return err
	}

	logger.Info("copying node configuration...")
@@ -9,7 +9,7 @@ import (
	"path"
	"path/filepath"

	"github.com/tendermint/tendermint/config"
	cfg "github.com/tendermint/tendermint/config"
	rpchttp "github.com/tendermint/tendermint/rpc/client/http"
)

@@ -48,7 +48,7 @@ func dumpConsensusState(rpc *rpchttp.HTTP, dir, filename string) error {

// copyWAL copies the Tendermint node's WAL file. It returns an error if the
// WAL file cannot be read or copied.
func copyWAL(conf *config.Config, dir string) error {
func copyWAL(conf *cfg.Config, dir string) error {
	walPath := conf.Consensus.WalFile()
	walFile := filepath.Base(walPath)

@@ -6,19 +6,21 @@ import (
	"github.com/spf13/cobra"

	tmjson "github.com/tendermint/tendermint/libs/json"
	"github.com/tendermint/tendermint/types"
	"github.com/tendermint/tendermint/p2p"
)

// GenNodeKeyCmd allows the generation of a node key. It prints JSON-encoded
// NodeKey to the standard output.
var GenNodeKeyCmd = &cobra.Command{
	Use:   "gen-node-key",
	Short: "Generate a new node key",
	RunE:  genNodeKey,
	Use:     "gen-node-key",
	Aliases: []string{"gen_node_key"},
	Short:   "Generate a new node key",
	RunE:    genNodeKey,
	PreRun:  deprecateSnakeCase,
}

func genNodeKey(cmd *cobra.Command, args []string) error {
	nodeKey := types.GenNodeKey()
	nodeKey := p2p.GenNodeKey()

	bz, err := tmjson.Marshal(nodeKey)
	if err != nil {
@@ -13,9 +13,11 @@ import (
// GenValidatorCmd allows the generation of a keypair for a
// validator.
var GenValidatorCmd = &cobra.Command{
	Use:   "gen-validator",
	Short: "Generate new validator keypair",
	RunE:  genValidator,
	Use:     "gen-validator",
	Aliases: []string{"gen_validator"},
	Short:   "Generate new validator keypair",
	RunE:    genValidator,
	PreRun:  deprecateSnakeCase,
}

func init() {
@@ -1,8 +1,6 @@
package commands

import (
	"context"
	"errors"
	"fmt"

	"github.com/spf13/cobra"
@@ -10,19 +8,18 @@ import (
	cfg "github.com/tendermint/tendermint/config"
	tmos "github.com/tendermint/tendermint/libs/os"
	tmrand "github.com/tendermint/tendermint/libs/rand"
	tmtime "github.com/tendermint/tendermint/libs/time"
	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/privval"
	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
	"github.com/tendermint/tendermint/types"
	tmtime "github.com/tendermint/tendermint/types/time"
)

// InitFilesCmd initializes a fresh Tendermint Core instance.
// InitFilesCmd initialises a fresh Tendermint Core instance.
var InitFilesCmd = &cobra.Command{
	Use:       "init [full|validator|seed]",
	Short:     "Initializes a Tendermint node",
	ValidArgs: []string{"full", "validator", "seed"},
	// We allow for zero args so we can throw a more informative error
	Args: cobra.MaximumNArgs(1),
	RunE: initFiles,
	Use:   "init",
	Short: "Initialize Tendermint",
	RunE:  initFiles,
}

var (
@@ -35,47 +32,36 @@ func init() {
}

func initFiles(cmd *cobra.Command, args []string) error {
	if len(args) == 0 {
		return errors.New("must specify a node type: tendermint init [validator|full|seed]")
	}
	config.Mode = args[0]
	return initFilesWithConfig(config)
}

func initFilesWithConfig(config *cfg.Config) error {
	// private validator
	privValKeyFile := config.PrivValidatorKeyFile()
	privValStateFile := config.PrivValidatorStateFile()
	var (
		pv  *privval.FilePV
		err error
	)

	if config.Mode == cfg.ModeValidator {
		// private validator
		privValKeyFile := config.PrivValidator.KeyFile()
		privValStateFile := config.PrivValidator.StateFile()
		if tmos.FileExists(privValKeyFile) {
			pv, err = privval.LoadFilePV(privValKeyFile, privValStateFile)
			if err != nil {
				return err
			}

			logger.Info("Found private validator", "keyFile", privValKeyFile,
				"stateFile", privValStateFile)
		} else {
			pv, err = privval.GenFilePV(privValKeyFile, privValStateFile, keyType)
			if err != nil {
				return err
			}
			pv.Save()
			logger.Info("Generated private validator", "keyFile", privValKeyFile,
				"stateFile", privValStateFile)
	if tmos.FileExists(privValKeyFile) {
		pv = privval.LoadFilePV(privValKeyFile, privValStateFile)
		logger.Info("Found private validator", "keyFile", privValKeyFile,
			"stateFile", privValStateFile)
	} else {
		pv, err = privval.GenFilePV(privValKeyFile, privValStateFile, keyType)
		if err != nil {
			return err
		}
		pv.Save()
		logger.Info("Generated private validator", "keyFile", privValKeyFile,
			"stateFile", privValStateFile)
	}

	nodeKeyFile := config.NodeKeyFile()
	if tmos.FileExists(nodeKeyFile) {
		logger.Info("Found node key", "path", nodeKeyFile)
	} else {
		if _, err := types.LoadOrGenNodeKey(nodeKeyFile); err != nil {
		if _, err := p2p.LoadOrGenNodeKey(nodeKeyFile); err != nil {
			return err
		}
		logger.Info("Generated node key", "path", nodeKeyFile)
@@ -93,26 +79,19 @@ func initFilesWithConfig(config *cfg.Config) error {
		ConsensusParams: types.DefaultConsensusParams(),
	}
	if keyType == "secp256k1" {
		genDoc.ConsensusParams.Validator = types.ValidatorParams{
		genDoc.ConsensusParams.Validator = tmproto.ValidatorParams{
			PubKeyTypes: []string{types.ABCIPubKeyTypeSecp256k1},
		}
	}

	ctx, cancel := context.WithTimeout(context.TODO(), ctxTimeout)
	defer cancel()

	// if this is a validator we add it to genesis
	if pv != nil {
		pubKey, err := pv.GetPubKey(ctx)
		if err != nil {
			return fmt.Errorf("can't get pubkey: %w", err)
		}
		genDoc.Validators = []types.GenesisValidator{{
			Address: pubKey.Address(),
			PubKey:  pubKey,
			Power:   10,
		}}
	pubKey, err := pv.GetPubKey()
	if err != nil {
		return fmt.Errorf("can't get pubkey: %w", err)
	}
	genDoc.Validators = []types.GenesisValidator{{
		Address: pubKey.Address(),
		PubKey:  pubKey,
		Power:   10,
	}}

	if err := genDoc.SaveAs(genFile); err != nil {
		return err
@@ -120,9 +99,5 @@ func initFilesWithConfig(config *cfg.Config) error {
		logger.Info("Generated genesis file", "path", genFile)
	}

	// write config file
	cfg.WriteConfigFile(config.RootDir, config)
	logger.Info("Generated config", "mode", config.Mode)

	return nil
}
@@ -1,63 +0,0 @@
package commands

import (
	"context"
	"os"
	"os/signal"
	"syscall"

	"github.com/spf13/cobra"

	"github.com/tendermint/tendermint/internal/inspect"
)

// InspectCmd is the command for starting an inspect server.
var InspectCmd = &cobra.Command{
	Use:   "inspect",
	Short: "Run an inspect server for investigating Tendermint state",
	Long: `
inspect runs a subset of Tendermint's RPC endpoints that are useful for debugging
issues with Tendermint.

When the Tendermint consensus engine detects inconsistent state, it will crash the
tendermint process. Tendermint will not start up while in this inconsistent state.
The inspect command can be used to query the block and state store using Tendermint
RPC calls to debug issues of inconsistent state.
`,

	RunE: runInspect,
}

func init() {
	InspectCmd.Flags().
		String("rpc.laddr",
			config.RPC.ListenAddress, "RPC listener address. Port required")
	InspectCmd.Flags().
		String("db-backend",
			config.DBBackend, "database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb")
	InspectCmd.Flags().
		String("db-dir", config.DBPath, "database directory")
}

func runInspect(cmd *cobra.Command, args []string) error {
	ctx, cancel := context.WithCancel(cmd.Context())
	defer cancel()

	c := make(chan os.Signal, 1)
	signal.Notify(c, syscall.SIGTERM, syscall.SIGINT)
	go func() {
		<-c
		cancel()
	}()

	ins, err := inspect.NewFromConfig(logger, config)
	if err != nil {
		return err
	}

	logger.Info("starting inspect server")
	if err := ins.Run(ctx); err != nil {
		return err
	}
	return nil
}
@@ -1,64 +0,0 @@
package commands

import (
	"context"
	"fmt"

	"github.com/spf13/cobra"
	cfg "github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/scripts/keymigrate"
)

func MakeKeyMigrateCommand() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "key-migrate",
		Short: "Run Database key migration",
		RunE: func(cmd *cobra.Command, args []string) error {
			ctx, cancel := context.WithCancel(cmd.Context())
			defer cancel()

			contexts := []string{
				// this is ordered to put the
				// (presumably) biggest/most important
				// subsets first.
				"blockstore",
				"state",
				"peerstore",
				"tx_index",
				"evidence",
				"light",
			}

			for idx, dbctx := range contexts {
				logger.Info("beginning a key migration",
					"dbctx", dbctx,
					"num", idx+1,
					"total", len(contexts),
				)

				db, err := cfg.DefaultDBProvider(&cfg.DBContext{
					ID:     dbctx,
					Config: config,
				})

				if err != nil {
					return fmt.Errorf("constructing database handle: %w", err)
				}

				if err = keymigrate.Migrate(ctx, db); err != nil {
					return fmt.Errorf("running migration for context %q: %w",
						dbctx, err)
				}
			}

			logger.Info("completed database migration successfully")

			return nil
		},
	}

	// allow database info to be overridden via cli
	addDBFlags(cmd)

	return cmd
}
@@ -1,18 +1,22 @@
package commands

import (
	"bufio"
	"context"
	"errors"
	"fmt"
	"net/http"
	"os"
	"path/filepath"
	"regexp"
	"strings"
	"time"

	"github.com/spf13/cobra"

	dbm "github.com/tendermint/tm-db"

	"github.com/tendermint/tendermint/crypto/merkle"
	"github.com/tendermint/tendermint/libs/log"
	tmmath "github.com/tendermint/tendermint/libs/math"
	tmos "github.com/tendermint/tendermint/libs/os"
@@ -20,6 +24,7 @@ import (
	lproxy "github.com/tendermint/tendermint/light/proxy"
	lrpc "github.com/tendermint/tendermint/light/rpc"
	dbs "github.com/tendermint/tendermint/light/store/db"
	rpchttp "github.com/tendermint/tendermint/rpc/client/http"
	rpcserver "github.com/tendermint/tendermint/rpc/jsonrpc/server"
)

@@ -65,8 +70,7 @@ var (
	trustedHash   []byte
	trustLevelStr string

	logLevel  string
	logFormat string
	verbose bool

	primaryKey   = []byte("primary")
	witnessesKey = []byte("witnesses")
@@ -90,8 +94,7 @@ func init() {
		"trusting period that headers can be verified within. Should be significantly less than the unbonding period")
	LightCmd.Flags().Int64Var(&trustedHeight, "height", 1, "Trusted header's height")
	LightCmd.Flags().BytesHexVar(&trustedHash, "hash", []byte{}, "Trusted header's hash")
	LightCmd.Flags().StringVar(&logLevel, "log-level", log.LogLevelInfo, "The logging level (debug|info|warn|error|fatal)")
	LightCmd.Flags().StringVar(&logFormat, "log-format", log.LogFormatPlain, "The logging format (text|json)")
	LightCmd.Flags().BoolVar(&verbose, "verbose", false, "Verbose output")
	LightCmd.Flags().StringVar(&trustLevelStr, "trust-level", "1/3",
		"trust level. Must be between 1/3 and 3/3",
	)
@@ -101,10 +104,15 @@ func init() {
}

func runProxy(cmd *cobra.Command, args []string) error {
	logger, err := log.NewDefaultLogger(logFormat, logLevel, false)
	if err != nil {
		return err
	// Initialise logger.
	logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
	var option log.Option
	if verbose {
		option, _ = log.AllowLevel("debug")
	} else {
		option, _ = log.AllowLevel("info")
	}
	logger = log.NewFilter(logger, option)

	chainID = args[0]
	logger.Info("Creating client...", "chainID", chainID)
@@ -143,7 +151,25 @@ func runProxy(cmd *cobra.Command, args []string) error {
		return fmt.Errorf("can't parse trust level: %w", err)
	}

	options := []light.Option{light.Logger(logger)}
	options := []light.Option{
		light.Logger(logger),
		light.ConfirmationFunction(func(action string) bool {
			fmt.Println(action)
			scanner := bufio.NewScanner(os.Stdin)
			for {
				scanner.Scan()
				response := scanner.Text()
				switch response {
				case "y", "Y":
					return true
				case "n", "N":
					return false
				default:
					fmt.Println("please input 'Y' or 'n' and press ENTER")
				}
			}
		}),
	}

	if sequential {
		options = append(options, light.SequentialVerification())
@@ -151,25 +177,40 @@ func runProxy(cmd *cobra.Command, args []string) error {
		options = append(options, light.SkippingVerification(trustLevel))
	}

	// Initiate the light client. If the trusted store already has blocks in it, this
	// will be used else we use the trusted options.
	c, err := light.NewHTTPClient(
		context.Background(),
		chainID,
		light.TrustOptions{
			Period: trustingPeriod,
			Height: trustedHeight,
			Hash:   trustedHash,
		},
		primaryAddr,
		witnessesAddrs,
		dbs.New(db),
		options...,
	)
	var c *light.Client
	if trustedHeight > 0 && len(trustedHash) > 0 { // fresh installation
		c, err = light.NewHTTPClient(
			context.Background(),
			chainID,
			light.TrustOptions{
				Period: trustingPeriod,
				Height: trustedHeight,
				Hash:   trustedHash,
			},
			primaryAddr,
			witnessesAddrs,
			dbs.New(db),
			options...,
		)
	} else { // continue from latest state
		c, err = light.NewHTTPClientFromTrustedStore(
			chainID,
			trustingPeriod,
			primaryAddr,
			witnessesAddrs,
			dbs.New(db),
			options...,
		)
	}
	if err != nil {
		return err
	}

	rpcClient, err := rpchttp.New(primaryAddr, "/websocket")
	if err != nil {
		return fmt.Errorf("http client for %s: %w", primaryAddr, err)
	}

	cfg := rpcserver.DefaultConfig()
	cfg.MaxBodyBytes = config.RPC.MaxBodyBytes
	cfg.MaxHeaderBytes = config.RPC.MaxHeaderBytes
@@ -181,11 +222,12 @@ func runProxy(cmd *cobra.Command, args []string) error {
		cfg.WriteTimeout = config.RPC.TimeoutBroadcastTxCommit + 1*time.Second
	}

	p, err := lproxy.NewProxy(c, listenAddr, primaryAddr, cfg, logger, lrpc.KeyPathFn(lrpc.DefaultMerkleKeyPathFn()))
	if err != nil {
		return err
	p := lproxy.Proxy{
		Addr:   listenAddr,
		Config: cfg,
		Client: lrpc.NewClient(rpcClient, c, lrpc.KeyPathFn(defaultMerkleKeyPathFn())),
		Logger: logger,
	}

	// Stop upon receiving SIGTERM or CTRL-C.
	tmos.TrapSignal(logger, func() {
		p.Listener.Close()
@@ -224,3 +266,21 @@ func saveProviders(db dbm.DB, primaryAddr, witnessesAddrs string) error {
	}
	return nil
}

func defaultMerkleKeyPathFn() lrpc.KeyPathFunc {
	// regexp for extracting store name from /abci_query path
	storeNameRegexp := regexp.MustCompile(`\/store\/(.+)\/key`)

	return func(path string, key []byte) (merkle.KeyPath, error) {
		matches := storeNameRegexp.FindStringSubmatch(path)
		if len(matches) != 2 {
			return nil, fmt.Errorf("can't find store name in %s using %s", path, storeNameRegexp)
		}
		storeName := matches[1]

		kp := merkle.KeyPath{}
		kp = kp.AppendKey([]byte(storeName), merkle.KeyEncodingURL)
		kp = kp.AppendKey(key, merkle.KeyEncodingURL)
		return kp, nil
	}
}
Some files were not shown because too many files have changed in this diff