Mirror of https://github.com/tendermint/tendermint.git, synced 2026-01-12 07:42:48 +00:00.

Compare commits: rfc-e2e-te...docs-stagi

1 commit

| Author | SHA1 | Date |
|---|---|---|
|  | 5182ffee25 |  |

.github/CODEOWNERS (23 changes, vendored)
@@ -1,10 +1,25 @@
# CODEOWNERS: https://help.github.com/articles/about-codeowners/

# Everything goes through the following "global owners" by default.
# Everything goes through the following "global owners" by default.
# Unless a later match takes precedence, these three will be
# requested for review when someone opens a PR.
# requested for review when someone opens a PR.
# Note that the last matching pattern takes precedence, so
# global owners are only requested if there isn't a more specific
# codeowner specified below. For this reason, the global codeowners
# codeowner specified below. For this reason, the global codeowners
# are often repeated in package-level definitions.
* @alexanderbez @ebuchman @cmwaters @tessr @tychoish @williambanfield @creachadair
* @alexanderbez @ebuchman @erikgrinaker @melekes @tessr

# Overrides for tooling packages
.github/ @marbar3778 @alexanderbez @ebuchman @erikgrinaker @melekes @tessr
DOCKER/ @marbar3778 @alexanderbez @ebuchman @erikgrinaker @melekes @tessr

# Overrides for core Tendermint packages
abci/ @marbar3778 @alexanderbez @ebuchman @erikgrinaker @melekes @tessr
evidence/ @cmwaters @ebuchman @melekes @tessr
light/ @cmwaters @melekes @ebuchman @tessr

# Overrides for docs
*.md @marbar3778 @alexanderbez @ebuchman @erikgrinaker @melekes @tessr
docs/ @marbar3778 @alexanderbez @ebuchman @erikgrinaker @melekes @tessr

.github/PULL_REQUEST_TEMPLATE.md (8 changes, vendored)
@@ -1,7 +1,7 @@
Please add a description of the changes that this PR introduces and the files that
are the most critical to review.
## Description

If this PR fixes an open Issue, please include "Closes #XXX" (where "XXX" is the Issue number)
so that GitHub will automatically close the Issue when this PR is merged.
_Please add a description of the changes that this PR introduces and the files that
are the most critical to review._

Closes: #XXX

.github/codecov.yml (14 changes, vendored)
@@ -5,14 +5,19 @@ coverage:
status:
project:
default:
threshold: 20%
patch: off
threshold: 1%
patch: on
changes: off

github_checks:
annotations: false

comment: false
comment:
layout: "diff, files"
behavior: default
require_changes: no
require_base: no
require_head: yes

ignore:
- "docs"
@@ -20,6 +25,3 @@ ignore:
- "scripts"
- "**/*.pb.go"
- "libs/pubsub/query/query.peg.go"
- "*.md"
- "*.rst"
- "*.yml"

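Taking the second line of each changed pair above as the version on the target branch (an assumption; the mirror strips the +/- markers), the updated `.github/codecov.yml` tightens the project threshold from 20% to 1%, turns patch status back on, and swaps the blanket `comment: false` for a configured comment block. A sketch of how that section would read, assembled from the added lines with inferred indentation:

```yaml
coverage:
  status:
    project:
      default:
        threshold: 1%   # fail the status check if project coverage drops more than 1%
    patch: on           # also report coverage for just the lines changed in a PR
    changes: off

github_checks:
  annotations: false    # keep inline PR annotations off

comment:
  layout: "diff, files" # PR comment shows a coverage diff plus per-file detail
  behavior: default
  require_changes: no
  require_base: no
  require_head: yes     # only comment once the head report has been uploaded
```
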
.github/dependabot.yml (1 change, vendored)
@@ -23,5 +23,6 @@ updates:
reviewers:
- melekes
- tessr
- erikgrinaker
labels:
- T:dependencies

.github/mergify.yml (8 changes, vendored)
@@ -8,11 +8,3 @@ pull_request_rules:
method: squash
strict: true
commit_message: title+body
- name: backport patches to v0.34.x branch
conditions:
- base=master
- label=S:backport-to-v0.34.x
actions:
backport:
branches:
- v0.34.x

.github/workflows/coverage.yml (28 changes, vendored)
@@ -2,9 +2,6 @@ name: Test Coverage
on:
pull_request:
push:
paths:
- "**.go"
- "!test/"
branches:
- master
- release/**
@@ -13,7 +10,7 @@ jobs:
split-test-files:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2.3.4
- uses: actions/checkout@v2
- name: Create a file with all the pkgs
run: go list ./... > pkgs.txt
- name: Split pkgs into 4 files
@@ -47,13 +44,12 @@ jobs:
steps:
- uses: actions/setup-go@v2
with:
go-version: "1.16"
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
go-version: "1.15"
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
with:
PATTERNS: |
**/**.go
"!test/"
go.mod
go.sum
- name: install
@@ -70,13 +66,12 @@ jobs:
steps:
- uses: actions/setup-go@v2
with:
go-version: "1.16"
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
go-version: "1.15"
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
with:
PATTERNS: |
**/**.go
"!test/"
go.mod
go.sum
- uses: actions/download-artifact@v2
@@ -86,7 +81,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v2
with:
go-version: 1.16
go-version: 1.15
- name: test & coverage report creation
run: |
cat pkgs.txt.part.${{ matrix.part }} | xargs go test -mod=readonly -timeout 8m -race -coverprofile=${{ matrix.part }}profile.out -covermode=atomic
@@ -100,12 +95,11 @@ jobs:
runs-on: ubuntu-latest
needs: tests
steps:
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
with:
PATTERNS: |
**/**.go
"!test/"
go.mod
go.sum
- uses: actions/download-artifact@v2
@@ -127,7 +121,7 @@ jobs:
- run: |
cat ./*profile.out | grep -v "mode: atomic" >> coverage.txt
if: env.GIT_DIFF
- uses: codecov/codecov-action@v2.1.0
- uses: codecov/codecov-action@v1.2.1
with:
file: ./coverage.txt
if: env.GIT_DIFF

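The recurring change across these jobs is the same three-step preamble, with one side of the diff pinning exact action versions (`checkout@v2.3.4`, `get-diff-action@v5`, Go 1.16) and the other tracking broader tags (`@v2`, `@v4`, Go 1.15). A sketch of that preamble, reassembled from the lines above with inferred indentation:

```yaml
steps:
  - uses: actions/setup-go@v2
    with:
      go-version: "1.16"            # the other side of the diff uses "1.15"
  - uses: actions/checkout@v2.3.4   # pinned patch release; other side: @v2
  - uses: technote-space/get-diff-action@v5   # other side: @v4
    with:
      PATTERNS: |                   # sets GIT_DIFF only when Go or module files change
        **/**.go
        "!test/"
        go.mod
        go.sum
  # later steps in each job guard on the diff, e.g.:
  # if: env.GIT_DIFF
```
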
.github/workflows/docker.yml (8 changes, vendored)
@@ -14,7 +14,7 @@ jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2.3.4
- uses: actions/checkout@master
- name: Prepare
id: prep
run: |
@@ -40,17 +40,17 @@ jobs:
platforms: all

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1.6.0
uses: docker/setup-buildx-action@v1

- name: Login to DockerHub
if: ${{ github.event_name != 'pull_request' }}
uses: docker/login-action@v1.10.0
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}

- name: Publish to Docker Hub
uses: docker/build-push-action@v2.7.0
uses: docker/build-push-action@v2
with:
context: .
file: ./DOCKER/Dockerfile

.github/workflows/docs.yml (32 changes, vendored, new file)
@@ -0,0 +1,32 @@
name: Documentation
# This job builds and deploys documentation to github pages.
# It runs on every push to master, and can be manually triggered.
on:
workflow_dispatch: # allow running workflow manually
push:
branches:
- master

jobs:
build-and-deploy:
runs-on: ubuntu-latest
container:
image: tendermintdev/docker-website-deployment
steps:
- name: Checkout 🛎️
uses: actions/checkout@v2.3.1
with:
persist-credentials: false
fetch-depth: 0

- name: Install and Build 🔧
run: |
apk add rsync
make build-docs

- name: Deploy 🚀
uses: JamesIves/github-pages-deploy-action@3.7.1
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
BRANCH: gh-pages
FOLDER: ~/output

.github/workflows/e2e-nightly-master.yml (74 changes, vendored, deleted)
@@ -1,74 +0,0 @@
# Runs randomly generated E2E testnets nightly on master

# !! If you change something in this file, you probably want
# to update the e2e-nightly-34x workflow as well!

name: e2e-nightly-master
on:
workflow_dispatch: # allow running workflow manually
schedule:
- cron: '0 2 * * *'

jobs:
e2e-nightly-test-2:
# Run parallel jobs for the listed testnet groups (must match the
# ./build/generator -g flag)
strategy:
fail-fast: false
matrix:
p2p: ['legacy', 'new', 'hybrid']
group: ['00', '01']
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/setup-go@v2
with:
go-version: '1.16'

- uses: actions/checkout@v2.3.4

- name: Build
working-directory: test/e2e
# Run make jobs in parallel, since we can't run steps in parallel.
run: make -j2 docker generator runner tests

- name: Generate testnets
working-directory: test/e2e
# When changing -g, also change the matrix groups above
run: ./build/generator -g 2 -d networks/nightly/${{ matrix.p2p }} -p ${{ matrix.p2p }}

- name: Run ${{ matrix.p2p }} p2p testnets in group ${{ matrix.group }}
working-directory: test/e2e
run: ./run-multiple.sh networks/nightly/${{ matrix.p2p }}/*-group${{ matrix.group }}-*.toml

e2e-nightly-fail-2:
needs: e2e-nightly-test-2
if: ${{ failure() }}
runs-on: ubuntu-latest
steps:
- name: Notify Slack on failure
uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
env:
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACK_CHANNEL: tendermint-internal
SLACK_USERNAME: Nightly E2E Tests
SLACK_ICON_EMOJI: ':skull:'
SLACK_COLOR: danger
SLACK_MESSAGE: Nightly E2E tests failed on master
SLACK_FOOTER: ''

e2e-nightly-success: # may turn this off once they seem to pass consistently
needs: e2e-nightly-test-2
if: ${{ success() }}
runs-on: ubuntu-latest
steps:
- name: Notify Slack on success
uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
env:
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACK_CHANNEL: tendermint-internal
SLACK_USERNAME: Nightly E2E Tests
SLACK_ICON_EMOJI: ':white_check_mark:'
SLACK_COLOR: good
SLACK_MESSAGE: Nightly E2E tests passed on master
SLACK_FOOTER: ''

@@ -1,12 +1,7 @@
# Runs randomly generated E2E testnets nightly
# on the 0.34.x release branch

# !! If you change something in this file, you probably want
# to update the e2e-nightly-master workflow as well!

name: e2e-nightly-34x
# Runs randomly generated E2E testnets nightly.
name: e2e-nightly
on:
workflow_dispatch: # allow running workflow manually, in theory
workflow_dispatch: # allow running workflow manually
schedule:
- cron: '0 2 * * *'

@@ -18,16 +13,18 @@ jobs:
fail-fast: false
matrix:
group: ['00', '01', '02', '03']
# todo: expand to multiple versions after 0.35 release
branch: ['master', 'v0.34.x']
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/setup-go@v2
with:
go-version: '1.16'
go-version: '1.15'

- uses: actions/checkout@v2.3.4
- uses: actions/checkout@v2
with:
ref: 'v0.34.x'
ref: ${{ matrix.branch}}

- name: Build
working-directory: test/e2e
@@ -49,28 +46,12 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Notify Slack on failure
uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
uses: rtCamp/action-slack-notify@ae4223259071871559b6e9d08b24a63d71b3f0c0
env:
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACK_CHANNEL: tendermint-internal
SLACK_USERNAME: Nightly E2E Tests
SLACK_ICON_EMOJI: ':skull:'
SLACK_COLOR: danger
SLACK_MESSAGE: Nightly E2E tests failed on v0.34.x
SLACK_FOOTER: ''

e2e-nightly-success: # may turn this off once they seem to pass consistently
needs: e2e-nightly-test
if: ${{ success() }}
runs-on: ubuntu-latest
steps:
- name: Notify Slack on success
uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
env:
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACK_CHANNEL: tendermint-internal
SLACK_USERNAME: Nightly E2E Tests
SLACK_ICON_EMOJI: ':white_check_mark:'
SLACK_COLOR: good
SLACK_MESSAGE: Nightly E2E tests passed on v0.34.x
SLACK_MESSAGE: Nightly E2E tests failed
SLACK_FOOTER: ''

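On one side of this diff the nightly job hardcodes `ref: 'v0.34.x'`; on the other it adds a `branch` dimension to the matrix, so a single workflow exercises every group on every listed branch. A minimal sketch of how that matrix expands (names taken from the hunk above; 4 groups x 2 branches yields 8 parallel jobs):

```yaml
jobs:
  e2e-nightly-test:
    strategy:
      fail-fast: false                 # let other combinations finish after a failure
      matrix:
        group: ['00', '01', '02', '03']
        branch: ['master', 'v0.34.x']  # 4 groups x 2 branches = 8 jobs
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
        with:
          ref: ${{ matrix.branch }}    # each job checks out its own branch
```
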
.github/workflows/e2e.yml (8 changes, vendored)
@@ -16,9 +16,9 @@ jobs:
steps:
- uses: actions/setup-go@v2
with:
go-version: '1.16'
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
go-version: '1.15'
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
with:
PATTERNS: |
**/**.go
@@ -28,7 +28,7 @@ jobs:
- name: Build
working-directory: test/e2e
# Run two make jobs in parallel, since we can't run steps in parallel.
run: make -j2 docker runner tests
run: make -j2 docker runner
if: "env.GIT_DIFF != ''"

- name: Run CI testnet

.github/workflows/fuzz-nightly.yml (39 changes, vendored)
@@ -4,10 +4,6 @@ on:
workflow_dispatch: # allow running workflow manually
schedule:
- cron: '0 3 * * *'
pull_request:
branches: [master]
paths:
- "test/fuzz/**/*.go"

jobs:
fuzz-nightly-test:
@@ -15,22 +11,17 @@ jobs:
steps:
- uses: actions/setup-go@v2
with:
go-version: '1.16'
go-version: '1.15'

- uses: actions/checkout@v2.3.4
- uses: actions/checkout@v2

- name: Install go-fuzz
working-directory: test/fuzz
run: go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-build

- name: Fuzz mempool-v1
- name: Fuzz mempool
working-directory: test/fuzz
run: timeout -s SIGINT --preserve-status 10m make fuzz-mempool-v1
continue-on-error: true

- name: Fuzz mempool-v0
working-directory: test/fuzz
run: timeout -s SIGINT --preserve-status 10m make fuzz-mempool-v0
run: timeout -s SIGINT --preserve-status 10m make fuzz-mempool
continue-on-error: true

- name: Fuzz p2p-addrbook
@@ -53,35 +44,21 @@ jobs:
run: timeout -s SIGINT --preserve-status 10m make fuzz-rpc-server
continue-on-error: true

- name: Archive crashers
uses: actions/upload-artifact@v2
with:
name: crashers
path: test/fuzz/**/crashers
retention-days: 3

- name: Archive suppressions
uses: actions/upload-artifact@v2
with:
name: suppressions
path: test/fuzz/**/suppressions
retention-days: 3

- name: Set crashers count
working-directory: test/fuzz
run: echo "::set-output name=count::$(find . -type d -name 'crashers' | xargs -I % sh -c 'ls % | wc -l' | awk '{total += $1} END {print total}')"
run: echo "::set-output name=crashers-count::$(find . -type d -name "crashers" | xargs -I % sh -c 'ls % | wc -l' | awk '{total += $1} END {print total}')"
id: set-crashers-count

outputs:
crashers-count: ${{ steps.set-crashers-count.outputs.count }}
crashers_count: ${{ steps.set-crashers-count.outputs.crashers-count }}

fuzz-nightly-fail:
needs: fuzz-nightly-test
if: ${{ needs.fuzz-nightly-test.outputs.crashers-count != 0 }}
if: ${{ needs.set-crashers-count.outputs.crashers-count != 0 }}
runs-on: ubuntu-latest
steps:
- name: Notify Slack if any crashers
uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
uses: rtCamp/action-slack-notify@ae4223259071871559b6e9d08b24a63d71b3f0c0
env:
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACK_CHANNEL: tendermint-internal

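The two sides of the crashers hunk disagree on names: the step output is set as `count` in one version and `crashers-count` in the other, while the job output and the downstream `if:` reference yet other spellings (`crashers-count` vs `crashers_count`, `needs.fuzz-nightly-test` vs `needs.set-crashers-count`). A sketch of one internally consistent wiring, for orientation only:

```yaml
jobs:
  fuzz-nightly-test:
    steps:
      # ... fuzz steps, then count files under every crashers/ directory;
      # a non-zero total means go-fuzz produced crashing inputs.
      - name: Set crashers count
        working-directory: test/fuzz
        run: echo "::set-output name=count::$(find . -type d -name 'crashers' | xargs -I % sh -c 'ls % | wc -l' | awk '{total += $1} END {print total}')"
        id: set-crashers-count
    outputs:
      crashers-count: ${{ steps.set-crashers-count.outputs.count }}

  fuzz-nightly-fail:
    needs: fuzz-nightly-test
    # the names must line up end to end for this gate to fire
    if: ${{ needs.fuzz-nightly-test.outputs.crashers-count != 0 }}
```
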
.github/workflows/janitor.yml (16 changes, vendored, deleted)
@@ -1,16 +0,0 @@
name: Janitor
# Janitor cleans up previous runs of various workflows
# To add more workflows to cancel visit https://api.github.com/repos/tendermint/tendermint/actions/workflows and find the actions name
on:
pull_request:

jobs:
cancel:
name: "Cancel Previous Runs"
runs-on: ubuntu-latest
timeout-minutes: 3
steps:
- uses: styfle/cancel-workflow-action@0.9.1
with:
workflow_id: 1041851,1401230,2837803
access_token: ${{ github.token }}

.github/workflows/jepsen.yml (65 changes, vendored, deleted)
@@ -1,65 +0,0 @@
# Runs a Jepsen test - cas-register (no nemesis) by default.
# See inputs for various options.
# Repo: https://github.com/tendermint/jepsen
#
# If you want to test a new breaking version of Tendermint, you'll need to
# update the Merkleeyes ABCI app and 'merkleeyesUrl' input accordingly. You can
# upload a new tarball to
# https://github.com/tendermint/jepsen/releases/tag/0.2.1.
#
# Manually triggered.
name: jepsen
on:
workflow_dispatch:
inputs:
workload:
description: 'Test workload to run: (cas-register | set)'
required: true
default: 'cas-register'
nemesis:
description: 'Nemesis to use: (none | clocks | single-partitions | half-partitions | ring-partitions | split-dup-validators | peekaboo-dup-validators | changing-validators | crash | truncate-tendermint | truncate-merkleeyes)'
required: true
default: 'none'
dupOrSuperByzValidators:
description: '"--dup-validators" (multiple validators share the same key) and(or) "--super-byzantine-validators" (byzantine validators have just shy of 2/3 the voting weight)'
required: false
default: ''
concurrency:
description: 'How many workers should we run? Must be an integer and >= 10, optionally followed by n (e.g. 3n) to multiply by the number of nodes.'
required: true
default: 10
timeLimit:
description: 'Excluding setup and teardown, how long should a test run for, in seconds?'
required: true
default: 60
tendermintUrl:
description: 'Where to grab the Tendermint tarball (w/ linux/amd64 binary)'
required: true
default: 'https://github.com/melekes/katas/releases/download/0.2.0/tendermint.tar.gz'
merkleeyesUrl:
description: 'Where to grab the Merkleeyes tarball (w/ linux/amd64 binary)'
required: true
default: 'https://github.com/tendermint/jepsen/releases/download/0.2.1/merkleeyes_0.1.7.tar.gz'

jobs:
jepsen-test:
runs-on: ubuntu-latest
steps:
- name: Checkout the Jepsen repository
uses: actions/checkout@v2.3.4
with:
repository: 'tendermint/jepsen'

- name: Start a Jepsen cluster in background
working-directory: docker
run: ./bin/up --daemon

- name: Run the test
run: docker exec -i jepsen-control bash -c 'source /root/.bashrc; cd /jepsen/tendermint; lein run test --nemesis ${{ github.event.inputs.nemesis }} --workload ${{ github.event.inputs.workload }} --concurrency ${{ github.event.inputs.concurrency }} --tendermint-url ${{ github.event.inputs.tendermintUrl }} --merkleeyes-url ${{ github.event.inputs.merkleeyesUrl }} --time-limit ${{ github.event.inputs.timeLimit }} ${{ github.event.inputs.dupOrSuperByzValidators }}'

- name: Archive results
uses: actions/upload-artifact@v2
with:
name: results
path: tendermint/store/latest
retention-days: 3

.github/workflows/linkchecker.yml (4 changes, vendored)
@@ -6,7 +6,7 @@ jobs:
markdown-link-check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2.3.4
- uses: gaurav-nelson/github-action-markdown-link-check@1.0.13
- uses: actions/checkout@master
- uses: gaurav-nelson/github-action-markdown-link-check@1.0.12
with:
folder-path: "docs"

@@ -13,17 +13,17 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 8
steps:
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
with:
PATTERNS: |
**/**.go
go.mod
go.sum
- uses: golangci/golangci-lint-action@v2.5.2
- uses: golangci/golangci-lint-action@v2.3.0
with:
# Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version.
version: v1.38
version: v1.31
args: --timeout 10m
github-token: ${{ secrets.github_token }}
if: env.GIT_DIFF

.github/workflows/linter.yml (5 changes, vendored)
@@ -11,7 +11,6 @@ on:
branches: [master]
paths:
- "**.md"
- "**.yml"

jobs:
build:
@@ -19,7 +18,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout Code
uses: actions/checkout@v2.3.4
uses: actions/checkout@v2
- name: Lint Code Base
uses: docker://github/super-linter:v3
env:
@@ -28,5 +27,5 @@ jobs:
DEFAULT_BRANCH: master
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
VALIDATE_MD: true
VALIDATE_OPENAPI: true
VALIDATE_OPAENAPI: true
VALIDATE_YAML: true

.github/workflows/proto-docker.yml (8 changes, vendored)
@@ -16,7 +16,7 @@ jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2.3.4
- uses: actions/checkout@master
- name: Prepare
id: prep
run: |
@@ -34,16 +34,16 @@ jobs:
echo ::set-output name=tags::${TAGS}

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1.6.0
uses: docker/setup-buildx-action@v1

- name: Login to DockerHub
uses: docker/login-action@v1.10.0
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}

- name: Publish to Docker Hub
uses: docker/build-push-action@v2.7.0
uses: docker/build-push-action@v2
with:
context: ./tools/proto
file: ./tools/proto/Dockerfile

.github/workflows/proto.yml (4 changes, vendored)
@@ -11,13 +11,13 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 4
steps:
- uses: actions/checkout@v2.3.4
- uses: actions/checkout@master
- name: lint
run: make proto-lint
proto-breakage:
runs-on: ubuntu-latest
timeout-minutes: 4
steps:
- uses: actions/checkout@v2.3.4
- uses: actions/checkout@master
- name: check-breakage
run: make proto-check-breaking-ci

.github/workflows/release.yml (11 changes, vendored)
@@ -2,7 +2,7 @@ name: "Release"

on:
push:
branches:
branches:
- "RC[0-9]/**"
tags:
- "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10
@@ -12,13 +12,16 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2.3.4
uses: actions/checkout@v2
with:
fetch-depth: 0

- uses: actions/setup-go@v2
with:
go-version: '1.16'
go-version: '1.15'

- run: echo https://github.com/tendermint/tendermint/blob/${GITHUB_REF#refs/tags/}/CHANGELOG.md#${GITHUB_REF#refs/tags/} > ../release_notes.md
if: startsWith(github.ref, 'refs/tags/')

- name: Build
uses: goreleaser/goreleaser-action@v2
@@ -32,6 +35,6 @@ jobs:
if: startsWith(github.ref, 'refs/tags/')
with:
version: latest
args: release --rm-dist
args: release --rm-dist --release-notes=../release_notes.md
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

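One side of this diff writes a pointer to the tag's changelog section into a file before GoReleaser runs, then hands that file to `--release-notes`. Both steps appear verbatim in the hunk above; only the indentation is inferred:

```yaml
steps:
  # ${GITHUB_REF#refs/tags/} strips the "refs/tags/" prefix, leaving e.g. "v0.35.0",
  # so the file holds a link to that tag's CHANGELOG section.
  - run: echo https://github.com/tendermint/tendermint/blob/${GITHUB_REF#refs/tags/}/CHANGELOG.md#${GITHUB_REF#refs/tags/} > ../release_notes.md
    if: startsWith(github.ref, 'refs/tags/')

  - name: Build
    uses: goreleaser/goreleaser-action@v2
    if: startsWith(github.ref, 'refs/tags/')
    with:
      version: latest
      args: release --rm-dist --release-notes=../release_notes.md
    env:
      GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
```
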
.github/workflows/stale.yml (8 changes, vendored)
@@ -7,14 +7,12 @@ jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v4
- uses: actions/stale@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
stale-pr-message: "This pull request has been automatically marked as stale because it has not had
recent activity. It will be closed if no further activity occurs. Thank you
for your contributions."
days-before-stale: -1
days-before-close: -1
days-before-pr-stale: 10
days-before-pr-close: 4
days-before-stale: 10
days-before-close: 4
exempt-pr-labels: "S:wip"

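`actions/stale` keeps separate timers for issues (`days-before-stale`/`days-before-close`) and for pull requests (`days-before-pr-stale`/`days-before-pr-close`); a value of -1 disables a timer. Read together, the PR-specific settings in the hunk above mean one side of this diff marks only pull requests. A sketch with inferred indentation:

```yaml
- uses: actions/stale@v4
  with:
    repo-token: ${{ secrets.GITHUB_TOKEN }}
    days-before-stale: -1      # -1 disables the generic (issue) timers...
    days-before-close: -1
    days-before-pr-stale: 10   # ...so only PRs go stale, after 10 quiet days
    days-before-pr-close: 4    # and close 4 days later if still inactive
    exempt-pr-labels: "S:wip"  # PRs labeled S:wip are never marked stale
```
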
.github/workflows/tests.yml (70 changes, vendored)
@@ -10,6 +10,14 @@ on:
- release/**

jobs:
cleanup-runs:
runs-on: ubuntu-latest
steps:
- uses: rokroskar/workflow-run-cleanup-action@master
env:
GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
if: "!startsWith(github.ref, 'refs/tags/') && github.ref != 'refs/heads/master'"

build:
name: Build
runs-on: ubuntu-latest
@@ -17,9 +25,9 @@ jobs:
steps:
- uses: actions/setup-go@v2
with:
go-version: "1.16"
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
go-version: "1.15"
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
with:
PATTERNS: |
**/**.go
@@ -28,7 +36,7 @@ jobs:
- name: install
run: make install install_abci
if: "env.GIT_DIFF != ''"
- uses: actions/cache@v2.1.6
- uses: actions/cache@v2.1.3
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
@@ -36,12 +44,44 @@ jobs:
${{ runner.os }}-go-
if: env.GIT_DIFF
# Cache binaries for use by other jobs
- uses: actions/cache@v2.1.6
- uses: actions/cache@v2.1.3
with:
path: ~/go/bin
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
if: env.GIT_DIFF

test_abci_apps:
runs-on: ubuntu-latest
needs: build
timeout-minutes: 5
steps:
- uses: actions/setup-go@v2
with:
go-version: "1.15"
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
with:
PATTERNS: |
**/**.go
go.mod
go.sum
- uses: actions/cache@v2.1.3
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
if: env.GIT_DIFF
- uses: actions/cache@v2.1.3
with:
path: ~/go/bin
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
if: env.GIT_DIFF
- name: test_abci_apps
run: abci/tests/test_app/test.sh
shell: bash
if: env.GIT_DIFF

test_abci_cli:
runs-on: ubuntu-latest
needs: build
@@ -49,22 +89,22 @@ jobs:
steps:
- uses: actions/setup-go@v2
with:
go-version: "1.16"
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
go-version: "1.15"
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
with:
PATTERNS: |
**/**.go
go.mod
go.sum
- uses: actions/cache@v2.1.6
- uses: actions/cache@v2.1.3
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
if: env.GIT_DIFF
- uses: actions/cache@v2.1.6
- uses: actions/cache@v2.1.3
with:
path: ~/go/bin
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
@@ -80,22 +120,22 @@ jobs:
steps:
- uses: actions/setup-go@v2
with:
go-version: "1.16"
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
go-version: "1.15"
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
with:
PATTERNS: |
**/**.go
go.mod
go.sum
- uses: actions/cache@v2.1.6
- uses: actions/cache@v2.1.3
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
if: env.GIT_DIFF
- uses: actions/cache@v2.1.6
- uses: actions/cache@v2.1.3
with:
path: ~/go/bin
key: ${{ runner.os }}-${{ github.sha }}-tm-binary

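The comment `# Cache binaries for use by other jobs` names the pattern that recurs through this file: the `build` job saves `~/go/bin` under a key scoped to the current commit, and each dependent job restores it with the same path and key instead of recompiling. A condensed sketch of that hand-off, using the step names and keys from the hunks above:

```yaml
build:
  steps:
    # ... make install install_abci compiles binaries into ~/go/bin ...
    - uses: actions/cache@v2.1.6
      with:
        path: ~/go/bin
        key: ${{ runner.os }}-${{ github.sha }}-tm-binary   # keyed to this commit

test_abci_apps:
  needs: build             # runs after build, so the cache entry already exists
  steps:
    - uses: actions/cache@v2.1.6
      with:
        path: ~/go/bin     # identical path + key restores the prebuilt binaries
        key: ${{ runner.os }}-${{ github.sha }}-tm-binary
```
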
.gitignore (5 changes, vendored)
@@ -15,7 +15,7 @@
.vagrant
.vendor-new/
.vscode/
abci/abci-cli
abci-cli
addrbook.json
artifacts/*
build/*
@@ -24,7 +24,6 @@ docs/.vuepress/dist
docs/_build
docs/dist
docs/node_modules/
docs/spec
index.html.md
libs/pubsub/query/fuzz_test/output
profile\.out
@@ -36,10 +35,10 @@ shunit2
terraform.tfstate
terraform.tfstate.backup
terraform.tfstate.d
test/app/grpc_client
test/e2e/build
test/e2e/networks/*/
test/logs
test/maverick/maverick
test/p2p/data/
vendor
test/fuzz/**/corpus

@@ -1,17 +1,14 @@
linters:
enable:
- asciicheck
- bodyclose
- deadcode
- depguard
- dogsled
- dupl
- errcheck
- exportloopref
# - funlen
# - gochecknoglobals
# - gochecknoinits
# - gocognit
- goconst
- gocritic
# - gocyclo
@@ -25,11 +22,11 @@ linters:
- ineffassign
# - interfacer
- lll
# - maligned
- misspell
# - maligned
- nakedret
- nolintlint
- prealloc
- scopelint
- staticcheck
- structcheck
- stylecheck
@@ -40,6 +37,8 @@ linters:
- varcheck
# - whitespace
# - wsl
# - gocognit
- nolintlint

issues:
exclude-rules:
@@ -54,9 +53,9 @@ issues:
linters-settings:
dogsled:
max-blank-identifiers: 3
golint:
min-confidence: 0
maligned:
suggest-new: true
misspell:
locale: US
# govet:
# check-shadowing: true
golint:
min-confidence: 0

@@ -1,4 +1,4 @@
project_name: tendermint
project_name: Tendermint

env:
# Require use of Go modules.

CHANGELOG.md (434 changes)
@@ -1,340 +1,21 @@
|
||||
# Changelog
|
||||
|
||||
Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermint).
|
||||
|
||||
## v0.35
|
||||
|
||||
Special thanks to external contributors on this release: @JayT106, @bipulprasad, @alessio, @Yawning, @silasdavis,
|
||||
@cuonglm, @tanyabouman, @JoeKash, @githubsands, @jeebster, @crypto-facs, @liamsi, and @gotjoshua
|
||||
|
||||
### BREAKING CHANGES
|
||||
|
||||
- CLI/RPC/Config
|
||||
- [pubsub/events] \#6634 The `ResultEvent.Events` field is now of type `[]abci.Event` preserving event order instead of `map[string][]string`. (@alexanderbez)
|
||||
- [config] \#5598 The `test_fuzz` and `test_fuzz_config` P2P settings have been removed. (@erikgrinaker)
|
||||
- [config] \#5728 `fastsync.version = "v1"` is no longer supported (@melekes)
|
||||
- [cli] \#5772 `gen_node_key` prints JSON-encoded `NodeKey` rather than ID and does not save it to `node_key.json` (@melekes)
|
||||
- [cli] \#5777 use hyphen-case instead of snake_case for all cli commands and config parameters (@cmwaters)
|
||||
- [rpc] \#6019 standardise RPC errors and return the correct status code (@bipulprasad & @cmwaters)
|
||||
- [rpc] \#6168 Change default sorting to desc for `/tx_search` results (@melekes)
|
||||
- [cli] \#6282 User must specify the node mode when using `tendermint init` (@cmwaters)
|
||||
- [state/indexer] \#6382 reconstruct indexer, move txindex into the indexer package (@JayT106)
|
||||
- [cli] \#6372 Introduce `BootstrapPeers` as part of the new p2p stack. Peers to be connected on startup (@cmwaters)
|
||||
- [config] \#6462 Move `PrivValidator` configuration out of `BaseConfig` into its own section. (@tychoish)
|
||||
- [rpc] \#6610 Add MaxPeerBlockHeight into /status rpc call (@JayT106)
|
||||
- [blocksync/rpc] \#6620 Add TotalSyncedTime & RemainingTime to SyncInfo in /status RPC (@JayT106)
|
||||
- [rpc/grpc] \#6725 Mark gRPC in the RPC layer as deprecated.
|
||||
- [blocksync/v2] \#6730 Fast Sync v2 is deprecated, please use v0
|
||||
- [rpc] Add genesis_chunked method to support paginated and parallel fetching of large genesis documents.
|
||||
- [rpc/jsonrpc/server] \#6785 `Listen` function updated to take an `int` argument, `maxOpenConnections`, instead of an entire config object. (@williambanfield)
|
||||
- [rpc] \#6820 Update RPC methods to reflect changes in the p2p layer, disabling support for `UnsafeDialPeers` and `UnsafeDialPeers` when used with the new p2p layer, and changing the response format of the peer list in `NetInfo` for all users.
|
||||
- [cli] \#6854 Remove deprecated snake case commands. (@tychoish)
|
||||
|
||||
- Apps
|
||||
- [ABCI] \#6408 Change the `key` and `value` fields from `[]byte` to `string` in the `EventAttribute` type. (@alexanderbez)
|
||||
- [ABCI] \#5447 Remove `SetOption` method from `ABCI.Client` interface
|
||||
- [ABCI] \#5447 Reset `Oneof` indexes for `Request` and `Response`.
|
||||
- [ABCI] \#5818 Use protoio for msg length delimitation. Migrates from int64 to uint64 length delimiters.
|
||||
- [ABCI] \#3546 Add `mempool_error` field to `ResponseCheckTx`. This field will contain an error string if Tendermint encountered an error while adding a transaction to the mempool. (@williambanfield)
|
||||
- [Version] \#6494 `TMCoreSemVer` has been renamed to `TMVersion`.
|
||||
- It is not required any longer to set ldflags to set version strings
|
||||
- [abci/counter] \#6684 Delete counter example app
|
||||
|
||||
- Go API
|
||||
- [pubsub] \#6634 The `Query#Matches` method along with other pubsub methods, now accepts a `[]abci.Event` instead of `map[string][]string`. (@alexanderbez)
|
||||
- [p2p] \#6618 \#6583 Move `p2p.NodeInfo`, `p2p.NodeID` and `p2p.NetAddress` into `types` to support use in external packages. (@tychoish)
|
||||
- [node] \#6540 Reduce surface area of the `node` package by making most of the implementation details private. (@tychoish)
|
||||
- [p2p] \#6547 Move the entire `p2p` package and all reactor implementations into `internal`. (@tychoish)
|
||||
- [libs/log] \#6534 Remove the existing custom Tendermint logger backed by go-kit. The logging interface, `Logger`, remains. Tendermint still provides a default logger backed by the performant zerolog logger. (@alexanderbez)
|
||||
- [libs/time] \#6495 Move types/time to libs/time to improve consistency. (@tychoish)
|
||||
- [mempool] \#6529 The `Context` field has been removed from the `TxInfo` type. `CheckTx` now requires a `Context` argument. (@alexanderbez)
|
||||
- [abci/client, proxy] \#5673 `Async` funcs return an error, `Sync` and `Async` funcs accept `context.Context` (@melekes)
|
||||
- [p2p] Remove unused function `MakePoWTarget`. (@erikgrinaker)
|
||||
- [libs/bits] \#5720 Validate `BitArray` in `FromProto`, which now returns an error (@melekes)
|
||||
- [proto/p2p] Rename `DefaultNodeInfo` and `DefaultNodeInfoOther` to `NodeInfo` and `NodeInfoOther` (@erikgrinaker)
|
||||
- [proto/p2p] Rename `NodeInfo.default_node_id` to `node_id` (@erikgrinaker)
|
||||
- [libs/os] Kill() and {Must,}{Read,Write}File() functions have been removed. (@alessio)
|
||||
- [store] \#5848 Remove block store state in favor of using the db iterators directly (@cmwaters)
|
||||
- [state] \#5864 Use an iterator when pruning state (@cmwaters)
|
||||
- [types] \#6023 Remove `tm2pb.Header`, `tm2pb.BlockID`, `tm2pb.PartSetHeader` and `tm2pb.NewValidatorUpdate`.
|
||||
- Each of the above types has a `ToProto` and `FromProto` method or function which replaced this logic.
|
||||
- [light] \#6054 Move `MaxRetryAttempt` option from client to provider.
|
||||
- `NewWithOptions` now sets the max retry attempts and timeouts (@cmwaters)
|
||||
- [all] \#6077 Change spelling from British English to American (@cmwaters)
|
||||
- Rename "Subscription.Cancelled()" to "Subscription.Canceled()" in libs/pubsub
|
||||
- Rename "behaviour" pkg to "behavior" and internalized it in blocksync v2
|
||||
- [rpc/client/http] \#6176 Remove `endpoint` arg from `New`, `NewWithTimeout` and `NewWithClient` (@melekes)
|
||||
- [rpc/client/http] \#6176 Unexpose `WSEvents` (@melekes)
|
||||
- [rpc/jsonrpc/client/ws_client] \#6176 `NewWS` no longer accepts options (use `NewWSWithOptions` and `OnReconnect` funcs to configure the client) (@melekes)
|
||||
- [internal/libs] \#6366 Move `autofile`, `clist`,`fail`,`flowrate`, `protoio`, `sync`, `tempfile`, `test` and `timer` lib packages to an internal folder
|
||||
- [libs/rand] \#6364 Remove most of libs/rand in favour of standard lib's `math/rand` (@liamsi)
|
||||
- [mempool] \#6466 The original mempool reactor has been versioned as `v0` and moved to a sub-package under the root `mempool` package.
|
||||
Some core types have been kept in the `mempool` package such as `TxCache` and it's implementations, the `Mempool` interface itself
|
||||
and `TxInfo`. (@alexanderbez)
|
||||
- [crypto/sr25519] \#6526 Do not re-execute the Ed25519-style key derivation step when doing signing and verification. The derivation is now done once and only once. This breaks `sr25519.GenPrivKeyFromSecret` output compatibility. (@Yawning)
|
||||
- [types] \#6627 Move `NodeKey` to types to make the type public.
|
||||
- [config] \#6627 Extend `config` to contain methods `LoadNodeKeyID` and `LoadorGenNodeKeyID`
|
||||
- [blocksync] \#6755 Rename `FastSync` and `Blockchain` package to `BlockSync` (@cmwaters)
|
||||
|
||||
- Data Storage
|
||||
- [store/state/evidence/light] \#5771 Use an order-preserving varint key encoding (@cmwaters)
|
||||
- [mempool] \#6396 Remove mempool's write ahead log (WAL), (previously unused by the tendermint code). (@tychoish)
|
||||
- [state] \#6541 Move pruneBlocks from consensus/state to state/execution. (@JayT106)
|
||||
|
||||
- Tooling
|
||||
- [tools] \#6498 Set OS home dir to instead of the hardcoded PATH. (@JayT106)
|
||||
- [cli/indexer] \#6676 Reindex events command line tooling. (@JayT106)
|
||||
|
||||
### FEATURES
|
||||
|
||||
- [config] Add `--mode` flag and config variable. See [ADR-52](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-052-tendermint-mode.md) @dongsam
|
||||
- [rpc] \#6329 Don't cap page size in unsafe mode (@gotjoshua, @cmwaters)
|
||||
- [pex] \#6305 v2 pex reactor with backwards compatability. Introduces two new pex messages to
|
||||
accomodate for the new p2p stack. Removes the notion of seeds and crawling. All peer
|
||||
exchange reactors behave the same. (@cmwaters)
|
||||
- [crypto] \#6376 Enable sr25519 as a validator key type
|
||||
- [mempool] \#6466 Introduction of a prioritized mempool. (@alexanderbez)
|
||||
- `Priority` and `Sender` have been introduced into the `ResponseCheckTx` type, where the `priority` will determine the prioritization of
|
||||
the transaction when a proposer reaps transactions for a block proposal. The `sender` field acts as an index.
|
||||
- Operators may toggle between the legacy mempool reactor, `v0`, and the new prioritized reactor, `v1`, by setting the
|
||||
`mempool.version` configuration, where `v1` is the default configuration.
|
||||
- Applications that do not specify a priority, i.e. zero, will have transactions reaped by the order in which they are received by the node.
|
||||
- Transactions are gossiped in FIFO order as they are in `v0`.
|
||||
- [config/indexer] \#6411 Introduce support for custom event indexing data sources, specifically PostgreSQL. (@JayT106)
|
||||
- [blocksync/event] \#6619 Emit blocksync status event when switching consensus/blocksync (@JayT106)
|
||||
- [statesync/event] \#6700 Emit statesync status start/end event (@JayT106)
|
||||
- [inspect] \#6785 Add a new `inspect` command for introspecting the state and block store of a crashed tendermint node. (@williambanfield)
|
||||
|
||||
### IMPROVEMENTS
|
||||
|
||||
- [libs/log] Console log formatting changes as a result of \#6534 and \#6589. (@tychoish)
|
||||
- [statesync] \#6566 Allow state sync fetchers and request timeout to be configurable. (@alexanderbez)
|
||||
- [types] \#6478 Add `block_id` to `newblock` event (@jeebster)
|
||||
- [crypto/ed25519] \#5632 Adopt zip215 `ed25519` verification. (@marbar3778)
|
||||
- [crypto/ed25519] \#6526 Use [curve25519-voi](https://github.com/oasisprotocol/curve25519-voi) for `ed25519` signing and verification. (@Yawning)
|
||||
- [crypto/sr25519] \#6526 Use [curve25519-voi](https://github.com/oasisprotocol/curve25519-voi) for `sr25519` signing and verification. (@Yawning)
|
||||
- [privval] \#5603 Add `--key` to `init`, `gen_validator`, `testnet` & `unsafe_reset_priv_validator` for use in generating `secp256k1` keys.
|
||||
- [privval] \#5725 Add gRPC support to private validator.
|
||||
- [privval] \#5876 `tendermint show-validator` will query the remote signer if gRPC is being used (@marbar3778)
|
||||
- [abci/client] \#5673 `Async` requests return an error if queue is full (@melekes)
|
||||
- [mempool] \#5673 Cancel `CheckTx` requests if RPC client disconnects or times out (@melekes)
|
||||
- [abci] \#5706 Added `AbciVersion` to `RequestInfo` allowing applications to check ABCI version when connecting to Tendermint. (@marbar3778)
|
||||
- [blocksync/v1] \#5728 Remove blocksync v1 (@melekes)
|
||||
- [blocksync/v0] \#5741 Relax termination conditions and increase sync timeout (@melekes)
|
||||
- [cli] \#5772 `gen_node_key` output now contains node ID (`id` field) (@melekes)
|
||||
- [blocksync/v2] \#5774 Send status request when new peer joins (@melekes)
|
||||
- [store] \#5888 store.SaveBlock saves using batches instead of transactions for now to improve ACID properties. This is a quick fix for underlying issues around tm-db and ACID guarantees. (@githubsands)
|
||||
- [consensus] \#5987 and \#5792 Remove the `time_iota_ms` consensus parameter. Merge `tmproto.ConsensusParams` and `abci.ConsensusParams`. (@marbar3778, @valardragon)
|
||||
- [types] \#5994 Reduce the use of protobuf types in core logic. (@marbar3778)
|
||||
- `ConsensusParams`, `BlockParams`, `ValidatorParams`, `EvidenceParams`, `VersionParams`, `sm.Version` and `version.Consensus` have become native types. They still utilize protobuf when being sent over the wire or written to disk.
|
||||
- [rpc/client/http] \#6163 Do not drop events even if the `out` channel is full (@melekes)
|
||||
- [node] \#6059 Validate and complete genesis doc before saving to state store (@silasdavis)
|
||||
- [state] \#6067 Batch save state data (@githubsands & @cmwaters)
|
||||
- [crypto] \#6120 Implement batch verification interface for ed25519 and sr25519. (@marbar3778)
|
||||
- [types] \#6120 use batch verification for verifying commits signatures.
|
||||
- If the key type supports the batch verification API it will try to batch verify. If the verification fails we will single verify each signature.
|
||||
- [privval/file] \#6185 Return error on `LoadFilePV`, `LoadFilePVEmptyState`. Allows for better programmatic control of Tendermint.
|
||||
- [privval] \#6240 Add `context.Context` to privval interface.
|
||||
- [rpc] \#6265 set cache control in http-rpc response header (@JayT106)
|
||||
- [statesync] \#6378 Retry requests for snapshots and add a minimum discovery time (5s) for new snapshots.
|
||||
- [node/state] \#6370 graceful shutdown in the consensus reactor (@JayT106)
|
||||
- [crypto/merkle] \#6443 Improve HashAlternatives performance (@cuonglm)
|
||||
- [crypto/merkle] \#6513 Optimize HashAlternatives (@marbar3778)
|
||||
- [p2p/pex] \#6509 Improve addrBook.hash performance (@cuonglm)
|
||||
- [consensus/metrics] \#6549 Change block_size gauge to a histogram for better observability over time (@marbar3778)
|
||||
- [statesync] \#6587 Increase chunk priority and re-request chunks that don't arrive (@cmwaters)
|
||||
- [state/privval] \#6578 No GetPubKey retry beyond the proposal/voting window (@JayT106)
|
||||
- [rpc] \#6615 Add TotalGasUsed to block_results response (@crypto-facs)
|
||||
- [cmd/tendermint/commands] \#6623 replace `$HOME/.some/test/dir` with `t.TempDir` (@tanyabouman)
|
||||
- [statesync] \6807 Implement P2P state provider as an alternative to RPC (@cmwaters)
|
||||
|
||||
### BUG FIXES
|
||||
|
||||
- [privval] \#5638 Increase read/write timeout to 5s and calculate ping interval based on it (@JoeKash)
|
||||
- [evidence] \#6375 Fix bug with inconsistent LightClientAttackEvidence hashing (cmwaters)
|
||||
- [rpc] \#6507 Ensure RPC client can handle URLs without ports (@JayT106)
|
||||
- [statesync] \#6463 Adds Reverse Sync feature to fetch historical light blocks after state sync in order to verify any evidence (@cmwaters)
|
||||
- [blocksync] \#6590 Update the metrics during blocksync (@JayT106)
|
||||
|
||||
## v0.34.13
|
||||
|
||||
*September 6, 2021*
|
||||
|
||||
This release backports improvements to state synchronization and ABCI
|
||||
performance under concurrent load, and the PostgreSQL event indexer.
|
||||
|
||||
### IMPROVEMENTS
|
||||
|
||||
- [statesync] [\#6881](https://github.com/tendermint/tendermint/issues/6881) improvements to stateprovider logic (@cmwaters)
|
||||
- [ABCI] [\#6873](https://github.com/tendermint/tendermint/issues/6873) change client to use multi-reader mutexes (@tychoish)
|
||||
- [indexing] [\#6906](https://github.com/tendermint/tendermint/issues/6906) enable the PostgreSQL indexer sink (@creachadair)
|
||||
|
||||
## v0.34.12
|
||||
|
||||
*August 17, 2021*
|
||||
|
||||
Special thanks to external contributors on this release: @JayT106.
|
||||
|
||||
### FEATURES
|
||||
|
||||
- [rpc] [\#6717](https://github.com/tendermint/tendermint/pull/6717) introduce
|
||||
`/genesis_chunked` rpc endpoint for handling large genesis files by chunking them (@tychoish)
|
||||
|
||||
### IMPROVEMENTS
|
||||
|
||||
- [rpc] [\#6825](https://github.com/tendermint/tendermint/issues/6825) Remove egregious INFO log from `ABCI#Query` RPC. (@alexanderbez)
|
||||
|
||||
### BUG FIXES
|
||||
|
||||
- [light] [\#6685](https://github.com/tendermint/tendermint/pull/6685) fix bug
|
||||
with incorrectly handling contexts that would occasionally freeze state sync. (@cmwaters)
|
||||
- [privval] [\#6748](https://github.com/tendermint/tendermint/issues/6748) Fix vote timestamp to prevent chain halt (@JayT106)
|
||||
|
||||
## v0.34.11
|
||||
|
||||
*June 18, 2021*
|
||||
|
||||
This release improves the robustness of statesync; tweaking channel priorities and timeouts and
|
||||
adding two new parameters to the state sync config.
|
||||
|
||||
### BREAKING CHANGES
|
||||
|
||||
- Apps
|
||||
- [Version] [\#6494](https://github.com/tendermint/tendermint/pull/6494) `TMCoreSemVer` is not required to be set as a ldflag any longer.
|
||||
|
||||
### IMPROVEMENTS
|
||||
|
||||
- [statesync] [\#6566](https://github.com/tendermint/tendermint/pull/6566) Allow state sync fetchers and request timeout to be configurable. (@alexanderbez)
|
||||
- [statesync] [\#6378](https://github.com/tendermint/tendermint/pull/6378) Retry requests for snapshots and add a minimum discovery time (5s) for new snapshots. (@tychoish)
|
||||
- [statesync] [\#6582](https://github.com/tendermint/tendermint/pull/6582) Increase chunk priority and add multiple retry chunk requests (@cmwaters)
|
||||
|
||||
### BUG FIXES
|
||||
|
||||
- [evidence] [\#6375](https://github.com/tendermint/tendermint/pull/6375) Fix bug with inconsistent LightClientAttackEvidence hashing (@cmwaters)
|
||||
|
||||
## v0.34.10
|
||||
|
||||
*April 14, 2021*
|
||||
|
||||
This release fixes a bug where peers would sometimes try to send messages
|
||||
on incorrect channels. Special thanks to our friends at Oasis Labs for surfacing
|
||||
this issue!
|
||||
|
||||
- [p2p/node] [\#6339](https://github.com/tendermint/tendermint/issues/6339) Fix bug with using custom channels (@cmwaters)
|
||||
- [light] [\#6346](https://github.com/tendermint/tendermint/issues/6346) Correctly handle too high errors to improve client robustness (@cmwaters)
|
||||
|
||||
## v0.34.9
|
||||
|
||||
*April 8, 2021*
|
||||
|
||||
This release fixes a moderate severity security issue, Security Advisory Alderfly,
|
||||
which impacts all networks that rely on Tendermint light clients.
|
||||
Further details will be released once networks have upgraded.
|
||||
|
||||
This release also includes a small Go API-breaking change, to reduce panics in the RPC layer.
|
||||
|
||||
Special thanks to our external contributors on this release: @gchaincl
|
||||
|
||||
### BREAKING CHANGES
|
||||
|
||||
- Go API
|
||||
- [rpc/jsonrpc/server] [\#6204](https://github.com/tendermint/tendermint/issues/6204) Modify `WriteRPCResponseHTTP(Error)` to return an error (@melekes)
|
||||
|
||||
### FEATURES
|
||||
|
||||
- [rpc] [\#6226](https://github.com/tendermint/tendermint/issues/6226) Index block events and expose a new RPC method, `/block_search`, to allow querying for blocks by `BeginBlock` and `EndBlock` events (@alexanderbez)
|
||||
|
||||
### BUG FIXES
|
||||
|
||||
- [rpc/jsonrpc/server] [\#6191](https://github.com/tendermint/tendermint/issues/6191) Correctly unmarshal `RPCRequest` when data is `null` (@melekes)
|
||||
- [p2p] [\#6289](https://github.com/tendermint/tendermint/issues/6289) Fix "unknown channels" bug on CustomReactors (@gchaincl)
|
||||
- [light/evidence] Adds logic to handle forward lunatic attacks (@cmwaters)
|
||||
|
||||
## v0.34.8
|
||||
|
||||
*February 25, 2021*
|
||||
|
||||
This release, in conjunction with [a fix in the Cosmos SDK](https://github.com/cosmos/cosmos-sdk/pull/8641),
|
||||
introduces changes that should mean the logs are much, much quieter. 🎉
|
||||
|
||||
### IMPROVEMENTS
|
||||
|
||||
- [libs/log] [\#6174](https://github.com/tendermint/tendermint/issues/6174) Include timestamp (`ts` field; `time.RFC3339Nano` format) in JSON logger output (@melekes)
|
||||
|
||||
### BUG FIXES
|
||||
|
||||
- [abci] [\#6124](https://github.com/tendermint/tendermint/issues/6124) Fixes a panic condition during callback execution in `ReCheckTx` during high tx load. (@alexanderbez)
|
||||
|
||||
## v0.34.7
|
||||
|
||||
*February 18, 2021*
|
||||
|
||||
This release fixes a downstream security issue which impacts Cosmos SDK
|
||||
users who are:
|
||||
|
||||
* Using Cosmos SDK v0.40.0 or later, AND
|
||||
* Running validator nodes, AND
|
||||
* Using the file-based `FilePV` implementation for their consensus keys
|
||||
|
||||
Users who fulfill all the above criteria were susceptible to leaking
|
||||
private key material in the logs. All other users are unaffected.
|
||||
|
||||
The root cause was a discrepancy
|
||||
between the Tendermint Core (untyped) logger and the Cosmos SDK (typed) logger:
|
||||
Tendermint Core's logger automatically stringifies Go interfaces whenever possible;
|
||||
however, the Cosmos SDK's logger uses reflection to log the fields within a Go interface.
|
||||
|
||||
The introduction of the typed logger meant that previously un-logged fields within
|
||||
interfaces are now sometimes logged, including the private key material inside the
|
||||
`FilePV` struct.
|
||||
|
||||
Tendermint Core v0.34.7 fixes this issue; however, we strongly recommend that all validators
|
||||
use remote signer implementations instead of `FilePV` in production.
|
||||
|
||||
Thank you to @joe-bowman for his assistance with this vulnerability and a particular
|
||||
shout-out to @marbar3778 for diagnosing it quickly.
|
||||
|
||||
### BUG FIXES
|
||||
|
||||
- [consensus] [\#6128](https://github.com/tendermint/tendermint/pull/6128) Remove privValidator from log call (@tessr)
|
||||
|
||||
## v0.34.6
|
||||
|
||||
*February 18, 2021*
|
||||
|
||||
_Tendermint Core v0.34.5 and v0.34.6 have been recalled due to release tooling problems._
|
||||
|
||||
## v0.34.4
|
||||
|
||||
*February 11, 2021*
|
||||
|
||||
This release includes a fix for a memory leak in the evidence reactor (see #6068, below).
|
||||
All Tendermint clients are recommended to upgrade.
|
||||
Thank you to our friends at Crypto.com for the initial report of this memory leak!
|
||||
|
||||
Special thanks to other external contributors on this release: @yayajacky, @odidev, @laniehei, and @c29r3!
|
||||
|
||||
### BUG FIXES
|
||||
|
||||
- [light] [\#6022](https://github.com/tendermint/tendermint/pull/6022) Fix a bug when the number of validators equals 100 (@melekes)
|
||||
- [light] [\#6026](https://github.com/tendermint/tendermint/pull/6026) Fix a bug when height isn't provided for the rpc calls: `/commit` and `/validators` (@cmwaters)
|
||||
- [evidence] [\#6068](https://github.com/tendermint/tendermint/pull/6068) Terminate broadcastEvidenceRoutine when peer is stopped (@melekes)
|
||||
|
||||
## v0.34.3
|
||||
## v0.34.3
|
||||
|
||||
*January 19, 2021*
|
||||
|
||||
This release includes a fix for a high-severity security vulnerability,
|
||||
a DoS-vector that impacted Tendermint Core v0.34.0-v0.34.2. For more details, see
|
||||
[Security Advisory Mulberry](https://github.com/tendermint/tendermint/security/advisories/GHSA-p658-8693-mhvg)
|
||||
or https://nvd.nist.gov/vuln/detail/CVE-2021-21271.
|
||||
This release includes a fix for a high-severity security vulnerability.
|
||||
More information on this vulnerability will be released on January 26, 2021
|
||||
and this changelog will be updated.
|
||||
|
||||
Tendermint Core v0.34.3 also updates GoGo Protobuf to 1.3.2 in order to pick up the fix for
|
||||
https://nvd.nist.gov/vuln/detail/CVE-2021-3121.
|
||||
It also updates GoGo Protobuf to 1.3.2 in order to pick up the fix for
|
||||
https://nvd.nist.gov/vuln/detail/CVE-2021-3121.
|
||||
|
||||
Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermint).
|
||||
|
||||
### BUG FIXES
|
||||
|
||||
- [evidence] [[security fix]](https://github.com/tendermint/tendermint/security/advisories/GHSA-p658-8693-mhvg) Use correct source of evidence time (@cmwaters)
|
||||
- [evidence] [N/A] Use correct source of evidence time (@cmwaters)
|
||||
- [proto] [\#5886](https://github.com/tendermint/tendermint/pull/5889) Bump gogoproto to 1.3.2 (@marbar3778)
|
||||
|
||||
## v0.34.2

@@ -345,6 +26,8 @@ This release fixes a substantial bug in evidence handling where evidence could
sometimes be broadcast before the block containing that evidence was fully committed,
resulting in some nodes panicking when trying to verify said evidence.

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES

- Go API

@@ -368,6 +51,8 @@ disconnecting from this node. As a temporary remedy (until the mempool package
is refactored), the `max-batch-bytes` was disabled. Transactions will be sent
one by one without batching.

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES

- CLI/RPC/Config

@@ -396,6 +81,8 @@ Holy smokes, this is a big one! For a more reader-friendly overview of the chang
Special thanks to external contributors on this release: @james-ray, @fedekunze, @favadi, @alessio,
@joe-bowman, @cuonglm, @SadPencil and @dongsam.

And, as always, a friendly reminder that we have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES

- CLI/RPC/Config

@@ -422,14 +109,14 @@ Special thanks to external contributors on this release: @james-ray, @fedekunze,
- [blockchain] [\#4637](https://github.com/tendermint/tendermint/pull/4637) Migrate blockchain reactor(s) to Protobuf encoding (@marbar3778)
- [evidence] [\#4949](https://github.com/tendermint/tendermint/pull/4949) Migrate evidence reactor to Protobuf encoding (@marbar3778)
- [mempool] [\#4940](https://github.com/tendermint/tendermint/pull/4940) Migrate mempool to Protobuf encoding (@marbar3778)
- [mempool] [\#5321](https://github.com/tendermint/tendermint/pull/5321) Batch transactions when broadcasting them to peers (@melekes)
  - `MaxBatchBytes` new config setting defines the max size of one batch.
- [p2p/pex] [\#4973](https://github.com/tendermint/tendermint/pull/4973) Migrate `p2p/pex` reactor to Protobuf encoding (@marbar3778)
- [statesync] [\#4943](https://github.com/tendermint/tendermint/pull/4943) Migrate state sync reactor to Protobuf encoding (@marbar3778)

- Blockchain Protocol
  - [evidence] [\#4725](https://github.com/tendermint/tendermint/pull/4725) Remove `Pubkey` from `DuplicateVoteEvidence` (@marbar3778)
  - [evidence] [\#5499](https://github.com/tendermint/tendermint/pull/5449) Cap evidence to a maximum number of bytes (supersedes [\#4780](https://github.com/tendermint/tendermint/pull/4780)) (@cmwaters)
  - [merkle] [\#5193](https://github.com/tendermint/tendermint/pull/5193) Header hashes are no longer empty for empty inputs, notably `DataHash`, `EvidenceHash`, and `LastResultsHash` (@erikgrinaker)
  - [state] [\#4845](https://github.com/tendermint/tendermint/pull/4845) Include `GasWanted` and `GasUsed` into `LastResultsHash` (@melekes)

@@ -488,7 +175,7 @@ Special thanks to external contributors on this release: @james-ray, @fedekunze,
- [types] [\#4852](https://github.com/tendermint/tendermint/pull/4852) Vote & Proposal `SignBytes` is now func `VoteSignBytes` & `ProposalSignBytes` (@marbar3778)
- [types] [\#4798](https://github.com/tendermint/tendermint/pull/4798) Simplify `VerifyCommitTrusting` func + remove extra validation (@melekes)
- [types] [\#4845](https://github.com/tendermint/tendermint/pull/4845) Remove `ABCIResult` (@melekes)
- [types] [\#5029](https://github.com/tendermint/tendermint/pull/5029) Rename all values from `PartsHeader` to `PartSetHeader` to have consistency (@marbar3778)
- [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) `Total` in `Parts` & `PartSetHeader` has been changed from an `int` to a `uint32` (@marbar3778)
- [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) Vote: `ValidatorIndex` & `Round` are now `int32` (@marbar3778)
- [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) Proposal: `POLRound` & `Round` are now `int32` (@marbar3778)

@@ -526,7 +213,7 @@ Special thanks to external contributors on this release: @james-ray, @fedekunze,
- [evidence] [\#4722](https://github.com/tendermint/tendermint/pull/4722) Consolidate evidence store and pool types to improve evidence DB (@cmwaters)
- [evidence] [\#4839](https://github.com/tendermint/tendermint/pull/4839) Reject duplicate evidence from being proposed (@cmwaters)
- [evidence] [\#5219](https://github.com/tendermint/tendermint/pull/5219) Change the source of evidence time to block time (@cmwaters)
- [libs] [\#5126](https://github.com/tendermint/tendermint/pull/5126) Add a sync package which wraps sync.(RW)Mutex & deadlock.(RW)Mutex and use a build flag (deadlock) in order to enable deadlock checking (@marbar3778)
- [light] [\#4935](https://github.com/tendermint/tendermint/pull/4935) Fetch and compare a new header with witnesses in parallel (@melekes)
- [light] [\#4929](https://github.com/tendermint/tendermint/pull/4929) Compare header with witnesses only when doing bisection (@melekes)
- [light] [\#4916](https://github.com/tendermint/tendermint/pull/4916) Validate basic for inbound validator sets and headers before further processing them (@cmwaters)
@@ -636,6 +323,9 @@ as 2/3+ of the signatures are checked._

Special thanks to @njmurarka at Bluzelle Networks for reporting this.

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### SECURITY:

- [consensus] Do not allow signatures for a wrong block in commits (@ebuchman)

@@ -651,6 +341,8 @@ need to update your code.**

Special thanks to external contributors on this release: @tau3,

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

- Go API

@@ -710,6 +402,8 @@ Special thanks to external contributors on this release: @tau3,

Special thanks to external contributors on this release: @whylee259, @greg-szabo

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

- Go API

@@ -796,6 +490,9 @@ Notes:

Special thanks to [fudongbai](https://hackerone.com/fudongbai) for finding
and reporting this.

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### SECURITY:

- [mempool] Reserve IDs in InitPeer instead of AddPeer (@tessr)

@@ -808,6 +505,8 @@ and reporting this.

Special thanks to external contributors on this release:
@antho1404, @michaelfig, @gterzian, @tau3, @Shivani912

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

- CLI/RPC/Config

@@ -858,6 +557,9 @@ Special thanks to external contributors on this release:

Special thanks to external contributors on this release:
@princesinha19

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### FEATURES:

- [rpc] [\#3333](https://github.com/tendermint/tendermint/issues/3333) Add `order_by` to `/tx_search` endpoint, allowing to change default ordering from asc to desc (@princesinha19)

@@ -876,6 +578,9 @@ Special thanks to external contributors on this release:

Special thanks to external contributors on this release: @mrekucci, @PSalant726, @princesinha19, @greg-szabo, @dongsam, @cuonglm, @jgimeno, @yenkhoon

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

*January 14, 2020*

This release contains breaking changes to the `Block#Header`, specifically
@@ -1104,6 +809,9 @@ Notes:

Special thanks to [fudongbai](https://hackerone.com/fudongbai) for finding
and reporting this.

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### SECURITY:

- [mempool] Reserve IDs in InitPeer instead of AddPeer (@tessr)

@@ -1115,6 +823,9 @@ _January, 9, 2020_

Special thanks to external contributors on this release: @greg-szabo, @gregzaitsev, @yenkhoon

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### FEATURES:

- [rpc/lib] [\#4248](https://github.com/tendermint/tendermint/issues/4248) RPC client basic authentication support (@greg-szabo)

@@ -1136,6 +847,9 @@ Special thanks to external contributors on this release: @greg-szabo, @gregzaits

Special thanks to external contributors on this release: @erikgrinaker, @guagualvcha, @hsyis, @cosmostuba, @whunmr, @austinabell

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

@@ -1175,6 +889,9 @@ identified and fixed here.

Special thanks to [elvishacker](https://hackerone.com/elvishacker) for finding
and reporting this.

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

- Go API

@@ -1201,6 +918,9 @@ accepting new peers and only allowing `ed25519` pubkeys.

Special thanks to [fudongbai](https://hackerone.com/fudongbai) for pointing
this out.

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### SECURITY:

- [p2p] [\#4030](https://github.com/tendermint/tendermint/issues/4030) Only allow ed25519 pubkeys when connecting

@@ -1216,6 +936,9 @@ All clients are recommended to upgrade. See

Special thanks to [fudongbai](https://hackerone.com/fudongbai) for discovering
and reporting this issue.

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### SECURITY:

- [p2p] [\#4030](https://github.com/tendermint/tendermint/issues/4030) Fix for panic on nil public key sent to a peer

@@ -1226,6 +949,9 @@ and reporting this issue.

Special thanks to external contributors on this release: @jon-certik, @gracenoah, @PSalant726, @gchaincl

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

- CLI/RPC/Config

@@ -1261,6 +987,9 @@ guide.

Special thanks to external contributors on this release:
@gchaincl, @bluele, @climber73

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### IMPROVEMENTS:

- [consensus] [\#3839](https://github.com/tendermint/tendermint/issues/3839) Reduce "Error attempting to add vote" message severity (Error -> Info)

@@ -1281,6 +1010,9 @@ Special thanks to external contributors on this release:

Special thanks to external contributors on this release:
@ruseinov, @bluele, @guagualvcha

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

- Go API

@@ -1320,6 +1052,9 @@ This release contains a minor enhancement to the ABCI and some breaking changes

- CheckTx requests include a `CheckTxType` enum that can be set to `Recheck` to indicate to the application that this transaction was already checked/validated and certain expensive operations (like checking signatures) can be skipped
- Removed various functions from `libs` pkgs
Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

- Go API

@@ -1365,6 +1100,9 @@ and the RPC, namely:
[docs](https://github.com/tendermint/tendermint/blob/60827f75623b92eff132dc0eff5b49d2025c591e/docs/spec/abci/abci.md#events)
- Bind RPC to localhost by default, not to the public interface [UPGRADING/RPC_Changes](./UPGRADING.md#rpc_changes)

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

* CLI/RPC/Config

@@ -1465,6 +1203,8 @@ Notes:

Special thanks to [fudongbai](https://hackerone.com/fudongbai) for finding
and reporting this.

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### SECURITY:

@@ -1485,6 +1225,8 @@ identified and fixed here.

Special thanks to [elvishacker](https://hackerone.com/elvishacker) for finding
and reporting this.

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

@@ -1512,6 +1254,8 @@ accepting new peers and only allowing `ed25519` pubkeys.

Special thanks to [fudongbai](https://hackerone.com/fudongbai) for pointing
this out.

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### SECURITY:

@@ -1528,6 +1272,8 @@ All clients are recommended to upgrade. See

Special thanks to [fudongbai](https://hackerone.com/fudongbai) for discovering
and reporting this issue.

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### SECURITY:

@@ -1823,6 +1569,8 @@ See the [v0.31.0
Milestone](https://github.com/tendermint/tendermint/milestone/19?closed=1) for
more details.

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

@@ -2043,6 +1791,8 @@ This release contains two important fixes: one for p2p layer where we sometimes
were not closing connections and one for consensus layer where consensus with
no empty blocks (`create_empty_blocks = false`) could halt.

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### IMPROVEMENTS:
- [pex] [\#3037](https://github.com/tendermint/tendermint/issues/3037) Only log "Reached max attempts to dial" once

@@ -2082,6 +1832,8 @@ While we are trying to stabilize the Block protocol to preserve compatibility
with old chains, there may be some final changes yet to come before Cosmos
launch as we continue to audit and test the software.

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

@@ -2130,6 +1882,8 @@ launch as we continue to audit and test the software.

Special thanks to external contributors on this release:
@HaoyangLiu

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### BUG FIXES:
- [consensus] Fix consensus halt from proposing blocks with too much evidence

@@ -2183,7 +1937,7 @@ See [UPGRADING.md](UPGRADING.md) for more details.

- [build] [\#3085](https://github.com/tendermint/tendermint/issues/3085) Fix `Version` field in build scripts (@husio)
- [crypto/multisig] [\#3102](https://github.com/tendermint/tendermint/issues/3102) Fix multisig keys address length
- [crypto/encoding] [\#3101](https://github.com/tendermint/tendermint/issues/3101) Fix `PubKeyMultisigThreshold` unmarshaling into `crypto.PubKey` interface
- [crypto/encoding] [\#3101](https://github.com/tendermint/tendermint/issues/3101) Fix `PubKeyMultisigThreshold` unmarshalling into `crypto.PubKey` interface
- [p2p/conn] [\#3111](https://github.com/tendermint/tendermint/issues/3111) Make SecretConnection thread safe
- [rpc] [\#3053](https://github.com/tendermint/tendermint/issues/3053) Fix internal error in `/tx_search` when results are empty (@gianfelipe93)

@@ -2258,6 +2012,8 @@ Special thanks to @dlguddus for discovering a [major
issue](https://github.com/tendermint/tendermint/issues/2718#issuecomment-440888677)
in the proposer selection algorithm.

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

This release is primarily about fixes to the proposer selection algorithm
in preparation for the [Cosmos Game of

@@ -2320,6 +2076,8 @@ Special thanks to external contributors on this release:
@ackratos, @goolAdapter, @james-ray, @joe-bowman, @kostko,
@nagarajmanjunath, @tomtau

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### FEATURES:

@@ -2359,6 +2117,8 @@ Special thanks to external contributors on this release:

Special thanks to external contributors on this release:
@danil-lashin, @kevlubkcm, @krhubert, @srmo

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

@@ -2403,6 +2163,8 @@ Special thanks to external contributors on this release:

Special thanks to external contributors on this release: @hleb-albau, @zhuzeyu

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### FEATURES:

- [rpc] [\#2582](https://github.com/tendermint/tendermint/issues/2582) Enable CORS on RPC API (@hleb-albau)

@@ -2420,6 +2182,8 @@ Special thanks to external contributors on this release: @hleb-albau, @zhuzeyu

Special thanks to external contributors on this release: @katakonst

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### IMPROVEMENTS:

- [consensus] [\#2704](https://github.com/tendermint/tendermint/issues/2704) Simplify valid POL round logic

@@ -2593,6 +2357,8 @@ It also addresses some issues found via security audit, removes various unused
functions from `libs/common`, and implements
[ADR-012](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-012-peer-transport.md).

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

BREAKING CHANGES:

* CLI/RPC/Config

@@ -2622,7 +2388,7 @@ FEATURES:
- [libs] [\#2286](https://github.com/tendermint/tendermint/issues/2286) Panic if `autofile` or `db/fsdb` permissions change from 0600.

IMPROVEMENTS:
- [libs/db] [\#2371](https://github.com/tendermint/tendermint/issues/2371) Output error instead of panic when the given `db_backend` is not initialized (@bradyjoestar)
- [libs/db] [\#2371](https://github.com/tendermint/tendermint/issues/2371) Output error instead of panic when the given `db_backend` is not initialised (@bradyjoestar)
- [mempool] [\#2399](https://github.com/tendermint/tendermint/issues/2399) Make mempool cache a proper LRU (@bradyjoestar)
- [p2p] [\#2126](https://github.com/tendermint/tendermint/issues/2126) Introduce PeerTransport interface to improve isolation of concerns
- [libs/common] [\#2326](https://github.com/tendermint/tendermint/issues/2326) Service returns ErrNotStarted
@@ -4,23 +4,58 @@

Special thanks to external contributors on this release:

Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermint).
Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES

- CLI/RPC/Config
  - [config] \#5598 The `test_fuzz` and `test_fuzz_config` P2P settings have been removed. (@erikgrinaker)
  - [config] \#5728 `fast_sync = "v1"` is no longer supported (@melekes)
  - [cli] \#5772 `gen_node_key` prints JSON-encoded `NodeKey` rather than ID and does not save it to `node_key.json` (@melekes)
  - [cli] \#5777 use hyphen-case instead of snake_case for all cli commands and config parameters (@cmwaters)

- Apps
  - [ABCI] \#5447 Remove `SetOption` method from `ABCI.Client` interface
  - [ABCI] \#5447 Reset `Oneof` indexes for `Request` and `Response`.
  - [ABCI] \#5818 Use protoio for msg length delimitation. Migrates from int64 to uint64 length delimiters.

- P2P Protocol

- Go API
  - [abci/client, proxy] \#5673 `Async` funcs return an error, `Sync` and `Async` funcs accept `context.Context` (@melekes)
  - [p2p] Removed unused function `MakePoWTarget`. (@erikgrinaker)
  - [libs/bits] \#5720 Validate `BitArray` in `FromProto`, which now returns an error (@melekes)
  - [proto/p2p] Renamed `DefaultNodeInfo` and `DefaultNodeInfoOther` to `NodeInfo` and `NodeInfoOther` (@erikgrinaker)
  - [proto/p2p] Rename `NodeInfo.default_node_id` to `node_id` (@erikgrinaker)
  - [libs/os] Kill() and {Must,}{Read,Write}File() functions have been removed. (@alessio)
  - [store] \#5848 Remove block store state in favor of using the db iterators directly (@cmwaters)
  - [state] \#5864 Use an iterator when pruning state (@cmwaters)

- Blockchain Protocol

- Data Storage
  - [store/state/evidence/light] \#5771 Use an order-preserving varint key encoding (@cmwaters)

### FEATURES

### IMPROVEMENTS

- [crypto/ed25519] \#5632 Adopt zip215 `ed25519` verification. (@marbar3778)
- [privval] \#5603 Add `--key` to `init`, `gen_validator`, `testnet` & `unsafe_reset_priv_validator` for use in generating `secp256k1` keys.
- [privval] \#5725 Add gRPC support to private validator.
- [privval] \#5876 `tendermint show-validator` will query the remote signer if gRPC is being used (@marbar3778)
- [abci/client] \#5673 `Async` requests return an error if queue is full (@melekes)
- [mempool] \#5673 Cancel `CheckTx` requests if RPC client disconnects or times out (@melekes)
- [abci] \#5706 Added `AbciVersion` to `RequestInfo` allowing applications to check ABCI version when connecting to Tendermint. (@marbar3778)
- [blockchain/v1] \#5728 Remove in favor of v2 (@melekes)
- [blockchain/v0] \#5741 Relax termination conditions and increase sync timeout (@melekes)
- [cli] \#5772 `gen_node_key` output now contains node ID (`id` field) (@melekes)
- [blockchain/v2] \#5774 Send status request when new peer joins (@melekes)
- [consensus] \#5792 Deprecates the `time_iota_ms` consensus parameter, to reduce the bug surface. The parameter is no longer used. (@valardragon)

### BUG FIXES

- [types] \#5523 Change json naming of `PartSetHeader` within `BlockID` from `parts` to `part_set_header` (@marbar3778)
- [privval] \#5638 Increase read/write timeout to 5s and calculate ping interval based on it (@JoeKash)
- [blockchain/v1] [\#5701](https://github.com/tendermint/tendermint/pull/5701) Handle peers without blocks (@melekes)
- [blockchain/v1] \#5711 Fix deadlock (@melekes)
310
CONTRIBUTING.md

@@ -26,8 +26,7 @@ will indicate their support with a heartfelt emoji.

If the issue would benefit from thorough discussion, maintainers may
request that you create a [Request For
Comment](https://github.com/tendermint/spec/tree/master/rfc)
in the Tendermint spec repo. Discussion
Comment](https://github.com/tendermint/spec/tree/master/rfc). Discussion
at the RFC stage will build collective understanding of the dimensions
of the problems and help structure conversations around trade-offs.
@@ -109,20 +108,37 @@ We use [Protocol Buffers](https://developers.google.com/protocol-buffers) along

For linting, checking breaking changes and generating proto stubs, we use [buf](https://buf.build/). If you would like to run linting and check if the changes you have made are breaking then you will need to have docker running locally. Then the linting cmd will be `make proto-lint` and the breaking changes check will be `make proto-check-breaking`.

We use [Docker](https://www.docker.com/) to generate the protobuf stubs. To generate the stubs yourself, make sure docker is running then run `make proto-gen`.
There are two ways to generate your proto stubs (both are shown in the sketch after this list):

1. Use Docker: pull an image that will generate your proto stubs with no need to install anything locally, via `make proto-gen-docker`.
2. Run `make proto-gen` after installing `buf` and `gogoproto`; you can install these by running `make protobuf`.
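
In other words, either of the following should work (a minimal sketch; the target names are the ones listed above):

```sh
# Option 1: containerized generation, no local toolchain required
make proto-gen-docker

# Option 2: local generation after installing the tooling
make protobuf    # installs buf and gogoproto
make proto-gen
```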

### Installation Instructions

To install `protoc`, download an appropriate release (<https://github.com/protocolbuffers/protobuf>) and then move the provided binaries into your PATH (follow instructions in README included with the download).

To install `gogoproto`, do the following:

```sh
go get github.com/gogo/protobuf/gogoproto
cd $GOPATH/pkg/mod/github.com/gogo/protobuf@v1.3.1 # or wherever go get installs things
make install
```

You should now be able to run `make proto-gen` from inside the root Tendermint directory to generate new files from proto files.
### Visual Studio Code

If you are a VS Code user, you may want to add the following to your `.vscode/settings.json`:

```json
{
    "protoc": {
        "options": [
            "--proto_path=${workspaceRoot}/proto",
            "--proto_path=${workspaceRoot}/third_party/proto"
        ]
    }
}
```
@@ -227,221 +243,125 @@ Fixes #nnnn

Each PR should have one commit once it lands on `master`; this can be accomplished by using the "squash and merge" button on Github. Be sure to edit your commit message, though!

### Release procedure
### Release Procedure

#### A note about backport branches

Tendermint's `master` branch is under active development.
Releases are specified using tags and are built from long-lived "backport" branches.
Each release "line" (e.g. 0.34 or 0.33) has its own long-lived backport branch,
and the backport branches have names like `v0.34.x` or `v0.33.x`
(literally, `x`; it is not a placeholder in this case).

#### Major Release

As non-breaking changes land on `master`, they should also be backported (cherry-picked)
to these backport branches.
This major release process assumes that this release was preceded by release candidates.
If there were no release candidates, and you'd like to cut a major release directly from master, see below.

We use Mergify's [backport feature](https://mergify.io/features/backports) to automatically backport
to the needed branch. There should be a label for any backport branch that you'll be targeting.
To notify the bot to backport a pull request, mark the pull request with
the label `S:backport-to-<backport_branch>`.
Once the original pull request is merged, the bot will try to cherry-pick the pull request
to the backport branch. If the bot fails to backport, it will open a pull request.
The author of the original pull request is responsible for solving the conflicts and
merging the pull request.
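
As a concrete illustration, a backport rule in a Mergify configuration might look roughly like this (a sketch only: the rule name, label, and branch are illustrative and not copied from the repo's actual `.github/mergify.yml`; the `backport` action shape follows Mergify's documented schema):

```yaml
pull_request_rules:
  - name: backport patches to the v0.34.x branch
    conditions:
      - base=master
      - label=S:backport-to-v0.34.x
    actions:
      backport:
        branches:
          - v0.34.x
```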

#### Creating a backport branch

If this is the first release candidate for a major release, you get to have the honor of creating
the backport branch!

Note that, after creating the backport branch, you'll also need to update the tags on `master`
so that `go mod` is able to order the branches correctly. You should tag `master` with a "dev" tag
that is "greater than" the backport branches' tags. See #6072 for more context.

In the following example, we'll assume that we're making a backport branch for
the 0.35.x line; a consolidated command sketch follows the steps below.

1. Start on `master`
2. Create the backport branch:
   `git checkout -b v0.35.x`
3. Go back to master and tag it as the dev branch for the _next_ major release and push it back up:
   `git tag -a v0.36.0-dev; git push origin v0.36.0-dev`
4. Create a new workflow to run the e2e nightlies for this backport branch.
   (See https://github.com/tendermint/tendermint/blob/master/.github/workflows/e2e-nightly-34x.yml
   for an example.)
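
Putting those steps together, the flow looks something like this (a sketch only; branch and tag names assume the 0.35.x example above, and `origin` is assumed to be the main remote):

```sh
git checkout master
git checkout -b v0.35.x                  # create the long-lived backport branch
git push origin v0.35.x                  # publish it (assumed step; not listed above)

git checkout master                      # tag master as the next dev line
git tag -a v0.36.0-dev -m "start of the v0.36 development cycle"
git push origin v0.36.0-dev
```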

#### Release candidates

Before creating an official release, especially a major release, we may want to create a
release candidate (RC) for our friends and partners to test out. We use git tags to
create RCs, and we build them off of backport branches.

Tags for RCs should follow the "standard" release naming conventions, with `-rcX` at the end
(for example, `v0.35.0-rc0`).

(Note that branches and tags _cannot_ have the same names, so it's important that these branches
have distinct names from the tags/release names.)

If this is the first RC for a major release, you'll have to make a new backport branch (see above).
Otherwise:

1. Start from the backport branch (e.g. `v0.35.x`).
2. Run the integration tests and the e2e nightlies
   (which can be triggered from the Github UI;
   e.g., https://github.com/tendermint/tendermint/actions/workflows/e2e-nightly-34x.yml).
3. Prepare the changelog:
   - Move the changes included in `CHANGELOG_PENDING.md` into `CHANGELOG.md`.
   - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for all PRs.
   - Ensure that UPGRADING.md is up-to-date and includes notes on any breaking changes
     or other upgrading flows.
   - Bump TMVersionDefault version in `version.go`
   - Bump P2P and block protocol versions in `version.go`, if necessary
   - Bump ABCI protocol version in `version.go`, if necessary
4. Open a PR with these changes against the backport branch.
5. Once these changes have landed on the backport branch, be sure to pull them back down locally.
6. Once you have the changes locally, create the new tag, specifying a name and a tag "message":
   `git tag -a v0.35.0-rc0 -m "Release Candidate v0.35.0-rc0"`
7. Push the tag back up to origin:
   `git push origin v0.35.0-rc0`
   Now the tag should be available on the repo's releases page.
8. Future RCs will continue to be built off of this branch.

Note that this process should only be used for "true" RCs--
release candidates that, if successful, will be the next release.
For more experimental "RCs," create a new, short-lived branch and tag that instead.
#### Major release

This major release process assumes that this release was preceded by release candidates.
If there were no release candidates, begin by creating a backport branch, as described above.

1. Start on the backport branch (e.g. `v0.35.x`)
2. Run integration tests and the e2e nightlies.
3. Prepare the release:
1. Start on the latest RC branch (`RCx/vX.X.0`).
2. Run integration tests.
3. Branch off of the RC branch (`git checkout -b release-prep`) and prepare the release:
   - "Squash" changes from the changelog entries for the RCs into a single entry,
     and add all changes included in `CHANGELOG_PENDING.md`.
     (Squashing includes both combining all entries, as well as removing or simplifying
     any intra-RC changes. It may also help to alphabetize the entries by package name.)
   - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for all PRs.
   - Ensure that UPGRADING.md is up-to-date and includes notes on any breaking changes
     or other upgrading flows.
   - Bump TMVersionDefault version in `version.go`
   - Bump P2P and block protocol versions in `version.go`, if necessary
   - Bump ABCI protocol version in `version.go`, if necessary
4. Open a PR with these changes against the backport branch.
5. Once these changes are on the backport branch, push a tag with prepared release details.
   This will trigger the actual release `v0.35.0`.
   - `git tag -a v0.35.0 -m 'Release v0.35.0'`
   - `git push origin v0.35.0`
7. Make sure that `master` is updated with the latest `CHANGELOG.md`, `CHANGELOG_PENDING.md`, and `UPGRADING.md`.
   - Add any release notes you would like to be added to the body of the release to `release_notes.md`.
4. Open a PR with these changes against the RC branch (`RCx/vX.X.0`).
5. Once these changes are on the RC branch, branch off of the RC branch again to create a release branch:
   - `git checkout RCx/vX.X.0`
   - `git checkout -b release/vX.X.0`
6. Push a tag with prepared release details. This will trigger the actual release `vX.X.0`.
   - `git tag -a vX.X.0 -m 'Release vX.X.0'`
   - `git push origin vX.X.0`
8. Create the long-lived minor release branch `RC0/vX.X.1` for the next point release on this
   new major release series.
#### Minor release (point releases)

##### Major Release (from `master`)

1. Start on `master`
2. Run integration tests (see `test_integrations` in Makefile)
3. Prepare release in a pull request against `master` (to be squash merged):
   - Copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md`; if this release
     had release candidates, squash all the RC updates into one
   - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
     all issues
   - Run `bash ./scripts/authors.sh` to get a list of authors since the latest
     release, and add the github aliases of external contributors to the top of
     the changelog. To lookup an alias from an email, try `bash ./scripts/authors.sh <email>`
   - Reset the `CHANGELOG_PENDING.md`
   - Bump P2P and block protocol versions in `version.go`, if necessary
   - Bump ABCI protocol version in `version.go`, if necessary
   - Make sure all significant breaking changes are covered in `UPGRADING.md`
   - Add any release notes you would like to be added to the body of the release to `release_notes.md`.
4. Push a tag with prepared release details (this will trigger the release `vX.X.0`)
   - `git tag -a vX.X.x -m 'Release vX.X.x'`
   - `git push origin vX.X.x`
5. Update the `CHANGELOG.md` file on master with the release's changelog.
6. Delete any RC branches and tags for this release (if applicable)

#### Minor Release (Point Releases)

Minor releases are done differently from major releases: They are built off of long-lived backport branches, rather than from master.
Each release "line" (e.g. 0.34 or 0.33) has its own long-lived backport branch, and
the backport branches have names like `v0.34.x` or `v0.33.x` (literally, `x`; it is not a placeholder in this case).

As non-breaking changes land on `master`, they should also be backported (cherry-picked) to these backport branches.

Minor releases don't have release candidates by default, although any tricky changes may merit a release candidate.

To create a minor release (a consolidated sketch of the final tag-and-push step appears after this list):

1. Checkout the long-lived backport branch: `git checkout v0.35.x`
2. Run integration tests (`make test_integrations`) and the nightlies.
1. Checkout the long-lived backport branch: `git checkout vX.X.x`
2. Run integration tests: `make test_integrations`
3. Check out a new branch and prepare the release:
   - Copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md`
   - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for all issues
   - Run `bash ./scripts/authors.sh` to get a list of authors since the latest release, and add the GitHub aliases of external contributors to the top of the CHANGELOG. To lookup an alias from an email, try `bash ./scripts/authors.sh <email>`
   - Reset the `CHANGELOG_PENDING.md`
   - Bump the ABCI version number, if necessary.
     (Note that ABCI follows semver, and that ABCI versions are the only versions
     which can change during minor releases, and only field additions are valid minor changes.)
4. Open a PR with these changes that will land them back on `v0.35.x`
   - Add any release notes you would like to be added to the body of the release to `release_notes.md`.
4. Open a PR with these changes that will land them back on `vX.X.x`
5. Once this change has landed on the backport branch, make sure to pull it locally, then push a tag.
   - `git tag -a v0.35.1 -m 'Release v0.35.1'`
   - `git push origin v0.35.1`
   - `git tag -a vX.X.x -m 'Release vX.X.x'`
   - `git push origin vX.X.x`
6. Create a pull request back to master with the CHANGELOG & version changes from the latest release.
   - Remove all `R:minor` labels from the pull requests that were included in the release.
   - Do not merge the backport branch into master.
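
Here is the promised sketch of that flow for a hypothetical v0.35.1 point release (branch and tag names follow the example above; `origin` is assumed to be the main remote):

```sh
git checkout v0.35.x                      # long-lived backport branch
git pull origin v0.35.x                   # make sure the release PR has landed locally
git tag -a v0.35.1 -m 'Release v0.35.1'
git push origin v0.35.1                   # pushing the tag triggers the release
```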

#### Release Candidates

Before creating an official release, especially a major release, we may want to create a
release candidate (RC) for our friends and partners to test out. We use git tags to
create RCs, and we build them off of RC branches. RC branches typically have names formatted
like `RCX/vX.X.X` (or, concretely, `RC0/v0.34.0`), while the tags themselves follow
the "standard" release naming conventions, with `-rcX` at the end (`vX.X.X-rcX`).

(Note that branches and tags _cannot_ have the same names, so it's important that these branches
have distinct names from the tags/release names.)

1. Start from the RC branch (e.g. `RC0/v0.34.0`).
2. Create the new tag, specifying a name and a tag "message":
   `git tag -a v0.34.0-rc0 -m "Release Candidate v0.34.0-rc0"`
3. Push the tag back up to origin:
   `git push origin v0.34.0-rc0`
   Now the tag should be available on the repo's releases page.
4. Create a new release candidate branch for any possible updates to the RC:
   `git checkout -b RC1/v0.34.0; git push origin RC1/v0.34.0`

## Testing

### Unit tests

All repos should be hooked up to [CircleCI](https://circleci.com/).

Unit tests are located in `_test.go` files as directed by [the Go testing
package](https://golang.org/pkg/testing/). If you're adding or removing a
function, please check there's a `TestType_Method` test for it.

Run: `make test`
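
For example, a test following the `TestType_Method` convention might look like this (a self-contained sketch; the `Counter` type here is purely illustrative and not part of the Tendermint codebase):

```go
package counter

import "testing"

// Counter is an illustrative type; in a real package it would live in
// counter.go, with this test alongside it in counter_test.go.
type Counter struct{ n int }

// Add increments the counter by x and returns the new total.
func (c *Counter) Add(x int) int {
	c.n += x
	return c.n
}

// TestCounter_Add follows the TestType_Method naming convention:
// the type under test (Counter) and the method it exercises (Add).
func TestCounter_Add(t *testing.T) {
	c := &Counter{}
	if got := c.Add(2); got != 2 {
		t.Fatalf("Add(2) = %d, want 2", got)
	}
}
```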

### Integration tests

Integration tests are also located in `_test.go` files. What differentiates
them is a more complicated setup, which usually involves setting up two or more
components.

Run: `make test_integrations`

### End-to-end tests

End-to-end tests are used to verify a fully integrated Tendermint network.

See [README](./test/e2e/README.md) for details.

Run:

```sh
cd test/e2e && \
make && \
./build/runner -f networks/ci.toml
```

### Model-based tests (ADVANCED)

*NOTE: if you're just submitting your first PR, you won't need to touch these
most probably (99.9%)*.

For components that have been [formally
verified](https://en.wikipedia.org/wiki/Formal_verification) using
[TLA+](https://en.wikipedia.org/wiki/TLA%2B), it may be possible to generate
tests using a combination of the [Apalache Model
Checker](https://apalache.informal.systems/) and [tendermint-rs testgen
util](https://github.com/informalsystems/tendermint-rs/tree/master/testgen).

Now, I know there's a lot to take in. If you want to learn more, check out
[this video](https://www.youtube.com/watch?v=aveoIMphzW8) by Andrey Kupriyanov
& Igor Konnov.

At the moment, we have model-based tests for the light client, located in the
`./light/mbt` directory.

Run: `cd light/mbt && go test`

### Fuzz tests (ADVANCED)

*NOTE: if you're just submitting your first PR, you won't need to touch these
most probably (99.9%)*.

[Fuzz tests](https://en.wikipedia.org/wiki/Fuzzing) can be found inside the
`./test/fuzz` directory. See [README.md](./test/fuzz/README.md) for details.

Run: `cd test/fuzz && make fuzz-{PACKAGE-COMPONENT}`

### Jepsen tests (ADVANCED)

*NOTE: if you're just submitting your first PR, you won't need to touch these
most probably (99.9%)*.

[Jepsen](http://jepsen.io/) tests are used to verify the
[linearizability](https://jepsen.io/consistency/models/linearizable) property
of the Tendermint consensus. They are located in a separate repository
-> <https://github.com/tendermint/jepsen>. Please refer to its README for more
information.

If they have `.go` files in the root directory, they will be automatically
tested by circle using `go test -v -race ./...`. If not, they will need a
`circle.yml`. Ideally, every repo has a `Makefile` that defines `make test` and
includes its continuous integration status using a badge in the `README.md`.

### RPC Testing

**If you contribute to the RPC endpoints it's important to document your
changes in the [Openapi file](./rpc/openapi/openapi.yaml)**.

To test your changes you must install `nodejs` and run:
If you contribute to the RPC endpoints it's important to document your changes in the [Openapi file](./rpc/openapi/openapi.yaml).
To test your changes you should install `nodejs` and run:

```bash
npm i -g dredd
@@ -449,8 +369,4 @@ make build-linux build-contract-tests-hooks
make contract-tests
```

**WARNING: these are currently broken due to <https://github.com/apiaryio/dredd>
not supporting complete OpenAPI 3**.

This command will spin up a network and check every endpoint against what has
been documented.
This command will spin up a network and check every endpoint against what has been documented.
@@ -1,5 +1,5 @@
|
||||
# stage 1 Generate Tendermint Binary
|
||||
FROM golang:1.16-alpine as builder
|
||||
FROM golang:1.15-alpine as builder
|
||||
RUN apk update && \
|
||||
apk upgrade && \
|
||||
apk --no-cache add make
|
||||
|
||||
@@ -9,7 +9,7 @@ RUN wget http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm &
|
||||
RUN yum -y groupinstall "Development Tools"
|
||||
RUN yum -y install leveldb-devel which
|
||||
|
||||
ENV GOVERSION=1.16.5
|
||||
ENV GOVERSION=1.12.9
|
||||
|
||||
RUN cd /tmp && \
|
||||
wget https://dl.google.com/go/go${GOVERSION}.linux-amd64.tar.gz && \
|
||||
|
||||
@@ -31,8 +31,8 @@ To get started developing applications, see the [application developers guide](h
|
||||
A quick example of a built-in app and Tendermint core in one container.
|
||||
|
||||
```sh
|
||||
docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint init validator
|
||||
docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint start --proxy-app=kvstore
|
||||
docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint init
|
||||
docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint node --proxy-app=kvstore
|
||||
```
|
||||
|
||||
## Local cluster
|
||||
|
||||
@@ -3,7 +3,7 @@ set -e
|
||||
|
||||
if [ ! -d "$TMHOME/config" ]; then
|
||||
echo "Running tendermint init to create (default) configuration for docker run."
|
||||
tendermint init validator
|
||||
tendermint init
|
||||
|
||||
sed -i \
|
||||
-e "s/^proxy-app\s*=.*/proxy-app = \"$PROXY_APP\"/" \
|
||||
|
||||
28
Makefile
28
Makefile
@@ -12,7 +12,7 @@ else
|
||||
VERSION := $(shell git describe)
|
||||
endif
|
||||
|
||||
LD_FLAGS = -X github.com/tendermint/tendermint/version.TMVersion=$(VERSION)
|
||||
LD_FLAGS = -X github.com/tendermint/tendermint/version.TMCoreSemVer=$(VERSION)
|
||||
BUILD_FLAGS = -mod=readonly -ldflags "$(LD_FLAGS)"
|
||||
HTTPS_GIT := https://github.com/tendermint/tendermint.git
|
||||
DOCKER_BUF := docker run -v $(shell pwd):/workspace --workdir /workspace bufbuild/buf
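
For context, the `-X` flag in `LD_FLAGS` stamps the version string into the binary at link time. Expanded by hand, the build that `BUILD_FLAGS` produces looks roughly like this (a sketch; the concrete version value is whatever `git describe` reports, and the output path is illustrative):

```sh
# What BUILD_FLAGS expands to for a hypothetical v0.34.7 checkout
go build -mod=readonly \
  -ldflags "-X github.com/tendermint/tendermint/version.TMVersion=v0.34.7" \
  -o build/tendermint ./cmd/tendermint
```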

@@ -58,6 +58,8 @@ LD_FLAGS += $(LDFLAGS)

all: check build test install
.PHONY: all

# The below include contains the tools.
include tools/Makefile
include test/Makefile

###############################################################################

@@ -83,10 +85,19 @@ proto-all: proto-gen proto-lint proto-check-breaking
.PHONY: proto-all

proto-gen:
	## If you get the following error,
	## "error while loading shared libraries: libprotobuf.so.14: cannot open shared object file: No such file or directory"
	## See https://stackoverflow.com/a/25518702
	## Note the $< here is substituted for the %.proto
	## Note the $@ here is substituted for the %.pb.go
	@sh scripts/protocgen.sh
.PHONY: proto-gen

proto-gen-docker:
	@docker pull -q tendermintdev/docker-build-proto
	@echo "Generating Protobuf files"
	@docker run -v $(shell pwd):/workspace --workdir /workspace tendermintdev/docker-build-proto sh ./scripts/protocgen.sh
.PHONY: proto-gen
.PHONY: proto-gen-docker

proto-lint:
	@$(DOCKER_BUF) check lint --error-format=json

@@ -202,7 +213,7 @@ format:

lint:
	@echo "--> Running linter"
	go run github.com/golangci/golangci-lint/cmd/golangci-lint run
	@golangci-lint run
.PHONY: lint

DESTINATION = ./index.html.md

@@ -231,21 +242,12 @@ build-docker: build-linux
	rm -rf DOCKER/tendermint
.PHONY: build-docker

###############################################################################
###                                Mocks                                    ###
###############################################################################

mockery:
	go generate -run="./scripts/mockery_generate.sh" ./...
.PHONY: mockery

###############################################################################
###                     Local testnet using docker                          ###
###############################################################################

# Build linux binary on other platforms
build-linux:
build-linux: tools
	GOOS=linux GOARCH=amd64 $(MAKE) build
.PHONY: build-linux
43
README.md

@@ -8,7 +8,7 @@ Or [Blockchain](<https://en.wikipedia.org/wiki/Blockchain_(database)>), for shor

[](https://github.com/tendermint/tendermint/releases/latest)
[](https://pkg.go.dev/github.com/tendermint/tendermint)
[](https://github.com/moovweb/gvm)
[](https://github.com/moovweb/gvm)
[](https://discord.gg/vcExX9T)
[](https://github.com/tendermint/tendermint/blob/master/LICENSE)
[](https://github.com/tendermint/tendermint)

@@ -18,7 +18,8 @@ Or [Blockchain](<https://en.wikipedia.org/wiki/Blockchain_(database)>), for shor
|--------|--------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------|
| master |  | [](https://codecov.io/gh/tendermint/tendermint) |  |

Tendermint Core is a Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine - written in any programming language - and securely replicates it on many machines.
Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine - written in any programming language -
and securely replicates it on many machines.

For protocol details, see [the specification](https://github.com/tendermint/spec).

@@ -29,7 +30,9 @@ see our recent paper, "[The latest gossip on BFT consensus](https://arxiv.org/ab

Please do not depend on master as your production branch. Use [releases](https://github.com/tendermint/tendermint/releases) instead.

Tendermint has been used in production in private and public environments, most notably the blockchains of the Cosmos Network. We haven't released v1.0 yet since we are making breaking changes to the protocol and the APIs.
Tendermint is being used in production in both private and public environments,
most notably the blockchains of the [Cosmos Network](https://cosmos.network/).
However, we are still making breaking changes to the protocol and the APIs and have not yet released v1.0.
See below for more details about [versioning](#versioning).

In any case, if you intend to run Tendermint in production, we're happy to help. You can

@@ -48,7 +51,7 @@ to notify you of vulnerabilities and fixes in Tendermint Core. You can subscribe

| Requirement | Notes            |
|-------------|------------------|
| Go version  | Go1.16 or higher |
| Go version  | Go1.15 or higher |

## Documentation

@@ -82,12 +85,32 @@ and familiarize yourself with our

Tendermint uses [Semantic Versioning](http://semver.org/) to determine when and how the version changes.
According to SemVer, anything in the public API can change at any time before version 1.0.0

To provide some stability to users of 0.X.X versions of Tendermint, the MINOR version is used
to signal breaking changes across Tendermint's API. This API includes all
publicly exposed types, functions, and methods in non-internal Go packages as well as
the types and methods accessible via the Tendermint RPC interface.
To provide some stability to Tendermint users in these 0.X.X days, the MINOR version is used
to signal breaking changes across a subset of the total public API. This subset includes all
interfaces exposed to other processes (cli, rpc, p2p, etc.), but does not
include the Go APIs.

Breaking changes to these public APIs will be documented in the CHANGELOG.
That said, breaking changes in the following packages will be documented in the
CHANGELOG even if they don't lead to MINOR version bumps:

- crypto
- config
- libs
  - bits
  - bytes
  - json
  - log
  - math
  - net
  - os
  - protoio
  - rand
  - sync
  - strings
  - service
- node
- rpc/client
- types

### Upgrades

@@ -144,4 +167,4 @@ If you'd like to work full-time on Tendermint Core, [we're hiring](https://inter

Funding for Tendermint Core development comes primarily from the [Interchain Foundation](https://interchain.io),
a Swiss non-profit. The Tendermint trademark is owned by [Tendermint Inc.](https://tendermint.com), the for-profit entity
that also maintains [tendermint.com](https://tendermint.com).
78
SECURITY.md
78
SECURITY.md
@@ -7,55 +7,54 @@ Policy](https://tendermint.com/security), we operate a [bug
bounty](https://hackerone.com/tendermint).
See the policy for more details on submissions and rewards, and see "Example Vulnerabilities" (below) for examples of the kinds of bugs we're most interested in.

### Guidelines

We require that all researchers:

* Use the bug bounty to disclose all vulnerabilities, and avoid posting vulnerability information in public places, including GitHub Issues, Discord channels, and Telegram groups
* Make every effort to avoid privacy violations, degradation of user experience, disruption to production systems (including but not limited to the Cosmos Hub), and destruction of data
* Keep any information about vulnerabilities that you’ve discovered confidential between yourself and the Tendermint Core engineering team until the issue has been resolved and disclosed
* Avoid posting personally identifiable information, privately or publicly

If you follow these guidelines when reporting an issue to us, we commit to:

* Not pursue or support any legal action related to your research on this vulnerability
* Work with you to understand, resolve, and ultimately disclose the issue in a timely fashion

## Disclosure Process

Tendermint Core uses the following disclosure process:

1. Once a security report is received, the Tendermint Core team works to verify the issue and confirm its severity level using CVSS.
2. The Tendermint Core team collaborates with the Gaia team to determine the vulnerability’s potential impact on the Cosmos Hub.
3. Patches are prepared for eligible releases of Tendermint in private repositories. See “Supported Releases” below for more information on which releases are considered eligible.
4. If it is determined that a CVE-ID is required, we request a CVE through a CVE Numbering Authority.
5. We notify the community that a security release is coming, to give users time to prepare their systems for the update. Notifications can include forum posts, tweets, and emails to partners and validators, including emails sent to the [Tendermint Security Mailing List](https://berlin.us4.list-manage.com/subscribe?u=431b35421ff7edcc77df5df10&id=3fe93307bc).
6. 24 hours following this notification, the fixes are applied publicly and new releases are issued.
7. Cosmos SDK and Gaia update their Tendermint Core dependencies to use these releases, and then themselves issue new releases.
8. Once releases are available for Tendermint Core, Cosmos SDK and Gaia, we notify the community, again, through the same channels as above. We also publish a Security Advisory on GitHub and publish the CVE, as long as neither the Security Advisory nor the CVE include any information on how to exploit these vulnerabilities beyond what is already available in the patch itself.
9. Once the community is notified, we will pay out any relevant bug bounties to submitters.
10. One week after the releases go out, we will publish a post with further details on the vulnerability as well as our response to it.

This process can take some time. Every effort will be made to handle the bug in as timely a manner as possible; however, it's important that we follow the process described above to ensure that disclosures are handled consistently and to keep Tendermint Core and its downstream dependent projects (including but not limited to Gaia and the Cosmos Hub) as secure as possible.

### Example Timeline

The following is an example timeline for the triage and response. The required roles and team members are described in parentheses after each task; however, multiple people can play each role and each person may play multiple roles.

#### 24+ Hours Before Release Time

1. Request CVE number (ADMIN)
2. Gather emails and other contact info for validators (COMMS LEAD)
3. Create patches in a private security repo, and ensure that PRs are open targeting all relevant release branches (TENDERMINT ENG, TENDERMINT LEAD)
4. Test fixes on a testnet (TENDERMINT ENG, COSMOS SDK ENG)
5. Write “Security Advisory” for forum (TENDERMINT LEAD)

#### 24 Hours Before Release Time

1. Post “Security Advisory” pre-notification on forum (TENDERMINT LEAD)
2. Post Tweet linking to forum post (COMMS LEAD)
3. Announce security advisory/link to post in various other social channels (Telegram, Discord) (COMMS LEAD)
4. Send emails to validators or other users (PARTNERSHIPS LEAD)

#### Release Time
@@ -65,36 +64,36 @@ The following is an example timeline for the triage and response. The required r

4. Post “Security releases” on forum (TENDERMINT LEAD)
5. Post new Tweet linking to forum post (COMMS LEAD)
6. Remind everyone via social channels (Telegram, Discord) that the release is out (COMMS LEAD)
7. Send emails to validators or other users (COMMS LEAD)
8. Publish Security Advisory and CVE, if CVE has no sensitive information (ADMIN)

#### After Release Time

1. Write forum post with exploit details (TENDERMINT LEAD)
2. Approve pay-out on HackerOne for submitter (ADMIN)

#### 7 Days After Release Time

1. Publish CVE if it has not yet been published (ADMIN)
2. Publish forum post with exploit details (TENDERMINT ENG, TENDERMINT LEAD)

## Supported Releases

The Tendermint Core team commits to releasing security patch releases for both the latest minor release as well as for the major/minor release that the Cosmos Hub is running.

If you are running older versions of Tendermint Core, we encourage you to upgrade at your earliest opportunity so that you can receive security patches directly from the Tendermint repo. While you are welcome to backport security patches to older versions for your own use, we will not publish or promote these backports.

## Scope

The full scope of our bug bounty program is outlined on our [HackerOne program page](https://hackerone.com/tendermint). Please also note that, in the interest of the safety of our users and staff, a few things are explicitly excluded from scope:

* Any third-party services
* Findings from physical testing, such as office access
* Findings derived from social engineering (e.g., phishing)

## Example Vulnerabilities

The following is a list of examples of the kinds of vulnerabilities that we’re most interested in. It is not exhaustive: there are other kinds of issues we may also be interested in!

### Specification

@@ -115,9 +114,6 @@ Assuming less than 1/3 of the voting power is Byzantine (malicious):

* A node halting (liveness failure)
* Syncing new and old nodes

Assuming more than 1/3 of the voting power is Byzantine:

* Attacks that go unpunished (unhandled evidence)

### Networking

@@ -143,7 +139,7 @@ Attacks may come through the P2P network or the RPC layer:

### Libraries

* Serialization
* Reading/Writing files and databases

### Cryptography

@@ -154,5 +150,5 @@ Attacks may come through the P2P network or the RPC layer:

### Light Client

* Core verification
* Bisection/sequential algorithms
221
UPGRADING.md

@@ -2,198 +2,43 @@

This guide provides instructions for upgrading to specific versions of Tendermint Core.

## v0.35
### ABCI Changes

* Added `AbciVersion` to `RequestInfo`. Applications should check that the ABCI version they expect is being used in order to avoid errors caused by unimplemented changes.

* The `SetOption` method has been removed from the ABCI `Client` interface. This feature was used in early ABCI implementations.

* Messages are written to a byte stream using uint64 length delimiters instead of int64.

* When mempool `v1` is enabled, transactions broadcast via `sync` mode may return a successful
  response with a transaction hash, indicating that the transaction was successfully inserted into
  the mempool. While this is true for `v0`, the `v1` mempool reactor may at a later point in time
  evict or even drop this transaction after a hash has been returned. Thus, the user or client must
  query for that transaction to check whether it is still in the mempool (a sketch of such a
  re-check follows this list).
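Because of this eviction behavior, clients should confirm inclusion themselves rather than trusting the broadcast response. A minimal sketch of that re-check; the single-argument `rpchttp.New` constructor and the `BroadcastTxSync`/`Tx` signatures are assumptions based on the v0.35 RPC client and should be verified against your release:

```go
package main

import (
	"context"
	"fmt"

	rpchttp "github.com/tendermint/tendermint/rpc/client/http"
)

func confirmTx(ctx context.Context, remote string, txBytes []byte) error {
	c, err := rpchttp.New(remote) // e.g. "http://localhost:26657"
	if err != nil {
		return err
	}
	res, err := c.BroadcastTxSync(ctx, txBytes)
	if err != nil {
		return err
	}
	// With mempool v1, a zero-code response only means the tx entered the
	// mempool; it may still be evicted. Check inclusion (in production,
	// poll with a timeout rather than calling once).
	txRes, err := c.Tx(ctx, res.Hash, false)
	if err != nil {
		return fmt.Errorf("tx %X not (yet) committed: %w", res.Hash, err)
	}
	fmt.Printf("tx committed at height %d\n", txRes.Height)
	return nil
}
```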
### Config Changes

* The configuration file field `[fastsync]` has been renamed to `[blocksync]`.

* The top-level configuration file field `fast-sync` has moved under the new `[blocksync]`
  field as `blocksync.enable`.

* `blocksync.version = "v1"` and `blocksync.version = "v2"` (previously `fastsync`)
  are no longer supported. Please use `v0` instead. During the v0.35 release cycle, `v0` was
  determined to suit the existing needs, and the cost of maintaining the `v1` and `v2` modules
  was determined to be greater than necessary.

* All config parameters are now hyphen-case (also known as kebab-case) instead of snake_case. Before restarting the node, make sure
  you have updated all the variables in your `config.toml` file.

* Added a `--mode` flag and a `mode` config variable in `config.toml` for setting the node's mode: `full` | `validator` | `seed` (default: `full`). See
  [ADR-52](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-052-tendermint-mode.md).

* `BootstrapPeers` has been added as part of the new p2p stack. This will eventually replace
  `Seeds`. Bootstrap peers are connected with on startup if needed for peer discovery. Unlike
  persistent peers, there is no guarantee that the node will remain connected with these peers.

* Configuration values starting with `priv-validator-` have moved to the new
  `priv-validator` section, without the `priv-validator-` prefix.

* The fast sync process, as well as the blockchain package and service, have all
  been renamed to block sync.
### Database Key Format Changes

The format of all Tendermint on-disk database keys changes in
0.35. Upgrading nodes must either re-sync all data or run a migration
script provided in this release. The script, located in
`github.com/tendermint/tendermint/scripts/keymigrate/migrate.go`,
provides the function `Migrate(context.Context, db.DB)`, which you can
operationalize as makes sense for your deployment.

For ease of use, the `tendermint` command includes a CLI version of the
migration script, which you can invoke as:

    tendermint key-migrate

This reads the configuration file as normal and allows the
`--db-backend` and `--db-dir` flags to change database operations as
needed.

The migration operation is idempotent and can be run more than once,
if needed.
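For deployments that embed Tendermint, the same migration can be invoked programmatically. A minimal sketch based on the `Migrate(context.Context, db.DB)` signature quoted above; the database name, backend, and path are illustrative:

```go
package main

import (
	"context"
	"log"

	db "github.com/tendermint/tm-db"

	"github.com/tendermint/tendermint/scripts/keymigrate"
)

func main() {
	// Open one of the node's databases (path and backend are illustrative).
	blockstore, err := db.NewGoLevelDB("blockstore", "/path/to/.tendermint/data")
	if err != nil {
		log.Fatal(err)
	}
	// Idempotent: safe to run again if a previous attempt was interrupted.
	if err := keymigrate.Migrate(context.Background(), blockstore); err != nil {
		log.Fatal(err)
	}
}
```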
### CLI Changes

* You must now specify the node mode (validator|full|seed) in `tendermint init [mode]`.

* The `--fast-sync` command line option has been renamed to `--blocksync.enable`.

* If you had previously used `tendermint gen_node_key` to generate a new node
  key, keep in mind that it no longer saves the output to a file. You can use
  `tendermint init validator` or pipe the output of `tendermint gen_node_key` to
  `$TMHOME/config/node_key.json`:

  ```
  $ tendermint gen_node_key > $TMHOME/config/node_key.json
  ```

* CLI commands and flags are all now hyphen-case instead of snake_case.
  Make sure to adjust any scripts that call a CLI command with snake_casing.
### API Changes

The p2p layer was reimplemented as part of the 0.35 release cycle and
all reactors were refactored to accommodate the change. As part of that work these
implementations moved into the `internal` package and are no longer
considered part of the public Go API of Tendermint. These packages
are:

- `p2p`
- `mempool`
- `consensus`
- `statesync`
- `blockchain`
- `evidence`

Accordingly, the `node` package was changed to reduce access to
Tendermint internals: applications that use Tendermint as a library
will need to change to accommodate these changes. Most notably:

- The `Node` type has become internal, and all constructors return a
  `service.Service` implementation.

- The `node.DefaultNewNode` and `node.NewNode` constructors are no
  longer exported and have been replaced with `node.New` and
  `node.NewDefault`, which provide more functional interfaces. A sketch
  of the new construction pattern follows this list.
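A hedged sketch of the new construction pattern; `node.NewDefault(conf, logger)` is an assumed signature, so check the `node` package in your release before relying on it:

```go
package nodeutil

import (
	"github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/libs/service"
	"github.com/tendermint/tendermint/node"
)

// StartDefaultNode returns the running node only as a service.Service;
// the concrete Node type is no longer exported.
func StartDefaultNode(conf *config.Config, logger log.Logger) (service.Service, error) {
	n, err := node.NewDefault(conf, logger) // assumed signature
	if err != nil {
		return nil, err
	}
	if err := n.Start(); err != nil {
		return nil, err
	}
	return n, nil
}
```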
### gRPC Support

gRPC in the RPC layer is marked as deprecated and will be removed in 0.36.

### Peer Management Interface

When running with the new p2p layer, the `UnsafeDialSeeds` and
`UnsafeDialPeers` RPC methods will always return an error. They are
deprecated and will be removed in 0.36 when the legacy peer stack is
removed.

Additionally, the format of the peer list returned by the `NetInfo`
method changes in this release to accommodate the different way that
the new stack tracks data about peers. This change affects users of
both stacks.
### Using the updated p2p library

The p2p library was reimplemented in this release. The new implementation is
enabled by default in this version of Tendermint. The legacy implementation is still
included in this version of Tendermint as a backstop to work around unforeseen
production issues. The new and legacy versions are interoperable. If necessary,
you can enable the legacy implementation in the server configuration file.

To make use of the legacy p2p implementation, add or update the following field of
your server's configuration file under the `[p2p]` section:

```toml
[p2p]
...
use-legacy = true
...
```

If you need to do this, please consider filing an issue in the Tendermint repository
to let us know why. We plan to remove the legacy p2p code in the next (v0.36) release.
#### New p2p queue types

The new p2p implementation enables selection of the queue type to be used for
passing messages between peers.

The following values may be used when selecting which queue type to use:

* `fifo`: (**default**) An unbuffered and lossless queue that passes messages through
  in the order in which they were received.

* `priority`: A priority queue of messages.

* `wdrr`: A queue implementing the Weighted Deficit Round Robin algorithm. A
  weighted deficit round robin queue is created per peer. Each queue contains a
  separate "flow" for each of the channels of communication that exist between any two
  peers. Tendermint maintains a channel per message type between peers. Each WDRR
  queue maintains a shared buffer with a fixed capacity through which messages on different
  flows are passed. A toy sketch of the deficit-round-robin idea appears after the
  configuration example below.
  For more information on WDRR scheduling, see: https://en.wikipedia.org/wiki/Deficit_round_robin
To select a queue type, add or update the following field under the `[p2p]`
section of your server's configuration file:

```toml
[p2p]
...
queue-type = "wdrr"
...
```
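To make the scheduling idea concrete, here is a toy, self-contained sketch of deficit round robin. This is illustrative only and not Tendermint's actual implementation: each flow banks a per-round quantum of "deficit" and may send while its head-of-line message fits the banked deficit.

```go
package drr

type message struct{ size int }

type flow struct {
	queue   []message
	quantum int // weight: credit added to the deficit each round
	deficit int
}

// round gives every flow one scheduling opportunity and returns the
// messages selected for sending, in order.
func round(flows []*flow) []message {
	var out []message
	for _, f := range flows {
		if len(f.queue) == 0 {
			f.deficit = 0 // idle flows don't bank credit
			continue
		}
		f.deficit += f.quantum
		// Send while the head-of-line message fits the banked deficit.
		for len(f.queue) > 0 && f.queue[0].size <= f.deficit {
			m := f.queue[0]
			f.queue = f.queue[1:]
			f.deficit -= m.size
			out = append(out, m)
		}
	}
	return out
}
```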
### Support for Custom Reactor and Mempool Implementations

The changes to the p2p layer removed existing support for custom
reactors. Based on our understanding of how this functionality was
used, the introduction of the prioritized mempool covers nearly all of
the use cases for custom reactors. If you are currently running custom
reactors and mempools and are having trouble seeing the migration path
for your project, please feel free to reach out to the Tendermint Core
development team directly.

## v0.34.0

**Upgrading to Tendermint 0.34 requires a blockchain restart.**
This release is not compatible with previous blockchains due to changes to
the encoding format (see "Protocol Buffers," below) and the block header (see "Blockchain Protocol").

Note also that Tendermint 0.34 requires Go 1.16 or higher.
### ABCI Changes

@@ -340,8 +185,8 @@ Other user-relevant changes include:

* The old `lite` package was removed; the new light client uses the `light` package.
* The `Verifier` was broken up into two pieces:
  * Core verification logic (pure `VerifyX` functions)
  * `Client` object, which represents the complete light client
* The new light client stores headers & validator sets as `LightBlock`s
* The RPC client can be found in the `/rpc` directory.
* The HTTP(S) proxy is located in the `/proxy` directory.
@@ -473,12 +318,12 @@ Evidence Params has been changed to include duration.

### Go API

* `libs/common` has been removed in favor of specific packages:
  * `async`
  * `service`
  * `rand`
  * `net`
  * `strings`
  * `cmap`
* Removal of the `errors` pkg
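To make the migration concrete, a hedged sketch of what replacing `libs/common` imports looks like in application code; `tmrand.Str` standing in for the old `cmn.RandStr` is an assumption to verify against your version:

```go
package app

import (
	tmrand "github.com/tendermint/tendermint/libs/rand"
	"github.com/tendermint/tendermint/libs/service"
)

// The pieces of the old cmn package now live in focused packages,
// e.g. cmn.BaseService is now service.BaseService.
var _ service.Service

// randomID replaces a former cmn.RandStr call (assumed mapping).
func randomID() string { return tmrand.Str(8) }
```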
### RPC Changes
@@ -547,9 +392,9 @@ Prior to the update, suppose your `ResponseDeliverTx` looked like:

```go
abci.ResponseDeliverTx{
	Tags: []kv.Pair{
		{Key: []byte("sender"), Value: []byte("foo")},
		{Key: []byte("recipient"), Value: []byte("bar")},
		{Key: []byte("amount"), Value: []byte("35")},
	}
}
```
@@ -568,14 +413,14 @@ the following `Events`:

```go
abci.ResponseDeliverTx{
	Events: []abci.Event{
		{
			Type: "transfer",
			Attributes: kv.Pairs{
				{Key: []byte("sender"), Value: []byte("foo")},
				{Key: []byte("recipient"), Value: []byte("bar")},
				{Key: []byte("amount"), Value: []byte("35")},
			},
		}
	}
}
```
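Attributes of indexed events become queryable through the RPC layer as `<type>.<key>`. A hedged sketch of searching for the event above; the v0.34 `rpc/client/http` constructor and `TxSearch` signature are assumptions to verify against your version:

```go
package main

import (
	"context"
	"fmt"
	"log"

	rpchttp "github.com/tendermint/tendermint/rpc/client/http"
)

func main() {
	c, err := rpchttp.New("http://localhost:26657", "/websocket")
	if err != nil {
		log.Fatal(err)
	}
	// Match the "transfer" event's indexed "sender" attribute.
	res, err := c.TxSearch(context.Background(), "transfer.sender='foo'", false, nil, nil, "asc")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("found %d matching txs\n", res.TotalCount)
}
```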
@@ -623,9 +468,9 @@ In this case, the WS client will receive an error with description:

"jsonrpc": "2.0",
"id": "{ID}#event",
"error": {
	"code": -32000,
	"msg": "Server error",
	"data": "subscription was canceled (reason: client is not pulling messages fast enough)" // or "subscription was canceled (reason: Tendermint exited)"
}
}
@@ -831,9 +676,9 @@ just the `Data` field set:

```go
[]ProofOp{
	ProofOp{
		Data: <proof bytes>,
	}
}
```
@@ -6,8 +6,8 @@ import (
	"sync"

	"github.com/tendermint/tendermint/abci/types"
	tmsync "github.com/tendermint/tendermint/internal/libs/sync"
	"github.com/tendermint/tendermint/libs/service"
	tmsync "github.com/tendermint/tendermint/libs/sync"
)

const (

@@ -15,7 +15,7 @@ const (
	echoRetryIntervalSeconds = 1
)

//go:generate ../../scripts/mockery_generate.sh Client
//go:generate mockery --case underscore --name Client

// Client defines an interface for an ABCI client.
//

@@ -80,14 +80,18 @@ func NewClient(addr, transport string, mustConnect bool) (client Client, err err
	return
}

//----------------------------------------

type Callback func(*types.Request, *types.Response)

//----------------------------------------

type ReqRes struct {
	*types.Request
	*sync.WaitGroup
	*types.Response // Not set atomically, so be sure to use WaitGroup.

	mtx  tmsync.RWMutex
	mtx  tmsync.Mutex
	done bool                  // Gets set to true once *after* WaitGroup.Done().
	cb   func(*types.Response) // A single callback that may be set.
}
@@ -103,50 +107,34 @@ func NewReqRes(req *types.Request) *ReqRes {
	}
}

// Sets sets the callback. If reqRes is already done, it will call the cb
// immediately. Note, reqRes.cb should not change if reqRes.done and only one
// callback is supported.
func (r *ReqRes) SetCallback(cb func(res *types.Response)) {
	r.mtx.Lock()
// Sets the callback for this ReqRes atomically.
// If reqRes is already done, calls cb immediately.
// NOTE: reqRes.cb should not change if reqRes.done.
// NOTE: only one callback is supported.
func (reqRes *ReqRes) SetCallback(cb func(res *types.Response)) {
	reqRes.mtx.Lock()

	if r.done {
		r.mtx.Unlock()
		cb(r.Response)
	if reqRes.done {
		reqRes.mtx.Unlock()
		cb(reqRes.Response)
		return
	}

	r.cb = cb
	r.mtx.Unlock()
	reqRes.cb = cb
	reqRes.mtx.Unlock()
}

// InvokeCallback invokes a thread-safe execution of the configured callback
// if non-nil.
func (r *ReqRes) InvokeCallback() {
	r.mtx.Lock()
	defer r.mtx.Unlock()

	if r.cb != nil {
		r.cb(r.Response)
	}
func (reqRes *ReqRes) GetCallback() func(*types.Response) {
	reqRes.mtx.Lock()
	defer reqRes.mtx.Unlock()
	return reqRes.cb
}

// GetCallback returns the configured callback of the ReqRes object which may be
// nil. Note, it is not safe to concurrently call this in cases where it is
// marked done and SetCallback is called before calling GetCallback as that
// will invoke the callback twice and create a potential race condition.
//
// ref: https://github.com/tendermint/tendermint/issues/5439
func (r *ReqRes) GetCallback() func(*types.Response) {
	r.mtx.RLock()
	defer r.mtx.RUnlock()
	return r.cb
}

// SetDone marks the ReqRes object as done.
func (r *ReqRes) SetDone() {
	r.mtx.Lock()
	defer r.mtx.Unlock()
	r.done = true
// NOTE: it should be safe to read reqRes.cb without locks after this.
func (reqRes *ReqRes) SetDone() {
	reqRes.mtx.Lock()
	reqRes.done = true
	reqRes.mtx.Unlock()
}

func waitGroup1() (wg *sync.WaitGroup) {
@@ -10,9 +9,9 @@ import (
	"google.golang.org/grpc"

	"github.com/tendermint/tendermint/abci/types"
	tmsync "github.com/tendermint/tendermint/internal/libs/sync"
	tmnet "github.com/tendermint/tendermint/libs/net"
	"github.com/tendermint/tendermint/libs/service"
	tmsync "github.com/tendermint/tendermint/libs/sync"
)

// A gRPC client.

@@ -24,7 +24,7 @@ type grpcClient struct {
	conn     *grpc.ClientConn
	chReqRes chan *ReqRes // dispatches "async" responses to callbacks *in order*, needed by mempool

	mtx   tmsync.RWMutex
	mtx   tmsync.Mutex
	addr  string
	err   error
	resCb func(*types.Request, *types.Response) // listens to all callbacks

@@ -149,8 +149,8 @@ func (cli *grpcClient) StopForError(err error) {
}

func (cli *grpcClient) Error() error {
	cli.mtx.RLock()
	defer cli.mtx.RUnlock()
	cli.mtx.Lock()
	defer cli.mtx.Unlock()
	return cli.err
}

@@ -158,8 +158,8 @@ func (cli *grpcClient) Error() error {
// NOTE: callback may get internally generated flush responses.
func (cli *grpcClient) SetResponseCallback(resCb Callback) {
	cli.mtx.Lock()
	defer cli.mtx.Unlock()
	cli.resCb = resCb
	cli.mtx.Unlock()
}

//----------------------------------------
@@ -4,8 +4,8 @@ import (
	"context"

	types "github.com/tendermint/tendermint/abci/types"
	tmsync "github.com/tendermint/tendermint/internal/libs/sync"
	"github.com/tendermint/tendermint/libs/service"
	tmsync "github.com/tendermint/tendermint/libs/sync"
)

// NOTE: use defer to unlock mutex because Application might panic (e.g., in

@@ -15,7 +15,7 @@ import (
type localClient struct {
	service.BaseService

	mtx *tmsync.RWMutex
	mtx *tmsync.Mutex
	types.Application
	Callback
}

@@ -26,24 +26,22 @@ var _ Client = (*localClient)(nil)
// methods of the given app.
//
// Both Async and Sync methods ignore the given context.Context parameter.
func NewLocalClient(mtx *tmsync.RWMutex, app types.Application) Client {
func NewLocalClient(mtx *tmsync.Mutex, app types.Application) Client {
	if mtx == nil {
		mtx = &tmsync.RWMutex{}
		mtx = new(tmsync.Mutex)
	}

	cli := &localClient{
		mtx:         mtx,
		Application: app,
	}

	cli.BaseService = *service.NewBaseService(nil, "localClient", cli)
	return cli
}

func (app *localClient) SetResponseCallback(cb Callback) {
	app.mtx.Lock()
	defer app.mtx.Unlock()
	app.Callback = cb
	app.mtx.Unlock()
}

// TODO: change types.Application to include Error()?

@@ -67,8 +65,8 @@ func (app *localClient) EchoAsync(ctx context.Context, msg string) (*ReqRes, err
}

func (app *localClient) InfoAsync(ctx context.Context, req types.RequestInfo) (*ReqRes, error) {
	app.mtx.RLock()
	defer app.mtx.RUnlock()
	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.Info(req)
	return app.callback(

@@ -100,8 +98,8 @@ func (app *localClient) CheckTxAsync(ctx context.Context, req types.RequestCheck
}

func (app *localClient) QueryAsync(ctx context.Context, req types.RequestQuery) (*ReqRes, error) {
	app.mtx.RLock()
	defer app.mtx.RUnlock()
	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.Query(req)
	return app.callback(

@@ -215,8 +213,8 @@ func (app *localClient) EchoSync(ctx context.Context, msg string) (*types.Respon
}

func (app *localClient) InfoSync(ctx context.Context, req types.RequestInfo) (*types.ResponseInfo, error) {
	app.mtx.RLock()
	defer app.mtx.RUnlock()
	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.Info(req)
	return &res, nil

@@ -249,8 +247,8 @@ func (app *localClient) QuerySync(
	ctx context.Context,
	req types.RequestQuery,
) (*types.ResponseQuery, error) {
	app.mtx.RLock()
	defer app.mtx.RUnlock()
	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.Query(req)
	return &res, nil
@@ -1,4 +1,4 @@
// Code generated by mockery. DO NOT EDIT.
// Code generated by mockery v2.3.0. DO NOT EDIT.

package mocks

@@ -796,8 +796,3 @@ func (_m *Client) String() string {

	return r0
}

// Wait provides a mock function with given fields:
func (_m *Client) Wait() {
	_m.Called()
}
@@ -12,10 +12,10 @@ import (
	"time"

	"github.com/tendermint/tendermint/abci/types"
	tmsync "github.com/tendermint/tendermint/internal/libs/sync"
	"github.com/tendermint/tendermint/internal/libs/timer"
	tmnet "github.com/tendermint/tendermint/libs/net"
	"github.com/tendermint/tendermint/libs/service"
	tmsync "github.com/tendermint/tendermint/libs/sync"
	"github.com/tendermint/tendermint/libs/timer"
)

const (

@@ -43,7 +43,7 @@ type socketClient struct {
	reqQueue   chan *reqResWithContext
	flushTimer *timer.ThrottleTimer

	mtx     tmsync.RWMutex
	mtx     tmsync.Mutex
	err     error
	reqSent *list.List                            // list of requests sent, waiting for response
	resCb   func(*types.Request, *types.Response) // called on all requests, if set.

@@ -108,8 +108,8 @@ func (cli *socketClient) OnStop() {

// Error returns an error if the client was stopped abruptly.
func (cli *socketClient) Error() error {
	cli.mtx.RLock()
	defer cli.mtx.RUnlock()
	cli.mtx.Lock()
	defer cli.mtx.Unlock()
	return cli.err
}

@@ -119,8 +119,8 @@ func (cli *socketClient) Error() error {
// NOTE: callback may get internally generated flush responses.
func (cli *socketClient) SetResponseCallback(resCb Callback) {
	cli.mtx.Lock()
	defer cli.mtx.Unlock()
	cli.resCb = resCb
	cli.mtx.Unlock()
}

//----------------------------------------

@@ -226,7 +226,9 @@ func (cli *socketClient) didRecvResponse(res *types.Response) error {
	//
	// NOTE: it is possible this callback isn't set on the reqres object at this
	// point, in which case it will be called after, when it is set.
	reqres.InvokeCallback()
	if cb := reqres.GetCallback(); cb != nil {
		cb(res)
	}

	return nil
}
@@ -6,14 +6,13 @@ import (
	"testing"
	"time"

	"math/rand"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	abcicli "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/server"
	"github.com/tendermint/tendermint/abci/types"
	tmrand "github.com/tendermint/tendermint/libs/rand"
	"github.com/tendermint/tendermint/libs/service"
)

@@ -102,7 +101,7 @@ func TestHangingSyncCalls(t *testing.T) {
func setupClientServer(t *testing.T, app types.Application) (
	service.Service, abcicli.Client) {
	// some port between 20k and 30k
	port := 20000 + rand.Int31()%10000
	port := 20000 + tmrand.Int32()%10000
	addr := fmt.Sprintf("localhost:%d", port)

	s, err := server.NewServer(addr, "socket", app)
@@ -17,6 +17,7 @@ import (

	abcicli "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/example/code"
	"github.com/tendermint/tendermint/abci/example/counter"
	"github.com/tendermint/tendermint/abci/example/kvstore"
	"github.com/tendermint/tendermint/abci/server"
	servertest "github.com/tendermint/tendermint/abci/tests/server"

@@ -46,6 +47,9 @@ var (
	flagHeight int
	flagProve  bool

	// counter
	flagSerial bool

	// kvstore
	flagPersist string
)

@@ -57,14 +61,19 @@ var RootCmd = &cobra.Command{
	PersistentPreRunE: func(cmd *cobra.Command, args []string) error {

		switch cmd.Use {
		case "kvstore", "version":
		case "counter", "kvstore": // for the example apps, don't pre-run
			return nil
		case "version": // skip running for the version command
			return nil
		}

		if logger == nil {
			logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
			allowLevel, err := log.AllowLevel(flagLogLevel)
			if err != nil {
				return err
			}
			logger = log.NewFilter(log.NewTMLogger(log.NewSyncWriter(os.Stdout)), allowLevel)
		}

		if client == nil {
			var err error
			client, err = abcicli.NewClient(flagAddress, flagAbci, false)

@@ -129,6 +138,10 @@ func addQueryFlags() {
		"whether or not to return a merkle proof of the query result")
}

func addCounterFlags() {
	counterCmd.PersistentFlags().BoolVarP(&flagSerial, "serial", "", false, "enforce incrementing (serial) transactions")
}

func addKVStoreFlags() {
	kvstoreCmd.PersistentFlags().StringVarP(&flagPersist, "persist", "", "", "directory to use for a database")
}

@@ -147,6 +160,8 @@ func addCommands() {
	RootCmd.AddCommand(queryCmd)

	// examples
	addCounterFlags()
	RootCmd.AddCommand(counterCmd)
	addKVStoreFlags()
	RootCmd.AddCommand(kvstoreCmd)
}

@@ -246,6 +261,14 @@ var queryCmd = &cobra.Command{
	RunE: cmdQuery,
}

var counterCmd = &cobra.Command{
	Use:   "counter",
	Short: "ABCI demo example",
	Long:  "ABCI demo example",
	Args:  cobra.ExactArgs(0),
	RunE:  cmdCounter,
}

var kvstoreCmd = &cobra.Command{
	Use:   "kvstore",
	Short: "ABCI demo example",

@@ -573,8 +596,34 @@ func cmdQuery(cmd *cobra.Command, args []string) error {
	return nil
}

func cmdCounter(cmd *cobra.Command, args []string) error {
	app := counter.NewApplication(flagSerial)
	logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))

	// Start the listener
	srv, err := server.NewServer(flagAddress, flagAbci, app)
	if err != nil {
		return err
	}
	srv.SetLogger(logger.With("module", "abci-server"))
	if err := srv.Start(); err != nil {
		return err
	}

	// Stop upon receiving SIGTERM or CTRL-C.
	tmos.TrapSignal(logger, func() {
		// Cleanup
		if err := srv.Stop(); err != nil {
			logger.Error("Error while stopping server", "err", err)
		}
	})

	// Run forever.
	select {}
}

func cmdKVStore(cmd *cobra.Command, args []string) error {
	logger := log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
	logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))

	// Create the application - in memory or persisted to disk
	var app types.Application
86
abci/example/counter/counter.go
Normal file
@@ -0,0 +1,86 @@
package counter

import (
	"encoding/binary"
	"fmt"

	"github.com/tendermint/tendermint/abci/example/code"
	"github.com/tendermint/tendermint/abci/types"
)

type Application struct {
	types.BaseApplication

	hashCount int
	txCount   int
	serial    bool
}

func NewApplication(serial bool) *Application {
	return &Application{serial: serial}
}

func (app *Application) Info(req types.RequestInfo) types.ResponseInfo {
	return types.ResponseInfo{Data: fmt.Sprintf("{\"hashes\":%v,\"txs\":%v}", app.hashCount, app.txCount)}
}

func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
	if app.serial {
		if len(req.Tx) > 8 {
			return types.ResponseDeliverTx{
				Code: code.CodeTypeEncodingError,
				Log:  fmt.Sprintf("Max tx size is 8 bytes, got %d", len(req.Tx))}
		}
		tx8 := make([]byte, 8)
		copy(tx8[len(tx8)-len(req.Tx):], req.Tx)
		txValue := binary.BigEndian.Uint64(tx8)
		if txValue != uint64(app.txCount) {
			return types.ResponseDeliverTx{
				Code: code.CodeTypeBadNonce,
				Log:  fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.txCount, txValue)}
		}
	}
	app.txCount++
	return types.ResponseDeliverTx{Code: code.CodeTypeOK}
}

func (app *Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
	if app.serial {
		if len(req.Tx) > 8 {
			return types.ResponseCheckTx{
				Code: code.CodeTypeEncodingError,
				Log:  fmt.Sprintf("Max tx size is 8 bytes, got %d", len(req.Tx))}
		}

		tx8 := make([]byte, 8)
		copy(tx8[len(tx8)-len(req.Tx):], req.Tx)
		txValue := binary.BigEndian.Uint64(tx8)
		if txValue < uint64(app.txCount) {
			return types.ResponseCheckTx{
				Code: code.CodeTypeBadNonce,
				Log:  fmt.Sprintf("Invalid nonce. Expected >= %v, got %v", app.txCount, txValue)}
		}
	}
	return types.ResponseCheckTx{Code: code.CodeTypeOK}
}

func (app *Application) Commit() (resp types.ResponseCommit) {
	app.hashCount++
	if app.txCount == 0 {
		return types.ResponseCommit{}
	}
	hash := make([]byte, 8)
	binary.BigEndian.PutUint64(hash, uint64(app.txCount))
	return types.ResponseCommit{Data: hash}
}

func (app *Application) Query(reqQuery types.RequestQuery) types.ResponseQuery {
	switch reqQuery.Path {
	case "hash":
		return types.ResponseQuery{Value: []byte(fmt.Sprintf("%v", app.hashCount))}
	case "tx":
		return types.ResponseQuery{Value: []byte(fmt.Sprintf("%v", app.txCount))}
	default:
		return types.ResponseQuery{Log: fmt.Sprintf("Invalid query path. Expected hash or tx, got %v", reqQuery.Path)}
	}
}
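For orientation, a short hedged driver for the counter application above, exercising its serial-nonce logic directly in-process (no server needed); the import paths assume this repository layout:

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/abci/example/counter"
	"github.com/tendermint/tendermint/abci/types"
)

func main() {
	app := counter.NewApplication(true) // serial: txs are big-endian nonces

	// Nonce 0 is accepted; Commit then returns the tx count as the app hash.
	if res := app.DeliverTx(types.RequestDeliverTx{Tx: []byte{0x00}}); res.Code != types.CodeTypeOK {
		panic(res.Log)
	}
	commit := app.Commit()
	fmt.Printf("app hash: %x\n", commit.Data) // 0000000000000001

	// An out-of-order nonce is rejected in serial mode (txCount is now 1).
	res := app.DeliverTx(types.RequestDeliverTx{Tx: []byte{0x05}})
	fmt.Println(res.Code == types.CodeTypeOK, res.Log) // false, "Invalid nonce. ..."
}
```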
@@ -1,8 +1,6 @@
package kvstore

import (
	mrand "math/rand"

	"github.com/tendermint/tendermint/abci/types"
	tmrand "github.com/tendermint/tendermint/libs/rand"
)

@@ -11,8 +9,7 @@ import (
// from the input value
func RandVal(i int) types.ValidatorUpdate {
	pubkey := tmrand.Bytes(32)
	// Random value between [0, 2^16 - 1]
	power := mrand.Uint32() & (1<<16 - 1) // nolint:gosec // G404: Use of weak random number generator
	power := tmrand.Uint16() + 1
	v := types.UpdateValidator(pubkey, int64(power), "")
	return v
}
@@ -87,16 +87,15 @@ func (app *Application) Info(req types.RequestInfo) (resInfo types.ResponseInfo)

// tx is either "key=value" or just arbitrary bytes
func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
	var key, value string

	var key, value []byte
	parts := bytes.Split(req.Tx, []byte("="))
	if len(parts) == 2 {
		key, value = string(parts[0]), string(parts[1])
		key, value = parts[0], parts[1]
	} else {
		key, value = string(req.Tx), string(req.Tx)
		key, value = req.Tx, req.Tx
	}

	err := app.state.db.Set(prefixKey([]byte(key)), []byte(value))
	err := app.state.db.Set(prefixKey(key), value)
	if err != nil {
		panic(err)
	}

@@ -106,10 +105,10 @@ func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeli
		{
			Type: "app",
			Attributes: []types.EventAttribute{
				{Key: "creator", Value: "Cosmoshi Netowoko", Index: true},
				{Key: "key", Value: key, Index: true},
				{Key: "index_key", Value: "index is working", Index: true},
				{Key: "noindex_key", Value: "index is working", Index: false},
				{Key: []byte("creator"), Value: []byte("Cosmoshi Netowoko"), Index: true},
				{Key: []byte("key"), Value: key, Index: true},
				{Key: []byte("index_key"), Value: []byte("index is working"), Index: true},
				{Key: []byte("noindex_key"), Value: []byte("index is working"), Index: false},
			},
		},
	}
@@ -51,10 +51,6 @@ func NewPersistentKVStoreApplication(dbDir string) *PersistentKVStoreApplication
	}
}

func (app *PersistentKVStoreApplication) Close() error {
	return app.app.state.db.Close()
}

func (app *PersistentKVStoreApplication) SetLogger(l log.Logger) {
	app.logger = l
}
@@ -9,10 +9,10 @@ import (
	"runtime"

	"github.com/tendermint/tendermint/abci/types"
	tmsync "github.com/tendermint/tendermint/internal/libs/sync"
	tmlog "github.com/tendermint/tendermint/libs/log"
	tmnet "github.com/tendermint/tendermint/libs/net"
	"github.com/tendermint/tendermint/libs/service"
	tmsync "github.com/tendermint/tendermint/libs/sync"
)

// var maxNumberConnections = 2
@@ -5,7 +5,6 @@ import (
	"context"
	"errors"
	"fmt"
	mrand "math/rand"

	abcicli "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/types"

@@ -19,8 +18,7 @@ func InitChain(client abcicli.Client) error {
	vals := make([]types.ValidatorUpdate, total)
	for i := 0; i < total; i++ {
		pubkey := tmrand.Bytes(33)
		// nolint:gosec // G404: Use of weak random number generator
		power := mrand.Int()
		power := tmrand.Int()
		vals[i] = types.UpdateValidator(pubkey, int64(power), "")
	}
	_, err := client.InitChainSync(ctx, types.RequestInitChain{
56
abci/tests/test_app/app.go
Normal file
@@ -0,0 +1,56 @@
package main

import (
	"bytes"
	"context"
	"fmt"
	"os"

	abcicli "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/libs/log"
)

var ctx = context.Background()

func startClient(abciType string) abcicli.Client {
	// Start client
	client, err := abcicli.NewClient("tcp://127.0.0.1:26658", abciType, true)
	if err != nil {
		panic(err.Error())
	}
	logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
	client.SetLogger(logger.With("module", "abcicli"))
	if err := client.Start(); err != nil {
		panicf("connecting to abci_app: %v", err.Error())
	}

	return client
}

func commit(client abcicli.Client, hashExp []byte) {
	res, err := client.CommitSync(ctx)
	if err != nil {
		panicf("client error: %v", err)
	}
	if !bytes.Equal(res.Data, hashExp) {
		panicf("Commit hash was unexpected. Got %X expected %X", res.Data, hashExp)
	}
}

func deliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) {
	res, err := client.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: txBytes})
	if err != nil {
		panicf("client error: %v", err)
	}
	if res.Code != codeExp {
		panicf("DeliverTx response code was unexpected. Got %v expected %v. Log: %v", res.Code, codeExp, res.Log)
	}
	if !bytes.Equal(res.Data, dataExp) {
		panicf("DeliverTx response data was unexpected. Got %X expected %X", res.Data, dataExp)
	}
}

func panicf(format string, a ...interface{}) {
	panic(fmt.Sprintf(format, a...))
}
93
abci/tests/test_app/main.go
Normal file
@@ -0,0 +1,93 @@
package main

import (
	"fmt"
	"log"
	"os"
	"os/exec"
	"time"

	"github.com/tendermint/tendermint/abci/types"
)

var abciType string

func init() {
	abciType = os.Getenv("ABCI")
	if abciType == "" {
		abciType = "socket"
	}
}

func main() {
	testCounter()
}

const (
	maxABCIConnectTries = 10
)

func ensureABCIIsUp(typ string, n int) error {
	var err error
	cmdString := "abci-cli echo hello"
	if typ == "grpc" {
		cmdString = "abci-cli --abci grpc echo hello"
	}

	for i := 0; i < n; i++ {
		cmd := exec.Command("bash", "-c", cmdString)
		_, err = cmd.CombinedOutput()
		if err == nil {
			break
		}
		time.Sleep(500 * time.Millisecond)
	}
	return err
}

func testCounter() {
	abciApp := os.Getenv("ABCI_APP")
	if abciApp == "" {
		panic("No ABCI_APP specified")
	}

	fmt.Printf("Running %s test with abci=%s\n", abciApp, abciType)
	subCommand := fmt.Sprintf("abci-cli %s", abciApp)
	cmd := exec.Command("bash", "-c", subCommand)
	cmd.Stdout = os.Stdout
	if err := cmd.Start(); err != nil {
		log.Fatalf("starting %q err: %v", abciApp, err)
	}
	defer func() {
		if err := cmd.Process.Kill(); err != nil {
			log.Printf("error on process kill: %v", err)
		}
		if err := cmd.Wait(); err != nil {
			log.Printf("error while waiting for cmd to exit: %v", err)
		}
	}()

	if err := ensureABCIIsUp(abciType, maxABCIConnectTries); err != nil {
		log.Fatalf("echo failed: %v", err) //nolint:gocritic
	}

	client := startClient(abciType)
	defer func() {
		if err := client.Stop(); err != nil {
			log.Printf("error trying client stop: %v", err)
		}
	}()

	// commit(client, nil)
	// deliverTx(client, []byte("abc"), code.CodeTypeBadNonce, nil)
	commit(client, nil)
	deliverTx(client, []byte{0x00}, types.CodeTypeOK, nil)
	commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 1})
	// deliverTx(client, []byte{0x00}, code.CodeTypeBadNonce, nil)
	deliverTx(client, []byte{0x01}, types.CodeTypeOK, nil)
	deliverTx(client, []byte{0x00, 0x02}, types.CodeTypeOK, nil)
	deliverTx(client, []byte{0x00, 0x03}, types.CodeTypeOK, nil)
	deliverTx(client, []byte{0x00, 0x00, 0x04}, types.CodeTypeOK, nil)
	// deliverTx(client, []byte{0x00, 0x00, 0x06}, code.CodeTypeBadNonce, nil)
	commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 5})
}
28
abci/tests/test_app/test.sh
Executable file
@@ -0,0 +1,28 @@
#! /bin/bash
set -e

# These tests spawn the counter app and server by execing the ABCI_APP command and run some simple client tests against it

# Get the directory of where this script is.
export PATH="$GOBIN:$PATH"
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"

# Change into that dir because we expect that.
cd "$DIR"

echo "RUN COUNTER OVER SOCKET"
# test golang counter
ABCI_APP="counter" go run -mod=readonly ./*.go
echo "----------------------"

echo "RUN COUNTER OVER GRPC"
# test golang counter via grpc
ABCI_APP="counter --abci=grpc" ABCI="grpc" go run -mod=readonly ./*.go
echo "----------------------"

# test nodejs counter
# TODO: fix node app
#ABCI_APP="node $GOPATH/src/github.com/tendermint/js-abci/example/app.js" go test -test.run TestCounter
@@ -37,6 +37,7 @@ function testExample() {
}

testExample 1 tests/test_cli/ex1.abci abci-cli kvstore
testExample 2 tests/test_cli/ex2.abci abci-cli counter

echo ""
echo "PASS"

@@ -4,7 +4,7 @@ import (
	"io"

	"github.com/gogo/protobuf/proto"
	"github.com/tendermint/tendermint/internal/libs/protoio"
	"github.com/tendermint/tendermint/libs/protoio"
)
const (

@@ -15,7 +15,11 @@ const (
func WriteMessage(msg proto.Message, w io.Writer) error {
	protoWriter := protoio.NewDelimitedWriter(w)
	_, err := protoWriter.WriteMsg(msg)
	return err
	if err != nil {
		return err
	}

	return nil
}

// ReadMessage reads a varint length-delimited protobuf message.
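For orientation, a hedged round-trip sketch for the length-delimited helpers in this hunk; it assumes `ReadMessage` mirrors `WriteMessage` and that the `ToRequestEcho` helper exists in `abci/types`:

```go
package main

import (
	"bytes"

	"github.com/tendermint/tendermint/abci/types"
)

// roundTrip writes a varint length-delimited request and reads it back.
func roundTrip() (*types.Request, error) {
	req := types.ToRequestEcho("hello") // assumed helper
	var buf bytes.Buffer
	if err := types.WriteMessage(req, &buf); err != nil {
		return nil, err
	}
	out := new(types.Request)
	if err := types.ReadMessage(&buf, out); err != nil {
		return nil, err
	}
	return out, nil
}
```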
@@ -25,7 +25,7 @@ func TestMarshalJSON(t *testing.T) {
		{
			Type: "testEvent",
			Attributes: []EventAttribute{
				{Key: "pho", Value: "bo"},
				{Key: []byte("pho"), Value: []byte("bo")},
			},
		},
	},

@@ -92,7 +92,7 @@ func TestWriteReadMessage2(t *testing.T) {
		{
			Type: "testEvent",
			Attributes: []EventAttribute{
				{Key: "abc", Value: "def"},
				{Key: []byte("abc"), Value: []byte("def")},
			},
		},
	},
@@ -6,7 +6,6 @@ import (
	"github.com/tendermint/tendermint/crypto/ed25519"
	cryptoenc "github.com/tendermint/tendermint/crypto/encoding"
	"github.com/tendermint/tendermint/crypto/secp256k1"
	"github.com/tendermint/tendermint/crypto/sr25519"
)

func Ed25519ValidatorUpdate(pk []byte, power int64) ValidatorUpdate {

@@ -18,6 +17,7 @@ func Ed25519ValidatorUpdate(pk []byte, power int64) ValidatorUpdate {
	}

	return ValidatorUpdate{
		// Address:
		PubKey: pkp,
		Power:  power,
	}

@@ -34,16 +34,7 @@ func UpdateValidator(pk []byte, power int64, keyType string) ValidatorUpdate {
			panic(err)
		}
		return ValidatorUpdate{
			PubKey: pkp,
			Power:  power,
		}
	case sr25519.KeyType:
		pke := sr25519.PubKey(pk)
		pkp, err := cryptoenc.PubKeyToProto(pke)
		if err != nil {
			panic(err)
		}
		return ValidatorUpdate{
			// Address:
			PubKey: pkp,
			Power:  power,
		}

@@ -42,7 +42,7 @@ func (r ResponseQuery) IsErr() bool {
}

//---------------------------------------------------------------------------
// override JSON marshaling so we emit defaults (ie. disable omitempty)
// override JSON marshalling so we emit defaults (ie. disable omitempty)

var (
	jsonpbMarshaller = jsonpb.Marshaler{
@@ -1,10 +1,10 @@
/*
Package Behavior provides a mechanism for reactors to report behavior of peers.
Package Behaviour provides a mechanism for reactors to report behaviour of peers.

Instead of a reactor calling the switch directly it will call the behavior module which will
Instead of a reactor calling the switch directly it will call the behaviour module which will
handle the stopping and marking peer as good on behalf of the reactor.

There are four different behaviors a reactor can report.
There are four different behaviours a reactor can report.

1. bad message

@@ -39,4 +39,4 @@ type blockPart struct {
This message will request the peer be marked as good

*/
package behavior
package behaviour
49
behaviour/peer_behaviour.go
Normal file
@@ -0,0 +1,49 @@
package behaviour

import (
	"github.com/tendermint/tendermint/p2p"
)

// PeerBehaviour is a struct describing a behaviour a peer performed.
// `peerID` identifies the peer and reason characterizes the specific
// behaviour performed by the peer.
type PeerBehaviour struct {
	peerID p2p.NodeID
	reason interface{}
}

type badMessage struct {
	explanation string
}

// BadMessage returns a badMessage PeerBehaviour.
func BadMessage(peerID p2p.NodeID, explanation string) PeerBehaviour {
	return PeerBehaviour{peerID: peerID, reason: badMessage{explanation}}
}

type messageOutOfOrder struct {
	explanation string
}

// MessageOutOfOrder returns a messageOutOfOrder PeerBehaviour.
func MessageOutOfOrder(peerID p2p.NodeID, explanation string) PeerBehaviour {
	return PeerBehaviour{peerID: peerID, reason: messageOutOfOrder{explanation}}
}

type consensusVote struct {
	explanation string
}

// ConsensusVote returns a consensusVote PeerBehaviour.
func ConsensusVote(peerID p2p.NodeID, explanation string) PeerBehaviour {
	return PeerBehaviour{peerID: peerID, reason: consensusVote{explanation}}
}

type blockPart struct {
	explanation string
}

// BlockPart returns a blockPart PeerBehaviour.
func BlockPart(peerID p2p.NodeID, explanation string) PeerBehaviour {
	return PeerBehaviour{peerID: peerID, reason: blockPart{explanation}}
}
@@ -1,20 +1,19 @@
package behavior
package behaviour

import (
    "errors"

    tmsync "github.com/tendermint/tendermint/internal/libs/sync"
    "github.com/tendermint/tendermint/internal/p2p"
    "github.com/tendermint/tendermint/types"
    tmsync "github.com/tendermint/tendermint/libs/sync"
    "github.com/tendermint/tendermint/p2p"
)

// Reporter provides an interface for reactors to report the behavior
// Reporter provides an interface for reactors to report the behaviour
// of peers synchronously to other components.
type Reporter interface {
    Report(behavior PeerBehavior) error
    Report(behaviour PeerBehaviour) error
}

// SwitchReporter reports peer behavior to an internal Switch.
// SwitchReporter reports peer behaviour to an internal Switch.
type SwitchReporter struct {
    sw *p2p.Switch
}
@@ -26,14 +25,14 @@ func NewSwitchReporter(sw *p2p.Switch) *SwitchReporter {
    }
}

// Report reports the behavior of a peer to the Switch.
func (spbr *SwitchReporter) Report(behavior PeerBehavior) error {
    peer := spbr.sw.Peers().Get(behavior.peerID)
// Report reports the behaviour of a peer to the Switch.
func (spbr *SwitchReporter) Report(behaviour PeerBehaviour) error {
    peer := spbr.sw.Peers().Get(behaviour.peerID)
    if peer == nil {
        return errors.New("peer not found")
    }

    switch reason := behavior.reason.(type) {
    switch reason := behaviour.reason.(type) {
    case consensusVote, blockPart:
        spbr.sw.MarkPeerAsGood(peer)
    case badMessage:
@@ -49,39 +48,39 @@ func (spbr *SwitchReporter) Report(behavior PeerBehavior) error {

// MockReporter is a concrete implementation of the Reporter
// interface used in reactor tests to ensure reactors report the correct
// behavior in manufactured scenarios.
// behaviour in manufactured scenarios.
type MockReporter struct {
    mtx tmsync.RWMutex
    pb  map[types.NodeID][]PeerBehavior
    pb  map[p2p.NodeID][]PeerBehaviour
}

// NewMockReporter returns a Reporter which records all reported
// behaviors in memory.
// behaviours in memory.
func NewMockReporter() *MockReporter {
    return &MockReporter{
        pb: map[types.NodeID][]PeerBehavior{},
        pb: map[p2p.NodeID][]PeerBehaviour{},
    }
}

// Report stores the PeerBehavior produced by the peer identified by peerID.
func (mpbr *MockReporter) Report(behavior PeerBehavior) error {
// Report stores the PeerBehaviour produced by the peer identified by peerID.
func (mpbr *MockReporter) Report(behaviour PeerBehaviour) error {
    mpbr.mtx.Lock()
    defer mpbr.mtx.Unlock()
    mpbr.pb[behavior.peerID] = append(mpbr.pb[behavior.peerID], behavior)
    mpbr.pb[behaviour.peerID] = append(mpbr.pb[behaviour.peerID], behaviour)

    return nil
}

// GetBehaviors returns all behaviors reported on the peer identified by peerID.
func (mpbr *MockReporter) GetBehaviors(peerID types.NodeID) []PeerBehavior {
// GetBehaviours returns all behaviours reported on the peer identified by peerID.
func (mpbr *MockReporter) GetBehaviours(peerID p2p.NodeID) []PeerBehaviour {
    mpbr.mtx.RLock()
    defer mpbr.mtx.RUnlock()
    if items, ok := mpbr.pb[peerID]; ok {
        result := make([]PeerBehavior, len(items))
        result := make([]PeerBehaviour, len(items))
        copy(result, items)

        return result
    }

    return []PeerBehavior{}
    return []PeerBehaviour{}
}
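A note on the design of GetBehaviours above: it copies the stored slice before returning, so callers get a snapshot and cannot mutate the reporter's internal state after the lock is released. The same idiom in isolation (a standalone sketch, not code from this repository):

package main

import "fmt"

func main() {
    internal := []string{"badMessage", "consensusVote"}

    // Copy-on-read: hand out a snapshot, not the backing array.
    result := make([]string, len(internal))
    copy(result, internal)

    result[0] = "mutated"
    fmt.Println(internal[0]) // still "badMessage"
}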
behaviour/reporter_test.go (new file, 205 lines)
@@ -0,0 +1,205 @@
package behaviour_test

import (
    "sync"
    "testing"

    bh "github.com/tendermint/tendermint/behaviour"
    "github.com/tendermint/tendermint/p2p"
)

// TestMockReporter tests the MockReporter's ability to store reported
// peer behaviour in memory indexed by the peerID.
func TestMockReporter(t *testing.T) {
    var peerID p2p.NodeID = "MockPeer"
    pr := bh.NewMockReporter()

    behaviours := pr.GetBehaviours(peerID)
    if len(behaviours) != 0 {
        t.Error("Expected to have no behaviours reported")
    }

    badMessage := bh.BadMessage(peerID, "bad message")
    if err := pr.Report(badMessage); err != nil {
        t.Error(err)
    }
    behaviours = pr.GetBehaviours(peerID)
    if len(behaviours) != 1 {
        t.Error("Expected the peer to have one reported behaviour")
    }

    if behaviours[0] != badMessage {
        t.Error("Expected Bad Message to have been reported")
    }
}

type scriptItem struct {
    peerID    p2p.NodeID
    behaviour bh.PeerBehaviour
}

// equalBehaviours returns true if a and b contain the same PeerBehaviours with
// the same frequencies, and false otherwise.
func equalBehaviours(a []bh.PeerBehaviour, b []bh.PeerBehaviour) bool {
    aHistogram := map[bh.PeerBehaviour]int{}
    bHistogram := map[bh.PeerBehaviour]int{}

    for _, behaviour := range a {
        aHistogram[behaviour]++
    }

    for _, behaviour := range b {
        bHistogram[behaviour]++
    }

    if len(aHistogram) != len(bHistogram) {
        return false
    }

    for _, behaviour := range a {
        if aHistogram[behaviour] != bHistogram[behaviour] {
            return false
        }
    }

    for _, behaviour := range b {
        if bHistogram[behaviour] != aHistogram[behaviour] {
            return false
        }
    }

    return true
}

// TestEqualPeerBehaviours tests that equalBehaviours can compare two slices
// of peer behaviours by the behaviours they contain and the frequencies at
// which those behaviours occur.
func TestEqualPeerBehaviours(t *testing.T) {
    var (
        peerID        p2p.NodeID = "MockPeer"
        consensusVote            = bh.ConsensusVote(peerID, "voted")
        blockPart                = bh.BlockPart(peerID, "blocked")
        equals                   = []struct {
            left  []bh.PeerBehaviour
            right []bh.PeerBehaviour
        }{
            // Empty sets
            {[]bh.PeerBehaviour{}, []bh.PeerBehaviour{}},
            // Single behaviours
            {[]bh.PeerBehaviour{consensusVote}, []bh.PeerBehaviour{consensusVote}},
            // Equal frequencies
            {[]bh.PeerBehaviour{consensusVote, consensusVote},
                []bh.PeerBehaviour{consensusVote, consensusVote}},
            // Equal frequencies, different orders
            {[]bh.PeerBehaviour{consensusVote, blockPart},
                []bh.PeerBehaviour{blockPart, consensusVote}},
        }
        unequals = []struct {
            left  []bh.PeerBehaviour
            right []bh.PeerBehaviour
        }{
            // Comparing empty sets to non-empty sets
            {[]bh.PeerBehaviour{}, []bh.PeerBehaviour{consensusVote}},
            // Different behaviours
            {[]bh.PeerBehaviour{consensusVote}, []bh.PeerBehaviour{blockPart}},
            // Same behaviour with different frequencies
            {[]bh.PeerBehaviour{consensusVote},
                []bh.PeerBehaviour{consensusVote, consensusVote}},
        }
    )

    for _, test := range equals {
        if !equalBehaviours(test.left, test.right) {
            t.Errorf("expected %#v and %#v to be equal", test.left, test.right)
        }
    }

    for _, test := range unequals {
        if equalBehaviours(test.left, test.right) {
            t.Errorf("expected %#v and %#v to be unequal", test.left, test.right)
        }
    }
}

// TestPeerBehaviourConcurrency constructs a scenario in which
// multiple goroutines are using the same MockReporter instance.
// This test reproduces the conditions in which MockReporter will
// be used within a Reactor's `Receive` method, to ensure thread safety.
func TestMockPeerBehaviourReporterConcurrency(t *testing.T) {
    var (
        behaviourScript = []struct {
            peerID     p2p.NodeID
            behaviours []bh.PeerBehaviour
        }{
            {"1", []bh.PeerBehaviour{bh.ConsensusVote("1", "")}},
            {"2", []bh.PeerBehaviour{bh.ConsensusVote("2", ""), bh.ConsensusVote("2", ""), bh.ConsensusVote("2", "")}},
            {
                "3",
                []bh.PeerBehaviour{bh.BlockPart("3", ""),
                    bh.ConsensusVote("3", ""),
                    bh.BlockPart("3", ""),
                    bh.ConsensusVote("3", "")}},
            {
                "4",
                []bh.PeerBehaviour{bh.ConsensusVote("4", ""),
                    bh.ConsensusVote("4", ""),
                    bh.ConsensusVote("4", ""),
                    bh.ConsensusVote("4", "")}},
            {
                "5",
                []bh.PeerBehaviour{bh.BlockPart("5", ""),
                    bh.ConsensusVote("5", ""),
                    bh.BlockPart("5", ""),
                    bh.ConsensusVote("5", "")}},
        }
    )

    var receiveWg sync.WaitGroup
    pr := bh.NewMockReporter()
    scriptItems := make(chan scriptItem)
    done := make(chan int)
    numConsumers := 3
    for i := 0; i < numConsumers; i++ {
        receiveWg.Add(1)
        go func() {
            defer receiveWg.Done()
            for {
                select {
                case pb := <-scriptItems:
                    if err := pr.Report(pb.behaviour); err != nil {
                        t.Error(err)
                    }
                case <-done:
                    return
                }
            }
        }()
    }

    var sendingWg sync.WaitGroup
    sendingWg.Add(1)
    go func() {
        defer sendingWg.Done()
        for _, item := range behaviourScript {
            for _, reason := range item.behaviours {
                scriptItems <- scriptItem{item.peerID, reason}
            }
        }
    }()

    sendingWg.Wait()

    for i := 0; i < numConsumers; i++ {
        done <- 1
    }

    receiveWg.Wait()

    for _, items := range behaviourScript {
        reported := pr.GetBehaviours(items.peerID)
        if !equalBehaviours(reported, items.behaviours) {
            t.Errorf("expected peer %s to have behaved \nExpected: %#v \nGot %#v \n",
                items.peerID, items.behaviours, reported)
        }
    }
}
blockchain/doc.go (new file, 17 lines)
@@ -0,0 +1,17 @@
/*
Package blockchain provides two implementations of the fast-sync protocol.

- v0 was the very first implementation. It is battle-tested, but does not
  have a lot of test coverage.
- v2 is the newest implementation, with a focus on testability and readability.

Check out ADR-40 for the formal model and requirements.

# Termination criteria

1. the maximum peer height is reached
2. termination timeout is triggered, which is set if the peer set is empty or
   there are no pending requests.
*/
package blockchain
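The two termination criteria above reduce to a simple predicate. A hypothetical sketch for illustration only; shouldTerminate and its parameters are invented and are not part of this package's API:

package blockchain

// shouldTerminate illustrates the two criteria above: stop once the maximum
// peer height is reached, or once the termination timeout has fired (set when
// the peer set is empty or there are no pending requests).
func shouldTerminate(height, maxPeerHeight int64, timeoutFired bool) bool {
    return height >= maxPeerHeight || timeoutFired
}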
@@ -1,7 +1,7 @@
package blocksync
package blockchain

import (
    bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"
    bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
    "github.com/tendermint/tendermint/types"
)

@@ -7,10 +7,11 @@ import (
    "sync/atomic"
    "time"

    flow "github.com/tendermint/tendermint/internal/libs/flowrate"
    tmsync "github.com/tendermint/tendermint/internal/libs/sync"
    flow "github.com/tendermint/tendermint/libs/flowrate"
    "github.com/tendermint/tendermint/libs/log"
    "github.com/tendermint/tendermint/libs/service"
    tmsync "github.com/tendermint/tendermint/libs/sync"
    "github.com/tendermint/tendermint/p2p"
    "github.com/tendermint/tendermint/types"
)

@@ -62,10 +63,10 @@ var peerTimeout = 15 * time.Second // not const so we can override with tests
// PeerID responsible for delivering the block.
type BlockRequest struct {
    Height int64
    PeerID types.NodeID
    PeerID p2p.NodeID
}

// BlockPool keeps track of the block sync peers, block requests and block responses.
// BlockPool keeps track of the fast sync peers, block requests and block responses.
type BlockPool struct {
    service.BaseService
    lastAdvance time.Time

@@ -75,7 +76,7 @@ type BlockPool struct {
    requesters map[int64]*bpRequester
    height     int64 // the lowest key in requesters.
    // peers
    peers map[types.NodeID]*bpPeer
    peers map[p2p.NodeID]*bpPeer
    maxPeerHeight int64 // the biggest reported height

    // atomic
@@ -83,26 +84,20 @@ type BlockPool struct {

    requestsCh chan<- BlockRequest
    errorsCh   chan<- peerError

    startHeight               int64
    lastHundredBlockTimeStamp time.Time
    lastSyncRate              float64
}

// NewBlockPool returns a new BlockPool with the height equal to start. Block
// requests and errors will be sent to requestsCh and errorsCh accordingly.
func NewBlockPool(start int64, requestsCh chan<- BlockRequest, errorsCh chan<- peerError) *BlockPool {
    bp := &BlockPool{
        peers: make(map[types.NodeID]*bpPeer),
        peers: make(map[p2p.NodeID]*bpPeer),

        requesters:  make(map[int64]*bpRequester),
        height:      start,
        startHeight: start,
        numPending:  0,
        requesters: make(map[int64]*bpRequester),
        height:     start,
        numPending: 0,

        requestsCh:   requestsCh,
        errorsCh:     errorsCh,
        lastSyncRate: 0,
        requestsCh: requestsCh,
        errorsCh:   errorsCh,
    }
    bp.BaseService = *service.NewBaseService(nil, "BlockPool", bp)
    return bp
@@ -112,7 +107,6 @@ func NewBlockPool(start int64, requestsCh chan<- BlockRequest, errorsCh chan<- p
// pool's start time.
func (pool *BlockPool) OnStart() error {
    pool.lastAdvance = time.Now()
    pool.lastHundredBlockTimeStamp = pool.lastAdvance
    go pool.makeRequestersRoutine()
    return nil
}
@@ -223,19 +217,6 @@ func (pool *BlockPool) PopRequest() {
        delete(pool.requesters, pool.height)
        pool.height++
        pool.lastAdvance = time.Now()

        // lastSyncRate is updated every 100 blocks: an adaptive (exponentially
        // weighted) filter smooths the block sync rate, measured in blocks per second.
        if (pool.height-pool.startHeight)%100 == 0 {
            newSyncRate := 100 / time.Since(pool.lastHundredBlockTimeStamp).Seconds()
            if pool.lastSyncRate == 0 {
                pool.lastSyncRate = newSyncRate
            } else {
                pool.lastSyncRate = 0.9*pool.lastSyncRate + 0.1*newSyncRate
            }
            pool.lastHundredBlockTimeStamp = time.Now()
        }

    } else {
        panic(fmt.Sprintf("Expected requester to pop, got nothing at height %v", pool.height))
    }
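To make the smoothing above concrete, here is a standalone sketch of the same exponentially weighted filter, with made-up per-window rates rather than measured ones:

package main

import "fmt"

func main() {
    rate := 0.0
    // Hypothetical rates (blocks/s) observed over successive 100-block windows.
    for _, window := range []float64{40, 60, 55} {
        if rate == 0 {
            rate = window // first window seeds the estimate
        } else {
            rate = 0.9*rate + 0.1*window // 90% old estimate, 10% new window
        }
        fmt.Printf("%.1f\n", rate) // prints 40.0, 42.0, 43.3
    }
}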
@@ -244,13 +225,13 @@ func (pool *BlockPool) PopRequest() {
// RedoRequest invalidates the block at pool.height,
// removes the peer, and redoes the request from others.
// Returns the ID of the removed peer.
func (pool *BlockPool) RedoRequest(height int64) types.NodeID {
func (pool *BlockPool) RedoRequest(height int64) p2p.NodeID {
    pool.mtx.Lock()
    defer pool.mtx.Unlock()

    request := pool.requesters[height]
    peerID := request.getPeerID()
    if peerID != types.NodeID("") {
    if peerID != p2p.NodeID("") {
        // RemovePeer will redo all requesters associated with this peer.
        pool.removePeer(peerID)
    }
@@ -259,7 +240,7 @@ func (pool *BlockPool) RedoRequest(height int64) types.NodeID {

// AddBlock validates that the block comes from the peer it was expected from and calls the requester to store it.
// TODO: ensure that blocks come in order for each peer.
func (pool *BlockPool) AddBlock(peerID types.NodeID, block *types.Block, blockSize int) {
func (pool *BlockPool) AddBlock(peerID p2p.NodeID, block *types.Block, blockSize int) {
    pool.mtx.Lock()
    defer pool.mtx.Unlock()

@@ -306,7 +287,7 @@ func (pool *BlockPool) LastAdvance() time.Time {
}

// SetPeerRange sets the peer's alleged blockchain base and height.
func (pool *BlockPool) SetPeerRange(peerID types.NodeID, base int64, height int64) {
func (pool *BlockPool) SetPeerRange(peerID p2p.NodeID, base int64, height int64) {
    pool.mtx.Lock()
    defer pool.mtx.Unlock()

@@ -327,14 +308,14 @@ func (pool *BlockPool) SetPeerRange(peerID types.NodeID, base int64, height int6

// RemovePeer removes the peer with peerID from the pool. If there's no peer
// with peerID, the function is a no-op.
func (pool *BlockPool) RemovePeer(peerID types.NodeID) {
func (pool *BlockPool) RemovePeer(peerID p2p.NodeID) {
    pool.mtx.Lock()
    defer pool.mtx.Unlock()

    pool.removePeer(peerID)
}

func (pool *BlockPool) removePeer(peerID types.NodeID) {
func (pool *BlockPool) removePeer(peerID p2p.NodeID) {
    for _, requester := range pool.requesters {
        if requester.getPeerID() == peerID {
            requester.redo(peerID)
@@ -415,14 +396,14 @@ func (pool *BlockPool) requestersLen() int64 {
    return int64(len(pool.requesters))
}

func (pool *BlockPool) sendRequest(height int64, peerID types.NodeID) {
func (pool *BlockPool) sendRequest(height int64, peerID p2p.NodeID) {
    if !pool.IsRunning() {
        return
    }
    pool.requestsCh <- BlockRequest{height, peerID}
}

func (pool *BlockPool) sendError(err error, peerID types.NodeID) {
func (pool *BlockPool) sendError(err error, peerID p2p.NodeID) {
    if !pool.IsRunning() {
        return
    }
@@ -448,20 +429,6 @@ func (pool *BlockPool) debug() string {
    return str
}

func (pool *BlockPool) targetSyncBlocks() int64 {
    pool.mtx.RLock()
    defer pool.mtx.RUnlock()

    return pool.maxPeerHeight - pool.startHeight + 1
}

func (pool *BlockPool) getLastSyncRate() float64 {
    pool.mtx.RLock()
    defer pool.mtx.RUnlock()

    return pool.lastSyncRate
}

//-------------------------------------

type bpPeer struct {
@@ -470,7 +437,7 @@ type bpPeer struct {
    height int64
    base   int64
    pool   *BlockPool
    id     types.NodeID
    id     p2p.NodeID
    recvMonitor *flow.Monitor

    timeout *time.Timer
@@ -478,7 +445,7 @@ type bpPeer struct {
    logger log.Logger
}

func newBPPeer(pool *BlockPool, peerID types.NodeID, base int64, height int64) *bpPeer {
func newBPPeer(pool *BlockPool, peerID p2p.NodeID, base int64, height int64) *bpPeer {
    peer := &bpPeer{
        pool: pool,
        id:   peerID,
@@ -543,10 +510,10 @@ type bpRequester struct {
    pool       *BlockPool
    height     int64
    gotBlockCh chan struct{}
    redoCh     chan types.NodeID // redo may be sent multiple times; peerID identifies repeats
    redoCh     chan p2p.NodeID // redo may be sent multiple times; peerID identifies repeats

    mtx    tmsync.Mutex
    peerID types.NodeID
    peerID p2p.NodeID
    block  *types.Block
}

@@ -555,7 +522,7 @@ func newBPRequester(pool *BlockPool, height int64) *bpRequester {
        pool:       pool,
        height:     height,
        gotBlockCh: make(chan struct{}, 1),
        redoCh:     make(chan types.NodeID, 1),
        redoCh:     make(chan p2p.NodeID, 1),

        peerID: "",
        block:  nil,
@@ -570,7 +537,7 @@ func (bpr *bpRequester) OnStart() error {
}

// Returns true if the peer matches and block doesn't already exist.
func (bpr *bpRequester) setBlock(block *types.Block, peerID types.NodeID) bool {
func (bpr *bpRequester) setBlock(block *types.Block, peerID p2p.NodeID) bool {
    bpr.mtx.Lock()
    if bpr.block != nil || bpr.peerID != peerID {
        bpr.mtx.Unlock()
@@ -592,7 +559,7 @@ func (bpr *bpRequester) getBlock() *types.Block {
    return bpr.block
}

func (bpr *bpRequester) getPeerID() types.NodeID {
func (bpr *bpRequester) getPeerID() p2p.NodeID {
    bpr.mtx.Lock()
    defer bpr.mtx.Unlock()
    return bpr.peerID
@@ -614,7 +581,7 @@ func (bpr *bpRequester) reset() {
// Tells bpRequester to pick another peer and try again.
// NOTE: Nonblocking, and does nothing if another redo
// was already requested.
func (bpr *bpRequester) redo(peerID types.NodeID) {
func (bpr *bpRequester) redo(peerID p2p.NodeID) {
    select {
    case bpr.redoCh <- peerID:
    default:
@@ -2,7 +2,6 @@ package v0

import (
    "fmt"
    mrand "math/rand"
    "testing"
    "time"

@@ -11,6 +10,7 @@ import (

    "github.com/tendermint/tendermint/libs/log"
    tmrand "github.com/tendermint/tendermint/libs/rand"
    "github.com/tendermint/tendermint/p2p"
    "github.com/tendermint/tendermint/types"
)

@@ -19,7 +19,7 @@ func init() {
}

type testPeer struct {
    id types.NodeID
    id p2p.NodeID
    base      int64
    height    int64
    inputChan chan inputData // make sure each peer's data is sequential
@@ -49,7 +49,7 @@ func (p testPeer) simulateInput(input inputData) {
    // input.t.Logf("Added block from peer %v (height: %v)", input.request.PeerID, input.request.Height)
}

type testPeers map[types.NodeID]testPeer
type testPeers map[p2p.NodeID]testPeer

func (ps testPeers) start() {
    for _, v := range ps {
@@ -66,8 +66,8 @@ func (ps testPeers) stop() {
func makePeers(numPeers int, minHeight, maxHeight int64) testPeers {
    peers := make(testPeers, numPeers)
    for i := 0; i < numPeers; i++ {
        peerID := types.NodeID(tmrand.Str(12))
        height := minHeight + mrand.Int63n(maxHeight-minHeight)
        peerID := p2p.NodeID(tmrand.Str(12))
        height := minHeight + tmrand.Int63n(maxHeight-minHeight)
        base := minHeight + int64(i)
        if base > height {
            base = height
@@ -182,7 +182,7 @@ func TestBlockPoolTimeout(t *testing.T) {

    // Pull from channels
    counter := 0
    timedOut := map[types.NodeID]struct{}{}
    timedOut := map[p2p.NodeID]struct{}{}
    for {
        select {
        case err := <-errorsCh:
@@ -203,7 +203,7 @@ func TestBlockPoolTimeout(t *testing.T) {
func TestBlockPoolRemovePeer(t *testing.T) {
    peers := make(testPeers, 10)
    for i := 0; i < 10; i++ {
        peerID := types.NodeID(fmt.Sprintf("%d", i+1))
        peerID := p2p.NodeID(fmt.Sprintf("%d", i+1))
        height := int64(i + 1)
        peers[peerID] = testPeer{peerID, 0, height, make(chan inputData)}
    }
@@ -227,10 +227,10 @@ func TestBlockPoolRemovePeer(t *testing.T) {
    assert.EqualValues(t, 10, pool.MaxPeerHeight())

    // remove not-existing peer
    assert.NotPanics(t, func() { pool.RemovePeer(types.NodeID("Superman")) })
    assert.NotPanics(t, func() { pool.RemovePeer(p2p.NodeID("Superman")) })

    // remove peer with biggest height
    pool.RemovePeer(types.NodeID("10"))
    pool.RemovePeer(p2p.NodeID("10"))
    assert.EqualValues(t, 9, pool.MaxPeerHeight())

    // remove all peers
@@ -2,17 +2,14 @@ package v0

import (
    "fmt"
    "runtime/debug"
    "sync"
    "time"

    bc "github.com/tendermint/tendermint/internal/blocksync"
    cons "github.com/tendermint/tendermint/internal/consensus"
    "github.com/tendermint/tendermint/internal/p2p"
    bc "github.com/tendermint/tendermint/blockchain"
    "github.com/tendermint/tendermint/libs/log"
    "github.com/tendermint/tendermint/libs/service"
    tmSync "github.com/tendermint/tendermint/libs/sync"
    bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"
    "github.com/tendermint/tendermint/p2p"
    bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
    sm "github.com/tendermint/tendermint/state"
    "github.com/tendermint/tendermint/store"
    "github.com/tendermint/tendermint/types"
@@ -29,23 +26,22 @@ var (
    // TODO: Remove once p2p refactor is complete.
    // ref: https://github.com/tendermint/tendermint/issues/5670
    ChannelShims = map[p2p.ChannelID]*p2p.ChannelDescriptorShim{
        BlockSyncChannel: {
        BlockchainChannel: {
            MsgType: new(bcproto.Message),
            Descriptor: &p2p.ChannelDescriptor{
                ID: byte(BlockSyncChannel),
                ID: byte(BlockchainChannel),
                Priority:            5,
                SendQueueCapacity:   1000,
                RecvBufferCapacity:  1024,
                RecvBufferCapacity:  50 * 4096,
                RecvMessageCapacity: bc.MaxMsgSize,
                MaxSendBytes:        100,
            },
        },
    }
)

const (
    // BlockSyncChannel is a channel for blocks and status updates
    BlockSyncChannel = p2p.ChannelID(0x40)
    // BlockchainChannel is a channel for blocks and status updates
    BlockchainChannel = p2p.ChannelID(0x40)

    trySyncIntervalMS = 10

@@ -60,21 +56,21 @@ const (
)

type consensusReactor interface {
    // For when we switch from block sync reactor to the consensus
    // For when we switch from blockchain reactor and fast sync to the consensus
    // machine.
    SwitchToConsensus(state sm.State, skipWAL bool)
}

type peerError struct {
    err    error
    peerID types.NodeID
    peerID p2p.NodeID
}

func (e peerError) Error() string {
    return fmt.Sprintf("error with peer %v: %s", e.peerID, e.err.Error())
}

// Reactor handles long-term catchup syncing.
// BlockchainReactor handles long-term catchup syncing.
type Reactor struct {
    service.BaseService

@@ -85,19 +81,11 @@ type Reactor struct {
    store       *store.BlockStore
    pool        *BlockPool
    consReactor consensusReactor
    blockSync   *tmSync.AtomicBool
    fastSync    bool

    blockSyncCh *p2p.Channel
    // blockSyncOutBridgeCh defines a channel that acts as a bridge between sending Envelope
    // messages that the reactor will consume in processBlockSyncCh and receiving messages
    // from the peer updates channel and other goroutines. We do this instead of directly
    // sending on blockSyncCh.Out to avoid race conditions in the case where other goroutines
    // send Envelopes directly to the blockSyncCh.Out channel, since processBlockSyncCh
    // may close the blockSyncCh.Out channel at the same time that other goroutines send to
    // blockSyncCh.Out.
    blockSyncOutBridgeCh chan p2p.Envelope
    peerUpdates          *p2p.PeerUpdates
    closeCh              chan struct{}
    blockchainCh *p2p.Channel
    peerUpdates  *p2p.PeerUpdatesCh
    closeCh      chan struct{}

    requestsCh <-chan BlockRequest
    errorsCh   <-chan peerError
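The blockSyncOutBridgeCh comment above describes a general pattern: funnel all sends through one forwarding goroutine so that a channel that may be closed has exactly one writer. A generic, self-contained sketch with invented names, not the reactor's actual wiring:

package main

import "fmt"

func main() {
    out := make(chan string)
    bridge := make(chan string)

    // Only this goroutine ever sends on out, so closing out cannot race
    // with other senders; everyone else sends on bridge instead.
    go func() {
        for msg := range bridge {
            out <- msg
        }
        close(out)
    }()

    go func() {
        bridge <- "status request"
        bridge <- "block request"
        close(bridge)
    }()

    for msg := range out {
        fmt.Println(msg)
    }
}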
@@ -106,10 +94,6 @@ type Reactor struct {
    // requestRoutine spawned goroutines when stopping the reactor and before
    // stopping the p2p Channel(s).
    poolWG sync.WaitGroup

    metrics *cons.Metrics

    syncStartTime time.Time
}

// NewReactor returns a new reactor instance.
@@ -119,10 +103,9 @@ func NewReactor(
    blockExec *sm.BlockExecutor,
    store *store.BlockStore,
    consReactor consensusReactor,
    blockSyncCh *p2p.Channel,
    peerUpdates *p2p.PeerUpdates,
    blockSync bool,
    metrics *cons.Metrics,
    blockchainCh *p2p.Channel,
    peerUpdates *p2p.PeerUpdatesCh,
    fastSync bool,
) (*Reactor, error) {
    if state.LastBlockHeight != store.Height() {
        return nil, fmt.Errorf("state (%v) and store (%v) height mismatch", state.LastBlockHeight, store.Height())
@@ -137,23 +120,20 @@ func NewReactor(
    errorsCh := make(chan peerError, maxPeerErrBuffer) // NOTE: The capacity should be larger than the peer count.

    r := &Reactor{
        initialState:         state,
        blockExec:            blockExec,
        store:                store,
        pool:                 NewBlockPool(startHeight, requestsCh, errorsCh),
        consReactor:          consReactor,
        blockSync:            tmSync.NewBool(blockSync),
        requestsCh:           requestsCh,
        errorsCh:             errorsCh,
        blockSyncCh:          blockSyncCh,
        blockSyncOutBridgeCh: make(chan p2p.Envelope),
        peerUpdates:          peerUpdates,
        closeCh:              make(chan struct{}),
        metrics:              metrics,
        syncStartTime:        time.Time{},
        initialState: state,
        blockExec:    blockExec,
        store:        store,
        pool:         NewBlockPool(startHeight, requestsCh, errorsCh),
        consReactor:  consReactor,
        fastSync:     fastSync,
        requestsCh:   requestsCh,
        errorsCh:     errorsCh,
        blockchainCh: blockchainCh,
        peerUpdates:  peerUpdates,
        closeCh:      make(chan struct{}),
    }

    r.BaseService = *service.NewBaseService(logger, "BlockSync", r)
    r.BaseService = *service.NewBaseService(logger, "Blockchain", r)
    return r, nil
}

@@ -162,10 +142,10 @@ func NewReactor(
// messages on that p2p channel accordingly. The caller must be sure to execute
// OnStop to ensure the outbound p2p Channels are closed.
//
// If blockSync is enabled, we also start the pool and the pool processing
// If fastSync is enabled, we also start the pool and the pool processing
// goroutine. If the pool fails to start, an error is returned.
func (r *Reactor) OnStart() error {
    if r.blockSync.IsSet() {
    if r.fastSync {
        if err := r.pool.Start(); err != nil {
            return err
        }
@@ -174,7 +154,7 @@ func (r *Reactor) OnStart() error {
        go r.poolRoutine(false)
    }

    go r.processBlockSyncCh()
    go r.processBlockchainCh()
    go r.processPeerUpdates()

    return nil
@@ -183,7 +163,7 @@ func (r *Reactor) OnStart() error {
// OnStop stops the reactor by signaling to all spawned goroutines to exit and
// blocking until they all exit.
func (r *Reactor) OnStop() {
    if r.blockSync.IsSet() {
    if r.fastSync {
        if err := r.pool.Stop(); err != nil {
            r.Logger.Error("failed to stop pool", "err", err)
        }
@@ -199,13 +179,13 @@ func (r *Reactor) OnStop() {
    // Wait for all p2p Channels to be closed before returning. This ensures we
    // can easily reason about synchronization of all p2p Channels and ensure no
    // panics will occur.
    <-r.blockSyncCh.Done()
    <-r.blockchainCh.Done()
    <-r.peerUpdates.Done()
}

// respondToPeer loads a block and sends it to the requesting peer, if we have it.
// Otherwise, we'll respond saying we do not have it.
func (r *Reactor) respondToPeer(msg *bcproto.BlockRequest, peerID types.NodeID) {
func (r *Reactor) respondToPeer(msg *bcproto.BlockRequest, peerID p2p.NodeID) {
    block := r.store.LoadBlock(msg.Height)
    if block != nil {
        blockProto, err := block.ToProto()
@@ -214,7 +194,7 @@ func (r *Reactor) respondToPeer(msg *bcproto.BlockRequest, peerID types.NodeID)
            return
        }

        r.blockSyncCh.Out <- p2p.Envelope{
        r.blockchainCh.Out() <- p2p.Envelope{
            To:      peerID,
            Message: &bcproto.BlockResponse{Block: blockProto},
        }
@@ -223,16 +203,16 @@ func (r *Reactor) respondToPeer(msg *bcproto.BlockRequest, peerID types.NodeID)
    }

    r.Logger.Info("peer requesting a block we do not have", "peer", peerID, "height", msg.Height)
    r.blockSyncCh.Out <- p2p.Envelope{
    r.blockchainCh.Out() <- p2p.Envelope{
        To:      peerID,
        Message: &bcproto.NoBlockResponse{Height: msg.Height},
    }
}

// handleBlockSyncMessage handles envelopes sent from peers on the
// BlockSyncChannel. It returns an error only if the Envelope.Message is unknown
// handleBlockchainMessage handles envelopes sent from peers on the
// BlockchainChannel. It returns an error only if the Envelope.Message is unknown
// for this channel. This should never be called outside of handleMessage.
func (r *Reactor) handleBlockSyncMessage(envelope p2p.Envelope) error {
func (r *Reactor) handleBlockchainMessage(envelope p2p.Envelope) error {
    logger := r.Logger.With("peer", envelope.From)

    switch msg := envelope.Message.(type) {
@@ -249,7 +229,7 @@ func (r *Reactor) handleBlockSyncMessage(envelope p2p.Envelope) error {
        r.pool.AddBlock(envelope.From, block, block.Size())

    case *bcproto.StatusRequest:
        r.blockSyncCh.Out <- p2p.Envelope{
        r.blockchainCh.Out() <- p2p.Envelope{
            To: envelope.From,
            Message: &bcproto.StatusResponse{
                Height: r.store.Height(),
@@ -277,19 +257,15 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err
    defer func() {
        if e := recover(); e != nil {
            err = fmt.Errorf("panic in processing message: %v", e)
            r.Logger.Error(
                "recovering from processing message panic",
                "err", err,
                "stack", string(debug.Stack()),
            )
            r.Logger.Error("recovering from processing message panic", "err", err)
        }
    }()

    r.Logger.Debug("received message", "message", envelope.Message, "peer", envelope.From)

    switch chID {
    case BlockSyncChannel:
        err = r.handleBlockSyncMessage(envelope)
    case BlockchainChannel:
        err = r.handleBlockchainMessage(envelope)

    default:
        err = fmt.Errorf("unknown channel ID (%d) for envelope (%v)", chID, envelope)
@@ -298,58 +274,55 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err
    return err
}

// processBlockSyncCh initiates a blocking process where we listen for and handle
// envelopes on the BlockSyncChannel and blockSyncOutBridgeCh. Any error encountered during
// message execution will result in a PeerError being sent on the BlockSyncChannel.
// When the reactor is stopped, we will catch the signal and close the p2p Channel
// processBlockchainCh initiates a blocking process where we listen for and handle
// envelopes on the BlockchainChannel. Any error encountered during message
// execution will result in a PeerError being sent on the BlockchainChannel. When
// the reactor is stopped, we will catch the signal and close the p2p Channel
// gracefully.
func (r *Reactor) processBlockSyncCh() {
    defer r.blockSyncCh.Close()
func (r *Reactor) processBlockchainCh() {
    defer r.blockchainCh.Close()

    for {
        select {
        case envelope := <-r.blockSyncCh.In:
            if err := r.handleMessage(r.blockSyncCh.ID, envelope); err != nil {
                r.Logger.Error("failed to process message", "ch_id", r.blockSyncCh.ID, "envelope", envelope, "err", err)
                r.blockSyncCh.Error <- p2p.PeerError{
                    NodeID: envelope.From,
                    Err:    err,
        case envelope := <-r.blockchainCh.In():
            if err := r.handleMessage(r.blockchainCh.ID(), envelope); err != nil {
                r.Logger.Error("failed to process message", "ch_id", r.blockchainCh.ID(), "envelope", envelope, "err", err)
                r.blockchainCh.Error() <- p2p.PeerError{
                    PeerID:   envelope.From,
                    Err:      err,
                    Severity: p2p.PeerErrorSeverityLow,
                }
            }

        case envelope := <-r.blockSyncOutBridgeCh:
            r.blockSyncCh.Out <- envelope

        case <-r.closeCh:
            r.Logger.Debug("stopped listening on block sync channel; closing...")
            r.Logger.Debug("stopped listening on blockchain channel; closing...")
            return

        }
    }
}

// processPeerUpdate processes a PeerUpdate.
func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) {
    r.Logger.Debug("received peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status)
    r.Logger.Debug("received peer update", "peer", peerUpdate.PeerID, "status", peerUpdate.Status)

    // XXX: Pool#RedoRequest can sometimes give us an empty peer.
    if len(peerUpdate.NodeID) == 0 {
    if len(peerUpdate.PeerID) == 0 {
        return
    }

    switch peerUpdate.Status {
    case p2p.PeerStatusUp:
    case p2p.PeerStatusNew, p2p.PeerStatusUp:
        // send a status update to the newly added peer
        r.blockSyncOutBridgeCh <- p2p.Envelope{
            To: peerUpdate.NodeID,
        r.blockchainCh.Out() <- p2p.Envelope{
            To: peerUpdate.PeerID,
            Message: &bcproto.StatusResponse{
                Base:   r.store.Base(),
                Height: r.store.Height(),
            },
        }

    case p2p.PeerStatusDown:
        r.pool.RemovePeer(peerUpdate.NodeID)
    case p2p.PeerStatusDown, p2p.PeerStatusRemoved, p2p.PeerStatusBanned:
        r.pool.RemovePeer(peerUpdate.PeerID)
    }
}

@@ -371,10 +344,10 @@ func (r *Reactor) processPeerUpdates() {
    }
}

// SwitchToBlockSync is called by the state sync reactor when switching to fast
// SwitchToFastSync is called by the state sync reactor when switching to fast
// sync.
func (r *Reactor) SwitchToBlockSync(state sm.State) error {
    r.blockSync.Set()
func (r *Reactor) SwitchToFastSync(state sm.State) error {
    r.fastSync = true
    r.initialState = state
    r.pool.height = state.LastBlockHeight + 1

@@ -382,8 +355,6 @@ func (r *Reactor) SwitchToBlockSync(state sm.State) error {
        return err
    }

    r.syncStartTime = time.Now()

    r.poolWG.Add(1)
    go r.poolRoutine(true)

@@ -406,15 +377,16 @@ func (r *Reactor) requestRoutine() {
            return

        case request := <-r.requestsCh:
            r.blockSyncOutBridgeCh <- p2p.Envelope{
            r.blockchainCh.Out() <- p2p.Envelope{
                To:      request.PeerID,
                Message: &bcproto.BlockRequest{Height: request.Height},
            }

        case pErr := <-r.errorsCh:
            r.blockSyncCh.Error <- p2p.PeerError{
                NodeID: pErr.peerID,
                Err:    pErr.err,
            r.blockchainCh.Error() <- p2p.PeerError{
                PeerID:   pErr.peerID,
                Err:      pErr.err,
                Severity: p2p.PeerErrorSeverityLow,
            }

        case <-statusUpdateTicker.C:
@@ -423,7 +395,7 @@ func (r *Reactor) requestRoutine() {
            go func() {
                defer r.poolWG.Done()

                r.blockSyncOutBridgeCh <- p2p.Envelope{
                r.blockchainCh.Out() <- p2p.Envelope{
                    Broadcast: true,
                    Message:   &bcproto.StatusRequest{},
                }
@@ -496,8 +468,6 @@ FOR_LOOP:
                r.Logger.Error("failed to stop pool", "err", err)
            }

            r.blockSync.UnSet()

            if r.consReactor != nil {
                r.consReactor.SwitchToConsensus(state, blocksSynced > 0 || stateSynced)
            }
@@ -554,16 +524,18 @@ FOR_LOOP:
            // NOTE: We've already removed the peer's request, but we still need
            // to clean up the rest.
            peerID := r.pool.RedoRequest(first.Height)
            r.blockSyncCh.Error <- p2p.PeerError{
                NodeID: peerID,
                Err:    err,
            r.blockchainCh.Error() <- p2p.PeerError{
                PeerID:   peerID,
                Err:      err,
                Severity: p2p.PeerErrorSeverityLow,
            }

            peerID2 := r.pool.RedoRequest(second.Height)
            if peerID2 != peerID {
                r.blockSyncCh.Error <- p2p.PeerError{
                    NodeID: peerID2,
                    Err:    err,
                r.blockchainCh.Error() <- p2p.PeerError{
                    PeerID:   peerID2,
                    Err:      err,
                    Severity: p2p.PeerErrorSeverityLow,
                }
            }

@@ -578,20 +550,18 @@ FOR_LOOP:

            // TODO: Same thing for app - but we would need a way to get the hash
            // without persisting the state.
            state, err = r.blockExec.ApplyBlock(state, firstID, first)
            state, _, err = r.blockExec.ApplyBlock(state, firstID, first)
            if err != nil {
                // TODO: This is bad, are we zombie?
                panic(fmt.Sprintf("failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
            }

            r.metrics.RecordConsMetrics(first)

            blocksSynced++

            if blocksSynced%100 == 0 {
                lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
                r.Logger.Info(
                    "block sync rate",
                    "fast sync rate",
                    "height", r.pool.height,
                    "max_peer_height", r.pool.MaxPeerHeight(),
                    "blocks/s", lastRate,
@@ -608,31 +578,3 @@ FOR_LOOP:
                }
            }
        }
    }
}

func (r *Reactor) GetMaxPeerBlockHeight() int64 {
    return r.pool.MaxPeerHeight()
}

func (r *Reactor) GetTotalSyncedTime() time.Duration {
    if !r.blockSync.IsSet() || r.syncStartTime.IsZero() {
        return time.Duration(0)
    }
    return time.Since(r.syncStartTime)
}

func (r *Reactor) GetRemainingSyncTime() time.Duration {
    if !r.blockSync.IsSet() {
        return time.Duration(0)
    }

    targetSyncs := r.pool.targetSyncBlocks()
    currentSyncs := r.store.Height() - r.pool.startHeight + 1
    lastSyncRate := r.pool.getLastSyncRate()
    if currentSyncs < 0 || lastSyncRate < 0.001 {
        return time.Duration(0)
    }

    remain := float64(targetSyncs-currentSyncs) / lastSyncRate

    return time.Duration(int64(remain * float64(time.Second)))
}
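GetRemainingSyncTime above is simple arithmetic: remaining blocks divided by the smoothed rate. A standalone sketch with made-up numbers, not values from this code:

package main

import (
    "fmt"
    "time"
)

func main() {
    targetSyncs := int64(10000) // pool.targetSyncBlocks(): maxPeerHeight - startHeight + 1
    currentSyncs := int64(4000) // store.Height() - pool.startHeight + 1
    lastSyncRate := 50.0        // smoothed blocks per second (hypothetical)

    remain := float64(targetSyncs-currentSyncs) / lastSyncRate
    fmt.Println(time.Duration(int64(remain * float64(time.Second)))) // 2m0s
}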
blockchain/v0/reactor_test.go (new file, 405 lines)
@@ -0,0 +1,405 @@
package v0

import (
    "fmt"
    "math/rand"
    "os"
    "testing"
    "time"

    "github.com/stretchr/testify/require"

    abci "github.com/tendermint/tendermint/abci/types"
    cfg "github.com/tendermint/tendermint/config"
    "github.com/tendermint/tendermint/libs/log"
    "github.com/tendermint/tendermint/mempool/mock"
    "github.com/tendermint/tendermint/p2p"
    bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
    "github.com/tendermint/tendermint/proxy"
    sm "github.com/tendermint/tendermint/state"
    "github.com/tendermint/tendermint/store"
    "github.com/tendermint/tendermint/types"
    dbm "github.com/tendermint/tm-db"
)

var rng = rand.New(rand.NewSource(time.Now().UnixNano()))

type reactorTestSuite struct {
    reactor *Reactor
    app     proxy.AppConns

    peerID p2p.NodeID

    blockchainChannel   *p2p.Channel
    blockchainInCh      chan p2p.Envelope
    blockchainOutCh     chan p2p.Envelope
    blockchainPeerErrCh chan p2p.PeerError

    peerUpdatesCh chan p2p.PeerUpdate
    peerUpdates   *p2p.PeerUpdatesCh
}

func setup(
    t *testing.T,
    genDoc *types.GenesisDoc,
    privVals []types.PrivValidator,
    maxBlockHeight int64,
    chBuf uint,
) *reactorTestSuite {
    t.Helper()

    require.Len(t, privVals, 1, "only one validator can be supported")

    app := &abci.BaseApplication{}
    cc := proxy.NewLocalClientCreator(app)

    proxyApp := proxy.NewAppConns(cc)
    require.NoError(t, proxyApp.Start())

    blockDB := dbm.NewMemDB()
    stateDB := dbm.NewMemDB()
    stateStore := sm.NewStore(stateDB)
    blockStore := store.NewBlockStore(blockDB)

    state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
    require.NoError(t, err)

    fastSync := true
    db := dbm.NewMemDB()
    stateStore = sm.NewStore(db)

    blockExec := sm.NewBlockExecutor(
        stateStore,
        log.TestingLogger(),
        proxyApp.Consensus(),
        mock.Mempool{},
        sm.EmptyEvidencePool{},
    )
    require.NoError(t, stateStore.Save(state))

    for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ {
        lastCommit := types.NewCommit(blockHeight-1, 0, types.BlockID{}, nil)

        if blockHeight > 1 {
            lastBlockMeta := blockStore.LoadBlockMeta(blockHeight - 1)
            lastBlock := blockStore.LoadBlock(blockHeight - 1)

            vote, err := types.MakeVote(
                lastBlock.Header.Height,
                lastBlockMeta.BlockID,
                state.Validators,
                privVals[0],
                lastBlock.Header.ChainID,
                time.Now(),
            )
            require.NoError(t, err)

            lastCommit = types.NewCommit(
                vote.Height,
                vote.Round,
                lastBlockMeta.BlockID,
                []types.CommitSig{vote.CommitSig()},
            )
        }

        thisBlock := makeBlock(blockHeight, state, lastCommit)
        thisParts := thisBlock.MakePartSet(types.BlockPartSizeBytes)
        blockID := types.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()}

        state, _, err = blockExec.ApplyBlock(state, blockID, thisBlock)
        require.NoError(t, err)

        blockStore.SaveBlock(thisBlock, thisParts, lastCommit)
    }

    pID := make([]byte, 16)
    _, err = rng.Read(pID)
    require.NoError(t, err)

    peerUpdatesCh := make(chan p2p.PeerUpdate, chBuf)

    rts := &reactorTestSuite{
        app:                 proxyApp,
        blockchainInCh:      make(chan p2p.Envelope, chBuf),
        blockchainOutCh:     make(chan p2p.Envelope, chBuf),
        blockchainPeerErrCh: make(chan p2p.PeerError, chBuf),
        peerUpdatesCh:       peerUpdatesCh,
        peerUpdates:         p2p.NewPeerUpdates(peerUpdatesCh),
        peerID:              p2p.NodeID(fmt.Sprintf("%x", pID)),
    }

    rts.blockchainChannel = p2p.NewChannel(
        BlockchainChannel,
        new(bcproto.Message),
        rts.blockchainInCh,
        rts.blockchainOutCh,
        rts.blockchainPeerErrCh,
    )

    reactor, err := NewReactor(
        log.TestingLogger().With("module", "blockchain", "node", rts.peerID),
        state.Copy(),
        blockExec,
        blockStore,
        nil,
        rts.blockchainChannel,
        rts.peerUpdates,
        fastSync,
    )

    require.NoError(t, err)
    rts.reactor = reactor

    require.NoError(t, rts.reactor.Start())
    require.True(t, rts.reactor.IsRunning())

    t.Cleanup(func() {
        require.NoError(t, rts.reactor.Stop())
        require.NoError(t, rts.app.Stop())
        require.False(t, rts.reactor.IsRunning())
    })

    return rts
}

func simulateRouter(primary *reactorTestSuite, suites []*reactorTestSuite, dropChErr bool) {
    // create a mapping for efficient suite lookup by peer ID
    suitesByPeerID := make(map[p2p.NodeID]*reactorTestSuite)
    for _, suite := range suites {
        suitesByPeerID[suite.peerID] = suite
    }

    // Simulate a router by listening for all outbound envelopes and proxying the
    // envelope to the respective peer (suite).
    go func() {
        for envelope := range primary.blockchainOutCh {
            if envelope.Broadcast {
                for _, s := range suites {
                    // broadcast to everyone except source
                    if s.peerID != primary.peerID {
                        s.blockchainInCh <- p2p.Envelope{
                            From:    primary.peerID,
                            To:      s.peerID,
                            Message: envelope.Message,
                        }
                    }
                }
            } else {
                suitesByPeerID[envelope.To].blockchainInCh <- p2p.Envelope{
                    From:    primary.peerID,
                    To:      envelope.To,
                    Message: envelope.Message,
                }
            }
        }
    }()

    go func() {
        for pErr := range primary.blockchainPeerErrCh {
            if dropChErr {
                primary.reactor.Logger.Debug("dropped peer error", "err", pErr.Err)
            } else {
                primary.peerUpdatesCh <- p2p.PeerUpdate{
                    PeerID: pErr.PeerID,
                    Status: p2p.PeerStatusRemoved,
                }
            }
        }
    }()
}

func TestReactor_AbruptDisconnect(t *testing.T) {
    config := cfg.ResetTestRoot("blockchain_reactor_test")
    defer os.RemoveAll(config.RootDir)

    genDoc, privVals := randGenesisDoc(config, 1, false, 30)
    maxBlockHeight := int64(64)
    testSuites := []*reactorTestSuite{
        setup(t, genDoc, privVals, maxBlockHeight, 0),
        setup(t, genDoc, privVals, 0, 0),
    }

    require.Equal(t, maxBlockHeight, testSuites[0].reactor.store.Height())

    for _, s := range testSuites {
        simulateRouter(s, testSuites, true)

        // connect reactor to every other reactor
        for _, ss := range testSuites {
            if s.peerID != ss.peerID {
                s.peerUpdatesCh <- p2p.PeerUpdate{
                    Status: p2p.PeerStatusUp,
                    PeerID: ss.peerID,
                }
            }
        }
    }

    secondaryPool := testSuites[1].reactor.pool
    require.Eventually(
        t,
        func() bool {
            height, _, _ := secondaryPool.GetStatus()
            return secondaryPool.MaxPeerHeight() > 0 && height > 0 && height < 10
        },
        10*time.Second,
        10*time.Millisecond,
        "expected node to be partially synced",
    )

    // Remove synced node from the syncing node which should not result in any
    // deadlocks or race conditions within the context of poolRoutine.
    testSuites[1].peerUpdatesCh <- p2p.PeerUpdate{
        Status: p2p.PeerStatusDown,
        PeerID: testSuites[0].peerID,
    }
}

func TestReactor_NoBlockResponse(t *testing.T) {
    config := cfg.ResetTestRoot("blockchain_reactor_test")
    defer os.RemoveAll(config.RootDir)

    genDoc, privVals := randGenesisDoc(config, 1, false, 30)
    maxBlockHeight := int64(65)
    testSuites := []*reactorTestSuite{
        setup(t, genDoc, privVals, maxBlockHeight, 0),
        setup(t, genDoc, privVals, 0, 0),
    }

    require.Equal(t, maxBlockHeight, testSuites[0].reactor.store.Height())

    for _, s := range testSuites {
        simulateRouter(s, testSuites, true)

        // connect reactor to every other reactor
        for _, ss := range testSuites {
            if s.peerID != ss.peerID {
                s.peerUpdatesCh <- p2p.PeerUpdate{
                    Status: p2p.PeerStatusUp,
                    PeerID: ss.peerID,
                }
            }
        }
    }

    testCases := []struct {
        height   int64
        existent bool
    }{
        {maxBlockHeight + 2, false},
        {10, true},
        {1, true},
        {100, false},
    }

    secondaryPool := testSuites[1].reactor.pool
    require.Eventually(
        t,
        func() bool { return secondaryPool.MaxPeerHeight() > 0 && secondaryPool.IsCaughtUp() },
        10*time.Second,
        10*time.Millisecond,
        "expected node to be fully synced",
    )

    for _, tc := range testCases {
        block := testSuites[1].reactor.store.LoadBlock(tc.height)
        if tc.existent {
            require.True(t, block != nil)
        } else {
            require.Nil(t, block)
        }
    }
}

func TestReactor_BadBlockStopsPeer(t *testing.T) {
    config := cfg.ResetTestRoot("blockchain_reactor_test")
    defer os.RemoveAll(config.RootDir)

    maxBlockHeight := int64(48)
    genDoc, privVals := randGenesisDoc(config, 1, false, 30)

    testSuites := []*reactorTestSuite{
        setup(t, genDoc, privVals, maxBlockHeight, 1000), // fully synced node
        setup(t, genDoc, privVals, 0, 1000),
        setup(t, genDoc, privVals, 0, 1000),
        setup(t, genDoc, privVals, 0, 1000),
        setup(t, genDoc, privVals, 0, 1000), // new node
    }

    require.Equal(t, maxBlockHeight, testSuites[0].reactor.store.Height())

    for _, s := range testSuites[:len(testSuites)-1] {
        simulateRouter(s, testSuites, true)

        // connect reactor to every other reactor except the new node
        for _, ss := range testSuites[:len(testSuites)-1] {
            if s.peerID != ss.peerID {
                s.peerUpdatesCh <- p2p.PeerUpdate{
                    Status: p2p.PeerStatusUp,
                    PeerID: ss.peerID,
                }
            }
        }
    }

    require.Eventually(
        t,
        func() bool {
            caughtUp := true
            for _, s := range testSuites[1 : len(testSuites)-1] {
                if s.reactor.pool.MaxPeerHeight() == 0 || !s.reactor.pool.IsCaughtUp() {
                    caughtUp = false
                }
            }

            return caughtUp
        },
        10*time.Minute,
        10*time.Millisecond,
        "expected all nodes to be fully synced",
    )

    for _, s := range testSuites[:len(testSuites)-1] {
        require.Len(t, s.reactor.pool.peers, 3)
    }

    // Mark testSuites[3] as an invalid peer which will cause newSuite to disconnect
    // from this peer.
    otherGenDoc, otherPrivVals := randGenesisDoc(config, 1, false, 30)
    otherSuite := setup(t, otherGenDoc, otherPrivVals, maxBlockHeight, 0)
    testSuites[3].reactor.store = otherSuite.reactor.store

    // add a fake peer just so we do not wait for the consensus ticker to timeout
    otherSuite.reactor.pool.SetPeerRange("00ff", 10, 10)

    // start the new peer's faux router
    newSuite := testSuites[len(testSuites)-1]
    simulateRouter(newSuite, testSuites, false)

    // connect all nodes to the new peer
    for _, s := range testSuites[:len(testSuites)-1] {
        newSuite.peerUpdatesCh <- p2p.PeerUpdate{
            Status: p2p.PeerStatusUp,
            PeerID: s.peerID,
        }
    }

    // wait for the new peer to catch up and become fully synced
    require.Eventually(
        t,
        func() bool { return newSuite.reactor.pool.MaxPeerHeight() > 0 && newSuite.reactor.pool.IsCaughtUp() },
        10*time.Minute,
        10*time.Millisecond,
        "expected new node to be fully synced",
    )

    require.Eventuallyf(
        t,
        func() bool { return len(newSuite.reactor.pool.peers) < len(testSuites)-1 },
        10*time.Minute,
        10*time.Millisecond,
        "invalid number of peers; expected < %d, got: %d",
        len(testSuites)-1,
        len(newSuite.reactor.pool.peers),
    )
}
blockchain/v0/test_util.go (new file, 50 lines)
@@ -0,0 +1,50 @@
package v0
|
||||
|
||||
import (
|
||||
"sort"
|
||||
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
tmtime "github.com/tendermint/tendermint/types/time"
|
||||
)
|
||||
|
||||
func randGenesisDoc(
|
||||
config *cfg.Config,
|
||||
numValidators int,
|
||||
randPower bool,
|
||||
minPower int64,
|
||||
) (*types.GenesisDoc, []types.PrivValidator) {
|
||||
validators := make([]types.GenesisValidator, numValidators)
|
||||
privValidators := make([]types.PrivValidator, numValidators)
|
||||
|
||||
for i := 0; i < numValidators; i++ {
|
||||
val, privVal := types.RandValidator(randPower, minPower)
|
||||
validators[i] = types.GenesisValidator{
|
||||
PubKey: val.PubKey,
|
||||
Power: val.VotingPower,
|
||||
}
|
||||
|
||||
privValidators[i] = privVal
|
||||
}
|
||||
|
||||
sort.Sort(types.PrivValidatorsByAddress(privValidators))
|
||||
|
||||
return &types.GenesisDoc{
|
||||
GenesisTime: tmtime.Now(),
|
||||
ChainID: config.ChainID(),
|
||||
Validators: validators,
|
||||
}, privValidators
|
||||
}
|
||||
|
||||
func makeTxs(height int64) (txs []types.Tx) {
|
||||
for i := 0; i < 10; i++ {
|
||||
txs = append(txs, types.Tx([]byte{byte(height), byte(i)}))
|
||||
}
|
||||
return txs
|
||||
}
|
||||
|
||||
func makeBlock(height int64, state sm.State, lastCommit *types.Commit) *types.Block {
|
||||
block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, nil, state.Validators.GetProposer().Address)
|
||||
return block
|
||||
}
|
||||
@@ -4,8 +4,8 @@ import (
	"errors"

	"github.com/gogo/protobuf/proto"
	"github.com/tendermint/tendermint/internal/p2p"
	bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"
	"github.com/tendermint/tendermint/p2p"
	bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
	"github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/types"
)

@@ -42,7 +42,7 @@ const (
)

type consensusReactor interface {
	// for when we switch from blockchain reactor and block sync to
	// for when we switch from blockchain reactor and fast sync to
	// the consensus machine
	SwitchToConsensus(state state.State, skipWAL bool)
}

@@ -3,6 +3,7 @@ package v2
import (
	"fmt"

	"github.com/tendermint/tendermint/p2p"
	tmState "github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/types"
)

@@ -12,8 +13,8 @@ import (
type pcBlockVerificationFailure struct {
	priorityNormal
	height       int64
	firstPeerID  types.NodeID
	secondPeerID types.NodeID
	firstPeerID  p2p.NodeID
	secondPeerID p2p.NodeID
}

func (e pcBlockVerificationFailure) String() string {

@@ -25,7 +26,7 @@ func (e pcBlockVerificationFailure) String() string {
type pcBlockProcessed struct {
	priorityNormal
	height int64
	peerID types.NodeID
	peerID p2p.NodeID
}

func (e pcBlockProcessed) String() string {

@@ -45,7 +46,7 @@ func (p pcFinished) Error() string {

type queueItem struct {
	block  *types.Block
	peerID types.NodeID
	peerID p2p.NodeID
}

type blockQueue map[int64]queueItem

@@ -94,7 +95,7 @@ func (state *pcState) synced() bool {
	return len(state.queue) <= 1
}

func (state *pcState) enqueue(peerID types.NodeID, block *types.Block, height int64) {
func (state *pcState) enqueue(peerID p2p.NodeID, block *types.Block, height int64) {
	if item, ok := state.queue[height]; ok {
		panic(fmt.Sprintf(
			"duplicate block %d (%X) enqueued by processor (sent by %v; existing block %X from %v)",

@@ -109,7 +110,7 @@ func (state *pcState) height() int64 {
}

// purgePeer moves all unprocessed blocks from the queue
func (state *pcState) purgePeer(peerID types.NodeID) {
func (state *pcState) purgePeer(peerID p2p.NodeID) {
	// what if height is less than state.height?
	for height, item := range state.queue {
		if item.peerID == peerID {

@@ -181,8 +182,6 @@ func (state *pcState) handle(event Event) (Event, error) {
			panic(fmt.Sprintf("failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
		}

		state.context.recordConsMetrics(first)

		delete(state.queue, first.Height)
		state.blocksSynced++

@@ -3,7 +3,6 @@ package v2
import (
	"fmt"

	cons "github.com/tendermint/tendermint/internal/consensus"
	"github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/types"
)

@@ -14,27 +13,24 @@ type processorContext interface {
	saveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit)
	tmState() state.State
	setState(state.State)
	recordConsMetrics(block *types.Block)
}

type pContext struct {
	store   blockStore
	applier blockApplier
	state   state.State
	metrics *cons.Metrics
}

func newProcessorContext(st blockStore, ex blockApplier, s state.State, m *cons.Metrics) *pContext {
func newProcessorContext(st blockStore, ex blockApplier, s state.State) *pContext {
	return &pContext{
		store:   st,
		applier: ex,
		state:   s,
		metrics: m,
	}
}

func (pc *pContext) applyBlock(blockID types.BlockID, block *types.Block) error {
	newState, err := pc.applier.ApplyBlock(pc.state, blockID, block)
	newState, _, err := pc.applier.ApplyBlock(pc.state, blockID, block)
	pc.state = newState
	return err
}

@@ -55,10 +51,6 @@ func (pc *pContext) saveBlock(block *types.Block, blockParts *types.PartSet, see
	pc.store.SaveBlock(block, blockParts, seenCommit)
}

func (pc *pContext) recordConsMetrics(block *types.Block) {
	pc.metrics.RecordConsMetrics(block)
}

type mockPContext struct {
	applicationBL  []int64
	verificationBL []int64

@@ -106,7 +98,3 @@ func (mpc *mockPContext) setState(state state.State) {
func (mpc *mockPContext) tmState() state.State {
	return mpc.state
}

func (mpc *mockPContext) recordConsMetrics(block *types.Block) {

}

@@ -5,6 +5,7 @@ import (

	"github.com/stretchr/testify/assert"

	"github.com/tendermint/tendermint/p2p"
	tmState "github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/types"
)

@@ -39,7 +40,7 @@ func makeState(p *params) *pcState {
	state := newPcState(context)

	for _, item := range p.items {
		state.enqueue(types.NodeID(item.pid), makePcBlock(item.height), item.height)
		state.enqueue(p2p.NodeID(item.pid), makePcBlock(item.height), item.height)
	}

	state.blocksSynced = p.blocksSynced

@@ -47,7 +48,7 @@
	return state
}

func mBlockResponse(peerID types.NodeID, height int64) scBlockReceived {
func mBlockResponse(peerID p2p.NodeID, height int64) scBlockReceived {
	return scBlockReceived{
		peerID: peerID,
		block:  makePcBlock(height),

@@ -81,7 +82,7 @@ func executeProcessorTests(t *testing.T, tests []testFields) {
			}
		}()

		// First step must always initialize the currentState as state.
		// First step must always initialise the currentState as state.
		if step.currentState != nil {
			state = makeState(step.currentState)
		}

@@ -7,14 +7,12 @@ import (

	proto "github.com/gogo/protobuf/proto"

	bc "github.com/tendermint/tendermint/internal/blocksync"
	"github.com/tendermint/tendermint/internal/blocksync/v2/internal/behavior"
	cons "github.com/tendermint/tendermint/internal/consensus"
	tmsync "github.com/tendermint/tendermint/internal/libs/sync"
	"github.com/tendermint/tendermint/internal/p2p"
	"github.com/tendermint/tendermint/behaviour"
	bc "github.com/tendermint/tendermint/blockchain"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/libs/sync"
	bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"
	tmsync "github.com/tendermint/tendermint/libs/sync"
	"github.com/tendermint/tendermint/p2p"
	bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
	"github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/types"
)

@@ -31,12 +29,12 @@ type blockStore interface {
	Height() int64
}

// BlockchainReactor handles block sync protocol.
// BlockchainReactor handles fast sync protocol.
type BlockchainReactor struct {
	p2p.BaseReactor

	blockSync   *sync.AtomicBool // enable block sync on start when it's been Set
	stateSynced bool             // set to true when SwitchToBlockSync is called by state sync
	fastSync    bool             // if true, enable fast sync on start
	stateSynced bool             // set to true when SwitchToFastSync is called by state sync
	scheduler   *Routine
	processor   *Routine
	logger      log.Logger

@@ -44,44 +42,37 @@ type BlockchainReactor struct {
	mtx           tmsync.RWMutex
	maxPeerHeight int64
	syncHeight    int64
	events        chan Event // non-nil during a block sync
	events        chan Event // non-nil during a fast sync

	reporter behavior.Reporter
	reporter behaviour.Reporter
	io       iIO
	store    blockStore

	syncStartTime   time.Time
	syncStartHeight int64
	lastSyncRate    float64 // # blocks sync per sec base on the last 100 blocks
}

type blockApplier interface {
	ApplyBlock(state state.State, blockID types.BlockID, block *types.Block) (state.State, error)
	ApplyBlock(state state.State, blockID types.BlockID, block *types.Block) (state.State, int64, error)
}

// XXX: unify naming in this package around tmState
func newReactor(state state.State, store blockStore, reporter behavior.Reporter,
	blockApplier blockApplier, blockSync bool, metrics *cons.Metrics) *BlockchainReactor {
func newReactor(state state.State, store blockStore, reporter behaviour.Reporter,
	blockApplier blockApplier, fastSync bool) *BlockchainReactor {
	initHeight := state.LastBlockHeight + 1
	if initHeight == 1 {
		initHeight = state.InitialHeight
	}
	scheduler := newScheduler(initHeight, time.Now())
	pContext := newProcessorContext(store, blockApplier, state, metrics)
	pContext := newProcessorContext(store, blockApplier, state)
	// TODO: Fix naming to just newProcesssor
	// newPcState requires a processorContext
	processor := newPcState(pContext)

	return &BlockchainReactor{
		scheduler:       newRoutine("scheduler", scheduler.handle, chBufferSize),
		processor:       newRoutine("processor", processor.handle, chBufferSize),
		store:           store,
		reporter:        reporter,
		logger:          log.NewNopLogger(),
		blockSync:       sync.NewBool(blockSync),
		syncStartHeight: initHeight,
		syncStartTime:   time.Time{},
		lastSyncRate:    0,
		scheduler: newRoutine("scheduler", scheduler.handle, chBufferSize),
		processor: newRoutine("processor", processor.handle, chBufferSize),
		store:     store,
		reporter:  reporter,
		logger:    log.NewNopLogger(),
		fastSync:  fastSync,
	}
}

@@ -90,10 +81,9 @@ func NewBlockchainReactor(
	state state.State,
	blockApplier blockApplier,
	store blockStore,
	blockSync bool,
	metrics *cons.Metrics) *BlockchainReactor {
	reporter := behavior.NewMockReporter()
	return newReactor(state, store, reporter, blockApplier, blockSync, metrics)
	fastSync bool) *BlockchainReactor {
	reporter := behaviour.NewMockReporter()
	return newReactor(state, store, reporter, blockApplier, fastSync)
}

// SetSwitch implements Reactor interface.

@@ -136,23 +126,23 @@ func (r *BlockchainReactor) SetLogger(logger log.Logger) {

// Start implements cmn.Service interface
func (r *BlockchainReactor) Start() error {
	r.reporter = behavior.NewSwitchReporter(r.BaseReactor.Switch)
	if r.blockSync.IsSet() {
	r.reporter = behaviour.NewSwitchReporter(r.BaseReactor.Switch)
	if r.fastSync {
		err := r.startSync(nil)
		if err != nil {
			return fmt.Errorf("failed to start block sync: %w", err)
			return fmt.Errorf("failed to start fast sync: %w", err)
		}
	}
	return nil
}

// startSync begins a block sync, signaled by r.events being non-nil. If state is non-nil,
// startSync begins a fast sync, signalled by r.events being non-nil. If state is non-nil,
// the scheduler and processor is updated with this state on startup.
func (r *BlockchainReactor) startSync(state *state.State) error {
	r.mtx.Lock()
	defer r.mtx.Unlock()
	if r.events != nil {
		return errors.New("block sync already in progress")
		return errors.New("fast sync already in progress")
	}
	r.events = make(chan Event, chBufferSize)
	go r.scheduler.start()

@@ -167,7 +157,7 @@ func (r *BlockchainReactor) startSync(state *state.State) error {
	return nil
}

// endSync ends a block sync
// endSync ends a fast sync
func (r *BlockchainReactor) endSync() {
	r.mtx.Lock()
	defer r.mtx.Unlock()

@@ -179,17 +169,11 @@ func (r *BlockchainReactor) endSync() {
	r.processor.stop()
}

// SwitchToBlockSync is called by the state sync reactor when switching to block sync.
func (r *BlockchainReactor) SwitchToBlockSync(state state.State) error {
// SwitchToFastSync is called by the state sync reactor when switching to fast sync.
func (r *BlockchainReactor) SwitchToFastSync(state state.State) error {
	r.stateSynced = true
	state = state.Copy()

	err := r.startSync(&state)
	if err == nil {
		r.syncStartTime = time.Now()
	}

	return err
	return r.startSync(&state)
}

// reactor generated ticker events:

@@ -227,7 +211,7 @@ func (e rProcessBlock) String() string {
type bcBlockResponse struct {
	priorityNormal
	time   time.Time
	peerID types.NodeID
	peerID p2p.NodeID
	size   int64
	block  *types.Block
}

@@ -241,7 +225,7 @@ func (resp bcBlockResponse) String() string {
type bcNoBlockResponse struct {
	priorityNormal
	time   time.Time
	peerID types.NodeID
	peerID p2p.NodeID
	height int64
}

@@ -254,7 +238,7 @@ func (resp bcNoBlockResponse) String() string {
type bcStatusResponse struct {
	priorityNormal
	time   time.Time
	peerID types.NodeID
	peerID p2p.NodeID
	base   int64
	height int64
}

@@ -267,7 +251,7 @@ func (resp bcStatusResponse) String() string {
// new peer is connected
type bcAddNewPeer struct {
	priorityNormal
	peerID types.NodeID
	peerID p2p.NodeID
}

func (resp bcAddNewPeer) String() string {

@@ -277,7 +261,7 @@ func (resp bcAddNewPeer) String() string {
// existing peer is removed
type bcRemovePeer struct {
	priorityHigh
	peerID types.NodeID
	peerID p2p.NodeID
	reason interface{}
}

@@ -297,6 +281,7 @@ func (e bcResetState) String() string {

// Takes the channel as a parameter to avoid race conditions on r.events.
func (r *BlockchainReactor) demux(events <-chan Event) {
	var lastRate = 0.0
	var lastHundred = time.Now()

	var (

@@ -391,7 +376,7 @@ func (r *BlockchainReactor) demux(events <-chan Event) {
		r.processor.send(event)
	case scPeerError:
		r.processor.send(event)
		if err := r.reporter.Report(behavior.BadMessage(event.peerID, "scPeerError")); err != nil {
		if err := r.reporter.Report(behaviour.BadMessage(event.peerID, "scPeerError")); err != nil {
			r.logger.Error("Error reporting peer", "err", err)
		}
	case scBlockRequest:

@@ -427,27 +412,21 @@ func (r *BlockchainReactor) demux(events <-chan Event) {
	switch event := event.(type) {
	case pcBlockProcessed:
		r.setSyncHeight(event.height)
		if (r.syncHeight-r.syncStartHeight)%100 == 0 {
			newSyncRate := 100 / time.Since(lastHundred).Seconds()
			if r.lastSyncRate == 0 {
				r.lastSyncRate = newSyncRate
			} else {
				r.lastSyncRate = 0.9*r.lastSyncRate + 0.1*newSyncRate
			}
			r.logger.Info("block sync Rate", "height", r.syncHeight,
				"max_peer_height", r.maxPeerHeight, "blocks/s", r.lastSyncRate)
		if r.syncHeight%100 == 0 {
			lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
			r.logger.Info("Fast Sync Rate", "height", r.syncHeight,
				"max_peer_height", r.maxPeerHeight, "blocks/s", lastRate)
			lastHundred = time.Now()
		}
		r.scheduler.send(event)
	case pcBlockVerificationFailure:
		r.scheduler.send(event)
	case pcFinished:
		r.logger.Info("block sync complete, switching to consensus")
		r.logger.Info("Fast sync complete, switching to consensus")
		if !r.io.trySwitchToConsensus(event.tmState, event.blocksSynced > 0 || r.stateSynced) {
			r.logger.Error("Failed to switch to consensus reactor")
		}
		r.endSync()
		r.blockSync.UnSet()
		return
	case noOpEvent:
	default:
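The pcBlockProcessed branch above is where the two sides differ on rate bookkeeping: one folds each 100-block window into the reactor field r.lastSyncRate (seeding it from the first window), the other keeps a local lastRate moving average. A standalone sketch of the shared smoothing step — hypothetical function and names, not part of either side of the diff:

package main

import "fmt"

// ewmaRate folds a fresh blocks-per-second sample into the running
// estimate with the same 0.9/0.1 weighting used in both variants above.
func ewmaRate(last, sample float64) float64 {
	if last == 0 {
		// first 100-block window: adopt the raw sample
		return sample
	}
	return 0.9*last + 0.1*sample
}

func main() {
	r := ewmaRate(0, 5.0) // first window synced at 5 blocks/s
	r = ewmaRate(r, 4.0)  // next window: 0.9*5 + 0.1*4 = 4.9
	fmt.Printf("%.2f blocks/s\n", r)
}

Each sample is 100 divided by the seconds the last hundred blocks took, so a window synced in 20 seconds contributes a 5 blocks/s sample.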
@@ -493,13 +472,13 @@ func (r *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {

	if err := proto.Unmarshal(msgBytes, msgProto); err != nil {
		logger.Error("error decoding message", "err", err)
		_ = r.reporter.Report(behavior.BadMessage(src.ID(), err.Error()))
		_ = r.reporter.Report(behaviour.BadMessage(src.ID(), err.Error()))
		return
	}

	if err := msgProto.Validate(); err != nil {
		logger.Error("peer sent us an invalid msg", "msg", msgProto, "err", err)
		_ = r.reporter.Report(behavior.BadMessage(src.ID(), err.Error()))
		_ = r.reporter.Report(behaviour.BadMessage(src.ID(), err.Error()))
		return
	}

@@ -539,7 +518,7 @@ func (r *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
	bi, err := types.BlockFromProto(msg.BlockResponse.Block)
	if err != nil {
		logger.Error("error transitioning block from protobuf", "err", err)
		_ = r.reporter.Report(behavior.BadMessage(src.ID(), err.Error()))
		_ = r.reporter.Report(behaviour.BadMessage(src.ID(), err.Error()))
		return
	}
	r.mtx.RLock()

@@ -604,40 +583,8 @@ func (r *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
			ID:                  BlockchainChannel,
			Priority:            5,
			SendQueueCapacity:   2000,
			RecvBufferCapacity:  1024,
			RecvBufferCapacity:  50 * 4096,
			RecvMessageCapacity: bc.MaxMsgSize,
		},
	}
}

func (r *BlockchainReactor) GetMaxPeerBlockHeight() int64 {
	r.mtx.RLock()
	defer r.mtx.RUnlock()
	return r.maxPeerHeight
}

func (r *BlockchainReactor) GetTotalSyncedTime() time.Duration {
	if !r.blockSync.IsSet() || r.syncStartTime.IsZero() {
		return time.Duration(0)
	}
	return time.Since(r.syncStartTime)
}

func (r *BlockchainReactor) GetRemainingSyncTime() time.Duration {
	if !r.blockSync.IsSet() {
		return time.Duration(0)
	}

	r.mtx.RLock()
	defer r.mtx.RUnlock()

	targetSyncs := r.maxPeerHeight - r.syncStartHeight
	currentSyncs := r.syncHeight - r.syncStartHeight + 1
	if currentSyncs < 0 || r.lastSyncRate < 0.001 {
		return time.Duration(0)
	}

	remain := float64(targetSyncs-currentSyncs) / r.lastSyncRate

	return time.Duration(int64(remain * float64(time.Second)))
}
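GetRemainingSyncTime, removed on one side of the hunk above, estimates the time left as blocks-still-to-fetch divided by the smoothed rate. A self-contained sketch of that arithmetic with hypothetical inputs (the function and numbers here are illustrative, not the package's API):

package main

import (
	"fmt"
	"time"
)

// remainingSyncTime mirrors the estimate deleted above: with
// maxPeerHeight=6000, syncStartHeight=1000, syncHeight=3500 and a
// smoothed rate of 25 blocks/s it returns roughly 100 seconds
// ((5000 - 2501) / 25 ≈ 99.96s).
func remainingSyncTime(maxPeerHeight, syncStartHeight, syncHeight int64, lastSyncRate float64) time.Duration {
	if lastSyncRate < 0.001 {
		return 0 // no usable rate estimate yet; avoid dividing by ~zero
	}
	targetSyncs := maxPeerHeight - syncStartHeight
	currentSyncs := syncHeight - syncStartHeight + 1
	remain := float64(targetSyncs-currentSyncs) / lastSyncRate
	return time.Duration(int64(remain * float64(time.Second)))
}

func main() {
	fmt.Println(remainingSyncTime(6000, 1000, 3500, 25)) // ≈ 1m39.96s
}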
@@ -4,6 +4,7 @@ import (
	"fmt"
	"net"
	"os"
	"sort"
	"sync"
	"testing"
	"time"

@@ -14,30 +15,28 @@ import (
	dbm "github.com/tendermint/tm-db"

	abci "github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/behaviour"
	cfg "github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/internal/blocksync/v2/internal/behavior"
	cons "github.com/tendermint/tendermint/internal/consensus"
	"github.com/tendermint/tendermint/internal/mempool/mock"
	"github.com/tendermint/tendermint/internal/p2p"
	"github.com/tendermint/tendermint/internal/p2p/conn"
	"github.com/tendermint/tendermint/internal/test/factory"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/libs/service"
	bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"
	"github.com/tendermint/tendermint/mempool/mock"
	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/p2p/conn"
	bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
	"github.com/tendermint/tendermint/proxy"
	sm "github.com/tendermint/tendermint/state"
	sf "github.com/tendermint/tendermint/state/test/factory"
	tmstore "github.com/tendermint/tendermint/store"
	"github.com/tendermint/tendermint/store"
	"github.com/tendermint/tendermint/types"
	tmtime "github.com/tendermint/tendermint/types/time"
)

type mockPeer struct {
	service.Service
	id types.NodeID
	id p2p.NodeID
}

func (mp mockPeer) FlushStop() {}
func (mp mockPeer) ID() types.NodeID { return mp.id }
func (mp mockPeer) ID() p2p.NodeID { return mp.id }
func (mp mockPeer) RemoteIP() net.IP { return net.IP{} }
func (mp mockPeer) RemoteAddr() net.Addr { return &net.TCPAddr{IP: mp.RemoteIP(), Port: 8800} }

@@ -45,8 +44,8 @@ func (mp mockPeer) IsOutbound() bool { return true }
func (mp mockPeer) IsPersistent() bool { return true }
func (mp mockPeer) CloseConn() error { return nil }

func (mp mockPeer) NodeInfo() types.NodeInfo {
	return types.NodeInfo{
func (mp mockPeer) NodeInfo() p2p.NodeInfo {
	return p2p.NodeInfo{
		NodeID:     "",
		ListenAddr: "",
	}

@@ -65,17 +64,14 @@ type mockBlockStore struct {
	blocks map[int64]*types.Block
}

//nolint:unused
func (ml *mockBlockStore) Height() int64 {
	return int64(len(ml.blocks))
}

//nolint:unused
func (ml *mockBlockStore) LoadBlock(height int64) *types.Block {
	return ml.blocks[height]
}

//nolint:unused
func (ml *mockBlockStore) SaveBlock(block *types.Block, part *types.PartSet, commit *types.Commit) {
	ml.blocks[block.Height] = block
}

@@ -86,9 +82,9 @@ type mockBlockApplier struct {
// XXX: Add whitelist/blacklist?
func (mba *mockBlockApplier) ApplyBlock(
	state sm.State, blockID types.BlockID, block *types.Block,
) (sm.State, error) {
) (sm.State, int64, error) {
	state.LastBlockHeight++
	return state, nil
	return state, 0, nil
}

type mockSwitchIo struct {

@@ -153,9 +149,9 @@ type testReactorParams struct {
	mockA bool
}

func newTestReactor(t *testing.T, p testReactorParams) *BlockchainReactor {
	store, state, _ := newReactorStore(t, p.genDoc, p.privVals, p.startHeight)
	reporter := behavior.NewMockReporter()
func newTestReactor(p testReactorParams) *BlockchainReactor {
	store, state, _ := newReactorStore(p.genDoc, p.privVals, p.startHeight)
	reporter := behaviour.NewMockReporter()

	var appl blockApplier

@@ -166,17 +162,18 @@ func newTestReactor(t *testing.T, p testReactorParams) *BlockchainReactor {
		cc := proxy.NewLocalClientCreator(app)
		proxyApp := proxy.NewAppConns(cc)
		err := proxyApp.Start()
		require.NoError(t, err)
		if err != nil {
			panic(fmt.Errorf("error start app: %w", err))
		}
		db := dbm.NewMemDB()
		stateStore := sm.NewStore(db)
		blockStore := tmstore.NewBlockStore(dbm.NewMemDB())
		appl = sm.NewBlockExecutor(
			stateStore, p.logger, proxyApp.Consensus(), mock.Mempool{}, sm.EmptyEvidencePool{}, blockStore)
		err = stateStore.Save(state)
		require.NoError(t, err)
		appl = sm.NewBlockExecutor(stateStore, p.logger, proxyApp.Consensus(), mock.Mempool{}, sm.EmptyEvidencePool{})
		if err = stateStore.Save(state); err != nil {
			panic(err)
		}
	}

	r := newReactor(state, store, reporter, appl, true, cons.NopMetrics())
	r := newReactor(state, store, reporter, appl, true)
	logger := log.TestingLogger()
	r.SetLogger(logger.With("module", "blockchain"))

@@ -311,7 +308,7 @@ func newTestReactor(t *testing.T, p testReactorParams) *BlockchainReactor {
// t.Run(tt.name, func(t *testing.T) {
// 	reactor := newTestReactor(params)
// 	reactor.Start()
// 	reactor.reporter = behavior.NewMockReporter()
// 	reactor.reporter = behaviour.NewMockReporter()
// 	mockSwitch := &mockSwitchIo{switchedToConsensus: false}
// 	reactor.io = mockSwitch
// 	// time for go routines to start

@@ -366,7 +363,7 @@ func TestReactorHelperMode(t *testing.T) {

	config := cfg.ResetTestRoot("blockchain_reactor_v2_test")
	defer os.RemoveAll(config.RootDir)
	genDoc, privVals := factory.RandGenesisDoc(config, 1, false, 30)
	genDoc, privVals := randGenesisDoc(config.ChainID(), 1, false, 30)

	params := testReactorParams{
		logger: log.TestingLogger(),

@@ -401,7 +398,7 @@ func TestReactorHelperMode(t *testing.T) {
	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			reactor := newTestReactor(t, params)
			reactor := newTestReactor(params)
			mockSwitch := &mockSwitchIo{switchedToConsensus: false}
			reactor.io = mockSwitch
			err := reactor.Start()

@@ -419,7 +416,7 @@ func TestReactorHelperMode(t *testing.T) {
			msgBz, err := proto.Marshal(msgProto)
			require.NoError(t, err)

			reactor.Receive(channelID, mockPeer{id: types.NodeID(step.peer)}, msgBz)
			reactor.Receive(channelID, mockPeer{id: p2p.NodeID(step.peer)}, msgBz)
			assert.Equal(t, old+1, mockSwitch.numStatusResponse)
		case bcproto.BlockRequest:
			if ev.Height > params.startHeight {

@@ -431,7 +428,7 @@ func TestReactorHelperMode(t *testing.T) {
				msgBz, err := proto.Marshal(msgProto)
				require.NoError(t, err)

				reactor.Receive(channelID, mockPeer{id: types.NodeID(step.peer)}, msgBz)
				reactor.Receive(channelID, mockPeer{id: p2p.NodeID(step.peer)}, msgBz)
				assert.Equal(t, old+1, mockSwitch.numNoBlockResponse)
			} else {
				old := mockSwitch.numBlockResponse

@@ -442,7 +439,7 @@ func TestReactorHelperMode(t *testing.T) {
				msgBz, err := proto.Marshal(msgProto)
				require.NoError(t, err)

				reactor.Receive(channelID, mockPeer{id: types.NodeID(step.peer)}, msgBz)
				reactor.Receive(channelID, mockPeer{id: p2p.NodeID(step.peer)}, msgBz)
				assert.Equal(t, old+1, mockSwitch.numBlockResponse)
			}
		}

@@ -456,9 +453,9 @@ func TestReactorHelperMode(t *testing.T) {
func TestReactorSetSwitchNil(t *testing.T) {
	config := cfg.ResetTestRoot("blockchain_reactor_v2_test")
	defer os.RemoveAll(config.RootDir)
	genDoc, privVals := factory.RandGenesisDoc(config, 1, false, 30)
	genDoc, privVals := randGenesisDoc(config.ChainID(), 1, false, 30)

	reactor := newTestReactor(t, testReactorParams{
	reactor := newTestReactor(testReactorParams{
		logger:   log.TestingLogger(),
		genDoc:   genDoc,
		privVals: privVals,

@@ -469,18 +466,55 @@ func TestReactorSetSwitchNil(t *testing.T) {
	assert.Nil(t, reactor.io)
}

//----------------------------------------------
// utility funcs

func makeTxs(height int64) (txs []types.Tx) {
	for i := 0; i < 10; i++ {
		txs = append(txs, types.Tx([]byte{byte(height), byte(i)}))
	}
	return txs
}

func makeBlock(height int64, state sm.State, lastCommit *types.Commit) *types.Block {
	block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, nil, state.Validators.GetProposer().Address)
	return block
}

type testApp struct {
	abci.BaseApplication
}

func randGenesisDoc(chainID string, numValidators int, randPower bool, minPower int64) (
	*types.GenesisDoc, []types.PrivValidator) {
	validators := make([]types.GenesisValidator, numValidators)
	privValidators := make([]types.PrivValidator, numValidators)
	for i := 0; i < numValidators; i++ {
		val, privVal := types.RandValidator(randPower, minPower)
		validators[i] = types.GenesisValidator{
			PubKey: val.PubKey,
			Power:  val.VotingPower,
		}
		privValidators[i] = privVal
	}
	sort.Sort(types.PrivValidatorsByAddress(privValidators))

	return &types.GenesisDoc{
		GenesisTime: tmtime.Now(),
		ChainID:     chainID,
		Validators:  validators,
	}, privValidators
}

// Why are we importing the entire blockExecutor dependency graph here
// when we have the facilities to
func newReactorStore(
	t *testing.T,
	genDoc *types.GenesisDoc,
	privVals []types.PrivValidator,
	maxBlockHeight int64) (*tmstore.BlockStore, sm.State, *sm.BlockExecutor) {
	t.Helper()

	require.Len(t, privVals, 1)
	maxBlockHeight int64) (*store.BlockStore, sm.State, *sm.BlockExecutor) {
	if len(privVals) != 1 {
		panic("only support one validator")
	}
	app := &testApp{}
	cc := proxy.NewLocalClientCreator(app)
	proxyApp := proxy.NewAppConns(cc)

@@ -490,15 +524,20 @@ func newReactorStore(
	}

	stateDB := dbm.NewMemDB()
	blockStore := tmstore.NewBlockStore(dbm.NewMemDB())
	blockStore := store.NewBlockStore(dbm.NewMemDB())
	stateStore := sm.NewStore(stateDB)
	state, err := sm.MakeGenesisState(genDoc)
	require.NoError(t, err)
	state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
	if err != nil {
		panic(fmt.Errorf("error constructing state from genesis file: %w", err))
	}

	db := dbm.NewMemDB()
	stateStore = sm.NewStore(db)
	blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(),
		mock.Mempool{}, sm.EmptyEvidencePool{}, blockStore)
	err = stateStore.Save(state)
	require.NoError(t, err)
		mock.Mempool{}, sm.EmptyEvidencePool{})
	if err = stateStore.Save(state); err != nil {
		panic(err)
	}

	// add blocks in
	for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ {

@@ -506,25 +545,30 @@ func newReactorStore(
		if blockHeight > 1 {
			lastBlockMeta := blockStore.LoadBlockMeta(blockHeight - 1)
			lastBlock := blockStore.LoadBlock(blockHeight - 1)
			vote, err := factory.MakeVote(
				privVals[0],
				lastBlock.Header.ChainID, 0,
				lastBlock.Header.Height, 0, 2,
			vote, err := types.MakeVote(
				lastBlock.Header.Height,
				lastBlockMeta.BlockID,
				state.Validators,
				privVals[0],
				lastBlock.Header.ChainID,
				time.Now(),
			)
			require.NoError(t, err)
			if err != nil {
				panic(err)
			}
			lastCommit = types.NewCommit(vote.Height, vote.Round,
				lastBlockMeta.BlockID, []types.CommitSig{vote.CommitSig()})
		}

		thisBlock := sf.MakeBlock(state, blockHeight, lastCommit)
		thisBlock := makeBlock(blockHeight, state, lastCommit)

		thisParts := thisBlock.MakePartSet(types.BlockPartSizeBytes)
		blockID := types.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()}

		state, err = blockExec.ApplyBlock(state, blockID, thisBlock)
		require.NoError(t, err)
		state, _, err = blockExec.ApplyBlock(state, blockID, thisBlock)
		if err != nil {
			panic(fmt.Errorf("error apply block: %w", err))
		}

		blockStore.SaveBlock(thisBlock, thisParts, lastCommit)
	}

@@ -8,6 +8,7 @@ import (
	"sort"
	"time"

	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/types"
)

@@ -25,7 +26,7 @@ func (e scFinishedEv) String() string {
// send a blockRequest message
type scBlockRequest struct {
	priorityNormal
	peerID types.NodeID
	peerID p2p.NodeID
	height int64
}

@@ -36,7 +37,7 @@ func (e scBlockRequest) String() string {
// a block has been received and validated by the scheduler
type scBlockReceived struct {
	priorityNormal
	peerID types.NodeID
	peerID p2p.NodeID
	block  *types.Block
}

@@ -47,7 +48,7 @@ func (e scBlockReceived) String() string {
// scheduler detected a peer error
type scPeerError struct {
	priorityHigh
	peerID types.NodeID
	peerID p2p.NodeID
	reason error
}

@@ -58,7 +59,7 @@ func (e scPeerError) String() string {
// scheduler removed a set of peers (timed out or slow peer)
type scPeersPruned struct {
	priorityHigh
	peers []types.NodeID
	peers []p2p.NodeID
}

func (e scPeersPruned) String() string {

@@ -125,7 +126,7 @@ func (e peerState) String() string {
}

type scPeer struct {
	peerID types.NodeID
	peerID p2p.NodeID

	// initialized as New when peer is added, updated to Ready when statusUpdate is received,
	// updated to Removed when peer is removed

@@ -142,7 +143,7 @@ func (p scPeer) String() string {
		p.state, p.base, p.height, p.lastTouched, p.lastRate, p.peerID)
}

func newScPeer(peerID types.NodeID) *scPeer {
func newScPeer(peerID p2p.NodeID) *scPeer {
	return &scPeer{
		peerID: peerID,
		state:  peerStateNew,

@@ -163,14 +164,14 @@ type scheduler struct {
	height int64

	// lastAdvance tracks the last time a block execution happened.
	// syncTimeout is the maximum time the scheduler waits to advance in the block sync process before finishing.
	// syncTimeout is the maximum time the scheduler waits to advance in the fast sync process before finishing.
	// This covers the cases where there are no peers or all peers have a lower height.
	lastAdvance time.Time
	syncTimeout time.Duration

	// a map of peerID to scheduler specific peer struct `scPeer` used to keep
	// track of peer specific state
	peers map[types.NodeID]*scPeer
	peers map[p2p.NodeID]*scPeer
	peerTimeout time.Duration // maximum response time from a peer otherwise prune
	minRecvRate int64        // minimum receive rate from peer otherwise prune

@@ -182,13 +183,13 @@ type scheduler struct {
	blockStates map[int64]blockState

	// a map of heights to the peer we are waiting a response from
	pendingBlocks map[int64]types.NodeID
	pendingBlocks map[int64]p2p.NodeID

	// the time at which a block was put in blockStatePending
	pendingTime map[int64]time.Time

	// a map of heights to the peers that put the block in blockStateReceived
	receivedBlocks map[int64]types.NodeID
	receivedBlocks map[int64]p2p.NodeID
}

func (sc scheduler) String() string {
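The scheduler fields above only change key type in this diff; the bookkeeping itself is that every tracked height sits in exactly one logical state, and pendingBlocks/pendingTime/receivedBlocks are populated only while the height is in the matching state. A compressed sketch of one transition under that invariant — a simplified, hypothetical helper using plain strings, not the package's own types:

package main

import "fmt"

// markPendingSketch moves a height from New to Pending and records which
// peer was asked, failing loudly if the height is in any other state.
func markPendingSketch(states map[int64]string, pending map[int64]string, height int64, peerID string) error {
	if states[height] != "New" {
		return fmt.Errorf("height %d is %q, not New", height, states[height])
	}
	states[height] = "Pending" // New -> Pending
	pending[height] = peerID   // remember whom we asked
	return nil
}

func main() {
	states := map[int64]string{5: "New"}
	pending := map[int64]string{}
	if err := markPendingSketch(states, pending, 5, "P1"); err != nil {
		panic(err)
	}
	fmt.Println(states[5], pending[5]) // Pending P1
}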
@@ -203,10 +204,10 @@ func newScheduler(initHeight int64, startTime time.Time) *scheduler {
		syncTimeout:    60 * time.Second,
		height:         initHeight,
		blockStates:    make(map[int64]blockState),
		peers:          make(map[types.NodeID]*scPeer),
		pendingBlocks:  make(map[int64]types.NodeID),
		peers:          make(map[p2p.NodeID]*scPeer),
		pendingBlocks:  make(map[int64]p2p.NodeID),
		pendingTime:    make(map[int64]time.Time),
		receivedBlocks: make(map[int64]types.NodeID),
		receivedBlocks: make(map[int64]p2p.NodeID),
		targetPending:  10,               // TODO - pass as param
		peerTimeout:    15 * time.Second, // TODO - pass as param
		minRecvRate:    0,                // int64(7680), TODO - pass as param

@@ -215,14 +216,14 @@ func newScheduler(initHeight int64, startTime time.Time) *scheduler {
	return &sc
}

func (sc *scheduler) ensurePeer(peerID types.NodeID) *scPeer {
func (sc *scheduler) ensurePeer(peerID p2p.NodeID) *scPeer {
	if _, ok := sc.peers[peerID]; !ok {
		sc.peers[peerID] = newScPeer(peerID)
	}
	return sc.peers[peerID]
}

func (sc *scheduler) touchPeer(peerID types.NodeID, time time.Time) error {
func (sc *scheduler) touchPeer(peerID p2p.NodeID, time time.Time) error {
	peer, ok := sc.peers[peerID]
	if !ok {
		return fmt.Errorf("couldn't find peer %s", peerID)

@@ -237,7 +238,7 @@ func (sc *scheduler) touchPeer(peerID types.NodeID, time time.Time) error {
	return nil
}

func (sc *scheduler) removePeer(peerID types.NodeID) {
func (sc *scheduler) removePeer(peerID p2p.NodeID) {
	peer, ok := sc.peers[peerID]
	if !ok {
		return

@@ -297,7 +298,7 @@ func (sc *scheduler) addNewBlocks() {
	}
}

func (sc *scheduler) setPeerRange(peerID types.NodeID, base int64, height int64) error {
func (sc *scheduler) setPeerRange(peerID p2p.NodeID, base int64, height int64) error {
	peer := sc.ensurePeer(peerID)

	if peer.state == peerStateRemoved {

@@ -332,8 +333,8 @@ func (sc *scheduler) getStateAtHeight(height int64) blockState {
	}
}

func (sc *scheduler) getPeersWithHeight(height int64) []types.NodeID {
	peers := make([]types.NodeID, 0)
func (sc *scheduler) getPeersWithHeight(height int64) []p2p.NodeID {
	peers := make([]p2p.NodeID, 0)
	for _, peer := range sc.peers {
		if peer.state != peerStateReady {
			continue

@@ -345,8 +346,8 @@ func (sc *scheduler) getPeersWithHeight(height int64) []types.NodeID {
	return peers
}

func (sc *scheduler) prunablePeers(peerTimout time.Duration, minRecvRate int64, now time.Time) []types.NodeID {
	prunable := make([]types.NodeID, 0)
func (sc *scheduler) prunablePeers(peerTimout time.Duration, minRecvRate int64, now time.Time) []p2p.NodeID {
	prunable := make([]p2p.NodeID, 0)
	for peerID, peer := range sc.peers {
		if peer.state != peerStateReady {
			continue

@@ -365,7 +366,7 @@ func (sc *scheduler) setStateAtHeight(height int64, state blockState) {
}

// CONTRACT: peer exists and in Ready state.
func (sc *scheduler) markReceived(peerID types.NodeID, height int64, size int64, now time.Time) error {
func (sc *scheduler) markReceived(peerID p2p.NodeID, height int64, size int64, now time.Time) error {
	peer := sc.peers[peerID]

	if state := sc.getStateAtHeight(height); state != blockStatePending || sc.pendingBlocks[height] != peerID {

@@ -389,7 +390,7 @@ func (sc *scheduler) markReceived(peerID types.NodeID, height int64, size int64,
	return nil
}

func (sc *scheduler) markPending(peerID types.NodeID, height int64, time time.Time) error {
func (sc *scheduler) markPending(peerID p2p.NodeID, height int64, time time.Time) error {
	state := sc.getStateAtHeight(height)
	if state != blockStateNew {
		return fmt.Errorf("block %d should be in blockStateNew but is %s", height, state)

@@ -471,7 +472,7 @@ func (sc *scheduler) nextHeightToSchedule() int64 {
	return min
}

func (sc *scheduler) pendingFrom(peerID types.NodeID) []int64 {
func (sc *scheduler) pendingFrom(peerID p2p.NodeID) []int64 {
	var heights []int64
	for height, pendingPeerID := range sc.pendingBlocks {
		if pendingPeerID == peerID {

@@ -481,7 +482,7 @@ func (sc *scheduler) pendingFrom(peerID types.NodeID) []int64 {
	return heights
}

func (sc *scheduler) selectPeer(height int64) (types.NodeID, error) {
func (sc *scheduler) selectPeer(height int64) (p2p.NodeID, error) {
	peers := sc.getPeersWithHeight(height)
	if len(peers) == 0 {
		return "", fmt.Errorf("cannot find peer for height %d", height)

@@ -489,7 +490,7 @@ func (sc *scheduler) selectPeer(height int64) (types.NodeID, error) {

	// create a map from number of pending requests to a list
	// of peers having that number of pending requests.
	pendingFrom := make(map[int][]types.NodeID)
	pendingFrom := make(map[int][]p2p.NodeID)
	for _, peerID := range peers {
		numPending := len(sc.pendingFrom(peerID))
		pendingFrom[numPending] = append(pendingFrom[numPending], peerID)

@@ -508,7 +509,7 @@ func (sc *scheduler) selectPeer(height int64) (types.NodeID, error) {
}

// PeerByID is a list of peers sorted by peerID.
type PeerByID []types.NodeID
type PeerByID []p2p.NodeID

func (peers PeerByID) Len() int {
	return len(peers)
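selectPeer above balances load by bucketing ready peers by their number of in-flight requests and then drawing deterministically from the least-loaded bucket; PeerByID exists to make that draw stable. A sketch of the idea, simplified to plain strings — a hypothetical helper, not the package's API:

package main

import (
	"fmt"
	"sort"
)

func selectLeastLoaded(peers []string, pendingCount map[string]int, height int64) (string, error) {
	if len(peers) == 0 {
		return "", fmt.Errorf("no candidate peers for height %d", height)
	}
	byLoad := make(map[int][]string)
	minLoad := int(^uint(0) >> 1) // max-int sentinel
	for _, p := range peers {
		n := pendingCount[p]
		byLoad[n] = append(byLoad[n], p)
		if n < minLoad {
			minLoad = n
		}
	}
	best := byLoad[minLoad]
	sort.Strings(best) // stable ordering, the role PeerByID plays above
	// spread consecutive heights across the equally loaded peers
	return best[int(height)%len(best)], nil
}

func main() {
	peers := []string{"P2", "P1", "P3"}
	load := map[string]int{"P1": 1, "P2": 0, "P3": 0}
	p, _ := selectLeastLoaded(peers, load, 6)
	fmt.Println(p) // "P2": least-loaded group {P2, P3}, 6 % 2 = 0
}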
@@ -10,6 +10,7 @@ import (
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/types"
)

@@ -19,9 +20,9 @@ type scTestParams struct {
	initHeight    int64
	height        int64
	allB          []int64
	pending       map[int64]types.NodeID
	pending       map[int64]p2p.NodeID
	pendingTime   map[int64]time.Time
	received      map[int64]types.NodeID
	received      map[int64]p2p.NodeID
	peerTimeout   time.Duration
	minRecvRate   int64
	targetPending int

@@ -40,7 +41,7 @@ func verifyScheduler(sc *scheduler) {
}

func newTestScheduler(params scTestParams) *scheduler {
	peers := make(map[types.NodeID]*scPeer)
	peers := make(map[p2p.NodeID]*scPeer)
	var maxHeight int64

	initHeight := params.initHeight

@@ -53,8 +54,8 @@
	}

	for id, peer := range params.peers {
		peer.peerID = types.NodeID(id)
		peers[types.NodeID(id)] = peer
		peer.peerID = p2p.NodeID(id)
		peers[p2p.NodeID(id)] = peer
		if maxHeight < peer.height {
			maxHeight = peer.height
		}

@@ -121,7 +122,7 @@ func TestScMaxHeights(t *testing.T) {
			name: "one ready peer",
			sc: scheduler{
				height: 3,
				peers:  map[types.NodeID]*scPeer{"P1": {height: 6, state: peerStateReady}},
				peers:  map[p2p.NodeID]*scPeer{"P1": {height: 6, state: peerStateReady}},
			},
			wantMax: 6,
		},

@@ -129,7 +130,7 @@
			name: "ready and removed peers",
			sc: scheduler{
				height: 1,
				peers: map[types.NodeID]*scPeer{
				peers: map[p2p.NodeID]*scPeer{
					"P1": {height: 4, state: peerStateReady},
					"P2": {height: 10, state: peerStateRemoved}},
			},

@@ -139,7 +140,7 @@
			name: "removed peers",
			sc: scheduler{
				height: 1,
				peers: map[types.NodeID]*scPeer{
				peers: map[p2p.NodeID]*scPeer{
					"P1": {height: 4, state: peerStateRemoved},
					"P2": {height: 10, state: peerStateRemoved}},
			},

@@ -149,7 +150,7 @@
			name: "new peers",
			sc: scheduler{
				height: 1,
				peers: map[types.NodeID]*scPeer{
				peers: map[p2p.NodeID]*scPeer{
					"P1": {base: -1, height: -1, state: peerStateNew},
					"P2": {base: -1, height: -1, state: peerStateNew}},
			},

@@ -159,7 +160,7 @@
			name: "mixed peers",
			sc: scheduler{
				height: 1,
				peers: map[types.NodeID]*scPeer{
				peers: map[p2p.NodeID]*scPeer{
					"P1": {height: -1, state: peerStateNew},
					"P2": {height: 10, state: peerStateReady},
					"P3": {height: 20, state: peerStateRemoved},

@@ -186,7 +187,7 @@ func TestScMaxHeights(t *testing.T) {
func TestScEnsurePeer(t *testing.T) {

	type args struct {
		peerID types.NodeID
		peerID p2p.NodeID
	}
	tests := []struct {
		name string

@@ -243,7 +244,7 @@ func TestScTouchPeer(t *testing.T) {
	now := time.Now()

	type args struct {
		peerID types.NodeID
		peerID p2p.NodeID
		time   time.Time
	}

@@ -315,13 +316,13 @@ func TestScPrunablePeers(t *testing.T) {
		name       string
		fields     scTestParams
		args       args
		wantResult []types.NodeID
		wantResult []p2p.NodeID
	}{
		{
			name:       "no peers",
			fields:     scTestParams{peers: map[string]*scPeer{}},
			args:       args{threshold: time.Second, time: now.Add(time.Second + time.Millisecond), minSpeed: 100},
			wantResult: []types.NodeID{},
			wantResult: []p2p.NodeID{},
		},
		{
			name: "mixed peers",

@@ -340,7 +341,7 @@ func TestScPrunablePeers(t *testing.T) {
				"P6": {state: peerStateReady, lastTouched: now.Add(time.Second), lastRate: 90},
			}},
			args:       args{threshold: time.Second, time: now.Add(time.Second + time.Millisecond), minSpeed: 100},
			wantResult: []types.NodeID{"P4", "P5", "P6"},
			wantResult: []p2p.NodeID{"P4", "P5", "P6"},
		},
	}

@@ -360,7 +361,7 @@ func TestScPrunablePeers(t *testing.T) {
func TestScRemovePeer(t *testing.T) {

	type args struct {
		peerID types.NodeID
		peerID p2p.NodeID
	}
	tests := []struct {
		name string

@@ -423,13 +424,13 @@ func TestScRemovePeer(t *testing.T) {
			fields: scTestParams{
				peers:   map[string]*scPeer{"P1": {height: 3, state: peerStateReady}},
				allB:    []int64{1, 2, 3},
				pending: map[int64]types.NodeID{1: "P1"},
				pending: map[int64]p2p.NodeID{1: "P1"},
			},
			args: args{peerID: "P1"},
			wantFields: scTestParams{
				peers:   map[string]*scPeer{"P1": {height: 3, state: peerStateRemoved}},
				allB:    []int64{},
				pending: map[int64]types.NodeID{},
				pending: map[int64]p2p.NodeID{},
			},
		},
		{

@@ -437,13 +438,13 @@ func TestScRemovePeer(t *testing.T) {
			fields: scTestParams{
				peers:    map[string]*scPeer{"P1": {height: 3, state: peerStateReady}},
				allB:     []int64{1, 2, 3},
				received: map[int64]types.NodeID{1: "P1"},
				received: map[int64]p2p.NodeID{1: "P1"},
			},
			args: args{peerID: "P1"},
			wantFields: scTestParams{
				peers:    map[string]*scPeer{"P1": {height: 3, state: peerStateRemoved}},
				allB:     []int64{},
				received: map[int64]types.NodeID{},
				received: map[int64]p2p.NodeID{},
			},
		},
		{

@@ -451,15 +452,15 @@ func TestScRemovePeer(t *testing.T) {
			fields: scTestParams{
				peers:    map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
				allB:     []int64{1, 2, 3, 4},
				pending:  map[int64]types.NodeID{1: "P1", 3: "P1"},
				received: map[int64]types.NodeID{2: "P1", 4: "P1"},
				pending:  map[int64]p2p.NodeID{1: "P1", 3: "P1"},
				received: map[int64]p2p.NodeID{2: "P1", 4: "P1"},
			},
			args: args{peerID: "P1"},
			wantFields: scTestParams{
				peers:    map[string]*scPeer{"P1": {height: 4, state: peerStateRemoved}},
				allB:     []int64{},
				pending:  map[int64]types.NodeID{},
				received: map[int64]types.NodeID{},
				pending:  map[int64]p2p.NodeID{},
				received: map[int64]p2p.NodeID{},
			},
		},
		{

@@ -470,8 +471,8 @@ func TestScRemovePeer(t *testing.T) {
				"P2": {height: 6, state: peerStateReady},
			},
			allB:     []int64{1, 2, 3, 4, 5, 6},
			pending:  map[int64]types.NodeID{1: "P1", 3: "P2", 6: "P1"},
			received: map[int64]types.NodeID{2: "P1", 4: "P2", 5: "P2"},
			pending:  map[int64]p2p.NodeID{1: "P1", 3: "P2", 6: "P1"},
			received: map[int64]p2p.NodeID{2: "P1", 4: "P2", 5: "P2"},
		},
		args: args{peerID: "P1"},
		wantFields: scTestParams{

@@ -480,8 +481,8 @@ func TestScRemovePeer(t *testing.T) {
				"P2": {height: 6, state: peerStateReady},
			},
			allB:     []int64{1, 2, 3, 4, 5, 6},
			pending:  map[int64]types.NodeID{3: "P2"},
			received: map[int64]types.NodeID{4: "P2", 5: "P2"},
			pending:  map[int64]p2p.NodeID{3: "P2"},
			received: map[int64]p2p.NodeID{4: "P2", 5: "P2"},
		},
	},
}

@@ -500,7 +501,7 @@ func TestScRemovePeer(t *testing.T) {
func TestScSetPeerRange(t *testing.T) {

	type args struct {
		peerID types.NodeID
		peerID p2p.NodeID
		base   int64
		height int64
	}

@@ -621,25 +622,25 @@ func TestScGetPeersWithHeight(t *testing.T) {
		name       string
		fields     scTestParams
		args       args
		wantResult []types.NodeID
		wantResult []p2p.NodeID
	}{
		{
			name:       "no peers",
			fields:     scTestParams{peers: map[string]*scPeer{}},
			args:       args{height: 10},
			wantResult: []types.NodeID{},
			wantResult: []p2p.NodeID{},
		},
		{
			name:       "only new peers",
			fields:     scTestParams{peers: map[string]*scPeer{"P1": {height: -1, state: peerStateNew}}},
			args:       args{height: 10},
			wantResult: []types.NodeID{},
			wantResult: []p2p.NodeID{},
		},
		{
			name:       "only Removed peers",
			fields:     scTestParams{peers: map[string]*scPeer{"P1": {height: 4, state: peerStateRemoved}}},
			args:       args{height: 2},
			wantResult: []types.NodeID{},
			wantResult: []p2p.NodeID{},
		},
		{
			name: "one Ready shorter peer",

@@ -648,7 +649,7 @@
				allB: []int64{1, 2, 3, 4},
			},
			args:       args{height: 5},
			wantResult: []types.NodeID{},
			wantResult: []p2p.NodeID{},
		},
		{
			name: "one Ready equal peer",

@@ -657,7 +658,7 @@
				allB: []int64{1, 2, 3, 4},
			},
			args:       args{height: 4},
			wantResult: []types.NodeID{"P1"},
			wantResult: []p2p.NodeID{"P1"},
		},
		{
			name: "one Ready higher peer",

@@ -667,7 +668,7 @@
				allB: []int64{1, 2, 3, 4},
			},
			args:       args{height: 4},
			wantResult: []types.NodeID{"P1"},
			wantResult: []p2p.NodeID{"P1"},
		},
		{
			name: "one Ready higher peer at base",

@@ -677,7 +678,7 @@
				allB: []int64{1, 2, 3, 4},
			},
			args:       args{height: 4},
			wantResult: []types.NodeID{"P1"},
			wantResult: []p2p.NodeID{"P1"},
		},
		{
			name: "one Ready higher peer with higher base",

@@ -687,7 +688,7 @@
				allB: []int64{1, 2, 3, 4},
			},
			args:       args{height: 4},
			wantResult: []types.NodeID{},
			wantResult: []p2p.NodeID{},
		},
		{
			name: "multiple mixed peers",

@@ -702,7 +703,7 @@
				allB: []int64{8, 9, 10, 11},
			},
			args:       args{height: 8},
			wantResult: []types.NodeID{"P2", "P5"},
			wantResult: []p2p.NodeID{"P2", "P5"},
		},
	}

@@ -724,7 +725,7 @@ func TestScMarkPending(t *testing.T) {
	now := time.Now()

	type args struct {
		peerID types.NodeID
		peerID p2p.NodeID
		height int64
		tm     time.Time
	}

@@ -820,14 +821,14 @@ func TestScMarkPending(t *testing.T) {
			fields: scTestParams{
				peers:       map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
				allB:        []int64{1, 2},
				pending:     map[int64]types.NodeID{1: "P1"},
				pending:     map[int64]p2p.NodeID{1: "P1"},
				pendingTime: map[int64]time.Time{1: now},
			},
			args: args{peerID: "P1", height: 2, tm: now.Add(time.Millisecond)},
			wantFields: scTestParams{
				peers:       map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
				allB:        []int64{1, 2},
				pending:     map[int64]types.NodeID{1: "P1", 2: "P1"},
				pending:     map[int64]p2p.NodeID{1: "P1", 2: "P1"},
				pendingTime: map[int64]time.Time{1: now, 2: now.Add(time.Millisecond)},
			},
		},

@@ -850,7 +851,7 @@ func TestScMarkReceived(t *testing.T) {
	now := time.Now()

	type args struct {
		peerID types.NodeID
		peerID p2p.NodeID
		height int64
		size   int64
		tm     time.Time

@@ -890,7 +891,7 @@ func TestScMarkReceived(t *testing.T) {
				"P2": {height: 4, state: peerStateReady},
			},
			allB:    []int64{1, 2, 3, 4},
			pending: map[int64]types.NodeID{1: "P1", 2: "P2", 3: "P2", 4: "P1"},
			pending: map[int64]p2p.NodeID{1: "P1", 2: "P2", 3: "P2", 4: "P1"},
		},
		args: args{peerID: "P1", height: 2, size: 1000, tm: now},
		wantFields: scTestParams{

@@ -899,7 +900,7 @@
				"P2": {height: 4, state: peerStateReady},
			},
			allB:    []int64{1, 2, 3, 4},
			pending: map[int64]types.NodeID{1: "P1", 2: "P2", 3: "P2", 4: "P1"},
			pending: map[int64]p2p.NodeID{1: "P1", 2: "P2", 3: "P2", 4: "P1"},
		},
		wantErr: true,
	},

@@ -908,13 +909,13 @@
		fields: scTestParams{
			peers:   map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
			allB:    []int64{1, 2, 3, 4},
			pending: map[int64]types.NodeID{},
			pending: map[int64]p2p.NodeID{},
		},
		args: args{peerID: "P1", height: 2, size: 1000, tm: now},
		wantFields: scTestParams{
			peers:   map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
			allB:    []int64{1, 2, 3, 4},
			pending: map[int64]types.NodeID{},
			pending: map[int64]p2p.NodeID{},
		},
		wantErr: true,
	},

@@ -923,14 +924,14 @@
		fields: scTestParams{
			peers:       map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
			allB:        []int64{1, 2},
			pending:     map[int64]types.NodeID{1: "P1", 2: "P1"},
			pending:     map[int64]p2p.NodeID{1: "P1", 2: "P1"},
			pendingTime: map[int64]time.Time{1: now, 2: now.Add(time.Second)},
		},
		args: args{peerID: "P1", height: 2, size: 1000, tm: now},
		wantFields: scTestParams{
			peers:       map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
			allB:        []int64{1, 2},
			pending:     map[int64]types.NodeID{1: "P1", 2: "P1"},
			pending:     map[int64]p2p.NodeID{1: "P1", 2: "P1"},
			pendingTime: map[int64]time.Time{1: now, 2: now.Add(time.Second)},
		},
		wantErr: true,

@@ -940,16 +941,16 @@
		fields: scTestParams{
			peers:       map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
			allB:        []int64{1, 2},
			pending:     map[int64]types.NodeID{1: "P1", 2: "P1"},
			pending:     map[int64]p2p.NodeID{1: "P1", 2: "P1"},
			pendingTime: map[int64]time.Time{1: now, 2: now},
		},
		args: args{peerID: "P1", height: 2, size: 1000, tm: now.Add(time.Millisecond)},
		wantFields: scTestParams{
			peers:       map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
			allB:        []int64{1, 2},
			pending:     map[int64]types.NodeID{1: "P1"},
			pending:     map[int64]p2p.NodeID{1: "P1"},
			pendingTime: map[int64]time.Time{1: now},
			received:    map[int64]types.NodeID{2: "P1"},
			received:    map[int64]p2p.NodeID{2: "P1"},
		},
	},
}

@@ -990,7 +991,7 @@ func TestScMarkProcessed(t *testing.T) {
			height:        2,
			peers:         map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
			allB:          []int64{2},
			pending:       map[int64]types.NodeID{2: "P1"},
			pending:       map[int64]p2p.NodeID{2: "P1"},
			pendingTime:   map[int64]time.Time{2: now},
			targetPending: 1,
		},

@@ -1008,15 +1009,15 @@ func TestScMarkProcessed(t *testing.T) {
			height:      1,
			peers:       map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
			allB:        []int64{1, 2},
			pending:     map[int64]types.NodeID{2: "P1"},
			pending:     map[int64]p2p.NodeID{2: "P1"},
			pendingTime: map[int64]time.Time{2: now},
			received:    map[int64]types.NodeID{1: "P1"}},
			received:    map[int64]p2p.NodeID{1: "P1"}},
		args: args{height: 1},
		wantFields: scTestParams{
			height:      2,
			peers:       map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
			allB:        []int64{2},
			pending:     map[int64]types.NodeID{2: "P1"},
			pending:     map[int64]p2p.NodeID{2: "P1"},
			pendingTime: map[int64]time.Time{2: now}},
	},
}

@@ -1100,7 +1101,7 @@ func TestScAllBlocksProcessed(t *testing.T) {
		fields: scTestParams{
			peers:       map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
			allB:        []int64{1, 2, 3, 4},
			pending:     map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
			pending:     map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
			pendingTime: map[int64]time.Time{1: now, 2: now, 3: now, 4: now},
		},
		wantResult: false,

@@ -1110,7 +1111,7 @@
		fields: scTestParams{
			peers:    map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
			allB:     []int64{1, 2, 3, 4},
			received: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
			received: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
		},
		wantResult: false,
	},

@@ -1121,7 +1122,7 @@
		fields: scTestParams{
			peers: map[string]*scPeer{
				"P1": {height: 4, state: peerStateReady}},
			allB:     []int64{4},
			received: map[int64]types.NodeID{4: "P1"},
			received: map[int64]p2p.NodeID{4: "P1"},
		},
		wantResult: true,
	},

@@ -1130,7 +1131,7 @@
		fields: scTestParams{
			peers:       map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
			allB:        []int64{1, 2, 3, 4},
			pending:     map[int64]types.NodeID{2: "P1", 4: "P1"},
			pending:     map[int64]p2p.NodeID{2: "P1", 4: "P1"},
			pendingTime: map[int64]time.Time{2: now, 4: now},
		},
		wantResult: false,

@@ -1178,7 +1179,7 @@ func TestScNextHeightToSchedule(t *testing.T) {
			initHeight:  1,
			peers:       map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
			allB:        []int64{1, 2, 3, 4},
			pending:     map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
			pending:     map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
			pendingTime: map[int64]time.Time{1: now, 2: now, 3: now, 4: now},
		},
		wantHeight: -1,

@@ -1189,7 +1190,7 @@
			initHeight: 1,
			peers:      map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
			allB:       []int64{1, 2, 3, 4},
			received:   map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
			received:   map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
		},
		wantHeight: -1,
	},

@@ -1208,7 +1209,7 @@
			initHeight:  1,
			peers:       map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
			allB:        []int64{1, 2, 3, 4},
			pending:     map[int64]types.NodeID{2: "P1"},
			pending:     map[int64]p2p.NodeID{2: "P1"},
			pendingTime: map[int64]time.Time{2: now},
		},
		wantHeight: 1,

@@ -1238,7 +1239,7 @@ func TestScSelectPeer(t *testing.T) {
		name       string
		fields     scTestParams
		args       args
		wantResult types.NodeID
		wantResult p2p.NodeID
		wantError  bool
	}{
		{

@@ -1306,7 +1307,7 @@ func TestScSelectPeer(t *testing.T) {
				"P1": {height: 8, state: peerStateReady},
				"P2": {height: 9, state: peerStateReady}},
			allB: []int64{4, 5, 6, 7, 8, 9},
			pending: map[int64]types.NodeID{
			pending: map[int64]p2p.NodeID{
				4: "P1", 6: "P1",
				5: "P2",
			},

@@ -1322,7 +1323,7 @@
				"P1": {height: 15, state: peerStateReady},
				"P3": {height: 15, state: peerStateReady}},
			allB: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
			pending: map[int64]types.NodeID{
			pending: map[int64]p2p.NodeID{
				1: "P1", 2: "P1",
				3: "P3", 4: "P3",
				5: "P2", 6: "P2",

@@ -1391,7 +1392,7 @@ func TestScHandleBlockResponse(t *testing.T) {
	now := time.Now()
	block6FromP1 := bcBlockResponse{
		time:   now.Add(time.Millisecond),
		peerID: types.NodeID("P1"),
		peerID: p2p.NodeID("P1"),
		size:   100,
		block:  makeScBlock(6),
	}

@@ -1432,7 +1433,7 @@ func TestScHandleBlockResponse(t *testing.T) {
		fields: scTestParams{
			peers:       map[string]*scPeer{"P2": {height: 8, state: peerStateReady}},
			allB:        []int64{1, 2, 3, 4, 5, 6, 7, 8},
			pending:     map[int64]types.NodeID{6: "P2"},
			pending:     map[int64]p2p.NodeID{6: "P2"},
			pendingTime: map[int64]time.Time{6: now},
		},
		args: args{event: block6FromP1},

@@ -1443,7 +1444,7 @@
		fields: scTestParams{
			peers:       map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
			allB:        []int64{1, 2, 3, 4, 5, 6, 7, 8},
			pending:     map[int64]types.NodeID{6: "P1"},
			pending:     map[int64]p2p.NodeID{6: "P1"},
			pendingTime: map[int64]time.Time{6: now.Add(time.Second)},
		},
		args: args{event: block6FromP1},

@@ -1454,7 +1455,7 @@
		fields: scTestParams{
			peers:       map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
			allB:        []int64{1, 2, 3, 4, 5, 6, 7, 8},
			pending:     map[int64]types.NodeID{6: "P1"},
			pending:     map[int64]p2p.NodeID{6: "P1"},
			pendingTime: map[int64]time.Time{6: now},
		},
		args: args{event: block6FromP1},

@@ -1476,7 +1477,7 @@ func TestScHandleNoBlockResponse(t *testing.T) {
	now := time.Now()
	noBlock6FromP1 := bcNoBlockResponse{
		time:   now.Add(time.Millisecond),
		peerID: types.NodeID("P1"),
		peerID: p2p.NodeID("P1"),
		height: 6,
	}

@@ -1512,14 +1513,14 @@ func TestScHandleNoBlockResponse(t *testing.T) {
||||
@@ -1512,14 +1513,14 @@ func TestScHandleNoBlockResponse(t *testing.T) {
|
||||
fields: scTestParams{
|
||||
peers: map[string]*scPeer{"P2": {height: 8, state: peerStateReady}},
|
||||
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8},
|
||||
pending: map[int64]types.NodeID{6: "P2"},
|
||||
pending: map[int64]p2p.NodeID{6: "P2"},
|
||||
pendingTime: map[int64]time.Time{6: now},
|
||||
},
|
||||
wantEvent: noOpEvent{},
|
||||
wantFields: scTestParams{
|
||||
peers: map[string]*scPeer{"P2": {height: 8, state: peerStateReady}},
|
||||
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8},
|
||||
pending: map[int64]types.NodeID{6: "P2"},
|
||||
pending: map[int64]p2p.NodeID{6: "P2"},
|
||||
pendingTime: map[int64]time.Time{6: now},
|
||||
},
|
||||
},
|
||||
@@ -1528,7 +1529,7 @@ func TestScHandleNoBlockResponse(t *testing.T) {
|
||||
fields: scTestParams{
|
||||
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
|
||||
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8},
|
||||
pending: map[int64]types.NodeID{6: "P1"},
|
||||
pending: map[int64]p2p.NodeID{6: "P1"},
|
||||
pendingTime: map[int64]time.Time{6: now},
|
||||
},
|
||||
wantEvent: scPeerError{peerID: "P1", reason: fmt.Errorf("some error")},
|
||||
@@ -1551,7 +1552,7 @@ func TestScHandleNoBlockResponse(t *testing.T) {
|
||||
func TestScHandleBlockProcessed(t *testing.T) {
|
||||
now := time.Now()
|
||||
processed6FromP1 := pcBlockProcessed{
|
||||
peerID: types.NodeID("P1"),
|
||||
peerID: p2p.NodeID("P1"),
|
||||
height: 6,
|
||||
}
|
||||
|
||||
@@ -1578,7 +1579,7 @@ func TestScHandleBlockProcessed(t *testing.T) {
|
||||
initHeight: 6,
|
||||
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
|
||||
allB: []int64{6, 7, 8},
|
||||
pending: map[int64]types.NodeID{6: "P1"},
|
||||
pending: map[int64]p2p.NodeID{6: "P1"},
|
||||
pendingTime: map[int64]time.Time{6: now},
|
||||
},
|
||||
args: args{event: processed6FromP1},
|
||||
@@ -1590,7 +1591,7 @@ func TestScHandleBlockProcessed(t *testing.T) {
|
||||
initHeight: 6,
|
||||
peers: map[string]*scPeer{"P1": {height: 7, state: peerStateReady}},
|
||||
allB: []int64{6, 7},
|
||||
received: map[int64]types.NodeID{6: "P1", 7: "P1"},
|
||||
received: map[int64]p2p.NodeID{6: "P1", 7: "P1"},
|
||||
},
|
||||
args: args{event: processed6FromP1},
|
||||
wantEvent: scFinishedEv{},
|
||||
@@ -1601,8 +1602,8 @@ func TestScHandleBlockProcessed(t *testing.T) {
|
||||
initHeight: 6,
|
||||
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
|
||||
allB: []int64{6, 7, 8},
|
||||
pending: map[int64]types.NodeID{7: "P1", 8: "P1"},
|
||||
received: map[int64]types.NodeID{6: "P1"},
|
||||
pending: map[int64]p2p.NodeID{7: "P1", 8: "P1"},
|
||||
received: map[int64]p2p.NodeID{6: "P1"},
|
||||
},
|
||||
args: args{event: processed6FromP1},
|
||||
wantEvent: noOpEvent{},
|
||||
@@ -1645,7 +1646,7 @@ func TestScHandleBlockVerificationFailure(t *testing.T) {
|
||||
initHeight: 6,
|
||||
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
|
||||
allB: []int64{6, 7, 8},
|
||||
pending: map[int64]types.NodeID{6: "P1"},
|
||||
pending: map[int64]p2p.NodeID{6: "P1"},
|
||||
pendingTime: map[int64]time.Time{6: now},
|
||||
},
|
||||
args: args{event: pcBlockVerificationFailure{height: 10, firstPeerID: "P1", secondPeerID: "P1"}},
|
||||
@@ -1657,7 +1658,7 @@ func TestScHandleBlockVerificationFailure(t *testing.T) {
|
||||
initHeight: 6,
|
||||
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}, "P2": {height: 8, state: peerStateReady}},
|
||||
allB: []int64{6, 7, 8},
|
||||
pending: map[int64]types.NodeID{6: "P1"},
|
||||
pending: map[int64]p2p.NodeID{6: "P1"},
|
||||
pendingTime: map[int64]time.Time{6: now},
|
||||
},
|
||||
args: args{event: pcBlockVerificationFailure{height: 10, firstPeerID: "P1", secondPeerID: "P1"}},
|
||||
@@ -1669,7 +1670,7 @@ func TestScHandleBlockVerificationFailure(t *testing.T) {
|
||||
initHeight: 6,
|
||||
peers: map[string]*scPeer{"P1": {height: 7, state: peerStateReady}},
|
||||
allB: []int64{6, 7},
|
||||
received: map[int64]types.NodeID{6: "P1", 7: "P1"},
|
||||
received: map[int64]p2p.NodeID{6: "P1", 7: "P1"},
|
||||
},
|
||||
args: args{event: pcBlockVerificationFailure{height: 7, firstPeerID: "P1", secondPeerID: "P1"}},
|
||||
wantEvent: scFinishedEv{},
|
||||
@@ -1680,8 +1681,8 @@ func TestScHandleBlockVerificationFailure(t *testing.T) {
|
||||
initHeight: 5,
|
||||
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}, "P2": {height: 8, state: peerStateReady}},
|
||||
allB: []int64{5, 6, 7, 8},
|
||||
pending: map[int64]types.NodeID{7: "P1", 8: "P1"},
|
||||
received: map[int64]types.NodeID{5: "P1", 6: "P1"},
|
||||
pending: map[int64]p2p.NodeID{7: "P1", 8: "P1"},
|
||||
received: map[int64]p2p.NodeID{5: "P1", 6: "P1"},
|
||||
},
|
||||
args: args{event: pcBlockVerificationFailure{height: 5, firstPeerID: "P1", secondPeerID: "P1"}},
|
||||
wantEvent: noOpEvent{},
|
||||
@@ -1696,8 +1697,8 @@ func TestScHandleBlockVerificationFailure(t *testing.T) {
|
||||
"P3": {height: 8, state: peerStateReady},
|
||||
},
|
||||
allB: []int64{5, 6, 7, 8},
|
||||
pending: map[int64]types.NodeID{7: "P1", 8: "P1"},
|
||||
received: map[int64]types.NodeID{5: "P1", 6: "P1"},
|
||||
pending: map[int64]p2p.NodeID{7: "P1", 8: "P1"},
|
||||
received: map[int64]p2p.NodeID{5: "P1", 6: "P1"},
|
||||
},
|
||||
args: args{event: pcBlockVerificationFailure{height: 5, firstPeerID: "P1", secondPeerID: "P2"}},
|
||||
wantEvent: noOpEvent{},
|
||||
@@ -1716,7 +1717,7 @@ func TestScHandleBlockVerificationFailure(t *testing.T) {
|
||||
|
||||
func TestScHandleAddNewPeer(t *testing.T) {
|
||||
addP1 := bcAddNewPeer{
|
||||
peerID: types.NodeID("P1"),
|
||||
peerID: p2p.NodeID("P1"),
|
||||
}
|
||||
type args struct {
|
||||
event bcAddNewPeer
|
||||
@@ -1827,7 +1828,7 @@ func TestScHandleTryPrunePeer(t *testing.T) {
|
||||
allB: []int64{1, 2, 3, 4, 5, 6, 7},
|
||||
peerTimeout: time.Second},
|
||||
args: args{event: pruneEv},
|
||||
wantEvent: scPeersPruned{peers: []types.NodeID{"P4", "P5", "P6"}},
|
||||
wantEvent: scPeersPruned{peers: []p2p.NodeID{"P4", "P5", "P6"}},
|
||||
},
|
||||
{
|
||||
name: "mixed peers, finish after pruning",
|
||||
@@ -1925,7 +1926,7 @@ func TestScHandleTrySchedule(t *testing.T) {
|
||||
"P1": {height: 4, state: peerStateReady},
|
||||
"P2": {height: 5, state: peerStateReady}},
|
||||
allB: []int64{1, 2, 3, 4, 5},
|
||||
pending: map[int64]types.NodeID{
|
||||
pending: map[int64]p2p.NodeID{
|
||||
1: "P1", 2: "P1",
|
||||
3: "P2",
|
||||
},
|
||||
@@ -1943,7 +1944,7 @@ func TestScHandleTrySchedule(t *testing.T) {
|
||||
"P1": {height: 8, state: peerStateReady},
|
||||
"P3": {height: 8, state: peerStateReady}},
|
||||
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8},
|
||||
pending: map[int64]types.NodeID{
|
||||
pending: map[int64]p2p.NodeID{
|
||||
1: "P1", 2: "P1",
|
||||
3: "P3", 4: "P3",
|
||||
5: "P2", 6: "P2",
|
||||
@@ -2105,7 +2106,7 @@ func TestScHandle(t *testing.T) {
|
||||
startTime: now,
|
||||
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}},
|
||||
allB: []int64{1, 2, 3},
|
||||
pending: map[int64]types.NodeID{1: "P1"},
|
||||
pending: map[int64]p2p.NodeID{1: "P1"},
|
||||
pendingTime: map[int64]time.Time{1: tick[1]},
|
||||
height: 1,
|
||||
},
|
||||
@@ -2117,7 +2118,7 @@ func TestScHandle(t *testing.T) {
|
||||
startTime: now,
|
||||
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}},
|
||||
allB: []int64{1, 2, 3},
|
||||
pending: map[int64]types.NodeID{1: "P1", 2: "P1"},
|
||||
pending: map[int64]p2p.NodeID{1: "P1", 2: "P1"},
|
||||
pendingTime: map[int64]time.Time{1: tick[1], 2: tick[2]},
|
||||
height: 1,
|
||||
},
|
||||
@@ -2129,7 +2130,7 @@ func TestScHandle(t *testing.T) {
|
||||
startTime: now,
|
||||
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}},
|
||||
allB: []int64{1, 2, 3},
|
||||
pending: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1"},
|
||||
pending: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1"},
|
||||
pendingTime: map[int64]time.Time{1: tick[1], 2: tick[2], 3: tick[3]},
|
||||
height: 1,
|
||||
},
|
||||
@@ -2141,9 +2142,9 @@ func TestScHandle(t *testing.T) {
|
||||
startTime: now,
|
||||
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[4]}},
|
||||
allB: []int64{1, 2, 3},
|
||||
pending: map[int64]types.NodeID{2: "P1", 3: "P1"},
|
||||
pending: map[int64]p2p.NodeID{2: "P1", 3: "P1"},
|
||||
pendingTime: map[int64]time.Time{2: tick[2], 3: tick[3]},
|
||||
received: map[int64]types.NodeID{1: "P1"},
|
||||
received: map[int64]p2p.NodeID{1: "P1"},
|
||||
height: 1,
|
||||
},
|
||||
},
|
||||
@@ -2154,9 +2155,9 @@ func TestScHandle(t *testing.T) {
|
||||
startTime: now,
|
||||
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[5]}},
|
||||
allB: []int64{1, 2, 3},
|
||||
pending: map[int64]types.NodeID{3: "P1"},
|
||||
pending: map[int64]p2p.NodeID{3: "P1"},
|
||||
pendingTime: map[int64]time.Time{3: tick[3]},
|
||||
received: map[int64]types.NodeID{1: "P1", 2: "P1"},
|
||||
received: map[int64]p2p.NodeID{1: "P1", 2: "P1"},
|
||||
height: 1,
|
||||
},
|
||||
},
|
||||
@@ -2167,29 +2168,29 @@ func TestScHandle(t *testing.T) {
|
||||
startTime: now,
|
||||
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[6]}},
|
||||
allB: []int64{1, 2, 3},
|
||||
received: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1"},
|
||||
received: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1"},
|
||||
height: 1,
|
||||
},
|
||||
},
|
||||
{ // processed block 1
|
||||
args: args{event: pcBlockProcessed{peerID: types.NodeID("P1"), height: 1}},
|
||||
args: args{event: pcBlockProcessed{peerID: p2p.NodeID("P1"), height: 1}},
|
||||
wantEvent: noOpEvent{},
|
||||
wantSc: &scTestParams{
|
||||
startTime: now,
|
||||
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[6]}},
|
||||
allB: []int64{2, 3},
|
||||
received: map[int64]types.NodeID{2: "P1", 3: "P1"},
|
||||
received: map[int64]p2p.NodeID{2: "P1", 3: "P1"},
|
||||
height: 2,
|
||||
},
|
||||
},
|
||||
{ // processed block 2
|
||||
args: args{event: pcBlockProcessed{peerID: types.NodeID("P1"), height: 2}},
|
||||
args: args{event: pcBlockProcessed{peerID: p2p.NodeID("P1"), height: 2}},
|
||||
wantEvent: scFinishedEv{},
|
||||
wantSc: &scTestParams{
|
||||
startTime: now,
|
||||
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[6]}},
|
||||
allB: []int64{3},
|
||||
received: map[int64]types.NodeID{3: "P1"},
|
||||
received: map[int64]p2p.NodeID{3: "P1"},
|
||||
height: 3,
|
||||
},
|
||||
},
|
||||
@@ -2205,7 +2206,7 @@ func TestScHandle(t *testing.T) {
|
||||
"P1": {height: 4, state: peerStateReady, lastTouched: tick[6]},
|
||||
"P2": {height: 3, state: peerStateReady, lastTouched: tick[6]}},
|
||||
allB: []int64{1, 2, 3, 4},
|
||||
received: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1"},
|
||||
received: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1"},
|
||||
height: 1,
|
||||
},
|
||||
args: args{event: pcBlockVerificationFailure{height: 1, firstPeerID: "P1", secondPeerID: "P1"}},
|
||||
@@ -2216,7 +2217,7 @@ func TestScHandle(t *testing.T) {
|
||||
"P1": {height: 4, state: peerStateRemoved, lastTouched: tick[6]},
|
||||
"P2": {height: 3, state: peerStateReady, lastTouched: tick[6]}},
|
||||
allB: []int64{1, 2, 3},
|
||||
received: map[int64]types.NodeID{},
|
||||
received: map[int64]p2p.NodeID{},
|
||||
height: 1,
|
||||
},
|
||||
},
|
||||
@@ -2229,7 +2230,7 @@ func TestScHandle(t *testing.T) {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
var sc *scheduler
|
||||
for i, step := range tt.steps {
|
||||
// First step must always initialize the currentState as state.
|
||||
// First step must always initialise the currentState as state.
|
||||
if step.currentSc != nil {
|
||||
sc = newTestScheduler(*step.currentSc)
|
||||
}
|
||||
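The scheduler test hunks above are a mechanical rename of the node-ID type between packages (`types.NodeID` on one side, `p2p.NodeID` on the other). A minimal sketch of why the rename type-checks everywhere without touching values, assuming only that both are defined string types as the diff suggests:

```go
package main

import "fmt"

// NodeID stands in for the defined string type that moved between
// packages; only the qualifier at each call site changes in the diff.
type NodeID string

func main() {
	// Map keys and literal conversions are the only touched call sites;
	// the underlying string representation is unchanged.
	pending := map[int64]NodeID{1: "P1", 2: "P1"}
	peer := NodeID("P1")
	fmt.Println(pending[1] == peer) // true
}
```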
@@ -46,8 +46,9 @@ func main() {
rootCA = flag.String("rootcafile", "", "absolute path to root CA")
prometheusAddr = flag.String("prometheus-addr", "", "address for prometheus endpoint (host:port)")

logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false).
With("module", "priv_val")
logger = log.NewTMLogger(
log.NewSyncWriter(os.Stdout),
).With("module", "priv_val")
)
flag.Parse()

@@ -63,11 +64,7 @@ func main() {
"rootCA", *rootCA,
)

pv, err := privval.LoadFilePV(*privValKeyPath, *privValStatePath)
if err != nil {
fmt.Fprint(os.Stderr, err)
os.Exit(1)
}
pv := privval.LoadFilePV(*privValKeyPath, *privValStatePath)

opts := []grpc.ServerOption{}
if !*insecure {

@@ -1,6 +1,8 @@
package debug

import (
"os"

"github.com/spf13/cobra"

"github.com/tendermint/tendermint/libs/log"
@@ -15,7 +17,7 @@ var (
flagProfAddr = "pprof-laddr"
flagFrequency = "frequency"

logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
logger = log.NewTMLogger(log.NewSyncWriter(os.Stdout))
)

// DebugCmd defines the root command containing subcommands that assist in
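Both hunks above swap the same two logger constructors. A sketch of the older TMLogger-style construction shown on the right-hand side of the diff, using only calls that appear in these hunks (the module key and log message are illustrative):

```go
package main

import (
	"os"

	"github.com/tendermint/tendermint/libs/log"
)

func main() {
	// A plain-text logger over a synchronized stdout writer, tagged
	// with a module key, as the right-hand side of the diff does.
	logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout)).With("module", "priv_val")
	logger.Info("validator signer starting")
}
```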
@@ -59,7 +59,7 @@ func dumpCmdHandler(_ *cobra.Command, args []string) error {
}
}

rpc, err := rpchttp.New(nodeRPCAddr)
rpc, err := rpchttp.New(nodeRPCAddr, "/websocket")
if err != nil {
return fmt.Errorf("failed to create new http client: %w", err)
}

@@ -28,7 +28,7 @@ go-routine state, and the node's WAL and config information. This aggregated data
is packaged into a compressed archive.

Example:
$ tendermint debug kill 34255 /path/to/tm-debug.zip`,
$ tendermint debug 34255 /path/to/tm-debug.zip`,
Args: cobra.ExactArgs(2),
RunE: killCmdHandler,
}
@@ -44,7 +44,7 @@ func killCmdHandler(cmd *cobra.Command, args []string) error {
return errors.New("invalid output file")
}

rpc, err := rpchttp.New(nodeRPCAddr)
rpc, err := rpchttp.New(nodeRPCAddr, "/websocket")
if err != nil {
return fmt.Errorf("failed to create new http client: %w", err)
}
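The recurring change in these hunks is the RPC client constructor gaining or dropping an explicit websocket endpoint argument. A sketch of the two-argument form shown on the right-hand side, with a placeholder local address:

```go
package main

import (
	"fmt"

	rpchttp "github.com/tendermint/tendermint/rpc/client/http"
)

func main() {
	// Two-argument form from the diff: RPC address plus websocket path.
	rpc, err := rpchttp.New("tcp://127.0.0.1:26657", "/websocket")
	if err != nil {
		fmt.Println("failed to create new http client:", err)
		return
	}
	_ = rpc // use the client for status/debug queries
}
```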
@@ -79,11 +79,7 @@ func killCmdHandler(cmd *cobra.Command, args []string) error {

logger.Info("copying node WAL...")
if err := copyWAL(conf, tmpDir); err != nil {
if !os.IsNotExist(err) {
return err
}

logger.Info("node WAL does not exist; continuing...")
return err
}

logger.Info("copying node configuration...")

@@ -6,19 +6,21 @@ import (
"github.com/spf13/cobra"

tmjson "github.com/tendermint/tendermint/libs/json"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tendermint/p2p"
)

// GenNodeKeyCmd allows the generation of a node key. It prints JSON-encoded
// NodeKey to the standard output.
var GenNodeKeyCmd = &cobra.Command{
Use: "gen-node-key",
Short: "Generate a new node key",
RunE: genNodeKey,
Use: "gen-node-key",
Aliases: []string{"gen_node_key"},
Short: "Generate a new node key",
RunE: genNodeKey,
PreRun: deprecateSnakeCase,
}

func genNodeKey(cmd *cobra.Command, args []string) error {
nodeKey := types.GenNodeKey()
nodeKey := p2p.GenNodeKey()

bz, err := tmjson.Marshal(nodeKey)
if err != nil {
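The alias block added here follows one pattern across all the commands in this compare: a snake_case alias plus a PreRun hook that prints a deprecation warning. A self-contained sketch of that pattern; the hook body is taken verbatim from the root.go hunk later in this diff, and the command name is just an example:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/spf13/cobra"
)

// deprecateSnakeCase warns when a command is reached through its
// snake_case alias rather than its hyphen-case name.
func deprecateSnakeCase(cmd *cobra.Command, args []string) {
	if strings.Contains(cmd.CalledAs(), "_") {
		fmt.Println("Deprecated: snake_case commands will be replaced by hyphen-case commands in the next major release")
	}
}

func main() {
	root := &cobra.Command{Use: "tendermint"}
	root.AddCommand(&cobra.Command{
		Use:     "gen-node-key",
		Aliases: []string{"gen_node_key"},
		PreRun:  deprecateSnakeCase,
		Run:     func(cmd *cobra.Command, args []string) { fmt.Println("generated") },
	})
	root.SetArgs([]string{"gen_node_key"}) // alias triggers the warning
	_ = root.Execute()
}
```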
@@ -13,9 +13,11 @@ import (
// GenValidatorCmd allows the generation of a keypair for a
// validator.
var GenValidatorCmd = &cobra.Command{
Use: "gen-validator",
Short: "Generate new validator keypair",
RunE: genValidator,
Use: "gen-validator",
Aliases: []string{"gen_validator"},
Short: "Generate new validator keypair",
RunE: genValidator,
PreRun: deprecateSnakeCase,
}

func init() {

@@ -1,8 +1,6 @@
package commands

import (
"context"
"errors"
"fmt"

"github.com/spf13/cobra"
@@ -10,19 +8,18 @@ import (
cfg "github.com/tendermint/tendermint/config"
tmos "github.com/tendermint/tendermint/libs/os"
tmrand "github.com/tendermint/tendermint/libs/rand"
tmtime "github.com/tendermint/tendermint/libs/time"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/privval"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
"github.com/tendermint/tendermint/types"
tmtime "github.com/tendermint/tendermint/types/time"
)

// InitFilesCmd initializes a fresh Tendermint Core instance.
// InitFilesCmd initialises a fresh Tendermint Core instance.
var InitFilesCmd = &cobra.Command{
Use: "init [full|validator|seed]",
Short: "Initializes a Tendermint node",
ValidArgs: []string{"full", "validator", "seed"},
// We allow for zero args so we can throw a more informative error
Args: cobra.MaximumNArgs(1),
RunE: initFiles,
Use: "init",
Short: "Initialize Tendermint",
RunE: initFiles,
}

var (
@@ -35,47 +32,36 @@ func init() {
}

func initFiles(cmd *cobra.Command, args []string) error {
if len(args) == 0 {
return errors.New("must specify a node type: tendermint init [validator|full|seed]")
}
config.Mode = args[0]
return initFilesWithConfig(config)
}

func initFilesWithConfig(config *cfg.Config) error {
// private validator
privValKeyFile := config.PrivValidatorKeyFile()
privValStateFile := config.PrivValidatorStateFile()
var (
pv *privval.FilePV
err error
)

if config.Mode == cfg.ModeValidator {
// private validator
privValKeyFile := config.PrivValidator.KeyFile()
privValStateFile := config.PrivValidator.StateFile()
if tmos.FileExists(privValKeyFile) {
pv, err = privval.LoadFilePV(privValKeyFile, privValStateFile)
if err != nil {
return err
}

logger.Info("Found private validator", "keyFile", privValKeyFile,
"stateFile", privValStateFile)
} else {
pv, err = privval.GenFilePV(privValKeyFile, privValStateFile, keyType)
if err != nil {
return err
}
pv.Save()
logger.Info("Generated private validator", "keyFile", privValKeyFile,
"stateFile", privValStateFile)
if tmos.FileExists(privValKeyFile) {
pv = privval.LoadFilePV(privValKeyFile, privValStateFile)
logger.Info("Found private validator", "keyFile", privValKeyFile,
"stateFile", privValStateFile)
} else {
pv, err = privval.GenFilePV(privValKeyFile, privValStateFile, keyType)
if err != nil {
return err
}
pv.Save()
logger.Info("Generated private validator", "keyFile", privValKeyFile,
"stateFile", privValStateFile)
}

nodeKeyFile := config.NodeKeyFile()
if tmos.FileExists(nodeKeyFile) {
logger.Info("Found node key", "path", nodeKeyFile)
} else {
if _, err := types.LoadOrGenNodeKey(nodeKeyFile); err != nil {
if _, err := p2p.LoadOrGenNodeKey(nodeKeyFile); err != nil {
return err
}
logger.Info("Generated node key", "path", nodeKeyFile)
@@ -93,26 +79,19 @@ func initFilesWithConfig(config *cfg.Config) error {
ConsensusParams: types.DefaultConsensusParams(),
}
if keyType == "secp256k1" {
genDoc.ConsensusParams.Validator = types.ValidatorParams{
genDoc.ConsensusParams.Validator = tmproto.ValidatorParams{
PubKeyTypes: []string{types.ABCIPubKeyTypeSecp256k1},
}
}

ctx, cancel := context.WithTimeout(context.TODO(), ctxTimeout)
defer cancel()

// if this is a validator we add it to genesis
if pv != nil {
pubKey, err := pv.GetPubKey(ctx)
if err != nil {
return fmt.Errorf("can't get pubkey: %w", err)
}
genDoc.Validators = []types.GenesisValidator{{
Address: pubKey.Address(),
PubKey: pubKey,
Power: 10,
}}
pubKey, err := pv.GetPubKey()
if err != nil {
return fmt.Errorf("can't get pubkey: %w", err)
}
genDoc.Validators = []types.GenesisValidator{{
Address: pubKey.Address(),
PubKey: pubKey,
Power: 10,
}}

if err := genDoc.SaveAs(genFile); err != nil {
return err
@@ -120,9 +99,5 @@ func initFilesWithConfig(config *cfg.Config) error {
logger.Info("Generated genesis file", "path", genFile)
}

// write config file
cfg.WriteConfigFile(config.RootDir, config)
logger.Info("Generated config", "mode", config.Mode)

return nil
}
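The core of the init diff is a load-or-generate decision for the file-based private validator. A condensed sketch of that flow, assuming the API shapes shown on the right-hand side of the hunk (no-error `LoadFilePV`, error-returning `GenFilePV` with a key type); file paths are placeholders:

```go
package main

import (
	"fmt"

	tmos "github.com/tendermint/tendermint/libs/os"
	"github.com/tendermint/tendermint/privval"
)

// loadOrGenFilePV reuses an existing key file if present, otherwise
// generates a fresh keypair and persists it, mirroring the hunk above.
func loadOrGenFilePV(keyFile, stateFile, keyType string) (*privval.FilePV, error) {
	if tmos.FileExists(keyFile) {
		return privval.LoadFilePV(keyFile, stateFile), nil
	}
	pv, err := privval.GenFilePV(keyFile, stateFile, keyType)
	if err != nil {
		return nil, err
	}
	pv.Save()
	return pv, nil
}

func main() {
	pv, err := loadOrGenFilePV("priv_validator_key.json", "priv_validator_state.json", "ed25519")
	if err != nil {
		fmt.Println("can't init validator:", err)
		return
	}
	fmt.Println("validator address:", pv.GetAddress())
}
```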
@@ -1,87 +0,0 @@
package commands

import (
"context"
"os"
"os/signal"
"syscall"

"github.com/spf13/cobra"

cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/inspect"
"github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/state/indexer/sink"
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
)

// InspectCmd is the command for starting an inspect server.
var InspectCmd = &cobra.Command{
Use: "inspect",
Short: "Run an inspect server for investigating Tendermint state",
Long: `
inspect runs a subset of Tendermint's RPC endpoints that are useful for debugging
issues with Tendermint.

When the Tendermint consensus engine detects inconsistent state, it will crash the
tendermint process. Tendermint will not start up while in this inconsistent state.
The inspect command can be used to query the block and state store using Tendermint
RPC calls to debug issues of inconsistent state.
`,

RunE: runInspect,
}

func init() {
InspectCmd.Flags().
String("rpc.laddr",
config.RPC.ListenAddress, "RPC listenener address. Port required")
InspectCmd.Flags().
String("db-backend",
config.DBBackend, "database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb")
InspectCmd.Flags().
String("db-dir", config.DBPath, "database directory")
}

func runInspect(cmd *cobra.Command, args []string) error {
ctx, cancel := context.WithCancel(cmd.Context())
defer cancel()

c := make(chan os.Signal, 1)
signal.Notify(c, syscall.SIGTERM, syscall.SIGINT)
go func() {
<-c
cancel()
}()

blockStoreDB, err := cfg.DefaultDBProvider(&cfg.DBContext{ID: "blockstore", Config: config})
if err != nil {
return err
}
blockStore := store.NewBlockStore(blockStoreDB)
stateDB, err := cfg.DefaultDBProvider(&cfg.DBContext{ID: "state", Config: config})
if err != nil {
if err := blockStoreDB.Close(); err != nil {
logger.Error("error closing block store db", "error", err)
}
return err
}
genDoc, err := types.GenesisDocFromFile(config.GenesisFile())
if err != nil {
return err
}
sinks, err := sink.EventSinksFromConfig(config, cfg.DefaultDBProvider, genDoc.ChainID)
if err != nil {
return err
}
stateStore := state.NewStore(stateDB)

ins := inspect.New(config.RPC, blockStore, stateStore, sinks, logger)

logger.Info("starting inspect server")
if err := ins.Run(ctx); err != nil {
return err
}
return nil
}
@@ -1,64 +0,0 @@
package commands

import (
"context"
"fmt"

"github.com/spf13/cobra"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/scripts/keymigrate"
)

func MakeKeyMigrateCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "key-migrate",
Short: "Run Database key migration",
RunE: func(cmd *cobra.Command, args []string) error {
ctx, cancel := context.WithCancel(cmd.Context())
defer cancel()

contexts := []string{
// this is ordered to put the
// (presumably) biggest/most important
// subsets first.
"blockstore",
"state",
"peerstore",
"tx_index",
"evidence",
"light",
}

for idx, dbctx := range contexts {
logger.Info("beginning a key migration",
"dbctx", dbctx,
"num", idx+1,
"total", len(contexts),
)

db, err := cfg.DefaultDBProvider(&cfg.DBContext{
ID: dbctx,
Config: config,
})

if err != nil {
return fmt.Errorf("constructing database handle: %w", err)
}

if err = keymigrate.Migrate(ctx, db); err != nil {
return fmt.Errorf("running migration for context %q: %w",
dbctx, err)
}
}

logger.Info("completed database migration successfully")

return nil
},
}

// allow database info to be overridden via cli
addDBFlags(cmd)

return cmd
}
@@ -1,12 +1,14 @@
package commands

import (
"bufio"
"context"
"errors"
"fmt"
"net/http"
"os"
"path/filepath"
"regexp"
"strings"
"time"

@@ -14,6 +16,7 @@ import (

dbm "github.com/tendermint/tm-db"

"github.com/tendermint/tendermint/crypto/merkle"
"github.com/tendermint/tendermint/libs/log"
tmmath "github.com/tendermint/tendermint/libs/math"
tmos "github.com/tendermint/tendermint/libs/os"
@@ -21,6 +24,7 @@ import (
lproxy "github.com/tendermint/tendermint/light/proxy"
lrpc "github.com/tendermint/tendermint/light/rpc"
dbs "github.com/tendermint/tendermint/light/store/db"
rpchttp "github.com/tendermint/tendermint/rpc/client/http"
rpcserver "github.com/tendermint/tendermint/rpc/jsonrpc/server"
)

@@ -66,8 +70,7 @@ var (
trustedHash []byte
trustLevelStr string

logLevel string
logFormat string
verbose bool

primaryKey = []byte("primary")
witnessesKey = []byte("witnesses")
@@ -91,8 +94,7 @@ func init() {
"trusting period that headers can be verified within. Should be significantly less than the unbonding period")
LightCmd.Flags().Int64Var(&trustedHeight, "height", 1, "Trusted header's height")
LightCmd.Flags().BytesHexVar(&trustedHash, "hash", []byte{}, "Trusted header's hash")
LightCmd.Flags().StringVar(&logLevel, "log-level", log.LogLevelInfo, "The logging level (debug|info|warn|error|fatal)")
LightCmd.Flags().StringVar(&logFormat, "log-format", log.LogFormatPlain, "The logging format (text|json)")
LightCmd.Flags().BoolVar(&verbose, "verbose", false, "Verbose output")
LightCmd.Flags().StringVar(&trustLevelStr, "trust-level", "1/3",
"trust level. Must be between 1/3 and 3/3",
)
@@ -102,10 +104,15 @@ func init() {
}

func runProxy(cmd *cobra.Command, args []string) error {
logger, err := log.NewDefaultLogger(logFormat, logLevel, false)
if err != nil {
return err
// Initialise logger.
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
var option log.Option
if verbose {
option, _ = log.AllowLevel("debug")
} else {
option, _ = log.AllowLevel("info")
}
logger = log.NewFilter(logger, option)

chainID = args[0]
logger.Info("Creating client...", "chainID", chainID)
@@ -144,7 +151,25 @@ func runProxy(cmd *cobra.Command, args []string) error {
return fmt.Errorf("can't parse trust level: %w", err)
}

options := []light.Option{light.Logger(logger)}
options := []light.Option{
light.Logger(logger),
light.ConfirmationFunction(func(action string) bool {
fmt.Println(action)
scanner := bufio.NewScanner(os.Stdin)
for {
scanner.Scan()
response := scanner.Text()
switch response {
case "y", "Y":
return true
case "n", "N":
return false
default:
fmt.Println("please input 'Y' or 'n' and press ENTER")
}
}
}),
}

if sequential {
options = append(options, light.SequentialVerification())
@@ -152,25 +177,40 @@ func runProxy(cmd *cobra.Command, args []string) error {
options = append(options, light.SkippingVerification(trustLevel))
}

// Initiate the light client. If the trusted store already has blocks in it, this
// will be used else we use the trusted options.
c, err := light.NewHTTPClient(
context.Background(),
chainID,
light.TrustOptions{
Period: trustingPeriod,
Height: trustedHeight,
Hash: trustedHash,
},
primaryAddr,
witnessesAddrs,
dbs.New(db),
options...,
)
var c *light.Client
if trustedHeight > 0 && len(trustedHash) > 0 { // fresh installation
c, err = light.NewHTTPClient(
context.Background(),
chainID,
light.TrustOptions{
Period: trustingPeriod,
Height: trustedHeight,
Hash: trustedHash,
},
primaryAddr,
witnessesAddrs,
dbs.New(db),
options...,
)
} else { // continue from latest state
c, err = light.NewHTTPClientFromTrustedStore(
chainID,
trustingPeriod,
primaryAddr,
witnessesAddrs,
dbs.New(db),
options...,
)
}
if err != nil {
return err
}

rpcClient, err := rpchttp.New(primaryAddr, "/websocket")
if err != nil {
return fmt.Errorf("http client for %s: %w", primaryAddr, err)
}

cfg := rpcserver.DefaultConfig()
cfg.MaxBodyBytes = config.RPC.MaxBodyBytes
cfg.MaxHeaderBytes = config.RPC.MaxHeaderBytes
@@ -182,11 +222,12 @@ func runProxy(cmd *cobra.Command, args []string) error {
cfg.WriteTimeout = config.RPC.TimeoutBroadcastTxCommit + 1*time.Second
}

p, err := lproxy.NewProxy(c, listenAddr, primaryAddr, cfg, logger, lrpc.KeyPathFn(lrpc.DefaultMerkleKeyPathFn()))
if err != nil {
return err
p := lproxy.Proxy{
Addr: listenAddr,
Config: cfg,
Client: lrpc.NewClient(rpcClient, c, lrpc.KeyPathFn(defaultMerkleKeyPathFn())),
Logger: logger,
}

// Stop upon receiving SIGTERM or CTRL-C.
tmos.TrapSignal(logger, func() {
p.Listener.Close()
@@ -225,3 +266,21 @@ func saveProviders(db dbm.DB, primaryAddr, witnessesAddrs string) error {
}
return nil
}

func defaultMerkleKeyPathFn() lrpc.KeyPathFunc {
// regexp for extracting store name from /abci_query path
storeNameRegexp := regexp.MustCompile(`\/store\/(.+)\/key`)

return func(path string, key []byte) (merkle.KeyPath, error) {
matches := storeNameRegexp.FindStringSubmatch(path)
if len(matches) != 2 {
return nil, fmt.Errorf("can't find store name in %s using %s", path, storeNameRegexp)
}
storeName := matches[1]

kp := merkle.KeyPath{}
kp = kp.AppendKey([]byte(storeName), merkle.KeyEncodingURL)
kp = kp.AppendKey(key, merkle.KeyEncodingURL)
return kp, nil
}
}
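The key branch added to the light proxy above chooses between bootstrapping from explicit trust options and resuming from blocks already in the trusted store. A condensed sketch of that decision, using the constructors exactly as they appear in the hunk:

```go
package lightsketch

import (
	"context"
	"time"

	dbm "github.com/tendermint/tm-db"

	"github.com/tendermint/tendermint/light"
	dbs "github.com/tendermint/tendermint/light/store/db"
)

// newLightClient bootstraps from explicit trust options on a fresh
// install; otherwise it resumes from the latest state in the trusted store.
func newLightClient(
	ctx context.Context,
	chainID, primaryAddr string,
	witnesses []string,
	trustedHeight int64,
	trustedHash []byte,
	trustingPeriod time.Duration,
	db dbm.DB,
	opts ...light.Option,
) (*light.Client, error) {
	if trustedHeight > 0 && len(trustedHash) > 0 { // fresh installation
		return light.NewHTTPClient(ctx, chainID, light.TrustOptions{
			Period: trustingPeriod,
			Height: trustedHeight,
			Hash:   trustedHash,
		}, primaryAddr, witnesses, dbs.New(db), opts...)
	}
	// continue from latest state already persisted in the trusted store
	return light.NewHTTPClientFromTrustedStore(
		chainID, trustingPeriod, primaryAddr, witnesses, dbs.New(db), opts...)
}
```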
@@ -5,15 +5,17 @@ import (

"github.com/spf13/cobra"

"github.com/tendermint/tendermint/internal/p2p/upnp"
tmjson "github.com/tendermint/tendermint/libs/json"
"github.com/tendermint/tendermint/p2p/upnp"
)

// ProbeUpnpCmd adds capabilities to test the UPnP functionality.
var ProbeUpnpCmd = &cobra.Command{
Use: "probe-upnp",
Short: "Test UPnP functionality",
RunE: probeUpnp,
Use: "probe-upnp",
Aliases: []string{"probe_upnp"},
Short: "Test UPnP functionality",
RunE: probeUpnp,
PreRun: deprecateSnakeCase,
}

func probeUpnp(cmd *cobra.Command, args []string) error {

@@ -1,251 +0,0 @@
package commands

import (
"errors"
"fmt"
"strings"

"github.com/spf13/cobra"
tmdb "github.com/tendermint/tm-db"

abcitypes "github.com/tendermint/tendermint/abci/types"
tmcfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/internal/libs/progressbar"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
"github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/state/indexer"
"github.com/tendermint/tendermint/state/indexer/sink/kv"
"github.com/tendermint/tendermint/state/indexer/sink/psql"
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
)

const (
reindexFailed = "event re-index failed: "
)

// ReIndexEventCmd allows re-index the event by given block height interval
var ReIndexEventCmd = &cobra.Command{
Use: "reindex-event",
Short: "reindex events to the event store backends",
Long: `
reindex-event is an offline tooling to re-index block and tx events to the eventsinks,
you can run this command when the event store backend dropped/disconnected or you want to replace the backend.
The default start-height is 0, meaning the tooling will start reindex from the base block height(inclusive); and the
default end-height is 0, meaning the tooling will reindex until the latest block height(inclusive). User can omits
either or both arguments.
`,
Example: `
tendermint reindex-event
tendermint reindex-event --start-height 2
tendermint reindex-event --end-height 10
tendermint reindex-event --start-height 2 --end-height 10
`,
Run: func(cmd *cobra.Command, args []string) {
bs, ss, err := loadStateAndBlockStore(config)
if err != nil {
fmt.Println(reindexFailed, err)
return
}

if err := checkValidHeight(bs); err != nil {
fmt.Println(reindexFailed, err)
return
}

es, err := loadEventSinks(config)
if err != nil {
fmt.Println(reindexFailed, err)
return
}

if err = eventReIndex(cmd, es, bs, ss); err != nil {
fmt.Println(reindexFailed, err)
return
}

fmt.Println("event re-index finished")
},
}

var (
startHeight int64
endHeight int64
)

func init() {
ReIndexEventCmd.Flags().Int64Var(&startHeight, "start-height", 0, "the block height would like to start for re-index")
ReIndexEventCmd.Flags().Int64Var(&endHeight, "end-height", 0, "the block height would like to finish for re-index")
}

func loadEventSinks(cfg *tmcfg.Config) ([]indexer.EventSink, error) {
// Check duplicated sinks.
sinks := map[string]bool{}
for _, s := range cfg.TxIndex.Indexer {
sl := strings.ToLower(s)
if sinks[sl] {
return nil, errors.New("found duplicated sinks, please check the tx-index section in the config.toml")
}
sinks[sl] = true
}

eventSinks := []indexer.EventSink{}

for k := range sinks {
switch k {
case string(indexer.NULL):
return nil, errors.New("found null event sink, please check the tx-index section in the config.toml")
case string(indexer.KV):
store, err := tmcfg.DefaultDBProvider(&tmcfg.DBContext{ID: "tx_index", Config: cfg})
if err != nil {
return nil, err
}
eventSinks = append(eventSinks, kv.NewEventSink(store))
case string(indexer.PSQL):
conn := cfg.TxIndex.PsqlConn
if conn == "" {
return nil, errors.New("the psql connection settings cannot be empty")
}
es, err := psql.NewEventSink(conn, chainID)
if err != nil {
return nil, err
}
eventSinks = append(eventSinks, es)
default:
return nil, errors.New("unsupported event sink type")
}
}

if len(eventSinks) == 0 {
return nil, errors.New("no proper event sink can do event re-indexing," +
" please check the tx-index section in the config.toml")
}

if !indexer.IndexingEnabled(eventSinks) {
return nil, fmt.Errorf("no event sink has been enabled")
}

return eventSinks, nil
}

func loadStateAndBlockStore(cfg *tmcfg.Config) (*store.BlockStore, state.Store, error) {
dbType := tmdb.BackendType(cfg.DBBackend)

// Get BlockStore
blockStoreDB, err := tmdb.NewDB("blockstore", dbType, cfg.DBDir())
if err != nil {
return nil, nil, err
}
blockStore := store.NewBlockStore(blockStoreDB)

// Get StateStore
stateDB, err := tmdb.NewDB("state", dbType, cfg.DBDir())
if err != nil {
return nil, nil, err
}
stateStore := state.NewStore(stateDB)

return blockStore, stateStore, nil
}

func eventReIndex(cmd *cobra.Command, es []indexer.EventSink, bs state.BlockStore, ss state.Store) error {

var bar progressbar.Bar
bar.NewOption(startHeight-1, endHeight)

fmt.Println("start re-indexing events:")
defer bar.Finish()
for i := startHeight; i <= endHeight; i++ {
select {
case <-cmd.Context().Done():
return fmt.Errorf("event re-index terminated at height %d: %w", i, cmd.Context().Err())
default:
b := bs.LoadBlock(i)
if b == nil {
return fmt.Errorf("not able to load block at height %d from the blockstore", i)
}

r, err := ss.LoadABCIResponses(i)
if err != nil {
return fmt.Errorf("not able to load ABCI Response at height %d from the statestore", i)
}

e := types.EventDataNewBlockHeader{
Header: b.Header,
NumTxs: int64(len(b.Txs)),
ResultBeginBlock: *r.BeginBlock,
ResultEndBlock: *r.EndBlock,
}

var batch *indexer.Batch
if e.NumTxs > 0 {
batch = indexer.NewBatch(e.NumTxs)

for i, tx := range b.Data.Txs {
tr := abcitypes.TxResult{
Height: b.Height,
Index: uint32(i),
Tx: tx,
Result: *(r.DeliverTxs[i]),
}

_ = batch.Add(&tr)
}
}

for _, sink := range es {
if err := sink.IndexBlockEvents(e); err != nil {
return fmt.Errorf("block event re-index at height %d failed: %w", i, err)
}

if batch != nil {
if err := sink.IndexTxEvents(batch.Ops); err != nil {
return fmt.Errorf("tx event re-index at height %d failed: %w", i, err)
}
}
}
}

bar.Play(i)
}

return nil
}

func checkValidHeight(bs state.BlockStore) error {
base := bs.Base()

if startHeight == 0 {
startHeight = base
fmt.Printf("set the start block height to the base height of the blockstore %d \n", base)
}

if startHeight < base {
return fmt.Errorf("%s (requested start height: %d, base height: %d)", ctypes.ErrHeightNotAvailable, startHeight, base)
}

height := bs.Height()

if startHeight > height {
return fmt.Errorf(
"%s (requested start height: %d, store height: %d)", ctypes.ErrHeightNotAvailable, startHeight, height)
}

if endHeight == 0 || endHeight > height {
endHeight = height
fmt.Printf("set the end block height to the latest height of the blockstore %d \n", height)
}

if endHeight < base {
return fmt.Errorf(
"%s (requested end height: %d, base height: %d)", ctypes.ErrHeightNotAvailable, endHeight, base)
}

if endHeight < startHeight {
return fmt.Errorf(
"%s (requested the end height: %d is less than the start height: %d)",
ctypes.ErrInvalidRequest, startHeight, endHeight)
}

return nil
}
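The height handling in checkValidHeight above reduces to clamping the requested window to the blockstore's [base, height] bounds, with 0 meaning "use the boundary". The same logic as a pure function, for reference:

```go
package main

import "fmt"

// clampReindexWindow reproduces the normalization in checkValidHeight:
// zero values default to the blockstore bounds, and the resulting window
// must lie within [base, height] with start <= end.
func clampReindexWindow(start, end, base, height int64) (int64, int64, error) {
	if start == 0 {
		start = base // default to the base height (inclusive)
	}
	if start < base || start > height {
		return 0, 0, fmt.Errorf("start height %d outside [%d, %d]", start, base, height)
	}
	if end == 0 || end > height {
		end = height // default/clamp to the latest height (inclusive)
	}
	if end < base || end < start {
		return 0, 0, fmt.Errorf("invalid end height %d for start %d", end, start)
	}
	return start, end, nil
}

func main() {
	s, e, err := clampReindexWindow(0, 0, 2, 10)
	fmt.Println(s, e, err) // 2 10 <nil>
}
```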
@@ -1,171 +0,0 @@
package commands

import (
"context"
"errors"
"testing"

"github.com/spf13/cobra"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"

abcitypes "github.com/tendermint/tendermint/abci/types"
tmcfg "github.com/tendermint/tendermint/config"
prototmstate "github.com/tendermint/tendermint/proto/tendermint/state"
"github.com/tendermint/tendermint/state/indexer"
"github.com/tendermint/tendermint/state/mocks"
"github.com/tendermint/tendermint/types"
)

const (
height int64 = 10
base int64 = 2
)

func setupReIndexEventCmd() *cobra.Command {
reIndexEventCmd := &cobra.Command{
Use: ReIndexEventCmd.Use,
Run: func(cmd *cobra.Command, args []string) {},
}

_ = reIndexEventCmd.ExecuteContext(context.Background())

return reIndexEventCmd
}

func TestReIndexEventCheckHeight(t *testing.T) {
mockBlockStore := &mocks.BlockStore{}
mockBlockStore.
On("Base").Return(base).
On("Height").Return(height)

testCases := []struct {
startHeight int64
endHeight int64
validHeight bool
}{
{0, 0, true},
{0, base, true},
{0, base - 1, false},
{0, height, true},
{0, height + 1, true},
{0, 0, true},
{base - 1, 0, false},
{base, 0, true},
{base, base, true},
{base, base - 1, false},
{base, height, true},
{base, height + 1, true},
{height, 0, true},
{height, base, false},
{height, height - 1, false},
{height, height, true},
{height, height + 1, true},
{height + 1, 0, false},
}

for _, tc := range testCases {
startHeight = tc.startHeight
endHeight = tc.endHeight

err := checkValidHeight(mockBlockStore)
if tc.validHeight {
require.NoError(t, err)
} else {
require.Error(t, err)
}
}
}

func TestLoadEventSink(t *testing.T) {
testCases := []struct {
sinks []string
connURL string
loadErr bool
}{
{[]string{}, "", true},
{[]string{"NULL"}, "", true},
{[]string{"KV"}, "", false},
{[]string{"KV", "KV"}, "", true},
{[]string{"PSQL"}, "", true}, // true because empty connect url
{[]string{"PSQL"}, "wrongUrl", true}, // true because wrong connect url
// skip to test PSQL connect with correct url
{[]string{"UnsupportedSinkType"}, "wrongUrl", true},
}

for _, tc := range testCases {
cfg := tmcfg.TestConfig()
cfg.TxIndex.Indexer = tc.sinks
cfg.TxIndex.PsqlConn = tc.connURL
_, err := loadEventSinks(cfg)
if tc.loadErr {
require.Error(t, err)
} else {
require.NoError(t, err)
}
}
}

func TestLoadBlockStore(t *testing.T) {
bs, ss, err := loadStateAndBlockStore(tmcfg.TestConfig())
require.NoError(t, err)
require.NotNil(t, bs)
require.NotNil(t, ss)

}
func TestReIndexEvent(t *testing.T) {
mockBlockStore := &mocks.BlockStore{}
mockStateStore := &mocks.Store{}
mockEventSink := &mocks.EventSink{}

mockBlockStore.
On("Base").Return(base).
On("Height").Return(height).
On("LoadBlock", base).Return(nil).Once().
On("LoadBlock", base).Return(&types.Block{Data: types.Data{Txs: types.Txs{make(types.Tx, 1)}}}).
On("LoadBlock", height).Return(&types.Block{Data: types.Data{Txs: types.Txs{make(types.Tx, 1)}}})

mockEventSink.
On("Type").Return(indexer.KV).
On("IndexBlockEvents", mock.AnythingOfType("types.EventDataNewBlockHeader")).Return(errors.New("")).Once().
On("IndexBlockEvents", mock.AnythingOfType("types.EventDataNewBlockHeader")).Return(nil).
On("IndexTxEvents", mock.AnythingOfType("[]*types.TxResult")).Return(errors.New("")).Once().
On("IndexTxEvents", mock.AnythingOfType("[]*types.TxResult")).Return(nil)

dtx := abcitypes.ResponseDeliverTx{}
abciResp := &prototmstate.ABCIResponses{
DeliverTxs: []*abcitypes.ResponseDeliverTx{&dtx},
EndBlock: &abcitypes.ResponseEndBlock{},
BeginBlock: &abcitypes.ResponseBeginBlock{},
}

mockStateStore.
On("LoadABCIResponses", base).Return(nil, errors.New("")).Once().
On("LoadABCIResponses", base).Return(abciResp, nil).
On("LoadABCIResponses", height).Return(abciResp, nil)

testCases := []struct {
startHeight int64
endHeight int64
reIndexErr bool
}{
{base, height, true}, // LoadBlock error
{base, height, true}, // LoadABCIResponses error
{base, height, true}, // index block event error
{base, height, true}, // index tx event error
{base, base, false},
{height, height, false},
}

for _, tc := range testCases {
startHeight = tc.startHeight
endHeight = tc.endHeight

err := eventReIndex(setupReIndexEventCmd(), []indexer.EventSink{mockEventSink}, mockBlockStore, mockStateStore)
if tc.reIndexErr {
require.Error(t, err)
} else {
require.NoError(t, err)
}
}
}
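The removed test above leans on testify's `.Once()` to sequence failures before successes on the same expectation. A minimal sketch of that technique with a hypothetical single-method mock:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/stretchr/testify/mock"
)

// mockStore is a hypothetical stand-in for the mocked BlockStore/EventSink
// pattern used in the removed test file.
type mockStore struct{ mock.Mock }

func (m *mockStore) Load(h int64) (string, error) {
	args := m.Called(h)
	return args.String(0), args.Error(1)
}

func main() {
	m := &mockStore{}
	// First matching call fails via .Once(); later calls fall through
	// to the open-ended expectation, exactly as in TestReIndexEvent.
	m.On("Load", int64(2)).Return("", errors.New("boom")).Once()
	m.On("Load", int64(2)).Return("block", nil)

	_, err := m.Load(2)
	fmt.Println(err) // boom
	v, _ := m.Load(2)
	fmt.Println(v) // block
}
```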
@@ -2,7 +2,8 @@ package commands
|
||||
|
||||
import (
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/tendermint/tendermint/internal/consensus"
|
||||
|
||||
"github.com/tendermint/tendermint/consensus"
|
||||
)
|
||||
|
||||
// ReplayCmd allows replaying of messages from the WAL.
|
||||
@@ -17,9 +18,11 @@ var ReplayCmd = &cobra.Command{
|
||||
// ReplayConsoleCmd allows replaying of messages from the WAL in a
|
||||
// console.
|
||||
var ReplayConsoleCmd = &cobra.Command{
|
||||
Use: "replay-console",
|
||||
Short: "Replay messages from WAL in a console",
|
||||
Use: "replay-console",
|
||||
Aliases: []string{"replay_console"},
|
||||
Short: "Replay messages from WAL in a console",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
consensus.RunReplayFile(config.BaseConfig, config.Consensus, true)
|
||||
},
|
||||
PreRun: deprecateSnakeCase,
|
||||
}
|
||||
|
||||
@@ -14,9 +14,11 @@ import (
|
||||
// ResetAllCmd removes the database of this Tendermint core
|
||||
// instance.
|
||||
var ResetAllCmd = &cobra.Command{
|
||||
Use: "unsafe-reset-all",
|
||||
Short: "(unsafe) Remove all the data and WAL, reset this node's validator to genesis state",
|
||||
RunE: resetAll,
|
||||
Use: "unsafe-reset-all",
|
||||
Aliases: []string{"unsafe_reset_all"},
|
||||
Short: "(unsafe) Remove all the data and WAL, reset this node's validator to genesis state",
|
||||
Run: resetAll,
|
||||
PreRun: deprecateSnakeCase,
|
||||
}
|
||||
|
||||
var keepAddrBook bool
|
||||
@@ -29,27 +31,29 @@ func init() {
|
||||
|
||||
// ResetPrivValidatorCmd resets the private validator files.
|
||||
var ResetPrivValidatorCmd = &cobra.Command{
|
||||
Use: "unsafe-reset-priv-validator",
|
||||
Short: "(unsafe) Reset this node's validator to genesis state",
|
||||
RunE: resetPrivValidator,
|
||||
Use: "unsafe-reset-priv-validator",
|
||||
Aliases: []string{"unsafe_reset_priv_validator"},
|
||||
Short: "(unsafe) Reset this node's validator to genesis state",
|
||||
Run: resetPrivValidator,
|
||||
PreRun: deprecateSnakeCase,
|
||||
}
|
||||
|
||||
// XXX: this is totally unsafe.
|
||||
// it's only suitable for testnets.
|
||||
func resetAll(cmd *cobra.Command, args []string) error {
|
||||
return ResetAll(config.DBDir(), config.P2P.AddrBookFile(), config.PrivValidator.KeyFile(),
|
||||
config.PrivValidator.StateFile(), logger)
|
||||
func resetAll(cmd *cobra.Command, args []string) {
|
||||
ResetAll(config.DBDir(), config.P2P.AddrBookFile(), config.PrivValidatorKeyFile(),
|
||||
config.PrivValidatorStateFile(), logger)
|
||||
}
|
||||
|
||||
// XXX: this is totally unsafe.
|
||||
// it's only suitable for testnets.
|
||||
func resetPrivValidator(cmd *cobra.Command, args []string) error {
|
||||
return resetFilePV(config.PrivValidator.KeyFile(), config.PrivValidator.StateFile(), logger)
|
||||
func resetPrivValidator(cmd *cobra.Command, args []string) {
|
||||
resetFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile(), logger)
|
||||
}
|
||||
|
||||
// ResetAll removes address book files plus all data, and resets the privValdiator data.
|
||||
// Exported so other CLI tools can use it.
|
||||
func ResetAll(dbDir, addrBookFile, privValKeyFile, privValStateFile string, logger log.Logger) error {
|
||||
func ResetAll(dbDir, addrBookFile, privValKeyFile, privValStateFile string, logger log.Logger) {
|
||||
if keepAddrBook {
|
||||
logger.Info("The address book remains intact")
|
||||
} else {
|
||||
@@ -64,28 +68,24 @@ func ResetAll(dbDir, addrBookFile, privValKeyFile, privValStateFile string, logg
|
||||
if err := tmos.EnsureDir(dbDir, 0700); err != nil {
|
||||
logger.Error("unable to recreate dbDir", "err", err)
|
||||
}
|
||||
return resetFilePV(privValKeyFile, privValStateFile, logger)
|
||||
resetFilePV(privValKeyFile, privValStateFile, logger)
|
||||
}
|
||||
|
||||
func resetFilePV(privValKeyFile, privValStateFile string, logger log.Logger) error {
|
||||
func resetFilePV(privValKeyFile, privValStateFile string, logger log.Logger) {
|
||||
if _, err := os.Stat(privValKeyFile); err == nil {
|
||||
pv, err := privval.LoadFilePVEmptyState(privValKeyFile, privValStateFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pv := privval.LoadFilePVEmptyState(privValKeyFile, privValStateFile)
|
||||
pv.Reset()
|
||||
logger.Info("Reset private validator file to genesis state", "keyFile", privValKeyFile,
|
||||
"stateFile", privValStateFile)
|
||||
} else {
|
||||
pv, err := privval.GenFilePV(privValKeyFile, privValStateFile, keyType)
|
||||
if err != nil {
|
||||
return err
|
||||
panic(err)
|
||||
}
|
||||
pv.Save()
|
||||
logger.Info("Generated private validator file", "keyFile", privValKeyFile,
|
||||
"stateFile", privValStateFile)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func removeAddrBook(addrBookFile string, logger log.Logger) {
|
||||
|
||||
@@ -2,19 +2,21 @@ package commands

import (
	"fmt"
	"time"
	"os"
	"strings"

	"github.com/spf13/cobra"
	"github.com/spf13/viper"

	cfg "github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/libs/cli"
	tmflags "github.com/tendermint/tendermint/libs/cli/flags"
	"github.com/tendermint/tendermint/libs/log"
)

var (
	config = cfg.DefaultConfig()
	logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
	ctxTimeout = 4 * time.Second
	config = cfg.DefaultConfig()
	logger = log.NewTMLogger(log.NewSyncWriter(os.Stdout))
)

func init() {
@@ -49,18 +51,28 @@ var RootCmd = &cobra.Command{
		if cmd.Name() == VersionCmd.Name() {
			return nil
		}

		config, err = ParseConfig()
		if err != nil {
			return err
		}

		logger, err = log.NewDefaultLogger(config.LogFormat, config.LogLevel, false)
		if config.LogFormat == cfg.LogFormatJSON {
			logger = log.NewTMJSONLogger(log.NewSyncWriter(os.Stdout))
		}
		logger, err = tmflags.ParseLogLevel(config.LogLevel, logger, cfg.DefaultLogLevel())
		if err != nil {
			return err
		}

		if viper.GetBool(cli.TraceFlag) {
			logger = log.NewTracingLogger(logger)
		}
		logger = logger.With("module", "main")
		return nil
	},
}

// deprecateSnakeCase is a util function for 0.34.1. Should be removed in 0.35
func deprecateSnakeCase(cmd *cobra.Command, args []string) {
	if strings.Contains(cmd.CalledAs(), "_") {
		fmt.Println("Deprecated: snake_case commands will be replaced by hyphen-case commands in the next major release")
	}
}

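The hunk above shows the main difference in logger wiring: the newer branch gets format, level, and filtering from one log.NewDefaultLogger call, while the older one composes a plain or JSON TM logger with a level filter. A sketch of the older composition, using only calls that appear in the hunk (the helper name is hypothetical):

```go
package main

import (
	"os"

	cfg "github.com/tendermint/tendermint/config"
	tmflags "github.com/tendermint/tendermint/libs/cli/flags"
	"github.com/tendermint/tendermint/libs/log"
)

// newRootLogger is a hypothetical helper isolating the older branch's wiring.
func newRootLogger(conf *cfg.Config) (log.Logger, error) {
	// Pick the writer/format first...
	logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
	if conf.LogFormat == cfg.LogFormatJSON {
		logger = log.NewTMJSONLogger(log.NewSyncWriter(os.Stdout))
	}
	// ...then wrap it with a per-module level filter such as "state:info,*:error".
	return tmflags.ParseLogLevel(conf.LogLevel, logger, cfg.DefaultLogLevel())
}

func main() {
	logger, err := newRootLogger(cfg.DefaultConfig())
	if err != nil {
		panic(err)
	}
	logger.Info("logger ready")
}
```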
@@ -18,6 +18,10 @@ import (
	tmos "github.com/tendermint/tendermint/libs/os"
)

var (
	defaultRoot = os.ExpandEnv("$HOME/.some/test/dir")
)

// clearConfig clears env vars, the given root dir, and resets viper.
func clearConfig(dir string) {
	if err := os.Unsetenv("TMHOME"); err != nil {
@@ -48,10 +52,10 @@ func testRootCmd() *cobra.Command {
}

func testSetup(rootDir string, args []string, env map[string]string) error {
	clearConfig(rootDir)
	clearConfig(defaultRoot)

	rootCmd := testRootCmd()
	cmd := cli.PrepareBaseCmd(rootCmd, "TM", rootDir)
	cmd := cli.PrepareBaseCmd(rootCmd, "TM", defaultRoot)

	// run with the args and env
	args = append([]string{rootCmd.Use}, args...)
@@ -59,7 +63,6 @@ func testSetup(rootDir string, args []string, env map[string]string) error {
}

func TestRootHome(t *testing.T) {
	defaultRoot := t.TempDir()
	newRoot := filepath.Join(defaultRoot, "something-else")
	cases := []struct {
		args []string
@@ -102,7 +105,6 @@ func TestRootFlagsEnv(t *testing.T) {
		{nil, map[string]string{"TM_LOG_LEVEL": "debug"}, "debug"}, // right env
	}

	defaultRoot := t.TempDir()
	for i, tc := range cases {
		idxString := strconv.Itoa(i)

@@ -116,7 +118,7 @@
func TestRootConfig(t *testing.T) {

	// write non-default config
	nonDefaultLogLvl := "debug"
	nonDefaultLogLvl := "abc:debug"
	cvals := map[string]string{
		"log-level": nonDefaultLogLvl,
	}
@@ -127,13 +129,12 @@ func TestRootConfig(t *testing.T) {

		logLvl string
	}{
		{nil, nil, nonDefaultLogLvl}, // should load config
		{[]string{"--log-level=info"}, nil, "info"}, // flag overrides
		{nil, map[string]string{"TM_LOG_LEVEL": "info"}, "info"}, // env overrides
		{nil, nil, nonDefaultLogLvl}, // should load config
		{[]string{"--log-level=abc:info"}, nil, "abc:info"}, // flag overrides
		{nil, map[string]string{"TM_LOG_LEVEL": "abc:info"}, "abc:info"}, // env overrides
	}

	for i, tc := range cases {
		defaultRoot := t.TempDir()
		idxString := strconv.Itoa(i)
		clearConfig(defaultRoot)

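On the test side, the newer branch replaces the shared defaultRoot above with a per-test t.TempDir(). A minimal hypothetical test illustrating the pattern: each test gets a private root that the testing package deletes on cleanup, so parallel tests cannot clobber one another's config files:

```go
package commands

import (
	"os"
	"path/filepath"
	"testing"
)

// TestIsolatedRoot is hypothetical; it only demonstrates the isolation pattern.
func TestIsolatedRoot(t *testing.T) {
	root := t.TempDir() // fresh directory per test, removed automatically
	cfgDir := filepath.Join(root, "config")
	if err := os.MkdirAll(cfgDir, 0o700); err != nil {
		t.Fatal(err)
	}
	cfgFile := filepath.Join(cfgDir, "config.toml")
	if err := os.WriteFile(cfgFile, []byte("log-level = \"debug\"\n"), 0o600); err != nil {
		t.Fatal(err)
	}
}
```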
@@ -3,8 +3,6 @@ package commands
import (
	"bytes"
	"crypto/sha256"
	"errors"
	"flag"
	"fmt"
	"io"
	"os"
@@ -13,6 +11,7 @@ import (

	cfg "github.com/tendermint/tendermint/config"
	tmos "github.com/tendermint/tendermint/libs/os"
	nm "github.com/tendermint/tendermint/node"
)

var (
@@ -25,32 +24,14 @@ func AddNodeFlags(cmd *cobra.Command) {
	// bind flags
	cmd.Flags().String("moniker", config.Moniker, "node name")

	// mode flags
	cmd.Flags().String("mode", config.Mode, "node mode (full | validator | seed)")

	// priv val flags
	cmd.Flags().String(
		"priv-validator-laddr",
		config.PrivValidator.ListenAddr,
		config.PrivValidatorListenAddr,
		"socket address to listen on for connections from external priv-validator process")

	// node flags
	cmd.Flags().Bool("blocksync.enable", config.BlockSync.Enable, "enable fast blockchain syncing")

	// TODO (https://github.com/tendermint/tendermint/issues/6908): remove this check after the v0.35 release cycle
	// This check was added to give users an upgrade prompt to use the new flag for syncing.
	//
	// The pflag package does not have a native way to print a deprecation warning
	// and return an error. This logic was added to print a deprecation message to the user
	// and then crash if the user attempts to use the old --fast-sync flag.
	fs := flag.NewFlagSet("", flag.ExitOnError)
	fs.Func("fast-sync", "deprecated",
		func(string) error {
			return errors.New("--fast-sync has been deprecated, please use --blocksync.enable")
		})
	cmd.Flags().AddGoFlagSet(fs)

	cmd.Flags().MarkHidden("fast-sync") //nolint:errcheck
	cmd.Flags().Bool("fast-sync", config.FastSyncMode, "fast blockchain syncing")
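The shim removed above (present only on the newer branch) works around pflag's lack of first-class deprecation errors by registering the old flag on a standard-library FlagSet, whose flag.Func callback fails with a pointer to the replacement. A standalone sketch of the same pattern with hypothetical flag names:

```go
package main

import (
	"errors"
	"flag"
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use: "demo",
		RunE: func(cmd *cobra.Command, args []string) error {
			fmt.Println("running with the replacement flag only")
			return nil
		},
	}
	cmd.Flags().Bool("new-flag", false, "replacement for the deprecated flag")

	// flag.Func runs the callback when the flag is parsed, so supplying
	// --old-flag makes parsing fail with a message naming the replacement.
	fs := flag.NewFlagSet("", flag.ExitOnError)
	fs.Func("old-flag", "deprecated", func(string) error {
		return errors.New("--old-flag has been deprecated, please use --new-flag")
	})
	cmd.Flags().AddGoFlagSet(fs)
	cmd.Flags().MarkHidden("old-flag") //nolint:errcheck

	_ = cmd.Execute()
}
```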
	cmd.Flags().BytesHexVar(
		&genesisHash,
		"genesis-hash",
@@ -65,7 +46,9 @@ func AddNodeFlags(cmd *cobra.Command) {
		"proxy-app",
		config.ProxyApp,
		"proxy app address, or one of: 'kvstore',"+
			" 'persistent_kvstore' or 'noop' for local testing.")
			" 'persistent_kvstore',"+
			" 'counter',"+
			" 'counter_serial' or 'noop' for local testing.")
	cmd.Flags().String("abci", config.ABCI, "specify abci transport (socket | grpc)")

	// rpc flags
@@ -88,6 +71,7 @@ func AddNodeFlags(cmd *cobra.Command) {
		config.P2P.UnconditionalPeerIDs, "comma-delimited IDs of unconditional peers")
	cmd.Flags().Bool("p2p.upnp", config.P2P.UPNP, "enable/disable UPNP port forwarding")
	cmd.Flags().Bool("p2p.pex", config.P2P.PexReactor, "enable/disable Peer-Exchange")
	cmd.Flags().Bool("p2p.seed-mode", config.P2P.SeedMode, "enable/disable seed mode")
	cmd.Flags().String("p2p.private-peer-ids", config.P2P.PrivatePeerIDs, "comma-delimited private peer IDs")

	// consensus flags
@@ -100,10 +84,7 @@ func AddNodeFlags(cmd *cobra.Command) {
		config.Consensus.CreateEmptyBlocksInterval.String(),
		"the possible interval between empty blocks")

	addDBFlags(cmd)
}

func addDBFlags(cmd *cobra.Command) {
	// db flags
	cmd.Flags().String(
		"db-backend",
		config.DBBackend,
@@ -116,7 +97,7 @@ func addDBFlags(cmd *cobra.Command) {

// NewRunNodeCmd returns the command that allows the CLI to start a node.
// It can be used with a custom PrivValidator and in-process ABCI application.
func NewRunNodeCmd(nodeProvider cfg.ServiceProvider) *cobra.Command {
func NewRunNodeCmd(nodeProvider nm.Provider) *cobra.Command {
	cmd := &cobra.Command{
		Use:     "start",
		Aliases: []string{"node", "run"},
@@ -135,7 +116,7 @@ func NewRunNodeCmd(nodeProvider cfg.ServiceProvider) *cobra.Command {
			return fmt.Errorf("failed to start node: %w", err)
		}

		logger.Info("started node", "node", n.String())
		logger.Info("Started node", "nodeInfo", n.Switch().NodeInfo())

		// Stop upon receiving SIGTERM or CTRL-C.
		tmos.TrapSignal(logger, func() {
@@ -175,7 +156,7 @@ func checkGenesisHash(config *cfg.Config) error {
	// Compare with the flag.
	if !bytes.Equal(genesisHash, actualHash) {
		return fmt.Errorf(
			"--genesis-hash=%X does not match %s hash: %X",
			"--genesis_hash=%X does not match %s hash: %X",
			genesisHash, config.GenesisFile(), actualHash)
	}

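checkGenesisHash, partially shown in the last hunk, SHA-256-hashes the genesis file and compares the digest with the --genesis-hash flag value. A self-contained sketch of that check, built from the same imports the hunk shows (bytes, crypto/sha256, fmt, io, os); the helper name is hypothetical, and the error text follows the newer branch's hyphenated spelling:

```go
package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"io"
	"os"
)

// verifyGenesisHash is a hypothetical helper mirroring the comparison above.
func verifyGenesisHash(path string, expected []byte) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()

	// Stream the genesis file through SHA-256 rather than reading it whole.
	h := sha256.New()
	if _, err := io.Copy(h, f); err != nil {
		return err
	}
	actual := h.Sum(nil)
	if !bytes.Equal(expected, actual) {
		return fmt.Errorf("--genesis-hash=%X does not match %s hash: %X", expected, path, actual)
	}
	return nil
}
```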
@@ -4,21 +4,25 @@ import (
	"fmt"

	"github.com/spf13/cobra"

	"github.com/tendermint/tendermint/p2p"
)

// ShowNodeIDCmd dumps node's ID to the standard output.
var ShowNodeIDCmd = &cobra.Command{
	Use:   "show-node-id",
	Short: "Show this node's ID",
	RunE:  showNodeID,
	Use:     "show-node-id",
	Aliases: []string{"show_node_id"},
	Short:   "Show this node's ID",
	RunE:    showNodeID,
	PreRun:  deprecateSnakeCase,
}

func showNodeID(cmd *cobra.Command, args []string) error {
	nodeKeyID, err := config.LoadNodeKeyID()
	nodeKey, err := p2p.LoadNodeKey(config.NodeKeyFile())
	if err != nil {
		return err
	}

	fmt.Println(nodeKeyID)
	fmt.Println(nodeKey.ID)
	return nil
}

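Both versions of showNodeID print the ID derived from the node key file; they differ only in the loader API (config.LoadNodeKeyID on the newer branch, p2p.LoadNodeKey on the older). A minimal sketch of the older path, assuming the APIs exactly as they appear in the hunk above; the root directory is illustrative:

```go
package main

import (
	"fmt"

	cfg "github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/p2p"
)

func main() {
	conf := cfg.DefaultConfig()
	conf.SetRoot("/tmp/tmhome") // illustrative root directory

	// Load the node key from its default location under the root.
	nodeKey, err := p2p.LoadNodeKey(conf.NodeKeyFile())
	if err != nil {
		panic(err)
	}
	fmt.Println(nodeKey.ID) // the key's ID, as printed by showNodeID above
}
```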
Some files were not shown because too many files have changed in this diff.