Mirror of https://github.com/tendermint/tendermint.git (synced 2026-01-12 15:52:50 +00:00)

Compare commits: 183 commits (v0.36.0-de... to add_persis...)
Commits in this comparison (SHA1):

519f82e2d3 8109c10d5e 7b0e98d0de 24dbcb392b 2f90325fb7 7f304bc498 33a0a48dbe 073b99704d
f14e81e21c bcae7e228e a872dd75b7 d0e03f01fc 2b35d8191c 65c0aaee5e 4e2aa63bb3 f80c235842
4da0a4b8ed f8bf2cb912 a925f4fa84 3e92899bd9 bd6dc3ca88 f79b77036f 358fc5f6c4 867d406c6c
9c21d4140b cb88bd3941 892f5d9524 5ba3c6be42 26d421b8f6 587c91132b b057740bd3 0ff3d4b89d
0b3e00a6b5 6b35cc1a47 4b8fd28148 065918d1cd 02d456b8b8 2d4844f97f a62ac27047 4e355d80c4
5f9dd2e7f5 8c52956007 8a991e288c b3be1d7d7a 1e208d66e5 bca2080c01 5f57d84dd3 f6c39126ed
a823d167bc 3749c37847 76dea94a01 ab1788b922 babd3acb70 24dcba9230 c4033f95c1 070445bc10
d5c18b68c8 cf5c7be4d8 502f92bb97 4734f7d806 4af2dbd03b c9f90953a2 99ee730ee7 99c51b354c
abc697b46c 1dca1a8f97 d1f07047ec a36dd49eae da3449599f 02c7dca945 26b887b883 7e58f02eb8
6ab62fe7b6 1c34d17240 44b5d330b0 d7606777cf dbac109d01 d5df412b26 2a455be46c a15ae5b53a
f28d629e28 27560cf7a4 4acd117b5e 9dc3d7f9a2 e4466b7905 b3b90f820c 1e701ed9b5 2af7d8defd
d5865af1f4 54d7030510 b4055a0753 432067cc0e 6c7d6f761b 3335cbe8b7 641a5ec998 b3b1279d1f
797541e742 ae92b135d8 a2f4f3fb48 01c43c153b 0de4aa1765 dc28734dad 6f08c14375 ffcd347ef6
279e8027d3 e2b626fc92 63192ac300 5bc9cb6260 d32913c889 5599ec37bf 5cc980698a 8441b3715a
6108d0a9b5 1fd7060542 4d09a6c25d e2a103a315 93eb940dcd e5d019d51b 0dd5970894 ab237091bb
2416ff3be9 4725b00af1 ce89292712 4bd8c5ab6f b15b2c1b78 cb39e2f917 b4bc6bb4e8 23be048294
0d68161cc8 68ca65f5d7 88bdd328ed a8ff617773 a8917040a8 a30860a307 f7f4067968 b0130c88fb
0408888a5e 19ec4a5322 ca8f004112 7143f14a63 cbe6ad6cd5 0900ea8396 f4a56f4034 66a11fe527
006e6108a1 c3dc7d20df b95c261981 bc1a20dbb8 86f00135dd ff7b0e638e 36a1acff52 164de91842
4fe0f262d4 6538776e6a 4781d04d18 52ed994416 0524558696 d837432681 34a3fcd8fc 48295955ed
ded310093e befd669794 3646b635d3 59404003ee f2a8f5e054 1b5bb5348f 4ca130d226 1f438f205a
5bf30bb049 e53f92ba9c e4d6f6df09 0ef1a12186 72aee47847 109814c85a 851d2e3bde 3ea81bfaa7
5703ae2fb3 03ad7d6f20 f5b9c210ca cb69ed8135 c201e3b54d b30ec89ee9 6276fdcb5d
.github/dependabot.yml (vendored, 43 lines changed)

@@ -3,25 +3,48 @@ updates:
- package-ecosystem: github-actions
directory: "/"
schedule:
interval: daily
time: "11:00"
interval: weekly
open-pull-requests-limit: 10
labels:
- T:dependencies
- S:automerge

- package-ecosystem: npm
directory: "/docs"
schedule:
interval: daily
time: "11:00"
interval: weekly
open-pull-requests-limit: 10
reviewers:
- fadeev

###################################
##
## Update All Go Dependencies

- package-ecosystem: gomod
directory: "/"
schedule:
interval: daily
time: "11:00"
target-branch: "master"
open-pull-requests-limit: 10
reviewers:
- melekes
- tessr
labels:
- T:dependencies
- S:automerge

- package-ecosystem: gomod
directory: "/"
schedule:
interval: daily
target-branch: "v0.34.x"
open-pull-requests-limit: 10
labels:
- T:dependencies
- S:automerge

- package-ecosystem: gomod
directory: "/"
schedule:
interval: daily
target-branch: "v0.35.x"
open-pull-requests-limit: 10
labels:
- T:dependencies
- S:automerge
.github/mergify.yml (vendored, 23 lines changed)

@@ -1,13 +1,22 @@
queue_rules:
- name: default
conditions:
- base=master
- label=S:automerge

pull_request_rules:
- name: Automerge to master
conditions:
- base=master
- label=S:automerge
actions:
merge:
queue:
method: squash
strict: smart+fasttrack
commit_message: title+body
name: default
commit_message_template: |
{{ title }} (#{{ number }})

{{ body }}
- name: backport patches to v0.34.x branch
conditions:
- base=master
@@ -16,3 +25,11 @@ pull_request_rules:
backport:
branches:
- v0.34.x
- name: backport patches to v0.35.x branch
conditions:
- base=master
- label=S:backport-to-v0.35.x
actions:
backport:
branches:
- v0.35.x
.github/workflows/build.yml (vendored, new file, 100 lines)

@@ -0,0 +1,100 @@
name: Build
# Tests runs different tests (test_abci_apps, test_abci_cli, test_apps)
# This workflow runs on every push to master or release branch and every pull requests
# All jobs will pass without running if no *{.go, .mod, .sum} files have been modified
on:
pull_request:
push:
branches:
- master
- release/**

jobs:
build:
name: Build
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
goarch: ["arm", "amd64"]
goos: ["linux"]
timeout-minutes: 5
steps:
- uses: actions/setup-go@v2
with:
go-version: "1.17"
- uses: actions/checkout@v2.4.0
- uses: technote-space/get-diff-action@v5
with:
PATTERNS: |
**/**.go
"!test/"
go.mod
go.sum
Makefile
- name: install
run: GOOS=${{ matrix.goos }} GOARCH=${{ matrix.goarch }} make build
if: "env.GIT_DIFF != ''"

test_abci_cli:
runs-on: ubuntu-latest
needs: build
timeout-minutes: 5
steps:
- uses: actions/setup-go@v2
with:
go-version: "1.17"
- uses: actions/checkout@v2.4.0
- uses: technote-space/get-diff-action@v5
with:
PATTERNS: |
**/**.go
go.mod
go.sum
- name: install
run: make install_abci
if: "env.GIT_DIFF != ''"
- run: abci/tests/test_cli/test.sh
shell: bash
if: "env.GIT_DIFF != ''"

test_apps:
runs-on: ubuntu-latest
needs: build
timeout-minutes: 5
steps:
- uses: actions/setup-go@v2
with:
go-version: "1.17"
- uses: actions/checkout@v2.4.0
- uses: technote-space/get-diff-action@v5
with:
PATTERNS: |
**/**.go
go.mod
go.sum
- name: install
run: make install install_abci
if: "env.GIT_DIFF != ''"
- name: test_apps
run: test/app/test.sh
shell: bash
if: "env.GIT_DIFF != ''"

test_persistence:
runs-on: ubuntu-latest
steps:
- uses: actions/setup-go@v2
with:
go-version: "1.17"
- uses: actions/checkout@v2.4.0
- uses: technote-space/get-diff-action@v5
with:
PATTERNS: |
**/**.go
go.mod
go.sum
- run: make install
- run: test/persist/test_failure_indices.sh
shell: bash
# if: "env.GIT_DIFF != ''"
.github/workflows/coverage.yml (vendored, 132 lines changed)

@@ -1,132 +0,0 @@
name: Test Coverage
on:
pull_request:
push:
paths:
- "**.go"
branches:
- master
- release/**

jobs:
split-test-files:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2.3.4
- name: Create a file with all the pkgs
run: go list ./... > pkgs.txt
- name: Split pkgs into 4 files
run: split -d -n l/4 pkgs.txt pkgs.txt.part.
# cache multiple
- uses: actions/upload-artifact@v2
with:
name: "${{ github.sha }}-00"
path: ./pkgs.txt.part.00
- uses: actions/upload-artifact@v2
with:
name: "${{ github.sha }}-01"
path: ./pkgs.txt.part.01
- uses: actions/upload-artifact@v2
with:
name: "${{ github.sha }}-02"
path: ./pkgs.txt.part.02
- uses: actions/upload-artifact@v2
with:
name: "${{ github.sha }}-03"
path: ./pkgs.txt.part.03

build-linux:
name: Build
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
goarch: ["arm", "amd64"]
timeout-minutes: 5
steps:
- uses: actions/setup-go@v2
with:
go-version: "1.17"
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
with:
PATTERNS: |
**/**.go
"!test/"
go.mod
go.sum
- name: install
run: GOOS=linux GOARCH=${{ matrix.goarch }} make build
if: "env.GIT_DIFF != ''"

tests:
runs-on: ubuntu-latest
needs: split-test-files
strategy:
fail-fast: false
matrix:
part: ["00", "01", "02", "03"]
steps:
- uses: actions/setup-go@v2
with:
go-version: "1.17"
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
with:
PATTERNS: |
**/**.go
"!test/"
go.mod
go.sum
- uses: actions/download-artifact@v2
with:
name: "${{ github.sha }}-${{ matrix.part }}"
if: env.GIT_DIFF
- name: Set up Go
uses: actions/setup-go@v2
with:
go-version: "1.17"
- name: test & coverage report creation
run: |
cat pkgs.txt.part.${{ matrix.part }} | xargs go test -mod=readonly -timeout 8m -race -coverprofile=${{ matrix.part }}profile.out
if: env.GIT_DIFF
- uses: actions/upload-artifact@v2
with:
name: "${{ github.sha }}-${{ matrix.part }}-coverage"
path: ./${{ matrix.part }}profile.out

upload-coverage-report:
runs-on: ubuntu-latest
needs: tests
steps:
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
with:
PATTERNS: |
**/**.go
"!test/"
go.mod
go.sum
- uses: actions/download-artifact@v2
with:
name: "${{ github.sha }}-00-coverage"
if: env.GIT_DIFF
- uses: actions/download-artifact@v2
with:
name: "${{ github.sha }}-01-coverage"
if: env.GIT_DIFF
- uses: actions/download-artifact@v2
with:
name: "${{ github.sha }}-02-coverage"
if: env.GIT_DIFF
- uses: actions/download-artifact@v2
with:
name: "${{ github.sha }}-03-coverage"
if: env.GIT_DIFF
- run: |
cat ./*profile.out | grep -v "mode: set" >> coverage.txt
if: env.GIT_DIFF
- uses: codecov/codecov-action@v2.1.0
with:
file: ./coverage.txt
if: env.GIT_DIFF
.github/workflows/docker.yml (vendored, 7 lines changed)

@@ -1,8 +1,7 @@
name: Build & Push
name: Docker
# Build & Push rebuilds the tendermint docker image on every push to master and creation of tags
# and pushes the image to https://hub.docker.com/r/interchainio/simapp/tags
on:
pull_request:
push:
branches:
- master
@@ -14,7 +13,7 @@ jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2.3.4
- uses: actions/checkout@v2.4.0
- name: Prepare
id: prep
run: |
@@ -39,7 +38,7 @@ jobs:
with:
platforms: all

- name: Set up Docker Buildx
- name: Set up Docker Build
uses: docker/setup-buildx-action@v1.6.0

- name: Login to DockerHub
.github/workflows/e2e-nightly-34x.yml (vendored, 2 lines changed)

@@ -25,7 +25,7 @@ jobs:
with:
go-version: '1.17'

- uses: actions/checkout@v2.3.4
- uses: actions/checkout@v2.4.0
with:
ref: 'v0.34.x'
.github/workflows/e2e-nightly-35x.yml (vendored, new file, 76 lines)

@@ -0,0 +1,76 @@
# Runs randomly generated E2E testnets nightly on v0.35.x.

# !! If you change something in this file, you probably want
# to update the e2e-nightly-master workflow as well!

name: e2e-nightly-35x
on:
workflow_dispatch: # allow running workflow manually
schedule:
- cron: '0 2 * * *'

jobs:
e2e-nightly-test:
# Run parallel jobs for the listed testnet groups (must match the
# ./build/generator -g flag)
strategy:
fail-fast: false
matrix:
p2p: ['legacy', 'new', 'hybrid']
group: ['00', '01', '02', '03']
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/setup-go@v2
with:
go-version: '1.17'

- uses: actions/checkout@v2.4.0
with:
ref: 'v0.35.x'

- name: Build
working-directory: test/e2e
# Run make jobs in parallel, since we can't run steps in parallel.
run: make -j2 docker generator runner tests

- name: Generate testnets
working-directory: test/e2e
# When changing -g, also change the matrix groups above
run: ./build/generator -g 4 -d networks/nightly/${{ matrix.p2p }} -p ${{ matrix.p2p }}

- name: Run ${{ matrix.p2p }} p2p testnets in group ${{ matrix.group }}
working-directory: test/e2e
run: ./run-multiple.sh networks/nightly/${{ matrix.p2p }}/*-group${{ matrix.group }}-*.toml

e2e-nightly-fail-2:
needs: e2e-nightly-test
if: ${{ failure() }}
runs-on: ubuntu-latest
steps:
- name: Notify Slack on failure
uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
env:
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACK_CHANNEL: tendermint-internal
SLACK_USERNAME: Nightly E2E Tests
SLACK_ICON_EMOJI: ':skull:'
SLACK_COLOR: danger
SLACK_MESSAGE: Nightly E2E tests failed on v0.35.x
SLACK_FOOTER: ''

e2e-nightly-success: # may turn this off once they seem to pass consistently
needs: e2e-nightly-test
if: ${{ success() }}
runs-on: ubuntu-latest
steps:
- name: Notify Slack on success
uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
env:
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACK_CHANNEL: tendermint-internal
SLACK_USERNAME: Nightly E2E Tests
SLACK_ICON_EMOJI: ':white_check_mark:'
SLACK_COLOR: good
SLACK_MESSAGE: Nightly E2E tests passed on v0.35.x
SLACK_FOOTER: ''
.github/workflows/e2e-nightly-master.yml (vendored, 15 lines changed)

@@ -10,13 +10,12 @@ on:
- cron: '0 2 * * *'

jobs:
e2e-nightly-test-2:
e2e-nightly-test:
# Run parallel jobs for the listed testnet groups (must match the
# ./build/generator -g flag)
strategy:
fail-fast: false
matrix:
p2p: ['legacy', 'new', 'hybrid']
group: ['00', '01', '02', '03']
runs-on: ubuntu-latest
timeout-minutes: 60
@@ -25,7 +24,7 @@ jobs:
with:
go-version: '1.17'

- uses: actions/checkout@v2.3.4
- uses: actions/checkout@v2.4.0

- name: Build
working-directory: test/e2e
@@ -35,14 +34,14 @@ jobs:
- name: Generate testnets
working-directory: test/e2e
# When changing -g, also change the matrix groups above
run: ./build/generator -g 4 -d networks/nightly/${{ matrix.p2p }} -p ${{ matrix.p2p }}
run: ./build/generator -g 4 -d networks/nightly/

- name: Run ${{ matrix.p2p }} p2p testnets in group ${{ matrix.group }}
- name: Run ${{ matrix.p2p }} p2p testnets
working-directory: test/e2e
run: ./run-multiple.sh networks/nightly/${{ matrix.p2p }}/*-group${{ matrix.group }}-*.toml
run: ./run-multiple.sh networks/nightly/*-group${{ matrix.group }}-*.toml

e2e-nightly-fail-2:
needs: e2e-nightly-test-2
needs: e2e-nightly-test
if: ${{ failure() }}
runs-on: ubuntu-latest
steps:
@@ -58,7 +57,7 @@ jobs:
SLACK_FOOTER: ''

e2e-nightly-success: # may turn this off once they seem to pass consistently
needs: e2e-nightly-test-2
needs: e2e-nightly-test
if: ${{ success() }}
runs-on: ubuntu-latest
steps:
.github/workflows/e2e.yml (vendored, 8 lines changed)

@@ -17,7 +17,7 @@ jobs:
- uses: actions/setup-go@v2
with:
go-version: '1.17'
- uses: actions/checkout@v2.3.4
- uses: actions/checkout@v2.4.0
- uses: technote-space/get-diff-action@v5
with:
PATTERNS: |
@@ -33,10 +33,6 @@ jobs:

- name: Run CI testnet
working-directory: test/e2e
run: ./build/runner -f networks/ci.toml
run: ./run-multiple.sh networks/ci.toml
if: "env.GIT_DIFF != ''"

- name: Emit logs on failure
if: ${{ failure() }}
working-directory: test/e2e
run: ./build/runner -f networks/ci.toml logs
.github/workflows/fuzz-nightly.yml (vendored, 23 lines changed)

@@ -1,5 +1,5 @@
# Runs fuzzing nightly.
name: fuzz-nightly
name: Fuzz Tests
on:
workflow_dispatch: # allow running workflow manually
schedule:
@@ -17,30 +17,15 @@ jobs:
with:
go-version: '1.17'

- uses: actions/checkout@v2.3.4
- uses: actions/checkout@v2.4.0

- name: Install go-fuzz
working-directory: test/fuzz
run: go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-build

- name: Fuzz mempool-v1
- name: Fuzz mempool
working-directory: test/fuzz
run: timeout -s SIGINT --preserve-status 10m make fuzz-mempool-v1
continue-on-error: true

- name: Fuzz mempool-v0
working-directory: test/fuzz
run: timeout -s SIGINT --preserve-status 10m make fuzz-mempool-v0
continue-on-error: true

- name: Fuzz p2p-addrbook
working-directory: test/fuzz
run: timeout -s SIGINT --preserve-status 10m make fuzz-p2p-addrbook
continue-on-error: true

- name: Fuzz p2p-pex
working-directory: test/fuzz
run: timeout -s SIGINT --preserve-status 10m make fuzz-p2p-pex
run: timeout -s SIGINT --preserve-status 10m make fuzz-mempool
continue-on-error: true

- name: Fuzz p2p-sc
.github/workflows/jepsen.yml (vendored, 2 lines changed)

@@ -46,7 +46,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout the Jepsen repository
uses: actions/checkout@v2.3.4
uses: actions/checkout@v2.4.0
with:
repository: 'tendermint/jepsen'
.github/workflows/linkchecker.yml (vendored, 2 lines changed)

@@ -6,7 +6,7 @@ jobs:
markdown-link-check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2.3.4
- uses: actions/checkout@v2.4.0
- uses: gaurav-nelson/github-action-markdown-link-check@1.0.13
with:
folder-path: "docs"
.github/workflows/lint.yml (vendored, 4 lines changed)

@@ -13,7 +13,7 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 8
steps:
- uses: actions/checkout@v2.3.4
- uses: actions/checkout@v2.4.0
- uses: technote-space/get-diff-action@v5
with:
PATTERNS: |
@@ -23,7 +23,7 @@ jobs:
- uses: golangci/golangci-lint-action@v2.5.2
with:
# Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version.
version: v1.38
version: v1.42.1
args: --timeout 10m
github-token: ${{ secrets.github_token }}
if: env.GIT_DIFF
.github/workflows/linter.yml (vendored, 2 lines changed)

@@ -19,7 +19,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout Code
uses: actions/checkout@v2.3.4
uses: actions/checkout@v2.4.0
- name: Lint Code Base
uses: docker://github/super-linter:v3
env:
.github/workflows/proto-docker.yml (vendored, 51 lines changed)

@@ -1,51 +0,0 @@
name: Build & Push TM Proto Builder
on:
pull_request:
paths:
- "tools/proto/*"
push:
branches:
- master
paths:
- "tools/proto/*"
schedule:
# run this job once a month to recieve any go or buf updates
- cron: "* * 1 * *"

jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2.3.4
- name: Prepare
id: prep
run: |
DOCKER_IMAGE=tendermintdev/docker-build-proto
VERSION=noop
if [[ $GITHUB_REF == refs/tags/* ]]; then
VERSION=${GITHUB_REF#refs/tags/}
elif [[ $GITHUB_REF == refs/heads/* ]]; then
VERSION=$(echo ${GITHUB_REF#refs/heads/} | sed -r 's#/+#-#g')
if [ "${{ github.event.repository.default_branch }}" = "$VERSION" ]; then
VERSION=latest
fi
fi
TAGS="${DOCKER_IMAGE}:${VERSION}"
echo ::set-output name=tags::${TAGS}

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1.6.0

- name: Login to DockerHub
uses: docker/login-action@v1.10.0
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}

- name: Publish to Docker Hub
uses: docker/build-push-action@v2.7.0
with:
context: ./tools/proto
file: ./tools/proto/Dockerfile
push: ${{ github.event_name != 'pull_request' }}
tags: ${{ steps.prep.outputs.tags }}
.github/workflows/proto.yml (vendored, 23 lines changed)

@@ -1,23 +0,0 @@
name: Protobuf
# Protobuf runs buf (https://buf.build/) lint and check-breakage
# This workflow is only run when a .proto file has been modified
on:
workflow_dispatch: # allow running workflow manually
pull_request:
paths:
- "**.proto"
jobs:
proto-lint:
runs-on: ubuntu-latest
timeout-minutes: 4
steps:
- uses: actions/checkout@v2.3.4
- name: lint
run: make proto-lint
proto-breakage:
runs-on: ubuntu-latest
timeout-minutes: 4
steps:
- uses: actions/checkout@v2.3.4
- name: check-breakage
run: make proto-check-breaking-ci
.github/workflows/release.yml (vendored, 2 lines changed)

@@ -12,7 +12,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2.3.4
uses: actions/checkout@v2.4.0
with:
fetch-depth: 0
.github/workflows/tests.yml (vendored, 114 lines changed)

@@ -1,106 +1,82 @@
name: Tests
# Tests runs different tests (test_abci_apps, test_abci_cli, test_apps)
# This workflow runs on every push to master or release branch and every pull requests
# All jobs will pass without running if no *{.go, .mod, .sum} files have been modified
name: Test
on:
pull_request:
push:
paths:
- "**.go"
branches:
- master
- release/**

jobs:
build:
name: Build
tests:
runs-on: ubuntu-latest
timeout-minutes: 5
strategy:
fail-fast: false
matrix:
part: ["00", "01", "02", "03"]
steps:
- uses: actions/setup-go@v2
with:
go-version: "1.17"
- uses: actions/checkout@v2.3.4
- uses: actions/checkout@v2.4.0
- uses: technote-space/get-diff-action@v5
with:
PATTERNS: |
**/**.go
"!test/"
go.mod
go.sum
- name: install
run: make install install_abci
if: "env.GIT_DIFF != ''"
- uses: actions/cache@v2.1.6
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
Makefile
- name: Run Go Tests
run: |
make test-group-${{ matrix.part }} NUM_SPLIT=4
if: env.GIT_DIFF
# Cache binaries for use by other jobs
- uses: actions/cache@v2.1.6
- uses: actions/upload-artifact@v2
with:
path: ~/go/bin
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
if: env.GIT_DIFF
name: "${{ github.sha }}-${{ matrix.part }}-coverage"
path: ./build/${{ matrix.part }}.profile.out

test_abci_cli:
upload-coverage-report:
runs-on: ubuntu-latest
needs: build
timeout-minutes: 5
needs: tests
steps:
- uses: actions/setup-go@v2
with:
go-version: "1.17"
- uses: actions/checkout@v2.3.4
- uses: actions/checkout@v2.4.0
- uses: technote-space/get-diff-action@v5
with:
PATTERNS: |
**/**.go
"!test/"
go.mod
go.sum
- uses: actions/cache@v2.1.6
Makefile
- uses: actions/download-artifact@v2
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
name: "${{ github.sha }}-00-coverage"
if: env.GIT_DIFF
- uses: actions/cache@v2.1.6
- uses: actions/download-artifact@v2
with:
path: ~/go/bin
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
name: "${{ github.sha }}-01-coverage"
if: env.GIT_DIFF
- run: abci/tests/test_cli/test.sh
shell: bash
- uses: actions/download-artifact@v2
with:
name: "${{ github.sha }}-02-coverage"
if: env.GIT_DIFF

test_apps:
- uses: actions/download-artifact@v2
with:
name: "${{ github.sha }}-03-coverage"
if: env.GIT_DIFF
- run: |
cat ./*profile.out | grep -v "mode: set" >> coverage.txt
if: env.GIT_DIFF
- uses: codecov/codecov-action@v2.1.0
with:
file: ./coverage.txt
if: env.GIT_DIFF

test_persistence:
runs-on: ubuntu-latest
needs: build
timeout-minutes: 5
steps:
- uses: actions/setup-go@v2
with:
go-version: "1.17"
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
with:
PATTERNS: |
**/**.go
go.mod
go.sum
- uses: actions/cache@v2.1.6
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
if: env.GIT_DIFF
- uses: actions/cache@v2.1.6
with:
path: ~/go/bin
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
if: env.GIT_DIFF
- name: test_apps
run: test/app/test.sh
shell: bash
if: env.GIT_DIFF
- name: run persistence tests
working-directory: test/persist
run: ./test_failure_indices.sh
.gitignore (vendored, 8 lines changed)

@@ -25,6 +25,7 @@ docs/_build
docs/dist
docs/node_modules/
docs/spec
docs/.vuepress/public/rpc
index.html.md
libs/pubsub/query/fuzz_test/output
profile\.out
@@ -46,3 +47,10 @@ test/fuzz/**/corpus
test/fuzz/**/crashers
test/fuzz/**/suppressions
test/fuzz/**/*.zip
proto/tendermint/blocksync/types.proto
proto/tendermint/consensus/types.proto
proto/tendermint/mempool/*.proto
proto/tendermint/p2p/*.proto
proto/tendermint/statesync/*.proto
proto/tendermint/types/*.proto
proto/tendermint/version/*.proto
@@ -13,18 +13,18 @@ linters:
# - gochecknoinits
# - gocognit
- goconst
- gocritic
# - gocritic
# - gocyclo
# - godox
- gofmt
- goimports
- golint
- revive
- gosec
- gosimple
- govet
- ineffassign
# - interfacer
- lll
# - lll
# - maligned
- misspell
- nakedret
@@ -46,9 +46,6 @@ issues:
- path: _test\.go
linters:
- gosec
- linters:
- lll
source: "https://"
max-same-issues: 50

linters-settings:
CHANGELOG.md (174 lines changed)

@@ -2,9 +2,49 @@

Friendly reminder: We have a [bug bounty program](https://hackerone.com/cosmos).

## v0.35.0-rc2
## v0.35.0

September 27, 2021
November 4, 2021

Special thanks to external contributors on this release: @JayT106,
@bipulprasad, @alessio, @Yawning, @silasdavis, @cuonglm, @tanyabouman,
@JoeKash, @githubsands, @jeebster, @crypto-facs, @liamsi, and @gotjoshua

### FEATURES

- [cli] [#7033](https://github.com/tendermint/tendermint/pull/7033) Add a `rollback` command to rollback to the previous tendermint state in the event of an incorrect app hash. (@cmwaters)
- [config] [\#7174](https://github.com/tendermint/tendermint/pull/7174) expose ability to write config to arbitrary paths. (@tychoish)
- [mempool, rpc] [\#7065](https://github.com/tendermint/tendermint/pull/7065) add removetx rpc method (backport of #7047) (@tychoish).
- [\#6982](https://github.com/tendermint/tendermint/pull/6982) tendermint binary has built-in suppport for running the e2e application (with state sync support) (@cmwaters).
- [config] Add `--mode` flag and config variable. See [ADR-52](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-052-tendermint-mode.md) @dongsam
- [rpc] [\#6329](https://github.com/tendermint/tendermint/pull/6329) Don't cap page size in unsafe mode (@gotjoshua, @cmwaters)
- [pex] [\#6305](https://github.com/tendermint/tendermint/pull/6305) v2 pex reactor with backwards compatability. Introduces two new pex messages to
accomodate for the new p2p stack. Removes the notion of seeds and crawling. All peer
exchange reactors behave the same. (@cmwaters)
- [crypto] [\#6376](https://github.com/tendermint/tendermint/pull/6376) Enable sr25519 as a validator key type
- [mempool] [\#6466](https://github.com/tendermint/tendermint/pull/6466) Introduction of a prioritized mempool. (@alexanderbez)
- `Priority` and `Sender` have been introduced into the `ResponseCheckTx` type, where the `priority` will determine the prioritization of
the transaction when a proposer reaps transactions for a block proposal. The `sender` field acts as an index.
- Operators may toggle between the legacy mempool reactor, `v0`, and the new prioritized reactor, `v1`, by setting the
`mempool.version` configuration, where `v1` is the default configuration.
- Applications that do not specify a priority, i.e. zero, will have transactions reaped by the order in which they are received by the node.
- Transactions are gossiped in FIFO order as they are in `v0`.
- [config/indexer] [\#6411](https://github.com/tendermint/tendermint/pull/6411) Introduce support for custom event indexing data sources, specifically PostgreSQL. (@JayT106)
- [blocksync/event] [\#6619](https://github.com/tendermint/tendermint/pull/6619) Emit blocksync status event when switching consensus/blocksync (@JayT106)
- [statesync/event] [\#6700](https://github.com/tendermint/tendermint/pull/6700) Emit statesync status start/end event (@JayT106)
- [inspect] [\#6785](https://github.com/tendermint/tendermint/pull/6785) Add a new `inspect` command for introspecting the state and block store of a crashed tendermint node. (@williambanfield)

### BUG FIXES

- [\#7106](https://github.com/tendermint/tendermint/pull/7106) Revert mutex change to ABCI Clients (@tychoish).
- [\#7142](https://github.com/tendermint/tendermint/pull/7142) mempool: remove panic when recheck-tx was not sent to ABCI application (@williambanfield).
- [consensus]: [\#7060](https://github.com/tendermint/tendermint/pull/7060)
wait until peerUpdates channel is closed to close remaining peers (@williambanfield)
- [privval] [\#5638](https://github.com/tendermint/tendermint/pull/5638) Increase read/write timeout to 5s and calculate ping interval based on it (@JoeKash)
- [evidence] [\#6375](https://github.com/tendermint/tendermint/pull/6375) Fix bug with inconsistent LightClientAttackEvidence hashing (cmwaters)
- [rpc] [\#6507](https://github.com/tendermint/tendermint/pull/6507) Ensure RPC client can handle URLs without ports (@JayT106)
- [statesync] [\#6463](https://github.com/tendermint/tendermint/pull/6463) Adds Reverse Sync feature to fetch historical light blocks after state sync in order to verify any evidence (@cmwaters)
- [blocksync] [\#6590](https://github.com/tendermint/tendermint/pull/6590) Update the metrics during blocksync (@JayT106)

### BREAKING CHANGES
@@ -16,53 +56,6 @@ September 27, 2021
- [state] [store] [proxy] [rpc/core]: [\#6937](https://github.com/tendermint/tendermint/pull/6937) move packages to
`internal` to prevent consumption of these internal APIs by
external users. (@tychoish)

### FEATURES

- [\#6982](https://github.com/tendermint/tendermint/pull/6982) tendermint binary has built-in suppport for running the e2e application (with state sync support) (@cmwaters).


## v0.35.0-rc1

September 8, 2021

Special thanks to external contributors on this release: @JayT106, @bipulprasad, @alessio, @Yawning, @silasdavis,
@cuonglm, @tanyabouman, @JoeKash, @githubsands, @jeebster, @crypto-facs, @liamsi, and @gotjoshua

### BREAKING CHANGES

- CLI/RPC/Config
- [pubsub/events] [\#6634](https://github.com/tendermint/tendermint/pull/6634) The `ResultEvent.Events` field is now of type `[]abci.Event` preserving event order instead of `map[string][]string`. (@alexanderbez)
- [config] [\#5598](https://github.com/tendermint/tendermint/pull/5598) The `test_fuzz` and `test_fuzz_config` P2P settings have been removed. (@erikgrinaker)
- [config] [\#5728](https://github.com/tendermint/tendermint/pull/5728) `fastsync.version = "v1"` is no longer supported (@melekes)
- [cli] [\#5772](https://github.com/tendermint/tendermint/pull/5772) `gen_node_key` prints JSON-encoded `NodeKey` rather than ID and does not save it to `node_key.json` (@melekes)
- [cli] [\#5777](https://github.com/tendermint/tendermint/pull/5777) use hyphen-case instead of snake_case for all cli commands and config parameters (@cmwaters)
- [rpc] [\#6019](https://github.com/tendermint/tendermint/pull/6019) standardise RPC errors and return the correct status code (@bipulprasad & @cmwaters)
- [rpc] [\#6168](https://github.com/tendermint/tendermint/pull/6168) Change default sorting to desc for `/tx_search` results (@melekes)
- [cli] [\#6282](https://github.com/tendermint/tendermint/pull/6282) User must specify the node mode when using `tendermint init` (@cmwaters)
- [state/indexer] [\#6382](https://github.com/tendermint/tendermint/pull/6382) reconstruct indexer, move txindex into the indexer package (@JayT106)
- [cli] [\#6372](https://github.com/tendermint/tendermint/pull/6372) Introduce `BootstrapPeers` as part of the new p2p stack. Peers to be connected on startup (@cmwaters)
- [config] [\#6462](https://github.com/tendermint/tendermint/pull/6462) Move `PrivValidator` configuration out of `BaseConfig` into its own section. (@tychoish)
- [rpc] [\#6610](https://github.com/tendermint/tendermint/pull/6610) Add MaxPeerBlockHeight into /status rpc call (@JayT106)
- [blocksync/rpc] [\#6620](https://github.com/tendermint/tendermint/pull/6620) Add TotalSyncedTime & RemainingTime to SyncInfo in /status RPC (@JayT106)
- [rpc/grpc] [\#6725](https://github.com/tendermint/tendermint/pull/6725) Mark gRPC in the RPC layer as deprecated.
- [blocksync/v2] [\#6730](https://github.com/tendermint/tendermint/pull/6730) Fast Sync v2 is deprecated, please use v0
- [rpc] Add genesis_chunked method to support paginated and parallel fetching of large genesis documents.
- [rpc/jsonrpc/server] [\#6785](https://github.com/tendermint/tendermint/pull/6785) `Listen` function updated to take an `int` argument, `maxOpenConnections`, instead of an entire config object. (@williambanfield)
- [rpc] [\#6820](https://github.com/tendermint/tendermint/pull/6820) Update RPC methods to reflect changes in the p2p layer, disabling support for `UnsafeDialPeers` and `UnsafeDialPeers` when used with the new p2p layer, and changing the response format of the peer list in `NetInfo` for all users.
- [cli] [\#6854](https://github.com/tendermint/tendermint/pull/6854) Remove deprecated snake case commands. (@tychoish)

- Apps
- [ABCI] [\#6408](https://github.com/tendermint/tendermint/pull/6408) Change the `key` and `value` fields from `[]byte` to `string` in the `EventAttribute` type. (@alexanderbez)
- [ABCI] [\#5447](https://github.com/tendermint/tendermint/pull/5447) Remove `SetOption` method from `ABCI.Client` interface
- [ABCI] [\#5447](https://github.com/tendermint/tendermint/pull/5447) Reset `Oneof` indexes for `Request` and `Response`.
- [ABCI] [\#5818](https://github.com/tendermint/tendermint/pull/5818) Use protoio for msg length delimitation. Migrates from int64 to uint64 length delimiters.
- [ABCI] [\#3546](https://github.com/tendermint/tendermint/pull/3546) Add `mempool_error` field to `ResponseCheckTx`. This field will contain an error string if Tendermint encountered an error while adding a transaction to the mempool. (@williambanfield)
- [Version] [\#6494](https://github.com/tendermint/tendermint/pull/6494) `TMCoreSemVer` has been renamed to `TMVersion`.
- It is not required any longer to set ldflags to set version strings
- [abci/counter] [\#6684](https://github.com/tendermint/tendermint/pull/6684) Delete counter example app

- Go API
- [pubsub] [\#6634](https://github.com/tendermint/tendermint/pull/6634) The `Query#Matches` method along with other pubsub methods, now accepts a `[]abci.Event` instead of `map[string][]string`. (@alexanderbez)
- [p2p] [\#6618](https://github.com/tendermint/tendermint/pull/6618) [\#6583](https://github.com/tendermint/tendermint/pull/6583) Move `p2p.NodeInfo`, `p2p.NodeID` and `p2p.NetAddress` into `types` to support use in external packages. (@tychoish)
- [node] [\#6540](https://github.com/tendermint/tendermint/pull/6540) Reduce surface area of the `node` package by making most of the implementation details private. (@tychoish)
@@ -98,35 +91,46 @@ Special thanks to external contributors on this release: @JayT106, @bipulprasad,
- [config] [\#6627](https://github.com/tendermint/tendermint/pull/6627) Extend `config` to contain methods `LoadNodeKeyID` and `LoadorGenNodeKeyID`
- [blocksync] [\#6755](https://github.com/tendermint/tendermint/pull/6755) Rename `FastSync` and `Blockchain` package to `BlockSync` (@cmwaters)

- CLI/RPC/Config

- [pubsub/events] [\#6634](https://github.com/tendermint/tendermint/pull/6634) The `ResultEvent.Events` field is now of type `[]abci.Event` preserving event order instead of `map[string][]string`. (@alexanderbez)
- [config] [\#5598](https://github.com/tendermint/tendermint/pull/5598) The `test_fuzz` and `test_fuzz_config` P2P settings have been removed. (@erikgrinaker)
- [config] [\#5728](https://github.com/tendermint/tendermint/pull/5728) `fastsync.version = "v1"` is no longer supported (@melekes)
- [cli] [\#5772](https://github.com/tendermint/tendermint/pull/5772) `gen_node_key` prints JSON-encoded `NodeKey` rather than ID and does not save it to `node_key.json` (@melekes)
- [cli] [\#5777](https://github.com/tendermint/tendermint/pull/5777) use hyphen-case instead of snake_case for all cli commands and config parameters (@cmwaters)
- [rpc] [\#6019](https://github.com/tendermint/tendermint/pull/6019) standardise RPC errors and return the correct status code (@bipulprasad & @cmwaters)
- [rpc] [\#6168](https://github.com/tendermint/tendermint/pull/6168) Change default sorting to desc for `/tx_search` results (@melekes)
- [cli] [\#6282](https://github.com/tendermint/tendermint/pull/6282) User must specify the node mode when using `tendermint init` (@cmwaters)
- [state/indexer] [\#6382](https://github.com/tendermint/tendermint/pull/6382) reconstruct indexer, move txindex into the indexer package (@JayT106)
- [cli] [\#6372](https://github.com/tendermint/tendermint/pull/6372) Introduce `BootstrapPeers` as part of the new p2p stack. Peers to be connected on startup (@cmwaters)
- [config] [\#6462](https://github.com/tendermint/tendermint/pull/6462) Move `PrivValidator` configuration out of `BaseConfig` into its own section. (@tychoish)
- [rpc] [\#6610](https://github.com/tendermint/tendermint/pull/6610) Add MaxPeerBlockHeight into /status rpc call (@JayT106)
- [blocksync/rpc] [\#6620](https://github.com/tendermint/tendermint/pull/6620) Add TotalSyncedTime & RemainingTime to SyncInfo in /status RPC (@JayT106)
- [rpc/grpc] [\#6725](https://github.com/tendermint/tendermint/pull/6725) Mark gRPC in the RPC layer as deprecated.
- [blocksync/v2] [\#6730](https://github.com/tendermint/tendermint/pull/6730) Fast Sync v2 is deprecated, please use v0
- [rpc] Add genesis_chunked method to support paginated and parallel fetching of large genesis documents.
- [rpc/jsonrpc/server] [\#6785](https://github.com/tendermint/tendermint/pull/6785) `Listen` function updated to take an `int` argument, `maxOpenConnections`, instead of an entire config object. (@williambanfield)
- [rpc] [\#6820](https://github.com/tendermint/tendermint/pull/6820) Update RPC methods to reflect changes in the p2p layer, disabling support for `UnsafeDialPeers` and `UnsafeDialPeers` when used with the new p2p layer, and changing the response format of the peer list in `NetInfo` for all users.
- [cli] [\#6854](https://github.com/tendermint/tendermint/pull/6854) Remove deprecated snake case commands. (@tychoish)
- [tools] [\#6498](https://github.com/tendermint/tendermint/pull/6498) Set OS home dir to instead of the hardcoded PATH. (@JayT106)
- [cli/indexer] [\#6676](https://github.com/tendermint/tendermint/pull/6676) Reindex events command line tooling. (@JayT106)

- Apps

- [ABCI] [\#6408](https://github.com/tendermint/tendermint/pull/6408) Change the `key` and `value` fields from `[]byte` to `string` in the `EventAttribute` type. (@alexanderbez)
- [ABCI] [\#5447](https://github.com/tendermint/tendermint/pull/5447) Remove `SetOption` method from `ABCI.Client` interface
- [ABCI] [\#5447](https://github.com/tendermint/tendermint/pull/5447) Reset `Oneof` indexes for `Request` and `Response`.
- [ABCI] [\#5818](https://github.com/tendermint/tendermint/pull/5818) Use protoio for msg length delimitation. Migrates from int64 to uint64 length delimiters.
- [ABCI] [\#3546](https://github.com/tendermint/tendermint/pull/3546) Add `mempool_error` field to `ResponseCheckTx`. This field will contain an error string if Tendermint encountered an error while adding a transaction to the mempool. (@williambanfield)
- [Version] [\#6494](https://github.com/tendermint/tendermint/pull/6494) `TMCoreSemVer` has been renamed to `TMVersion`.
- It is not required any longer to set ldflags to set version strings
- [abci/counter] [\#6684](https://github.com/tendermint/tendermint/pull/6684) Delete counter example app

- Data Storage
- [store/state/evidence/light] [\#5771](https://github.com/tendermint/tendermint/pull/5771) Use an order-preserving varint key encoding (@cmwaters)
- [mempool] [\#6396](https://github.com/tendermint/tendermint/pull/6396) Remove mempool's write ahead log (WAL), (previously unused by the tendermint code). (@tychoish)
- [state] [\#6541](https://github.com/tendermint/tendermint/pull/6541) Move pruneBlocks from consensus/state to state/execution. (@JayT106)

- Tooling
- [tools] [\#6498](https://github.com/tendermint/tendermint/pull/6498) Set OS home dir to instead of the hardcoded PATH. (@JayT106)
- [cli/indexer] [\#6676](https://github.com/tendermint/tendermint/pull/6676) Reindex events command line tooling. (@JayT106)

### FEATURES

- [config] Add `--mode` flag and config variable. See [ADR-52](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-052-tendermint-mode.md) @dongsam
- [rpc] [\#6329](https://github.com/tendermint/tendermint/pull/6329) Don't cap page size in unsafe mode (@gotjoshua, @cmwaters)
- [pex] [\#6305](https://github.com/tendermint/tendermint/pull/6305) v2 pex reactor with backwards compatability. Introduces two new pex messages to
accomodate for the new p2p stack. Removes the notion of seeds and crawling. All peer
exchange reactors behave the same. (@cmwaters)
- [crypto] [\#6376](https://github.com/tendermint/tendermint/pull/6376) Enable sr25519 as a validator key type
- [mempool] [\#6466](https://github.com/tendermint/tendermint/pull/6466) Introduction of a prioritized mempool. (@alexanderbez)
- `Priority` and `Sender` have been introduced into the `ResponseCheckTx` type, where the `priority` will determine the prioritization of
the transaction when a proposer reaps transactions for a block proposal. The `sender` field acts as an index.
- Operators may toggle between the legacy mempool reactor, `v0`, and the new prioritized reactor, `v1`, by setting the
`mempool.version` configuration, where `v1` is the default configuration.
- Applications that do not specify a priority, i.e. zero, will have transactions reaped by the order in which they are received by the node.
- Transactions are gossiped in FIFO order as they are in `v0`.
- [config/indexer] [\#6411](https://github.com/tendermint/tendermint/pull/6411) Introduce support for custom event indexing data sources, specifically PostgreSQL. (@JayT106)
- [blocksync/event] [\#6619](https://github.com/tendermint/tendermint/pull/6619) Emit blocksync status event when switching consensus/blocksync (@JayT106)
- [statesync/event] [\#6700](https://github.com/tendermint/tendermint/pull/6700) Emit statesync status start/end event (@JayT106)
- [inspect] [\#6785](https://github.com/tendermint/tendermint/pull/6785) Add a new `inspect` command for introspecting the state and block store of a crashed tendermint node. (@williambanfield)

### IMPROVEMENTS

- [libs/log] Console log formatting changes as a result of [\#6534](https://github.com/tendermint/tendermint/pull/6534) and [\#6589](https://github.com/tendermint/tendermint/pull/6589). (@tychoish)
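The changelog hunks above change event plumbing in two ways: `ResultEvent.Events` becomes an order-preserving `[]abci.Event` (#6634), and `EventAttribute` keys and values become plain strings instead of `[]byte` (#6408). The following is a minimal sketch of what consuming code looks like against the v0.35 `abci/types` package; the event type, attribute names, and values are illustrative only and are not taken from this repository.

```go
package main

import (
	"fmt"

	abci "github.com/tendermint/tendermint/abci/types"
)

// printEvents walks events in the order they were emitted, which the
// []abci.Event representation preserves (unlike the old map[string][]string form).
func printEvents(events []abci.Event) {
	for _, ev := range events {
		for _, attr := range ev.Attributes {
			// Key and Value are plain strings after #6408.
			fmt.Printf("%s.%s = %s\n", ev.Type, attr.Key, attr.Value)
		}
	}
}

func main() {
	events := []abci.Event{
		{
			Type: "transfer", // illustrative event type
			Attributes: []abci.EventAttribute{
				{Key: "sender", Value: "addr1", Index: true},
				{Key: "amount", Value: "100", Index: false},
			},
		},
	}
	printEvents(events)
}
```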
@@ -170,13 +174,23 @@ Special thanks to external contributors on this release: @JayT106, @bipulprasad,
- [cmd/tendermint/commands] [\#6623](https://github.com/tendermint/tendermint/pull/6623) replace `$HOME/.some/test/dir` with `t.TempDir` (@tanyabouman)
- [statesync] \6807 Implement P2P state provider as an alternative to RPC (@cmwaters)

## v0.34.14

This release backports the `rollback` feature to allow recovery in the event of an incorrect app hash.

### FEATURES

- [\#6982](https://github.com/tendermint/tendermint/pull/6982) The tendermint binary now has built-in suppport for running the end-to-end test application (with state sync support) (@cmwaters).
- [cli] [#7033](https://github.com/tendermint/tendermint/pull/7033) Add a `rollback` command to rollback to the previous tendermint state. This may be useful in the event of non-determinstic app hash or when reverting an upgrade. @cmwaters

### IMPROVEMENTS

- [\#7103](https://github.com/tendermint/tendermint/pull/7104) Remove IAVL dependency (backport of #6550) (@cmwaters)

### BUG FIXES

- [privval] [\#5638](https://github.com/tendermint/tendermint/pull/5638) Increase read/write timeout to 5s and calculate ping interval based on it (@JoeKash)
- [evidence] [\#6375](https://github.com/tendermint/tendermint/pull/6375) Fix bug with inconsistent LightClientAttackEvidence hashing (cmwaters)
- [rpc] [\#6507](https://github.com/tendermint/tendermint/pull/6507) Ensure RPC client can handle URLs without ports (@JayT106)
- [statesync] [\#6463](https://github.com/tendermint/tendermint/pull/6463) Adds Reverse Sync feature to fetch historical light blocks after state sync in order to verify any evidence (@cmwaters)
- [blocksync] [\#6590](https://github.com/tendermint/tendermint/pull/6590) Update the metrics during blocksync (@JayT106)
- [\#7057](https://github.com/tendermint/tendermint/pull/7057) Import Postgres driver support for the psql indexer (@creachadair).
- [ABCI] [\#7110](https://github.com/tendermint/tendermint/issues/7110) Revert "change client to use multi-reader mutexes (#6873)" (@tychoish).

## v0.34.13
@@ -1976,7 +1990,7 @@ For more, see issues marked

This release also includes a fix to prevent Tendermint from including the same
piece of evidence in more than one block. This issue was reported by @chengwenxi in our
[bug bounty program](https://hackerone.com/tendermint).
[bug bounty program](https://hackerone.com/cosmos).

### BREAKING CHANGES:
@@ -2469,7 +2483,7 @@ Special thanks to external contributors on this release:
@james-ray, @overbool, @phymbert, @Slamper, @Uzair1995, @yutianwu.

Special thanks to @Slamper for a series of bug reports in our [bug bounty
program](https://hackerone.com/tendermint) which are fixed in this release.
program](https://hackerone.com/cosmos) which are fixed in this release.

This release is primarily about adding Version fields to various data structures,
optimizing consensus messages for signing and verification in
@@ -12,16 +12,42 @@ Special thanks to external contributors on this release:

- CLI/RPC/Config

- [rpc] Remove the deprecated gRPC interface to the RPC service. (@creachadair)
- [blocksync] \#7159 Remove support for disabling blocksync in any circumstance. (@tychoish)
- [mempool] \#7171 Remove legacy mempool implementation. (@tychoish)

- Apps

- [proto/tendermint] \#6976 Remove core protobuf files in favor of only housing them in the [tendermint/spec](https://github.com/tendermint/spec) repository.

- P2P Protocol

- [p2p] \#7035 Remove legacy P2P routing implementation and associated configuration options. (@tychoish)
- [p2p] \#7265 Peer manager reduces peer score for each failed dial attempts for peers that have not successfully dialed. (@tychoish)

- Go API

- [pubsub] \#7231 Remove unbuffered subscriptions and rework the Subscription interface. (@creachadair)
- [eventbus] \#7231 Move the EventBus type to the internal/eventbus package. (@creachadair)
- [blocksync] \#7046 Remove v2 implementation of the blocksync service and recactor, which was disabled in the previous release. (@tychoish)
- [p2p] \#7064 Remove WDRR queue implementation. (@tychoish)
- [config] \#7169 `WriteConfigFile` now returns an error. (@tychoish)
- [libs/service] \#7288 Remove SetLogger method on `service.Service` interface. (@tychoish)


- Blockchain Protocol

### FEATURES

- [rpc] [\#7270](https://github.com/tendermint/tendermint/pull/7270) Add `header` and `header_by_hash` RPC Client queries. (@fedekunze)
- [cli] [#7033](https://github.com/tendermint/tendermint/pull/7033) Add a `rollback` command to rollback to the previous tendermint state in the event of non-determinstic app hash or reverting an upgrade.
- [mempool, rpc] \#7041 Add removeTx operation to the RPC layer. (@tychoish)

### IMPROVEMENTS
- [internal/protoio] \#7325 Optimized `MarshalDelimited` by inlining the common case and using a `sync.Pool` in the worst case. (@odeke-em)

- [pubsub] \#7319 Performance improvements for the event query API (@creachadair)

### BUG FIXES

- fix: assignment copies lock value in `BitArray.UnmarshalJSON()` (@lklimek)
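The v0.35 changelog above also introduces the prioritized (`v1`) mempool, which reads the new `Priority` and `Sender` fields from `ResponseCheckTx` (#6466). Below is a minimal sketch of how an application built on the v0.35 `abci/types` package might fill those fields; `parseTx` and the fee-as-priority policy are hypothetical stand-ins for application-specific logic, not part of the Tendermint API.

```go
package app

import (
	abci "github.com/tendermint/tendermint/abci/types"
)

// PrioritizedApp sketches an ABCI application that populates the Priority and
// Sender fields consumed by the v1 (prioritized) mempool.
type PrioritizedApp struct {
	abci.BaseApplication
}

func (app *PrioritizedApp) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx {
	// Hypothetical helper: a real application would decode its own tx format here.
	sender, fee := parseTx(req.Tx)

	return abci.ResponseCheckTx{
		Code:     abci.CodeTypeOK,
		Priority: fee,    // higher-fee transactions are reaped first by the v1 mempool
		Sender:   sender, // used by the mempool as an index
	}
}

// parseTx is a stand-in for application-specific decoding.
func parseTx(tx []byte) (string, int64) {
	return "example-sender", int64(len(tx))
}
```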
146
CONTRIBUTING.md
146
CONTRIBUTING.md
@@ -109,7 +109,7 @@ We use [Protocol Buffers](https://developers.google.com/protocol-buffers) along
|
||||
|
||||
For linting, checking breaking changes and generating proto stubs, we use [buf](https://buf.build/). If you would like to run linting and check if the changes you have made are breaking then you will need to have docker running locally. Then the linting cmd will be `make proto-lint` and the breaking changes check will be `make proto-check-breaking`.
|
||||
|
||||
We use [Docker](https://www.docker.com/) to generate the protobuf stubs. To generate the stubs yourself, make sure docker is running then run `make proto-gen`.
|
||||
We use [Docker](https://www.docker.com/) to generate the protobuf stubs. To generate the stubs yourself, make sure docker is running then run `make proto-gen`. This command uses the spec repo to get the necessary protobuf files for generating the go code. If you are modifying the proto files manually for changes in the core data structures, you will need to clone them into the go repo and comment out lines 22-37 of the file `./scripts/protocgen.sh`.
|
||||
|
||||
### Visual Studio Code
|
||||
|
||||
@@ -227,150 +227,6 @@ Fixes #nnnn

Each PR should have one commit once it lands on `master`; this can be accomplished by using the "squash and merge" button on Github. Be sure to edit your commit message, though!

### Release procedure

#### A note about backport branches

Tendermint's `master` branch is under active development.
Releases are specified using tags and are built from long-lived "backport" branches.
Each release "line" (e.g. 0.34 or 0.33) has its own long-lived backport branch,
and the backport branches have names like `v0.34.x` or `v0.33.x`
(literally, `x`; it is not a placeholder in this case).

As non-breaking changes land on `master`, they should also be backported (cherry-picked)
to these backport branches.

We use Mergify's [backport feature](https://mergify.io/features/backports) to automatically backport
to the needed branch. There should be a label for any backport branch that you'll be targeting.
To notify the bot to backport a pull request, mark the pull request with
the label `S:backport-to-<backport_branch>`.
Once the original pull request is merged, the bot will try to cherry-pick the pull request
to the backport branch. If the bot fails to backport, it will open a pull request.
The author of the original pull request is responsible for solving the conflicts and
merging the pull request.

#### Creating a backport branch

If this is the first release candidate for a major release, you get to have the honor of creating
the backport branch!

Note that, after creating the backport branch, you'll also need to update the tags on `master`
so that `go mod` is able to order the branches correctly. You should tag `master` with a "dev" tag
that is "greater than" the backport branches' tags. See #6072 for more context.

In the following example, we'll assume that we're making a backport branch for
the 0.35.x line.

1. Start on `master`.
2. Create the backport branch:
   `git checkout -b v0.35.x`
3. Go back to master, tag it as the dev branch for the _next_ major release, and push it back up:
   `git tag -a v0.36.0-dev; git push origin v0.36.0-dev`
4. Create a new workflow to run the e2e nightlies for this backport branch.
   (See https://github.com/tendermint/tendermint/blob/master/.github/workflows/e2e-nightly-34x.yml
   for an example.)

#### Release candidates

Before creating an official release, especially a major release, we may want to create a
release candidate (RC) for our friends and partners to test out. We use git tags to
create RCs, and we build them off of backport branches.

Tags for RCs should follow the "standard" release naming conventions, with `-rcX` at the end
(for example, `v0.35.0-rc0`).

(Note that branches and tags _cannot_ have the same names, so it's important that these branches
have distinct names from the tags/release names.)

If this is the first RC for a major release, you'll have to make a new backport branch (see above).
Otherwise:

1. Start from the backport branch (e.g. `v0.35.x`).
2. Run the integration tests and the e2e nightlies
   (which can be triggered from the Github UI;
   e.g., https://github.com/tendermint/tendermint/actions/workflows/e2e-nightly-34x.yml).
3. Prepare the changelog:
   - Move the changes included in `CHANGELOG_PENDING.md` into `CHANGELOG.md`.
   - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
     all PRs.
   - Ensure that UPGRADING.md is up-to-date and includes notes on any breaking changes
     or other upgrading flows.
   - Bump the TMVersionDefault version in `version.go`.
   - Bump the P2P and block protocol versions in `version.go`, if necessary.
   - Bump the ABCI protocol version in `version.go`, if necessary.
4. Open a PR with these changes against the backport branch.
5. Once these changes have landed on the backport branch, be sure to pull them back down locally.
6. Once you have the changes locally, create the new tag, specifying a name and a tag "message":
   `git tag -a v0.35.0-rc0 -m "Release Candidate v0.35.0-rc0"`
7. Push the tag back up to origin:
   `git push origin v0.35.0-rc0`
   Now the tag should be available on the repo's releases page.
8. Future RCs will continue to be built off of this branch.

Note that this process should only be used for "true" RCs--
release candidates that, if successful, will be the next release.
For more experimental "RCs," create a new, short-lived branch and tag that instead.

#### Major release

This major release process assumes that this release was preceded by release candidates.
If there were no release candidates, begin by creating a backport branch, as described above.

1. Start on the backport branch (e.g. `v0.35.x`).
2. Run integration tests and the e2e nightlies.
3. Prepare the release:
   - "Squash" changes from the changelog entries for the RCs into a single entry,
     and add all changes included in `CHANGELOG_PENDING.md`.
     (Squashing includes both combining all entries, as well as removing or simplifying
     any intra-RC changes. It may also help to alphabetize the entries by package name.)
   - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
     all PRs.
   - Ensure that UPGRADING.md is up-to-date and includes notes on any breaking changes
     or other upgrading flows.
   - Bump the TMVersionDefault version in `version.go`.
   - Bump the P2P and block protocol versions in `version.go`, if necessary.
   - Bump the ABCI protocol version in `version.go`, if necessary.
4. Open a PR with these changes against the backport branch.
5. Once these changes are on the backport branch, push a tag with the prepared release details.
   This will trigger the actual release `v0.35.0`.
   - `git tag -a v0.35.0 -m 'Release v0.35.0'`
   - `git push origin v0.35.0`
6. Make sure that `master` is updated with the latest `CHANGELOG.md`, `CHANGELOG_PENDING.md`, and `UPGRADING.md`.
7. Add the release to the documentation site generator config (see
   [DOCS_README.md](./docs/DOCS_README.md) for more details). In summary:
   - Start on branch `master`.
   - Add a new line at the bottom of [`docs/versions`](./docs/versions) to
     ensure the newest release is the default for the landing page.
   - Add a new entry to `themeConfig.versions` in
     [`docs/.vuepress/config.js`](./docs/.vuepress/config.js) to include the
     release in the dropdown versions menu.

#### Minor release (point releases)

Minor releases are done differently from major releases: they are built off of long-lived backport branches, rather than from master.
As non-breaking changes land on `master`, they should also be backported (cherry-picked) to these backport branches.

Minor releases don't have release candidates by default, although any tricky changes may merit a release candidate.

To create a minor release:

1. Check out the long-lived backport branch: `git checkout v0.35.x`
2. Run integration tests (`make test_integrations`) and the nightlies.
3. Check out a new branch and prepare the release:
   - Copy `CHANGELOG_PENDING.md` to the top of `CHANGELOG.md`.
   - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for all issues.
   - Run `bash ./scripts/authors.sh` to get a list of authors since the latest release, and add the GitHub aliases of external contributors to the top of the CHANGELOG. To look up an alias from an email, try `bash ./scripts/authors.sh <email>`.
   - Reset `CHANGELOG_PENDING.md`.
   - Bump the ABCI version number, if necessary.
     (Note that ABCI follows semver, that ABCI versions are the only versions
     which can change during minor releases, and that only field additions are valid minor changes.)
4. Open a PR with these changes that will land them back on `v0.35.x`.
5. Once this change has landed on the backport branch, make sure to pull it locally, then push a tag.
   - `git tag -a v0.35.1 -m 'Release v0.35.1'`
   - `git push origin v0.35.1`
6. Create a pull request back to master with the CHANGELOG & version changes from the latest release.
   - Remove all `R:minor` labels from the pull requests that were included in the release.
   - Do not merge the backport branch into master.

## Testing

### Unit tests
Makefile
@@ -1,6 +1,5 @@

#!/usr/bin/make -f

PACKAGES=$(shell go list ./...)
BUILDDIR ?= $(CURDIR)/build

BUILD_TAGS?=tendermint

@@ -14,8 +13,8 @@ endif

LD_FLAGS = -X github.com/tendermint/tendermint/version.TMVersion=$(VERSION)
BUILD_FLAGS = -mod=readonly -ldflags "$(LD_FLAGS)"
HTTPS_GIT := https://github.com/tendermint/tendermint.git
DOCKER_BUF := docker run -v $(shell pwd):/workspace --workdir /workspace bufbuild/buf
BUILD_IMAGE := ghcr.io/tendermint/docker-build-proto
DOCKER_PROTO_BUILDER := docker run -v $(shell pwd):/workspace --workdir /workspace $(BUILD_IMAGE)
CGO_ENABLED ?= 0

# handle nostrip

@@ -79,32 +78,17 @@ $(BUILDDIR)/:

### Protobuf ###
###############################################################################

proto-all: proto-gen proto-lint proto-check-breaking
.PHONY: proto-all

proto-gen:
	@docker pull -q tendermintdev/docker-build-proto
	@echo "Generating Protobuf files"
	@docker run -v $(shell pwd):/workspace --workdir /workspace tendermintdev/docker-build-proto sh ./scripts/protocgen.sh
	@$(DOCKER_PROTO_BUILDER) sh ./scripts/protocgen.sh
.PHONY: proto-gen

proto-lint:
	@$(DOCKER_BUF) check lint --error-format=json
.PHONY: proto-lint

proto-format:
	@echo "Formatting Protobuf files"
	docker run -v $(shell pwd):/workspace --workdir /workspace tendermintdev/docker-build-proto find ./ -not -path "./third_party/*" -name *.proto -exec clang-format -i {} \;
	@$(DOCKER_PROTO_BUILDER) find ./ -not -path "./third_party/*" -name *.proto -exec clang-format -i {} \;
.PHONY: proto-format

proto-check-breaking:
	@$(DOCKER_BUF) check breaking --against-input .git#branch=master
.PHONY: proto-check-breaking

proto-check-breaking-ci:
	@$(DOCKER_BUF) check breaking --against-input $(HTTPS_GIT)#branch=master
.PHONY: proto-check-breaking-ci

###############################################################################
### Build ABCI ###
###############################################################################

@@ -118,7 +102,7 @@ install_abci:

.PHONY: install_abci

###############################################################################
### Privval Server ###
###############################################################################

build_privval_server:

@@ -227,13 +211,13 @@ build-docs:

build-docker: build-linux
	cp $(BUILDDIR)/tendermint DOCKER/tendermint
	docker build --label=tendermint --tag="tendermint/tendermint" DOCKER
	docker build --label=tendermint --tag="tendermint/tendermint" -f DOCKER/Dockerfile .
	rm -rf DOCKER/tendermint
.PHONY: build-docker

###############################################################################
### Mocks ###
###############################################################################

mockery:

@@ -303,3 +287,23 @@ build-reproducible:

		--name latest-build cosmossdk/rbuilder:latest
	docker cp -a latest-build:/home/builder/artifacts/ $(CURDIR)/
.PHONY: build-reproducible

# Implements test splitting and running. This is pulled directly from
# the github action workflows for better local reproducibility.

GO_TEST_FILES != find $(CURDIR) -name "*_test.go"

# default to four splits by default
NUM_SPLIT ?= 4

$(BUILDDIR):
	mkdir -p $@

# the format statement filters out all packages that don't have tests.
$(BUILDDIR)/packages.txt:$(GO_TEST_FILES) $(BUILDDIR)
	go list -f "{{ if .TestGoFiles }}{{ .ImportPath }}{{ end }}" ./... | sort > $@

split-test-packages:$(BUILDDIR)/packages.txt
	split -d -n l/$(NUM_SPLIT) $< $<.
test-group-%:split-test-packages
	cat $(BUILDDIR)/packages.txt.$* | xargs go test -mod=readonly -timeout=8m -race -coverprofile=$(BUILDDIR)/$*.profile.out
README.md
@@ -29,16 +29,18 @@ see our recent paper, "[The latest gossip on BFT consensus](https://arxiv.org/ab

Please do not depend on master as your production branch. Use [releases](https://github.com/tendermint/tendermint/releases) instead.

Tendermint has been in production in private and public environments, most notably the blockchains of the Cosmos Network. We haven't released v1.0 yet since we are making breaking changes to the protocol and the APIs.
See below for more details about [versioning](#versioning).

In any case, if you intend to run Tendermint in production, we're happy to help. You can
contact us [over email](mailto:hello@interchain.berlin) or [join the chat](https://discord.gg/cosmosnetwork).

More on how releases are conducted can be found [here](./RELEASES.md).

## Security

To report a security vulnerability, see our [bug bounty
program](https://hackerone.com/tendermint).
program](https://hackerone.com/cosmos).
For examples of the kinds of bugs we're looking for, see [our security policy](SECURITY.md).

We also maintain a dedicated mailing list for security updates. We will only ever use this mailing list

@@ -48,7 +50,7 @@ to notify you of vulnerabilities and fixes in Tendermint Core. You can subscribe

| Requirement | Notes            |
|-------------|------------------|
| Go version  | Go1.16 or higher |
| Go version  | Go1.17 or higher |

## Documentation

@@ -61,8 +63,8 @@ See the [install instructions](/docs/introduction/install.md).

### Quick Start

- [Single node](/docs/introduction/quick-start.md)
- [Local cluster using docker-compose](/docs/networks/docker-compose.md)
- [Remote cluster using Terraform and Ansible](/docs/networks/terraform-and-ansible.md)
- [Local cluster using docker-compose](/docs/tools/docker-compose.md)
- [Remote cluster using Terraform and Ansible](/docs/tools/terraform-and-ansible.md)
- [Join the Cosmos testnet](https://cosmos.network/testnet)

## Contributing

@@ -71,7 +73,7 @@ Please abide by the [Code of Conduct](CODE_OF_CONDUCT.md) in all interactions.

Before contributing to the project, please take a look at the [contributing guidelines](CONTRIBUTING.md)
and the [style guide](STYLE_GUIDE.md). You may also find it helpful to read the
[specifications](https://github.com/tendermint/spec), watch the [Developer Sessions](/docs/DEV_SESSIONS.md),
and familiarize yourself with our
[Architectural Decision Records](https://github.com/tendermint/tendermint/tree/master/docs/architecture).

@@ -95,7 +97,7 @@ In an effort to avoid accumulating technical debt prior to 1.0.0,

we do not guarantee that breaking changes (ie. bumps in the MINOR version)
will work with existing Tendermint blockchains. In these cases you will
have to start a new blockchain, or write something custom to get the old
data into the new chain. However, any bump in the PATCH version should be
compatible with existing blockchain histories.

@@ -112,6 +114,8 @@ in [UPGRADING.md](./UPGRADING.md).

### Tendermint Core

We keep a public up-to-date version of our roadmap [here](./docs/roadmap/roadmap.md).

For details about the blockchain data structures and the p2p protocols, see the
[Tendermint specification](https://docs.tendermint.com/master/spec/).
RELEASES.md (new file)
@@ -0,0 +1,180 @@

# Releases

Tendermint uses [semantic versioning](https://semver.org/) with each release following
a `vX.Y.Z` format. The `master` branch is used for active development and thus it's
advisable not to build against it.

The latest changes are always initially merged into `master`.
Releases are specified using tags and are built from long-lived "backport" branches
that are cut from `master` when the release process begins.
Each release "line" (e.g. 0.34 or 0.33) has its own long-lived backport branch,
and the backport branches have names like `v0.34.x` or `v0.33.x`
(literally, `x`; it is not a placeholder in this case). Tendermint only
maintains the last two releases at a time (the oldest release receives predominantly
just security patches).

## Backporting

As non-breaking changes land on `master`, they should also be backported
to these backport branches.

We use Mergify's [backport feature](https://mergify.io/features/backports) to automatically backport
to the needed branch. There should be a label for any backport branch that you'll be targeting.
To notify the bot to backport a pull request, mark the pull request with the label corresponding
to the correct backport branch. For example, to backport to v0.35.x, add the label `S:backport-to-v0.35.x`.
Once the original pull request is merged, the bot will try to cherry-pick the pull request
to the backport branch. If the bot fails to backport, it will open a pull request.
The author of the original pull request is responsible for solving the conflicts and
merging the pull request.

### Creating a backport branch

If this is the first release candidate for a major release, you get to have the
honor of creating the backport branch!

Note that, after creating the backport branch, you'll also need to update the
tags on `master` so that `go mod` is able to order the branches correctly. You
should tag `master` with a "dev" tag that is "greater than" the backport
branches' tags. See [#6072](https://github.com/tendermint/tendermint/pull/6072)
for more context.

In the following example, we'll assume that we're making a backport branch for
the 0.35.x line.

1. Start on `master`
2. Create and push the backport branch:
   ```sh
   git checkout -b v0.35.x
   git push origin v0.35.x
   ```

After doing these steps, go back to `master` and do the following:

1. Tag `master` as the dev branch for the _next_ major release and push it back up.
   For example:
   ```sh
   git tag -a v0.36.0-dev -m "Development base for Tendermint v0.36."
   git push origin v0.36.0-dev
   ```

2. Create a new workflow to run e2e nightlies for the new backport branch.
   (See [e2e-nightly-master.yml][e2e] for an example.)

3. Add a new section to the Mergify config (`.github/mergify.yml`) to enable the
   backport bot to work on this branch, and add a corresponding `S:backport-to-v0.35.x`
   [label](https://github.com/tendermint/tendermint/labels) so the bot can be triggered.

4. Add a new section to the Dependabot config (`.github/dependabot.yml`) to
   enable automatic update of Go dependencies on this branch. Copy and edit one
   of the existing branch configurations to set the correct `target-branch`.

[e2e]: https://github.com/tendermint/tendermint/blob/master/.github/workflows/e2e-nightly-master.yml

## Release candidates

Before creating an official release, especially a major release, we may want to create a
release candidate (RC) for our friends and partners to test out. We use git tags to
create RCs, and we build them off of backport branches.

Tags for RCs should follow the "standard" release naming conventions, with `-rcX` at the end
(for example, `v0.35.0-rc0`).

(Note that branches and tags _cannot_ have the same names, so it's important that these branches
have distinct names from the tags/release names.)

If this is the first RC for a major release, you'll have to make a new backport branch (see above).
Otherwise:

1. Start from the backport branch (e.g. `v0.35.x`).
2. Run the integration tests and the e2e nightlies
   (which can be triggered from the Github UI;
   e.g., https://github.com/tendermint/tendermint/actions/workflows/e2e-nightly-34x.yml).
3. Prepare the changelog:
   - Move the changes included in `CHANGELOG_PENDING.md` into `CHANGELOG.md`. Each RC should have
     its own changelog section. These will be squashed when the final candidate is released.
   - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
     all PRs.
   - Ensure that `UPGRADING.md` is up-to-date and includes notes on any breaking changes
     or other upgrading flows.
   - Bump the TMVersionDefault version in `version.go`.
   - Bump the P2P and block protocol versions in `version.go`, if necessary.
     Check the changelog for breaking changes in these components.
   - Bump the ABCI protocol version in `version.go`, if necessary.
4. Open a PR with these changes against the backport branch.
5. Once these changes have landed on the backport branch, be sure to pull them back down locally.
6. Once you have the changes locally, create the new tag, specifying a name and a tag "message":
   `git tag -a v0.35.0-rc0 -m "Release Candidate v0.35.0-rc0"`
7. Push the tag back up to origin:
   `git push origin v0.35.0-rc0`
   Now the tag should be available on the repo's releases page.
8. Future RCs will continue to be built off of this branch.

Note that this process should only be used for "true" RCs--
release candidates that, if successful, will be the next release.
For more experimental "RCs," create a new, short-lived branch and tag that instead.

## Major release

This major release process assumes that this release was preceded by release candidates.
If there were no release candidates, begin by creating a backport branch, as described above.

1. Start on the backport branch (e.g. `v0.35.x`)
2. Run integration tests (`make test_integrations`) and the e2e nightlies.
3. Prepare the release:
   - "Squash" changes from the changelog entries for the RCs into a single entry,
     and add all changes included in `CHANGELOG_PENDING.md`.
     (Squashing includes both combining all entries, as well as removing or simplifying
     any intra-RC changes. It may also help to alphabetize the entries by package name.)
   - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
     all PRs.
   - Ensure that `UPGRADING.md` is up-to-date and includes notes on any breaking changes
     or other upgrading flows.
   - Bump the TMVersionDefault version in `version.go`.
   - Bump the P2P and block protocol versions in `version.go`, if necessary.
   - Bump the ABCI protocol version in `version.go`, if necessary.
4. Open a PR with these changes against the backport branch.
5. Once these changes are on the backport branch, push a tag with the prepared release details.
   This will trigger the actual release `v0.35.0`.
   - `git tag -a v0.35.0 -m 'Release v0.35.0'`
   - `git push origin v0.35.0`
6. Make sure that `master` is updated with the latest `CHANGELOG.md`, `CHANGELOG_PENDING.md`, and `UPGRADING.md`.
7. Add the release to the documentation site generator config (see
   [DOCS_README.md](./docs/DOCS_README.md) for more details). In summary:
   - Start on branch `master`.
   - Add a new line at the bottom of [`docs/versions`](./docs/versions) to
     ensure the newest release is the default for the landing page.
   - Add a new entry to `themeConfig.versions` in
     [`docs/.vuepress/config.js`](./docs/.vuepress/config.js) to include the
     release in the dropdown versions menu.
   - Commit these changes to `master` and backport them into the backport
     branch for this release.

## Minor release (point releases)

Minor releases are done differently from major releases: they are built off of
long-lived backport branches, rather than from master. As non-breaking changes
land on `master`, they should also be backported into these backport branches.

Minor releases don't have release candidates by default, although any tricky
changes may merit a release candidate.

To create a minor release:

1. Check out the long-lived backport branch: `git checkout v0.35.x`
2. Run integration tests (`make test_integrations`) and the nightlies.
3. Check out a new branch and prepare the release:
   - Copy `CHANGELOG_PENDING.md` to the top of `CHANGELOG.md`.
   - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for all issues.
   - Run `bash ./scripts/authors.sh` to get a list of authors since the latest release, and add the GitHub aliases of external contributors to the top of the CHANGELOG. To look up an alias from an email, try `bash ./scripts/authors.sh <email>`.
   - Reset `CHANGELOG_PENDING.md`.
   - Bump the TMVersionDefault version in `version.go`.
   - Bump the ABCI version number, if necessary.
     (Note that ABCI follows semver, that ABCI versions are the only versions
     which can change during minor releases, and that only field additions are valid minor changes.)
4. Open a PR with these changes that will land them back on `v0.35.x`.
5. Once this change has landed on the backport branch, make sure to pull it locally, then push a tag.
   - `git tag -a v0.35.1 -m 'Release v0.35.1'`
   - `git push origin v0.35.1`
6. Create a pull request back to master with the CHANGELOG & version changes from the latest release.
   - Remove all `R:minor` labels from the pull requests that were included in the release.
   - Do not merge the backport branch into master.
SECURITY.md

@@ -4,7 +4,7 @@

As part of our [Coordinated Vulnerability Disclosure
Policy](https://tendermint.com/security), we operate a [bug
bounty](https://hackerone.com/tendermint).
bounty](https://hackerone.com/cosmos).
See the policy for more details on submissions and rewards, and see "Example Vulnerabilities" (below) for examples of the kinds of bugs we're most interested in.

### Guidelines

@@ -86,7 +86,7 @@ If you are running older versions of Tendermint Core, we encourage you to upgrad

## Scope

The full scope of our bug bounty program is outlined on our [Hacker One program page](https://hackerone.com/tendermint). Please also note that, in the interest of the safety of our users and staff, a few things are explicitly excluded from scope:
The full scope of our bug bounty program is outlined on our [Hacker One program page](https://hackerone.com/cosmos). Please also note that, in the interest of the safety of our users and staff, a few things are explicitly excluded from scope:

* Any third-party services
* Findings from physical testing, such as office access
UPGRADING.md
@@ -98,7 +98,7 @@ are:

- `blockchain`
- `evidence`

Accordingly, the `node` package was changed to reduce access to
Accordingly, the `node` package changed to reduce access to
tendermint internals: applications that use tendermint as a library
will need to change to accommodate these changes. Most notably:

@@ -109,6 +109,20 @@ will need to change to accommodate these changes. Most notably:

  longer exported and have been replaced with `node.New` and
  `node.NewDefault`, which provide more functional interfaces.

  To access any of the functionality previously available via the
  `node.Node` type, use the `*local.Local` "RPC" client, which exposes
  the full RPC interface as direct function calls. Import the
  `github.com/tendermint/tendermint/rpc/client/local` package and pass
  the node service as in the following:

  ```go
  node := node.NewDefault() // construct the node object
  // start and set up the node service

  client := local.New(node.(local.NodeService))
  // use the client object to interact with the node
  ```
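
To make the migration concrete, here is a slightly fuller sketch of the same pattern with error handling. It assumes the node service has already been constructed and started by the caller (for example via `node.NewDefault` as above); the `Status` call and the helper name are purely illustrative and not part of the upgrade notes.

```go
package example

import (
	"context"
	"fmt"

	"github.com/tendermint/tendermint/rpc/client/local"
)

// UseLocalClient shows how code that previously reached into node.Node
// internals can go through the local RPC client instead. The node service is
// assumed to be constructed and started elsewhere by the caller.
func UseLocalClient(ctx context.Context, n local.NodeService) error {
	// Every RPC method becomes a direct function call on the client.
	client := local.New(n)

	status, err := client.Status(ctx)
	if err != nil {
		return err
	}
	fmt.Println("latest height:", status.SyncInfo.LatestBlockHeight)
	return nil
}
```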

### gRPC Support

gRPC in the RPC layer is deprecated and will be removed in 0.36.
@@ -20,7 +20,7 @@ To get up and running quickly, see the [getting started guide](../docs/app-dev/g

A detailed description of the ABCI methods and message types is contained in:

- [The main spec](https://github.com/tendermint/spec/blob/master/spec/abci/abci.md)
- [A protobuf file](../proto/tendermint/abci/types.proto)
- [A protobuf file](https://github.com/tendermint/spec/blob/master/proto/tendermint/abci/types.proto)
- [A Go interface](./types/application.go)
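For readers following the Go interface linked above, a minimal application skeleton looks roughly like the following; embedding `types.BaseApplication` (which supplies no-op defaults) and overriding `Info` here is purely illustrative.

```go
package example

import (
	"github.com/tendermint/tendermint/abci/types"
)

// MyApp satisfies the ABCI types.Application interface by embedding the
// no-op BaseApplication and overriding only the methods it cares about.
type MyApp struct {
	types.BaseApplication
}

// Info is overridden as an example; any other ABCI method can be overridden
// the same way.
func (app *MyApp) Info(req types.RequestInfo) types.ResponseInfo {
	return types.ResponseInfo{Data: "example abci app"}
}

// Compile-time check that *MyApp really implements the interface.
var _ types.Application = (*MyApp)(nil)
```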

## Protocol Buffers
@@ -6,7 +6,7 @@ import (

	"sync"

	"github.com/tendermint/tendermint/abci/types"
	tmsync "github.com/tendermint/tendermint/internal/libs/sync"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/libs/service"
)

@@ -68,12 +68,12 @@ type Client interface {

// NewClient returns a new ABCI client of the specified transport type.
// It returns an error if the transport is not "socket" or "grpc"
func NewClient(addr, transport string, mustConnect bool) (client Client, err error) {
func NewClient(logger log.Logger, addr, transport string, mustConnect bool) (client Client, err error) {
	switch transport {
	case "socket":
		client = NewSocketClient(addr, mustConnect)
		client = NewSocketClient(logger, addr, mustConnect)
	case "grpc":
		client = NewGRPCClient(addr, mustConnect)
		client = NewGRPCClient(logger, addr, mustConnect)
	default:
		err = fmt.Errorf("unknown abci transport %s", transport)
	}
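A minimal sketch of what a caller looks like after this change: the logger is passed to the constructor (rather than set afterwards with `SetLogger`, which is being removed), and `Start` takes a context. The address and transport values are illustrative.

```go
package example

import (
	"context"

	abciclient "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/libs/log"
)

// startABCIClient dials an ABCI application over the socket transport.
// The address here is just an example; use whatever your app server listens on.
func startABCIClient(ctx context.Context, logger log.Logger) (abciclient.Client, error) {
	client, err := abciclient.NewClient(logger, "tcp://127.0.0.1:26658", "socket", true)
	if err != nil {
		return nil, err
	}
	// Start now takes a context; cancelling it shuts down the client's
	// internal goroutines (see the context-aware routines later in this diff).
	if err := client.Start(ctx); err != nil {
		return nil, err
	}
	return client, nil
}
```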
@@ -87,7 +87,7 @@ type ReqRes struct {

	*sync.WaitGroup
	*types.Response // Not set atomically, so be sure to use WaitGroup.

	mtx tmsync.RWMutex
	mtx sync.Mutex
	done bool // Gets set to true once *after* WaitGroup.Done().
	cb func(*types.Response) // A single callback that may be set.
}

@@ -137,16 +137,16 @@ func (r *ReqRes) InvokeCallback() {

//
// ref: https://github.com/tendermint/tendermint/issues/5439
func (r *ReqRes) GetCallback() func(*types.Response) {
	r.mtx.RLock()
	defer r.mtx.RUnlock()
	r.mtx.Lock()
	defer r.mtx.Unlock()
	return r.cb
}

// SetDone marks the ReqRes object as done.
func (r *ReqRes) SetDone() {
	r.mtx.Lock()
	defer r.mtx.Unlock()
	r.done = true
	r.mtx.Unlock()
}

func waitGroup1() (wg *sync.WaitGroup) {
@@ -2,20 +2,21 @@ package abciclient

import (
	"fmt"
	"sync"

	"github.com/tendermint/tendermint/abci/types"
	tmsync "github.com/tendermint/tendermint/internal/libs/sync"
	"github.com/tendermint/tendermint/libs/log"
)

// Creator creates new ABCI clients.
type Creator func() (Client, error)
type Creator func(log.Logger) (Client, error)

// NewLocalCreator returns a Creator for the given app,
// which will be running locally.
func NewLocalCreator(app types.Application) Creator {
	mtx := new(tmsync.RWMutex)
	mtx := new(sync.Mutex)

	return func() (Client, error) {
	return func(_ log.Logger) (Client, error) {
		return NewLocalClient(mtx, app), nil
	}
}

@@ -23,9 +24,9 @@ func NewLocalCreator(app types.Application) Creator {

// NewRemoteCreator returns a Creator for the given address (e.g.
// "192.168.0.1") and transport (e.g. "tcp"). Set mustConnect to true if you
// want the client to connect before reporting success.
func NewRemoteCreator(addr, transport string, mustConnect bool) Creator {
	return func() (Client, error) {
		remoteApp, err := NewClient(addr, transport, mustConnect)
func NewRemoteCreator(logger log.Logger, addr, transport string, mustConnect bool) Creator {
	return func(log.Logger) (Client, error) {
		remoteApp, err := NewClient(logger, addr, transport, mustConnect)
		if err != nil {
			return nil, fmt.Errorf("failed to connect to proxy: %w", err)
		}
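A small sketch of how the new `Creator` signature is consumed. The kvstore example application stands in for any `types.Application`, and the helper name is illustrative.

```go
package example

import (
	abciclient "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/example/kvstore"
	"github.com/tendermint/tendermint/libs/log"
)

// newLocalKVStoreClient builds a local ABCI client for an in-process app.
func newLocalKVStoreClient(logger log.Logger) (abciclient.Client, error) {
	// NewLocalCreator still takes only the application...
	creator := abciclient.NewLocalCreator(kvstore.NewApplication())

	// ...but the Creator itself now receives the logger when invoked,
	// instead of the client being configured with SetLogger afterwards.
	return creator(logger)
}
```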
@@ -10,7 +10,7 @@ import (

	"google.golang.org/grpc"

	"github.com/tendermint/tendermint/abci/types"
	tmsync "github.com/tendermint/tendermint/internal/libs/sync"
	"github.com/tendermint/tendermint/libs/log"
	tmnet "github.com/tendermint/tendermint/libs/net"
	"github.com/tendermint/tendermint/libs/service"
)

@@ -18,13 +18,15 @@ import (

// A gRPC client.
type grpcClient struct {
	service.BaseService
	logger log.Logger

	mustConnect bool

	client types.ABCIApplicationClient
	conn *grpc.ClientConn
	chReqRes chan *ReqRes // dispatches "async" responses to callbacks *in order*, needed by mempool

	mtx tmsync.RWMutex
	mtx sync.Mutex
	addr string
	err error
	resCb func(*types.Request, *types.Response) // listens to all callbacks

@@ -42,8 +44,9 @@ var _ Client = (*grpcClient)(nil)

// which is expensive, but easy - if you want something better, use the socket
// protocol! maybe one day, if people really want it, we use grpc streams, but
// hopefully not :D
func NewGRPCClient(addr string, mustConnect bool) Client {
func NewGRPCClient(logger log.Logger, addr string, mustConnect bool) Client {
	cli := &grpcClient{
		logger:      logger,
		addr:        addr,
		mustConnect: mustConnect,
		// Buffering the channel is needed to make calls appear asynchronous,

@@ -54,7 +57,7 @@ func NewGRPCClient(addr string, mustConnect bool) Client {

		// gRPC calls while processing a slow callback at the channel head.
		chReqRes: make(chan *ReqRes, 64),
	}
	cli.BaseService = *service.NewBaseService(nil, "grpcClient", cli)
	cli.BaseService = *service.NewBaseService(logger, "grpcClient", cli)
	return cli
}

@@ -62,7 +65,7 @@ func dialerFunc(ctx context.Context, addr string) (net.Conn, error) {

	return tmnet.Connect(addr)
}

func (cli *grpcClient) OnStart() error {
func (cli *grpcClient) OnStart(ctx context.Context) error {
	// This processes asynchronous request/response messages and dispatches
	// them to callbacks.
	go func() {

@@ -84,12 +87,19 @@ func (cli *grpcClient) OnStart() error {

				cb(reqres.Response)
			}
		}
		for reqres := range cli.chReqRes {
			if reqres != nil {
				callCb(reqres)
			} else {
				cli.Logger.Error("Received nil reqres")

		for {
			select {
			case reqres := <-cli.chReqRes:
				if reqres != nil {
					callCb(reqres)
				} else {
					cli.logger.Error("Received nil reqres")
				}
			case <-ctx.Done():
				return
			}

		}
	}()

@@ -100,12 +110,12 @@ RETRY_LOOP:

		if cli.mustConnect {
			return err
		}
		cli.Logger.Error(fmt.Sprintf("abci.grpcClient failed to connect to %v. Retrying...\n", cli.addr), "err", err)
		cli.logger.Error(fmt.Sprintf("abci.grpcClient failed to connect to %v. Retrying...\n", cli.addr), "err", err)
		time.Sleep(time.Second * dialRetryIntervalSeconds)
		continue RETRY_LOOP
	}

	cli.Logger.Info("Dialed server. Waiting for echo.", "addr", cli.addr)
	cli.logger.Info("Dialed server. Waiting for echo.", "addr", cli.addr)
	client := types.NewABCIApplicationClient(conn)
	cli.conn = conn

@@ -115,7 +125,7 @@ RETRY_LOOP:

		if err == nil {
			break ENSURE_CONNECTED
		}
		cli.Logger.Error("Echo failed", "err", err)
		cli.logger.Error("Echo failed", "err", err)
		time.Sleep(time.Second * echoRetryIntervalSeconds)
	}

@@ -142,15 +152,15 @@ func (cli *grpcClient) StopForError(err error) {

	}
	cli.mtx.Unlock()

	cli.Logger.Error(fmt.Sprintf("Stopping abci.grpcClient for error: %v", err.Error()))
	cli.logger.Error(fmt.Sprintf("Stopping abci.grpcClient for error: %v", err.Error()))
	if err := cli.Stop(); err != nil {
		cli.Logger.Error("Error stopping abci.grpcClient", "err", err)
		cli.logger.Error("Error stopping abci.grpcClient", "err", err)
	}
}

func (cli *grpcClient) Error() error {
	cli.mtx.RLock()
	defer cli.mtx.RUnlock()
	cli.mtx.Lock()
	defer cli.mtx.Unlock()
	return cli.err
}

@@ -158,8 +168,8 @@ func (cli *grpcClient) Error() error {

// NOTE: callback may get internally generated flush responses.
func (cli *grpcClient) SetResponseCallback(resCb Callback) {
	cli.mtx.Lock()
	defer cli.mtx.Unlock()
	cli.resCb = resCb
	cli.mtx.Unlock()
}

//----------------------------------------
@@ -2,9 +2,9 @@ package abciclient

import (
	"context"
	"sync"

	types "github.com/tendermint/tendermint/abci/types"
	tmsync "github.com/tendermint/tendermint/internal/libs/sync"
	"github.com/tendermint/tendermint/libs/service"
)

@@ -15,7 +15,7 @@ import (

type localClient struct {
	service.BaseService

	mtx *tmsync.RWMutex
	mtx *sync.Mutex
	types.Application
	Callback
}

@@ -26,20 +26,21 @@ var _ Client = (*localClient)(nil)

// methods of the given app.
//
// Both Async and Sync methods ignore the given context.Context parameter.
func NewLocalClient(mtx *tmsync.RWMutex, app types.Application) Client {
func NewLocalClient(mtx *sync.Mutex, app types.Application) Client {
	if mtx == nil {
		mtx = &tmsync.RWMutex{}
		mtx = new(sync.Mutex)
	}

	cli := &localClient{
		mtx:         mtx,
		Application: app,
	}

	cli.BaseService = *service.NewBaseService(nil, "localClient", cli)
	return cli
}

func (*localClient) OnStart(context.Context) error { return nil }
func (*localClient) OnStop()                       {}

func (app *localClient) SetResponseCallback(cb Callback) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

@@ -67,8 +68,8 @@ func (app *localClient) EchoAsync(ctx context.Context, msg string) (*ReqRes, err

}

func (app *localClient) InfoAsync(ctx context.Context, req types.RequestInfo) (*ReqRes, error) {
	app.mtx.RLock()
	defer app.mtx.RUnlock()
	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.Info(req)
	return app.callback(

@@ -100,8 +101,8 @@ func (app *localClient) CheckTxAsync(ctx context.Context, req types.RequestCheck

}

func (app *localClient) QueryAsync(ctx context.Context, req types.RequestQuery) (*ReqRes, error) {
	app.mtx.RLock()
	defer app.mtx.RUnlock()
	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.Query(req)
	return app.callback(

@@ -215,8 +216,8 @@ func (app *localClient) EchoSync(ctx context.Context, msg string) (*types.Respon

}

func (app *localClient) InfoSync(ctx context.Context, req types.RequestInfo) (*types.ResponseInfo, error) {
	app.mtx.RLock()
	defer app.mtx.RUnlock()
	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.Info(req)
	return &res, nil

@@ -249,8 +250,8 @@ func (app *localClient) QuerySync(

	ctx context.Context,
	req types.RequestQuery,
) (*types.ResponseQuery, error) {
	app.mtx.RLock()
	defer app.mtx.RUnlock()
	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.Query(req)
	return &res, nil
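For code constructing the local client directly, the mutex parameter is now a plain `*sync.Mutex` and may be nil; a brief illustrative sketch (the kvstore app is again only an example):

```go
package example

import (
	"sync"

	abciclient "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/example/kvstore"
)

func newLocalClients() (abciclient.Client, abciclient.Client) {
	app := kvstore.NewApplication()

	// Passing nil lets the client allocate its own sync.Mutex.
	a := abciclient.NewLocalClient(nil, app)

	// Pass an explicit mutex only if several clients must serialize access
	// to the same application.
	mtx := new(sync.Mutex)
	b := abciclient.NewLocalClient(mtx, app)

	return a, b
}
```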
@@ -7,8 +7,6 @@ import (

	abciclient "github.com/tendermint/tendermint/abci/client"

	log "github.com/tendermint/tendermint/libs/log"

	mock "github.com/stretchr/testify/mock"

	types "github.com/tendermint/tendermint/abci/types"

@@ -636,39 +634,6 @@ func (_m *Client) OfferSnapshotSync(_a0 context.Context, _a1 types.RequestOfferS

	return r0, r1
}

// OnReset provides a mock function with given fields:
func (_m *Client) OnReset() error {
	ret := _m.Called()

	var r0 error
	if rf, ok := ret.Get(0).(func() error); ok {
		r0 = rf()
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// OnStart provides a mock function with given fields:
func (_m *Client) OnStart() error {
	ret := _m.Called()

	var r0 error
	if rf, ok := ret.Get(0).(func() error); ok {
		r0 = rf()
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// OnStop provides a mock function with given fields:
func (_m *Client) OnStop() {
	_m.Called()
}

// QueryAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) QueryAsync(_a0 context.Context, _a1 types.RequestQuery) (*abciclient.ReqRes, error) {
	ret := _m.Called(_a0, _a1)

@@ -715,67 +680,18 @@ func (_m *Client) QuerySync(_a0 context.Context, _a1 types.RequestQuery) (*types

	return r0, r1
}

// Quit provides a mock function with given fields:
func (_m *Client) Quit() <-chan struct{} {
	ret := _m.Called()

	var r0 <-chan struct{}
	if rf, ok := ret.Get(0).(func() <-chan struct{}); ok {
		r0 = rf()
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(<-chan struct{})
		}
	}

	return r0
}

// Reset provides a mock function with given fields:
func (_m *Client) Reset() error {
	ret := _m.Called()

	var r0 error
	if rf, ok := ret.Get(0).(func() error); ok {
		r0 = rf()
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// SetLogger provides a mock function with given fields: _a0
func (_m *Client) SetLogger(_a0 log.Logger) {
	_m.Called(_a0)
}

// SetResponseCallback provides a mock function with given fields: _a0
func (_m *Client) SetResponseCallback(_a0 abciclient.Callback) {
	_m.Called(_a0)
}

// Start provides a mock function with given fields:
func (_m *Client) Start() error {
	ret := _m.Called()
// Start provides a mock function with given fields: _a0
func (_m *Client) Start(_a0 context.Context) error {
	ret := _m.Called(_a0)

	var r0 error
	if rf, ok := ret.Get(0).(func() error); ok {
		r0 = rf()
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// Stop provides a mock function with given fields:
func (_m *Client) Stop() error {
	ret := _m.Called()

	var r0 error
	if rf, ok := ret.Get(0).(func() error); ok {
		r0 = rf()
	if rf, ok := ret.Get(0).(func(context.Context) error); ok {
		r0 = rf(_a0)
	} else {
		r0 = ret.Error(0)
	}
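Tests that stub the regenerated mock need to match the new `Start(context.Context)` signature; a small illustrative sketch using testify's standard expectation API (the import path for the mocks package is assumed from the diff above):

```go
package example_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	"github.com/tendermint/tendermint/abci/client/mocks"
)

func TestStartMockedClient(t *testing.T) {
	m := &mocks.Client{}
	// The expectation now includes the context argument.
	m.On("Start", mock.Anything).Return(nil)

	require.NoError(t, m.Start(context.Background()))
	m.AssertExpectations(t)
}
```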
@@ -9,10 +9,11 @@ import (

	"io"
	"net"
	"reflect"
	"sync"
	"time"

	"github.com/tendermint/tendermint/abci/types"
	tmsync "github.com/tendermint/tendermint/internal/libs/sync"
	"github.com/tendermint/tendermint/libs/log"
	tmnet "github.com/tendermint/tendermint/libs/net"
	"github.com/tendermint/tendermint/libs/service"
)

@@ -32,6 +33,7 @@ type reqResWithContext struct {

// general is not meant to be interfaced with concurrent callers.
type socketClient struct {
	service.BaseService
	logger log.Logger

	addr        string
	mustConnect bool

@@ -39,7 +41,7 @@ type socketClient struct {

	reqQueue chan *reqResWithContext

	mtx tmsync.RWMutex
	mtx sync.Mutex
	err error
	reqSent *list.List // list of requests sent, waiting for response
	resCb func(*types.Request, *types.Response) // called on all requests, if set.

@@ -50,22 +52,22 @@ var _ Client = (*socketClient)(nil)

// NewSocketClient creates a new socket client, which connects to a given
// address. If mustConnect is true, the client will return an error upon start
// if it fails to connect.
func NewSocketClient(addr string, mustConnect bool) Client {
func NewSocketClient(logger log.Logger, addr string, mustConnect bool) Client {
	cli := &socketClient{
		logger:      logger,
		reqQueue:    make(chan *reqResWithContext, reqQueueSize),
		mustConnect: mustConnect,

		addr: addr,
		reqSent: list.New(),
		resCb: nil,
		addr:    addr,
		reqSent: list.New(),
		resCb:   nil,
	}
	cli.BaseService = *service.NewBaseService(nil, "socketClient", cli)
	cli.BaseService = *service.NewBaseService(logger, "socketClient", cli)
	return cli
}

// OnStart implements Service by connecting to the server and spawning reading
// and writing goroutines.
func (cli *socketClient) OnStart() error {
func (cli *socketClient) OnStart(ctx context.Context) error {
	var (
		err error
		conn net.Conn

@@ -77,15 +79,15 @@ func (cli *socketClient) OnStart() error {

		if cli.mustConnect {
			return err
		}
		cli.Logger.Error(fmt.Sprintf("abci.socketClient failed to connect to %v. Retrying after %vs...",
		cli.logger.Error(fmt.Sprintf("abci.socketClient failed to connect to %v. Retrying after %vs...",
			cli.addr, dialRetryIntervalSeconds), "err", err)
		time.Sleep(time.Second * dialRetryIntervalSeconds)
		continue
	}
	cli.conn = conn

	go cli.sendRequestsRoutine(conn)
	go cli.recvResponseRoutine(conn)
	go cli.sendRequestsRoutine(ctx, conn)
	go cli.recvResponseRoutine(ctx, conn)

	return nil
}

@@ -102,8 +104,8 @@ func (cli *socketClient) OnStop() {

// Error returns an error if the client was stopped abruptly.
func (cli *socketClient) Error() error {
	cli.mtx.RLock()
	defer cli.mtx.RUnlock()
	cli.mtx.Lock()
	defer cli.mtx.Unlock()
	return cli.err
}

@@ -119,13 +121,19 @@ func (cli *socketClient) SetResponseCallback(resCb Callback) {

//----------------------------------------

func (cli *socketClient) sendRequestsRoutine(conn io.Writer) {
func (cli *socketClient) sendRequestsRoutine(ctx context.Context, conn io.Writer) {
	bw := bufio.NewWriter(conn)
	for {
		select {
		case <-ctx.Done():
			return
		case reqres := <-cli.reqQueue:
			if ctx.Err() != nil {
				return
			}

			if reqres.C.Err() != nil {
				cli.Logger.Debug("Request's context is done", "req", reqres.R, "err", reqres.C.Err())
				cli.logger.Debug("Request's context is done", "req", reqres.R, "err", reqres.C.Err())
				continue
			}
			cli.willSendReq(reqres.R)

@@ -138,16 +146,16 @@ func (cli *socketClient) sendRequestsRoutine(conn io.Writer) {

				cli.stopForError(fmt.Errorf("flush buffer: %w", err))
				return
			}

		case <-cli.Quit():
			return
		}
	}
}

func (cli *socketClient) recvResponseRoutine(conn io.Reader) {
func (cli *socketClient) recvResponseRoutine(ctx context.Context, conn io.Reader) {
	r := bufio.NewReader(conn)
	for {
		if ctx.Err() != nil {
			return
		}
		var res = &types.Response{}
		err := types.ReadMessage(r, res)
		if err != nil {

@@ -155,7 +163,7 @@ func (cli *socketClient) recvResponseRoutine(conn io.Reader) {

			return
		}

		// cli.Logger.Debug("Received response", "responseType", reflect.TypeOf(res), "response", res)
		// cli.logger.Debug("Received response", "responseType", reflect.TypeOf(res), "response", res)

		switch r := res.Value.(type) {
		case *types.Response_Exception: // app responded with error

@@ -582,8 +590,8 @@ func (cli *socketClient) stopForError(err error) {

	cli.err = err
	cli.mtx.Unlock()

	cli.Logger.Info("Stopping abci.socketClient", "reason", err)
	cli.logger.Info("Stopping abci.socketClient", "reason", err)
	if err := cli.Stop(); err != nil {
		cli.Logger.Error("Error stopping abci.socketClient", "err", err)
		cli.logger.Error("Error stopping abci.socketClient", "err", err)
	}
}
@@ -14,32 +14,25 @@ import (

	abciclient "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/server"
	"github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/libs/service"
)

var ctx = context.Background()

func TestProperSyncCalls(t *testing.T) {
	app := slowApp{}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	s, c := setupClientServer(t, app)
	t.Cleanup(func() {
		if err := s.Stop(); err != nil {
			t.Error(err)
		}
	})
	t.Cleanup(func() {
		if err := c.Stop(); err != nil {
			t.Error(err)
		}
	})
	app := slowApp{}
	logger := log.TestingLogger()

	_, c := setupClientServer(ctx, t, logger, app)

	resp := make(chan error, 1)
	go func() {
		// This is BeginBlockSync unrolled....
		reqres, err := c.BeginBlockAsync(ctx, types.RequestBeginBlock{})
		assert.NoError(t, err)
		err = c.FlushSync(context.Background())
		err = c.FlushSync(ctx)
		assert.NoError(t, err)
		res := reqres.Response.GetBeginBlock()
		assert.NotNil(t, res)

@@ -55,64 +48,29 @@ func TestProperSyncCalls(t *testing.T) {

	}
}

func TestHangingSyncCalls(t *testing.T) {
	app := slowApp{}
func setupClientServer(
	ctx context.Context,
	t *testing.T,
	logger log.Logger,
	app types.Application,
) (service.Service, abciclient.Client) {
	t.Helper()

	s, c := setupClientServer(t, app)
	t.Cleanup(func() {
		if err := s.Stop(); err != nil {
			t.Log(err)
		}
	})
	t.Cleanup(func() {
		if err := c.Stop(); err != nil {
			t.Log(err)
		}
	})

	resp := make(chan error, 1)
	go func() {
		// Start BeginBlock and flush it
		reqres, err := c.BeginBlockAsync(ctx, types.RequestBeginBlock{})
		assert.NoError(t, err)
		flush, err := c.FlushAsync(ctx)
		assert.NoError(t, err)
		// wait 20 ms for all events to travel socket, but
		// no response yet from server
		time.Sleep(20 * time.Millisecond)
		// kill the server, so the connections break
		err = s.Stop()
		assert.NoError(t, err)

		// wait for the response from BeginBlock
		reqres.Wait()
		flush.Wait()
		resp <- c.Error()
	}()

	select {
	case <-time.After(time.Second):
		require.Fail(t, "No response arrived")
	case err, ok := <-resp:
		require.True(t, ok, "Must not close channel")
		assert.Error(t, err, "We should get EOF error")
	}
}

func setupClientServer(t *testing.T, app types.Application) (
	service.Service, abciclient.Client) {
	// some port between 20k and 30k
	port := 20000 + rand.Int31()%10000
	addr := fmt.Sprintf("localhost:%d", port)

	s, err := server.NewServer(addr, "socket", app)
	require.NoError(t, err)
	err = s.Start()
	s, err := server.NewServer(logger, addr, "socket", app)
	require.NoError(t, err)
	require.NoError(t, s.Start(ctx))
	t.Cleanup(s.Wait)

	c := abciclient.NewSocketClient(addr, true)
	err = c.Start()
	require.NoError(t, err)
	c := abciclient.NewSocketClient(logger, addr, true)
	require.NoError(t, c.Start(ctx))
	t.Cleanup(c.Wait)

	require.True(t, s.IsRunning())
	require.True(t, c.IsRunning())

	return s, c
}
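A hedged sketch of how another test in the same file could reuse the new `setupClientServer` helper; it relies on `slowApp` and the imports already present in that file, and the echo round-trip and assertions are illustrative.

```go
func TestEchoRoundTrip(t *testing.T) {
	// Assumes this lives alongside setupClientServer and slowApp, so the
	// required imports (context, testing, log, require) already exist there.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	logger := log.TestingLogger()
	_, c := setupClientServer(ctx, t, logger, slowApp{})

	res, err := c.EchoSync(ctx, "hello")
	require.NoError(t, err)
	require.Equal(t, "hello", res.Message)
}
```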
@@ -2,18 +2,18 @@ package main

import (
	"bufio"
	"context"
	"encoding/hex"
	"errors"
	"fmt"
	"io"
	"os"
	"os/signal"
	"strings"
	"syscall"

	"github.com/spf13/cobra"

	"github.com/tendermint/tendermint/libs/log"
	tmos "github.com/tendermint/tendermint/libs/os"

	abciclient "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/example/code"

@@ -29,8 +29,6 @@ import (

var (
	client abciclient.Client
	logger log.Logger

	ctx = context.Background()
)

// flags

@@ -67,12 +65,12 @@ var RootCmd = &cobra.Command{

		if client == nil {
			var err error
			client, err = abciclient.NewClient(flagAddress, flagAbci, false)
			client, err = abciclient.NewClient(logger.With("module", "abci-client"), flagAddress, flagAbci, false)
			if err != nil {
				return err
			}
			client.SetLogger(logger.With("module", "abci-client"))
			if err := client.Start(); err != nil {

			if err := client.Start(cmd.Context()); err != nil {
				return err
			}
		}

@@ -292,23 +290,24 @@ func compose(fs []func() error) error {

}

func cmdTest(cmd *cobra.Command, args []string) error {
	ctx := cmd.Context()
	return compose(
		[]func() error{
			func() error { return servertest.InitChain(client) },
			func() error { return servertest.Commit(client, nil) },
			func() error { return servertest.DeliverTx(client, []byte("abc"), code.CodeTypeBadNonce, nil) },
			func() error { return servertest.Commit(client, nil) },
			func() error { return servertest.DeliverTx(client, []byte{0x00}, code.CodeTypeOK, nil) },
			func() error { return servertest.Commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 1}) },
			func() error { return servertest.DeliverTx(client, []byte{0x00}, code.CodeTypeBadNonce, nil) },
			func() error { return servertest.DeliverTx(client, []byte{0x01}, code.CodeTypeOK, nil) },
			func() error { return servertest.DeliverTx(client, []byte{0x00, 0x02}, code.CodeTypeOK, nil) },
			func() error { return servertest.DeliverTx(client, []byte{0x00, 0x03}, code.CodeTypeOK, nil) },
			func() error { return servertest.DeliverTx(client, []byte{0x00, 0x00, 0x04}, code.CodeTypeOK, nil) },
			func() error { return servertest.InitChain(ctx, client) },
			func() error { return servertest.Commit(ctx, client, nil) },
			func() error { return servertest.DeliverTx(ctx, client, []byte("abc"), code.CodeTypeBadNonce, nil) },
			func() error { return servertest.Commit(ctx, client, nil) },
			func() error { return servertest.DeliverTx(ctx, client, []byte{0x00}, code.CodeTypeOK, nil) },
			func() error { return servertest.Commit(ctx, client, []byte{0, 0, 0, 0, 0, 0, 0, 1}) },
			func() error { return servertest.DeliverTx(ctx, client, []byte{0x00}, code.CodeTypeBadNonce, nil) },
			func() error { return servertest.DeliverTx(ctx, client, []byte{0x01}, code.CodeTypeOK, nil) },
			func() error { return servertest.DeliverTx(ctx, client, []byte{0x00, 0x02}, code.CodeTypeOK, nil) },
			func() error { return servertest.DeliverTx(ctx, client, []byte{0x00, 0x03}, code.CodeTypeOK, nil) },
			func() error { return servertest.DeliverTx(ctx, client, []byte{0x00, 0x00, 0x04}, code.CodeTypeOK, nil) },
			func() error {
				return servertest.DeliverTx(client, []byte{0x00, 0x00, 0x06}, code.CodeTypeBadNonce, nil)
				return servertest.DeliverTx(ctx, client, []byte{0x00, 0x00, 0x06}, code.CodeTypeBadNonce, nil)
			},
			func() error { return servertest.Commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 5}) },
			func() error { return servertest.Commit(ctx, client, []byte{0, 0, 0, 0, 0, 0, 0, 5}) },
		})
}

@@ -443,13 +442,15 @@ func cmdEcho(cmd *cobra.Command, args []string) error {

	if len(args) > 0 {
		msg = args[0]
	}
	res, err := client.EchoSync(ctx, msg)
	res, err := client.EchoSync(cmd.Context(), msg)
	if err != nil {
		return err
	}

	printResponse(cmd, args, response{
		Data: []byte(res.Message),
	})

	return nil
}

@@ -459,7 +460,7 @@ func cmdInfo(cmd *cobra.Command, args []string) error {

	if len(args) == 1 {
		version = args[0]
	}
	res, err := client.InfoSync(ctx, types.RequestInfo{Version: version})
	res, err := client.InfoSync(cmd.Context(), types.RequestInfo{Version: version})
	if err != nil {
		return err
	}

@@ -484,7 +485,7 @@ func cmdDeliverTx(cmd *cobra.Command, args []string) error {

	if err != nil {
		return err
	}
	res, err := client.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: txBytes})
	res, err := client.DeliverTxSync(cmd.Context(), types.RequestDeliverTx{Tx: txBytes})
	if err != nil {
		return err
	}

@@ -510,7 +511,7 @@ func cmdCheckTx(cmd *cobra.Command, args []string) error {

	if err != nil {
		return err
	}
	res, err := client.CheckTxSync(ctx, types.RequestCheckTx{Tx: txBytes})
	res, err := client.CheckTxSync(cmd.Context(), types.RequestCheckTx{Tx: txBytes})
	if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -525,7 +526,7 @@ func cmdCheckTx(cmd *cobra.Command, args []string) error {
|
||||
|
||||
// Get application Merkle root hash
|
||||
func cmdCommit(cmd *cobra.Command, args []string) error {
|
||||
res, err := client.CommitSync(ctx)
|
||||
res, err := client.CommitSync(cmd.Context())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -550,7 +551,7 @@ func cmdQuery(cmd *cobra.Command, args []string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
resQuery, err := client.QuerySync(ctx, types.RequestQuery{
|
||||
resQuery, err := client.QuerySync(cmd.Context(), types.RequestQuery{
|
||||
Data: queryBytes,
|
||||
Path: flagPath,
|
||||
Height: int64(flagHeight),
|
||||
@@ -586,25 +587,21 @@ func cmdKVStore(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
|
||||
// Start the listener
|
||||
srv, err := server.NewServer(flagAddress, flagAbci, app)
|
||||
srv, err := server.NewServer(logger.With("module", "abci-server"), flagAddress, flagAbci, app)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
srv.SetLogger(logger.With("module", "abci-server"))
|
||||
if err := srv.Start(); err != nil {
|
||||
|
||||
ctx, cancel := signal.NotifyContext(cmd.Context(), syscall.SIGTERM)
|
||||
defer cancel()
|
||||
|
||||
if err := srv.Start(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Stop upon receiving SIGTERM or CTRL-C.
|
||||
tmos.TrapSignal(logger, func() {
|
||||
// Cleanup
|
||||
if err := srv.Stop(); err != nil {
|
||||
logger.Error("Error while stopping server", "err", err)
|
||||
}
|
||||
})
|
||||
|
||||
// Run forever.
|
||||
select {}
|
||||
<-ctx.Done()
|
||||
return nil
|
||||
}
|
||||
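The kvstore command above swaps TrapSignal plus `select {}` for a SIGTERM-scoped context: the command blocks on ctx.Done() and the server shuts itself down when the context is cancelled. A hedged sketch of that flow in isolation (the function name is illustrative):

func runServerUntilSignal(cmd *cobra.Command, srv service.Service) error {
	// Cancelled on SIGTERM or when the parent command context ends.
	ctx, cancel := signal.NotifyContext(cmd.Context(), syscall.SIGTERM)
	defer cancel()

	if err := srv.Start(ctx); err != nil {
		return err
	}

	// Block until the signal arrives; the service observes the same
	// context and stops itself, so no explicit Stop call is needed.
	<-ctx.Done()
	return nil
}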
|
||||
//--------------------------------------------------------------------------------
|
||||
|
||||
@@ -29,47 +29,48 @@ func init() {
|
||||
}
|
||||
|
||||
func TestKVStore(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
fmt.Println("### Testing KVStore")
|
||||
testStream(t, kvstore.NewApplication())
|
||||
testStream(ctx, t, kvstore.NewApplication())
|
||||
}
|
||||
|
||||
func TestBaseApp(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
fmt.Println("### Testing BaseApp")
|
||||
testStream(t, types.NewBaseApplication())
|
||||
testStream(ctx, t, types.NewBaseApplication())
|
||||
}
|
||||
|
||||
func TestGRPC(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
fmt.Println("### Testing GRPC")
|
||||
testGRPCSync(t, types.NewGRPCApplication(types.NewBaseApplication()))
|
||||
testGRPCSync(ctx, t, types.NewGRPCApplication(types.NewBaseApplication()))
|
||||
}
|
||||
|
||||
func testStream(t *testing.T, app types.Application) {
|
||||
func testStream(ctx context.Context, t *testing.T, app types.Application) {
|
||||
t.Helper()
|
||||
|
||||
const numDeliverTxs = 20000
|
||||
socketFile := fmt.Sprintf("test-%08x.sock", rand.Int31n(1<<30))
|
||||
defer os.Remove(socketFile)
|
||||
socket := fmt.Sprintf("unix://%v", socketFile)
|
||||
|
||||
logger := log.TestingLogger()
|
||||
// Start the listener
|
||||
server := abciserver.NewSocketServer(socket, app)
|
||||
server.SetLogger(log.TestingLogger().With("module", "abci-server"))
|
||||
err := server.Start()
|
||||
server := abciserver.NewSocketServer(logger.With("module", "abci-server"), socket, app)
|
||||
t.Cleanup(server.Wait)
|
||||
err := server.Start(ctx)
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() {
|
||||
if err := server.Stop(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
})
|
||||
|
||||
// Connect to the socket
|
||||
client := abciclient.NewSocketClient(socket, false)
|
||||
client.SetLogger(log.TestingLogger().With("module", "abci-client"))
|
||||
err = client.Start()
|
||||
client := abciclient.NewSocketClient(log.TestingLogger().With("module", "abci-client"), socket, false)
|
||||
t.Cleanup(client.Wait)
|
||||
|
||||
err = client.Start(ctx)
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() {
|
||||
if err := client.Stop(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
})
|
||||
|
||||
done := make(chan struct{})
|
||||
counter := 0
|
||||
@@ -98,8 +99,6 @@ func testStream(t *testing.T, app types.Application) {
|
||||
}
|
||||
})
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// Write requests
|
||||
for counter := 0; counter < numDeliverTxs; counter++ {
|
||||
// Send request
|
||||
@@ -127,24 +126,20 @@ func dialerFunc(ctx context.Context, addr string) (net.Conn, error) {
|
||||
return tmnet.Connect(addr)
|
||||
}
|
||||
|
||||
func testGRPCSync(t *testing.T, app types.ABCIApplicationServer) {
|
||||
func testGRPCSync(ctx context.Context, t *testing.T, app types.ABCIApplicationServer) {
|
||||
numDeliverTxs := 2000
|
||||
socketFile := fmt.Sprintf("test-%08x.sock", rand.Int31n(1<<30))
|
||||
socketFile := fmt.Sprintf("/tmp/test-%08x.sock", rand.Int31n(1<<30))
|
||||
defer os.Remove(socketFile)
|
||||
socket := fmt.Sprintf("unix://%v", socketFile)
|
||||
|
||||
logger := log.TestingLogger()
|
||||
// Start the listener
|
||||
server := abciserver.NewGRPCServer(socket, app)
|
||||
server.SetLogger(log.TestingLogger().With("module", "abci-server"))
|
||||
if err := server.Start(); err != nil {
|
||||
server := abciserver.NewGRPCServer(logger.With("module", "abci-server"), socket, app)
|
||||
|
||||
if err := server.Start(ctx); err != nil {
|
||||
t.Fatalf("Error starting GRPC server: %v", err.Error())
|
||||
}
|
||||
|
||||
t.Cleanup(func() {
|
||||
if err := server.Stop(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
})
|
||||
t.Cleanup(func() { server.Wait() })
|
||||
|
||||
// Connect to the socket
|
||||
conn, err := grpc.Dial(socket, grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc))
|
||||
|
||||
@@ -3,7 +3,7 @@ package kvstore
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
@@ -24,8 +24,6 @@ const (
|
||||
testValue = "def"
|
||||
)
|
||||
|
||||
var ctx = context.Background()
|
||||
|
||||
func testKVStore(t *testing.T, app types.Application, tx []byte, key, value string) {
|
||||
req := types.RequestDeliverTx{Tx: tx}
|
||||
ar := app.DeliverTx(req)
|
||||
@@ -74,7 +72,7 @@ func TestKVStoreKV(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestPersistentKVStoreKV(t *testing.T) {
|
||||
dir, err := ioutil.TempDir("/tmp", "abci-kvstore-test") // TODO
|
||||
dir, err := os.MkdirTemp("/tmp", "abci-kvstore-test") // TODO
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -90,7 +88,7 @@ func TestPersistentKVStoreKV(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestPersistentKVStoreInfo(t *testing.T) {
|
||||
dir, err := ioutil.TempDir("/tmp", "abci-kvstore-test") // TODO
|
||||
dir, err := os.MkdirTemp("/tmp", "abci-kvstore-test") // TODO
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -122,7 +120,7 @@ func TestPersistentKVStoreInfo(t *testing.T) {
|
||||
|
||||
// add a validator, remove a validator, update a validator
|
||||
func TestValUpdates(t *testing.T) {
|
||||
dir, err := ioutil.TempDir("/tmp", "abci-kvstore-test") // TODO
|
||||
dir, err := os.MkdirTemp("/tmp", "abci-kvstore-test") // TODO
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -229,103 +227,103 @@ func valsEqual(t *testing.T, vals1, vals2 []types.ValidatorUpdate) {
|
||||
}
|
||||
}
|
||||
|
||||
func makeSocketClientServer(app types.Application, name string) (abciclient.Client, service.Service, error) {
|
||||
func makeSocketClientServer(
|
||||
ctx context.Context,
|
||||
t *testing.T,
|
||||
logger log.Logger,
|
||||
app types.Application,
|
||||
name string,
|
||||
) (abciclient.Client, service.Service, error) {
|
||||
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
t.Cleanup(cancel)
|
||||
|
||||
// Start the listener
|
||||
socket := fmt.Sprintf("unix://%s.sock", name)
|
||||
logger := log.TestingLogger()
|
||||
|
||||
server := abciserver.NewSocketServer(socket, app)
|
||||
server.SetLogger(logger.With("module", "abci-server"))
|
||||
if err := server.Start(); err != nil {
|
||||
server := abciserver.NewSocketServer(logger.With("module", "abci-server"), socket, app)
|
||||
if err := server.Start(ctx); err != nil {
|
||||
cancel()
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// Connect to the socket
|
||||
client := abciclient.NewSocketClient(socket, false)
|
||||
client.SetLogger(logger.With("module", "abci-client"))
|
||||
if err := client.Start(); err != nil {
|
||||
if err = server.Stop(); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
client := abciclient.NewSocketClient(logger.With("module", "abci-client"), socket, false)
|
||||
if err := client.Start(ctx); err != nil {
|
||||
cancel()
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return client, server, nil
|
||||
}
|
||||
|
||||
func makeGRPCClientServer(app types.Application, name string) (abciclient.Client, service.Service, error) {
|
||||
func makeGRPCClientServer(
|
||||
ctx context.Context,
|
||||
t *testing.T,
|
||||
logger log.Logger,
|
||||
app types.Application,
|
||||
name string,
|
||||
) (abciclient.Client, service.Service, error) {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
t.Cleanup(cancel)
|
||||
// Start the listener
|
||||
socket := fmt.Sprintf("unix://%s.sock", name)
|
||||
logger := log.TestingLogger()
|
||||
|
||||
gapp := types.NewGRPCApplication(app)
|
||||
server := abciserver.NewGRPCServer(socket, gapp)
|
||||
server.SetLogger(logger.With("module", "abci-server"))
|
||||
if err := server.Start(); err != nil {
|
||||
server := abciserver.NewGRPCServer(logger.With("module", "abci-server"), socket, gapp)
|
||||
|
||||
if err := server.Start(ctx); err != nil {
|
||||
cancel()
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
client := abciclient.NewGRPCClient(socket, true)
|
||||
client.SetLogger(logger.With("module", "abci-client"))
|
||||
if err := client.Start(); err != nil {
|
||||
if err := server.Stop(); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
client := abciclient.NewGRPCClient(logger.With("module", "abci-client"), socket, true)
|
||||
|
||||
if err := client.Start(ctx); err != nil {
|
||||
cancel()
|
||||
return nil, nil, err
|
||||
}
|
||||
return client, server, nil
|
||||
}
|
||||
|
||||
func TestClientServer(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
logger := log.TestingLogger()
|
||||
|
||||
// set up socket app
|
||||
kvstore := NewApplication()
|
||||
client, server, err := makeSocketClientServer(kvstore, "kvstore-socket")
|
||||
client, server, err := makeSocketClientServer(ctx, t, logger, kvstore, "kvstore-socket")
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() {
|
||||
if err := server.Stop(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
})
|
||||
t.Cleanup(func() {
|
||||
if err := client.Stop(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
})
|
||||
t.Cleanup(func() { cancel(); server.Wait() })
|
||||
t.Cleanup(func() { cancel(); client.Wait() })
|
||||
|
||||
runClientTests(t, client)
|
||||
runClientTests(ctx, t, client)
|
||||
|
||||
// set up grpc app
|
||||
kvstore = NewApplication()
|
||||
gclient, gserver, err := makeGRPCClientServer(kvstore, "kvstore-grpc")
|
||||
gclient, gserver, err := makeGRPCClientServer(ctx, t, logger, kvstore, "/tmp/kvstore-grpc")
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Cleanup(func() {
|
||||
if err := gserver.Stop(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
})
|
||||
t.Cleanup(func() {
|
||||
if err := gclient.Stop(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
})
|
||||
t.Cleanup(func() { cancel(); gserver.Wait() })
|
||||
t.Cleanup(func() { cancel(); gclient.Wait() })
|
||||
|
||||
runClientTests(t, gclient)
|
||||
runClientTests(ctx, t, gclient)
|
||||
}
|
||||
|
||||
func runClientTests(t *testing.T, client abciclient.Client) {
|
||||
func runClientTests(ctx context.Context, t *testing.T, client abciclient.Client) {
|
||||
// run some tests....
|
||||
key := testKey
|
||||
value := key
|
||||
tx := []byte(key)
|
||||
testClient(t, client, tx, key, value)
|
||||
testClient(ctx, t, client, tx, key, value)
|
||||
|
||||
value = testValue
|
||||
tx = []byte(key + "=" + value)
|
||||
testClient(t, client, tx, key, value)
|
||||
testClient(ctx, t, client, tx, key, value)
|
||||
}
|
||||
|
||||
func testClient(t *testing.T, app abciclient.Client, tx []byte, key, value string) {
|
||||
func testClient(ctx context.Context, t *testing.T, app abciclient.Client, tx []byte, key, value string) {
|
||||
ar, err := app.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: tx})
|
||||
require.NoError(t, err)
|
||||
require.False(t, ar.IsErr(), ar)
|
||||
|
||||
@@ -1,17 +1,20 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
|
||||
"google.golang.org/grpc"
|
||||
|
||||
"github.com/tendermint/tendermint/abci/types"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmnet "github.com/tendermint/tendermint/libs/net"
|
||||
"github.com/tendermint/tendermint/libs/service"
|
||||
)
|
||||
|
||||
type GRPCServer struct {
|
||||
service.BaseService
|
||||
logger log.Logger
|
||||
|
||||
proto string
|
||||
addr string
|
||||
@@ -22,20 +25,21 @@ type GRPCServer struct {
|
||||
}
|
||||
|
||||
// NewGRPCServer returns a new gRPC ABCI server
|
||||
func NewGRPCServer(protoAddr string, app types.ABCIApplicationServer) service.Service {
|
||||
func NewGRPCServer(logger log.Logger, protoAddr string, app types.ABCIApplicationServer) service.Service {
|
||||
proto, addr := tmnet.ProtocolAndAddress(protoAddr)
|
||||
s := &GRPCServer{
|
||||
logger: logger,
|
||||
proto: proto,
|
||||
addr: addr,
|
||||
listener: nil,
|
||||
app: app,
|
||||
}
|
||||
s.BaseService = *service.NewBaseService(nil, "ABCIServer", s)
|
||||
s.BaseService = *service.NewBaseService(logger, "ABCIServer", s)
|
||||
return s
|
||||
}
|
||||
|
||||
// OnStart starts the gRPC service.
func (s *GRPCServer) OnStart() error {
func (s *GRPCServer) OnStart(ctx context.Context) error {

ln, err := net.Listen(s.proto, s.addr)
if err != nil {
@@ -46,10 +50,15 @@ func (s *GRPCServer) OnStart() error {
s.server = grpc.NewServer()
types.RegisterABCIApplicationServer(s.server, s.app)

s.Logger.Info("Listening", "proto", s.proto, "addr", s.addr)
s.logger.Info("Listening", "proto", s.proto, "addr", s.addr)
go func() {
go func() {
<-ctx.Done()
s.server.GracefulStop()
}()

if err := s.server.Serve(s.listener); err != nil {
s.Logger.Error("Error serving gRPC server", "err", err)
s.logger.Error("Error serving gRPC server", "err", err)
}
}()
return nil
|
||||
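With OnStart accepting a context, a caller shuts the gRPC ABCI server down by cancelling that context instead of calling Stop; the goroutine added above then invokes GracefulStop. A rough usage sketch (the helper and its arguments are assumptions, while the constructor and Start/Wait calls follow the new API):

func serveGRPC(ctx context.Context, logger log.Logger, addr string, app types.ABCIApplicationServer) error {
	srv := abciserver.NewGRPCServer(logger.With("module", "abci-server"), addr, app)
	if err := srv.Start(ctx); err != nil {
		return err
	}
	// GracefulStop fires inside OnStart once ctx is cancelled; the
	// caller only waits for the service to wind down.
	<-ctx.Done()
	srv.Wait()
	return nil
}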
|
||||
@@ -12,17 +12,18 @@ import (
"fmt"

"github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/service"
)

func NewServer(protoAddr, transport string, app types.Application) (service.Service, error) {
func NewServer(logger log.Logger, protoAddr, transport string, app types.Application) (service.Service, error) {
var s service.Service
var err error
switch transport {
case "socket":
s = NewSocketServer(protoAddr, app)
s = NewSocketServer(logger, protoAddr, app)
case "grpc":
s = NewGRPCServer(protoAddr, types.NewGRPCApplication(app))
s = NewGRPCServer(logger, protoAddr, types.NewGRPCApplication(app))
default:
err = fmt.Errorf("unknown server type %s", transport)
}
|
||||
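A short usage sketch of the logger-first NewServer above; the address and transport values are illustrative, and the call shape matches the new signature:

// Build a socket (or "grpc") ABCI server and run it until ctx is cancelled.
srv, err := server.NewServer(logger.With("module", "abci-server"), "tcp://0.0.0.0:26658", "socket", app)
if err != nil {
	return err // e.g. "unknown server type" for an unsupported transport
}
if err := srv.Start(ctx); err != nil {
	return err
}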
|
||||
@@ -2,15 +2,15 @@ package server
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"os"
|
||||
"runtime"
|
||||
"sync"
|
||||
|
||||
"github.com/tendermint/tendermint/abci/types"
|
||||
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
|
||||
tmlog "github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmnet "github.com/tendermint/tendermint/libs/net"
|
||||
"github.com/tendermint/tendermint/libs/service"
|
||||
)
|
||||
@@ -19,61 +19,58 @@ import (
|
||||
|
||||
type SocketServer struct {
|
||||
service.BaseService
|
||||
isLoggerSet bool
|
||||
logger log.Logger
|
||||
|
||||
proto string
|
||||
addr string
|
||||
listener net.Listener
|
||||
|
||||
connsMtx tmsync.Mutex
|
||||
connsMtx sync.Mutex
|
||||
conns map[int]net.Conn
|
||||
nextConnID int
|
||||
|
||||
appMtx tmsync.Mutex
|
||||
appMtx sync.Mutex
|
||||
app types.Application
|
||||
}
|
||||
|
||||
func NewSocketServer(protoAddr string, app types.Application) service.Service {
|
||||
func NewSocketServer(logger log.Logger, protoAddr string, app types.Application) service.Service {
|
||||
proto, addr := tmnet.ProtocolAndAddress(protoAddr)
|
||||
s := &SocketServer{
|
||||
logger: logger,
|
||||
proto: proto,
|
||||
addr: addr,
|
||||
listener: nil,
|
||||
app: app,
|
||||
conns: make(map[int]net.Conn),
|
||||
}
|
||||
s.BaseService = *service.NewBaseService(nil, "ABCIServer", s)
|
||||
s.BaseService = *service.NewBaseService(logger, "ABCIServer", s)
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *SocketServer) SetLogger(l tmlog.Logger) {
|
||||
s.BaseService.SetLogger(l)
|
||||
s.isLoggerSet = true
|
||||
}
|
||||
|
||||
func (s *SocketServer) OnStart() error {
|
||||
func (s *SocketServer) OnStart(ctx context.Context) error {
|
||||
ln, err := net.Listen(s.proto, s.addr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.listener = ln
|
||||
go s.acceptConnectionsRoutine()
|
||||
go s.acceptConnectionsRoutine(ctx)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *SocketServer) OnStop() {
|
||||
if err := s.listener.Close(); err != nil {
|
||||
s.Logger.Error("Error closing listener", "err", err)
|
||||
s.logger.Error("Error closing listener", "err", err)
|
||||
}
|
||||
|
||||
s.connsMtx.Lock()
|
||||
defer s.connsMtx.Unlock()
|
||||
|
||||
for id, conn := range s.conns {
|
||||
delete(s.conns, id)
|
||||
if err := conn.Close(); err != nil {
|
||||
s.Logger.Error("Error closing connection", "id", id, "conn", conn, "err", err)
|
||||
s.logger.Error("Error closing connection", "id", id, "conn", conn, "err", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -103,20 +100,25 @@ func (s *SocketServer) rmConn(connID int) error {
|
||||
return conn.Close()
|
||||
}
|
||||
|
||||
func (s *SocketServer) acceptConnectionsRoutine() {
|
||||
func (s *SocketServer) acceptConnectionsRoutine(ctx context.Context) {
|
||||
for {
|
||||
if ctx.Err() != nil {
|
||||
return
|
||||
|
||||
}
|
||||
|
||||
// Accept a connection
|
||||
s.Logger.Info("Waiting for new connection...")
|
||||
s.logger.Info("Waiting for new connection...")
|
||||
conn, err := s.listener.Accept()
|
||||
if err != nil {
|
||||
if !s.IsRunning() {
|
||||
return // Ignore error from listener closing.
|
||||
}
|
||||
s.Logger.Error("Failed to accept connection", "err", err)
|
||||
s.logger.Error("Failed to accept connection", "err", err)
|
||||
continue
|
||||
}
|
||||
|
||||
s.Logger.Info("Accepted a new connection")
|
||||
s.logger.Info("Accepted a new connection")
|
||||
|
||||
connID := s.addConn(conn)
|
||||
|
||||
@@ -124,35 +126,46 @@ func (s *SocketServer) acceptConnectionsRoutine() {
|
||||
responses := make(chan *types.Response, 1000) // A channel to buffer responses
|
||||
|
||||
// Read requests from conn and deal with them
|
||||
go s.handleRequests(closeConn, conn, responses)
|
||||
go s.handleRequests(ctx, closeConn, conn, responses)
|
||||
// Pull responses from 'responses' and write them to conn.
|
||||
go s.handleResponses(closeConn, conn, responses)
|
||||
go s.handleResponses(ctx, closeConn, conn, responses)
|
||||
|
||||
// Wait until signal to close connection
|
||||
go s.waitForClose(closeConn, connID)
|
||||
go s.waitForClose(ctx, closeConn, connID)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *SocketServer) waitForClose(closeConn chan error, connID int) {
|
||||
err := <-closeConn
|
||||
switch {
|
||||
case err == io.EOF:
|
||||
s.Logger.Error("Connection was closed by client")
|
||||
case err != nil:
|
||||
s.Logger.Error("Connection error", "err", err)
|
||||
default:
|
||||
// never happens
|
||||
s.Logger.Error("Connection was closed")
|
||||
}
|
||||
func (s *SocketServer) waitForClose(ctx context.Context, closeConn chan error, connID int) {
|
||||
defer func() {
|
||||
// Close the connection
|
||||
if err := s.rmConn(connID); err != nil {
|
||||
s.logger.Error("Error closing connection", "err", err)
|
||||
}
|
||||
}()
|
||||
|
||||
// Close the connection
|
||||
if err := s.rmConn(connID); err != nil {
|
||||
s.Logger.Error("Error closing connection", "err", err)
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case err := <-closeConn:
|
||||
switch {
|
||||
case err == io.EOF:
|
||||
s.logger.Error("Connection was closed by client")
|
||||
case err != nil:
|
||||
s.logger.Error("Connection error", "err", err)
|
||||
default:
|
||||
// never happens
|
||||
s.logger.Error("Connection was closed")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
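waitForClose above now races the per-connection error channel against the server context. The general shape of that pattern, reduced to a sketch (the names here are illustrative, not part of the diff):

// watchConn closes the connection when either a worker goroutine
// reports a fatal error on errc or the server context is cancelled,
// whichever happens first.
func watchConn(ctx context.Context, logger log.Logger, errc <-chan error, closeFn func() error) {
	defer func() {
		if err := closeFn(); err != nil {
			logger.Error("Error closing connection", "err", err)
		}
	}()

	select {
	case <-ctx.Done():
	case err := <-errc:
		switch {
		case err == io.EOF:
			logger.Error("Connection was closed by client")
		case err != nil:
			logger.Error("Connection error", "err", err)
		}
	}
}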
||||
// Read requests from conn and deal with them
|
||||
func (s *SocketServer) handleRequests(closeConn chan error, conn io.Reader, responses chan<- *types.Response) {
|
||||
func (s *SocketServer) handleRequests(
|
||||
ctx context.Context,
|
||||
closeConn chan error,
|
||||
conn io.Reader,
|
||||
responses chan<- *types.Response,
|
||||
) {
|
||||
var count int
|
||||
var bufReader = bufio.NewReader(conn)
|
||||
|
||||
@@ -164,15 +177,15 @@ func (s *SocketServer) handleRequests(closeConn chan error, conn io.Reader, resp
|
||||
buf := make([]byte, size)
|
||||
buf = buf[:runtime.Stack(buf, false)]
|
||||
err := fmt.Errorf("recovered from panic: %v\n%s", r, buf)
|
||||
if !s.isLoggerSet {
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
}
|
||||
closeConn <- err
|
||||
s.appMtx.Unlock()
|
||||
}
|
||||
}()
|
||||
|
||||
for {
|
||||
if ctx.Err() != nil {
|
||||
return
|
||||
}
|
||||
|
||||
var req = &types.Request{}
|
||||
err := types.ReadMessage(bufReader, req)
|
||||
@@ -239,7 +252,12 @@ func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types
|
||||
}
|
||||
|
||||
// Pull responses from 'responses' and write them to conn.
|
||||
func (s *SocketServer) handleResponses(closeConn chan error, conn io.Writer, responses <-chan *types.Response) {
|
||||
func (s *SocketServer) handleResponses(
|
||||
ctx context.Context,
|
||||
closeConn chan error,
|
||||
conn io.Writer,
|
||||
responses <-chan *types.Response,
|
||||
) {
|
||||
bw := bufio.NewWriter(conn)
|
||||
for res := range responses {
|
||||
if err := types.WriteMessage(res, bw); err != nil {
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package tests
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
@@ -8,20 +9,27 @@ import (
|
||||
abciclientent "github.com/tendermint/tendermint/abci/client"
|
||||
"github.com/tendermint/tendermint/abci/example/kvstore"
|
||||
abciserver "github.com/tendermint/tendermint/abci/server"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
)
|
||||
|
||||
func TestClientServerNoAddrPrefix(t *testing.T) {
|
||||
addr := "localhost:26658"
|
||||
transport := "socket"
|
||||
app := kvstore.NewApplication()
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
server, err := abciserver.NewServer(addr, transport, app)
|
||||
const (
|
||||
addr = "localhost:26658"
|
||||
transport = "socket"
|
||||
)
|
||||
app := kvstore.NewApplication()
|
||||
logger := log.TestingLogger()
|
||||
|
||||
server, err := abciserver.NewServer(logger, addr, transport, app)
|
||||
assert.NoError(t, err, "expected no error on NewServer")
|
||||
err = server.Start()
|
||||
err = server.Start(ctx)
|
||||
assert.NoError(t, err, "expected no error on server.Start")
|
||||
|
||||
client, err := abciclientent.NewClient(addr, transport, true)
|
||||
client, err := abciclientent.NewClient(logger, addr, transport, true)
|
||||
assert.NoError(t, err, "expected no error on NewClient")
|
||||
err = client.Start()
|
||||
err = client.Start(ctx)
|
||||
assert.NoError(t, err, "expected no error on client.Start")
|
||||
}
|
||||
|
||||
@@ -12,9 +12,7 @@ import (
|
||||
tmrand "github.com/tendermint/tendermint/libs/rand"
|
||||
)
|
||||
|
||||
var ctx = context.Background()
|
||||
|
||||
func InitChain(client abciclient.Client) error {
|
||||
func InitChain(ctx context.Context, client abciclient.Client) error {
|
||||
total := 10
|
||||
vals := make([]types.ValidatorUpdate, total)
|
||||
for i := 0; i < total; i++ {
|
||||
@@ -34,7 +32,7 @@ func InitChain(client abciclient.Client) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func Commit(client abciclient.Client, hashExp []byte) error {
|
||||
func Commit(ctx context.Context, client abciclient.Client, hashExp []byte) error {
|
||||
res, err := client.CommitSync(ctx)
|
||||
data := res.Data
|
||||
if err != nil {
|
||||
@@ -51,7 +49,7 @@ func Commit(client abciclient.Client, hashExp []byte) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func DeliverTx(client abciclient.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
|
||||
func DeliverTx(ctx context.Context, client abciclient.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
|
||||
res, _ := client.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: txBytes})
|
||||
code, data, log := res.Code, res.Data, res.Log
|
||||
if code != codeExp {
|
||||
@@ -70,7 +68,7 @@ func DeliverTx(client abciclient.Client, txBytes []byte, codeExp uint32, dataExp
|
||||
return nil
|
||||
}
|
||||
|
||||
func CheckTx(client abciclient.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
|
||||
func CheckTx(ctx context.Context, client abciclient.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
|
||||
res, _ := client.CheckTxSync(ctx, types.RequestCheckTx{Tx: txBytes})
|
||||
code, data, log := res.Code, res.Data, res.Log
|
||||
if code != codeExp {
|
||||
|
||||
@@ -1838,7 +1838,7 @@ type ResponseCheckTx struct {
Sender string `protobuf:"bytes,9,opt,name=sender,proto3" json:"sender,omitempty"`
Priority int64 `protobuf:"varint,10,opt,name=priority,proto3" json:"priority,omitempty"`
// mempool_error is set by Tendermint.
// ABCI applictions creating a ResponseCheckTX should not set mempool_error.
// ABCI applications creating a ResponseCheckTX should not set mempool_error.
MempoolError string `protobuf:"bytes,11,opt,name=mempool_error,json=mempoolError,proto3" json:"mempool_error,omitempty"`
}
|
||||
|
||||
|
||||
buf.gen.yaml
@@ -1,13 +0,0 @@
# The version of the generation template.
# Required.
# The only currently-valid value is v1beta1.
version: v1beta1

# The plugins to run.
plugins:
  # The name of the plugin.
  - name: gogofaster
    # The the relative output directory.
    out: proto
    # Any options to provide to the plugin.
    opt: Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types,Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration,plugins=grpc,paths=source_relative
|
||||
buf.yaml
@@ -1,16 +0,0 @@
version: v1beta1

build:
  roots:
    - proto
    - third_party/proto
lint:
  use:
    - BASIC
    - FILE_LOWER_SNAKE_CASE
    - UNARY_RPC
  ignore:
    - gogoproto
breaking:
  use:
    - FILE
|
||||
@@ -6,7 +6,6 @@ import (
|
||||
"crypto/x509"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
@@ -51,6 +50,9 @@ func main() {
|
||||
)
|
||||
flag.Parse()
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
logger.Info(
|
||||
"Starting private validator",
|
||||
"addr", *addr,
|
||||
@@ -78,7 +80,7 @@ func main() {
|
||||
}
|
||||
|
||||
certPool := x509.NewCertPool()
|
||||
bs, err := ioutil.ReadFile(*rootCA)
|
||||
bs, err := os.ReadFile(*rootCA)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "failed to read client ca cert: %s", err)
|
||||
os.Exit(1)
|
||||
@@ -132,7 +134,7 @@ func main() {
|
||||
}
|
||||
|
||||
// Stop upon receiving SIGTERM or CTRL-C.
|
||||
tmos.TrapSignal(logger, func() {
|
||||
tmos.TrapSignal(ctx, logger, func() {
|
||||
logger.Debug("SignerServer: calling Close")
|
||||
if *prometheusAddr != "" {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
|
||||
|
||||
@@ -3,7 +3,6 @@ package debug
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
@@ -82,7 +81,7 @@ func dumpCmdHandler(_ *cobra.Command, args []string) error {
|
||||
func dumpDebugData(outDir string, conf *config.Config, rpc *rpchttp.HTTP) {
|
||||
start := time.Now().UTC()
|
||||
|
||||
tmpDir, err := ioutil.TempDir(outDir, "tendermint_debug_tmp")
|
||||
tmpDir, err := os.MkdirTemp(outDir, "tendermint_debug_tmp")
|
||||
if err != nil {
|
||||
logger.Error("failed to create temporary directory", "dir", tmpDir, "error", err)
|
||||
return
|
||||
|
||||
@@ -5,7 +5,6 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
@@ -111,5 +110,5 @@ func writeStateJSONToFile(state interface{}, dir, filename string) error {
|
||||
return fmt.Errorf("failed to encode state dump: %w", err)
|
||||
}
|
||||
|
||||
return ioutil.WriteFile(path.Join(dir, filename), stateJSON, os.ModePerm)
|
||||
return os.WriteFile(path.Join(dir, filename), stateJSON, os.ModePerm)
|
||||
}
|
||||
|
||||
@@ -3,7 +3,6 @@ package debug
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
@@ -34,7 +33,7 @@ $ tendermint debug kill 34255 /path/to/tm-debug.zip`,
|
||||
}
|
||||
|
||||
func killCmdHandler(cmd *cobra.Command, args []string) error {
|
||||
pid, err := strconv.ParseUint(args[0], 10, 64)
|
||||
pid, err := strconv.ParseInt(args[0], 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -56,7 +55,7 @@ func killCmdHandler(cmd *cobra.Command, args []string) error {
|
||||
|
||||
// Create a temporary directory which will contain all the state dumps and
|
||||
// relevant files and directories that will be compressed into a file.
|
||||
tmpDir, err := ioutil.TempDir(os.TempDir(), "tendermint_debug_tmp")
|
||||
tmpDir, err := os.MkdirTemp(os.TempDir(), "tendermint_debug_tmp")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create temporary directory: %w", err)
|
||||
}
|
||||
@@ -92,7 +91,7 @@ func killCmdHandler(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
|
||||
logger.Info("killing Tendermint process")
|
||||
if err := killProc(pid, tmpDir); err != nil {
|
||||
if err := killProc(int(pid), tmpDir); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -105,7 +104,7 @@ func killCmdHandler(cmd *cobra.Command, args []string) error {
|
||||
// is tailed and piped to a file under the directory dir. An error is returned
|
||||
// if the output file cannot be created or the tail command cannot be started.
|
||||
// An error is not returned if any subsequent syscall fails.
|
||||
func killProc(pid uint64, dir string) error {
|
||||
func killProc(pid int, dir string) error {
|
||||
// pipe STDERR output from tailing the Tendermint process to a file
|
||||
//
|
||||
// NOTE: This will only work on UNIX systems.
|
||||
@@ -128,7 +127,7 @@ func killProc(pid uint64, dir string) error {
|
||||
go func() {
|
||||
// Killing the Tendermint process with the '-ABRT|-6' signal will result in
|
||||
// a goroutine stacktrace.
|
||||
p, err := os.FindProcess(int(pid))
|
||||
p, err := os.FindProcess(pid)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "failed to find PID to kill Tendermint process: %s", err)
|
||||
} else if err = p.Signal(syscall.SIGABRT); err != nil {
|
||||
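killProc works because sending SIGABRT to a Go process makes the runtime dump all goroutine stacks to stderr before exiting. The core of that step, extracted as a standalone sketch (UNIX only, as the surrounding comments note):

// Ask a running Tendermint process for a goroutine dump by aborting it.
p, err := os.FindProcess(pid)
if err != nil {
	return fmt.Errorf("failed to find process %d: %w", pid, err)
}
if err := p.Signal(syscall.SIGABRT); err != nil {
	return fmt.Errorf("failed to signal process %d: %w", pid, err)
}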
|
||||
@@ -3,7 +3,7 @@ package debug
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
@@ -73,10 +73,10 @@ func dumpProfile(dir, addr, profile string, debug int) error {
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read %s profile response body: %w", profile, err)
|
||||
}
|
||||
|
||||
return ioutil.WriteFile(path.Join(dir, fmt.Sprintf("%s.out", profile)), body, os.ModePerm)
|
||||
return os.WriteFile(path.Join(dir, fmt.Sprintf("%s.out", profile)), body, os.ModePerm)
|
||||
}
|
||||
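These hunks belong to the io/ioutil cleanup: Go 1.16 moved the helpers into os and io, and the diff applies the direct replacements. A quick reference sketch of the mapping used here (file paths are placeholders):

// ioutil.ReadFile  -> os.ReadFile
// ioutil.WriteFile -> os.WriteFile
// ioutil.TempDir   -> os.MkdirTemp
// ioutil.ReadAll   -> io.ReadAll
data, err := os.ReadFile("config.toml")
if err != nil {
	return err
}
if err := os.WriteFile("config.out.toml", data, 0600); err != nil {
	return err
}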
|
||||
@@ -121,7 +121,9 @@ func initFilesWithConfig(config *cfg.Config) error {
|
||||
}
|
||||
|
||||
// write config file
|
||||
cfg.WriteConfigFile(config.RootDir, config)
|
||||
if err := cfg.WriteConfigFile(config.RootDir, config); err != nil {
|
||||
return err
|
||||
}
|
||||
logger.Info("Generated config", "mode", config.Mode)
|
||||
|
||||
return nil
|
||||
|
||||
@@ -1,8 +1,6 @@
|
||||
package commands
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
@@ -40,16 +38,9 @@ func init() {
|
||||
}
|
||||
|
||||
func runInspect(cmd *cobra.Command, args []string) error {
|
||||
ctx, cancel := context.WithCancel(cmd.Context())
|
||||
ctx, cancel := signal.NotifyContext(cmd.Context(), syscall.SIGTERM, syscall.SIGINT)
|
||||
defer cancel()
|
||||
|
||||
c := make(chan os.Signal, 1)
|
||||
signal.Notify(c, syscall.SIGTERM, syscall.SIGINT)
|
||||
go func() {
|
||||
<-c
|
||||
cancel()
|
||||
}()
|
||||
|
||||
ins, err := inspect.NewFromConfig(logger, config)
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
@@ -6,8 +6,10 @@ import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/signal"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
@@ -187,12 +189,16 @@ func runProxy(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
|
||||
// Stop upon receiving SIGTERM or CTRL-C.
|
||||
tmos.TrapSignal(logger, func() {
|
||||
tmos.TrapSignal(cmd.Context(), logger, func() {
|
||||
p.Listener.Close()
|
||||
})
|
||||
|
||||
// this might be redundant to the above, eventually.
|
||||
ctx, cancel := signal.NotifyContext(cmd.Context(), syscall.SIGTERM)
|
||||
defer cancel()
|
||||
|
||||
logger.Info("Starting proxy...", "laddr", listenAddr)
|
||||
if err := p.ListenAndServe(); err != http.ErrServerClosed {
|
||||
if err := p.ListenAndServe(ctx); err != http.ErrServerClosed {
|
||||
// Error starting or closing listener:
|
||||
logger.Error("proxy ListenAndServe", "err", err)
|
||||
}
|
||||
|
||||
@@ -1,32 +0,0 @@
|
||||
package commands
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/tendermint/tendermint/internal/p2p/upnp"
|
||||
tmjson "github.com/tendermint/tendermint/libs/json"
|
||||
)
|
||||
|
||||
// ProbeUpnpCmd adds capabilities to test the UPnP functionality.
|
||||
var ProbeUpnpCmd = &cobra.Command{
|
||||
Use: "probe-upnp",
|
||||
Short: "Test UPnP functionality",
|
||||
RunE: probeUpnp,
|
||||
}
|
||||
|
||||
func probeUpnp(cmd *cobra.Command, args []string) error {
|
||||
capabilities, err := upnp.Probe(logger)
|
||||
if err != nil {
|
||||
fmt.Println("Probe failed: ", err)
|
||||
} else {
|
||||
fmt.Println("Probe success!")
|
||||
jsonBytes, err := tmjson.Marshal(capabilities)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Println(string(jsonBytes))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -3,6 +3,7 @@ package commands
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
@@ -16,6 +17,7 @@ import (
|
||||
"github.com/tendermint/tendermint/internal/state/indexer/sink/kv"
|
||||
"github.com/tendermint/tendermint/internal/state/indexer/sink/psql"
|
||||
"github.com/tendermint/tendermint/internal/store"
|
||||
"github.com/tendermint/tendermint/libs/os"
|
||||
"github.com/tendermint/tendermint/rpc/coretypes"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
@@ -29,11 +31,12 @@ var ReIndexEventCmd = &cobra.Command{
|
||||
Use: "reindex-event",
|
||||
Short: "reindex events to the event store backends",
|
||||
Long: `
|
||||
reindex-event is an offline tooling to re-index block and tx events to the eventsinks,
|
||||
you can run this command when the event store backend dropped/disconnected or you want to replace the backend.
|
||||
The default start-height is 0, meaning the tooling will start reindex from the base block height(inclusive); and the
|
||||
default end-height is 0, meaning the tooling will reindex until the latest block height(inclusive). User can omits
|
||||
either or both arguments.
|
||||
reindex-event is an offline tooling to re-index block and tx events to the eventsinks,
|
||||
you can run this command when the event store backend dropped/disconnected or you want to
|
||||
replace the backend. The default start-height is 0, meaning the tooling will start
|
||||
reindex from the base block height(inclusive); and the default end-height is 0, meaning
|
||||
the tooling will reindex until the latest block height(inclusive). User can omit
|
||||
either or both arguments.
|
||||
`,
|
||||
Example: `
|
||||
tendermint reindex-event
|
||||
@@ -131,6 +134,10 @@ func loadEventSinks(cfg *tmcfg.Config) ([]indexer.EventSink, error) {
|
||||
func loadStateAndBlockStore(cfg *tmcfg.Config) (*store.BlockStore, state.Store, error) {
|
||||
dbType := dbm.BackendType(cfg.DBBackend)
|
||||
|
||||
if !os.FileExists(filepath.Join(cfg.DBDir(), "blockstore.db")) {
|
||||
return nil, nil, fmt.Errorf("no blockstore found in %v", cfg.DBDir())
|
||||
}
|
||||
|
||||
// Get BlockStore
|
||||
blockStoreDB, err := dbm.NewDB("blockstore", dbType, cfg.DBDir())
|
||||
if err != nil {
|
||||
@@ -138,6 +145,10 @@ func loadStateAndBlockStore(cfg *tmcfg.Config) (*store.BlockStore, state.Store,
|
||||
}
|
||||
blockStore := store.NewBlockStore(blockStoreDB)
|
||||
|
||||
if !os.FileExists(filepath.Join(cfg.DBDir(), "state.db")) {
|
||||
return nil, nil, fmt.Errorf("no blockstore found in %v", cfg.DBDir())
|
||||
}
|
||||
|
||||
// Get StateStore
|
||||
stateDB, err := dbm.NewDB("state", dbType, cfg.DBDir())
|
||||
if err != nil {
|
||||
|
||||
@@ -15,6 +15,9 @@ import (
|
||||
"github.com/tendermint/tendermint/internal/state/mocks"
|
||||
prototmstate "github.com/tendermint/tendermint/proto/tendermint/state"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
dbm "github.com/tendermint/tm-db"
|
||||
|
||||
_ "github.com/lib/pq" // for the psql sink
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -107,12 +110,29 @@ func TestLoadEventSink(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestLoadBlockStore(t *testing.T) {
|
||||
bs, ss, err := loadStateAndBlockStore(tmcfg.TestConfig())
|
||||
testCfg, err := tmcfg.ResetTestRoot(t.Name())
|
||||
require.NoError(t, err)
|
||||
testCfg.DBBackend = "goleveldb"
|
||||
_, _, err = loadStateAndBlockStore(testCfg)
|
||||
// we should return an error because the state store and block store
|
||||
// don't yet exist
|
||||
require.Error(t, err)
|
||||
|
||||
dbType := dbm.BackendType(testCfg.DBBackend)
|
||||
bsdb, err := dbm.NewDB("blockstore", dbType, testCfg.DBDir())
|
||||
require.NoError(t, err)
|
||||
bsdb.Close()
|
||||
|
||||
ssdb, err := dbm.NewDB("state", dbType, testCfg.DBDir())
|
||||
require.NoError(t, err)
|
||||
ssdb.Close()
|
||||
|
||||
bs, ss, err := loadStateAndBlockStore(testCfg)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, bs)
|
||||
require.NotNil(t, ss)
|
||||
|
||||
}
|
||||
|
||||
func TestReIndexEvent(t *testing.T) {
|
||||
mockBlockStore := &mocks.BlockStore{}
|
||||
mockStateStore := &mocks.Store{}
|
||||
|
||||
@@ -9,8 +9,8 @@ import (
|
||||
var ReplayCmd = &cobra.Command{
|
||||
Use: "replay",
|
||||
Short: "Replay messages from WAL",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
consensus.RunReplayFile(config.BaseConfig, config.Consensus, false)
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return consensus.RunReplayFile(cmd.Context(), logger, config.BaseConfig, config.Consensus, false)
|
||||
},
|
||||
}
|
||||
|
||||
@@ -19,7 +19,7 @@ var ReplayCmd = &cobra.Command{
|
||||
var ReplayConsoleCmd = &cobra.Command{
|
||||
Use: "replay-console",
|
||||
Short: "Replay messages from WAL in a console",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
consensus.RunReplayFile(config.BaseConfig, config.Consensus, true)
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return consensus.RunReplayFile(cmd.Context(), logger, config.BaseConfig, config.Consensus, true)
|
||||
},
|
||||
}
|
||||
|
||||
@@ -37,7 +37,7 @@ var ResetPrivValidatorCmd = &cobra.Command{
|
||||
// XXX: this is totally unsafe.
|
||||
// it's only suitable for testnets.
|
||||
func resetAll(cmd *cobra.Command, args []string) error {
|
||||
return ResetAll(config.DBDir(), config.P2P.AddrBookFile(), config.PrivValidator.KeyFile(),
|
||||
return ResetAll(config.DBDir(), config.PrivValidator.KeyFile(),
|
||||
config.PrivValidator.StateFile(), logger)
|
||||
}
|
||||
|
||||
@@ -49,12 +49,7 @@ func resetPrivValidator(cmd *cobra.Command, args []string) error {
|
||||
|
||||
// ResetAll removes address book files plus all data, and resets the privValdiator data.
|
||||
// Exported so other CLI tools can use it.
|
||||
func ResetAll(dbDir, addrBookFile, privValKeyFile, privValStateFile string, logger log.Logger) error {
|
||||
if keepAddrBook {
|
||||
logger.Info("The address book remains intact")
|
||||
} else {
|
||||
removeAddrBook(addrBookFile, logger)
|
||||
}
|
||||
func ResetAll(dbDir, privValKeyFile, privValStateFile string, logger log.Logger) error {
|
||||
if err := os.RemoveAll(dbDir); err == nil {
|
||||
logger.Info("Removed all blockchain history", "dir", dbDir)
|
||||
} else {
|
||||
@@ -87,11 +82,3 @@ func resetFilePV(privValKeyFile, privValStateFile string, logger log.Logger) err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func removeAddrBook(addrBookFile string, logger log.Logger) {
|
||||
if err := os.Remove(addrBookFile); err == nil {
|
||||
logger.Info("Removed existing address book", "file", addrBookFile)
|
||||
} else if !os.IsNotExist(err) {
|
||||
logger.Info("Error removing address book", "file", addrBookFile, "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
cmd/tendermint/commands/rollback.go (new file)
@@ -0,0 +1,50 @@
package commands

import (
	"fmt"

	"github.com/spf13/cobra"

	cfg "github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/internal/state"
)

var RollbackStateCmd = &cobra.Command{
	Use: "rollback",
	Short: "rollback tendermint state by one height",
	Long: `
A state rollback is performed to recover from an incorrect application state transition,
when Tendermint has persisted an incorrect app hash and is thus unable to make
progress. Rollback overwrites a state at height n with the state at height n - 1.
The application should also roll back to height n - 1. No blocks are removed, so upon
restarting Tendermint the transactions in block n will be re-executed against the
application.
`,
	RunE: func(cmd *cobra.Command, args []string) error {
		height, hash, err := RollbackState(config)
		if err != nil {
			return fmt.Errorf("failed to rollback state: %w", err)
		}

		fmt.Printf("Rolled back state to height %d and hash %X", height, hash)
		return nil
	},
}

// RollbackState takes the state at the current height n and overwrites it with the state
// at height n - 1. Note state here refers to tendermint state not application state.
// Returns the latest state height and app hash alongside an error if there was one.
func RollbackState(config *cfg.Config) (int64, []byte, error) {
	// use the parsed config to load the block and state store
	blockStore, stateStore, err := loadStateAndBlockStore(config)
	if err != nil {
		return -1, nil, err
	}
	defer func() {
		_ = blockStore.Close()
		_ = stateStore.Close()
	}()

	// rollback the last state
	return state.Rollback(blockStore, stateStore)
}
|
||||
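Operationally, the new command is run as `tendermint rollback` on a stopped node, after the application has rolled back its own state; rollback_test.go below exercises exactly that sequence. The programmatic equivalent, as used in the test (the log call is an illustrative addition):

height, appHash, err := commands.RollbackState(config)
if err != nil {
	return fmt.Errorf("failed to rollback state: %w", err)
}
logger.Info("rolled back state", "height", height, "app_hash", fmt.Sprintf("%X", appHash))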
cmd/tendermint/commands/rollback_test.go (new file)
@@ -0,0 +1,71 @@
package commands_test

import (
	"context"
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"github.com/tendermint/tendermint/cmd/tendermint/commands"
	"github.com/tendermint/tendermint/rpc/client/local"
	rpctest "github.com/tendermint/tendermint/rpc/test"
	e2e "github.com/tendermint/tendermint/test/e2e/app"
)

func TestRollbackIntegration(t *testing.T) {
	var height int64
	dir := t.TempDir()
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	cfg, err := rpctest.CreateConfig(t.Name())
	require.NoError(t, err)
	cfg.BaseConfig.DBBackend = "goleveldb"
	app, err := e2e.NewApplication(e2e.DefaultConfig(dir))

	t.Run("First run", func(t *testing.T) {
		ctx, cancel := context.WithCancel(ctx)
		defer cancel()
		require.NoError(t, err)
		node, _, err := rpctest.StartTendermint(ctx, cfg, app, rpctest.SuppressStdout)
		require.NoError(t, err)

		time.Sleep(3 * time.Second)
		cancel()
		node.Wait()
		require.False(t, node.IsRunning())
	})

	t.Run("Rollback", func(t *testing.T) {
		require.NoError(t, app.Rollback())
		height, _, err = commands.RollbackState(cfg)
		require.NoError(t, err)

	})

	t.Run("Restart", func(t *testing.T) {
		ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
		defer cancel()
		node2, _, err2 := rpctest.StartTendermint(ctx, cfg, app, rpctest.SuppressStdout)
		require.NoError(t, err2)

		client, err := local.New(node2.(local.NodeService))
		require.NoError(t, err)

		ticker := time.NewTicker(200 * time.Millisecond)
		for {
			select {
			case <-ctx.Done():
				t.Fatalf("failed to make progress after 20 seconds. Min height: %d", height)
			case <-ticker.C:
				status, err := client.Status(ctx)
				require.NoError(t, err)

				if status.SyncInfo.LatestBlockHeight > height {
					return
				}
			}
		}
	})

}
|
||||
@@ -2,7 +2,6 @@ package commands
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
@@ -167,5 +166,5 @@ func WriteConfigVals(dir string, vals map[string]string) error {
|
||||
data += fmt.Sprintf("%s = \"%s\"\n", k, v)
|
||||
}
|
||||
cfile := filepath.Join(dir, "config.toml")
|
||||
return ioutil.WriteFile(cfile, []byte(data), 0600)
|
||||
return os.WriteFile(cfile, []byte(data), 0600)
|
||||
}
|
||||
|
||||
@@ -3,16 +3,15 @@ package commands
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha256"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
tmos "github.com/tendermint/tendermint/libs/os"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -35,22 +34,7 @@ func AddNodeFlags(cmd *cobra.Command) {
|
||||
"socket address to listen on for connections from external priv-validator process")
|
||||
|
||||
// node flags
|
||||
cmd.Flags().Bool("blocksync.enable", config.BlockSync.Enable, "enable fast blockchain syncing")
|
||||
|
||||
// TODO (https://github.com/tendermint/tendermint/issues/6908): remove this check after the v0.35 release cycle
|
||||
// This check was added to give users an upgrade prompt to use the new flag for syncing.
|
||||
//
|
||||
// The pflag package does not have a native way to print a depcrecation warning
|
||||
// and return an error. This logic was added to print a deprecation message to the user
|
||||
// and then crash if the user attempts to use the old --fast-sync flag.
|
||||
fs := flag.NewFlagSet("", flag.ExitOnError)
|
||||
fs.Func("fast-sync", "deprecated",
|
||||
func(string) error {
|
||||
return errors.New("--fast-sync has been deprecated, please use --blocksync.enable")
|
||||
})
|
||||
cmd.Flags().AddGoFlagSet(fs)
|
||||
|
||||
cmd.Flags().MarkHidden("fast-sync") //nolint:errcheck
|
||||
cmd.Flags().BytesHexVar(
|
||||
&genesisHash,
|
||||
"genesis-hash",
|
||||
@@ -70,10 +54,6 @@ func AddNodeFlags(cmd *cobra.Command) {
|
||||
|
||||
// rpc flags
|
||||
cmd.Flags().String("rpc.laddr", config.RPC.ListenAddress, "RPC listen address. Port required")
|
||||
cmd.Flags().String(
|
||||
"rpc.grpc-laddr",
|
||||
config.RPC.GRPCListenAddress,
|
||||
"GRPC listen address (BroadcastTx only). Port required")
|
||||
cmd.Flags().Bool("rpc.unsafe", config.RPC.Unsafe, "enabled unsafe rpc methods")
|
||||
cmd.Flags().String("rpc.pprof-laddr", config.RPC.PprofListenAddress, "pprof listen address (https://golang.org/pkg/net/http/pprof)")
|
||||
|
||||
@@ -84,8 +64,6 @@ func AddNodeFlags(cmd *cobra.Command) {
|
||||
"node listen address. (0.0.0.0:0 means any interface, any port)")
|
||||
cmd.Flags().String("p2p.seeds", config.P2P.Seeds, "comma-delimited ID@host:port seed nodes")
|
||||
cmd.Flags().String("p2p.persistent-peers", config.P2P.PersistentPeers, "comma-delimited ID@host:port persistent peers")
|
||||
cmd.Flags().String("p2p.unconditional-peer-ids",
|
||||
config.P2P.UnconditionalPeerIDs, "comma-delimited IDs of unconditional peers")
|
||||
cmd.Flags().Bool("p2p.upnp", config.P2P.UPNP, "enable/disable UPNP port forwarding")
|
||||
cmd.Flags().Bool("p2p.pex", config.P2P.PexReactor, "enable/disable Peer-Exchange")
|
||||
cmd.Flags().String("p2p.private-peer-ids", config.P2P.PrivatePeerIDs, "comma-delimited private peer IDs")
|
||||
@@ -126,28 +104,22 @@ func NewRunNodeCmd(nodeProvider cfg.ServiceProvider) *cobra.Command {
|
||||
return err
|
||||
}
|
||||
|
||||
n, err := nodeProvider(config, logger)
|
||||
ctx, cancel := signal.NotifyContext(cmd.Context(), syscall.SIGTERM)
|
||||
defer cancel()
|
||||
|
||||
n, err := nodeProvider(ctx, config, logger)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create node: %w", err)
|
||||
}
|
||||
|
||||
if err := n.Start(); err != nil {
|
||||
if err := n.Start(ctx); err != nil {
|
||||
return fmt.Errorf("failed to start node: %w", err)
|
||||
}
|
||||
|
||||
logger.Info("started node", "node", n.String())
|
||||
|
||||
// Stop upon receiving SIGTERM or CTRL-C.
|
||||
tmos.TrapSignal(logger, func() {
|
||||
if n.IsRunning() {
|
||||
if err := n.Stop(); err != nil {
|
||||
logger.Error("unable to stop the node", "error", err)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
// Run forever.
|
||||
select {}
|
||||
<-ctx.Done()
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@@ -226,7 +226,6 @@ func testnetFiles(cmd *cobra.Command, args []string) error {
|
||||
for i := 0; i < nValidators+nNonValidators; i++ {
|
||||
nodeDir := filepath.Join(outputDir, fmt.Sprintf("%s%d", nodeDirPrefix, i))
|
||||
config.SetRoot(nodeDir)
|
||||
config.P2P.AddrBookStrict = false
|
||||
config.P2P.AllowDuplicateIP = true
|
||||
if populatePersistentPeers {
|
||||
persistentPeersWithoutSelf := make([]string, 0)
|
||||
@@ -240,7 +239,9 @@ func testnetFiles(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
config.Moniker = moniker(i)
|
||||
|
||||
cfg.WriteConfigFile(nodeDir, config)
|
||||
if err := cfg.WriteConfigFile(nodeDir, config); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Printf("Successfully initialized %v node directories\n", nValidators+nNonValidators)
|
||||
|
||||
@@ -17,7 +17,6 @@ func main() {
|
||||
cmd.GenValidatorCmd,
|
||||
cmd.ReIndexEventCmd,
|
||||
cmd.InitFilesCmd,
|
||||
cmd.ProbeUpnpCmd,
|
||||
cmd.LightCmd,
|
||||
cmd.ReplayCmd,
|
||||
cmd.ReplayConsoleCmd,
|
||||
@@ -29,6 +28,7 @@ func main() {
|
||||
cmd.GenNodeKeyCmd,
|
||||
cmd.VersionCmd,
|
||||
cmd.InspectCmd,
|
||||
cmd.RollbackStateCmd,
|
||||
cmd.MakeKeyMigrateCommand(),
|
||||
debug.DebugCmd,
|
||||
cli.NewCompletionCmd(rootCmd, true),
|
||||
|
||||
config/config.go
@@ -4,7 +4,6 @@ import (
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
@@ -28,12 +27,6 @@ const (
|
||||
ModeFull = "full"
|
||||
ModeValidator = "validator"
|
||||
ModeSeed = "seed"
|
||||
|
||||
BlockSyncV0 = "v0"
|
||||
BlockSyncV2 = "v2"
|
||||
|
||||
MempoolV0 = "v0"
|
||||
MempoolV1 = "v1"
|
||||
)
|
||||
|
||||
// NOTE: Most of the structs & relevant comments + the
|
||||
@@ -54,16 +47,14 @@ var (
|
||||
defaultPrivValKeyName = "priv_validator_key.json"
|
||||
defaultPrivValStateName = "priv_validator_state.json"
|
||||
|
||||
defaultNodeKeyName = "node_key.json"
|
||||
defaultAddrBookName = "addrbook.json"
|
||||
defaultNodeKeyName = "node_key.json"
|
||||
|
||||
defaultConfigFilePath = filepath.Join(defaultConfigDir, defaultConfigFileName)
|
||||
defaultGenesisJSONPath = filepath.Join(defaultConfigDir, defaultGenesisJSONName)
|
||||
defaultPrivValKeyPath = filepath.Join(defaultConfigDir, defaultPrivValKeyName)
|
||||
defaultPrivValStatePath = filepath.Join(defaultDataDir, defaultPrivValStateName)
|
||||
|
||||
defaultNodeKeyPath = filepath.Join(defaultConfigDir, defaultNodeKeyName)
|
||||
defaultAddrBookPath = filepath.Join(defaultConfigDir, defaultAddrBookName)
|
||||
defaultNodeKeyPath = filepath.Join(defaultConfigDir, defaultNodeKeyName)
|
||||
)
|
||||
|
||||
// Config defines the top level configuration for a Tendermint node
|
||||
@@ -76,7 +67,6 @@ type Config struct {
|
||||
P2P *P2PConfig `mapstructure:"p2p"`
|
||||
Mempool *MempoolConfig `mapstructure:"mempool"`
|
||||
StateSync *StateSyncConfig `mapstructure:"statesync"`
|
||||
BlockSync *BlockSyncConfig `mapstructure:"blocksync"`
|
||||
Consensus *ConsensusConfig `mapstructure:"consensus"`
|
||||
TxIndex *TxIndexConfig `mapstructure:"tx-index"`
|
||||
Instrumentation *InstrumentationConfig `mapstructure:"instrumentation"`
|
||||
@@ -91,7 +81,6 @@ func DefaultConfig() *Config {
|
||||
P2P: DefaultP2PConfig(),
|
||||
Mempool: DefaultMempoolConfig(),
|
||||
StateSync: DefaultStateSyncConfig(),
|
||||
BlockSync: DefaultBlockSyncConfig(),
|
||||
Consensus: DefaultConsensusConfig(),
|
||||
TxIndex: DefaultTxIndexConfig(),
|
||||
Instrumentation: DefaultInstrumentationConfig(),
|
||||
@@ -114,7 +103,6 @@ func TestConfig() *Config {
|
||||
P2P: TestP2PConfig(),
|
||||
Mempool: TestMempoolConfig(),
|
||||
StateSync: TestStateSyncConfig(),
|
||||
BlockSync: TestBlockSyncConfig(),
|
||||
Consensus: TestConsensusConfig(),
|
||||
TxIndex: TestTxIndexConfig(),
|
||||
Instrumentation: TestInstrumentationConfig(),
|
||||
@@ -142,18 +130,12 @@ func (cfg *Config) ValidateBasic() error {
|
||||
if err := cfg.RPC.ValidateBasic(); err != nil {
|
||||
return fmt.Errorf("error in [rpc] section: %w", err)
|
||||
}
|
||||
if err := cfg.P2P.ValidateBasic(); err != nil {
|
||||
return fmt.Errorf("error in [p2p] section: %w", err)
|
||||
}
|
||||
if err := cfg.Mempool.ValidateBasic(); err != nil {
|
||||
return fmt.Errorf("error in [mempool] section: %w", err)
|
||||
}
|
||||
if err := cfg.StateSync.ValidateBasic(); err != nil {
|
||||
return fmt.Errorf("error in [statesync] section: %w", err)
|
||||
}
|
||||
if err := cfg.BlockSync.ValidateBasic(); err != nil {
|
||||
return fmt.Errorf("error in [blocksync] section: %w", err)
|
||||
}
|
||||
if err := cfg.Consensus.ValidateBasic(); err != nil {
|
||||
return fmt.Errorf("error in [consensus] section: %w", err)
|
||||
}
|
||||
@@ -283,7 +265,7 @@ func (cfg BaseConfig) NodeKeyFile() string {
|
||||
|
||||
// LoadNodeKey loads NodeKey located in filePath.
|
||||
func (cfg BaseConfig) LoadNodeKeyID() (types.NodeID, error) {
|
||||
jsonBytes, err := ioutil.ReadFile(cfg.NodeKeyFile())
|
||||
jsonBytes, err := os.ReadFile(cfg.NodeKeyFile())
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
@@ -339,28 +321,6 @@ func (cfg BaseConfig) ValidateBasic() error {
|
||||
return fmt.Errorf("unknown mode: %v", cfg.Mode)
|
||||
}
|
||||
|
||||
// TODO (https://github.com/tendermint/tendermint/issues/6908) remove this check after the v0.35 release cycle.
|
||||
// This check was added to give users an upgrade prompt to use the new
|
||||
// configuration option in v0.35. In future release cycles they should no longer
|
||||
// be using this configuration parameter so the check can be removed.
|
||||
// The cfg.Other field can likely be removed at the same time if it is not referenced
|
||||
// elsewhere as it was added to service this check.
|
||||
if fs, ok := cfg.Other["fastsync"]; ok {
|
||||
if _, ok := fs.(map[string]interface{}); ok {
|
||||
return fmt.Errorf("a configuration section named 'fastsync' was found in the " +
|
||||
"configuration file. The 'fastsync' section has been renamed to " +
|
||||
"'blocksync', please update the 'fastsync' field in your configuration file to 'blocksync'")
|
||||
}
|
||||
}
|
||||
if fs, ok := cfg.Other["fast-sync"]; ok {
|
||||
if fs != "" {
|
||||
return fmt.Errorf("a parameter named 'fast-sync' was found in the " +
|
||||
"configuration file. The parameter to enable or disable quickly syncing with a blockchain" +
|
||||
"has moved to the [blocksync] section of the configuration file as blocksync.enable. " +
|
||||
"Please move the 'fast-sync' field in your configuration file to 'blocksync.enable'")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -461,24 +421,10 @@ type RPCConfig struct {
|
||||
// A list of non simple headers the client is allowed to use with cross-domain requests.
|
||||
CORSAllowedHeaders []string `mapstructure:"cors-allowed-headers"`
|
||||
|
||||
// TCP or UNIX socket address for the gRPC server to listen on
|
||||
// NOTE: This server only supports /broadcast_tx_commit
|
||||
// Deprecated: gRPC in the RPC layer of Tendermint will be removed in 0.36.
|
||||
GRPCListenAddress string `mapstructure:"grpc-laddr"`
|
||||
|
||||
// Maximum number of simultaneous connections.
|
||||
// Does not include RPC (HTTP&WebSocket) connections. See max-open-connections
|
||||
// If you want to accept a larger number than the default, make sure
|
||||
// you increase your OS limits.
|
||||
// 0 - unlimited.
|
||||
// Deprecated: gRPC in the RPC layer of Tendermint will be removed in 0.36.
|
||||
GRPCMaxOpenConnections int `mapstructure:"grpc-max-open-connections"`
|
||||
|
||||
// Activate unsafe RPC commands like /dial-persistent-peers and /unsafe-flush-mempool
|
||||
Unsafe bool `mapstructure:"unsafe"`
|
||||
|
||||
// Maximum number of simultaneous connections (including WebSocket).
|
||||
// Does not include gRPC connections. See grpc-max-open-connections
|
||||
// If you want to accept a larger number than the default, make sure
|
||||
// you increase your OS limits.
|
||||
// 0 - unlimited.
|
||||
@@ -492,7 +438,7 @@ type RPCConfig struct {
|
||||
MaxSubscriptionClients int `mapstructure:"max-subscription-clients"`
|
||||
|
||||
// Maximum number of unique queries a given client can /subscribe to
|
||||
// If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set
|
||||
// If you're using a Local RPC client and /broadcast_tx_commit, set this
|
||||
// to the estimated maximum number of broadcast_tx_commit calls per block.
|
||||
MaxSubscriptionsPerClient int `mapstructure:"max-subscriptions-per-client"`
|
||||
|
||||
@@ -533,12 +479,10 @@ type RPCConfig struct {
|
||||
// DefaultRPCConfig returns a default configuration for the RPC server
|
||||
func DefaultRPCConfig() *RPCConfig {
|
||||
return &RPCConfig{
|
||||
ListenAddress: "tcp://127.0.0.1:26657",
|
||||
CORSAllowedOrigins: []string{},
|
||||
CORSAllowedMethods: []string{http.MethodHead, http.MethodGet, http.MethodPost},
|
||||
CORSAllowedHeaders: []string{"Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time"},
|
||||
GRPCListenAddress: "",
|
||||
GRPCMaxOpenConnections: 900,
|
||||
ListenAddress: "tcp://127.0.0.1:26657",
|
||||
CORSAllowedOrigins: []string{},
|
||||
CORSAllowedMethods: []string{http.MethodHead, http.MethodGet, http.MethodPost},
|
||||
CORSAllowedHeaders: []string{"Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time"},
|
||||
|
||||
Unsafe: false,
|
||||
MaxOpenConnections: 900,
|
||||
@@ -559,7 +503,6 @@ func DefaultRPCConfig() *RPCConfig {
|
||||
func TestRPCConfig() *RPCConfig {
|
||||
cfg := DefaultRPCConfig()
|
||||
cfg.ListenAddress = "tcp://127.0.0.1:36657"
|
||||
cfg.GRPCListenAddress = "tcp://127.0.0.1:36658"
|
||||
cfg.Unsafe = true
|
||||
return cfg
|
||||
}
|
||||
@@ -567,9 +510,6 @@ func TestRPCConfig() *RPCConfig {
|
||||
// ValidateBasic performs basic validation (checking param bounds, etc.) and
|
||||
// returns an error if any check fails.
|
||||
func (cfg *RPCConfig) ValidateBasic() error {
|
||||
if cfg.GRPCMaxOpenConnections < 0 {
|
||||
return errors.New("grpc-max-open-connections can't be negative")
|
||||
}
|
||||
if cfg.MaxOpenConnections < 0 {
|
||||
return errors.New("max-open-connections can't be negative")
|
||||
}
|
||||
@@ -647,25 +587,6 @@ type P2PConfig struct { //nolint: maligned
|
||||
// UPNP port forwarding
|
||||
UPNP bool `mapstructure:"upnp"`
|
||||
|
||||
// Path to address book
|
||||
AddrBook string `mapstructure:"addr-book-file"`
|
||||
|
||||
// Set true for strict address routability rules
|
||||
// Set false for private or local networks
|
||||
AddrBookStrict bool `mapstructure:"addr-book-strict"`
|
||||
|
||||
// Maximum number of inbound peers
|
||||
//
|
||||
// TODO: Remove once p2p refactor is complete in favor of MaxConnections.
|
||||
// ref: https://github.com/tendermint/tendermint/issues/5670
|
||||
MaxNumInboundPeers int `mapstructure:"max-num-inbound-peers"`
|
||||
|
||||
// Maximum number of outbound peers to connect to, excluding persistent peers.
|
||||
//
|
||||
// TODO: Remove once p2p refactor is complete in favor of MaxConnections.
|
||||
// ref: https://github.com/tendermint/tendermint/issues/5670
|
||||
MaxNumOutboundPeers int `mapstructure:"max-num-outbound-peers"`
|
||||
|
||||
// MaxConnections defines the maximum number of connected peers (inbound and
|
||||
// outbound).
|
||||
MaxConnections uint16 `mapstructure:"max-connections"`
|
||||
@@ -674,11 +595,15 @@ type P2PConfig struct { //nolint: maligned
|
||||
// attempts per IP address.
|
||||
MaxIncomingConnectionAttempts uint `mapstructure:"max-incoming-connection-attempts"`
|
||||
|
||||
// List of node IDs, to which a connection will be (re)established ignoring any existing limits
|
||||
UnconditionalPeerIDs string `mapstructure:"unconditional-peer-ids"`
|
||||
// Set true to enable the peer-exchange reactor
|
||||
PexReactor bool `mapstructure:"pex"`
|
||||
|
||||
// Maximum pause when redialing a persistent peer (if zero, exponential backoff is used)
|
||||
PersistentPeersMaxDialPeriod time.Duration `mapstructure:"persistent-peers-max-dial-period"`
|
||||
// Comma separated list of peer IDs to keep private (will not be gossiped to
|
||||
// other peers)
|
||||
PrivatePeerIDs string `mapstructure:"private-peer-ids"`
|
||||
|
||||
// Toggle to disable guard against peers connecting from the same ip.
|
||||
AllowDuplicateIP bool `mapstructure:"allow-duplicate-ip"`
|
||||
|
||||
// Time to wait before flushing messages out on the connection
|
||||
FlushThrottleTimeout time.Duration `mapstructure:"flush-throttle-timeout"`
|
||||
@@ -692,16 +617,6 @@ type P2PConfig struct { //nolint: maligned
|
||||
// Rate at which packets can be received, in bytes/second
|
||||
RecvRate int64 `mapstructure:"recv-rate"`
|
||||
|
||||
// Set true to enable the peer-exchange reactor
|
||||
PexReactor bool `mapstructure:"pex"`
|
||||
|
||||
// Comma separated list of peer IDs to keep private (will not be gossiped to
|
||||
// other peers)
|
||||
PrivatePeerIDs string `mapstructure:"private-peer-ids"`
|
||||
|
||||
// Toggle to disable guard against peers connecting from the same ip.
|
||||
AllowDuplicateIP bool `mapstructure:"allow-duplicate-ip"`
|
||||
|
||||
// Peer connection configuration.
|
||||
HandshakeTimeout time.Duration `mapstructure:"handshake-timeout"`
|
||||
DialTimeout time.Duration `mapstructure:"dial-timeout"`
|
||||
@@ -710,13 +625,8 @@ type P2PConfig struct { //nolint: maligned
|
||||
// Force dial to fail
|
||||
TestDialFail bool `mapstructure:"test-dial-fail"`
|
||||
|
||||
// UseLegacy enables the "legacy" P2P implementation and
|
||||
// disables the newer default implementation. This flag will
|
||||
// be removed in a future release.
|
||||
UseLegacy bool `mapstructure:"use-legacy"`
|
||||
|
||||
// Makes it possible to configure which queue backend the p2p
|
||||
// layer uses. Options are: "fifo", "priority" and "wdrr",
|
||||
// layer uses. Options are: "fifo" and "priority",
|
||||
// with the default being "priority".
|
||||
QueueType string `mapstructure:"queue-type"`
|
||||
}
|
||||
@@ -727,13 +637,8 @@ func DefaultP2PConfig() *P2PConfig {
|
||||
ListenAddress: "tcp://0.0.0.0:26656",
|
||||
ExternalAddress: "",
|
||||
UPNP: false,
|
||||
AddrBook: defaultAddrBookPath,
|
||||
AddrBookStrict: true,
|
||||
MaxNumInboundPeers: 40,
|
||||
MaxNumOutboundPeers: 10,
|
||||
MaxConnections: 64,
|
||||
MaxIncomingConnectionAttempts: 100,
|
||||
PersistentPeersMaxDialPeriod: 0 * time.Second,
|
||||
FlushThrottleTimeout: 100 * time.Millisecond,
|
||||
// The MTU (Maximum Transmission Unit) for Ethernet is 1500 bytes.
|
||||
// The IP header and the TCP header take up 20 bytes each at least (unless
|
||||
@@ -749,39 +654,15 @@ func DefaultP2PConfig() *P2PConfig {
|
||||
DialTimeout: 3 * time.Second,
|
||||
TestDialFail: false,
|
||||
QueueType: "priority",
|
||||
UseLegacy: false,
|
||||
}
|
||||
}
|
||||
|
||||
// TestP2PConfig returns a configuration for testing the peer-to-peer layer
|
||||
func TestP2PConfig() *P2PConfig {
|
||||
cfg := DefaultP2PConfig()
|
||||
cfg.ListenAddress = "tcp://127.0.0.1:36656"
|
||||
cfg.FlushThrottleTimeout = 10 * time.Millisecond
|
||||
cfg.AllowDuplicateIP = true
|
||||
return cfg
|
||||
}
|
||||
|
||||
// AddrBookFile returns the full path to the address book
|
||||
func (cfg *P2PConfig) AddrBookFile() string {
|
||||
return rootify(cfg.AddrBook, cfg.RootDir)
|
||||
}
|
||||
|
||||
// ValidateBasic performs basic validation (checking param bounds, etc.) and
|
||||
// returns an error if any check fails.
|
||||
func (cfg *P2PConfig) ValidateBasic() error {
|
||||
if cfg.MaxNumInboundPeers < 0 {
|
||||
return errors.New("max-num-inbound-peers can't be negative")
|
||||
}
|
||||
if cfg.MaxNumOutboundPeers < 0 {
|
||||
return errors.New("max-num-outbound-peers can't be negative")
|
||||
}
|
||||
if cfg.FlushThrottleTimeout < 0 {
|
||||
return errors.New("flush-throttle-timeout can't be negative")
|
||||
}
|
||||
if cfg.PersistentPeersMaxDialPeriod < 0 {
|
||||
return errors.New("persistent-peers-max-dial-period can't be negative")
|
||||
}
|
||||
if cfg.MaxPacketMsgPayloadSize < 0 {
|
||||
return errors.New("max-packet-msg-payload-size can't be negative")
|
||||
}
|
||||
@@ -794,12 +675,21 @@ func (cfg *P2PConfig) ValidateBasic() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// TestP2PConfig returns a configuration for testing the peer-to-peer layer
|
||||
func TestP2PConfig() *P2PConfig {
|
||||
cfg := DefaultP2PConfig()
|
||||
cfg.ListenAddress = "tcp://127.0.0.1:36656"
|
||||
cfg.AllowDuplicateIP = true
|
||||
cfg.FlushThrottleTimeout = 10 * time.Millisecond
|
||||
|
||||
return cfg
|
||||
}
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
// MempoolConfig
|
||||
|
||||
// MempoolConfig defines the configuration options for the Tendermint mempool.
|
||||
type MempoolConfig struct {
|
||||
Version string `mapstructure:"version"`
|
||||
RootDir string `mapstructure:"home"`
|
||||
Recheck bool `mapstructure:"recheck"`
|
||||
Broadcast bool `mapstructure:"broadcast"`
|
||||
@@ -849,7 +739,6 @@ type MempoolConfig struct {
|
||||
// DefaultMempoolConfig returns a default configuration for the Tendermint mempool.
|
||||
func DefaultMempoolConfig() *MempoolConfig {
|
||||
return &MempoolConfig{
|
||||
Version: MempoolV1,
|
||||
Recheck: true,
|
||||
Broadcast: true,
|
||||
// Each signature verification takes .5ms, Size reduced until we implement
|
||||
@@ -1018,42 +907,6 @@ func (cfg *StateSyncConfig) ValidateBasic() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
|
||||
// BlockSyncConfig (formerly known as FastSync) defines the configuration for the Tendermint block sync service
|
||||
// If this node is many blocks behind the tip of the chain, BlockSync
|
||||
// allows them to catchup quickly by downloading blocks in parallel
|
||||
// and verifying their commits.
|
||||
type BlockSyncConfig struct {
|
||||
Enable bool `mapstructure:"enable"`
|
||||
Version string `mapstructure:"version"`
|
||||
}
|
||||
|
||||
// DefaultBlockSyncConfig returns a default configuration for the block sync service
|
||||
func DefaultBlockSyncConfig() *BlockSyncConfig {
|
||||
return &BlockSyncConfig{
|
||||
Enable: true,
|
||||
Version: BlockSyncV0,
|
||||
}
|
||||
}
|
||||
|
||||
// TestBlockSyncConfig returns a default configuration for the block sync.
|
||||
func TestBlockSyncConfig() *BlockSyncConfig {
|
||||
return DefaultBlockSyncConfig()
|
||||
}
|
||||
|
||||
// ValidateBasic performs basic validation.
|
||||
func (cfg *BlockSyncConfig) ValidateBasic() error {
|
||||
switch cfg.Version {
|
||||
case BlockSyncV0:
|
||||
return nil
|
||||
case BlockSyncV2:
|
||||
return errors.New("blocksync version v2 is no longer supported. Please use v0")
|
||||
default:
|
||||
return fmt.Errorf("unknown blocksync version %s", cfg.Version)
|
||||
}
|
||||
}
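As a brief, hedged usage sketch of the validation above (assuming it sits alongside the config package from this diff): the default configuration passes, while the removed "v2" version is rejected.

```go
package config

import "fmt"

// ExampleBlockSyncValidation sketches how ValidateBasic reacts to the
// supported and the removed block sync versions (illustrative only).
func ExampleBlockSyncValidation() {
	cfg := DefaultBlockSyncConfig() // Enable: true, Version: BlockSyncV0
	fmt.Println(cfg.ValidateBasic()) // <nil>

	cfg.Version = BlockSyncV2
	fmt.Println(cfg.ValidateBasic() != nil) // true: v2 is rejected
}
```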
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
// ConsensusConfig
|
||||
|
||||
|
||||
@@ -66,7 +66,6 @@ func TestRPCConfigValidateBasic(t *testing.T) {
|
||||
assert.NoError(t, cfg.ValidateBasic())
|
||||
|
||||
fieldsToTest := []string{
|
||||
"GRPCMaxOpenConnections",
|
||||
"MaxOpenConnections",
|
||||
"MaxSubscriptionClients",
|
||||
"MaxSubscriptionsPerClient",
|
||||
@@ -82,26 +81,6 @@ func TestRPCConfigValidateBasic(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestP2PConfigValidateBasic(t *testing.T) {
|
||||
cfg := TestP2PConfig()
|
||||
assert.NoError(t, cfg.ValidateBasic())
|
||||
|
||||
fieldsToTest := []string{
|
||||
"MaxNumInboundPeers",
|
||||
"MaxNumOutboundPeers",
|
||||
"FlushThrottleTimeout",
|
||||
"MaxPacketMsgPayloadSize",
|
||||
"SendRate",
|
||||
"RecvRate",
|
||||
}
|
||||
|
||||
for _, fieldName := range fieldsToTest {
|
||||
reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(-1)
|
||||
assert.Error(t, cfg.ValidateBasic())
|
||||
reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(0)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMempoolConfigValidateBasic(t *testing.T) {
|
||||
cfg := TestMempoolConfig()
|
||||
assert.NoError(t, cfg.ValidateBasic())
|
||||
@@ -125,20 +104,7 @@ func TestStateSyncConfigValidateBasic(t *testing.T) {
|
||||
require.NoError(t, cfg.ValidateBasic())
|
||||
}
|
||||
|
||||
func TestBlockSyncConfigValidateBasic(t *testing.T) {
|
||||
cfg := TestBlockSyncConfig()
|
||||
assert.NoError(t, cfg.ValidateBasic())
|
||||
|
||||
// tamper with version
|
||||
cfg.Version = "v2"
|
||||
assert.Error(t, cfg.ValidateBasic())
|
||||
|
||||
cfg.Version = "invalid"
|
||||
assert.Error(t, cfg.ValidateBasic())
|
||||
}
|
||||
|
||||
func TestConsensusConfig_ValidateBasic(t *testing.T) {
|
||||
// nolint: lll
|
||||
testcases := map[string]struct {
|
||||
modify func(*ConsensusConfig)
|
||||
expectErr bool
|
||||
@@ -187,3 +153,21 @@ func TestInstrumentationConfigValidateBasic(t *testing.T) {
|
||||
cfg.MaxOpenConnections = -1
|
||||
assert.Error(t, cfg.ValidateBasic())
|
||||
}
|
||||
|
||||
func TestP2PConfigValidateBasic(t *testing.T) {
|
||||
cfg := TestP2PConfig()
|
||||
assert.NoError(t, cfg.ValidateBasic())
|
||||
|
||||
fieldsToTest := []string{
|
||||
"FlushThrottleTimeout",
|
||||
"MaxPacketMsgPayloadSize",
|
||||
"SendRate",
|
||||
"RecvRate",
|
||||
}
|
||||
|
||||
for _, fieldName := range fieldsToTest {
|
||||
reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(-1)
|
||||
assert.Error(t, cfg.ValidateBasic())
|
||||
reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(0)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,6 +1,8 @@
package config

import (
	"context"

	dbm "github.com/tendermint/tm-db"

	"github.com/tendermint/tendermint/libs/log"
@@ -8,7 +10,7 @@ import (
)

// ServiceProvider takes a config and a logger and returns a ready to go Node.
type ServiceProvider func(*Config, log.Logger) (service.Service, error)
type ServiceProvider func(context.Context, *Config, log.Logger) (service.Service, error)

// DBContext specifies config information for loading a new DB.
type DBContext struct {
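To illustrate the new context-aware signature, here is a hedged sketch of a provider that satisfies it; `makeNode` is a hypothetical constructor standing in for the real node factory, and the import paths follow the repository layout.

```go
package config

import (
	"context"

	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/libs/service"
)

// newExampleProvider returns a ServiceProvider that threads the caller's
// context into node construction, matching the updated signature above.
// makeNode is a hypothetical constructor, not the real default provider.
func newExampleProvider(
	makeNode func(context.Context, *Config, log.Logger) (service.Service, error),
) ServiceProvider {
	return func(ctx context.Context, cfg *Config, logger log.Logger) (service.Service, error) {
		// Validate the configuration before handing it to the constructor.
		if err := cfg.ValidateBasic(); err != nil {
			return nil, err
		}
		return makeNode(ctx, cfg, logger)
	}
}
```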
config/toml.go (179 changed lines)
@@ -3,7 +3,6 @@ package config
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
@@ -45,23 +44,29 @@ func EnsureRoot(rootDir string) {
|
||||
|
||||
// WriteConfigFile renders config using the template and writes it to configFilePath.
|
||||
// This function is called by cmd/tendermint/commands/init.go
|
||||
func WriteConfigFile(rootDir string, config *Config) {
|
||||
var buffer bytes.Buffer
|
||||
|
||||
if err := configTemplate.Execute(&buffer, config); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
configFilePath := filepath.Join(rootDir, defaultConfigFilePath)
|
||||
|
||||
mustWriteFile(configFilePath, buffer.Bytes(), 0644)
|
||||
func WriteConfigFile(rootDir string, config *Config) error {
|
||||
return config.WriteToTemplate(filepath.Join(rootDir, defaultConfigFilePath))
|
||||
}
|
||||
|
||||
func writeDefaultConfigFileIfNone(rootDir string) {
|
||||
// WriteToTemplate writes the config to the exact file specified by
|
||||
// the path, in the default toml template and does not mangle the path
|
||||
// or filename at all.
|
||||
func (cfg *Config) WriteToTemplate(path string) error {
|
||||
var buffer bytes.Buffer
|
||||
|
||||
if err := configTemplate.Execute(&buffer, cfg); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return writeFile(path, buffer.Bytes(), 0644)
|
||||
}
|
||||
|
||||
func writeDefaultConfigFileIfNone(rootDir string) error {
|
||||
configFilePath := filepath.Join(rootDir, defaultConfigFilePath)
|
||||
if !tmos.FileExists(configFilePath) {
|
||||
WriteConfigFile(rootDir, DefaultConfig())
|
||||
return WriteConfigFile(rootDir, DefaultConfig())
|
||||
}
|
||||
return nil
|
||||
}
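With `WriteConfigFile` and `writeDefaultConfigFileIfNone` now returning errors instead of panicking or exiting, callers propagate them; a hedged sketch of such a caller (`initNodeHome` is illustrative, not a function in this diff):

```go
package config

import (
	"fmt"
	"path/filepath"
)

// initNodeHome sketches how a caller might use the error-returning
// WriteConfigFile / WriteToTemplate shown above (illustrative only).
func initNodeHome(rootDir string) error {
	cfg := DefaultConfig()
	cfg.SetRoot(rootDir)

	// Render the default template into <rootDir>/config/config.toml.
	if err := WriteConfigFile(rootDir, cfg); err != nil {
		return fmt.Errorf("writing config file: %w", err)
	}

	// WriteToTemplate can also target an arbitrary path directly.
	if err := cfg.WriteToTemplate(filepath.Join(rootDir, "config-backup.toml")); err != nil {
		return fmt.Errorf("writing backup config: %w", err)
	}
	return nil
}
```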
|
||||
|
||||
// Note: any changes to the comments/variables/mapstructure
|
||||
@@ -193,26 +198,10 @@ cors-allowed-methods = [{{ range .RPC.CORSAllowedMethods }}{{ printf "%q, " . }}
|
||||
# A list of non simple headers the client is allowed to use with cross-domain requests
|
||||
cors-allowed-headers = [{{ range .RPC.CORSAllowedHeaders }}{{ printf "%q, " . }}{{end}}]
|
||||
|
||||
# TCP or UNIX socket address for the gRPC server to listen on
|
||||
# NOTE: This server only supports /broadcast_tx_commit
|
||||
# Deprecated gRPC in the RPC layer of Tendermint will be deprecated in 0.36.
|
||||
grpc-laddr = "{{ .RPC.GRPCListenAddress }}"
|
||||
|
||||
# Maximum number of simultaneous connections.
|
||||
# Does not include RPC (HTTP&WebSocket) connections. See max-open-connections
|
||||
# If you want to accept a larger number than the default, make sure
|
||||
# you increase your OS limits.
|
||||
# 0 - unlimited.
|
||||
# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
|
||||
# 1024 - 40 - 10 - 50 = 924 = ~900
|
||||
# Deprecated gRPC in the RPC layer of Tendermint will be deprecated in 0.36.
|
||||
grpc-max-open-connections = {{ .RPC.GRPCMaxOpenConnections }}
|
||||
|
||||
# Activate unsafe RPC commands like /dial-seeds and /unsafe-flush-mempool
|
||||
unsafe = {{ .RPC.Unsafe }}
|
||||
|
||||
# Maximum number of simultaneous connections (including WebSocket).
|
||||
# Does not include gRPC connections. See grpc-max-open-connections
|
||||
# If you want to accept a larger number than the default, make sure
|
||||
# you increase your OS limits.
|
||||
# 0 - unlimited.
|
||||
@@ -226,8 +215,8 @@ max-open-connections = {{ .RPC.MaxOpenConnections }}
|
||||
max-subscription-clients = {{ .RPC.MaxSubscriptionClients }}
|
||||
|
||||
# Maximum number of unique queries a given client can /subscribe to
|
||||
# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to
|
||||
# the estimated # maximum number of broadcast_tx_commit calls per block.
|
||||
# If you're using a Local RPC client and /broadcast_tx_commit, set this
|
||||
# to the estimated maximum number of broadcast_tx_commit calls per block.
|
||||
max-subscriptions-per-client = {{ .RPC.MaxSubscriptionsPerClient }}
|
||||
|
||||
# How long to wait for a tx to be committed during /broadcast_tx_commit.
|
||||
@@ -265,9 +254,6 @@ pprof-laddr = "{{ .RPC.PprofListenAddress }}"
|
||||
#######################################################
|
||||
[p2p]
|
||||
|
||||
# Enable the legacy p2p layer.
|
||||
use-legacy = {{ .P2P.UseLegacy }}
|
||||
|
||||
# Select the p2p internal queue
|
||||
queue-type = "{{ .P2P.QueueType }}"
|
||||
|
||||
@@ -299,62 +285,12 @@ persistent-peers = "{{ .P2P.PersistentPeers }}"
|
||||
# UPNP port forwarding
|
||||
upnp = {{ .P2P.UPNP }}
|
||||
|
||||
# Path to address book
|
||||
# TODO: Remove once p2p refactor is complete in favor of peer store.
|
||||
addr-book-file = "{{ js .P2P.AddrBook }}"
|
||||
|
||||
# Set true for strict address routability rules
|
||||
# Set false for private or local networks
|
||||
addr-book-strict = {{ .P2P.AddrBookStrict }}
|
||||
|
||||
# Maximum number of inbound peers
|
||||
#
|
||||
# TODO: Remove once p2p refactor is complete in favor of MaxConnections.
|
||||
# ref: https://github.com/tendermint/tendermint/issues/5670
|
||||
max-num-inbound-peers = {{ .P2P.MaxNumInboundPeers }}
|
||||
|
||||
# Maximum number of outbound peers to connect to, excluding persistent peers
|
||||
#
|
||||
# TODO: Remove once p2p refactor is complete in favor of MaxConnections.
|
||||
# ref: https://github.com/tendermint/tendermint/issues/5670
|
||||
max-num-outbound-peers = {{ .P2P.MaxNumOutboundPeers }}
|
||||
|
||||
# Maximum number of connections (inbound and outbound).
|
||||
max-connections = {{ .P2P.MaxConnections }}
|
||||
|
||||
# Rate limits the number of incoming connection attempts per IP address.
|
||||
max-incoming-connection-attempts = {{ .P2P.MaxIncomingConnectionAttempts }}
|
||||
|
||||
# List of node IDs, to which a connection will be (re)established ignoring any existing limits
|
||||
# TODO: Remove once p2p refactor is complete.
|
||||
# ref: https://github.com/tendermint/tendermint/issues/5670
|
||||
unconditional-peer-ids = "{{ .P2P.UnconditionalPeerIDs }}"
|
||||
|
||||
# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used)
|
||||
# TODO: Remove once p2p refactor is complete
|
||||
# ref: https:#github.com/tendermint/tendermint/issues/5670
|
||||
persistent-peers-max-dial-period = "{{ .P2P.PersistentPeersMaxDialPeriod }}"
|
||||
|
||||
# Time to wait before flushing messages out on the connection
|
||||
# TODO: Remove once p2p refactor is complete
|
||||
# ref: https:#github.com/tendermint/tendermint/issues/5670
|
||||
flush-throttle-timeout = "{{ .P2P.FlushThrottleTimeout }}"
|
||||
|
||||
# Maximum size of a message packet payload, in bytes
|
||||
# TODO: Remove once p2p refactor is complete
|
||||
# ref: https:#github.com/tendermint/tendermint/issues/5670
|
||||
max-packet-msg-payload-size = {{ .P2P.MaxPacketMsgPayloadSize }}
|
||||
|
||||
# Rate at which packets can be sent, in bytes/second
|
||||
# TODO: Remove once p2p refactor is complete
|
||||
# ref: https:#github.com/tendermint/tendermint/issues/5670
|
||||
send-rate = {{ .P2P.SendRate }}
|
||||
|
||||
# Rate at which packets can be received, in bytes/second
|
||||
# TODO: Remove once p2p refactor is complete
|
||||
# ref: https:#github.com/tendermint/tendermint/issues/5670
|
||||
recv-rate = {{ .P2P.RecvRate }}
|
||||
|
||||
# Set true to enable the peer-exchange reactor
|
||||
pex = {{ .P2P.PexReactor }}
|
||||
|
||||
@@ -369,16 +305,28 @@ allow-duplicate-ip = {{ .P2P.AllowDuplicateIP }}
|
||||
handshake-timeout = "{{ .P2P.HandshakeTimeout }}"
|
||||
dial-timeout = "{{ .P2P.DialTimeout }}"
|
||||
|
||||
# Time to wait before flushing messages out on the connection
|
||||
# TODO: Remove once MConnConnection is removed.
|
||||
flush-throttle-timeout = "{{ .P2P.FlushThrottleTimeout }}"
|
||||
|
||||
# Maximum size of a message packet payload, in bytes
|
||||
# TODO: Remove once MConnConnection is removed.
|
||||
max-packet-msg-payload-size = {{ .P2P.MaxPacketMsgPayloadSize }}
|
||||
|
||||
# Rate at which packets can be sent, in bytes/second
|
||||
# TODO: Remove once MConnConnection is removed.
|
||||
send-rate = {{ .P2P.SendRate }}
|
||||
|
||||
# Rate at which packets can be received, in bytes/second
|
||||
# TODO: Remove once MConnConnection is removed.
|
||||
recv-rate = {{ .P2P.RecvRate }}
|
||||
|
||||
|
||||
#######################################################
|
||||
### Mempool Configuration Option ###
|
||||
#######################################################
|
||||
[mempool]
|
||||
|
||||
# Mempool version to use:
|
||||
# 1) "v0" - The legacy non-prioritized mempool reactor.
|
||||
# 2) "v1" (default) - The prioritized mempool reactor.
|
||||
version = "{{ .Mempool.Version }}"
|
||||
|
||||
recheck = {{ .Mempool.Recheck }}
|
||||
broadcast = {{ .Mempool.Broadcast }}
|
||||
|
||||
@@ -467,21 +415,6 @@ chunk-request-timeout = "{{ .StateSync.ChunkRequestTimeout }}"
|
||||
# The number of concurrent chunk and block fetchers to run (default: 4).
|
||||
fetchers = "{{ .StateSync.Fetchers }}"
|
||||
|
||||
#######################################################
|
||||
### Block Sync Configuration Connections ###
|
||||
#######################################################
|
||||
[blocksync]
|
||||
|
||||
# If this node is many blocks behind the tip of the chain, BlockSync
|
||||
# allows them to catchup quickly by downloading blocks in parallel
|
||||
# and verifying their commits
|
||||
enable = {{ .BlockSync.Enable }}
|
||||
|
||||
# Block Sync version to use:
|
||||
# 1) "v0" (default) - the standard Block Sync implementation
|
||||
# 2) "v2" - DEPRECATED, please use v0
|
||||
version = "{{ .BlockSync.Version }}"
|
||||
|
||||
#######################################################
|
||||
### Consensus Configuration Options ###
|
||||
#######################################################
|
||||
@@ -570,22 +503,22 @@ namespace = "{{ .Instrumentation.Namespace }}"
|
||||
|
||||
/****** these are for test settings ***********/
|
||||
|
||||
func ResetTestRoot(testName string) *Config {
|
||||
func ResetTestRoot(testName string) (*Config, error) {
|
||||
return ResetTestRootWithChainID(testName, "")
|
||||
}
|
||||
|
||||
func ResetTestRootWithChainID(testName string, chainID string) *Config {
|
||||
func ResetTestRootWithChainID(testName string, chainID string) (*Config, error) {
|
||||
// create a unique, concurrency-safe test directory under os.TempDir()
|
||||
rootDir, err := ioutil.TempDir("", fmt.Sprintf("%s-%s_", chainID, testName))
|
||||
rootDir, err := os.MkdirTemp("", fmt.Sprintf("%s-%s_", chainID, testName))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
return nil, err
|
||||
}
|
||||
// ensure config and data subdirs are created
|
||||
if err := tmos.EnsureDir(filepath.Join(rootDir, defaultConfigDir), DefaultDirPerm); err != nil {
|
||||
panic(err)
|
||||
return nil, err
|
||||
}
|
||||
if err := tmos.EnsureDir(filepath.Join(rootDir, defaultDataDir), DefaultDirPerm); err != nil {
|
||||
panic(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
conf := DefaultConfig()
|
||||
@@ -594,26 +527,36 @@ func ResetTestRootWithChainID(testName string, chainID string) *Config {
|
||||
privStateFilePath := filepath.Join(rootDir, conf.PrivValidator.State)
|
||||
|
||||
// Write default config file if missing.
|
||||
writeDefaultConfigFileIfNone(rootDir)
|
||||
if err := writeDefaultConfigFileIfNone(rootDir); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !tmos.FileExists(genesisFilePath) {
|
||||
if chainID == "" {
|
||||
chainID = "tendermint_test"
|
||||
}
|
||||
testGenesis := fmt.Sprintf(testGenesisFmt, chainID)
|
||||
mustWriteFile(genesisFilePath, []byte(testGenesis), 0644)
|
||||
if err := writeFile(genesisFilePath, []byte(testGenesis), 0644); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
// we always overwrite the priv val
|
||||
mustWriteFile(privKeyFilePath, []byte(testPrivValidatorKey), 0644)
|
||||
mustWriteFile(privStateFilePath, []byte(testPrivValidatorState), 0644)
|
||||
if err := writeFile(privKeyFilePath, []byte(testPrivValidatorKey), 0644); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := writeFile(privStateFilePath, []byte(testPrivValidatorState), 0644); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
config := TestConfig().SetRoot(rootDir)
|
||||
return config
|
||||
return config, nil
|
||||
}
|
||||
|
||||
func mustWriteFile(filePath string, contents []byte, mode os.FileMode) {
|
||||
if err := ioutil.WriteFile(filePath, contents, mode); err != nil {
|
||||
tmos.Exit(fmt.Sprintf("failed to write file: %v", err))
|
||||
func writeFile(filePath string, contents []byte, mode os.FileMode) error {
|
||||
if err := os.WriteFile(filePath, contents, mode); err != nil {
|
||||
return fmt.Errorf("failed to write file: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
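Since `ResetTestRoot` now returns `(*Config, error)`, test call sites change along these lines; a hedged sketch using the testify helpers already present in these tests:

```go
package config

import (
	"os"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestResetTestRootUsage(t *testing.T) {
	// The error is now surfaced to the test instead of panicking inside.
	cfg, err := ResetTestRoot(t.Name())
	require.NoError(t, err)
	defer os.RemoveAll(cfg.RootDir)

	// The returned config is already rooted at the temporary directory.
	require.DirExists(t, cfg.RootDir)
}
```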
|
||||
|
||||
var testGenesisFmt = `{
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
@@ -23,18 +22,18 @@ func TestEnsureRoot(t *testing.T) {
|
||||
require := require.New(t)
|
||||
|
||||
// setup temp dir for test
|
||||
tmpDir, err := ioutil.TempDir("", "config-test")
|
||||
require.Nil(err)
|
||||
tmpDir, err := os.MkdirTemp("", "config-test")
|
||||
require.NoError(err)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
// create root dir
|
||||
EnsureRoot(tmpDir)
|
||||
|
||||
WriteConfigFile(tmpDir, DefaultConfig())
|
||||
require.NoError(WriteConfigFile(tmpDir, DefaultConfig()))
|
||||
|
||||
// make sure config is set properly
|
||||
data, err := ioutil.ReadFile(filepath.Join(tmpDir, defaultConfigFilePath))
|
||||
require.Nil(err)
|
||||
data, err := os.ReadFile(filepath.Join(tmpDir, defaultConfigFilePath))
|
||||
require.NoError(err)
|
||||
|
||||
checkConfig(t, string(data))
|
||||
|
||||
@@ -47,12 +46,13 @@ func TestEnsureTestRoot(t *testing.T) {
|
||||
testName := "ensureTestRoot"
|
||||
|
||||
// create root dir
|
||||
cfg := ResetTestRoot(testName)
|
||||
cfg, err := ResetTestRoot(testName)
|
||||
require.NoError(err)
|
||||
defer os.RemoveAll(cfg.RootDir)
|
||||
rootDir := cfg.RootDir
|
||||
|
||||
// make sure config is set properly
|
||||
data, err := ioutil.ReadFile(filepath.Join(rootDir, defaultConfigFilePath))
|
||||
data, err := os.ReadFile(filepath.Join(rootDir, defaultConfigFilePath))
|
||||
require.Nil(err)
|
||||
|
||||
checkConfig(t, string(data))
|
||||
@@ -70,7 +70,6 @@ func checkConfig(t *testing.T, configFile string) {
|
||||
"moniker",
|
||||
"seeds",
|
||||
"proxy-app",
|
||||
"blocksync",
|
||||
"create-empty-blocks",
|
||||
"peer",
|
||||
"timeout",
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
//go:build !libsecp256k1
|
||||
// +build !libsecp256k1
|
||||
|
||||
package secp256k1
|
||||
|
||||
@@ -34,6 +34,10 @@ module.exports = {
|
||||
"label": "v0.34",
|
||||
"key": "v0.34"
|
||||
},
|
||||
{
|
||||
"label": "v0.35",
|
||||
"key": "v0.35"
|
||||
},
|
||||
{
|
||||
"label": "master",
|
||||
"key": "master"
|
||||
@@ -48,10 +52,6 @@ module.exports = {
|
||||
{
|
||||
title: 'Resources',
|
||||
children: [
|
||||
{
|
||||
title: 'Developer Sessions',
|
||||
path: '/DEV_SESSIONS.html'
|
||||
},
|
||||
{
|
||||
title: 'RPC',
|
||||
path: 'https://docs.tendermint.com/master/rpc/',
|
||||
|
||||
@@ -11,9 +11,9 @@ and other supported release branches.
|
||||
|
||||
There is a [GitHub Actions workflow](https://github.com/tendermint/docs/actions/workflows/deployment.yml)
|
||||
in the `tendermint/docs` repository that clones and builds the documentation
|
||||
site from the contents of this `docs` directory, for `master` and for each
|
||||
supported release branch. Under the hood, this workflow runs `make build-docs`
|
||||
from the [Makefile](../Makefile#L214).
|
||||
site from the contents of this `docs` directory, for `master` and for the
|
||||
backport branch of each supported release. Under the hood, this workflow runs
|
||||
`make build-docs` from the [Makefile](../Makefile#L214).
|
||||
|
||||
The list of supported versions are defined in [`config.js`](./.vuepress/config.js),
|
||||
which defines the UI menu on the documentation site, and also in
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
---
|
||||
order: false
|
||||
parent:
|
||||
title: "Building Applications"
|
||||
order: 3
|
||||
---
|
||||
|
||||
# Apps
|
||||
---
|
||||
@@ -65,7 +65,9 @@ Note the context/background should be written in the present tense.
|
||||
- [ADR-059: Evidence-Composition-and-Lifecycle](./adr-059-evidence-composition-and-lifecycle.md)
|
||||
- [ADR-062: P2P-Architecture](./adr-062-p2p-architecture.md)
|
||||
- [ADR-063: Privval-gRPC](./adr-063-privval-grpc.md)
|
||||
- [ADR-066-E2E-Testing](./adr-066-e2e-testing.md)
|
||||
- [ADR-066: E2E-Testing](./adr-066-e2e-testing.md)
|
||||
- [ADR-072: Restore Requests for Comments](./adr-072-request-for-comments.md)
|
||||
|
||||
### Accepted
|
||||
|
||||
- [ADR-006: Trust-Metric](./adr-006-trust-metric.md)
|
||||
@@ -99,4 +101,3 @@ Note the context/background should be written in the present tense.
|
||||
- [ADR-057: RPC](./adr-057-RPC.md)
|
||||
- [ADR-069: Node Initialization](./adr-069-flexible-node-initialization.md)
|
||||
- [ADR-071: Proposer-Based Timestamps](adr-071-proposer-based-timestamps.md)
|
||||
- [ADR-072: Restore Requests for Comments](./adr-072-request-for-comments.md)
|
||||
|
||||
@@ -178,7 +178,7 @@ type TrustMetricStore struct {
|
||||
}
|
||||
|
||||
// OnStart implements Service
|
||||
func (tms *TrustMetricStore) OnStart() error {}
|
||||
func (tms *TrustMetricStore) OnStart(context.Context) error { return nil }
|
||||
|
||||
// OnStop implements Service
|
||||
func (tms *TrustMetricStore) OnStop() {}
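The `OnStart` hook now accepts a context, as the updated `TrustMetricStore` stub shows. A hedged, self-contained sketch of a type written against that shape (the local `lifecycle` interface is illustrative and not the repository's `service.Service`):

```go
package main

import (
	"context"
	"fmt"
)

// lifecycle captures just the two hooks touched by this change; it is an
// illustrative stand-in, not the repository's service interface.
type lifecycle interface {
	OnStart(context.Context) error
	OnStop()
}

type exampleStore struct{}

func (exampleStore) OnStart(ctx context.Context) error {
	// Long-running setup can now honor cancellation via ctx.
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
		return nil
	}
}

func (exampleStore) OnStop() {}

func main() {
	var s lifecycle = exampleStore{}
	fmt.Println(s.OnStart(context.Background())) // <nil>
	s.OnStop()
}
```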
|
||||
|
||||
@@ -25,6 +25,7 @@
|
||||
- April 28, 2021: Specify search capabilities are only supported through the KV indexer (@marbar3778)
|
||||
- May 19, 2021: Update the SQL schema and the eventsink interface (@jayt106)
|
||||
- Aug 30, 2021: Update the SQL schema and the psql implementation (@creachadair)
|
||||
- Oct 5, 2021: Clarify goals and implementation changes (@creachadair)
|
||||
|
||||
## Status
|
||||
|
||||
@@ -73,19 +74,38 @@ the database used.
|
||||
We will adopt a similar approach to that of the Cosmos SDK's `KVStore` state
|
||||
listening described in [ADR-038](https://github.com/cosmos/cosmos-sdk/blob/master/docs/architecture/adr-038-state-listening.md).
|
||||
|
||||
Namely, we will perform the following:
|
||||
We will implement the following changes:
|
||||
|
||||
- Introduce a new interface, `EventSink`, that all data sinks must implement (a sketch of one possible shape follows after this list).
|
||||
- Augment the existing `tx_index.indexer` configuration to now accept a series
|
||||
of one or more indexer types, i.e sinks.
|
||||
of one or more indexer types, i.e., sinks.
|
||||
- Combine the current `TxIndexer` and `BlockIndexer` into a single `KVEventSink`
|
||||
that implements the `EventSink` interface.
|
||||
- Introduce an additional `EventSink` that is backed by [PostgreSQL](https://www.postgresql.org/).
|
||||
- Implement the necessary schemas to support both block and transaction event
|
||||
indexing.
|
||||
- Introduce an additional `EventSink` implementation that is backed by
|
||||
[PostgreSQL](https://www.postgresql.org/).
|
||||
- Implement the necessary schemas to support both block and transaction event indexing.
|
||||
- Update `IndexerService` to use a series of `EventSinks`.
|
||||
- Proxy queries to the relevant sink's native query layer.
|
||||
- Update all relevant RPC methods.
|
||||
|
||||
In addition:
|
||||
|
||||
- The Postgres indexer implementation will _not_ implement the proprietary `kv`
|
||||
query language. Users wishing to write queries against the Postgres indexer
|
||||
will connect to the underlying DBMS directly and use SQL queries based on the
|
||||
indexing schema.
|
||||
|
||||
Future custom indexer implementations will not be required to support the
|
||||
proprietary query language either.
|
||||
|
||||
- For now, the existing `kv` indexer will be left in place with its current
|
||||
query support, but will be marked as deprecated in a subsequent release, and
|
||||
the documentation will be updated to encourage users who need to query the
|
||||
event index to migrate to the Postgres indexer.
|
||||
|
||||
- In the future we may remove the `kv` indexer entirely, or replace it with a
|
||||
different implementation; that decision is deferred as future work.
|
||||
|
||||
- In the future, we may remove the index query endpoints from the RPC service
|
||||
entirely; that decision is deferred as future work, but recommended.
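To make the `EventSink` item above concrete, the following is a hedged sketch of one possible shape for the interface; the method set, context use, and package name are illustrative and may differ from the interface this ADR ultimately introduces.

```go
package indexer

import (
	"context"

	abci "github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/types"
)

// EventSink is a sketch of an indexing backend. The real interface defined
// for this ADR may differ; the intent is only to show that each sink indexes
// block and transaction events and reports its type so the indexer service
// can proxy queries to the appropriate backend.
type EventSink interface {
	// Type identifies the backend, e.g. "kv" or "psql".
	Type() string

	// IndexBlockEvents and IndexTxEvents persist events emitted at block
	// and transaction level respectively.
	IndexBlockEvents(ctx context.Context, h types.EventDataNewBlockHeader) error
	IndexTxEvents(ctx context.Context, txs []*abci.TxResult) error

	// Stop releases any resources held by the sink.
	Stop() error
}
```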
|
||||
|
||||
|
||||
## Detailed Design
|
||||
|
||||
@@ -1,45 +1,13 @@
|
||||
# ADR 71: Proposer-Based Timestamps
|
||||
|
||||
* [Changelog](#changelog)
|
||||
* [Status](#status)
|
||||
* [Context](#context)
|
||||
* [Alternative Approaches](#alternative-approaches)
|
||||
* [Remove timestamps altogether](#remove-timestamps-altogether)
|
||||
* [Decision](#decision)
|
||||
* [Detailed Design](#detailed-design)
|
||||
* [Overview](#overview)
|
||||
* [Proposal Timestamp and Block Timestamp](#proposal-timestamp-and-block-timestamp)
|
||||
* [Saving the timestamp across heights](#saving-the-timestamp-across-heights)
|
||||
* [Changes to `CommitSig`](#changes-to-commitsig)
|
||||
* [Changes to `Commit`](#changes-to-commit)
|
||||
* [Changes to `Vote` messages](#changes-to-vote-messages)
|
||||
* [New consensus parameters](#new-consensus-parameters)
|
||||
* [Changes to `Header`](#changes-to-header)
|
||||
* [Changes to the block proposal step](#changes-to-the-block-proposal-step)
|
||||
* [Proposer selects proposal timestamp](#proposer-selects-proposal-timestamp)
|
||||
* [Proposer selects block timestamp](#proposer-selects-block-timestamp)
|
||||
* [Proposer waits](#proposer-waits)
|
||||
* [Changes to the propose step timeout](#changes-to-the-propose-step-timeout)
|
||||
* [Changes to validation rules](#changes-to-validation-rules)
|
||||
* [Proposal timestamp validation](#proposal-timestamp-validation)
|
||||
* [Block timestamp validation](#block-timestamp-validation)
|
||||
* [Changes to the prevote step](#changes-to-the-prevote-step)
|
||||
* [Changes to the precommit step](#changes-to-the-precommit-step)
|
||||
* [Changes to locking a block](#changes-to-locking-a-block)
|
||||
* [Remove voteTime Completely](#remove-votetime-completely)
|
||||
* [Future Improvements](#future-improvements)
|
||||
* [Consequences](#consequences)
|
||||
* [Positive](#positive)
|
||||
* [Neutral](#neutral)
|
||||
* [Negative](#negative)
|
||||
* [References](#references)
|
||||
|
||||
## Changelog
|
||||
|
||||
- July 15 2021: Created by @williambanfield
|
||||
- Aug 4 2021: Draft completed by @williambanfield
|
||||
- Aug 5 2021: Draft updated to include data structure changes by @williambanfield
|
||||
- Aug 20 2021: Language edits completed by @williambanfield
|
||||
- Oct 25 2021: Update the ADR to match updated spec from @cason by @williambanfield
|
||||
- Nov 10 2021: Additional language updates by @williambanfield per feedback from @cason
|
||||
|
||||
## Status
|
||||
|
||||
@@ -68,7 +36,7 @@ However, their currently known Unix time may be greatly divergent from the block
|
||||
The proposer-based timestamps specification suggests an alternative approach for producing block timestamps that remedies these issues.
|
||||
Proposer-based timestamps alter the current mechanism for producing block timestamps in two main ways:
|
||||
|
||||
1. The block proposer is amended to offer up its currently known Unix time as the timestamp for the next block.
|
||||
1. The block proposer is amended to offer up its currently known Unix time as the timestamp for the next block instead of the `BFTTime`.
|
||||
1. Correct validators only approve the proposed block timestamp if it is close enough to their own currently known Unix time.
|
||||
|
||||
The result of these changes is a more meaningful timestamp that cannot be controlled by `<= 2/3` of the validator voting power.
|
||||
@@ -111,45 +79,9 @@ Implementing proposer-based timestamps will require a few changes to Tendermint
|
||||
These changes will be to the following components:
|
||||
* The `internal/consensus/` package.
|
||||
* The `state/` package.
|
||||
* The `Vote`, `CommitSig`, `Commit` and `Header` types.
|
||||
* The `Vote`, `CommitSig` and `Header` types.
|
||||
* The consensus parameters.
|
||||
|
||||
### Proposal Timestamp and Block Timestamp
|
||||
|
||||
This design discusses two timestamps: (1) The timestamp in the block and (2) the timestamp in the proposal message.
|
||||
The existence and use of both of these timestamps can get a bit confusing, so some background is given here to clarify their uses.
|
||||
|
||||
The [proposal message currently has a timestamp](https://github.com/tendermint/tendermint/blob/e5312942e30331e7c42b75426da2c6c9c00ae476/types/proposal.go#L31).
|
||||
This timestamp is the current Unix time known to the proposer when sending the `Proposal` message.
|
||||
This timestamp is not currently used as part of consensus.
|
||||
The changes in this ADR will begin using the proposal message timestamp as part of consensus.
|
||||
We will refer to this as the **proposal timestamp** throughout this design.
|
||||
|
||||
The block has a timestamp field [in the header](https://github.com/tendermint/tendermint/blob/dc7c212c41a360bfe6eb38a6dd8c709bbc39aae7/types/block.go#L338).
|
||||
This timestamp is set currently as part of Tendermint’s `BFTtime` algorithm.
|
||||
It is set when a block is proposed and it is checked by the validators when they are deciding to prevote the block.
|
||||
This field will continue to be used but the logic for creating and validating this timestamp will change.
|
||||
We will refer to this as the **block timestamp** throughout this design.
|
||||
|
||||
At a high level, the proposal timestamp from height `H` is used as the block timestamp at height `H+1`.
|
||||
The following image shows this relationship.
|
||||
The rest of this document describes the code changes that will make this possible.
|
||||
|
||||

|
||||
|
||||
### Saving the timestamp across heights
|
||||
|
||||
Currently, `BFTtime` uses `LastCommit` to construct the block timestamp.
|
||||
The `LastCommit` is created at height `H-1` and is saved in the state store to be included in the block at height `H`.
|
||||
`BFTtime` takes the weighted median of the timestamps in `LastCommit.CommitSig` to build the timestamp for height `H`.
|
||||
|
||||
For proposer-based timestamps, the `LastCommit.CommitSig` timestamps will no longer be used to build the timestamps for height `H`.
|
||||
Instead, the proposal timestamp from height `H-1` will become the block timestamp for height `H`.
|
||||
To enable this, we will add a `Timestamp` field to the `Commit` struct.
|
||||
This field will be populated at each height with the proposal timestamp decided on at the previous height.
|
||||
This timestamp will also be saved with the rest of the commit in the state store [when the commit is finalized](https://github.com/tendermint/tendermint/blob/e8013281281985e3ada7819f42502b09623d24a0/internal/consensus/state.go#L1611) so that it can be recovered if Tendermint crashes.
|
||||
Changes to the `CommitSig` and `Commit` struct are detailed below.
|
||||
|
||||
### Changes to `CommitSig`
|
||||
|
||||
The [CommitSig](https://github.com/tendermint/tendermint/blob/a419f4df76fe4aed668a6c74696deabb9fe73211/types/block.go#L604) struct currently contains a timestamp.
|
||||
@@ -167,32 +99,14 @@ type CommitSig struct {
|
||||
}
|
||||
```
|
||||
|
||||
### Changes to `Commit`
|
||||
|
||||
The [Commit](https://github.com/tendermint/tendermint/blob/a419f4df76fe4aed668a6c74696deabb9fe73211/types/block.go#L746) struct does not currently contain a timestamp.
|
||||
The timestamps in the `Commit.CommitSig` entries are currently used to build the block timestamp.
|
||||
With these timestamps removed, the commit time will instead be stored in the `Commit` struct.
|
||||
|
||||
`Commit` will be updated as follows.
|
||||
|
||||
```diff
|
||||
type Commit struct {
|
||||
Height int64 `json:"height"`
|
||||
Round int32 `json:"round"`
|
||||
++ Timestamp time.Time `json:"timestamp"`
|
||||
BlockID BlockID `json:"block_id"`
|
||||
Signatures []CommitSig `json:"signatures"`
|
||||
}
|
||||
```
|
||||
|
||||
### Changes to `Vote` messages
|
||||
|
||||
`Precommit` and `Prevote` messages use a common [Vote struct](https://github.com/tendermint/tendermint/blob/a419f4df76fe4aed668a6c74696deabb9fe73211/types/vote.go#L50).
|
||||
This struct currently contains a timestamp.
|
||||
This timestamp is set using the [voteTime](https://github.com/tendermint/tendermint/blob/e8013281281985e3ada7819f42502b09623d24a0/internal/consensus/state.go#L2241) function and therefore vote times correspond to the current Unix time known to the validator.
|
||||
For precommits, this timestamp is used to construct the [CommitSig that is included in the block in the LastCommit](https://github.com/tendermint/tendermint/blob/e8013281281985e3ada7819f42502b09623d24a0/types/block.go#L754) field.
|
||||
For prevotes, this field is unused.
|
||||
Proposer-based timestamps will use the [RoundState.Proposal](https://github.com/tendermint/tendermint/blob/c3ae6f5b58e07b29c62bfdc5715b6bf8ae5ee951/internal/consensus/types/round_state.go#L76) timestamp to construct the `signedBytes` `CommitSig`.
|
||||
For prevotes, this field is currently unused.
|
||||
Proposer-based timestamps will use the timestamp that the proposer sets into the block and will therefore no longer require that a timestamp be included in the vote messages.
|
||||
This timestamp is therefore no longer useful and will be dropped.
|
||||
|
||||
`Vote` will be updated as follows:
|
||||
@@ -250,58 +164,28 @@ type TimestampParams struct {
|
||||
}
|
||||
```
|
||||
|
||||
### Changes to `Header`
|
||||
|
||||
The [Header](https://github.com/tendermint/tendermint/blob/a419f4df76fe4aed668a6c74696deabb9fe73211/types/block.go#L338) struct currently contains a timestamp.
|
||||
This timestamp is set as the `BFTtime` derived from the block's `LastCommit.CommitSig` timestamps.
|
||||
This timestamp will no longer be derived from the `LastCommit.CommitSig` timestamps and will instead be included directly into the block's `LastCommit`.
|
||||
This timestamp will therefore be identical in both the `Header` and the `LastCommit`.
|
||||
To clarify that the timestamp in the header corresponds to the `LastCommit`'s time, we will rename this timestamp field to `last_timestamp`.
|
||||
|
||||
`Header` will be updated as follows:
|
||||
|
||||
```diff
|
||||
type Header struct {
|
||||
// basic block info
|
||||
Version version.Consensus `json:"version"`
|
||||
ChainID string `json:"chain_id"`
|
||||
Height int64 `json:"height"`
|
||||
-- Time time.Time `json:"time"`
|
||||
++ LastTimestamp time.Time `json:"last_timestamp"`
|
||||
|
||||
// prev block info
|
||||
LastBlockID BlockID `json:"last_block_id"`
|
||||
|
||||
// hashes of block data
|
||||
LastCommitHash tmbytes.HexBytes `json:"last_commit_hash"`
|
||||
DataHash tmbytes.HexBytes `json:"data_hash"`
|
||||
|
||||
// hashes from the app output from the prev block
|
||||
ValidatorsHash tmbytes.HexBytes `json:"validators_hash"`
|
||||
NextValidatorsHash tmbytes.HexBytes `json:"next_validators_hash"`
|
||||
ConsensusHash tmbytes.HexBytes `json:"consensus_hash"`
|
||||
AppHash tmbytes.HexBytes `json:"app_hash"`
|
||||
|
||||
// root hash of all results from the txs from the previous block
|
||||
LastResultsHash tmbytes.HexBytes `json:"last_results_hash"`
|
||||
|
||||
// consensus info
|
||||
EvidenceHash tmbytes.HexBytes `json:"evidence_hash"`
|
||||
ProposerAddress Address `json:"proposer_address"`
|
||||
}
|
||||
```
|
||||
|
||||
### Changes to the block proposal step
|
||||
|
||||
#### Proposer selects proposal timestamp
|
||||
|
||||
The proposal logic already [sets the Unix time known to the validator](https://github.com/tendermint/tendermint/blob/2abfe20114ee3bb3adfee817589033529a804e4d/types/proposal.go#L44) into the `Proposal` message.
|
||||
This satisfies the proposer-based timestamp specification and does not need to change.
|
||||
|
||||
#### Proposer selects block timestamp
|
||||
|
||||
The proposal timestamp that was decided in height `H-1` will be stored in the `State` struct's in the `RoundState.LastCommit` field.
|
||||
The proposer will select this timestamp to use as the block timestamp at height `H`.
|
||||
Tendermint currently uses the `BFTTime` algorithm to produce the block's `Header.Timestamp`.
|
||||
The [proposal logic](https://github.com/tendermint/tendermint/blob/68ca65f5d79905abd55ea999536b1a3685f9f19d/internal/state/state.go#L269) sets the weighted median of the times in the `LastCommit.CommitSigs` as the proposed block's `Header.Timestamp`.
|
||||
|
||||
In proposer-based timestamps, the proposer will still set a timestamp into the `Header.Timestamp`.
|
||||
The timestamp the proposer sets into the `Header` will change depending on if the block has previously received a [polka](https://github.com/tendermint/tendermint/blob/053651160f496bb44b107a434e3e6482530bb287/docs/introduction/what-is-tendermint.md#consensus-overview) or not.
|
||||
|
||||
#### Proposal of a block that has not previously received a polka
|
||||
|
||||
If a proposer is proposing a new block, then it will set the Unix time currently known to the proposer into the `Header.Timestamp` field.
|
||||
The proposer will also set this same timestamp into the `Timestamp` field of the `Proposal` message that it issues.
|
||||
|
||||
#### Re-proposal of a block that has previously received a polka
|
||||
|
||||
If a proposer is re-proposing a block that has previously received a polka on the network, then the proposer does not update the `Header.Timestamp` of that block.
|
||||
Instead, the proposer simply re-proposes the exact same block.
|
||||
This way, the proposed block has the exact same block ID as the previously proposed block and the validators that have already received that block do not need to attempt to receive it again.
|
||||
|
||||
The proposer will set the re-proposed block's `Header.Timestamp` as the `Proposal` message's `Timestamp`.
|
||||
|
||||
#### Proposer waits
|
||||
|
||||
@@ -310,72 +194,94 @@ In `BFTTime`, if a validator’s clock was behind, the [validator added 1 millis
|
||||
A goal of adding proposer-based timestamps is to enforce some degree of clock synchronization, so having a mechanism that completely ignores the Unix time of the validator no longer works.
|
||||
|
||||
Validator clocks will not be perfectly in sync.
|
||||
Therefore, the proposer’s current known Unix time may be less than the `LastCommit.Timestamp`.
|
||||
If the proposer’s current known Unix time is less than the `LastCommit.Timestamp`, the proposer will sleep until its known Unix time exceeds `LastCommit.Timestamp`.
|
||||
Therefore, the proposer’s current known Unix time may be less than the previous block's `Header.Time`.
|
||||
If the proposer’s current known Unix time is less than the previous block's `Header.Time`, the proposer will sleep until its known Unix time exceeds it.
|
||||
|
||||
This change will require amending the [defaultDecideProposal](https://github.com/tendermint/tendermint/blob/822893615564cb20b002dd5cf3b42b8d364cb7d9/internal/consensus/state.go#L1180) method.
|
||||
This method should now block until the proposer’s time is greater than `LastCommit.Timestamp`.
|
||||
This method should now schedule a timeout that fires when the proposer’s time is greater than the previous block's `Header.Time`.
|
||||
When the timeout fires, the proposer will finally issue the `Proposal` message.
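A hedged sketch of the proposer-side wait described above: rather than sleeping, schedule a timer for the moment the local clock passes the previous block's `Header.Time` (the helper names are illustrative, not the actual `defaultDecideProposal` code).

```go
package main

import (
	"fmt"
	"time"
)

// proposalDelay returns how long the proposer should wait before issuing its
// Proposal so that its local time exceeds the previous block's Header.Time.
func proposalDelay(prevBlockTime, now time.Time) time.Duration {
	if d := prevBlockTime.Sub(now); d > 0 {
		return d
	}
	return 0
}

func main() {
	prev := time.Now().Add(150 * time.Millisecond) // pretend our clock is slightly behind
	delay := proposalDelay(prev, time.Now())

	// Schedule the proposal on a timer rather than blocking the state machine.
	timer := time.AfterFunc(delay, func() { fmt.Println("issuing Proposal") })
	defer timer.Stop()
	time.Sleep(delay + 50*time.Millisecond) // keep main alive for the demo
}
```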
|
||||
|
||||
#### Changes to the propose step timeout
|
||||
|
||||
Currently, a validator waiting for a proposal will proceed past the propose step if the configured propose timeout is reached and no proposal is seen.
|
||||
Proposer-based timestamps requires changing this timeout logic.
|
||||
Proposer-based timestamps requires changing this timeout logic.
|
||||
|
||||
The proposer will now wait until its current known Unix time exceeds the previous block's `Header.Time` to propose a block.
|
||||
The validators must now take this and some other factors into account when deciding when to timeout the propose step.
|
||||
Specifically, the propose step timeout must also take into account potential inaccuracy in the validator’s clock and in the clock of the proposer.
|
||||
Additionally, there may be a delay communicating the proposal message from the proposer to the other validators.
|
||||
|
||||
Therefore, validators waiting for a proposal must wait until after the previous block's `Header.Time` before timing out.
To account for possible inaccuracy in its own clock, inaccuracy in the proposer’s clock, and message delay, validators waiting for a proposal will wait until the previous block's `Header.Time + 2*ACCURACY + MSGDELAY`.
|
||||
The spec defines this as `waitingTime`.
|
||||
|
||||
The [propose step’s timeout is set in enterPropose](https://github.com/tendermint/tendermint/blob/822893615564cb20b002dd5cf3b42b8d364cb7d9/internal/consensus/state.go#L1108) in `state.go`.
|
||||
`enterPropose` will be changed to calculate waiting time using the new consensus parameters.
|
||||
The timeout in `enterPropose` will then be set as the maximum of `waitingTime` and the [configured proposal step timeout](https://github.com/tendermint/tendermint/blob/dc7c212c41a360bfe6eb38a6dd8c709bbc39aae7/config/config.go#L1013).
|
||||
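The timeout calculation can be summarized with a short sketch. The parameter and function names (`accuracy`, `msgDelay`, `proposeTimeout`) are illustrative stand-ins for the new synchrony consensus parameters and the configured propose timeout, not the actual implementation.

```go
package main

import (
	"fmt"
	"time"
)

// proposeTimeout sketches the waitingTime computation described above:
// validators wait until prevBlockTime + 2*ACCURACY + MSGDELAY before giving up
// on a proposal, and the propose-step timeout is the larger of that waiting
// time and the configured propose timeout.
func proposeTimeout(prevBlockTime, now time.Time, accuracy, msgDelay, configuredTimeout time.Duration) time.Duration {
	waitingTime := prevBlockTime.Add(2*accuracy + msgDelay).Sub(now)
	if waitingTime > configuredTimeout {
		return waitingTime
	}
	return configuredTimeout
}

func main() {
	prev := time.Now().Add(-1 * time.Second)
	timeout := proposeTimeout(prev, time.Now(), 500*time.Millisecond, 2*time.Second, 3*time.Second)
	fmt.Println("propose step timeout:", timeout)
}
```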
|
||||
### Changes to proposal validation rules
|
||||
|
||||
The rules for validating a proposed block will be modified to implement proposer-based timestamps.
We will change the validation logic to ensure that a proposal is `timely`.
|
||||
|
||||
#### Proposal timestamp validation
|
||||
Per the proposer-based timestamps spec, `timely` only needs to be checked if a block has not received a +2/3 majority of `Prevotes` in a round.
|
||||
If a block previously received a +2/3 majority of prevotes in a previous round, then +2/3 of the voting power considered the block's timestamp near enough to their own currently known Unix time in that round.
|
||||
|
||||
Adding proposal timestamp validation is a reasonably straightforward change.
|
||||
The current Unix time known to the proposer is already included in the [Proposal message](https://github.com/tendermint/tendermint/blob/dc7c212c41a360bfe6eb38a6dd8c709bbc39aae7/types/proposal.go#L31).
|
||||
Once the proposal is received, the complete message is stored in the `RoundState.Proposal` field.
|
||||
The precommit and prevote validation logic does not currently use this timestamp.
|
||||
This validation logic will be updated to check that the proposal timestamp is within `PRECISION` of the current Unix time known to the validators.
|
||||
If the timestamp is not within `PRECISION` of the current Unix time known to the validator, the proposal will not be considered valid.
|
||||
The validator will also check that the proposal time is greater than the block timestamp from the previous height.
|
||||
The validation logic will be updated to check `timely` for blocks that did not previously receive +2/3 prevotes in a round.
|
||||
Receiving +2/3 prevotes in a round is frequently referred to as a 'polka' and we will use this term for simplicity.
|
||||
|
||||
If no valid proposal is received by the proposal timeout, the validator will prevote nil.
|
||||
This is identical to the current logic.
|
||||
#### Current timestamp validation logic
|
||||
|
||||
#### Block timestamp validation
|
||||
To provide a better understanding of the changes needed to timestamp validation, we will first detail how timestamp validation works currently in Tendermint.
|
||||
|
||||
The [validBlock function](https://github.com/tendermint/tendermint/blob/c3ae6f5b58e07b29c62bfdc5715b6bf8ae5ee951/state/validation.go#L14) currently [validates the proposed block timestamp in three ways](https://github.com/tendermint/tendermint/blob/c3ae6f5b58e07b29c62bfdc5715b6bf8ae5ee951/state/validation.go#L118).
|
||||
First, the validation logic checks that this timestamp is greater than the previous block’s timestamp.
|
||||
|
||||
Second, it validates that the block timestamp is correctly calculated as the weighted median of the timestamps in the [block’s LastCommit](https://github.com/tendermint/tendermint/blob/e8013281281985e3ada7819f42502b09623d24a0/types/block.go#L48).
|
||||
|
||||
Finally, the validation logic authenticates the timestamps in the `LastCommit.CommitSig`.
|
||||
The cryptographic signature in each `CommitSig` is created by signing a hash of fields in the block with the voting validator’s private key.
|
||||
One of the items in this `signedBytes` hash is the timestamp in the `CommitSig`.
|
||||
To authenticate the `CommitSig` timestamp, the validator authenticating votes builds a hash of fields that includes the `CommitSig` timestamp and checks this hash against the signature.
|
||||
This takes place in the [VerifyCommit function](https://github.com/tendermint/tendermint/blob/e8013281281985e3ada7819f42502b09623d24a0/types/validation.go#L25).
|
||||
|
||||
The logic to validate that the block timestamp is greater than the previous block’s timestamp also works for proposer-based timestamps and will not change.
|
||||
#### Remove unused timestamp validation logic
|
||||
|
||||
`BFTTime` validation is no longer applicable and will be removed.
|
||||
This means that validators will no longer check that the block timestamp is a weighted median of `LastCommit` timestamps.
|
||||
Specifically, we will remove the call to [MedianTime in the validateBlock function](https://github.com/tendermint/tendermint/blob/4db71da68e82d5cb732b235eeb2fd69d62114b45/state/validation.go#L117).
|
||||
The `MedianTime` function can be completely removed.
|
||||
The `LastCommit` timestamps may also be removed.
|
||||
|
||||
The `signedBytes` validation logic in `VerifyCommit` will be slightly altered.
|
||||
The `CommitSig`s in the block’s `LastCommit` will no longer each contain a timestamp.
|
||||
The validation logic will instead include the `LastCommit.Timestamp` in the hash of fields for generating the `signedBytes`.
|
||||
The cryptographic signatures included in the `CommitSig`s will then be checked against this `signedBytes` hash to authenticate the timestamp.
|
||||
Specifically, the `VerifyCommit` function will be updated to use this new timestamp.
|
||||
Since `CommitSig`s will no longer contain a timestamp, the validator authenticating a commit will no longer include the `CommitSig` timestamp in the hash of fields it builds to check against the cryptographic signature.
|
||||
|
||||
#### Timestamp validation when a block has not received a polka
|
||||
|
||||
The [POLRound](https://github.com/tendermint/tendermint/blob/68ca65f5d79905abd55ea999536b1a3685f9f19d/types/proposal.go#L29) in the `Proposal` message indicates which round the block received a polka.
|
||||
A negative value in the `POLRound` field indicates that the block has not previously been proposed on the network.
|
||||
Therefore, the validation logic will perform the `timely` check when `POLRound < 0`.
|
||||
|
||||
When a validator receives a `Proposal` message, the validator will check that the `Proposal.Timestamp` is at most `PRECISION` greater than the current Unix time known to the validator, and at minimum `PRECISION + MSGDELAY` less than the current Unix time known to the validator.
|
||||
If the timestamp is not within these bounds, the proposed block will not be considered `timely`.
|
||||
|
||||
Once a full block matching the `Proposal` message is received, the validator will also check that the timestamp in the `Header.Timestamp` of the block matches this `Proposal.Timestamp`.
|
||||
Using the `Proposal.Timestamp` to check `timely` allows the `MSGDELAY` parameter to be more finely tuned, since `Proposal` messages are fixed in size and are therefore faster to gossip across the network than full blocks.
|
||||
|
||||
A validator will also check that the proposed timestamp is greater than the timestamp of the block for the previous height.
|
||||
If the timestamp is not greater than the previous block's timestamp, the block will not be considered valid, which is the same as the current logic.
|
||||
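A minimal sketch of the bounds described above, assuming `PRECISION` and `MSGDELAY` are available as `time.Duration` values; the `isTimely` helper name is hypothetical.

```go
package main

import (
	"fmt"
	"time"
)

// isTimely sketches the bounds described above: the Proposal.Timestamp may be
// at most PRECISION ahead of the validator's clock and at most
// PRECISION + MSGDELAY behind it.
func isTimely(proposalTime, now time.Time, precision, msgDelay time.Duration) bool {
	lowerBound := now.Add(-(precision + msgDelay))
	upperBound := now.Add(precision)
	return !proposalTime.Before(lowerBound) && !proposalTime.After(upperBound)
}

func main() {
	now := time.Now()
	precision := 500 * time.Millisecond
	msgDelay := 2 * time.Second
	fmt.Println(isTimely(now.Add(-time.Second), now, precision, msgDelay))   // true
	fmt.Println(isTimely(now.Add(10*time.Second), now, precision, msgDelay)) // false: too far ahead of our clock
}
```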
|
||||
#### Timestamp validation when a block has received a polka
|
||||
|
||||
When a block is re-proposed that has already received a +2/3 majority of `Prevote`s on the network, the `Proposal` message for the re-proposed block is created with a `POLRound` that is `>= 0`.
|
||||
A validator will not check that the `Proposal` is `timely` if the propose message has a non-negative `POLRound`.
|
||||
If the `POLRound` is non-negative, each validator will simply ensure that it received the `Prevote` messages for the proposed block in the round indicated by `POLRound`.
|
||||
|
||||
If the validator did not receive `Prevote` messages for the proposed block in `POLRound`, then it will prevote nil.
|
||||
Validators already check that +2/3 prevotes were seen in `POLRound`, so this does not represent a change to the prevote logic.
|
||||
|
||||
A validator will also check that the proposed timestamp is greater than the timestamp of the block for the previous height.
|
||||
If the timestamp is not greater than the previous block's timestamp, the block will not be considered valid, which is the same as the current logic.
|
||||
|
||||
Additionally, this validation logic can be updated to check that the `Proposal.Timestamp` matches the `Header.Timestamp` of the proposed block, but it is less relevant since checking that votes were received is sufficient to ensure the block timestamp is correct.
|
||||
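Putting the two subsections together, here is a hedged sketch of how the branch between the two validation paths might look. `validProposalTime` and `sawPolkaIn` are hypothetical names, and the timeliness check repeats the helper from the previous sketch so the example is self-contained.

```go
package main

import (
	"fmt"
	"time"
)

// isTimely repeats the bounds check from the previous sketch.
func isTimely(proposalTime, now time.Time, precision, msgDelay time.Duration) bool {
	return !proposalTime.Before(now.Add(-(precision + msgDelay))) && !proposalTime.After(now.Add(precision))
}

// validProposalTime sketches how the two validation paths above fit together.
// sawPolkaIn stands in for "this validator saw +2/3 prevotes for the proposed
// block in round r"; all names are illustrative, not Tendermint's actual API.
func validProposalTime(
	proposalTime, prevBlockTime, now time.Time,
	polRound int32,
	precision, msgDelay time.Duration,
	sawPolkaIn func(round int32) bool,
) bool {
	// In both paths the proposed timestamp must move forward from the
	// previous block's timestamp.
	if !proposalTime.After(prevBlockTime) {
		return false
	}
	if polRound >= 0 {
		// Re-proposed block: skip the timeliness check and confirm the
		// prevotes from the earlier round instead.
		return sawPolkaIn(polRound)
	}
	// New block: the proposal must be timely relative to our own clock.
	return isTimely(proposalTime, now, precision, msgDelay)
}

func main() {
	now := time.Now()
	ok := validProposalTime(now, now.Add(-time.Second), now, -1, 500*time.Millisecond, 2*time.Second, nil)
	fmt.Println(ok) // true: new proposal, timely, and later than the previous block
}
```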
|
||||
### Changes to the prevote step
|
||||
|
||||
@@ -383,26 +289,14 @@ Currently, a validator will prevote a proposal in one of three cases:
|
||||
|
||||
* Case 1: Validator has no locked block and receives a valid proposal.
|
||||
* Case 2: Validator has a locked block and receives a valid proposal matching its locked block.
|
||||
* Case 3: Validator has a locked block, sees a valid proposal not matching its locked block, but sees +⅔ prevotes for the proposal’s block, either in the current round or in a round greater than or equal to the round in which it locked its block.
|
||||
|
||||
The only change we will make to the prevote step is what a validator considers a valid proposal, as detailed above.
|
||||
|
||||
### Changes to the precommit step
|
||||
|
||||
The precommit step will not require much modification.
|
||||
Its proposal validation rules will change in the same ways that validation will change in the prevote step, with the exception of the `timely` check: precommit validation will never check that the timestamp is `timely`.
|
||||
|
||||
### Changes to locking a block
|
||||
When a validator receives a valid proposed block and +2/3 prevotes for that block, it stores the block as its ‘locked block’ in the [RoundState.ValidBlock](https://github.com/tendermint/tendermint/blob/e8013281281985e3ada7819f42502b09623d24a0/internal/consensus/types/round_state.go#L85) field.
|
||||
In each subsequent round it will prevote that block.
|
||||
A validator will only change which block it has locked if it sees +2/3 prevotes for a different block.
|
||||
|
||||
This mechanism will remain largely unchanged.
|
||||
The only difference is the addition of proposal timestamp validation.
|
||||
A validator will prevote nil in a round if the proposal message it received is not `timely`.
|
||||
Prevoting nil in this case will not cause a validator to ‘unlock’ its locked block.
|
||||
This difference is an incidental result of the changes to prevote validation.
|
||||
It is included in this design for completeness and to clarify that no additional changes will be made to block locking.
|
||||
|
||||
### Remove voteTime Completely
|
||||
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
|
||||
## Status
|
||||
|
||||
Implemented
|
||||
|
||||
## Context
|
||||
|
||||
|
||||
docs/architecture/adr-073-libp2p.md
|
||||
# ADR 073: Adopt LibP2P
|
||||
|
||||
## Changelog
|
||||
|
||||
- 2021-11-02: Initial Draft (@tychoish)
|
||||
|
||||
## Status
|
||||
|
||||
Proposed.
|
||||
|
||||
## Context
|
||||
|
||||
|
||||
As part of the 0.35 development cycle, the Tendermint team completed
|
||||
the first phase of the work described in ADRs 61 and 62, which included a
|
||||
large scale refactoring of the reactors and the p2p message
|
||||
routing. This replaced the switch and many of the other legacy
|
||||
components without breaking protocol or network-level
|
||||
interoperability and left the legacy connection/socket handling code.
|
||||
|
||||
Following the release, the team has reexamined the state of the code
|
||||
and the design, as well as Tendermint's requirements. The notes
|
||||
from that process are available in the [P2P Roadmap
|
||||
RFC][rfc].
|
||||
|
||||
This ADR supersedes the decisions made in ADRs 61 and 62, but
|
||||
builds on the completed portions of this work. Previously, the
|
||||
boundaries of peer management, message handling, and the higher level
|
||||
business logic (e.g., "the reactors") were intermingled, and core
|
||||
elements of the p2p system were responsible for the orchestration of
|
||||
higher-level business logic. Refactoring the legacy components
|
||||
made it more obvious that this entanglement of responsibilities
|
||||
had outsized influence on the entire implementation, making
|
||||
it difficult to iterate within the current abstractions.
|
||||
It would not be viable to maintain interoperability with legacy
|
||||
systems while also achieving many of our broader objectives.
|
||||
|
||||
LibP2P is a thoroughly-specified implementation of a peer-to-peer
|
||||
networking stack, designed specifically for systems such as
|
||||
ours. Adopting LibP2P as the basis of Tendermint will allow the
|
||||
Tendermint team to focus more of their time on other differentiating
|
||||
aspects of the system, and make it possible for the ecosystem as a
|
||||
whole to take advantage of tooling and efforts of the LibP2P
|
||||
platform.
|
||||
|
||||
## Alternative Approaches
|
||||
|
||||
As discussed in the [P2P Roadmap RFC][rfc], the primary alternative would be to
|
||||
continue development of Tendermint's home-grown peer-to-peer
|
||||
layer. While that would give the Tendermint team maximal control
|
||||
over the peer system, the current design is unexceptional on its
|
||||
own merits, and the prospective maintenance burden for this system
|
||||
exceeds our tolerances for the medium term.
|
||||
|
||||
Tendermint can and should differentiate itself not on the basis of
|
||||
its networking implementation or peer management tools, but providing
|
||||
a consistent operator experience, a battle-tested consensus algorithm,
|
||||
and an ergonomic user experience.
|
||||
|
||||
## Decision
|
||||
|
||||
Tendermint will adopt libp2p during the 0.37 development cycle,
|
||||
replacing the bespoke Tendermint P2P stack. This will remove the
|
||||
`Endpoint`, `Transport`, `Connection`, and `PeerManager` abstractions
|
||||
and leave the reactors, `p2p.Router` and `p2p.Channel`
|
||||
abstractions.
|
||||
|
||||
LibP2P may obviate the need for a dedicated peer exchange (PEX)
|
||||
reactor, which would also in turn obviate the need for a dedicated
|
||||
seed mode. If this is the case, then all of this functionality would
|
||||
be removed.
|
||||
|
||||
If it turns out (based on the advice of Protocol Labs) that it makes
|
||||
sense to maintain separate pubsub or gossipsub topics
|
||||
per-message-type, then the `Router` abstraction could also
|
||||
be entirely subsumed.
|
||||
|
||||
## Detailed Design
|
||||
|
||||
### Implementation Changes
|
||||
|
||||
The seams in the P2P implementation between the higher level
|
||||
constructs (reactors), the routing layer (`Router`) and the lower
|
||||
level connection and peer management code make this operation
|
||||
relatively straightforward to implement. A key
|
||||
goal in this design is to minimize the impact on the reactors
|
||||
(potentially entirely) and completely remove the lower level
|
||||
components (e.g., `Transport`, `Connection` and `PeerManager`) using the
|
||||
separation afforded by the `Router` layer. The current state of the
|
||||
code makes these changes relatively surgical, and limited to a small
|
||||
number of methods:
|
||||
|
||||
- `p2p.Router.OpenChannel` will still return a `Channel` structure
|
||||
which will continue to serve as a pipe between the reactors and the
|
||||
`Router`. The implementation will no longer need the queue
|
||||
implementation, and will instead start goroutines that
|
||||
are responsible for routing the messages from the channel to libp2p
|
||||
fundamentals, replacing the current `p2p.Router.routeChannel`.
|
||||
|
||||
- The current `p2p.Router.dialPeers` and `p2p.Router.acceptPeers`,
|
||||
are responsible for establishing outbound and inbound connections,
|
||||
respectively. These methods will be removed, along with
|
||||
`p2p.Router.openConnection`, and the libp2p connection manager will
|
||||
be responsible for maintaining network connectivity.
|
||||
|
||||
- The `p2p.Channel` interface will change to replace Go
|
||||
channels with a more functional interface for sending messages.
|
||||
New methods on this object will take contexts to support safe
|
||||
cancellation, and return errors, and will block rather than
|
||||
running asynchronously. The `Out` channel through which
|
||||
reactors send messages to Peers, will be replaced by a `Send`
|
||||
method, and the Error channel will be replaced by an `Error`
|
||||
method (see the sketch after this list).
|
||||
|
||||
- Reactors will be passed an interface that will allow them to
|
||||
access Peer information from libp2p. This will supplant the
|
||||
`p2p.PeerUpdates` subscription.
|
||||
|
||||
- Add some kind of heartbeat message at the application level
|
||||
(e.g., with a reactor), potentially connected to libp2p's DHT, to be
|
||||
used by reactors for service discovery, message targeting, or other
|
||||
features.
|
||||
|
||||
- Replace the existing/legacy handshake protocol with [Noise](http://www.noiseprotocol.org/noise.html).
|
||||
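To illustrate the direction (not a settled API), here is a sketch of what the reworked `p2p.Channel` surface mentioned in the list above might look like. The `Envelope` shape and the exact method signatures are assumptions made for illustration only.

```go
package p2p

import "context"

// Envelope is a simplified stand-in for the wrapper in which reactors
// exchange messages; illustrative only.
type Envelope struct {
	To      string // destination peer ID (illustrative)
	Message []byte
}

// Channel sketches the more functional surface described above: blocking,
// context-aware methods replace the Out and Error Go channels. This is an
// illustration of the proposed direction, not the final API.
type Channel interface {
	// Send delivers an envelope toward the router/libp2p layer, blocking
	// until it is accepted or the context is cancelled.
	Send(ctx context.Context, envelope Envelope) error

	// Error reports a problem with a peer, replacing the error channel.
	Error(ctx context.Context, peerID string, err error) error

	// Receive blocks for the next inbound envelope, avoiding direct
	// exposure of a Go channel to the reactors.
	Receive(ctx context.Context) (Envelope, error)
}
```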
|
||||
This project will initially use the TCP-based transport protocols within
|
||||
libp2p. QUIC is also available as an option that we may implement later.
|
||||
We will not support mixed networks in the initial release, but will
|
||||
revisit that possibility later if there is a demonstrated need.
|
||||
|
||||
### Upgrade and Compatibility
|
||||
|
||||
Because the routers and all current P2P libraries are `internal`
|
||||
packages and not part of the public API, the only changes to the public
|
||||
API surface area of Tendermint will be different configuration
|
||||
file options, replacing the current P2P options with options relevant
|
||||
to libp2p.
|
||||
|
||||
However, it will not be possible to run a network with both networking
|
||||
stacks active at once, so the upgrade to the version of Tendermint
|
||||
will need to be coordinated between all nodes of the network. This is
|
||||
consistent with the expectations around upgrades for Tendermint moving
|
||||
forward, and will help manage both the complexity of the project and
|
||||
the implementation timeline.
|
||||
|
||||
## Open Questions
|
||||
|
||||
- What is the role of Protocol Labs in the implementation of libp2p in
|
||||
tendermint, both during the initial implementation and on an ongoing
|
||||
basis thereafter?
|
||||
|
||||
- Should all P2P traffic for a given node be pushed to a single topic,
|
||||
so that a topic maps to a specific ChainID, or should
|
||||
each reactor (or type of message) have its own topic? How many
|
||||
topics can a libp2p network support? Is there testing that validates
|
||||
the capabilities?
|
||||
|
||||
- Tendermint presently provides a very coarse QoS-like functionality
|
||||
using priorities based on message-type.
|
||||
This intuitively/theoretically ensures that evidence and consensus
|
||||
messages don't get starved by blocksync/statesync messages. It's
|
||||
unclear if we can or should attempt to replicate this with libp2p.
|
||||
|
||||
- What kind of QoS functionality does libp2p provide and what kind of
|
||||
metrics does libp2p provide about its QoS functionality?
|
||||
|
||||
- Is it possible to store additional (and potentially arbitrary)
|
||||
information into the DHT as part of the heartbeats between nodes,
|
||||
such as the latest height, and then access that in the
|
||||
reactors? How frequently can the DHT be updated?
|
||||
|
||||
- Does it make sense to have reactors continue to consume inbound
|
||||
messages from a Channel (`In`) or is there another interface or
|
||||
pattern that we should consider?
|
||||
|
||||
- We should avoid exposing Go channels when possible, and likely
|
||||
some kind of alternate iterator makes sense for processing
|
||||
messages within the reactors.
|
||||
|
||||
- What are the security and protocol implications of tracking
|
||||
information from peer heartbeats and exposing that to reactors?
|
||||
|
||||
- How much (or how little) configuration can Tendermint provide for
|
||||
libp2p, particularly on the first release?
|
||||
|
||||
- In general, we should not support byo-functionality for libp2p
|
||||
components within Tendermint, and should reduce the configuration surface
area as much as possible.
|
||||
|
||||
- What are the best ways to provide request/response semantics for
|
||||
reactors on top of libp2p? Will it be possible to add
|
||||
request/response semantics in a future release or is there
|
||||
anticipatory work that needs to be done as part of the initial
|
||||
release?
|
||||
|
||||
## Consequences
|
||||
|
||||
### Positive
|
||||
|
||||
- Reduce the maintenance burden for the Tendermint Core team by
|
||||
removing a large swath of legacy code that has proven to be
|
||||
difficult to modify safely.
|
||||
|
||||
- Remove the responsibility for maintaining and developing the entire
|
||||
peer management system (p2p) and stack.
|
||||
|
||||
- By providing users with a more stable peer and networking system,
|
||||
Tendermint can improve operator experience and network stability.
|
||||
|
||||
### Negative
|
||||
|
||||
- By deferring to library implementations for peer management and
|
||||
networking, Tendermint loses some flexibility for innovating at the
|
||||
peer and networking level. However, Tendermint should be innovating
|
||||
primarily at the consensus layer, and libp2p does not preclude
|
||||
optimization or development in the peer layer.
|
||||
|
||||
- Libp2p is a large dependency and Tendermint would become dependent
|
||||
upon Protocol Labs' release cycle and prioritization for bug
|
||||
fixes. If this proves onerous, it's possible to maintain a vendor
|
||||
fork of relevant components as needed.
|
||||
|
||||
### Neutral
|
||||
|
||||
- N/A
|
||||
|
||||
## References
|
||||
|
||||
- [ADR 61: P2P Refactor Scope][adr61]
|
||||
- [ADR 62: P2P Architecture][adr62]
|
||||
- [P2P Roadmap RFC][rfc]
|
||||
|
||||
[adr61]: ./adr-061-p2p-refactor-scope.md
|
||||
[adr62]: ./adr-062-p2p-architecture.md
|
||||
[rfc]: ../rfc/rfc-000-p2p.rst
|
||||
@@ -1,16 +0,0 @@
|
||||
---
|
||||
order: 1
|
||||
parent:
|
||||
title: Networks
|
||||
order: 1
|
||||
---
|
||||
|
||||
# Overview
|
||||
|
||||
Use [Docker Compose](./docker-compose.md) to spin up Tendermint testnets on your
|
||||
local machine.
|
||||
|
||||
Use [Terraform and Ansible](./terraform-and-ansible.md) to deploy Tendermint
|
||||
testnets to the cloud.
|
||||
|
||||
See the `tendermint testnet --help` command for more help initializing testnets.
|
||||
@@ -1,7 +1,7 @@
|
||||
---
|
||||
order: 1
|
||||
parent:
|
||||
title: Node Operators
|
||||
order: 4
|
||||
---
|
||||
|
||||
|
||||
@@ -221,9 +221,6 @@ pprof-laddr = ""
|
||||
#######################################################
|
||||
[p2p]
|
||||
|
||||
# Enable the legacy p2p layer.
|
||||
use-legacy = false
|
||||
|
||||
# Select the p2p internal queue
|
||||
queue-type = "priority"
|
||||
|
||||
|
||||
@@ -20,6 +20,7 @@ The following metrics are available:
|
||||
|
||||
| **Name**                               | **Type**  | **Tags**      | **Description**                                                         |
| -------------------------------------- | --------- | ------------- | ---------------------------------------------------------------------- |
| abci_connection_method_timing          | Histogram | method, type  | Timings for each of the ABCI methods                                    |
| consensus_height                       | Gauge     |               | Height of the chain                                                     |
| consensus_validators                   | Gauge     |               | Number of validators                                                    |
| consensus_validators_power             | Gauge     |               | Total voting power of all validators                                    |
|
||||
@@ -55,6 +56,16 @@ The following metrics are available:
|
||||
|
||||
Percentage of missing + byzantine validators:
|
||||
|
||||
```prometheus
((consensus_byzantine_validators_power + consensus_missing_validators_power) / consensus_validators_power) * 100
```
|
||||
|
||||
Rate at which the application is responding to each ABCI method call.
|
||||
```
|
||||
sum(rate(tendermint_abci_connection_method_timing_count[5m])) by (method)
|
||||
```
|
||||
|
||||
The 95th percentile response time for the application to the `deliver_tx` ABCI method call.
|
||||
```
|
||||
histogram_quantile(0.95, sum by(le) (rate(tendermint_abci_connection_method_timing_bucket{method="deliver_tx"}[5m])))
|
||||
```
|
||||
|
||||
@@ -7,7 +7,7 @@
|
||||
"vuepress-theme-cosmos": "^1.0.182"
|
||||
},
|
||||
"devDependencies": {
|
||||
"watchpack": "^2.2.0"
|
||||
"watchpack": "^2.3.1"
|
||||
},
|
||||
"scripts": {
|
||||
"preserve": "./pre.sh",
|
||||
|
||||
@@ -43,5 +43,6 @@ sections.
|
||||
- [RFC-003: Performance Taxonomy](./rfc-003-performance-questions.md)
|
||||
- [RFC-004: E2E Test Framework Enhancements](./rfc-004-e2e-framework.md)
|
||||
- [RFC-005: Event System](./rfc-005-event-system.rst)
|
||||
- [RFC-006: Event Subscription](./rfc-006-event-subscription.md)
|
||||
|
||||
<!-- - [RFC-NNN: Title](./rfc-NNN-title.md) -->
|
||||
|
||||
docs/rfc/rfc-006-event-subscription.md
|
||||
# RFC 006: Event Subscription
|
||||
|
||||
## Changelog
|
||||
|
||||
- 30-Oct-2021: Initial draft (@creachadair)
|
||||
|
||||
## Abstract
|
||||
|
||||
The Tendermint consensus node allows clients to subscribe to its event stream
|
||||
via methods on its RPC service. The ability to view the event stream is
|
||||
valuable for clients, but the current implementation has some deficiencies that
|
||||
make it difficult for some clients to use effectively. This RFC documents these
|
||||
issues and discusses possible approaches to solving them.
|
||||
|
||||
|
||||
## Background
|
||||
|
||||
A running Tendermint consensus node exports a [JSON-RPC service][rpc-service]
|
||||
that provides a [large set of methods][rpc-methods] for inspecting and
|
||||
interacting with the node. One important cluster of these methods are the
|
||||
`subscribe`, `unsubscribe`, and `unsubscribe_all` methods, which permit clients
|
||||
to subscribe to a filtered stream of the [events generated by the node][events]
|
||||
as it runs.
|
||||
|
||||
Unlike the other methods of the service, the methods in the "event
|
||||
subscription" cluster are not accessible via [ordinary HTTP GET or POST
|
||||
requests][rpc-transport], but require upgrading the HTTP connection to a
|
||||
[websocket][ws]. This is necessary because the `subscribe` request needs a
|
||||
persistent channel to deliver results back to the client, and an ordinary HTTP
|
||||
connection does not reliably persist across multiple requests. Since these
|
||||
methods do not work properly without a persistent channel, they are _only_
|
||||
exported via a websocket connection, and are not routed for plain HTTP.
|
||||
|
||||
|
||||
## Discussion
|
||||
|
||||
There are some operational problems with the current implementation of event
|
||||
subscription in the RPC service:
|
||||
|
||||
- **Event delivery is not valid JSON-RPC.** When a client issues a `subscribe`
|
||||
request, the server replies (correctly) with an initial empty acknowledgement
|
||||
(`{}`). After that, each matching event is delivered "unsolicited" (without
|
||||
another request from the client), as a separate [response object][json-response]
|
||||
with the same ID as the initial request.
|
||||
|
||||
This matters because it means a standard JSON-RPC client library can't
|
||||
interact correctly with the event subscription mechanism.
|
||||
|
||||
Even for clients that can handle unsolicited values pushed by the server,
|
||||
these responses are invalid: They have an ID, so they cannot be treated as
|
||||
[notifications][json-notify]; but the ID corresponds to a request that was
|
||||
already completed. In practice, this means that general-purpose JSON-RPC
|
||||
libraries cannot use this method correctly -- it requires a custom client.
|
||||
|
||||
The Go RPC client from the Tendermint core can support this case, but clients
|
||||
in other languages have no easy solution.
|
||||
|
||||
This is the cause of issue [#2949][issue2949].
|
||||
|
||||
- **Subscriptions are terminated by disconnection.** When the connection to the
|
||||
client is interrupted, the subscription is silently dropped.
|
||||
|
||||
This is a reasonable behavior, but it matters because a client whose
|
||||
subscription is dropped gets no useful error feedback, just a closed
|
||||
connection. Should they try again? Is the node overloaded? Was the client
|
||||
too slow? Did the caller forget to respond to pings? Debugging these kinds
|
||||
of failures is unnecessarily painful.
|
||||
|
||||
Websockets compound this, because websocket connections time out if no
|
||||
traffic is seen for a while, and keeping them alive requires active
|
||||
cooperation between the client and server. With a plain TCP socket, liveness
|
||||
is handled transparently by the keepalive mechanism. On a websocket,
|
||||
however, one side has to occasionally send a PING (if the connection is
|
||||
otherwise idle). The other side must return a matching PONG in time, or the
|
||||
connection is dropped. Apart from being tedious, this is highly susceptible
|
||||
to CPU load.
|
||||
|
||||
The Tendermint Go implementation automatically sends and responds to pings.
|
||||
Clients in other languages (or not wanting to use the Tendermint libraries)
|
||||
need to handle it explicitly. This burdens the client for no practical
|
||||
benefit: A subscriber has no information about when matching events may be
|
||||
available, so it shouldn't have to participate in keeping the connection
|
||||
alive.
|
||||
|
||||
- **Mismatched load profiles.** Most of the RPC service is mainly important for
|
||||
low-volume local use, either by the application the node serves (e.g., the
|
||||
ABCI methods) or by the node operator (e.g., the info methods). Event
|
||||
subscription is important for remote clients, and may represent a much higher
|
||||
volume of traffic.
|
||||
|
||||
This matters because both are using the same JSON-RPC mechanism. For
|
||||
low-volume local use, the ergonomics of JSON-RPC are a good fit: It's easy to
|
||||
issue queries from the command line (e.g., using `curl`) or to write scripts
|
||||
that call the RPC methods to monitor the running node.
|
||||
|
||||
For high-volume remote use, JSON-RPC is not such a good fit: Even leaving
|
||||
aside the non-standard delivery protocol mentioned above, the time and memory
|
||||
cost of encoding event data matters for the stability of the node when there
|
||||
can be potentially hundreds of subscribers. Moreover, a subscription is
|
||||
long-lived compared to most RPC methods, in that it may persist as long as the
|
||||
node is active.
|
||||
|
||||
- **Mismatched security profiles.** The RPC service exports several methods
|
||||
that should not be open to arbitrary remote callers, both for correctness
|
||||
reasons (e.g., `remove_tx` and `broadcast_tx_*`) and for operational
|
||||
stability reasons (e.g., `tx_search`). A node may still need to expose
|
||||
events, however, to support UI tools.
|
||||
|
||||
This matters, because all the methods share the same network endpoint. While
|
||||
it is possible to block the top-level GET and POST handlers with a proxy,
|
||||
exposing the `/websocket` handler exposes not _only_ the event subscription
|
||||
methods, but the rest of the service as well.
|
||||
|
||||
### Possible Improvements
|
||||
|
||||
There are several things we could do to improve the experience of developers
|
||||
who need to subscribe to events from the consensus node. These are not all
|
||||
mutually exclusive.
|
||||
|
||||
1. **Split event subscription into a separate service**. Instead of exposing
|
||||
event subscription on the same endpoint as the rest of the RPC service,
|
||||
dedicate a separate endpoint on the node for _only_ event subscription. The
|
||||
rest of the RPC services (_sans_ events) would remain as-is.
|
||||
|
||||
This would make it easy to disable or firewall outside access to sensitive
|
||||
RPC methods, without blocking access to event subscription (and vice versa).
|
||||
This is probably worth doing, even if we don't take any of the other steps
|
||||
described here.
|
||||
|
||||
2. **Use a different protocol for event subscription.** There are various ways
|
||||
we could approach this, depending how much we're willing to shake up the
|
||||
current API. Here are sketches of a few options:
|
||||
|
||||
- Keep the websocket, but rework the API to be more JSON-RPC compliant,
|
||||
perhaps by converting event delivery into notifications. This is less
|
||||
up-front change for existing clients, but retains all of the existing
|
||||
implementation complexity, and doesn't contribute much toward more serious
|
||||
performance and UX improvements later.
|
||||
|
||||
- Switch from websocket to plain HTTP, and rework the subscription API to
|
||||
use a more conventional request/response pattern instead of streaming.
|
||||
This is a little more up-front work for existing clients, but leverages
|
||||
better library support for clients not written in Go.
|
||||
|
||||
The protocol would become more chatty, but we could mitigate that with
|
||||
batching, and in return we would get more control over what to do about
|
||||
slow clients: Instead of simply silently dropping them, as we do now, we
|
||||
could drop messages and signal the client that they missed some data ("M
|
||||
dropped messages since your last poll").
|
||||
|
||||
This option is probably the best balance between work, API change, and
|
||||
benefit, and has a nice incidental effect that it would be easier to debug
|
||||
subscriptions from the command-line, like the other RPC methods.
|
||||
|
||||
- Switch to gRPC: Preserves a persistent connection and gives us a more
|
||||
efficient binary wire format (protobuf), at the cost of much more work for
|
||||
clients and harder debugging. This may be the best option if performance
|
||||
and server load are our top concerns.
|
||||
|
||||
Given that we are currently using JSON-RPC, however, I'm not convinced the
|
||||
costs of encoding and sending messages on the event subscription channel
|
||||
are the limiting factor on subscription efficiency.
|
||||
|
||||
3. **Delegate event subscriptions to a proxy.** Give responsibility for
|
||||
managing event subscription to a proxy that runs separately from the node,
|
||||
and switch the node to push events to the proxy (like a webhook) instead of
|
||||
serving subscribers directly. This is more work for the operator (another
|
||||
process to configure and run) but may scale better for big networks.
|
||||
|
||||
I mention this option for completeness, but making this change would be a
|
||||
fairly substantial project. If we want to consider shifting responsibility
|
||||
for event subscription outside the node anyway, we should probably be more
|
||||
systematic about it. For a more principled approach, see point (4) below.
|
||||
|
||||
4. **Move event subscription downstream of indexing.** We are already planning
|
||||
to give applications more control over event indexing. By extension, we
|
||||
might allow the application to also control how events are filtered,
|
||||
queried, and subscribed. Having the application control these concerns,
|
||||
rather than the node, might make life easier for developers building UI and
|
||||
tools for that application.
|
||||
|
||||
This is a much larger change, so I don't think it is likely to be practical
|
||||
in the near-term, but it's worth considering as a broader option. Some of
|
||||
the existing code for filtering and selection could be made more reusable,
|
||||
so applications would not need to reinvent everything.
|
||||
|
||||
|
||||
## References
|
||||
|
||||
- [Tendermint RPC service][rpc-service]
|
||||
- [Tendermint RPC routes][rpc-methods]
|
||||
- [Discussion of the event system][events]
|
||||
- [Discussion about RPC transport options][rpc-transport] (from RFC 002)
|
||||
- [RFC 6455: The websocket protocol][ws]
|
||||
- [JSON-RPC 2.0 Specification](https://www.jsonrpc.org/specification)
|
||||
|
||||
[rpc-service]: https://docs.tendermint.com/master/rpc/
|
||||
[rpc-methods]: https://github.com/tendermint/tendermint/blob/master/internal/rpc/core/routes.go#L12
|
||||
[events]: ./rfc-005-event-system.rst
|
||||
[rpc-transport]: ./rfc-002-ipc-ecosystem.md#rpc-transport
|
||||
[ws]: https://datatracker.ietf.org/doc/html/rfc6455
|
||||
[json-response]: https://www.jsonrpc.org/specification#response_object
|
||||
[json-notify]: https://www.jsonrpc.org/specification#notification
|
||||
[issue2949]: https://github.com/tendermint/tendermint/issues/2949
|
||||
docs/roadmap/README.md
|
||||
---
|
||||
order: false
|
||||
parent:
|
||||
title: Roadmap
|
||||
order: 7
|
||||
---
|
||||
docs/roadmap/roadmap.md
|
||||
---
|
||||
order: 1
|
||||
---
|
||||
|
||||
# Tendermint Roadmap
|
||||
|
||||
*Last Updated: Friday 8 October 2021*
|
||||
|
||||
This document endeavours to inform the wider Tendermint community about development plans and priorities for Tendermint Core, and when we expect features to be delivered. It is intended to broadly inform all users of Tendermint, including application developers, node operators, integrators, and the engineering and research teams.
|
||||
|
||||
Anyone wishing to propose work to be a part of this roadmap should do so by opening an [issue](https://github.com/tendermint/spec/issues/new/choose) in the spec. Bug reports and other implementation concerns should be brought up in the [core repository](https://github.com/tendermint/tendermint).
|
||||
|
||||
This roadmap should be read as a high-level guide to plans and priorities, rather than a commitment to schedules and deliverables. Features earlier on the roadmap will generally be more specific and detailed than those later on. We will update this document periodically to reflect the current status.
|
||||
|
||||
The upgrades are split into two components: **Epics**, the features that define a release and to a large part dictate the timing of releases; and **minors**, features of smaller scale and lower priority, that could land in neighboring releases.
|
||||
|
||||
## V0.35 (completed Q3 2021)
|
||||
|
||||
### Prioritized Mempool
|
||||
|
||||
Transactions were previously added to blocks in the order with which they arrived to the mempool. Adding a priority field via `CheckTx` gives applications more control over which transactions make it into a block. This is important in the presence of transaction fees. [More](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-067-mempool-refactor.md)
|
||||
|
||||
### Refactor of the P2P Framework
|
||||
|
||||
The Tendermint P2P system is undergoing a large redesign to improve its performance and reliability. The first phase of this redesign is included in 0.35. This phase cleans and decouples abstractions, improves peer lifecycle management, peer address handling and enables pluggable transports. It is implemented to be protocol-compatible with the previous implementation. [More](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-062-p2p-architecture.md)
|
||||
|
||||
### State Sync Improvements
|
||||
|
||||
Following the initial version of state sync, several improvements have been made. These include the addition of [Reverse Sync](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-068-reverse-sync.md) needed for evidence handling, the introduction of a [P2P State Provider](https://github.com/tendermint/tendermint/pull/6807) as an alternative to RPC endpoints, new configuration parameters to adjust throughput, and several bug fixes.
|
||||
|
||||
### Custom event indexing + PSQL Indexer
|
||||
|
||||
Added a new `EventSink` interface to allow alternatives to Tendermint's proprietary transaction indexer. We also added a PostgreSQL Indexer implementation, allowing rich SQL-based index queries. [More](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-065-custom-event-indexing.md)
|
||||
|
||||
### Minor Works
|
||||
|
||||
- Several Go packages were reorganized to make the distinction between public APIs and implementation details more clear.
|
||||
- Block indexer to index begin-block and end-block events. [More](https://github.com/tendermint/tendermint/pull/6226)
|
||||
- Block, state, evidence, and light storage keys were reworked to preserve lexicographic order. This change requires a database migration. [More](https://github.com/tendermint/tendermint/pull/5771)
|
||||
- Introduction of Tendermint modes. Part of this change includes the possibility to run a separate seed node that runs the PEX reactor only. [More](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-052-tendermint-mode.md)
|
||||
|
||||
## V0.36 (expected Q1 2022)
|
||||
|
||||
### ABCI++
|
||||
|
||||
An overhaul of the existing interface between the application and consensus, to give the application more control over block construction. ABCI++ adds new hooks allowing modification of transactions before they get into a block, verification of a block before voting, injection of signed information into votes, and more compact delivery of blocks after agreement (to allow for concurrent execution). [More](https://github.com/tendermint/spec/blob/master/rfc/004-abci%2B%2B.md)
|
||||
|
||||
### Proposer-Based Timestamps
|
||||
|
||||
Proposer-based timestamps are a replacement of [BFT time](https://docs.tendermint.com/master/spec/consensus/bft-time.html), whereby the proposer chooses a timestamp and validators vote on the block only if the timestamp is considered *timely*. This increases reliance on an accurate local clock, but in exchange makes block time more reliable and resistant to faults. This has important use cases in light clients, IBC relayers, CosmosHub inflation and enabling signature aggregation. [More](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-071-proposer-based-timestamps.md)
|
||||
|
||||
### Soft Upgrades
|
||||
|
||||
We are working on a suite of tools and patterns to make it easier for both node operators and application developers to quickly and safely upgrade to newer versions of Tendermint. [More](https://github.com/tendermint/spec/pull/222)
|
||||
|
||||
### Minor Works
|
||||
|
||||
- Remove the "legacy" P2P framework, and clean up of P2P package. [More](https://github.com/tendermint/tendermint/issues/5670)
|
||||
- Remove the global mutex from the local ABCI client to enable application-controlled concurrency. [More](https://github.com/tendermint/tendermint/issues/7073)
|
||||
- Enable P2P support for light clients
|
||||
- Node orchestration of services + Node initialization and composability
|
||||
- Remove redundancy in several data structures. Remove unused components such as the block sync v2 reactor, gRPC in the RPC layer, and the socket-based remote signer.
|
||||
- Improve node visibility by introducing more metrics
|
||||
|
||||
## V0.37 (expected Q3 2022)
|
||||
|
||||
### Complete P2P Refactor
|
||||
|
||||
Finish the final phase of the P2P system. Ongoing research and planning is taking place to decide whether to adopt [libp2p](https://libp2p.io/), alternative transports to `MConn` such as [QUIC](https://en.wikipedia.org/wiki/QUIC) and handshake/authentication protocols such as [Noise](https://noiseprotocol.org/). Research into more advanced gossiping techniques.
|
||||
|
||||
### Streamline Storage Engine
|
||||
|
||||
Tendermint currently has an abstraction to allow support for multiple database backends. This generality incurs maintenance overhead and interferes with application-specific optimizations that Tendermint could use (ACID guarantees, etc.). We plan to converge on a single database and streamline the Tendermint storage engine. [More](https://github.com/tendermint/tendermint/pull/6897)
|
||||
|
||||
### Evaluate Interprocess Communication
|
||||
|
||||
Tendermint nodes currently have multiple areas of communication with other processes (ABCI, remote-signer, P2P, JSONRPC, websockets, events as examples). Many of these have multiple implementations in which a single suffices. Consolidate and clean up IPC. [More](https://github.com/tendermint/tendermint/blob/master/docs/rfc/rfc-002-ipc-ecosystem.md)
|
||||
|
||||
### Minor Works
|
||||
|
||||
- Amnesia attack handling. [More](https://github.com/tendermint/tendermint/issues/5270)
|
||||
- Remove / Update Consensus WAL. [More](https://github.com/tendermint/tendermint/issues/6397)
|
||||
- Signature Aggregation. [More](https://github.com/tendermint/tendermint/issues/1319)
|
||||
- Remove gogoproto dependency. [More](https://github.com/tendermint/tendermint/issues/5446)
|
||||
|
||||
## V1.0 (expected Q4 2022)
|
||||
|
||||
Has the same feature set as V0.37 but with a focus towards testing, protocol correctness and minor tweaks to ensure a stable product. Such work might include extending the [consensus testing framework](https://github.com/tendermint/tendermint/issues/5920), the use of canary/long-lived testnets and greater integration tests.
|
||||
|
||||
## Post 1.0 Work
|
||||
|
||||
- Improved block propagation with erasure coding and/or compact blocks. [More](https://github.com/tendermint/spec/issues/347)
|
||||
- Consensus engine refactor
|
||||
- Bidirectional ABCI
|
||||
- Randomized Leader Election
|
||||
- ZK proofs / other cryptographic primitives
|
||||
- Multichain Tendermint
|
||||
@@ -1,7 +1,7 @@
|
||||
---
|
||||
order: 1
|
||||
parent:
|
||||
title: Understanding Tendermint
|
||||
order: 5
|
||||
---
|
||||
|
||||
@@ -10,11 +10,15 @@ parent:
|
||||
This section dives into the internals of Go-Tendermint.
|
||||
|
||||
- [Using Tendermint](./using-tendermint.md)
|
||||
- [Running in Production](./running-in-production.md)
|
||||
- [Subscribing to events](./subscription.md)
|
||||
- [Block Structure](./block-structure.md)
|
||||
- [RPC](./rpc.md)
|
||||
- [Block Sync](./block-sync/README.md)
|
||||
- [State Sync](./state-sync/README.md)
|
||||
- [Mempool](./mempool/README.md)
|
||||
- [Light Client](./light-client.md)
|
||||
- [Consensus](./consensus/README.md)
|
||||
- [Peer Exchange (PEX)](./pex/README.md)
|
||||
- [Evidence](./evidence/README.md)
|
||||
|
||||
For full specifications refer to the [spec repo](https://github.com/tendermint/spec).
|
||||
|
||||
@@ -1,7 +1,11 @@
|
||||
---
|
||||
order: 1
|
||||
parent:
|
||||
title: Block Sync
|
||||
order: 6
|
||||
---
|
||||
|
||||
|
||||
# Block Sync
|
||||
*Formerly known as Fast Sync*
|
||||
|
||||
@@ -61,3 +65,7 @@ another event for exposing the fast-sync `complete` status and the state `height
|
||||
|
||||
The user can query the events by subscribing to `EventQueryBlockSyncStatus`.
|
||||
Please check [types](https://pkg.go.dev/github.com/tendermint/tendermint/types?utm_source=godoc#pkg-constants) for the details.
|
||||
|
||||
## Implementation
|
||||
|
||||
To read more on the implementation, please see the [reactor doc](./reactor.md) and the [implementation doc](./implementation.md)
|
||||
docs/tendermint-core/block-sync/implementation.md
|
||||
---
|
||||
order: 3
|
||||
---
|
||||
|
||||
# Implementation
|
||||
|
||||
## Blocksync Reactor
|
||||
|
||||
- coordinates the pool for syncing
|
||||
- coordinates the store for persistence
|
||||
- coordinates the playing of blocks towards the app using a sm.BlockExecutor
|
||||
- handles switching between fastsync and consensus
|
||||
- it is a p2p.BaseReactor
|
||||
- starts the pool.Start() and its poolRoutine()
|
||||
- registers all the concrete types and interfaces for serialisation
|
||||
|
||||
### poolRoutine
|
||||
|
||||
- listens to these channels (see the sketch after this list):
|
||||
- pool requests blocks from a specific peer by posting to requestsCh, block reactor then sends
|
||||
a &bcBlockRequestMessage for a specific height
|
||||
- pool signals timeout of a specific peer by posting to timeoutsCh
|
||||
- switchToConsensusTicker to periodically try and switch to consensus
|
||||
- trySyncTicker to periodically check if we have fallen behind and then catch-up sync
|
||||
- if there aren't any new blocks available on the pool it skips syncing
|
||||
- tries to sync the app by taking downloaded blocks from the pool, gives them to the app and stores
|
||||
them on disk
|
||||
- implements Receive which is called by the switch/peer
|
||||
- calls AddBlock on the pool when it receives a new block from a peer
|
||||
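A compressed, illustrative Go sketch of that event loop; the field and helper names follow the description above and are not an exact copy of the implementation.

```go
package main

import (
	"fmt"
	"time"
)

// Minimal illustrative types; the names follow the description above rather
// than the actual implementation.
type BlockRequest struct {
	Height int64
	PeerID string
}

type peerError struct{ peerID string }

type Reactor struct {
	requestsCh              chan BlockRequest
	timeoutsCh              chan peerError
	switchToConsensusTicker *time.Ticker
	trySyncTicker           *time.Ticker
}

// poolRoutine multiplexes the inputs listed above: block requests from the
// pool, peer timeouts, the periodic switch-to-consensus check, and the
// periodic catch-up sync.
func (r *Reactor) poolRoutine(caughtUp func() bool) {
	for {
		select {
		case req := <-r.requestsCh:
			fmt.Println("send bcBlockRequestMessage for height", req.Height, "to", req.PeerID)
		case perr := <-r.timeoutsCh:
			fmt.Println("peer timed out, removing:", perr.peerID)
		case <-r.switchToConsensusTicker.C:
			if caughtUp() {
				fmt.Println("switching to consensus")
				return
			}
		case <-r.trySyncTicker.C:
			fmt.Println("apply downloaded blocks via the BlockExecutor and persist them")
		}
	}
}

func main() {
	r := &Reactor{
		requestsCh:              make(chan BlockRequest, 1),
		timeoutsCh:              make(chan peerError, 1),
		switchToConsensusTicker: time.NewTicker(10 * time.Millisecond),
		trySyncTicker:           time.NewTicker(10 * time.Millisecond),
	}
	r.requestsCh <- BlockRequest{Height: 1, PeerID: "peer-1"}
	r.poolRoutine(func() bool { return true })
}
```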
|
||||
## Block Pool
|
||||
|
||||
- responsible for downloading blocks from peers
|
||||
- makeRequestersRoutine()
|
||||
- removes timeout peers
|
||||
- starts new requesters by calling makeNextRequester()
|
||||
- requestRoutine():
|
||||
- picks a peer and sends the request, then blocks until:
|
||||
- pool is stopped by listening to pool.Quit
|
||||
- requester is stopped by listening to Quit
|
||||
- request is redone
|
||||
- we receive a block
|
||||
- gotBlockCh is strange
|
||||
|
||||
## Go Routines in Blocksync Reactor
|
||||
|
||||

|
||||
docs/tendermint-core/block-sync/reactor.md
|
||||
---
|
||||
order: 2
|
||||
---
|
||||
# Reactor
|
||||
|
||||
The Blocksync Reactor's high level responsibility is to enable peers who are
|
||||
far behind the current state of the consensus to quickly catch up by downloading
|
||||
many blocks in parallel, verifying their commits, and executing them against the
|
||||
ABCI application.
|
||||
|
||||
Tendermint full nodes run the Blocksync Reactor as a service to provide blocks
|
||||
to new nodes. New nodes run the Blocksync Reactor in "fast_sync" mode,
|
||||
where they actively make requests for more blocks until they sync up.
|
||||
Once caught up, "fast_sync" mode is disabled and the node switches to
|
||||
using (and turns on) the Consensus Reactor.
|
||||
|
||||
## Architecture and algorithm
|
||||
|
||||
The Blocksync reactor is organised as a set of concurrent tasks:
|
||||
|
||||
- Receive routine of Blocksync Reactor
|
||||
- Task for creating Requesters
|
||||
- Set of Requester tasks
- Controller task
|
||||
|
||||

|
||||
|
||||
### Data structures
|
||||
|
||||
These are the core data structures necessary to provide the Blocksync Reactor logic.
|
||||
|
||||
The Requester data structure is used to track the assignment of a request for a `block` at position `height` to a peer with id equal to `peerID`.
|
||||
|
||||
```go
|
||||
type Requester {
|
||||
mtx Mutex
|
||||
block Block
|
||||
height int64
|
||||
peerID p2p.ID
|
||||
redoChannel chan p2p.ID //redo may send multi-time; peerId is used to identify repeat
|
||||
}
|
||||
```
|
||||
|
||||
The Pool is a core data structure that stores the last executed block (`height`), the assignment of requests to peers (`requesters`), the current height and number of pending requests for each peer (`peers`), the maximum peer height, etc.
|
||||
|
||||
```go
|
||||
type Pool {
|
||||
mtx Mutex
|
||||
requesters map[int64]*Requester
|
||||
height int64
|
||||
peers map[p2p.ID]*Peer
|
||||
maxPeerHeight int64
|
||||
numPending int32
|
||||
store BlockStore
|
||||
requestsChannel chan<- BlockRequest
|
||||
errorsChannel chan<- peerError
|
||||
}
|
||||
```
|
||||
|
||||
The Peer data structure stores, for each peer, the current `height`, the number of pending requests sent to the peer (`numPending`), etc.
|
||||
|
||||
```go
|
||||
type Peer struct {
|
||||
id p2p.ID
|
||||
height int64
|
||||
numPending int32
|
||||
timeout *time.Timer
|
||||
didTimeout bool
|
||||
}
|
||||
```
|
||||
|
||||
BlockRequest is an internal data structure used to denote the current mapping of a request for a block at some `height` to a peer (`PeerID`).
|
||||
|
||||
```go
|
||||
type BlockRequest {
|
||||
Height int64
|
||||
PeerID p2p.ID
|
||||
}
|
||||
```
|
||||
|
||||
### Receive routine of Blocksync Reactor
|
||||
|
||||
It is executed upon message reception on the BlocksyncChannel inside the p2p receive routine. There is a separate p2p receive routine (and therefore a receive routine of the Blocksync Reactor) executed for each peer. Note that the "try to send" operations below do not block (they return immediately) if the outgoing buffer is full.
|
||||
|
||||
```go
handleMsg(pool, m):
  upon receiving bcBlockRequestMessage m from peer p:
    block = load block for height m.Height from pool.store
    if block != nil then
      try to send BlockResponseMessage(block) to p
    else
      try to send bcNoBlockResponseMessage(m.Height) to p

  upon receiving bcBlockResponseMessage m from peer p:
    pool.mtx.Lock()
    requester = pool.requesters[m.Height]
    if requester == nil then
      error("peer sent us a block we didn't expect")
      continue

    if requester.block == nil and requester.peerID == p then
      requester.block = m
      pool.numPending -= 1 // atomic decrement
      peer = pool.peers[p]
      if peer != nil then
        peer.numPending--
        if peer.numPending == 0 then
          peer.timeout.Stop()
          // NOTE: we don't send a Quit signal to the corresponding requester task!
        else
          trigger peer timeout to expire after peerTimeout
    pool.mtx.Unlock()

  upon receiving bcStatusRequestMessage m from peer p:
    try to send bcStatusResponseMessage(pool.store.Height) to p

  upon receiving bcStatusResponseMessage m from peer p:
    pool.mtx.Lock()
    peer = pool.peers[p]
    if peer != nil then
      peer.height = m.Height
    else
      peer = create new Peer data structure with id = p and height = m.Height
      pool.peers[p] = peer

    if m.Height > pool.maxPeerHeight then
      pool.maxPeerHeight = m.Height
    pool.mtx.Unlock()

onTimeout(p):
  send error message to pool error channel
  peer = pool.peers[p]
  peer.didTimeout = true
```

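To make the peer bookkeeping in the `bcStatusResponseMessage` branch above concrete, here is a small, hedged Go sketch of updating a peer's height and the pool's `maxPeerHeight` under the pool mutex; the simplified `pool` and `peer` types below are stand-ins, not the real implementation.

```go
package main

import (
	"fmt"
	"sync"
)

type peer struct {
	id     string
	height int64
}

type pool struct {
	mtx           sync.Mutex
	peers         map[string]*peer
	maxPeerHeight int64
}

// setPeerHeight mirrors the bcStatusResponseMessage branch of the handler:
// it records (or creates) the peer and raises maxPeerHeight when needed.
func (p *pool) setPeerHeight(id string, height int64) {
	p.mtx.Lock()
	defer p.mtx.Unlock()

	pr, ok := p.peers[id]
	if ok {
		pr.height = height
	} else {
		p.peers[id] = &peer{id: id, height: height}
	}
	if height > p.maxPeerHeight {
		p.maxPeerHeight = height
	}
}

func main() {
	pl := &pool{peers: make(map[string]*peer)}
	pl.setPeerHeight("peer1", 42)
	pl.setPeerHeight("peer2", 100)
	fmt.Println(pl.maxPeerHeight) // 100
}
```
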
### Requester tasks

A Requester task is responsible for fetching a single block at position `height`.

```go
fetchBlock(height, pool):
  while true do {
    peerID = nil
    block = nil
    peer = pickAvailablePeer(height)
    peerID = peer.id

    enqueue BlockRequest(height, peerID) to pool.requestsChannel
    redo = false
    while !redo do
      select {
        upon receiving Quit message do
          return
        upon receiving redo message with id on redoChannel do
          if peerID == id {
            pool.mtx.Lock()
            pool.numPending++
            redo = true
            pool.mtx.Unlock()
          }
      }
  }

pickAvailablePeer(height):
  selectedPeer = nil
  while selectedPeer == nil do
    pool.mtx.Lock()
    for each peer in pool.peers do
      if !peer.didTimeout and peer.numPending < maxPendingRequestsPerPeer and peer.height >= height then
        peer.numPending++
        selectedPeer = peer
        break
    pool.mtx.Unlock()

    if selectedPeer == nil then
      sleep requestIntervalMS

  return selectedPeer
```

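For illustration only, the peer-selection loop above can be sketched in Go roughly as follows; the simplified types and the `maxPendingRequestsPerPeer`/`requestInterval` values are assumptions for this sketch rather than the actual constants.

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

const (
	maxPendingRequestsPerPeer = 20                     // assumed limit for this sketch
	requestInterval           = 250 * time.Millisecond // assumed poll interval
)

type peer struct {
	id         string
	height     int64
	numPending int32
	didTimeout bool
}

type pool struct {
	mtx   sync.Mutex
	peers map[string]*peer
}

// pickAvailablePeer blocks until some peer can serve the block at `height`,
// reserving one pending slot on the chosen peer before returning it.
func (p *pool) pickAvailablePeer(height int64) *peer {
	for {
		p.mtx.Lock()
		for _, pr := range p.peers {
			if !pr.didTimeout && pr.numPending < maxPendingRequestsPerPeer && pr.height >= height {
				pr.numPending++
				p.mtx.Unlock()
				return pr
			}
		}
		p.mtx.Unlock()
		time.Sleep(requestInterval)
	}
}

func main() {
	pl := &pool{peers: map[string]*peer{"peer1": {id: "peer1", height: 10}}}
	fmt.Println(pl.pickAvailablePeer(5).id) // peer1
}
```
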
### Task for creating Requesters

This task is responsible for continuously creating and starting Requester tasks.

```go
createRequesters(pool):
  while true do
    if !pool.isRunning then break
    if pool.numPending < maxPendingRequests or size(pool.requesters) < maxTotalRequesters then
      pool.mtx.Lock()
      nextHeight = pool.height + size(pool.requesters)
      requester = create new requester for height nextHeight
      pool.requesters[nextHeight] = requester
      pool.numPending += 1 // atomic increment
      start requester task
      pool.mtx.Unlock()
    else
      sleep requestIntervalMS
      pool.mtx.Lock()
      for each peer in pool.peers do
        if !peer.didTimeout && peer.numPending > 0 && peer.curRate < minRecvRate then
          send error on pool error channel
          peer.didTimeout = true
        if peer.didTimeout then
          for each requester in pool.requesters do
            if requester.getPeerID() == peer.id then
              enqueue msg on requester's redoChannel
          delete(pool.peers, peer.id)
      pool.mtx.Unlock()
```

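For intuition about how new request heights are chosen, the creation step can be sketched as below: the next height to request is always the first height without an assigned requester, i.e. `pool.height + len(pool.requesters)`. The types and the `maxTotalRequesters` cap are simplified assumptions, not the real implementation.

```go
package main

import (
	"fmt"
	"sync"
)

const maxTotalRequesters = 600 // assumed cap for this sketch

type requester struct{ height int64 }

type pool struct {
	mtx        sync.Mutex
	height     int64 // first height not yet executed
	requesters map[int64]*requester
}

// makeNextRequester assigns a requester to the next unrequested height,
// mirroring nextHeight = pool.height + size(pool.requesters) from the pseudocode.
func (p *pool) makeNextRequester() *requester {
	p.mtx.Lock()
	defer p.mtx.Unlock()
	if len(p.requesters) >= maxTotalRequesters {
		return nil // backpressure: too many outstanding requesters
	}
	next := p.height + int64(len(p.requesters))
	r := &requester{height: next}
	p.requesters[next] = r
	return r
}

func main() {
	p := &pool{height: 100, requesters: make(map[int64]*requester)}
	fmt.Println(p.makeNextRequester().height) // 100
	fmt.Println(p.makeNextRequester().height) // 101
}
```
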
### Main blocksync reactor controller task

```go
main(pool):
  create trySyncTicker with interval trySyncIntervalMS
  create statusUpdateTicker with interval statusUpdateIntervalSeconds
  create switchToConsensusTicker with interval switchToConsensusIntervalSeconds

  while true do
    select {
      upon receiving BlockRequest(Height, Peer) on pool.requestsChannel:
        try to send bcBlockRequestMessage(Height) to Peer

      upon receiving error(peer) on errorsChannel:
        stop peer for error

      upon receiving message on statusUpdateTickerChannel:
        broadcast bcStatusRequestMessage(bcR.store.Height) // message sent in a separate routine

      upon receiving message on switchToConsensusTickerChannel:
        pool.mtx.Lock()
        receivedBlockOrTimedOut = pool.height > 0 || (time.Now() - pool.startTime) > 5 Seconds
        ourChainIsLongestAmongPeers = pool.maxPeerHeight == 0 || pool.height >= pool.maxPeerHeight
        haveSomePeers = size of pool.peers > 0
        pool.mtx.Unlock()
        if haveSomePeers && receivedBlockOrTimedOut && ourChainIsLongestAmongPeers then
          switch to consensus mode

      upon receiving message on trySyncTickerChannel:
        for i = 0; i < 10; i++ do
          pool.mtx.Lock()
          firstBlock = pool.requesters[pool.height].block
          secondBlock = pool.requesters[pool.height+1].block
          if firstBlock == nil or secondBlock == nil then continue
          pool.mtx.Unlock()
          verify firstBlock using LastCommit from secondBlock
          if verification failed
            pool.mtx.Lock()
            peerID = pool.requesters[pool.height].peerID
            redoRequestsForPeer(pool, peerID)
            delete(pool.peers, peerID)
            stop peer peerID for error
            pool.mtx.Unlock()
          else
            delete(pool.requesters, pool.height)
            save firstBlock to store
            pool.height++
            execute firstBlock
    }

redoRequestsForPeer(pool, peerID):
  for each requester in pool.requesters do
    if requester.getPeerID() == peerID then
      enqueue msg on redoChannel for requester
```

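The controller's shape, several tickers feeding a single `select` loop, maps naturally onto Go's `time.Ticker`. The sketch below is illustrative only: the intervals are made up and the handlers are placeholders for the logic described above.

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	trySyncTicker := time.NewTicker(10 * time.Millisecond)     // assumed trySyncIntervalMS
	statusUpdateTicker := time.NewTicker(10 * time.Second)     // assumed statusUpdateIntervalSeconds
	switchToConsensusTicker := time.NewTicker(1 * time.Second) // assumed switchToConsensusIntervalSeconds
	quit := time.After(50 * time.Millisecond)                  // stop this demo after a short while

	for {
		select {
		case <-trySyncTicker.C:
			// placeholder: try to verify and execute the next couple of blocks
			fmt.Println("try sync")
		case <-statusUpdateTicker.C:
			// placeholder: broadcast a status request to peers
			fmt.Println("broadcast status request")
		case <-switchToConsensusTicker.C:
			// placeholder: check whether we have caught up and can switch reactors
			fmt.Println("check switch to consensus")
		case <-quit:
			return
		}
	}
}
```
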
## Channels

Defines `maxMsgSize` for the maximum size of incoming messages, and
`SendQueueCapacity` and `RecvBufferCapacity` for the maximum sending and
receiving buffers respectively. These limits are meant to prevent amplification
attacks by placing an upper bound on how much data we can receive from and send to
a peer.

Sending incorrectly encoded data will result in stopping the peer.

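As a rough illustration of what such per-channel limits look like as configuration, here is a hedged sketch using a locally defined descriptor struct; the field names mirror the ones mentioned above, while the channel ID and the numeric values are placeholders rather than the real defaults.

```go
package main

import "fmt"

// channelDescriptor is a simplified stand-in for a p2p channel configuration.
type channelDescriptor struct {
	ID                  byte
	SendQueueCapacity   int
	RecvBufferCapacity  int
	RecvMessageCapacity int // upper bound on a single incoming message (maxMsgSize)
}

func main() {
	blocksyncChannel := channelDescriptor{
		ID:                  0x40,             // placeholder channel ID
		SendQueueCapacity:   1000,             // placeholder
		RecvBufferCapacity:  1024,             // placeholder
		RecvMessageCapacity: 10 * 1024 * 1024, // placeholder maxMsgSize
	}
	fmt.Printf("%+v\n", blocksyncChannel)
}
```
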
docs/tendermint-core/consensus/README.md (new file, 42 lines)
@@ -0,0 +1,42 @@

---
order: 1
parent:
  title: Consensus
  order: 6
---

# Consensus

Tendermint Consensus is a distributed protocol executed by validator processes to agree on
the next block to be added to the Tendermint blockchain. The protocol proceeds in rounds, where
each round is an attempt to reach agreement on the next block. A round starts by having a dedicated
process (called the proposer) suggest to the other processes what the next block should be, with
the `ProposalMessage`.
The processes respond by voting for a block with `VoteMessage` (there are two kinds of vote
messages, prevote and precommit votes). Note that a proposal message is just a suggestion of what the
next block should be; a validator might vote with a `VoteMessage` for a different block. If, in some
round, a sufficient number of processes vote for the same block, then this block is committed and later
added to the blockchain. `ProposalMessage` and `VoteMessage` are signed by the private key of the
validator. The internals of the protocol and how it ensures safety and liveness properties are
explained in a forthcoming document.

For efficiency reasons, validators in the Tendermint consensus protocol do not agree directly on the
block, as blocks can be large, i.e., they don't embed the block inside `Proposal` and
`VoteMessage`. Instead, they reach agreement on the `BlockID` (see the `BlockID` definition in the
[Blockchain](https://github.com/tendermint/spec/blob/master/spec/core/data_structures.md#blockid) section)
that uniquely identifies each block. The block itself is
disseminated to validator processes using a peer-to-peer gossip protocol. It starts by having the
proposer first split the block into a number of block parts, which are then gossiped between
processes using `BlockPartMessage`.

Validators in Tendermint communicate via a peer-to-peer gossip protocol. Each validator is connected
only to a subset of processes called peers. Through the gossip protocol, a validator sends to its peers
all the information (`ProposalMessage`, `VoteMessage` and `BlockPartMessage`) they need to
reach agreement on some block, and also to obtain the content of the chosen block (block parts). As
part of the gossip protocol, processes also send auxiliary messages that inform peers about the
executed steps of the core consensus algorithm (`NewRoundStepMessage` and `NewValidBlockMessage`), and
also messages that inform peers what votes the process has seen (`HasVoteMessage`,
`VoteSetMaj23Message` and `VoteSetBitsMessage`). These messages are then used in the gossip
protocol to determine what messages a process should send to its peers.

We now describe the content of each message exchanged during the Tendermint consensus protocol.

docs/tendermint-core/consensus/reactor.md (new file, 370 lines)
@@ -0,0 +1,370 @@

---
order: 2
---

# Reactor

The Consensus Reactor defines a reactor for the consensus service. It contains the ConsensusState service that
manages the state of the Tendermint consensus internal state machine.
When the Consensus Reactor is started, it starts the Broadcast Routine, which starts the ConsensusState service.
Furthermore, for each peer that is added to the Consensus Reactor, it creates (and manages) the known peer state
(that is used extensively in the gossip routines) and starts the following three routines for the peer p:
Gossip Data Routine, Gossip Votes Routine and QueryMaj23Routine. Finally, the Consensus Reactor is responsible
for decoding messages received from a peer and for adequately processing the message depending on its type and content.
The processing normally consists of updating the known peer state and, for some messages
(`ProposalMessage`, `BlockPartMessage` and `VoteMessage`), also forwarding the message to the ConsensusState module
for further processing. In the following text we specify the core functionality of those separate units of execution
that are part of the Consensus Reactor.

## ConsensusState service

Consensus State handles the execution of the Tendermint BFT consensus algorithm. It processes votes and proposals,
and upon reaching agreement, commits blocks to the chain and executes them against the application.
The internal state machine receives input from peers, the internal validator and from a timer.

Inside Consensus State we have the following units of execution: Timeout Ticker and Receive Routine.
Timeout Ticker is a timer that schedules timeouts conditional on the height/round/step; these timeouts are processed
by the Receive Routine.

### Receive Routine of the ConsensusState service

The Receive Routine of the ConsensusState handles messages which may cause internal consensus state transitions.
It is the only routine that updates the RoundState, which contains the internal consensus state.
Updates (state transitions) happen on timeouts, complete proposals, and 2/3 majorities.
It receives messages from peers, internal validators and the Timeout Ticker
and invokes the corresponding handlers, potentially updating the RoundState.
The details of the protocol (together with formal proofs of correctness) implemented by the Receive Routine are
discussed in a separate document. For this document
it is sufficient to know that the Receive Routine manages and updates the RoundState data structure that is
then extensively used by the gossip routines to determine what information should be sent to peer processes.

## Round State

RoundState defines the internal consensus state. It contains the height, round, round step, current validator set,
the proposal and proposal block for the current round, the locked round and block (if some block is locked), the set of
received votes, the last commit, and the last validator set.

```go
type RoundState struct {
    Height             int64
    Round              int
    Step               RoundStepType
    Validators         ValidatorSet
    Proposal           Proposal
    ProposalBlock      Block
    ProposalBlockParts PartSet
    LockedRound        int
    LockedBlock        Block
    LockedBlockParts   PartSet
    Votes              HeightVoteSet
    LastCommit         VoteSet
    LastValidators     ValidatorSet
}
```

Internally, consensus will run as a state machine with the following states:

- RoundStepNewHeight
- RoundStepNewRound
- RoundStepPropose
- RoundStepProposeWait
- RoundStepPrevote
- RoundStepPrevoteWait
- RoundStepPrecommit
- RoundStepPrecommitWait
- RoundStepCommit

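A natural way to encode such a step enumeration in Go is an integer type with `iota` constants. The sketch below mirrors the list above; the numeric values and the `String` helper are illustrative assumptions, not the canonical definitions.

```go
package main

import "fmt"

// RoundStepType enumerates the internal consensus state machine steps.
type RoundStepType uint8

const (
	RoundStepNewHeight RoundStepType = iota + 1
	RoundStepNewRound
	RoundStepPropose
	RoundStepProposeWait
	RoundStepPrevote
	RoundStepPrevoteWait
	RoundStepPrecommit
	RoundStepPrecommitWait
	RoundStepCommit
)

// String is a small helper so steps print readably in logs.
func (s RoundStepType) String() string {
	names := [...]string{
		"NewHeight", "NewRound", "Propose", "ProposeWait",
		"Prevote", "PrevoteWait", "Precommit", "PrecommitWait", "Commit",
	}
	if s < 1 || int(s) > len(names) {
		return "Unknown"
	}
	return "RoundStep" + names[s-1]
}

func main() {
	fmt.Println(RoundStepPrevote)                       // RoundStepPrevote
	fmt.Println(RoundStepPrevote < RoundStepPrecommit)  // true: ordering supports "Step <= RoundStepPrevote" checks
}
```
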
## Peer Round State

Peer round state contains the known state of a peer. It is updated by the Receive routine of the
Consensus Reactor and by the gossip routines upon sending a message to the peer.

```go
type PeerRoundState struct {
    Height                   int64         // Height peer is at
    Round                    int           // Round peer is at, -1 if unknown.
    Step                     RoundStepType // Step peer is at
    Proposal                 bool          // True if peer has proposal for this round
    ProposalBlockPartsHeader PartSetHeader
    ProposalBlockParts       BitArray
    ProposalPOLRound         int      // Proposal's POL round. -1 if none.
    ProposalPOL              BitArray // nil until ProposalPOLMessage received.
    Prevotes                 BitArray // All votes peer has for this round
    Precommits               BitArray // All precommits peer has for this round
    LastCommitRound          int      // Round of commit for last height. -1 if none.
    LastCommit               BitArray // All commit precommits of commit for last height.
    CatchupCommitRound       int      // Round that we have commit for. Not necessarily unique. -1 if none.
    CatchupCommit            BitArray // All commit precommits peer has for this height & CatchupCommitRound
}
```

## Receive method of Consensus reactor

The entry point of the Consensus reactor is a receive method. When a message is
received from a peer p, normally the peer round state is updated
correspondingly, and some messages are passed for further processing, for
example to the ConsensusState service. We now specify the processing of messages in
the receive method of the Consensus reactor for each message type. In the following
message handlers, `rs` and `prs` denote `RoundState` and `PeerRoundState`,
respectively.

### NewRoundStepMessage handler

```go
handleMessage(msg):
  if msg is from smaller height/round/step then return
  // Just remember these values.
  prsHeight = prs.Height
  prsRound = prs.Round
  prsCatchupCommitRound = prs.CatchupCommitRound
  prsCatchupCommit = prs.CatchupCommit

  Update prs with values from msg
  if prs.Height or prs.Round has been updated then
    reset Proposal related fields of the peer state
  if prs.Round has been updated and msg.Round == prsCatchupCommitRound then
    prs.Precommits = prsCatchupCommit
  if prs.Height has been updated then
    if prsHeight+1 == msg.Height && prsRound == msg.LastCommitRound then
      prs.LastCommitRound = msg.LastCommitRound
      prs.LastCommit = prs.Precommits
    else
      prs.LastCommitRound = msg.LastCommitRound
      prs.LastCommit = nil
    Reset prs.CatchupCommitRound and prs.CatchupCommit
```

### NewValidBlockMessage handler

```go
handleMessage(msg):
  if prs.Height != msg.Height then return

  if prs.Round != msg.Round && !msg.IsCommit then return

  prs.ProposalBlockPartsHeader = msg.BlockPartsHeader
  prs.ProposalBlockParts = msg.BlockParts
```

The number of block parts is limited to 1601 (`types.MaxBlockPartsCount`) to
protect the node against DOS attacks.

### HasVoteMessage handler

```go
handleMessage(msg):
  if prs.Height == msg.Height then
    prs.setHasVote(msg.Height, msg.Round, msg.Type, msg.Index)
```

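For a sense of what the `setHasVote` bookkeeping might look like, here is a hedged sketch that tracks which validator indices a peer is known to have votes from, keyed by round and vote type; the real implementation uses a bit-array per vote set, approximated here with a `[]bool`.

```go
package main

import "fmt"

type voteKey struct {
	round    int
	voteType string // "prevote" or "precommit"
}

// peerVoteState approximates the per-peer vote bookkeeping: one boolean
// per validator index, per (round, vote type).
type peerVoteState struct {
	numValidators int
	seen          map[voteKey][]bool
}

func newPeerVoteState(numValidators int) *peerVoteState {
	return &peerVoteState{numValidators: numValidators, seen: make(map[voteKey][]bool)}
}

// setHasVote records that the peer has the vote of validator `index`
// for the given round and vote type.
func (s *peerVoteState) setHasVote(round int, voteType string, index int) {
	k := voteKey{round, voteType}
	if s.seen[k] == nil {
		s.seen[k] = make([]bool, s.numValidators)
	}
	s.seen[k][index] = true
}

func main() {
	s := newPeerVoteState(4)
	s.setHasVote(0, "prevote", 2)
	fmt.Println(s.seen[voteKey{0, "prevote"}]) // [false false true false]
}
```
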
### VoteSetMaj23Message handler

```go
handleMessage(msg):
  if prs.Height == msg.Height then
    Record in rs that the peer claims to have a ⅔ majority for msg.BlockID
    Send VoteSetBitsMessage showing votes node has for that BlockID
```

### ProposalMessage handler

```go
handleMessage(msg):
  if prs.Height != msg.Height || prs.Round != msg.Round || prs.Proposal then return
  prs.Proposal = true
  if prs.ProposalBlockParts == empty set then // otherwise it is set in NewValidBlockMessage handler
    prs.ProposalBlockPartsHeader = msg.BlockPartsHeader
  prs.ProposalPOLRound = msg.POLRound
  prs.ProposalPOL = nil
  Send msg through internal peerMsgQueue to ConsensusState service
```

### ProposalPOLMessage handler

```go
handleMessage(msg):
  if prs.Height != msg.Height or prs.ProposalPOLRound != msg.ProposalPOLRound then return
  prs.ProposalPOL = msg.ProposalPOL
```

The number of votes is limited to 10000 (`types.MaxVotesCount`) to protect the
node against DOS attacks.

### BlockPartMessage handler

```go
handleMessage(msg):
  if prs.Height != msg.Height || prs.Round != msg.Round then return
  Record in prs that peer has block part msg.Part.Index
  Send msg through internal peerMsgQueue to ConsensusState service
```

### VoteMessage handler

```go
handleMessage(msg):
  Record in prs that the peer knows the vote with index msg.vote.ValidatorIndex for the particular height and round
  Send msg through internal peerMsgQueue to ConsensusState service
```

### VoteSetBitsMessage handler

```go
handleMessage(msg):
  Update prs with the bit-array of votes the peer claims to have for msg.BlockID
```

The number of votes is limited to 10000 (`types.MaxVotesCount`) to protect the
node against DOS attacks.

## Gossip Data Routine

It is used to send the following messages to the peer: `BlockPartMessage`, `ProposalMessage` and
`ProposalPOLMessage` on the DataChannel. The gossip data routine is based on the local RoundState (`rs`)
and the known PeerRoundState (`prs`). The routine repeats forever the logic shown below:

```go
1a) if rs.ProposalBlockPartsHeader == prs.ProposalBlockPartsHeader and the peer does not have all the proposal parts then
      Part = pick a random proposal block part the peer does not have
      Send BlockPartMessage(rs.Height, rs.Round, Part) to the peer on the DataChannel
      if send returns true, record that the peer knows the corresponding block Part
      Continue

1b) if (0 < prs.Height) and (prs.Height < rs.Height) then
      help peer catch up using gossipDataForCatchup function
      Continue

1c) if (rs.Height != prs.Height) or (rs.Round != prs.Round) then
      Sleep PeerGossipSleepDuration
      Continue

    // at this point rs.Height == prs.Height and rs.Round == prs.Round
1d) if (rs.Proposal != nil and !prs.Proposal) then
      Send ProposalMessage(rs.Proposal) to the peer
      if send returns true, record that the peer knows Proposal
      if 0 <= rs.Proposal.POLRound then
        polRound = rs.Proposal.POLRound
        prevotesBitArray = rs.Votes.Prevotes(polRound).BitArray()
        Send ProposalPOLMessage(rs.Height, polRound, prevotesBitArray)
      Continue

2)  Sleep PeerGossipSleepDuration
```

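Step 1a above boils down to a set difference between the parts we have and the parts the peer is known to have, followed by a random pick. A hedged sketch follows, with block parts modelled as a plain `[]bool` instead of the real bit-array type.

```go
package main

import (
	"fmt"
	"math/rand"
)

// pickRandomMissingPart returns the index of a random part that we have
// but the peer does not, or -1 if there is nothing to send.
// Both slices are assumed to have the same length (one flag per block part).
func pickRandomMissingPart(ours, peers []bool) int {
	var candidates []int
	for i := range ours {
		if ours[i] && !peers[i] {
			candidates = append(candidates, i)
		}
	}
	if len(candidates) == 0 {
		return -1
	}
	return candidates[rand.Intn(len(candidates))]
}

func main() {
	ours := []bool{true, true, true, true}
	peers := []bool{true, false, true, false}
	fmt.Println(pickRandomMissingPart(ours, peers)) // 1 or 3
}
```
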
### Gossip Data For Catchup

This function is responsible for helping a peer catch up if it is at a smaller height (prs.Height < rs.Height).
The function executes the following logic:

```go
if peer does not have all block parts for prs.ProposalBlockParts then
  blockMeta = Load Block Metadata for height prs.Height from blockStore
  if blockMeta.BlockID.PartsHeader != prs.ProposalBlockPartsHeader then
    Sleep PeerGossipSleepDuration
    return
  Part = pick a random proposal block part the peer does not have
  Send BlockPartMessage(prs.Height, prs.Round, Part) to the peer on the DataChannel
  if send returns true, record that the peer knows the corresponding block Part
  return
else Sleep PeerGossipSleepDuration
```

## Gossip Votes Routine

It is used to send the following message: `VoteMessage` on the VoteChannel.
The gossip votes routine is based on the local RoundState (`rs`)
and the known PeerRoundState (`prs`). The routine repeats forever the logic shown below:

```go
1a) if rs.Height == prs.Height then
      if prs.Step == RoundStepNewHeight then
        vote = random vote from rs.LastCommit the peer does not have
        Send VoteMessage(vote) to the peer
        if send returns true, continue

      if prs.Step <= RoundStepPrevote and prs.Round != -1 and prs.Round <= rs.Round then
        Prevotes = rs.Votes.Prevotes(prs.Round)
        vote = random vote from Prevotes the peer does not have
        Send VoteMessage(vote) to the peer
        if send returns true, continue

      if prs.Step <= RoundStepPrecommit and prs.Round != -1 and prs.Round <= rs.Round then
        Precommits = rs.Votes.Precommits(prs.Round)
        vote = random vote from Precommits the peer does not have
        Send VoteMessage(vote) to the peer
        if send returns true, continue

      if prs.ProposalPOLRound != -1 then
        PolPrevotes = rs.Votes.Prevotes(prs.ProposalPOLRound)
        vote = random vote from PolPrevotes the peer does not have
        Send VoteMessage(vote) to the peer
        if send returns true, continue

1b) if prs.Height != 0 and rs.Height == prs.Height+1 then
      vote = random vote from rs.LastCommit the peer does not have
      Send VoteMessage(vote) to the peer
      if send returns true, continue

1c) if prs.Height != 0 and rs.Height >= prs.Height+2 then
      Commit = get commit from BlockStore for prs.Height
      vote = random vote from Commit the peer does not have
      Send VoteMessage(vote) to the peer
      if send returns true, continue

2)  Sleep PeerGossipSleepDuration
```

## QueryMaj23Routine

It is used to send the following message: `VoteSetMaj23Message`. `VoteSetMaj23Message` is sent to indicate that a given
BlockID has seen +2/3 votes. This routine is based on the local RoundState (`rs`) and the known PeerRoundState
(`prs`). The routine repeats forever the logic shown below.

```go
1a) if rs.Height == prs.Height then
      Prevotes = rs.Votes.Prevotes(prs.Round)
      if there is a ⅔ majority for some blockID in Prevotes then
        m = VoteSetMaj23Message(prs.Height, prs.Round, Prevote, blockID)
        Send m to peer
        Sleep PeerQueryMaj23SleepDuration

1b) if rs.Height == prs.Height then
      Precommits = rs.Votes.Precommits(prs.Round)
      if there is a ⅔ majority for some blockID in Precommits then
        m = VoteSetMaj23Message(prs.Height, prs.Round, Precommit, blockID)
        Send m to peer
        Sleep PeerQueryMaj23SleepDuration

1c) if rs.Height == prs.Height and prs.ProposalPOLRound >= 0 then
      Prevotes = rs.Votes.Prevotes(prs.ProposalPOLRound)
      if there is a ⅔ majority for some blockID in Prevotes then
        m = VoteSetMaj23Message(prs.Height, prs.ProposalPOLRound, Prevote, blockID)
        Send m to peer
        Sleep PeerQueryMaj23SleepDuration

1d) if prs.CatchupCommitRound != -1 and 0 < prs.Height and
       prs.Height <= blockStore.Height() then
      Commit = LoadCommit(prs.Height)
      m = VoteSetMaj23Message(prs.Height, Commit.Round, Precommit, Commit.BlockID)
      Send m to peer
      Sleep PeerQueryMaj23SleepDuration

2)  Sleep PeerQueryMaj23SleepDuration
```

## Broadcast routine

The Broadcast routine subscribes to an internal event bus to receive new round steps and vote messages, and broadcasts messages to peers upon receiving those
events.
It broadcasts `NewRoundStepMessage` or `CommitStepMessage` upon a new round state event. Note that
broadcasting these messages does not depend on the PeerRoundState; they are sent on the StateChannel.
Upon receiving a VoteMessage it broadcasts a `HasVoteMessage` to its peers on the StateChannel.

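A rough shape for such a routine in Go is a goroutine that selects over subscription channels and fans each event out to the peers. The sketch below uses plain channels and a hypothetical `broadcast` helper rather than the real event bus and p2p APIs.

```go
package main

import (
	"fmt"
	"time"
)

type roundStepEvent struct{ Height int64; Round int; Step string }
type voteEvent struct{ Height int64; ValidatorIndex int }

// broadcast is a hypothetical stand-in for sending a message to every peer
// on the StateChannel.
func broadcast(msg string) { fmt.Println("broadcast:", msg) }

// broadcastRoutine fans internal events out to peers until `done` is closed.
func broadcastRoutine(steps <-chan roundStepEvent, votes <-chan voteEvent, done <-chan struct{}) {
	for {
		select {
		case ev := <-steps:
			broadcast(fmt.Sprintf("NewRoundStepMessage{H:%d R:%d %s}", ev.Height, ev.Round, ev.Step))
		case ev := <-votes:
			broadcast(fmt.Sprintf("HasVoteMessage{H:%d Index:%d}", ev.Height, ev.ValidatorIndex))
		case <-done:
			return
		}
	}
}

func main() {
	steps := make(chan roundStepEvent)
	votes := make(chan voteEvent)
	done := make(chan struct{})

	go broadcastRoutine(steps, votes, done)
	steps <- roundStepEvent{Height: 10, Round: 0, Step: "Propose"} // unbuffered: handled before we move on
	votes <- voteEvent{Height: 10, ValidatorIndex: 3}
	close(done)
	time.Sleep(10 * time.Millisecond) // give the routine a moment to exit
}
```
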
## Channels

Defines 4 channels: state, data, vote and vote_set_bits. Each channel
has `SendQueueCapacity` and `RecvBufferCapacity`, and
`RecvMessageCapacity` is set to `maxMsgSize`.

Sending incorrectly encoded data will result in stopping the peer.

docs/tendermint-core/evidence/README.md (new file, 13 lines)
@@ -0,0 +1,13 @@

---
order: 1
parent:
  title: Evidence
  order: 3
---

Evidence is used to identify validators who have acted or are acting maliciously. There are multiple types of evidence; to read more on the evidence types please see [Evidence Types](https://docs.tendermint.com/master/spec/core/data_structures.html#evidence).

The evidence reactor works similarly to the mempool reactor. When evidence is observed, it is repeatedly sent to all of the node's peers. This ensures evidence reaches as many nodes as possible, making it hard to censor. After evidence is received by peers and committed in a block, it is pruned from the evidence module.

Sending incorrectly encoded data or data exceeding `maxMsgSize` will result
in stopping the peer.

Some files were not shown because too many files have changed in this diff.