mirror of
https://github.com/tendermint/tendermint.git
synced 2026-01-22 12:42:49 +00:00
Compare commits
171 Commits
wb/undo-qu
...
wb/issue-8
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
6df37bdf80 | ||
|
|
7fadc8e030 | ||
|
|
c60651d95b | ||
|
|
6d6569e566 | ||
|
|
e0349bd137 | ||
|
|
185f15d645 | ||
|
|
70d02a0eaa | ||
|
|
cd027e88ab | ||
|
|
d0f4feec7d | ||
|
|
285bcb0995 | ||
|
|
e9fdca779c | ||
|
|
befe1a6d4f | ||
|
|
12293f0f07 | ||
|
|
2697751e8f | ||
|
|
dfb322e68b | ||
|
|
3d0d89e505 | ||
|
|
9e643f3628 | ||
|
|
1949095c51 | ||
|
|
41a1bf539b | ||
|
|
462c475abc | ||
|
|
9d56520f76 | ||
|
|
14f5588ce2 | ||
|
|
3945915920 | ||
|
|
0af58409bf | ||
|
|
7d39f639f6 | ||
|
|
e4ae922c33 | ||
|
|
cbce877480 | ||
|
|
b29cc95920 | ||
|
|
0c9558a742 | ||
|
|
e2fc50ec9f | ||
|
|
2361e0c65c | ||
|
|
e2365c1368 | ||
|
|
43943d2dce | ||
|
|
cc838a5a19 | ||
|
|
c33be0a410 | ||
|
|
c680cca96e | ||
|
|
039fef14e0 | ||
|
|
3a75fbceec | ||
|
|
ea964e2133 | ||
|
|
485c96b0d3 | ||
|
|
9a833a8495 | ||
|
|
0bded371c5 | ||
|
|
12d13cd31d | ||
|
|
bba8367aac | ||
|
|
f1a8f47d4d | ||
|
|
f61e6e4201 | ||
|
|
1db41663c7 | ||
|
|
5e0e05f938 | ||
|
|
5bb51aab03 | ||
|
|
13f7501950 | ||
|
|
4400b0f6d3 | ||
|
|
5b6849ccf7 | ||
|
|
a68e356596 | ||
|
|
7c91b53999 | ||
|
|
02c7199eec | ||
|
|
1dd8807cc3 | ||
|
|
07b46d5a05 | ||
|
|
7a0b05f22d | ||
|
|
bedb68078c | ||
|
|
348c494c99 | ||
|
|
48b1952f18 | ||
|
|
93c4e00e8e | ||
|
|
68c624f5de | ||
|
|
4dce885994 | ||
|
|
faf123bda2 | ||
|
|
da5c09cf6f | ||
|
|
b08dd93d88 | ||
|
|
a5320da5c8 | ||
|
|
8e5dfa55ef | ||
|
|
70df7d9e6e | ||
|
|
98dd0d6c5a | ||
|
|
aff1481682 | ||
|
|
e9bc33d807 | ||
|
|
72bbe64da7 | ||
|
|
658a7661c5 | ||
|
|
89b4321af2 | ||
|
|
c79bb13807 | ||
|
|
d9c9675e2a | ||
|
|
a54bae25b7 | ||
|
|
ddbc93d993 | ||
|
|
6f7427ec7e | ||
|
|
7c03e7dbfb | ||
|
|
c35d6d6e2c | ||
|
|
4edc8c5523 | ||
|
|
f992a7e740 | ||
|
|
691cb52528 | ||
|
|
01266881b8 | ||
|
|
2df5c85a8d | ||
|
|
1f03287f52 | ||
|
|
e7955185b4 | ||
|
|
854add04b0 | ||
|
|
8df7b6103f | ||
|
|
f1659ce329 | ||
|
|
8d0bd1c0ff | ||
|
|
0b8a62c87b | ||
|
|
9accc1a531 | ||
|
|
0167f0d527 | ||
|
|
c8c248d733 | ||
|
|
9d98484845 | ||
|
|
63ff2f052d | ||
|
|
7c4fe5b108 | ||
|
|
a3881f0fb1 | ||
|
|
59eaa4dba0 | ||
|
|
33e6f7af11 | ||
|
|
af96ef2fe4 | ||
|
|
65065e6054 | ||
|
|
c42c6d06d2 | ||
|
|
a22942504c | ||
|
|
ea46a4e9d1 | ||
|
|
21087563eb | ||
|
|
a965f03c15 | ||
|
|
82a2ca4ba5 | ||
|
|
58dc172611 | ||
|
|
9cb01168a6 | ||
|
|
e4dced2437 | ||
|
|
8175b2b26d | ||
|
|
0fcfaa4568 | ||
|
|
b488198d47 | ||
|
|
b848c79971 | ||
|
|
f25b7ceeb2 | ||
|
|
e762dbb603 | ||
|
|
cd0472014a | ||
|
|
ab32f5a9b6 | ||
|
|
a153f82433 | ||
|
|
c80734e5af | ||
|
|
89dbebd1c5 | ||
|
|
af60a9c385 | ||
|
|
c8ae5db50e | ||
|
|
49e3688b79 | ||
|
|
c85e3e4ba8 | ||
|
|
8c5e36159e | ||
|
|
858d57a984 | ||
|
|
0875074ea2 | ||
|
|
3e2d5db289 | ||
|
|
f795d3f360 | ||
|
|
06e6d3f2e9 | ||
|
|
680ebc6f8e | ||
|
|
211b80a484 | ||
|
|
62a1cb8d17 | ||
|
|
a57567ba33 | ||
|
|
5662bd12a8 | ||
|
|
61a81279bd | ||
|
|
f939f962b1 | ||
|
|
9968f53c15 | ||
|
|
80186a9d9c | ||
|
|
21461e55a7 | ||
|
|
2ffb262600 | ||
|
|
912751cf93 | ||
|
|
926c469fcc | ||
|
|
3dc04430c3 | ||
|
|
70ee282d9e | ||
|
|
c88cf0b66c | ||
|
|
50de246a2b | ||
|
|
705f365bcd | ||
|
|
3b20931da3 | ||
|
|
bd6fce13ae | ||
|
|
351adf8ddb | ||
|
|
e80541a251 | ||
|
|
ce898a738c | ||
|
|
a185163c57 | ||
|
|
51b93c8606 | ||
|
|
325740a57c | ||
|
|
abdf717761 | ||
|
|
d65237ff87 | ||
|
|
3401eb2410 | ||
|
|
81bd9ad812 | ||
|
|
4425e62e9e | ||
|
|
c490d3f00a | ||
|
|
8a238fdcb4 | ||
|
|
abfcd08903 | ||
|
|
7f8f1cde8c |
5
.github/CODEOWNERS
vendored
5
.github/CODEOWNERS
vendored
@@ -7,4 +7,7 @@
|
||||
# global owners are only requested if there isn't a more specific
|
||||
# codeowner specified below. For this reason, the global codeowners
|
||||
# are often repeated in package-level definitions.
|
||||
* @ebuchman @cmwaters @tychoish @williambanfield @creachadair
|
||||
* @ebuchman @cmwaters @tychoish @williambanfield @creachadair @sergio-mena @jmalicevic @thanethomson @ancazamfir
|
||||
|
||||
# Spec related changes can be approved by the protocol design team
|
||||
/spec @josef-widder @milosevic @cason
|
||||
|
||||
6
.github/workflows/build.yml
vendored
6
.github/workflows/build.yml
vendored
@@ -23,7 +23,7 @@ jobs:
|
||||
- uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: "1.17"
|
||||
- uses: actions/checkout@v2.4.0
|
||||
- uses: actions/checkout@v3
|
||||
- uses: technote-space/get-diff-action@v6.0.1
|
||||
with:
|
||||
PATTERNS: |
|
||||
@@ -44,7 +44,7 @@ jobs:
|
||||
- uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: "1.17"
|
||||
- uses: actions/checkout@v2.4.0
|
||||
- uses: actions/checkout@v3
|
||||
- uses: technote-space/get-diff-action@v6.0.1
|
||||
with:
|
||||
PATTERNS: |
|
||||
@@ -66,7 +66,7 @@ jobs:
|
||||
- uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: "1.17"
|
||||
- uses: actions/checkout@v2.4.0
|
||||
- uses: actions/checkout@v3
|
||||
- uses: technote-space/get-diff-action@v6.0.1
|
||||
with:
|
||||
PATTERNS: |
|
||||
|
||||
6
.github/workflows/docker.yml
vendored
6
.github/workflows/docker.yml
vendored
@@ -13,7 +13,7 @@ jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2.4.0
|
||||
- uses: actions/checkout@v3
|
||||
- name: Prepare
|
||||
id: prep
|
||||
run: |
|
||||
@@ -43,13 +43,13 @@ jobs:
|
||||
|
||||
- name: Login to DockerHub
|
||||
if: ${{ github.event_name != 'pull_request' }}
|
||||
uses: docker/login-action@v1.12.0
|
||||
uses: docker/login-action@v1.14.1
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
|
||||
- name: Publish to Docker Hub
|
||||
uses: docker/build-push-action@v2.9.0
|
||||
uses: docker/build-push-action@v2.10.0
|
||||
with:
|
||||
context: .
|
||||
file: ./DOCKER/Dockerfile
|
||||
|
||||
2
.github/workflows/e2e-manual.yml
vendored
2
.github/workflows/e2e-manual.yml
vendored
@@ -19,7 +19,7 @@ jobs:
|
||||
with:
|
||||
go-version: '1.17'
|
||||
|
||||
- uses: actions/checkout@v2.4.0
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: Build
|
||||
working-directory: test/e2e
|
||||
|
||||
2
.github/workflows/e2e-nightly-34x.yml
vendored
2
.github/workflows/e2e-nightly-34x.yml
vendored
@@ -24,7 +24,7 @@ jobs:
|
||||
with:
|
||||
go-version: '1.17'
|
||||
|
||||
- uses: actions/checkout@v2.4.0
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
ref: 'v0.34.x'
|
||||
|
||||
|
||||
2
.github/workflows/e2e-nightly-35x.yml
vendored
2
.github/workflows/e2e-nightly-35x.yml
vendored
@@ -24,7 +24,7 @@ jobs:
|
||||
with:
|
||||
go-version: '1.17'
|
||||
|
||||
- uses: actions/checkout@v2.4.0
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
ref: 'v0.35.x'
|
||||
|
||||
|
||||
2
.github/workflows/e2e-nightly-master.yml
vendored
2
.github/workflows/e2e-nightly-master.yml
vendored
@@ -23,7 +23,7 @@ jobs:
|
||||
with:
|
||||
go-version: '1.17'
|
||||
|
||||
- uses: actions/checkout@v2.4.0
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: Build
|
||||
working-directory: test/e2e
|
||||
|
||||
2
.github/workflows/e2e.yml
vendored
2
.github/workflows/e2e.yml
vendored
@@ -17,7 +17,7 @@ jobs:
|
||||
- uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: '1.17'
|
||||
- uses: actions/checkout@v2.4.0
|
||||
- uses: actions/checkout@v3
|
||||
- uses: technote-space/get-diff-action@v6.0.1
|
||||
with:
|
||||
PATTERNS: |
|
||||
|
||||
2
.github/workflows/fuzz-nightly.yml
vendored
2
.github/workflows/fuzz-nightly.yml
vendored
@@ -17,7 +17,7 @@ jobs:
|
||||
with:
|
||||
go-version: '1.17'
|
||||
|
||||
- uses: actions/checkout@v2.4.0
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: Install go-fuzz
|
||||
working-directory: test/fuzz
|
||||
|
||||
2
.github/workflows/jepsen.yml
vendored
2
.github/workflows/jepsen.yml
vendored
@@ -46,7 +46,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout the Jepsen repository
|
||||
uses: actions/checkout@v2.4.0
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
repository: 'tendermint/jepsen'
|
||||
|
||||
|
||||
4
.github/workflows/linkchecker.yml
vendored
4
.github/workflows/linkchecker.yml
vendored
@@ -6,7 +6,7 @@ jobs:
|
||||
markdown-link-check:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2.4.0
|
||||
- uses: gaurav-nelson/github-action-markdown-link-check@1.0.13
|
||||
- uses: actions/checkout@v3
|
||||
- uses: gaurav-nelson/github-action-markdown-link-check@1.0.14
|
||||
with:
|
||||
folder-path: "docs"
|
||||
|
||||
9
.github/workflows/lint.yml
vendored
9
.github/workflows/lint.yml
vendored
@@ -13,17 +13,20 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 8
|
||||
steps:
|
||||
- uses: actions/checkout@v2.4.0
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: '^1.17'
|
||||
- uses: technote-space/get-diff-action@v6.0.1
|
||||
with:
|
||||
PATTERNS: |
|
||||
**/**.go
|
||||
go.mod
|
||||
go.sum
|
||||
- uses: golangci/golangci-lint-action@v2.5.2
|
||||
- uses: golangci/golangci-lint-action@v3.1.0
|
||||
with:
|
||||
# Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version.
|
||||
version: v1.42.1
|
||||
version: v1.44
|
||||
args: --timeout 10m
|
||||
github-token: ${{ secrets.github_token }}
|
||||
if: env.GIT_DIFF
|
||||
|
||||
2
.github/workflows/linter.yml
vendored
2
.github/workflows/linter.yml
vendored
@@ -19,7 +19,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout Code
|
||||
uses: actions/checkout@v2.4.0
|
||||
uses: actions/checkout@v3
|
||||
- name: Lint Code Base
|
||||
uses: docker://github/super-linter:v4
|
||||
env:
|
||||
|
||||
32
.github/workflows/markdown-links.yml
vendored
32
.github/workflows/markdown-links.yml
vendored
@@ -1,18 +1,18 @@
|
||||
# Currently disabled until all links have been fixed
|
||||
# name: Check Markdown links
|
||||
name: Check Markdown links
|
||||
|
||||
# on:
|
||||
# push:
|
||||
# branches:
|
||||
# - master
|
||||
# pull_request:
|
||||
# branches: [master]
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
pull_request:
|
||||
branches: [master]
|
||||
|
||||
# jobs:
|
||||
# markdown-link-check:
|
||||
# runs-on: ubuntu-latest
|
||||
# steps:
|
||||
# - uses: actions/checkout@master
|
||||
# - uses: gaurav-nelson/github-action-markdown-link-check@1.0.13
|
||||
# with:
|
||||
# check-modified-files-only: 'yes'
|
||||
jobs:
|
||||
markdown-link-check:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: gaurav-nelson/github-action-markdown-link-check@1.0.14
|
||||
with:
|
||||
check-modified-files-only: 'yes'
|
||||
config-file: '.md-link-check.json'
|
||||
|
||||
24
.github/workflows/proto-check.yml
vendored
24
.github/workflows/proto-check.yml
vendored
@@ -1,24 +0,0 @@
|
||||
name: Proto Check
|
||||
# Protobuf runs buf (https://buf.build/) lint and check-breakage
|
||||
# This workflow is only run when a file in the proto directory
|
||||
# has been modified.
|
||||
on:
|
||||
workflow_dispatch: # allow running workflow manually
|
||||
pull_request:
|
||||
paths:
|
||||
- "proto/*"
|
||||
jobs:
|
||||
proto-lint:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 4
|
||||
steps:
|
||||
- uses: actions/checkout@v2.4.0
|
||||
- name: lint
|
||||
run: make proto-lint
|
||||
proto-breakage:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 4
|
||||
steps:
|
||||
- uses: actions/checkout@v2.4.0
|
||||
- name: check-breakage
|
||||
run: make proto-check-breaking-ci
|
||||
64
.github/workflows/proto-dockerfile.yml
vendored
64
.github/workflows/proto-dockerfile.yml
vendored
@@ -1,64 +0,0 @@
|
||||
# This workflow (re)builds and pushes a Docker image containing the
|
||||
# protobuf build tools used by the other workflows.
|
||||
#
|
||||
# When making changes that require updates to the builder image, you
|
||||
# should merge the updates first and wait for this workflow to complete,
|
||||
# so that the changes will be available for the dependent workflows.
|
||||
#
|
||||
|
||||
name: Build & Push Proto Builder Image
|
||||
on:
|
||||
pull_request:
|
||||
paths:
|
||||
- "proto/*"
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
paths:
|
||||
- "proto/*"
|
||||
schedule:
|
||||
# run this job once a month to recieve any go or buf updates
|
||||
- cron: "0 9 1 * *"
|
||||
|
||||
env:
|
||||
REGISTRY: ghcr.io
|
||||
IMAGE_NAME: tendermint/docker-build-proto
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2.4.0
|
||||
- name: Check out and assign tags
|
||||
id: prep
|
||||
run: |
|
||||
DOCKER_IMAGE="${REGISTRY}/${IMAGE_NAME}"
|
||||
VERSION=noop
|
||||
if [[ "$GITHUB_REF" == "refs/tags/*" ]]; then
|
||||
VERSION="${GITHUB_REF#refs/tags/}"
|
||||
elif [[ "$GITHUB_REF" == "refs/heads/*" ]]; then
|
||||
VERSION="$(echo "${GITHUB_REF#refs/heads/}" | sed -r 's#/+#-#g')"
|
||||
if [[ "${{ github.event.repository.default_branch }}" = "$VERSION" ]]; then
|
||||
VERSION=latest
|
||||
fi
|
||||
fi
|
||||
TAGS="${DOCKER_IMAGE}:${VERSION}"
|
||||
echo ::set-output name=tags::"${TAGS}"
|
||||
|
||||
- name: Set up docker buildx
|
||||
uses: docker/setup-buildx-action@v1.6.0
|
||||
|
||||
- name: Log in to the container registry
|
||||
uses: docker/login-action@v1.12.0
|
||||
with:
|
||||
registry: ${{ env.REGISTRY }}
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Build and publish image
|
||||
uses: docker/build-push-action@v2.9.0
|
||||
with:
|
||||
context: ./proto
|
||||
file: ./proto/Dockerfile
|
||||
push: ${{ github.event_name != 'pull_request' }}
|
||||
tags: ${{ steps.prep.outputs.tags }}
|
||||
21
.github/workflows/proto-lint.yml
vendored
Normal file
21
.github/workflows/proto-lint.yml
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
name: Protobuf Lint
|
||||
on:
|
||||
pull_request:
|
||||
paths:
|
||||
- 'proto/**'
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
paths:
|
||||
- 'proto/**'
|
||||
|
||||
jobs:
|
||||
lint:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 5
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: bufbuild/buf-setup-action@v1.3.0
|
||||
- uses: bufbuild/buf-lint-action@v1
|
||||
with:
|
||||
input: 'proto'
|
||||
2
.github/workflows/release.yml
vendored
2
.github/workflows/release.yml
vendored
@@ -12,7 +12,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2.4.0
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
|
||||
4
.github/workflows/tests.yml
vendored
4
.github/workflows/tests.yml
vendored
@@ -19,7 +19,7 @@ jobs:
|
||||
- uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: "1.17"
|
||||
- uses: actions/checkout@v2.4.0
|
||||
- uses: actions/checkout@v3
|
||||
- uses: technote-space/get-diff-action@v6.0.1
|
||||
with:
|
||||
PATTERNS: |
|
||||
@@ -41,7 +41,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
needs: tests
|
||||
steps:
|
||||
- uses: actions/checkout@v2.4.0
|
||||
- uses: actions/checkout@v3
|
||||
- uses: technote-space/get-diff-action@v6.0.1
|
||||
with:
|
||||
PATTERNS: |
|
||||
|
||||
6
.md-link-check.json
Normal file
6
.md-link-check.json
Normal file
@@ -0,0 +1,6 @@
|
||||
{
|
||||
"retryOn429": true,
|
||||
"retryCount": 5,
|
||||
"fallbackRetryDelay": "30s",
|
||||
"aliveStatusCodes": [200, 206, 503]
|
||||
}
|
||||
35
CHANGELOG.md
35
CHANGELOG.md
@@ -2,6 +2,27 @@
|
||||
|
||||
Friendly reminder: We have a [bug bounty program](https://hackerone.com/cosmos).
|
||||
|
||||
## v0.35.2
|
||||
|
||||
February 28, 2022
|
||||
|
||||
Special thanks to external contributors on this release: @ashcherbakov, @yihuang, @waelsy123
|
||||
|
||||
### IMPROVEMENTS
|
||||
|
||||
- [consensus] [\#7875](https://github.com/tendermint/tendermint/pull/7875) additional timing metrics. (@williambanfield)
|
||||
|
||||
### BUG FIXES
|
||||
|
||||
- [abci] [\#7990](https://github.com/tendermint/tendermint/pull/7990) revert buffer limit change. (@williambanfield)
|
||||
- [cli] [#7837](https://github.com/tendermint/tendermint/pull/7837) fix app hash in state rollback. (@yihuang)
|
||||
- [cli] [\#7869](https://github.com/tendermint/tendermint/pull/7869) Update unsafe-reset-all command to match release v35. (waelsy123)
|
||||
- [light] [\#7640](https://github.com/tendermint/tendermint/pull/7640) Light Client: fix absence proof verification (@ashcherbakov)
|
||||
- [light] [\#7641](https://github.com/tendermint/tendermint/pull/7641) Light Client: fix querying against the latest height (@ashcherbakov)
|
||||
- [mempool] [\#7718](https://github.com/tendermint/tendermint/pull/7718) return duplicate tx errors more consistently. (@tychoish)
|
||||
- [rpc] [\#7744](https://github.com/tendermint/tendermint/pull/7744) fix layout of endpoint list. (@creachadair)
|
||||
- [statesync] [\#7886](https://github.com/tendermint/tendermint/pull/7886) assert app version matches. (@cmwaters)
|
||||
|
||||
## v0.35.1
|
||||
|
||||
January 26, 2022
|
||||
@@ -209,6 +230,18 @@ Special thanks to external contributors on this release: @JayT106,
|
||||
- [cmd/tendermint/commands] [\#6623](https://github.com/tendermint/tendermint/pull/6623) replace `$HOME/.some/test/dir` with `t.TempDir` (@tanyabouman)
|
||||
- [statesync] \6807 Implement P2P state provider as an alternative to RPC (@cmwaters)
|
||||
|
||||
## v0.34.16
|
||||
|
||||
Special thanks to external contributors on this release: @yihuang
|
||||
|
||||
### BUG FIXES
|
||||
|
||||
- [consensus] [\#7617](https://github.com/tendermint/tendermint/issues/7617) calculate prevote message delay metric (backport #7551) (@williambanfield).
|
||||
- [consensus] [\#7631](https://github.com/tendermint/tendermint/issues/7631) check proposal non-nil in prevote message delay metric (backport #7625) (@williambanfield).
|
||||
- [statesync] [\#7885](https://github.com/tendermint/tendermint/issues/7885) statesync: assert app version matches (backport #7856) (@cmwaters).
|
||||
- [statesync] [\#7881](https://github.com/tendermint/tendermint/issues/7881) fix app hash in state rollback (backport #7837) (@cmwaters).
|
||||
- [cli] [#7837](https://github.com/tendermint/tendermint/pull/7837) fix app hash in state rollback. (@yihuang).
|
||||
|
||||
## v0.34.15
|
||||
|
||||
Special thanks to external contributors on this release: @thanethomson
|
||||
@@ -980,7 +1013,7 @@ and a validator address plus a timestamp. Note we may remove the validator
|
||||
address & timestamp fields in the future (see ADR-25).
|
||||
|
||||
`lite2` package has been added to solve `lite` issues and introduce weak
|
||||
subjectivity interface. Refer to the [spec](https://github.com/tendermint/spec/blob/master/spec/consensus/light-client.md) for complete details.
|
||||
subjectivity interface. Refer to the [spec](./spec/consensus/light-client/) for complete details.
|
||||
`lite` package is now deprecated and will be removed in v0.34 release.
|
||||
|
||||
### BREAKING CHANGES:
|
||||
|
||||
@@ -17,10 +17,15 @@ Special thanks to external contributors on this release:
|
||||
- [mempool] \#7171 Remove legacy mempool implementation. (@tychoish)
|
||||
- [rpc] \#7575 Rework how RPC responses are written back via HTTP. (@creachadair)
|
||||
- [rpc] \#7713 Remove unused options for websocket clients. (@creachadair)
|
||||
- [config] \#7930 Add new event subscription options and defaults. (@creachadair)
|
||||
- [rpc] \#7982 Add new Events interface and deprecate Subscribe. (@creachadair)
|
||||
- [cli] \#8081 make the reset command safe to use. (@marbar3778)
|
||||
- [config] \#8222 default indexer configuration to null. (@creachadair)
|
||||
|
||||
- Apps
|
||||
|
||||
- [tendermint/spec] \#7804 Migrate spec from [spec repo](https://github.com/tendermint/spec).
|
||||
- [abci] \#7984 Remove the locks preventing concurrent use of ABCI applications by Tendermint. (@tychoish)
|
||||
|
||||
- P2P Protocol
|
||||
|
||||
@@ -63,6 +68,7 @@ Special thanks to external contributors on this release:
|
||||
- [internal/protoio] \#7325 Optimized `MarshalDelimited` by inlining the common case and using a `sync.Pool` in the worst case. (@odeke-em)
|
||||
- [consensus] \#6969 remove logic to 'unlock' a locked block.
|
||||
- [evidence] \#7700 Evidence messages contain single Evidence instead of EvidenceList (@jmalicevic)
|
||||
- [evidence] \#7802 Evidence pool emits events when evidence is validated and updates a metric when the number of evidence in the evidence pool changes. (@jmalicevic)
|
||||
- [pubsub] \#7319 Performance improvements for the event query API (@creachadair)
|
||||
- [node] \#7521 Define concrete type for seed node implementation (@spacech1mp)
|
||||
- [rpc] \#7612 paginate mempool /unconfirmed_txs rpc endpoint (@spacech1mp)
|
||||
@@ -74,3 +80,4 @@ Special thanks to external contributors on this release:
|
||||
- fix: assignment copies lock value in `BitArray.UnmarshalJSON()` (@lklimek)
|
||||
- [light] \#7640 Light Client: fix absence proof verification (@ashcherbakov)
|
||||
- [light] \#7641 Light Client: fix querying against the latest height (@ashcherbakov)
|
||||
- [cli] [#7837](https://github.com/tendermint/tendermint/pull/7837) fix app hash in state rollback. (@yihuang)
|
||||
|
||||
@@ -20,7 +20,7 @@ This code of conduct applies to all projects run by the Tendermint/COSMOS team a
|
||||
|
||||
* Please keep unstructured critique to a minimum. If you have solid ideas you want to experiment with, make a fork and see how it works.
|
||||
|
||||
* We will exclude you from interaction if you insult, demean or harass anyone. That is not welcome behaviour. We interpret the term “harassment” as including the definition in the [Citizen Code of Conduct](http://citizencodeofconduct.org/); if you have any lack of clarity about what might be included in that concept, please read their definition. In particular, we don’t tolerate behavior that excludes people in socially marginalized groups.
|
||||
* We will exclude you from interaction if you insult, demean or harass anyone. That is not welcome behaviour. We interpret the term “harassment” as including the definition in the [Citizen Code of Conduct](https://github.com/stumpsyn/policies/blob/master/citizen_code_of_conduct.md); if you have any lack of clarity about what might be included in that concept, please read their definition. In particular, we don’t tolerate behavior that excludes people in socially marginalized groups.
|
||||
|
||||
* Private harassment is also unacceptable. No matter who you are, if you feel you have been or are being harassed or made uncomfortable by a community member, please contact one of the channel admins or the person mentioned above immediately. Whether you’re a regular contributor or a newcomer, we care about making this community a safe place for you and we’ve got your back.
|
||||
|
||||
|
||||
@@ -105,11 +105,33 @@ specify exactly the dependency you want to update, eg.
|
||||
|
||||
## Protobuf
|
||||
|
||||
We use [Protocol Buffers](https://developers.google.com/protocol-buffers) along with [gogoproto](https://github.com/gogo/protobuf) to generate code for use across Tendermint Core.
|
||||
We use [Protocol Buffers](https://developers.google.com/protocol-buffers) along
|
||||
with [`gogoproto`](https://github.com/gogo/protobuf) to generate code for use
|
||||
across Tendermint Core.
|
||||
|
||||
For linting, checking breaking changes and generating proto stubs, we use [buf](https://buf.build/). If you would like to run linting and check if the changes you have made are breaking then you will need to have docker running locally. Then the linting cmd will be `make proto-lint` and the breaking changes check will be `make proto-check-breaking`.
|
||||
To generate proto stubs, lint, and check protos for breaking changes, you will
|
||||
need to install [buf](https://buf.build/) and `gogoproto`. Then, from the root
|
||||
of the repository, run:
|
||||
|
||||
We use [Docker](https://www.docker.com/) to generate the protobuf stubs. To generate the stubs yourself, make sure docker is running then run `make proto-gen`. This command uses the spec repo to get the necessary protobuf files for generating the go code. If you are modifying the proto files manually for changes in the core data structures, you will need to clone them into the go repo and comment out lines 22-37 of the file `./scripts/protocgen.sh`.
|
||||
```bash
|
||||
# Lint all of the .proto files in proto/tendermint
|
||||
make proto-lint
|
||||
|
||||
# Check if any of your local changes (prior to committing to the Git repository)
|
||||
# are breaking
|
||||
make proto-check-breaking
|
||||
|
||||
# Generate Go code from the .proto files in proto/tendermint
|
||||
make proto-gen
|
||||
```
|
||||
|
||||
To automatically format `.proto` files, you will need
|
||||
[`clang-format`](https://clang.llvm.org/docs/ClangFormat.html) installed. Once
|
||||
installed, you can run:
|
||||
|
||||
```bash
|
||||
make proto-format
|
||||
```
|
||||
|
||||
### Visual Studio Code
|
||||
|
||||
|
||||
1
DOCKER/.gitignore
vendored
1
DOCKER/.gitignore
vendored
@@ -1 +0,0 @@
|
||||
tendermint
|
||||
@@ -2,7 +2,7 @@
|
||||
FROM golang:1.17-alpine as builder
|
||||
RUN apk update && \
|
||||
apk upgrade && \
|
||||
apk --no-cache add make
|
||||
apk --no-cache add make git
|
||||
COPY / /tendermint
|
||||
WORKDIR /tendermint
|
||||
RUN make build-linux
|
||||
@@ -53,4 +53,3 @@ CMD ["start"]
|
||||
|
||||
# Expose the data directory as a volume since there's mutable state in there
|
||||
VOLUME [ "$TMHOME" ]
|
||||
|
||||
|
||||
@@ -1,28 +0,0 @@
|
||||
FROM amazonlinux:2
|
||||
|
||||
RUN yum -y update && \
|
||||
yum -y install wget
|
||||
|
||||
RUN wget http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm && \
|
||||
rpm -ivh epel-release-latest-7.noarch.rpm
|
||||
|
||||
RUN yum -y groupinstall "Development Tools"
|
||||
RUN yum -y install leveldb-devel which
|
||||
|
||||
ENV GOVERSION=1.16.5
|
||||
|
||||
RUN cd /tmp && \
|
||||
wget https://dl.google.com/go/go${GOVERSION}.linux-amd64.tar.gz && \
|
||||
tar -C /usr/local -xf go${GOVERSION}.linux-amd64.tar.gz && \
|
||||
mkdir -p /go/src && \
|
||||
mkdir -p /go/bin
|
||||
|
||||
ENV PATH=$PATH:/usr/local/go/bin:/go/bin
|
||||
ENV GOBIN=/go/bin
|
||||
ENV GOPATH=/go/src
|
||||
|
||||
RUN mkdir -p /tendermint
|
||||
WORKDIR /tendermint
|
||||
|
||||
CMD ["/usr/bin/make", "build", "TENDERMINT_BUILD_OPTIONS=cleveldb"]
|
||||
|
||||
@@ -1,16 +0,0 @@
|
||||
FROM golang:latest
|
||||
|
||||
# Grab deps (jq, hexdump, xxd, killall)
|
||||
RUN apt-get update && \
|
||||
apt-get install -y --no-install-recommends \
|
||||
jq bsdmainutils vim-common psmisc netcat
|
||||
|
||||
# Add testing deps for curl
|
||||
RUN echo 'deb http://httpredir.debian.org/debian testing main non-free contrib' >> /etc/apt/sources.list && \
|
||||
apt-get update && \
|
||||
apt-get install -y --no-install-recommends curl
|
||||
|
||||
VOLUME /go
|
||||
|
||||
EXPOSE 26656
|
||||
EXPOSE 26657
|
||||
@@ -1,13 +0,0 @@
|
||||
build:
|
||||
@sh -c "'$(CURDIR)/build.sh'"
|
||||
|
||||
push:
|
||||
@sh -c "'$(CURDIR)/push.sh'"
|
||||
|
||||
build_testing:
|
||||
docker build --tag tendermint/testing -f ./Dockerfile.testing .
|
||||
|
||||
build_amazonlinux_buildimage:
|
||||
docker build -t "tendermint/tendermint:build_c-amazonlinux" -f Dockerfile.build_c-amazonlinux .
|
||||
|
||||
.PHONY: build push build_testing build_amazonlinux_buildimage
|
||||
@@ -1,20 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
set -e
|
||||
|
||||
# Get the tag from the version, or try to figure it out.
|
||||
if [ -z "$TAG" ]; then
|
||||
TAG=$(awk -F\" '/TMCoreSemVer =/ { print $2; exit }' < ../version/version.go)
|
||||
fi
|
||||
if [ -z "$TAG" ]; then
|
||||
echo "Please specify a tag."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
TAG_NO_PATCH=${TAG%.*}
|
||||
|
||||
read -p "==> Build 3 docker images with the following tags (latest, $TAG, $TAG_NO_PATCH)? y/n" -n 1 -r
|
||||
echo
|
||||
if [[ $REPLY =~ ^[Yy]$ ]]
|
||||
then
|
||||
docker build -t "tendermint/tendermint" -t "tendermint/tendermint:$TAG" -t "tendermint/tendermint:$TAG_NO_PATCH" .
|
||||
fi
|
||||
@@ -1,22 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
set -e
|
||||
|
||||
# Get the tag from the version, or try to figure it out.
|
||||
if [ -z "$TAG" ]; then
|
||||
TAG=$(awk -F\" '/TMCoreSemVer =/ { print $2; exit }' < ../version/version.go)
|
||||
fi
|
||||
if [ -z "$TAG" ]; then
|
||||
echo "Please specify a tag."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
TAG_NO_PATCH=${TAG%.*}
|
||||
|
||||
read -p "==> Push 3 docker images with the following tags (latest, $TAG, $TAG_NO_PATCH)? y/n" -n 1 -r
|
||||
echo
|
||||
if [[ $REPLY =~ ^[Yy]$ ]]
|
||||
then
|
||||
docker push "tendermint/tendermint:latest"
|
||||
docker push "tendermint/tendermint:$TAG"
|
||||
docker push "tendermint/tendermint:$TAG_NO_PATCH"
|
||||
fi
|
||||
61
Makefile
61
Makefile
@@ -13,8 +13,6 @@ endif
|
||||
|
||||
LD_FLAGS = -X github.com/tendermint/tendermint/version.TMVersion=$(VERSION)
|
||||
BUILD_FLAGS = -mod=readonly -ldflags "$(LD_FLAGS)"
|
||||
BUILD_IMAGE := ghcr.io/tendermint/docker-build-proto
|
||||
DOCKER_PROTO_BUILDER := docker run -v $(shell pwd):/workspace --workdir /workspace $(BUILD_IMAGE)
|
||||
CGO_ENABLED ?= 0
|
||||
|
||||
# handle nostrip
|
||||
@@ -73,41 +71,57 @@ install:
|
||||
|
||||
$(BUILDDIR)/:
|
||||
mkdir -p $@
|
||||
# The Docker image containing the generator, formatter, and linter.
|
||||
# This is generated by proto/Dockerfile. To update tools, make changes
|
||||
# there and run the Build & Push Proto Builder Image workflow.
|
||||
IMAGE := ghcr.io/tendermint/docker-build-proto:latest
|
||||
DOCKER_PROTO_BUILDER := docker run -v $(shell pwd):/workspace --workdir /workspace $(IMAGE)
|
||||
HTTPS_GIT := https://github.com/tendermint/tendermint.git
|
||||
|
||||
###############################################################################
|
||||
### Protobuf ###
|
||||
###############################################################################
|
||||
|
||||
proto-all: proto-lint proto-check-breaking
|
||||
.PHONY: proto-all
|
||||
check-proto-deps:
|
||||
ifeq (,$(shell which buf))
|
||||
$(error "buf is required for Protobuf building, linting and breakage checking. See https://docs.buf.build/installation for installation instructions.")
|
||||
endif
|
||||
ifeq (,$(shell which protoc-gen-gogofaster))
|
||||
$(error "gogofaster plugin for protoc is required. Run 'go install github.com/gogo/protobuf/protoc-gen-gogofaster@latest' to install")
|
||||
endif
|
||||
.PHONY: check-proto-deps
|
||||
|
||||
proto-gen:
|
||||
check-proto-format-deps:
|
||||
ifeq (,$(shell which clang-format))
|
||||
$(error "clang-format is required for Protobuf formatting. See instructions for your platform on how to install it.")
|
||||
endif
|
||||
.PHONY: check-proto-format-deps
|
||||
|
||||
proto-gen: check-proto-deps
|
||||
@echo "Generating Protobuf files"
|
||||
@$(DOCKER_PROTO_BUILDER) buf generate --template=./buf.gen.yaml --config ./buf.yaml
|
||||
@buf generate
|
||||
@mv ./proto/tendermint/abci/types.pb.go ./abci/types/
|
||||
.PHONY: proto-gen
|
||||
|
||||
proto-lint:
|
||||
@$(DOCKER_PROTO_BUILDER) buf lint --error-format=json --config ./buf.yaml
|
||||
# These targets are provided for convenience and are intended for local
|
||||
# execution only.
|
||||
proto-lint: check-proto-deps
|
||||
@echo "Linting Protobuf files"
|
||||
@buf lint
|
||||
.PHONY: proto-lint
|
||||
|
||||
proto-format:
|
||||
proto-format: check-proto-format-deps
|
||||
@echo "Formatting Protobuf files"
|
||||
@$(DOCKER_PROTO_BUILDER) find . -name '*.proto' -path "./proto/*" -exec clang-format -i {} \;
|
||||
@find . -name '*.proto' -path "./proto/*" -exec clang-format -i {} \;
|
||||
.PHONY: proto-format
|
||||
|
||||
proto-check-breaking:
|
||||
@$(DOCKER_PROTO_BUILDER) buf breaking --against .git --config ./buf.yaml
|
||||
proto-check-breaking: check-proto-deps
|
||||
@echo "Checking for breaking changes in Protobuf files against local branch"
|
||||
@echo "Note: This is only useful if your changes have not yet been committed."
|
||||
@echo " Otherwise read up on buf's \"breaking\" command usage:"
|
||||
@echo " https://docs.buf.build/breaking/usage"
|
||||
@buf breaking --against ".git"
|
||||
.PHONY: proto-check-breaking
|
||||
|
||||
proto-check-breaking-ci:
|
||||
@$(DOCKER_PROTO_BUILDER) buf breaking --against $(HTTPS_GIT) --config ./buf.yaml
|
||||
.PHONY: proto-check-breaking-ci
|
||||
# TODO: Should be removed when work on ABCI++ is complete.
|
||||
# For more information, see https://github.com/tendermint/tendermint/issues/8066
|
||||
abci-proto-gen:
|
||||
./scripts/abci-gen.sh
|
||||
.PHONY: abci-proto-gen
|
||||
|
||||
###############################################################################
|
||||
### Build ABCI ###
|
||||
@@ -229,10 +243,8 @@ build-docs:
|
||||
### Docker image ###
|
||||
###############################################################################
|
||||
|
||||
build-docker: build-linux
|
||||
cp $(BUILDDIR)/tendermint DOCKER/tendermint
|
||||
build-docker:
|
||||
docker build --label=tendermint --tag="tendermint/tendermint" -f DOCKER/Dockerfile .
|
||||
rm -rf DOCKER/tendermint
|
||||
.PHONY: build-docker
|
||||
|
||||
|
||||
@@ -329,4 +341,3 @@ split-test-packages:$(BUILDDIR)/packages.txt
|
||||
split -d -n l/$(NUM_SPLIT) $< $<.
|
||||
test-group-%:split-test-packages
|
||||
cat $(BUILDDIR)/packages.txt.$* | xargs go test -mod=readonly -timeout=5m -race -coverprofile=$(BUILDDIR)/$*.profile.out
|
||||
|
||||
|
||||
54
README.md
54
README.md
@@ -3,7 +3,7 @@
|
||||

|
||||
|
||||
[Byzantine-Fault Tolerant](https://en.wikipedia.org/wiki/Byzantine_fault_tolerance)
|
||||
[State Machines](https://en.wikipedia.org/wiki/State_machine_replication).
|
||||
[State Machine Replication](https://en.wikipedia.org/wiki/State_machine_replication).
|
||||
Or [Blockchain](<https://en.wikipedia.org/wiki/Blockchain_(database)>), for short.
|
||||
|
||||
[](https://github.com/tendermint/tendermint/releases/latest)
|
||||
@@ -20,10 +20,14 @@ Or [Blockchain](<https://en.wikipedia.org/wiki/Blockchain_(database)>), for shor
|
||||
|
||||
Tendermint Core is a Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine - written in any programming language - and securely replicates it on many machines.
|
||||
|
||||
For protocol details, see [the specification](https://github.com/tendermint/spec).
|
||||
For protocol details, refer to the [Tendermint Specification](./spec/README.md).
|
||||
|
||||
For detailed analysis of the consensus protocol, including safety and liveness proofs,
|
||||
see our recent paper, "[The latest gossip on BFT consensus](https://arxiv.org/abs/1807.04938)".
|
||||
read our paper, "[The latest gossip on BFT consensus](https://arxiv.org/abs/1807.04938)".
|
||||
|
||||
## Documentation
|
||||
|
||||
Complete documentation can be found on the [website](https://docs.tendermint.com/).
|
||||
|
||||
## Releases
|
||||
|
||||
@@ -33,7 +37,7 @@ Tendermint has been in the production of private and public environments, most n
|
||||
See below for more details about [versioning](#versioning).
|
||||
|
||||
In any case, if you intend to run Tendermint in production, we're happy to help. You can
|
||||
contact us [over email](mailto:hello@interchain.berlin) or [join the chat](https://discord.gg/cosmosnetwork).
|
||||
contact us [over email](mailto:hello@interchain.io) or [join the chat](https://discord.gg/cosmosnetwork).
|
||||
|
||||
More on how releases are conducted can be found [here](./RELEASES.md).
|
||||
|
||||
@@ -52,20 +56,15 @@ to notify you of vulnerabilities and fixes in Tendermint Core. You can subscribe
|
||||
|-------------|------------------|
|
||||
| Go version | Go1.17 or higher |
|
||||
|
||||
## Documentation
|
||||
|
||||
Complete documentation can be found on the [website](https://docs.tendermint.com/master/).
|
||||
|
||||
### Install
|
||||
|
||||
See the [install instructions](/docs/introduction/install.md).
|
||||
See the [install instructions](./docs/introduction/install.md).
|
||||
|
||||
### Quick Start
|
||||
|
||||
- [Single node](/docs/introduction/quick-start.md)
|
||||
- [Local cluster using docker-compose](/docs/tools/docker-compose.md)
|
||||
- [Remote cluster using Terraform and Ansible](/docs/tools/terraform-and-ansible.md)
|
||||
- [Join the Cosmos testnet](https://cosmos.network/testnet)
|
||||
- [Single node](./docs/introduction/quick-start.md)
|
||||
- [Local cluster using docker-compose](./docs/tools/docker-compose.md)
|
||||
- [Remote cluster using Terraform and Ansible](./docs/tools/terraform-and-ansible.md)
|
||||
|
||||
## Contributing
|
||||
|
||||
@@ -73,9 +72,9 @@ Please abide by the [Code of Conduct](CODE_OF_CONDUCT.md) in all interactions.
|
||||
|
||||
Before contributing to the project, please take a look at the [contributing guidelines](CONTRIBUTING.md)
|
||||
and the [style guide](STYLE_GUIDE.md). You may also find it helpful to read the
|
||||
[specifications](https://github.com/tendermint/spec), watch the [Developer Sessions](/docs/DEV_SESSIONS.md),
|
||||
[specifications](./spec/README.md),
|
||||
and familiarize yourself with our
|
||||
[Architectural Decision Records](https://github.com/tendermint/tendermint/tree/master/docs/architecture).
|
||||
[Architectural Decision Records (ADRs)](./docs/architecture/README.md) and [Request For Comments (RFCs)](./docs/rfc/README.md).
|
||||
|
||||
## Versioning
|
||||
|
||||
@@ -112,26 +111,23 @@ in [UPGRADING.md](./UPGRADING.md).
|
||||
|
||||
## Resources
|
||||
|
||||
### Tendermint Core
|
||||
### Roadmap
|
||||
|
||||
We keep a public up-to-date version of our roadmap [here](./docs/roadmap/roadmap.md)
|
||||
|
||||
For details about the blockchain data structures and the p2p protocols, see the
|
||||
[Tendermint specification](https://docs.tendermint.com/master/spec/).
|
||||
### Libraries
|
||||
|
||||
For details on using the software, see the [documentation](/docs/) which is also
|
||||
hosted at: <https://docs.tendermint.com/master/>
|
||||
|
||||
### Tools
|
||||
|
||||
Benchmarking is provided by [`tm-load-test`](https://github.com/informalsystems/tm-load-test).
|
||||
Additional tooling can be found in [/docs/tools](/docs/tools).
|
||||
- [Cosmos SDK](http://github.com/cosmos/cosmos-sdk); A framework for building applications in Golang
|
||||
- [Tendermint in Rust](https://github.com/informalsystems/tendermint-rs)
|
||||
- [ABCI Tower](https://github.com/penumbra-zone/tower-abci)
|
||||
|
||||
### Applications
|
||||
|
||||
- [Cosmos SDK](http://github.com/cosmos/cosmos-sdk); a cryptocurrency application framework
|
||||
- [Ethermint](http://github.com/cosmos/ethermint); Ethereum on Tendermint
|
||||
- [Many more](https://tendermint.com/ecosystem)
|
||||
- [Cosmos Hub](https://hub.cosmos.network/)
|
||||
- [Terra](https://www.terra.money/)
|
||||
- [Celestia](https://celestia.org/)
|
||||
- [Anoma](https://anoma.network/)
|
||||
- [Vocdoni](https://docs.vocdoni.io/)
|
||||
|
||||
### Research
|
||||
|
||||
@@ -144,7 +140,7 @@ Additional tooling can be found in [/docs/tools](/docs/tools).
|
||||
## Join us!
|
||||
|
||||
Tendermint Core is maintained by [Interchain GmbH](https://interchain.berlin).
|
||||
If you'd like to work full-time on Tendermint Core, [we're hiring](https://interchain-gmbh.breezy.hr/p/682fb7e8a6f601-software-engineer-tendermint-core)!
|
||||
If you'd like to work full-time on Tendermint Core, [we're hiring](https://interchain-gmbh.breezy.hr/)!
|
||||
|
||||
Funding for Tendermint Core development comes primarily from the [Interchain Foundation](https://interchain.io),
|
||||
a Swiss non-profit. The Tendermint trademark is owned by [Tendermint Inc.](https://tendermint.com), the for-profit entity
|
||||
|
||||
29
RELEASES.md
29
RELEASES.md
@@ -42,15 +42,42 @@ In the following example, we'll assume that we're making a backport branch for
|
||||
the 0.35.x line.
|
||||
|
||||
1. Start on `master`
|
||||
|
||||
2. Create and push the backport branch:
|
||||
```sh
|
||||
git checkout -b v0.35.x
|
||||
git push origin v0.35.x
|
||||
```
|
||||
|
||||
3. Create a PR to update the documentation directory for the backport branch.
|
||||
|
||||
We only maintain RFC and ADR documents on master, to avoid confusion.
|
||||
In addition, we rewrite Markdown URLs pointing to master to point to the
|
||||
backport branch, so that generated documentation will link to the correct
|
||||
versions of files elsewhere in the repository. For context on the latter,
|
||||
see https://github.com/tendermint/tendermint/issues/7675.
|
||||
|
||||
To prepare the PR:
|
||||
```sh
|
||||
# Remove the RFC and ADR documents from the backport.
|
||||
# We only maintain these on master to avoid confusion.
|
||||
git rm -r docs/rfc docs/architecture
|
||||
|
||||
# Update absolute links to point to the backport.
|
||||
go run ./scripts/linkpatch -recur -target v0.35.x -skip-path docs/DOCS_README.md,docs/README.md docs
|
||||
|
||||
# Create and push the PR.
|
||||
git checkout -b update-docs-v035x
|
||||
git commit -m "Update docs for v0.35.x backport branch." docs
|
||||
git push -u origin update-docs-v035x
|
||||
```
|
||||
|
||||
Be sure to merge this PR before making other changes on the newly-created
|
||||
backport branch.
|
||||
|
||||
After doing these steps, go back to `master` and do the following:
|
||||
|
||||
1. Tag `master` as the dev branch for the _next_ major release and push it back up.
|
||||
1. Tag `master` as the dev branch for the _next_ major release and push it up to GitHub.
|
||||
For example:
|
||||
```sh
|
||||
git tag -a v0.36.0-dev -m "Development base for Tendermint v0.36."
|
||||
|
||||
123
UPGRADING.md
123
UPGRADING.md
@@ -2,6 +2,112 @@
|
||||
|
||||
This guide provides instructions for upgrading to specific versions of Tendermint Core.
|
||||
|
||||
## v0.36
|
||||
|
||||
### ABCI Changes
|
||||
|
||||
#### ABCI++
|
||||
|
||||
Coming soon...
|
||||
|
||||
#### ABCI Mutex
|
||||
|
||||
In previous versions of ABCI, Tendermint was prevented from making
|
||||
concurrent calls to ABCI implementations by virtue of mutexes in the
|
||||
implementation of Tendermint's ABCI infrastructure. These mutexes have
|
||||
been removed from the current implementation and applications will now
|
||||
be responsible for managing their own concurrency control.
|
||||
|
||||
To replicate the prior semantics, ensure that ABCI applications have a
|
||||
single mutex that protects all ABCI method calls from concurrent
|
||||
access. You can relax these requirements if your application can
|
||||
provide safe concurrent access via other means. This safety is an
|
||||
application concern so be very sure to test the application thoroughly
|
||||
using realistic workloads and the race detector to ensure your
|
||||
applications remains correct.
|
||||
|
||||
### Config Changes
|
||||
|
||||
The default configuration for a newly-created node now disables indexing for
|
||||
ABCI event metadata. Existing node configurations that already have indexing
|
||||
turned on are not affected. Operators who wish to enable indexing for a new
|
||||
node, however, must now edit the `config.toml` explicitly.
|
||||
|
||||
### RPC Changes
|
||||
|
||||
Tendermint v0.36 adds a new RPC event subscription API. The existing event
|
||||
subscription API based on websockets is now deprecated. It will continue to
|
||||
work throughout the v0.36 release, but the `subscribe`, `unsubscribe`, and
|
||||
`unsubscribe_all` methods, along with websocket support, will be removed in
|
||||
Tendermint v0.37. Callers currently using these features should migrate as
|
||||
soon as is practical to the new API.
|
||||
|
||||
To enable the new API, node operators set a new `event-log-window-size`
|
||||
parameter in the `[rpc]` section of the `config.toml` file. This defines a
|
||||
duration of time during which the node will log all events published to the
|
||||
event bus for use by RPC consumers.
|
||||
|
||||
Consumers use the new `events` JSON-RPC method to poll for events matching
|
||||
their query in the log. Unlike the streaming API, events are not discarded if
|
||||
the caller is slow, loses its connection, or crashes. As long as the client
|
||||
recovers before its events expire from the log window, it will be able to
|
||||
replay and catch up after recovering. Also unlike the streaming API, the client
|
||||
can tell if it has truly missed events because they have expired from the log.
|
||||
|
||||
The `events` method is a normal JSON-RPC method, and does not require any
|
||||
non-standard response processing (in contrast with the old `subscribe`).
|
||||
Clients can modify their query at any time, and no longer need to coordinate
|
||||
subscribe and unsubscribe calls to handle multiple queries.
|
||||
|
||||
The Go client implementations in the Tendermint Core repository have all been
|
||||
updated to add a new `Events` method, including the light client proxy.
|
||||
|
||||
A new `rpc/client/eventstream` package has also been added to make it easier
|
||||
for users to update existing use of the streaming API to use the polling API
|
||||
The `eventstream` package handles polling and delivers matching events to a
|
||||
callback.
|
||||
|
||||
For more detailed information, see [ADR 075](https://tinyurl.com/adr075) which
|
||||
defines and describes the new API in detail.
|
||||
|
||||
### Timeout Parameter Changes
|
||||
|
||||
Tendermint v0.36 updates how the Tendermint consensus timing parameters are
|
||||
configured. These parameters, `timeout-propose`, `timeout-propose-delta`,
|
||||
`timeout-prevote`, `timeout-prevote-delta`, `timeout-precommit`,
|
||||
`timeout-precommit-delta`, `timeout-commit`, and `skip-timeout-commit`, were
|
||||
previously configured in `config.toml`. These timing parameters have moved and
|
||||
are no longer configured in the `config.toml` file. These parameters have been
|
||||
migrated into the `ConsensusParameters`. Nodes with these parameters set in the
|
||||
local configuration file will see a warning logged on startup indicating that
|
||||
these parameters are no longer used.
|
||||
|
||||
These parameters have also been pared-down. There are no longer separate
|
||||
parameters for both the `prevote` and `precommit` phases of Tendermint. The
|
||||
separate `timeout-prevote` and `timeout-precommit` parameters have been merged
|
||||
into a single `timeout-vote` parameter that configures both of these similar
|
||||
phases of the consensus protocol.
|
||||
|
||||
A set of reasonable defaults have been put in place for these new parameters
|
||||
that will take effect when the node starts up in version v0.36. New chains
|
||||
created using v0.36 and beyond will be able to configure these parameters in the
|
||||
chain's `genesis.json` file. Chains that upgrade to v0.36 from a previous
|
||||
compatible version of Tendermint will begin running with the default values.
|
||||
Upgrading applications that wish to use different values from the defaults for
|
||||
these parameters may do so by setting the `ConsensusParams.Timeout` field of the
|
||||
`FinalizeBlock` `ABCI` response.
|
||||
|
||||
As a safety measure in case of unusual timing issues during the upgrade to
|
||||
v0.36, an operator may override the consensus timeout values for a single node.
|
||||
Note, however, that these overrides will be removed in Tendermint v0.37. See
|
||||
[configuration](https://github.com/tendermint/tendermint/blob/wb/issue-8182/docs/nodes/configuration.md)
|
||||
for more information about these overrides.
|
||||
|
||||
For more discussion of this, see [ADR 074](https://tinyurl.com/adr074), which
|
||||
lays out the reasoning for the changes as well as [RFC
|
||||
009](https://tinyurl.com/rfc009) for a discussion of the complexities of
|
||||
upgrading consensus parameters.
|
||||
|
||||
## v0.35
|
||||
|
||||
### ABCI Changes
|
||||
@@ -113,11 +219,11 @@ To access any of the functionality previously available via the
|
||||
`node.Node` type, use the `*local.Local` "RPC" client, that exposes
|
||||
the full RPC interface provided as direct function calls. Import the
|
||||
`github.com/tendermint/tendermint/rpc/client/local` package and pass
|
||||
the node service as in the following:
|
||||
the node service as in the following:
|
||||
|
||||
```go
|
||||
node := node.NewDefault() //construct the node object
|
||||
// start and set up the node service
|
||||
// start and set up the node service
|
||||
|
||||
client := local.New(node.(local.NodeService))
|
||||
// use client object to interact with the node
|
||||
@@ -144,10 +250,10 @@ both stacks.
|
||||
The P2P library was reimplemented in this release. The new implementation is
|
||||
enabled by default in this version of Tendermint. The legacy implementation is still
|
||||
included in this version of Tendermint as a backstop to work around unforeseen
|
||||
production issues. The new and legacy version are interoperable. If necessary,
|
||||
production issues. The new and legacy version are interoperable. If necessary,
|
||||
you can enable the legacy implementation in the server configuration file.
|
||||
|
||||
To make use of the legacy P2P implemementation add or update the following field of
|
||||
To make use of the legacy P2P implemementation add or update the following field of
|
||||
your server's configuration file under the `[p2p]` section:
|
||||
|
||||
```toml
|
||||
@@ -172,8 +278,8 @@ in the order in which they were received.
|
||||
|
||||
* `priority`: A priority queue of messages.
|
||||
|
||||
* `wdrr`: A queue implementing the Weighted Deficit Round Robin algorithm. A
|
||||
weighted deficit round robin queue is created per peer. Each queue contains a
|
||||
* `wdrr`: A queue implementing the Weighted Deficit Round Robin algorithm. A
|
||||
weighted deficit round robin queue is created per peer. Each queue contains a
|
||||
separate 'flow' for each of the channels of communication that exist between any two
|
||||
peers. Tendermint maintains a channel per message type between peers. Each WDRR
|
||||
queue maintains a shared buffered with a fixed capacity through which messages on different
|
||||
@@ -217,7 +323,7 @@ Note also that Tendermint 0.34 also requires Go 1.16 or higher.
|
||||
were added to support the new State Sync feature.
|
||||
Previously, syncing a new node to a preexisting network could take days; but with State Sync,
|
||||
new nodes are able to join a network in a matter of seconds.
|
||||
Read [the spec](https://docs.tendermint.com/master/spec/abci/apps.html#state-sync)
|
||||
Read [the spec](https://github.com/tendermint/tendermint/blob/master/spec/abci/apps.md)
|
||||
if you want to learn more about State Sync, or if you'd like your application to use it.
|
||||
(If you don't want to support State Sync in your application, you can just implement these new
|
||||
ABCI methods as no-ops, leaving them empty.)
|
||||
@@ -342,7 +448,6 @@ The `bech32` package has moved to the Cosmos SDK:
|
||||
### CLI
|
||||
|
||||
The `tendermint lite` command has been renamed to `tendermint light` and has a slightly different API.
|
||||
See [the docs](https://docs.tendermint.com/master/tendermint-core/light-client-protocol.html#http-proxy) for details.
|
||||
|
||||
### Light Client
|
||||
|
||||
@@ -617,7 +722,7 @@ the compilation tag:
|
||||
|
||||
Use `cleveldb` tag instead of `gcc` to compile Tendermint with CLevelDB or
|
||||
use `make build_c` / `make install_c` (full instructions can be found at
|
||||
<https://tendermint.com/docs/introduction/install.html#compile-with-cleveldb-support>)
|
||||
<https://docs.tendermint.com/v0.35/introduction/install.html)
|
||||
|
||||
## v0.31.0
|
||||
|
||||
|
||||
@@ -19,8 +19,8 @@ To get up and running quickly, see the [getting started guide](../docs/app-dev/g
|
||||
|
||||
A detailed description of the ABCI methods and message types is contained in:
|
||||
|
||||
- [The main spec](https://github.com/tendermint/spec/blob/master/spec/abci/abci.md)
|
||||
- [A protobuf file](https://github.com/tendermint/spec/blob/master/proto/tendermint/abci/types.proto)
|
||||
- [The main spec](../spec/abci/abci.md)
|
||||
- [A protobuf file](../proto/tendermint/abci/types.proto)
|
||||
- [A Go interface](./types/application.go)
|
||||
|
||||
## Protocol Buffers
|
||||
|
||||
@@ -19,8 +19,8 @@ const (
|
||||
|
||||
// Client defines an interface for an ABCI client.
|
||||
//
|
||||
// All `Async` methods return a `ReqRes` object and an error.
|
||||
// All `Sync` methods return the appropriate protobuf ResponseXxx struct and an error.
|
||||
// All methods return the appropriate protobuf ResponseXxx struct and
|
||||
// an error.
|
||||
//
|
||||
// NOTE these are client errors, eg. ABCI socket connectivity issues.
|
||||
// Application-related errors are reflected in response via ABCI error codes
|
||||
@@ -52,65 +52,35 @@ type Client interface {
|
||||
|
||||
// NewClient returns a new ABCI client of the specified transport type.
|
||||
// It returns an error if the transport is not "socket" or "grpc"
|
||||
func NewClient(logger log.Logger, addr, transport string, mustConnect bool) (client Client, err error) {
|
||||
func NewClient(logger log.Logger, addr, transport string, mustConnect bool) (Client, error) {
|
||||
switch transport {
|
||||
case "socket":
|
||||
client = NewSocketClient(logger, addr, mustConnect)
|
||||
return NewSocketClient(logger, addr, mustConnect), nil
|
||||
case "grpc":
|
||||
client = NewGRPCClient(logger, addr, mustConnect)
|
||||
return NewGRPCClient(logger, addr, mustConnect), nil
|
||||
default:
|
||||
err = fmt.Errorf("unknown abci transport %s", transport)
|
||||
return nil, fmt.Errorf("unknown abci transport %s", transport)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
type ReqRes struct {
|
||||
type requestAndResponse struct {
|
||||
*types.Request
|
||||
*types.Response // Not set atomically, so be sure to use WaitGroup.
|
||||
*types.Response
|
||||
|
||||
mtx sync.Mutex
|
||||
signal chan struct{}
|
||||
cb func(*types.Response) // A single callback that may be set.
|
||||
}
|
||||
|
||||
func NewReqRes(req *types.Request) *ReqRes {
|
||||
return &ReqRes{
|
||||
func makeReqRes(req *types.Request) *requestAndResponse {
|
||||
return &requestAndResponse{
|
||||
Request: req,
|
||||
Response: nil,
|
||||
signal: make(chan struct{}),
|
||||
cb: nil,
|
||||
}
|
||||
}
|
||||
|
||||
// Sets sets the callback. If reqRes is already done, it will call the cb
|
||||
// immediately. Note, reqRes.cb should not change if reqRes.done and only one
|
||||
// callback is supported.
|
||||
func (r *ReqRes) SetCallback(cb func(res *types.Response)) {
|
||||
r.mtx.Lock()
|
||||
|
||||
select {
|
||||
case <-r.signal:
|
||||
r.mtx.Unlock()
|
||||
cb(r.Response)
|
||||
default:
|
||||
r.cb = cb
|
||||
r.mtx.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
// InvokeCallback invokes a thread-safe execution of the configured callback
|
||||
// if non-nil.
|
||||
func (r *ReqRes) InvokeCallback() {
|
||||
r.mtx.Lock()
|
||||
defer r.mtx.Unlock()
|
||||
|
||||
if r.cb != nil {
|
||||
r.cb(r.Response)
|
||||
}
|
||||
}
|
||||
|
||||
// SetDone marks the ReqRes object as done.
|
||||
func (r *ReqRes) SetDone() {
|
||||
// markDone marks the ReqRes object as done.
|
||||
func (r *requestAndResponse) markDone() {
|
||||
r.mtx.Lock()
|
||||
defer r.mtx.Unlock()
|
||||
|
||||
|
||||
@@ -1,33 +0,0 @@
|
||||
package abciclient
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/tendermint/tendermint/abci/types"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
)
|
||||
|
||||
// Creator creates new ABCI clients.
|
||||
type Creator func(log.Logger) (Client, error)
|
||||
|
||||
// NewLocalCreator returns a Creator for the given app,
|
||||
// which will be running locally.
|
||||
func NewLocalCreator(app types.Application) Creator {
|
||||
return func(logger log.Logger) (Client, error) {
|
||||
return NewLocalClient(logger, app), nil
|
||||
}
|
||||
}
|
||||
|
||||
// NewRemoteCreator returns a Creator for the given address (e.g.
|
||||
// "192.168.0.1") and transport (e.g. "tcp"). Set mustConnect to true if you
|
||||
// want the client to connect before reporting success.
|
||||
func NewRemoteCreator(logger log.Logger, addr, transport string, mustConnect bool) Creator {
|
||||
return func(log.Logger) (Client, error) {
|
||||
remoteApp, err := NewClient(logger, addr, transport, mustConnect)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to connect to proxy: %w", err)
|
||||
}
|
||||
|
||||
return remoteApp, nil
|
||||
}
|
||||
}
|
||||
@@ -7,23 +7,14 @@
|
||||
//
|
||||
// ## Socket client
|
||||
//
|
||||
// async: the client maintains an internal buffer of a fixed size. when the
|
||||
// buffer becomes full, all Async calls will return an error immediately.
|
||||
//
|
||||
// sync: the client blocks on 1) enqueuing the Sync request 2) enqueuing the
|
||||
// Flush requests 3) waiting for the Flush response
|
||||
// The client blocks for enqueuing the request, for enqueuing the
|
||||
// Flush to send the request, and for the Flush response to return.
|
||||
//
|
||||
// ## Local client
|
||||
//
|
||||
// async: global mutex is locked during each call (meaning it's not really async!)
|
||||
// sync: global mutex is locked during each call
|
||||
// The global mutex is locked during each call
|
||||
//
|
||||
// ## gRPC client
|
||||
//
|
||||
// async: gRPC is synchronous, but an internal buffer of a fixed size is used
|
||||
// to store responses and later call callbacks (separate goroutine per
|
||||
// response).
|
||||
//
|
||||
// sync: waits for all Async calls to complete (essentially what Flush does in
|
||||
// the socket client) and calls Sync method.
|
||||
// The client waits for all calls to complete.
|
||||
package abciclient
|
||||
|
||||
@@ -24,9 +24,8 @@ type grpcClient struct {
|
||||
|
||||
mustConnect bool
|
||||
|
||||
client types.ABCIApplicationClient
|
||||
conn *grpc.ClientConn
|
||||
chReqRes chan *ReqRes // dispatches "async" responses to callbacks *in order*, needed by mempool
|
||||
client types.ABCIApplicationClient
|
||||
conn *grpc.ClientConn
|
||||
|
||||
mtx sync.Mutex
|
||||
addr string
|
||||
@@ -38,25 +37,11 @@ var _ Client = (*grpcClient)(nil)
|
||||
// NewGRPCClient creates a gRPC client, which will connect to addr upon the
|
||||
// start. Note Client#Start returns an error if connection is unsuccessful and
|
||||
// mustConnect is true.
|
||||
//
|
||||
// GRPC calls are synchronous, but some callbacks expect to be called
|
||||
// asynchronously (eg. the mempool expects to be able to lock to remove bad txs
|
||||
// from cache). To accommodate, we finish each call in its own go-routine,
|
||||
// which is expensive, but easy - if you want something better, use the socket
|
||||
// protocol! maybe one day, if people really want it, we use grpc streams, but
|
||||
// hopefully not :D
|
||||
func NewGRPCClient(logger log.Logger, addr string, mustConnect bool) Client {
|
||||
cli := &grpcClient{
|
||||
logger: logger,
|
||||
addr: addr,
|
||||
mustConnect: mustConnect,
|
||||
// Buffering the channel is needed to make calls appear asynchronous,
|
||||
// which is required when the caller makes multiple async calls before
|
||||
// processing callbacks (e.g. due to holding locks). 64 means that a
|
||||
// caller can make up to 64 async calls before a callback must be
|
||||
// processed (otherwise it deadlocks). It also means that we can make 64
|
||||
// gRPC calls while processing a slow callback at the channel head.
|
||||
chReqRes: make(chan *ReqRes, 64),
|
||||
}
|
||||
cli.BaseService = *service.NewBaseService(logger, "grpcClient", cli)
|
||||
return cli
|
||||
@@ -67,35 +52,6 @@ func dialerFunc(ctx context.Context, addr string) (net.Conn, error) {
|
||||
}
|
||||
|
||||
func (cli *grpcClient) OnStart(ctx context.Context) error {
|
||||
// This processes asynchronous request/response messages and dispatches
|
||||
// them to callbacks.
|
||||
go func() {
|
||||
// Use a separate function to use defer for mutex unlocks (this handles panics)
|
||||
callCb := func(reqres *ReqRes) {
|
||||
cli.mtx.Lock()
|
||||
defer cli.mtx.Unlock()
|
||||
|
||||
reqres.SetDone()
|
||||
|
||||
// Notify reqRes listener if set
|
||||
reqres.InvokeCallback()
|
||||
}
|
||||
|
||||
for {
|
||||
select {
|
||||
case reqres := <-cli.chReqRes:
|
||||
if reqres != nil {
|
||||
callCb(reqres)
|
||||
} else {
|
||||
cli.logger.Error("Received nil reqres")
|
||||
}
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
|
||||
}
|
||||
}()
|
||||
|
||||
RETRY_LOOP:
|
||||
for {
|
||||
conn, err := grpc.Dial(cli.addr,
|
||||
@@ -135,30 +91,18 @@ RETRY_LOOP:
|
||||
}
|
||||
|
||||
func (cli *grpcClient) OnStop() {
|
||||
if cli.conn != nil {
|
||||
cli.conn.Close()
|
||||
}
|
||||
close(cli.chReqRes)
|
||||
}
|
||||
|
||||
func (cli *grpcClient) StopForError(err error) {
|
||||
if !cli.IsRunning() {
|
||||
return
|
||||
}
|
||||
|
||||
cli.mtx.Lock()
|
||||
if cli.err == nil {
|
||||
cli.err = err
|
||||
}
|
||||
cli.mtx.Unlock()
|
||||
defer cli.mtx.Unlock()
|
||||
|
||||
cli.logger.Error("Stopping abci.grpcClient for error", "err", err)
|
||||
cli.Stop()
|
||||
if cli.conn != nil {
|
||||
cli.err = cli.conn.Close()
|
||||
}
|
||||
}
|
||||
|
||||
func (cli *grpcClient) Error() error {
|
||||
cli.mtx.Lock()
|
||||
defer cli.mtx.Unlock()
|
||||
|
||||
return cli.err
|
||||
}
|
||||
|
||||
|
||||
@@ -2,7 +2,6 @@ package abciclient
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
|
||||
types "github.com/tendermint/tendermint/abci/types"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
@@ -15,8 +14,6 @@ import (
|
||||
// RPC endpoint), but defers are used everywhere for the sake of consistency.
|
||||
type localClient struct {
|
||||
service.BaseService
|
||||
|
||||
mtx sync.Mutex
|
||||
types.Application
|
||||
}
|
||||
|
||||
@@ -25,7 +22,7 @@ var _ Client = (*localClient)(nil)
|
||||
// NewLocalClient creates a local client, which will be directly calling the
|
||||
// methods of the given app.
|
||||
//
|
||||
// Both Async and Sync methods ignore the given context.Context parameter.
|
||||
// The client methods ignore their context argument.
|
||||
func NewLocalClient(logger log.Logger, app types.Application) Client {
|
||||
cli := &localClient{
|
||||
Application: app,
|
||||
@@ -36,169 +33,82 @@ func NewLocalClient(logger log.Logger, app types.Application) Client {
|
||||
|
||||
func (*localClient) OnStart(context.Context) error { return nil }
|
||||
func (*localClient) OnStop() {}
|
||||
|
||||
// TODO: change types.Application to include Error()?
|
||||
func (app *localClient) Error() error {
|
||||
return nil
|
||||
}
|
||||
func (*localClient) Error() error { return nil }
|
||||
|
||||
//-------------------------------------------------------
|
||||
|
||||
func (app *localClient) Flush(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
func (*localClient) Flush(context.Context) error { return nil }
|
||||
|
||||
func (app *localClient) Echo(ctx context.Context, msg string) (*types.ResponseEcho, error) {
|
||||
func (app *localClient) Echo(_ context.Context, msg string) (*types.ResponseEcho, error) {
|
||||
return &types.ResponseEcho{Message: msg}, nil
|
||||
}
|
||||
|
||||
func (app *localClient) Info(ctx context.Context, req types.RequestInfo) (*types.ResponseInfo, error) {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
res := app.Application.Info(req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *localClient) CheckTx(
|
||||
ctx context.Context,
|
||||
req types.RequestCheckTx,
|
||||
) (*types.ResponseCheckTx, error) {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
func (app *localClient) CheckTx(_ context.Context, req types.RequestCheckTx) (*types.ResponseCheckTx, error) {
|
||||
res := app.Application.CheckTx(req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *localClient) Query(
|
||||
ctx context.Context,
|
||||
req types.RequestQuery,
|
||||
) (*types.ResponseQuery, error) {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
func (app *localClient) Query(_ context.Context, req types.RequestQuery) (*types.ResponseQuery, error) {
|
||||
res := app.Application.Query(req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *localClient) Commit(ctx context.Context) (*types.ResponseCommit, error) {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
res := app.Application.Commit()
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *localClient) InitChain(
|
||||
ctx context.Context,
|
||||
req types.RequestInitChain,
|
||||
) (*types.ResponseInitChain, error) {
|
||||
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
func (app *localClient) InitChain(_ context.Context, req types.RequestInitChain) (*types.ResponseInitChain, error) {
|
||||
res := app.Application.InitChain(req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *localClient) ListSnapshots(
|
||||
ctx context.Context,
|
||||
req types.RequestListSnapshots,
|
||||
) (*types.ResponseListSnapshots, error) {
|
||||
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
func (app *localClient) ListSnapshots(_ context.Context, req types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
|
||||
res := app.Application.ListSnapshots(req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *localClient) OfferSnapshot(
|
||||
ctx context.Context,
|
||||
req types.RequestOfferSnapshot,
|
||||
) (*types.ResponseOfferSnapshot, error) {
|
||||
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
func (app *localClient) OfferSnapshot(_ context.Context, req types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
|
||||
res := app.Application.OfferSnapshot(req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *localClient) LoadSnapshotChunk(
|
||||
ctx context.Context,
|
||||
req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
|
||||
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
func (app *localClient) LoadSnapshotChunk(_ context.Context, req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
|
||||
res := app.Application.LoadSnapshotChunk(req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *localClient) ApplySnapshotChunk(
|
||||
ctx context.Context,
|
||||
req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
|
||||
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
func (app *localClient) ApplySnapshotChunk(_ context.Context, req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
|
||||
res := app.Application.ApplySnapshotChunk(req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *localClient) PrepareProposal(
|
||||
ctx context.Context,
|
||||
req types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
|
||||
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
func (app *localClient) PrepareProposal(_ context.Context, req types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
|
||||
res := app.Application.PrepareProposal(req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *localClient) ProcessProposal(
|
||||
ctx context.Context,
|
||||
req types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
|
||||
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
func (app *localClient) ProcessProposal(_ context.Context, req types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
|
||||
res := app.Application.ProcessProposal(req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *localClient) ExtendVote(
|
||||
ctx context.Context,
|
||||
req types.RequestExtendVote) (*types.ResponseExtendVote, error) {
|
||||
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
func (app *localClient) ExtendVote(_ context.Context, req types.RequestExtendVote) (*types.ResponseExtendVote, error) {
|
||||
res := app.Application.ExtendVote(req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *localClient) VerifyVoteExtension(
|
||||
ctx context.Context,
|
||||
req types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) {
|
||||
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
func (app *localClient) VerifyVoteExtension(_ context.Context, req types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) {
|
||||
res := app.Application.VerifyVoteExtension(req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *localClient) FinalizeBlock(
|
||||
ctx context.Context,
|
||||
req types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
|
||||
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
func (app *localClient) FinalizeBlock(_ context.Context, req types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
|
||||
res := app.Application.FinalizeBlock(req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
@@ -5,10 +5,7 @@ package mocks
|
||||
import (
|
||||
context "context"
|
||||
|
||||
abciclient "github.com/tendermint/tendermint/abci/client"
|
||||
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
|
||||
types "github.com/tendermint/tendermint/abci/types"
|
||||
)
|
||||
|
||||
@@ -63,29 +60,6 @@ func (_m *Client) CheckTx(_a0 context.Context, _a1 types.RequestCheckTx) (*types
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// CheckTxAsync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) CheckTxAsync(_a0 context.Context, _a1 types.RequestCheckTx) (*abciclient.ReqRes, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *abciclient.ReqRes
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestCheckTx) *abciclient.ReqRes); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*abciclient.ReqRes)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestCheckTx) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// Commit provides a mock function with given fields: _a0
|
||||
func (_m *Client) Commit(_a0 context.Context) (*types.ResponseCommit, error) {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
@@ -8,7 +8,6 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"reflect"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
@@ -34,12 +33,11 @@ type socketClient struct {
|
||||
mustConnect bool
|
||||
conn net.Conn
|
||||
|
||||
reqQueue chan *ReqRes
|
||||
reqQueue chan *requestAndResponse
|
||||
|
||||
mtx sync.Mutex
|
||||
err error
|
||||
reqSent *list.List // list of requests sent, waiting for response
|
||||
resCb func(*types.Request, *types.Response) // called on all requests, if set.
|
||||
reqSent *list.List // list of requests sent, waiting for response
|
||||
}
|
||||
|
||||
var _ Client = (*socketClient)(nil)
|
||||
@@ -50,11 +48,10 @@ var _ Client = (*socketClient)(nil)
|
||||
func NewSocketClient(logger log.Logger, addr string, mustConnect bool) Client {
|
||||
cli := &socketClient{
|
||||
logger: logger,
|
||||
reqQueue: make(chan *ReqRes, reqQueueSize),
|
||||
reqQueue: make(chan *requestAndResponse, reqQueueSize),
|
||||
mustConnect: mustConnect,
|
||||
addr: addr,
|
||||
reqSent: list.New(),
|
||||
resCb: nil,
|
||||
}
|
||||
cli.BaseService = *service.NewBaseService(logger, "socketClient", cli)
|
||||
return cli
|
||||
@@ -126,6 +123,7 @@ func (cli *socketClient) sendRequestsRoutine(ctx context.Context, conn io.Writer
|
||||
cli.stopForError(fmt.Errorf("write to buffer: %w", err))
|
||||
return
|
||||
}
|
||||
|
||||
if err := bw.Flush(); err != nil {
|
||||
cli.stopForError(fmt.Errorf("flush buffer: %w", err))
|
||||
return
|
||||
@@ -140,23 +138,20 @@ func (cli *socketClient) recvResponseRoutine(ctx context.Context, conn io.Reader
|
||||
if ctx.Err() != nil {
|
||||
return
|
||||
}
|
||||
var res = &types.Response{}
|
||||
err := types.ReadMessage(r, res)
|
||||
if err != nil {
|
||||
res := &types.Response{}
|
||||
|
||||
if err := types.ReadMessage(r, res); err != nil {
|
||||
cli.stopForError(fmt.Errorf("read message: %w", err))
|
||||
return
|
||||
}
|
||||
|
||||
// cli.logger.Debug("Received response", "responseType", reflect.TypeOf(res), "response", res)
|
||||
|
||||
switch r := res.Value.(type) {
|
||||
case *types.Response_Exception: // app responded with error
|
||||
// XXX After setting cli.err, release waiters (e.g. reqres.Done())
|
||||
cli.stopForError(errors.New(r.Exception.Error))
|
||||
return
|
||||
default:
|
||||
err := cli.didRecvResponse(res)
|
||||
if err != nil {
|
||||
if err := cli.didRecvResponse(res); err != nil {
|
||||
cli.stopForError(err)
|
||||
return
|
||||
}
|
||||
@@ -164,7 +159,7 @@ func (cli *socketClient) recvResponseRoutine(ctx context.Context, conn io.Reader
|
||||
}
|
||||
}
|
||||
|
||||
func (cli *socketClient) willSendReq(reqres *ReqRes) {
|
||||
func (cli *socketClient) willSendReq(reqres *requestAndResponse) {
|
||||
cli.mtx.Lock()
|
||||
defer cli.mtx.Unlock()
|
||||
cli.reqSent.PushBack(reqres)
|
||||
@@ -177,265 +172,172 @@ func (cli *socketClient) didRecvResponse(res *types.Response) error {
|
||||
// Get the first ReqRes.
|
||||
next := cli.reqSent.Front()
|
||||
if next == nil {
|
||||
return fmt.Errorf("unexpected %v when nothing expected", reflect.TypeOf(res.Value))
|
||||
return fmt.Errorf("unexpected %T when nothing expected", res.Value)
|
||||
}
|
||||
|
||||
reqres := next.Value.(*ReqRes)
|
||||
reqres := next.Value.(*requestAndResponse)
|
||||
if !resMatchesReq(reqres.Request, res) {
|
||||
return fmt.Errorf("unexpected %v when response to %v expected",
|
||||
reflect.TypeOf(res.Value), reflect.TypeOf(reqres.Request.Value))
|
||||
return fmt.Errorf("unexpected %T when response to %T expected", res.Value, reqres.Request.Value)
|
||||
}
|
||||
|
||||
reqres.Response = res
|
||||
reqres.SetDone() // release waiters
|
||||
reqres.markDone() // release waiters
|
||||
cli.reqSent.Remove(next) // pop first item from linked list
|
||||
|
||||
// Notify client listener if set (global callback).
|
||||
if cli.resCb != nil {
|
||||
cli.resCb(reqres.Request, res)
|
||||
}
|
||||
|
||||
// Notify reqRes listener if set (request specific callback).
|
||||
//
|
||||
// NOTE: It is possible this callback isn't set on the reqres object. At this
|
||||
// point, in which case it will be called after, when it is set.
|
||||
reqres.InvokeCallback()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
|
||||
func (cli *socketClient) Flush(ctx context.Context) error {
|
||||
reqRes, err := cli.queueRequest(ctx, types.ToRequestFlush(), true)
|
||||
_, err := cli.doRequest(ctx, types.ToRequestFlush())
|
||||
if err != nil {
|
||||
return queueErr(err)
|
||||
}
|
||||
|
||||
if err := cli.Error(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
select {
|
||||
case <-reqRes.signal:
|
||||
return cli.Error()
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) Echo(ctx context.Context, msg string) (*types.ResponseEcho, error) {
|
||||
reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestEcho(msg))
|
||||
res, err := cli.doRequest(ctx, types.ToRequestEcho(msg))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return reqres.Response.GetEcho(), nil
|
||||
return res.GetEcho(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) Info(
|
||||
ctx context.Context,
|
||||
req types.RequestInfo,
|
||||
) (*types.ResponseInfo, error) {
|
||||
reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestInfo(req))
|
||||
func (cli *socketClient) Info(ctx context.Context, req types.RequestInfo) (*types.ResponseInfo, error) {
|
||||
res, err := cli.doRequest(ctx, types.ToRequestInfo(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return reqres.Response.GetInfo(), nil
|
||||
return res.GetInfo(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) CheckTx(
|
||||
ctx context.Context,
|
||||
req types.RequestCheckTx,
|
||||
) (*types.ResponseCheckTx, error) {
|
||||
reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestCheckTx(req))
|
||||
func (cli *socketClient) CheckTx(ctx context.Context, req types.RequestCheckTx) (*types.ResponseCheckTx, error) {
|
||||
res, err := cli.doRequest(ctx, types.ToRequestCheckTx(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return reqres.Response.GetCheckTx(), nil
|
||||
return res.GetCheckTx(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) Query(
|
||||
ctx context.Context,
|
||||
req types.RequestQuery,
|
||||
) (*types.ResponseQuery, error) {
|
||||
reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestQuery(req))
|
||||
func (cli *socketClient) Query(ctx context.Context, req types.RequestQuery) (*types.ResponseQuery, error) {
|
||||
res, err := cli.doRequest(ctx, types.ToRequestQuery(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return reqres.Response.GetQuery(), nil
|
||||
return res.GetQuery(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) Commit(ctx context.Context) (*types.ResponseCommit, error) {
|
||||
reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestCommit())
|
||||
res, err := cli.doRequest(ctx, types.ToRequestCommit())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return reqres.Response.GetCommit(), nil
|
||||
return res.GetCommit(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) InitChain(
|
||||
ctx context.Context,
|
||||
req types.RequestInitChain,
|
||||
) (*types.ResponseInitChain, error) {
|
||||
|
||||
reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestInitChain(req))
|
||||
func (cli *socketClient) InitChain(ctx context.Context, req types.RequestInitChain) (*types.ResponseInitChain, error) {
|
||||
res, err := cli.doRequest(ctx, types.ToRequestInitChain(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return reqres.Response.GetInitChain(), nil
|
||||
return res.GetInitChain(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) ListSnapshots(
|
||||
ctx context.Context,
|
||||
req types.RequestListSnapshots,
|
||||
) (*types.ResponseListSnapshots, error) {
|
||||
|
||||
reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestListSnapshots(req))
|
||||
func (cli *socketClient) ListSnapshots(ctx context.Context, req types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
|
||||
res, err := cli.doRequest(ctx, types.ToRequestListSnapshots(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return reqres.Response.GetListSnapshots(), nil
|
||||
return res.GetListSnapshots(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) OfferSnapshot(
|
||||
ctx context.Context,
|
||||
req types.RequestOfferSnapshot,
|
||||
) (*types.ResponseOfferSnapshot, error) {
|
||||
|
||||
reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestOfferSnapshot(req))
|
||||
func (cli *socketClient) OfferSnapshot(ctx context.Context, req types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
|
||||
res, err := cli.doRequest(ctx, types.ToRequestOfferSnapshot(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return reqres.Response.GetOfferSnapshot(), nil
|
||||
return res.GetOfferSnapshot(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) LoadSnapshotChunk(
|
||||
ctx context.Context,
|
||||
req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
|
||||
|
||||
reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestLoadSnapshotChunk(req))
|
||||
func (cli *socketClient) LoadSnapshotChunk(ctx context.Context, req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
|
||||
res, err := cli.doRequest(ctx, types.ToRequestLoadSnapshotChunk(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return reqres.Response.GetLoadSnapshotChunk(), nil
|
||||
return res.GetLoadSnapshotChunk(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) ApplySnapshotChunk(
|
||||
ctx context.Context,
|
||||
req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
|
||||
|
||||
reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestApplySnapshotChunk(req))
|
||||
func (cli *socketClient) ApplySnapshotChunk(ctx context.Context, req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
|
||||
res, err := cli.doRequest(ctx, types.ToRequestApplySnapshotChunk(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return reqres.Response.GetApplySnapshotChunk(), nil
|
||||
return res.GetApplySnapshotChunk(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) PrepareProposal(
|
||||
ctx context.Context,
|
||||
req types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
|
||||
|
||||
reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestPrepareProposal(req))
|
||||
func (cli *socketClient) PrepareProposal(ctx context.Context, req types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
|
||||
res, err := cli.doRequest(ctx, types.ToRequestPrepareProposal(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return reqres.Response.GetPrepareProposal(), nil
|
||||
return res.GetPrepareProposal(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) ProcessProposal(
|
||||
ctx context.Context,
|
||||
req types.RequestProcessProposal,
|
||||
) (*types.ResponseProcessProposal, error) {
|
||||
|
||||
reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestProcessProposal(req))
|
||||
func (cli *socketClient) ProcessProposal(ctx context.Context, req types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
|
||||
res, err := cli.doRequest(ctx, types.ToRequestProcessProposal(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return reqres.Response.GetProcessProposal(), nil
|
||||
return res.GetProcessProposal(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) ExtendVote(
|
||||
ctx context.Context,
|
||||
req types.RequestExtendVote) (*types.ResponseExtendVote, error) {
|
||||
|
||||
reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestExtendVote(req))
|
||||
func (cli *socketClient) ExtendVote(ctx context.Context, req types.RequestExtendVote) (*types.ResponseExtendVote, error) {
|
||||
res, err := cli.doRequest(ctx, types.ToRequestExtendVote(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return reqres.Response.GetExtendVote(), nil
|
||||
return res.GetExtendVote(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) VerifyVoteExtension(
|
||||
ctx context.Context,
|
||||
req types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) {
|
||||
|
||||
reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestVerifyVoteExtension(req))
|
||||
func (cli *socketClient) VerifyVoteExtension(ctx context.Context, req types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) {
|
||||
res, err := cli.doRequest(ctx, types.ToRequestVerifyVoteExtension(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return reqres.Response.GetVerifyVoteExtension(), nil
|
||||
return res.GetVerifyVoteExtension(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) FinalizeBlock(
|
||||
ctx context.Context,
|
||||
req types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
|
||||
|
||||
reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestFinalizeBlock(req))
|
||||
func (cli *socketClient) FinalizeBlock(ctx context.Context, req types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
|
||||
res, err := cli.doRequest(ctx, types.ToRequestFinalizeBlock(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return reqres.Response.GetFinalizeBlock(), nil
|
||||
return res.GetFinalizeBlock(), nil
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
|
||||
// queueRequest enqueues req onto the queue. If the queue is full, it ether
|
||||
// returns an error (sync=false) or blocks (sync=true).
|
||||
//
|
||||
// When sync=true, ctx can be used to break early. When sync=false, ctx will be
|
||||
// used later to determine if request should be dropped (if ctx.Err is
|
||||
// non-nil).
|
||||
//
|
||||
// The caller is responsible for checking cli.Error.
|
||||
func (cli *socketClient) queueRequest(ctx context.Context, req *types.Request, sync bool) (*ReqRes, error) {
|
||||
reqres := NewReqRes(req)
|
||||
func (cli *socketClient) doRequest(ctx context.Context, req *types.Request) (*types.Response, error) {
|
||||
reqres := makeReqRes(req)
|
||||
|
||||
if sync {
|
||||
select {
|
||||
case cli.reqQueue <- reqres:
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
} else {
|
||||
select {
|
||||
case cli.reqQueue <- reqres:
|
||||
default:
|
||||
return nil, errors.New("buffer is full")
|
||||
select {
|
||||
case cli.reqQueue <- reqres:
|
||||
case <-ctx.Done():
|
||||
return nil, fmt.Errorf("can't queue req: %w", ctx.Err())
|
||||
}
|
||||
|
||||
select {
|
||||
case <-reqres.signal:
|
||||
if err := cli.Error(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return reqres.Response, nil
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
|
||||
return reqres, nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) queueRequestAndFlush(
|
||||
ctx context.Context,
|
||||
req *types.Request,
|
||||
) (*ReqRes, error) {
|
||||
|
||||
reqres, err := cli.queueRequest(ctx, req, true)
|
||||
if err != nil {
|
||||
return nil, queueErr(err)
|
||||
}
|
||||
|
||||
if err := cli.Flush(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return reqres, cli.Error()
|
||||
}
|
||||
|
||||
func queueErr(e error) error {
|
||||
return fmt.Errorf("can't queue req: %w", e)
|
||||
}
|
||||
|
||||
// drainQueue marks as complete and discards all remaining pending requests
|
||||
@@ -446,8 +348,8 @@ func (cli *socketClient) drainQueue(ctx context.Context) {
|
||||
|
||||
// mark all in-flight messages as resolved (they will get cli.Error())
|
||||
for req := cli.reqSent.Front(); req != nil; req = req.Next() {
|
||||
reqres := req.Value.(*ReqRes)
|
||||
reqres.SetDone()
|
||||
reqres := req.Value.(*requestAndResponse)
|
||||
reqres.markDone()
|
||||
}
|
||||
|
||||
// Mark all queued messages as resolved.
|
||||
@@ -460,7 +362,7 @@ func (cli *socketClient) drainQueue(ctx context.Context) {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case reqres := <-cli.reqQueue:
|
||||
reqres.SetDone()
|
||||
reqres.markDone()
|
||||
default:
|
||||
return
|
||||
}
|
||||
@@ -485,6 +387,8 @@ func resMatchesReq(req *types.Request, res *types.Response) (ok bool) {
|
||||
_, ok = res.Value.(*types.Response_Query)
|
||||
case *types.Request_InitChain:
|
||||
_, ok = res.Value.(*types.Response_InitChain)
|
||||
case *types.Request_ProcessProposal:
|
||||
_, ok = res.Value.(*types.Response_ProcessProposal)
|
||||
case *types.Request_PrepareProposal:
|
||||
_, ok = res.Value.(*types.Response_PrepareProposal)
|
||||
case *types.Request_ExtendVote:
|
||||
|
||||
@@ -1,85 +0,0 @@
|
||||
package abciclient_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"math/rand"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
abciclient "github.com/tendermint/tendermint/abci/client"
|
||||
"github.com/tendermint/tendermint/abci/server"
|
||||
"github.com/tendermint/tendermint/abci/types"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/libs/service"
|
||||
)
|
||||
|
||||
func TestProperSyncCalls(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
app := slowApp{}
|
||||
logger := log.NewNopLogger()
|
||||
|
||||
_, c := setupClientServer(ctx, t, logger, app)
|
||||
|
||||
resp := make(chan error, 1)
|
||||
go func() {
|
||||
rsp, err := c.FinalizeBlock(ctx, types.RequestFinalizeBlock{})
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, c.Flush(ctx))
|
||||
assert.NotNil(t, rsp)
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
case resp <- c.Error():
|
||||
}
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-time.After(time.Second):
|
||||
require.Fail(t, "No response arrived")
|
||||
case err, ok := <-resp:
|
||||
require.True(t, ok, "Must not close channel")
|
||||
assert.NoError(t, err, "This should return success")
|
||||
}
|
||||
}
|
||||
|
||||
func setupClientServer(
|
||||
ctx context.Context,
|
||||
t *testing.T,
|
||||
logger log.Logger,
|
||||
app types.Application,
|
||||
) (service.Service, abciclient.Client) {
|
||||
t.Helper()
|
||||
|
||||
// some port between 20k and 30k
|
||||
port := 20000 + rand.Int31()%10000
|
||||
addr := fmt.Sprintf("localhost:%d", port)
|
||||
|
||||
s, err := server.NewServer(logger, addr, "socket", app)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.Start(ctx))
|
||||
t.Cleanup(s.Wait)
|
||||
|
||||
c := abciclient.NewSocketClient(logger, addr, true)
|
||||
require.NoError(t, c.Start(ctx))
|
||||
t.Cleanup(c.Wait)
|
||||
|
||||
require.True(t, s.IsRunning())
|
||||
require.True(t, c.IsRunning())
|
||||
|
||||
return s, c
|
||||
}
|
||||
|
||||
type slowApp struct {
|
||||
types.BaseApplication
|
||||
}
|
||||
|
||||
func (slowApp) FinalizeBlock(req types.RequestFinalizeBlock) types.ResponseFinalizeBlock {
|
||||
time.Sleep(200 * time.Millisecond)
|
||||
return types.ResponseFinalizeBlock{}
|
||||
}
|
||||
@@ -125,7 +125,7 @@ func addCommands(cmd *cobra.Command, logger log.Logger) {
|
||||
cmd.AddCommand(consoleCmd)
|
||||
cmd.AddCommand(echoCmd)
|
||||
cmd.AddCommand(infoCmd)
|
||||
cmd.AddCommand(deliverTxCmd)
|
||||
cmd.AddCommand(finalizeBlockCmd)
|
||||
cmd.AddCommand(checkTxCmd)
|
||||
cmd.AddCommand(commitCmd)
|
||||
cmd.AddCommand(versionCmd)
|
||||
@@ -150,10 +150,9 @@ where example.file looks something like:
|
||||
|
||||
check_tx 0x00
|
||||
check_tx 0xff
|
||||
deliver_tx 0x00
|
||||
finalize_block 0x00
|
||||
check_tx 0x00
|
||||
deliver_tx 0x01
|
||||
deliver_tx 0x04
|
||||
finalize_block 0x01 0x04 0xff
|
||||
info
|
||||
`,
|
||||
Args: cobra.ExactArgs(0),
|
||||
@@ -169,7 +168,7 @@ This command opens an interactive console for running any of the other commands
|
||||
without opening a new connection each time
|
||||
`,
|
||||
Args: cobra.ExactArgs(0),
|
||||
ValidArgs: []string{"echo", "info", "deliver_tx", "check_tx", "commit", "query"},
|
||||
ValidArgs: []string{"echo", "info", "finalize_block", "check_tx", "commit", "query"},
|
||||
RunE: cmdConsole,
|
||||
}
|
||||
|
||||
@@ -188,11 +187,11 @@ var infoCmd = &cobra.Command{
|
||||
RunE: cmdInfo,
|
||||
}
|
||||
|
||||
var deliverTxCmd = &cobra.Command{
|
||||
Use: "deliver_tx",
|
||||
Short: "deliver a new transaction to the application",
|
||||
Long: "deliver a new transaction to the application",
|
||||
Args: cobra.ExactArgs(1),
|
||||
var finalizeBlockCmd = &cobra.Command{
|
||||
Use: "finalize_block",
|
||||
Short: "deliver a block of transactions to the application",
|
||||
Long: "deliver a block of transactions to the application",
|
||||
Args: cobra.MinimumNArgs(1),
|
||||
RunE: cmdFinalizeBlock,
|
||||
}
|
||||
|
||||
@@ -426,7 +425,7 @@ func muxOnCommands(cmd *cobra.Command, pArgs []string) error {
|
||||
return cmdCheckTx(cmd, actualArgs)
|
||||
case "commit":
|
||||
return cmdCommit(cmd, actualArgs)
|
||||
case "deliver_tx":
|
||||
case "finalize_block":
|
||||
return cmdFinalizeBlock(cmd, actualArgs)
|
||||
case "echo":
|
||||
return cmdEcho(cmd, actualArgs)
|
||||
@@ -500,19 +499,23 @@ func cmdFinalizeBlock(cmd *cobra.Command, args []string) error {
|
||||
if len(args) == 0 {
|
||||
printResponse(cmd, args, response{
|
||||
Code: codeBad,
|
||||
Log: "want the tx",
|
||||
Log: "Must provide at least one transaction",
|
||||
})
|
||||
return nil
|
||||
}
|
||||
txBytes, err := stringOrHexToBytes(args[0])
|
||||
txs := make([][]byte, len(args))
|
||||
for i, arg := range args {
|
||||
txBytes, err := stringOrHexToBytes(arg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
txs[i] = txBytes
|
||||
}
|
||||
res, err := client.FinalizeBlock(cmd.Context(), types.RequestFinalizeBlock{Txs: txs})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
res, err := client.FinalizeBlock(cmd.Context(), types.RequestFinalizeBlock{Txs: [][]byte{txBytes}})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, tx := range res.Txs {
|
||||
for _, tx := range res.TxResults {
|
||||
printResponse(cmd, args, response{
|
||||
Code: tx.Code,
|
||||
Data: tx.Data,
|
||||
|
||||
@@ -31,18 +31,18 @@ func init() {
|
||||
func TestKVStore(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
logger := log.NewTestingLogger(t)
|
||||
logger := log.NewNopLogger()
|
||||
|
||||
logger.Info("### Testing KVStore")
|
||||
t.Log("### Testing KVStore")
|
||||
testBulk(ctx, t, logger, kvstore.NewApplication())
|
||||
}
|
||||
|
||||
func TestBaseApp(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
logger := log.NewTestingLogger(t)
|
||||
logger := log.NewNopLogger()
|
||||
|
||||
logger.Info("### Testing BaseApp")
|
||||
t.Log("### Testing BaseApp")
|
||||
testBulk(ctx, t, logger, types.NewBaseApplication())
|
||||
}
|
||||
|
||||
@@ -50,9 +50,9 @@ func TestGRPC(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
logger := log.NewTestingLogger(t)
|
||||
logger := log.NewNopLogger()
|
||||
|
||||
logger.Info("### Testing GRPC")
|
||||
t.Log("### Testing GRPC")
|
||||
testGRPCSync(ctx, t, logger, types.NewGRPCApplication(types.NewBaseApplication()))
|
||||
}
|
||||
|
||||
@@ -84,8 +84,8 @@ func testBulk(ctx context.Context, t *testing.T, logger log.Logger, app types.Ap
|
||||
// Send bulk request
|
||||
res, err := client.FinalizeBlock(ctx, rfb)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, numDeliverTxs, len(res.Txs), "Number of txs doesn't match")
|
||||
for _, tx := range res.Txs {
|
||||
require.Equal(t, numDeliverTxs, len(res.TxResults), "Number of txs doesn't match")
|
||||
for _, tx := range res.TxResults {
|
||||
require.Equal(t, tx.Code, code.CodeTypeOK, "Tx failed")
|
||||
}
|
||||
|
||||
@@ -138,8 +138,8 @@ func testGRPCSync(ctx context.Context, t *testing.T, logger log.Logger, app type
|
||||
// Send request
|
||||
response, err := client.FinalizeBlock(ctx, &rfb)
|
||||
require.NoError(t, err, "Error in GRPC FinalizeBlock")
|
||||
require.Equal(t, numDeliverTxs, len(response.Txs), "Number of txs returned via GRPC doesn't match")
|
||||
for _, tx := range response.Txs {
|
||||
require.Equal(t, numDeliverTxs, len(response.TxResults), "Number of txs returned via GRPC doesn't match")
|
||||
for _, tx := range response.TxResults {
|
||||
require.Equal(t, tx.Code, code.CodeTypeOK, "Tx failed")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,14 +2,21 @@ package kvstore
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
dbm "github.com/tendermint/tm-db"
|
||||
|
||||
"github.com/tendermint/tendermint/abci/example/code"
|
||||
"github.com/tendermint/tendermint/abci/types"
|
||||
"github.com/tendermint/tendermint/crypto/encoding"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
cryptoproto "github.com/tendermint/tendermint/proto/tendermint/crypto"
|
||||
"github.com/tendermint/tendermint/version"
|
||||
)
|
||||
|
||||
@@ -65,17 +72,41 @@ var _ types.Application = (*Application)(nil)
|
||||
|
||||
type Application struct {
|
||||
types.BaseApplication
|
||||
|
||||
mu sync.Mutex
|
||||
state State
|
||||
RetainBlocks int64 // blocks to retain after commit (via ResponseCommit.RetainHeight)
|
||||
logger log.Logger
|
||||
|
||||
// validator set
|
||||
ValUpdates []types.ValidatorUpdate
|
||||
valAddrToPubKeyMap map[string]cryptoproto.PublicKey
|
||||
}
|
||||
|
||||
func NewApplication() *Application {
|
||||
state := loadState(dbm.NewMemDB())
|
||||
return &Application{state: state}
|
||||
return &Application{
|
||||
logger: log.NewNopLogger(),
|
||||
state: loadState(dbm.NewMemDB()),
|
||||
valAddrToPubKeyMap: make(map[string]cryptoproto.PublicKey),
|
||||
}
|
||||
}
|
||||
|
||||
func (app *Application) Info(req types.RequestInfo) (resInfo types.ResponseInfo) {
|
||||
func (app *Application) InitChain(req types.RequestInitChain) types.ResponseInitChain {
|
||||
app.mu.Lock()
|
||||
defer app.mu.Unlock()
|
||||
|
||||
for _, v := range req.Validators {
|
||||
r := app.updateValidator(v)
|
||||
if r.IsErr() {
|
||||
app.logger.Error("error updating validators", "r", r)
|
||||
panic("problem updating validators")
|
||||
}
|
||||
}
|
||||
return types.ResponseInitChain{}
|
||||
}
|
||||
|
||||
func (app *Application) Info(req types.RequestInfo) types.ResponseInfo {
|
||||
app.mu.Lock()
|
||||
defer app.mu.Unlock()
|
||||
return types.ResponseInfo{
|
||||
Data: fmt.Sprintf("{\"size\":%v}", app.state.Size),
|
||||
Version: version.ABCIVersion,
|
||||
@@ -85,8 +116,20 @@ func (app *Application) Info(req types.RequestInfo) (resInfo types.ResponseInfo)
|
||||
}
|
||||
}
|
||||
|
||||
// tx is either "key=value" or just arbitrary bytes
|
||||
func (app *Application) HandleTx(tx []byte) *types.ResponseDeliverTx {
|
||||
// tx is either "val:pubkey!power" or "key=value" or just arbitrary bytes
|
||||
func (app *Application) handleTx(tx []byte) *types.ExecTxResult {
|
||||
// if it starts with "val:", update the validator set
|
||||
// format is "val:pubkey!power"
|
||||
if isValidatorTx(tx) {
|
||||
// update validators in the merkle tree
|
||||
// and in app.ValUpdates
|
||||
return app.execValidatorTx(tx)
|
||||
}
|
||||
|
||||
if isPrepareTx(tx) {
|
||||
return app.execPrepareTx(tx)
|
||||
}
|
||||
|
||||
var key, value string
|
||||
parts := bytes.Split(tx, []byte("="))
|
||||
if len(parts) == 2 {
|
||||
@@ -113,22 +156,56 @@ func (app *Application) HandleTx(tx []byte) *types.ResponseDeliverTx {
|
||||
},
|
||||
}
|
||||
|
||||
return &types.ResponseDeliverTx{Code: code.CodeTypeOK, Events: events}
|
||||
return &types.ExecTxResult{Code: code.CodeTypeOK, Events: events}
|
||||
}
|
||||
|
||||
func (app *Application) Close() error {
|
||||
app.mu.Lock()
|
||||
defer app.mu.Unlock()
|
||||
|
||||
return app.state.db.Close()
|
||||
}
|
||||
|
||||
func (app *Application) FinalizeBlock(req types.RequestFinalizeBlock) types.ResponseFinalizeBlock {
|
||||
txs := make([]*types.ResponseDeliverTx, len(req.Txs))
|
||||
for i, tx := range req.Txs {
|
||||
txs[i] = app.HandleTx(tx)
|
||||
app.mu.Lock()
|
||||
defer app.mu.Unlock()
|
||||
|
||||
// reset valset changes
|
||||
app.ValUpdates = make([]types.ValidatorUpdate, 0)
|
||||
|
||||
// Punish validators who committed equivocation.
|
||||
for _, ev := range req.ByzantineValidators {
|
||||
if ev.Type == types.EvidenceType_DUPLICATE_VOTE {
|
||||
addr := string(ev.Validator.Address)
|
||||
if pubKey, ok := app.valAddrToPubKeyMap[addr]; ok {
|
||||
app.updateValidator(types.ValidatorUpdate{
|
||||
PubKey: pubKey,
|
||||
Power: ev.Validator.Power - 1,
|
||||
})
|
||||
app.logger.Info("Decreased val power by 1 because of the equivocation",
|
||||
"val", addr)
|
||||
} else {
|
||||
panic(fmt.Errorf("wanted to punish val %q but can't find it", addr))
|
||||
}
|
||||
}
|
||||
}
|
||||
return types.ResponseFinalizeBlock{Txs: txs}
|
||||
|
||||
respTxs := make([]*types.ExecTxResult, len(req.Txs))
|
||||
for i, tx := range req.Txs {
|
||||
respTxs[i] = app.handleTx(tx)
|
||||
}
|
||||
|
||||
return types.ResponseFinalizeBlock{TxResults: respTxs, ValidatorUpdates: app.ValUpdates}
|
||||
}
|
||||
|
||||
func (app *Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
|
||||
func (*Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
|
||||
return types.ResponseCheckTx{Code: code.CodeTypeOK, GasWanted: 1}
|
||||
}
|
||||
|
||||
func (app *Application) Commit() types.ResponseCommit {
|
||||
app.mu.Lock()
|
||||
defer app.mu.Unlock()
|
||||
|
||||
// Using a memdb - just return the big endian size of the db
|
||||
appHash := make([]byte, 8)
|
||||
binary.PutVarint(appHash, app.state.Size)
|
||||
@@ -144,43 +221,242 @@ func (app *Application) Commit() types.ResponseCommit {
|
||||
}
|
||||
|
||||
// Returns an associated value or nil if missing.
|
||||
func (app *Application) Query(reqQuery types.RequestQuery) (resQuery types.ResponseQuery) {
|
||||
func (app *Application) Query(reqQuery types.RequestQuery) types.ResponseQuery {
|
||||
app.mu.Lock()
|
||||
defer app.mu.Unlock()
|
||||
|
||||
if reqQuery.Path == "/val" {
|
||||
key := []byte("val:" + string(reqQuery.Data))
|
||||
value, err := app.state.db.Get(key)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return types.ResponseQuery{
|
||||
Key: reqQuery.Data,
|
||||
Value: value,
|
||||
}
|
||||
}
|
||||
|
||||
if reqQuery.Prove {
|
||||
value, err := app.state.db.Get(prefixKey(reqQuery.Data))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
resQuery := types.ResponseQuery{
|
||||
Index: -1,
|
||||
Key: reqQuery.Data,
|
||||
Value: value,
|
||||
Height: app.state.Height,
|
||||
}
|
||||
|
||||
if value == nil {
|
||||
resQuery.Log = "does not exist"
|
||||
} else {
|
||||
resQuery.Log = "exists"
|
||||
}
|
||||
resQuery.Index = -1 // TODO make Proof return index
|
||||
resQuery.Key = reqQuery.Data
|
||||
resQuery.Value = value
|
||||
resQuery.Height = app.state.Height
|
||||
|
||||
return
|
||||
return resQuery
|
||||
}
|
||||
|
||||
resQuery.Key = reqQuery.Data
|
||||
value, err := app.state.db.Get(prefixKey(reqQuery.Data))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
resQuery := types.ResponseQuery{
|
||||
Key: reqQuery.Data,
|
||||
Value: value,
|
||||
Height: app.state.Height,
|
||||
}
|
||||
|
||||
if value == nil {
|
||||
resQuery.Log = "does not exist"
|
||||
} else {
|
||||
resQuery.Log = "exists"
|
||||
}
|
||||
resQuery.Value = value
|
||||
resQuery.Height = app.state.Height
|
||||
|
||||
return resQuery
|
||||
}
|
||||
|
||||
func (app *Application) PrepareProposal(
|
||||
req types.RequestPrepareProposal) types.ResponsePrepareProposal {
|
||||
func (app *Application) PrepareProposal(req types.RequestPrepareProposal) types.ResponsePrepareProposal {
|
||||
app.mu.Lock()
|
||||
defer app.mu.Unlock()
|
||||
|
||||
return types.ResponsePrepareProposal{
|
||||
BlockData: req.BlockData}
|
||||
ModifiedTxStatus: types.ResponsePrepareProposal_MODIFIED,
|
||||
TxRecords: app.substPrepareTx(req.Txs),
|
||||
}
|
||||
}
|
||||
|
||||
func (*Application) ProcessProposal(req types.RequestProcessProposal) types.ResponseProcessProposal {
|
||||
for _, tx := range req.Txs {
|
||||
if len(tx) == 0 {
|
||||
return types.ResponseProcessProposal{Status: types.ResponseProcessProposal_REJECT}
|
||||
}
|
||||
}
|
||||
return types.ResponseProcessProposal{Status: types.ResponseProcessProposal_ACCEPT}
|
||||
}
|
||||
|
||||
//---------------------------------------------
|
||||
// update validators
|
||||
|
||||
func (app *Application) Validators() (validators []types.ValidatorUpdate) {
|
||||
app.mu.Lock()
|
||||
defer app.mu.Unlock()
|
||||
|
||||
itr, err := app.state.db.Iterator(nil, nil)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
for ; itr.Valid(); itr.Next() {
|
||||
if isValidatorTx(itr.Key()) {
|
||||
validator := new(types.ValidatorUpdate)
|
||||
err := types.ReadMessage(bytes.NewBuffer(itr.Value()), validator)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
validators = append(validators, *validator)
|
||||
}
|
||||
}
|
||||
if err = itr.Error(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func MakeValSetChangeTx(pubkey cryptoproto.PublicKey, power int64) []byte {
|
||||
pk, err := encoding.PubKeyFromProto(pubkey)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
pubStr := base64.StdEncoding.EncodeToString(pk.Bytes())
|
||||
return []byte(fmt.Sprintf("val:%s!%d", pubStr, power))
|
||||
}
|
||||
|
||||
func isValidatorTx(tx []byte) bool {
|
||||
return strings.HasPrefix(string(tx), ValidatorSetChangePrefix)
|
||||
}
|
||||
|
||||
// format is "val:pubkey!power"
|
||||
// pubkey is a base64-encoded 32-byte ed25519 key
|
||||
func (app *Application) execValidatorTx(tx []byte) *types.ExecTxResult {
|
||||
tx = tx[len(ValidatorSetChangePrefix):]
|
||||
|
||||
// get the pubkey and power
|
||||
pubKeyAndPower := strings.Split(string(tx), "!")
|
||||
if len(pubKeyAndPower) != 2 {
|
||||
return &types.ExecTxResult{
|
||||
Code: code.CodeTypeEncodingError,
|
||||
Log: fmt.Sprintf("Expected 'pubkey!power'. Got %v", pubKeyAndPower)}
|
||||
}
|
||||
pubkeyS, powerS := pubKeyAndPower[0], pubKeyAndPower[1]
|
||||
|
||||
// decode the pubkey
|
||||
pubkey, err := base64.StdEncoding.DecodeString(pubkeyS)
|
||||
if err != nil {
|
||||
return &types.ExecTxResult{
|
||||
Code: code.CodeTypeEncodingError,
|
||||
Log: fmt.Sprintf("Pubkey (%s) is invalid base64", pubkeyS)}
|
||||
}
|
||||
|
||||
// decode the power
|
||||
power, err := strconv.ParseInt(powerS, 10, 64)
|
||||
if err != nil {
|
||||
return &types.ExecTxResult{
|
||||
Code: code.CodeTypeEncodingError,
|
||||
Log: fmt.Sprintf("Power (%s) is not an int", powerS)}
|
||||
}
|
||||
|
||||
// update
|
||||
return app.updateValidator(types.UpdateValidator(pubkey, power, ""))
|
||||
}
|
||||
|
||||
// add, update, or remove a validator
|
||||
func (app *Application) updateValidator(v types.ValidatorUpdate) *types.ExecTxResult {
|
||||
pubkey, err := encoding.PubKeyFromProto(v.PubKey)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("can't decode public key: %w", err))
|
||||
}
|
||||
key := []byte("val:" + string(pubkey.Bytes()))
|
||||
|
||||
if v.Power == 0 {
|
||||
// remove validator
|
||||
hasKey, err := app.state.db.Has(key)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if !hasKey {
|
||||
pubStr := base64.StdEncoding.EncodeToString(pubkey.Bytes())
|
||||
return &types.ExecTxResult{
|
||||
Code: code.CodeTypeUnauthorized,
|
||||
Log: fmt.Sprintf("Cannot remove non-existent validator %s", pubStr)}
|
||||
}
|
||||
if err = app.state.db.Delete(key); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
delete(app.valAddrToPubKeyMap, string(pubkey.Address()))
|
||||
} else {
|
||||
// add or update validator
|
||||
value := bytes.NewBuffer(make([]byte, 0))
|
||||
if err := types.WriteMessage(&v, value); err != nil {
|
||||
return &types.ExecTxResult{
|
||||
Code: code.CodeTypeEncodingError,
|
||||
Log: fmt.Sprintf("error encoding validator: %v", err)}
|
||||
}
|
||||
if err = app.state.db.Set(key, value.Bytes()); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
app.valAddrToPubKeyMap[string(pubkey.Address())] = v.PubKey
|
||||
}
|
||||
|
||||
// we only update the changes array if we successfully updated the tree
|
||||
app.ValUpdates = append(app.ValUpdates, v)
|
||||
|
||||
return &types.ExecTxResult{Code: code.CodeTypeOK}
|
||||
}
|
||||
|
||||
// -----------------------------
|
||||
// prepare proposal machinery
|
||||
|
||||
const PreparePrefix = "prepare"
|
||||
|
||||
func isPrepareTx(tx []byte) bool {
|
||||
return bytes.HasPrefix(tx, []byte(PreparePrefix))
|
||||
}
|
||||
|
||||
// execPrepareTx is noop. tx data is considered as placeholder
|
||||
// and is substitute at the PrepareProposal.
|
||||
func (app *Application) execPrepareTx(tx []byte) *types.ExecTxResult {
|
||||
// noop
|
||||
return &types.ExecTxResult{}
|
||||
}
|
||||
|
||||
// substPrepareTx substitutes all the transactions prefixed with 'prepare' in the
|
||||
// proposal for transactions with the prefix strips.
|
||||
// It marks all of the original transactions as 'REMOVED' so that
|
||||
// Tendermint will remove them from its mempool.
|
||||
func (app *Application) substPrepareTx(blockData [][]byte) []*types.TxRecord {
|
||||
trs := make([]*types.TxRecord, len(blockData))
|
||||
var removed []*types.TxRecord
|
||||
for i, tx := range blockData {
|
||||
if isPrepareTx(tx) {
|
||||
removed = append(removed, &types.TxRecord{
|
||||
Tx: tx,
|
||||
Action: types.TxRecord_REMOVED,
|
||||
})
|
||||
trs[i] = &types.TxRecord{
|
||||
Tx: bytes.TrimPrefix(tx, []byte(PreparePrefix)),
|
||||
Action: types.TxRecord_ADDED,
|
||||
}
|
||||
continue
|
||||
}
|
||||
trs[i] = &types.TxRecord{
|
||||
Tx: tx,
|
||||
Action: types.TxRecord_UNMODIFIED,
|
||||
}
|
||||
}
|
||||
|
||||
return append(trs, removed...)
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
"github.com/fortytw2/leaktest"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
@@ -26,12 +27,12 @@ const (
|
||||
func testKVStore(t *testing.T, app types.Application, tx []byte, key, value string) {
|
||||
req := types.RequestFinalizeBlock{Txs: [][]byte{tx}}
|
||||
ar := app.FinalizeBlock(req)
|
||||
require.Equal(t, 1, len(ar.Txs))
|
||||
require.False(t, ar.Txs[0].IsErr())
|
||||
require.Equal(t, 1, len(ar.TxResults))
|
||||
require.False(t, ar.TxResults[0].IsErr())
|
||||
// repeating tx doesn't raise error
|
||||
ar = app.FinalizeBlock(req)
|
||||
require.Equal(t, 1, len(ar.Txs))
|
||||
require.False(t, ar.Txs[0].IsErr())
|
||||
require.Equal(t, 1, len(ar.TxResults))
|
||||
require.False(t, ar.TxResults[0].IsErr())
|
||||
// commit
|
||||
app.Commit()
|
||||
|
||||
@@ -74,7 +75,7 @@ func TestKVStoreKV(t *testing.T) {
|
||||
|
||||
func TestPersistentKVStoreKV(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
logger := log.NewTestingLogger(t)
|
||||
logger := log.NewNopLogger()
|
||||
|
||||
kvstore := NewPersistentKVStoreApplication(logger, dir)
|
||||
key := testKey
|
||||
@@ -89,7 +90,7 @@ func TestPersistentKVStoreKV(t *testing.T) {
|
||||
|
||||
func TestPersistentKVStoreInfo(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
logger := log.NewTestingLogger(t)
|
||||
logger := log.NewNopLogger()
|
||||
|
||||
kvstore := NewPersistentKVStoreApplication(logger, dir)
|
||||
InitKVStore(kvstore)
|
||||
@@ -106,7 +107,7 @@ func TestPersistentKVStoreInfo(t *testing.T) {
|
||||
header := tmproto.Header{
|
||||
Height: height,
|
||||
}
|
||||
kvstore.FinalizeBlock(types.RequestFinalizeBlock{Hash: hash, Header: header, Height: height})
|
||||
kvstore.FinalizeBlock(types.RequestFinalizeBlock{Hash: hash, Header: header})
|
||||
kvstore.Commit()
|
||||
|
||||
resInfo = kvstore.Info(types.RequestInfo{})
|
||||
@@ -118,10 +119,7 @@ func TestPersistentKVStoreInfo(t *testing.T) {
|
||||
|
||||
// add a validator, remove a validator, update a validator
|
||||
func TestValUpdates(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
logger := log.NewTestingLogger(t)
|
||||
|
||||
kvstore := NewPersistentKVStoreApplication(logger, dir)
|
||||
kvstore := NewApplication()
|
||||
|
||||
// init with some validators
|
||||
total := 10
|
||||
@@ -198,7 +196,6 @@ func makeApplyBlock(
|
||||
resFinalizeBlock := kvstore.FinalizeBlock(types.RequestFinalizeBlock{
|
||||
Hash: hash,
|
||||
Header: header,
|
||||
Height: height,
|
||||
Txs: txs,
|
||||
})
|
||||
|
||||
@@ -210,6 +207,7 @@ func makeApplyBlock(
|
||||
|
||||
// order doesn't matter
|
||||
func valsEqual(t *testing.T, vals1, vals2 []types.ValidatorUpdate) {
|
||||
t.Helper()
|
||||
if len(vals1) != len(vals2) {
|
||||
t.Fatalf("vals dont match in len. got %d, expected %d", len(vals2), len(vals1))
|
||||
}
|
||||
@@ -231,9 +229,11 @@ func makeSocketClientServer(
|
||||
app types.Application,
|
||||
name string,
|
||||
) (abciclient.Client, service.Service, error) {
|
||||
t.Helper()
|
||||
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
t.Cleanup(cancel)
|
||||
t.Cleanup(leaktest.Check(t))
|
||||
|
||||
// Start the listener
|
||||
socket := fmt.Sprintf("unix://%s.sock", name)
|
||||
@@ -263,6 +263,8 @@ func makeGRPCClientServer(
|
||||
) (abciclient.Client, service.Service, error) {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
t.Cleanup(cancel)
|
||||
t.Cleanup(leaktest.Check(t))
|
||||
|
||||
// Start the listener
|
||||
socket := fmt.Sprintf("unix://%s.sock", name)
|
||||
|
||||
@@ -286,7 +288,7 @@ func makeGRPCClientServer(
|
||||
func TestClientServer(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
logger := log.NewTestingLogger(t)
|
||||
logger := log.NewNopLogger()
|
||||
|
||||
// set up socket app
|
||||
kvstore := NewApplication()
|
||||
@@ -323,13 +325,13 @@ func runClientTests(ctx context.Context, t *testing.T, client abciclient.Client)
|
||||
func testClient(ctx context.Context, t *testing.T, app abciclient.Client, tx []byte, key, value string) {
|
||||
ar, err := app.FinalizeBlock(ctx, types.RequestFinalizeBlock{Txs: [][]byte{tx}})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(ar.Txs))
|
||||
require.False(t, ar.Txs[0].IsErr())
|
||||
require.Equal(t, 1, len(ar.TxResults))
|
||||
require.False(t, ar.TxResults[0].IsErr())
|
||||
// repeating FinalizeBlock doesn't raise error
|
||||
ar, err = app.FinalizeBlock(ctx, types.RequestFinalizeBlock{Txs: [][]byte{tx}})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(ar.Txs))
|
||||
require.False(t, ar.Txs[0].IsErr())
|
||||
require.Equal(t, 1, len(ar.TxResults))
|
||||
require.False(t, ar.TxResults[0].IsErr())
|
||||
// commit
|
||||
_, err = app.Commit(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -2,16 +2,10 @@ package kvstore
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
dbm "github.com/tendermint/tm-db"
|
||||
|
||||
"github.com/tendermint/tendermint/abci/example/code"
|
||||
"github.com/tendermint/tendermint/abci/types"
|
||||
"github.com/tendermint/tendermint/crypto/encoding"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
cryptoproto "github.com/tendermint/tendermint/proto/tendermint/crypto"
|
||||
ptypes "github.com/tendermint/tendermint/proto/tendermint/types"
|
||||
@@ -26,325 +20,42 @@ const (
|
||||
var _ types.Application = (*PersistentKVStoreApplication)(nil)
|
||||
|
||||
type PersistentKVStoreApplication struct {
|
||||
app *Application
|
||||
|
||||
// validator set
|
||||
ValUpdates []types.ValidatorUpdate
|
||||
|
||||
valAddrToPubKeyMap map[string]cryptoproto.PublicKey
|
||||
|
||||
logger log.Logger
|
||||
*Application
|
||||
}
|
||||
|
||||
func NewPersistentKVStoreApplication(logger log.Logger, dbDir string) *PersistentKVStoreApplication {
|
||||
name := "kvstore"
|
||||
db, err := dbm.NewGoLevelDB(name, dbDir)
|
||||
db, err := dbm.NewGoLevelDB("kvstore", dbDir)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
state := loadState(db)
|
||||
|
||||
return &PersistentKVStoreApplication{
|
||||
app: &Application{state: state},
|
||||
valAddrToPubKeyMap: make(map[string]cryptoproto.PublicKey),
|
||||
logger: logger,
|
||||
Application: &Application{
|
||||
valAddrToPubKeyMap: make(map[string]cryptoproto.PublicKey),
|
||||
state: loadState(db),
|
||||
logger: logger,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (app *PersistentKVStoreApplication) Close() error {
|
||||
return app.app.state.db.Close()
|
||||
}
|
||||
|
||||
func (app *PersistentKVStoreApplication) Info(req types.RequestInfo) types.ResponseInfo {
|
||||
res := app.app.Info(req)
|
||||
res.LastBlockHeight = app.app.state.Height
|
||||
res.LastBlockAppHash = app.app.state.AppHash
|
||||
return res
|
||||
}
|
||||
|
||||
// tx is either "val:pubkey!power" or "key=value" or just arbitrary bytes
|
||||
func (app *PersistentKVStoreApplication) HandleTx(tx []byte) *types.ResponseDeliverTx {
|
||||
// if it starts with "val:", update the validator set
|
||||
// format is "val:pubkey!power"
|
||||
if isValidatorTx(tx) {
|
||||
// update validators in the merkle tree
|
||||
// and in app.ValUpdates
|
||||
return app.execValidatorTx(tx)
|
||||
}
|
||||
|
||||
if isPrepareTx(tx) {
|
||||
return app.execPrepareTx(tx)
|
||||
}
|
||||
|
||||
// otherwise, update the key-value store
|
||||
return app.app.HandleTx(tx)
|
||||
}
|
||||
|
||||
func (app *PersistentKVStoreApplication) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
|
||||
return app.app.CheckTx(req)
|
||||
}
|
||||
|
||||
// Commit will panic if InitChain was not called
|
||||
func (app *PersistentKVStoreApplication) Commit() types.ResponseCommit {
|
||||
return app.app.Commit()
|
||||
}
|
||||
|
||||
// When path=/val and data={validator address}, returns the validator update (types.ValidatorUpdate) varint encoded.
|
||||
// For any other path, returns an associated value or nil if missing.
|
||||
func (app *PersistentKVStoreApplication) Query(reqQuery types.RequestQuery) (resQuery types.ResponseQuery) {
|
||||
switch reqQuery.Path {
|
||||
case "/val":
|
||||
key := []byte("val:" + string(reqQuery.Data))
|
||||
value, err := app.app.state.db.Get(key)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
resQuery.Key = reqQuery.Data
|
||||
resQuery.Value = value
|
||||
return
|
||||
default:
|
||||
return app.app.Query(reqQuery)
|
||||
}
|
||||
}
|
||||
|
||||
// Save the validators in the merkle tree
|
||||
func (app *PersistentKVStoreApplication) InitChain(req types.RequestInitChain) types.ResponseInitChain {
|
||||
for _, v := range req.Validators {
|
||||
r := app.updateValidator(v)
|
||||
if r.IsErr() {
|
||||
app.logger.Error("error updating validators", "r", r)
|
||||
}
|
||||
}
|
||||
return types.ResponseInitChain{}
|
||||
}
|
||||
|
||||
// Track the block hash and header information
|
||||
// Execute transactions
|
||||
// Update the validator set
|
||||
func (app *PersistentKVStoreApplication) FinalizeBlock(req types.RequestFinalizeBlock) types.ResponseFinalizeBlock {
|
||||
// reset valset changes
|
||||
app.ValUpdates = make([]types.ValidatorUpdate, 0)
|
||||
|
||||
// Punish validators who committed equivocation.
|
||||
for _, ev := range req.ByzantineValidators {
|
||||
if ev.Type == types.EvidenceType_DUPLICATE_VOTE {
|
||||
addr := string(ev.Validator.Address)
|
||||
if pubKey, ok := app.valAddrToPubKeyMap[addr]; ok {
|
||||
app.updateValidator(types.ValidatorUpdate{
|
||||
PubKey: pubKey,
|
||||
Power: ev.Validator.Power - 1,
|
||||
})
|
||||
app.logger.Info("Decreased val power by 1 because of the equivocation",
|
||||
"val", addr)
|
||||
} else {
|
||||
app.logger.Error("Wanted to punish val, but can't find it",
|
||||
"val", addr)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
respTxs := make([]*types.ResponseDeliverTx, len(req.Txs))
|
||||
for i, tx := range req.Txs {
|
||||
respTxs[i] = app.HandleTx(tx)
|
||||
}
|
||||
|
||||
return types.ResponseFinalizeBlock{Txs: respTxs, ValidatorUpdates: app.ValUpdates}
|
||||
}
|
||||
|
||||
func (app *PersistentKVStoreApplication) ListSnapshots(
|
||||
req types.RequestListSnapshots) types.ResponseListSnapshots {
|
||||
return types.ResponseListSnapshots{}
|
||||
}
|
||||
|
||||
func (app *PersistentKVStoreApplication) LoadSnapshotChunk(
|
||||
req types.RequestLoadSnapshotChunk) types.ResponseLoadSnapshotChunk {
|
||||
return types.ResponseLoadSnapshotChunk{}
|
||||
}
|
||||
|
||||
func (app *PersistentKVStoreApplication) OfferSnapshot(
|
||||
req types.RequestOfferSnapshot) types.ResponseOfferSnapshot {
|
||||
func (app *PersistentKVStoreApplication) OfferSnapshot(req types.RequestOfferSnapshot) types.ResponseOfferSnapshot {
|
||||
return types.ResponseOfferSnapshot{Result: types.ResponseOfferSnapshot_ABORT}
|
||||
}
|
||||
|
||||
func (app *PersistentKVStoreApplication) ApplySnapshotChunk(
|
||||
req types.RequestApplySnapshotChunk) types.ResponseApplySnapshotChunk {
|
||||
func (app *PersistentKVStoreApplication) ApplySnapshotChunk(req types.RequestApplySnapshotChunk) types.ResponseApplySnapshotChunk {
|
||||
return types.ResponseApplySnapshotChunk{Result: types.ResponseApplySnapshotChunk_ABORT}
|
||||
}
|
||||
|
||||
func (app *PersistentKVStoreApplication) ExtendVote(
|
||||
req types.RequestExtendVote) types.ResponseExtendVote {
|
||||
return types.ResponseExtendVote{
|
||||
VoteExtension: ConstructVoteExtension(req.Vote.ValidatorAddress),
|
||||
}
|
||||
func (app *PersistentKVStoreApplication) ExtendVote(req types.RequestExtendVote) types.ResponseExtendVote {
|
||||
return types.ResponseExtendVote{VoteExtension: ConstructVoteExtension(req.Vote.ValidatorAddress)}
|
||||
}
|
||||
|
||||
func (app *PersistentKVStoreApplication) VerifyVoteExtension(
|
||||
req types.RequestVerifyVoteExtension) types.ResponseVerifyVoteExtension {
|
||||
return types.RespondVerifyVoteExtension(
|
||||
app.verifyExtension(req.Vote.ValidatorAddress, req.Vote.VoteExtension))
|
||||
}
|
||||
|
||||
func (app *PersistentKVStoreApplication) PrepareProposal(
|
||||
req types.RequestPrepareProposal) types.ResponsePrepareProposal {
|
||||
return types.ResponsePrepareProposal{BlockData: app.substPrepareTx(req.BlockData)}
|
||||
}
|
||||
|
||||
func (app *PersistentKVStoreApplication) ProcessProposal(
|
||||
req types.RequestProcessProposal) types.ResponseProcessProposal {
|
||||
for _, tx := range req.Txs {
|
||||
if len(tx) == 0 {
|
||||
return types.ResponseProcessProposal{Result: types.ResponseProcessProposal_REJECT}
|
||||
}
|
||||
}
|
||||
return types.ResponseProcessProposal{Result: types.ResponseProcessProposal_ACCEPT}
|
||||
}
|
||||
|
||||
//---------------------------------------------
|
||||
// update validators
|
||||
|
||||
func (app *PersistentKVStoreApplication) Validators() (validators []types.ValidatorUpdate) {
|
||||
itr, err := app.app.state.db.Iterator(nil, nil)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
for ; itr.Valid(); itr.Next() {
|
||||
if isValidatorTx(itr.Key()) {
|
||||
validator := new(types.ValidatorUpdate)
|
||||
err := types.ReadMessage(bytes.NewBuffer(itr.Value()), validator)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
validators = append(validators, *validator)
|
||||
}
|
||||
}
|
||||
if err = itr.Error(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func MakeValSetChangeTx(pubkey cryptoproto.PublicKey, power int64) []byte {
|
||||
pk, err := encoding.PubKeyFromProto(pubkey)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
pubStr := base64.StdEncoding.EncodeToString(pk.Bytes())
|
||||
return []byte(fmt.Sprintf("val:%s!%d", pubStr, power))
|
||||
}
|
||||
|
||||
func isValidatorTx(tx []byte) bool {
|
||||
return strings.HasPrefix(string(tx), ValidatorSetChangePrefix)
|
||||
}
|
||||
|
||||
// format is "val:pubkey!power"
|
||||
// pubkey is a base64-encoded 32-byte ed25519 key
|
||||
func (app *PersistentKVStoreApplication) execValidatorTx(tx []byte) *types.ResponseDeliverTx {
|
||||
tx = tx[len(ValidatorSetChangePrefix):]
|
||||
|
||||
// get the pubkey and power
|
||||
pubKeyAndPower := strings.Split(string(tx), "!")
|
||||
if len(pubKeyAndPower) != 2 {
|
||||
return &types.ResponseDeliverTx{
|
||||
Code: code.CodeTypeEncodingError,
|
||||
Log: fmt.Sprintf("Expected 'pubkey!power'. Got %v", pubKeyAndPower)}
|
||||
}
|
||||
pubkeyS, powerS := pubKeyAndPower[0], pubKeyAndPower[1]
|
||||
|
||||
// decode the pubkey
|
||||
pubkey, err := base64.StdEncoding.DecodeString(pubkeyS)
|
||||
if err != nil {
|
||||
return &types.ResponseDeliverTx{
|
||||
Code: code.CodeTypeEncodingError,
|
||||
Log: fmt.Sprintf("Pubkey (%s) is invalid base64", pubkeyS)}
|
||||
}
|
||||
|
||||
// decode the power
|
||||
power, err := strconv.ParseInt(powerS, 10, 64)
|
||||
if err != nil {
|
||||
return &types.ResponseDeliverTx{
|
||||
Code: code.CodeTypeEncodingError,
|
||||
Log: fmt.Sprintf("Power (%s) is not an int", powerS)}
|
||||
}
|
||||
|
||||
// update
|
||||
return app.updateValidator(types.UpdateValidator(pubkey, power, ""))
|
||||
}
|
||||
|
||||
// add, update, or remove a validator
|
||||
func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate) *types.ResponseDeliverTx {
|
||||
pubkey, err := encoding.PubKeyFromProto(v.PubKey)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("can't decode public key: %w", err))
|
||||
}
|
||||
key := []byte("val:" + string(pubkey.Bytes()))
|
||||
|
||||
if v.Power == 0 {
|
||||
// remove validator
|
||||
hasKey, err := app.app.state.db.Has(key)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if !hasKey {
|
||||
pubStr := base64.StdEncoding.EncodeToString(pubkey.Bytes())
|
||||
return &types.ResponseDeliverTx{
|
||||
Code: code.CodeTypeUnauthorized,
|
||||
Log: fmt.Sprintf("Cannot remove non-existent validator %s", pubStr)}
|
||||
}
|
||||
if err = app.app.state.db.Delete(key); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
delete(app.valAddrToPubKeyMap, string(pubkey.Address()))
|
||||
} else {
|
||||
// add or update validator
|
||||
value := bytes.NewBuffer(make([]byte, 0))
|
||||
if err := types.WriteMessage(&v, value); err != nil {
|
||||
return &types.ResponseDeliverTx{
|
||||
Code: code.CodeTypeEncodingError,
|
||||
Log: fmt.Sprintf("error encoding validator: %v", err)}
|
||||
}
|
||||
if err = app.app.state.db.Set(key, value.Bytes()); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
app.valAddrToPubKeyMap[string(pubkey.Address())] = v.PubKey
|
||||
}
|
||||
|
||||
// we only update the changes array if we successfully updated the tree
|
||||
app.ValUpdates = append(app.ValUpdates, v)
|
||||
|
||||
return &types.ResponseDeliverTx{Code: code.CodeTypeOK}
|
||||
func (app *PersistentKVStoreApplication) VerifyVoteExtension(req types.RequestVerifyVoteExtension) types.ResponseVerifyVoteExtension {
|
||||
return types.RespondVerifyVoteExtension(app.verifyExtension(req.Vote.ValidatorAddress, req.Vote.VoteExtension))
|
||||
}
|
||||
|
||||
// -----------------------------
|
||||
|
||||
const PreparePrefix = "prepare"
|
||||
|
||||
func isPrepareTx(tx []byte) bool {
|
||||
return strings.HasPrefix(string(tx), PreparePrefix)
|
||||
}
|
||||
|
||||
// execPrepareTx is noop. tx data is considered as placeholder
|
||||
// and is substitute at the PrepareProposal.
|
||||
func (app *PersistentKVStoreApplication) execPrepareTx(tx []byte) *types.ResponseDeliverTx {
|
||||
// noop
|
||||
return &types.ResponseDeliverTx{}
|
||||
}
|
||||
|
||||
// substPrepareTx subst all the preparetx in the blockdata
|
||||
// to null string(could be any arbitrary string).
|
||||
func (app *PersistentKVStoreApplication) substPrepareTx(blockData [][]byte) [][]byte {
|
||||
// TODO: this mechanism will change with the current spec of PrepareProposal
|
||||
// We now have a special type for marking a tx as changed
|
||||
for i, tx := range blockData {
|
||||
if isPrepareTx(tx) {
|
||||
blockData[i] = make([]byte, len(tx))
|
||||
}
|
||||
}
|
||||
|
||||
return blockData
|
||||
}
|
||||
|
||||
func ConstructVoteExtension(valAddr []byte) *ptypes.VoteExtension {
|
||||
return &ptypes.VoteExtension{
|
||||
AppDataToSign: valAddr,
|
||||
|
||||
@@ -16,10 +16,9 @@ type GRPCServer struct {
|
||||
service.BaseService
|
||||
logger log.Logger
|
||||
|
||||
proto string
|
||||
addr string
|
||||
listener net.Listener
|
||||
server *grpc.Server
|
||||
proto string
|
||||
addr string
|
||||
server *grpc.Server
|
||||
|
||||
app types.ABCIApplicationServer
|
||||
}
|
||||
@@ -28,11 +27,10 @@ type GRPCServer struct {
|
||||
func NewGRPCServer(logger log.Logger, protoAddr string, app types.ABCIApplicationServer) service.Service {
|
||||
proto, addr := tmnet.ProtocolAndAddress(protoAddr)
|
||||
s := &GRPCServer{
|
||||
logger: logger,
|
||||
proto: proto,
|
||||
addr: addr,
|
||||
listener: nil,
|
||||
app: app,
|
||||
logger: logger,
|
||||
proto: proto,
|
||||
addr: addr,
|
||||
app: app,
|
||||
}
|
||||
s.BaseService = *service.NewBaseService(logger, "ABCIServer", s)
|
||||
return s
|
||||
@@ -40,13 +38,11 @@ func NewGRPCServer(logger log.Logger, protoAddr string, app types.ABCIApplicatio
|
||||
|
||||
// OnStart starts the gRPC service.
|
||||
func (s *GRPCServer) OnStart(ctx context.Context) error {
|
||||
|
||||
ln, err := net.Listen(s.proto, s.addr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.listener = ln
|
||||
s.server = grpc.NewServer()
|
||||
types.RegisterABCIApplicationServer(s.server, s.app)
|
||||
|
||||
@@ -57,7 +53,7 @@ func (s *GRPCServer) OnStart(ctx context.Context) error {
|
||||
s.server.GracefulStop()
|
||||
}()
|
||||
|
||||
if err := s.server.Serve(s.listener); err != nil {
|
||||
if err := s.server.Serve(ln); err != nil {
|
||||
s.logger.Error("error serving gRPC server", "err", err)
|
||||
}
|
||||
}()
|
||||
@@ -65,6 +61,4 @@ func (s *GRPCServer) OnStart(ctx context.Context) error {
|
||||
}
|
||||
|
||||
// OnStop stops the gRPC server.
|
||||
func (s *GRPCServer) OnStop() {
|
||||
s.server.Stop()
|
||||
}
|
||||
func (s *GRPCServer) OnStop() { s.server.Stop() }
|
||||
|
||||
@@ -3,6 +3,7 @@ package server
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
@@ -26,22 +27,21 @@ type SocketServer struct {
|
||||
listener net.Listener
|
||||
|
||||
connsMtx sync.Mutex
|
||||
conns map[int]net.Conn
|
||||
connsClose map[int]func()
|
||||
nextConnID int
|
||||
|
||||
appMtx sync.Mutex
|
||||
app types.Application
|
||||
app types.Application
|
||||
}
|
||||
|
||||
func NewSocketServer(logger log.Logger, protoAddr string, app types.Application) service.Service {
|
||||
proto, addr := tmnet.ProtocolAndAddress(protoAddr)
|
||||
s := &SocketServer{
|
||||
logger: logger,
|
||||
proto: proto,
|
||||
addr: addr,
|
||||
listener: nil,
|
||||
app: app,
|
||||
conns: make(map[int]net.Conn),
|
||||
logger: logger,
|
||||
proto: proto,
|
||||
addr: addr,
|
||||
listener: nil,
|
||||
app: app,
|
||||
connsClose: make(map[int]func()),
|
||||
}
|
||||
s.BaseService = *service.NewBaseService(logger, "ABCIServer", s)
|
||||
return s
|
||||
@@ -67,44 +67,35 @@ func (s *SocketServer) OnStop() {
|
||||
s.connsMtx.Lock()
|
||||
defer s.connsMtx.Unlock()
|
||||
|
||||
for id, conn := range s.conns {
|
||||
delete(s.conns, id)
|
||||
if err := conn.Close(); err != nil {
|
||||
s.logger.Error("error closing connection", "id", id, "conn", conn, "err", err)
|
||||
}
|
||||
for _, closer := range s.connsClose {
|
||||
closer()
|
||||
}
|
||||
}
|
||||
|
||||
func (s *SocketServer) addConn(conn net.Conn) int {
|
||||
func (s *SocketServer) addConn(closer func()) int {
|
||||
s.connsMtx.Lock()
|
||||
defer s.connsMtx.Unlock()
|
||||
|
||||
connID := s.nextConnID
|
||||
s.nextConnID++
|
||||
s.conns[connID] = conn
|
||||
|
||||
s.connsClose[connID] = closer
|
||||
return connID
|
||||
}
|
||||
|
||||
// deletes conn even if close errs
|
||||
func (s *SocketServer) rmConn(connID int) error {
|
||||
func (s *SocketServer) rmConn(connID int) {
|
||||
s.connsMtx.Lock()
|
||||
defer s.connsMtx.Unlock()
|
||||
|
||||
conn, ok := s.conns[connID]
|
||||
if !ok {
|
||||
return fmt.Errorf("connection %d does not exist", connID)
|
||||
if closer, ok := s.connsClose[connID]; ok {
|
||||
closer()
|
||||
delete(s.connsClose, connID)
|
||||
}
|
||||
|
||||
delete(s.conns, connID)
|
||||
return conn.Close()
|
||||
}
|
||||
|
||||
func (s *SocketServer) acceptConnectionsRoutine(ctx context.Context) {
|
||||
for {
|
||||
if ctx.Err() != nil {
|
||||
return
|
||||
|
||||
}
|
||||
|
||||
// Accept a connection
|
||||
@@ -118,149 +109,134 @@ func (s *SocketServer) acceptConnectionsRoutine(ctx context.Context) {
|
||||
continue
|
||||
}
|
||||
|
||||
s.logger.Info("Accepted a new connection")
|
||||
cctx, ccancel := context.WithCancel(ctx)
|
||||
connID := s.addConn(ccancel)
|
||||
|
||||
connID := s.addConn(conn)
|
||||
s.logger.Info("Accepted a new connection", "id", connID)
|
||||
|
||||
closeConn := make(chan error, 2) // Push to signal connection closed
|
||||
responses := make(chan *types.Response, 1000) // A channel to buffer responses
|
||||
|
||||
once := &sync.Once{}
|
||||
closer := func(err error) {
|
||||
ccancel()
|
||||
once.Do(func() {
|
||||
if cerr := conn.Close(); err != nil {
|
||||
s.logger.Error("error closing connection",
|
||||
"id", connID,
|
||||
"close_err", cerr,
|
||||
"err", err)
|
||||
}
|
||||
s.rmConn(connID)
|
||||
|
||||
switch {
|
||||
case errors.Is(err, context.Canceled):
|
||||
s.logger.Error("Connection terminated",
|
||||
"id", connID,
|
||||
"err", err)
|
||||
case errors.Is(err, context.DeadlineExceeded):
|
||||
s.logger.Error("Connection encountered timeout",
|
||||
"id", connID,
|
||||
"err", err)
|
||||
case errors.Is(err, io.EOF):
|
||||
s.logger.Error("Connection was closed by client",
|
||||
"id", connID)
|
||||
case err != nil:
|
||||
s.logger.Error("Connection error",
|
||||
"id", connID,
|
||||
"err", err)
|
||||
default:
|
||||
s.logger.Error("Connection was closed",
|
||||
"id", connID)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// Read requests from conn and deal with them
|
||||
go s.handleRequests(ctx, closeConn, conn, responses)
|
||||
go s.handleRequests(cctx, closer, conn, responses)
|
||||
// Pull responses from 'responses' and write them to conn.
|
||||
go s.handleResponses(ctx, closeConn, conn, responses)
|
||||
|
||||
// Wait until signal to close connection
|
||||
go s.waitForClose(ctx, closeConn, connID)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *SocketServer) waitForClose(ctx context.Context, closeConn chan error, connID int) {
|
||||
defer func() {
|
||||
// Close the connection
|
||||
if err := s.rmConn(connID); err != nil {
|
||||
s.logger.Error("error closing connection", "err", err)
|
||||
}
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case err := <-closeConn:
|
||||
switch {
|
||||
case err == io.EOF:
|
||||
s.logger.Error("Connection was closed by client")
|
||||
case err != nil:
|
||||
s.logger.Error("Connection error", "err", err)
|
||||
default:
|
||||
// never happens
|
||||
s.logger.Error("Connection was closed")
|
||||
}
|
||||
go s.handleResponses(cctx, closer, conn, responses)
|
||||
}
|
||||
}
|
||||
|
||||
// Read requests from conn and deal with them
|
||||
func (s *SocketServer) handleRequests(
|
||||
ctx context.Context,
|
||||
closeConn chan error,
|
||||
closer func(error),
|
||||
conn io.Reader,
|
||||
responses chan<- *types.Response,
|
||||
) {
|
||||
var count int
|
||||
var bufReader = bufio.NewReader(conn)
|
||||
|
||||
defer func() {
|
||||
// make sure to recover from any app-related panics to allow proper socket cleanup
|
||||
r := recover()
|
||||
if r != nil {
|
||||
if r := recover(); r != nil {
|
||||
const size = 64 << 10
|
||||
buf := make([]byte, size)
|
||||
buf = buf[:runtime.Stack(buf, false)]
|
||||
err := fmt.Errorf("recovered from panic: %v\n%s", r, buf)
|
||||
closeConn <- err
|
||||
s.appMtx.Unlock()
|
||||
closer(fmt.Errorf("recovered from panic: %v\n%s", r, buf))
|
||||
}
|
||||
}()
|
||||
|
||||
for {
|
||||
if ctx.Err() != nil {
|
||||
req := &types.Request{}
|
||||
if err := types.ReadMessage(bufReader, req); err != nil {
|
||||
closer(fmt.Errorf("error reading message: %w", err))
|
||||
return
|
||||
}
|
||||
|
||||
var req = &types.Request{}
|
||||
err := types.ReadMessage(bufReader, req)
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
closeConn <- err
|
||||
} else {
|
||||
closeConn <- fmt.Errorf("error reading message: %w", err)
|
||||
}
|
||||
resp := s.processRequest(req)
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
closer(ctx.Err())
|
||||
return
|
||||
case responses <- resp:
|
||||
}
|
||||
s.appMtx.Lock()
|
||||
count++
|
||||
s.handleRequest(req, responses)
|
||||
s.appMtx.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types.Response) {
|
||||
func (s *SocketServer) processRequest(req *types.Request) *types.Response {
|
||||
switch r := req.Value.(type) {
|
||||
case *types.Request_Echo:
|
||||
responses <- types.ToResponseEcho(r.Echo.Message)
|
||||
return types.ToResponseEcho(r.Echo.Message)
|
||||
case *types.Request_Flush:
|
||||
responses <- types.ToResponseFlush()
|
||||
return types.ToResponseFlush()
|
||||
case *types.Request_Info:
|
||||
res := s.app.Info(*r.Info)
|
||||
responses <- types.ToResponseInfo(res)
|
||||
return types.ToResponseInfo(s.app.Info(*r.Info))
|
||||
case *types.Request_CheckTx:
|
||||
res := s.app.CheckTx(*r.CheckTx)
|
||||
responses <- types.ToResponseCheckTx(res)
|
||||
return types.ToResponseCheckTx(s.app.CheckTx(*r.CheckTx))
|
||||
case *types.Request_Commit:
|
||||
res := s.app.Commit()
|
||||
responses <- types.ToResponseCommit(res)
|
||||
return types.ToResponseCommit(s.app.Commit())
|
||||
case *types.Request_Query:
|
||||
res := s.app.Query(*r.Query)
|
||||
responses <- types.ToResponseQuery(res)
|
||||
return types.ToResponseQuery(s.app.Query(*r.Query))
|
||||
case *types.Request_InitChain:
|
||||
res := s.app.InitChain(*r.InitChain)
|
||||
responses <- types.ToResponseInitChain(res)
|
||||
return types.ToResponseInitChain(s.app.InitChain(*r.InitChain))
|
||||
case *types.Request_ListSnapshots:
|
||||
res := s.app.ListSnapshots(*r.ListSnapshots)
|
||||
responses <- types.ToResponseListSnapshots(res)
|
||||
return types.ToResponseListSnapshots(s.app.ListSnapshots(*r.ListSnapshots))
|
||||
case *types.Request_OfferSnapshot:
|
||||
res := s.app.OfferSnapshot(*r.OfferSnapshot)
|
||||
responses <- types.ToResponseOfferSnapshot(res)
|
||||
return types.ToResponseOfferSnapshot(s.app.OfferSnapshot(*r.OfferSnapshot))
|
||||
case *types.Request_PrepareProposal:
|
||||
res := s.app.PrepareProposal(*r.PrepareProposal)
|
||||
responses <- types.ToResponsePrepareProposal(res)
|
||||
return types.ToResponsePrepareProposal(s.app.PrepareProposal(*r.PrepareProposal))
|
||||
case *types.Request_ProcessProposal:
|
||||
res := s.app.ProcessProposal(*r.ProcessProposal)
|
||||
responses <- types.ToResponseProcessProposal(res)
|
||||
return types.ToResponseProcessProposal(s.app.ProcessProposal(*r.ProcessProposal))
|
||||
case *types.Request_LoadSnapshotChunk:
|
||||
res := s.app.LoadSnapshotChunk(*r.LoadSnapshotChunk)
|
||||
responses <- types.ToResponseLoadSnapshotChunk(res)
|
||||
return types.ToResponseLoadSnapshotChunk(s.app.LoadSnapshotChunk(*r.LoadSnapshotChunk))
|
||||
case *types.Request_ApplySnapshotChunk:
|
||||
res := s.app.ApplySnapshotChunk(*r.ApplySnapshotChunk)
|
||||
responses <- types.ToResponseApplySnapshotChunk(res)
|
||||
return types.ToResponseApplySnapshotChunk(s.app.ApplySnapshotChunk(*r.ApplySnapshotChunk))
|
||||
case *types.Request_ExtendVote:
|
||||
res := s.app.ExtendVote(*r.ExtendVote)
|
||||
responses <- types.ToResponseExtendVote(res)
|
||||
return types.ToResponseExtendVote(s.app.ExtendVote(*r.ExtendVote))
|
||||
case *types.Request_VerifyVoteExtension:
|
||||
res := s.app.VerifyVoteExtension(*r.VerifyVoteExtension)
|
||||
responses <- types.ToResponseVerifyVoteExtension(res)
|
||||
return types.ToResponseVerifyVoteExtension(s.app.VerifyVoteExtension(*r.VerifyVoteExtension))
|
||||
case *types.Request_FinalizeBlock:
|
||||
res := s.app.FinalizeBlock(*r.FinalizeBlock)
|
||||
responses <- types.ToResponseFinalizeBlock(res)
|
||||
return types.ToResponseFinalizeBlock(s.app.FinalizeBlock(*r.FinalizeBlock))
|
||||
default:
|
||||
responses <- types.ToResponseException("Unknown request")
|
||||
return types.ToResponseException("Unknown request")
|
||||
}
|
||||
}
|
||||
|
||||
// Pull responses from 'responses' and write them to conn.
|
||||
func (s *SocketServer) handleResponses(
|
||||
ctx context.Context,
|
||||
closeConn chan error,
|
||||
closer func(error),
|
||||
conn io.Writer,
|
||||
responses <-chan *types.Response,
|
||||
) {
|
||||
@@ -268,21 +244,15 @@ func (s *SocketServer) handleResponses(
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
closer(ctx.Err())
|
||||
return
|
||||
case res := <-responses:
|
||||
if err := types.WriteMessage(res, bw); err != nil {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
case closeConn <- fmt.Errorf("error writing message: %w", err):
|
||||
}
|
||||
closer(fmt.Errorf("error writing message: %w", err))
|
||||
return
|
||||
}
|
||||
if err := bw.Flush(); err != nil {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
case closeConn <- fmt.Errorf("error flushing write buffer: %w", err):
|
||||
}
|
||||
|
||||
closer(fmt.Errorf("error writing message: %w", err))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
@@ -51,7 +51,7 @@ func Commit(ctx context.Context, client abciclient.Client, hashExp []byte) error
|
||||
|
||||
func FinalizeBlock(ctx context.Context, client abciclient.Client, txBytes [][]byte, codeExp []uint32, dataExp []byte) error {
|
||||
res, _ := client.FinalizeBlock(ctx, types.RequestFinalizeBlock{Txs: txBytes})
|
||||
for i, tx := range res.Txs {
|
||||
for i, tx := range res.TxResults {
|
||||
code, data, log := tx.Code, tx.Data, tx.Log
|
||||
if code != codeExp[i] {
|
||||
fmt.Println("Failed test: FinalizeBlock")
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
echo hello
|
||||
info
|
||||
commit
|
||||
deliver_tx "abc"
|
||||
finalize_block "abc"
|
||||
info
|
||||
commit
|
||||
query "abc"
|
||||
deliver_tx "def=xyz"
|
||||
finalize_block "def=xyz" "ghi=123"
|
||||
commit
|
||||
query "def"
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
-> code: OK
|
||||
-> data.hex: 0x0000000000000000
|
||||
|
||||
> deliver_tx "abc"
|
||||
> finalize_block "abc"
|
||||
-> code: OK
|
||||
|
||||
> info
|
||||
@@ -33,12 +33,14 @@
|
||||
-> value: abc
|
||||
-> value.hex: 616263
|
||||
|
||||
> deliver_tx "def=xyz"
|
||||
> finalize_block "def=xyz" "ghi=123"
|
||||
-> code: OK
|
||||
> finalize_block "def=xyz" "ghi=123"
|
||||
-> code: OK
|
||||
|
||||
> commit
|
||||
-> code: OK
|
||||
-> data.hex: 0x0400000000000000
|
||||
-> data.hex: 0x0600000000000000
|
||||
|
||||
> query "def"
|
||||
-> code: OK
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
check_tx 0x00
|
||||
check_tx 0xff
|
||||
deliver_tx 0x00
|
||||
finalize_block 0x00
|
||||
check_tx 0x00
|
||||
deliver_tx 0x01
|
||||
deliver_tx 0x04
|
||||
finalize_block 0x01
|
||||
finalize_block 0x04
|
||||
info
|
||||
|
||||
@@ -4,20 +4,20 @@
|
||||
> check_tx 0xff
|
||||
-> code: OK
|
||||
|
||||
> deliver_tx 0x00
|
||||
> finalize_block 0x00
|
||||
-> code: OK
|
||||
|
||||
> check_tx 0x00
|
||||
-> code: OK
|
||||
|
||||
> deliver_tx 0x01
|
||||
> finalize_block 0x01
|
||||
-> code: OK
|
||||
|
||||
> deliver_tx 0x04
|
||||
> finalize_block 0x04
|
||||
-> code: OK
|
||||
|
||||
> info
|
||||
-> code: OK
|
||||
-> data: {"hashes":0,"txs":3}
|
||||
-> data.hex: 0x7B22686173686573223A302C22747873223A337D
|
||||
-> data: {"size":3}
|
||||
-> data.hex: 0x7B2273697A65223A337D
|
||||
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
)
|
||||
|
||||
//go:generate ../../scripts/mockery_generate.sh Application
|
||||
// Application is an interface that enables any finite, deterministic state machine
|
||||
// to be driven by a blockchain-based replication engine via the ABCI.
|
||||
// All methods take a RequestXxx argument and return a ResponseXxx argument,
|
||||
@@ -41,8 +42,7 @@ type Application interface {
|
||||
|
||||
var _ Application = (*BaseApplication)(nil)
|
||||
|
||||
type BaseApplication struct {
|
||||
}
|
||||
type BaseApplication struct{}
|
||||
|
||||
func NewBaseApplication() *BaseApplication {
|
||||
return &BaseApplication{}
|
||||
@@ -66,7 +66,7 @@ func (BaseApplication) ExtendVote(req RequestExtendVote) ResponseExtendVote {
|
||||
|
||||
func (BaseApplication) VerifyVoteExtension(req RequestVerifyVoteExtension) ResponseVerifyVoteExtension {
|
||||
return ResponseVerifyVoteExtension{
|
||||
Result: ResponseVerifyVoteExtension_ACCEPT,
|
||||
Status: ResponseVerifyVoteExtension_ACCEPT,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -95,20 +95,20 @@ func (BaseApplication) ApplySnapshotChunk(req RequestApplySnapshotChunk) Respons
|
||||
}
|
||||
|
||||
func (BaseApplication) PrepareProposal(req RequestPrepareProposal) ResponsePrepareProposal {
|
||||
return ResponsePrepareProposal{}
|
||||
return ResponsePrepareProposal{ModifiedTxStatus: ResponsePrepareProposal_UNMODIFIED}
|
||||
}
|
||||
|
||||
func (BaseApplication) ProcessProposal(req RequestProcessProposal) ResponseProcessProposal {
|
||||
return ResponseProcessProposal{}
|
||||
return ResponseProcessProposal{Status: ResponseProcessProposal_ACCEPT}
|
||||
}
|
||||
|
||||
func (BaseApplication) FinalizeBlock(req RequestFinalizeBlock) ResponseFinalizeBlock {
|
||||
txs := make([]*ResponseDeliverTx, len(req.Txs))
|
||||
txs := make([]*ExecTxResult, len(req.Txs))
|
||||
for i := range req.Txs {
|
||||
txs[i] = &ResponseDeliverTx{Code: CodeTypeOK}
|
||||
txs[i] = &ExecTxResult{Code: CodeTypeOK}
|
||||
}
|
||||
return ResponseFinalizeBlock{
|
||||
Txs: txs,
|
||||
TxResults: txs,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -13,7 +13,7 @@ import (
|
||||
)
|
||||
|
||||
func TestMarshalJSON(t *testing.T) {
|
||||
b, err := json.Marshal(&ResponseDeliverTx{})
|
||||
b, err := json.Marshal(&ExecTxResult{Code: 1})
|
||||
assert.NoError(t, err)
|
||||
// include empty fields.
|
||||
assert.True(t, strings.Contains(string(b), "code"))
|
||||
|
||||
209
abci/types/mocks/application.go
Normal file
209
abci/types/mocks/application.go
Normal file
@@ -0,0 +1,209 @@
|
||||
// Code generated by mockery. DO NOT EDIT.
|
||||
|
||||
package mocks
|
||||
|
||||
import (
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
types "github.com/tendermint/tendermint/abci/types"
|
||||
)
|
||||
|
||||
// Application is an autogenerated mock type for the Application type
|
||||
type Application struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
// ApplySnapshotChunk provides a mock function with given fields: _a0
|
||||
func (_m *Application) ApplySnapshotChunk(_a0 types.RequestApplySnapshotChunk) types.ResponseApplySnapshotChunk {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 types.ResponseApplySnapshotChunk
|
||||
if rf, ok := ret.Get(0).(func(types.RequestApplySnapshotChunk) types.ResponseApplySnapshotChunk); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
r0 = ret.Get(0).(types.ResponseApplySnapshotChunk)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// CheckTx provides a mock function with given fields: _a0
|
||||
func (_m *Application) CheckTx(_a0 types.RequestCheckTx) types.ResponseCheckTx {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 types.ResponseCheckTx
|
||||
if rf, ok := ret.Get(0).(func(types.RequestCheckTx) types.ResponseCheckTx); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
r0 = ret.Get(0).(types.ResponseCheckTx)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// Commit provides a mock function with given fields:
|
||||
func (_m *Application) Commit() types.ResponseCommit {
|
||||
ret := _m.Called()
|
||||
|
||||
var r0 types.ResponseCommit
|
||||
if rf, ok := ret.Get(0).(func() types.ResponseCommit); ok {
|
||||
r0 = rf()
|
||||
} else {
|
||||
r0 = ret.Get(0).(types.ResponseCommit)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// ExtendVote provides a mock function with given fields: _a0
|
||||
func (_m *Application) ExtendVote(_a0 types.RequestExtendVote) types.ResponseExtendVote {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 types.ResponseExtendVote
|
||||
if rf, ok := ret.Get(0).(func(types.RequestExtendVote) types.ResponseExtendVote); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
r0 = ret.Get(0).(types.ResponseExtendVote)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// FinalizeBlock provides a mock function with given fields: _a0
|
||||
func (_m *Application) FinalizeBlock(_a0 types.RequestFinalizeBlock) types.ResponseFinalizeBlock {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 types.ResponseFinalizeBlock
|
||||
if rf, ok := ret.Get(0).(func(types.RequestFinalizeBlock) types.ResponseFinalizeBlock); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
r0 = ret.Get(0).(types.ResponseFinalizeBlock)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// Info provides a mock function with given fields: _a0
|
||||
func (_m *Application) Info(_a0 types.RequestInfo) types.ResponseInfo {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 types.ResponseInfo
|
||||
if rf, ok := ret.Get(0).(func(types.RequestInfo) types.ResponseInfo); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
r0 = ret.Get(0).(types.ResponseInfo)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// InitChain provides a mock function with given fields: _a0
|
||||
func (_m *Application) InitChain(_a0 types.RequestInitChain) types.ResponseInitChain {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 types.ResponseInitChain
|
||||
if rf, ok := ret.Get(0).(func(types.RequestInitChain) types.ResponseInitChain); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
r0 = ret.Get(0).(types.ResponseInitChain)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// ListSnapshots provides a mock function with given fields: _a0
|
||||
func (_m *Application) ListSnapshots(_a0 types.RequestListSnapshots) types.ResponseListSnapshots {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 types.ResponseListSnapshots
|
||||
if rf, ok := ret.Get(0).(func(types.RequestListSnapshots) types.ResponseListSnapshots); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
r0 = ret.Get(0).(types.ResponseListSnapshots)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// LoadSnapshotChunk provides a mock function with given fields: _a0
|
||||
func (_m *Application) LoadSnapshotChunk(_a0 types.RequestLoadSnapshotChunk) types.ResponseLoadSnapshotChunk {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 types.ResponseLoadSnapshotChunk
|
||||
if rf, ok := ret.Get(0).(func(types.RequestLoadSnapshotChunk) types.ResponseLoadSnapshotChunk); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
r0 = ret.Get(0).(types.ResponseLoadSnapshotChunk)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// OfferSnapshot provides a mock function with given fields: _a0
|
||||
func (_m *Application) OfferSnapshot(_a0 types.RequestOfferSnapshot) types.ResponseOfferSnapshot {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 types.ResponseOfferSnapshot
|
||||
if rf, ok := ret.Get(0).(func(types.RequestOfferSnapshot) types.ResponseOfferSnapshot); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
r0 = ret.Get(0).(types.ResponseOfferSnapshot)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// PrepareProposal provides a mock function with given fields: _a0
|
||||
func (_m *Application) PrepareProposal(_a0 types.RequestPrepareProposal) types.ResponsePrepareProposal {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 types.ResponsePrepareProposal
|
||||
if rf, ok := ret.Get(0).(func(types.RequestPrepareProposal) types.ResponsePrepareProposal); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
r0 = ret.Get(0).(types.ResponsePrepareProposal)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// ProcessProposal provides a mock function with given fields: _a0
|
||||
func (_m *Application) ProcessProposal(_a0 types.RequestProcessProposal) types.ResponseProcessProposal {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 types.ResponseProcessProposal
|
||||
if rf, ok := ret.Get(0).(func(types.RequestProcessProposal) types.ResponseProcessProposal); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
r0 = ret.Get(0).(types.ResponseProcessProposal)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// Query provides a mock function with given fields: _a0
|
||||
func (_m *Application) Query(_a0 types.RequestQuery) types.ResponseQuery {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 types.ResponseQuery
|
||||
if rf, ok := ret.Get(0).(func(types.RequestQuery) types.ResponseQuery); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
r0 = ret.Get(0).(types.ResponseQuery)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// VerifyVoteExtension provides a mock function with given fields: _a0
|
||||
func (_m *Application) VerifyVoteExtension(_a0 types.RequestVerifyVoteExtension) types.ResponseVerifyVoteExtension {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 types.ResponseVerifyVoteExtension
|
||||
if rf, ok := ret.Get(0).(func(types.RequestVerifyVoteExtension) types.ResponseVerifyVoteExtension); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
r0 = ret.Get(0).(types.ResponseVerifyVoteExtension)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
175
abci/types/mocks/base.go
Normal file
175
abci/types/mocks/base.go
Normal file
@@ -0,0 +1,175 @@
|
||||
package mocks
|
||||
|
||||
import (
|
||||
types "github.com/tendermint/tendermint/abci/types"
|
||||
)
|
||||
|
||||
// BaseMock provides a wrapper around the generated Application mock and a BaseApplication.
|
||||
// BaseMock first tries to use the mock's implementation of the method.
|
||||
// If no functionality was provided for the mock by the user, BaseMock dispatches
|
||||
// to the BaseApplication and uses its functionality.
|
||||
// BaseMock allows users to provide mocked functionality for only the methods that matter
|
||||
// for their test while avoiding a panic if the code calls Application methods that are
|
||||
// not relevant to the test.
|
||||
type BaseMock struct {
|
||||
base *types.BaseApplication
|
||||
*Application
|
||||
}
|
||||
|
||||
func NewBaseMock() BaseMock {
|
||||
return BaseMock{
|
||||
base: types.NewBaseApplication(),
|
||||
Application: new(Application),
|
||||
}
|
||||
}
|
||||
|
||||
// Info/Query Connection
|
||||
// Return application info
|
||||
func (m BaseMock) Info(input types.RequestInfo) (ret types.ResponseInfo) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ret = m.base.Info(input)
|
||||
}
|
||||
}()
|
||||
ret = m.Application.Info(input)
|
||||
return ret
|
||||
}
|
||||
|
||||
func (m BaseMock) Query(input types.RequestQuery) (ret types.ResponseQuery) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ret = m.base.Query(input)
|
||||
}
|
||||
}()
|
||||
ret = m.Application.Query(input)
|
||||
return ret
|
||||
}
|
||||
|
||||
// Mempool Connection
|
||||
// Validate a tx for the mempool
|
||||
func (m BaseMock) CheckTx(input types.RequestCheckTx) (ret types.ResponseCheckTx) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ret = m.base.CheckTx(input)
|
||||
}
|
||||
}()
|
||||
ret = m.Application.CheckTx(input)
|
||||
return ret
|
||||
}
|
||||
|
||||
// Consensus Connection
|
||||
// Initialize blockchain w validators/other info from TendermintCore
|
||||
func (m BaseMock) InitChain(input types.RequestInitChain) (ret types.ResponseInitChain) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ret = m.base.InitChain(input)
|
||||
}
|
||||
}()
|
||||
ret = m.Application.InitChain(input)
|
||||
return ret
|
||||
}
|
||||
|
||||
func (m BaseMock) PrepareProposal(input types.RequestPrepareProposal) (ret types.ResponsePrepareProposal) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ret = m.base.PrepareProposal(input)
|
||||
}
|
||||
}()
|
||||
ret = m.Application.PrepareProposal(input)
|
||||
return ret
|
||||
}
|
||||
|
||||
func (m BaseMock) ProcessProposal(input types.RequestProcessProposal) (ret types.ResponseProcessProposal) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ret = m.base.ProcessProposal(input)
|
||||
}
|
||||
}()
|
||||
ret = m.Application.ProcessProposal(input)
|
||||
return ret
|
||||
}
|
||||
|
||||
// Commit the state and return the application Merkle root hash
|
||||
func (m BaseMock) Commit() (ret types.ResponseCommit) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ret = m.base.Commit()
|
||||
}
|
||||
}()
|
||||
ret = m.Application.Commit()
|
||||
return ret
|
||||
}
|
||||
|
||||
// Create application specific vote extension
|
||||
func (m BaseMock) ExtendVote(input types.RequestExtendVote) (ret types.ResponseExtendVote) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ret = m.base.ExtendVote(input)
|
||||
}
|
||||
}()
|
||||
ret = m.Application.ExtendVote(input)
|
||||
return ret
|
||||
}
|
||||
|
||||
// Verify application's vote extension data
|
||||
func (m BaseMock) VerifyVoteExtension(input types.RequestVerifyVoteExtension) (ret types.ResponseVerifyVoteExtension) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ret = m.base.VerifyVoteExtension(input)
|
||||
}
|
||||
}()
|
||||
ret = m.Application.VerifyVoteExtension(input)
|
||||
return ret
|
||||
}
|
||||
|
||||
// State Sync Connection
|
||||
// List available snapshots
|
||||
func (m BaseMock) ListSnapshots(input types.RequestListSnapshots) (ret types.ResponseListSnapshots) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ret = m.base.ListSnapshots(input)
|
||||
}
|
||||
}()
|
||||
ret = m.Application.ListSnapshots(input)
|
||||
return ret
|
||||
}
|
||||
|
||||
func (m BaseMock) OfferSnapshot(input types.RequestOfferSnapshot) (ret types.ResponseOfferSnapshot) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ret = m.base.OfferSnapshot(input)
|
||||
}
|
||||
}()
|
||||
ret = m.Application.OfferSnapshot(input)
|
||||
return ret
|
||||
}
|
||||
|
||||
func (m BaseMock) LoadSnapshotChunk(input types.RequestLoadSnapshotChunk) (ret types.ResponseLoadSnapshotChunk) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ret = m.base.LoadSnapshotChunk(input)
|
||||
}
|
||||
}()
|
||||
ret = m.Application.LoadSnapshotChunk(input)
|
||||
return ret
|
||||
}
|
||||
|
||||
func (m BaseMock) ApplySnapshotChunk(input types.RequestApplySnapshotChunk) (ret types.ResponseApplySnapshotChunk) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ret = m.base.ApplySnapshotChunk(input)
|
||||
}
|
||||
}()
|
||||
ret = m.Application.ApplySnapshotChunk(input)
|
||||
return ret
|
||||
}
|
||||
|
||||
func (m BaseMock) FinalizeBlock(input types.RequestFinalizeBlock) (ret types.ResponseFinalizeBlock) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ret = m.base.FinalizeBlock(input)
|
||||
}
|
||||
}()
|
||||
ret = m.Application.FinalizeBlock(input)
|
||||
return ret
|
||||
}
|
||||
@@ -33,6 +33,16 @@ func (r ResponseDeliverTx) IsErr() bool {
|
||||
return r.Code != CodeTypeOK
|
||||
}
|
||||
|
||||
// IsOK returns true if Code is OK.
|
||||
func (r ExecTxResult) IsOK() bool {
|
||||
return r.Code == CodeTypeOK
|
||||
}
|
||||
|
||||
// IsErr returns true if Code is something other than OK.
|
||||
func (r ExecTxResult) IsErr() bool {
|
||||
return r.Code != CodeTypeOK
|
||||
}
|
||||
|
||||
// IsOK returns true if Code is OK.
|
||||
func (r ResponseQuery) IsOK() bool {
|
||||
return r.Code == CodeTypeOK
|
||||
@@ -43,24 +53,35 @@ func (r ResponseQuery) IsErr() bool {
|
||||
return r.Code != CodeTypeOK
|
||||
}
|
||||
|
||||
// IsUnknown returns true if Code is Unknown
|
||||
func (r ResponseVerifyVoteExtension) IsUnknown() bool {
|
||||
return r.Result == ResponseVerifyVoteExtension_UNKNOWN
|
||||
func (r ResponsePrepareProposal) IsTxStatusUnknown() bool {
|
||||
return r.ModifiedTxStatus == ResponsePrepareProposal_UNKNOWN
|
||||
}
|
||||
|
||||
func (r ResponsePrepareProposal) IsTxStatusModified() bool {
|
||||
return r.ModifiedTxStatus == ResponsePrepareProposal_MODIFIED
|
||||
}
|
||||
|
||||
func (r ResponseProcessProposal) IsAccepted() bool {
|
||||
return r.Status == ResponseProcessProposal_ACCEPT
|
||||
}
|
||||
|
||||
func (r ResponseProcessProposal) IsStatusUnknown() bool {
|
||||
return r.Status == ResponseProcessProposal_UNKNOWN
|
||||
}
|
||||
|
||||
// IsStatusUnknown returns true if Code is Unknown
|
||||
func (r ResponseVerifyVoteExtension) IsStatusUnknown() bool {
|
||||
return r.Status == ResponseVerifyVoteExtension_UNKNOWN
|
||||
}
|
||||
|
||||
// IsOK returns true if Code is OK
|
||||
func (r ResponseVerifyVoteExtension) IsOK() bool {
|
||||
return r.Result == ResponseVerifyVoteExtension_ACCEPT
|
||||
return r.Status == ResponseVerifyVoteExtension_ACCEPT
|
||||
}
|
||||
|
||||
// IsErr returns true if Code is something other than OK.
|
||||
func (r ResponseVerifyVoteExtension) IsErr() bool {
|
||||
return r.Result != ResponseVerifyVoteExtension_ACCEPT
|
||||
}
|
||||
|
||||
// IsOK returns true if Code is OK
|
||||
func (r ResponseProcessProposal) IsOK() bool {
|
||||
return r.Result == ResponseProcessProposal_ACCEPT
|
||||
return r.Status != ResponseVerifyVoteExtension_ACCEPT
|
||||
}
|
||||
|
||||
//---------------------------------------------------------------------------
|
||||
@@ -154,11 +175,39 @@ func RespondExtendVote(appDataToSign, appDataSelfAuthenticating []byte) Response
|
||||
}
|
||||
|
||||
func RespondVerifyVoteExtension(ok bool) ResponseVerifyVoteExtension {
|
||||
result := ResponseVerifyVoteExtension_REJECT
|
||||
status := ResponseVerifyVoteExtension_REJECT
|
||||
if ok {
|
||||
result = ResponseVerifyVoteExtension_ACCEPT
|
||||
status = ResponseVerifyVoteExtension_ACCEPT
|
||||
}
|
||||
return ResponseVerifyVoteExtension{
|
||||
Result: result,
|
||||
Status: status,
|
||||
}
|
||||
}
|
||||
|
||||
// deterministicExecTxResult constructs a copy of response that omits
|
||||
// non-deterministic fields. The input response is not modified.
|
||||
func deterministicExecTxResult(response *ExecTxResult) *ExecTxResult {
|
||||
return &ExecTxResult{
|
||||
Code: response.Code,
|
||||
Data: response.Data,
|
||||
GasWanted: response.GasWanted,
|
||||
GasUsed: response.GasUsed,
|
||||
}
|
||||
}
|
||||
|
||||
// MarshalTxResults encodes the the TxResults as a list of byte
|
||||
// slices. It strips off the non-deterministic pieces of the TxResults
|
||||
// so that the resulting data can be used for hash comparisons and used
|
||||
// in Merkle proofs.
|
||||
func MarshalTxResults(r []*ExecTxResult) ([][]byte, error) {
|
||||
s := make([][]byte, len(r))
|
||||
for i, e := range r {
|
||||
d := deterministicExecTxResult(e)
|
||||
b, err := d.Marshal()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s[i] = b
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
74
abci/types/types_test.go
Normal file
74
abci/types/types_test.go
Normal file
@@ -0,0 +1,74 @@
|
||||
package types_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
abci "github.com/tendermint/tendermint/abci/types"
|
||||
"github.com/tendermint/tendermint/crypto/merkle"
|
||||
)
|
||||
|
||||
func TestHashAndProveResults(t *testing.T) {
|
||||
trs := []*abci.ExecTxResult{
|
||||
// Note, these tests rely on the first two entries being in this order.
|
||||
{Code: 0, Data: nil},
|
||||
{Code: 0, Data: []byte{}},
|
||||
|
||||
{Code: 0, Data: []byte("one")},
|
||||
{Code: 14, Data: nil},
|
||||
{Code: 14, Data: []byte("foo")},
|
||||
{Code: 14, Data: []byte("bar")},
|
||||
}
|
||||
|
||||
// Nil and []byte{} should produce the same bytes
|
||||
bz0, err := trs[0].Marshal()
|
||||
require.NoError(t, err)
|
||||
bz1, err := trs[1].Marshal()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, bz0, bz1)
|
||||
|
||||
// Make sure that we can get a root hash from results and verify proofs.
|
||||
rs, err := abci.MarshalTxResults(trs)
|
||||
require.NoError(t, err)
|
||||
root := merkle.HashFromByteSlices(rs)
|
||||
assert.NotEmpty(t, root)
|
||||
|
||||
_, proofs := merkle.ProofsFromByteSlices(rs)
|
||||
for i, tr := range trs {
|
||||
bz, err := tr.Marshal()
|
||||
require.NoError(t, err)
|
||||
|
||||
valid := proofs[i].Verify(root, bz)
|
||||
assert.NoError(t, valid, "%d", i)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHashDeterministicFieldsOnly(t *testing.T) {
|
||||
tr1 := abci.ExecTxResult{
|
||||
Code: 1,
|
||||
Data: []byte("transaction"),
|
||||
Log: "nondeterministic data: abc",
|
||||
Info: "nondeterministic data: abc",
|
||||
GasWanted: 1000,
|
||||
GasUsed: 1000,
|
||||
Events: []abci.Event{},
|
||||
Codespace: "nondeterministic.data.abc",
|
||||
}
|
||||
tr2 := abci.ExecTxResult{
|
||||
Code: 1,
|
||||
Data: []byte("transaction"),
|
||||
Log: "nondeterministic data: def",
|
||||
Info: "nondeterministic data: def",
|
||||
GasWanted: 1000,
|
||||
GasUsed: 1000,
|
||||
Events: []abci.Event{},
|
||||
Codespace: "nondeterministic.data.def",
|
||||
}
|
||||
r1, err := abci.MarshalTxResults([]*abci.ExecTxResult{&tr1})
|
||||
require.NoError(t, err)
|
||||
r2, err := abci.MarshalTxResults([]*abci.ExecTxResult{&tr2})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, merkle.HashFromByteSlices(r1), merkle.HashFromByteSlices(r2))
|
||||
}
|
||||
19
buf.gen.yaml
19
buf.gen.yaml
@@ -1,14 +1,9 @@
|
||||
# The version of the generation template (required).
|
||||
# The only currently-valid value is v1beta1.
|
||||
version: v1beta1
|
||||
|
||||
# The plugins to run.
|
||||
version: v1
|
||||
plugins:
|
||||
# The name of the plugin.
|
||||
- name: gogofaster
|
||||
# The directory where the generated proto output will be written.
|
||||
# The directory is relative to where the generation tool was run.
|
||||
out: proto
|
||||
# Set options to assign import paths to the well-known types
|
||||
# and to enable service generation.
|
||||
opt: Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types,Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration,plugins=grpc,paths=source_relative
|
||||
out: ./proto/
|
||||
opt:
|
||||
- Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types
|
||||
- Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration
|
||||
- plugins=grpc
|
||||
- paths=source_relative
|
||||
|
||||
3
buf.work.yaml
Normal file
3
buf.work.yaml
Normal file
@@ -0,0 +1,3 @@
|
||||
version: v1
|
||||
directories:
|
||||
- proto
|
||||
@@ -2,38 +2,29 @@ package debug
|
||||
|
||||
import (
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
)
|
||||
|
||||
var (
|
||||
nodeRPCAddr string
|
||||
profAddr string
|
||||
frequency uint
|
||||
|
||||
const (
|
||||
flagNodeRPCAddr = "rpc-laddr"
|
||||
flagProfAddr = "pprof-laddr"
|
||||
flagFrequency = "frequency"
|
||||
|
||||
logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo)
|
||||
)
|
||||
|
||||
// DebugCmd defines the root command containing subcommands that assist in
|
||||
// debugging running Tendermint processes.
|
||||
var DebugCmd = &cobra.Command{
|
||||
Use: "debug",
|
||||
Short: "A utility to kill or watch a Tendermint process while aggregating debugging data",
|
||||
}
|
||||
|
||||
func init() {
|
||||
DebugCmd.PersistentFlags().SortFlags = true
|
||||
DebugCmd.PersistentFlags().StringVar(
|
||||
&nodeRPCAddr,
|
||||
func GetDebugCommand(logger log.Logger) *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "debug",
|
||||
Short: "A utility to kill or watch a Tendermint process while aggregating debugging data",
|
||||
}
|
||||
cmd.PersistentFlags().SortFlags = true
|
||||
cmd.PersistentFlags().String(
|
||||
flagNodeRPCAddr,
|
||||
"tcp://localhost:26657",
|
||||
"the Tendermint node's RPC address (<host>:<port>)",
|
||||
"the Tendermint node's RPC address <host>:<port>)",
|
||||
)
|
||||
|
||||
DebugCmd.AddCommand(killCmd)
|
||||
DebugCmd.AddCommand(dumpCmd)
|
||||
cmd.AddCommand(getKillCmd(logger))
|
||||
cmd.AddCommand(getDumpCmd(logger))
|
||||
return cmd
|
||||
|
||||
}
|
||||
|
||||
@@ -13,78 +13,102 @@ import (
|
||||
|
||||
"github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/libs/cli"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
rpchttp "github.com/tendermint/tendermint/rpc/client/http"
|
||||
)
|
||||
|
||||
var dumpCmd = &cobra.Command{
|
||||
Use: "dump [output-directory]",
|
||||
Short: "Continuously poll a Tendermint process and dump debugging data into a single location",
|
||||
Long: `Continuously poll a Tendermint process and dump debugging data into a single
|
||||
func getDumpCmd(logger log.Logger) *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "dump [output-directory]",
|
||||
Short: "Continuously poll a Tendermint process and dump debugging data into a single location",
|
||||
Long: `Continuously poll a Tendermint process and dump debugging data into a single
|
||||
location at a specified frequency. At each frequency interval, an archived and compressed
|
||||
file will contain node debugging information including the goroutine and heap profiles
|
||||
if enabled.`,
|
||||
Args: cobra.ExactArgs(1),
|
||||
RunE: dumpCmdHandler,
|
||||
}
|
||||
Args: cobra.ExactArgs(1),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
outDir := args[0]
|
||||
if outDir == "" {
|
||||
return errors.New("invalid output directory")
|
||||
}
|
||||
frequency, err := cmd.Flags().GetUint(flagFrequency)
|
||||
if err != nil {
|
||||
return fmt.Errorf("flag %q not defined: %w", flagFrequency, err)
|
||||
}
|
||||
|
||||
func init() {
|
||||
dumpCmd.Flags().UintVar(
|
||||
&frequency,
|
||||
if frequency == 0 {
|
||||
return errors.New("frequency must be positive")
|
||||
}
|
||||
|
||||
nodeRPCAddr, err := cmd.Flags().GetString(flagNodeRPCAddr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("flag %q not defined: %w", flagNodeRPCAddr, err)
|
||||
}
|
||||
|
||||
profAddr, err := cmd.Flags().GetString(flagProfAddr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("flag %q not defined: %w", flagProfAddr, err)
|
||||
}
|
||||
|
||||
if _, err := os.Stat(outDir); os.IsNotExist(err) {
|
||||
if err := os.Mkdir(outDir, os.ModePerm); err != nil {
|
||||
return fmt.Errorf("failed to create output directory: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
rpc, err := rpchttp.New(nodeRPCAddr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create new http client: %w", err)
|
||||
}
|
||||
|
||||
ctx := cmd.Context()
|
||||
|
||||
home := viper.GetString(cli.HomeFlag)
|
||||
conf := config.DefaultConfig()
|
||||
conf = conf.SetRoot(home)
|
||||
config.EnsureRoot(conf.RootDir)
|
||||
|
||||
dumpArgs := dumpDebugDataArgs{
|
||||
conf: conf,
|
||||
outDir: outDir,
|
||||
profAddr: profAddr,
|
||||
}
|
||||
dumpDebugData(ctx, logger, rpc, dumpArgs)
|
||||
|
||||
ticker := time.NewTicker(time.Duration(frequency) * time.Second)
|
||||
for range ticker.C {
|
||||
dumpDebugData(ctx, logger, rpc, dumpArgs)
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
cmd.Flags().Uint(
|
||||
flagFrequency,
|
||||
30,
|
||||
"the frequency (seconds) in which to poll, aggregate and dump Tendermint debug data",
|
||||
)
|
||||
|
||||
dumpCmd.Flags().StringVar(
|
||||
&profAddr,
|
||||
cmd.Flags().String(
|
||||
flagProfAddr,
|
||||
"",
|
||||
"the profiling server address (<host>:<port>)",
|
||||
)
|
||||
|
||||
return cmd
|
||||
|
||||
}
|
||||
|
||||
func dumpCmdHandler(cmd *cobra.Command, args []string) error {
|
||||
outDir := args[0]
|
||||
if outDir == "" {
|
||||
return errors.New("invalid output directory")
|
||||
}
|
||||
|
||||
if frequency == 0 {
|
||||
return errors.New("frequency must be positive")
|
||||
}
|
||||
|
||||
if _, err := os.Stat(outDir); os.IsNotExist(err) {
|
||||
if err := os.Mkdir(outDir, os.ModePerm); err != nil {
|
||||
return fmt.Errorf("failed to create output directory: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
rpc, err := rpchttp.New(nodeRPCAddr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create new http client: %w", err)
|
||||
}
|
||||
|
||||
ctx := cmd.Context()
|
||||
|
||||
home := viper.GetString(cli.HomeFlag)
|
||||
conf := config.DefaultConfig()
|
||||
conf = conf.SetRoot(home)
|
||||
config.EnsureRoot(conf.RootDir)
|
||||
|
||||
dumpDebugData(ctx, outDir, conf, rpc)
|
||||
|
||||
ticker := time.NewTicker(time.Duration(frequency) * time.Second)
|
||||
for range ticker.C {
|
||||
dumpDebugData(ctx, outDir, conf, rpc)
|
||||
}
|
||||
|
||||
return nil
|
||||
type dumpDebugDataArgs struct {
|
||||
conf *config.Config
|
||||
outDir string
|
||||
profAddr string
|
||||
}
|
||||
|
||||
func dumpDebugData(ctx context.Context, outDir string, conf *config.Config, rpc *rpchttp.HTTP) {
|
||||
func dumpDebugData(ctx context.Context, logger log.Logger, rpc *rpchttp.HTTP, args dumpDebugDataArgs) {
|
||||
start := time.Now().UTC()
|
||||
|
||||
tmpDir, err := os.MkdirTemp(outDir, "tendermint_debug_tmp")
|
||||
tmpDir, err := os.MkdirTemp(args.outDir, "tendermint_debug_tmp")
|
||||
if err != nil {
|
||||
logger.Error("failed to create temporary directory", "dir", tmpDir, "error", err)
|
||||
return
|
||||
@@ -110,26 +134,26 @@ func dumpDebugData(ctx context.Context, outDir string, conf *config.Config, rpc
|
||||
}
|
||||
|
||||
logger.Info("copying node WAL...")
|
||||
if err := copyWAL(conf, tmpDir); err != nil {
|
||||
if err := copyWAL(args.conf, tmpDir); err != nil {
|
||||
logger.Error("failed to copy node WAL", "error", err)
|
||||
return
|
||||
}
|
||||
|
||||
if profAddr != "" {
|
||||
if args.profAddr != "" {
|
||||
logger.Info("getting node goroutine profile...")
|
||||
if err := dumpProfile(tmpDir, profAddr, "goroutine", 2); err != nil {
|
||||
if err := dumpProfile(tmpDir, args.profAddr, "goroutine", 2); err != nil {
|
||||
logger.Error("failed to dump goroutine profile", "error", err)
|
||||
return
|
||||
}
|
||||
|
||||
logger.Info("getting node heap profile...")
|
||||
if err := dumpProfile(tmpDir, profAddr, "heap", 2); err != nil {
|
||||
if err := dumpProfile(tmpDir, args.profAddr, "heap", 2); err != nil {
|
||||
logger.Error("failed to dump heap profile", "error", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
outFile := filepath.Join(outDir, fmt.Sprintf("%s.zip", start.Format(time.RFC3339)))
|
||||
outFile := filepath.Join(args.outDir, fmt.Sprintf("%s.zip", start.Format(time.RFC3339)))
|
||||
if err := zipDir(tmpDir, outFile); err != nil {
|
||||
logger.Error("failed to create and compress archive", "file", outFile, "error", err)
|
||||
}
|
||||
|
||||
@@ -15,89 +15,96 @@ import (
|
||||
|
||||
"github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/libs/cli"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
rpchttp "github.com/tendermint/tendermint/rpc/client/http"
|
||||
)
|
||||
|
||||
var killCmd = &cobra.Command{
|
||||
Use: "kill [pid] [compressed-output-file]",
|
||||
Short: "Kill a Tendermint process while aggregating and packaging debugging data",
|
||||
Long: `Kill a Tendermint process while also aggregating Tendermint process data
|
||||
func getKillCmd(logger log.Logger) *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "kill [pid] [compressed-output-file]",
|
||||
Short: "Kill a Tendermint process while aggregating and packaging debugging data",
|
||||
Long: `Kill a Tendermint process while also aggregating Tendermint process data
|
||||
such as the latest node state, including consensus and networking state,
|
||||
go-routine state, and the node's WAL and config information. This aggregated data
|
||||
is packaged into a compressed archive.
|
||||
|
||||
Example:
|
||||
$ tendermint debug kill 34255 /path/to/tm-debug.zip`,
|
||||
Args: cobra.ExactArgs(2),
|
||||
RunE: killCmdHandler,
|
||||
}
|
||||
Args: cobra.ExactArgs(2),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
ctx := cmd.Context()
|
||||
pid, err := strconv.ParseInt(args[0], 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
func killCmdHandler(cmd *cobra.Command, args []string) error {
|
||||
ctx := cmd.Context()
|
||||
pid, err := strconv.ParseInt(args[0], 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
outFile := args[1]
|
||||
if outFile == "" {
|
||||
return errors.New("invalid output file")
|
||||
}
|
||||
nodeRPCAddr, err := cmd.Flags().GetString(flagNodeRPCAddr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("flag %q not defined: %w", flagNodeRPCAddr, err)
|
||||
}
|
||||
|
||||
rpc, err := rpchttp.New(nodeRPCAddr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create new http client: %w", err)
|
||||
}
|
||||
|
||||
home := viper.GetString(cli.HomeFlag)
|
||||
conf := config.DefaultConfig()
|
||||
conf = conf.SetRoot(home)
|
||||
config.EnsureRoot(conf.RootDir)
|
||||
|
||||
// Create a temporary directory which will contain all the state dumps and
|
||||
// relevant files and directories that will be compressed into a file.
|
||||
tmpDir, err := os.MkdirTemp(os.TempDir(), "tendermint_debug_tmp")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create temporary directory: %w", err)
|
||||
}
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
logger.Info("getting node status...")
|
||||
if err := dumpStatus(ctx, rpc, tmpDir, "status.json"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logger.Info("getting node network info...")
|
||||
if err := dumpNetInfo(ctx, rpc, tmpDir, "net_info.json"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logger.Info("getting node consensus state...")
|
||||
if err := dumpConsensusState(ctx, rpc, tmpDir, "consensus_state.json"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logger.Info("copying node WAL...")
|
||||
if err := copyWAL(conf, tmpDir); err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
logger.Info("node WAL does not exist; continuing...")
|
||||
}
|
||||
|
||||
logger.Info("copying node configuration...")
|
||||
if err := copyConfig(home, tmpDir); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logger.Info("killing Tendermint process")
|
||||
if err := killProc(int(pid), tmpDir); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logger.Info("archiving and compressing debug directory...")
|
||||
return zipDir(tmpDir, outFile)
|
||||
},
|
||||
}
|
||||
|
||||
outFile := args[1]
|
||||
if outFile == "" {
|
||||
return errors.New("invalid output file")
|
||||
}
|
||||
|
||||
rpc, err := rpchttp.New(nodeRPCAddr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create new http client: %w", err)
|
||||
}
|
||||
|
||||
home := viper.GetString(cli.HomeFlag)
|
||||
conf := config.DefaultConfig()
|
||||
conf = conf.SetRoot(home)
|
||||
config.EnsureRoot(conf.RootDir)
|
||||
|
||||
// Create a temporary directory which will contain all the state dumps and
|
||||
// relevant files and directories that will be compressed into a file.
|
||||
tmpDir, err := os.MkdirTemp(os.TempDir(), "tendermint_debug_tmp")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create temporary directory: %w", err)
|
||||
}
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
logger.Info("getting node status...")
|
||||
if err := dumpStatus(ctx, rpc, tmpDir, "status.json"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logger.Info("getting node network info...")
|
||||
if err := dumpNetInfo(ctx, rpc, tmpDir, "net_info.json"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logger.Info("getting node consensus state...")
|
||||
if err := dumpConsensusState(ctx, rpc, tmpDir, "consensus_state.json"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logger.Info("copying node WAL...")
|
||||
if err := copyWAL(conf, tmpDir); err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
logger.Info("node WAL does not exist; continuing...")
|
||||
}
|
||||
|
||||
logger.Info("copying node configuration...")
|
||||
if err := copyConfig(home, tmpDir); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logger.Info("killing Tendermint process")
|
||||
if err := killProc(int(pid), tmpDir); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logger.Info("archiving and compressing debug directory...")
|
||||
return zipDir(tmpDir, outFile)
|
||||
return cmd
|
||||
}
|
||||
|
||||
// killProc attempts to kill the Tendermint process with a given PID with an
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/scripts/keymigrate"
|
||||
"github.com/tendermint/tendermint/scripts/scmigrate"
|
||||
)
|
||||
|
||||
func MakeKeyMigrateCommand(conf *cfg.Config, logger log.Logger) *cobra.Command {
|
||||
@@ -51,6 +52,13 @@ func MakeKeyMigrateCommand(conf *cfg.Config, logger log.Logger) *cobra.Command {
|
||||
return fmt.Errorf("running migration for context %q: %w",
|
||||
dbctx, err)
|
||||
}
|
||||
|
||||
if dbctx == "blockstore" {
|
||||
if err := scmigrate.Migrate(ctx, db); err != nil {
|
||||
return fmt.Errorf("running seen commit migration: %w", err)
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
logger.Info("completed database migration successfully")
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package commands
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
@@ -149,7 +148,7 @@ for applications built w/ Cosmos SDK).
|
||||
// Initiate the light client. If the trusted store already has blocks in it, this
|
||||
// will be used else we use the trusted options.
|
||||
c, err := light.NewHTTPClient(
|
||||
context.Background(),
|
||||
cmd.Context(),
|
||||
chainID,
|
||||
light.TrustOptions{
|
||||
Period: trustingPeriod,
|
||||
|
||||
@@ -213,7 +213,7 @@ func eventReIndex(cmd *cobra.Command, args eventReIndexArgs) error {
|
||||
Height: b.Height,
|
||||
Index: uint32(i),
|
||||
Tx: b.Data.Txs[i],
|
||||
Result: *(r.FinalizeBlock.Txs[i]),
|
||||
Result: *(r.FinalizeBlock.TxResults[i]),
|
||||
}
|
||||
|
||||
_ = batch.Add(&tr)
|
||||
|
||||
@@ -153,10 +153,10 @@ func TestReIndexEvent(t *testing.T) {
|
||||
On("IndexTxEvents", mock.AnythingOfType("[]*types.TxResult")).Return(errors.New("")).Once().
|
||||
On("IndexTxEvents", mock.AnythingOfType("[]*types.TxResult")).Return(nil)
|
||||
|
||||
dtx := abcitypes.ResponseDeliverTx{}
|
||||
dtx := abcitypes.ExecTxResult{}
|
||||
abciResp := &prototmstate.ABCIResponses{
|
||||
FinalizeBlock: &abcitypes.ResponseFinalizeBlock{
|
||||
Txs: []*abcitypes.ResponseDeliverTx{&dtx},
|
||||
TxResults: []*abcitypes.ExecTxResult{&dtx},
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@@ -2,6 +2,7 @@ package commands
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
@@ -31,6 +32,20 @@ func MakeResetAllCommand(conf *config.Config, logger log.Logger) *cobra.Command
|
||||
return cmd
|
||||
}
|
||||
|
||||
// MakeResetStateCommand constructs a command that removes the database of
|
||||
// the specified Tendermint core instance.
|
||||
func MakeResetStateCommand(conf *config.Config, logger log.Logger) *cobra.Command {
|
||||
var keyType string
|
||||
|
||||
return &cobra.Command{
|
||||
Use: "reset-state",
|
||||
Short: "Remove all the data and WAL",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return resetState(conf.DBDir(), logger, keyType)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func MakeResetPrivateValidatorCommand(conf *config.Config, logger log.Logger) *cobra.Command {
|
||||
var keyType string
|
||||
|
||||
@@ -55,18 +70,76 @@ func MakeResetPrivateValidatorCommand(conf *config.Config, logger log.Logger) *c
|
||||
// it's only suitable for testnets.
|
||||
|
||||
// resetAll removes address book files plus all data, and resets the privValdiator data.
|
||||
// Exported so other CLI tools can use it.
|
||||
func resetAll(dbDir, privValKeyFile, privValStateFile string, logger log.Logger, keyType string) error {
|
||||
if err := os.RemoveAll(dbDir); err == nil {
|
||||
logger.Info("Removed all blockchain history", "dir", dbDir)
|
||||
} else {
|
||||
logger.Error("error removing all blockchain history", "dir", dbDir, "err", err)
|
||||
}
|
||||
// recreate the dbDir since the privVal state needs to live there
|
||||
|
||||
return resetFilePV(privValKeyFile, privValStateFile, logger, keyType)
|
||||
}
|
||||
|
||||
// resetState removes address book files plus all databases.
|
||||
func resetState(dbDir string, logger log.Logger, keyType string) error {
|
||||
blockdb := filepath.Join(dbDir, "blockstore.db")
|
||||
state := filepath.Join(dbDir, "state.db")
|
||||
wal := filepath.Join(dbDir, "cs.wal")
|
||||
evidence := filepath.Join(dbDir, "evidence.db")
|
||||
txIndex := filepath.Join(dbDir, "tx_index.db")
|
||||
peerstore := filepath.Join(dbDir, "peerstore.db")
|
||||
|
||||
if tmos.FileExists(blockdb) {
|
||||
if err := os.RemoveAll(blockdb); err == nil {
|
||||
logger.Info("Removed all blockstore.db", "dir", blockdb)
|
||||
} else {
|
||||
logger.Error("error removing all blockstore.db", "dir", blockdb, "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
if tmos.FileExists(state) {
|
||||
if err := os.RemoveAll(state); err == nil {
|
||||
logger.Info("Removed all state.db", "dir", state)
|
||||
} else {
|
||||
logger.Error("error removing all state.db", "dir", state, "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
if tmos.FileExists(wal) {
|
||||
if err := os.RemoveAll(wal); err == nil {
|
||||
logger.Info("Removed all cs.wal", "dir", wal)
|
||||
} else {
|
||||
logger.Error("error removing all cs.wal", "dir", wal, "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
if tmos.FileExists(evidence) {
|
||||
if err := os.RemoveAll(evidence); err == nil {
|
||||
logger.Info("Removed all evidence.db", "dir", evidence)
|
||||
} else {
|
||||
logger.Error("error removing all evidence.db", "dir", evidence, "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
if tmos.FileExists(txIndex) {
|
||||
if err := os.RemoveAll(txIndex); err == nil {
|
||||
logger.Info("Removed tx_index.db", "dir", txIndex)
|
||||
} else {
|
||||
logger.Error("error removing tx_index.db", "dir", txIndex, "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
if tmos.FileExists(peerstore) {
|
||||
if err := os.RemoveAll(peerstore); err == nil {
|
||||
logger.Info("Removed peerstore.db", "dir", peerstore)
|
||||
} else {
|
||||
logger.Error("error removing peerstore.db", "dir", peerstore, "err", err)
|
||||
}
|
||||
}
|
||||
if err := tmos.EnsureDir(dbDir, 0700); err != nil {
|
||||
logger.Error("unable to recreate dbDir", "err", err)
|
||||
}
|
||||
return resetFilePV(privValKeyFile, privValStateFile, logger, keyType)
|
||||
return nil
|
||||
}
|
||||
|
||||
func resetFilePV(privValKeyFile, privValStateFile string, logger log.Logger, keyType string) error {
|
||||
|
||||
@@ -53,6 +53,7 @@ func TestRollbackIntegration(t *testing.T) {
|
||||
defer cancel()
|
||||
node2, _, err2 := rpctest.StartTendermint(ctx, cfg, app, rpctest.SuppressStdout)
|
||||
require.NoError(t, err2)
|
||||
t.Cleanup(node2.Wait)
|
||||
|
||||
logger := log.NewNopLogger()
|
||||
|
||||
|
||||
@@ -51,6 +51,12 @@ func RootCommand(conf *config.Config, logger log.Logger) *cobra.Command {
|
||||
}
|
||||
*conf = *pconf
|
||||
config.EnsureRoot(conf.RootDir)
|
||||
if err := log.OverrideWithNewLogger(logger, conf.LogFormat, conf.LogLevel); err != nil {
|
||||
return err
|
||||
}
|
||||
if warning := pconf.DeprecatedFieldWarning(); warning != nil {
|
||||
logger.Info("WARNING", "deprecated field warning", warning)
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
|
||||
@@ -34,6 +34,7 @@ func main() {
|
||||
commands.MakeReplayCommand(conf, logger),
|
||||
commands.MakeReplayConsoleCommand(conf, logger),
|
||||
commands.MakeResetAllCommand(conf, logger),
|
||||
commands.MakeResetStateCommand(conf, logger),
|
||||
commands.MakeResetPrivateValidatorCommand(conf, logger),
|
||||
commands.MakeShowValidatorCommand(conf, logger),
|
||||
commands.MakeTestnetFilesCommand(conf, logger),
|
||||
@@ -43,7 +44,7 @@ func main() {
|
||||
commands.MakeInspectCommand(conf, logger),
|
||||
commands.MakeRollbackStateCommand(conf),
|
||||
commands.MakeKeyMigrateCommand(conf, logger),
|
||||
debug.DebugCmd,
|
||||
debug.GetDebugCommand(logger),
|
||||
commands.NewCompletionCmd(rcmd, true),
|
||||
)
|
||||
|
||||
|
||||
235
config/config.go
235
config/config.go
@@ -8,6 +8,7 @@ import (
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
@@ -145,6 +146,10 @@ func (cfg *Config) ValidateBasic() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cfg *Config) DeprecatedFieldWarning() error {
|
||||
return cfg.Consensus.DeprecatedFieldWarning()
|
||||
}
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
// BaseConfig
|
||||
|
||||
@@ -442,6 +447,33 @@ type RPCConfig struct {
|
||||
// to the estimated maximum number of broadcast_tx_commit calls per block.
|
||||
MaxSubscriptionsPerClient int `mapstructure:"max-subscriptions-per-client"`
|
||||
|
||||
// If true, disable the websocket interface to the RPC service. This has
|
||||
// the effect of disabling the /subscribe, /unsubscribe, and /unsubscribe_all
|
||||
// methods for event subscription.
|
||||
//
|
||||
// EXPERIMENTAL: This setting will be removed in Tendermint v0.37.
|
||||
ExperimentalDisableWebsocket bool `mapstructure:"experimental-disable-websocket"`
|
||||
|
||||
// The time window size for the event log. All events up to this long before
|
||||
// the latest (up to EventLogMaxItems) will be available for subscribers to
|
||||
// fetch via the /events method. If 0 (the default) the event log and the
|
||||
// /events RPC method are disabled.
|
||||
EventLogWindowSize time.Duration `mapstructure:"event-log-window-size"`
|
||||
|
||||
// The maxiumum number of events that may be retained by the event log. If
|
||||
// this value is 0, no upper limit is set. Otherwise, items in excess of
|
||||
// this number will be discarded from the event log.
|
||||
//
|
||||
// Warning: This setting is a safety valve. Setting it too low may cause
|
||||
// subscribers to miss events. Try to choose a value higher than the
|
||||
// maximum worst-case expected event load within the chosen window size in
|
||||
// ordinary operation.
|
||||
//
|
||||
// For example, if the window size is 10 minutes and the node typically
|
||||
// averages 1000 events per ten minutes, but with occasional known spikes of
|
||||
// up to 2000, choose a value > 2000.
|
||||
EventLogMaxItems int `mapstructure:"event-log-max-items"`
|
||||
|
||||
// How long to wait for a tx to be committed during /broadcast_tx_commit
|
||||
// WARNING: Using a value larger than 10s will result in increasing the
|
||||
// global HTTP write timeout, which applies to all connections and endpoints.
|
||||
@@ -487,9 +519,14 @@ func DefaultRPCConfig() *RPCConfig {
|
||||
Unsafe: false,
|
||||
MaxOpenConnections: 900,
|
||||
|
||||
MaxSubscriptionClients: 100,
|
||||
MaxSubscriptionsPerClient: 5,
|
||||
TimeoutBroadcastTxCommit: 10 * time.Second,
|
||||
// Settings for event subscription.
|
||||
MaxSubscriptionClients: 100,
|
||||
MaxSubscriptionsPerClient: 5,
|
||||
ExperimentalDisableWebsocket: false, // compatible with TM v0.35 and earlier
|
||||
EventLogWindowSize: 0, // disables /events RPC by default
|
||||
EventLogMaxItems: 0,
|
||||
|
||||
TimeoutBroadcastTxCommit: 10 * time.Second,
|
||||
|
||||
MaxBodyBytes: int64(1000000), // 1MB
|
||||
MaxHeaderBytes: 1 << 20, // same as the net/http default
|
||||
@@ -519,6 +556,12 @@ func (cfg *RPCConfig) ValidateBasic() error {
|
||||
if cfg.MaxSubscriptionsPerClient < 0 {
|
||||
return errors.New("max-subscriptions-per-client can't be negative")
|
||||
}
|
||||
if cfg.EventLogWindowSize < 0 {
|
||||
return errors.New("event-log-window-size must not be negative")
|
||||
}
|
||||
if cfg.EventLogMaxItems < 0 {
|
||||
return errors.New("event-log-max-items must not be negative")
|
||||
}
|
||||
if cfg.TimeoutBroadcastTxCommit < 0 {
|
||||
return errors.New("timeout-broadcast-tx-commit can't be negative")
|
||||
}
|
||||
@@ -918,27 +961,6 @@ type ConsensusConfig struct {
|
||||
WalPath string `mapstructure:"wal-file"`
|
||||
walFile string // overrides WalPath if set
|
||||
|
||||
// TODO: remove timeout configs, these should be global not local
|
||||
// How long we wait for a proposal block before prevoting nil
|
||||
TimeoutPropose time.Duration `mapstructure:"timeout-propose"`
|
||||
// How much timeout-propose increases with each round
|
||||
TimeoutProposeDelta time.Duration `mapstructure:"timeout-propose-delta"`
|
||||
// How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil)
|
||||
TimeoutPrevote time.Duration `mapstructure:"timeout-prevote"`
|
||||
// How much the timeout-prevote increases with each round
|
||||
TimeoutPrevoteDelta time.Duration `mapstructure:"timeout-prevote-delta"`
|
||||
// How long we wait after receiving +2/3 precommits for “anything” (ie. not a single block or nil)
|
||||
TimeoutPrecommit time.Duration `mapstructure:"timeout-precommit"`
|
||||
// How much the timeout-precommit increases with each round
|
||||
TimeoutPrecommitDelta time.Duration `mapstructure:"timeout-precommit-delta"`
|
||||
// How long we wait after committing a block, before starting on the new
|
||||
// height (this gives us a chance to receive some more precommits, even
|
||||
// though we already have +2/3).
|
||||
TimeoutCommit time.Duration `mapstructure:"timeout-commit"`
|
||||
|
||||
// Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
|
||||
SkipTimeoutCommit bool `mapstructure:"skip-timeout-commit"`
|
||||
|
||||
// EmptyBlocks mode and possible interval between empty blocks
|
||||
CreateEmptyBlocks bool `mapstructure:"create-empty-blocks"`
|
||||
CreateEmptyBlocksInterval time.Duration `mapstructure:"create-empty-blocks-interval"`
|
||||
@@ -948,20 +970,59 @@ type ConsensusConfig struct {
|
||||
PeerQueryMaj23SleepDuration time.Duration `mapstructure:"peer-query-maj23-sleep-duration"`
|
||||
|
||||
DoubleSignCheckHeight int64 `mapstructure:"double-sign-check-height"`
|
||||
|
||||
// TODO: The following fields are all temporary overrides that should exist only
|
||||
// for the duration of the v0.36 release. The below fields should be completely
|
||||
// removed in the v0.37 release of Tendermint.
|
||||
// See: https://github.com/tendermint/tendermint/issues/8188
|
||||
|
||||
// UnsafeProposeTimeoutOverride provides an unsafe override of the Propose
|
||||
// timeout consensus parameter. It configures how long the consensus engine
|
||||
// will wait to receive a proposal block before prevoting nil.
|
||||
UnsafeProposeTimeoutOverride time.Duration `mapstructure:"unsafe-propose-timeout-override"`
|
||||
// UnsafeProposeTimeoutDeltaOverride provides an unsafe override of the
|
||||
// ProposeDelta timeout consensus parameter. It configures how much the
|
||||
// propose timeout increases with each round.
|
||||
UnsafeProposeTimeoutDeltaOverride time.Duration `mapstructure:"unsafe-propose-timeout-delta-override"`
|
||||
// UnsafeVoteTimeoutOverride provides an unsafe override of the Vote timeout
|
||||
// consensus parameter. It configures how long the consensus engine will wait
|
||||
// to gather additional votes after receiving +2/3 votes in a round.
|
||||
UnsafeVoteTimeoutOverride time.Duration `mapstructure:"unsafe-vote-timeout-override"`
|
||||
// UnsafeVoteTimeoutDeltaOverride provides an unsafe override of the VoteDelta
|
||||
// timeout consensus parameter. It configures how much the vote timeout
|
||||
// increases with each round.
|
||||
UnsafeVoteTimeoutDeltaOverride time.Duration `mapstructure:"unsafe-vote-timeout-delta-override"`
|
||||
// UnsafeCommitTimeoutOverride provides an unsafe override of the Commit timeout
|
||||
// consensus parameter. It configures how long the consensus engine will wait
|
||||
// after receiving +2/3 precommits before beginning the next height.
|
||||
UnsafeCommitTimeoutOverride time.Duration `mapstructure:"unsafe-commit-timeout-override"`
|
||||
|
||||
// UnsafeBypassCommitTimeoutOverride provides an unsafe override of the
|
||||
// BypassCommitTimeout consensus parameter. It configures if the consensus
|
||||
// engine will wait for the full Commit timeout before proceeding to the next height.
|
||||
// If it is set to true, the consensus engine will proceed to the next height
|
||||
// as soon as the node has gathered votes from all of the validators on the network.
|
||||
UnsafeBypassCommitTimeoutOverride *bool `mapstructure:"unsafe-bypass-commit-timeout-override"`
|
||||
|
||||
// Deprecated timeout parameters. These parameters are present in this struct
|
||||
// so that they can be parsed so that validation can check if they have erroneously
|
||||
// been included and provide a helpful error message.
|
||||
// These fields should be completely removed in v0.37.
|
||||
// See: https://github.com/tendermint/tendermint/issues/8188
|
||||
DeprecatedTimeoutPropose *interface{} `mapstructure:"timeout-propose"`
|
||||
DeprecatedTimeoutProposeDelta *interface{} `mapstructure:"timeout-propose-delta"`
|
||||
DeprecatedTimeoutPrevote *interface{} `mapstructure:"timeout-prevote"`
|
||||
DeprecatedTimeoutPrevoteDelta *interface{} `mapstructure:"timeout-prevote-delta"`
|
||||
DeprecatedTimeoutPrecommit *interface{} `mapstructure:"timeout-precommit"`
|
||||
DeprecatedTimeoutPrecommitDelta *interface{} `mapstructure:"timeout-precommit-delta"`
|
||||
DeprecatedTimeoutCommit *interface{} `mapstructure:"timeout-commit"`
|
||||
DeprecatedSkipTimeoutCommit *interface{} `mapstructure:"skip-timeout-commit"`
|
||||
}
|
||||
|
||||
// DefaultConsensusConfig returns a default configuration for the consensus service
|
||||
func DefaultConsensusConfig() *ConsensusConfig {
|
||||
return &ConsensusConfig{
|
||||
WalPath: filepath.Join(defaultDataDir, "cs.wal", "wal"),
|
||||
TimeoutPropose: 3000 * time.Millisecond,
|
||||
TimeoutProposeDelta: 500 * time.Millisecond,
|
||||
TimeoutPrevote: 1000 * time.Millisecond,
|
||||
TimeoutPrevoteDelta: 500 * time.Millisecond,
|
||||
TimeoutPrecommit: 1000 * time.Millisecond,
|
||||
TimeoutPrecommitDelta: 500 * time.Millisecond,
|
||||
TimeoutCommit: 1000 * time.Millisecond,
|
||||
SkipTimeoutCommit: false,
|
||||
CreateEmptyBlocks: true,
|
||||
CreateEmptyBlocksInterval: 0 * time.Second,
|
||||
PeerGossipSleepDuration: 100 * time.Millisecond,
|
||||
@@ -973,14 +1034,6 @@ func DefaultConsensusConfig() *ConsensusConfig {
|
||||
// TestConsensusConfig returns a configuration for testing the consensus service
|
||||
func TestConsensusConfig() *ConsensusConfig {
|
||||
cfg := DefaultConsensusConfig()
|
||||
cfg.TimeoutPropose = 40 * time.Millisecond
|
||||
cfg.TimeoutProposeDelta = 1 * time.Millisecond
|
||||
cfg.TimeoutPrevote = 10 * time.Millisecond
|
||||
cfg.TimeoutPrevoteDelta = 1 * time.Millisecond
|
||||
cfg.TimeoutPrecommit = 10 * time.Millisecond
|
||||
cfg.TimeoutPrecommitDelta = 1 * time.Millisecond
|
||||
cfg.TimeoutCommit = 10 * time.Millisecond
|
||||
cfg.SkipTimeoutCommit = true
|
||||
cfg.PeerGossipSleepDuration = 5 * time.Millisecond
|
||||
cfg.PeerQueryMaj23SleepDuration = 250 * time.Millisecond
|
||||
cfg.DoubleSignCheckHeight = int64(0)
|
||||
@@ -992,33 +1045,6 @@ func (cfg *ConsensusConfig) WaitForTxs() bool {
|
||||
return !cfg.CreateEmptyBlocks || cfg.CreateEmptyBlocksInterval > 0
|
||||
}
|
||||
|
||||
// Propose returns the amount of time to wait for a proposal
|
||||
func (cfg *ConsensusConfig) Propose(round int32) time.Duration {
|
||||
return time.Duration(
|
||||
cfg.TimeoutPropose.Nanoseconds()+cfg.TimeoutProposeDelta.Nanoseconds()*int64(round),
|
||||
) * time.Nanosecond
|
||||
}
|
||||
|
||||
// Prevote returns the amount of time to wait for straggler votes after receiving any +2/3 prevotes
|
||||
func (cfg *ConsensusConfig) Prevote(round int32) time.Duration {
|
||||
return time.Duration(
|
||||
cfg.TimeoutPrevote.Nanoseconds()+cfg.TimeoutPrevoteDelta.Nanoseconds()*int64(round),
|
||||
) * time.Nanosecond
|
||||
}
|
||||
|
||||
// Precommit returns the amount of time to wait for straggler votes after receiving any +2/3 precommits
|
||||
func (cfg *ConsensusConfig) Precommit(round int32) time.Duration {
|
||||
return time.Duration(
|
||||
cfg.TimeoutPrecommit.Nanoseconds()+cfg.TimeoutPrecommitDelta.Nanoseconds()*int64(round),
|
||||
) * time.Nanosecond
|
||||
}
|
||||
|
||||
// Commit returns the amount of time to wait for straggler votes after receiving +2/3 precommits
|
||||
// for a single block (ie. a commit).
|
||||
func (cfg *ConsensusConfig) Commit(t time.Time) time.Time {
|
||||
return t.Add(cfg.TimeoutCommit)
|
||||
}
|
||||
|
||||
// WalFile returns the full path to the write-ahead log file
|
||||
func (cfg *ConsensusConfig) WalFile() string {
|
||||
if cfg.walFile != "" {
|
||||
@@ -1035,26 +1061,20 @@ func (cfg *ConsensusConfig) SetWalFile(walFile string) {
|
||||
// ValidateBasic performs basic validation (checking param bounds, etc.) and
|
||||
// returns an error if any check fails.
|
||||
func (cfg *ConsensusConfig) ValidateBasic() error {
|
||||
if cfg.TimeoutPropose < 0 {
|
||||
return errors.New("timeout-propose can't be negative")
|
||||
if cfg.UnsafeProposeTimeoutOverride < 0 {
|
||||
return errors.New("unsafe-propose-timeout-override can't be negative")
|
||||
}
|
||||
if cfg.TimeoutProposeDelta < 0 {
|
||||
return errors.New("timeout-propose-delta can't be negative")
|
||||
if cfg.UnsafeProposeTimeoutDeltaOverride < 0 {
|
||||
return errors.New("unsafe-propose-timeout-delta-override can't be negative")
|
||||
}
|
||||
if cfg.TimeoutPrevote < 0 {
|
||||
return errors.New("timeout-prevote can't be negative")
|
||||
if cfg.UnsafeVoteTimeoutOverride < 0 {
|
||||
return errors.New("unsafe-vote-timeout-override can't be negative")
|
||||
}
|
||||
if cfg.TimeoutPrevoteDelta < 0 {
|
||||
return errors.New("timeout-prevote-delta can't be negative")
|
||||
if cfg.UnsafeVoteTimeoutDeltaOverride < 0 {
|
||||
return errors.New("unsafe-vote-timeout-delta-override can't be negative")
|
||||
}
|
||||
if cfg.TimeoutPrecommit < 0 {
|
||||
return errors.New("timeout-precommit can't be negative")
|
||||
}
|
||||
if cfg.TimeoutPrecommitDelta < 0 {
|
||||
return errors.New("timeout-precommit-delta can't be negative")
|
||||
}
|
||||
if cfg.TimeoutCommit < 0 {
|
||||
return errors.New("timeout-commit can't be negative")
|
||||
if cfg.UnsafeCommitTimeoutOverride < 0 {
|
||||
return errors.New("unsafe-commit-timeout-override can't be negative")
|
||||
}
|
||||
if cfg.CreateEmptyBlocksInterval < 0 {
|
||||
return errors.New("create-empty-blocks-interval can't be negative")
|
||||
@@ -1071,6 +1091,44 @@ func (cfg *ConsensusConfig) ValidateBasic() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cfg *ConsensusConfig) DeprecatedFieldWarning() error {
|
||||
var fields []string
|
||||
if cfg.DeprecatedSkipTimeoutCommit != nil {
|
||||
fields = append(fields, "skip-timeout-commit")
|
||||
}
|
||||
if cfg.DeprecatedTimeoutPropose != nil {
|
||||
fields = append(fields, "timeout-propose")
|
||||
}
|
||||
if cfg.DeprecatedTimeoutProposeDelta != nil {
|
||||
fields = append(fields, "timeout-propose-delta")
|
||||
}
|
||||
if cfg.DeprecatedTimeoutPrevote != nil {
|
||||
fields = append(fields, "timeout-prevote")
|
||||
}
|
||||
if cfg.DeprecatedTimeoutPrevoteDelta != nil {
|
||||
fields = append(fields, "timeout-prevote-delta")
|
||||
}
|
||||
if cfg.DeprecatedTimeoutPrecommit != nil {
|
||||
fields = append(fields, "timeout-precommit")
|
||||
}
|
||||
if cfg.DeprecatedTimeoutPrecommitDelta != nil {
|
||||
fields = append(fields, "timeout-precommit-delta")
|
||||
}
|
||||
if cfg.DeprecatedTimeoutCommit != nil {
|
||||
fields = append(fields, "timeout-commit")
|
||||
}
|
||||
if cfg.DeprecatedSkipTimeoutCommit != nil {
|
||||
fields = append(fields, "skip-timeout-commit")
|
||||
}
|
||||
if len(fields) != 0 {
|
||||
return fmt.Errorf("the following deprecated fields were set in the "+
|
||||
"configuration file: %s. These fields were removed in v0.36. Timeout "+
|
||||
"configuration has been moved to the ConsensusParams. For more information see "+
|
||||
"https://tinyurl.com/adr074", strings.Join(fields, ", "))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
// TxIndexConfig
|
||||
// Remember that Event has the following structure:
|
||||
@@ -1087,9 +1145,8 @@ type TxIndexConfig struct {
|
||||
// If list contains `null`, meaning no indexer service will be used.
|
||||
//
|
||||
// Options:
|
||||
// 1) "null" - no indexer services.
|
||||
// 2) "kv" (default) - the simplest possible indexer,
|
||||
// backed by key-value storage (defaults to levelDB; see DBBackend).
|
||||
// 1) "null" (default) - no indexer services.
|
||||
// 2) "kv" - a simple indexer backed by key-value storage (see DBBackend)
|
||||
// 3) "psql" - the indexer services backed by PostgreSQL.
|
||||
Indexer []string `mapstructure:"indexer"`
|
||||
|
||||
@@ -1100,14 +1157,12 @@ type TxIndexConfig struct {
|
||||
|
||||
// DefaultTxIndexConfig returns a default configuration for the transaction indexer.
|
||||
func DefaultTxIndexConfig() *TxIndexConfig {
|
||||
return &TxIndexConfig{
|
||||
Indexer: []string{"kv"},
|
||||
}
|
||||
return &TxIndexConfig{Indexer: []string{"null"}}
|
||||
}
|
||||
|
||||
// TestTxIndexConfig returns a default configuration for the transaction indexer.
|
||||
func TestTxIndexConfig() *TxIndexConfig {
|
||||
return DefaultTxIndexConfig()
|
||||
return &TxIndexConfig{Indexer: []string{"kv"}}
|
||||
}
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
|
||||
@@ -29,8 +29,8 @@ func TestConfigValidateBasic(t *testing.T) {
|
||||
cfg := DefaultConfig()
|
||||
assert.NoError(t, cfg.ValidateBasic())
|
||||
|
||||
// tamper with timeout_propose
|
||||
cfg.Consensus.TimeoutPropose = -10 * time.Second
|
||||
// tamper with unsafe-propose-timeout-override
|
||||
cfg.Consensus.UnsafeProposeTimeoutOverride = -10 * time.Second
|
||||
assert.Error(t, cfg.ValidateBasic())
|
||||
}
|
||||
|
||||
@@ -106,25 +106,21 @@ func TestConsensusConfig_ValidateBasic(t *testing.T) {
|
||||
modify func(*ConsensusConfig)
|
||||
expectErr bool
|
||||
}{
|
||||
"TimeoutPropose": {func(c *ConsensusConfig) { c.TimeoutPropose = time.Second }, false},
|
||||
"TimeoutPropose negative": {func(c *ConsensusConfig) { c.TimeoutPropose = -1 }, true},
|
||||
"TimeoutProposeDelta": {func(c *ConsensusConfig) { c.TimeoutProposeDelta = time.Second }, false},
|
||||
"TimeoutProposeDelta negative": {func(c *ConsensusConfig) { c.TimeoutProposeDelta = -1 }, true},
|
||||
"TimeoutPrevote": {func(c *ConsensusConfig) { c.TimeoutPrevote = time.Second }, false},
|
||||
"TimeoutPrevote negative": {func(c *ConsensusConfig) { c.TimeoutPrevote = -1 }, true},
|
||||
"TimeoutPrevoteDelta": {func(c *ConsensusConfig) { c.TimeoutPrevoteDelta = time.Second }, false},
|
||||
"TimeoutPrevoteDelta negative": {func(c *ConsensusConfig) { c.TimeoutPrevoteDelta = -1 }, true},
|
||||
"TimeoutPrecommit": {func(c *ConsensusConfig) { c.TimeoutPrecommit = time.Second }, false},
|
||||
"TimeoutPrecommit negative": {func(c *ConsensusConfig) { c.TimeoutPrecommit = -1 }, true},
|
||||
"TimeoutPrecommitDelta": {func(c *ConsensusConfig) { c.TimeoutPrecommitDelta = time.Second }, false},
|
||||
"TimeoutPrecommitDelta negative": {func(c *ConsensusConfig) { c.TimeoutPrecommitDelta = -1 }, true},
|
||||
"TimeoutCommit": {func(c *ConsensusConfig) { c.TimeoutCommit = time.Second }, false},
|
||||
"TimeoutCommit negative": {func(c *ConsensusConfig) { c.TimeoutCommit = -1 }, true},
|
||||
"PeerGossipSleepDuration": {func(c *ConsensusConfig) { c.PeerGossipSleepDuration = time.Second }, false},
|
||||
"PeerGossipSleepDuration negative": {func(c *ConsensusConfig) { c.PeerGossipSleepDuration = -1 }, true},
|
||||
"PeerQueryMaj23SleepDuration": {func(c *ConsensusConfig) { c.PeerQueryMaj23SleepDuration = time.Second }, false},
|
||||
"PeerQueryMaj23SleepDuration negative": {func(c *ConsensusConfig) { c.PeerQueryMaj23SleepDuration = -1 }, true},
|
||||
"DoubleSignCheckHeight negative": {func(c *ConsensusConfig) { c.DoubleSignCheckHeight = -1 }, true},
|
||||
"UnsafeProposeTimeoutOverride": {func(c *ConsensusConfig) { c.UnsafeProposeTimeoutOverride = time.Second }, false},
|
||||
"UnsafeProposeTimeoutOverride negative": {func(c *ConsensusConfig) { c.UnsafeProposeTimeoutOverride = -1 }, true},
|
||||
"UnsafeProposeTimeoutDeltaOverride": {func(c *ConsensusConfig) { c.UnsafeProposeTimeoutDeltaOverride = time.Second }, false},
|
||||
"UnsafeProposeTimeoutDeltaOverride negative": {func(c *ConsensusConfig) { c.UnsafeProposeTimeoutDeltaOverride = -1 }, true},
|
||||
"UnsafePrevoteTimeoutOverride": {func(c *ConsensusConfig) { c.UnsafeVoteTimeoutOverride = time.Second }, false},
|
||||
"UnsafePrevoteTimeoutOverride negative": {func(c *ConsensusConfig) { c.UnsafeVoteTimeoutOverride = -1 }, true},
|
||||
"UnsafePrevoteTimeoutDeltaOverride": {func(c *ConsensusConfig) { c.UnsafeVoteTimeoutDeltaOverride = time.Second }, false},
|
||||
"UnsafePrevoteTimeoutDeltaOverride negative": {func(c *ConsensusConfig) { c.UnsafeVoteTimeoutDeltaOverride = -1 }, true},
|
||||
"UnsafeCommitTimeoutOverride": {func(c *ConsensusConfig) { c.UnsafeCommitTimeoutOverride = time.Second }, false},
|
||||
"UnsafeCommitTimeoutOverride negative": {func(c *ConsensusConfig) { c.UnsafeCommitTimeoutOverride = -1 }, true},
|
||||
"PeerGossipSleepDuration": {func(c *ConsensusConfig) { c.PeerGossipSleepDuration = time.Second }, false},
|
||||
"PeerGossipSleepDuration negative": {func(c *ConsensusConfig) { c.PeerGossipSleepDuration = -1 }, true},
|
||||
"PeerQueryMaj23SleepDuration": {func(c *ConsensusConfig) { c.PeerQueryMaj23SleepDuration = time.Second }, false},
|
||||
"PeerQueryMaj23SleepDuration negative": {func(c *ConsensusConfig) { c.PeerQueryMaj23SleepDuration = -1 }, true},
|
||||
"DoubleSignCheckHeight negative": {func(c *ConsensusConfig) { c.DoubleSignCheckHeight = -1 }, true},
|
||||
}
|
||||
for desc, tc := range testcases {
|
||||
tc := tc // appease linter
|
||||
|
||||
103
config/toml.go
103
config/toml.go
@@ -220,6 +220,33 @@ max-subscription-clients = {{ .RPC.MaxSubscriptionClients }}
|
||||
# to the estimated maximum number of broadcast_tx_commit calls per block.
|
||||
max-subscriptions-per-client = {{ .RPC.MaxSubscriptionsPerClient }}
|
||||
|
||||
# If true, disable the websocket interface to the RPC service. This has
|
||||
# the effect of disabling the /subscribe, /unsubscribe, and /unsubscribe_all
|
||||
# methods for event subscription.
|
||||
#
|
||||
# EXPERIMENTAL: This setting will be removed in Tendermint v0.37.
|
||||
experimental-disable-websocket = {{ .RPC.ExperimentalDisableWebsocket }}
|
||||
|
||||
# The time window size for the event log. All events up to this long before
|
||||
# the latest (up to EventLogMaxItems) will be available for subscribers to
|
||||
# fetch via the /events method. If 0 (the default) the event log and the
|
||||
# /events RPC method are disabled.
|
||||
event-log-window-size = "{{ .RPC.EventLogWindowSize }}"
|
||||
|
||||
# The maxiumum number of events that may be retained by the event log. If
|
||||
# this value is 0, no upper limit is set. Otherwise, items in excess of
|
||||
# this number will be discarded from the event log.
|
||||
#
|
||||
# Warning: This setting is a safety valve. Setting it too low may cause
|
||||
# subscribers to miss events. Try to choose a value higher than the
|
||||
# maximum worst-case expected event load within the chosen window size in
|
||||
# ordinary operation.
|
||||
#
|
||||
# For example, if the window size is 10 minutes and the node typically
|
||||
# averages 1000 events per ten minutes, but with occasional known spikes of
|
||||
# up to 2000, choose a value > 2000.
|
||||
event-log-max-items = {{ .RPC.EventLogMaxItems }}
|
||||
|
||||
# How long to wait for a tx to be committed during /broadcast_tx_commit.
|
||||
# WARNING: Using a value larger than 10s will result in increasing the
|
||||
# global HTTP write timeout, which applies to all connections and endpoints.
|
||||
@@ -423,32 +450,12 @@ fetchers = "{{ .StateSync.Fetchers }}"
|
||||
|
||||
wal-file = "{{ js .Consensus.WalPath }}"
|
||||
|
||||
# How long we wait for a proposal block before prevoting nil
|
||||
timeout-propose = "{{ .Consensus.TimeoutPropose }}"
|
||||
# How much timeout-propose increases with each round
|
||||
timeout-propose-delta = "{{ .Consensus.TimeoutProposeDelta }}"
|
||||
# How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil)
|
||||
timeout-prevote = "{{ .Consensus.TimeoutPrevote }}"
|
||||
# How much the timeout-prevote increases with each round
|
||||
timeout-prevote-delta = "{{ .Consensus.TimeoutPrevoteDelta }}"
|
||||
# How long we wait after receiving +2/3 precommits for “anything” (ie. not a single block or nil)
|
||||
timeout-precommit = "{{ .Consensus.TimeoutPrecommit }}"
|
||||
# How much the timeout-precommit increases with each round
|
||||
timeout-precommit-delta = "{{ .Consensus.TimeoutPrecommitDelta }}"
|
||||
# How long we wait after committing a block, before starting on the new
|
||||
# height (this gives us a chance to receive some more precommits, even
|
||||
# though we already have +2/3).
|
||||
timeout-commit = "{{ .Consensus.TimeoutCommit }}"
|
||||
|
||||
# How many blocks to look back to check existence of the node's consensus votes before joining consensus
|
||||
# When non-zero, the node will panic upon restart
|
||||
# if the same consensus key was used to sign {double-sign-check-height} last blocks.
|
||||
# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic.
|
||||
double-sign-check-height = {{ .Consensus.DoubleSignCheckHeight }}
|
||||
|
||||
# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
|
||||
skip-timeout-commit = {{ .Consensus.SkipTimeoutCommit }}
|
||||
|
||||
# EmptyBlocks mode and possible interval between empty blocks
|
||||
create-empty-blocks = {{ .Consensus.CreateEmptyBlocks }}
|
||||
create-empty-blocks-interval = "{{ .Consensus.CreateEmptyBlocksInterval }}"
|
||||
@@ -457,6 +464,50 @@ create-empty-blocks-interval = "{{ .Consensus.CreateEmptyBlocksInterval }}"
|
||||
peer-gossip-sleep-duration = "{{ .Consensus.PeerGossipSleepDuration }}"
|
||||
peer-query-maj23-sleep-duration = "{{ .Consensus.PeerQueryMaj23SleepDuration }}"
|
||||
|
||||
### Unsafe Timeout Overrides ###
|
||||
|
||||
# These fields provide temporary overrides for the Timeout consensus parameters.
|
||||
# Use of these parameters is strongly discouraged. Using these parameters may have serious
|
||||
# liveness implications for the validator and for the chain.
|
||||
#
|
||||
# These fields will be removed from the configuration file in the v0.37 release of Tendermint.
|
||||
# For additional information, see ADR-74:
|
||||
# https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-074-timeout-params.md
|
||||
|
||||
# This field provides an unsafe override of the Propose timeout consensus parameter.
|
||||
# This field configures how long the consensus engine will wait for a proposal block before prevoting nil.
|
||||
# If this field is set to a value greater than 0, it will take effect.
|
||||
# unsafe-propose-timeout-override = {{ .Consensus.UnsafeProposeTimeoutOverride }}
|
||||
|
||||
# This field provides an unsafe override of the ProposeDelta timeout consensus parameter.
|
||||
# This field configures how much the propose timeout increases with each round.
|
||||
# If this field is set to a value greater than 0, it will take effect.
|
||||
# unsafe-propose-timeout-delta-override = {{ .Consensus.UnsafeProposeTimeoutDeltaOverride }}
|
||||
|
||||
# This field provides an unsafe override of the Vote timeout consensus parameter.
|
||||
# This field configures how long the consensus engine will wait after
|
||||
# receiving +2/3 votes in a round.
|
||||
# If this field is set to a value greater than 0, it will take effect.
|
||||
# unsafe-vote-timeout-override = {{ .Consensus.UnsafeVoteTimeoutOverride }}
|
||||
|
||||
# This field provides an unsafe override of the VoteDelta timeout consensus parameter.
|
||||
# This field configures how much the vote timeout increases with each round.
|
||||
# If this field is set to a value greater than 0, it will take effect.
|
||||
# unsafe-vote-timeout-delta-override = {{ .Consensus.UnsafeVoteTimeoutDeltaOverride }}
|
||||
|
||||
# This field provides an unsafe override of the Commit timeout consensus parameter.
|
||||
# This field configures how long the consensus engine will wait after receiving
|
||||
# +2/3 precommits before beginning the next height.
|
||||
# If this field is set to a value greater than 0, it will take effect.
|
||||
# unsafe-commit-timeout-override = {{ .Consensus.UnsafeCommitTimeoutOverride }}
|
||||
|
||||
# This field provides an unsafe override of the BypassCommitTimeout consensus parameter.
|
||||
# This field configures if the consensus engine will wait for the full Commit timeout
|
||||
# before proceeding to the next height.
|
||||
# If this field is set to true, the consensus engine will proceed to the next height
|
||||
# as soon as the node has gathered votes from all of the validators on the network.
|
||||
# unsafe-bypass-commit-timeout-override =
|
||||
|
||||
#######################################################
|
||||
### Transaction Indexer Configuration Options ###
|
||||
#######################################################
|
||||
@@ -469,8 +520,8 @@ peer-query-maj23-sleep-duration = "{{ .Consensus.PeerQueryMaj23SleepDuration }}"
|
||||
# to decide which txs to index based on configuration set in the application.
|
||||
#
|
||||
# Options:
|
||||
# 1) "null"
|
||||
# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
|
||||
# 1) "null" (default) - no indexer services.
|
||||
# 2) "kv" - a simple indexer backed by key-value storage (see DBBackend)
|
||||
# 3) "psql" - the indexer services backed by PostgreSQL.
|
||||
# When "kv" or "psql" is chosen "tx.height" and "tx.hash" will always be indexed.
|
||||
indexer = [{{ range $i, $e := .TxIndex.Indexer }}{{if $i}}, {{end}}{{ printf "%q" $e}}{{end}}]
|
||||
@@ -575,6 +626,14 @@ var testGenesisFmt = `{
|
||||
"message_delay": "500000000",
|
||||
"precision": "10000000"
|
||||
},
|
||||
"timeout": {
|
||||
"propose": "30000000",
|
||||
"propose_delta": "50000",
|
||||
"vote": "30000000",
|
||||
"vote_delta": "50000",
|
||||
"commit": "10000000",
|
||||
"bypass_timeout_commit": true
|
||||
},
|
||||
"evidence": {
|
||||
"max_age_num_blocks": "100000",
|
||||
"max_age_duration": "172800000000000",
|
||||
|
||||
@@ -33,10 +33,6 @@ module.exports = {
|
||||
{
|
||||
"label": "v0.35",
|
||||
"key": "v0.35"
|
||||
},
|
||||
{
|
||||
"label": "master",
|
||||
"key": "master"
|
||||
}
|
||||
],
|
||||
topbar: {
|
||||
@@ -49,8 +45,10 @@ module.exports = {
|
||||
title: 'Resources',
|
||||
children: [
|
||||
{
|
||||
// TODO(creachadair): Figure out how to make this per-branch.
|
||||
// See: https://github.com/tendermint/tendermint/issues/7908
|
||||
title: 'RPC',
|
||||
path: 'https://docs.tendermint.com/master/rpc/',
|
||||
path: 'https://docs.tendermint.com/v0.35/rpc/',
|
||||
static: true
|
||||
},
|
||||
]
|
||||
@@ -162,6 +160,12 @@ module.exports = {
|
||||
{
|
||||
ga: 'UA-51029217-11'
|
||||
}
|
||||
],
|
||||
[
|
||||
'@vuepress/plugin-html-redirect',
|
||||
{
|
||||
countdown: 0
|
||||
}
|
||||
]
|
||||
]
|
||||
};
|
||||
|
||||
1
docs/.vuepress/redirects
Normal file
1
docs/.vuepress/redirects
Normal file
@@ -0,0 +1 @@
|
||||
/master/ /v0.35/
|
||||
@@ -21,7 +21,7 @@ Tendermint?](introduction/what-is-tendermint.md).
|
||||
|
||||
To get started quickly with an example application, see the [quick start guide](introduction/quick-start.md).
|
||||
|
||||
To learn about application development on Tendermint, see the [Application Blockchain Interface](https://github.com/tendermint/tendermint/tree/master/spec/abci).
|
||||
To learn about application development on Tendermint, see the [Application Blockchain Interface](../spec/abci).
|
||||
|
||||
For more details on using Tendermint, see the respective documentation for
|
||||
[Tendermint Core](tendermint-core/), [benchmarking and monitoring](tools/), and [network deployments](nodes/).
|
||||
|
||||
@@ -27,17 +27,17 @@ Usage:
|
||||
abci-cli [command]
|
||||
|
||||
Available Commands:
|
||||
batch Run a batch of abci commands against an application
|
||||
check_tx Validate a tx
|
||||
commit Commit the application state and return the Merkle root hash
|
||||
console Start an interactive abci console for multiple commands
|
||||
deliver_tx Deliver a new tx to the application
|
||||
kvstore ABCI demo example
|
||||
echo Have the application echo a message
|
||||
help Help about any command
|
||||
info Get some info about the application
|
||||
query Query the application state
|
||||
set_option Set an options on the application
|
||||
batch Run a batch of abci commands against an application
|
||||
check_tx Validate a tx
|
||||
commit Commit the application state and return the Merkle root hash
|
||||
console Start an interactive abci console for multiple commands
|
||||
finalize_block Send a set of transactions to the application
|
||||
kvstore ABCI demo example
|
||||
echo Have the application echo a message
|
||||
help Help about any command
|
||||
info Get some info about the application
|
||||
query Query the application state
|
||||
set_option Set an options on the application
|
||||
|
||||
Flags:
|
||||
--abci string socket or grpc (default "socket")
|
||||
@@ -53,7 +53,7 @@ Use "abci-cli [command] --help" for more information about a command.
|
||||
The `abci-cli` tool lets us send ABCI messages to our application, to
|
||||
help build and debug them.
|
||||
|
||||
The most important messages are `deliver_tx`, `check_tx`, and `commit`,
|
||||
The most important messages are `finalize_block`, `check_tx`, and `commit`,
|
||||
but there are others for convenience, configuration, and information
|
||||
purposes.
|
||||
|
||||
@@ -173,7 +173,7 @@ Try running these commands:
|
||||
-> code: OK
|
||||
-> data.hex: 0x0000000000000000
|
||||
|
||||
> deliver_tx "abc"
|
||||
> finalize_block "abc"
|
||||
-> code: OK
|
||||
|
||||
> info
|
||||
@@ -192,7 +192,7 @@ Try running these commands:
|
||||
-> value: abc
|
||||
-> value.hex: 616263
|
||||
|
||||
> deliver_tx "def=xyz"
|
||||
> finalize_block "def=xyz"
|
||||
-> code: OK
|
||||
|
||||
> commit
|
||||
@@ -207,8 +207,8 @@ Try running these commands:
|
||||
-> value.hex: 78797A
|
||||
```
|
||||
|
||||
Note that if we do `deliver_tx "abc"` it will store `(abc, abc)`, but if
|
||||
we do `deliver_tx "abc=efg"` it will store `(abc, efg)`.
|
||||
Note that if we do `finalize_block "abc"` it will store `(abc, abc)`, but if
|
||||
we do `finalize_block "abc=efg"` it will store `(abc, efg)`.
|
||||
|
||||
Similarly, you could put the commands in a file and run
|
||||
`abci-cli --verbose batch < myfile`.
|
||||
|
||||
@@ -67,6 +67,10 @@ Note the context/background should be written in the present tense.
|
||||
- [ADR-063: Privval-gRPC](./adr-063-privval-grpc.md)
|
||||
- [ADR-066: E2E-Testing](./adr-066-e2e-testing.md)
|
||||
- [ADR-072: Restore Requests for Comments](./adr-072-request-for-comments.md)
|
||||
- [ADR-077: Block Retention](./adr-077-block-retention.md)
|
||||
- [ADR-078: Non-zero Genesis](./adr-078-nonzero-genesis.md)
|
||||
- [ADR-079: ED25519 Verification](./adr-079-ed25519-verification.md)
|
||||
- [ADR-080: Reverse Sync](./adr-080-reverse-sync.md)
|
||||
|
||||
### Accepted
|
||||
|
||||
@@ -81,6 +85,11 @@ Note the context/background should be written in the present tense.
|
||||
- [ADR-067: Mempool Refactor](./adr-067-mempool-refactor.md)
|
||||
- [ADR-075: RPC Event Subscription Interface](./adr-075-rpc-subscription.md)
|
||||
- [ADR-076: Combine Spec and Tendermint Repositories](./adr-076-combine-spec-repo.md)
|
||||
- [ADR-081: Protocol Buffers Management](./adr-081-protobuf-mgmt.md)
|
||||
|
||||
### Deprecated
|
||||
|
||||
None
|
||||
|
||||
### Rejected
|
||||
|
||||
@@ -88,7 +97,6 @@ Note the context/background should be written in the present tense.
|
||||
- [ADR-029: Check-Tx-Consensus](./adr-029-check-tx-consensus.md)
|
||||
- [ADR-058: Event-Hashing](./adr-058-event-hashing.md)
|
||||
|
||||
|
||||
### Proposed
|
||||
|
||||
- [ADR-007: Trust-Metric-Usage](./adr-007-trust-metric-usage.md)
|
||||
|
||||
@@ -84,7 +84,7 @@ The linear verification algorithm requires downloading all headers
|
||||
between the `TrustHeight` and the `LatestHeight`. The lite client downloads the
|
||||
full header for the provided `TrustHeight` and then proceeds to download `N+1`
|
||||
headers and applies the [Tendermint validation
|
||||
rules](https://docs.tendermint.com/master/spec/light-client/verification/)
|
||||
rules](https://github.com/tendermint/tendermint/tree/master/spec/light-client/verification/README.md)
|
||||
to each block.
|
||||
|
||||
### Bisecting Verification
|
||||
|
||||
@@ -18,7 +18,7 @@ graceful here, but that's for another day.
|
||||
|
||||
It's possible to fool lite clients without there being a fork on the
|
||||
main chain - so called Fork-Lite. See the
|
||||
[fork accountability](https://docs.tendermint.com/master/spec/light-client/accountability/)
|
||||
[fork accountability](https://github.com/tendermint/tendermint/blob/master/spec/light-client/accountability/README.md)
|
||||
document for more details. For a sequential lite client, this can happen via
|
||||
equivocation or amnesia attacks. For a skipping lite client this can also happen
|
||||
via lunatic validator attacks. There must be some way for applications to punish
|
||||
|
||||
@@ -67,7 +67,7 @@ The 8 timeout parameters will be consolidated down to 6. These will be as follow
|
||||
parameters.
|
||||
* `TimeoutCommit`
|
||||
* Same as current `TimeoutCommit`.
|
||||
* `EnableTimeoutCommitBypass`
|
||||
* `BypassCommitTimeout`
|
||||
* Same as current `SkipTimeoutCommit`, renamed for clarity.
|
||||
|
||||
A safe default will be provided by Tendermint for each of these parameters and
|
||||
@@ -149,7 +149,7 @@ message TimeoutParams {
|
||||
google.protobuf.Duration vote = 3;
|
||||
google.protobuf.Duration vote_delta = 4;
|
||||
google.protobuf.Duration commit = 5;
|
||||
bool enable_commit_timeout_bypass = 6;
|
||||
bool bypass_commit_timeout = 6;
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
@@ -2,6 +2,8 @@
|
||||
|
||||
## Changelog
|
||||
|
||||
- 01-Mar-2022: Update long-polling interface (@creachadair).
|
||||
- 10-Feb-2022: Updates to reflect implementation.
|
||||
- 26-Jan-2022: Marked accepted.
|
||||
- 22-Jan-2022: Updated and expanded (@creachadair).
|
||||
- 20-Nov-2021: Initial draft (@creachadair).
|
||||
@@ -267,12 +269,12 @@ initial implementation will store the event log in-memory, and the operator
|
||||
will be given two per-node configuration settings. Note, these names are
|
||||
provisional:
|
||||
|
||||
- `[event-subscription] time-window`: A duration before present during which the
|
||||
node will retain event items published. Setting this value to zero disables
|
||||
event subscription.
|
||||
- `[rpc] event-log-window-size`: A duration before the latest published event,
|
||||
during which the node will retain event items published. Setting this value
|
||||
to zero disables event subscription.
|
||||
|
||||
- `[event-subscription] max-items`: A maximum number of event items that the
|
||||
node will retain within the time window. If the number of items exceeds this
|
||||
- `[rpc] event-log-max-items`: A maximum number of event items that the node
|
||||
will retain within the time window. If the number of items exceeds this
|
||||
value, the node discards the oldest items in the window. Setting this value
|
||||
to zero means that no limit is imposed on the number of items.
|
||||
|
||||
@@ -307,11 +309,11 @@ type EventParams struct {
|
||||
|
||||
// Return only items after this cursor. If empty, the limit is just
|
||||
// before the beginning of the event log.
|
||||
After string `json:"after_item"`
|
||||
After string `json:"after"`
|
||||
|
||||
// Return only items before this cursor. If empty, the limit is just
|
||||
// after the head of the event log.
|
||||
Before string `json:"before_item"`
|
||||
Before string `json:"before"`
|
||||
|
||||
// Wait for up to this long for events to be available.
|
||||
WaitTime time.Duration `json:"wait_time"`
|
||||
@@ -335,8 +337,8 @@ type Filter struct {
|
||||
The semantics of the request are as follows: An item in the event log is
|
||||
**eligible** for a query if:
|
||||
|
||||
- It is newer than the `after_item` cursor (if set).
|
||||
- It is older than the `before_item` cursor (if set).
|
||||
- It is newer than the `after` cursor (if set).
|
||||
- It is older than the `before` cursor (if set).
|
||||
- It matches the filter (if set).
|
||||
|
||||
Among the eligible items in the log, the server returns up to `max_results` of
|
||||
@@ -344,13 +346,13 @@ the newest items, in reverse order of cursor. If `max_results` is unset the
|
||||
server chooses a number to return, and will cap `max_results` at a sensible
|
||||
limit.
|
||||
|
||||
The `wait_time` parameter is used to effect polling. If `before_item` is empty,
|
||||
the server will wait for up to `wait_time` for additional items, if there are
|
||||
fewer than `max_results` eligible results in the log. If `wait_time` is zero,
|
||||
the server will return whatever eligible items are available immediately.
|
||||
The `wait_time` parameter is used to effect polling. If `before` is empty and
|
||||
no items are available, the server will wait for up to `wait_time` for matching
|
||||
items to arrive at the head of the log. If `wait_time` is zero or negative, the
|
||||
server will wait for a default (positive) interval.
|
||||
|
||||
If `before_item` non-empty, `wait_time` is ignored: new results are only added
|
||||
to the head of the log, so there is no need to wait. This allows the client to
|
||||
If `before` non-empty, `wait_time` is ignored: new results are only added to
|
||||
the head of the log, so there is no need to wait. This allows the client to
|
||||
poll for new data, and "page" backward through matching event items. This is
|
||||
discussed in more detail below.
|
||||
|
||||
@@ -372,11 +374,11 @@ type EventReply struct {
|
||||
|
||||
// The cursor of the oldest item in the log at the time of this reply,
|
||||
// or "" if the log is empty.
|
||||
Oldest string `json:"oldest_item"`
|
||||
Oldest string `json:"oldest"`
|
||||
|
||||
// The cursor of the newest item in the log at the time of this reply,
|
||||
// or "" if the log is empty.
|
||||
Newest string `json:"newest_item"`
|
||||
Newest string `json:"newest"`
|
||||
}
|
||||
|
||||
type EventItem struct {
|
||||
@@ -392,9 +394,9 @@ type EventItem struct {
|
||||
}
|
||||
```
|
||||
|
||||
The `oldest_item` and `newest_item` fields of the reply report the cursors of
|
||||
the oldest and newest items (of any kind) recorded in the event log at the time
|
||||
of the reply, or are `""` if the log is empty.
|
||||
The `oldest` and `newest` fields of the reply report the cursors of the oldest
|
||||
and newest items (of any kind) recorded in the event log at the time of the
|
||||
reply, or are `""` if the log is empty.
|
||||
|
||||
The `data` field contains the type-specific event datum. The datum carries any
|
||||
ABCI events that may have been defined.
|
||||
@@ -412,26 +414,26 @@ The semantics of the reply are as follows:
|
||||
- If `more` is true, there is at least one additional, older item in the
|
||||
event log that was not returned (in excess of `max_results`).
|
||||
|
||||
In this case the client can fetch the next page by setting `before_item`
|
||||
in a new request, to the cursor of the oldest item fetched (i.e., the
|
||||
last one in `items`).
|
||||
In this case the client can fetch the next page by setting `before` in a
|
||||
new request, to the cursor of the oldest item fetched (i.e., the last one
|
||||
in `items`).
|
||||
|
||||
- Otherwise (if `more` is false), all the matching results have been
|
||||
reported (pagination is complete).
|
||||
|
||||
- The first element of `items` identifies the newest item considered.
|
||||
Subsequent poll requests can set `after_item` to this cursor to skip
|
||||
items that were already retrieved.
|
||||
Subsequent poll requests can set `after` to this cursor to skip items
|
||||
that were already retrieved.
|
||||
|
||||
- If `items` is empty:
|
||||
|
||||
- If the `before_item` was set in the request, there are no further
|
||||
eligible items for this query in the log (pagination is complete).
|
||||
- If the `before` was set in the request, there are no further eligible
|
||||
items for this query in the log (pagination is complete).
|
||||
|
||||
This is just a safety case; the client can detect this without issuing
|
||||
another call by consulting the `more` field of the previous reply.
|
||||
|
||||
- If the `before_item` was empty in the request, no eligible items were
|
||||
- If the `before` was empty in the request, no eligible items were
|
||||
available before the `wait_time` expired. The client may poll again to
|
||||
wait for more event items.
|
||||
|
||||
@@ -453,12 +455,11 @@ crashes and connectivity issues:
|
||||
|
||||
1. In ordinary operation, clients will **long-poll** the head of the event
|
||||
log for new events matching their criteria (by setting a `wait_time` and
|
||||
no `before_item`).
|
||||
no `before`).
|
||||
|
||||
2. If there are more events than the client requested, or if the client needs
|
||||
to read older events to recover from a stall or crash, clients will
|
||||
**page** backward through the event log (by setting `before_item` and
|
||||
possibly `after_item`).
|
||||
**page** backward through the event log (by setting `before` and `after`).
|
||||
|
||||
- While the new API requires explicit polling by the client, it makes better
|
||||
use of the node's existing HTTP infrastructure (e.g., connection pools).
|
||||
@@ -479,7 +480,7 @@ crashes and connectivity issues:
|
||||
The initial implementation will do this by checking the tail of the event log
|
||||
after each new item is published. If the number of items in the log exceeds
|
||||
the item limit, it will delete oldest items until the log is under the limit;
|
||||
then discard any older than the time window before present.
|
||||
then discard any older than the time window before the latest.
|
||||
|
||||
To minimize coordination interference between the publisher (the event bus)
|
||||
and the subscribers (the `events` service handlers), the event log will be
|
||||
@@ -664,13 +665,14 @@ The following alternative approaches were considered:
|
||||
- [rpc: remove duplication of events when querying][i7273] (#7273)
|
||||
|
||||
[rfc006]: https://github.com/tendermint/tendermint/blob/master/docs/rfc/rfc-006-event-subscription.md
|
||||
[rpc-service]: https://docs.tendermint.com/master/rpc
|
||||
[rpc-service]: https://github.com/tendermint/tendermint/blob/master/rpc/openapi/openapi.yaml
|
||||
[query-grammar]: https://pkg.go.dev/github.com/tendermint/tendermint@master/internal/pubsub/query/syntax
|
||||
[ws]: https://datatracker.ietf.org/doc/html/rfc6455
|
||||
[jsonrpc2]: https://www.jsonrpc.org/specification
|
||||
[nginx]: https://nginx.org/en/docs/
|
||||
[fcgi]: http://www.mit.edu/~yandros/doc/specs/fcgi-spec.html
|
||||
[rp-ws]: https://nginx.org/en/docs/http/websocket.html
|
||||
<!-- markdown-link-check-disable-next-line -->
|
||||
[ng-xm]: https://www.nginx.com/resources/wiki/extending/
|
||||
[abci-event]: https://pkg.go.dev/github.com/tendermint/tendermint/abci/types#Event
|
||||
[rfc001]: https://github.com/tendermint/tendermint/blob/master/docs/rfc/rfc-001-storage-engine.rst
|
||||
|
||||
201
docs/architecture/adr-081-protobuf-mgmt.md
Normal file
201
docs/architecture/adr-081-protobuf-mgmt.md
Normal file
@@ -0,0 +1,201 @@
|
||||
# ADR 081: Protocol Buffers Management
|
||||
|
||||
## Changelog
|
||||
|
||||
- 2022-02-28: First draft
|
||||
|
||||
## Status
|
||||
|
||||
Accepted
|
||||
|
||||
[Tracking issue](https://github.com/tendermint/tendermint/issues/8121)
|
||||
|
||||
## Context
|
||||
|
||||
At present, we manage the [Protocol Buffers] schema files ("protos") that define
|
||||
our wire-level data formats within the Tendermint repository itself (see the
|
||||
[`proto`](../../proto/) directory). Recently, we have been making use of [Buf],
|
||||
both locally and in CI, in order to generate Go stubs, and lint and check
|
||||
`.proto` files for breaking changes.
|
||||
|
||||
The version of Buf used at the time of this decision was `v1beta1`, and it was
|
||||
discussed in [\#7975] and in weekly calls as to whether we should upgrade to
|
||||
`v1` and harmonize our approach with that used by the Cosmos SDK. The team
|
||||
managing the Cosmos SDK was primarily interested in having our protos versioned
|
||||
and easily accessible from the [Buf] registry.
|
||||
|
||||
The three main sets of stakeholders for the `.proto` files and their needs, as
|
||||
currently understood, are as follows.
|
||||
|
||||
1. Tendermint needs Go code generated from `.proto` files.
|
||||
2. Consumers of Tendermint's `.proto` files, specifically projects that want to
|
||||
interoperate with Tendermint and need to generate code for their own
|
||||
programming language, want to be able to access these files in a reliable and
|
||||
efficient way.
|
||||
3. The Tendermint Core team wants to provide stable interfaces that are as easy
|
||||
as possible to maintain, on which consumers can depend, and to be able to
|
||||
notify those consumers promptly when those interfaces change. To this end, we
|
||||
want to:
|
||||
1. Prevent any breaking changes from being introduced in minor/patch releases
|
||||
of Tendermint. Only major version updates should be able to contain
|
||||
breaking interface changes.
|
||||
2. Prevent generated code from diverging from the Protobuf schema files.
|
||||
|
||||
There was also discussion surrounding the notion of automated documentation
|
||||
generation and hosting, but it is not clear at this time whether this would be
|
||||
that valuable to any of our stakeholders. What will, of course, be valuable at
|
||||
minimum would be better documentation (in comments) of the `.proto` files
|
||||
themselves.
|
||||
|
||||
## Alternative Approaches
|
||||
|
||||
### Meeting stakeholders' needs
|
||||
|
||||
1. Go stub generation from protos. We could use:
|
||||
1. [Buf]. This approach has been rather cumbersome up to this point, and it
|
||||
is not clear what Buf really provides beyond that which `protoc` provides
|
||||
to justify the additional complexity in configuring Buf for stub
|
||||
generation.
|
||||
2. [protoc] - the Protocol Buffers compiler.
|
||||
2. Notification of breaking changes:
|
||||
1. Buf in CI for all pull requests to *release* branches only (and not on
|
||||
`master`).
|
||||
2. Buf in CI on every pull request to every branch (this was the case at the
|
||||
time of this decision, and the team decided that the signal-to-noise ratio
|
||||
for this approach was too low to be of value).
|
||||
3. `.proto` linting:
|
||||
1. Buf in CI on every pull request
|
||||
4. `.proto` formatting:
|
||||
1. [clang-format] locally and a [clang-format GitHub Action] in CI to check
|
||||
that files are formatted properly on every pull request.
|
||||
5. Sharing of `.proto` files in a versioned, reliable manner:
|
||||
1. Consumers could simply clone the Tendermint repository, check out a
|
||||
specific commit, tag or branch and manually copy out all of the `.proto`
|
||||
files they need. This requires no effort from the Tendermint Core team and
|
||||
will continue to be an option for consumers. The drawback of this approach
|
||||
is that it requires manual coding/scripting to implement and is brittle in
|
||||
the face of bigger changes.
|
||||
2. Uploading our `.proto` files to Buf's registry on every release. This is
|
||||
by far the most seamless for consumers of our `.proto` files, but requires
|
||||
the dependency on Buf. This has the additional benefit that the Buf
|
||||
registry will automatically [generate and host
|
||||
documentation][buf-docs-gen] for these protos.
|
||||
3. We could create a process that, upon release, creates a `.zip` file
|
||||
containing our `.proto` files.
|
||||
|
||||
### Popular alternatives to Buf
|
||||
|
||||
[Prototool] was not considered as it appears deprecated, and the ecosystem seems
|
||||
to be converging on Buf at this time.
|
||||
|
||||
### Tooling complexity
|
||||
|
||||
The more tools we have in our build/CI processes, the more complex and fragile
|
||||
repository/CI management becomes, and the longer it takes to onboard new team
|
||||
members. Maintainability is a core concern here.
|
||||
|
||||
### Buf sustainability and costs
|
||||
|
||||
One of the primary considerations regarding the usage of Buf is whether, for
|
||||
example, access to its registry will eventually become a
|
||||
paid-for/subscription-based service and whether this is valuable enough for us
|
||||
and the ecosystem to pay for such a service. At this time, it appears as though
|
||||
Buf will never charge for hosting open source projects' protos.
|
||||
|
||||
Another consideration was Buf's sustainability as a project - what happens when
|
||||
their resources run out? Will there be a strong and broad enough open source
|
||||
community to continue maintaining it?
|
||||
|
||||
### Local Buf usage options
|
||||
|
||||
Local usage of Buf (i.e. not in CI) can be accomplished in two ways:
|
||||
|
||||
1. Installing the relevant tools individually.
|
||||
2. By way of its [Docker image][buf-docker].
|
||||
|
||||
Local installation of Buf requires developers to manually keep their toolchains
|
||||
up-to-date. The Docker option comes with a number of complexities, including
|
||||
how the file system permissions of code generated by a Docker container differ
|
||||
between platforms (e.g. on Linux, Buf-generated code ends up being owned by
|
||||
`root`).
|
||||
|
||||
The trouble with the Docker-based approach is that we make use of the
|
||||
[gogoprotobuf] plugin for `protoc`. Continuing to use the Docker-based approach
|
||||
to using Buf will mean that we will have to continue building our own custom
|
||||
Docker image with embedded gogoprotobuf.
|
||||
|
||||
Along these lines, we could eventually consider coming up with a [Nix]- or
|
||||
[redo]-based approach to developer tooling to ensure tooling consistency across
|
||||
the team and for anyone who wants to be able to contribute to Tendermint.
|
||||
|
||||
## Decision
|
||||
|
||||
1. We will adopt Buf for now for proto generation, linting, breakage checking
|
||||
and its registry (mainly in CI, with optional usage locally).
|
||||
2. Failing CI when checking for breaking changes in `.proto` files will only
|
||||
happen when performing minor/patch releases.
|
||||
3. Local tooling will be favored over Docker-based tooling.
|
||||
|
||||
## Detailed Design
|
||||
|
||||
We currently aim to:
|
||||
|
||||
1. Update to Buf `v1` to facilitate linting, breakage checking and uploading to
|
||||
the Buf registry.
|
||||
2. Configure CI appropriately for proto management:
|
||||
1. Uploading protos to the Buf registry on every release (e.g. the
|
||||
[approach][cosmos-sdk-buf-registry-ci] used by the Cosmos SDK).
|
||||
2. Linting on every pull request (e.g. the
|
||||
[approach][cosmos-sdk-buf-linting-ci] used by the Cosmos SDK). The linter
|
||||
passing should be considered a requirement for accepting PRs.
|
||||
3. Checking for breaking changes in minor/patch version releases and failing
|
||||
CI accordingly - see [\#8003].
|
||||
4. Add [clang-format GitHub Action] to check `.proto` file formatting. Format
|
||||
checking should be considered a requirement for accepting PRs.
|
||||
3. Update the Tendermint [`Makefile`](../../Makefile) to primarily facilitate
|
||||
local Protobuf stub generation, linting, formatting and breaking change
|
||||
checking. More specifically:
|
||||
1. This includes removing the dependency on Docker and introducing the
|
||||
dependency on local toolchain installation. CI-based equivalents, where
|
||||
relevant, will rely on specific GitHub Actions instead of the Makefile.
|
||||
2. Go code generation will rely on `protoc` directly.
|
||||
|
||||
## Consequences
|
||||
|
||||
### Positive
|
||||
|
||||
- We will still offer Go stub generation, proto linting and breakage checking.
|
||||
- Breakage checking will only happen on minor/patch releases to increase the
|
||||
signal-to-noise ratio in CI.
|
||||
- Versioned protos will be made available via Buf's registry upon every release.
|
||||
|
||||
### Negative
|
||||
|
||||
- Developers/contributors will need to install the relevant Protocol
|
||||
Buffers-related tooling (Buf, gogoprotobuf, clang-format) locally in order to
|
||||
build, lint, format and check `.proto` files for breaking changes.
|
||||
|
||||
### Neutral
|
||||
|
||||
## References
|
||||
|
||||
- [Protocol Buffers]
|
||||
- [Buf]
|
||||
- [\#7975]
|
||||
- [protoc] - The Protocol Buffers compiler
|
||||
|
||||
[Protocol Buffers]: https://developers.google.com/protocol-buffers
|
||||
[Buf]: https://buf.build/
|
||||
[\#7975]: https://github.com/tendermint/tendermint/pull/7975
|
||||
[protoc]: https://github.com/protocolbuffers/protobuf
|
||||
[clang-format]: https://clang.llvm.org/docs/ClangFormat.html
|
||||
[clang-format GitHub Action]: https://github.com/marketplace/actions/clang-format-github-action
|
||||
[buf-docker]: https://hub.docker.com/r/bufbuild/buf
|
||||
[cosmos-sdk-buf-registry-ci]: https://github.com/cosmos/cosmos-sdk/blob/e6571906043b6751951a42b6546431b1c38b05bd/.github/workflows/proto-registry.yml
|
||||
[cosmos-sdk-buf-linting-ci]: https://github.com/cosmos/cosmos-sdk/blob/e6571906043b6751951a42b6546431b1c38b05bd/.github/workflows/proto.yml#L15
|
||||
[\#8003]: https://github.com/tendermint/tendermint/issues/8003
|
||||
[Nix]: https://nixos.org/
|
||||
[gogoprotobuf]: https://github.com/gogo/protobuf
|
||||
[Prototool]: https://github.com/uber/prototool
|
||||
[buf-docs-gen]: https://docs.buf.build/bsr/documentation
|
||||
[redo]: https://redo.readthedocs.io/en/latest/
|
||||
@@ -6,12 +6,30 @@
|
||||
|
||||
## Status
|
||||
|
||||
> A decision may be "proposed" if it hasn't been agreed upon yet, or "accepted"
|
||||
> once it is agreed upon. Once the ADR has been implemented mark the ADR as
|
||||
> "implemented". If a later ADR changes or reverses a decision, it may be marked
|
||||
> as "deprecated" or "superseded" with a reference to its replacement.
|
||||
> An architecture decision is considered "proposed" when a PR containing the ADR
|
||||
> is submitted. When merged, an ADR must have a status associated with it, which
|
||||
> must be one of: "Accepted", "Rejected", "Deprecated" or "Superseded".
|
||||
>
|
||||
> An accepted ADR's implementation status must be tracked via a tracking issue,
|
||||
> milestone or project board (only one of these is necessary). For example:
|
||||
>
|
||||
> Accepted
|
||||
>
|
||||
> [Tracking issue](https://github.com/tendermint/tendermint/issues/123)
|
||||
> [Milestone](https://github.com/tendermint/tendermint/milestones/123)
|
||||
> [Project board](https://github.com/orgs/tendermint/projects/123)
|
||||
>
|
||||
> Rejected ADRs are captured as a record of recommendations that we specifically
|
||||
> do not (and possibly never) want to implement. The ADR itself must, for
|
||||
> posterity, include reasoning as to why it was rejected.
|
||||
>
|
||||
> If an ADR is deprecated, simply write "Deprecated" in this section. If an ADR
|
||||
> is superseded by one or more other ADRs, provide a local reference to those
|
||||
> ADRs, e.g.:
|
||||
>
|
||||
> Superseded by [ADR 123](./adr-123.md)
|
||||
|
||||
{Deprecated|Declined|Accepted|Implemented}
|
||||
Accepted | Rejected | Deprecated | Superseded by
|
||||
|
||||
## Context
|
||||
|
||||
|
||||
@@ -68,10 +68,10 @@ Tendermint is in essence similar software, but with two key differences:
|
||||
|
||||
- It is Byzantine Fault Tolerant, meaning it can only tolerate up to a
|
||||
1/3 of failures, but those failures can include arbitrary behaviour -
|
||||
including hacking and malicious attacks.
|
||||
- It does not specify a particular application, like a fancy key-value
|
||||
store. Instead, it focuses on arbitrary state machine replication,
|
||||
so developers can build the application logic that's right for them,
|
||||
including hacking and malicious attacks.
|
||||
- It does not specify a particular application, like a fancy key-value
|
||||
store. Instead, it focuses on arbitrary state machine replication,
|
||||
so developers can build the application logic that's right for them,
|
||||
from key-value store to cryptocurrency to e-voting platform and beyond.
|
||||
|
||||
### Bitcoin, Ethereum, etc
|
||||
@@ -104,12 +104,10 @@ to Tendermint, but is more opinionated about how the state is managed,
|
||||
and requires that all application behaviour runs in potentially many
|
||||
docker containers, modules it calls "chaincode". It uses an
|
||||
implementation of [PBFT](http://pmg.csail.mit.edu/papers/osdi99.pdf).
|
||||
from a team at IBM that is [augmented to handle potentially
|
||||
non-deterministic
|
||||
chaincode](https://www.zurich.ibm.com/~cca/papers/sieve.pdf). It is
|
||||
possible to implement this docker-based behaviour as an ABCI app in
|
||||
Tendermint, though extending Tendermint to handle non-determinism
|
||||
remains for future work.
|
||||
from a team at IBM that is augmented to handle potentially non-deterministic
|
||||
chaincode. It is possible to implement this docker-based behaviour as an ABCI app
|
||||
in Tendermint, though extending Tendermint to handle non-determinism remains
|
||||
for future work.
|
||||
|
||||
[Burrow](https://github.com/hyperledger/burrow) is an implementation of
|
||||
the Ethereum Virtual Machine and Ethereum transaction mechanics, with
|
||||
|
||||
@@ -16,7 +16,8 @@ the parameters set with their default values. It will look something
|
||||
like the file below, however, double check by inspecting the
|
||||
`config.toml` created with your version of `tendermint` installed:
|
||||
|
||||
```toml# This is a TOML config file.
|
||||
```toml
|
||||
# This is a TOML config file.
|
||||
# For more information, see https://github.com/toml-lang/toml
|
||||
|
||||
# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or
|
||||
@@ -33,11 +34,10 @@ like the file below, however, double check by inspecting the
|
||||
proxy-app = "tcp://127.0.0.1:26658"
|
||||
|
||||
# A custom human readable name for this node
|
||||
moniker = "ape"
|
||||
moniker = "sidewinder"
|
||||
|
||||
|
||||
# Mode of Node: full | validator | seed (default: "validator")
|
||||
# * validator node (default)
|
||||
# Mode of Node: full | validator | seed
|
||||
# * validator node
|
||||
# - all reactors
|
||||
# - with priv_validator_key.json, priv_validator_state.json
|
||||
# * full node
|
||||
@@ -48,11 +48,6 @@ moniker = "ape"
|
||||
# - No priv_validator_key.json, priv_validator_state.json
|
||||
mode = "validator"
|
||||
|
||||
# If this node is many blocks behind the tip of the chain, FastSync
|
||||
# allows them to catchup quickly by downloading blocks in parallel
|
||||
# and verifying their commits
|
||||
fast-sync = true
|
||||
|
||||
# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb
|
||||
# * goleveldb (github.com/syndtr/goleveldb - most popular implementation)
|
||||
# - pure go
|
||||
@@ -120,10 +115,10 @@ laddr = ""
|
||||
client-certificate-file = ""
|
||||
|
||||
# Client key generated while creating certificates for secure connection
|
||||
validator-client-key-file = ""
|
||||
client-key-file = ""
|
||||
|
||||
# Path to the Root Certificate Authority used to sign both client and server certificates
|
||||
certificate-authority = ""
|
||||
root-ca-file = ""
|
||||
|
||||
|
||||
#######################################################################
|
||||
@@ -149,26 +144,10 @@ cors-allowed-methods = ["HEAD", "GET", "POST", ]
|
||||
# A list of non simple headers the client is allowed to use with cross-domain requests
|
||||
cors-allowed-headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ]
|
||||
|
||||
# TCP or UNIX socket address for the gRPC server to listen on
|
||||
# NOTE: This server only supports /broadcast_tx_commit
|
||||
# Deprecated gRPC in the RPC layer of Tendermint will be deprecated in 0.36.
|
||||
grpc-laddr = ""
|
||||
|
||||
# Maximum number of simultaneous connections.
|
||||
# Does not include RPC (HTTP&WebSocket) connections. See max-open-connections
|
||||
# If you want to accept a larger number than the default, make sure
|
||||
# you increase your OS limits.
|
||||
# 0 - unlimited.
|
||||
# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
|
||||
# 1024 - 40 - 10 - 50 = 924 = ~900
|
||||
# Deprecated gRPC in the RPC layer of Tendermint will be deprecated in 0.36.
|
||||
grpc-max-open-connections = 900
|
||||
|
||||
# Activate unsafe RPC commands like /dial-seeds and /unsafe-flush-mempool
|
||||
unsafe = false
|
||||
|
||||
# Maximum number of simultaneous connections (including WebSocket).
|
||||
# Does not include gRPC connections. See grpc-max-open-connections
|
||||
# If you want to accept a larger number than the default, make sure
|
||||
# you increase your OS limits.
|
||||
# 0 - unlimited.
|
||||
@@ -182,10 +161,37 @@ max-open-connections = 900
|
||||
max-subscription-clients = 100
|
||||
|
||||
# Maximum number of unique queries a given client can /subscribe to
|
||||
# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to
|
||||
# the estimated # maximum number of broadcast_tx_commit calls per block.
|
||||
# If you're using a Local RPC client and /broadcast_tx_commit, set this
|
||||
# to the estimated maximum number of broadcast_tx_commit calls per block.
|
||||
max-subscriptions-per-client = 5
|
||||
|
||||
# If true, disable the websocket interface to the RPC service. This has
|
||||
# the effect of disabling the /subscribe, /unsubscribe, and /unsubscribe_all
|
||||
# methods for event subscription.
|
||||
#
|
||||
# EXPERIMENTAL: This setting will be removed in Tendermint v0.37.
|
||||
experimental-disable-websocket = false
|
||||
|
||||
# The time window size for the event log. All events up to this long before
|
||||
# the latest (up to EventLogMaxItems) will be available for subscribers to
|
||||
# fetch via the /events method. If 0 (the default) the event log and the
|
||||
# /events RPC method are disabled.
|
||||
event-log-window-size = "0s"
|
||||
|
||||
# The maximum number of events that may be retained by the event log. If
|
||||
# this value is 0, no upper limit is set. Otherwise, items in excess of
|
||||
# this number will be discarded from the event log.
|
||||
#
|
||||
# Warning: This setting is a safety valve. Setting it too low may cause
|
||||
# subscribers to miss events. Try to choose a value higher than the
|
||||
# maximum worst-case expected event load within the chosen window size in
|
||||
# ordinary operation.
|
||||
#
|
||||
# For example, if the window size is 10 minutes and the node typically
|
||||
# averages 1000 events per ten minutes, but with occasional known spikes of
|
||||
# up to 2000, choose a value > 2000.
|
||||
event-log-max-items = 0
|
||||
|
||||
# How long to wait for a tx to be committed during /broadcast_tx_commit.
|
||||
# WARNING: Using a value larger than 10s will result in increasing the
|
||||
# global HTTP write timeout, which applies to all connections and endpoints.
|
||||
@@ -252,63 +258,12 @@ persistent-peers = ""
|
||||
# UPNP port forwarding
|
||||
upnp = false
|
||||
|
||||
# Path to address book
|
||||
# TODO: Remove once p2p refactor is complete
|
||||
# ref: https:#github.com/tendermint/tendermint/issues/5670
|
||||
addr-book-file = "config/addrbook.json"
|
||||
|
||||
# Set true for strict address routability rules
|
||||
# Set false for private or local networks
|
||||
addr-book-strict = true
|
||||
|
||||
# Maximum number of inbound peers
|
||||
#
|
||||
# TODO: Remove once p2p refactor is complete in favor of MaxConnections.
|
||||
# ref: https://github.com/tendermint/tendermint/issues/5670
|
||||
max-num-inbound-peers = 40
|
||||
|
||||
# Maximum number of outbound peers to connect to, excluding persistent peers
|
||||
#
|
||||
# TODO: Remove once p2p refactor is complete in favor of MaxConnections.
|
||||
# ref: https://github.com/tendermint/tendermint/issues/5670
|
||||
max-num-outbound-peers = 10
|
||||
|
||||
# Maximum number of connections (inbound and outbound).
|
||||
max-connections = 64
|
||||
|
||||
# Rate limits the number of incoming connection attempts per IP address.
|
||||
max-incoming-connection-attempts = 100
|
||||
|
||||
# List of node IDs, to which a connection will be (re)established ignoring any existing limits
|
||||
# TODO: Remove once p2p refactor is complete
|
||||
# ref: https:#github.com/tendermint/tendermint/issues/5670
|
||||
unconditional-peer-ids = ""
|
||||
|
||||
# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used)
|
||||
# TODO: Remove once p2p refactor is complete
|
||||
# ref: https:#github.com/tendermint/tendermint/issues/5670
|
||||
persistent-peers-max-dial-period = "0s"
|
||||
|
||||
# Time to wait before flushing messages out on the connection
|
||||
# TODO: Remove once p2p refactor is complete
|
||||
# ref: https:#github.com/tendermint/tendermint/issues/5670
|
||||
flush-throttle-timeout = "100ms"
|
||||
|
||||
# Maximum size of a message packet payload, in bytes
|
||||
# TODO: Remove once p2p refactor is complete
|
||||
# ref: https:#github.com/tendermint/tendermint/issues/5670
|
||||
max-packet-msg-payload-size = 1400
|
||||
|
||||
# Rate at which packets can be sent, in bytes/second
|
||||
# TODO: Remove once p2p refactor is complete
|
||||
# ref: https:#github.com/tendermint/tendermint/issues/5670
|
||||
send-rate = 5120000
|
||||
|
||||
# Rate at which packets can be received, in bytes/second
|
||||
# TODO: Remove once p2p refactor is complete
|
||||
# ref: https:#github.com/tendermint/tendermint/issues/5670
|
||||
recv-rate = 5120000
|
||||
|
||||
# Set true to enable the peer-exchange reactor
|
||||
pex = true
|
||||
|
||||
@@ -323,16 +278,28 @@ allow-duplicate-ip = false
|
||||
handshake-timeout = "20s"
|
||||
dial-timeout = "3s"
|
||||
|
||||
# Time to wait before flushing messages out on the connection
|
||||
# TODO: Remove once MConnConnection is removed.
|
||||
flush-throttle-timeout = "100ms"
|
||||
|
||||
# Maximum size of a message packet payload, in bytes
|
||||
# TODO: Remove once MConnConnection is removed.
|
||||
max-packet-msg-payload-size = 1400
|
||||
|
||||
# Rate at which packets can be sent, in bytes/second
|
||||
# TODO: Remove once MConnConnection is removed.
|
||||
send-rate = 5120000
|
||||
|
||||
# Rate at which packets can be received, in bytes/second
|
||||
# TODO: Remove once MConnConnection is removed.
|
||||
recv-rate = 5120000
|
||||
|
||||
|
||||
#######################################################
|
||||
### Mempool Configuration Option ###
|
||||
#######################################################
|
||||
[mempool]
|
||||
|
||||
# Mempool version to use:
|
||||
# 1) "v0" - The legacy non-prioritized mempool reactor.
|
||||
# 2) "v1" (default) - The prioritized mempool reactor.
|
||||
version = "v1"
|
||||
|
||||
recheck = true
|
||||
broadcast = true
|
||||
|
||||
@@ -388,22 +355,30 @@ ttl-num-blocks = 0
|
||||
# starting from the height of the snapshot.
|
||||
enable = false
|
||||
|
||||
# RPC servers (comma-separated) for light client verification of the synced state machine and
|
||||
# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding
|
||||
# header hash obtained from a trusted source, and a period during which validators can be trusted.
|
||||
#
|
||||
# For Cosmos SDK-based chains, trust-period should usually be about 2/3 of the unbonding time (~2
|
||||
# weeks) during which they can be financially punished (slashed) for misbehavior.
|
||||
# State sync uses light client verification to verify state. This can be done either through the
|
||||
# P2P layer or RPC layer. Set this to true to use the P2P layer. If false (default), RPC layer
|
||||
# will be used.
|
||||
use-p2p = false
|
||||
|
||||
# If using RPC, at least two addresses need to be provided. They should be compatible with net.Dial,
|
||||
# for example: "host.example.com:2125"
|
||||
rpc-servers = ""
|
||||
|
||||
# The hash and height of a trusted block. Must be within the trust-period.
|
||||
trust-height = 0
|
||||
trust-hash = ""
|
||||
|
||||
# The trust period should be set so that Tendermint can detect and gossip misbehavior before
|
||||
# it is considered expired. For chains based on the Cosmos SDK, one day less than the unbonding
|
||||
# period should suffice.
|
||||
trust-period = "168h0m0s"
|
||||
|
||||
# Time to spend discovering snapshots before initiating a restore.
|
||||
discovery-time = "15s"
|
||||
|
||||
# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp).
|
||||
# Will create a new, randomly named directory within, and remove it when done.
|
||||
# Temporary directory for state sync snapshot chunks, defaults to os.TempDir().
|
||||
# The synchronizer will create a new, randomly named directory within this directory
|
||||
# and remove it when the sync is complete.
|
||||
temp-dir = ""
|
||||
|
||||
# The timeout duration before re-requesting a chunk, possibly from a different
|
||||
@@ -413,21 +388,6 @@ chunk-request-timeout = "15s"
|
||||
# The number of concurrent chunk and block fetchers to run (default: 4).
|
||||
fetchers = "4"
|
||||
|
||||
#######################################################
|
||||
### Block Sync Configuration Connections ###
|
||||
#######################################################
|
||||
[blocksync]
|
||||
|
||||
# If this node is many blocks behind the tip of the chain, BlockSync
|
||||
# allows them to catchup quickly by downloading blocks in parallel
|
||||
# and verifying their commits
|
||||
enable = true
|
||||
|
||||
# Block Sync version to use:
|
||||
# 1) "v0" (default) - the standard block sync implementation
|
||||
# 2) "v2" - DEPRECATED, please use v0
|
||||
version = "v0"
|
||||
|
||||
#######################################################
|
||||
### Consensus Configuration Options ###
|
||||
#######################################################
|
||||
@@ -435,32 +395,12 @@ version = "v0"
|
||||
|
||||
wal-file = "data/cs.wal/wal"
|
||||
|
||||
# How long we wait for a proposal block before prevoting nil
|
||||
timeout-propose = "3s"
|
||||
# How much timeout-propose increases with each round
|
||||
timeout-propose-delta = "500ms"
|
||||
# How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil)
|
||||
timeout-prevote = "1s"
|
||||
# How much the timeout-prevote increases with each round
|
||||
timeout-prevote-delta = "500ms"
|
||||
# How long we wait after receiving +2/3 precommits for “anything” (ie. not a single block or nil)
|
||||
timeout-precommit = "1s"
|
||||
# How much the timeout-precommit increases with each round
|
||||
timeout-precommit-delta = "500ms"
|
||||
# How long we wait after committing a block, before starting on the new
|
||||
# height (this gives us a chance to receive some more precommits, even
|
||||
# though we already have +2/3).
|
||||
timeout-commit = "1s"
|
||||
|
||||
# How many blocks to look back to check existence of the node's consensus votes before joining consensus
|
||||
# When non-zero, the node will panic upon restart
|
||||
# if the same consensus key was used to sign {double-sign-check-height} last blocks.
|
||||
# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic.
|
||||
double-sign-check-height = 0
|
||||
|
||||
# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
|
||||
skip-timeout-commit = false
|
||||
|
||||
# EmptyBlocks mode and possible interval between empty blocks
|
||||
create-empty-blocks = true
|
||||
create-empty-blocks-interval = "0s"
|
||||
@@ -469,6 +409,50 @@ create-empty-blocks-interval = "0s"
|
||||
peer-gossip-sleep-duration = "100ms"
|
||||
peer-query-maj23-sleep-duration = "2s"
|
||||
|
||||
### Unsafe Timeout Overrides ###
|
||||
|
||||
# These fields provide temporary overrides for the Timeout consensus parameters.
|
||||
# Use of these parameters is strongly discouraged. Using these parameters may have serious
|
||||
# liveness implications for the validator and for the chain.
|
||||
#
|
||||
# These fields will be removed from the configuration file in the v0.37 release of Tendermint.
|
||||
# For additional information, see ADR-74:
|
||||
# https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-074-timeout-params.md
|
||||
|
||||
# This field provides an unsafe override of the Propose timeout consensus parameter.
|
||||
# This field configures how long the consensus engine will wait for a proposal block before prevoting nil.
|
||||
# If this field is set to a value greater than 0, it will take effect.
|
||||
# unsafe-propose-timeout-override = 0s
|
||||
|
||||
# This field provides an unsafe override of the ProposeDelta timeout consensus parameter.
|
||||
# This field configures how much the propose timeout increases with each round.
|
||||
# If this field is set to a value greater than 0, it will take effect.
|
||||
# unsafe-propose-timeout-delta-override = 0s
|
||||
|
||||
# This field provides an unsafe override of the Vote timeout consensus parameter.
|
||||
# This field configures how long the consensus engine will wait after
|
||||
# receiving +2/3 votes in a round.
|
||||
# If this field is set to a value greater than 0, it will take effect.
|
||||
# unsafe-vote-timeout-override = 0s
|
||||
|
||||
# This field provides an unsafe override of the VoteDelta timeout consensus parameter.
|
||||
# This field configures how much the vote timeout increases with each round.
|
||||
# If this field is set to a value greater than 0, it will take effect.
|
||||
# unsafe-vote-timeout-delta-override = 0s
|
||||
|
||||
# This field provides an unsafe override of the Commit timeout consensus parameter.
|
||||
# This field configures how long the consensus engine will wait after receiving
|
||||
# +2/3 precommits before beginning the next height.
|
||||
# If this field is set to a value greater than 0, it will take effect.
|
||||
# unsafe-commit-timeout-override = 0s
|
||||
|
||||
# This field provides an unsafe override of the BypassCommitTimeout consensus parameter.
|
||||
# This field configures if the consensus engine will wait for the full Commit timeout
|
||||
# before proceeding to the next height.
|
||||
# If this field is set to true, the consensus engine will proceed to the next height
|
||||
# as soon as the node has gathered votes from all of the validators on the network.
|
||||
# unsafe-bypass-commit-timeout-override =
|
||||
|
||||
#######################################################
|
||||
### Transaction Indexer Configuration Options ###
|
||||
#######################################################
|
||||
@@ -543,46 +527,6 @@ transactions every `create-empty-blocks-interval`. For instance, with
|
||||
Tendermint will only create blocks if there are transactions, or after waiting
|
||||
30 seconds without receiving any transactions.
|
||||
|
||||
## Consensus timeouts explained
|
||||
|
||||
There's a variety of information about timeouts in [Running in
|
||||
production](../tendermint-core/running-in-production.md)
|
||||
|
||||
You can also find more detailed technical explanation in the spec: [The latest
|
||||
gossip on BFT consensus](https://arxiv.org/abs/1807.04938).
|
||||
|
||||
```toml
|
||||
[consensus]
|
||||
...
|
||||
|
||||
timeout-propose = "3s"
|
||||
timeout-propose-delta = "500ms"
|
||||
timeout-prevote = "1s"
|
||||
timeout-prevote-delta = "500ms"
|
||||
timeout-precommit = "1s"
|
||||
timeout-precommit-delta = "500ms"
|
||||
timeout-commit = "1s"
|
||||
```
|
||||
|
||||
Note that in a successful round, the only timeout that we absolutely wait no
|
||||
matter what is `timeout-commit`.
|
||||
|
||||
Here's a brief summary of the timeouts:
|
||||
|
||||
- `timeout-propose` = how long we wait for a proposal block before prevoting
|
||||
nil
|
||||
- `timeout-propose-delta` = how much timeout-propose increases with each round
|
||||
- `timeout-prevote` = how long we wait after receiving +2/3 prevotes for
|
||||
anything (ie. not a single block or nil)
|
||||
- `timeout-prevote-delta` = how much the timeout-prevote increases with each
|
||||
round
|
||||
- `timeout-precommit` = how long we wait after receiving +2/3 precommits for
|
||||
anything (ie. not a single block or nil)
|
||||
- `timeout-precommit-delta` = how much the timeout-precommit increases with
|
||||
each round
|
||||
- `timeout-commit` = how long we wait after committing a block, before starting
|
||||
on the new height (this gives us a chance to receive some more precommits,
|
||||
even though we already have +2/3)
|
||||
|
||||
## P2P settings
|
||||
|
||||
@@ -594,7 +538,7 @@ This section will cover settings within the p2p section of the `config.toml`.
|
||||
- `pex` = turns the peer exchange reactor on or off. Validator node will want the `pex` turned off so it would not begin gossiping to unknown peers on the network. PeX can also be turned off for statically configured networks with fixed network connectivity. For full nodes on open, dynamic networks, it should be turned on.
|
||||
- `private-peer-ids` = is a comma-separated list of node ids that will _not_ be exposed to other peers (i.e., you will not tell other peers about the ids in this list). This can be filled with a validator's node id.
|
||||
|
||||
Recently the Tendermint Team conducted a refactor of the p2p layer. This lead to multiple config paramters being deprecated and/or replaced.
|
||||
Recently the Tendermint Team conducted a refactor of the p2p layer. This led to multiple config parameters being deprecated and/or replaced.
|
||||
|
||||
We will cover the new and deprecated parameters below.
|
||||
### New Parameters
|
||||
@@ -606,7 +550,7 @@ There are three new parameters, which are enabled if use-legacy is set to false.
|
||||
- `max-connections` = is the max amount of allowed inbound and outbound connections.
|
||||
### Deprecated Parameters
|
||||
|
||||
> Note: For Tendermint 0.35, there are two p2p implementations. The old version is used by deafult with the deprecated fields. The new implementation uses different config parameters, explained above.
|
||||
> Note: For Tendermint 0.35, there are two p2p implementations. The old version is used by default with the deprecated fields. The new implementation uses different config parameters, explained above.
|
||||
|
||||
- `max-num-inbound-peers` = is the maximum number of peers you will accept inbound connections from at one time (where they dial your address and initiate the connection). *This was replaced by `max-connections`*
|
||||
- `max-num-outbound-peers` = is the maximum number of peers you will initiate outbound connects to at one time (where you dial their address and initiate the connection).*This was replaced by `max-connections`*
|
||||
@@ -648,3 +592,27 @@ Example:
|
||||
```shell
|
||||
$ psql ... -f state/indexer/sink/psql/schema.sql
|
||||
```
|
||||
|
||||
## Unsafe Consensus Timeout Overrides
|
||||
|
||||
Tendermint version v0.36 provides a set of unsafe overrides for the consensus
|
||||
timing parameters. These parameters are provided as a safety measure in case of
|
||||
unusual timing issues during the upgrade to v0.36 so that an operator may
|
||||
override the timings for a single node. These overrides will completely be
|
||||
removed in Tendermint v0.37.
|
||||
|
||||
- `unsafe-propose-override`: How long the Tendermint consensus engine will wait
|
||||
for a proposal block before prevoting nil.
|
||||
- `unsafe-propose-delta-override`: How much the propose timeout increases with
|
||||
each round.
|
||||
- `unsafe-vote-override`: How long the consensus engine will wait after
|
||||
receiving +2/3 votes in a round.
|
||||
- `unsafe-vote-delta-override`: How much the vote timeout increases with each
|
||||
round.
|
||||
- `unsafe-commit-override`: How long the consensus engine will wait after
|
||||
receiving +2/3 precommits before beginning the next height.
|
||||
- `unsafe-bypass-commit-timeout-override`: Configures if the consensus engine
|
||||
will wait for the full commit timeout before proceeding to the next height. If
|
||||
this field is set to true, the consensus engine will proceed to the next
|
||||
height as soon as the node has gathered votes from all of the validators on
|
||||
the network.
|
||||
|
||||
@@ -40,6 +40,7 @@ The following metrics are available:
|
||||
| consensus_fast_syncing | gauge | | either 0 (not fast syncing) or 1 (syncing) |
|
||||
| consensus_state_syncing | gauge | | either 0 (not state syncing) or 1 (syncing) |
|
||||
| consensus_block_size_bytes | Gauge | | Block size in bytes |
|
||||
| evidence_pool_num_evidence | Gauge | | Number of evidence in the evidence pool
|
||||
| p2p_peers | Gauge | | Number of peers node's connected to |
|
||||
| p2p_peer_receive_bytes_total | counter | peer_id, chID | number of bytes per channel received from a given peer |
|
||||
| p2p_peer_send_bytes_total | counter | peer_id, chID | number of bytes per channel sent to a given peer |
|
||||
|
||||
14311
docs/package-lock.json
generated
14311
docs/package-lock.json
generated
File diff suppressed because it is too large
Load Diff
@@ -7,6 +7,7 @@
|
||||
"vuepress-theme-cosmos": "^1.0.183"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@vuepress/plugin-html-redirect": "^0.1.4",
|
||||
"watchpack": "^2.3.1"
|
||||
},
|
||||
"scripts": {
|
||||
|
||||
261
docs/rfc/rfc-015-abci++-tx-mutation.md
Normal file
261
docs/rfc/rfc-015-abci++-tx-mutation.md
Normal file
@@ -0,0 +1,261 @@
|
||||
# RFC 015: ABCI++ TX Mutation
|
||||
|
||||
## Changelog
|
||||
|
||||
- 23-Feb-2022: Initial draft (@williambanfield).
|
||||
- 28-Feb-2022: Revised draft (@williambanfield).
|
||||
|
||||
## Abstract
|
||||
|
||||
A previous version of the ABCI++ specification detailed a mechanism for proposers to replace transactions
|
||||
in the proposed block. This scheme required the proposer to construct new transactions
|
||||
and mark these new transactions as replacing other removed transactions. The specification
|
||||
was ambiguous as to how the replacement may be communicated to peer nodes.
|
||||
This RFC discusses issues with this mechanism and possible solutions.
|
||||
|
||||
## Background
|
||||
|
||||
### What is the proposed change?
|
||||
|
||||
A previous version of the ABCI++ specification proposed mechanisms for adding, removing, and replacing
|
||||
transactions in a proposed block. To replace a transaction, the application running
|
||||
`ProcessProposal` could mark a transaction as replaced by other application-supplied
|
||||
transactions by returning a new transaction marked with the `ADDED` flag setting
|
||||
the `new_hashes` field of the removed transaction to contain the list of transaction hashes
|
||||
that replace it. In that previous specification for ABCI++, the full use of the
|
||||
`new_hashes` field is left somewhat ambiguous. At present, these hashes are not
|
||||
gossiped and are not eventually included in the block to signal replacement to
|
||||
other nodes. The specification did indicate that the transactions specified in
|
||||
the `new_hashes` field will be removed from the mempool but it's not clear how
|
||||
peer nodes will learn about them.
|
||||
|
||||
### What systems would be affected by adding transaction replacement?
|
||||
|
||||
The 'transaction' is a central building block of a Tendermint blockchain, so adding
|
||||
a mechanism for transaction replacement would require changes to many aspects of Tendermint.
|
||||
|
||||
The following is a rough list of the functionality that this mechanism would affect:
|
||||
|
||||
#### Transaction indexing
|
||||
|
||||
Tendermint's indexer stores transactions and transaction results using the hash of the executed
|
||||
transaction [as the key][tx-result-index] and the ABCI results and transaction bytes as the value.
|
||||
|
||||
To allow transaction replacement, the replaced transactions would need to be stored as well in the
|
||||
indexer, likely as a mapping of original transaction to list of transaction hashes that replaced
|
||||
the original transaction.
|
||||
|
||||
#### Transaction inclusion proofs
|
||||
|
||||
The result of a transaction query includes a Merkle proof of the existence of the
|
||||
transaction in the block chain. This [proof is built][inclusion-proof] as a merkle tree
|
||||
of the hashes of all of the transactions in the block where the queried transaction was executed.
|
||||
|
||||
To allow transaction replacement, these proofs would need to be updated to prove
|
||||
that a replaced transaction was included by replacement in the block.
|
||||
|
||||
#### RPC-based transaction query parameters and results
|
||||
|
||||
Tendermint's RPC allows clients to retrieve information about transactions via the
|
||||
`/tx_search` and `/tx` RPC endpoints.
|
||||
|
||||
RPC query results containing replaced transactions would need to be updated to include
|
||||
information on replaced transactions, either by returning results for all of the replaced
|
||||
transactions, or by including a response with just the hashes of the replaced transactions
|
||||
which clients could proceed to query individually.
|
||||
|
||||
#### Mempool transaction removal
|
||||
|
||||
Additional logic would need to be added to the Tendermint mempool to clear out replaced
|
||||
transactions after each block is executed. Tendermint currently removes executed transactions
|
||||
from the mempool, so this would be a pretty straightforward change.
|
||||
|
||||
## Discussion
|
||||
|
||||
### What value may be added to Tendermint by introducing transaction replacement?
|
||||
|
||||
Transaction replacement would enable applications to aggregate or disaggregate transactions.
|
||||
|
||||
For aggregation, a set of transactions that all perform related work, such as transferring
|
||||
tokens between the same two accounts, could be replaced with a single transaction,
|
||||
i.e. one that transfers a single sum from one account to the other.
|
||||
Applications that make frequent use of aggregation may be able to achieve a higher throughput.
|
||||
Aggregation would decrease the space occupied by a single client-submitted transaction in the block, allowing
|
||||
more client-submitted transactions to be executed per block.
|
||||
|
||||
For disaggregation, a very complex transaction could be split into multiple smaller transactions.
|
||||
This may be useful if an application wishes to perform more fine-grained indexing on intermediate parts
|
||||
of a multi-part transaction.
|
||||
|
||||
### Drawbacks to transaction replacement
|
||||
|
||||
Transaction replacement would require updating and shimming many of the places that
|
||||
Tendermint records and exposes information about executed transactions. While
|
||||
systems within Tendermint could be updated to account for transaction replacement,
|
||||
such a system would leave new issues and rough edges.
|
||||
|
||||
#### No way of guaranteeing correct replacement
|
||||
|
||||
If a user issues a transaction to the network and the transaction is replaced, the
|
||||
user has no guarantee that the replacement was correct. For example, suppose a set of users issue
|
||||
transactions A, B, and C and they are all aggregated into a new transaction, D.
|
||||
There is nothing guaranteeing that D was constructed correctly from the inputs.
|
||||
The only way for users to ensure D is correct would be if D contained all of the
|
||||
information of its constituent transactions, in which case, nothing is really gained by the replacement.
|
||||
|
||||
#### Replacement transactions not signed by submitter
|
||||
|
||||
Abstractly, Tendermint simply views transactions as a ball of bytes and therefore
|
||||
should be fine with replacing one for another. However, many applications require
|
||||
that transactions submitted to the chain be signed by some private key to authenticate
|
||||
and authorize the transaction. Replaced transactions could not be signed by the
|
||||
submitter, only by the application node. Therefore, any use of transaction replacement
|
||||
could not contain authorization from the submitter and would either need to grant
|
||||
application-submitted transactions power to perform application logic on behalf
|
||||
of a user without their consent.
|
||||
|
||||
Granting this power to application-submitted transactions would be very dangerous
|
||||
and therefore might not be of much value to application developers.
|
||||
Transaction replacement might only be really safe in the case of application-submitted
|
||||
transactions or for transactions that require no authorization. For such transactions,
|
||||
it's not quite clear what the utility of replacement is: the application can already
|
||||
generate any transactions that it wants. The fact that such a transaction was a replacement
|
||||
is not particularly relevant to participants in the chain since the application is
|
||||
merely replacing its own transactions.
|
||||
|
||||
#### New vector for censorship
|
||||
|
||||
Depending on the implementation, transaction replacement may allow a node to signal
|
||||
to the rest of the chain that some transaction should no longer be considered for execution.
|
||||
Honest nodes will use the replacement mechanism to signal that a transaction has been aggregated.
|
||||
Malicious nodes will be granted a new vector for censoring transactions.
|
||||
There is no guarantee that a replaced transaction is actually executed at all.
|
||||
A malicious node could censor a transaction by simply listing it as replaced.
|
||||
Honest nodes seeing the replacement would flush the transaction from their mempool
|
||||
and not execute or propose it in later blocks.
|
||||
|
||||
### Transaction tracking implementations
|
||||
|
||||
This section discusses possible ways to flesh out the implementation of transaction replacement.
|
||||
Specifically, this section proposes a few alternative ways that Tendermint blockchains could
|
||||
track and store transaction replacements.
|
||||
|
||||
#### Include transaction replacements in the block
|
||||
|
||||
One option to track transaction replacement is to include information on the
|
||||
transaction replacement within the block. An additional structure may be added
|
||||
to the block, of the following form:
|
||||
|
||||
```proto
|
||||
message Block {
|
||||
...
|
||||
repeated Replacement replacements = 5;
|
||||
}
|
||||
|
||||
message Replacement {
|
||||
bytes included_tx_key = 1;
|
||||
repeated bytes replaced_txs_keys = 2;
|
||||
}
|
||||
```
|
||||
|
||||
Applications executing `PrepareProposal` would return the list of replacements and
|
||||
Tendermint would include an encoding of these replacements in the block that is gossiped
|
||||
and committed.
|
||||
|
||||
Tendermint's transaction indexing would include a new mapping for each replaced transaction
|
||||
key to the committed transaction.
|
||||
Transaction inclusion proofs would be updated to include these additional new transaction
|
||||
keys in the Merkle tree and queries for transaction hashes that were replaced would return
|
||||
information indicating that the transaction was replaced along with the hash of the
|
||||
transaction that replaced it.
|
||||
|
||||
Block validation of gossiped blocks would be updated to check that each of the
|
||||
`included_tx_key` values matches the hash of some transaction in the proposed block.
|
||||
|
||||
Implementing the changes described in this section would allow Tendermint to gossip
|
||||
and index transaction replacements as part of block propagation. These changes would
|
||||
still require the application to certify that the replacements were valid. This
|
||||
validation may be performed in one of two ways:
|
||||
|
||||
1. **Applications optimistically trust that the proposer performed a legitimate replacement.**
|
||||
|
||||
In this validation scheme, applications would not verify that the substitution
|
||||
is valid during consensus and instead simply trust that the proposer is correct.
|
||||
This would have the drawback of allowing a malicious proposer to remove transactions
|
||||
it did not want executed.
|
||||
|
||||
2. **Applications completely validate transaction replacement.**
|
||||
|
||||
In this validation scheme, applications that allow replacement would check that
|
||||
each listed replaced transaction was correctly reflected in the replacement transaction.
|
||||
In order to perform such validation, the node would need to have the replaced transactions
|
||||
locally. This could be accomplished one of a few ways: by querying the mempool,
|
||||
by adding an additional p2p gossip channel for transaction replacements, or by including the replaced transactions
|
||||
in the block. Replacement validation via mempool querying would require the node
|
||||
to have received all of the replaced transactions in the mempool which is far from
|
||||
guaranteed. Adding an additional gossip channel would make gossiping replaced transactions
|
||||
a requirement for consensus to proceed, since all nodes would need to receive all replacement
|
||||
messages before considering a block valid. Finally, including replaced transactions in
|
||||
the block seems to obviate any benefit gained from performing a transaction replacement
|
||||
since the replaced transaction and the original transactions would now both appear in the block.
|
||||
|
||||
#### Application defined transaction replacement
|
||||
|
||||
An additional option for allowing transaction replacement is to leave it entirely as a responsibility
|
||||
of the application. The `PrepareProposal` ABCI++ call allows for applications to add
|
||||
new transactions to a proposed block. Applications that wished to implement a transaction
|
||||
replacement mechanism would be free to do so without the newly defined `new_hashes` field.
|
||||
Applications wishing to implement transaction replacement would add the aggregated
|
||||
transactions in the `PrepareProposal` response, and include one additional bookkeeping
|
||||
transaction that listed all of the replacements, with a similar scheme to the `new_hashes`
|
||||
field described in ABCI++. This new bookkeeping transaction could be used by the
|
||||
application to determine which transactions to clear from the mempool in future calls
|
||||
to `CheckTx`.
|
||||
|
||||
The meaning of any transaction in the block is completely opaque to Tendermint,
|
||||
so applications performing this style of replacement would not be able to have the replacement
|
||||
reflected in most of Tendermint's transaction tracking mechanisms, such as transaction indexing
|
||||
and the `/tx` endpoint.
|
||||
|
||||
#### Application defined Tx Keys
|
||||
|
||||
Tendermint currently uses cryptographic hashes, SHA256, as a key for each transaction.
|
||||
As noted in the section on systems that would require changing, this key is used
|
||||
to identify the transaction in the mempool, in the indexer, and within the RPC system.
|
||||
|
||||
An alternative approach to allowing `ProcessProposal` to specify a set of transaction
|
||||
replacements would be instead to allow the application to specify an additional key or set
|
||||
of keys for each transaction during `ProcessProposal`. This new `secondary_keys` set
|
||||
would be included in the block and therefore gossiped during block propagation.
|
||||
Additional RPC endpoints could be exposed to query by the application-defined keys.
|
||||
|
||||
Applications wishing to implement replacement would leverage this new field by providing the
|
||||
replaced transaction hashes as the `secondary_keys` and checking their validity during
|
||||
`ProcessProposal`. During `RecheckTx` the application would then be responsible for
|
||||
clearing out transactions that matched the `secondary_keys`.
|
||||
|
||||
It is worth noting that something like this would be possible without `secondary_keys`.
|
||||
An application wishing to implement a system like this one could define a replacement
|
||||
transaction, as discussed in the section on application-defined transaction replacement,
|
||||
and use a custom [ABCI event type][abci-event-type] to communicate that the replacement should
|
||||
be indexed within Tendermint's ABCI event indexing.
|
||||
|
||||
### Complexity to value-add tradeoff
|
||||
|
||||
It is worth remarking that adding a system like this may introduce a decent amount
|
||||
of new complexity into Tendermint. An approach that leaves much of the replacement
|
||||
logic to Tendermint would require altering the core transaction indexing and querying
|
||||
data. In many of the cases listed, a system for transaction replacement is possible
|
||||
without explicitly defining it as part of `PrepareProposal`. Since applications
|
||||
can now add transactions during `PrepareProposal` they can and should leverage this
|
||||
functionality to include additional bookkeeping transactions in the block. It may
|
||||
be worth encouraging applications to discover new and interesting ways to leverage this
|
||||
power instead of immediately solving the problem for them.
|
||||
|
||||
### References
|
||||
|
||||
[inclusion-proof]: https://github.com/tendermint/tendermint/blob/0fcfaa4568cb700e27c954389c1fcd0b9e786332/types/tx.go#L67
|
||||
[tx-serach-result]: https://github.com/tendermint/tendermint/blob/0fcfaa4568cb700e27c954389c1fcd0b9e786332/rpc/coretypes/responses.go#L267
|
||||
[tx-rpc-func]: https://github.com/tendermint/tendermint/blob/0fcfaa4568cb700e27c954389c1fcd0b9e786332/internal/rpc/core/tx.go#L21
|
||||
[tx-result-index]: https://github.com/tendermint/tendermint/blob/0fcfaa4568cb700e27c954389c1fcd0b9e786332/internal/state/indexer/tx/kv/kv.go#L90
|
||||
[abci-event-type]: https://github.com/tendermint/tendermint/blob/0fcfaa4568cb700e27c954389c1fcd0b9e786332/abci/types/types.pb.go#L3168
|
||||
@@ -47,7 +47,7 @@ An overhaul of the existing interface between the application and consensus, to
|
||||
|
||||
### Proposer-Based Timestamps
|
||||
|
||||
Proposer-based timestamps are a replacement of [BFT time](https://docs.tendermint.com/master/spec/consensus/bft-time.html), whereby the proposer chooses a timestamp and validators vote on the block only if the timestamp is considered *timely*. This increases reliance on an accurate local clock, but in exchange makes block time more reliable and resistant to faults. This has important use cases in light clients, IBC relayers, CosmosHub inflation and enabling signature aggregation. [More](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-071-proposer-based-timestamps.md)
|
||||
Proposer-based timestamps are a replacement of [BFT time](https://github.com/tendermint/tendermint/blob/master/spec/consensus/bft-time.md), whereby the proposer chooses a timestamp and validators vote on the block only if the timestamp is considered *timely*. This increases reliance on an accurate local clock, but in exchange makes block time more reliable and resistant to faults. This has important use cases in light clients, IBC relayers, CosmosHub inflation and enabling signature aggregation. [More](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-071-proposer-based-timestamps.md)
|
||||
|
||||
### RPC Event Subscription
|
||||
|
||||
|
||||
95
docs/tendermint-core/consensus/proposer-based-timestamps.md
Normal file
95
docs/tendermint-core/consensus/proposer-based-timestamps.md
Normal file
@@ -0,0 +1,95 @@
|
||||
---
|
||||
order: 3
|
||||
---
|
||||
|
||||
# PBTS
|
||||
|
||||
This document provides an overview of the Proposer-Based Timestamp (PBTS)
|
||||
algorithm added to Tendermint in the v0.36 release. It outlines the core
|
||||
functionality as well as the parameters and constraints of this algorithm.
|
||||
|
||||
## Algorithm Overview
|
||||
|
||||
The PBTS algorithm defines a way for a Tendermint blockchain to create block
|
||||
timestamps that are within a reasonable bound of the clocks of the validators on
|
||||
the network. This replaces the original BFTTime algorithm for timestamp
|
||||
assignment that relied on the timestamps included in precommit messages.
|
||||
|
||||
## Algorithm Parameters
|
||||
|
||||
The functionality of the PBTS algorithm is governed by two parameters within
|
||||
Tendermint. These two parameters are [consensus
|
||||
parameters](https://github.com/tendermint/tendermint/blob/master/spec/abci/apps.md#L291),
|
||||
meaning they are configured by the ABCI application and are expected to be the
|
||||
same across all nodes on the network.
|
||||
|
||||
### `Precision`
|
||||
|
||||
The `Precision` parameter configures the acceptable upper-bound of clock drift
|
||||
among all of the nodes on a Tendermint network. Any two nodes on a Tendermint
|
||||
network are expected to have clocks that differ by at most `Precision`
|
||||
milliseconds at any given instant.
|
||||
|
||||
### `MessageDelay`
|
||||
|
||||
The `MessageDelay` parameter configures the acceptable upper-bound for
|
||||
transmitting a `Proposal` message from the proposer to _all_ of the validators
|
||||
on the network.
|
||||
|
||||
Networks should choose as small a value for `MessageDelay` as is practical,
|
||||
provided it is large enough that messages can reach all participants with high
|
||||
probability given the number of participants and latency of their connections.
|
||||
|
||||
## Algorithm Concepts
|
||||
|
||||
### Block timestamps
|
||||
|
||||
Each block produced by the Tendermint consensus engine contains a timestamp.
|
||||
The timestamp produced in each block is a meaningful representation of time that is
|
||||
useful for the protocols and applications built on top of Tendermint.
|
||||
|
||||
The following protocols and application features require a reliable source of time:
|
||||
|
||||
* Tendermint Light Clients [rely on correspondence between their known time](https://github.com/tendermint/tendermint/blob/master/spec/light-client/verification/README.md#definitions-1) and the block time for block verification.
|
||||
* Tendermint Evidence validity is determined [either in terms of heights or in terms of time](https://github.com/tendermint/tendermint/blob/master/spec/consensus/evidence.md#verification).
|
||||
* Unbonding of staked assets in the Cosmos Hub [occurs after a period of 21
|
||||
days](https://github.com/cosmos/governance/blob/master/params-change/Staking.md#unbondingtime).
|
||||
* IBC packets can use either a [timestamp or a height to timeout packet
|
||||
delivery](https://docs.cosmos.network/v0.44/ibc/overview.html#acknowledgements)
|
||||
|
||||
### Proposer Selects a Block Timestamp
|
||||
|
||||
When the proposer node creates a new block proposal, the node reads the time
|
||||
from its local clock and uses this reading as the timestamp for the proposed
|
||||
block.
|
||||
|
||||
### Timeliness
|
||||
|
||||
When each validator on a Tendermint network receives a proposed block, it
|
||||
performs a series of checks to ensure that the block can be considered valid as
|
||||
a candidate to be the next block in the chain.
|
||||
|
||||
The PBTS algorithm performs a validity check on the timestamp of proposed
|
||||
blocks. When a validator receives a proposal it ensures that the timestamp in
|
||||
the proposal is within a bound of the validator's local clock. Specifically, the
|
||||
algorithm checks that the timestamp is no more than `Precision` greater than the
|
||||
node's local clock and no less than `Precision` + `MessageDelay` behind the
|
||||
node's local clock. This creates a range of acceptable timestamps around the
|
||||
node's local time. If the timestamp is within this range, the PBTS algorithm
|
||||
considers the block **timely**. If a block is not **timely**, the node will
|
||||
issue a `nil` `prevote` for this block, signaling to the rest of the network
|
||||
that the node does not consider the block to be valid.
|
||||
|
||||
### Clock Synchronization
|
||||
|
||||
The PBTS algorithm requires that the clocks of the validators on a Tendermint network
|
||||
be within `Precision` of each other. In practice, this means that validators
|
||||
should periodically synchronize to a reliable NTP server. Validators that drift
|
||||
too far away from the rest of the network will no longer propose blocks with
|
||||
valid timestamps. Additionally they will not view the timestamps of blocks
|
||||
proposed by their peers to be valid either.
|
||||
|
||||
## See Also
|
||||
|
||||
* [The PBTS specification](https://github.com/tendermint/tendermint/blob/master/spec/consensus/proposer-based-timestamp/README.md)
|
||||
contains all of the details of the algorithm.
|
||||
216
docs/tools/debugging/proposer-based-timestamps-runbook.md
Normal file
216
docs/tools/debugging/proposer-based-timestamps-runbook.md
Normal file
@@ -0,0 +1,216 @@
|
||||
---
|
||||
order: 3
|
||||
---
|
||||
|
||||
# Proposer-Based Timestamps Runbook
|
||||
|
||||
Version v0.36 of Tendermint added new constraints for the timestamps included in
|
||||
each block created by Tendermint. The new constraints mean that validators may
|
||||
fail to produce valid blocks or may issue `nil` `prevotes` for proposed blocks
|
||||
depending on the configuration of the validator's local clock.
|
||||
|
||||
## What is this document for?
|
||||
|
||||
This document provides a set of actionable steps for application developers and
|
||||
node operators to diagnose and fix issues related to clock synchronization and
|
||||
configuration of the Proposer-Based Timestamps [SynchronyParams](https://github.com/tendermint/tendermint/blob/master/spec/core/data_structures.md#synchronyparams).
|
||||
|
||||
Use this runbook if you observe that validators are frequently voting `nil` for a block that the rest
|
||||
of the network votes for or if validators are frequently producing block proposals
|
||||
that are not voted for by the rest of the network.
|
||||
|
||||
## Requirements
|
||||
|
||||
To use this runbook, you must be running a node that has the [Prometheus metrics endpoint enabled](https://github.com/tendermint/tendermint/blob/master/docs/nodes/metrics.md)
|
||||
and the Tendermint RPC endpoint enabled and accessible.
|
||||
|
||||
It is strongly recommended to also run a Prometheus metrics collector to gather and
|
||||
analyze metrics from the Tendermint node.
|
||||
|
||||
## Debugging a Single Node
|
||||
|
||||
If you observe that a single validator is frequently failing to produce blocks or
|
||||
voting nil for proposals that other validators vote for and suspect it may be
|
||||
related to clock synchronization, use the following steps to debug and correct the issue.
|
||||
|
||||
### Check Timely Metric
|
||||
|
||||
Tendermint exposes a histogram metric for the difference between the timestamp in the proposal
|
||||
and the time read from the node's local clock when the proposal is received.
|
||||
|
||||
The histogram exposes multiple metrics on the Prometheus `/metrics` endpoint called
|
||||
* `tendermint_consensus_proposal_timestamp_difference_bucket`.
|
||||
* `tendermint_consensus_proposal_timestamp_difference_sum`.
|
||||
* `tendermint_consensus_proposal_timestamp_difference_count`.
|
||||
|
||||
Each metric is also labeled with the key `is_timely`, which can have a value of
|
||||
`true` or `false`.
|
||||
|
||||
#### From the Prometheus Collector UI
|
||||
|
||||
If you are running a Prometheus collector, navigate to the query web interface and select the 'Graph' tab.
|
||||
|
||||
Issue a query for the following:
|
||||
|
||||
```
|
||||
tendermint_consensus_proposal_timestamp_difference_count{is_timely="false"} /
|
||||
tendermint_consensus_proposal_timestamp_difference_count{is_timely="true"}
|
||||
```
|
||||
|
||||
This query will graph the ratio of proposals the node considered untimely to those it
|
||||
considered timely. If the ratio is increasing, it means that your node is consistently
|
||||
seeing more proposals that are far from its local clock. If this is the case, you should
|
||||
check to make sure your local clock is properly synchronized to NTP.
|
||||
|
||||
#### From the `/metrics` url
|
||||
|
||||
If you are not running a Prometheus collector, navigate to the `/metrics` endpoint
|
||||
exposed on the Prometheus metrics port with `curl` or a browser.
|
||||
|
||||
Search for the `tendermint_consensus_proposal_timestamp_difference_count` metrics.
|
||||
This metric is labeled with `is_timely`. Investigate the value of
|
||||
`tendermint_consensus_proposal_timestamp_difference_count` where `is_timely="false"`
|
||||
and where `is_timely="true"`. Refresh the endpoint and observe if the value of `is_timely="false"`
|
||||
is growing.
|
||||
|
||||
If you observe that `is_timely="false"` is growing, it means that your node is consistently
|
||||
seeing proposals that are far from its local clock. If this is the case, you should check
|
||||
to make sure your local clock is properly synchronized to NTP.
|
||||
|
||||
### Checking Clock Sync
|
||||
|
||||
NTP configuration and tooling is very specific to the operating system and distribution
|
||||
that your validator node is running. This guide assumes you have `timedatectl` installed with
|
||||
[chrony](https://chrony.tuxfamily.org/), a popular tool for interacting with time
|
||||
synchronization on Linux distributions. If you are using an operating system or
|
||||
distribution with a different time synchronization mechanism, please consult the
|
||||
documentation for your operating system to check the status and re-synchronize the daemon.
|
||||
|
||||
#### Check if NTP is Enabled
|
||||
|
||||
```shell
|
||||
$ timedatectl
|
||||
```
|
||||
|
||||
From the output, ensure that `NTP service` is `active`. If `NTP service` is `inactive`, run:
|
||||
|
||||
```shell
|
||||
$ timedatectl set-ntp true
|
||||
```
|
||||
|
||||
Re-run the `timedatectl` command and verify that the change has taken effect.
|
||||
|
||||
#### Check if Your NTP Daemon is Synchronized
|
||||
|
||||
Check the status of your local `chrony` NTP daemon by running the following:
|
||||
|
||||
```shell
|
||||
$ chronyc tracking
|
||||
```
|
||||
|
||||
If the `chrony` daemon is running, you will see output that indicates its current status.
|
||||
If the `chrony` daemon is not running, restart it and re-run `chronyc tracking`.
|
||||
|
||||
The `System time` field of the response should show a value that is much smaller than 100
|
||||
milliseconds.
|
||||
|
||||
If the value is very large, restart the `chronyd` daemon.
|
||||
|
||||
## Debugging a Network
|
||||
|
||||
If you observe that a network is frequently failing to produce blocks and suspect
|
||||
it may be related to clock synchronization, use the following steps to debug and correct the issue.
|
||||
|
||||
### Check Prevote Message Delay
|
||||
|
||||
Tendermint exposes metrics that help determine how synchronized the clocks on a network are.
|
||||
|
||||
These metrics are visible on the Prometheus `/metrics` endpoint and are called:
|
||||
* `tendermint_consensus_quorum_prevote_delay`
|
||||
* `tendermint_consensus_full_prevote_delay`
|
||||
|
||||
These metrics calculate the difference between the timestamp in the proposal message and
|
||||
the timestamp of a prevote that was issued during consensus.
|
||||
|
||||
The `tendermint_consensus_quorum_prevote_delay` metric is the interval in seconds
|
||||
between the proposal timestamp and the timestamp of the earliest prevote that
|
||||
achieved a quorum during the prevote step.
|
||||
|
||||
The `tendermint_consensus_full_prevote_delay` metric is the interval in seconds
|
||||
between the proposal timestamp and the timestamp of the latest prevote in a round
|
||||
where 100% of the validators voted.
|
||||
|
||||
#### From the Prometheus Collector UI
|
||||
|
||||
If you are running a Prometheus collector, navigate to the query web interface and select the 'Graph' tab.
|
||||
|
||||
Issue a query for the following:
|
||||
|
||||
```
|
||||
sum(tendermint_consensus_quorum_prevote_delay) by (proposer_address)
|
||||
```
|
||||
|
||||
This query will graph the difference in seconds for each proposer on the network.
|
||||
|
||||
If the value is much larger for some proposers, then the issue is likely related to the clock
|
||||
synchronization of their nodes. Contact those proposers and ensure that their nodes
|
||||
are properly connected to NTP using the steps for [Debugging a Single Node](#debugging-a-single-node).
|
||||
|
||||
If the value is relatively similar for all proposers, you should next compare this
|
||||
value to the `SynchronyParams` values for the network. Continue to the [Checking
|
||||
Synchrony](#checking-synchrony) steps.
|
||||
|
||||
#### From the `/metrics` url
|
||||
|
||||
If you are not running a Prometheus collector, navigate to the `/metrics` endpoint
|
||||
exposed on the Prometheus metrics port.
|
||||
|
||||
Search for the `tendermint_consensus_quorum_prevote_delay` metric. There will be one
|
||||
entry of this metric for each `proposer_address`. If the value of this metric is
|
||||
much larger for some proposers, then the issue is likely related to synchronization of their
|
||||
nodes with NTP. Contact those proposers and ensure that their nodes are properly connected
|
||||
to NTP using the steps for [Debugging a Single Node](#debugging-a-single-node).
|
||||
|
||||
If the values are relatively similar for all proposers,
|
||||
you'll need to compare this value to the `SynchronyParams` for the network. Continue
|
||||
to the [Checking Synchrony](#checking-synchrony) steps.
|
||||
|
||||
### Checking Synchrony
|
||||
|
||||
To determine the currently configured `SynchronyParams` for your network, issue a
|
||||
request to your node's RPC endpoint. For a node running locally with the RPC server
|
||||
exposed on port `26657`, run the following command:
|
||||
|
||||
```shell
|
||||
$ curl localhost:26657/consensus_params
|
||||
```
|
||||
|
||||
The json output will contain a field named `synchrony`, with the following structure:
|
||||
|
||||
```json
|
||||
{
|
||||
"precision": "500000000",
|
||||
"message_delay": "3000000000"
|
||||
}
|
||||
```
|
||||
|
||||
The `precision` and `message_delay` values returned are listed in nanoseconds.
|
||||
In the example above, the precision is 500ms and the message delay is 3s.
|
||||
Remember, `tendermint_consensus_quorum_prevote_delay` is listed in seconds.
|
||||
If the `tendermint_consensus_quorum_prevote_delay` value approaches the sum of `precision` and `message_delay`,
|
||||
then the value selected for these parameters is too small. Your application will
|
||||
need to be modified to update the `SynchronyParams` to have larger values.
|
||||
|
||||
### Updating SynchronyParams
|
||||
|
||||
The `SynchronyParams` are `ConsensusParameters` which means they are set and updated
|
||||
by the application running alongside Tendermint. Updates to these parameters must
|
||||
be passed to the application during the `FinalizeBlock` ABCI method call.
|
||||
|
||||
If the application was built using the CosmosSDK, then these parameters can be updated
|
||||
programmatically using a governance proposal. For more information, see the [CosmosSDK
|
||||
documentation](https://hub.cosmos.network/main/governance/submitting.html#sending-the-transaction-that-submits-your-governance-proposal).
|
||||
|
||||
If the application does not implement a way to update the consensus parameters
|
||||
programmatically, then the application itself must be updated to do so. More information on updating
|
||||
the consensus parameters via ABCI can be found in the [FinalizeBlock documentation](https://github.com/tendermint/tendermint/blob/master/spec/abci++/abci++_methods_002_draft.md#finalizeblock).
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user