mirror of
https://github.com/tendermint/tendermint.git
synced 2026-02-01 17:42:03 +00:00
Compare commits
145 Commits
finalizeBl
...
abci++
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
53565cb815 | ||
|
|
f408849112 | ||
|
|
1bc7b40333 | ||
|
|
511bd3eb7f | ||
|
|
9a0081f076 | ||
|
|
7fe3e78a38 | ||
|
|
7169d26ddf | ||
|
|
c4df8a3840 | ||
|
|
f858ebeb88 | ||
|
|
c9347a0647 | ||
|
|
0df421b37f | ||
|
|
94e1eb8cfe | ||
|
|
23abb0de8b | ||
|
|
58a6cfff9a | ||
|
|
6e921f6644 | ||
|
|
a0a5d45cb1 | ||
|
|
9c8379ef30 | ||
|
|
e053643b95 | ||
|
|
41a361ed8d | ||
|
|
bc2b529b95 | ||
|
|
6d5ff590c3 | ||
|
|
d8642a941e | ||
|
|
d7c3a8f682 | ||
|
|
ce3c059a0d | ||
|
|
39dee8abc5 | ||
|
|
68dd6c6eb9 | ||
|
|
d0e33b4292 | ||
|
|
8700ca9d1a | ||
|
|
a374f74f7c | ||
|
|
3a234e1144 | ||
|
|
cce0a3c171 | ||
|
|
a4cc8317da | ||
|
|
69f6eee2e4 | ||
|
|
afb6af8bc3 | ||
|
|
0ed3ba6279 | ||
|
|
267aac2e90 | ||
|
|
471f83d557 | ||
|
|
393a02a729 | ||
|
|
bf77c0c544 | ||
|
|
511e52c2fc | ||
|
|
1f76cb1546 | ||
|
|
7494f0e498 | ||
|
|
d56a44b884 | ||
|
|
cbfc04df6d | ||
|
|
a6d20a6660 | ||
|
|
97435139ad | ||
|
|
66084a01dc | ||
|
|
53d53e6205 | ||
|
|
a7ecf49c10 | ||
|
|
31994cadc0 | ||
|
|
e5312942e3 | ||
|
|
4db71da68e | ||
|
|
e801328128 | ||
|
|
4cbaf70d1f | ||
|
|
e5f9dd2736 | ||
|
|
e922016121 | ||
|
|
dc7c212c41 | ||
|
|
4e96c6b234 | ||
|
|
02f8e4c0bd | ||
|
|
3aec71cdd4 | ||
|
|
6dd8984fef | ||
|
|
9a2a7d4307 | ||
|
|
8f06e0c9e7 | ||
|
|
6a94b55d12 | ||
|
|
9e41414a53 | ||
|
|
6ff4c3139c | ||
|
|
e87b0391cb | ||
|
|
4f73748bc8 | ||
|
|
9a3861fb82 | ||
|
|
44ac57489f | ||
|
|
7d30987cff | ||
|
|
76376e3161 | ||
|
|
dd97ac6e1c | ||
|
|
a751eee7f2 | ||
|
|
c5dc3b267f | ||
|
|
93f462ef86 | ||
|
|
91e277d7b7 | ||
|
|
a341a626e0 | ||
|
|
c3ae6f5b58 | ||
|
|
a393cf8bab | ||
|
|
0e2752ae42 | ||
|
|
97a8f125e0 | ||
|
|
84c15857e4 | ||
|
|
e70445f942 | ||
|
|
478f5321ad | ||
|
|
08e4e2ed3d | ||
|
|
7d63e991c5 | ||
|
|
7638235d33 | ||
|
|
2abfe20114 | ||
|
|
0bf7813c4e | ||
|
|
ff9038e2ce | ||
|
|
00a40835a2 | ||
|
|
c4f77ab6d1 | ||
|
|
2030875056 | ||
|
|
639e145729 | ||
|
|
68ffe8bc64 | ||
|
|
21309ccb7b | ||
|
|
f70396c6fd | ||
|
|
fdc246e4a8 | ||
|
|
78a0a5fe73 | ||
|
|
4f885209aa | ||
|
|
6dd0cf92c8 | ||
|
|
626d9b4fbe | ||
|
|
8addf99f90 | ||
|
|
76c6c67734 | ||
|
|
a46724e4f6 | ||
|
|
40fba3960d | ||
|
|
36a859ae54 | ||
|
|
ab5c63eff3 | ||
|
|
8228936155 | ||
|
|
a12e2bbb60 | ||
|
|
11bebfb6a0 | ||
|
|
4009102e2b | ||
|
|
cabd916517 | ||
|
|
363ea56680 | ||
|
|
aa4854ff8f | ||
|
|
581dd01d47 | ||
|
|
50b00dff71 | ||
|
|
051e127d38 | ||
|
|
5530726df8 | ||
|
|
decac693ab | ||
|
|
7ca0f24040 | ||
|
|
69848bef26 | ||
|
|
2c14d491f6 | ||
|
|
cd248576ea | ||
|
|
c256edc622 | ||
|
|
9d9360774f | ||
|
|
c7c11fc7d5 | ||
|
|
37bc1d74df | ||
|
|
d882f31569 | ||
|
|
ba3f7106b1 | ||
|
|
3ccfb26137 | ||
|
|
96863decca | ||
|
|
d4cda544ae | ||
|
|
800cce80b7 | ||
|
|
e850863296 | ||
|
|
1dec3e139a | ||
|
|
11b920480f | ||
|
|
4f8bcb1cce | ||
|
|
2d95e38986 | ||
|
|
6bb4b688e0 | ||
|
|
a1e1e6c290 | ||
|
|
736364178a | ||
|
|
a99c7188d7 | ||
|
|
a56b10fbef |
3
.github/CODEOWNERS
vendored
3
.github/CODEOWNERS
vendored
@@ -7,5 +7,4 @@
|
|||||||
# global owners are only requested if there isn't a more specific
|
# global owners are only requested if there isn't a more specific
|
||||||
# codeowner specified below. For this reason, the global codeowners
|
# codeowner specified below. For this reason, the global codeowners
|
||||||
# are often repeated in package-level definitions.
|
# are often repeated in package-level definitions.
|
||||||
* @alexanderbez @ebuchman @cmwaters @tessr @tychoish
|
* @alexanderbez @ebuchman @cmwaters @tessr @tychoish @williambanfield @creachadair
|
||||||
|
|
||||||
|
|||||||
8
.github/workflows/coverage.yml
vendored
8
.github/workflows/coverage.yml
vendored
@@ -46,7 +46,7 @@ jobs:
|
|||||||
with:
|
with:
|
||||||
go-version: "1.16"
|
go-version: "1.16"
|
||||||
- uses: actions/checkout@v2.3.4
|
- uses: actions/checkout@v2.3.4
|
||||||
- uses: technote-space/get-diff-action@v4
|
- uses: technote-space/get-diff-action@v5
|
||||||
with:
|
with:
|
||||||
PATTERNS: |
|
PATTERNS: |
|
||||||
**/**.go
|
**/**.go
|
||||||
@@ -68,7 +68,7 @@ jobs:
|
|||||||
with:
|
with:
|
||||||
go-version: "1.16"
|
go-version: "1.16"
|
||||||
- uses: actions/checkout@v2.3.4
|
- uses: actions/checkout@v2.3.4
|
||||||
- uses: technote-space/get-diff-action@v4
|
- uses: technote-space/get-diff-action@v5
|
||||||
with:
|
with:
|
||||||
PATTERNS: |
|
PATTERNS: |
|
||||||
**/**.go
|
**/**.go
|
||||||
@@ -96,7 +96,7 @@ jobs:
|
|||||||
needs: tests
|
needs: tests
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v2.3.4
|
- uses: actions/checkout@v2.3.4
|
||||||
- uses: technote-space/get-diff-action@v4
|
- uses: technote-space/get-diff-action@v5
|
||||||
with:
|
with:
|
||||||
PATTERNS: |
|
PATTERNS: |
|
||||||
**/**.go
|
**/**.go
|
||||||
@@ -121,7 +121,7 @@ jobs:
|
|||||||
- run: |
|
- run: |
|
||||||
cat ./*profile.out | grep -v "mode: atomic" >> coverage.txt
|
cat ./*profile.out | grep -v "mode: atomic" >> coverage.txt
|
||||||
if: env.GIT_DIFF
|
if: env.GIT_DIFF
|
||||||
- uses: codecov/codecov-action@v1.5.2
|
- uses: codecov/codecov-action@v2.0.3
|
||||||
with:
|
with:
|
||||||
file: ./coverage.txt
|
file: ./coverage.txt
|
||||||
if: env.GIT_DIFF
|
if: env.GIT_DIFF
|
||||||
|
|||||||
2
.github/workflows/docker.yml
vendored
2
.github/workflows/docker.yml
vendored
@@ -50,7 +50,7 @@ jobs:
|
|||||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||||
|
|
||||||
- name: Publish to Docker Hub
|
- name: Publish to Docker Hub
|
||||||
uses: docker/build-push-action@v2.6.1
|
uses: docker/build-push-action@v2.7.0
|
||||||
with:
|
with:
|
||||||
context: .
|
context: .
|
||||||
file: ./DOCKER/Dockerfile
|
file: ./DOCKER/Dockerfile
|
||||||
|
|||||||
4
.github/workflows/e2e-nightly-master.yml
vendored
4
.github/workflows/e2e-nightly-master.yml
vendored
@@ -17,7 +17,7 @@ jobs:
|
|||||||
fail-fast: false
|
fail-fast: false
|
||||||
matrix:
|
matrix:
|
||||||
p2p: ['legacy', 'new', 'hybrid']
|
p2p: ['legacy', 'new', 'hybrid']
|
||||||
group: ['00', '01', '02', '03']
|
group: ['00', '01']
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
timeout-minutes: 60
|
timeout-minutes: 60
|
||||||
steps:
|
steps:
|
||||||
@@ -35,7 +35,7 @@ jobs:
|
|||||||
- name: Generate testnets
|
- name: Generate testnets
|
||||||
working-directory: test/e2e
|
working-directory: test/e2e
|
||||||
# When changing -g, also change the matrix groups above
|
# When changing -g, also change the matrix groups above
|
||||||
run: ./build/generator -g 4 -d networks/nightly/${{ matrix.p2p }} -p ${{ matrix.p2p }}
|
run: ./build/generator -g 2 -d networks/nightly/${{ matrix.p2p }} -p ${{ matrix.p2p }}
|
||||||
|
|
||||||
- name: Run ${{ matrix.p2p }} p2p testnets in group ${{ matrix.group }}
|
- name: Run ${{ matrix.p2p }} p2p testnets in group ${{ matrix.group }}
|
||||||
working-directory: test/e2e
|
working-directory: test/e2e
|
||||||
|
|||||||
2
.github/workflows/e2e.yml
vendored
2
.github/workflows/e2e.yml
vendored
@@ -18,7 +18,7 @@ jobs:
|
|||||||
with:
|
with:
|
||||||
go-version: '1.16'
|
go-version: '1.16'
|
||||||
- uses: actions/checkout@v2.3.4
|
- uses: actions/checkout@v2.3.4
|
||||||
- uses: technote-space/get-diff-action@v4
|
- uses: technote-space/get-diff-action@v5
|
||||||
with:
|
with:
|
||||||
PATTERNS: |
|
PATTERNS: |
|
||||||
**/**.go
|
**/**.go
|
||||||
|
|||||||
9
.github/workflows/fuzz-nightly.yml
vendored
9
.github/workflows/fuzz-nightly.yml
vendored
@@ -23,9 +23,14 @@ jobs:
|
|||||||
working-directory: test/fuzz
|
working-directory: test/fuzz
|
||||||
run: go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-build
|
run: go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-build
|
||||||
|
|
||||||
- name: Fuzz mempool
|
- name: Fuzz mempool-v1
|
||||||
working-directory: test/fuzz
|
working-directory: test/fuzz
|
||||||
run: timeout -s SIGINT --preserve-status 10m make fuzz-mempool
|
run: timeout -s SIGINT --preserve-status 10m make fuzz-mempool-v1
|
||||||
|
continue-on-error: true
|
||||||
|
|
||||||
|
- name: Fuzz mempool-v0
|
||||||
|
working-directory: test/fuzz
|
||||||
|
run: timeout -s SIGINT --preserve-status 10m make fuzz-mempool-v0
|
||||||
continue-on-error: true
|
continue-on-error: true
|
||||||
|
|
||||||
- name: Fuzz p2p-addrbook
|
- name: Fuzz p2p-addrbook
|
||||||
|
|||||||
2
.github/workflows/janitor.yml
vendored
2
.github/workflows/janitor.yml
vendored
@@ -10,7 +10,7 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
timeout-minutes: 3
|
timeout-minutes: 3
|
||||||
steps:
|
steps:
|
||||||
- uses: styfle/cancel-workflow-action@0.9.0
|
- uses: styfle/cancel-workflow-action@0.9.1
|
||||||
with:
|
with:
|
||||||
workflow_id: 1041851,1401230,2837803
|
workflow_id: 1041851,1401230,2837803
|
||||||
access_token: ${{ github.token }}
|
access_token: ${{ github.token }}
|
||||||
|
|||||||
2
.github/workflows/linkchecker.yml
vendored
2
.github/workflows/linkchecker.yml
vendored
@@ -7,6 +7,6 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v2.3.4
|
- uses: actions/checkout@v2.3.4
|
||||||
- uses: gaurav-nelson/github-action-markdown-link-check@1.0.12
|
- uses: gaurav-nelson/github-action-markdown-link-check@1.0.13
|
||||||
with:
|
with:
|
||||||
folder-path: "docs"
|
folder-path: "docs"
|
||||||
|
|||||||
2
.github/workflows/lint.yml
vendored
2
.github/workflows/lint.yml
vendored
@@ -14,7 +14,7 @@ jobs:
|
|||||||
timeout-minutes: 8
|
timeout-minutes: 8
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v2.3.4
|
- uses: actions/checkout@v2.3.4
|
||||||
- uses: technote-space/get-diff-action@v4
|
- uses: technote-space/get-diff-action@v5
|
||||||
with:
|
with:
|
||||||
PATTERNS: |
|
PATTERNS: |
|
||||||
**/**.go
|
**/**.go
|
||||||
|
|||||||
2
.github/workflows/proto-docker.yml
vendored
2
.github/workflows/proto-docker.yml
vendored
@@ -43,7 +43,7 @@ jobs:
|
|||||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||||
|
|
||||||
- name: Publish to Docker Hub
|
- name: Publish to Docker Hub
|
||||||
uses: docker/build-push-action@v2.6.1
|
uses: docker/build-push-action@v2.7.0
|
||||||
with:
|
with:
|
||||||
context: ./tools/proto
|
context: ./tools/proto
|
||||||
file: ./tools/proto/Dockerfile
|
file: ./tools/proto/Dockerfile
|
||||||
|
|||||||
7
.github/workflows/release.yml
vendored
7
.github/workflows/release.yml
vendored
@@ -2,7 +2,7 @@ name: "Release"
|
|||||||
|
|
||||||
on:
|
on:
|
||||||
push:
|
push:
|
||||||
branches:
|
branches:
|
||||||
- "RC[0-9]/**"
|
- "RC[0-9]/**"
|
||||||
tags:
|
tags:
|
||||||
- "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10
|
- "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10
|
||||||
@@ -20,9 +20,6 @@ jobs:
|
|||||||
with:
|
with:
|
||||||
go-version: '1.16'
|
go-version: '1.16'
|
||||||
|
|
||||||
- run: echo https://github.com/tendermint/tendermint/blob/${GITHUB_REF#refs/tags/}/CHANGELOG.md#${GITHUB_REF#refs/tags/} > ../release_notes.md
|
|
||||||
if: startsWith(github.ref, 'refs/tags/')
|
|
||||||
|
|
||||||
- name: Build
|
- name: Build
|
||||||
uses: goreleaser/goreleaser-action@v2
|
uses: goreleaser/goreleaser-action@v2
|
||||||
if: ${{ github.event_name == 'pull_request' }}
|
if: ${{ github.event_name == 'pull_request' }}
|
||||||
@@ -35,6 +32,6 @@ jobs:
|
|||||||
if: startsWith(github.ref, 'refs/tags/')
|
if: startsWith(github.ref, 'refs/tags/')
|
||||||
with:
|
with:
|
||||||
version: latest
|
version: latest
|
||||||
args: release --rm-dist --release-notes=../release_notes.md
|
args: release --rm-dist
|
||||||
env:
|
env:
|
||||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
|||||||
8
.github/workflows/stale.yml
vendored
8
.github/workflows/stale.yml
vendored
@@ -7,12 +7,14 @@ jobs:
|
|||||||
stale:
|
stale:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/stale@v3.0.19
|
- uses: actions/stale@v4
|
||||||
with:
|
with:
|
||||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
stale-pr-message: "This pull request has been automatically marked as stale because it has not had
|
stale-pr-message: "This pull request has been automatically marked as stale because it has not had
|
||||||
recent activity. It will be closed if no further activity occurs. Thank you
|
recent activity. It will be closed if no further activity occurs. Thank you
|
||||||
for your contributions."
|
for your contributions."
|
||||||
days-before-stale: 10
|
days-before-stale: -1
|
||||||
days-before-close: 4
|
days-before-close: -1
|
||||||
|
days-before-pr-stale: 10
|
||||||
|
days-before-pr-close: 4
|
||||||
exempt-pr-labels: "S:wip"
|
exempt-pr-labels: "S:wip"
|
||||||
|
|||||||
38
.github/workflows/tests.yml
vendored
38
.github/workflows/tests.yml
vendored
@@ -19,7 +19,7 @@ jobs:
|
|||||||
with:
|
with:
|
||||||
go-version: "1.16"
|
go-version: "1.16"
|
||||||
- uses: actions/checkout@v2.3.4
|
- uses: actions/checkout@v2.3.4
|
||||||
- uses: technote-space/get-diff-action@v4
|
- uses: technote-space/get-diff-action@v5
|
||||||
with:
|
with:
|
||||||
PATTERNS: |
|
PATTERNS: |
|
||||||
**/**.go
|
**/**.go
|
||||||
@@ -42,38 +42,6 @@ jobs:
|
|||||||
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
|
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
|
||||||
if: env.GIT_DIFF
|
if: env.GIT_DIFF
|
||||||
|
|
||||||
test_abci_apps:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
needs: build
|
|
||||||
timeout-minutes: 5
|
|
||||||
steps:
|
|
||||||
- uses: actions/setup-go@v2
|
|
||||||
with:
|
|
||||||
go-version: "1.16"
|
|
||||||
- uses: actions/checkout@v2.3.4
|
|
||||||
- uses: technote-space/get-diff-action@v4
|
|
||||||
with:
|
|
||||||
PATTERNS: |
|
|
||||||
**/**.go
|
|
||||||
go.mod
|
|
||||||
go.sum
|
|
||||||
- uses: actions/cache@v2.1.6
|
|
||||||
with:
|
|
||||||
path: ~/go/pkg/mod
|
|
||||||
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
|
|
||||||
restore-keys: |
|
|
||||||
${{ runner.os }}-go-
|
|
||||||
if: env.GIT_DIFF
|
|
||||||
- uses: actions/cache@v2.1.6
|
|
||||||
with:
|
|
||||||
path: ~/go/bin
|
|
||||||
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
|
|
||||||
if: env.GIT_DIFF
|
|
||||||
- name: test_abci_apps
|
|
||||||
run: abci/tests/test_app/test.sh
|
|
||||||
shell: bash
|
|
||||||
if: env.GIT_DIFF
|
|
||||||
|
|
||||||
test_abci_cli:
|
test_abci_cli:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
needs: build
|
needs: build
|
||||||
@@ -83,7 +51,7 @@ jobs:
|
|||||||
with:
|
with:
|
||||||
go-version: "1.16"
|
go-version: "1.16"
|
||||||
- uses: actions/checkout@v2.3.4
|
- uses: actions/checkout@v2.3.4
|
||||||
- uses: technote-space/get-diff-action@v4
|
- uses: technote-space/get-diff-action@v5
|
||||||
with:
|
with:
|
||||||
PATTERNS: |
|
PATTERNS: |
|
||||||
**/**.go
|
**/**.go
|
||||||
@@ -114,7 +82,7 @@ jobs:
|
|||||||
with:
|
with:
|
||||||
go-version: "1.16"
|
go-version: "1.16"
|
||||||
- uses: actions/checkout@v2.3.4
|
- uses: actions/checkout@v2.3.4
|
||||||
- uses: technote-space/get-diff-action@v4
|
- uses: technote-space/get-diff-action@v5
|
||||||
with:
|
with:
|
||||||
PATTERNS: |
|
PATTERNS: |
|
||||||
**/**.go
|
**/**.go
|
||||||
|
|||||||
2
.gitignore
vendored
2
.gitignore
vendored
@@ -15,7 +15,7 @@
|
|||||||
.vagrant
|
.vagrant
|
||||||
.vendor-new/
|
.vendor-new/
|
||||||
.vscode/
|
.vscode/
|
||||||
abci-cli
|
abci/abci-cli
|
||||||
addrbook.json
|
addrbook.json
|
||||||
artifacts/*
|
artifacts/*
|
||||||
build/*
|
build/*
|
||||||
|
|||||||
@@ -26,7 +26,7 @@ linters:
|
|||||||
# - maligned
|
# - maligned
|
||||||
- nakedret
|
- nakedret
|
||||||
- prealloc
|
- prealloc
|
||||||
- scopelint
|
- exportloopref
|
||||||
- staticcheck
|
- staticcheck
|
||||||
- structcheck
|
- structcheck
|
||||||
- stylecheck
|
- stylecheck
|
||||||
|
|||||||
63
CHANGELOG.md
63
CHANGELOG.md
@@ -2,6 +2,27 @@
|
|||||||
|
|
||||||
Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).
|
Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).
|
||||||
|
|
||||||
|
## v0.34.12
|
||||||
|
|
||||||
|
*August 17, 2021*
|
||||||
|
|
||||||
|
Special thanks to external contributors on this release: @JayT106.
|
||||||
|
|
||||||
|
### FEATURES
|
||||||
|
|
||||||
|
- [rpc] [\#6717](https://github.com/tendermint/tendermint/pull/6717) introduce
|
||||||
|
`/genesis_chunked` rpc endpoint for handling large genesis files by chunking them (@tychoish)
|
||||||
|
|
||||||
|
### IMPROVEMENTS
|
||||||
|
|
||||||
|
- [rpc] [\#6825](https://github.com/tendermint/tendermint/issues/6825) Remove egregious INFO log from `ABCI#Query` RPC. (@alexanderbez)
|
||||||
|
|
||||||
|
### BUG FIXES
|
||||||
|
|
||||||
|
- [light] [\#6685](https://github.com/tendermint/tendermint/pull/6685) fix bug
|
||||||
|
with incorrectly handling contexts that would occasionally freeze state sync. (@cmwaters)
|
||||||
|
- [privval] [\#6748](https://github.com/tendermint/tendermint/issues/6748) Fix vote timestamp to prevent chain halt (@JayT106)
|
||||||
|
|
||||||
## v0.34.11
|
## v0.34.11
|
||||||
|
|
||||||
*June 18, 2021*
|
*June 18, 2021*
|
||||||
@@ -12,25 +33,25 @@ adding two new parameters to the state sync config.
|
|||||||
### BREAKING CHANGES
|
### BREAKING CHANGES
|
||||||
|
|
||||||
- Apps
|
- Apps
|
||||||
- [Version] \#6494 `TMCoreSemVer` is not required to be set as a ldflag any longer.
|
- [Version] [\#6494](https://github.com/tendermint/tendermint/pull/6494) `TMCoreSemVer` is not required to be set as a ldflag any longer.
|
||||||
|
|
||||||
### IMPROVEMENTS
|
### IMPROVEMENTS
|
||||||
|
|
||||||
- [statesync] \#6566 Allow state sync fetchers and request timeout to be configurable. (@alexanderbez)
|
- [statesync] [\#6566](https://github.com/tendermint/tendermint/pull/6566) Allow state sync fetchers and request timeout to be configurable. (@alexanderbez)
|
||||||
- [statesync] \#6378 Retry requests for snapshots and add a minimum discovery time (5s) for new snapshots. (@tychoish)
|
- [statesync] [\#6378](https://github.com/tendermint/tendermint/pull/6378) Retry requests for snapshots and add a minimum discovery time (5s) for new snapshots. (@tychoish)
|
||||||
- [statesync] \#6582 Increase chunk priority and add multiple retry chunk requests (@cmwaters)
|
- [statesync] [\#6582](https://github.com/tendermint/tendermint/pull/6582) Increase chunk priority and add multiple retry chunk requests (@cmwaters)
|
||||||
|
|
||||||
### BUG FIXES
|
### BUG FIXES
|
||||||
|
|
||||||
- [evidence] \#6375 Fix bug with inconsistent LightClientAttackEvidence hashing (@cmwaters)
|
- [evidence] [\#6375](https://github.com/tendermint/tendermint/pull/6375) Fix bug with inconsistent LightClientAttackEvidence hashing (@cmwaters)
|
||||||
|
|
||||||
## v0.34.10
|
## v0.34.10
|
||||||
|
|
||||||
*April 14, 2021*
|
*April 14, 2021*
|
||||||
|
|
||||||
This release fixes a bug where peers would sometimes try to send messages
|
This release fixes a bug where peers would sometimes try to send messages
|
||||||
on incorrect channels. Special thanks to our friends at Oasis Labs for surfacing
|
on incorrect channels. Special thanks to our friends at Oasis Labs for surfacing
|
||||||
this issue!
|
this issue!
|
||||||
|
|
||||||
- [p2p/node] [\#6339](https://github.com/tendermint/tendermint/issues/6339) Fix bug with using custom channels (@cmwaters)
|
- [p2p/node] [\#6339](https://github.com/tendermint/tendermint/issues/6339) Fix bug with using custom channels (@cmwaters)
|
||||||
- [light] [\#6346](https://github.com/tendermint/tendermint/issues/6346) Correctly handle too high errors to improve client robustness (@cmwaters)
|
- [light] [\#6346](https://github.com/tendermint/tendermint/issues/6346) Correctly handle too high errors to improve client robustness (@cmwaters)
|
||||||
@@ -39,7 +60,7 @@ this issue!
|
|||||||
|
|
||||||
*April 8, 2021*
|
*April 8, 2021*
|
||||||
|
|
||||||
This release fixes a moderate severity security issue, Security Advisory Alderfly,
|
This release fixes a moderate severity security issue, Security Advisory Alderfly,
|
||||||
which impacts all networks that rely on Tendermint light clients.
|
which impacts all networks that rely on Tendermint light clients.
|
||||||
Further details will be released once networks have upgraded.
|
Further details will be released once networks have upgraded.
|
||||||
|
|
||||||
@@ -112,7 +133,7 @@ shout-out to @marbar3778 for diagnosing it quickly.
|
|||||||
|
|
||||||
## v0.34.6
|
## v0.34.6
|
||||||
|
|
||||||
*February 18, 2021*
|
*February 18, 2021*
|
||||||
|
|
||||||
_Tendermint Core v0.34.5 and v0.34.6 have been recalled due to release tooling problems._
|
_Tendermint Core v0.34.5 and v0.34.6 have been recalled due to release tooling problems._
|
||||||
|
|
||||||
@@ -120,9 +141,9 @@ _Tendermint Core v0.34.5 and v0.34.6 have been recalled due to release tooling p
|
|||||||
|
|
||||||
*February 11, 2021*
|
*February 11, 2021*
|
||||||
|
|
||||||
This release includes a fix for a memory leak in the evidence reactor (see #6068, below).
|
This release includes a fix for a memory leak in the evidence reactor (see #6068, below).
|
||||||
All Tendermint clients are recommended to upgrade.
|
All Tendermint clients are recommended to upgrade.
|
||||||
Thank you to our friends at Crypto.com for the initial report of this memory leak!
|
Thank you to our friends at Crypto.com for the initial report of this memory leak!
|
||||||
|
|
||||||
Special thanks to other external contributors on this release: @yayajacky, @odidev, @laniehei, and @c29r3!
|
Special thanks to other external contributors on this release: @yayajacky, @odidev, @laniehei, and @c29r3!
|
||||||
|
|
||||||
@@ -132,17 +153,17 @@ Special thanks to other external contributors on this release: @yayajacky, @odid
|
|||||||
- [light] [\#6026](https://github.com/tendermint/tendermint/pull/6026) Fix a bug when height isn't provided for the rpc calls: `/commit` and `/validators` (@cmwaters)
|
- [light] [\#6026](https://github.com/tendermint/tendermint/pull/6026) Fix a bug when height isn't provided for the rpc calls: `/commit` and `/validators` (@cmwaters)
|
||||||
- [evidence] [\#6068](https://github.com/tendermint/tendermint/pull/6068) Terminate broadcastEvidenceRoutine when peer is stopped (@melekes)
|
- [evidence] [\#6068](https://github.com/tendermint/tendermint/pull/6068) Terminate broadcastEvidenceRoutine when peer is stopped (@melekes)
|
||||||
|
|
||||||
## v0.34.3
|
## v0.34.3
|
||||||
|
|
||||||
*January 19, 2021*
|
*January 19, 2021*
|
||||||
|
|
||||||
This release includes a fix for a high-severity security vulnerability,
|
This release includes a fix for a high-severity security vulnerability,
|
||||||
a DoS-vector that impacted Tendermint Core v0.34.0-v0.34.2. For more details, see
|
a DoS-vector that impacted Tendermint Core v0.34.0-v0.34.2. For more details, see
|
||||||
[Security Advisory Mulberry](https://github.com/tendermint/tendermint/security/advisories/GHSA-p658-8693-mhvg)
|
[Security Advisory Mulberry](https://github.com/tendermint/tendermint/security/advisories/GHSA-p658-8693-mhvg)
|
||||||
or https://nvd.nist.gov/vuln/detail/CVE-2021-21271.
|
or https://nvd.nist.gov/vuln/detail/CVE-2021-21271.
|
||||||
|
|
||||||
Tendermint Core v0.34.3 also updates GoGo Protobuf to 1.3.2 in order to pick up the fix for
|
Tendermint Core v0.34.3 also updates GoGo Protobuf to 1.3.2 in order to pick up the fix for
|
||||||
https://nvd.nist.gov/vuln/detail/CVE-2021-3121.
|
https://nvd.nist.gov/vuln/detail/CVE-2021-3121.
|
||||||
|
|
||||||
### BUG FIXES
|
### BUG FIXES
|
||||||
|
|
||||||
@@ -234,14 +255,14 @@ Special thanks to external contributors on this release: @james-ray, @fedekunze,
|
|||||||
- [blockchain] [\#4637](https://github.com/tendermint/tendermint/pull/4637) Migrate blockchain reactor(s) to Protobuf encoding (@marbar3778)
|
- [blockchain] [\#4637](https://github.com/tendermint/tendermint/pull/4637) Migrate blockchain reactor(s) to Protobuf encoding (@marbar3778)
|
||||||
- [evidence] [\#4949](https://github.com/tendermint/tendermint/pull/4949) Migrate evidence reactor to Protobuf encoding (@marbar3778)
|
- [evidence] [\#4949](https://github.com/tendermint/tendermint/pull/4949) Migrate evidence reactor to Protobuf encoding (@marbar3778)
|
||||||
- [mempool] [\#4940](https://github.com/tendermint/tendermint/pull/4940) Migrate mempool from to Protobuf encoding (@marbar3778)
|
- [mempool] [\#4940](https://github.com/tendermint/tendermint/pull/4940) Migrate mempool from to Protobuf encoding (@marbar3778)
|
||||||
- [mempool] [\#5321](https://github.com/tendermint/tendermint/pull/5321) Batch transactions when broadcasting them to peers (@melekes)
|
- [mempool] [\#5321](https://github.com/tendermint/tendermint/pull/5321) Batch transactions when broadcasting them to peers (@melekes)
|
||||||
- `MaxBatchBytes` new config setting defines the max size of one batch.
|
- `MaxBatchBytes` new config setting defines the max size of one batch.
|
||||||
- [p2p/pex] [\#4973](https://github.com/tendermint/tendermint/pull/4973) Migrate `p2p/pex` reactor to Protobuf encoding (@marbar3778)
|
- [p2p/pex] [\#4973](https://github.com/tendermint/tendermint/pull/4973) Migrate `p2p/pex` reactor to Protobuf encoding (@marbar3778)
|
||||||
- [statesync] [\#4943](https://github.com/tendermint/tendermint/pull/4943) Migrate state sync reactor to Protobuf encoding (@marbar3778)
|
- [statesync] [\#4943](https://github.com/tendermint/tendermint/pull/4943) Migrate state sync reactor to Protobuf encoding (@marbar3778)
|
||||||
|
|
||||||
- Blockchain Protocol
|
- Blockchain Protocol
|
||||||
|
|
||||||
- [evidence] [\#4725](https://github.com/tendermint/tendermint/pull/4725) Remove `Pubkey` from `DuplicateVoteEvidence` (@marbar3778)
|
- [evidence] [\#4725](https://github.com/tendermint/tendermint/pull/4725) Remove `Pubkey` from `DuplicateVoteEvidence` (@marbar3778)
|
||||||
- [evidence] [\#5499](https://github.com/tendermint/tendermint/pull/5449) Cap evidence to a maximum number of bytes (supercedes [\#4780](https://github.com/tendermint/tendermint/pull/4780)) (@cmwaters)
|
- [evidence] [\#5499](https://github.com/tendermint/tendermint/pull/5449) Cap evidence to a maximum number of bytes (supercedes [\#4780](https://github.com/tendermint/tendermint/pull/4780)) (@cmwaters)
|
||||||
- [merkle] [\#5193](https://github.com/tendermint/tendermint/pull/5193) Header hashes are no longer empty for empty inputs, notably `DataHash`, `EvidenceHash`, and `LastResultsHash` (@erikgrinaker)
|
- [merkle] [\#5193](https://github.com/tendermint/tendermint/pull/5193) Header hashes are no longer empty for empty inputs, notably `DataHash`, `EvidenceHash`, and `LastResultsHash` (@erikgrinaker)
|
||||||
- [state] [\#4845](https://github.com/tendermint/tendermint/pull/4845) Include `GasWanted` and `GasUsed` into `LastResultsHash` (@melekes)
|
- [state] [\#4845](https://github.com/tendermint/tendermint/pull/4845) Include `GasWanted` and `GasUsed` into `LastResultsHash` (@melekes)
|
||||||
@@ -300,7 +321,7 @@ Special thanks to external contributors on this release: @james-ray, @fedekunze,
|
|||||||
- [types] [\#4852](https://github.com/tendermint/tendermint/pull/4852) Vote & Proposal `SignBytes` is now func `VoteSignBytes` & `ProposalSignBytes` (@marbar3778)
|
- [types] [\#4852](https://github.com/tendermint/tendermint/pull/4852) Vote & Proposal `SignBytes` is now func `VoteSignBytes` & `ProposalSignBytes` (@marbar3778)
|
||||||
- [types] [\#4798](https://github.com/tendermint/tendermint/pull/4798) Simplify `VerifyCommitTrusting` func + remove extra validation (@melekes)
|
- [types] [\#4798](https://github.com/tendermint/tendermint/pull/4798) Simplify `VerifyCommitTrusting` func + remove extra validation (@melekes)
|
||||||
- [types] [\#4845](https://github.com/tendermint/tendermint/pull/4845) Remove `ABCIResult` (@melekes)
|
- [types] [\#4845](https://github.com/tendermint/tendermint/pull/4845) Remove `ABCIResult` (@melekes)
|
||||||
- [types] [\#5029](https://github.com/tendermint/tendermint/pull/5029) Rename all values from `PartsHeader` to `PartSetHeader` to have consistency (@marbar3778)
|
- [types] [\#5029](https://github.com/tendermint/tendermint/pull/5029) Rename all values from `PartsHeader` to `PartSetHeader` to have consistency (@marbar3778)
|
||||||
- [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) `Total` in `Parts` & `PartSetHeader` has been changed from a `int` to a `uint32` (@marbar3778)
|
- [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) `Total` in `Parts` & `PartSetHeader` has been changed from a `int` to a `uint32` (@marbar3778)
|
||||||
- [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) Vote: `ValidatorIndex` & `Round` are now `int32` (@marbar3778)
|
- [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) Vote: `ValidatorIndex` & `Round` are now `int32` (@marbar3778)
|
||||||
- [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) Proposal: `POLRound` & `Round` are now `int32` (@marbar3778)
|
- [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) Proposal: `POLRound` & `Round` are now `int32` (@marbar3778)
|
||||||
@@ -338,7 +359,7 @@ Special thanks to external contributors on this release: @james-ray, @fedekunze,
|
|||||||
- [evidence] [\#4722](https://github.com/tendermint/tendermint/pull/4722) Consolidate evidence store and pool types to improve evidence DB (@cmwaters)
|
- [evidence] [\#4722](https://github.com/tendermint/tendermint/pull/4722) Consolidate evidence store and pool types to improve evidence DB (@cmwaters)
|
||||||
- [evidence] [\#4839](https://github.com/tendermint/tendermint/pull/4839) Reject duplicate evidence from being proposed (@cmwaters)
|
- [evidence] [\#4839](https://github.com/tendermint/tendermint/pull/4839) Reject duplicate evidence from being proposed (@cmwaters)
|
||||||
- [evidence] [\#5219](https://github.com/tendermint/tendermint/pull/5219) Change the source of evidence time to block time (@cmwaters)
|
- [evidence] [\#5219](https://github.com/tendermint/tendermint/pull/5219) Change the source of evidence time to block time (@cmwaters)
|
||||||
- [libs] [\#5126](https://github.com/tendermint/tendermint/pull/5126) Add a sync package which wraps sync.(RW)Mutex & deadlock.(RW)Mutex and use a build flag (deadlock) in order to enable deadlock checking (@marbar3778)
|
- [libs] [\#5126](https://github.com/tendermint/tendermint/pull/5126) Add a sync package which wraps sync.(RW)Mutex & deadlock.(RW)Mutex and use a build flag (deadlock) in order to enable deadlock checking (@marbar3778)
|
||||||
- [light] [\#4935](https://github.com/tendermint/tendermint/pull/4935) Fetch and compare a new header with witnesses in parallel (@melekes)
|
- [light] [\#4935](https://github.com/tendermint/tendermint/pull/4935) Fetch and compare a new header with witnesses in parallel (@melekes)
|
||||||
- [light] [\#4929](https://github.com/tendermint/tendermint/pull/4929) Compare header with witnesses only when doing bisection (@melekes)
|
- [light] [\#4929](https://github.com/tendermint/tendermint/pull/4929) Compare header with witnesses only when doing bisection (@melekes)
|
||||||
- [light] [\#4916](https://github.com/tendermint/tendermint/pull/4916) Validate basic for inbound validator sets and headers before further processing them (@cmwaters)
|
- [light] [\#4916](https://github.com/tendermint/tendermint/pull/4916) Validate basic for inbound validator sets and headers before further processing them (@cmwaters)
|
||||||
|
|||||||
@@ -21,15 +21,22 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi
|
|||||||
- [cli] \#6372 Introduce `BootstrapPeers` as part of the new p2p stack. Peers to be connected on startup (@cmwaters)
|
- [cli] \#6372 Introduce `BootstrapPeers` as part of the new p2p stack. Peers to be connected on startup (@cmwaters)
|
||||||
- [config] \#6462 Move `PrivValidator` configuration out of `BaseConfig` into its own section. (@tychoish)
|
- [config] \#6462 Move `PrivValidator` configuration out of `BaseConfig` into its own section. (@tychoish)
|
||||||
- [rpc] \#6610 Add MaxPeerBlockHeight into /status rpc call (@JayT106)
|
- [rpc] \#6610 Add MaxPeerBlockHeight into /status rpc call (@JayT106)
|
||||||
- [libs/CList] \#6626 Automatically detach the prev/next elements in Remove function (@JayT106)
|
- [fastsync/rpc] \#6620 Add TotalSyncedTime & RemainingTime to SyncInfo in /status RPC (@JayT106)
|
||||||
|
- [rpc/grpc] \#6725 Mark gRPC in the RPC layer as deprecated.
|
||||||
|
- [blockchain/v2] \#6730 Fast Sync v2 is deprecated, please use v0
|
||||||
|
- [rpc] Add genesis_chunked method to support paginated and parallel fetching of large genesis documents.
|
||||||
|
- [rpc/jsonrpc/server] \#6785 `Listen` function updated to take an `int` argument, `maxOpenConnections`, instead of an entire config object. (@williambanfield)
|
||||||
|
- [rpc] \#6820 Update RPC methods to reflect changes in the p2p layer, disabling support for `UnsafeDialSeeds` and `UnsafeDialPeers` when used with the new p2p layer, and changing the response format of the peer list in `NetInfo` for all users.
|
||||||
|
- [cli] \#6854 Remove deprecated snake case commands. (@tychoish)
|
||||||
- Apps
|
- Apps
|
||||||
- [ABCI] \#6408 Change the `key` and `value` fields from `[]byte` to `string` in the `EventAttribute` type. (@alexanderbez)
|
- [ABCI] \#6408 Change the `key` and `value` fields from `[]byte` to `string` in the `EventAttribute` type. (@alexanderbez)
|
||||||
- [ABCI] \#5447 Remove `SetOption` method from `ABCI.Client` interface
|
- [ABCI] \#5447 Remove `SetOption` method from `ABCI.Client` interface
|
||||||
- [ABCI] \#5447 Reset `Oneof` indexes for `Request` and `Response`.
|
- [ABCI] \#5447 Reset `Oneof` indexes for `Request` and `Response`.
|
||||||
- [ABCI] \#5818 Use protoio for msg length delimitation. Migrates from int64 to uint64 length delimiters.
|
- [ABCI] \#5818 Use protoio for msg length delimitation. Migrates from int64 to uint64 length delimiters.
|
||||||
|
- [ABCI] \#3546 Add `mempool_error` field to `ResponseCheckTx`. This field will contain an error string if Tendermint encountered an error while adding a transaction to the mempool. (@williambanfield)
|
||||||
- [Version] \#6494 `TMCoreSemVer` has been renamed to `TMVersion`.
|
- [Version] \#6494 `TMCoreSemVer` has been renamed to `TMVersion`.
|
||||||
- It is not required any longer to set ldflags to set version strings
|
- It is not required any longer to set ldflags to set version strings
|
||||||
|
- [abci/counter] \#6684 Delete counter example app
|
||||||
|
|
||||||
- P2P Protocol
|
- P2P Protocol
|
||||||
|
|
||||||
@@ -51,23 +58,25 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi
|
|||||||
- [store] \#5848 Remove block store state in favor of using the db iterators directly (@cmwaters)
|
- [store] \#5848 Remove block store state in favor of using the db iterators directly (@cmwaters)
|
||||||
- [state] \#5864 Use an iterator when pruning state (@cmwaters)
|
- [state] \#5864 Use an iterator when pruning state (@cmwaters)
|
||||||
- [types] \#6023 Remove `tm2pb.Header`, `tm2pb.BlockID`, `tm2pb.PartSetHeader` and `tm2pb.NewValidatorUpdate`.
|
- [types] \#6023 Remove `tm2pb.Header`, `tm2pb.BlockID`, `tm2pb.PartSetHeader` and `tm2pb.NewValidatorUpdate`.
|
||||||
- Each of the above types has a `ToProto` and `FromProto` method or function which replaced this logic.
|
- Each of the above types has a `ToProto` and `FromProto` method or function which replaced this logic.
|
||||||
- [light] \#6054 Move `MaxRetryAttempt` option from client to provider.
|
- [light] \#6054 Move `MaxRetryAttempt` option from client to provider.
|
||||||
- `NewWithOptions` now sets the max retry attempts and timeouts (@cmwaters)
|
- `NewWithOptions` now sets the max retry attempts and timeouts (@cmwaters)
|
||||||
- [all] \#6077 Change spelling from British English to American (@cmwaters)
|
- [all] \#6077 Change spelling from British English to American (@cmwaters)
|
||||||
- Rename "Subscription.Cancelled()" to "Subscription.Canceled()" in libs/pubsub
|
- Rename "Subscription.Cancelled()" to "Subscription.Canceled()" in libs/pubsub
|
||||||
- Rename "behaviour" pkg to "behavior" and internalized it in blockchain v2
|
- Rename "behaviour" pkg to "behavior" and internalized it in blockchain v2
|
||||||
- [rpc/client/http] \#6176 Remove `endpoint` arg from `New`, `NewWithTimeout` and `NewWithClient` (@melekes)
|
- [rpc/client/http] \#6176 Remove `endpoint` arg from `New`, `NewWithTimeout` and `NewWithClient` (@melekes)
|
||||||
- [rpc/client/http] \#6176 Unexpose `WSEvents` (@melekes)
|
- [rpc/client/http] \#6176 Unexpose `WSEvents` (@melekes)
|
||||||
- [rpc/jsonrpc/client/ws_client] \#6176 `NewWS` no longer accepts options (use `NewWSWithOptions` and `OnReconnect` funcs to configure the client) (@melekes)
|
- [rpc/jsonrpc/client/ws_client] \#6176 `NewWS` no longer accepts options (use `NewWSWithOptions` and `OnReconnect` funcs to configure the client) (@melekes)
|
||||||
- [internal/libs] \#6366 Move `autofile`, `clist`,`fail`,`flowrate`, `protoio`, `sync`, `tempfile`, `test` and `timer` lib packages to an internal folder
|
- [internal/libs] \#6366 Move `autofile`, `clist`,`fail`,`flowrate`, `protoio`, `sync`, `tempfile`, `test` and `timer` lib packages to an internal folder
|
||||||
- [libs/rand] \#6364 Remove most of libs/rand in favour of standard lib's `math/rand` (@liamsi)
|
- [libs/rand] \#6364 Remove most of libs/rand in favour of standard lib's `math/rand` (@liamsi)
|
||||||
- [mempool] \#6466 The original mempool reactor has been versioned as `v0` and moved to a sub-package under the root `mempool` package.
|
- [mempool] \#6466 The original mempool reactor has been versioned as `v0` and moved to a sub-package under the root `mempool` package.
|
||||||
Some core types have been kept in the `mempool` package such as `TxCache` and its implementations, the `Mempool` interface itself
|
Some core types have been kept in the `mempool` package such as `TxCache` and its implementations, the `Mempool` interface itself
|
||||||
and `TxInfo`. (@alexanderbez)
|
and `TxInfo`. (@alexanderbez)
|
||||||
- [crypto/sr25519] \#6526 Do not re-execute the Ed25519-style key derivation step when doing signing and verification. The derivation is now done once and only once. This breaks `sr25519.GenPrivKeyFromSecret` output compatibility. (@Yawning)
|
- [crypto/sr25519] \#6526 Do not re-execute the Ed25519-style key derivation step when doing signing and verification. The derivation is now done once and only once. This breaks `sr25519.GenPrivKeyFromSecret` output compatibility. (@Yawning)
|
||||||
- [types] \#6627 Move `NodeKey` to types to make the type public.
|
- [types] \#6627 Move `NodeKey` to types to make the type public.
|
||||||
- [config] \#6627 Extend `config` to contain methods `LoadNodeKeyID` and `LoadOrGenNodeKeyID`
|
- [config] \#6627 Extend `config` to contain methods `LoadNodeKeyID` and `LoadOrGenNodeKeyID`
|
||||||
|
- [blocksync] \#6755 Rename `FastSync` and `Blockchain` package to `BlockSync`
|
||||||
|
(@cmwaters)
|
||||||
|
|
||||||
- Blockchain Protocol
|
- Blockchain Protocol
|
||||||
|
|
||||||
@@ -78,6 +87,7 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi
|
|||||||
|
|
||||||
- Tooling
|
- Tooling
|
||||||
- [tools] \#6498 Use the OS home dir instead of the hardcoded PATH. (@JayT106)
|
- [tools] \#6498 Use the OS home dir instead of the hardcoded PATH. (@JayT106)
|
||||||
|
- [cli/indexer] \#6676 Reindex events command line tooling. (@JayT106)
|
||||||
|
|
||||||
### FEATURES
|
### FEATURES
|
||||||
|
|
||||||
@@ -95,8 +105,12 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi
|
|||||||
- Applications that do not specify a priority, i.e. zero, will have transactions reaped by the order in which they are received by the node.
|
- Applications that do not specify a priority, i.e. zero, will have transactions reaped by the order in which they are received by the node.
|
||||||
- Transactions are gossiped in FIFO order as they are in `v0`.
|
- Transactions are gossiped in FIFO order as they are in `v0`.
|
||||||
- [config/indexer] \#6411 Introduce support for custom event indexing data sources, specifically PostgreSQL. (@JayT106)
|
- [config/indexer] \#6411 Introduce support for custom event indexing data sources, specifically PostgreSQL. (@JayT106)
|
||||||
|
- [fastsync/event] \#6619 Emit fastsync status event when switching consensus/fastsync (@JayT106)
|
||||||
|
- [statesync/event] \#6700 Emit statesync status start/end event (@JayT106)
|
||||||
|
- [inspect] \#6785 Add a new `inspect` command for introspecting the state and block store of a crashed tendermint node. (@williambanfield)
|
||||||
|
|
||||||
### IMPROVEMENTS
|
### IMPROVEMENTS
|
||||||
|
|
||||||
- [libs/log] Console log formatting changes as a result of \#6534 and \#6589. (@tychoish)
|
- [libs/log] Console log formatting changes as a result of \#6534 and \#6589. (@tychoish)
|
||||||
- [statesync] \#6566 Allow state sync fetchers and request timeout to be configurable. (@alexanderbez)
|
- [statesync] \#6566 Allow state sync fetchers and request timeout to be configurable. (@alexanderbez)
|
||||||
- [types] \#6478 Add `block_id` to `newblock` event (@jeebster)
|
- [types] \#6478 Add `block_id` to `newblock` event (@jeebster)
|
||||||
@@ -144,6 +158,7 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi
|
|||||||
- [blockchain/v1] [\#5701](https://github.com/tendermint/tendermint/pull/5701) Handle peers without blocks (@melekes)
|
- [blockchain/v1] [\#5701](https://github.com/tendermint/tendermint/pull/5701) Handle peers without blocks (@melekes)
|
||||||
- [blockchain/v1] \#5711 Fix deadlock (@melekes)
|
- [blockchain/v1] \#5711 Fix deadlock (@melekes)
|
||||||
- [evidence] \#6375 Fix bug with inconsistent LightClientAttackEvidence hashing (cmwaters)
|
- [evidence] \#6375 Fix bug with inconsistent LightClientAttackEvidence hashing (cmwaters)
|
||||||
- [rpc] \#6507 fix RPC client doesn't handle url's without ports (@JayT106)
|
- [rpc] \#6507 Ensure RPC client can handle URLs without ports (@JayT106)
|
||||||
- [statesync] \#6463 Adds Reverse Sync feature to fetch historical light blocks after state sync in order to verify any evidence (@cmwaters)
|
- [statesync] \#6463 Adds Reverse Sync feature to fetch historical light blocks after state sync in order to verify any evidence (@cmwaters)
|
||||||
- [fastsync] \#6590 Update the metrics during fast-sync (@JayT106)
|
- [fastsync] \#6590 Update the metrics during fast-sync (@JayT106)
|
||||||
|
- [gitignore] \#6668 Fix gitignore of abci-cli (@tanyabouman)
|
||||||
|
|||||||
169
CONTRIBUTING.md
169
CONTRIBUTING.md
@@ -227,16 +227,96 @@ Fixes #nnnn
|
|||||||
|
|
||||||
Each PR should have one commit once it lands on `master`; this can be accomplished by using the "squash and merge" button on Github. Be sure to edit your commit message, though!
|
Each PR should have one commit once it lands on `master`; this can be accomplished by using the "squash and merge" button on Github. Be sure to edit your commit message, though!
|
||||||
|
|
||||||
### Release Procedure
|
### Release procedure
|
||||||
|
|
||||||
#### Major Release
|
#### A note about backport branches
|
||||||
|
Tendermint's `master` branch is under active development.
|
||||||
|
Releases are specified using tags and are built from long-lived "backport" branches.
|
||||||
|
Each release "line" (e.g. 0.34 or 0.33) has its own long-lived backport branch,
|
||||||
|
and the backport branches have names like `v0.34.x` or `v0.33.x`
|
||||||
|
(literally, `x`; it is not a placeholder in this case).
|
||||||
|
|
||||||
|
As non-breaking changes land on `master`, they should also be backported (cherry-picked)
|
||||||
|
to these backport branches.
|
||||||
|
|
||||||
|
We use Mergify's [backport feature](https://mergify.io/features/backports) to automatically backport
|
||||||
|
to the needed branch. There should be a label for any backport branch that you'll be targeting.
|
||||||
|
To notify the bot to backport a pull request, mark the pull request with
|
||||||
|
the label `S:backport-to-<backport_branch>`.
|
||||||
|
Once the original pull request is merged, the bot will try to cherry-pick the pull request
|
||||||
|
to the backport branch. If the bot fails to backport, it will open a pull request.
|
||||||
|
The author of the original pull request is responsible for solving the conflicts and
|
||||||
|
merging the pull request.
|
||||||
|
|
||||||
|
#### Creating a backport branch
|
||||||
|
If this is the first release candidate for a major release, you get to have the honor of creating
|
||||||
|
the backport branch!
|
||||||
|
|
||||||
|
Note that, after creating the backport branch, you'll also need to update the tags on `master`
|
||||||
|
so that `go mod` is able to order the branches correctly. You should tag `master` with a "dev" tag
|
||||||
|
that is "greater than" the backport branches' tags. See #6072 for more context.
|
||||||
|
|
||||||
|
In the following example, we'll assume that we're making a backport branch for
|
||||||
|
the 0.35.x line.
|
||||||
|
|
||||||
|
1. Start on `master`
|
||||||
|
2. Create the backport branch:
|
||||||
|
`git checkout -b v0.35.x`
|
||||||
|
3. Go back to master and tag it as the dev branch for the _next_ major release and push it back up:
|
||||||
|
`git tag -a v0.36.0-dev; git push origin v0.36.0-dev`
|
||||||
|
4. Create a new workflow to run the e2e nightlies for this backport branch.
|
||||||
|
(See https://github.com/tendermint/tendermint/blob/master/.github/workflows/e2e-nightly-34x.yml
|
||||||
|
for an example.)
|
||||||
|
|
||||||
|
#### Release candidates
|
||||||
|
|
||||||
|
Before creating an official release, especially a major release, we may want to create a
|
||||||
|
release candidate (RC) for our friends and partners to test out. We use git tags to
|
||||||
|
create RCs, and we build them off of backport branches.
|
||||||
|
|
||||||
|
Tags for RCs should follow the "standard" release naming conventions, with `-rcX` at the end
|
||||||
|
(for example, `v0.35.0-rc0`).
|
||||||
|
|
||||||
|
(Note that branches and tags _cannot_ have the same names, so it's important that these branches
|
||||||
|
have distinct names from the tags/release names.)
|
||||||
|
|
||||||
|
If this is the first RC for a major release, you'll have to make a new backport branch (see above).
|
||||||
|
Otherwise:
|
||||||
|
|
||||||
|
1. Start from the backport branch (e.g. `v0.35.x`).
|
||||||
|
1. Run the integration tests and the e2e nightlies
|
||||||
|
(which can be triggered from the Github UI;
|
||||||
|
e.g., https://github.com/tendermint/tendermint/actions/workflows/e2e-nightly-34x.yml).
|
||||||
|
1. Prepare the changelog:
|
||||||
|
- Move the changes included in `CHANGELOG_PENDING.md` into `CHANGELOG.md`.
|
||||||
|
- Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
|
||||||
|
all PRs
|
||||||
|
- Ensure that UPGRADING.md is up-to-date and includes notes on any breaking changes
|
||||||
|
or other upgrading flows.
|
||||||
|
- Bump TMVersionDefault version in `version.go`
|
||||||
|
- Bump P2P and block protocol versions in `version.go`, if necessary
|
||||||
|
- Bump ABCI protocol version in `version.go`, if necessary
|
||||||
|
1. Open a PR with these changes against the backport branch.
|
||||||
|
1. Once these changes have landed on the backport branch, be sure to pull them back down locally.
|
||||||
|
2. Once you have the changes locally, create the new tag, specifying a name and a tag "message":
|
||||||
|
`git tag -a v0.35.0-rc0 -m "Release Candidate v0.35.0-rc0"`
|
||||||
|
3. Push the tag back up to origin:
|
||||||
|
`git push origin v0.35.0-rc0`
|
||||||
|
Now the tag should be available on the repo's releases page.
|
||||||
|
4. Future RCs will continue to be built off of this branch.
|
||||||
|
|
||||||
|
Note that this process should only be used for "true" RCs--
|
||||||
|
release candidates that, if successful, will be the next release.
|
||||||
|
For more experimental "RCs," create a new, short-lived branch and tag that instead.
|
||||||
|
|
||||||
|
#### Major release
|
||||||
|
|
||||||
This major release process assumes that this release was preceded by release candidates.
|
This major release process assumes that this release was preceded by release candidates.
|
||||||
If there were no release candidates, and you'd like to cut a major release directly from master, see below.
|
If there were no release candidates, begin by creating a backport branch, as described above.
|
||||||
|
|
||||||
1. Start on the latest RC branch (`RCx/vX.X.0`).
|
1. Start on the backport branch (e.g. `v0.35.x`)
|
||||||
2. Run integration tests.
|
2. Run integration tests and the e2e nightlies.
|
||||||
3. Branch off of the RC branch (`git checkout -b release-prep`) and prepare the release:
|
3. Prepare the release:
|
||||||
- "Squash" changes from the changelog entries for the RCs into a single entry,
|
- "Squash" changes from the changelog entries for the RCs into a single entry,
|
||||||
and add all changes included in `CHANGELOG_PENDING.md`.
|
and add all changes included in `CHANGELOG_PENDING.md`.
|
||||||
(Squashing includes both combining all entries, as well as removing or simplifying
|
(Squashing includes both combining all entries, as well as removing or simplifying
|
||||||
@@ -248,58 +328,24 @@ If there were no release candidates, and you'd like to cut a major release direc
|
|||||||
- Bump TMVersionDefault version in `version.go`
|
- Bump TMVersionDefault version in `version.go`
|
||||||
- Bump P2P and block protocol versions in `version.go`, if necessary
|
- Bump P2P and block protocol versions in `version.go`, if necessary
|
||||||
- Bump ABCI protocol version in `version.go`, if necessary
|
- Bump ABCI protocol version in `version.go`, if necessary
|
||||||
- Add any release notes you would like to be added to the body of the release to `release_notes.md`.
|
4. Open a PR with these changes against the backport branch.
|
||||||
4. Open a PR with these changes against the RC branch (`RCx/vX.X.0`).
|
5. Once these changes are on the backport branch, push a tag with prepared release details.
|
||||||
5. Once these changes are on the RC branch, branch off of the RC branch again to create a release branch:
|
This will trigger the actual release `v0.35.0`.
|
||||||
- `git checkout RCx/vX.X.0`
|
- `git tag -a v0.35.0 -m 'Release v0.35.0'`
|
||||||
- `git checkout -b release/vX.X.0`
|
- `git push origin v0.35.0`
|
||||||
6. Push a tag with prepared release details. This will trigger the actual release `vX.X.0`.
|
|
||||||
- `git tag -a vX.X.0 -m 'Release vX.X.0'`
|
|
||||||
- `git push origin vX.X.0`
|
|
||||||
7. Make sure that `master` is updated with the latest `CHANGELOG.md`, `CHANGELOG_PENDING.md`, and `UPGRADING.md`.
|
7. Make sure that `master` is updated with the latest `CHANGELOG.md`, `CHANGELOG_PENDING.md`, and `UPGRADING.md`.
|
||||||
8. Create the long-lived minor release branch `RC0/vX.X.1` for the next point release on this
|
|
||||||
new major release series.
|
|
||||||
|
|
||||||
##### Major Release (from `master`)
|
#### Minor release (point releases)
|
||||||
|
|
||||||
1. Start on `master`
|
|
||||||
2. Run integration tests (see `test_integrations` in Makefile)
|
|
||||||
3. Prepare release in a pull request against `master` (to be squash merged):
|
|
||||||
- Copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md`; if this release
|
|
||||||
had release candidates, squash all the RC updates into one
|
|
||||||
- Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
|
|
||||||
all issues
|
|
||||||
- Run `bash ./scripts/authors.sh` to get a list of authors since the latest
|
|
||||||
release, and add the github aliases of external contributors to the top of
|
|
||||||
the changelog. To lookup an alias from an email, try `bash ./scripts/authors.sh <email>`
|
|
||||||
- Reset the `CHANGELOG_PENDING.md`
|
|
||||||
- Bump TMVersionDefault version in `version.go`
|
|
||||||
- Bump P2P and block protocol versions in `version.go`, if necessary
|
|
||||||
- Bump ABCI protocol version in `version.go`, if necessary
|
|
||||||
- Make sure all significant breaking changes are covered in `UPGRADING.md`
|
|
||||||
- Add any release notes you would like to be added to the body of the release to `release_notes.md`.
|
|
||||||
4. Push a tag with prepared release details (this will trigger the release `vX.X.0`)
|
|
||||||
- `git tag -a vX.X.x -m 'Release vX.X.x'`
|
|
||||||
- `git push origin vX.X.x`
|
|
||||||
5. Update the `CHANGELOG.md` file on master with the releases changelog.
|
|
||||||
6. Delete any RC branches and tags for this release (if applicable)
|
|
||||||
|
|
||||||
#### Minor Release (Point Releases)
|
|
||||||
|
|
||||||
Minor releases are done differently from major releases: They are built off of long-lived backport branches, rather than from master.
|
Minor releases are done differently from major releases: They are built off of long-lived backport branches, rather than from master.
|
||||||
Each release "line" (e.g. 0.34 or 0.33) has its own long-lived backport branch, and
|
|
||||||
the backport branches have names like `v0.34.x` or `v0.33.x` (literally, `x`; it is not a placeholder in this case).
|
|
||||||
|
|
||||||
As non-breaking changes land on `master`, they should also be backported (cherry-picked) to these backport branches.
|
As non-breaking changes land on `master`, they should also be backported (cherry-picked) to these backport branches.
|
||||||
|
|
||||||
We use Mergify's [backport feature](https://mergify.io/features/backports) to automatically backport to the needed branch. Depending on which backport branch you need to backport to there will be labels for them. To notify the bot to backport a pull request, mark the pull request with the label `backport-to-<backport_branch>`. Once the original pull request is merged, the bot will try to cherry-pick the pull request to the backport branch. If the bot fails to backport, it will open a pull request. The author of the original pull request is responsible for solving the conflicts and merging the pull request.
|
|
||||||
|
|
||||||
Minor releases don't have release candidates by default, although any tricky changes may merit a release candidate.
|
Minor releases don't have release candidates by default, although any tricky changes may merit a release candidate.
|
||||||
|
|
||||||
To create a minor release:
|
To create a minor release:
|
||||||
|
|
||||||
1. Checkout the long-lived backport branch: `git checkout vX.X.x`
|
1. Checkout the long-lived backport branch: `git checkout v0.35.x`
|
||||||
2. Run integration tests: `make test_integrations`
|
2. Run integration tests (`make test_integrations`) and the nightlies.
|
||||||
3. Check out a new branch and prepare the release:
|
3. Check out a new branch and prepare the release:
|
||||||
- Copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md`
|
- Copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md`
|
||||||
- Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for all issues
|
- Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for all issues
|
||||||
@@ -308,35 +354,14 @@ To create a minor release:
|
|||||||
- Bump the ABCI version number, if necessary.
|
- Bump the ABCI version number, if necessary.
|
||||||
(Note that ABCI follows semver, and that ABCI versions are the only versions
|
(Note that ABCI follows semver, and that ABCI versions are the only versions
|
||||||
which can change during minor releases, and only field additions are valid minor changes.)
|
which can change during minor releases, and only field additions are valid minor changes.)
|
||||||
- Add any release notes you would like to be added to the body of the release to `release_notes.md`.
|
4. Open a PR with these changes that will land them back on `v0.35.x`
|
||||||
4. Open a PR with these changes that will land them back on `vX.X.x`
|
|
||||||
5. Once this change has landed on the backport branch, make sure to pull it locally, then push a tag.
|
5. Once this change has landed on the backport branch, make sure to pull it locally, then push a tag.
|
||||||
- `git tag -a vX.X.x -m 'Release vX.X.x'`
|
- `git tag -a v0.35.1 -m 'Release v0.35.1'`
|
||||||
- `git push origin vX.X.x`
|
- `git push origin v0.35.1`
|
||||||
6. Create a pull request back to master with the CHANGELOG & version changes from the latest release.
|
6. Create a pull request back to master with the CHANGELOG & version changes from the latest release.
|
||||||
- Remove all `R:minor` labels from the pull requests that were included in the release.
|
- Remove all `R:minor` labels from the pull requests that were included in the release.
|
||||||
- Do not merge the backport branch into master.
|
- Do not merge the backport branch into master.
|
||||||
|
|
||||||
#### Release Candidates
|
|
||||||
|
|
||||||
Before creating an official release, especially a major release, we may want to create a
|
|
||||||
release candidate (RC) for our friends and partners to test out. We use git tags to
|
|
||||||
create RCs, and we build them off of RC branches. RC branches typically have names formatted
|
|
||||||
like `RCX/vX.X.X` (or, concretely, `RC0/v0.34.0`), while the tags themselves follow
|
|
||||||
the "standard" release naming conventions, with `-rcX` at the end (`vX.X.X-rcX`).
|
|
||||||
|
|
||||||
(Note that branches and tags _cannot_ have the same names, so it's important that these branches
|
|
||||||
have distinct names from the tags/release names.)
|
|
||||||
|
|
||||||
1. Start from the RC branch (e.g. `RC0/v0.34.0`).
|
|
||||||
2. Create the new tag, specifying a name and a tag "message":
|
|
||||||
`git tag -a v0.34.0-rc0 -m "Release Candidate v0.34.0-rc0"`
|
|
||||||
3. Push the tag back up to origin:
|
|
||||||
`git push origin v0.34.0-rc0`
|
|
||||||
Now the tag should be available on the repo's releases page.
|
|
||||||
4. Create a new release candidate branch for any possible updates to the RC:
|
|
||||||
`git checkout -b RC1/v0.34.0; git push origin RC1/v0.34.0`
|
|
||||||
|
|
||||||
## Testing
|
## Testing
|
||||||
|
|
||||||
### Unit tests
|
### Unit tests
|
||||||
|
|||||||
11
Makefile
11
Makefile
@@ -202,7 +202,7 @@ format:
|
|||||||
|
|
||||||
lint:
|
lint:
|
||||||
@echo "--> Running linter"
|
@echo "--> Running linter"
|
||||||
@golangci-lint run
|
go run github.com/golangci/golangci-lint/cmd/golangci-lint run
|
||||||
.PHONY: lint
|
.PHONY: lint
|
||||||
|
|
||||||
DESTINATION = ./index.html.md
|
DESTINATION = ./index.html.md
|
||||||
@@ -231,6 +231,15 @@ build-docker: build-linux
|
|||||||
rm -rf DOCKER/tendermint
|
rm -rf DOCKER/tendermint
|
||||||
.PHONY: build-docker
|
.PHONY: build-docker
|
||||||
|
|
||||||
|
|
||||||
|
###############################################################################
|
||||||
|
### Mocks ###
|
||||||
|
###############################################################################
|
||||||
|
|
||||||
|
mockery:
|
||||||
|
go generate -run="./scripts/mockery_generate.sh" ./...
|
||||||
|
.PHONY: mockery
|
||||||
|
|
||||||
###############################################################################
|
###############################################################################
|
||||||
### Local testnet using docker ###
|
### Local testnet using docker ###
|
||||||
###############################################################################
|
###############################################################################
|
||||||
|
|||||||
112
UPGRADING.md
112
UPGRADING.md
@@ -17,21 +17,45 @@ This guide provides instructions for upgrading to specific versions of Tendermin
|
|||||||
|
|
||||||
### Config Changes
|
### Config Changes
|
||||||
|
|
||||||
* `fast_sync = "v1"` is no longer supported. Please use `v2` instead.
|
* `fast_sync = "v1"` and `fast_sync = "v2"` are no longer supported. Please use `v0` instead.
|
||||||
|
|
||||||
* All config parameters are now hyphen-case (also known as kebab-case) instead of snake_case. Before restarting the node make sure
|
* All config parameters are now hyphen-case (also known as kebab-case) instead of snake_case. Before restarting the node make sure
|
||||||
you have updated all the variables in your `config.toml` file.
|
you have updated all the variables in your `config.toml` file.
|
||||||
|
|
||||||
* Added `--mode` flag and `mode` config variable on `config.toml` for setting Mode of the Node: `full` | `validator` | `seed` (default: `full`)
|
* Added `--mode` flag and `mode` config variable on `config.toml` for setting Mode of the Node: `full` | `validator` | `seed` (default: `full`)
|
||||||
[ADR-52](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-052-tendermint-mode.md)
|
[ADR-52](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-052-tendermint-mode.md)
|
||||||
|
|
||||||
* `BootstrapPeers` has been added as part of the new p2p stack. This will eventually replace
|
* `BootstrapPeers` has been added as part of the new p2p stack. This will eventually replace
|
||||||
`Seeds`. Bootstrap peers are connected with on startup if needed for peer discovery. Unlike
|
`Seeds`. Bootstrap peers are connected with on startup if needed for peer discovery. Unlike
|
||||||
persistent peers, there's no guarantee that the node will remain connected with these peers.
|
persistent peers, there's no guarantee that the node will remain connected with these peers.
|
||||||
|
|
||||||
- configuration values starting with `priv-validator-` have moved to the new
|
* configuration values starting with `priv-validator-` have moved to the new
|
||||||
`priv-validator` section, without the `priv-validator-` prefix.
|
`priv-validator` section, without the `priv-validator-` prefix.
|
||||||
|
|
||||||
|
* The fast sync process as well as the blockchain package and service has all
|
||||||
|
been renamed to block sync
|
||||||
|
|
||||||
|
### Key Format Changes
|
||||||
|
|
||||||
|
The format of all tendermint on-disk database keys changes in
|
||||||
|
0.35. Upgrading nodes must either re-sync all data or run a migration
|
||||||
|
script provided in this release. The script located in
|
||||||
|
`github.com/tendermint/tendermint/scripts/keymigrate/migrate.go`
|
||||||
|
provides the function `Migrate(context.Context, db.DB)` which you can
|
||||||
|
operationalize as makes sense for your deployment.
|
||||||
|
|
||||||
|
For ease of use the `tendermint` command includes a CLI version of the
|
||||||
|
migration script, which you can invoke, as in:
|
||||||
|
|
||||||
|
tendermint key-migrate
|
||||||
|
|
||||||
|
This reads the configuration file as normal and allows the
|
||||||
|
`--db-backend` and `--db-dir` flags to change database operations as
|
||||||
|
needed.
|
||||||
|
|
||||||
|
The migration operation is idempotent and can be run more than once,
|
||||||
|
if needed.
|
||||||
|
|
||||||
### CLI Changes
|
### CLI Changes
|
||||||
|
|
||||||
* You must now specify the node mode (validator|full|seed) in `tendermint init [mode]`
|
* You must now specify the node mode (validator|full|seed) in `tendermint init [mode]`
|
||||||
@@ -63,7 +87,7 @@ are:
|
|||||||
- `blockchain`
|
- `blockchain`
|
||||||
- `evidence`
|
- `evidence`
|
||||||
|
|
||||||
Accordingly, the space `node` package was changed to reduce access to
|
Accordingly, the `node` package was changed to reduce access to
|
||||||
tendermint internals: applications that use tendermint as a library
|
tendermint internals: applications that use tendermint as a library
|
||||||
will need to change to accommodate these changes. Most notably:
|
will need to change to accommodate these changes. Most notably:
|
||||||
|
|
||||||
@@ -74,6 +98,34 @@ will need to change to accommodate these changes. Most notably:
|
|||||||
longer exported and have been replaced with `node.New` and
|
longer exported and have been replaced with `node.New` and
|
||||||
`node.NewDefault` which provide more functional interfaces.
|
`node.NewDefault` which provide more functional interfaces.
|
||||||
|
|
||||||
|
### RPC changes
|
||||||
|
|
||||||
|
#### gRPC Support
|
||||||
|
|
||||||
|
Mark gRPC in the RPC layer as deprecated and to be removed in 0.36.
|
||||||
|
|
||||||
|
#### Peer Management Interface
|
||||||
|
|
||||||
|
When running with the new P2P Layer, the methods `UnsafeDialSeeds` and
|
||||||
|
`UnsafeDialPeers` RPC methods will always return an error. They are
|
||||||
|
deprecated and will be removed in 0.36 when the legacy peer stack is
|
||||||
|
removed.
|
||||||
|
|
||||||
|
Additionally the format of the Peer list returned in the `NetInfo`
|
||||||
|
method changes in this release to accommodate the different way that
|
||||||
|
the new stack tracks data about peers. This change affects users of
|
||||||
|
both stacks.
|
||||||
|
|
||||||
|
### Support for Custom Reactor and Mempool Implementations
|
||||||
|
|
||||||
|
The changes to p2p layer removed existing support for custom
|
||||||
|
reactors. Based on our understanding of how this functionality was
|
||||||
|
used, the introduction of the prioritized mempool covers nearly all of
|
||||||
|
the use cases for custom reactors. If you are currently running custom
|
||||||
|
reactors and mempools and are having trouble seeing the migration path
|
||||||
|
for your project please feel free to reach out to the Tendermint Core
|
||||||
|
development team directly.
|
||||||
|
|
||||||
## v0.34.0
|
## v0.34.0
|
||||||
|
|
||||||
**Upgrading to Tendermint 0.34 requires a blockchain restart.**
|
**Upgrading to Tendermint 0.34 requires a blockchain restart.**
|
||||||
@@ -227,8 +279,8 @@ Other user-relevant changes include:
|
|||||||
|
|
||||||
* The old `lite` package was removed; the new light client uses the `light` package.
|
* The old `lite` package was removed; the new light client uses the `light` package.
|
||||||
* The `Verifier` was broken up into two pieces:
|
* The `Verifier` was broken up into two pieces:
|
||||||
* Core verification logic (pure `VerifyX` functions)
|
* Core verification logic (pure `VerifyX` functions)
|
||||||
* `Client` object, which represents the complete light client
|
* `Client` object, which represents the complete light client
|
||||||
* The new light clients stores headers & validator sets as `LightBlock`s
|
* The new light clients stores headers & validator sets as `LightBlock`s
|
||||||
* The RPC client can be found in the `/rpc` directory.
|
* The RPC client can be found in the `/rpc` directory.
|
||||||
* The HTTP(S) proxy is located in the `/proxy` directory.
|
* The HTTP(S) proxy is located in the `/proxy` directory.
|
||||||
@@ -360,12 +412,12 @@ Evidence Params has been changed to include duration.
|
|||||||
### Go API
|
### Go API
|
||||||
|
|
||||||
* `libs/common` has been removed in favor of specific pkgs.
|
* `libs/common` has been removed in favor of specific pkgs.
|
||||||
* `async`
|
* `async`
|
||||||
* `service`
|
* `service`
|
||||||
* `rand`
|
* `rand`
|
||||||
* `net`
|
* `net`
|
||||||
* `strings`
|
* `strings`
|
||||||
* `cmap`
|
* `cmap`
|
||||||
* removal of `errors` pkg
|
* removal of `errors` pkg
|
||||||
|
|
||||||
### RPC Changes
|
### RPC Changes
|
||||||
@@ -434,9 +486,9 @@ Prior to the update, suppose your `ResponseDeliverTx` look like:
|
|||||||
```go
|
```go
|
||||||
abci.ResponseDeliverTx{
|
abci.ResponseDeliverTx{
|
||||||
Tags: []kv.Pair{
|
Tags: []kv.Pair{
|
||||||
{Key: []byte("sender"), Value: []byte("foo")},
|
{Key: []byte("sender"), Value: []byte("foo")},
|
||||||
{Key: []byte("recipient"), Value: []byte("bar")},
|
{Key: []byte("recipient"), Value: []byte("bar")},
|
||||||
{Key: []byte("amount"), Value: []byte("35")},
|
{Key: []byte("amount"), Value: []byte("35")},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
@@ -455,14 +507,14 @@ the following `Events`:
|
|||||||
```go
|
```go
|
||||||
abci.ResponseDeliverTx{
|
abci.ResponseDeliverTx{
|
||||||
Events: []abci.Event{
|
Events: []abci.Event{
|
||||||
{
|
{
|
||||||
Type: "transfer",
|
Type: "transfer",
|
||||||
Attributes: kv.Pairs{
|
Attributes: kv.Pairs{
|
||||||
{Key: []byte("sender"), Value: []byte("foo")},
|
{Key: []byte("sender"), Value: []byte("foo")},
|
||||||
{Key: []byte("recipient"), Value: []byte("bar")},
|
{Key: []byte("recipient"), Value: []byte("bar")},
|
||||||
{Key: []byte("amount"), Value: []byte("35")},
|
{Key: []byte("amount"), Value: []byte("35")},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -510,9 +562,9 @@ In this case, the WS client will receive an error with description:
|
|||||||
"jsonrpc": "2.0",
|
"jsonrpc": "2.0",
|
||||||
"id": "{ID}#event",
|
"id": "{ID}#event",
|
||||||
"error": {
|
"error": {
|
||||||
"code": -32000,
|
"code": -32000,
|
||||||
"msg": "Server error",
|
"msg": "Server error",
|
||||||
"data": "subscription was canceled (reason: client is not pulling messages fast enough)" // or "subscription was canceled (reason: Tendermint exited)"
|
"data": "subscription was canceled (reason: client is not pulling messages fast enough)" // or "subscription was canceled (reason: Tendermint exited)"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -718,9 +770,9 @@ just the `Data` field set:
|
|||||||
|
|
||||||
```go
|
```go
|
||||||
[]ProofOp{
|
[]ProofOp{
|
||||||
ProofOp{
|
ProofOp{
|
||||||
Data: <proof bytes>,
|
Data: <proof bytes>,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|||||||
@@ -15,7 +15,7 @@ const (
|
|||||||
echoRetryIntervalSeconds = 1
|
echoRetryIntervalSeconds = 1
|
||||||
)
|
)
|
||||||
|
|
||||||
//go:generate mockery --case underscore --name Client
|
//go:generate ../../scripts/mockery_generate.sh Client
|
||||||
|
|
||||||
// Client defines an interface for an ABCI client.
|
// Client defines an interface for an ABCI client.
|
||||||
//
|
//
|
||||||
@@ -35,29 +35,39 @@ type Client interface {
|
|||||||
FlushAsync(context.Context) (*ReqRes, error)
|
FlushAsync(context.Context) (*ReqRes, error)
|
||||||
EchoAsync(ctx context.Context, msg string) (*ReqRes, error)
|
EchoAsync(ctx context.Context, msg string) (*ReqRes, error)
|
||||||
InfoAsync(context.Context, types.RequestInfo) (*ReqRes, error)
|
InfoAsync(context.Context, types.RequestInfo) (*ReqRes, error)
|
||||||
|
DeliverTxAsync(context.Context, types.RequestDeliverTx) (*ReqRes, error)
|
||||||
CheckTxAsync(context.Context, types.RequestCheckTx) (*ReqRes, error)
|
CheckTxAsync(context.Context, types.RequestCheckTx) (*ReqRes, error)
|
||||||
QueryAsync(context.Context, types.RequestQuery) (*ReqRes, error)
|
QueryAsync(context.Context, types.RequestQuery) (*ReqRes, error)
|
||||||
CommitAsync(context.Context) (*ReqRes, error)
|
CommitAsync(context.Context) (*ReqRes, error)
|
||||||
InitChainAsync(context.Context, types.RequestInitChain) (*ReqRes, error)
|
InitChainAsync(context.Context, types.RequestInitChain) (*ReqRes, error)
|
||||||
|
BeginBlockAsync(context.Context, types.RequestBeginBlock) (*ReqRes, error)
|
||||||
|
EndBlockAsync(context.Context, types.RequestEndBlock) (*ReqRes, error)
|
||||||
ListSnapshotsAsync(context.Context, types.RequestListSnapshots) (*ReqRes, error)
|
ListSnapshotsAsync(context.Context, types.RequestListSnapshots) (*ReqRes, error)
|
||||||
OfferSnapshotAsync(context.Context, types.RequestOfferSnapshot) (*ReqRes, error)
|
OfferSnapshotAsync(context.Context, types.RequestOfferSnapshot) (*ReqRes, error)
|
||||||
LoadSnapshotChunkAsync(context.Context, types.RequestLoadSnapshotChunk) (*ReqRes, error)
|
LoadSnapshotChunkAsync(context.Context, types.RequestLoadSnapshotChunk) (*ReqRes, error)
|
||||||
ApplySnapshotChunkAsync(context.Context, types.RequestApplySnapshotChunk) (*ReqRes, error)
|
ApplySnapshotChunkAsync(context.Context, types.RequestApplySnapshotChunk) (*ReqRes, error)
|
||||||
FinalizeBlockAsync(context.Context, types.RequestFinalizeBlock) (*ReqRes, error)
|
ExtendVoteAsync(context.Context, types.RequestExtendVote) (*ReqRes, error)
|
||||||
|
VerifyVoteExtensionAsync(context.Context, types.RequestVerifyVoteExtension) (*ReqRes, error)
|
||||||
|
PrepareProposalAsync(context.Context, types.RequestPrepareProposal) (*ReqRes, error)
|
||||||
|
|
||||||
// Synchronous requests
|
// Synchronous requests
|
||||||
FlushSync(context.Context) error
|
FlushSync(context.Context) error
|
||||||
EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error)
|
EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error)
|
||||||
InfoSync(context.Context, types.RequestInfo) (*types.ResponseInfo, error)
|
InfoSync(context.Context, types.RequestInfo) (*types.ResponseInfo, error)
|
||||||
|
DeliverTxSync(context.Context, types.RequestDeliverTx) (*types.ResponseDeliverTx, error)
|
||||||
CheckTxSync(context.Context, types.RequestCheckTx) (*types.ResponseCheckTx, error)
|
CheckTxSync(context.Context, types.RequestCheckTx) (*types.ResponseCheckTx, error)
|
||||||
QuerySync(context.Context, types.RequestQuery) (*types.ResponseQuery, error)
|
QuerySync(context.Context, types.RequestQuery) (*types.ResponseQuery, error)
|
||||||
CommitSync(context.Context) (*types.ResponseCommit, error)
|
CommitSync(context.Context) (*types.ResponseCommit, error)
|
||||||
InitChainSync(context.Context, types.RequestInitChain) (*types.ResponseInitChain, error)
|
InitChainSync(context.Context, types.RequestInitChain) (*types.ResponseInitChain, error)
|
||||||
|
BeginBlockSync(context.Context, types.RequestBeginBlock) (*types.ResponseBeginBlock, error)
|
||||||
|
EndBlockSync(context.Context, types.RequestEndBlock) (*types.ResponseEndBlock, error)
|
||||||
ListSnapshotsSync(context.Context, types.RequestListSnapshots) (*types.ResponseListSnapshots, error)
|
ListSnapshotsSync(context.Context, types.RequestListSnapshots) (*types.ResponseListSnapshots, error)
|
||||||
OfferSnapshotSync(context.Context, types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error)
|
OfferSnapshotSync(context.Context, types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error)
|
||||||
LoadSnapshotChunkSync(context.Context, types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error)
|
LoadSnapshotChunkSync(context.Context, types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error)
|
||||||
ApplySnapshotChunkSync(context.Context, types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error)
|
ApplySnapshotChunkSync(context.Context, types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error)
|
||||||
FinalizeBlockSync(context.Context, types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error)
|
ExtendVoteSync(context.Context, types.RequestExtendVote) (*types.ResponseExtendVote, error)
|
||||||
|
VerifyVoteExtensionSync(context.Context, types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error)
|
||||||
|
PrepareProposalSync(context.Context, types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error)
|
||||||
}
|
}
|
||||||
|
|
||||||
//----------------------------------------
|
//----------------------------------------
|
||||||
|
|||||||
@@ -194,6 +194,16 @@ func (cli *grpcClient) InfoAsync(ctx context.Context, params types.RequestInfo)
|
|||||||
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Info{Info: res}})
|
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Info{Info: res}})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// NOTE: call is synchronous, use ctx to break early if needed
|
||||||
|
func (cli *grpcClient) DeliverTxAsync(ctx context.Context, params types.RequestDeliverTx) (*ReqRes, error) {
|
||||||
|
req := types.ToRequestDeliverTx(params)
|
||||||
|
res, err := cli.client.DeliverTx(ctx, req.GetDeliverTx(), grpc.WaitForReady(true))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_DeliverTx{DeliverTx: res}})
|
||||||
|
}
|
||||||
|
|
||||||
// NOTE: call is synchronous, use ctx to break early if needed
|
// NOTE: call is synchronous, use ctx to break early if needed
|
||||||
func (cli *grpcClient) CheckTxAsync(ctx context.Context, params types.RequestCheckTx) (*ReqRes, error) {
|
func (cli *grpcClient) CheckTxAsync(ctx context.Context, params types.RequestCheckTx) (*ReqRes, error) {
|
||||||
req := types.ToRequestCheckTx(params)
|
req := types.ToRequestCheckTx(params)
|
||||||
@@ -234,6 +244,26 @@ func (cli *grpcClient) InitChainAsync(ctx context.Context, params types.RequestI
|
|||||||
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_InitChain{InitChain: res}})
|
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_InitChain{InitChain: res}})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// NOTE: call is synchronous, use ctx to break early if needed
|
||||||
|
func (cli *grpcClient) BeginBlockAsync(ctx context.Context, params types.RequestBeginBlock) (*ReqRes, error) {
|
||||||
|
req := types.ToRequestBeginBlock(params)
|
||||||
|
res, err := cli.client.BeginBlock(ctx, req.GetBeginBlock(), grpc.WaitForReady(true))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_BeginBlock{BeginBlock: res}})
|
||||||
|
}
|
||||||
|
|
||||||
|
// NOTE: call is synchronous, use ctx to break early if needed
|
||||||
|
func (cli *grpcClient) EndBlockAsync(ctx context.Context, params types.RequestEndBlock) (*ReqRes, error) {
|
||||||
|
req := types.ToRequestEndBlock(params)
|
||||||
|
res, err := cli.client.EndBlock(ctx, req.GetEndBlock(), grpc.WaitForReady(true))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_EndBlock{EndBlock: res}})
|
||||||
|
}
|
||||||
|
|
||||||
// NOTE: call is synchronous, use ctx to break early if needed
|
// NOTE: call is synchronous, use ctx to break early if needed
|
||||||
func (cli *grpcClient) ListSnapshotsAsync(ctx context.Context, params types.RequestListSnapshots) (*ReqRes, error) {
|
func (cli *grpcClient) ListSnapshotsAsync(ctx context.Context, params types.RequestListSnapshots) (*ReqRes, error) {
|
||||||
req := types.ToRequestListSnapshots(params)
|
req := types.ToRequestListSnapshots(params)
|
||||||
@@ -284,19 +314,63 @@ func (cli *grpcClient) ApplySnapshotChunkAsync(
|
|||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cli *grpcClient) FinalizeBlockAsync(
|
// NOTE: call is synchronous, use ctx to break early if needed
|
||||||
|
func (cli *grpcClient) ExtendVoteAsync(
|
||||||
ctx context.Context,
|
ctx context.Context,
|
||||||
params types.RequestFinalizeBlock,
|
params types.RequestExtendVote,
|
||||||
) (*ReqRes, error) {
|
) (*ReqRes, error) {
|
||||||
req := types.ToRequestFinalizeBlock(params)
|
req := types.ToRequestExtendVote(params)
|
||||||
res, err := cli.client.FinalizeBlock(ctx, req.GetFinalizeBlock(), grpc.WaitForReady(true))
|
res, err := cli.client.ExtendVote(ctx, req.GetExtendVote(), grpc.WaitForReady(true))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return cli.finishAsyncCall(
|
return cli.finishAsyncCall(
|
||||||
ctx,
|
ctx,
|
||||||
req,
|
req,
|
||||||
&types.Response{Value: &types.Response_FinalizeBlock{FinalizeBlock: res}},
|
&types.Response{Value: &types.Response_ExtendVote{ExtendVote: res}},
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NOTE: call is synchronous, use ctx to break early if needed
|
||||||
|
func (cli *grpcClient) VerifyVoteExtensionAsync(
|
||||||
|
ctx context.Context,
|
||||||
|
params types.RequestVerifyVoteExtension,
|
||||||
|
) (*ReqRes, error) {
|
||||||
|
req := types.ToRequestVerifyVoteExtension(params)
|
||||||
|
res, err := cli.client.VerifyVoteExtension(ctx, req.GetVerifyVoteExtension(), grpc.WaitForReady(true))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return cli.finishAsyncCall(
|
||||||
|
ctx,
|
||||||
|
req,
|
||||||
|
&types.Response{
|
||||||
|
Value: &types.Response_VerifyVoteExtension{
|
||||||
|
VerifyVoteExtension: res,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cli *grpcClient) PrepareProposalAsync(
|
||||||
|
ctx context.Context,
|
||||||
|
params types.RequestPrepareProposal,
|
||||||
|
) (*ReqRes, error) {
|
||||||
|
|
||||||
|
req := types.ToRequestPrepareProposal(params)
|
||||||
|
res, err := cli.client.PrepareProposal(ctx, req.GetPrepareProposal(), grpc.WaitForReady(true))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return cli.finishAsyncCall(
|
||||||
|
ctx,
|
||||||
|
req,
|
||||||
|
&types.Response{
|
||||||
|
Value: &types.Response_PrepareProposal{
|
||||||
|
PrepareProposal: res,
|
||||||
|
},
|
||||||
|
},
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -366,6 +440,18 @@ func (cli *grpcClient) InfoSync(
|
|||||||
return cli.finishSyncCall(reqres).GetInfo(), cli.Error()
|
return cli.finishSyncCall(reqres).GetInfo(), cli.Error()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (cli *grpcClient) DeliverTxSync(
|
||||||
|
ctx context.Context,
|
||||||
|
params types.RequestDeliverTx,
|
||||||
|
) (*types.ResponseDeliverTx, error) {
|
||||||
|
|
||||||
|
reqres, err := cli.DeliverTxAsync(ctx, params)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return cli.finishSyncCall(reqres).GetDeliverTx(), cli.Error()
|
||||||
|
}
|
||||||
|
|
||||||
func (cli *grpcClient) CheckTxSync(
|
func (cli *grpcClient) CheckTxSync(
|
||||||
ctx context.Context,
|
ctx context.Context,
|
||||||
params types.RequestCheckTx,
|
params types.RequestCheckTx,
|
||||||
@@ -409,6 +495,30 @@ func (cli *grpcClient) InitChainSync(
|
|||||||
return cli.finishSyncCall(reqres).GetInitChain(), cli.Error()
|
return cli.finishSyncCall(reqres).GetInitChain(), cli.Error()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (cli *grpcClient) BeginBlockSync(
|
||||||
|
ctx context.Context,
|
||||||
|
params types.RequestBeginBlock,
|
||||||
|
) (*types.ResponseBeginBlock, error) {
|
||||||
|
|
||||||
|
reqres, err := cli.BeginBlockAsync(ctx, params)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return cli.finishSyncCall(reqres).GetBeginBlock(), cli.Error()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cli *grpcClient) EndBlockSync(
|
||||||
|
ctx context.Context,
|
||||||
|
params types.RequestEndBlock,
|
||||||
|
) (*types.ResponseEndBlock, error) {
|
||||||
|
|
||||||
|
reqres, err := cli.EndBlockAsync(ctx, params)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return cli.finishSyncCall(reqres).GetEndBlock(), cli.Error()
|
||||||
|
}
|
||||||
|
|
||||||
func (cli *grpcClient) ListSnapshotsSync(
|
func (cli *grpcClient) ListSnapshotsSync(
|
||||||
ctx context.Context,
|
ctx context.Context,
|
||||||
params types.RequestListSnapshots,
|
params types.RequestListSnapshots,
|
||||||
@@ -455,13 +565,36 @@ func (cli *grpcClient) ApplySnapshotChunkSync(
|
|||||||
return cli.finishSyncCall(reqres).GetApplySnapshotChunk(), cli.Error()
|
return cli.finishSyncCall(reqres).GetApplySnapshotChunk(), cli.Error()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cli *grpcClient) FinalizeBlockSync(
|
func (cli *grpcClient) ExtendVoteSync(
|
||||||
ctx context.Context,
|
ctx context.Context,
|
||||||
params types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
|
params types.RequestExtendVote) (*types.ResponseExtendVote, error) {
|
||||||
|
|
||||||
reqres, err := cli.FinalizeBlockAsync(ctx, params)
|
reqres, err := cli.ExtendVoteAsync(ctx, params)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return cli.finishSyncCall(reqres).GetFinalizeBlock(), cli.Error()
|
return cli.finishSyncCall(reqres).GetExtendVote(), cli.Error()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cli *grpcClient) VerifyVoteExtensionSync(
|
||||||
|
ctx context.Context,
|
||||||
|
params types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) {
|
||||||
|
|
||||||
|
reqres, err := cli.VerifyVoteExtensionAsync(ctx, params)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return cli.finishSyncCall(reqres).GetVerifyVoteExtension(), cli.Error()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cli *grpcClient) PrepareProposalSync(
|
||||||
|
ctx context.Context,
|
||||||
|
params types.RequestPrepareProposal,
|
||||||
|
) (*types.ResponsePrepareProposal, error) {
|
||||||
|
|
||||||
|
reqres, err := cli.PrepareProposalAsync(ctx, params)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return cli.finishSyncCall(reqres).GetPrepareProposal(), cli.Error()
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -77,6 +77,17 @@ func (app *localClient) InfoAsync(ctx context.Context, req types.RequestInfo) (*
|
|||||||
), nil
|
), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (app *localClient) DeliverTxAsync(ctx context.Context, params types.RequestDeliverTx) (*ReqRes, error) {
|
||||||
|
app.mtx.Lock()
|
||||||
|
defer app.mtx.Unlock()
|
||||||
|
|
||||||
|
res := app.Application.DeliverTx(params)
|
||||||
|
return app.callback(
|
||||||
|
types.ToRequestDeliverTx(params),
|
||||||
|
types.ToResponseDeliverTx(res),
|
||||||
|
), nil
|
||||||
|
}
|
||||||
|
|
||||||
func (app *localClient) CheckTxAsync(ctx context.Context, req types.RequestCheckTx) (*ReqRes, error) {
|
func (app *localClient) CheckTxAsync(ctx context.Context, req types.RequestCheckTx) (*ReqRes, error) {
|
||||||
app.mtx.Lock()
|
app.mtx.Lock()
|
||||||
defer app.mtx.Unlock()
|
defer app.mtx.Unlock()
|
||||||
@@ -121,6 +132,28 @@ func (app *localClient) InitChainAsync(ctx context.Context, req types.RequestIni
|
|||||||
), nil
|
), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (app *localClient) BeginBlockAsync(ctx context.Context, req types.RequestBeginBlock) (*ReqRes, error) {
|
||||||
|
app.mtx.Lock()
|
||||||
|
defer app.mtx.Unlock()
|
||||||
|
|
||||||
|
res := app.Application.BeginBlock(req)
|
||||||
|
return app.callback(
|
||||||
|
types.ToRequestBeginBlock(req),
|
||||||
|
types.ToResponseBeginBlock(res),
|
||||||
|
), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (app *localClient) EndBlockAsync(ctx context.Context, req types.RequestEndBlock) (*ReqRes, error) {
|
||||||
|
app.mtx.Lock()
|
||||||
|
defer app.mtx.Unlock()
|
||||||
|
|
||||||
|
res := app.Application.EndBlock(req)
|
||||||
|
return app.callback(
|
||||||
|
types.ToRequestEndBlock(req),
|
||||||
|
types.ToResponseEndBlock(res),
|
||||||
|
), nil
|
||||||
|
}
|
||||||
|
|
||||||
func (app *localClient) ListSnapshotsAsync(ctx context.Context, req types.RequestListSnapshots) (*ReqRes, error) {
|
func (app *localClient) ListSnapshotsAsync(ctx context.Context, req types.RequestListSnapshots) (*ReqRes, error) {
|
||||||
app.mtx.Lock()
|
app.mtx.Lock()
|
||||||
defer app.mtx.Unlock()
|
defer app.mtx.Unlock()
|
||||||
@@ -171,17 +204,45 @@ func (app *localClient) ApplySnapshotChunkAsync(
|
|||||||
), nil
|
), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (app *localClient) FinalizeBlockAsync(
|
func (app *localClient) ExtendVoteAsync(
|
||||||
ctx context.Context,
|
ctx context.Context,
|
||||||
req types.RequestFinalizeBlock,
|
req types.RequestExtendVote,
|
||||||
) (*ReqRes, error) {
|
) (*ReqRes, error) {
|
||||||
app.mtx.Lock()
|
app.mtx.Lock()
|
||||||
defer app.mtx.Unlock()
|
defer app.mtx.Unlock()
|
||||||
|
|
||||||
res := app.Application.FinalizeBlock(req)
|
res := app.Application.ExtendVote(req)
|
||||||
return app.callback(
|
return app.callback(
|
||||||
types.ToRequestFinalizeBlock(req),
|
types.ToRequestExtendVote(req),
|
||||||
types.ToResponseFinalizeBlock(res),
|
types.ToResponseExtendVote(res),
|
||||||
|
), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (app *localClient) VerifyVoteExtensionAsync(
|
||||||
|
ctx context.Context,
|
||||||
|
req types.RequestVerifyVoteExtension,
|
||||||
|
) (*ReqRes, error) {
|
||||||
|
app.mtx.Lock()
|
||||||
|
defer app.mtx.Unlock()
|
||||||
|
|
||||||
|
res := app.Application.VerifyVoteExtension(req)
|
||||||
|
return app.callback(
|
||||||
|
types.ToRequestVerifyVoteExtension(req),
|
||||||
|
types.ToResponseVerifyVoteExtension(res),
|
||||||
|
), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (app *localClient) PrepareProposalAsync(
|
||||||
|
ctx context.Context,
|
||||||
|
req types.RequestPrepareProposal,
|
||||||
|
) (*ReqRes, error) {
|
||||||
|
app.mtx.Lock()
|
||||||
|
defer app.mtx.Unlock()
|
||||||
|
|
||||||
|
res := app.Application.PrepareProposal(req)
|
||||||
|
return app.callback(
|
||||||
|
types.ToRequestPrepareProposal(req),
|
||||||
|
types.ToResponsePrepareProposal(res),
|
||||||
), nil
|
), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -203,6 +264,18 @@ func (app *localClient) InfoSync(ctx context.Context, req types.RequestInfo) (*t
|
|||||||
return &res, nil
|
return &res, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (app *localClient) DeliverTxSync(
|
||||||
|
ctx context.Context,
|
||||||
|
req types.RequestDeliverTx,
|
||||||
|
) (*types.ResponseDeliverTx, error) {
|
||||||
|
|
||||||
|
app.mtx.Lock()
|
||||||
|
defer app.mtx.Unlock()
|
||||||
|
|
||||||
|
res := app.Application.DeliverTx(req)
|
||||||
|
return &res, nil
|
||||||
|
}
|
||||||
|
|
||||||
func (app *localClient) CheckTxSync(
|
func (app *localClient) CheckTxSync(
|
||||||
ctx context.Context,
|
ctx context.Context,
|
||||||
req types.RequestCheckTx,
|
req types.RequestCheckTx,
|
||||||
@@ -245,6 +318,30 @@ func (app *localClient) InitChainSync(
|
|||||||
return &res, nil
|
return &res, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (app *localClient) BeginBlockSync(
|
||||||
|
ctx context.Context,
|
||||||
|
req types.RequestBeginBlock,
|
||||||
|
) (*types.ResponseBeginBlock, error) {
|
||||||
|
|
||||||
|
app.mtx.Lock()
|
||||||
|
defer app.mtx.Unlock()
|
||||||
|
|
||||||
|
res := app.Application.BeginBlock(req)
|
||||||
|
return &res, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (app *localClient) EndBlockSync(
|
||||||
|
ctx context.Context,
|
||||||
|
req types.RequestEndBlock,
|
||||||
|
) (*types.ResponseEndBlock, error) {
|
||||||
|
|
||||||
|
app.mtx.Lock()
|
||||||
|
defer app.mtx.Unlock()
|
||||||
|
|
||||||
|
res := app.Application.EndBlock(req)
|
||||||
|
return &res, nil
|
||||||
|
}
|
||||||
|
|
||||||
func (app *localClient) ListSnapshotsSync(
|
func (app *localClient) ListSnapshotsSync(
|
||||||
ctx context.Context,
|
ctx context.Context,
|
||||||
req types.RequestListSnapshots,
|
req types.RequestListSnapshots,
|
||||||
@@ -291,14 +388,36 @@ func (app *localClient) ApplySnapshotChunkSync(
|
|||||||
return &res, nil
|
return &res, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (app *localClient) FinalizeBlockSync(
|
func (app *localClient) ExtendVoteSync(
|
||||||
ctx context.Context,
|
ctx context.Context,
|
||||||
req types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
|
req types.RequestExtendVote) (*types.ResponseExtendVote, error) {
|
||||||
|
|
||||||
app.mtx.Lock()
|
app.mtx.Lock()
|
||||||
defer app.mtx.Unlock()
|
defer app.mtx.Unlock()
|
||||||
|
|
||||||
res := app.Application.FinalizeBlock(req)
|
res := app.Application.ExtendVote(req)
|
||||||
|
return &res, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (app *localClient) VerifyVoteExtensionSync(
|
||||||
|
ctx context.Context,
|
||||||
|
req types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) {
|
||||||
|
|
||||||
|
app.mtx.Lock()
|
||||||
|
defer app.mtx.Unlock()
|
||||||
|
|
||||||
|
res := app.Application.VerifyVoteExtension(req)
|
||||||
|
return &res, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (app *localClient) PrepareProposalSync(
|
||||||
|
ctx context.Context,
|
||||||
|
req types.RequestPrepareProposal,
|
||||||
|
) (*types.ResponsePrepareProposal, error) {
|
||||||
|
app.mtx.Lock()
|
||||||
|
defer app.mtx.Unlock()
|
||||||
|
|
||||||
|
res := app.Application.PrepareProposal(req)
|
||||||
return &res, nil
|
return &res, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
// Code generated by mockery 2.9.0. DO NOT EDIT.
|
// Code generated by mockery. DO NOT EDIT.
|
||||||
|
|
||||||
package mocks
|
package mocks
|
||||||
|
|
||||||
@@ -65,6 +65,52 @@ func (_m *Client) ApplySnapshotChunkSync(_a0 context.Context, _a1 types.RequestA
|
|||||||
return r0, r1
|
return r0, r1
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// BeginBlockAsync provides a mock function with given fields: _a0, _a1
|
||||||
|
func (_m *Client) BeginBlockAsync(_a0 context.Context, _a1 types.RequestBeginBlock) (*abcicli.ReqRes, error) {
|
||||||
|
ret := _m.Called(_a0, _a1)
|
||||||
|
|
||||||
|
var r0 *abcicli.ReqRes
|
||||||
|
if rf, ok := ret.Get(0).(func(context.Context, types.RequestBeginBlock) *abcicli.ReqRes); ok {
|
||||||
|
r0 = rf(_a0, _a1)
|
||||||
|
} else {
|
||||||
|
if ret.Get(0) != nil {
|
||||||
|
r0 = ret.Get(0).(*abcicli.ReqRes)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var r1 error
|
||||||
|
if rf, ok := ret.Get(1).(func(context.Context, types.RequestBeginBlock) error); ok {
|
||||||
|
r1 = rf(_a0, _a1)
|
||||||
|
} else {
|
||||||
|
r1 = ret.Error(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
return r0, r1
|
||||||
|
}
|
||||||
|
|
||||||
|
// BeginBlockSync provides a mock function with given fields: _a0, _a1
|
||||||
|
func (_m *Client) BeginBlockSync(_a0 context.Context, _a1 types.RequestBeginBlock) (*types.ResponseBeginBlock, error) {
|
||||||
|
ret := _m.Called(_a0, _a1)
|
||||||
|
|
||||||
|
var r0 *types.ResponseBeginBlock
|
||||||
|
if rf, ok := ret.Get(0).(func(context.Context, types.RequestBeginBlock) *types.ResponseBeginBlock); ok {
|
||||||
|
r0 = rf(_a0, _a1)
|
||||||
|
} else {
|
||||||
|
if ret.Get(0) != nil {
|
||||||
|
r0 = ret.Get(0).(*types.ResponseBeginBlock)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var r1 error
|
||||||
|
if rf, ok := ret.Get(1).(func(context.Context, types.RequestBeginBlock) error); ok {
|
||||||
|
r1 = rf(_a0, _a1)
|
||||||
|
} else {
|
||||||
|
r1 = ret.Error(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
return r0, r1
|
||||||
|
}
|
||||||
|
|
||||||
// CheckTxAsync provides a mock function with given fields: _a0, _a1
|
// CheckTxAsync provides a mock function with given fields: _a0, _a1
|
||||||
func (_m *Client) CheckTxAsync(_a0 context.Context, _a1 types.RequestCheckTx) (*abcicli.ReqRes, error) {
|
func (_m *Client) CheckTxAsync(_a0 context.Context, _a1 types.RequestCheckTx) (*abcicli.ReqRes, error) {
|
||||||
ret := _m.Called(_a0, _a1)
|
ret := _m.Called(_a0, _a1)
|
||||||
@@ -157,6 +203,52 @@ func (_m *Client) CommitSync(_a0 context.Context) (*types.ResponseCommit, error)
|
|||||||
return r0, r1
|
return r0, r1
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DeliverTxAsync provides a mock function with given fields: _a0, _a1
|
||||||
|
func (_m *Client) DeliverTxAsync(_a0 context.Context, _a1 types.RequestDeliverTx) (*abcicli.ReqRes, error) {
|
||||||
|
ret := _m.Called(_a0, _a1)
|
||||||
|
|
||||||
|
var r0 *abcicli.ReqRes
|
||||||
|
if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *abcicli.ReqRes); ok {
|
||||||
|
r0 = rf(_a0, _a1)
|
||||||
|
} else {
|
||||||
|
if ret.Get(0) != nil {
|
||||||
|
r0 = ret.Get(0).(*abcicli.ReqRes)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var r1 error
|
||||||
|
if rf, ok := ret.Get(1).(func(context.Context, types.RequestDeliverTx) error); ok {
|
||||||
|
r1 = rf(_a0, _a1)
|
||||||
|
} else {
|
||||||
|
r1 = ret.Error(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
return r0, r1
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeliverTxSync provides a mock function with given fields: _a0, _a1
|
||||||
|
func (_m *Client) DeliverTxSync(_a0 context.Context, _a1 types.RequestDeliverTx) (*types.ResponseDeliverTx, error) {
|
||||||
|
ret := _m.Called(_a0, _a1)
|
||||||
|
|
||||||
|
var r0 *types.ResponseDeliverTx
|
||||||
|
if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *types.ResponseDeliverTx); ok {
|
||||||
|
r0 = rf(_a0, _a1)
|
||||||
|
} else {
|
||||||
|
if ret.Get(0) != nil {
|
||||||
|
r0 = ret.Get(0).(*types.ResponseDeliverTx)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var r1 error
|
||||||
|
if rf, ok := ret.Get(1).(func(context.Context, types.RequestDeliverTx) error); ok {
|
||||||
|
r1 = rf(_a0, _a1)
|
||||||
|
} else {
|
||||||
|
r1 = ret.Error(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
return r0, r1
|
||||||
|
}
|
||||||
|
|
||||||
// EchoAsync provides a mock function with given fields: ctx, msg
|
// EchoAsync provides a mock function with given fields: ctx, msg
|
||||||
func (_m *Client) EchoAsync(ctx context.Context, msg string) (*abcicli.ReqRes, error) {
|
func (_m *Client) EchoAsync(ctx context.Context, msg string) (*abcicli.ReqRes, error) {
|
||||||
ret := _m.Called(ctx, msg)
|
ret := _m.Called(ctx, msg)
|
||||||
@@ -203,6 +295,52 @@ func (_m *Client) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho
|
|||||||
return r0, r1
|
return r0, r1
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// EndBlockAsync provides a mock function with given fields: _a0, _a1
|
||||||
|
func (_m *Client) EndBlockAsync(_a0 context.Context, _a1 types.RequestEndBlock) (*abcicli.ReqRes, error) {
|
||||||
|
ret := _m.Called(_a0, _a1)
|
||||||
|
|
||||||
|
var r0 *abcicli.ReqRes
|
||||||
|
if rf, ok := ret.Get(0).(func(context.Context, types.RequestEndBlock) *abcicli.ReqRes); ok {
|
||||||
|
r0 = rf(_a0, _a1)
|
||||||
|
} else {
|
||||||
|
if ret.Get(0) != nil {
|
||||||
|
r0 = ret.Get(0).(*abcicli.ReqRes)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var r1 error
|
||||||
|
if rf, ok := ret.Get(1).(func(context.Context, types.RequestEndBlock) error); ok {
|
||||||
|
r1 = rf(_a0, _a1)
|
||||||
|
} else {
|
||||||
|
r1 = ret.Error(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
return r0, r1
|
||||||
|
}
|
||||||
|
|
||||||
|
// EndBlockSync provides a mock function with given fields: _a0, _a1
|
||||||
|
func (_m *Client) EndBlockSync(_a0 context.Context, _a1 types.RequestEndBlock) (*types.ResponseEndBlock, error) {
|
||||||
|
ret := _m.Called(_a0, _a1)
|
||||||
|
|
||||||
|
var r0 *types.ResponseEndBlock
|
||||||
|
if rf, ok := ret.Get(0).(func(context.Context, types.RequestEndBlock) *types.ResponseEndBlock); ok {
|
||||||
|
r0 = rf(_a0, _a1)
|
||||||
|
} else {
|
||||||
|
if ret.Get(0) != nil {
|
||||||
|
r0 = ret.Get(0).(*types.ResponseEndBlock)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var r1 error
|
||||||
|
if rf, ok := ret.Get(1).(func(context.Context, types.RequestEndBlock) error); ok {
|
||||||
|
r1 = rf(_a0, _a1)
|
||||||
|
} else {
|
||||||
|
r1 = ret.Error(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
return r0, r1
|
||||||
|
}
|
||||||
|
|
||||||
// Error provides a mock function with given fields:
|
// Error provides a mock function with given fields:
|
||||||
func (_m *Client) Error() error {
|
func (_m *Client) Error() error {
|
||||||
ret := _m.Called()
|
ret := _m.Called()
|
||||||
@@ -217,12 +355,12 @@ func (_m *Client) Error() error {
|
|||||||
return r0
|
return r0
|
||||||
}
|
}
|
||||||
|
|
||||||
// FinalizeBlockAsync provides a mock function with given fields: _a0, _a1
|
// ExtendVoteAsync provides a mock function with given fields: _a0, _a1
|
||||||
func (_m *Client) FinalizeBlockAsync(_a0 context.Context, _a1 types.RequestFinalizeBlock) (*abcicli.ReqRes, error) {
|
func (_m *Client) ExtendVoteAsync(_a0 context.Context, _a1 types.RequestExtendVote) (*abcicli.ReqRes, error) {
|
||||||
ret := _m.Called(_a0, _a1)
|
ret := _m.Called(_a0, _a1)
|
||||||
|
|
||||||
var r0 *abcicli.ReqRes
|
var r0 *abcicli.ReqRes
|
||||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestFinalizeBlock) *abcicli.ReqRes); ok {
|
if rf, ok := ret.Get(0).(func(context.Context, types.RequestExtendVote) *abcicli.ReqRes); ok {
|
||||||
r0 = rf(_a0, _a1)
|
r0 = rf(_a0, _a1)
|
||||||
} else {
|
} else {
|
||||||
if ret.Get(0) != nil {
|
if ret.Get(0) != nil {
|
||||||
@@ -231,7 +369,7 @@ func (_m *Client) FinalizeBlockAsync(_a0 context.Context, _a1 types.RequestFinal
|
|||||||
}
|
}
|
||||||
|
|
||||||
var r1 error
|
var r1 error
|
||||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestFinalizeBlock) error); ok {
|
if rf, ok := ret.Get(1).(func(context.Context, types.RequestExtendVote) error); ok {
|
||||||
r1 = rf(_a0, _a1)
|
r1 = rf(_a0, _a1)
|
||||||
} else {
|
} else {
|
||||||
r1 = ret.Error(1)
|
r1 = ret.Error(1)
|
||||||
@@ -240,21 +378,21 @@ func (_m *Client) FinalizeBlockAsync(_a0 context.Context, _a1 types.RequestFinal
|
|||||||
return r0, r1
|
return r0, r1
|
||||||
}
|
}
|
||||||
|
|
||||||
// FinalizeBlockSync provides a mock function with given fields: _a0, _a1
|
// ExtendVoteSync provides a mock function with given fields: _a0, _a1
|
||||||
func (_m *Client) FinalizeBlockSync(_a0 context.Context, _a1 types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
|
func (_m *Client) ExtendVoteSync(_a0 context.Context, _a1 types.RequestExtendVote) (*types.ResponseExtendVote, error) {
|
||||||
ret := _m.Called(_a0, _a1)
|
ret := _m.Called(_a0, _a1)
|
||||||
|
|
||||||
var r0 *types.ResponseFinalizeBlock
|
var r0 *types.ResponseExtendVote
|
||||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestFinalizeBlock) *types.ResponseFinalizeBlock); ok {
|
if rf, ok := ret.Get(0).(func(context.Context, types.RequestExtendVote) *types.ResponseExtendVote); ok {
|
||||||
r0 = rf(_a0, _a1)
|
r0 = rf(_a0, _a1)
|
||||||
} else {
|
} else {
|
||||||
if ret.Get(0) != nil {
|
if ret.Get(0) != nil {
|
||||||
r0 = ret.Get(0).(*types.ResponseFinalizeBlock)
|
r0 = ret.Get(0).(*types.ResponseExtendVote)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
var r1 error
|
var r1 error
|
||||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestFinalizeBlock) error); ok {
|
if rf, ok := ret.Get(1).(func(context.Context, types.RequestExtendVote) error); ok {
|
||||||
r1 = rf(_a0, _a1)
|
r1 = rf(_a0, _a1)
|
||||||
} else {
|
} else {
|
||||||
r1 = ret.Error(1)
|
r1 = ret.Error(1)
|
||||||
@@ -577,6 +715,52 @@ func (_m *Client) OnStop() {
|
|||||||
_m.Called()
|
_m.Called()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// PrepareProposalAsync provides a mock function with given fields: _a0, _a1
|
||||||
|
func (_m *Client) PrepareProposalAsync(_a0 context.Context, _a1 types.RequestPrepareProposal) (*abcicli.ReqRes, error) {
|
||||||
|
ret := _m.Called(_a0, _a1)
|
||||||
|
|
||||||
|
var r0 *abcicli.ReqRes
|
||||||
|
if rf, ok := ret.Get(0).(func(context.Context, types.RequestPrepareProposal) *abcicli.ReqRes); ok {
|
||||||
|
r0 = rf(_a0, _a1)
|
||||||
|
} else {
|
||||||
|
if ret.Get(0) != nil {
|
||||||
|
r0 = ret.Get(0).(*abcicli.ReqRes)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var r1 error
|
||||||
|
if rf, ok := ret.Get(1).(func(context.Context, types.RequestPrepareProposal) error); ok {
|
||||||
|
r1 = rf(_a0, _a1)
|
||||||
|
} else {
|
||||||
|
r1 = ret.Error(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
return r0, r1
|
||||||
|
}
|
||||||
|
|
||||||
|
// PrepareProposalSync provides a mock function with given fields: _a0, _a1
|
||||||
|
func (_m *Client) PrepareProposalSync(_a0 context.Context, _a1 types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
|
||||||
|
ret := _m.Called(_a0, _a1)
|
||||||
|
|
||||||
|
var r0 *types.ResponsePrepareProposal
|
||||||
|
if rf, ok := ret.Get(0).(func(context.Context, types.RequestPrepareProposal) *types.ResponsePrepareProposal); ok {
|
||||||
|
r0 = rf(_a0, _a1)
|
||||||
|
} else {
|
||||||
|
if ret.Get(0) != nil {
|
||||||
|
r0 = ret.Get(0).(*types.ResponsePrepareProposal)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var r1 error
|
||||||
|
if rf, ok := ret.Get(1).(func(context.Context, types.RequestPrepareProposal) error); ok {
|
||||||
|
r1 = rf(_a0, _a1)
|
||||||
|
} else {
|
||||||
|
r1 = ret.Error(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
return r0, r1
|
||||||
|
}
|
||||||
|
|
||||||
// QueryAsync provides a mock function with given fields: _a0, _a1
|
// QueryAsync provides a mock function with given fields: _a0, _a1
|
||||||
func (_m *Client) QueryAsync(_a0 context.Context, _a1 types.RequestQuery) (*abcicli.ReqRes, error) {
|
func (_m *Client) QueryAsync(_a0 context.Context, _a1 types.RequestQuery) (*abcicli.ReqRes, error) {
|
||||||
ret := _m.Called(_a0, _a1)
|
ret := _m.Called(_a0, _a1)
|
||||||
@@ -705,6 +889,52 @@ func (_m *Client) String() string {
|
|||||||
return r0
|
return r0
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// VerifyVoteExtensionAsync provides a mock function with given fields: _a0, _a1
|
||||||
|
func (_m *Client) VerifyVoteExtensionAsync(_a0 context.Context, _a1 types.RequestVerifyVoteExtension) (*abcicli.ReqRes, error) {
|
||||||
|
ret := _m.Called(_a0, _a1)
|
||||||
|
|
||||||
|
var r0 *abcicli.ReqRes
|
||||||
|
if rf, ok := ret.Get(0).(func(context.Context, types.RequestVerifyVoteExtension) *abcicli.ReqRes); ok {
|
||||||
|
r0 = rf(_a0, _a1)
|
||||||
|
} else {
|
||||||
|
if ret.Get(0) != nil {
|
||||||
|
r0 = ret.Get(0).(*abcicli.ReqRes)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var r1 error
|
||||||
|
if rf, ok := ret.Get(1).(func(context.Context, types.RequestVerifyVoteExtension) error); ok {
|
||||||
|
r1 = rf(_a0, _a1)
|
||||||
|
} else {
|
||||||
|
r1 = ret.Error(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
return r0, r1
|
||||||
|
}
|
||||||
|
|
||||||
|
// VerifyVoteExtensionSync provides a mock function with given fields: _a0, _a1
|
||||||
|
func (_m *Client) VerifyVoteExtensionSync(_a0 context.Context, _a1 types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) {
|
||||||
|
ret := _m.Called(_a0, _a1)
|
||||||
|
|
||||||
|
var r0 *types.ResponseVerifyVoteExtension
|
||||||
|
if rf, ok := ret.Get(0).(func(context.Context, types.RequestVerifyVoteExtension) *types.ResponseVerifyVoteExtension); ok {
|
||||||
|
r0 = rf(_a0, _a1)
|
||||||
|
} else {
|
||||||
|
if ret.Get(0) != nil {
|
||||||
|
r0 = ret.Get(0).(*types.ResponseVerifyVoteExtension)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var r1 error
|
||||||
|
if rf, ok := ret.Get(1).(func(context.Context, types.RequestVerifyVoteExtension) error); ok {
|
||||||
|
r1 = rf(_a0, _a1)
|
||||||
|
} else {
|
||||||
|
r1 = ret.Error(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
return r0, r1
|
||||||
|
}
|
||||||
|
|
||||||
// Wait provides a mock function with given fields:
|
// Wait provides a mock function with given fields:
|
||||||
func (_m *Client) Wait() {
|
func (_m *Client) Wait() {
|
||||||
_m.Called()
|
_m.Called()
|
||||||
|
|||||||
@@ -245,6 +245,10 @@ func (cli *socketClient) InfoAsync(ctx context.Context, req types.RequestInfo) (
|
|||||||
return cli.queueRequestAsync(ctx, types.ToRequestInfo(req))
|
return cli.queueRequestAsync(ctx, types.ToRequestInfo(req))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (cli *socketClient) DeliverTxAsync(ctx context.Context, req types.RequestDeliverTx) (*ReqRes, error) {
|
||||||
|
return cli.queueRequestAsync(ctx, types.ToRequestDeliverTx(req))
|
||||||
|
}
|
||||||
|
|
||||||
func (cli *socketClient) CheckTxAsync(ctx context.Context, req types.RequestCheckTx) (*ReqRes, error) {
|
func (cli *socketClient) CheckTxAsync(ctx context.Context, req types.RequestCheckTx) (*ReqRes, error) {
|
||||||
return cli.queueRequestAsync(ctx, types.ToRequestCheckTx(req))
|
return cli.queueRequestAsync(ctx, types.ToRequestCheckTx(req))
|
||||||
}
|
}
|
||||||
@@ -261,6 +265,14 @@ func (cli *socketClient) InitChainAsync(ctx context.Context, req types.RequestIn
|
|||||||
return cli.queueRequestAsync(ctx, types.ToRequestInitChain(req))
|
return cli.queueRequestAsync(ctx, types.ToRequestInitChain(req))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (cli *socketClient) BeginBlockAsync(ctx context.Context, req types.RequestBeginBlock) (*ReqRes, error) {
|
||||||
|
return cli.queueRequestAsync(ctx, types.ToRequestBeginBlock(req))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cli *socketClient) EndBlockAsync(ctx context.Context, req types.RequestEndBlock) (*ReqRes, error) {
|
||||||
|
return cli.queueRequestAsync(ctx, types.ToRequestEndBlock(req))
|
||||||
|
}
|
||||||
|
|
||||||
func (cli *socketClient) ListSnapshotsAsync(ctx context.Context, req types.RequestListSnapshots) (*ReqRes, error) {
|
func (cli *socketClient) ListSnapshotsAsync(ctx context.Context, req types.RequestListSnapshots) (*ReqRes, error) {
|
||||||
return cli.queueRequestAsync(ctx, types.ToRequestListSnapshots(req))
|
return cli.queueRequestAsync(ctx, types.ToRequestListSnapshots(req))
|
||||||
}
|
}
|
||||||
@@ -283,11 +295,25 @@ func (cli *socketClient) ApplySnapshotChunkAsync(
|
|||||||
return cli.queueRequestAsync(ctx, types.ToRequestApplySnapshotChunk(req))
|
return cli.queueRequestAsync(ctx, types.ToRequestApplySnapshotChunk(req))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cli *socketClient) FinalizeBlockAsync(
|
func (cli *socketClient) ExtendVoteAsync(
|
||||||
ctx context.Context,
|
ctx context.Context,
|
||||||
req types.RequestFinalizeBlock,
|
req types.RequestExtendVote,
|
||||||
) (*ReqRes, error) {
|
) (*ReqRes, error) {
|
||||||
return cli.queueRequestAsync(ctx, types.ToRequestFinalizeBlock(req))
|
return cli.queueRequestAsync(ctx, types.ToRequestExtendVote(req))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cli *socketClient) VerifyVoteExtensionAsync(
|
||||||
|
ctx context.Context,
|
||||||
|
req types.RequestVerifyVoteExtension,
|
||||||
|
) (*ReqRes, error) {
|
||||||
|
return cli.queueRequestAsync(ctx, types.ToRequestVerifyVoteExtension(req))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cli *socketClient) PrepareProposalAsync(
|
||||||
|
ctx context.Context,
|
||||||
|
req types.RequestPrepareProposal,
|
||||||
|
) (*ReqRes, error) {
|
||||||
|
return cli.queueRequestAsync(ctx, types.ToRequestPrepareProposal(req))
|
||||||
}
|
}
|
||||||
|
|
||||||
//----------------------------------------
|
//----------------------------------------
|
||||||
@@ -336,6 +362,18 @@ func (cli *socketClient) InfoSync(
|
|||||||
return reqres.Response.GetInfo(), nil
|
return reqres.Response.GetInfo(), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (cli *socketClient) DeliverTxSync(
|
||||||
|
ctx context.Context,
|
||||||
|
req types.RequestDeliverTx,
|
||||||
|
) (*types.ResponseDeliverTx, error) {
|
||||||
|
|
||||||
|
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestDeliverTx(req))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return reqres.Response.GetDeliverTx(), nil
|
||||||
|
}
|
||||||
|
|
||||||
func (cli *socketClient) CheckTxSync(
|
func (cli *socketClient) CheckTxSync(
|
||||||
ctx context.Context,
|
ctx context.Context,
|
||||||
req types.RequestCheckTx,
|
req types.RequestCheckTx,
|
||||||
@@ -378,6 +416,30 @@ func (cli *socketClient) InitChainSync(
|
|||||||
return reqres.Response.GetInitChain(), nil
|
return reqres.Response.GetInitChain(), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (cli *socketClient) BeginBlockSync(
|
||||||
|
ctx context.Context,
|
||||||
|
req types.RequestBeginBlock,
|
||||||
|
) (*types.ResponseBeginBlock, error) {
|
||||||
|
|
||||||
|
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestBeginBlock(req))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return reqres.Response.GetBeginBlock(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cli *socketClient) EndBlockSync(
|
||||||
|
ctx context.Context,
|
||||||
|
req types.RequestEndBlock,
|
||||||
|
) (*types.ResponseEndBlock, error) {
|
||||||
|
|
||||||
|
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestEndBlock(req))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return reqres.Response.GetEndBlock(), nil
|
||||||
|
}
|
||||||
|
|
||||||
func (cli *socketClient) ListSnapshotsSync(
|
func (cli *socketClient) ListSnapshotsSync(
|
||||||
ctx context.Context,
|
ctx context.Context,
|
||||||
req types.RequestListSnapshots,
|
req types.RequestListSnapshots,
|
||||||
@@ -424,15 +486,38 @@ func (cli *socketClient) ApplySnapshotChunkSync(
|
|||||||
return reqres.Response.GetApplySnapshotChunk(), nil
|
return reqres.Response.GetApplySnapshotChunk(), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cli *socketClient) FinalizeBlockSync(
|
func (cli *socketClient) ExtendVoteSync(
|
||||||
ctx context.Context,
|
ctx context.Context,
|
||||||
req types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
|
req types.RequestExtendVote) (*types.ResponseExtendVote, error) {
|
||||||
|
|
||||||
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestFinalizeBlock(req))
|
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestExtendVote(req))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return reqres.Response.GetFinalizeBlock(), nil
|
return reqres.Response.GetExtendVote(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cli *socketClient) VerifyVoteExtensionSync(
|
||||||
|
ctx context.Context,
|
||||||
|
req types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) {
|
||||||
|
|
||||||
|
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestVerifyVoteExtension(req))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return reqres.Response.GetVerifyVoteExtension(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cli *socketClient) PrepareProposalSync(
|
||||||
|
ctx context.Context,
|
||||||
|
req types.RequestPrepareProposal,
|
||||||
|
) (*types.ResponsePrepareProposal, error) {
|
||||||
|
|
||||||
|
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestPrepareProposal(req))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return reqres.Response.GetPrepareProposal(), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
//----------------------------------------
|
//----------------------------------------
|
||||||
@@ -539,6 +624,8 @@ func resMatchesReq(req *types.Request, res *types.Response) (ok bool) {
|
|||||||
_, ok = res.Value.(*types.Response_Flush)
|
_, ok = res.Value.(*types.Response_Flush)
|
||||||
case *types.Request_Info:
|
case *types.Request_Info:
|
||||||
_, ok = res.Value.(*types.Response_Info)
|
_, ok = res.Value.(*types.Response_Info)
|
||||||
|
case *types.Request_DeliverTx:
|
||||||
|
_, ok = res.Value.(*types.Response_DeliverTx)
|
||||||
case *types.Request_CheckTx:
|
case *types.Request_CheckTx:
|
||||||
_, ok = res.Value.(*types.Response_CheckTx)
|
_, ok = res.Value.(*types.Response_CheckTx)
|
||||||
case *types.Request_Commit:
|
case *types.Request_Commit:
|
||||||
@@ -547,6 +634,10 @@ func resMatchesReq(req *types.Request, res *types.Response) (ok bool) {
|
|||||||
_, ok = res.Value.(*types.Response_Query)
|
_, ok = res.Value.(*types.Response_Query)
|
||||||
case *types.Request_InitChain:
|
case *types.Request_InitChain:
|
||||||
_, ok = res.Value.(*types.Response_InitChain)
|
_, ok = res.Value.(*types.Response_InitChain)
|
||||||
|
case *types.Request_BeginBlock:
|
||||||
|
_, ok = res.Value.(*types.Response_BeginBlock)
|
||||||
|
case *types.Request_EndBlock:
|
||||||
|
_, ok = res.Value.(*types.Response_EndBlock)
|
||||||
case *types.Request_ApplySnapshotChunk:
|
case *types.Request_ApplySnapshotChunk:
|
||||||
_, ok = res.Value.(*types.Response_ApplySnapshotChunk)
|
_, ok = res.Value.(*types.Response_ApplySnapshotChunk)
|
||||||
case *types.Request_LoadSnapshotChunk:
|
case *types.Request_LoadSnapshotChunk:
|
||||||
@@ -555,8 +646,12 @@ func resMatchesReq(req *types.Request, res *types.Response) (ok bool) {
|
|||||||
_, ok = res.Value.(*types.Response_ListSnapshots)
|
_, ok = res.Value.(*types.Response_ListSnapshots)
|
||||||
case *types.Request_OfferSnapshot:
|
case *types.Request_OfferSnapshot:
|
||||||
_, ok = res.Value.(*types.Response_OfferSnapshot)
|
_, ok = res.Value.(*types.Response_OfferSnapshot)
|
||||||
case *types.Request_FinalizeBlock:
|
case *types.Request_ExtendVote:
|
||||||
_, ok = res.Value.(*types.Response_FinalizeBlock)
|
_, ok = res.Value.(*types.Response_ExtendVote)
|
||||||
|
case *types.Request_VerifyVoteExtension:
|
||||||
|
_, ok = res.Value.(*types.Response_VerifyVoteExtension)
|
||||||
|
case *types.Request_PrepareProposal:
|
||||||
|
_, ok = res.Value.(*types.Response_PrepareProposal)
|
||||||
}
|
}
|
||||||
return ok
|
return ok
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -37,11 +37,11 @@ func TestProperSyncCalls(t *testing.T) {
|
|||||||
resp := make(chan error, 1)
|
resp := make(chan error, 1)
|
||||||
go func() {
|
go func() {
|
||||||
// This is BeginBlockSync unrolled....
|
// This is BeginBlockSync unrolled....
|
||||||
reqres, err := c.FinalizeBlockAsync(ctx, types.RequestFinalizeBlock{})
|
reqres, err := c.BeginBlockAsync(ctx, types.RequestBeginBlock{})
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
err = c.FlushSync(context.Background())
|
err = c.FlushSync(context.Background())
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
res := reqres.Response.GetFinalizeBlock()
|
res := reqres.Response.GetBeginBlock()
|
||||||
assert.NotNil(t, res)
|
assert.NotNil(t, res)
|
||||||
resp <- c.Error()
|
resp <- c.Error()
|
||||||
}()
|
}()
|
||||||
@@ -73,7 +73,7 @@ func TestHangingSyncCalls(t *testing.T) {
|
|||||||
resp := make(chan error, 1)
|
resp := make(chan error, 1)
|
||||||
go func() {
|
go func() {
|
||||||
// Start BeginBlock and flush it
|
// Start BeginBlock and flush it
|
||||||
reqres, err := c.FinalizeBlockAsync(ctx, types.RequestFinalizeBlock{})
|
reqres, err := c.BeginBlockAsync(ctx, types.RequestBeginBlock{})
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
flush, err := c.FlushAsync(ctx)
|
flush, err := c.FlushAsync(ctx)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
@@ -84,7 +84,7 @@ func TestHangingSyncCalls(t *testing.T) {
|
|||||||
err = s.Stop()
|
err = s.Stop()
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
|
||||||
// wait for the response from FinalizeBlock
|
// wait for the response from BeginBlock
|
||||||
reqres.Wait()
|
reqres.Wait()
|
||||||
flush.Wait()
|
flush.Wait()
|
||||||
resp <- c.Error()
|
resp <- c.Error()
|
||||||
@@ -121,7 +121,7 @@ type slowApp struct {
|
|||||||
types.BaseApplication
|
types.BaseApplication
|
||||||
}
|
}
|
||||||
|
|
||||||
func (slowApp) FinalizeBlock(req types.RequestFinalizeBlock) types.ResponseFinalizeBlock {
|
func (slowApp) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock {
|
||||||
time.Sleep(200 * time.Millisecond)
|
time.Sleep(200 * time.Millisecond)
|
||||||
return types.ResponseFinalizeBlock{}
|
return types.ResponseBeginBlock{}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -17,7 +17,6 @@ import (
|
|||||||
|
|
||||||
abcicli "github.com/tendermint/tendermint/abci/client"
|
abcicli "github.com/tendermint/tendermint/abci/client"
|
||||||
"github.com/tendermint/tendermint/abci/example/code"
|
"github.com/tendermint/tendermint/abci/example/code"
|
||||||
"github.com/tendermint/tendermint/abci/example/counter"
|
|
||||||
"github.com/tendermint/tendermint/abci/example/kvstore"
|
"github.com/tendermint/tendermint/abci/example/kvstore"
|
||||||
"github.com/tendermint/tendermint/abci/server"
|
"github.com/tendermint/tendermint/abci/server"
|
||||||
servertest "github.com/tendermint/tendermint/abci/tests/server"
|
servertest "github.com/tendermint/tendermint/abci/tests/server"
|
||||||
@@ -47,9 +46,6 @@ var (
|
|||||||
flagHeight int
|
flagHeight int
|
||||||
flagProve bool
|
flagProve bool
|
||||||
|
|
||||||
// counter
|
|
||||||
flagSerial bool
|
|
||||||
|
|
||||||
// kvstore
|
// kvstore
|
||||||
flagPersist string
|
flagPersist string
|
||||||
)
|
)
|
||||||
@@ -61,9 +57,7 @@ var RootCmd = &cobra.Command{
|
|||||||
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
|
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
|
||||||
|
|
||||||
switch cmd.Use {
|
switch cmd.Use {
|
||||||
case "counter", "kvstore": // for the examples apps, don't pre-run
|
case "kvstore", "version":
|
||||||
return nil
|
|
||||||
case "version": // skip running for version command
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -135,10 +129,6 @@ func addQueryFlags() {
|
|||||||
"whether or not to return a merkle proof of the query result")
|
"whether or not to return a merkle proof of the query result")
|
||||||
}
|
}
|
||||||
|
|
||||||
func addCounterFlags() {
|
|
||||||
counterCmd.PersistentFlags().BoolVarP(&flagSerial, "serial", "", false, "enforce incrementing (serial) transactions")
|
|
||||||
}
|
|
||||||
|
|
||||||
func addKVStoreFlags() {
|
func addKVStoreFlags() {
|
||||||
kvstoreCmd.PersistentFlags().StringVarP(&flagPersist, "persist", "", "", "directory to use for a database")
|
kvstoreCmd.PersistentFlags().StringVarP(&flagPersist, "persist", "", "", "directory to use for a database")
|
||||||
}
|
}
|
||||||
@@ -157,8 +147,6 @@ func addCommands() {
|
|||||||
RootCmd.AddCommand(queryCmd)
|
RootCmd.AddCommand(queryCmd)
|
||||||
|
|
||||||
// examples
|
// examples
|
||||||
addCounterFlags()
|
|
||||||
RootCmd.AddCommand(counterCmd)
|
|
||||||
addKVStoreFlags()
|
addKVStoreFlags()
|
||||||
RootCmd.AddCommand(kvstoreCmd)
|
RootCmd.AddCommand(kvstoreCmd)
|
||||||
}
|
}
|
||||||
@@ -258,14 +246,6 @@ var queryCmd = &cobra.Command{
|
|||||||
RunE: cmdQuery,
|
RunE: cmdQuery,
|
||||||
}
|
}
|
||||||
|
|
||||||
var counterCmd = &cobra.Command{
|
|
||||||
Use: "counter",
|
|
||||||
Short: "ABCI demo example",
|
|
||||||
Long: "ABCI demo example",
|
|
||||||
Args: cobra.ExactArgs(0),
|
|
||||||
RunE: cmdCounter,
|
|
||||||
}
|
|
||||||
|
|
||||||
var kvstoreCmd = &cobra.Command{
|
var kvstoreCmd = &cobra.Command{
|
||||||
Use: "kvstore",
|
Use: "kvstore",
|
||||||
Short: "ABCI demo example",
|
Short: "ABCI demo example",
|
||||||
@@ -504,18 +484,16 @@ func cmdDeliverTx(cmd *cobra.Command, args []string) error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
res, err := client.FinalizeBlockSync(ctx, types.RequestFinalizeBlock{Txs: [][]byte{txBytes}})
|
res, err := client.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: txBytes})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
for _, tx := range res.Txs {
|
printResponse(cmd, args, response{
|
||||||
printResponse(cmd, args, response{
|
Code: res.Code,
|
||||||
Code: tx.Code,
|
Data: res.Data,
|
||||||
Data: tx.Data,
|
Info: res.Info,
|
||||||
Info: tx.Info,
|
Log: res.Log,
|
||||||
Log: tx.Log,
|
})
|
||||||
})
|
|
||||||
}
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -595,32 +573,6 @@ func cmdQuery(cmd *cobra.Command, args []string) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func cmdCounter(cmd *cobra.Command, args []string) error {
|
|
||||||
app := counter.NewApplication(flagSerial)
|
|
||||||
logger := log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
|
|
||||||
|
|
||||||
// Start the listener
|
|
||||||
srv, err := server.NewServer(flagAddress, flagAbci, app)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
srv.SetLogger(logger.With("module", "abci-server"))
|
|
||||||
if err := srv.Start(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Stop upon receiving SIGTERM or CTRL-C.
|
|
||||||
tmos.TrapSignal(logger, func() {
|
|
||||||
// Cleanup
|
|
||||||
if err := srv.Stop(); err != nil {
|
|
||||||
logger.Error("Error while stopping server", "err", err)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
// Run forever.
|
|
||||||
select {}
|
|
||||||
}
|
|
||||||
|
|
||||||
func cmdKVStore(cmd *cobra.Command, args []string) error {
|
func cmdKVStore(cmd *cobra.Command, args []string) error {
|
||||||
logger := log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
|
logger := log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
|
||||||
|
|
||||||
|
|||||||
@@ -1,92 +0,0 @@
|
|||||||
package counter
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/binary"
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"github.com/tendermint/tendermint/abci/example/code"
|
|
||||||
"github.com/tendermint/tendermint/abci/types"
|
|
||||||
)
|
|
||||||
|
|
||||||
type Application struct {
|
|
||||||
types.BaseApplication
|
|
||||||
|
|
||||||
hashCount int
|
|
||||||
txCount int
|
|
||||||
serial bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewApplication(serial bool) *Application {
|
|
||||||
return &Application{serial: serial}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (app *Application) Info(req types.RequestInfo) types.ResponseInfo {
|
|
||||||
return types.ResponseInfo{Data: fmt.Sprintf("{\"hashes\":%v,\"txs\":%v}", app.hashCount, app.txCount)}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (app *Application) FinalizeBlock(req types.RequestFinalizeBlock) types.ResponseFinalizeBlock {
|
|
||||||
|
|
||||||
if app.serial {
|
|
||||||
for _, tx := range req.Txs {
|
|
||||||
if len(tx) > 8 {
|
|
||||||
return types.ResponseFinalizeBlock{Txs: []*types.ResponseDeliverTx{{
|
|
||||||
Code: code.CodeTypeEncodingError,
|
|
||||||
Log: fmt.Sprintf("Max tx size is 8 bytes, got %d", len(tx))}},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
tx8 := make([]byte, 8)
|
|
||||||
copy(tx8[len(tx8)-len(tx):], tx)
|
|
||||||
txValue := binary.BigEndian.Uint64(tx8)
|
|
||||||
if txValue != uint64(app.txCount) {
|
|
||||||
return types.ResponseFinalizeBlock{
|
|
||||||
Txs: []*types.ResponseDeliverTx{{
|
|
||||||
Code: code.CodeTypeBadNonce,
|
|
||||||
Log: fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.txCount, txValue)}},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
app.txCount++
|
|
||||||
return types.ResponseFinalizeBlock{Txs: []*types.ResponseDeliverTx{{Code: code.CodeTypeOK}}}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (app *Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
|
|
||||||
if app.serial {
|
|
||||||
if len(req.Tx) > 8 {
|
|
||||||
return types.ResponseCheckTx{
|
|
||||||
Code: code.CodeTypeEncodingError,
|
|
||||||
Log: fmt.Sprintf("Max tx size is 8 bytes, got %d", len(req.Tx))}
|
|
||||||
}
|
|
||||||
|
|
||||||
tx8 := make([]byte, 8)
|
|
||||||
copy(tx8[len(tx8)-len(req.Tx):], req.Tx)
|
|
||||||
txValue := binary.BigEndian.Uint64(tx8)
|
|
||||||
if txValue < uint64(app.txCount) {
|
|
||||||
return types.ResponseCheckTx{
|
|
||||||
Code: code.CodeTypeBadNonce,
|
|
||||||
Log: fmt.Sprintf("Invalid nonce. Expected >= %v, got %v", app.txCount, txValue)}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return types.ResponseCheckTx{Code: code.CodeTypeOK}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (app *Application) Commit() (resp types.ResponseCommit) {
|
|
||||||
app.hashCount++
|
|
||||||
if app.txCount == 0 {
|
|
||||||
return types.ResponseCommit{}
|
|
||||||
}
|
|
||||||
hash := make([]byte, 8)
|
|
||||||
binary.BigEndian.PutUint64(hash, uint64(app.txCount))
|
|
||||||
return types.ResponseCommit{Data: hash}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (app *Application) Query(reqQuery types.RequestQuery) types.ResponseQuery {
|
|
||||||
switch reqQuery.Path {
|
|
||||||
case "hash":
|
|
||||||
return types.ResponseQuery{Value: []byte(fmt.Sprintf("%v", app.hashCount))}
|
|
||||||
case "tx":
|
|
||||||
return types.ResponseQuery{Value: []byte(fmt.Sprintf("%v", app.txCount))}
|
|
||||||
default:
|
|
||||||
return types.ResponseQuery{Log: fmt.Sprintf("Invalid query path. Expected hash or tx, got %v", reqQuery.Path)}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -76,22 +76,20 @@ func testStream(t *testing.T, app types.Application) {
|
|||||||
client.SetResponseCallback(func(req *types.Request, res *types.Response) {
|
client.SetResponseCallback(func(req *types.Request, res *types.Response) {
|
||||||
// Process response
|
// Process response
|
||||||
switch r := res.Value.(type) {
|
switch r := res.Value.(type) {
|
||||||
case *types.Response_FinalizeBlock:
|
case *types.Response_DeliverTx:
|
||||||
for _, tx := range r.FinalizeBlock.Txs {
|
counter++
|
||||||
counter++
|
if r.DeliverTx.Code != code.CodeTypeOK {
|
||||||
if tx.Code != code.CodeTypeOK {
|
t.Error("DeliverTx failed with ret_code", r.DeliverTx.Code)
|
||||||
t.Error("DeliverTx failed with ret_code", tx.Code)
|
}
|
||||||
}
|
if counter > numDeliverTxs {
|
||||||
if counter > numDeliverTxs {
|
t.Fatalf("Too many DeliverTx responses. Got %d, expected %d", counter, numDeliverTxs)
|
||||||
t.Fatalf("Too many DeliverTx responses. Got %d, expected %d", counter, numDeliverTxs)
|
}
|
||||||
}
|
if counter == numDeliverTxs {
|
||||||
if counter == numDeliverTxs {
|
go func() {
|
||||||
go func() {
|
time.Sleep(time.Second * 1) // Wait for a bit to allow counter overflow
|
||||||
time.Sleep(time.Second * 1) // Wait for a bit to allow counter overflow
|
close(done)
|
||||||
close(done)
|
}()
|
||||||
}()
|
return
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
case *types.Response_Flush:
|
case *types.Response_Flush:
|
||||||
// ignore
|
// ignore
|
||||||
@@ -105,8 +103,7 @@ func testStream(t *testing.T, app types.Application) {
|
|||||||
// Write requests
|
// Write requests
|
||||||
for counter := 0; counter < numDeliverTxs; counter++ {
|
for counter := 0; counter < numDeliverTxs; counter++ {
|
||||||
// Send request
|
// Send request
|
||||||
tx := []byte("test")
|
_, err = client.DeliverTxAsync(ctx, types.RequestDeliverTx{Tx: []byte("test")})
|
||||||
_, err = client.FinalizeBlockAsync(ctx, types.RequestFinalizeBlock{Txs: [][]byte{tx}})
|
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
// Sometimes send flush messages
|
// Sometimes send flush messages
|
||||||
@@ -166,25 +163,22 @@ func testGRPCSync(t *testing.T, app types.ABCIApplicationServer) {
|
|||||||
// Write requests
|
// Write requests
|
||||||
for counter := 0; counter < numDeliverTxs; counter++ {
|
for counter := 0; counter < numDeliverTxs; counter++ {
|
||||||
// Send request
|
// Send request
|
||||||
txt := []byte("test")
|
response, err := client.DeliverTx(context.Background(), &types.RequestDeliverTx{Tx: []byte("test")})
|
||||||
response, err := client.FinalizeBlock(context.Background(), &types.RequestFinalizeBlock{Txs: [][]byte{txt}})
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Error in GRPC DeliverTx: %v", err.Error())
|
t.Fatalf("Error in GRPC DeliverTx: %v", err.Error())
|
||||||
}
|
}
|
||||||
counter++
|
counter++
|
||||||
for _, tx := range response.Txs {
|
if response.Code != code.CodeTypeOK {
|
||||||
if tx.Code != code.CodeTypeOK {
|
t.Error("DeliverTx failed with ret_code", response.Code)
|
||||||
t.Error("DeliverTx failed with ret_code", tx.Code)
|
}
|
||||||
}
|
if counter > numDeliverTxs {
|
||||||
if counter > numDeliverTxs {
|
t.Fatal("Too many DeliverTx responses")
|
||||||
t.Fatal("Too many DeliverTx responses")
|
}
|
||||||
}
|
t.Log("response", counter)
|
||||||
t.Log("response", counter)
|
if counter == numDeliverTxs {
|
||||||
if counter == numDeliverTxs {
|
go func() {
|
||||||
go func() {
|
time.Sleep(time.Second * 1) // Wait for a bit to allow counter overflow
|
||||||
time.Sleep(time.Second * 1) // Wait for a bit to allow counter overflow
|
}()
|
||||||
}()
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -4,7 +4,7 @@ There are two app's here: the KVStoreApplication and the PersistentKVStoreApplic
|
|||||||
|
|
||||||
## KVStoreApplication
|
## KVStoreApplication
|
||||||
|
|
||||||
The KVStoreApplication is a simple merkle key-value store.
|
The KVStoreApplication is a simple merkle key-value store.
|
||||||
Transactions of the form `key=value` are stored as key-value pairs in the tree.
|
Transactions of the form `key=value` are stored as key-value pairs in the tree.
|
||||||
Transactions without an `=` sign set the value to the key.
|
Transactions without an `=` sign set the value to the key.
|
||||||
The app has no replay protection (other than what the mempool provides).
|
The app has no replay protection (other than what the mempool provides).
|
||||||
@@ -12,7 +12,7 @@ The app has no replay protection (other than what the mempool provides).
|
|||||||
## PersistentKVStoreApplication
|
## PersistentKVStoreApplication
|
||||||
|
|
||||||
The PersistentKVStoreApplication wraps the KVStoreApplication
|
The PersistentKVStoreApplication wraps the KVStoreApplication
|
||||||
and provides two additional features:
|
and provides three additional features:
|
||||||
|
|
||||||
1) persistence of state across app restarts (using Tendermint's ABCI-Handshake mechanism)
|
1) persistence of state across app restarts (using Tendermint's ABCI-Handshake mechanism)
|
||||||
2) validator set changes
|
2) validator set changes
|
||||||
@@ -27,4 +27,4 @@ Validator set changes are effected using the following transaction format:
|
|||||||
|
|
||||||
where `pubkeyN` is a base64-encoded 32-byte ed25519 key and `powerN` is a new voting power for the validator with `pubkeyN` (possibly a new one).
|
where `pubkeyN` is a base64-encoded 32-byte ed25519 key and `powerN` is a new voting power for the validator with `pubkeyN` (possibly a new one).
|
||||||
To remove a validator from the validator set, set power to `0`.
|
To remove a validator from the validator set, set power to `0`.
|
||||||
There is no sybil protection against new validators joining.
|
There is no sybil protection against new validators joining.
|
||||||
|
|||||||
@@ -86,40 +86,35 @@ func (app *Application) Info(req types.RequestInfo) (resInfo types.ResponseInfo)
|
|||||||
}
|
}
|
||||||
|
|
||||||
// tx is either "key=value" or just arbitrary bytes
|
// tx is either "key=value" or just arbitrary bytes
|
||||||
func (app *Application) FinalizeBlock(req types.RequestFinalizeBlock) types.ResponseFinalizeBlock {
|
func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
|
||||||
var key, value string
|
var key, value string
|
||||||
var txs = make([]*types.ResponseDeliverTx, len(req.Txs))
|
|
||||||
|
|
||||||
for i, tx := range req.Txs {
|
parts := bytes.Split(req.Tx, []byte("="))
|
||||||
parts := bytes.Split(tx, []byte("="))
|
if len(parts) == 2 {
|
||||||
if len(parts) == 2 {
|
key, value = string(parts[0]), string(parts[1])
|
||||||
key, value = string(parts[0]), string(parts[1])
|
} else {
|
||||||
} else {
|
key, value = string(req.Tx), string(req.Tx)
|
||||||
key, value = string(tx), string(tx)
|
|
||||||
}
|
|
||||||
|
|
||||||
err := app.state.db.Set(prefixKey([]byte(key)), []byte(value))
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
app.state.Size++
|
|
||||||
|
|
||||||
events := []types.Event{
|
|
||||||
{
|
|
||||||
Type: "app",
|
|
||||||
Attributes: []types.EventAttribute{
|
|
||||||
{Key: "creator", Value: "Cosmoshi Netowoko", Index: true},
|
|
||||||
{Key: "key", Value: key, Index: true},
|
|
||||||
{Key: "index_key", Value: "index is working", Index: true},
|
|
||||||
{Key: "noindex_key", Value: "index is working", Index: false},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
txs[i] = &types.ResponseDeliverTx{Code: code.CodeTypeOK, Events: events}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return types.ResponseFinalizeBlock{Txs: txs}
|
err := app.state.db.Set(prefixKey([]byte(key)), []byte(value))
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
app.state.Size++
|
||||||
|
|
||||||
|
events := []types.Event{
|
||||||
|
{
|
||||||
|
Type: "app",
|
||||||
|
Attributes: []types.EventAttribute{
|
||||||
|
{Key: "creator", Value: "Cosmoshi Netowoko", Index: true},
|
||||||
|
{Key: "key", Value: key, Index: true},
|
||||||
|
{Key: "index_key", Value: "index is working", Index: true},
|
||||||
|
{Key: "noindex_key", Value: "index is working", Index: false},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
return types.ResponseDeliverTx{Code: code.CodeTypeOK, Events: events}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (app *Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
|
func (app *Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
|
||||||
@@ -176,3 +171,9 @@ func (app *Application) Query(reqQuery types.RequestQuery) (resQuery types.Respo
|
|||||||
|
|
||||||
return resQuery
|
return resQuery
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (app *Application) PrepareProposal(
|
||||||
|
req types.RequestPrepareProposal) types.ResponsePrepareProposal {
|
||||||
|
return types.ResponsePrepareProposal{
|
||||||
|
BlockData: req.BlockData}
|
||||||
|
}
|
||||||
|
|||||||
@@ -27,16 +27,12 @@ const (
|
|||||||
var ctx = context.Background()
|
var ctx = context.Background()
|
||||||
|
|
||||||
func testKVStore(t *testing.T, app types.Application, tx []byte, key, value string) {
|
func testKVStore(t *testing.T, app types.Application, tx []byte, key, value string) {
|
||||||
req := types.RequestFinalizeBlock{Txs: [][]byte{tx}}
|
req := types.RequestDeliverTx{Tx: tx}
|
||||||
ar := app.FinalizeBlock(req)
|
ar := app.DeliverTx(req)
|
||||||
for _, tx := range ar.Txs {
|
require.False(t, ar.IsErr(), ar)
|
||||||
require.False(t, tx.IsErr(), ar)
|
|
||||||
}
|
|
||||||
// repeating tx doesn't raise error
|
// repeating tx doesn't raise error
|
||||||
ar = app.FinalizeBlock(req)
|
ar = app.DeliverTx(req)
|
||||||
for _, tx := range ar.Txs {
|
require.False(t, ar.IsErr(), ar)
|
||||||
require.False(t, tx.IsErr(), ar)
|
|
||||||
}
|
|
||||||
// commit
|
// commit
|
||||||
app.Commit()
|
app.Commit()
|
||||||
|
|
||||||
@@ -113,7 +109,8 @@ func TestPersistentKVStoreInfo(t *testing.T) {
|
|||||||
header := tmproto.Header{
|
header := tmproto.Header{
|
||||||
Height: height,
|
Height: height,
|
||||||
}
|
}
|
||||||
kvstore.FinalizeBlock(types.RequestFinalizeBlock{Hash: hash, Header: header})
|
kvstore.BeginBlock(types.RequestBeginBlock{Hash: hash, Header: header})
|
||||||
|
kvstore.EndBlock(types.RequestEndBlock{Height: header.Height})
|
||||||
kvstore.Commit()
|
kvstore.Commit()
|
||||||
|
|
||||||
resInfo = kvstore.Info(types.RequestInfo{})
|
resInfo = kvstore.Info(types.RequestInfo{})
|
||||||
@@ -203,15 +200,16 @@ func makeApplyBlock(
|
|||||||
Height: height,
|
Height: height,
|
||||||
}
|
}
|
||||||
|
|
||||||
resFinalizeBlock := kvstore.FinalizeBlock(types.RequestFinalizeBlock{
|
kvstore.BeginBlock(types.RequestBeginBlock{Hash: hash, Header: header})
|
||||||
Hash: hash,
|
for _, tx := range txs {
|
||||||
Header: header,
|
if r := kvstore.DeliverTx(types.RequestDeliverTx{Tx: tx}); r.IsErr() {
|
||||||
Txs: txs,
|
t.Fatal(r)
|
||||||
})
|
}
|
||||||
|
}
|
||||||
|
resEndBlock := kvstore.EndBlock(types.RequestEndBlock{Height: header.Height})
|
||||||
kvstore.Commit()
|
kvstore.Commit()
|
||||||
|
|
||||||
valsEqual(t, diff, resFinalizeBlock.ValidatorUpdates)
|
valsEqual(t, diff, resEndBlock.ValidatorUpdates)
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -328,16 +326,13 @@ func runClientTests(t *testing.T, client abcicli.Client) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func testClient(t *testing.T, app abcicli.Client, tx []byte, key, value string) {
|
func testClient(t *testing.T, app abcicli.Client, tx []byte, key, value string) {
|
||||||
ar, err := app.FinalizeBlockSync(ctx, types.RequestFinalizeBlock{Txs: [][]byte{tx}})
|
ar, err := app.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: tx})
|
||||||
for _, tx := range ar.Txs {
|
|
||||||
require.False(t, tx.IsErr(), ar)
|
|
||||||
}
|
|
||||||
|
|
||||||
ar, err = app.FinalizeBlockSync(ctx, types.RequestFinalizeBlock{Txs: [][]byte{tx}}) // repeating tx doesn't raise error
|
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
for _, tx := range ar.Txs {
|
require.False(t, ar.IsErr(), ar)
|
||||||
require.False(t, tx.IsErr(), ar)
|
// repeating tx doesn't raise error
|
||||||
}
|
ar, err = app.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: tx})
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.False(t, ar.IsErr(), ar)
|
||||||
// commit
|
// commit
|
||||||
_, err = app.CommitSync(ctx)
|
_, err = app.CommitSync(ctx)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|||||||
@@ -14,6 +14,7 @@ import (
|
|||||||
cryptoenc "github.com/tendermint/tendermint/crypto/encoding"
|
cryptoenc "github.com/tendermint/tendermint/crypto/encoding"
|
||||||
"github.com/tendermint/tendermint/libs/log"
|
"github.com/tendermint/tendermint/libs/log"
|
||||||
pc "github.com/tendermint/tendermint/proto/tendermint/crypto"
|
pc "github.com/tendermint/tendermint/proto/tendermint/crypto"
|
||||||
|
ptypes "github.com/tendermint/tendermint/proto/tendermint/types"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -66,19 +67,23 @@ func (app *PersistentKVStoreApplication) Info(req types.RequestInfo) types.Respo
|
|||||||
return res
|
return res
|
||||||
}
|
}
|
||||||
|
|
||||||
// // tx is either "val:pubkey!power" or "key=value" or just arbitrary bytes
|
// tx is either "val:pubkey!power" or "key=value" or just arbitrary bytes
|
||||||
// func (app *PersistentKVStoreApplication) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
|
func (app *PersistentKVStoreApplication) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
|
||||||
// // if it starts with "val:", update the validator set
|
// if it starts with "val:", update the validator set
|
||||||
// // format is "val:pubkey!power"
|
// format is "val:pubkey!power"
|
||||||
// if isValidatorTx(req.Tx) {
|
if isValidatorTx(req.Tx) {
|
||||||
// // update validators in the merkle tree
|
// update validators in the merkle tree
|
||||||
// // and in app.ValUpdates
|
// and in app.ValUpdates
|
||||||
// return app.execValidatorTx(req.Tx)
|
return app.execValidatorTx(req.Tx)
|
||||||
// }
|
}
|
||||||
|
|
||||||
// // otherwise, update the key-value store
|
if isPrepareTx(req.Tx) {
|
||||||
// return app.app.DeliverTx(req)
|
return app.execPrepareTx(req.Tx)
|
||||||
// }
|
}
|
||||||
|
|
||||||
|
// otherwise, update the key-value store
|
||||||
|
return app.app.DeliverTx(req)
|
||||||
|
}
|
||||||
|
|
||||||
func (app *PersistentKVStoreApplication) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
|
func (app *PersistentKVStoreApplication) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
|
||||||
return app.app.CheckTx(req)
|
return app.app.CheckTx(req)
|
||||||
@@ -119,40 +124,8 @@ func (app *PersistentKVStoreApplication) InitChain(req types.RequestInitChain) t
|
|||||||
return types.ResponseInitChain{}
|
return types.ResponseInitChain{}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (app *PersistentKVStoreApplication) ListSnapshots(
|
// Track the block hash and header information
|
||||||
req types.RequestListSnapshots) types.ResponseListSnapshots {
|
func (app *PersistentKVStoreApplication) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock {
|
||||||
return types.ResponseListSnapshots{}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (app *PersistentKVStoreApplication) LoadSnapshotChunk(
|
|
||||||
req types.RequestLoadSnapshotChunk) types.ResponseLoadSnapshotChunk {
|
|
||||||
return types.ResponseLoadSnapshotChunk{}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (app *PersistentKVStoreApplication) OfferSnapshot(
|
|
||||||
req types.RequestOfferSnapshot) types.ResponseOfferSnapshot {
|
|
||||||
return types.ResponseOfferSnapshot{Result: types.ResponseOfferSnapshot_ABORT}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (app *PersistentKVStoreApplication) ApplySnapshotChunk(
|
|
||||||
req types.RequestApplySnapshotChunk) types.ResponseApplySnapshotChunk {
|
|
||||||
return types.ResponseApplySnapshotChunk{Result: types.ResponseApplySnapshotChunk_ABORT}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (app *PersistentKVStoreApplication) FinalizeBlock(
|
|
||||||
req types.RequestFinalizeBlock) types.ResponseFinalizeBlock {
|
|
||||||
// for i, tx := range req.Txs {
|
|
||||||
// // if it starts with "val:", update the validator set
|
|
||||||
// // format is "val:pubkey!power"
|
|
||||||
// if isValidatorTx(tx) {
|
|
||||||
// // update validators in the merkle tree
|
|
||||||
// // and in app.ValUpdates
|
|
||||||
// return app.execValidatorTx(req.Tx)
|
|
||||||
// }
|
|
||||||
|
|
||||||
// // otherwise, update the key-value store
|
|
||||||
// return app.app.DeliverTx(tx)
|
|
||||||
// }
|
|
||||||
// reset valset changes
|
// reset valset changes
|
||||||
app.ValUpdates = make([]types.ValidatorUpdate, 0)
|
app.ValUpdates = make([]types.ValidatorUpdate, 0)
|
||||||
|
|
||||||
@@ -174,7 +147,50 @@ func (app *PersistentKVStoreApplication) FinalizeBlock(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return types.ResponseFinalizeBlock{ValidatorUpdates: app.ValUpdates}
|
return types.ResponseBeginBlock{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update the validator set
|
||||||
|
func (app *PersistentKVStoreApplication) EndBlock(req types.RequestEndBlock) types.ResponseEndBlock {
|
||||||
|
return types.ResponseEndBlock{ValidatorUpdates: app.ValUpdates}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (app *PersistentKVStoreApplication) ListSnapshots(
|
||||||
|
req types.RequestListSnapshots) types.ResponseListSnapshots {
|
||||||
|
return types.ResponseListSnapshots{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (app *PersistentKVStoreApplication) LoadSnapshotChunk(
|
||||||
|
req types.RequestLoadSnapshotChunk) types.ResponseLoadSnapshotChunk {
|
||||||
|
return types.ResponseLoadSnapshotChunk{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (app *PersistentKVStoreApplication) OfferSnapshot(
|
||||||
|
req types.RequestOfferSnapshot) types.ResponseOfferSnapshot {
|
||||||
|
return types.ResponseOfferSnapshot{Result: types.ResponseOfferSnapshot_ABORT}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (app *PersistentKVStoreApplication) ApplySnapshotChunk(
|
||||||
|
req types.RequestApplySnapshotChunk) types.ResponseApplySnapshotChunk {
|
||||||
|
return types.ResponseApplySnapshotChunk{Result: types.ResponseApplySnapshotChunk_ABORT}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (app *PersistentKVStoreApplication) ExtendVote(
|
||||||
|
req types.RequestExtendVote) types.ResponseExtendVote {
|
||||||
|
return types.ResponseExtendVote{
|
||||||
|
VoteExtension: ConstructVoteExtension(req.Vote.ValidatorAddress),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (app *PersistentKVStoreApplication) VerifyVoteExtension(
|
||||||
|
req types.RequestVerifyVoteExtension) types.ResponseVerifyVoteExtension {
|
||||||
|
return types.RespondVerifyVoteExtension(
|
||||||
|
app.verifyExtension(req.Vote.ValidatorAddress, req.Vote.VoteExtension))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (app *PersistentKVStoreApplication) PrepareProposal(
|
||||||
|
req types.RequestPrepareProposal) types.ResponsePrepareProposal {
|
||||||
|
return types.ResponsePrepareProposal{BlockData: app.substPrepareTx(req.BlockData)}
|
||||||
}
|
}
|
||||||
|
|
||||||
//---------------------------------------------
|
//---------------------------------------------
|
||||||
@@ -291,3 +307,51 @@ func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate
|
|||||||
|
|
||||||
return types.ResponseDeliverTx{Code: code.CodeTypeOK}
|
return types.ResponseDeliverTx{Code: code.CodeTypeOK}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// -----------------------------
|
||||||
|
|
||||||
|
const PreparePrefix = "prepare"
|
||||||
|
|
||||||
|
func isPrepareTx(tx []byte) bool {
|
||||||
|
return strings.HasPrefix(string(tx), PreparePrefix)
|
||||||
|
}
|
||||||
|
|
||||||
|
// execPrepareTx is noop. tx data is considered as placeholder
|
||||||
|
// and is substitute at the PrepareProposal.
|
||||||
|
func (app *PersistentKVStoreApplication) execPrepareTx(tx []byte) types.ResponseDeliverTx {
|
||||||
|
// noop
|
||||||
|
return types.ResponseDeliverTx{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// substPrepareTx subst all the preparetx in the blockdata
|
||||||
|
// to null string(could be any arbitrary string).
|
||||||
|
func (app *PersistentKVStoreApplication) substPrepareTx(blockData [][]byte) [][]byte {
|
||||||
|
for i, tx := range blockData {
|
||||||
|
if isPrepareTx(tx) {
|
||||||
|
blockData[i] = make([]byte, len(tx))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return blockData
|
||||||
|
}
|
||||||
|
|
||||||
|
func ConstructVoteExtension(valAddr []byte) *ptypes.VoteExtension {
|
||||||
|
return &ptypes.VoteExtension{
|
||||||
|
AppDataToSign: valAddr,
|
||||||
|
AppDataSelfAuthenticating: valAddr,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (app *PersistentKVStoreApplication) verifyExtension(valAddr []byte, ext *ptypes.VoteExtension) bool {
|
||||||
|
if ext == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
canonical := ConstructVoteExtension(valAddr)
|
||||||
|
if !bytes.Equal(canonical.AppDataToSign, ext.AppDataToSign) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if !bytes.Equal(canonical.AppDataSelfAuthenticating, ext.AppDataSelfAuthenticating) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|||||||
@@ -200,6 +200,9 @@ func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types
|
|||||||
case *types.Request_Info:
|
case *types.Request_Info:
|
||||||
res := s.app.Info(*r.Info)
|
res := s.app.Info(*r.Info)
|
||||||
responses <- types.ToResponseInfo(res)
|
responses <- types.ToResponseInfo(res)
|
||||||
|
case *types.Request_DeliverTx:
|
||||||
|
res := s.app.DeliverTx(*r.DeliverTx)
|
||||||
|
responses <- types.ToResponseDeliverTx(res)
|
||||||
case *types.Request_CheckTx:
|
case *types.Request_CheckTx:
|
||||||
res := s.app.CheckTx(*r.CheckTx)
|
res := s.app.CheckTx(*r.CheckTx)
|
||||||
responses <- types.ToResponseCheckTx(res)
|
responses <- types.ToResponseCheckTx(res)
|
||||||
@@ -212,21 +215,33 @@ func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types
|
|||||||
case *types.Request_InitChain:
|
case *types.Request_InitChain:
|
||||||
res := s.app.InitChain(*r.InitChain)
|
res := s.app.InitChain(*r.InitChain)
|
||||||
responses <- types.ToResponseInitChain(res)
|
responses <- types.ToResponseInitChain(res)
|
||||||
|
case *types.Request_BeginBlock:
|
||||||
|
res := s.app.BeginBlock(*r.BeginBlock)
|
||||||
|
responses <- types.ToResponseBeginBlock(res)
|
||||||
|
case *types.Request_EndBlock:
|
||||||
|
res := s.app.EndBlock(*r.EndBlock)
|
||||||
|
responses <- types.ToResponseEndBlock(res)
|
||||||
case *types.Request_ListSnapshots:
|
case *types.Request_ListSnapshots:
|
||||||
res := s.app.ListSnapshots(*r.ListSnapshots)
|
res := s.app.ListSnapshots(*r.ListSnapshots)
|
||||||
responses <- types.ToResponseListSnapshots(res)
|
responses <- types.ToResponseListSnapshots(res)
|
||||||
case *types.Request_OfferSnapshot:
|
case *types.Request_OfferSnapshot:
|
||||||
res := s.app.OfferSnapshot(*r.OfferSnapshot)
|
res := s.app.OfferSnapshot(*r.OfferSnapshot)
|
||||||
responses <- types.ToResponseOfferSnapshot(res)
|
responses <- types.ToResponseOfferSnapshot(res)
|
||||||
|
case *types.Request_PrepareProposal:
|
||||||
|
res := s.app.PrepareProposal(*r.PrepareProposal)
|
||||||
|
responses <- types.ToResponsePrepareProposal(res)
|
||||||
case *types.Request_LoadSnapshotChunk:
|
case *types.Request_LoadSnapshotChunk:
|
||||||
res := s.app.LoadSnapshotChunk(*r.LoadSnapshotChunk)
|
res := s.app.LoadSnapshotChunk(*r.LoadSnapshotChunk)
|
||||||
responses <- types.ToResponseLoadSnapshotChunk(res)
|
responses <- types.ToResponseLoadSnapshotChunk(res)
|
||||||
case *types.Request_ApplySnapshotChunk:
|
case *types.Request_ApplySnapshotChunk:
|
||||||
res := s.app.ApplySnapshotChunk(*r.ApplySnapshotChunk)
|
res := s.app.ApplySnapshotChunk(*r.ApplySnapshotChunk)
|
||||||
responses <- types.ToResponseApplySnapshotChunk(res)
|
responses <- types.ToResponseApplySnapshotChunk(res)
|
||||||
case *types.Request_FinalizeBlock:
|
case *types.Request_ExtendVote:
|
||||||
res := s.app.FinalizeBlock(*r.FinalizeBlock)
|
res := s.app.ExtendVote(*r.ExtendVote)
|
||||||
responses <- types.ToResponseFinalizeBlock(res)
|
responses <- types.ToResponseExtendVote(res)
|
||||||
|
case *types.Request_VerifyVoteExtension:
|
||||||
|
res := s.app.VerifyVoteExtension(*r.VerifyVoteExtension)
|
||||||
|
responses <- types.ToResponseVerifyVoteExtension(res)
|
||||||
default:
|
default:
|
||||||
responses <- types.ToResponseException("Unknown request")
|
responses <- types.ToResponseException("Unknown request")
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -51,22 +51,20 @@ func Commit(client abcicli.Client, hashExp []byte) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func FinalizeBlock(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
|
func DeliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
|
||||||
res, _ := client.FinalizeBlockSync(ctx, types.RequestFinalizeBlock{Txs: [][]byte{txBytes}})
|
res, _ := client.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: txBytes})
|
||||||
for _, tx := range res.Txs {
|
code, data, log := res.Code, res.Data, res.Log
|
||||||
code, data, log := tx.Code, tx.Data, tx.Log
|
if code != codeExp {
|
||||||
if code != codeExp {
|
fmt.Println("Failed test: DeliverTx")
|
||||||
fmt.Println("Failed test: DeliverTx")
|
fmt.Printf("DeliverTx response code was unexpected. Got %v expected %v. Log: %v\n",
|
||||||
fmt.Printf("DeliverTx response code was unexpected. Got %v expected %v. Log: %v\n",
|
code, codeExp, log)
|
||||||
code, codeExp, log)
|
return errors.New("deliverTx error")
|
||||||
return errors.New("deliverTx error")
|
}
|
||||||
}
|
if !bytes.Equal(data, dataExp) {
|
||||||
if !bytes.Equal(data, dataExp) {
|
fmt.Println("Failed test: DeliverTx")
|
||||||
fmt.Println("Failed test: DeliverTx")
|
fmt.Printf("DeliverTx response data was unexpected. Got %X expected %X\n",
|
||||||
fmt.Printf("DeliverTx response data was unexpected. Got %X expected %X\n",
|
data, dataExp)
|
||||||
data, dataExp)
|
return errors.New("deliverTx error")
|
||||||
return errors.New("deliverTx error")
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
fmt.Println("Passed test: DeliverTx")
|
fmt.Println("Passed test: DeliverTx")
|
||||||
return nil
|
return nil
|
||||||
|
|||||||
@@ -1,68 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
abcicli "github.com/tendermint/tendermint/abci/client"
|
|
||||||
"github.com/tendermint/tendermint/abci/types"
|
|
||||||
"github.com/tendermint/tendermint/libs/log"
|
|
||||||
)
|
|
||||||
|
|
||||||
var ctx = context.Background()
|
|
||||||
|
|
||||||
func startClient(abciType string) abcicli.Client {
|
|
||||||
// Start client
|
|
||||||
client, err := abcicli.NewClient("tcp://127.0.0.1:26658", abciType, true)
|
|
||||||
if err != nil {
|
|
||||||
panic(err.Error())
|
|
||||||
}
|
|
||||||
logger := log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
|
|
||||||
client.SetLogger(logger.With("module", "abcicli"))
|
|
||||||
if err := client.Start(); err != nil {
|
|
||||||
panicf("connecting to abci_app: %v", err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
return client
|
|
||||||
}
|
|
||||||
|
|
||||||
func commit(client abcicli.Client, hashExp []byte) {
|
|
||||||
res, err := client.CommitSync(ctx)
|
|
||||||
if err != nil {
|
|
||||||
panicf("client error: %v", err)
|
|
||||||
}
|
|
||||||
if !bytes.Equal(res.Data, hashExp) {
|
|
||||||
panicf("Commit hash was unexpected. Got %X expected %X", res.Data, hashExp)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type tx struct {
|
|
||||||
Data []byte
|
|
||||||
CodeExp uint32
|
|
||||||
DataExp []byte
|
|
||||||
}
|
|
||||||
|
|
||||||
func finalizeBlock(client abcicli.Client, txs []tx) {
|
|
||||||
var txsData = make([][]byte, len(txs))
|
|
||||||
for i, tx := range txs {
|
|
||||||
txsData[i] = tx.Data
|
|
||||||
}
|
|
||||||
|
|
||||||
res, err := client.FinalizeBlockSync(ctx, types.RequestFinalizeBlock{Txs: txsData})
|
|
||||||
if err != nil {
|
|
||||||
panicf("client error: %v", err)
|
|
||||||
}
|
|
||||||
for i, tx := range res.Txs {
|
|
||||||
if tx.Code != txs[i].CodeExp {
|
|
||||||
panicf("DeliverTx response code was unexpected. Got %v expected %v. Log: %v", tx.Code, txs[i].CodeExp, tx.Log)
|
|
||||||
}
|
|
||||||
if !bytes.Equal(tx.Data, txs[i].DataExp) {
|
|
||||||
panicf("DeliverTx response data was unexpected. Got %X expected %X", tx.Data, txs[i].DataExp)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func panicf(format string, a ...interface{}) {
|
|
||||||
panic(fmt.Sprintf(format, a...))
|
|
||||||
}
|
|
||||||
@@ -1,95 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"os/exec"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/tendermint/tendermint/abci/types"
|
|
||||||
)
|
|
||||||
|
|
||||||
var abciType string
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
abciType = os.Getenv("ABCI")
|
|
||||||
if abciType == "" {
|
|
||||||
abciType = "socket"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
testCounter()
|
|
||||||
}
|
|
||||||
|
|
||||||
const (
|
|
||||||
maxABCIConnectTries = 10
|
|
||||||
)
|
|
||||||
|
|
||||||
func ensureABCIIsUp(typ string, n int) error {
|
|
||||||
var err error
|
|
||||||
cmdString := "abci-cli echo hello"
|
|
||||||
if typ == "grpc" {
|
|
||||||
cmdString = "abci-cli --abci grpc echo hello"
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := 0; i < n; i++ {
|
|
||||||
cmd := exec.Command("bash", "-c", cmdString)
|
|
||||||
_, err = cmd.CombinedOutput()
|
|
||||||
if err == nil {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
time.Sleep(500 * time.Millisecond)
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func testCounter() {
|
|
||||||
abciApp := os.Getenv("ABCI_APP")
|
|
||||||
if abciApp == "" {
|
|
||||||
panic("No ABCI_APP specified")
|
|
||||||
}
|
|
||||||
|
|
||||||
fmt.Printf("Running %s test with abci=%s\n", abciApp, abciType)
|
|
||||||
subCommand := fmt.Sprintf("abci-cli %s", abciApp)
|
|
||||||
cmd := exec.Command("bash", "-c", subCommand)
|
|
||||||
cmd.Stdout = os.Stdout
|
|
||||||
if err := cmd.Start(); err != nil {
|
|
||||||
log.Fatalf("starting %q err: %v", abciApp, err)
|
|
||||||
}
|
|
||||||
defer func() {
|
|
||||||
if err := cmd.Process.Kill(); err != nil {
|
|
||||||
log.Printf("error on process kill: %v", err)
|
|
||||||
}
|
|
||||||
if err := cmd.Wait(); err != nil {
|
|
||||||
log.Printf("error while waiting for cmd to exit: %v", err)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
if err := ensureABCIIsUp(abciType, maxABCIConnectTries); err != nil {
|
|
||||||
log.Fatalf("echo failed: %v", err) //nolint:gocritic
|
|
||||||
}
|
|
||||||
|
|
||||||
client := startClient(abciType)
|
|
||||||
defer func() {
|
|
||||||
if err := client.Stop(); err != nil {
|
|
||||||
log.Printf("error trying client stop: %v", err)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
// commit(client, nil)
|
|
||||||
// deliverTx(client, []byte("abc"), code.CodeTypeBadNonce, nil)
|
|
||||||
commit(client, nil)
|
|
||||||
finalizeBlock(client, []tx{{Data: []byte{0x00}, CodeExp: types.CodeTypeOK, DataExp: nil}})
|
|
||||||
commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 1})
|
|
||||||
// deliverTx(client, []byte{0x00}, code.CodeTypeBadNonce, nil)
|
|
||||||
txs := []tx{
|
|
||||||
{Data: []byte{0x01}, DataExp: nil, CodeExp: types.CodeTypeOK},
|
|
||||||
{Data: []byte{0x00, 0x02}, DataExp: nil, CodeExp: types.CodeTypeOK},
|
|
||||||
{Data: []byte{0x00, 0x03}, DataExp: nil, CodeExp: types.CodeTypeOK},
|
|
||||||
{Data: []byte{0x00, 0x00, 0x04}, DataExp: nil, CodeExp: types.CodeTypeOK}}
|
|
||||||
finalizeBlock(client, txs)
|
|
||||||
// deliverTx(client, []byte{0x00, 0x00, 0x06}, code.CodeTypeBadNonce, nil)
|
|
||||||
commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 5})
|
|
||||||
}
|
|
||||||
@@ -1,28 +0,0 @@
|
|||||||
#! /bin/bash
|
|
||||||
set -e
|
|
||||||
|
|
||||||
# These tests spawn the counter app and server by execing the ABCI_APP command and run some simple client tests against it
|
|
||||||
|
|
||||||
# Get the directory of where this script is.
|
|
||||||
export PATH="$GOBIN:$PATH"
|
|
||||||
SOURCE="${BASH_SOURCE[0]}"
|
|
||||||
while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done
|
|
||||||
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
|
|
||||||
|
|
||||||
# Change into that dir because we expect that.
|
|
||||||
cd "$DIR"
|
|
||||||
|
|
||||||
echo "RUN COUNTER OVER SOCKET"
|
|
||||||
# test golang counter
|
|
||||||
ABCI_APP="counter" go run -mod=readonly ./*.go
|
|
||||||
echo "----------------------"
|
|
||||||
|
|
||||||
|
|
||||||
echo "RUN COUNTER OVER GRPC"
|
|
||||||
# test golang counter via grpc
|
|
||||||
ABCI_APP="counter --abci=grpc" ABCI="grpc" go run -mod=readonly ./*.go
|
|
||||||
echo "----------------------"
|
|
||||||
|
|
||||||
# test nodejs counter
|
|
||||||
# TODO: fix node app
|
|
||||||
#ABCI_APP="node $GOPATH/src/github.com/tendermint/js-abci/example/app.js" go test -test.run TestCounter
|
|
||||||
@@ -37,7 +37,6 @@ function testExample() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
testExample 1 tests/test_cli/ex1.abci abci-cli kvstore
|
testExample 1 tests/test_cli/ex1.abci abci-cli kvstore
|
||||||
testExample 2 tests/test_cli/ex2.abci abci-cli counter
|
|
||||||
|
|
||||||
echo ""
|
echo ""
|
||||||
echo "PASS"
|
echo "PASS"
|
||||||
|
|||||||
@@ -18,8 +18,19 @@ type Application interface {
|
|||||||
|
|
||||||
// Consensus Connection
|
// Consensus Connection
|
||||||
InitChain(RequestInitChain) ResponseInitChain // Initialize blockchain w validators/other info from TendermintCore
|
InitChain(RequestInitChain) ResponseInitChain // Initialize blockchain w validators/other info from TendermintCore
|
||||||
FinalizeBlock(RequestFinalizeBlock) ResponseFinalizeBlock
|
PrepareProposal(RequestPrepareProposal) ResponsePrepareProposal
|
||||||
Commit() ResponseCommit // Commit the state and return the application Merkle root hash
|
// Signals the beginning of a block
|
||||||
|
BeginBlock(RequestBeginBlock) ResponseBeginBlock
|
||||||
|
// Deliver a tx for full processing
|
||||||
|
DeliverTx(RequestDeliverTx) ResponseDeliverTx
|
||||||
|
// Signals the end of a block, returns changes to the validator set
|
||||||
|
EndBlock(RequestEndBlock) ResponseEndBlock
|
||||||
|
// Commit the state and return the application Merkle root hash
|
||||||
|
Commit() ResponseCommit
|
||||||
|
// Create application specific vote extension
|
||||||
|
ExtendVote(RequestExtendVote) ResponseExtendVote
|
||||||
|
// Verify application's vote extension data
|
||||||
|
VerifyVoteExtension(RequestVerifyVoteExtension) ResponseVerifyVoteExtension
|
||||||
|
|
||||||
// State Sync Connection
|
// State Sync Connection
|
||||||
ListSnapshots(RequestListSnapshots) ResponseListSnapshots // List available snapshots
|
ListSnapshots(RequestListSnapshots) ResponseListSnapshots // List available snapshots
|
||||||
@@ -44,6 +55,10 @@ func (BaseApplication) Info(req RequestInfo) ResponseInfo {
|
|||||||
return ResponseInfo{}
|
return ResponseInfo{}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (BaseApplication) DeliverTx(req RequestDeliverTx) ResponseDeliverTx {
|
||||||
|
return ResponseDeliverTx{Code: CodeTypeOK}
|
||||||
|
}
|
||||||
|
|
||||||
func (BaseApplication) CheckTx(req RequestCheckTx) ResponseCheckTx {
|
func (BaseApplication) CheckTx(req RequestCheckTx) ResponseCheckTx {
|
||||||
return ResponseCheckTx{Code: CodeTypeOK}
|
return ResponseCheckTx{Code: CodeTypeOK}
|
||||||
}
|
}
|
||||||
@@ -52,6 +67,14 @@ func (BaseApplication) Commit() ResponseCommit {
|
|||||||
return ResponseCommit{}
|
return ResponseCommit{}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (BaseApplication) ExtendVote(req RequestExtendVote) ResponseExtendVote {
|
||||||
|
return ResponseExtendVote{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (BaseApplication) VerifyVoteExtension(req RequestVerifyVoteExtension) ResponseVerifyVoteExtension {
|
||||||
|
return ResponseVerifyVoteExtension{}
|
||||||
|
}
|
||||||
|
|
||||||
func (BaseApplication) Query(req RequestQuery) ResponseQuery {
|
func (BaseApplication) Query(req RequestQuery) ResponseQuery {
|
||||||
return ResponseQuery{Code: CodeTypeOK}
|
return ResponseQuery{Code: CodeTypeOK}
|
||||||
}
|
}
|
||||||
@@ -60,6 +83,14 @@ func (BaseApplication) InitChain(req RequestInitChain) ResponseInitChain {
|
|||||||
return ResponseInitChain{}
|
return ResponseInitChain{}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (BaseApplication) BeginBlock(req RequestBeginBlock) ResponseBeginBlock {
|
||||||
|
return ResponseBeginBlock{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (BaseApplication) EndBlock(req RequestEndBlock) ResponseEndBlock {
|
||||||
|
return ResponseEndBlock{}
|
||||||
|
}
|
||||||
|
|
||||||
func (BaseApplication) ListSnapshots(req RequestListSnapshots) ResponseListSnapshots {
|
func (BaseApplication) ListSnapshots(req RequestListSnapshots) ResponseListSnapshots {
|
||||||
return ResponseListSnapshots{}
|
return ResponseListSnapshots{}
|
||||||
}
|
}
|
||||||
@@ -76,8 +107,8 @@ func (BaseApplication) ApplySnapshotChunk(req RequestApplySnapshotChunk) Respons
|
|||||||
return ResponseApplySnapshotChunk{}
|
return ResponseApplySnapshotChunk{}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (BaseApplication) FinalizeBlock(req RequestFinalizeBlock) ResponseFinalizeBlock {
|
func (BaseApplication) PrepareProposal(req RequestPrepareProposal) ResponsePrepareProposal {
|
||||||
return ResponseFinalizeBlock{}
|
return ResponsePrepareProposal{}
|
||||||
}
|
}
|
||||||
|
|
||||||
//-------------------------------------------------------
|
//-------------------------------------------------------
|
||||||
@@ -104,6 +135,11 @@ func (app *GRPCApplication) Info(ctx context.Context, req *RequestInfo) (*Respon
|
|||||||
return &res, nil
|
return &res, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (app *GRPCApplication) DeliverTx(ctx context.Context, req *RequestDeliverTx) (*ResponseDeliverTx, error) {
|
||||||
|
res := app.app.DeliverTx(*req)
|
||||||
|
return &res, nil
|
||||||
|
}
|
||||||
|
|
||||||
func (app *GRPCApplication) CheckTx(ctx context.Context, req *RequestCheckTx) (*ResponseCheckTx, error) {
|
func (app *GRPCApplication) CheckTx(ctx context.Context, req *RequestCheckTx) (*ResponseCheckTx, error) {
|
||||||
res := app.app.CheckTx(*req)
|
res := app.app.CheckTx(*req)
|
||||||
return &res, nil
|
return &res, nil
|
||||||
@@ -124,6 +160,16 @@ func (app *GRPCApplication) InitChain(ctx context.Context, req *RequestInitChain
|
|||||||
return &res, nil
|
return &res, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (app *GRPCApplication) BeginBlock(ctx context.Context, req *RequestBeginBlock) (*ResponseBeginBlock, error) {
|
||||||
|
res := app.app.BeginBlock(*req)
|
||||||
|
return &res, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (app *GRPCApplication) EndBlock(ctx context.Context, req *RequestEndBlock) (*ResponseEndBlock, error) {
|
||||||
|
res := app.app.EndBlock(*req)
|
||||||
|
return &res, nil
|
||||||
|
}
|
||||||
|
|
||||||
func (app *GRPCApplication) ListSnapshots(
|
func (app *GRPCApplication) ListSnapshots(
|
||||||
ctx context.Context, req *RequestListSnapshots) (*ResponseListSnapshots, error) {
|
ctx context.Context, req *RequestListSnapshots) (*ResponseListSnapshots, error) {
|
||||||
res := app.app.ListSnapshots(*req)
|
res := app.app.ListSnapshots(*req)
|
||||||
@@ -148,8 +194,20 @@ func (app *GRPCApplication) ApplySnapshotChunk(
|
|||||||
return &res, nil
|
return &res, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (app *GRPCApplication) FinalizeBlock(
|
func (app *GRPCApplication) ExtendVote(
|
||||||
ctx context.Context, req *RequestFinalizeBlock) (*ResponseFinalizeBlock, error) {
|
ctx context.Context, req *RequestExtendVote) (*ResponseExtendVote, error) {
|
||||||
res := app.app.FinalizeBlock(*req)
|
res := app.app.ExtendVote(*req)
|
||||||
|
return &res, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (app *GRPCApplication) VerifyVoteExtension(
|
||||||
|
ctx context.Context, req *RequestVerifyVoteExtension) (*ResponseVerifyVoteExtension, error) {
|
||||||
|
res := app.app.VerifyVoteExtension(*req)
|
||||||
|
return &res, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (app *GRPCApplication) PrepareProposal(
|
||||||
|
ctx context.Context, req *RequestPrepareProposal) (*ResponsePrepareProposal, error) {
|
||||||
|
res := app.app.PrepareProposal(*req)
|
||||||
return &res, nil
|
return &res, nil
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -15,11 +15,7 @@ const (
|
|||||||
func WriteMessage(msg proto.Message, w io.Writer) error {
|
func WriteMessage(msg proto.Message, w io.Writer) error {
|
||||||
protoWriter := protoio.NewDelimitedWriter(w)
|
protoWriter := protoio.NewDelimitedWriter(w)
|
||||||
_, err := protoWriter.WriteMsg(msg)
|
_, err := protoWriter.WriteMsg(msg)
|
||||||
if err != nil {
|
return err
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// ReadMessage reads a varint length-delimited protobuf message.
|
// ReadMessage reads a varint length-delimited protobuf message.
|
||||||
@@ -48,6 +44,12 @@ func ToRequestInfo(req RequestInfo) *Request {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func ToRequestDeliverTx(req RequestDeliverTx) *Request {
|
||||||
|
return &Request{
|
||||||
|
Value: &Request_DeliverTx{&req},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func ToRequestCheckTx(req RequestCheckTx) *Request {
|
func ToRequestCheckTx(req RequestCheckTx) *Request {
|
||||||
return &Request{
|
return &Request{
|
||||||
Value: &Request_CheckTx{&req},
|
Value: &Request_CheckTx{&req},
|
||||||
@@ -72,6 +74,18 @@ func ToRequestInitChain(req RequestInitChain) *Request {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func ToRequestBeginBlock(req RequestBeginBlock) *Request {
|
||||||
|
return &Request{
|
||||||
|
Value: &Request_BeginBlock{&req},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func ToRequestEndBlock(req RequestEndBlock) *Request {
|
||||||
|
return &Request{
|
||||||
|
Value: &Request_EndBlock{&req},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func ToRequestListSnapshots(req RequestListSnapshots) *Request {
|
func ToRequestListSnapshots(req RequestListSnapshots) *Request {
|
||||||
return &Request{
|
return &Request{
|
||||||
Value: &Request_ListSnapshots{&req},
|
Value: &Request_ListSnapshots{&req},
|
||||||
@@ -96,9 +110,21 @@ func ToRequestApplySnapshotChunk(req RequestApplySnapshotChunk) *Request {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func ToRequestFinalizeBlock(req RequestFinalizeBlock) *Request {
|
func ToRequestExtendVote(req RequestExtendVote) *Request {
|
||||||
return &Request{
|
return &Request{
|
||||||
Value: &Request_FinalizeBlock{&req},
|
Value: &Request_ExtendVote{&req},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func ToRequestVerifyVoteExtension(req RequestVerifyVoteExtension) *Request {
|
||||||
|
return &Request{
|
||||||
|
Value: &Request_VerifyVoteExtension{&req},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func ToRequestPrepareProposal(req RequestPrepareProposal) *Request {
|
||||||
|
return &Request{
|
||||||
|
Value: &Request_PrepareProposal{&req},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -127,6 +153,11 @@ func ToResponseInfo(res ResponseInfo) *Response {
|
|||||||
Value: &Response_Info{&res},
|
Value: &Response_Info{&res},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
func ToResponseDeliverTx(res ResponseDeliverTx) *Response {
|
||||||
|
return &Response{
|
||||||
|
Value: &Response_DeliverTx{&res},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func ToResponseCheckTx(res ResponseCheckTx) *Response {
|
func ToResponseCheckTx(res ResponseCheckTx) *Response {
|
||||||
return &Response{
|
return &Response{
|
||||||
@@ -152,6 +183,18 @@ func ToResponseInitChain(res ResponseInitChain) *Response {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func ToResponseBeginBlock(res ResponseBeginBlock) *Response {
|
||||||
|
return &Response{
|
||||||
|
Value: &Response_BeginBlock{&res},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func ToResponseEndBlock(res ResponseEndBlock) *Response {
|
||||||
|
return &Response{
|
||||||
|
Value: &Response_EndBlock{&res},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func ToResponseListSnapshots(res ResponseListSnapshots) *Response {
|
func ToResponseListSnapshots(res ResponseListSnapshots) *Response {
|
||||||
return &Response{
|
return &Response{
|
||||||
Value: &Response_ListSnapshots{&res},
|
Value: &Response_ListSnapshots{&res},
|
||||||
@@ -176,8 +219,20 @@ func ToResponseApplySnapshotChunk(res ResponseApplySnapshotChunk) *Response {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func ToResponseFinalizeBlock(res ResponseFinalizeBlock) *Response {
|
func ToResponseExtendVote(res ResponseExtendVote) *Response {
|
||||||
return &Response{
|
return &Response{
|
||||||
Value: &Response_FinalizeBlock{&res},
|
Value: &Response_ExtendVote{&res},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func ToResponseVerifyVoteExtension(res ResponseVerifyVoteExtension) *Response {
|
||||||
|
return &Response{
|
||||||
|
Value: &Response_VerifyVoteExtension{&res},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func ToResponsePrepareProposal(res ResponsePrepareProposal) *Response {
|
||||||
|
return &Response{
|
||||||
|
Value: &Response_PrepareProposal{&res},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -5,6 +5,8 @@ import (
|
|||||||
"encoding/json"
|
"encoding/json"
|
||||||
|
|
||||||
"github.com/gogo/protobuf/jsonpb"
|
"github.com/gogo/protobuf/jsonpb"
|
||||||
|
|
||||||
|
types "github.com/tendermint/tendermint/proto/tendermint/types"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -41,6 +43,16 @@ func (r ResponseQuery) IsErr() bool {
|
|||||||
return r.Code != CodeTypeOK
|
return r.Code != CodeTypeOK
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// IsOK returns true if Code is OK
|
||||||
|
func (r ResponseVerifyVoteExtension) IsOK() bool {
|
||||||
|
return r.Result <= ResponseVerifyVoteExtension_ACCEPT
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsErr returns true if Code is something other than OK.
|
||||||
|
func (r ResponseVerifyVoteExtension) IsErr() bool {
|
||||||
|
return r.Result > ResponseVerifyVoteExtension_ACCEPT
|
||||||
|
}
|
||||||
|
|
||||||
//---------------------------------------------------------------------------
|
//---------------------------------------------------------------------------
|
||||||
// override JSON marshaling so we emit defaults (ie. disable omitempty)
|
// override JSON marshaling so we emit defaults (ie. disable omitempty)
|
||||||
|
|
||||||
@@ -118,3 +130,25 @@ var _ jsonRoundTripper = (*ResponseDeliverTx)(nil)
|
|||||||
var _ jsonRoundTripper = (*ResponseCheckTx)(nil)
|
var _ jsonRoundTripper = (*ResponseCheckTx)(nil)
|
||||||
|
|
||||||
var _ jsonRoundTripper = (*EventAttribute)(nil)
|
var _ jsonRoundTripper = (*EventAttribute)(nil)
|
||||||
|
|
||||||
|
// -----------------------------------------------
|
||||||
|
// construct Result data
|
||||||
|
|
||||||
|
func RespondExtendVote(appDataToSign, appDataSelfAuthenticating []byte) ResponseExtendVote {
|
||||||
|
return ResponseExtendVote{
|
||||||
|
VoteExtension: &types.VoteExtension{
|
||||||
|
AppDataToSign: appDataToSign,
|
||||||
|
AppDataSelfAuthenticating: appDataSelfAuthenticating,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func RespondVerifyVoteExtension(ok bool) ResponseVerifyVoteExtension {
|
||||||
|
result := ResponseVerifyVoteExtension_REJECT
|
||||||
|
if ok {
|
||||||
|
result = ResponseVerifyVoteExtension_ACCEPT
|
||||||
|
}
|
||||||
|
return ResponseVerifyVoteExtension{
|
||||||
|
Result: result,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@@ -12,11 +12,9 @@ import (
|
|||||||
// GenNodeKeyCmd allows the generation of a node key. It prints JSON-encoded
|
// GenNodeKeyCmd allows the generation of a node key. It prints JSON-encoded
|
||||||
// NodeKey to the standard output.
|
// NodeKey to the standard output.
|
||||||
var GenNodeKeyCmd = &cobra.Command{
|
var GenNodeKeyCmd = &cobra.Command{
|
||||||
Use: "gen-node-key",
|
Use: "gen-node-key",
|
||||||
Aliases: []string{"gen_node_key"},
|
Short: "Generate a new node key",
|
||||||
Short: "Generate a new node key",
|
RunE: genNodeKey,
|
||||||
RunE: genNodeKey,
|
|
||||||
PreRun: deprecateSnakeCase,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func genNodeKey(cmd *cobra.Command, args []string) error {
|
func genNodeKey(cmd *cobra.Command, args []string) error {
|
||||||
|
|||||||
@@ -13,11 +13,9 @@ import (
|
|||||||
// GenValidatorCmd allows the generation of a keypair for a
|
// GenValidatorCmd allows the generation of a keypair for a
|
||||||
// validator.
|
// validator.
|
||||||
var GenValidatorCmd = &cobra.Command{
|
var GenValidatorCmd = &cobra.Command{
|
||||||
Use: "gen-validator",
|
Use: "gen-validator",
|
||||||
Aliases: []string{"gen_validator"},
|
Short: "Generate new validator keypair",
|
||||||
Short: "Generate new validator keypair",
|
RunE: genValidator,
|
||||||
RunE: genValidator,
|
|
||||||
PreRun: deprecateSnakeCase,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
|
|||||||
87
cmd/tendermint/commands/inspect.go
Normal file
87
cmd/tendermint/commands/inspect.go
Normal file
@@ -0,0 +1,87 @@
|
|||||||
|
package commands
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"os"
|
||||||
|
"os/signal"
|
||||||
|
"syscall"
|
||||||
|
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
|
||||||
|
cfg "github.com/tendermint/tendermint/config"
|
||||||
|
"github.com/tendermint/tendermint/inspect"
|
||||||
|
"github.com/tendermint/tendermint/state"
|
||||||
|
"github.com/tendermint/tendermint/state/indexer/sink"
|
||||||
|
"github.com/tendermint/tendermint/store"
|
||||||
|
"github.com/tendermint/tendermint/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
// InspectCmd is the command for starting an inspect server.
|
||||||
|
var InspectCmd = &cobra.Command{
|
||||||
|
Use: "inspect",
|
||||||
|
Short: "Run an inspect server for investigating Tendermint state",
|
||||||
|
Long: `
|
||||||
|
inspect runs a subset of Tendermint's RPC endpoints that are useful for debugging
|
||||||
|
issues with Tendermint.
|
||||||
|
|
||||||
|
When the Tendermint consensus engine detects inconsistent state, it will crash the
|
||||||
|
tendermint process. Tendermint will not start up while in this inconsistent state.
|
||||||
|
The inspect command can be used to query the block and state store using Tendermint
|
||||||
|
RPC calls to debug issues of inconsistent state.
|
||||||
|
`,
|
||||||
|
|
||||||
|
RunE: runInspect,
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
InspectCmd.Flags().
|
||||||
|
String("rpc.laddr",
|
||||||
|
config.RPC.ListenAddress, "RPC listenener address. Port required")
|
||||||
|
InspectCmd.Flags().
|
||||||
|
String("db-backend",
|
||||||
|
config.DBBackend, "database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb")
|
||||||
|
InspectCmd.Flags().
|
||||||
|
String("db-dir", config.DBPath, "database directory")
|
||||||
|
}
|
||||||
|
|
||||||
|
func runInspect(cmd *cobra.Command, args []string) error {
|
||||||
|
ctx, cancel := context.WithCancel(cmd.Context())
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
c := make(chan os.Signal, 1)
|
||||||
|
signal.Notify(c, syscall.SIGTERM, syscall.SIGINT)
|
||||||
|
go func() {
|
||||||
|
<-c
|
||||||
|
cancel()
|
||||||
|
}()
|
||||||
|
|
||||||
|
blockStoreDB, err := cfg.DefaultDBProvider(&cfg.DBContext{ID: "blockstore", Config: config})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
blockStore := store.NewBlockStore(blockStoreDB)
|
||||||
|
stateDB, err := cfg.DefaultDBProvider(&cfg.DBContext{ID: "state", Config: config})
|
||||||
|
if err != nil {
|
||||||
|
if err := blockStoreDB.Close(); err != nil {
|
||||||
|
logger.Error("error closing block store db", "error", err)
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
genDoc, err := types.GenesisDocFromFile(config.GenesisFile())
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
sinks, err := sink.EventSinksFromConfig(config, cfg.DefaultDBProvider, genDoc.ChainID)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
stateStore := state.NewStore(stateDB)
|
||||||
|
|
||||||
|
ins := inspect.New(config.RPC, blockStore, stateStore, sinks, logger)
|
||||||
|
|
||||||
|
logger.Info("starting inspect server")
|
||||||
|
if err := ins.Run(ctx); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
64
cmd/tendermint/commands/key_migrate.go
Normal file
64
cmd/tendermint/commands/key_migrate.go
Normal file
@@ -0,0 +1,64 @@
|
|||||||
|
package commands
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
cfg "github.com/tendermint/tendermint/config"
|
||||||
|
"github.com/tendermint/tendermint/scripts/keymigrate"
|
||||||
|
)
|
||||||
|
|
||||||
|
func MakeKeyMigrateCommand() *cobra.Command {
|
||||||
|
cmd := &cobra.Command{
|
||||||
|
Use: "key-migrate",
|
||||||
|
Short: "Run Database key migration",
|
||||||
|
RunE: func(cmd *cobra.Command, args []string) error {
|
||||||
|
ctx, cancel := context.WithCancel(cmd.Context())
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
contexts := []string{
|
||||||
|
// this is ordered to put the
|
||||||
|
// (presumably) biggest/most important
|
||||||
|
// subsets first.
|
||||||
|
"blockstore",
|
||||||
|
"state",
|
||||||
|
"peerstore",
|
||||||
|
"tx_index",
|
||||||
|
"evidence",
|
||||||
|
"light",
|
||||||
|
}
|
||||||
|
|
||||||
|
for idx, dbctx := range contexts {
|
||||||
|
logger.Info("beginning a key migration",
|
||||||
|
"dbctx", dbctx,
|
||||||
|
"num", idx+1,
|
||||||
|
"total", len(contexts),
|
||||||
|
)
|
||||||
|
|
||||||
|
db, err := cfg.DefaultDBProvider(&cfg.DBContext{
|
||||||
|
ID: dbctx,
|
||||||
|
Config: config,
|
||||||
|
})
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("constructing database handle: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = keymigrate.Migrate(ctx, db); err != nil {
|
||||||
|
return fmt.Errorf("running migration for context %q: %w",
|
||||||
|
dbctx, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.Info("completed database migration successfully")
|
||||||
|
|
||||||
|
return nil
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
// allow database info to be overridden via cli
|
||||||
|
addDBFlags(cmd)
|
||||||
|
|
||||||
|
return cmd
|
||||||
|
}
|
||||||
@@ -11,11 +11,9 @@ import (
|
|||||||
|
|
||||||
// ProbeUpnpCmd adds capabilities to test the UPnP functionality.
|
// ProbeUpnpCmd adds capabilities to test the UPnP functionality.
|
||||||
var ProbeUpnpCmd = &cobra.Command{
|
var ProbeUpnpCmd = &cobra.Command{
|
||||||
Use: "probe-upnp",
|
Use: "probe-upnp",
|
||||||
Aliases: []string{"probe_upnp"},
|
Short: "Test UPnP functionality",
|
||||||
Short: "Test UPnP functionality",
|
RunE: probeUpnp,
|
||||||
RunE: probeUpnp,
|
|
||||||
PreRun: deprecateSnakeCase,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func probeUpnp(cmd *cobra.Command, args []string) error {
|
func probeUpnp(cmd *cobra.Command, args []string) error {
|
||||||
|
|||||||
251
cmd/tendermint/commands/reindex_event.go
Normal file
251
cmd/tendermint/commands/reindex_event.go
Normal file
@@ -0,0 +1,251 @@
|
|||||||
|
package commands
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
tmdb "github.com/tendermint/tm-db"
|
||||||
|
|
||||||
|
abcitypes "github.com/tendermint/tendermint/abci/types"
|
||||||
|
tmcfg "github.com/tendermint/tendermint/config"
|
||||||
|
"github.com/tendermint/tendermint/internal/libs/progressbar"
|
||||||
|
ctypes "github.com/tendermint/tendermint/rpc/core/types"
|
||||||
|
"github.com/tendermint/tendermint/state"
|
||||||
|
"github.com/tendermint/tendermint/state/indexer"
|
||||||
|
"github.com/tendermint/tendermint/state/indexer/sink/kv"
|
||||||
|
"github.com/tendermint/tendermint/state/indexer/sink/psql"
|
||||||
|
"github.com/tendermint/tendermint/store"
|
||||||
|
"github.com/tendermint/tendermint/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
reindexFailed = "event re-index failed: "
|
||||||
|
)
|
||||||
|
|
||||||
|
// ReIndexEventCmd allows re-index the event by given block height interval
|
||||||
|
var ReIndexEventCmd = &cobra.Command{
|
||||||
|
Use: "reindex-event",
|
||||||
|
Short: "reindex events to the event store backends",
|
||||||
|
Long: `
|
||||||
|
reindex-event is an offline tooling to re-index block and tx events to the eventsinks,
|
||||||
|
you can run this command when the event store backend dropped/disconnected or you want to replace the backend.
|
||||||
|
The default start-height is 0, meaning the tooling will start reindex from the base block height(inclusive); and the
|
||||||
|
default end-height is 0, meaning the tooling will reindex until the latest block height(inclusive). User can omits
|
||||||
|
either or both arguments.
|
||||||
|
`,
|
||||||
|
Example: `
|
||||||
|
tendermint reindex-event
|
||||||
|
tendermint reindex-event --start-height 2
|
||||||
|
tendermint reindex-event --end-height 10
|
||||||
|
tendermint reindex-event --start-height 2 --end-height 10
|
||||||
|
`,
|
||||||
|
Run: func(cmd *cobra.Command, args []string) {
|
||||||
|
bs, ss, err := loadStateAndBlockStore(config)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println(reindexFailed, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := checkValidHeight(bs); err != nil {
|
||||||
|
fmt.Println(reindexFailed, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
es, err := loadEventSinks(config)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println(reindexFailed, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = eventReIndex(cmd, es, bs, ss); err != nil {
|
||||||
|
fmt.Println(reindexFailed, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Println("event re-index finished")
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
startHeight int64
|
||||||
|
endHeight int64
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
ReIndexEventCmd.Flags().Int64Var(&startHeight, "start-height", 0, "the block height would like to start for re-index")
|
||||||
|
ReIndexEventCmd.Flags().Int64Var(&endHeight, "end-height", 0, "the block height would like to finish for re-index")
|
||||||
|
}
|
||||||
|
|
||||||
|
func loadEventSinks(cfg *tmcfg.Config) ([]indexer.EventSink, error) {
|
||||||
|
// Check duplicated sinks.
|
||||||
|
sinks := map[string]bool{}
|
||||||
|
for _, s := range cfg.TxIndex.Indexer {
|
||||||
|
sl := strings.ToLower(s)
|
||||||
|
if sinks[sl] {
|
||||||
|
return nil, errors.New("found duplicated sinks, please check the tx-index section in the config.toml")
|
||||||
|
}
|
||||||
|
sinks[sl] = true
|
||||||
|
}
|
||||||
|
|
||||||
|
eventSinks := []indexer.EventSink{}
|
||||||
|
|
||||||
|
for k := range sinks {
|
||||||
|
switch k {
|
||||||
|
case string(indexer.NULL):
|
||||||
|
return nil, errors.New("found null event sink, please check the tx-index section in the config.toml")
|
||||||
|
case string(indexer.KV):
|
||||||
|
store, err := tmcfg.DefaultDBProvider(&tmcfg.DBContext{ID: "tx_index", Config: cfg})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
eventSinks = append(eventSinks, kv.NewEventSink(store))
|
||||||
|
case string(indexer.PSQL):
|
||||||
|
conn := cfg.TxIndex.PsqlConn
|
||||||
|
if conn == "" {
|
||||||
|
return nil, errors.New("the psql connection settings cannot be empty")
|
||||||
|
}
|
||||||
|
es, err := psql.NewEventSink(conn, chainID)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
eventSinks = append(eventSinks, es)
|
||||||
|
default:
|
||||||
|
return nil, errors.New("unsupported event sink type")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(eventSinks) == 0 {
|
||||||
|
return nil, errors.New("no proper event sink can do event re-indexing," +
|
||||||
|
" please check the tx-index section in the config.toml")
|
||||||
|
}
|
||||||
|
|
||||||
|
if !indexer.IndexingEnabled(eventSinks) {
|
||||||
|
return nil, fmt.Errorf("no event sink has been enabled")
|
||||||
|
}
|
||||||
|
|
||||||
|
return eventSinks, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func loadStateAndBlockStore(cfg *tmcfg.Config) (*store.BlockStore, state.Store, error) {
|
||||||
|
dbType := tmdb.BackendType(cfg.DBBackend)
|
||||||
|
|
||||||
|
// Get BlockStore
|
||||||
|
blockStoreDB, err := tmdb.NewDB("blockstore", dbType, cfg.DBDir())
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
blockStore := store.NewBlockStore(blockStoreDB)
|
||||||
|
|
||||||
|
// Get StateStore
|
||||||
|
stateDB, err := tmdb.NewDB("state", dbType, cfg.DBDir())
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
stateStore := state.NewStore(stateDB)
|
||||||
|
|
||||||
|
return blockStore, stateStore, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func eventReIndex(cmd *cobra.Command, es []indexer.EventSink, bs state.BlockStore, ss state.Store) error {
|
||||||
|
|
||||||
|
var bar progressbar.Bar
|
||||||
|
bar.NewOption(startHeight-1, endHeight)
|
||||||
|
|
||||||
|
fmt.Println("start re-indexing events:")
|
||||||
|
defer bar.Finish()
|
||||||
|
for i := startHeight; i <= endHeight; i++ {
|
||||||
|
select {
|
||||||
|
case <-cmd.Context().Done():
|
||||||
|
return fmt.Errorf("event re-index terminated at height %d: %w", i, cmd.Context().Err())
|
||||||
|
default:
|
||||||
|
b := bs.LoadBlock(i)
|
||||||
|
if b == nil {
|
||||||
|
return fmt.Errorf("not able to load block at height %d from the blockstore", i)
|
||||||
|
}
|
||||||
|
|
||||||
|
r, err := ss.LoadABCIResponses(i)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("not able to load ABCI Response at height %d from the statestore", i)
|
||||||
|
}
|
||||||
|
|
||||||
|
e := types.EventDataNewBlockHeader{
|
||||||
|
Header: b.Header,
|
||||||
|
NumTxs: int64(len(b.Txs)),
|
||||||
|
ResultBeginBlock: *r.BeginBlock,
|
||||||
|
ResultEndBlock: *r.EndBlock,
|
||||||
|
}
|
||||||
|
|
||||||
|
var batch *indexer.Batch
|
||||||
|
if e.NumTxs > 0 {
|
||||||
|
batch = indexer.NewBatch(e.NumTxs)
|
||||||
|
|
||||||
|
for i, tx := range b.Data.Txs {
|
||||||
|
tr := abcitypes.TxResult{
|
||||||
|
Height: b.Height,
|
||||||
|
Index: uint32(i),
|
||||||
|
Tx: tx,
|
||||||
|
Result: *(r.DeliverTxs[i]),
|
||||||
|
}
|
||||||
|
|
||||||
|
_ = batch.Add(&tr)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, sink := range es {
|
||||||
|
if err := sink.IndexBlockEvents(e); err != nil {
|
||||||
|
return fmt.Errorf("block event re-index at height %d failed: %w", i, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if batch != nil {
|
||||||
|
if err := sink.IndexTxEvents(batch.Ops); err != nil {
|
||||||
|
return fmt.Errorf("tx event re-index at height %d failed: %w", i, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
bar.Play(i)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func checkValidHeight(bs state.BlockStore) error {
|
||||||
|
base := bs.Base()
|
||||||
|
|
||||||
|
if startHeight == 0 {
|
||||||
|
startHeight = base
|
||||||
|
fmt.Printf("set the start block height to the base height of the blockstore %d \n", base)
|
||||||
|
}
|
||||||
|
|
||||||
|
if startHeight < base {
|
||||||
|
return fmt.Errorf("%s (requested start height: %d, base height: %d)", ctypes.ErrHeightNotAvailable, startHeight, base)
|
||||||
|
}
|
||||||
|
|
||||||
|
height := bs.Height()
|
||||||
|
|
||||||
|
if startHeight > height {
|
||||||
|
return fmt.Errorf(
|
||||||
|
"%s (requested start height: %d, store height: %d)", ctypes.ErrHeightNotAvailable, startHeight, height)
|
||||||
|
}
|
||||||
|
|
||||||
|
if endHeight == 0 || endHeight > height {
|
||||||
|
endHeight = height
|
||||||
|
fmt.Printf("set the end block height to the latest height of the blockstore %d \n", height)
|
||||||
|
}
|
||||||
|
|
||||||
|
if endHeight < base {
|
||||||
|
return fmt.Errorf(
|
||||||
|
"%s (requested end height: %d, base height: %d)", ctypes.ErrHeightNotAvailable, endHeight, base)
|
||||||
|
}
|
||||||
|
|
||||||
|
if endHeight < startHeight {
|
||||||
|
return fmt.Errorf(
|
||||||
|
"%s (requested the end height: %d is less than the start height: %d)",
|
||||||
|
ctypes.ErrInvalidRequest, startHeight, endHeight)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
171
cmd/tendermint/commands/reindex_event_test.go
Normal file
171
cmd/tendermint/commands/reindex_event_test.go
Normal file
@@ -0,0 +1,171 @@
|
|||||||
|
package commands
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
"github.com/stretchr/testify/mock"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
|
abcitypes "github.com/tendermint/tendermint/abci/types"
|
||||||
|
tmcfg "github.com/tendermint/tendermint/config"
|
||||||
|
prototmstate "github.com/tendermint/tendermint/proto/tendermint/state"
|
||||||
|
"github.com/tendermint/tendermint/state/indexer"
|
||||||
|
"github.com/tendermint/tendermint/state/mocks"
|
||||||
|
"github.com/tendermint/tendermint/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
height int64 = 10
|
||||||
|
base int64 = 2
|
||||||
|
)
|
||||||
|
|
||||||
|
func setupReIndexEventCmd() *cobra.Command {
|
||||||
|
reIndexEventCmd := &cobra.Command{
|
||||||
|
Use: ReIndexEventCmd.Use,
|
||||||
|
Run: func(cmd *cobra.Command, args []string) {},
|
||||||
|
}
|
||||||
|
|
||||||
|
_ = reIndexEventCmd.ExecuteContext(context.Background())
|
||||||
|
|
||||||
|
return reIndexEventCmd
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestReIndexEventCheckHeight(t *testing.T) {
|
||||||
|
mockBlockStore := &mocks.BlockStore{}
|
||||||
|
mockBlockStore.
|
||||||
|
On("Base").Return(base).
|
||||||
|
On("Height").Return(height)
|
||||||
|
|
||||||
|
testCases := []struct {
|
||||||
|
startHeight int64
|
||||||
|
endHeight int64
|
||||||
|
validHeight bool
|
||||||
|
}{
|
||||||
|
{0, 0, true},
|
||||||
|
{0, base, true},
|
||||||
|
{0, base - 1, false},
|
||||||
|
{0, height, true},
|
||||||
|
{0, height + 1, true},
|
||||||
|
{0, 0, true},
|
||||||
|
{base - 1, 0, false},
|
||||||
|
{base, 0, true},
|
||||||
|
{base, base, true},
|
||||||
|
{base, base - 1, false},
|
||||||
|
{base, height, true},
|
||||||
|
{base, height + 1, true},
|
||||||
|
{height, 0, true},
|
||||||
|
{height, base, false},
|
||||||
|
{height, height - 1, false},
|
||||||
|
{height, height, true},
|
||||||
|
{height, height + 1, true},
|
||||||
|
{height + 1, 0, false},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range testCases {
|
||||||
|
startHeight = tc.startHeight
|
||||||
|
endHeight = tc.endHeight
|
||||||
|
|
||||||
|
err := checkValidHeight(mockBlockStore)
|
||||||
|
if tc.validHeight {
|
||||||
|
require.NoError(t, err)
|
||||||
|
} else {
|
||||||
|
require.Error(t, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestLoadEventSink(t *testing.T) {
|
||||||
|
testCases := []struct {
|
||||||
|
sinks []string
|
||||||
|
connURL string
|
||||||
|
loadErr bool
|
||||||
|
}{
|
||||||
|
{[]string{}, "", true},
|
||||||
|
{[]string{"NULL"}, "", true},
|
||||||
|
{[]string{"KV"}, "", false},
|
||||||
|
{[]string{"KV", "KV"}, "", true},
|
||||||
|
{[]string{"PSQL"}, "", true}, // true because empty connect url
|
||||||
|
{[]string{"PSQL"}, "wrongUrl", true}, // true because wrong connect url
|
||||||
|
// skip to test PSQL connect with correct url
|
||||||
|
{[]string{"UnsupportedSinkType"}, "wrongUrl", true},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range testCases {
|
||||||
|
cfg := tmcfg.TestConfig()
|
||||||
|
cfg.TxIndex.Indexer = tc.sinks
|
||||||
|
cfg.TxIndex.PsqlConn = tc.connURL
|
||||||
|
_, err := loadEventSinks(cfg)
|
||||||
|
if tc.loadErr {
|
||||||
|
require.Error(t, err)
|
||||||
|
} else {
|
||||||
|
require.NoError(t, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestLoadBlockStore(t *testing.T) {
|
||||||
|
bs, ss, err := loadStateAndBlockStore(tmcfg.TestConfig())
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.NotNil(t, bs)
|
||||||
|
require.NotNil(t, ss)
|
||||||
|
|
||||||
|
}
|
||||||
|
// TestReIndexEvent drives eventReIndex through its failure and success paths.
//
// The mocks are sequenced with .Once() so that each call consumes the next
// canned result: the first LoadBlock returns nil, the first LoadABCIResponses
// returns an error, and the first IndexBlockEvents / IndexTxEvents calls fail;
// all subsequent calls succeed. The test-case order below therefore MUST match
// this mock ordering exactly.
func TestReIndexEvent(t *testing.T) {
	mockBlockStore := &mocks.BlockStore{}
	mockStateStore := &mocks.Store{}
	mockEventSink := &mocks.EventSink{}

	// First LoadBlock(base) returns nil (load-failure case); later calls
	// return a block containing a single one-byte transaction.
	mockBlockStore.
		On("Base").Return(base).
		On("Height").Return(height).
		On("LoadBlock", base).Return(nil).Once().
		On("LoadBlock", base).Return(&types.Block{Data: types.Data{Txs: types.Txs{make(types.Tx, 1)}}}).
		On("LoadBlock", height).Return(&types.Block{Data: types.Data{Txs: types.Txs{make(types.Tx, 1)}}})

	// First IndexBlockEvents and first IndexTxEvents each fail once, then
	// succeed on every later call.
	mockEventSink.
		On("Type").Return(indexer.KV).
		On("IndexBlockEvents", mock.AnythingOfType("types.EventDataNewBlockHeader")).Return(errors.New("")).Once().
		On("IndexBlockEvents", mock.AnythingOfType("types.EventDataNewBlockHeader")).Return(nil).
		On("IndexTxEvents", mock.AnythingOfType("[]*types.TxResult")).Return(errors.New("")).Once().
		On("IndexTxEvents", mock.AnythingOfType("[]*types.TxResult")).Return(nil)

	// Minimal ABCI responses: one DeliverTx to match the single tx per block.
	dtx := abcitypes.ResponseDeliverTx{}
	abciResp := &prototmstate.ABCIResponses{
		DeliverTxs: []*abcitypes.ResponseDeliverTx{&dtx},
		EndBlock:   &abcitypes.ResponseEndBlock{},
		BeginBlock: &abcitypes.ResponseBeginBlock{},
	}

	// First LoadABCIResponses(base) fails once; later calls succeed.
	mockStateStore.
		On("LoadABCIResponses", base).Return(nil, errors.New("")).Once().
		On("LoadABCIResponses", base).Return(abciResp, nil).
		On("LoadABCIResponses", height).Return(abciResp, nil)

	testCases := []struct {
		startHeight int64
		endHeight   int64
		reIndexErr  bool
	}{
		{base, height, true}, // LoadBlock error
		{base, height, true}, // LoadABCIResponses error
		{base, height, true}, // index block event error
		{base, height, true}, // index tx event error
		{base, base, false},
		{height, height, false},
	}

	for _, tc := range testCases {
		// eventReIndex reads these package-level flags.
		startHeight = tc.startHeight
		endHeight = tc.endHeight

		err := eventReIndex(setupReIndexEventCmd(), []indexer.EventSink{mockEventSink}, mockBlockStore, mockStateStore)
		if tc.reIndexErr {
			require.Error(t, err)
		} else {
			require.NoError(t, err)
		}
	}
}
|
||||||
@@ -17,11 +17,9 @@ var ReplayCmd = &cobra.Command{
|
|||||||
// ReplayConsoleCmd allows replaying of messages from the WAL in a
|
// ReplayConsoleCmd allows replaying of messages from the WAL in a
|
||||||
// console.
|
// console.
|
||||||
var ReplayConsoleCmd = &cobra.Command{
|
var ReplayConsoleCmd = &cobra.Command{
|
||||||
Use: "replay-console",
|
Use: "replay-console",
|
||||||
Aliases: []string{"replay_console"},
|
Short: "Replay messages from WAL in a console",
|
||||||
Short: "Replay messages from WAL in a console",
|
|
||||||
Run: func(cmd *cobra.Command, args []string) {
|
Run: func(cmd *cobra.Command, args []string) {
|
||||||
consensus.RunReplayFile(config.BaseConfig, config.Consensus, true)
|
consensus.RunReplayFile(config.BaseConfig, config.Consensus, true)
|
||||||
},
|
},
|
||||||
PreRun: deprecateSnakeCase,
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -14,11 +14,9 @@ import (
|
|||||||
// ResetAllCmd removes the database of this Tendermint core
|
// ResetAllCmd removes the database of this Tendermint core
|
||||||
// instance.
|
// instance.
|
||||||
var ResetAllCmd = &cobra.Command{
|
var ResetAllCmd = &cobra.Command{
|
||||||
Use: "unsafe-reset-all",
|
Use: "unsafe-reset-all",
|
||||||
Aliases: []string{"unsafe_reset_all"},
|
Short: "(unsafe) Remove all the data and WAL, reset this node's validator to genesis state",
|
||||||
Short: "(unsafe) Remove all the data and WAL, reset this node's validator to genesis state",
|
RunE: resetAll,
|
||||||
RunE: resetAll,
|
|
||||||
PreRun: deprecateSnakeCase,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var keepAddrBook bool
|
var keepAddrBook bool
|
||||||
@@ -31,11 +29,9 @@ func init() {
|
|||||||
|
|
||||||
// ResetPrivValidatorCmd resets the private validator files.
|
// ResetPrivValidatorCmd resets the private validator files.
|
||||||
var ResetPrivValidatorCmd = &cobra.Command{
|
var ResetPrivValidatorCmd = &cobra.Command{
|
||||||
Use: "unsafe-reset-priv-validator",
|
Use: "unsafe-reset-priv-validator",
|
||||||
Aliases: []string{"unsafe_reset_priv_validator"},
|
Short: "(unsafe) Reset this node's validator to genesis state",
|
||||||
Short: "(unsafe) Reset this node's validator to genesis state",
|
RunE: resetPrivValidator,
|
||||||
RunE: resetPrivValidator,
|
|
||||||
PreRun: deprecateSnakeCase,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// XXX: this is totally unsafe.
|
// XXX: this is totally unsafe.
|
||||||
|
|||||||
@@ -2,7 +2,6 @@ package commands
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"strings"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
@@ -65,10 +64,3 @@ var RootCmd = &cobra.Command{
|
|||||||
return nil
|
return nil
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
// deprecateSnakeCase is a util function for 0.34.1. Should be removed in 0.35
|
|
||||||
func deprecateSnakeCase(cmd *cobra.Command, args []string) {
|
|
||||||
if strings.Contains(cmd.CalledAs(), "_") {
|
|
||||||
fmt.Println("Deprecated: snake_case commands will be replaced by hyphen-case commands in the next major release")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -48,9 +48,7 @@ func AddNodeFlags(cmd *cobra.Command) {
|
|||||||
"proxy-app",
|
"proxy-app",
|
||||||
config.ProxyApp,
|
config.ProxyApp,
|
||||||
"proxy app address, or one of: 'kvstore',"+
|
"proxy app address, or one of: 'kvstore',"+
|
||||||
" 'persistent_kvstore',"+
|
" 'persistent_kvstore' or 'noop' for local testing.")
|
||||||
" 'counter',"+
|
|
||||||
" 'counter_serial' or 'noop' for local testing.")
|
|
||||||
cmd.Flags().String("abci", config.ABCI, "specify abci transport (socket | grpc)")
|
cmd.Flags().String("abci", config.ABCI, "specify abci transport (socket | grpc)")
|
||||||
|
|
||||||
// rpc flags
|
// rpc flags
|
||||||
@@ -85,7 +83,10 @@ func AddNodeFlags(cmd *cobra.Command) {
|
|||||||
config.Consensus.CreateEmptyBlocksInterval.String(),
|
config.Consensus.CreateEmptyBlocksInterval.String(),
|
||||||
"the possible interval between empty blocks")
|
"the possible interval between empty blocks")
|
||||||
|
|
||||||
// db flags
|
addDBFlags(cmd)
|
||||||
|
}
|
||||||
|
|
||||||
|
func addDBFlags(cmd *cobra.Command) {
|
||||||
cmd.Flags().String(
|
cmd.Flags().String(
|
||||||
"db-backend",
|
"db-backend",
|
||||||
config.DBBackend,
|
config.DBBackend,
|
||||||
|
|||||||
@@ -8,11 +8,9 @@ import (
|
|||||||
|
|
||||||
// ShowNodeIDCmd dumps node's ID to the standard output.
|
// ShowNodeIDCmd dumps node's ID to the standard output.
|
||||||
var ShowNodeIDCmd = &cobra.Command{
|
var ShowNodeIDCmd = &cobra.Command{
|
||||||
Use: "show-node-id",
|
Use: "show-node-id",
|
||||||
Aliases: []string{"show_node_id"},
|
Short: "Show this node's ID",
|
||||||
Short: "Show this node's ID",
|
RunE: showNodeID,
|
||||||
RunE: showNodeID,
|
|
||||||
PreRun: deprecateSnakeCase,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func showNodeID(cmd *cobra.Command, args []string) error {
|
func showNodeID(cmd *cobra.Command, args []string) error {
|
||||||
|
|||||||
@@ -16,11 +16,9 @@ import (
|
|||||||
|
|
||||||
// ShowValidatorCmd adds capabilities for showing the validator info.
|
// ShowValidatorCmd adds capabilities for showing the validator info.
|
||||||
var ShowValidatorCmd = &cobra.Command{
|
var ShowValidatorCmd = &cobra.Command{
|
||||||
Use: "show-validator",
|
Use: "show-validator",
|
||||||
Aliases: []string{"show_validator"},
|
Short: "Show this node's validator info",
|
||||||
Short: "Show this node's validator info",
|
RunE: showValidator,
|
||||||
RunE: showValidator,
|
|
||||||
PreRun: deprecateSnakeCase,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func showValidator(cmd *cobra.Command, args []string) error {
|
func showValidator(cmd *cobra.Command, args []string) error {
|
||||||
|
|||||||
@@ -15,6 +15,7 @@ func main() {
|
|||||||
rootCmd := cmd.RootCmd
|
rootCmd := cmd.RootCmd
|
||||||
rootCmd.AddCommand(
|
rootCmd.AddCommand(
|
||||||
cmd.GenValidatorCmd,
|
cmd.GenValidatorCmd,
|
||||||
|
cmd.ReIndexEventCmd,
|
||||||
cmd.InitFilesCmd,
|
cmd.InitFilesCmd,
|
||||||
cmd.ProbeUpnpCmd,
|
cmd.ProbeUpnpCmd,
|
||||||
cmd.LightCmd,
|
cmd.LightCmd,
|
||||||
@@ -27,6 +28,8 @@ func main() {
|
|||||||
cmd.ShowNodeIDCmd,
|
cmd.ShowNodeIDCmd,
|
||||||
cmd.GenNodeKeyCmd,
|
cmd.GenNodeKeyCmd,
|
||||||
cmd.VersionCmd,
|
cmd.VersionCmd,
|
||||||
|
cmd.InspectCmd,
|
||||||
|
cmd.MakeKeyMigrateCommand(),
|
||||||
debug.DebugCmd,
|
debug.DebugCmd,
|
||||||
cli.NewCompletionCmd(rootCmd, true),
|
cli.NewCompletionCmd(rootCmd, true),
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -29,8 +29,8 @@ const (
|
|||||||
ModeValidator = "validator"
|
ModeValidator = "validator"
|
||||||
ModeSeed = "seed"
|
ModeSeed = "seed"
|
||||||
|
|
||||||
BlockchainV0 = "v0"
|
BlockSyncV0 = "v0"
|
||||||
BlockchainV2 = "v2"
|
BlockSyncV2 = "v2"
|
||||||
|
|
||||||
MempoolV0 = "v0"
|
MempoolV0 = "v0"
|
||||||
MempoolV1 = "v1"
|
MempoolV1 = "v1"
|
||||||
@@ -76,7 +76,7 @@ type Config struct {
|
|||||||
P2P *P2PConfig `mapstructure:"p2p"`
|
P2P *P2PConfig `mapstructure:"p2p"`
|
||||||
Mempool *MempoolConfig `mapstructure:"mempool"`
|
Mempool *MempoolConfig `mapstructure:"mempool"`
|
||||||
StateSync *StateSyncConfig `mapstructure:"statesync"`
|
StateSync *StateSyncConfig `mapstructure:"statesync"`
|
||||||
FastSync *FastSyncConfig `mapstructure:"fastsync"`
|
BlockSync *BlockSyncConfig `mapstructure:"fastsync"`
|
||||||
Consensus *ConsensusConfig `mapstructure:"consensus"`
|
Consensus *ConsensusConfig `mapstructure:"consensus"`
|
||||||
TxIndex *TxIndexConfig `mapstructure:"tx-index"`
|
TxIndex *TxIndexConfig `mapstructure:"tx-index"`
|
||||||
Instrumentation *InstrumentationConfig `mapstructure:"instrumentation"`
|
Instrumentation *InstrumentationConfig `mapstructure:"instrumentation"`
|
||||||
@@ -91,7 +91,7 @@ func DefaultConfig() *Config {
|
|||||||
P2P: DefaultP2PConfig(),
|
P2P: DefaultP2PConfig(),
|
||||||
Mempool: DefaultMempoolConfig(),
|
Mempool: DefaultMempoolConfig(),
|
||||||
StateSync: DefaultStateSyncConfig(),
|
StateSync: DefaultStateSyncConfig(),
|
||||||
FastSync: DefaultFastSyncConfig(),
|
BlockSync: DefaultBlockSyncConfig(),
|
||||||
Consensus: DefaultConsensusConfig(),
|
Consensus: DefaultConsensusConfig(),
|
||||||
TxIndex: DefaultTxIndexConfig(),
|
TxIndex: DefaultTxIndexConfig(),
|
||||||
Instrumentation: DefaultInstrumentationConfig(),
|
Instrumentation: DefaultInstrumentationConfig(),
|
||||||
@@ -114,7 +114,7 @@ func TestConfig() *Config {
|
|||||||
P2P: TestP2PConfig(),
|
P2P: TestP2PConfig(),
|
||||||
Mempool: TestMempoolConfig(),
|
Mempool: TestMempoolConfig(),
|
||||||
StateSync: TestStateSyncConfig(),
|
StateSync: TestStateSyncConfig(),
|
||||||
FastSync: TestFastSyncConfig(),
|
BlockSync: TestBlockSyncConfig(),
|
||||||
Consensus: TestConsensusConfig(),
|
Consensus: TestConsensusConfig(),
|
||||||
TxIndex: TestTxIndexConfig(),
|
TxIndex: TestTxIndexConfig(),
|
||||||
Instrumentation: TestInstrumentationConfig(),
|
Instrumentation: TestInstrumentationConfig(),
|
||||||
@@ -151,7 +151,7 @@ func (cfg *Config) ValidateBasic() error {
|
|||||||
if err := cfg.StateSync.ValidateBasic(); err != nil {
|
if err := cfg.StateSync.ValidateBasic(); err != nil {
|
||||||
return fmt.Errorf("error in [statesync] section: %w", err)
|
return fmt.Errorf("error in [statesync] section: %w", err)
|
||||||
}
|
}
|
||||||
if err := cfg.FastSync.ValidateBasic(); err != nil {
|
if err := cfg.BlockSync.ValidateBasic(); err != nil {
|
||||||
return fmt.Errorf("error in [fastsync] section: %w", err)
|
return fmt.Errorf("error in [fastsync] section: %w", err)
|
||||||
}
|
}
|
||||||
if err := cfg.Consensus.ValidateBasic(); err != nil {
|
if err := cfg.Consensus.ValidateBasic(); err != nil {
|
||||||
@@ -197,6 +197,7 @@ type BaseConfig struct { //nolint: maligned
|
|||||||
// If this node is many blocks behind the tip of the chain, FastSync
|
// If this node is many blocks behind the tip of the chain, FastSync
|
||||||
// allows them to catchup quickly by downloading blocks in parallel
|
// allows them to catchup quickly by downloading blocks in parallel
|
||||||
// and verifying their commits
|
// and verifying their commits
|
||||||
|
// TODO: This should be moved to the blocksync config
|
||||||
FastSyncMode bool `mapstructure:"fast-sync"`
|
FastSyncMode bool `mapstructure:"fast-sync"`
|
||||||
|
|
||||||
// Database backend: goleveldb | cleveldb | boltdb | rocksdb
|
// Database backend: goleveldb | cleveldb | boltdb | rocksdb
|
||||||
@@ -446,6 +447,7 @@ type RPCConfig struct {
|
|||||||
|
|
||||||
// TCP or UNIX socket address for the gRPC server to listen on
|
// TCP or UNIX socket address for the gRPC server to listen on
|
||||||
// NOTE: This server only supports /broadcast_tx_commit
|
// NOTE: This server only supports /broadcast_tx_commit
|
||||||
|
// Deprecated: gRPC in the RPC layer of Tendermint will be removed in 0.36.
|
||||||
GRPCListenAddress string `mapstructure:"grpc-laddr"`
|
GRPCListenAddress string `mapstructure:"grpc-laddr"`
|
||||||
|
|
||||||
// Maximum number of simultaneous connections.
|
// Maximum number of simultaneous connections.
|
||||||
@@ -453,6 +455,7 @@ type RPCConfig struct {
|
|||||||
// If you want to accept a larger number than the default, make sure
|
// If you want to accept a larger number than the default, make sure
|
||||||
// you increase your OS limits.
|
// you increase your OS limits.
|
||||||
// 0 - unlimited.
|
// 0 - unlimited.
|
||||||
|
// Deprecated: gRPC in the RPC layer of Tendermint will be removed in 0.36.
|
||||||
GRPCMaxOpenConnections int `mapstructure:"grpc-max-open-connections"`
|
GRPCMaxOpenConnections int `mapstructure:"grpc-max-open-connections"`
|
||||||
|
|
||||||
// Activate unsafe RPC commands like /dial-persistent-peers and /unsafe-flush-mempool
|
// Activate unsafe RPC commands like /dial-persistent-peers and /unsafe-flush-mempool
|
||||||
@@ -691,13 +694,14 @@ type P2PConfig struct { //nolint: maligned
|
|||||||
// Force dial to fail
|
// Force dial to fail
|
||||||
TestDialFail bool `mapstructure:"test-dial-fail"`
|
TestDialFail bool `mapstructure:"test-dial-fail"`
|
||||||
|
|
||||||
// DisableLegacy is used mostly for testing to enable or disable the legacy
|
// UseLegacy enables the "legacy" P2P implementation and
|
||||||
// P2P stack.
|
// disables the newer default implementation. This flag will
|
||||||
DisableLegacy bool `mapstructure:"disable-legacy"`
|
// be removed in a future release.
|
||||||
|
UseLegacy bool `mapstructure:"use-legacy"`
|
||||||
|
|
||||||
// Makes it possible to configure which queue backend the p2p
|
// Makes it possible to configure which queue backend the p2p
|
||||||
// layer uses. Options are: "fifo", "priority" and "wdrr",
|
// layer uses. Options are: "fifo", "priority" and "wdrr",
|
||||||
// with the default being "fifo".
|
// with the default being "priority".
|
||||||
QueueType string `mapstructure:"queue-type"`
|
QueueType string `mapstructure:"queue-type"`
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -729,6 +733,7 @@ func DefaultP2PConfig() *P2PConfig {
|
|||||||
DialTimeout: 3 * time.Second,
|
DialTimeout: 3 * time.Second,
|
||||||
TestDialFail: false,
|
TestDialFail: false,
|
||||||
QueueType: "priority",
|
QueueType: "priority",
|
||||||
|
UseLegacy: false,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -782,25 +787,47 @@ type MempoolConfig struct {
|
|||||||
RootDir string `mapstructure:"home"`
|
RootDir string `mapstructure:"home"`
|
||||||
Recheck bool `mapstructure:"recheck"`
|
Recheck bool `mapstructure:"recheck"`
|
||||||
Broadcast bool `mapstructure:"broadcast"`
|
Broadcast bool `mapstructure:"broadcast"`
|
||||||
|
|
||||||
// Maximum number of transactions in the mempool
|
// Maximum number of transactions in the mempool
|
||||||
Size int `mapstructure:"size"`
|
Size int `mapstructure:"size"`
|
||||||
|
|
||||||
// Limit the total size of all txs in the mempool.
|
// Limit the total size of all txs in the mempool.
|
||||||
// This only accounts for raw transactions (e.g. given 1MB transactions and
|
// This only accounts for raw transactions (e.g. given 1MB transactions and
|
||||||
// max-txs-bytes=5MB, mempool will only accept 5 transactions).
|
// max-txs-bytes=5MB, mempool will only accept 5 transactions).
|
||||||
MaxTxsBytes int64 `mapstructure:"max-txs-bytes"`
|
MaxTxsBytes int64 `mapstructure:"max-txs-bytes"`
|
||||||
|
|
||||||
// Size of the cache (used to filter transactions we saw earlier) in transactions
|
// Size of the cache (used to filter transactions we saw earlier) in transactions
|
||||||
CacheSize int `mapstructure:"cache-size"`
|
CacheSize int `mapstructure:"cache-size"`
|
||||||
|
|
||||||
// Do not remove invalid transactions from the cache (default: false)
|
// Do not remove invalid transactions from the cache (default: false)
|
||||||
// Set to true if it's not possible for any invalid transaction to become
|
// Set to true if it's not possible for any invalid transaction to become
|
||||||
// valid again in the future.
|
// valid again in the future.
|
||||||
KeepInvalidTxsInCache bool `mapstructure:"keep-invalid-txs-in-cache"`
|
KeepInvalidTxsInCache bool `mapstructure:"keep-invalid-txs-in-cache"`
|
||||||
|
|
||||||
// Maximum size of a single transaction
|
// Maximum size of a single transaction
|
||||||
// NOTE: the max size of a tx transmitted over the network is {max-tx-bytes}.
|
// NOTE: the max size of a tx transmitted over the network is {max-tx-bytes}.
|
||||||
MaxTxBytes int `mapstructure:"max-tx-bytes"`
|
MaxTxBytes int `mapstructure:"max-tx-bytes"`
|
||||||
|
|
||||||
// Maximum size of a batch of transactions to send to a peer
|
// Maximum size of a batch of transactions to send to a peer
|
||||||
// Including space needed by encoding (one varint per transaction).
|
// Including space needed by encoding (one varint per transaction).
|
||||||
// XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796
|
// XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796
|
||||||
MaxBatchBytes int `mapstructure:"max-batch-bytes"`
|
MaxBatchBytes int `mapstructure:"max-batch-bytes"`
|
||||||
|
|
||||||
|
// TTLDuration, if non-zero, defines the maximum amount of time a transaction
|
||||||
|
// can exist for in the mempool.
|
||||||
|
//
|
||||||
|
// Note, if TTLNumBlocks is also defined, a transaction will be removed if it
|
||||||
|
// has existed in the mempool at least TTLNumBlocks number of blocks or if it's
|
||||||
|
// insertion time into the mempool is beyond TTLDuration.
|
||||||
|
TTLDuration time.Duration `mapstructure:"ttl-duration"`
|
||||||
|
|
||||||
|
// TTLNumBlocks, if non-zero, defines the maximum number of blocks a transaction
|
||||||
|
// can exist for in the mempool.
|
||||||
|
//
|
||||||
|
// Note, if TTLDuration is also defined, a transaction will be removed if it
|
||||||
|
// has existed in the mempool at least TTLNumBlocks number of blocks or if
|
||||||
|
// it's insertion time into the mempool is beyond TTLDuration.
|
||||||
|
TTLNumBlocks int64 `mapstructure:"ttl-num-blocks"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// DefaultMempoolConfig returns a default configuration for the Tendermint mempool.
|
// DefaultMempoolConfig returns a default configuration for the Tendermint mempool.
|
||||||
@@ -811,10 +838,12 @@ func DefaultMempoolConfig() *MempoolConfig {
|
|||||||
Broadcast: true,
|
Broadcast: true,
|
||||||
// Each signature verification takes .5ms, Size reduced until we implement
|
// Each signature verification takes .5ms, Size reduced until we implement
|
||||||
// ABCI Recheck
|
// ABCI Recheck
|
||||||
Size: 5000,
|
Size: 5000,
|
||||||
MaxTxsBytes: 1024 * 1024 * 1024, // 1GB
|
MaxTxsBytes: 1024 * 1024 * 1024, // 1GB
|
||||||
CacheSize: 10000,
|
CacheSize: 10000,
|
||||||
MaxTxBytes: 1024 * 1024, // 1MB
|
MaxTxBytes: 1024 * 1024, // 1MB
|
||||||
|
TTLDuration: 0 * time.Second,
|
||||||
|
TTLNumBlocks: 0,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -840,6 +869,13 @@ func (cfg *MempoolConfig) ValidateBasic() error {
|
|||||||
if cfg.MaxTxBytes < 0 {
|
if cfg.MaxTxBytes < 0 {
|
||||||
return errors.New("max-tx-bytes can't be negative")
|
return errors.New("max-tx-bytes can't be negative")
|
||||||
}
|
}
|
||||||
|
if cfg.TTLDuration < 0 {
|
||||||
|
return errors.New("ttl-duration can't be negative")
|
||||||
|
}
|
||||||
|
if cfg.TTLNumBlocks < 0 {
|
||||||
|
return errors.New("ttl-num-blocks can't be negative")
|
||||||
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -878,7 +914,7 @@ func DefaultStateSyncConfig() *StateSyncConfig {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestFastSyncConfig returns a default configuration for the state sync service
|
// TestStateSyncConfig returns a default configuration for the state sync service
|
||||||
func TestStateSyncConfig() *StateSyncConfig {
|
func TestStateSyncConfig() *StateSyncConfig {
|
||||||
return DefaultStateSyncConfig()
|
return DefaultStateSyncConfig()
|
||||||
}
|
}
|
||||||
@@ -934,34 +970,33 @@ func (cfg *StateSyncConfig) ValidateBasic() error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
//-----------------------------------------------------------------------------
|
//-----------------------------------------------------------------------------
|
||||||
// FastSyncConfig
|
|
||||||
|
|
||||||
// FastSyncConfig defines the configuration for the Tendermint fast sync service
|
// BlockSyncConfig (formerly known as FastSync) defines the configuration for the Tendermint block sync service
|
||||||
type FastSyncConfig struct {
|
type BlockSyncConfig struct {
|
||||||
Version string `mapstructure:"version"`
|
Version string `mapstructure:"version"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// DefaultFastSyncConfig returns a default configuration for the fast sync service
|
// DefaultBlockSyncConfig returns a default configuration for the block sync service
|
||||||
func DefaultFastSyncConfig() *FastSyncConfig {
|
func DefaultBlockSyncConfig() *BlockSyncConfig {
|
||||||
return &FastSyncConfig{
|
return &BlockSyncConfig{
|
||||||
Version: BlockchainV0,
|
Version: BlockSyncV0,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestFastSyncConfig returns a default configuration for the fast sync.
|
// TestBlockSyncConfig returns a default configuration for the block sync.
|
||||||
func TestFastSyncConfig() *FastSyncConfig {
|
func TestBlockSyncConfig() *BlockSyncConfig {
|
||||||
return DefaultFastSyncConfig()
|
return DefaultBlockSyncConfig()
|
||||||
}
|
}
|
||||||
|
|
||||||
// ValidateBasic performs basic validation.
|
// ValidateBasic performs basic validation.
|
||||||
func (cfg *FastSyncConfig) ValidateBasic() error {
|
func (cfg *BlockSyncConfig) ValidateBasic() error {
|
||||||
switch cfg.Version {
|
switch cfg.Version {
|
||||||
case BlockchainV0:
|
case BlockSyncV0:
|
||||||
return nil
|
|
||||||
case BlockchainV2:
|
|
||||||
return nil
|
return nil
|
||||||
|
case BlockSyncV2:
|
||||||
|
return errors.New("blocksync version v2 is no longer supported. Please use v0")
|
||||||
default:
|
default:
|
||||||
return fmt.Errorf("unknown fastsync version %s", cfg.Version)
|
return fmt.Errorf("unknown blocksync version %s", cfg.Version)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -125,13 +125,13 @@ func TestStateSyncConfigValidateBasic(t *testing.T) {
|
|||||||
require.NoError(t, cfg.ValidateBasic())
|
require.NoError(t, cfg.ValidateBasic())
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestFastSyncConfigValidateBasic(t *testing.T) {
|
func TestBlockSyncConfigValidateBasic(t *testing.T) {
|
||||||
cfg := TestFastSyncConfig()
|
cfg := TestBlockSyncConfig()
|
||||||
assert.NoError(t, cfg.ValidateBasic())
|
assert.NoError(t, cfg.ValidateBasic())
|
||||||
|
|
||||||
// tamper with version
|
// tamper with version
|
||||||
cfg.Version = "v2"
|
cfg.Version = "v2"
|
||||||
assert.NoError(t, cfg.ValidateBasic())
|
assert.Error(t, cfg.ValidateBasic())
|
||||||
|
|
||||||
cfg.Version = "invalid"
|
cfg.Version = "invalid"
|
||||||
assert.Error(t, cfg.ValidateBasic())
|
assert.Error(t, cfg.ValidateBasic())
|
||||||
|
|||||||
@@ -200,6 +200,7 @@ cors-allowed-headers = [{{ range .RPC.CORSAllowedHeaders }}{{ printf "%q, " . }}
|
|||||||
|
|
||||||
# TCP or UNIX socket address for the gRPC server to listen on
|
# TCP or UNIX socket address for the gRPC server to listen on
|
||||||
# NOTE: This server only supports /broadcast_tx_commit
|
# NOTE: This server only supports /broadcast_tx_commit
|
||||||
|
# Deprecated gRPC in the RPC layer of Tendermint will be deprecated in 0.36.
|
||||||
grpc-laddr = "{{ .RPC.GRPCListenAddress }}"
|
grpc-laddr = "{{ .RPC.GRPCListenAddress }}"
|
||||||
|
|
||||||
# Maximum number of simultaneous connections.
|
# Maximum number of simultaneous connections.
|
||||||
@@ -209,6 +210,7 @@ grpc-laddr = "{{ .RPC.GRPCListenAddress }}"
|
|||||||
# 0 - unlimited.
|
# 0 - unlimited.
|
||||||
# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
|
# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
|
||||||
# 1024 - 40 - 10 - 50 = 924 = ~900
|
# 1024 - 40 - 10 - 50 = 924 = ~900
|
||||||
|
# Deprecated gRPC in the RPC layer of Tendermint will be deprecated in 0.36.
|
||||||
grpc-max-open-connections = {{ .RPC.GRPCMaxOpenConnections }}
|
grpc-max-open-connections = {{ .RPC.GRPCMaxOpenConnections }}
|
||||||
|
|
||||||
# Activate unsafe RPC commands like /dial-seeds and /unsafe-flush-mempool
|
# Activate unsafe RPC commands like /dial-seeds and /unsafe-flush-mempool
|
||||||
@@ -269,7 +271,7 @@ pprof-laddr = "{{ .RPC.PprofListenAddress }}"
|
|||||||
[p2p]
|
[p2p]
|
||||||
|
|
||||||
# Enable the new p2p layer.
|
# Enable the new p2p layer.
|
||||||
disable-legacy = {{ .P2P.DisableLegacy }}
|
use-legacy = {{ .P2P.UseLegacy }}
|
||||||
|
|
||||||
# Select the p2p internal queue
|
# Select the p2p internal queue
|
||||||
queue-type = "{{ .P2P.QueueType }}"
|
queue-type = "{{ .P2P.QueueType }}"
|
||||||
@@ -397,6 +399,22 @@ max-tx-bytes = {{ .Mempool.MaxTxBytes }}
|
|||||||
# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796
|
# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796
|
||||||
max-batch-bytes = {{ .Mempool.MaxBatchBytes }}
|
max-batch-bytes = {{ .Mempool.MaxBatchBytes }}
|
||||||
|
|
||||||
|
# ttl-duration, if non-zero, defines the maximum amount of time a transaction
|
||||||
|
# can exist for in the mempool.
|
||||||
|
#
|
||||||
|
# Note, if ttl-num-blocks is also defined, a transaction will be removed if it
|
||||||
|
# has existed in the mempool at least ttl-num-blocks number of blocks or if it's
|
||||||
|
# insertion time into the mempool is beyond ttl-duration.
|
||||||
|
ttl-duration = "{{ .Mempool.TTLDuration }}"
|
||||||
|
|
||||||
|
# ttl-num-blocks, if non-zero, defines the maximum number of blocks a transaction
|
||||||
|
# can exist for in the mempool.
|
||||||
|
#
|
||||||
|
# Note, if ttl-duration is also defined, a transaction will be removed if it
|
||||||
|
# has existed in the mempool at least ttl-num-blocks number of blocks or if
|
||||||
|
# it's insertion time into the mempool is beyond ttl-duration.
|
||||||
|
ttl-num-blocks = {{ .Mempool.TTLNumBlocks }}
|
||||||
|
|
||||||
#######################################################
|
#######################################################
|
||||||
### State Sync Configuration Options ###
|
### State Sync Configuration Options ###
|
||||||
#######################################################
|
#######################################################
|
||||||
@@ -434,14 +452,14 @@ chunk-request-timeout = "{{ .StateSync.ChunkRequestTimeout }}"
|
|||||||
fetchers = "{{ .StateSync.Fetchers }}"
|
fetchers = "{{ .StateSync.Fetchers }}"
|
||||||
|
|
||||||
#######################################################
|
#######################################################
|
||||||
### Fast Sync Configuration Connections ###
|
### Block Sync Configuration Connections ###
|
||||||
#######################################################
|
#######################################################
|
||||||
[fastsync]
|
[fastsync]
|
||||||
|
|
||||||
# Fast Sync version to use:
|
# Block Sync version to use:
|
||||||
# 1) "v0" (default) - the legacy fast sync implementation
|
# 1) "v0" (default) - the legacy block sync implementation
|
||||||
# 2) "v2" - complete redesign of v0, optimized for testability & readability
|
# 2) "v2" - DEPRECATED, please use v0
|
||||||
version = "{{ .FastSync.Version }}"
|
version = "{{ .BlockSync.Version }}"
|
||||||
|
|
||||||
#######################################################
|
#######################################################
|
||||||
### Consensus Configuration Options ###
|
### Consensus Configuration Options ###
|
||||||
|
|||||||
@@ -204,7 +204,10 @@ func (spn *ProofNode) FlattenAunts() [][]byte {
|
|||||||
case spn.Right != nil:
|
case spn.Right != nil:
|
||||||
innerHashes = append(innerHashes, spn.Right.Hash)
|
innerHashes = append(innerHashes, spn.Right.Hash)
|
||||||
default:
|
default:
|
||||||
break
|
// FIXME(fromberger): Per the documentation above, exactly one of
|
||||||
|
// these fields should be set. If that is true, this should probably
|
||||||
|
// be a panic since it violates the invariant. If not, when can it
|
||||||
|
// be OK to have no siblings? Does this occur at the leaves?
|
||||||
}
|
}
|
||||||
spn = spn.Parent
|
spn = spn.Parent
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -13,7 +13,7 @@ import (
|
|||||||
tmjson "github.com/tendermint/tendermint/libs/json"
|
tmjson "github.com/tendermint/tendermint/libs/json"
|
||||||
|
|
||||||
// necessary for Bitcoin address format
|
// necessary for Bitcoin address format
|
||||||
"golang.org/x/crypto/ripemd160" // nolint: staticcheck
|
"golang.org/x/crypto/ripemd160" // nolint
|
||||||
)
|
)
|
||||||
|
|
||||||
//-------------------------------------
|
//-------------------------------------
|
||||||
|
|||||||
@@ -31,7 +31,6 @@ Available Commands:
|
|||||||
check_tx Validate a tx
|
check_tx Validate a tx
|
||||||
commit Commit the application state and return the Merkle root hash
|
commit Commit the application state and return the Merkle root hash
|
||||||
console Start an interactive abci console for multiple commands
|
console Start an interactive abci console for multiple commands
|
||||||
counter ABCI demo example
|
|
||||||
deliver_tx Deliver a new tx to the application
|
deliver_tx Deliver a new tx to the application
|
||||||
kvstore ABCI demo example
|
kvstore ABCI demo example
|
||||||
echo Have the application echo a message
|
echo Have the application echo a message
|
||||||
@@ -214,137 +213,9 @@ we do `deliver_tx "abc=efg"` it will store `(abc, efg)`.
|
|||||||
Similarly, you could put the commands in a file and run
|
Similarly, you could put the commands in a file and run
|
||||||
`abci-cli --verbose batch < myfile`.
|
`abci-cli --verbose batch < myfile`.
|
||||||
|
|
||||||
## Counter - Another Example
|
|
||||||
|
|
||||||
Now that we've got the hang of it, let's try another application, the
|
|
||||||
"counter" app.
|
|
||||||
|
|
||||||
Like the kvstore app, its code can be found
|
|
||||||
[here](https://github.com/tendermint/tendermint/blob/master/abci/cmd/abci-cli/abci-cli.go)
|
|
||||||
and looks like:
|
|
||||||
|
|
||||||
```go
|
|
||||||
func cmdCounter(cmd *cobra.Command, args []string) error {
|
|
||||||
|
|
||||||
app := counter.NewCounterApplication(flagSerial)
|
|
||||||
|
|
||||||
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
|
|
||||||
|
|
||||||
// Start the listener
|
|
||||||
srv, err := server.NewServer(flagAddrC, flagAbci, app)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
srv.SetLogger(logger.With("module", "abci-server"))
|
|
||||||
if err := srv.Start(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Stop upon receiving SIGTERM or CTRL-C.
|
|
||||||
tmos.TrapSignal(logger, func() {
|
|
||||||
// Cleanup
|
|
||||||
srv.Stop()
|
|
||||||
})
|
|
||||||
|
|
||||||
// Run forever.
|
|
||||||
select {}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
The counter app doesn't use a Merkle tree, it just counts how many times
|
|
||||||
we've sent a transaction, asked for a hash, or committed the state. The
|
|
||||||
result of `commit` is just the number of transactions sent.
|
|
||||||
|
|
||||||
This application has two modes: `serial=off` and `serial=on`.
|
|
||||||
|
|
||||||
When `serial=on`, transactions must be a big-endian encoded incrementing
|
|
||||||
integer, starting at 0.
|
|
||||||
|
|
||||||
If `serial=off`, there are no restrictions on transactions.
|
|
||||||
|
|
||||||
We can toggle the value of `serial` using the `set_option` ABCI message.
|
|
||||||
|
|
||||||
When `serial=on`, some transactions are invalid. In a live blockchain,
|
|
||||||
transactions collect in memory before they are committed into blocks. To
|
|
||||||
avoid wasting resources on invalid transactions, ABCI provides the
|
|
||||||
`check_tx` message, which application developers can use to accept or
|
|
||||||
reject transactions, before they are stored in memory or gossipped to
|
|
||||||
other peers.
|
|
||||||
|
|
||||||
In this instance of the counter app, `check_tx` only allows transactions
|
|
||||||
whose integer is greater than the last committed one.
|
|
||||||
|
|
||||||
Let's kill the console and the kvstore application, and start the
|
|
||||||
counter app:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
abci-cli counter
|
|
||||||
```
|
|
||||||
|
|
||||||
In another window, start the `abci-cli console`:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
|
|
||||||
> check_tx 0x00
|
|
||||||
-> code: OK
|
|
||||||
|
|
||||||
> check_tx 0xff
|
|
||||||
-> code: OK
|
|
||||||
|
|
||||||
> deliver_tx 0x00
|
|
||||||
-> code: OK
|
|
||||||
|
|
||||||
> check_tx 0x00
|
|
||||||
-> code: BadNonce
|
|
||||||
-> log: Invalid nonce. Expected >= 1, got 0
|
|
||||||
|
|
||||||
> deliver_tx 0x01
|
|
||||||
-> code: OK
|
|
||||||
|
|
||||||
> deliver_tx 0x04
|
|
||||||
-> code: BadNonce
|
|
||||||
-> log: Invalid nonce. Expected 2, got 4
|
|
||||||
|
|
||||||
> info
|
|
||||||
-> code: OK
|
|
||||||
-> data: {"hashes":0,"txs":2}
|
|
||||||
-> data.hex: 0x7B22686173686573223A302C22747873223A327D
|
|
||||||
```
|
|
||||||
|
|
||||||
This is a very simple application, but between `counter` and `kvstore`,
|
|
||||||
its easy to see how you can build out arbitrary application states on
|
|
||||||
top of the ABCI. [Hyperledger's
|
|
||||||
Burrow](https://github.com/hyperledger/burrow) also runs atop ABCI,
|
|
||||||
bringing with it Ethereum-like accounts, the Ethereum virtual-machine,
|
|
||||||
Monax's permissioning scheme, and native contracts extensions.
|
|
||||||
|
|
||||||
But the ultimate flexibility comes from being able to write the
|
|
||||||
application easily in any language.
|
|
||||||
|
|
||||||
We have implemented the counter in a number of languages [see the
|
|
||||||
example directory](https://github.com/tendermint/tendermint/tree/master/abci/example).
|
|
||||||
|
|
||||||
To run the Node.js version, fist download & install [the Javascript ABCI server](https://github.com/tendermint/js-abci):
|
|
||||||
|
|
||||||
```sh
|
|
||||||
git clone https://github.com/tendermint/js-abci.git
|
|
||||||
cd js-abci
|
|
||||||
npm install abci
|
|
||||||
```
|
|
||||||
|
|
||||||
Now you can start the app:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
node example/counter.js
|
|
||||||
```
|
|
||||||
|
|
||||||
(you'll have to kill the other counter application process). In another
|
|
||||||
window, run the console and those previous ABCI commands. You should get
|
|
||||||
the same results as for the Go version.
|
|
||||||
|
|
||||||
## Bounties
|
## Bounties
|
||||||
|
|
||||||
Want to write the counter app in your favorite language?! We'd be happy
|
Want to write an app in your favorite language?! We'd be happy
|
||||||
to add you to our [ecosystem](https://github.com/tendermint/awesome#ecosystem)!
|
to add you to our [ecosystem](https://github.com/tendermint/awesome#ecosystem)!
|
||||||
See [funding](https://github.com/interchainio/funding) opportunities from the
|
See [funding](https://github.com/interchainio/funding) opportunities from the
|
||||||
[Interchain Foundation](https://interchain.io/) for implementations in new languages and more.
|
[Interchain Foundation](https://interchain.io/) for implementations in new languages and more.
|
||||||
|
|||||||
@@ -37,8 +37,8 @@ cd $GOPATH/src/github.com/tendermint/tendermint
|
|||||||
make install_abci
|
make install_abci
|
||||||
```
|
```
|
||||||
|
|
||||||
Now you should have the `abci-cli` installed; you'll see a couple of
|
Now you should have the `abci-cli` installed; you'll notice the `kvstore`
|
||||||
commands (`counter` and `kvstore`) that are example applications written
|
command, an example application written
|
||||||
in Go. See below for an application written in JavaScript.
|
in Go. See below for an application written in JavaScript.
|
||||||
|
|
||||||
Now, let's run some apps!
|
Now, let's run some apps!
|
||||||
@@ -165,92 +165,6 @@ curl -s 'localhost:26657/abci_query?data="name"'
|
|||||||
Try some other transactions and queries to make sure everything is
|
Try some other transactions and queries to make sure everything is
|
||||||
working!
|
working!
|
||||||
|
|
||||||
## Counter - Another Example
|
|
||||||
|
|
||||||
Now that we've got the hang of it, let's try another application, the
|
|
||||||
`counter` app.
|
|
||||||
|
|
||||||
The counter app doesn't use a Merkle tree, it just counts how many times
|
|
||||||
we've sent a transaction, or committed the state.
|
|
||||||
|
|
||||||
This application has two modes: `serial=off` and `serial=on`.
|
|
||||||
|
|
||||||
When `serial=on`, transactions must be a big-endian encoded incrementing
|
|
||||||
integer, starting at 0.
|
|
||||||
|
|
||||||
If `serial=off`, there are no restrictions on transactions.
|
|
||||||
|
|
||||||
In a live blockchain, transactions collect in memory before they are
|
|
||||||
committed into blocks. To avoid wasting resources on invalid
|
|
||||||
transactions, ABCI provides the `CheckTx` message, which application
|
|
||||||
developers can use to accept or reject transactions, before they are
|
|
||||||
stored in memory or gossipped to other peers.
|
|
||||||
|
|
||||||
In this instance of the counter app, with `serial=on`, `CheckTx` only
|
|
||||||
allows transactions whose integer is greater than the last committed
|
|
||||||
one.
|
|
||||||
|
|
||||||
Let's kill the previous instance of `tendermint` and the `kvstore`
|
|
||||||
application, and start the counter app. We can enable `serial=on` with a
|
|
||||||
flag:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
abci-cli counter --serial
|
|
||||||
```
|
|
||||||
|
|
||||||
In another window, reset then start Tendermint:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
tendermint unsafe_reset_all
|
|
||||||
tendermint start
|
|
||||||
```
|
|
||||||
|
|
||||||
Once again, you can see the blocks streaming by. Let's send some
|
|
||||||
transactions. Since we have set `serial=on`, the first transaction must
|
|
||||||
be the number `0`:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
curl localhost:26657/broadcast_tx_commit?tx=0x00
|
|
||||||
```
|
|
||||||
|
|
||||||
Note the empty (hence successful) response. The next transaction must be
|
|
||||||
the number `1`. If instead, we try to send a `5`, we get an error:
|
|
||||||
|
|
||||||
```json
|
|
||||||
> curl localhost:26657/broadcast_tx_commit?tx=0x05
|
|
||||||
{
|
|
||||||
"jsonrpc": "2.0",
|
|
||||||
"id": "",
|
|
||||||
"result": {
|
|
||||||
"check_tx": {},
|
|
||||||
"deliver_tx": {
|
|
||||||
"code": 2,
|
|
||||||
"log": "Invalid nonce. Expected 1, got 5"
|
|
||||||
},
|
|
||||||
"hash": "33B93DFF98749B0D6996A70F64071347060DC19C",
|
|
||||||
"height": 34
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
But if we send a `1`, it works again:
|
|
||||||
|
|
||||||
```json
|
|
||||||
> curl localhost:26657/broadcast_tx_commit?tx=0x01
|
|
||||||
{
|
|
||||||
"jsonrpc": "2.0",
|
|
||||||
"id": "",
|
|
||||||
"result": {
|
|
||||||
"check_tx": {},
|
|
||||||
"deliver_tx": {},
|
|
||||||
"hash": "F17854A977F6FA7EEA1BD758E296710B86F72F3D",
|
|
||||||
"height": 60
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
For more details on the `broadcast_tx` API, see [the guide on using
|
|
||||||
Tendermint](../tendermint-core/using-tendermint.md).
|
|
||||||
|
|
||||||
## CounterJS - Example in Another Language
|
## CounterJS - Example in Another Language
|
||||||
|
|
||||||
|
|||||||
@@ -31,24 +31,61 @@ For example:
|
|||||||
|
|
||||||
would be equal to the composite key of `jack.account.number`.
|
would be equal to the composite key of `jack.account.number`.
|
||||||
|
|
||||||
Let's take a look at the `[tx_index]` config section:
|
By default, Tendermint will index all transactions by their respective hashes
|
||||||
|
and height and blocks by their height.
|
||||||
|
|
||||||
|
## Configuration
|
||||||
|
|
||||||
|
Operators can configure indexing via the `[tx_index]` section. The `indexer`
|
||||||
|
field takes a series of supported indexers. If `null` is included, indexing will
|
||||||
|
be turned off regardless of other values provided.
|
||||||
|
|
||||||
```toml
|
```toml
|
||||||
##### transactions indexer configuration options #####
|
[tx-index]
|
||||||
[tx_index]
|
|
||||||
|
|
||||||
# What indexer to use for transactions
|
# The backend database list to back the indexer.
|
||||||
|
# If list contains null, meaning no indexer service will be used.
|
||||||
|
#
|
||||||
|
# The application will set which txs to index. In some cases a node operator will be able
|
||||||
|
# to decide which txs to index based on configuration set in the application.
|
||||||
#
|
#
|
||||||
# Options:
|
# Options:
|
||||||
# 1) "null"
|
# 1) "null"
|
||||||
# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
|
# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
|
||||||
indexer = "kv"
|
# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed.
|
||||||
|
# 3) "psql" - the indexer services backed by PostgreSQL.
|
||||||
|
# indexer = []
|
||||||
```
|
```
|
||||||
|
|
||||||
By default, Tendermint will index all transactions by their respective hashes
|
### Supported Indexers
|
||||||
and height and blocks by their height.
|
|
||||||
|
|
||||||
You can turn off indexing completely by setting `tx_index` to `null`.
|
#### KV
|
||||||
|
|
||||||
|
The `kv` indexer type is an embedded key-value store supported by the main
|
||||||
|
underling Tendermint database. Using the `kv` indexer type allows you to query
|
||||||
|
for block and transaction events directly against Tendermint's RPC. However, the
|
||||||
|
query syntax is limited and so this indexer type might be deprecated or removed
|
||||||
|
entirely in the future.
|
||||||
|
|
||||||
|
#### PostgreSQL
|
||||||
|
|
||||||
|
The `psql` indexer type allows an operator to enable block and transaction event
|
||||||
|
indexing by proxying it to an external PostgreSQL instance allowing for the events
|
||||||
|
to be stored in relational models. Since the events are stored in a RDBMS, operators
|
||||||
|
can leverage SQL to perform a series of rich and complex queries that are not
|
||||||
|
supported by the `kv` indexer type. Since operators can leverage SQL directly,
|
||||||
|
searching is not enabled for the `psql` indexer type via Tendermint's RPC -- any
|
||||||
|
such query will fail.
|
||||||
|
|
||||||
|
Note, the SQL schema is stored in `state/indexer/sink/psql/schema.sql` and operators
|
||||||
|
must explicitly create the relations prior to starting Tendermint and enabling
|
||||||
|
the `psql` indexer type.
|
||||||
|
|
||||||
|
Example:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
$ psql ... -f state/indexer/sink/psql/schema.sql
|
||||||
|
```
|
||||||
|
|
||||||
## Default Indexes
|
## Default Indexes
|
||||||
|
|
||||||
|
|||||||
@@ -61,7 +61,7 @@ Note the context/background should be written in the present tense.
|
|||||||
- [ADR-053: State-Sync-Prototype](./adr-053-state-sync-prototype.md)
|
- [ADR-053: State-Sync-Prototype](./adr-053-state-sync-prototype.md)
|
||||||
- [ADR-054: Crypto-Encoding-2](./adr-054-crypto-encoding-2.md)
|
- [ADR-054: Crypto-Encoding-2](./adr-054-crypto-encoding-2.md)
|
||||||
- [ADR-055: Protobuf-Design](./adr-055-protobuf-design.md)
|
- [ADR-055: Protobuf-Design](./adr-055-protobuf-design.md)
|
||||||
- [ADR-056: Light-Client-Amnesia-Attacks](./adr-056-light-client-amnesia-attacks)
|
- [ADR-056: Light-Client-Amnesia-Attacks](./adr-056-light-client-amnesia-attacks.md)
|
||||||
- [ADR-059: Evidence-Composition-and-Lifecycle](./adr-059-evidence-composition-and-lifecycle.md)
|
- [ADR-059: Evidence-Composition-and-Lifecycle](./adr-059-evidence-composition-and-lifecycle.md)
|
||||||
- [ADR-062: P2P-Architecture](./adr-062-p2p-architecture.md)
|
- [ADR-062: P2P-Architecture](./adr-062-p2p-architecture.md)
|
||||||
- [ADR-063: Privval-gRPC](./adr-063-privval-grpc.md)
|
- [ADR-063: Privval-gRPC](./adr-063-privval-grpc.md)
|
||||||
@@ -97,3 +97,6 @@ Note the context/background should be written in the present tense.
|
|||||||
- [ADR-041: Proposer-Selection-via-ABCI](./adr-041-proposer-selection-via-abci.md)
|
- [ADR-041: Proposer-Selection-via-ABCI](./adr-041-proposer-selection-via-abci.md)
|
||||||
- [ADR-045: ABCI-Evidence](./adr-045-abci-evidence.md)
|
- [ADR-045: ABCI-Evidence](./adr-045-abci-evidence.md)
|
||||||
- [ADR-057: RPC](./adr-057-RPC.md)
|
- [ADR-057: RPC](./adr-057-RPC.md)
|
||||||
|
- [ADR-069: Node Initialization](./adr-069-flexible-node-initialization.md)
|
||||||
|
- [ADR-071: Proposer-Based Timestamps](adr-071-proposer-based-timestamps.md)
|
||||||
|
- [ADR-072: Restore Requests for Comments](./adr-072-request-for-comments.md)
|
||||||
|
|||||||
@@ -97,8 +97,7 @@ design for tendermint was originally tracked in
|
|||||||
[#828](https://github.com/tendermint/tendermint/issues/828).
|
[#828](https://github.com/tendermint/tendermint/issues/828).
|
||||||
|
|
||||||
#### Eager StateSync
|
#### Eager StateSync
|
||||||
Warp Sync as implemented in Parity
|
Warp Sync as implemented in OpenEthereum to rapidly
|
||||||
["Warp Sync"](https://wiki.parity.io/Warp-Sync-Snapshot-Format.html) to rapidly
|
|
||||||
download both blocks and state snapshots from peers. Data is carved into ~4MB
|
download both blocks and state snapshots from peers. Data is carved into ~4MB
|
||||||
chunks and snappy compressed. Hashes of snappy compressed chunks are stored in a
|
chunks and snappy compressed. Hashes of snappy compressed chunks are stored in a
|
||||||
manifest file which co-ordinates the state-sync. Obtaining a correct manifest
|
manifest file which co-ordinates the state-sync. Obtaining a correct manifest
|
||||||
@@ -234,5 +233,3 @@ Proposed
|
|||||||
[WIP General/Lazy State-Sync pseudo-spec](https://github.com/tendermint/tendermint/issues/3639) - Jae Proposal
|
[WIP General/Lazy State-Sync pseudo-spec](https://github.com/tendermint/tendermint/issues/3639) - Jae Proposal
|
||||||
[Warp Sync Implementation](https://github.com/tendermint/tendermint/pull/3594) - ackratos
|
[Warp Sync Implementation](https://github.com/tendermint/tendermint/pull/3594) - ackratos
|
||||||
[Chunk Proposal](https://github.com/tendermint/tendermint/pull/3799) - Bucky proposed
|
[Chunk Proposal](https://github.com/tendermint/tendermint/pull/3799) - Bucky proposed
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -119,7 +119,7 @@ network usage.
|
|||||||
---
|
---
|
||||||
|
|
||||||
Check out the formal specification
|
Check out the formal specification
|
||||||
[here](https://docs.tendermint.com/master/spec/consensus/light-client.html).
|
[here](https://github.com/tendermint/spec/tree/master/spec/light-client).
|
||||||
|
|
||||||
## Status
|
## Status
|
||||||
|
|
||||||
|
|||||||
@@ -18,7 +18,7 @@ graceful here, but that's for another day.
|
|||||||
|
|
||||||
It's possible to fool lite clients without there being a fork on the
|
It's possible to fool lite clients without there being a fork on the
|
||||||
main chain - so called Fork-Lite. See the
|
main chain - so called Fork-Lite. See the
|
||||||
[fork accountability](https://docs.tendermint.com/master/spec/consensus/fork-accountability.html)
|
[fork accountability](https://docs.tendermint.com/master/spec/light-client/accountability/)
|
||||||
document for more details. For a sequential lite client, this can happen via
|
document for more details. For a sequential lite client, this can happen via
|
||||||
equivocation or amnesia attacks. For a skipping lite client this can also happen
|
equivocation or amnesia attacks. For a skipping lite client this can also happen
|
||||||
via lunatic validator attacks. There must be some way for applications to punish
|
via lunatic validator attacks. There must be some way for applications to punish
|
||||||
|
|||||||
@@ -179,7 +179,7 @@ This then ends the process and the verify function that was called at the start
|
|||||||
the user.
|
the user.
|
||||||
|
|
||||||
For a detailed overview of how each of these three attacks can be conducted please refer to the
|
For a detailed overview of how each of these three attacks can be conducted please refer to the
|
||||||
[fork accountability spec]((https://github.com/tendermint/spec/blob/master/spec/consensus/light-client/accountability.md)).
|
[fork accountability spec](https://github.com/tendermint/spec/blob/master/spec/consensus/light-client/accountability.md).
|
||||||
|
|
||||||
## Full Node Verification
|
## Full Node Verification
|
||||||
|
|
||||||
|
|||||||
@@ -24,6 +24,7 @@
|
|||||||
- April 1, 2021: Initial Draft (@alexanderbez)
|
- April 1, 2021: Initial Draft (@alexanderbez)
|
||||||
- April 28, 2021: Specify search capabilities are only supported through the KV indexer (@marbar3778)
|
- April 28, 2021: Specify search capabilities are only supported through the KV indexer (@marbar3778)
|
||||||
- May 19, 2021: Update the SQL schema and the eventsink interface (@jayt106)
|
- May 19, 2021: Update the SQL schema and the eventsink interface (@jayt106)
|
||||||
|
- Aug 30, 2021: Update the SQL schema and the psql implementation (@creachadair)
|
||||||
|
|
||||||
## Status
|
## Status
|
||||||
|
|
||||||
@@ -145,163 +146,190 @@ The postgres eventsink will not support `tx_search`, `block_search`, `GetTxByHas
|
|||||||
```sql
|
```sql
|
||||||
-- Table Definition ----------------------------------------------
|
-- Table Definition ----------------------------------------------
|
||||||
|
|
||||||
CREATE TYPE block_event_type AS ENUM ('begin_block', 'end_block', '');
|
-- The blocks table records metadata about each block.
|
||||||
|
-- The block record does not include its events or transactions (see tx_results).
|
||||||
|
CREATE TABLE blocks (
|
||||||
|
rowid BIGSERIAL PRIMARY KEY,
|
||||||
|
|
||||||
CREATE TABLE block_events (
|
height BIGINT NOT NULL,
|
||||||
id SERIAL PRIMARY KEY,
|
chain_id VARCHAR NOT NULL,
|
||||||
key VARCHAR NOT NULL,
|
|
||||||
value VARCHAR NOT NULL,
|
-- When this block header was logged into the sink, in UTC.
|
||||||
height INTEGER NOT NULL,
|
created_at TIMESTAMPTZ NOT NULL,
|
||||||
type block_event_type,
|
|
||||||
created_at TIMESTAMPTZ NOT NULL,
|
UNIQUE (height, chain_id)
|
||||||
chain_id VARCHAR NOT NULL
|
|
||||||
);
|
);
|
||||||
|
|
||||||
|
-- Index blocks by height and chain, since we need to resolve block IDs when
|
||||||
|
-- indexing transaction records and transaction events.
|
||||||
|
CREATE INDEX idx_blocks_height_chain ON blocks(height, chain_id);
|
||||||
|
|
||||||
|
-- The tx_results table records metadata about transaction results. Note that
|
||||||
|
-- the events from a transaction are stored separately.
|
||||||
CREATE TABLE tx_results (
|
CREATE TABLE tx_results (
|
||||||
id SERIAL PRIMARY KEY,
|
rowid BIGSERIAL PRIMARY KEY,
|
||||||
tx_result BYTEA NOT NULL,
|
|
||||||
created_at TIMESTAMPTZ NOT NULL
|
-- The block to which this transaction belongs.
|
||||||
|
block_id BIGINT NOT NULL REFERENCES blocks(rowid),
|
||||||
|
-- The sequential index of the transaction within the block.
|
||||||
|
index INTEGER NOT NULL,
|
||||||
|
-- When this result record was logged into the sink, in UTC.
|
||||||
|
created_at TIMESTAMPTZ NOT NULL,
|
||||||
|
-- The hex-encoded hash of the transaction.
|
||||||
|
tx_hash VARCHAR NOT NULL,
|
||||||
|
-- The protobuf wire encoding of the TxResult message.
|
||||||
|
tx_result BYTEA NOT NULL,
|
||||||
|
|
||||||
|
UNIQUE (block_id, index)
|
||||||
);
|
);
|
||||||
|
|
||||||
CREATE TABLE tx_events (
|
-- The events table records events. All events (both block and transaction) are
|
||||||
id SERIAL PRIMARY KEY,
|
-- associated with a block ID; transaction events also have a transaction ID.
|
||||||
key VARCHAR NOT NULL,
|
CREATE TABLE events (
|
||||||
value VARCHAR NOT NULL,
|
rowid BIGSERIAL PRIMARY KEY,
|
||||||
height INTEGER NOT NULL,
|
|
||||||
hash VARCHAR NOT NULL,
|
-- The block and transaction this event belongs to.
|
||||||
tx_result_id SERIAL,
|
-- If tx_id is NULL, this is a block event.
|
||||||
created_at TIMESTAMPTZ NOT NULL,
|
block_id BIGINT NOT NULL REFERENCES blocks(rowid),
|
||||||
chain_id VARCHAR NOT NULL,
|
tx_id BIGINT NULL REFERENCES tx_results(rowid),
|
||||||
FOREIGN KEY (tx_result_id)
|
|
||||||
REFERENCES tx_results(id)
|
-- The application-defined type label for the event.
|
||||||
ON DELETE CASCADE
|
type VARCHAR NOT NULL
|
||||||
);
|
);
|
||||||
|
|
||||||
-- Indices -------------------------------------------------------
|
-- The attributes table records event attributes.
|
||||||
|
CREATE TABLE attributes (
|
||||||
|
event_id BIGINT NOT NULL REFERENCES events(rowid),
|
||||||
|
key VARCHAR NOT NULL, -- bare key
|
||||||
|
composite_key VARCHAR NOT NULL, -- composed type.key
|
||||||
|
value VARCHAR NULL,
|
||||||
|
|
||||||
CREATE INDEX idx_block_events_key_value ON block_events(key, value);
|
UNIQUE (event_id, key)
|
||||||
CREATE INDEX idx_tx_events_key_value ON tx_events(key, value);
|
);
|
||||||
CREATE INDEX idx_tx_events_hash ON tx_events(hash);
|
|
||||||
|
-- A joined view of events and their attributes. Events that do not have any
|
||||||
|
-- attributes are represented as a single row with empty key and value fields.
|
||||||
|
CREATE VIEW event_attributes AS
|
||||||
|
SELECT block_id, tx_id, type, key, composite_key, value
|
||||||
|
FROM events LEFT JOIN attributes ON (events.rowid = attributes.event_id);
|
||||||
|
|
||||||
|
-- A joined view of all block events (those having tx_id NULL).
|
||||||
|
CREATE VIEW block_events AS
|
||||||
|
SELECT blocks.rowid as block_id, height, chain_id, type, key, composite_key, value
|
||||||
|
FROM blocks JOIN event_attributes ON (blocks.rowid = event_attributes.block_id)
|
||||||
|
WHERE event_attributes.tx_id IS NULL;
|
||||||
|
|
||||||
|
-- A joined view of all transaction events.
|
||||||
|
CREATE VIEW tx_events AS
|
||||||
|
SELECT height, index, chain_id, type, key, composite_key, value, tx_results.created_at
|
||||||
|
FROM blocks JOIN tx_results ON (blocks.rowid = tx_results.block_id)
|
||||||
|
JOIN event_attributes ON (tx_results.rowid = event_attributes.tx_id)
|
||||||
|
WHERE event_attributes.tx_id IS NOT NULL;
|
||||||
```
|
```
|
||||||
|
|
||||||
The `PSQLEventSink` will implement the `EventSink` interface as follows
|
The `PSQLEventSink` will implement the `EventSink` interface as follows
|
||||||
(some details omitted for brevity):
|
(some details omitted for brevity):
|
||||||
|
|
||||||
|
|
||||||
```go
|
```go
|
||||||
func NewPSQLEventSink(connStr string, chainID string) (*PSQLEventSink, error) {
|
func NewEventSink(connStr, chainID string) (*EventSink, error) {
|
||||||
db, err := sql.Open("postgres", connStr)
|
db, err := sql.Open(driverName, connStr)
|
||||||
if err != nil {
|
// ...
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// ...
|
return &EventSink{
|
||||||
|
store: db,
|
||||||
|
chainID: chainID,
|
||||||
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (es *PSQLEventSink) IndexBlockEvents(h types.EventDataNewBlockHeader) error {
|
func (es *EventSink) IndexBlockEvents(h types.EventDataNewBlockHeader) error {
|
||||||
sqlStmt := sq.Insert("block_events").Columns("key", "value", "height", "type", "created_at", "chain_id")
|
ts := time.Now().UTC()
|
||||||
|
|
||||||
// index the reserved block height index
|
return runInTransaction(es.store, func(tx *sql.Tx) error {
|
||||||
ts := time.Now()
|
// Add the block to the blocks table and report back its row ID for use
|
||||||
sqlStmt = sqlStmt.Values(types.BlockHeightKey, h.Header.Height, h.Header.Height, "", ts, es.chainID)
|
// in indexing the events for the block.
|
||||||
|
blockID, err := queryWithID(tx, `
|
||||||
|
INSERT INTO blocks (height, chain_id, created_at)
|
||||||
|
VALUES ($1, $2, $3)
|
||||||
|
ON CONFLICT DO NOTHING
|
||||||
|
RETURNING rowid;
|
||||||
|
`, h.Header.Height, es.chainID, ts)
|
||||||
|
// ...
|
||||||
|
|
||||||
for _, event := range h.ResultBeginBlock.Events {
|
// Insert the special block meta-event for height.
|
||||||
// only index events with a non-empty type
|
if err := insertEvents(tx, blockID, 0, []abci.Event{
|
||||||
if len(event.Type) == 0 {
|
makeIndexedEvent(types.BlockHeightKey, fmt.Sprint(h.Header.Height)),
|
||||||
continue
|
}); err != nil {
|
||||||
}
|
return fmt.Errorf("block meta-events: %w", err)
|
||||||
|
}
|
||||||
for _, attr := range event.Attributes {
|
// Insert all the block events. Order is important here,
|
||||||
if len(attr.Key) == 0 {
|
if err := insertEvents(tx, blockID, 0, h.ResultBeginBlock.Events); err != nil {
|
||||||
continue
|
return fmt.Errorf("begin-block events: %w", err)
|
||||||
}
|
}
|
||||||
|
if err := insertEvents(tx, blockID, 0, h.ResultEndBlock.Events); err != nil {
|
||||||
// index iff the event specified index:true and it's not a reserved event
|
return fmt.Errorf("end-block events: %w", err)
|
||||||
compositeKey := fmt.Sprintf("%s.%s", event.Type, string(attr.Key))
|
}
|
||||||
if compositeKey == types.BlockHeightKey {
|
return nil
|
||||||
return fmt.Errorf("event type and attribute key \"%s\" is reserved; please use a different key", compositeKey)
|
})
|
||||||
}
|
|
||||||
|
|
||||||
if attr.GetIndex() {
|
|
||||||
sqlStmt = sqlStmt.Values(compositeKey, string(attr.Value), h.Header.Height, BlockEventTypeBeginBlock, ts, es.chainID)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// index end_block events...
|
|
||||||
// execute sqlStmt db query...
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (es *PSQLEventSink) IndexTxEvents(txr []*abci.TxResult) error {
|
func (es *EventSink) IndexTxEvents(txrs []*abci.TxResult) error {
|
||||||
sqlStmtEvents := sq.Insert("tx_events").Columns("key", "value", "height", "hash", "tx_result_id", "created_at", "chain_id")
|
ts := time.Now().UTC()
|
||||||
sqlStmtTxResult := sq.Insert("tx_results").Columns("tx_result", "created_at")
|
|
||||||
|
|
||||||
ts := time.Now()
|
for _, txr := range txrs {
|
||||||
for _, tx := range txr {
|
// Encode the result message in protobuf wire format for indexing.
|
||||||
// store the tx result
|
resultData, err := proto.Marshal(txr)
|
||||||
txBz, err := proto.Marshal(tx)
|
// ...
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
sqlStmtTxResult = sqlStmtTxResult.Values(txBz, ts)
|
// Index the hash of the underlying transaction as a hex string.
|
||||||
|
txHash := fmt.Sprintf("%X", types.Tx(txr.Tx).Hash())
|
||||||
|
|
||||||
// execute sqlStmtTxResult db query...
|
if err := runInTransaction(es.store, func(tx *sql.Tx) error {
|
||||||
var txID uint32
|
// Find the block associated with this transaction.
|
||||||
err = sqlStmtTxResult.QueryRow().Scan(&txID)
|
blockID, err := queryWithID(tx, `
|
||||||
if err != nil {
|
SELECT rowid FROM blocks WHERE height = $1 AND chain_id = $2;
|
||||||
|
`, txr.Height, es.chainID)
|
||||||
|
// ...
|
||||||
|
|
||||||
|
// Insert a record for this tx_result and capture its ID for indexing events.
|
||||||
|
txID, err := queryWithID(tx, `
|
||||||
|
INSERT INTO tx_results (block_id, index, created_at, tx_hash, tx_result)
|
||||||
|
VALUES ($1, $2, $3, $4, $5)
|
||||||
|
ON CONFLICT DO NOTHING
|
||||||
|
RETURNING rowid;
|
||||||
|
`, blockID, txr.Index, ts, txHash, resultData)
|
||||||
|
// ...
|
||||||
|
|
||||||
|
// Insert the special transaction meta-events for hash and height.
|
||||||
|
if err := insertEvents(tx, blockID, txID, []abci.Event{
|
||||||
|
makeIndexedEvent(types.TxHashKey, txHash),
|
||||||
|
makeIndexedEvent(types.TxHeightKey, fmt.Sprint(txr.Height)),
|
||||||
|
}); err != nil {
|
||||||
|
return fmt.Errorf("indexing transaction meta-events: %w", err)
|
||||||
|
}
|
||||||
|
// Index any events packaged with the transaction.
|
||||||
|
if err := insertEvents(tx, blockID, txID, txr.Result.Events); err != nil {
|
||||||
|
return fmt.Errorf("indexing transaction events: %w", err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
|
||||||
|
}); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
}
|
||||||
// index the reserved height and hash indices
|
return nil
|
||||||
hash := types.Tx(tx.Tx).Hash()
|
|
||||||
sqlStmtEvents = sqlStmtEvents.Values(types.TxHashKey, hash, tx.Height, hash, txID, ts, es.chainID)
|
|
||||||
sqlStmtEvents = sqlStmtEvents.Values(types.TxHeightKey, tx.Height, tx.Height, hash, txID, ts, es.chainID)
|
|
||||||
|
|
||||||
for _, event := range result.Result.Events {
|
|
||||||
// only index events with a non-empty type
|
|
||||||
if len(event.Type) == 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, attr := range event.Attributes {
|
|
||||||
if len(attr.Key) == 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// index if `index: true` is set
|
|
||||||
compositeTag := fmt.Sprintf("%s.%s", event.Type, string(attr.Key))
|
|
||||||
|
|
||||||
// ensure event does not conflict with a reserved prefix key
|
|
||||||
if compositeTag == types.TxHashKey || compositeTag == types.TxHeightKey {
|
|
||||||
return fmt.Errorf("event type and attribute key \"%s\" is reserved; please use a different key", compositeTag)
|
|
||||||
}
|
|
||||||
|
|
||||||
if attr.GetIndex() {
|
|
||||||
sqlStmtEvents = sqlStmtEvents.Values(compositeKey, string(attr.Value), tx.Height, hash, txID, ts, es.chainID)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// execute sqlStmtEvents db query...
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (es *PSQLEventSink) SearchBlockEvents(ctx context.Context, q *query.Query) ([]int64, error) {
|
// SearchBlockEvents is not implemented by this sink, and reports an error for all queries.
|
||||||
return nil, errors.New("block search is not supported via the postgres event sink")
|
func (es *EventSink) SearchBlockEvents(ctx context.Context, q *query.Query) ([]int64, error)
|
||||||
}
|
|
||||||
|
|
||||||
func (es *PSQLEventSink) SearchTxEvents(ctx context.Context, q *query.Query) ([]*abci.TxResult, error) {
|
// SearchTxEvents is not implemented by this sink, and reports an error for all queries.
|
||||||
return nil, errors.New("tx search is not supported via the postgres event sink")
|
func (es *EventSink) SearchTxEvents(ctx context.Context, q *query.Query) ([]*abci.TxResult, error)
|
||||||
}
|
|
||||||
|
|
||||||
func (es *PSQLEventSink) GetTxByHash(hash []byte) (*abci.TxResult, error) {
|
// GetTxByHash is not implemented by this sink, and reports an error for all queries.
|
||||||
return nil, errors.New("getTxByHash is not supported via the postgres event sink")
|
func (es *EventSink) GetTxByHash(hash []byte) (*abci.TxResult, error)
|
||||||
}
|
|
||||||
|
|
||||||
func (es *PSQLEventSink) HasBlock(h int64) (bool, error) {
|
// HasBlock is not implemented by this sink, and reports an error for all queries.
|
||||||
return false, errors.New("hasBlock is not supported via the postgres event sink")
|
func (es *EventSink) HasBlock(h int64) (bool, error)
|
||||||
}
|
|
||||||
```
|
```
|
||||||
|
|
||||||
### Configuration
|
### Configuration
|
||||||
|
|||||||
273
docs/architecture/adr-069-flexible-node-intitalization.md
Normal file
273
docs/architecture/adr-069-flexible-node-intitalization.md
Normal file
@@ -0,0 +1,273 @@
|
|||||||
|
# ADR 069: Flexible Node Initialization
|
||||||
|
|
||||||
|
## Changelog
|
||||||
|
|
||||||
|
- 2021-06-09: Initial Draft (@tychoish)
|
||||||
|
|
||||||
|
- 2021-07-21: Major Revision (@tychoish)
|
||||||
|
|
||||||
|
## Status
|
||||||
|
|
||||||
|
Proposed.
|
||||||
|
|
||||||
|
## Context
|
||||||
|
|
||||||
|
In an effort to support [Go-API-Stability](./adr-060-go-api-stability.md),
|
||||||
|
during the 0.35 development cycle, we have attempted to reduce the API
|
||||||
|
surface area by moving most of the interface of the `node` package into
|
||||||
|
unexported functions, as well as moving the reactors to an `internal`
|
||||||
|
package. Having this coincide with the 0.35 release made a lot of sense
|
||||||
|
because these interfaces were _already_ changing as a result of the `p2p`
|
||||||
|
[refactor](./adr-061-p2p-refactor-scope.md), so it made sense to think a bit
|
||||||
|
more about how tendermint exposes this API.
|
||||||
|
|
||||||
|
While the interfaces of the P2P layer and most of the node package are already
|
||||||
|
internalized, this precludes some operational patterns that are important to
|
||||||
|
users who use tendermint as a library. Specifically, introspecting the
|
||||||
|
tendermint node service and replacing components is not supported in the latest
|
||||||
|
version of the code, and some of these use cases would require maintaining a
|
||||||
|
vendor copy of the code. Adding these features requires rather extensive
|
||||||
|
(internal/implementation) changes to the `node` and `rpc` packages, and this
|
||||||
|
ADR describes a model for changing the way that tendermint nodes initialize, in
|
||||||
|
service of providing this kind of functionality.
|
||||||
|
|
||||||
|
We consider node initialization, because the current implementation
|
||||||
|
provides strong connections between all components, as well as between
|
||||||
|
the components of the node and the RPC layer, and being able to think
|
||||||
|
about the interactions of these components will help enable these
|
||||||
|
features and help define the requirements of the node package.
|
||||||
|
|
||||||
|
## Alternative Approaches
|
||||||
|
|
||||||
|
These alternatives are presented to frame the design space and to
|
||||||
|
contextualize the decision in terms of product requirements. These
|
||||||
|
ideas are not inherently bad, and may even be possible or desirable
|
||||||
|
in the (distant) future, and merely provide additional context for how
|
||||||
|
we, in the moment, came to our decision(s).
|
||||||
|
|
||||||
|
### Do Nothing
|
||||||
|
|
||||||
|
The current implementation is functional and sufficient for the vast
|
||||||
|
majority of use cases (e.g., all users of the Cosmos-SDK as well as
|
||||||
|
anyone who runs tendermint and the ABCI application in separate
|
||||||
|
processes). In the current implementation, and even previous versions,
|
||||||
|
modifying node initialization or injecting custom components required
|
||||||
|
copying most of the `node` package, which required such users
|
||||||
|
to maintain a vendored copy of tendermint.
|
||||||
|
|
||||||
|
While this is (likely) not tenable in the long term, as users do want
|
||||||
|
more modularity, and the current service implementation is brittle and
|
||||||
|
difficult to maintain, in the short term it may be possible to delay
|
||||||
|
implementation somewhat. Eventually, however, we will need to make the
|
||||||
|
`node` package easier to maintain and reason about.
|
||||||
|
|
||||||
|
### Generic Service Pluggability
|
||||||
|
|
||||||
|
One possible system design would export interfaces (in the Golang
|
||||||
|
sense) for all components of the system, to permit runtime dependency
|
||||||
|
injection of all components in the system, so that users can compose
|
||||||
|
tendermint nodes of arbitrary user-supplied components.
|
||||||
|
|
||||||
|
Although this level of customization would provide benefits, it would be a huge
|
||||||
|
undertaking (particularly with regards to API design work) that we do not have
|
||||||
|
scope for at the moment. Eventually providing support for some kinds of
|
||||||
|
pluggability may be useful, so the current solution does not explicitly
|
||||||
|
foreclose the possibility of this alternative.
|
||||||
|
|
||||||
|
### Abstract Dependency Based Startup and Shutdown
|
||||||
|
|
||||||
|
The main proposal in this document makes tendermint node initialization simpler
|
||||||
|
and more abstract, but the system lacks a number of
|
||||||
|
features which daemon/service initialization could provide, such as a
|
||||||
|
system allowing the authors of services to control initialization and shutdown order
|
||||||
|
of components using dependency relationships.
|
||||||
|
|
||||||
|
Such a system could work by allowing services to declare
|
||||||
|
initialization order dependencies to other reactors (by ID, perhaps)
|
||||||
|
so that the node could decide the initialization based on the
|
||||||
|
dependencies declared by services rather than requiring the node to
|
||||||
|
encode this logic directly.
|
||||||
|
|
||||||
|
This level of configuration is probably more complicated than is needed. Given
|
||||||
|
that the authors of components in the current implementation of tendermint
|
||||||
|
already *do* need to know about other components, a dependency-based system
|
||||||
|
would probably be overly-abstract at this stage.
|
||||||
|
|
||||||
|
## Decisions
|
||||||
|
|
||||||
|
- To the greatest extent possible, factor the code base so that
|
||||||
|
packages are responsible for their own initialization, and minimize
|
||||||
|
the amount of code in the `node` package itself.
|
||||||
|
|
||||||
|
- As a design goal, reduce direct coupling and dependencies between
|
||||||
|
components in the implementation of `node`.
|
||||||
|
|
||||||
|
- Begin iterating on a more-flexible internal framework for
|
||||||
|
initializing tendermint nodes to make the initialization process
|
||||||
|
less hard-coded by the implementation of the node objects.
|
||||||
|
|
||||||
|
- Reactors should not need to expose their interfaces *within* the
|
||||||
|
implementation of the node type
|
||||||
|
|
||||||
|
- This refactoring should be entirely opaque to users.
|
||||||
|
|
||||||
|
- These node initialization changes should not require a
|
||||||
|
reevaluation of the `service.Service` or a generic initialization
|
||||||
|
orchestration framework.
|
||||||
|
|
||||||
|
- Do not proactively provide a system for injecting
|
||||||
|
components/services within a tendermint node, though make it
|
||||||
|
possible to retrofit this kind of pluggability in the future if
|
||||||
|
needed.
|
||||||
|
|
||||||
|
- Prioritize implementation of p2p-based statesync reactor to obviate
|
||||||
|
need for users to inject a custom state-sync provider.
|
||||||
|
|
||||||
|
## Detailed Design
|
||||||
|
|
||||||
|
The [current
|
||||||
|
nodeImpl](https://github.com/tendermint/tendermint/blob/master/node/node.go#L47)
|
||||||
|
includes direct references to the implementations of each of the
|
||||||
|
reactors, which should be replaced by references to `service.Service`
|
||||||
|
objects. This will require moving construction of the [rpc
|
||||||
|
service](https://github.com/tendermint/tendermint/blob/master/node/node.go#L771)
|
||||||
|
into the constructor of
|
||||||
|
[makeNode](https://github.com/tendermint/tendermint/blob/master/node/node.go#L126). One
|
||||||
|
possible implementation of this would be to eliminate the current
|
||||||
|
`ConfigureRPC` method on the node package and instead [configure it
|
||||||
|
here](https://github.com/tendermint/tendermint/pull/6798/files#diff-375d57e386f20eaa5f09f02bb9d28bfc48ac3dca18d0325f59492208219e5618R441).
|
||||||
|
|
||||||
|
To avoid adding complexity to the `node` package, we will add a
|
||||||
|
composite service implementation to the `service` package
|
||||||
|
that implements `service.Service` and is composed of a sequence of
|
||||||
|
underlying `service.Service` objects and handles their
|
||||||
|
startup/shutdown in the specified sequential order.
|
||||||
|
|
||||||
|
Consensus, blocksync (*née* fast sync), and statesync all depend on
|
||||||
|
each other, and have significant initialization dependencies that are
|
||||||
|
presently encoded in the `node` package. As part of this change, a
|
||||||
|
new package/component (likely named `blocks` located at
|
||||||
|
`internal/blocks`) will encapsulate the initialization of these block
|
||||||
|
management areas of the code.
|
||||||
|
|
||||||
|
### Injectable Component Option
|
||||||
|
|
||||||
|
This section briefly describes a possible implementation for
|
||||||
|
user-supplied services running within a node. This should not be
|
||||||
|
implemented unless user-supplied components are a hard requirement for
|
||||||
|
a user.
|
||||||
|
|
||||||
|
In order to allow components to be replaced, a new public function
|
||||||
|
will be added to the public interface of `node` with a signature that
|
||||||
|
resembles the following:
|
||||||
|
|
||||||
|
```go
|
||||||
|
func NewWithServices(conf *config.Config,
|
||||||
|
logger log.Logger,
|
||||||
|
cf proxy.ClientCreator,
|
||||||
|
gen *types.GenesisDoc,
|
||||||
|
srvs []service.Service,
|
||||||
|
) (service.Service, error) {
|
||||||
|
```
|
||||||
|
|
||||||
|
The `service.Service` objects will be initialized in the order supplied, after
|
||||||
|
all pre-configured/default services have started (and shut down in reverse
|
||||||
|
order). The given services may implement additional interfaces, allowing them
|
||||||
|
to replace specific default services. `NewWithServices` will validate input
|
||||||
|
service lists with the following rules:
|
||||||
|
|
||||||
|
- None of the services may already be running.
|
||||||
|
- The caller may not supply more than one replacement reactor for a given
|
||||||
|
default service type.
|
||||||
|
|
||||||
|
If callers violate any of these rules, `NewWithServices` will return
|
||||||
|
an error. To retract support for this kind of operation in the future,
|
||||||
|
the function can be modified to *always* return an error.
|
||||||
|
|
||||||
|
## Consequences
|
||||||
|
|
||||||
|
### Positive
|
||||||
|
|
||||||
|
- The node package will become easier to maintain.
|
||||||
|
|
||||||
|
- It will become easier to add additional services within tendermint
|
||||||
|
nodes.
|
||||||
|
|
||||||
|
- It will become possible to replace default components in the node
|
||||||
|
package without vendoring the tendermint repo and modifying internal
|
||||||
|
code.
|
||||||
|
|
||||||
|
- The current end-to-end (e2e) test suite will be able to prevent any
|
||||||
|
regressions, and the new functionality can be thoroughly unit tested.
|
||||||
|
|
||||||
|
- The scope of this project is very narrow, which minimizes risk.
|
||||||
|
|
||||||
|
### Negative
|
||||||
|
|
||||||
|
- This increases our reliance on the `service.Service` interface which
|
||||||
|
is probably not an interface that we want to fully commit to.
|
||||||
|
|
||||||
|
- This proposal implements a fairly minimal set of functionality and
|
||||||
|
leaves open the possibility for many additional features which are
|
||||||
|
not included in the scope of this proposal.
|
||||||
|
|
||||||
|
### Neutral
|
||||||
|
|
||||||
|
N/A
|
||||||
|
|
||||||
|
## Open Questions
|
||||||
|
|
||||||
|
- To what extent does this new initialization framework need to accommodate
|
||||||
|
the legacy p2p stack? Would it be possible to delay a great deal of this
|
||||||
|
work to the 0.36 cycle to avoid this complexity?
|
||||||
|
|
||||||
|
- Answer: _depends on timing_, and the requirement to ship pluggable reactors in 0.35.
|
||||||
|
|
||||||
|
- Where should additional public types be exported for the 0.35
|
||||||
|
release?
|
||||||
|
|
||||||
|
Related to the general project of API stabilization we want to deprecate
|
||||||
|
the `types` package, and move its contents into a new `pkg` hierarchy;
|
||||||
|
however, the design of the `pkg` interface is currently underspecified.
|
||||||
|
If `types` is going to remain for the 0.35 release, then we should consider
|
||||||
|
the impact of using multiple organizing modalities for this code within a
|
||||||
|
single release.
|
||||||
|
|
||||||
|
## Future Work
|
||||||
|
|
||||||
|
- Improve or simplify the `service.Service` interface. There are some
|
||||||
|
pretty clear limitations with this interface as written (there's no
|
||||||
|
way to timeout slow startup or shut down, the cycle between the
|
||||||
|
`service.BaseService` and `service.Service` implementations is
|
||||||
|
troubling, the default panic in `OnReset` seems troubling.)
|
||||||
|
|
||||||
|
- As part of the refactor of `service.Service` have all services/nodes
|
||||||
|
respect the lifetime of a `context.Context` object, and avoid the
|
||||||
|
current practice of creating `context.Context` objects in p2p and
|
||||||
|
reactor code. This would be required for in-process multi-tenancy.
|
||||||
|
|
||||||
|
- Support explicit dependencies between components and allow for
|
||||||
|
parallel startup, so that different reactors can startup at the same
|
||||||
|
time, where possible.
|
||||||
|
|
||||||
|
## References
|
||||||
|
|
||||||
|
- [this
|
||||||
|
branch](https://github.com/tendermint/tendermint/tree/tychoish/scratch-node-minimize)
|
||||||
|
contains experimental work in the implementation of the node package
|
||||||
|
to unwind some of the hard dependencies between components.
|
||||||
|
|
||||||
|
- [the component
|
||||||
|
graph](https://peter.bourgon.org/go-for-industrial-programming/#the-component-graph)
|
||||||
|
as a framing for internal service construction.
|
||||||
|
|
||||||
|
## Appendix
|
||||||
|
|
||||||
|
### Dependencies
|
||||||
|
|
||||||
|
There's a relationship between the blockchain and consensus reactor
|
||||||
|
described by the following dependency graph makes replacing some of
|
||||||
|
these components more difficult relative to other reactors or
|
||||||
|
components.
|
||||||
|
|
||||||
|

|
||||||
445
docs/architecture/adr-071-proposer-based-timestamps.md
Normal file
445
docs/architecture/adr-071-proposer-based-timestamps.md
Normal file
@@ -0,0 +1,445 @@
|
|||||||
|
# ADR 71: Proposer-Based Timestamps
|
||||||
|
|
||||||
|
* [Changelog](#changelog)
|
||||||
|
* [Status](#status)
|
||||||
|
* [Context](#context)
|
||||||
|
* [Alternative Approaches](#alternative-approaches)
|
||||||
|
* [Remove timestamps altogether](#remove-timestamps-altogether)
|
||||||
|
* [Decision](#decision)
|
||||||
|
* [Detailed Design](#detailed-design)
|
||||||
|
* [Overview](#overview)
|
||||||
|
* [Proposal Timestamp and Block Timestamp](#proposal-timestamp-and-block-timestamp)
|
||||||
|
* [Saving the timestamp across heights](#saving-the-timestamp-across-heights)
|
||||||
|
* [Changes to `CommitSig`](#changes-to-commitsig)
|
||||||
|
* [Changes to `Commit`](#changes-to-commit)
|
||||||
|
* [Changes to `Vote` messages](#changes-to-vote-messages)
|
||||||
|
* [New consensus parameters](#new-consensus-parameters)
|
||||||
|
* [Changes to `Header`](#changes-to-header)
|
||||||
|
* [Changes to the block proposal step](#changes-to-the-block-proposal-step)
|
||||||
|
* [Proposer selects proposal timestamp](#proposer-selects-proposal-timestamp)
|
||||||
|
* [Proposer selects block timestamp](#proposer-selects-block-timestamp)
|
||||||
|
* [Proposer waits](#proposer-waits)
|
||||||
|
* [Changes to the propose step timeout](#changes-to-the-propose-step-timeout)
|
||||||
|
* [Changes to validation rules](#changes-to-validation-rules)
|
||||||
|
* [Proposal timestamp validation](#proposal-timestamp-validation)
|
||||||
|
* [Block timestamp validation](#block-timestamp-validation)
|
||||||
|
* [Changes to the prevote step](#changes-to-the-prevote-step)
|
||||||
|
* [Changes to the precommit step](#changes-to-the-precommit-step)
|
||||||
|
* [Changes to locking a block](#changes-to-locking-a-block)
|
||||||
|
* [Remove voteTime Completely](#remove-votetime-completely)
|
||||||
|
* [Future Improvements](#future-improvements)
|
||||||
|
* [Consequences](#consequences)
|
||||||
|
* [Positive](#positive)
|
||||||
|
* [Neutral](#neutral)
|
||||||
|
* [Negative](#negative)
|
||||||
|
* [References](#references)
|
||||||
|
|
||||||
|
## Changelog
|
||||||
|
|
||||||
|
- July 15 2021: Created by @williambanfield
|
||||||
|
- Aug 4 2021: Draft completed by @williambanfield
|
||||||
|
- Aug 5 2021: Draft updated to include data structure changes by @williambanfield
|
||||||
|
- Aug 20 2021: Language edits completed by @williambanfield
|
||||||
|
|
||||||
|
## Status
|
||||||
|
|
||||||
|
**Accepted**
|
||||||
|
|
||||||
|
## Context
|
||||||
|
|
||||||
|
Tendermint currently provides a monotonically increasing source of time known as [BFTTime](https://github.com/tendermint/spec/blob/master/spec/consensus/bft-time.md).
|
||||||
|
This mechanism for producing a source of time is reasonably simple.
|
||||||
|
Each correct validator adds a timestamp to each `Precommit` message it sends.
|
||||||
|
The timestamp it sends is either the validator's current known Unix time or one millisecond greater than the previous block time, depending on which value is greater.
|
||||||
|
When a block is produced, the proposer chooses the block timestamp as the weighted median of the times in all of the `Precommit` messages the proposer received.
|
||||||
|
The weighting is proportional to the amount of voting power, or stake, a validator has on the network.
|
||||||
|
This mechanism for producing timestamps is both deterministic and byzantine fault tolerant.
|
||||||
|
|
||||||
|
This current mechanism for producing timestamps has a few drawbacks.
|
||||||
|
Validators do not have to agree at all on how close the selected block timestamp is to their own currently known Unix time.
|
||||||
|
Additionally, any amount of voting power `>1/3` may directly control the block timestamp.
|
||||||
|
As a result, it is quite possible that the timestamp is not particularly meaningful.
|
||||||
|
|
||||||
|
These drawbacks present issues in the Tendermint protocol.
|
||||||
|
Timestamps are used by light clients to verify blocks.
|
||||||
|
Light clients rely on correspondence between their own currently known Unix time and the block timestamp to verify blocks they see;
|
||||||
|
However, their currently known Unix time may be greatly divergent from the block timestamp as a result of the limitations of `BFTTime`.
|
||||||
|
|
||||||
|
The proposer-based timestamps specification suggests an alternative approach for producing block timestamps that remedies these issues.
|
||||||
|
Proposer-based timestamps alter the current mechanism for producing block timestamps in two main ways:
|
||||||
|
|
||||||
|
1. The block proposer is amended to offer up its currently known Unix time as the timestamp for the next block.
|
||||||
|
1. Correct validators only approve the proposed block timestamp if it is close enough to their own currently known Unix time.
|
||||||
|
|
||||||
|
The result of these changes is a more meaningful timestamp that cannot be controlled by `<= 2/3` of the validator voting power.
|
||||||
|
This document outlines the necessary code changes in Tendermint to implement the corresponding [proposer-based timestamps specification](https://github.com/tendermint/spec/tree/master/spec/consensus/proposer-based-timestamp).
|
||||||
|
|
||||||
|
## Alternative Approaches
|
||||||
|
|
||||||
|
### Remove timestamps altogether
|
||||||
|
|
||||||
|
Computer clocks are bound to skew for a variety of reasons.
|
||||||
|
Using timestamps in our protocol means either accepting the timestamps as not reliable or impacting the protocol’s liveness guarantees.
|
||||||
|
This design requires impacting the protocol’s liveness in order to make the timestamps more reliable.
|
||||||
|
An alternate approach is to remove timestamps altogether from the block protocol.
|
||||||
|
`BFTTime` is deterministic but may be arbitrarily inaccurate.
|
||||||
|
However, having a reliable source of time is quite useful for applications and protocols built on top of a blockchain.
|
||||||
|
|
||||||
|
We therefore decided not to remove the timestamp.
|
||||||
|
Applications often wish for some transactions to occur on a certain day, on a regular period, or after some time following a different event.
|
||||||
|
All of these require some meaningful representation of agreed upon time.
|
||||||
|
The following protocols and application features require a reliable source of time:
|
||||||
|
* Tendermint Light Clients [rely on correspondence between their known time](https://github.com/tendermint/spec/blob/master/spec/light-client/verification/README.md#definitions-1) and the block time for block verification.
|
||||||
|
* Tendermint Evidence validity is determined [either in terms of heights or in terms of time](https://github.com/tendermint/spec/blob/8029cf7a0fcc89a5004e173ec065aa48ad5ba3c8/spec/consensus/evidence.md#verification).
|
||||||
|
* Unbonding of staked assets in the Cosmos Hub [occurs after a period of 21 days](https://github.com/cosmos/governance/blob/ce75de4019b0129f6efcbb0e752cd2cc9e6136d3/params-change/Staking.md#unbondingtime).
|
||||||
|
* IBC packets can use either a [timestamp or a height to timeout packet delivery](https://docs.cosmos.network/v0.43/ibc/overview.html#acknowledgements).
|
||||||
|
|
||||||
|
Finally, inflation distribution in the Cosmos Hub uses an approximation of time to calculate an annual percentage rate.
|
||||||
|
This approximation of time is calculated using [block heights with an estimated number of blocks produced in a year](https://github.com/cosmos/governance/blob/master/params-change/Mint.md#blocksperyear).
|
||||||
|
Proposer-based timestamps will allow this inflation calculation to use a more meaningful and accurate source of time.
|
||||||
|
|
||||||
|
|
||||||
|
## Decision
|
||||||
|
|
||||||
|
Implement proposer-based timestamps and remove `BFTTime`.
|
||||||
|
|
||||||
|
## Detailed Design
|
||||||
|
|
||||||
|
### Overview
|
||||||
|
|
||||||
|
Implementing proposer-based timestamps will require a few changes to Tendermint’s code.
|
||||||
|
These changes will be to the following components:
|
||||||
|
* The `internal/consensus/` package.
|
||||||
|
* The `state/` package.
|
||||||
|
* The `Vote`, `CommitSig`, `Commit` and `Header` types.
|
||||||
|
* The consensus parameters.
|
||||||
|
|
||||||
|
### Proposal Timestamp and Block Timestamp
|
||||||
|
|
||||||
|
This design discusses two timestamps: (1) The timestamp in the block and (2) the timestamp in the proposal message.
|
||||||
|
The existence and use of both of these timestamps can get a bit confusing, so some background is given here to clarify their uses.
|
||||||
|
|
||||||
|
The [proposal message currently has a timestamp](https://github.com/tendermint/tendermint/blob/e5312942e30331e7c42b75426da2c6c9c00ae476/types/proposal.go#L31).
|
||||||
|
This timestamp is the current Unix time known to the proposer when sending the `Proposal` message.
|
||||||
|
This timestamp is not currently used as part of consensus.
|
||||||
|
The changes in this ADR will begin using the proposal message timestamp as part of consensus.
|
||||||
|
We will refer to this as the **proposal timestamp** throughout this design.
|
||||||
|
|
||||||
|
The block has a timestamp field [in the header](https://github.com/tendermint/tendermint/blob/dc7c212c41a360bfe6eb38a6dd8c709bbc39aae7/types/block.go#L338).
|
||||||
|
This timestamp is set currently as part of Tendermint’s `BFTtime` algorithm.
|
||||||
|
It is set when a block is proposed and it is checked by the validators when they are deciding to prevote the block.
|
||||||
|
This field will continue to be used but the logic for creating and validating this timestamp will change.
|
||||||
|
We will refer to this as the **block timestamp** throughout this design.
|
||||||
|
|
||||||
|
At a high level, the proposal timestamp from height `H` is used as the block timestamp at height `H+1`.
|
||||||
|
The following image shows this relationship.
|
||||||
|
The rest of this document describes the code changes that will make this possible.
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
### Saving the timestamp across heights
|
||||||
|
|
||||||
|
Currently, `BFTtime` uses `LastCommit` to construct the block timestamp.
|
||||||
|
The `LastCommit` is created at height `H-1` and is saved in the state store to be included in the block at height `H`.
|
||||||
|
`BFTtime` takes the weighted median of the timestamps in `LastCommit.CommitSig` to build the timestamp for height `H`.
|
||||||
|
|
||||||
|
For proposer-based timestamps, the `LastCommit.CommitSig` timestamps will no longer be used to build the timestamps for height `H`.
|
||||||
|
Instead, the proposal timestamp from height `H-1` will become the block timestamp for height `H`.
|
||||||
|
To enable this, we will add a `Timestamp` field to the `Commit` struct.
|
||||||
|
This field will be populated at each height with the proposal timestamp decided on at the previous height.
|
||||||
|
This timestamp will also be saved with the rest of the commit in the state store [when the commit is finalized](https://github.com/tendermint/tendermint/blob/e8013281281985e3ada7819f42502b09623d24a0/internal/consensus/state.go#L1611) so that it can be recovered if Tendermint crashes.
|
||||||
|
Changes to the `CommitSig` and `Commit` struct are detailed below.
|
||||||
|
|
||||||
|
### Changes to `CommitSig`
|
||||||
|
|
||||||
|
The [CommitSig](https://github.com/tendermint/tendermint/blob/a419f4df76fe4aed668a6c74696deabb9fe73211/types/block.go#L604) struct currently contains a timestamp.
|
||||||
|
This timestamp is the current Unix time known to the validator when it issued a `Precommit` for the block.
|
||||||
|
This timestamp is no longer used and will be removed in this change.
|
||||||
|
|
||||||
|
`CommitSig` will be updated as follows:
|
||||||
|
|
||||||
|
```diff
|
||||||
|
type CommitSig struct {
|
||||||
|
BlockIDFlag BlockIDFlag `json:"block_id_flag"`
|
||||||
|
ValidatorAddress Address `json:"validator_address"`
|
||||||
|
-- Timestamp time.Time `json:"timestamp"`
|
||||||
|
Signature []byte `json:"signature"`
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Changes to `Commit`
|
||||||
|
|
||||||
|
The [Commit](https://github.com/tendermint/tendermint/blob/a419f4df76fe4aed668a6c74696deabb9fe73211/types/block.go#L746) struct does not currently contain a timestamp.
|
||||||
|
The timestamps in the `Commit.CommitSig` entries are currently used to build the block timestamp.
|
||||||
|
With these timestamps removed, the commit time will instead be stored in the `Commit` struct.
|
||||||
|
|
||||||
|
`Commit` will be updated as follows.
|
||||||
|
|
||||||
|
```diff
|
||||||
|
type Commit struct {
|
||||||
|
Height int64 `json:"height"`
|
||||||
|
Round int32 `json:"round"`
|
||||||
|
++ Timestamp time.Time `json:"timestamp"`
|
||||||
|
BlockID BlockID `json:"block_id"`
|
||||||
|
Signatures []CommitSig `json:"signatures"`
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Changes to `Vote` messages
|
||||||
|
|
||||||
|
`Precommit` and `Prevote` messages use a common [Vote struct](https://github.com/tendermint/tendermint/blob/a419f4df76fe4aed668a6c74696deabb9fe73211/types/vote.go#L50).
|
||||||
|
This struct currently contains a timestamp.
|
||||||
|
This timestamp is set using the [voteTime](https://github.com/tendermint/tendermint/blob/e8013281281985e3ada7819f42502b09623d24a0/internal/consensus/state.go#L2241) function and therefore vote times correspond to the current Unix time known to the validator.
|
||||||
|
For precommits, this timestamp is used to construct the [CommitSig that is included in the block in the LastCommit](https://github.com/tendermint/tendermint/blob/e8013281281985e3ada7819f42502b09623d24a0/types/block.go#L754) field.
|
||||||
|
For prevotes, this field is unused.
|
||||||
|
Proposer-based timestamps will use the [RoundState.Proposal](https://github.com/tendermint/tendermint/blob/c3ae6f5b58e07b29c62bfdc5715b6bf8ae5ee951/internal/consensus/types/round_state.go#L76) timestamp to construct the `signedBytes` `CommitSig`.
|
||||||
|
This timestamp is therefore no longer useful and will be dropped.
|
||||||
|
|
||||||
|
`Vote` will be updated as follows:
|
||||||
|
|
||||||
|
```diff
|
||||||
|
type Vote struct {
|
||||||
|
Type tmproto.SignedMsgType `json:"type"`
|
||||||
|
Height int64 `json:"height"`
|
||||||
|
Round int32 `json:"round"`
|
||||||
|
BlockID BlockID `json:"block_id"` // zero if vote is nil.
|
||||||
|
-- Timestamp time.Time `json:"timestamp"`
|
||||||
|
ValidatorAddress Address `json:"validator_address"`
|
||||||
|
ValidatorIndex int32 `json:"validator_index"`
|
||||||
|
Signature []byte `json:"signature"`
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### New consensus parameters
|
||||||
|
|
||||||
|
The proposer-based timestamp specification includes multiple new parameters that must be the same among all validators.
|
||||||
|
These parameters are `PRECISION`, `MSGDELAY`, and `ACCURACY`.
|
||||||
|
|
||||||
|
The `PRECISION` and `MSGDELAY` parameters are used to determine if the proposed timestamp is acceptable.
|
||||||
|
A validator will only Prevote a proposal if the proposal timestamp is considered `timely`.
|
||||||
|
A proposal timestamp is considered `timely` if it is within `PRECISION` and `MSGDELAY` of the Unix time known to the validator.
|
||||||
|
More specifically, a proposal timestamp is `timely` if `validatorLocalTime - PRECISION < proposalTime < validatorLocalTime + PRECISION + MSGDELAY`.
|
||||||
|
|
||||||
|
Because the `PRECISION` and `MSGDELAY` parameters must be the same across all validators, they will be added to the [consensus parameters](https://github.com/tendermint/tendermint/blob/master/proto/tendermint/types/params.proto#L13) as [durations](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.Duration).
|
||||||
|
|
||||||
|
The proposer-based timestamp specification also includes a [new ACCURACY parameter](https://github.com/tendermint/spec/blob/master/spec/consensus/proposer-based-timestamp/pbts-sysmodel_001_draft.md#pbts-clocksync-external0).
|
||||||
|
Intuitively, `ACCURACY` represents the difference between the ‘real’ time and the currently known time of correct validators.
|
||||||
|
The currently known Unix time of any validator is always somewhat different from real time.
|
||||||
|
`ACCURACY` is the largest such difference between each validator's time and real time taken as an absolute value.
|
||||||
|
This is not something a computer can determine on its own and must be specified as an estimate by the community running a Tendermint-based chain.
|
||||||
|
It is used in the new algorithm to [calculate a timeout for the propose step](https://github.com/tendermint/spec/blob/master/spec/consensus/proposer-based-timestamp/pbts-algorithm_001_draft.md#pbts-alg-startround0).
|
||||||
|
`ACCURACY` is assumed to be the same across all validators and therefore should be included as a consensus parameter.
|
||||||
|
|
||||||
|
The consensus parameters will be updated to include this `Timestamp` field as follows:
|
||||||
|
|
||||||
|
```diff
|
||||||
|
type ConsensusParams struct {
|
||||||
|
Block BlockParams `json:"block"`
|
||||||
|
Evidence EvidenceParams `json:"evidence"`
|
||||||
|
Validator ValidatorParams `json:"validator"`
|
||||||
|
Version VersionParams `json:"version"`
|
||||||
|
++ Timestamp TimestampParams `json:"timestamp"`
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
```go
|
||||||
|
type TimestampParams struct {
|
||||||
|
Accuracy time.Duration `json:"accuracy"`
|
||||||
|
Precision time.Duration `json:"precision"`
|
||||||
|
MsgDelay time.Duration `json:"msg_delay"`
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Changes to `Header`
|
||||||
|
|
||||||
|
The [Header](https://github.com/tendermint/tendermint/blob/a419f4df76fe4aed668a6c74696deabb9fe73211/types/block.go#L338) struct currently contains a timestamp.
|
||||||
|
This timestamp is set as the `BFTtime` derived from the block's `LastCommit.CommitSig` timestamps.
|
||||||
|
This timestamp will no longer be derived from the `LastCommit.CommitSig` timestamps and will instead be included directly into the block's `LastCommit`.
|
||||||
|
This timestamp will therefore be identical in both the `Header` and the `LastCommit`.
|
||||||
|
To clarify that the timestamp in the header corresponds to the `LastCommit`'s time, we will rename this timestamp field to `last_timestamp`.
|
||||||
|
|
||||||
|
`Header` will be updated as follows:
|
||||||
|
|
||||||
|
```diff
|
||||||
|
type Header struct {
|
||||||
|
// basic block info
|
||||||
|
Version version.Consensus `json:"version"`
|
||||||
|
ChainID string `json:"chain_id"`
|
||||||
|
Height int64 `json:"height"`
|
||||||
|
-- Time time.Time `json:"time"`
|
||||||
|
++ LastTimestamp time.Time `json:"last_timestamp"`
|
||||||
|
|
||||||
|
// prev block info
|
||||||
|
LastBlockID BlockID `json:"last_block_id"`
|
||||||
|
|
||||||
|
// hashes of block data
|
||||||
|
LastCommitHash tmbytes.HexBytes `json:"last_commit_hash"`
|
||||||
|
DataHash tmbytes.HexBytes `json:"data_hash"`
|
||||||
|
|
||||||
|
// hashes from the app output from the prev block
|
||||||
|
ValidatorsHash tmbytes.HexBytes `json:"validators_hash"`
|
||||||
|
NextValidatorsHash tmbytes.HexBytes `json:"next_validators_hash"`
|
||||||
|
ConsensusHash tmbytes.HexBytes `json:"consensus_hash"`
|
||||||
|
AppHash tmbytes.HexBytes `json:"app_hash"`
|
||||||
|
|
||||||
|
// root hash of all results from the txs from the previous block
|
||||||
|
LastResultsHash tmbytes.HexBytes `json:"last_results_hash"`
|
||||||
|
|
||||||
|
// consensus info
|
||||||
|
EvidenceHash tmbytes.HexBytes `json:"evidence_hash"`
|
||||||
|
ProposerAddress Address `json:"proposer_address"`
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Changes to the block proposal step
|
||||||
|
|
||||||
|
#### Proposer selects proposal timestamp
|
||||||
|
|
||||||
|
The proposal logic already [sets the Unix time known to the validator](https://github.com/tendermint/tendermint/blob/2abfe20114ee3bb3adfee817589033529a804e4d/types/proposal.go#L44) into the `Proposal` message.
|
||||||
|
This satisfies the proposer-based timestamp specification and does not need to change.
|
||||||
|
|
||||||
|
#### Proposer selects block timestamp
|
||||||
|
|
||||||
|
The proposal timestamp that was decided in height `H-1` will be stored in the `State` struct's `RoundState.LastCommit` field.
|
||||||
|
The proposer will select this timestamp to use as the block timestamp at height `H`.
|
||||||
|
|
||||||
|
#### Proposer waits
|
||||||
|
|
||||||
|
Block timestamps must be monotonically increasing.
|
||||||
|
In `BFTTime`, if a validator’s clock was behind, the [validator added 1 millisecond to the previous block’s time and used that in its vote messages](https://github.com/tendermint/tendermint/blob/e8013281281985e3ada7819f42502b09623d24a0/internal/consensus/state.go#L2246).
|
||||||
|
A goal of adding proposer-based timestamps is to enforce some degree of clock synchronization, so having a mechanism that completely ignores the Unix time of the validator no longer works.
|
||||||
|
|
||||||
|
Validator clocks will not be perfectly in sync.
|
||||||
|
Therefore, the proposer’s current known Unix time may be less than the `LastCommit.Timestamp`.
|
||||||
|
If the proposer’s current known Unix time is less than the `LastCommit.Timestamp`, the proposer will sleep until its known Unix time exceeds `LastCommit.Timestamp`.
|
||||||
|
|
||||||
|
This change will require amending the [defaultDecideProposal](https://github.com/tendermint/tendermint/blob/822893615564cb20b002dd5cf3b42b8d364cb7d9/internal/consensus/state.go#L1180) method.
|
||||||
|
This method should now block until the proposer’s time is greater than `LastCommit.Timestamp`.
|
||||||
|
|
||||||
|
#### Changes to the propose step timeout
|
||||||
|
|
||||||
|
Currently, a validator waiting for a proposal will proceed past the propose step if the configured propose timeout is reached and no proposal is seen.
|
||||||
|
Proposer-based timestamps require changing this timeout logic.
|
||||||
|
|
||||||
|
The proposer will now wait until its current known Unix time exceeds the `LastCommit.Timestamp` to propose a block.
|
||||||
|
The validators must now take this and some other factors into account when deciding when to timeout the propose step.
|
||||||
|
Specifically, the propose step timeout must also take into account potential inaccuracy in the validator’s clock and in the clock of the proposer.
|
||||||
|
Additionally, there may be a delay communicating the proposal message from the proposer to the other validators.
|
||||||
|
|
||||||
|
Therefore, validators waiting for a proposal must wait until after the `LastCommit.Timestamp` before timing out.
|
||||||
|
To account for possible inaccuracy in its own clock, inaccuracy in the proposer’s clock, and message delay, validators waiting for a proposal will wait until `LastCommit.Timestamp + 2*ACCURACY + MSGDELAY`.
|
||||||
|
The spec defines this as `waitingTime`.
|
||||||
|
|
||||||
|
The [propose step’s timeout is set in enterPropose](https://github.com/tendermint/tendermint/blob/822893615564cb20b002dd5cf3b42b8d364cb7d9/internal/consensus/state.go#L1108) in `state.go`.
|
||||||
|
`enterPropose` will be changed to calculate waiting time using the new consensus parameters.
|
||||||
|
The timeout in `enterPropose` will then be set as the maximum of `waitingTime` and the [configured proposal step timeout](https://github.com/tendermint/tendermint/blob/dc7c212c41a360bfe6eb38a6dd8c709bbc39aae7/config/config.go#L1013).
|
||||||
|
|
||||||
|
### Changes to validation rules
|
||||||
|
|
||||||
|
The rules for validating that a proposal is valid will need slight modification to implement proposer-based timestamps.
|
||||||
|
Specifically, we will change the validation logic to ensure that the proposal timestamp is `timely` and we will modify the way the block timestamp is validated as well.
|
||||||
|
|
||||||
|
#### Proposal timestamp validation
|
||||||
|
|
||||||
|
Adding proposal timestamp validation is a reasonably straightforward change.
|
||||||
|
The current Unix time known to the proposer is already included in the [Proposal message](https://github.com/tendermint/tendermint/blob/dc7c212c41a360bfe6eb38a6dd8c709bbc39aae7/types/proposal.go#L31).
|
||||||
|
Once the proposal is received, the complete message is stored in the `RoundState.Proposal` field.
|
||||||
|
The precommit and prevote validation logic does not currently use this timestamp.
|
||||||
|
This validation logic will be updated to check that the proposal timestamp is within `PRECISION` of the current Unix time known to the validators.
|
||||||
|
If the timestamp is not within `PRECISION` of the current Unix time known to the validator, the proposal will not be considered valid.
|
||||||
|
The validator will also check that the proposal time is greater than the block timestamp from the previous height.
|
||||||
|
|
||||||
|
If no valid proposal is received by the proposal timeout, the validator will prevote nil.
|
||||||
|
This is identical to the current logic.
|
||||||
|
|
||||||
|
#### Block timestamp validation
|
||||||
|
|
||||||
|
The [validBlock function](https://github.com/tendermint/tendermint/blob/c3ae6f5b58e07b29c62bfdc5715b6bf8ae5ee951/state/validation.go#L14) currently [validates the proposed block timestamp in three ways](https://github.com/tendermint/tendermint/blob/c3ae6f5b58e07b29c62bfdc5715b6bf8ae5ee951/state/validation.go#L118).
|
||||||
|
First, the validation logic checks that this timestamp is greater than the previous block’s timestamp.
|
||||||
|
Additionally, it validates that the block timestamp is correctly calculated as the weighted median of the timestamps in the [block’s LastCommit](https://github.com/tendermint/tendermint/blob/e8013281281985e3ada7819f42502b09623d24a0/types/block.go#L48).
|
||||||
|
Finally, the logic also authenticates the timestamps in the `LastCommit`.
|
||||||
|
The cryptographic signature in each `CommitSig` is created by signing a hash of fields in the block with the validator’s private key.
|
||||||
|
One of the items in this `signedBytes` hash is derived from the timestamp in the `CommitSig`.
|
||||||
|
To authenticate the `CommitSig` timestamp, the validator builds a hash of fields that includes the timestamp and checks this hash against the provided signature.
|
||||||
|
This takes place in the [VerifyCommit function](https://github.com/tendermint/tendermint/blob/e8013281281985e3ada7819f42502b09623d24a0/types/validation.go#L25).
|
||||||
|
|
||||||
|
The logic to validate that the block timestamp is greater than the previous block’s timestamp also works for proposer-based timestamps and will not change.
|
||||||
|
|
||||||
|
`BFTTime` validation is no longer applicable and will be removed.
|
||||||
|
Validators will no longer check that the block timestamp is a weighted median of `LastCommit` timestamps.
|
||||||
|
This will mean removing the call to [MedianTime in the validateBlock function](https://github.com/tendermint/tendermint/blob/4db71da68e82d5cb732b235eeb2fd69d62114b45/state/validation.go#L117).
|
||||||
|
The `MedianTime` function can be completely removed.
|
||||||
|
The `LastCommit` timestamps may also be removed.
|
||||||
|
|
||||||
|
The `signedBytes` validation logic in `VerifyCommit` will be slightly altered.
|
||||||
|
The `CommitSig`s in the block’s `LastCommit` will no longer each contain a timestamp.
|
||||||
|
The validation logic will instead include the `LastCommit.Timestamp` in the hash of fields for generating the `signedBytes`.
|
||||||
|
The cryptographic signatures included in the `CommitSig`s will then be checked against this `signedBytes` hash to authenticate the timestamp.
|
||||||
|
Specifically, the `VerifyCommit` function will be updated to use this new timestamp.
|
||||||
|
|
||||||
|
### Changes to the prevote step
|
||||||
|
|
||||||
|
Currently, a validator will prevote a proposal in one of three cases:
|
||||||
|
|
||||||
|
* Case 1: Validator has no locked block and receives a valid proposal.
|
||||||
|
* Case 2: Validator has a locked block and receives a valid proposal matching its locked block.
|
||||||
|
* Case 3: Validator has a locked block, sees a valid proposal not matching its locked block but sees +⅔ prevotes for the new proposal’s block.
|
||||||
|
|
||||||
|
The only change we will make to the prevote step is to what a validator considers a valid proposal as detailed above.
|
||||||
|
|
||||||
|
### Changes to the precommit step
|
||||||
|
|
||||||
|
The precommit step will not require much modification.
|
||||||
|
Its proposal validation rules will change in the same ways that validation will change in the prevote step.
|
||||||
|
|
||||||
|
### Changes to locking a block
|
||||||
|
When a validator receives a valid proposed block and +2/3 prevotes for that block, it stores the block as its ‘locked block’ in the [RoundState.ValidBlock](https://github.com/tendermint/tendermint/blob/e8013281281985e3ada7819f42502b09623d24a0/internal/consensus/types/round_state.go#L85) field.
|
||||||
|
In each subsequent round it will prevote that block.
|
||||||
|
A validator will only change which block it has locked if it sees +2/3 prevotes for a different block.
|
||||||
|
|
||||||
|
This mechanism will remain largely unchanged.
|
||||||
|
The only difference is the addition of proposal timestamp validation.
|
||||||
|
A validator will prevote nil in a round if the proposal message it received is not `timely`.
|
||||||
|
Prevoting nil in this case will not cause a validator to ‘unlock’ its locked block.
|
||||||
|
This difference is an incidental result of the changes to prevote validation.
|
||||||
|
It is included in this design for completeness and to clarify that no additional changes will be made to block locking.
|
||||||
|
|
||||||
|
### Remove voteTime Completely
|
||||||
|
|
||||||
|
[voteTime](https://github.com/tendermint/tendermint/blob/822893615564cb20b002dd5cf3b42b8d364cb7d9/internal/consensus/state.go#L2229) is a mechanism for calculating the next `BFTTime` given both the validator's current known Unix time and the previous block timestamp.
|
||||||
|
If the previous block timestamp is greater than the validator's current known Unix time, then voteTime returns a value one millisecond greater than the previous block timestamp.
|
||||||
|
This logic is used in multiple places and is no longer needed for proposer-based timestamps.
|
||||||
|
It should therefore be removed completely.
|
||||||
|
|
||||||
|
## Future Improvements
|
||||||
|
|
||||||
|
* Implement BLS signature aggregation.
|
||||||
|
By removing fields from the `Precommit` messages, we are able to aggregate signatures.
|
||||||
|
|
||||||
|
## Consequences
|
||||||
|
|
||||||
|
### Positive
|
||||||
|
|
||||||
|
* `<2/3` of validators can no longer influence block timestamps.
|
||||||
|
* Block timestamp will have stronger correspondence to real time.
|
||||||
|
* Improves the reliability of light client block verification.
|
||||||
|
* Enables BLS signature aggregation.
|
||||||
|
* Enables evidence handling to use time instead of height for evidence validity.
|
||||||
|
|
||||||
|
### Neutral
|
||||||
|
|
||||||
|
* Alters Tendermint’s liveness properties.
|
||||||
|
Liveness now requires that all correct validators have synchronized clocks within a bound.
|
||||||
|
Liveness will now also require that validators’ clocks move forward, which was not required under `BFTTime`.
|
||||||
|
|
||||||
|
### Negative
|
||||||
|
|
||||||
|
* May increase the length of the propose step if there is a large skew between the previous proposer and the current proposer’s local Unix time.
|
||||||
|
This skew will be bound by the `PRECISION` value, so it is unlikely to be too large.
|
||||||
|
|
||||||
|
* Current chains with block timestamps far in the future will either need to pause consensus until after the erroneous block timestamp or must maintain synchronized but very inaccurate clocks.
|
||||||
|
|
||||||
|
## References
|
||||||
|
|
||||||
|
* [PBTS Spec](https://github.com/tendermint/spec/tree/master/spec/consensus/proposer-based-timestamp)
|
||||||
|
* [BFTTime spec](https://github.com/tendermint/spec/blob/master/spec/consensus/bft-time.md)
|
||||||
105
docs/architecture/adr-072-request-for-comments.md
Normal file
105
docs/architecture/adr-072-request-for-comments.md
Normal file
@@ -0,0 +1,105 @@
|
|||||||
|
# ADR 72: Restore Requests for Comments
|
||||||
|
|
||||||
|
## Changelog
|
||||||
|
|
||||||
|
- 20-Aug-2021: Initial draft (@creachadair)
|
||||||
|
|
||||||
|
## Status
|
||||||
|
|
||||||
|
Proposed
|
||||||
|
|
||||||
|
## Context
|
||||||
|
|
||||||
|
In the past, we kept a collection of Request for Comments (RFC) documents in `docs/rfc`.
|
||||||
|
Prior to the creation of the ADR process, these documents were used to document
|
||||||
|
design and implementation decisions about Tendermint Core. The RFC directory
|
||||||
|
was removed in favor of ADRs, in commit 3761aa69 (PR
|
||||||
|
[\#6345](https://github.com/tendermint/tendermint/pull/6345)).
|
||||||
|
|
||||||
|
For issues where an explicit design decision or implementation change is
|
||||||
|
required, an ADR is generally preferable to an open-ended RFC: An ADR is
|
||||||
|
relatively narrowly-focused, identifies a specific design or implementation
|
||||||
|
question, and documents the consensus answer to that question.
|
||||||
|
|
||||||
|
Some discussions are more open-ended, however, or don't require a specific
|
||||||
|
decision to be made (yet). Such conversations are still valuable to document,
|
||||||
|
and several members of the Tendermint team have been doing so by writing gists
|
||||||
|
or Google docs to share them around. That works well enough in the moment, but
|
||||||
|
gists do not support any kind of collaborative editing, and both gists and docs
|
||||||
|
are hard to discover after the fact. Google docs have much better collaborative
|
||||||
|
editing, but are worse for discoverability, especially when contributors span
|
||||||
|
different Google accounts.
|
||||||
|
|
||||||
|
Discoverability is important, because these kinds of open-ended discussions are
|
||||||
|
useful to people who come later -- either as new team members or as outside
|
||||||
|
contributors seeking to use and understand the thoughts behind our designs and
|
||||||
|
the architectural decisions that arose from those discussions.
|
||||||
|
|
||||||
|
With these in mind, I propose that:
|
||||||
|
|
||||||
|
- We re-create a new, initially empty `docs/rfc` directory in the repository,
|
||||||
|
and use it to capture these kinds of open-ended discussions in supplement to
|
||||||
|
ADRs.
|
||||||
|
|
||||||
|
- Unlike in the previous RFC scheme, documents in this new directory will
|
||||||
|
_not_ be used directly for decision-making. This is the key difference
|
||||||
|
between an RFC and an ADR.
|
||||||
|
|
||||||
|
Instead, an RFC will exist to document background, articulate general
|
||||||
|
principles, and serve as a historical record of discussion and motivation.
|
||||||
|
|
||||||
|
In this system, an RFC may _only_ result in a decision indirectly, via ADR
|
||||||
|
documents created in response to the RFC.
|
||||||
|
|
||||||
|
**In short:** If a decision is required, write an ADR; otherwise if a
|
||||||
|
sufficiently broad discussion is needed, write an RFC.
|
||||||
|
|
||||||
|
Just so that there is a consistent format, I also propose that:
|
||||||
|
|
||||||
|
- RFC files are named `rfc-XXX-title.{md,rst,txt}` and are written in plain
|
||||||
|
text, Markdown, or ReStructured Text.
|
||||||
|
|
||||||
|
- Like an ADR, an RFC should include a high-level change log at the top of the
|
||||||
|
document, and sections for:
|
||||||
|
|
||||||
|
* Abstract: A brief, high-level synopsis of the topic.
|
||||||
|
* Background: Any background necessary to understand the topic.
|
||||||
|
* Discussion: Detailed discussion of the issue being considered.
|
||||||
|
|
||||||
|
- Unlike an ADR, an RFC does _not_ include sections for Decisions, Detailed
|
||||||
|
Design, or evaluation of proposed solutions. If an RFC leads to a proposal
|
||||||
|
for an actual architectural change, that must be recorded in an ADR in the
|
||||||
|
usual way, and may refer back to the RFC in its References section.
|
||||||
|
|
||||||
|
## Alternative Approaches
|
||||||
|
|
||||||
|
Leaving aside implementation details, the main alternative to this proposal is
|
||||||
|
to leave things as they are now, with ADRs as the only log of record and other
|
||||||
|
discussions being held informally in whatever medium is convenient at the time.
|
||||||
|
|
||||||
|
## Decision
|
||||||
|
|
||||||
|
(pending)
|
||||||
|
|
||||||
|
## Detailed Design
|
||||||
|
|
||||||
|
- Create a new `docs/rfc` directory in the `tendermint` repository. Note that
|
||||||
|
this proposal intentionally does _not_ pull back the previous contents of
|
||||||
|
that path from Git history, as those documents were appropriately merged into
|
||||||
|
the ADR process.
|
||||||
|
|
||||||
|
- Create a `README.md` for RFCs that explains the rules and their relationship
|
||||||
|
to ADRs.
|
||||||
|
|
||||||
|
- Create an `rfc-template.md` file for RFC files.
|
||||||
|
|
||||||
|
## Consequences
|
||||||
|
|
||||||
|
### Positive
|
||||||
|
|
||||||
|
- We will have a more discoverable place to record open-ended discussions that
|
||||||
|
do not immediately result in a design change.
|
||||||
|
|
||||||
|
### Negative
|
||||||
|
|
||||||
|
- Potentially some people could be confused about the RFC/ADR distinction.
|
||||||
BIN
docs/architecture/img/consensus_blockchain.png
Normal file
BIN
docs/architecture/img/consensus_blockchain.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 672 KiB |
BIN
docs/architecture/img/pbts-message.png
Normal file
BIN
docs/architecture/img/pbts-message.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 31 KiB |
@@ -36,7 +36,7 @@ proxy-app = "tcp://127.0.0.1:26658"
|
|||||||
# A custom human readable name for this node
|
# A custom human readable name for this node
|
||||||
moniker = "anonymous"
|
moniker = "anonymous"
|
||||||
|
|
||||||
# If this node is many blocks behind the tip of the chain, FastSync
|
# If this node is many blocks behind the tip of the chain, BlockSync
|
||||||
# allows them to catchup quickly by downloading blocks in parallel
|
# allows them to catchup quickly by downloading blocks in parallel
|
||||||
# and verifying their commits
|
# and verifying their commits
|
||||||
fast-sync = true
|
fast-sync = true
|
||||||
@@ -275,9 +275,13 @@ dial-timeout = "3s"
|
|||||||
#######################################################
|
#######################################################
|
||||||
[mempool]
|
[mempool]
|
||||||
|
|
||||||
|
# Mempool version to use:
|
||||||
|
# 1) "v0" - The legacy non-prioritized mempool reactor.
|
||||||
|
# 2) "v1" (default) - The prioritized mempool reactor.
|
||||||
|
version = "v1"
|
||||||
|
|
||||||
recheck = true
|
recheck = true
|
||||||
broadcast = true
|
broadcast = true
|
||||||
wal-dir = ""
|
|
||||||
|
|
||||||
# Maximum number of transactions in the mempool
|
# Maximum number of transactions in the mempool
|
||||||
size = 5000
|
size = 5000
|
||||||
@@ -304,6 +308,22 @@ max-tx-bytes = 1048576
|
|||||||
# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796
|
# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796
|
||||||
max-batch-bytes = 0
|
max-batch-bytes = 0
|
||||||
|
|
||||||
|
# ttl-duration, if non-zero, defines the maximum amount of time a transaction
|
||||||
|
# can exist for in the mempool.
|
||||||
|
#
|
||||||
|
# Note, if ttl-num-blocks is also defined, a transaction will be removed if it
|
||||||
|
# has existed in the mempool at least ttl-num-blocks number of blocks or if its
|
||||||
|
# insertion time into the mempool is beyond ttl-duration.
|
||||||
|
ttl-duration = "0s"
|
||||||
|
|
||||||
|
# ttl-num-blocks, if non-zero, defines the maximum number of blocks a transaction
|
||||||
|
# can exist for in the mempool.
|
||||||
|
#
|
||||||
|
# Note, if ttl-duration is also defined, a transaction will be removed if it
|
||||||
|
# has existed in the mempool at least ttl-num-blocks number of blocks or if
|
||||||
|
# its insertion time into the mempool is beyond ttl-duration.
|
||||||
|
ttl-num-blocks = 0
|
||||||
|
|
||||||
#######################################################
|
#######################################################
|
||||||
### State Sync Configuration Options ###
|
### State Sync Configuration Options ###
|
||||||
#######################################################
|
#######################################################
|
||||||
@@ -334,12 +354,12 @@ discovery-time = "15s"
|
|||||||
temp-dir = ""
|
temp-dir = ""
|
||||||
|
|
||||||
#######################################################
|
#######################################################
|
||||||
### Fast Sync Configuration Connections ###
|
### BlockSync Configuration Connections ###
|
||||||
#######################################################
|
#######################################################
|
||||||
[fastsync]
|
[fastsync]
|
||||||
|
|
||||||
# Fast Sync version to use:
|
# Block Sync version to use:
|
||||||
# 1) "v0" (default) - the legacy fast sync implementation
|
# 1) "v0" (default) - the legacy block sync implementation
|
||||||
# 2) "v2" - complete redesign of v0, optimized for testability & readability
|
# 2) "v2" - complete redesign of v0, optimized for testability & readability
|
||||||
version = "v0"
|
version = "v0"
|
||||||
|
|
||||||
@@ -421,7 +441,6 @@ max-open-connections = 3
|
|||||||
|
|
||||||
# Instrumentation namespace
|
# Instrumentation namespace
|
||||||
namespace = "tendermint"
|
namespace = "tendermint"
|
||||||
|
|
||||||
```
|
```
|
||||||
|
|
||||||
## Empty blocks VS no empty blocks
|
## Empty blocks VS no empty blocks
|
||||||
|
|||||||
@@ -32,7 +32,7 @@ tendermint start --log-level "info"
|
|||||||
Here is the list of modules you may encounter in Tendermint's log and a
|
Here is the list of modules you may encounter in Tendermint's log and a
|
||||||
little overview what they do.
|
little overview what they do.
|
||||||
|
|
||||||
- `abci-client` As mentioned in [Application Development Guide](../app-dev/app-development.md), Tendermint acts as an ABCI
|
- `abci-client` As mentioned in [Application Architecture Guide](../app-dev/app-architecture.md), Tendermint acts as an ABCI
|
||||||
client with respect to the application and maintains 3 connections:
|
client with respect to the application and maintains 3 connections:
|
||||||
mempool, consensus and query. The code used by Tendermint Core can
|
mempool, consensus and query. The code used by Tendermint Core can
|
||||||
be found [here](https://github.com/tendermint/tendermint/tree/master/abci/client).
|
be found [here](https://github.com/tendermint/tendermint/tree/master/abci/client).
|
||||||
@@ -45,12 +45,12 @@ little overview what they do.
|
|||||||
from a crash.
|
from a crash.
|
||||||
[here](https://github.com/tendermint/tendermint/blob/master/types/events.go).
|
[here](https://github.com/tendermint/tendermint/blob/master/types/events.go).
|
||||||
You can subscribe to them by calling `subscribe` RPC method. Refer
|
You can subscribe to them by calling `subscribe` RPC method. Refer
|
||||||
to [RPC docs](./rpc.md) for additional information.
|
to [RPC docs](../tendermint-core/rpc.md) for additional information.
|
||||||
- `mempool` Mempool module handles all incoming transactions, whenever
|
- `mempool` Mempool module handles all incoming transactions, whenever
|
||||||
they are coming from peers or the application.
|
they are coming from peers or the application.
|
||||||
- `p2p` Provides an abstraction around peer-to-peer communication. For
|
- `p2p` Provides an abstraction around peer-to-peer communication. For
|
||||||
more details, please check out the
|
more details, please check out the
|
||||||
[README](https://github.com/tendermint/tendermint/blob/master/p2p/README.md).
|
[README](https://github.com/tendermint/spec/tree/master/spec/p2p).
|
||||||
- `rpc-server` RPC server. For implementation details, please read the
|
- `rpc-server` RPC server. For implementation details, please read the
|
||||||
[doc.go](https://github.com/tendermint/tendermint/blob/master/rpc/jsonrpc/doc.go).
|
[doc.go](https://github.com/tendermint/tendermint/blob/master/rpc/jsonrpc/doc.go).
|
||||||
- `state` Represents the latest state and execution submodule, which
|
- `state` Represents the latest state and execution submodule, which
|
||||||
|
|||||||
@@ -40,7 +40,7 @@ Default logging level (`log-level = "info"`) should suffice for
|
|||||||
normal operation mode. Read [this
|
normal operation mode. Read [this
|
||||||
post](https://blog.cosmos.network/one-of-the-exciting-new-features-in-0-10-0-release-is-smart-log-level-flag-e2506b4ab756)
|
post](https://blog.cosmos.network/one-of-the-exciting-new-features-in-0-10-0-release-is-smart-log-level-flag-e2506b4ab756)
|
||||||
for details on how to configure `log-level` config variable. Some of the
|
for details on how to configure `log-level` config variable. Some of the
|
||||||
modules can be found [here](../nodes/logging#list-of-modules). If
|
modules can be found [here](logging.md#list-of-modules). If
|
||||||
you're trying to debug Tendermint or asked to provide logs with debug
|
you're trying to debug Tendermint or asked to provide logs with debug
|
||||||
logging level, you can do so by running Tendermint with
|
logging level, you can do so by running Tendermint with
|
||||||
`--log-level="debug"`.
|
`--log-level="debug"`.
|
||||||
@@ -114,7 +114,7 @@ just the votes seen at the current height.
|
|||||||
If, after consulting with the logs and above endpoints, you still have no idea
|
If, after consulting with the logs and above endpoints, you still have no idea
|
||||||
what's happening, consider using `tendermint debug kill` sub-command. This
|
what's happening, consider using `tendermint debug kill` sub-command. This
|
||||||
command will scrap all the available info and kill the process. See
|
command will scrap all the available info and kill the process. See
|
||||||
[Debugging](../tools/debugging.md) for the exact format.
|
[Debugging](../tools/debugging/README.md) for the exact format.
|
||||||
|
|
||||||
You can inspect the resulting archive yourself or create an issue on
|
You can inspect the resulting archive yourself or create an issue on
|
||||||
[Github](https://github.com/tendermint/tendermint). Before opening an issue
|
[Github](https://github.com/tendermint/tendermint). Before opening an issue
|
||||||
@@ -134,7 +134,7 @@ Tendermint also can report and serve Prometheus metrics. See
|
|||||||
[Metrics](./metrics.md).
|
[Metrics](./metrics.md).
|
||||||
|
|
||||||
`tendermint debug dump` sub-command can be used to periodically dump useful
|
`tendermint debug dump` sub-command can be used to periodically dump useful
|
||||||
information into an archive. See [Debugging](../tools/debugging.md) for more
|
information into an archive. See [Debugging](../tools/debugging/README.md) for more
|
||||||
information.
|
information.
|
||||||
|
|
||||||
## What happens when my app dies
|
## What happens when my app dies
|
||||||
@@ -268,6 +268,8 @@ While we do not favor any operation system, more secure and stable Linux server
|
|||||||
distributions (like Centos) should be preferred over desktop operation systems
|
distributions (like Centos) should be preferred over desktop operation systems
|
||||||
(like Mac OS).
|
(like Mac OS).
|
||||||
|
|
||||||
|
Native Windows support is not provided. If you are using a windows machine, you can try using the [bash shell](https://docs.microsoft.com/en-us/windows/wsl/install-win10).
|
||||||
|
|
||||||
### Miscellaneous
|
### Miscellaneous
|
||||||
|
|
||||||
NOTE: if you are going to use Tendermint in a public domain, make sure
|
NOTE: if you are going to use Tendermint in a public domain, make sure
|
||||||
@@ -313,7 +315,7 @@ We want `skip-timeout-commit=false` when there is economics on the line
|
|||||||
because proposers should wait to hear for more votes. But if you don't
|
because proposers should wait to hear for more votes. But if you don't
|
||||||
care about that and want the fastest consensus, you can skip it. It will
|
care about that and want the fastest consensus, you can skip it. It will
|
||||||
be kept false by default for public deployments (e.g. [Cosmos
|
be kept false by default for public deployments (e.g. [Cosmos
|
||||||
Hub](https://cosmos.network/intro/hub)) while for enterprise
|
Hub](https://hub.cosmos.network/main/hub-overview/overview.html)) while for enterprise
|
||||||
applications, setting it to true is not a problem.
|
applications, setting it to true is not a problem.
|
||||||
|
|
||||||
- `consensus.peer-gossip-sleep-duration`
|
- `consensus.peer-gossip-sleep-duration`
|
||||||
|
|||||||
42
docs/rfc/README.md
Normal file
42
docs/rfc/README.md
Normal file
@@ -0,0 +1,42 @@
|
|||||||
|
---
|
||||||
|
order: 1
|
||||||
|
parent:
|
||||||
|
order: false
|
||||||
|
---
|
||||||
|
|
||||||
|
# Requests for Comments
|
||||||
|
|
||||||
|
A Request for Comments (RFC) is a record of discussion on an open-ended topic
|
||||||
|
related to the design and implementation of Tendermint Core, for which no
|
||||||
|
immediate decision is required.
|
||||||
|
|
||||||
|
The purpose of an RFC is to serve as a historical record of a high-level
|
||||||
|
discussion that might otherwise only be recorded in an ad hoc way (for example,
|
||||||
|
via gists or Google docs) that are difficult to discover for someone after the
|
||||||
|
fact. An RFC _may_ give rise to more specific architectural _decisions_ for
|
||||||
|
Tendermint, but those decisions must be recorded separately in [Architecture
|
||||||
|
Decision Records (ADR)](./../architecture).
|
||||||
|
|
||||||
|
As a rule of thumb, if you can articulate a specific question that needs to be
|
||||||
|
answered, write an ADR. If you need to explore the topic and get input from
|
||||||
|
others to know what questions need to be answered, an RFC may be appropriate.
|
||||||
|
|
||||||
|
## RFC Content
|
||||||
|
|
||||||
|
An RFC should provide:
|
||||||
|
|
||||||
|
- A **changelog**, documenting when and how the RFC has changed.
|
||||||
|
- An **abstract**, briefly summarizing the topic so the reader can quickly tell
|
||||||
|
whether it is relevant to their interest.
|
||||||
|
- Any **background** a reader will need to understand and participate in the
|
||||||
|
substance of the discussion (links to other documents are fine here).
|
||||||
|
- The **discussion**, the primary content of the document.
|
||||||
|
|
||||||
|
The [rfc-template.md](./rfc-template.md) file includes placeholders for these
|
||||||
|
sections.
|
||||||
|
|
||||||
|
## Table of Contents
|
||||||
|
|
||||||
|
- [RFC-000: P2P Roadmap](./rfc-000-p2p-roadmap.rst)
|
||||||
|
|
||||||
|
<!-- - [RFC-NNN: Title](./rfc-NNN-title.md) -->
|
||||||
316
docs/rfc/rfc-000-p2p-roadmap.rst
Normal file
316
docs/rfc/rfc-000-p2p-roadmap.rst
Normal file
@@ -0,0 +1,316 @@
|
|||||||
|
====================
|
||||||
|
RFC 000: P2P Roadmap
|
||||||
|
====================
|
||||||
|
|
||||||
|
Changelog
|
||||||
|
---------
|
||||||
|
|
||||||
|
- 2021-08-20: Completed initial draft and distributed via a gist
|
||||||
|
- 2021-08-25: Migrated as an RFC and changed format
|
||||||
|
|
||||||
|
Abstract
|
||||||
|
--------
|
||||||
|
|
||||||
|
This document discusses the future of peer network management in Tendermint, with
|
||||||
|
a particular focus on features, semantics, and a proposed roadmap.
|
||||||
|
Specifically, we consider libp2p as a tool kit for implementing some fundamentals.
|
||||||
|
|
||||||
|
Background
|
||||||
|
----------
|
||||||
|
|
||||||
|
For the 0.35 release cycle the switching/routing layer of Tendermint was
|
||||||
|
replaced. This work was done "in place," and produced a version of Tendermint
|
||||||
|
that was backward-compatible and interoperable with previous versions of the
|
||||||
|
software. While there are new p2p/peer management constructs in the new
|
||||||
|
version (e.g. ``PeerManager`` and ``Router``), the main effect of this change
|
||||||
|
was to simplify the ways that other components within Tendermint interacted with
|
||||||
|
the peer management layer, and to make it possible for higher-level components
|
||||||
|
(specifically the reactors), to be used and tested more independently.
|
||||||
|
|
||||||
|
This refactoring, which was a major undertaking, was entirely necessary to
|
||||||
|
enable areas for future development and iteration on this aspect of
|
||||||
|
Tendermint. There are also a number of potential user-facing features that
|
||||||
|
depend heavily on the p2p layer: additional transport protocols, transport
|
||||||
|
compression, improved resilience to network partitions. These improvements to
|
||||||
|
modularity, stability, and reliability of the p2p system will also make
|
||||||
|
ongoing maintenance and feature development easier in the rest of Tendermint.
|
||||||
|
|
||||||
|
Critique of Current Peer-to-Peer Infrastructure
|
||||||
|
-----------------------------------------------
|
||||||
|
|
||||||
|
The current (refactored) P2P stack is an improvement on the previous iteration
|
||||||
|
(legacy), but as of 0.35, there remains room for improvement in the design and
|
||||||
|
implementation of the P2P layer.
|
||||||
|
|
||||||
|
Some limitations of the current stack include:
|
||||||
|
|
||||||
|
- heavy reliance on buffering to avoid backups in the flow of components,
|
||||||
|
which is fragile to maintain and can lead to unexpected memory usage
|
||||||
|
patterns and forces the routing layer to make decisions about when messages
|
||||||
|
should be discarded.
|
||||||
|
|
||||||
|
- the current p2p stack relies on convention (rather than the compiler) to
|
||||||
|
enforce the API boundaries and conventions between reactors and the router,
|
||||||
|
making it very easy to write "wrong" reactor code or introduce a bad
|
||||||
|
dependency.
|
||||||
|
|
||||||
|
- the current stack is probably more complex and difficult to maintain because
|
||||||
|
the legacy system must coexist with the new components in 0.35. When the
|
||||||
|
legacy stack is removed there are some simple changes that will become
|
||||||
|
possible and could reduce the complexity of the new system. (e.g. `#6598
|
||||||
|
<https://github.com/tendermint/tendermint/issues/6598>`_.)
|
||||||
|
|
||||||
|
- the current stack encapsulates a lot of information about peers, and makes it
|
||||||
|
difficult to expose that information to monitoring/observability tools. This
|
||||||
|
general opacity also makes it difficult to interact with the peer system
|
||||||
|
from other areas of the code base (e.g. tests, reactors).
|
||||||
|
|
||||||
|
- the legacy stack provided some control to operators to force the system to
|
||||||
|
dial new peers or seed nodes or manipulate the topology of the system _in
|
||||||
|
situ_. The current stack can't easily provide this, and while the new stack
|
||||||
|
may have better behavior, it does leave operators' hands tied.
|
||||||
|
|
||||||
|
Some of these issues will be resolved early in the 0.36 cycle, with the
|
||||||
|
removal of the legacy components.
|
||||||
|
|
||||||
|
The 0.36 release also provides the opportunity to make changes to the
|
||||||
|
protocol, as the release will not be compatible with previous releases.
|
||||||
|
|
||||||
|
Areas for Development
|
||||||
|
---------------------
|
||||||
|
|
||||||
|
These sections describe features that may make sense to include in a Phase 2 of
|
||||||
|
a P2P project.
|
||||||
|
|
||||||
|
Internal Message Passing
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
Currently, there's no provision for intranode communication using the P2P
|
||||||
|
layer, which means when two reactors need to interact with each other they
|
||||||
|
have to have dependencies on each other's interfaces, and
|
||||||
|
initialization. Changing these interactions (e.g. transitions between
|
||||||
|
blocksync and consensus) from procedure calls to message passing would reduce this coupling.
|
||||||
|
|
||||||
|
This is a relatively simple change and could be implemented with the following
|
||||||
|
components:
|
||||||
|
|
||||||
|
- a constant to represent "local" delivery as the ``To`` field on
|
||||||
|
``p2p.Envelope``.
|
||||||
|
|
||||||
|
- special path for routing local messages that doesn't require message
|
||||||
|
serialization (protobuf marshalling/unmarshaling).
|
||||||
|
|
||||||
|
Adding these semantics, particularly if in conjunction with synchronous
|
||||||
|
semantics provides a solution to dependency graph problems currently present
|
||||||
|
in the Tendermint codebase, which will simplify development, make it possible
|
||||||
|
to isolate components for testing.
|
||||||
|
|
||||||
|
Eventually, this will also make it possible to have a logical Tendermint node
|
||||||
|
running in multiple processes or in a collection of containers, although the
|
||||||
|
usecase of this may be debatable.
|
||||||
|
|
||||||
|
Synchronous Semantics (Paired Request/Response)
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
In the current system, all messages are sent with fire-and-forget semantics,
|
||||||
|
and there's no coupling between a request sent via the p2p layer, and a
|
||||||
|
response. These kinds of semantics would simplify the implementation of
|
||||||
|
state and block sync reactors, and make intra-node message passing more
|
||||||
|
powerful.
|
||||||
|
|
||||||
|
For some interactions, like gossiping transactions between the mempools of
|
||||||
|
different nodes, fire-and-forget semantics make sense, but for other
|
||||||
|
operations the missing link between requests/responses leads to either
|
||||||
|
inefficiency when a node fails to respond or becomes unavailable, or code that
|
||||||
|
is just difficult to follow.
|
||||||
|
|
||||||
|
To support this kind of work, the protocol would need to accommodate some kind
|
||||||
|
of request/response ID to allow identifying out-of-order responses over a
|
||||||
|
single connection. Additionally, expanding the programming model of the
|
||||||
|
``p2p.Channel`` to accommodate some kind of _future_ or similar paradigm to
|
||||||
|
make it viable to write reactor code without requiring the reactor developer
|
||||||
|
to wrestle with lower-level concurrency constructs.
|
||||||
|
|
||||||
|
|
||||||
|
Timeout Handling (QoS)
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
Currently, all timeouts, buffering, and QoS features are handled at the router
|
||||||
|
layer, and the reactors are implemented in ways that assume/require
|
||||||
|
asynchronous operation. This both increases the required complexity at the
|
||||||
|
routing layer, and means that misbehavior at the reactor level is difficult to
|
||||||
|
detect or attribute. Additionally, the current system provides three main
|
||||||
|
parameters to control quality of service:
|
||||||
|
|
||||||
|
- buffer sizes for channels and queues.
|
||||||
|
|
||||||
|
- priorities for channels
|
||||||
|
|
||||||
|
- queue implementation details for shedding load.
|
||||||
|
|
||||||
|
These end up being quite coarse controls, and changing the settings are
|
||||||
|
difficult because as the queues and channels are able to buffer large numbers
|
||||||
|
of messages it can be hard to see the impact of a given change, particularly
|
||||||
|
in our extant test environment. In general, we should endeavor to:
|
||||||
|
|
||||||
|
- set real timeouts, via contexts, on most message send operations, so that
|
||||||
|
senders rather than queues can be responsible for timeout
|
||||||
|
logic. Additionally, this will make it possible to avoid sending messages
|
||||||
|
during shutdown.
|
||||||
|
|
||||||
|
- reduce (to the greatest extent possible) the amount of buffering in
|
||||||
|
channels and the queues, to more readily surface backpressure and reduce the
|
||||||
|
potential for buildup of stale messages.
|
||||||
|
|
||||||
|
Stream Based Connection Handling
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
Currently the transport layer is message based, which makes sense from a
|
||||||
|
mental model of how the protocol works, but makes it more difficult to
|
||||||
|
implement transports and connection types, as it forces a higher level view of
|
||||||
|
the connection and interaction which makes it harder to implement for novel
|
||||||
|
transport types and makes it more likely that message-based caching and rate
|
||||||
|
limiting will be implemented at the transport layer rather than at a more
|
||||||
|
appropriate level.
|
||||||
|
|
||||||
|
The transport, then, would be responsible for negotiating the connection and the
|
||||||
|
handshake and otherwise behave like a socket/file descriptor with ``Read`` and
|
||||||
|
``Write`` methods.
|
||||||
|
|
||||||
|
While this was included in the initial design for the new P2P layer, it may be
|
||||||
|
obviated entirely if the transport and peer layer is replaced with libp2p,
|
||||||
|
which is primarily stream based.
|
||||||
|
|
||||||
|
Service Discovery
|
||||||
|
~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
In the current system, Tendermint assumes that all nodes in a network are
|
||||||
|
largely equivalent, and nodes tend to be "chatty" making many requests of
|
||||||
|
large numbers of peers and waiting for peers to (hopefully) respond. While
|
||||||
|
this works and has allowed Tendermint to get to a certain point, this both
|
||||||
|
produces a theoretical scaling bottleneck and makes it harder to test and
|
||||||
|
verify components of the system.
|
||||||
|
|
||||||
|
In addition to peer's identity and connection information, peers should be
|
||||||
|
able to advertise a number of services or capabilities, and node operators or
|
||||||
|
developers should be able to specify peer capability requirements (e.g. target
|
||||||
|
at least <x>-percent of peers with <y> capability.)
|
||||||
|
|
||||||
|
These capabilities may be useful in selecting peers to send messages to, it
|
||||||
|
may make sense to extend Tendermint's message addressing capability to allow
|
||||||
|
reactors to send messages to groups of peers based on role rather than only
|
||||||
|
allowing addressing to one or all peers.
|
||||||
|
|
||||||
|
Having a good service discovery mechanism may pair well with the synchronous
|
||||||
|
semantics (request/response) work, as it allows reactors to "make a request of
|
||||||
|
a peer with <x> capability and wait for the response," rather than forcing the
|
||||||
|
reactors to need to track the capabilities or state of specific peers.
|
||||||
|
|
||||||
|
Solutions
|
||||||
|
---------
|
||||||
|
|
||||||
|
Continued Homegrown Implementation
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
The current peer system is homegrown and is conceptually compatible with the
|
||||||
|
needs of the project, and while there are limitations to the system, the p2p
|
||||||
|
layer is not (currently as of 0.35) a major source of bugs or friction during
|
||||||
|
development.
|
||||||
|
|
||||||
|
However, the current implementation makes a number of allowances for
|
||||||
|
interoperability, and there are a collection of iterative improvements that
|
||||||
|
should be considered in the next couple of releases. To maintain the current
|
||||||
|
implementation, upcoming work would include:
|
||||||
|
|
||||||
|
- change the ``Transport`` mechanism to facilitate easier implementations.
|
||||||
|
|
||||||
|
- implement different ``Transport`` handlers to be able to manage peer
|
||||||
|
connections using different protocols (e.g. QUIC, etc.)
|
||||||
|
|
||||||
|
- entirely remove the constructs and implementations of the legacy peer
|
||||||
|
implementation.
|
||||||
|
|
||||||
|
- establish and enforce clearer chains of responsibility for connection
|
||||||
|
establishment (e.g. handshaking, setup,) which is currently shared between
|
||||||
|
three components.
|
||||||
|
|
||||||
|
- report better metrics regarding the state of peers and network
|
||||||
|
connectivity, which are opaque outside of the system. This is constrained at
|
||||||
|
the moment as a side effect of the split responsibility for connection
|
||||||
|
establishment.
|
||||||
|
|
||||||
|
- extend the PEX system to include service information so that nodes in the
|
||||||
|
network weren't necessarily homogeneous.
|
||||||
|
|
||||||
|
While maintaining a bespoke peer management layer would seem to distract from
|
||||||
|
development of core functionality, the truth is that (once the legacy code is
|
||||||
|
removed,) the scope of the peer layer is relatively small from a maintenance
|
||||||
|
perspective, and having control at this layer might actually afford the
|
||||||
|
project with the ability to more rapidly iterate on some features.
|
||||||
|
|
||||||
|
LibP2P
|
||||||
|
~~~~~~
|
||||||
|
|
||||||
|
LibP2P provides components that, approximately, account for the
|
||||||
|
``PeerManager`` and ``Transport`` components of the current (new) P2P
|
||||||
|
stack. The Go APIs seem reasonable, and being able to externalize the
|
||||||
|
implementation details of peer and connection management seems like it could
|
||||||
|
provide a lot of benefits, particularly in supporting a more active ecosystem.
|
||||||
|
|
||||||
|
In general the API provides the kind of stream-based, multi-protocol
|
||||||
|
supporting, and idiomatic baseline for implementing a peer layer. Additionally
|
||||||
|
because it handles peer exchange and connection management at a lower
|
||||||
|
level, by using libp2p it'd be possible to remove a good deal of code in favor
|
||||||
|
of just using libp2p. Having said that, Tendermint's P2P layer covers a
|
||||||
|
greater scope (e.g. message routing to different peers) and that layer is
|
||||||
|
something that Tendermint might want to retain.
|
||||||
|
|
||||||
|
There are a number of unknowns that require more research, including how much of
|
||||||
|
a peer database the Tendermint engine itself needs to maintain, in order to
|
||||||
|
support higher level operations (consensus, statesync), but it might be the
|
||||||
|
case that our internal systems need to know much less about peers than
|
||||||
|
otherwise specified. Similarly, the current system has a notion of peer
|
||||||
|
scoring that cannot be communicated to libp2p, which may be fine as this is
|
||||||
|
only used to support peer exchange (PEX), which would become a property of libp2p
|
||||||
|
and not expressed in its current higher-level form.
|
||||||
|
|
||||||
|
In general, the effort to switch to libp2p would involve:
|
||||||
|
|
||||||
|
- timing it during an appropriate protocol-breaking window, as it doesn't seem
|
||||||
|
viable to support both libp2p *and* the current p2p protocol.
|
||||||
|
|
||||||
|
- providing some in-memory testing network to support the use case that the
|
||||||
|
current ``p2p.MemoryNetwork`` provides.
|
||||||
|
|
||||||
|
- re-homing the ``p2p.Router`` implementation on top of libp2p components to
|
||||||
|
be able to maintain the current reactor implementations.
|
||||||
|
|
||||||
|
Open questions include:
|
||||||
|
|
||||||
|
- how much local buffering should we be doing? It sort of seems like we should
|
||||||
|
figure out what the expected behavior is for libp2p for QoS-type
|
||||||
|
functionality, and if our requirements mean that we should be implementing
|
||||||
|
this on top of things ourselves?
|
||||||
|
|
||||||
|
- if Tendermint was going to use libp2p, how would libp2p's stability
|
||||||
|
guarantees (protocol, etc.) impact/constrain Tendermint's stability
|
||||||
|
guarantees?
|
||||||
|
|
||||||
|
- what kind of introspection does libp2p provide, and to what extent would
|
||||||
|
this change or constrain the kind of observability that Tendermint is able
|
||||||
|
to provide?
|
||||||
|
|
||||||
|
- how do efforts to select "the best" (healthy, close, well-behaving, etc.)
|
||||||
|
peers work out if Tendermint is not maintaining a local peer database?
|
||||||
|
|
||||||
|
- would adding additional higher level semantics (internal message passing,
|
||||||
|
request/response pairs, service discovery, etc.) facilitate removing some of
|
||||||
|
the direct linkages between constructs/components in the system and reduce
|
||||||
|
the need for Tendermint nodes to maintain state about its peers?
|
||||||
|
|
||||||
|
References
|
||||||
|
----------
|
||||||
|
|
||||||
|
- `Tracking Ticket for P2P Refactor Project <https://github.com/tendermint/tendermint/issues/5670>`_
|
||||||
|
- `ADR 61: P2P Refactor Scope <../architecture/adr-061-p2p-refactor-scope.md>`_
|
||||||
|
- `ADR 62: P2P Architecture and Abstraction <../architecture/adr-062-p2p-architecture.md>`_
|
||||||
35
docs/rfc/rfc-template.md
Normal file
35
docs/rfc/rfc-template.md
Normal file
@@ -0,0 +1,35 @@
|
|||||||
|
# RFC {RFC-NUMBER}: {TITLE}
|
||||||
|
|
||||||
|
## Changelog
|
||||||
|
|
||||||
|
- {date}: {changelog}
|
||||||
|
|
||||||
|
## Abstract
|
||||||
|
|
||||||
|
> A brief high-level synopsis of the topic of discussion for this RFC, ideally
|
||||||
|
> just a few sentences. This should help the reader quickly decide whether the
|
||||||
|
> rest of the discussion is relevant to their interest.
|
||||||
|
|
||||||
|
## Background
|
||||||
|
|
||||||
|
> Any context or orientation needed for a reader to understand and participate
|
||||||
|
> in the substance of the Discussion. If necessary, this section may include
|
||||||
|
> links to other documentation or sources rather than restating existing
|
||||||
|
> material, but should provide enough detail that the reader can tell what they
|
||||||
|
> need to read to be up-to-date.
|
||||||
|
|
||||||
|
### References
|
||||||
|
|
||||||
|
> Links to external materials needed to follow the discussion may be added here.
|
||||||
|
>
|
||||||
|
> In addition, if the discussion in a request for comments leads to any design
|
||||||
|
> decisions, it may be helpful to add links to the ADR documents here after the
|
||||||
|
> discussion has settled.
|
||||||
|
|
||||||
|
## Discussion
|
||||||
|
|
||||||
|
> This section contains the core of the discussion.
|
||||||
|
>
|
||||||
|
> There is no fixed format for this section, but ideally changes to this
|
||||||
|
> section should be updated before merging to reflect any discussion that took
|
||||||
|
> place on the PR that made those changes.
|
||||||
@@ -14,7 +14,7 @@ This section dives into the internals of Go-Tendermint.
|
|||||||
- [Subscribing to events](./subscription.md)
|
- [Subscribing to events](./subscription.md)
|
||||||
- [Block Structure](./block-structure.md)
|
- [Block Structure](./block-structure.md)
|
||||||
- [RPC](./rpc.md)
|
- [RPC](./rpc.md)
|
||||||
- [Fast Sync](./fast-sync.md)
|
- [Block Sync](./block-sync.md)
|
||||||
- [State Sync](./state-sync.md)
|
- [State Sync](./state-sync.md)
|
||||||
- [Mempool](./mempool.md)
|
- [Mempool](./mempool.md)
|
||||||
- [Light Client](./light-client.md)
|
- [Light Client](./light-client.md)
|
||||||
|
|||||||
@@ -2,7 +2,8 @@
|
|||||||
order: 10
|
order: 10
|
||||||
---
|
---
|
||||||
|
|
||||||
# Fast Sync
|
# Block Sync
|
||||||
|
*Formerly known as Fast Sync*
|
||||||
|
|
||||||
In a proof of work blockchain, syncing with the chain is the same
|
In a proof of work blockchain, syncing with the chain is the same
|
||||||
process as staying up-to-date with the consensus: download blocks, and
|
process as staying up-to-date with the consensus: download blocks, and
|
||||||
@@ -14,7 +15,7 @@ scratch can take a very long time. It's much faster to just download
|
|||||||
blocks and check the merkle tree of validators than to run the real-time
|
blocks and check the merkle tree of validators than to run the real-time
|
||||||
consensus gossip protocol.
|
consensus gossip protocol.
|
||||||
|
|
||||||
## Using Fast Sync
|
## Using Block Sync
|
||||||
|
|
||||||
To support faster syncing, Tendermint offers a `fast-sync` mode, which
|
To support faster syncing, Tendermint offers a `fast-sync` mode, which
|
||||||
is enabled by default, and can be toggled in the `config.toml` or via
|
is enabled by default, and can be toggled in the `config.toml` or via
|
||||||
@@ -22,26 +23,36 @@ is enabled by default, and can be toggled in the `config.toml` or via
|
|||||||
|
|
||||||
In this mode, the Tendermint daemon will sync hundreds of times faster
|
In this mode, the Tendermint daemon will sync hundreds of times faster
|
||||||
than if it used the real-time consensus process. Once caught up, the
|
than if it used the real-time consensus process. Once caught up, the
|
||||||
daemon will switch out of fast sync and into the normal consensus mode.
|
daemon will switch out of Block Sync and into the normal consensus mode.
|
||||||
After running for some time, the node is considered `caught up` if it
|
After running for some time, the node is considered `caught up` if it
|
||||||
has at least one peer and it's height is at least as high as the max
|
has at least one peer and its height is at least as high as the max
|
||||||
reported peer height. See [the IsCaughtUp
|
reported peer height. See [the IsCaughtUp
|
||||||
method](https://github.com/tendermint/tendermint/blob/b467515719e686e4678e6da4e102f32a491b85a0/blockchain/pool.go#L128).
|
method](https://github.com/tendermint/tendermint/blob/b467515719e686e4678e6da4e102f32a491b85a0/blockchain/pool.go#L128).
|
||||||
|
|
||||||
Note: There are three versions of fast sync. We recommend using v0 as v2 is still in beta.
|
Note: There are two versions of Block Sync. We recommend using v0 as v2 is still in beta.
|
||||||
If you would like to use a different version you can do so by changing the version in the `config.toml`:
|
If you would like to use a different version you can do so by changing the version in the `config.toml`:
|
||||||
|
|
||||||
```toml
|
```toml
|
||||||
#######################################################
|
#######################################################
|
||||||
### Fast Sync Configuration Connections ###
|
### Block Sync Configuration Connections ###
|
||||||
#######################################################
|
#######################################################
|
||||||
[fastsync]
|
[fastsync]
|
||||||
|
|
||||||
# Fast Sync version to use:
|
# Block Sync version to use:
|
||||||
# 1) "v0" (default) - the legacy fast sync implementation
|
# 1) "v0" (default) - the legacy Block Sync implementation
|
||||||
# 2) "v2" - complete redesign of v0, optimized for testability & readability
|
# 2) "v2" - complete redesign of v0, optimized for testability & readability
|
||||||
version = "v0"
|
version = "v0"
|
||||||
```
|
```
|
||||||
|
|
||||||
If we're lagging sufficiently, we should go back to fast syncing, but
|
If we're lagging sufficiently, we should go back to block syncing, but
|
||||||
this is an [open issue](https://github.com/tendermint/tendermint/issues/129).
|
this is an [open issue](https://github.com/tendermint/tendermint/issues/129).
|
||||||
|
|
||||||
|
## The Block Sync event
|
||||||
|
When the tendermint blockchain core launches, it might switch to the `block-sync`
|
||||||
|
mode to catch its state up to the current network best height. The core will emit
|
||||||
|
a block-sync event to expose the current status and the sync height. Once it has caught up to
|
||||||
|
the network best height, it will switch to the state sync mechanism and then emit
|
||||||
|
another event for exposing the block-sync `complete` status and the state `height`.
|
||||||
|
|
||||||
|
The user can query the events by subscribing to `EventQueryBlockSyncStatus`
|
||||||
|
Please check [types](https://pkg.go.dev/github.com/tendermint/tendermint/types?utm_source=godoc#pkg-constants) for the details.
|
||||||
@@ -4,8 +4,15 @@ order: 11
|
|||||||
|
|
||||||
# State Sync
|
# State Sync
|
||||||
|
|
||||||
With fast sync a node is downloading all of the data of an application from genesis and verifying it.
|
With block sync a node is downloading all of the data of an application from genesis and verifying it.
|
||||||
With state sync your node will download data related to the head or near the head of the chain and verify the data.
|
With state sync your node will download data related to the head or near the head of the chain and verify the data.
|
||||||
This leads to drastically shorter times for joining a network.
|
This leads to drastically shorter times for joining a network.
|
||||||
|
|
||||||
Information on how to configure state sync is located in the [nodes section](../nodes/state-sync.md)
|
Information on how to configure state sync is located in the [nodes section](../nodes/state-sync.md)
|
||||||
|
|
||||||
|
## Events
|
||||||
|
|
||||||
|
When a node starts with the statesync flag enabled in the config file, it will emit two events: one upon starting statesync and the other upon completion.
|
||||||
|
|
||||||
|
The user can query the events by subscribing `EventQueryStateSyncStatus`
|
||||||
|
Please check [types](https://pkg.go.dev/github.com/tendermint/tendermint/types?utm_source=godoc#pkg-constants) for the details.
|
||||||
@@ -36,7 +36,7 @@ more information on query syntax and other options.
|
|||||||
|
|
||||||
You can also use tags, given you had included them into DeliverTx
|
You can also use tags, given you had included them into DeliverTx
|
||||||
response, to query transaction results. See [Indexing
|
response, to query transaction results. See [Indexing
|
||||||
transactions](./indexing-transactions.md) for details.
|
transactions](../app-dev/indexing-transactions.md) for details.
|
||||||
|
|
||||||
## ValidatorSetUpdates
|
## ValidatorSetUpdates
|
||||||
|
|
||||||
|
|||||||
@@ -552,8 +552,7 @@ To make a Tendermint network that can tolerate one of the validators
|
|||||||
failing, you need at least four validator nodes (e.g., 2/3).
|
failing, you need at least four validator nodes (e.g., 2/3).
|
||||||
|
|
||||||
Updating validators in a live network is supported but must be
|
Updating validators in a live network is supported but must be
|
||||||
explicitly programmed by the application developer. See the [application
|
explicitly programmed by the application developer.
|
||||||
developers guide](../app-dev/app-development.md) for more details.
|
|
||||||
|
|
||||||
### Local Network
|
### Local Network
|
||||||
|
|
||||||
|
|||||||
@@ -62,3 +62,30 @@ given destination directory. Each archive will contain:
|
|||||||
|
|
||||||
Note: goroutine.out and heap.out will only be written if a profile address is
|
Note: goroutine.out and heap.out will only be written if a profile address is
|
||||||
provided and is operational. This command is blocking and will log any error.
|
provided and is operational. This command is blocking and will log any error.
|
||||||
|
|
||||||
|
## Tendermint Inspect
|
||||||
|
|
||||||
|
Tendermint includes an `inspect` command for querying Tendermint's state store and block
|
||||||
|
store over Tendermint RPC.
|
||||||
|
|
||||||
|
When the Tendermint consensus engine detects inconsistent state, it will crash the
|
||||||
|
entire Tendermint process.
|
||||||
|
While in this inconsistent state, a node running Tendermint's consensus engine will not start up.
|
||||||
|
The `inspect` command runs only a subset of Tendermint's RPC endpoints for querying the block store
|
||||||
|
and state store.
|
||||||
|
`inspect` allows operators to query a read-only view of the stage.
|
||||||
|
`inspect` does not run the consensus engine at all and can therefore be used to debug
|
||||||
|
processes that have crashed due to inconsistent state.
|
||||||
|
|
||||||
|
|
||||||
|
To start the `inspect` process, run
|
||||||
|
```bash
|
||||||
|
tendermint inspect
|
||||||
|
```
|
||||||
|
|
||||||
|
### RPC endpoints
|
||||||
|
The list of available RPC endpoints can be found by making a request to the RPC port.
|
||||||
|
For an `inspect` process running on `127.0.0.1:26657`, navigate your browser to
|
||||||
|
`http://127.0.0.1:26657/` to retrieve the list of enabled RPC endpoints.
|
||||||
|
|
||||||
|
Additional information on the Tendermint RPC endpoints can be found in the [rpc documentation](https://docs.tendermint.com/master/rpc).
|
||||||
|
|||||||
@@ -64,13 +64,42 @@ It won’t kill the node, but it will gather all of the above data and package i
|
|||||||
|
|
||||||
At this point, depending on how severe the degradation is, you may want to restart the process.
|
At this point, depending on how severe the degradation is, you may want to restart the process.
|
||||||
|
|
||||||
|
## Tendermint Inspect
|
||||||
|
|
||||||
|
What if the Tendermint node will not start up due to inconsistent consensus state?
|
||||||
|
|
||||||
|
When a node running the Tendermint consensus engine detects an inconsistent state
|
||||||
|
it will crash the entire Tendermint process.
|
||||||
|
The Tendermint consensus engine cannot be run in this inconsistent state and the so node
|
||||||
|
will fail to start up as a result.
|
||||||
|
The Tendermint RPC server can provide valuable information for debugging in this situation.
|
||||||
|
The Tendermint `inspect` command will run a subset of the Tendermint RPC server
|
||||||
|
that is useful for debugging inconsistent state.
|
||||||
|
|
||||||
|
### Running inspect
|
||||||
|
|
||||||
|
Start up the `inspect` tool on the machine where Tendermint crashed using:
|
||||||
|
```bash
|
||||||
|
tendermint inspect --home=</path/to/app.d>
|
||||||
|
```
|
||||||
|
|
||||||
|
`inspect` will use the data directory specified in your Tendermint configuration file.
|
||||||
|
`inspect` will also run the RPC server at the address specified in your Tendermint configuration file.
|
||||||
|
|
||||||
|
### Using inspect
|
||||||
|
|
||||||
|
With the `inspect` server running, you can access RPC endpoints that are critically important
|
||||||
|
for debugging.
|
||||||
|
Calling the `/status`, `/consensus_state` and `/dump_consensus_state` RPC endpoint
|
||||||
|
will return useful information about the Tendermint consensus state.
|
||||||
|
|
||||||
## Outro
|
## Outro
|
||||||
|
|
||||||
We’re hoping that the `tendermint debug` subcommand will become de facto the first response to any accidents.
|
We’re hoping that these Tendermint tools will become de facto the first response for any accidents.
|
||||||
|
|
||||||
Let us know what your experience has been so far! Have you had a chance to try `tendermint debug` yet?
|
Let us know what your experience has been so far! Have you had a chance to try `tendermint debug` or `tendermint inspect` yet?
|
||||||
|
|
||||||
Join our chat, where we discuss the current issues and future improvements.
|
Join our [discord chat](https://discord.gg/vcExX9T), where we discuss the current issues and future improvements.
|
||||||
|
|
||||||
—
|
—
|
||||||
|
|
||||||
|
|||||||
@@ -388,7 +388,6 @@ func main() {
|
|||||||
c := make(chan os.Signal, 1)
|
c := make(chan os.Signal, 1)
|
||||||
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
|
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
|
||||||
<-c
|
<-c
|
||||||
os.Exit(0)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func newTendermint(app abci.Application, configFile string) (*nm.Node, error) {
|
func newTendermint(app abci.Application, configFile string) (*nm.Node, error) {
|
||||||
@@ -564,7 +563,6 @@ defer func() {
|
|||||||
c := make(chan os.Signal, 1)
|
c := make(chan os.Signal, 1)
|
||||||
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
|
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
|
||||||
<-c
|
<-c
|
||||||
os.Exit(0)
|
|
||||||
```
|
```
|
||||||
|
|
||||||
## 1.5 Getting Up and Running
|
## 1.5 Getting Up and Running
|
||||||
|
|||||||
23
go.mod
23
go.mod
@@ -3,40 +3,43 @@ module github.com/tendermint/tendermint
|
|||||||
go 1.16
|
go 1.16
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/BurntSushi/toml v0.3.1
|
github.com/BurntSushi/toml v0.4.1
|
||||||
github.com/Masterminds/squirrel v1.5.0
|
github.com/Masterminds/squirrel v1.5.0
|
||||||
github.com/Workiva/go-datastructures v1.0.53
|
github.com/Workiva/go-datastructures v1.0.53
|
||||||
github.com/adlio/schema v1.1.13
|
github.com/adlio/schema v1.1.13
|
||||||
github.com/btcsuite/btcd v0.22.0-beta
|
github.com/btcsuite/btcd v0.22.0-beta
|
||||||
github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce
|
github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce
|
||||||
github.com/fortytw2/leaktest v1.3.0
|
github.com/fortytw2/leaktest v1.3.0
|
||||||
github.com/go-kit/kit v0.10.0
|
github.com/go-kit/kit v0.11.0
|
||||||
github.com/gogo/protobuf v1.3.2
|
github.com/gogo/protobuf v1.3.2
|
||||||
github.com/golang/protobuf v1.5.2
|
github.com/golang/protobuf v1.5.2
|
||||||
|
github.com/golangci/golangci-lint v1.42.0
|
||||||
github.com/google/orderedcode v0.0.1
|
github.com/google/orderedcode v0.0.1
|
||||||
github.com/google/uuid v1.2.0
|
github.com/google/uuid v1.3.0
|
||||||
github.com/gorilla/websocket v1.4.2
|
github.com/gorilla/websocket v1.4.2
|
||||||
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
|
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
|
||||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
|
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
|
||||||
github.com/lib/pq v1.10.2
|
github.com/lib/pq v1.10.2
|
||||||
github.com/libp2p/go-buffer-pool v0.0.2
|
github.com/libp2p/go-buffer-pool v0.0.2
|
||||||
github.com/minio/highwayhash v1.0.2
|
github.com/minio/highwayhash v1.0.2
|
||||||
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
|
github.com/mroth/weightedrand v0.4.1
|
||||||
github.com/oasisprotocol/curve25519-voi v0.0.0-20210609091139-0a56a4bca00b
|
github.com/oasisprotocol/curve25519-voi v0.0.0-20210609091139-0a56a4bca00b
|
||||||
github.com/ory/dockertest v3.3.5+incompatible
|
github.com/ory/dockertest v3.3.5+incompatible
|
||||||
github.com/pkg/errors v0.9.1
|
|
||||||
github.com/prometheus/client_golang v1.11.0
|
github.com/prometheus/client_golang v1.11.0
|
||||||
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0
|
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0
|
||||||
github.com/rs/cors v1.8.0
|
github.com/rs/cors v1.8.0
|
||||||
github.com/rs/zerolog v1.23.0
|
github.com/rs/zerolog v1.24.0
|
||||||
github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa
|
github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa
|
||||||
github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa
|
github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa
|
||||||
github.com/spf13/cobra v1.2.0
|
github.com/spf13/cobra v1.2.1
|
||||||
github.com/spf13/viper v1.8.1
|
github.com/spf13/viper v1.8.1
|
||||||
github.com/stretchr/testify v1.7.0
|
github.com/stretchr/testify v1.7.0
|
||||||
github.com/tendermint/tm-db v0.6.4
|
github.com/tendermint/tm-db v0.6.4
|
||||||
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad
|
github.com/vektra/mockery/v2 v2.9.0
|
||||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4
|
golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a
|
||||||
google.golang.org/grpc v1.39.0
|
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781
|
||||||
|
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
|
||||||
|
google.golang.org/grpc v1.40.0
|
||||||
gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b // indirect
|
gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b // indirect
|
||||||
|
pgregory.net/rapid v0.4.7
|
||||||
)
|
)
|
||||||
|
|||||||
36
inspect/doc.go
Normal file
36
inspect/doc.go
Normal file
@@ -0,0 +1,36 @@
|
|||||||
|
/*
|
||||||
|
Package inspect provides a tool for investigating the state of a
|
||||||
|
failed Tendermint node.
|
||||||
|
|
||||||
|
This package provides the Inspector type. The Inspector type runs a subset of the Tendermint
|
||||||
|
RPC endpoints that are useful for debugging issues with Tendermint consensus.
|
||||||
|
|
||||||
|
When a node running the Tendermint consensus engine detects an inconsistent consensus state,
|
||||||
|
the entire node will crash. The Tendermint consensus engine cannot run in this
|
||||||
|
inconsistent state so the node will not be able to start up again.
|
||||||
|
|
||||||
|
The RPC endpoints provided by the Inspector type allow for a node operator to inspect
|
||||||
|
the block store and state store to better understand what may have caused the inconsistent state.
|
||||||
|
|
||||||
|
|
||||||
|
The Inspector type's lifecycle is controlled by a context.Context
|
||||||
|
ins := inspect.NewFromConfig(rpcConfig)
|
||||||
|
ctx, cancelFunc:= context.WithCancel(context.Background())
|
||||||
|
|
||||||
|
// Run blocks until the Inspector server is shut down.
|
||||||
|
go ins.Run(ctx)
|
||||||
|
...
|
||||||
|
|
||||||
|
// calling the cancel function will stop the running inspect server
|
||||||
|
cancelFunc()
|
||||||
|
|
||||||
|
Inspector serves its RPC endpoints on the address configured in the RPC configuration
|
||||||
|
|
||||||
|
rpcConfig.ListenAddress = "tcp://127.0.0.1:26657"
|
||||||
|
ins := inspect.NewFromConfig(rpcConfig)
|
||||||
|
go ins.Run(ctx)
|
||||||
|
|
||||||
|
The list of available RPC endpoints can then be viewed by navigating to
|
||||||
|
http://127.0.0.1:26657/ in the web browser.
|
||||||
|
*/
|
||||||
|
package inspect
|
||||||
149
inspect/inspect.go
Normal file
149
inspect/inspect.go
Normal file
@@ -0,0 +1,149 @@
|
|||||||
|
package inspect
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"net"
|
||||||
|
|
||||||
|
"github.com/tendermint/tendermint/config"
|
||||||
|
"github.com/tendermint/tendermint/inspect/rpc"
|
||||||
|
"github.com/tendermint/tendermint/libs/log"
|
||||||
|
tmstrings "github.com/tendermint/tendermint/libs/strings"
|
||||||
|
rpccore "github.com/tendermint/tendermint/rpc/core"
|
||||||
|
"github.com/tendermint/tendermint/state"
|
||||||
|
"github.com/tendermint/tendermint/state/indexer"
|
||||||
|
"github.com/tendermint/tendermint/state/indexer/sink"
|
||||||
|
"github.com/tendermint/tendermint/store"
|
||||||
|
"github.com/tendermint/tendermint/types"
|
||||||
|
|
||||||
|
"golang.org/x/sync/errgroup"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Inspector manages an RPC service that exports methods to debug a failed node.
|
||||||
|
// After a node shuts down due to a consensus failure, it will no longer start
|
||||||
|
// up its state cannot easily be inspected. An Inspector value provides a similar interface
|
||||||
|
// to the node, using the underlying Tendermint data stores, without bringing up
|
||||||
|
// any other components. A caller can query the Inspector service to inspect the
|
||||||
|
// persisted state and debug the failure.
|
||||||
|
type Inspector struct {
|
||||||
|
routes rpccore.RoutesMap
|
||||||
|
|
||||||
|
config *config.RPCConfig
|
||||||
|
|
||||||
|
indexerService *indexer.Service
|
||||||
|
eventBus *types.EventBus
|
||||||
|
logger log.Logger
|
||||||
|
}
|
||||||
|
|
||||||
|
// New returns an Inspector that serves RPC on the specified BlockStore and StateStore.
|
||||||
|
// The Inspector type does not modify the state or block stores.
|
||||||
|
// The sinks are used to enable block and transaction querying via the RPC server.
|
||||||
|
// The caller is responsible for starting and stopping the Inspector service.
|
||||||
|
///
|
||||||
|
//nolint:lll
|
||||||
|
func New(cfg *config.RPCConfig, bs state.BlockStore, ss state.Store, es []indexer.EventSink, logger log.Logger) *Inspector {
|
||||||
|
routes := rpc.Routes(*cfg, ss, bs, es, logger)
|
||||||
|
eb := types.NewEventBus()
|
||||||
|
eb.SetLogger(logger.With("module", "events"))
|
||||||
|
is := indexer.NewIndexerService(es, eb)
|
||||||
|
is.SetLogger(logger.With("module", "txindex"))
|
||||||
|
return &Inspector{
|
||||||
|
routes: routes,
|
||||||
|
config: cfg,
|
||||||
|
logger: logger,
|
||||||
|
eventBus: eb,
|
||||||
|
indexerService: is,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewFromConfig constructs an Inspector using the values defined in the passed in config.
|
||||||
|
func NewFromConfig(cfg *config.Config) (*Inspector, error) {
|
||||||
|
bsDB, err := config.DefaultDBProvider(&config.DBContext{ID: "blockstore", Config: cfg})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
bs := store.NewBlockStore(bsDB)
|
||||||
|
sDB, err := config.DefaultDBProvider(&config.DBContext{ID: "state", Config: cfg})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
genDoc, err := types.GenesisDocFromFile(cfg.GenesisFile())
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
sinks, err := sink.EventSinksFromConfig(cfg, config.DefaultDBProvider, genDoc.ChainID)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
logger := log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
|
||||||
|
ss := state.NewStore(sDB)
|
||||||
|
return New(cfg.RPC, bs, ss, sinks, logger), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Run starts the Inspector servers and blocks until the servers shut down. The passed
|
||||||
|
// in context is used to control the lifecycle of the servers.
|
||||||
|
func (ins *Inspector) Run(ctx context.Context) error {
|
||||||
|
err := ins.eventBus.Start()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error starting event bus: %s", err)
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
err := ins.eventBus.Stop()
|
||||||
|
if err != nil {
|
||||||
|
ins.logger.Error("event bus stopped with error", "err", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
err = ins.indexerService.Start()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error starting indexer service: %s", err)
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
err := ins.indexerService.Stop()
|
||||||
|
if err != nil {
|
||||||
|
ins.logger.Error("indexer service stopped with error", "err", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
return startRPCServers(ctx, ins.config, ins.logger, ins.routes)
|
||||||
|
}
|
||||||
|
|
||||||
|
func startRPCServers(ctx context.Context, cfg *config.RPCConfig, logger log.Logger, routes rpccore.RoutesMap) error {
|
||||||
|
g, tctx := errgroup.WithContext(ctx)
|
||||||
|
listenAddrs := tmstrings.SplitAndTrimEmpty(cfg.ListenAddress, ",", " ")
|
||||||
|
rh := rpc.Handler(cfg, routes, logger)
|
||||||
|
for _, listenerAddr := range listenAddrs {
|
||||||
|
server := rpc.Server{
|
||||||
|
Logger: logger,
|
||||||
|
Config: cfg,
|
||||||
|
Handler: rh,
|
||||||
|
Addr: listenerAddr,
|
||||||
|
}
|
||||||
|
if cfg.IsTLSEnabled() {
|
||||||
|
keyFile := cfg.KeyFile()
|
||||||
|
certFile := cfg.CertFile()
|
||||||
|
listenerAddr := listenerAddr
|
||||||
|
g.Go(func() error {
|
||||||
|
logger.Info("RPC HTTPS server starting", "address", listenerAddr,
|
||||||
|
"certfile", certFile, "keyfile", keyFile)
|
||||||
|
err := server.ListenAndServeTLS(tctx, certFile, keyFile)
|
||||||
|
if !errors.Is(err, net.ErrClosed) {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
logger.Info("RPC HTTPS server stopped", "address", listenerAddr)
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
} else {
|
||||||
|
listenerAddr := listenerAddr
|
||||||
|
g.Go(func() error {
|
||||||
|
logger.Info("RPC HTTP server starting", "address", listenerAddr)
|
||||||
|
err := server.ListenAndServe(tctx)
|
||||||
|
if !errors.Is(err, net.ErrClosed) {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
logger.Info("RPC HTTP server stopped", "address", listenerAddr)
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return g.Wait()
|
||||||
|
}
|
||||||
583
inspect/inspect_test.go
Normal file
583
inspect/inspect_test.go
Normal file
@@ -0,0 +1,583 @@
|
|||||||
|
package inspect_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"net"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/fortytw2/leaktest"
|
||||||
|
"github.com/stretchr/testify/mock"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
abcitypes "github.com/tendermint/tendermint/abci/types"
|
||||||
|
"github.com/tendermint/tendermint/config"
|
||||||
|
"github.com/tendermint/tendermint/inspect"
|
||||||
|
"github.com/tendermint/tendermint/libs/log"
|
||||||
|
"github.com/tendermint/tendermint/libs/pubsub/query"
|
||||||
|
"github.com/tendermint/tendermint/proto/tendermint/state"
|
||||||
|
httpclient "github.com/tendermint/tendermint/rpc/client/http"
|
||||||
|
"github.com/tendermint/tendermint/state/indexer"
|
||||||
|
indexermocks "github.com/tendermint/tendermint/state/indexer/mocks"
|
||||||
|
statemocks "github.com/tendermint/tendermint/state/mocks"
|
||||||
|
"github.com/tendermint/tendermint/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestInspectConstructor(t *testing.T) {
|
||||||
|
cfg := config.ResetTestRoot("test")
|
||||||
|
t.Cleanup(leaktest.Check(t))
|
||||||
|
defer func() { _ = os.RemoveAll(cfg.RootDir) }()
|
||||||
|
t.Run("from config", func(t *testing.T) {
|
||||||
|
d, err := inspect.NewFromConfig(cfg)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.NotNil(t, d)
|
||||||
|
})
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestInspectRun(t *testing.T) {
|
||||||
|
cfg := config.ResetTestRoot("test")
|
||||||
|
t.Cleanup(leaktest.Check(t))
|
||||||
|
defer func() { _ = os.RemoveAll(cfg.RootDir) }()
|
||||||
|
t.Run("from config", func(t *testing.T) {
|
||||||
|
d, err := inspect.NewFromConfig(cfg)
|
||||||
|
require.NoError(t, err)
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
stoppedWG := &sync.WaitGroup{}
|
||||||
|
stoppedWG.Add(1)
|
||||||
|
go func() {
|
||||||
|
require.NoError(t, d.Run(ctx))
|
||||||
|
stoppedWG.Done()
|
||||||
|
}()
|
||||||
|
cancel()
|
||||||
|
stoppedWG.Wait()
|
||||||
|
})
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBlock(t *testing.T) {
|
||||||
|
testHeight := int64(1)
|
||||||
|
testBlock := new(types.Block)
|
||||||
|
testBlock.Header.Height = testHeight
|
||||||
|
testBlock.Header.LastCommitHash = []byte("test hash")
|
||||||
|
stateStoreMock := &statemocks.Store{}
|
||||||
|
|
||||||
|
blockStoreMock := &statemocks.BlockStore{}
|
||||||
|
blockStoreMock.On("Height").Return(testHeight)
|
||||||
|
blockStoreMock.On("Base").Return(int64(0))
|
||||||
|
blockStoreMock.On("LoadBlockMeta", testHeight).Return(&types.BlockMeta{})
|
||||||
|
blockStoreMock.On("LoadBlock", testHeight).Return(testBlock)
|
||||||
|
eventSinkMock := &indexermocks.EventSink{}
|
||||||
|
eventSinkMock.On("Stop").Return(nil)
|
||||||
|
|
||||||
|
rpcConfig := config.TestRPCConfig()
|
||||||
|
l := log.TestingLogger()
|
||||||
|
d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l)
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
wg := &sync.WaitGroup{}
|
||||||
|
wg.Add(1)
|
||||||
|
|
||||||
|
startedWG := &sync.WaitGroup{}
|
||||||
|
startedWG.Add(1)
|
||||||
|
go func() {
|
||||||
|
startedWG.Done()
|
||||||
|
defer wg.Done()
|
||||||
|
require.NoError(t, d.Run(ctx))
|
||||||
|
}()
|
||||||
|
// FIXME: used to induce context switch.
|
||||||
|
// Determine more deterministic method for prompting a context switch
|
||||||
|
startedWG.Wait()
|
||||||
|
requireConnect(t, rpcConfig.ListenAddress, 20)
|
||||||
|
cli, err := httpclient.New(rpcConfig.ListenAddress)
|
||||||
|
require.NoError(t, err)
|
||||||
|
resultBlock, err := cli.Block(context.Background(), &testHeight)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, testBlock.Height, resultBlock.Block.Height)
|
||||||
|
require.Equal(t, testBlock.LastCommitHash, resultBlock.Block.LastCommitHash)
|
||||||
|
cancel()
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
blockStoreMock.AssertExpectations(t)
|
||||||
|
stateStoreMock.AssertExpectations(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestTxSearch(t *testing.T) {
|
||||||
|
testHash := []byte("test")
|
||||||
|
testTx := []byte("tx")
|
||||||
|
testQuery := fmt.Sprintf("tx.hash='%s'", string(testHash))
|
||||||
|
testTxResult := &abcitypes.TxResult{
|
||||||
|
Height: 1,
|
||||||
|
Index: 100,
|
||||||
|
Tx: testTx,
|
||||||
|
}
|
||||||
|
|
||||||
|
stateStoreMock := &statemocks.Store{}
|
||||||
|
blockStoreMock := &statemocks.BlockStore{}
|
||||||
|
eventSinkMock := &indexermocks.EventSink{}
|
||||||
|
eventSinkMock.On("Stop").Return(nil)
|
||||||
|
eventSinkMock.On("Type").Return(indexer.KV)
|
||||||
|
eventSinkMock.On("SearchTxEvents", mock.Anything,
|
||||||
|
mock.MatchedBy(func(q *query.Query) bool { return testQuery == q.String() })).
|
||||||
|
Return([]*abcitypes.TxResult{testTxResult}, nil)
|
||||||
|
|
||||||
|
rpcConfig := config.TestRPCConfig()
|
||||||
|
l := log.TestingLogger()
|
||||||
|
d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l)
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
wg := &sync.WaitGroup{}
|
||||||
|
wg.Add(1)
|
||||||
|
|
||||||
|
startedWG := &sync.WaitGroup{}
|
||||||
|
startedWG.Add(1)
|
||||||
|
go func() {
|
||||||
|
startedWG.Done()
|
||||||
|
defer wg.Done()
|
||||||
|
require.NoError(t, d.Run(ctx))
|
||||||
|
}()
|
||||||
|
// FIXME: used to induce context switch.
|
||||||
|
// Determine more deterministic method for prompting a context switch
|
||||||
|
startedWG.Wait()
|
||||||
|
requireConnect(t, rpcConfig.ListenAddress, 20)
|
||||||
|
cli, err := httpclient.New(rpcConfig.ListenAddress)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
var page = 1
|
||||||
|
resultTxSearch, err := cli.TxSearch(context.Background(), testQuery, false, &page, &page, "")
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Len(t, resultTxSearch.Txs, 1)
|
||||||
|
require.Equal(t, types.Tx(testTx), resultTxSearch.Txs[0].Tx)
|
||||||
|
|
||||||
|
cancel()
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
eventSinkMock.AssertExpectations(t)
|
||||||
|
stateStoreMock.AssertExpectations(t)
|
||||||
|
blockStoreMock.AssertExpectations(t)
|
||||||
|
}
|
||||||
|
func TestTx(t *testing.T) {
|
||||||
|
testHash := []byte("test")
|
||||||
|
testTx := []byte("tx")
|
||||||
|
|
||||||
|
stateStoreMock := &statemocks.Store{}
|
||||||
|
blockStoreMock := &statemocks.BlockStore{}
|
||||||
|
eventSinkMock := &indexermocks.EventSink{}
|
||||||
|
eventSinkMock.On("Stop").Return(nil)
|
||||||
|
eventSinkMock.On("Type").Return(indexer.KV)
|
||||||
|
eventSinkMock.On("GetTxByHash", testHash).Return(&abcitypes.TxResult{
|
||||||
|
Tx: testTx,
|
||||||
|
}, nil)
|
||||||
|
|
||||||
|
rpcConfig := config.TestRPCConfig()
|
||||||
|
l := log.TestingLogger()
|
||||||
|
d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l)
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
wg := &sync.WaitGroup{}
|
||||||
|
wg.Add(1)
|
||||||
|
|
||||||
|
startedWG := &sync.WaitGroup{}
|
||||||
|
startedWG.Add(1)
|
||||||
|
go func() {
|
||||||
|
startedWG.Done()
|
||||||
|
defer wg.Done()
|
||||||
|
require.NoError(t, d.Run(ctx))
|
||||||
|
}()
|
||||||
|
// FIXME: used to induce context switch.
|
||||||
|
// Determine more deterministic method for prompting a context switch
|
||||||
|
startedWG.Wait()
|
||||||
|
requireConnect(t, rpcConfig.ListenAddress, 20)
|
||||||
|
cli, err := httpclient.New(rpcConfig.ListenAddress)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
res, err := cli.Tx(context.Background(), testHash, false)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, types.Tx(testTx), res.Tx)
|
||||||
|
|
||||||
|
cancel()
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
eventSinkMock.AssertExpectations(t)
|
||||||
|
stateStoreMock.AssertExpectations(t)
|
||||||
|
blockStoreMock.AssertExpectations(t)
|
||||||
|
}
|
||||||
|
func TestConsensusParams(t *testing.T) {
|
||||||
|
testHeight := int64(1)
|
||||||
|
testMaxGas := int64(55)
|
||||||
|
stateStoreMock := &statemocks.Store{}
|
||||||
|
blockStoreMock := &statemocks.BlockStore{}
|
||||||
|
blockStoreMock.On("Height").Return(testHeight)
|
||||||
|
blockStoreMock.On("Base").Return(int64(0))
|
||||||
|
stateStoreMock.On("LoadConsensusParams", testHeight).Return(types.ConsensusParams{
|
||||||
|
Block: types.BlockParams{
|
||||||
|
MaxGas: testMaxGas,
|
||||||
|
},
|
||||||
|
}, nil)
|
||||||
|
eventSinkMock := &indexermocks.EventSink{}
|
||||||
|
eventSinkMock.On("Stop").Return(nil)
|
||||||
|
rpcConfig := config.TestRPCConfig()
|
||||||
|
l := log.TestingLogger()
|
||||||
|
d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
wg := &sync.WaitGroup{}
|
||||||
|
wg.Add(1)
|
||||||
|
|
||||||
|
startedWG := &sync.WaitGroup{}
|
||||||
|
startedWG.Add(1)
|
||||||
|
go func() {
|
||||||
|
startedWG.Done()
|
||||||
|
defer wg.Done()
|
||||||
|
require.NoError(t, d.Run(ctx))
|
||||||
|
}()
|
||||||
|
// FIXME: used to induce context switch.
|
||||||
|
// Determine more deterministic method for prompting a context switch
|
||||||
|
startedWG.Wait()
|
||||||
|
requireConnect(t, rpcConfig.ListenAddress, 20)
|
||||||
|
cli, err := httpclient.New(rpcConfig.ListenAddress)
|
||||||
|
require.NoError(t, err)
|
||||||
|
params, err := cli.ConsensusParams(context.Background(), &testHeight)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, params.ConsensusParams.Block.MaxGas, testMaxGas)
|
||||||
|
|
||||||
|
cancel()
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
blockStoreMock.AssertExpectations(t)
|
||||||
|
stateStoreMock.AssertExpectations(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBlockResults(t *testing.T) {
|
||||||
|
testHeight := int64(1)
|
||||||
|
testGasUsed := int64(100)
|
||||||
|
stateStoreMock := &statemocks.Store{}
|
||||||
|
// tmstate "github.com/tendermint/tendermint/proto/tendermint/state"
|
||||||
|
stateStoreMock.On("LoadABCIResponses", testHeight).Return(&state.ABCIResponses{
|
||||||
|
DeliverTxs: []*abcitypes.ResponseDeliverTx{
|
||||||
|
{
|
||||||
|
GasUsed: testGasUsed,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
EndBlock: &abcitypes.ResponseEndBlock{},
|
||||||
|
BeginBlock: &abcitypes.ResponseBeginBlock{},
|
||||||
|
}, nil)
|
||||||
|
blockStoreMock := &statemocks.BlockStore{}
|
||||||
|
blockStoreMock.On("Base").Return(int64(0))
|
||||||
|
blockStoreMock.On("Height").Return(testHeight)
|
||||||
|
eventSinkMock := &indexermocks.EventSink{}
|
||||||
|
eventSinkMock.On("Stop").Return(nil)
|
||||||
|
rpcConfig := config.TestRPCConfig()
|
||||||
|
l := log.TestingLogger()
|
||||||
|
d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
wg := &sync.WaitGroup{}
|
||||||
|
wg.Add(1)
|
||||||
|
|
||||||
|
startedWG := &sync.WaitGroup{}
|
||||||
|
startedWG.Add(1)
|
||||||
|
go func() {
|
||||||
|
startedWG.Done()
|
||||||
|
defer wg.Done()
|
||||||
|
require.NoError(t, d.Run(ctx))
|
||||||
|
}()
|
||||||
|
// FIXME: used to induce context switch.
|
||||||
|
// Determine more deterministic method for prompting a context switch
|
||||||
|
startedWG.Wait()
|
||||||
|
requireConnect(t, rpcConfig.ListenAddress, 20)
|
||||||
|
cli, err := httpclient.New(rpcConfig.ListenAddress)
|
||||||
|
require.NoError(t, err)
|
||||||
|
res, err := cli.BlockResults(context.Background(), &testHeight)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, res.TotalGasUsed, testGasUsed)
|
||||||
|
|
||||||
|
cancel()
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
blockStoreMock.AssertExpectations(t)
|
||||||
|
stateStoreMock.AssertExpectations(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCommit(t *testing.T) {
|
||||||
|
testHeight := int64(1)
|
||||||
|
testRound := int32(101)
|
||||||
|
stateStoreMock := &statemocks.Store{}
|
||||||
|
blockStoreMock := &statemocks.BlockStore{}
|
||||||
|
blockStoreMock.On("Base").Return(int64(0))
|
||||||
|
blockStoreMock.On("Height").Return(testHeight)
|
||||||
|
blockStoreMock.On("LoadBlockMeta", testHeight).Return(&types.BlockMeta{}, nil)
|
||||||
|
blockStoreMock.On("LoadSeenCommit").Return(&types.Commit{
|
||||||
|
Height: testHeight,
|
||||||
|
Round: testRound,
|
||||||
|
}, nil)
|
||||||
|
eventSinkMock := &indexermocks.EventSink{}
|
||||||
|
eventSinkMock.On("Stop").Return(nil)
|
||||||
|
rpcConfig := config.TestRPCConfig()
|
||||||
|
l := log.TestingLogger()
|
||||||
|
d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
wg := &sync.WaitGroup{}
|
||||||
|
wg.Add(1)
|
||||||
|
|
||||||
|
startedWG := &sync.WaitGroup{}
|
||||||
|
startedWG.Add(1)
|
||||||
|
go func() {
|
||||||
|
startedWG.Done()
|
||||||
|
defer wg.Done()
|
||||||
|
require.NoError(t, d.Run(ctx))
|
||||||
|
}()
|
||||||
|
// FIXME: used to induce context switch.
|
||||||
|
// Determine more deterministic method for prompting a context switch
|
||||||
|
startedWG.Wait()
|
||||||
|
requireConnect(t, rpcConfig.ListenAddress, 20)
|
||||||
|
cli, err := httpclient.New(rpcConfig.ListenAddress)
|
||||||
|
require.NoError(t, err)
|
||||||
|
res, err := cli.Commit(context.Background(), &testHeight)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.NotNil(t, res)
|
||||||
|
require.Equal(t, res.SignedHeader.Commit.Round, testRound)
|
||||||
|
|
||||||
|
cancel()
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
blockStoreMock.AssertExpectations(t)
|
||||||
|
stateStoreMock.AssertExpectations(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBlockByHash(t *testing.T) {
|
||||||
|
testHeight := int64(1)
|
||||||
|
testHash := []byte("test hash")
|
||||||
|
testBlock := new(types.Block)
|
||||||
|
testBlock.Header.Height = testHeight
|
||||||
|
testBlock.Header.LastCommitHash = testHash
|
||||||
|
stateStoreMock := &statemocks.Store{}
|
||||||
|
blockStoreMock := &statemocks.BlockStore{}
|
||||||
|
blockStoreMock.On("LoadBlockMeta", testHeight).Return(&types.BlockMeta{
|
||||||
|
BlockID: types.BlockID{
|
||||||
|
Hash: testHash,
|
||||||
|
},
|
||||||
|
Header: types.Header{
|
||||||
|
Height: testHeight,
|
||||||
|
},
|
||||||
|
}, nil)
|
||||||
|
blockStoreMock.On("LoadBlockByHash", testHash).Return(testBlock, nil)
|
||||||
|
eventSinkMock := &indexermocks.EventSink{}
|
||||||
|
eventSinkMock.On("Stop").Return(nil)
|
||||||
|
rpcConfig := config.TestRPCConfig()
|
||||||
|
l := log.TestingLogger()
|
||||||
|
d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
wg := &sync.WaitGroup{}
|
||||||
|
wg.Add(1)
|
||||||
|
|
||||||
|
startedWG := &sync.WaitGroup{}
|
||||||
|
startedWG.Add(1)
|
||||||
|
go func() {
|
||||||
|
startedWG.Done()
|
||||||
|
defer wg.Done()
|
||||||
|
require.NoError(t, d.Run(ctx))
|
||||||
|
}()
|
||||||
|
// FIXME: used to induce context switch.
|
||||||
|
// Determine more deterministic method for prompting a context switch
|
||||||
|
startedWG.Wait()
|
||||||
|
requireConnect(t, rpcConfig.ListenAddress, 20)
|
||||||
|
cli, err := httpclient.New(rpcConfig.ListenAddress)
|
||||||
|
require.NoError(t, err)
|
||||||
|
res, err := cli.BlockByHash(context.Background(), testHash)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.NotNil(t, res)
|
||||||
|
require.Equal(t, []byte(res.BlockID.Hash), testHash)
|
||||||
|
|
||||||
|
cancel()
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
blockStoreMock.AssertExpectations(t)
|
||||||
|
stateStoreMock.AssertExpectations(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBlockchain(t *testing.T) {
|
||||||
|
testHeight := int64(1)
|
||||||
|
testBlock := new(types.Block)
|
||||||
|
testBlockHash := []byte("test hash")
|
||||||
|
testBlock.Header.Height = testHeight
|
||||||
|
testBlock.Header.LastCommitHash = testBlockHash
|
||||||
|
stateStoreMock := &statemocks.Store{}
|
||||||
|
|
||||||
|
blockStoreMock := &statemocks.BlockStore{}
|
||||||
|
blockStoreMock.On("Height").Return(testHeight)
|
||||||
|
blockStoreMock.On("Base").Return(int64(0))
|
||||||
|
blockStoreMock.On("LoadBlockMeta", testHeight).Return(&types.BlockMeta{
|
||||||
|
BlockID: types.BlockID{
|
||||||
|
Hash: testBlockHash,
|
||||||
|
},
|
||||||
|
})
|
||||||
|
eventSinkMock := &indexermocks.EventSink{}
|
||||||
|
eventSinkMock.On("Stop").Return(nil)
|
||||||
|
rpcConfig := config.TestRPCConfig()
|
||||||
|
l := log.TestingLogger()
|
||||||
|
d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
wg := &sync.WaitGroup{}
|
||||||
|
wg.Add(1)
|
||||||
|
|
||||||
|
startedWG := &sync.WaitGroup{}
|
||||||
|
startedWG.Add(1)
|
||||||
|
go func() {
|
||||||
|
startedWG.Done()
|
||||||
|
defer wg.Done()
|
||||||
|
require.NoError(t, d.Run(ctx))
|
||||||
|
}()
|
||||||
|
// FIXME: used to induce context switch.
|
||||||
|
// Determine more deterministic method for prompting a context switch
|
||||||
|
startedWG.Wait()
|
||||||
|
requireConnect(t, rpcConfig.ListenAddress, 20)
|
||||||
|
cli, err := httpclient.New(rpcConfig.ListenAddress)
|
||||||
|
require.NoError(t, err)
|
||||||
|
res, err := cli.BlockchainInfo(context.Background(), 0, 100)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.NotNil(t, res)
|
||||||
|
require.Equal(t, testBlockHash, []byte(res.BlockMetas[0].BlockID.Hash))
|
||||||
|
|
||||||
|
cancel()
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
blockStoreMock.AssertExpectations(t)
|
||||||
|
stateStoreMock.AssertExpectations(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestValidators(t *testing.T) {
|
||||||
|
testHeight := int64(1)
|
||||||
|
testVotingPower := int64(100)
|
||||||
|
testValidators := types.ValidatorSet{
|
||||||
|
Validators: []*types.Validator{
|
||||||
|
{
|
||||||
|
VotingPower: testVotingPower,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
stateStoreMock := &statemocks.Store{}
|
||||||
|
stateStoreMock.On("LoadValidators", testHeight).Return(&testValidators, nil)
|
||||||
|
|
||||||
|
blockStoreMock := &statemocks.BlockStore{}
|
||||||
|
blockStoreMock.On("Height").Return(testHeight)
|
||||||
|
blockStoreMock.On("Base").Return(int64(0))
|
||||||
|
eventSinkMock := &indexermocks.EventSink{}
|
||||||
|
eventSinkMock.On("Stop").Return(nil)
|
||||||
|
rpcConfig := config.TestRPCConfig()
|
||||||
|
l := log.TestingLogger()
|
||||||
|
d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
wg := &sync.WaitGroup{}
|
||||||
|
wg.Add(1)
|
||||||
|
|
||||||
|
startedWG := &sync.WaitGroup{}
|
||||||
|
startedWG.Add(1)
|
||||||
|
go func() {
|
||||||
|
startedWG.Done()
|
||||||
|
defer wg.Done()
|
||||||
|
require.NoError(t, d.Run(ctx))
|
||||||
|
}()
|
||||||
|
// FIXME: used to induce context switch.
|
||||||
|
// Determine more deterministic method for prompting a context switch
|
||||||
|
startedWG.Wait()
|
||||||
|
requireConnect(t, rpcConfig.ListenAddress, 20)
|
||||||
|
cli, err := httpclient.New(rpcConfig.ListenAddress)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
testPage := 1
|
||||||
|
testPerPage := 100
|
||||||
|
res, err := cli.Validators(context.Background(), &testHeight, &testPage, &testPerPage)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.NotNil(t, res)
|
||||||
|
require.Equal(t, testVotingPower, res.Validators[0].VotingPower)
|
||||||
|
|
||||||
|
cancel()
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
blockStoreMock.AssertExpectations(t)
|
||||||
|
stateStoreMock.AssertExpectations(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBlockSearch(t *testing.T) {
|
||||||
|
testHeight := int64(1)
|
||||||
|
testBlockHash := []byte("test hash")
|
||||||
|
testQuery := "block.height = 1"
|
||||||
|
stateStoreMock := &statemocks.Store{}
|
||||||
|
|
||||||
|
blockStoreMock := &statemocks.BlockStore{}
|
||||||
|
eventSinkMock := &indexermocks.EventSink{}
|
||||||
|
eventSinkMock.On("Stop").Return(nil)
|
||||||
|
eventSinkMock.On("Type").Return(indexer.KV)
|
||||||
|
blockStoreMock.On("LoadBlock", testHeight).Return(&types.Block{
|
||||||
|
Header: types.Header{
|
||||||
|
Height: testHeight,
|
||||||
|
},
|
||||||
|
}, nil)
|
||||||
|
blockStoreMock.On("LoadBlockMeta", testHeight).Return(&types.BlockMeta{
|
||||||
|
BlockID: types.BlockID{
|
||||||
|
Hash: testBlockHash,
|
||||||
|
},
|
||||||
|
})
|
||||||
|
eventSinkMock.On("SearchBlockEvents", mock.Anything,
|
||||||
|
mock.MatchedBy(func(q *query.Query) bool { return testQuery == q.String() })).
|
||||||
|
Return([]int64{testHeight}, nil)
|
||||||
|
rpcConfig := config.TestRPCConfig()
|
||||||
|
l := log.TestingLogger()
|
||||||
|
d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
wg := &sync.WaitGroup{}
|
||||||
|
wg.Add(1)
|
||||||
|
|
||||||
|
startedWG := &sync.WaitGroup{}
|
||||||
|
startedWG.Add(1)
|
||||||
|
go func() {
|
||||||
|
startedWG.Done()
|
||||||
|
defer wg.Done()
|
||||||
|
require.NoError(t, d.Run(ctx))
|
||||||
|
}()
|
||||||
|
// FIXME: used to induce context switch.
|
||||||
|
// Determine more deterministic method for prompting a context switch
|
||||||
|
startedWG.Wait()
|
||||||
|
requireConnect(t, rpcConfig.ListenAddress, 20)
|
||||||
|
cli, err := httpclient.New(rpcConfig.ListenAddress)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
testPage := 1
|
||||||
|
testPerPage := 100
|
||||||
|
testOrderBy := "desc"
|
||||||
|
res, err := cli.BlockSearch(context.Background(), testQuery, &testPage, &testPerPage, testOrderBy)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.NotNil(t, res)
|
||||||
|
require.Equal(t, testBlockHash, []byte(res.Blocks[0].BlockID.Hash))
|
||||||
|
|
||||||
|
cancel()
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
blockStoreMock.AssertExpectations(t)
|
||||||
|
stateStoreMock.AssertExpectations(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
func requireConnect(t testing.TB, addr string, retries int) {
|
||||||
|
parts := strings.SplitN(addr, "://", 2)
|
||||||
|
if len(parts) != 2 {
|
||||||
|
t.Fatalf("malformed address to dial: %s", addr)
|
||||||
|
}
|
||||||
|
var err error
|
||||||
|
for i := 0; i < retries; i++ {
|
||||||
|
var conn net.Conn
|
||||||
|
conn, err = net.Dial(parts[0], parts[1])
|
||||||
|
if err == nil {
|
||||||
|
conn.Close()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// FIXME attempt to yield and let the other goroutine continue execution.
|
||||||
|
time.Sleep(time.Microsecond * 100)
|
||||||
|
}
|
||||||
|
t.Fatalf("unable to connect to server %s after %d tries: %s", addr, retries, err)
|
||||||
|
}
|
||||||
143
inspect/rpc/rpc.go
Normal file
143
inspect/rpc/rpc.go
Normal file
@@ -0,0 +1,143 @@
|
|||||||
|
package rpc
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"net/http"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/rs/cors"
|
||||||
|
|
||||||
|
"github.com/tendermint/tendermint/config"
|
||||||
|
"github.com/tendermint/tendermint/internal/consensus"
|
||||||
|
"github.com/tendermint/tendermint/libs/log"
|
||||||
|
"github.com/tendermint/tendermint/libs/pubsub"
|
||||||
|
"github.com/tendermint/tendermint/rpc/core"
|
||||||
|
"github.com/tendermint/tendermint/rpc/jsonrpc/server"
|
||||||
|
"github.com/tendermint/tendermint/state"
|
||||||
|
"github.com/tendermint/tendermint/state/indexer"
|
||||||
|
"github.com/tendermint/tendermint/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Server defines parameters for running an Inspector rpc server.
type Server struct {
	Addr    string // TCP address to listen on, ":http" if empty
	// Handler serves all Inspector routes (set via Handler/Routes below).
	Handler http.Handler
	// Logger receives server log output.
	Logger log.Logger
	// Config supplies RPC settings such as connection limits and timeouts.
	Config *config.RPCConfig
}
|
||||||
|
|
||||||
|
// Routes returns the set of routes used by the Inspector server.
|
||||||
|
//
|
||||||
|
//nolint: lll
|
||||||
|
func Routes(cfg config.RPCConfig, s state.Store, bs state.BlockStore, es []indexer.EventSink, logger log.Logger) core.RoutesMap {
|
||||||
|
env := &core.Environment{
|
||||||
|
Config: cfg,
|
||||||
|
EventSinks: es,
|
||||||
|
StateStore: s,
|
||||||
|
BlockStore: bs,
|
||||||
|
ConsensusReactor: waitSyncCheckerImpl{},
|
||||||
|
Logger: logger,
|
||||||
|
}
|
||||||
|
return core.RoutesMap{
|
||||||
|
"blockchain": server.NewRPCFunc(env.BlockchainInfo, "minHeight,maxHeight", true),
|
||||||
|
"consensus_params": server.NewRPCFunc(env.ConsensusParams, "height", true),
|
||||||
|
"block": server.NewRPCFunc(env.Block, "height", true),
|
||||||
|
"block_by_hash": server.NewRPCFunc(env.BlockByHash, "hash", true),
|
||||||
|
"block_results": server.NewRPCFunc(env.BlockResults, "height", true),
|
||||||
|
"commit": server.NewRPCFunc(env.Commit, "height", true),
|
||||||
|
"validators": server.NewRPCFunc(env.Validators, "height,page,per_page", true),
|
||||||
|
"tx": server.NewRPCFunc(env.Tx, "hash,prove", true),
|
||||||
|
"tx_search": server.NewRPCFunc(env.TxSearch, "query,prove,page,per_page,order_by", false),
|
||||||
|
"block_search": server.NewRPCFunc(env.BlockSearch, "query,page,per_page,order_by", false),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handler returns the http.Handler configured for use with an Inspector server. Handler
|
||||||
|
// registers the routes on the http.Handler and also registers the websocket handler
|
||||||
|
// and the CORS handler if specified by the configuration options.
|
||||||
|
func Handler(rpcConfig *config.RPCConfig, routes core.RoutesMap, logger log.Logger) http.Handler {
|
||||||
|
mux := http.NewServeMux()
|
||||||
|
wmLogger := logger.With("protocol", "websocket")
|
||||||
|
|
||||||
|
var eventBus types.EventBusSubscriber
|
||||||
|
|
||||||
|
websocketDisconnectFn := func(remoteAddr string) {
|
||||||
|
err := eventBus.UnsubscribeAll(context.Background(), remoteAddr)
|
||||||
|
if err != nil && err != pubsub.ErrSubscriptionNotFound {
|
||||||
|
wmLogger.Error("Failed to unsubscribe addr from events", "addr", remoteAddr, "err", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
wm := server.NewWebsocketManager(routes,
|
||||||
|
server.OnDisconnect(websocketDisconnectFn),
|
||||||
|
server.ReadLimit(rpcConfig.MaxBodyBytes))
|
||||||
|
wm.SetLogger(wmLogger)
|
||||||
|
mux.HandleFunc("/websocket", wm.WebsocketHandler)
|
||||||
|
|
||||||
|
server.RegisterRPCFuncs(mux, routes, logger)
|
||||||
|
var rootHandler http.Handler = mux
|
||||||
|
if rpcConfig.IsCorsEnabled() {
|
||||||
|
rootHandler = addCORSHandler(rpcConfig, mux)
|
||||||
|
}
|
||||||
|
return rootHandler
|
||||||
|
}
|
||||||
|
|
||||||
|
func addCORSHandler(rpcConfig *config.RPCConfig, h http.Handler) http.Handler {
|
||||||
|
corsMiddleware := cors.New(cors.Options{
|
||||||
|
AllowedOrigins: rpcConfig.CORSAllowedOrigins,
|
||||||
|
AllowedMethods: rpcConfig.CORSAllowedMethods,
|
||||||
|
AllowedHeaders: rpcConfig.CORSAllowedHeaders,
|
||||||
|
})
|
||||||
|
h = corsMiddleware.Handler(h)
|
||||||
|
return h
|
||||||
|
}
|
||||||
|
|
||||||
|
// waitSyncCheckerImpl is a stub consensus-reactor dependency for the inspect
// server: it reports that the node is never waiting for sync and has no peers.
type waitSyncCheckerImpl struct{}

// WaitSync always reports false; the inspect server never block-syncs.
func (waitSyncCheckerImpl) WaitSync() bool {
	return false
}

// GetPeerState always reports no state; the inspect server tracks no peers.
func (waitSyncCheckerImpl) GetPeerState(peerID types.NodeID) (*consensus.PeerState, bool) {
	return nil, false
}
|
||||||
|
|
||||||
|
// ListenAndServe listens on the address specified in srv.Addr and handles any
|
||||||
|
// incoming requests over HTTP using the Inspector rpc handler specified on the server.
|
||||||
|
func (srv *Server) ListenAndServe(ctx context.Context) error {
|
||||||
|
listener, err := server.Listen(srv.Addr, srv.Config.MaxOpenConnections)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
go func() {
|
||||||
|
<-ctx.Done()
|
||||||
|
listener.Close()
|
||||||
|
}()
|
||||||
|
return server.Serve(listener, srv.Handler, srv.Logger, serverRPCConfig(srv.Config))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListenAndServeTLS listens on the address specified in srv.Addr. ListenAndServeTLS handles
|
||||||
|
// incoming requests over HTTPS using the Inspector rpc handler specified on the server.
|
||||||
|
func (srv *Server) ListenAndServeTLS(ctx context.Context, certFile, keyFile string) error {
|
||||||
|
listener, err := server.Listen(srv.Addr, srv.Config.MaxOpenConnections)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
go func() {
|
||||||
|
<-ctx.Done()
|
||||||
|
listener.Close()
|
||||||
|
}()
|
||||||
|
return server.ServeTLS(listener, srv.Handler, certFile, keyFile, srv.Logger, serverRPCConfig(srv.Config))
|
||||||
|
}
|
||||||
|
|
||||||
|
func serverRPCConfig(r *config.RPCConfig) *server.Config {
|
||||||
|
cfg := server.DefaultConfig()
|
||||||
|
cfg.MaxBodyBytes = r.MaxBodyBytes
|
||||||
|
cfg.MaxHeaderBytes = r.MaxHeaderBytes
|
||||||
|
// If necessary adjust global WriteTimeout to ensure it's greater than
|
||||||
|
// TimeoutBroadcastTxCommit.
|
||||||
|
// See https://github.com/tendermint/tendermint/issues/3435
|
||||||
|
if cfg.WriteTimeout <= r.TimeoutBroadcastTxCommit {
|
||||||
|
cfg.WriteTimeout = r.TimeoutBroadcastTxCommit + 1*time.Second
|
||||||
|
}
|
||||||
|
return cfg
|
||||||
|
}
|
||||||
@@ -1,17 +0,0 @@
|
|||||||
/*
|
|
||||||
Package blockchain provides two implementations of the fast-sync protocol.
|
|
||||||
|
|
||||||
- v0 was the very first implementation. it's battle tested, but does not have a
|
|
||||||
lot of test coverage.
|
|
||||||
- v2 is the newest implementation, with a focus on testability and readability.
|
|
||||||
|
|
||||||
Check out ADR-40 for the formal model and requirements.
|
|
||||||
|
|
||||||
# Termination criteria
|
|
||||||
|
|
||||||
1. the maximum peer height is reached
|
|
||||||
2. termination timeout is triggered, which is set if the peer set is empty or
|
|
||||||
there are no pending requests.
|
|
||||||
|
|
||||||
*/
|
|
||||||
package blockchain
|
|
||||||
36
internal/blocksync/doc.go
Normal file
36
internal/blocksync/doc.go
Normal file
@@ -0,0 +1,36 @@
|
|||||||
|
/*
|
||||||
|
Package blocksync implements two versions of a reactor Service that are
|
||||||
|
responsible for block propagation and gossip between peers. This mechanism was
|
||||||
|
formerly known as fast-sync.
|
||||||
|
|
||||||
|
In order for a full node to successfully participate in consensus, it must have
|
||||||
|
the latest view of state. The blocksync protocol is a mechanism in which peers
|
||||||
|
may exchange and gossip entire blocks with one another, in a request/response
|
||||||
|
type model, until they've successfully synced to the latest head block. Once
|
||||||
|
successfully synced, the full node can switch to an active role in consensus and
|
||||||
|
will no longer blocksync and thus no longer run the blocksync process.
|
||||||
|
|
||||||
|
Note, the blocksync reactor Service gossips entire block and relevant data such
|
||||||
|
that each receiving peer may construct the entire view of the blocksync state.
|
||||||
|
|
||||||
|
There are currently two versions of the blocksync reactor Service:
|
||||||
|
|
||||||
|
- v0: The initial implementation that is battle-tested, but whose test coverage
|
||||||
|
is lacking and is not formally verifiable.
|
||||||
|
- v2: The latest implementation that has much higher test coverage and is formally
|
||||||
|
verified. However, the current implementation of v2 is not as battle-tested and
|
||||||
|
is known to have various bugs that could make it unreliable in production
|
||||||
|
environments.
|
||||||
|
|
||||||
|
The v0 blocksync reactor Service has one p2p channel, BlockchainChannel. This
|
||||||
|
channel is responsible for handling messages that both request blocks and respond
|
||||||
|
to block requests from peers. For every block request from a peer, the reactor
|
||||||
|
will execute respondToPeer which will fetch the block from the node's state store
|
||||||
|
and respond to the peer. For every block response, the node will add the block
|
||||||
|
to its pool via AddBlock.
|
||||||
|
|
||||||
|
Internally, v0 runs a poolRoutine that constantly checks for what blocks it needs
|
||||||
|
and requests them. The poolRoutine is also responsible for taking blocks from the
|
||||||
|
pool, saving and executing each block.
|
||||||
|
*/
|
||||||
|
package blocksync
|
||||||
@@ -1,7 +1,7 @@
|
|||||||
package blockchain
|
package blocksync
|
||||||
|
|
||||||
import (
|
import (
|
||||||
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
|
bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"
|
||||||
"github.com/tendermint/tendermint/types"
|
"github.com/tendermint/tendermint/types"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -65,7 +65,7 @@ type BlockRequest struct {
|
|||||||
PeerID types.NodeID
|
PeerID types.NodeID
|
||||||
}
|
}
|
||||||
|
|
||||||
// BlockPool keeps track of the fast sync peers, block requests and block responses.
|
// BlockPool keeps track of the block sync peers, block requests and block responses.
|
||||||
type BlockPool struct {
|
type BlockPool struct {
|
||||||
service.BaseService
|
service.BaseService
|
||||||
lastAdvance time.Time
|
lastAdvance time.Time
|
||||||
@@ -83,6 +83,10 @@ type BlockPool struct {
|
|||||||
|
|
||||||
requestsCh chan<- BlockRequest
|
requestsCh chan<- BlockRequest
|
||||||
errorsCh chan<- peerError
|
errorsCh chan<- peerError
|
||||||
|
|
||||||
|
startHeight int64
|
||||||
|
lastHundredBlockTimeStamp time.Time
|
||||||
|
lastSyncRate float64
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewBlockPool returns a new BlockPool with the height equal to start. Block
|
// NewBlockPool returns a new BlockPool with the height equal to start. Block
|
||||||
@@ -91,12 +95,14 @@ func NewBlockPool(start int64, requestsCh chan<- BlockRequest, errorsCh chan<- p
|
|||||||
bp := &BlockPool{
|
bp := &BlockPool{
|
||||||
peers: make(map[types.NodeID]*bpPeer),
|
peers: make(map[types.NodeID]*bpPeer),
|
||||||
|
|
||||||
requesters: make(map[int64]*bpRequester),
|
requesters: make(map[int64]*bpRequester),
|
||||||
height: start,
|
height: start,
|
||||||
numPending: 0,
|
startHeight: start,
|
||||||
|
numPending: 0,
|
||||||
|
|
||||||
requestsCh: requestsCh,
|
requestsCh: requestsCh,
|
||||||
errorsCh: errorsCh,
|
errorsCh: errorsCh,
|
||||||
|
lastSyncRate: 0,
|
||||||
}
|
}
|
||||||
bp.BaseService = *service.NewBaseService(nil, "BlockPool", bp)
|
bp.BaseService = *service.NewBaseService(nil, "BlockPool", bp)
|
||||||
return bp
|
return bp
|
||||||
@@ -106,6 +112,7 @@ func NewBlockPool(start int64, requestsCh chan<- BlockRequest, errorsCh chan<- p
|
|||||||
// pool's start time.
|
// pool's start time.
|
||||||
func (pool *BlockPool) OnStart() error {
|
func (pool *BlockPool) OnStart() error {
|
||||||
pool.lastAdvance = time.Now()
|
pool.lastAdvance = time.Now()
|
||||||
|
pool.lastHundredBlockTimeStamp = pool.lastAdvance
|
||||||
go pool.makeRequestersRoutine()
|
go pool.makeRequestersRoutine()
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -216,6 +223,19 @@ func (pool *BlockPool) PopRequest() {
|
|||||||
delete(pool.requesters, pool.height)
|
delete(pool.requesters, pool.height)
|
||||||
pool.height++
|
pool.height++
|
||||||
pool.lastAdvance = time.Now()
|
pool.lastAdvance = time.Now()
|
||||||
|
|
||||||
|
// the lastSyncRate will be updated every 100 blocks, it uses the adaptive filter
|
||||||
|
// to smooth the block sync rate and the unit represents the number of blocks per second.
|
||||||
|
if (pool.height-pool.startHeight)%100 == 0 {
|
||||||
|
newSyncRate := 100 / time.Since(pool.lastHundredBlockTimeStamp).Seconds()
|
||||||
|
if pool.lastSyncRate == 0 {
|
||||||
|
pool.lastSyncRate = newSyncRate
|
||||||
|
} else {
|
||||||
|
pool.lastSyncRate = 0.9*pool.lastSyncRate + 0.1*newSyncRate
|
||||||
|
}
|
||||||
|
pool.lastHundredBlockTimeStamp = time.Now()
|
||||||
|
}
|
||||||
|
|
||||||
} else {
|
} else {
|
||||||
panic(fmt.Sprintf("Expected requester to pop, got nothing at height %v", pool.height))
|
panic(fmt.Sprintf("Expected requester to pop, got nothing at height %v", pool.height))
|
||||||
}
|
}
|
||||||
@@ -428,6 +448,20 @@ func (pool *BlockPool) debug() string {
|
|||||||
return str
|
return str
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (pool *BlockPool) targetSyncBlocks() int64 {
|
||||||
|
pool.mtx.RLock()
|
||||||
|
defer pool.mtx.RUnlock()
|
||||||
|
|
||||||
|
return pool.maxPeerHeight - pool.startHeight + 1
|
||||||
|
}
|
||||||
|
|
||||||
|
func (pool *BlockPool) getLastSyncRate() float64 {
|
||||||
|
pool.mtx.RLock()
|
||||||
|
defer pool.mtx.RUnlock()
|
||||||
|
|
||||||
|
return pool.lastSyncRate
|
||||||
|
}
|
||||||
|
|
||||||
//-------------------------------------
|
//-------------------------------------
|
||||||
|
|
||||||
type bpPeer struct {
|
type bpPeer struct {
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user