Mirror of https://github.com/tendermint/tendermint.git, synced 2026-01-12 07:42:48 +00:00

Compare commits: 150 commits (SHA1):

```text
9d1be86031 35581cf54e 64747b2b18 014cdcf098 08e5d0bf78 c645fd0b71
ec471ba27e 413f5f7115 e0f68fe640 dae7b69af3 7917485bc7 e7b9ee7cef
7417ddf351 161611cb3c bdedf2ec20 6e7fa2a09f e914fe40ec a6dd0d270a
bf4688b37c dbf22de42b 3ec6e424d6 df5fe1fb21 a28c987f5a 1d160a5a86
430afb23e7 df5ba80914 bda1dd4734 14efa9ce89 a349a67ad9 dc6d73a408
7e05d43b60 95a7cc14cc a71811f38a 4999643e2a 5164dc6d7d 1b2963e9e6
1135889847 f2cbc2220e 6354c99dbf 014d0d6ca0 441405eb9e 3ab015127c
d47d110528 71a8fcfb16 f573d3d2a6 29c5a062d2 eed27addec a41c5eec11
bca737c2d3 58669ae8c1 1f0cf7762b dd86d3e8bb 27404910de 08f55593dd
9fdbd2e466 a80c6a229a d513c925dd dad439f115 fbd754b4de 65367d7e94
c72335712b fb5cd16de2 708a62fc31 7e902dc79a 0846f3e4c7 46badfabd9
eb465a3f4f d6be59788b f9f3bed808 64dfeb775c 6688db7be1 d32df22c8b
223ece93c8 ba1711e706 8df725f92f 2b3737333f bbb5f3bfef d6b413ff8e
7b615f8123 7d9447198c 5276400c30 493dd69f31 1d9d947d88 479bdd71e6
17f4ea3680 5c32cfa00e 5e354a3a57 9e14e954f9 6b7d30cf37 25101d1116
b83cc0aeda 4a1df4911d a3cc3d98b9 fe024521ef 02def9ca64 ce2409f3ff
30915e9337 8a7affe3a0 851f404305 f63496dcd6 044b12585f ac2e7fab3d
e6f0711648 603a1d6610 ad72896ca5 9afdac6b52 a694dad540 9d1556a5bc
7ae73be891 bbb1506f5e 7cb014cf27 004967f962 6670c24e5f b685ec7f2d
016c91b50f 34f23ab88a aef0bd874d d2bd4471bc c7006af6fd 972eee6ebc
b06540b5ff d1213f7e5f 624bbac8f6 874c9a0951 986f8d6532 e40e7ea46e
7fa34c63af da6ec8f082 efddab0734 3454f8cb89 2f231ceb95 6e85f46d9a
89139784c5 ecf19029b4 9ae5797866 07670318a9 1e32a149dd 2c553d735a
799489e474 66b1a3ee4c 06e8620621 b2dd100a76 314b139ac3 551072c962
4b5472c387 fd3bfb38e7 186e0e4df2 a97bb37d44 9e8837ad63 6b4e9078de
```
.github/CODEOWNERS (vendored, 3 lines changed):

```diff
@@ -7,4 +7,5 @@
 # global owners are only requested if there isn't a more specific
 # codeowner specified below. For this reason, the global codeowners
 # are often repeated in package-level definitions.
-* @ebuchman @cmwaters @tychoish @williambanfield @creachadair
+* @ebuchman @tendermint/tendermint-engineering @adizere @lasarojc
```
.github/ISSUE_TEMPLATE/proposal.md (vendored, new file, 37 lines):

```markdown
---
name: Protocol Change Proposal
about: Create a proposal to request a change to the protocol

---

<!-- < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < ☺
v ✰ Thanks for opening an issue! ✰
v Before smashing the submit button please review the template.
v Word of caution: Under-specified proposals may be rejected summarily
☺ > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > -->

# Protocol Change Proposal

## Summary

<!-- Short, concise description of the proposed change -->

## Problem Definition

<!-- Why do we need this change?
What problems may be addressed by introducing this change?
What benefits does Tendermint stand to gain by including this change?
Are there any disadvantages of including this change? -->

## Proposal

<!-- Detailed description of requirements of implementation -->

____

#### For Admin Use

- [ ] Not duplicate issue
- [ ] Appropriate labels applied
- [ ] Appropriate contributors tagged
- [ ] Contributor assigned/self-assigned
```
.github/dependabot.yml (vendored, deleted, 27 lines):

```yaml
version: 2
updates:
  - package-ecosystem: github-actions
    directory: "/"
    schedule:
      interval: daily
      time: "11:00"
    open-pull-requests-limit: 10
  - package-ecosystem: npm
    directory: "/docs"
    schedule:
      interval: daily
      time: "11:00"
    open-pull-requests-limit: 10
    reviewers:
      - fadeev
  - package-ecosystem: gomod
    directory: "/"
    schedule:
      interval: daily
      time: "11:00"
    open-pull-requests-limit: 10
    reviewers:
      - melekes
      - tessr
    labels:
      - T:dependencies
```
.github/workflows/check-generated.yml (vendored, new file, 75 lines):

```yaml
# Verify that generated code is up-to-date.
#
# Note that we run these checks regardless whether the input files have
# changed, because generated code can change in response to toolchain updates
# even if no files in the repository are modified.
name: Check generated code
on:
  pull_request:
    branches:
      - main

permissions:
  contents: read

jobs:
  check-mocks:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/setup-go@v3
        with:
          go-version: '1.18'

      - uses: actions/checkout@v3

      - name: "Check generated mocks"
        run: |
          set -euo pipefail

          readonly MOCKERY=2.12.3 # N.B. no leading "v"
          curl -sL "https://github.com/vektra/mockery/releases/download/v${MOCKERY}/mockery_${MOCKERY}_Linux_x86_64.tar.gz" | tar -C /usr/local/bin -xzf -
          make mockery 2>/dev/null

          if ! git diff --stat --exit-code ; then
            echo ">> ERROR:"
            echo ">>"
            echo ">> Generated mocks require update (either Mockery or source files may have changed)."
            echo ">> Ensure your tools are up-to-date, re-run 'make mockery' and update this PR."
            echo ">>"
            exit 1
          fi

  check-proto:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/setup-go@v3
        with:
          go-version: '1.18'

      - uses: actions/checkout@v3
        with:
          fetch-depth: 1 # we need a .git directory to run git diff

      - name: "Check protobuf generated code"
        run: |
          set -euo pipefail

          # Install buf and gogo tools, so that differences that arise from
          # toolchain differences are also caught.
          readonly tools="$(mktemp -d)"
          export PATH="${PATH}:${tools}/bin"
          export GOBIN="${tools}/bin"

          go install github.com/bufbuild/buf/cmd/buf
          go install github.com/gogo/protobuf/protoc-gen-gogofaster@latest

          make proto-gen

          if ! git diff --stat --exit-code ; then
            echo ">> ERROR:"
            echo ">>"
            echo ">> Protobuf generated code requires update (either tools or .proto files may have changed)."
            echo ">> Ensure your tools are up-to-date, re-run 'make proto-gen' and update this PR."
            echo ">>"
            exit 1
          fi
```
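A rough local equivalent of the two checks above, assuming the repository's `make mockery` and `make proto-gen` targets and the same pinned mockery release as the workflow (the install directory is an arbitrary local choice):

```sh
#!/usr/bin/env bash
# Sketch: run the generated-code checks locally before pushing.
set -euo pipefail

MOCKERY=2.12.3  # same pin as the workflow, no leading "v"
mkdir -p "$HOME/.local/bin"
curl -sL "https://github.com/vektra/mockery/releases/download/v${MOCKERY}/mockery_${MOCKERY}_Linux_x86_64.tar.gz" \
  | tar -C "$HOME/.local/bin" -xzf -

make mockery    # regenerate mocks
make proto-gen  # regenerate protobuf code

# Fail if anything regenerated differs from what is committed.
git diff --stat --exit-code
```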
.github/workflows/coverage.yml (vendored, 44 lines changed):

```diff
@@ -10,25 +10,25 @@ jobs:
   split-test-files:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       - name: Create a file with all the pkgs
         run: go list ./... > pkgs.txt
       - name: Split pkgs into 4 files
         run: split -d -n l/4 pkgs.txt pkgs.txt.part.
       # cache multiple
-      - uses: actions/upload-artifact@v2
+      - uses: actions/upload-artifact@v3
         with:
           name: "${{ github.sha }}-00"
           path: ./pkgs.txt.part.00
-      - uses: actions/upload-artifact@v2
+      - uses: actions/upload-artifact@v3
        with:
           name: "${{ github.sha }}-01"
           path: ./pkgs.txt.part.01
-      - uses: actions/upload-artifact@v2
+      - uses: actions/upload-artifact@v3
         with:
           name: "${{ github.sha }}-02"
           path: ./pkgs.txt.part.02
-      - uses: actions/upload-artifact@v2
+      - uses: actions/upload-artifact@v3
         with:
           name: "${{ github.sha }}-03"
           path: ./pkgs.txt.part.03
@@ -42,11 +42,11 @@ jobs:
         goarch: ["arm", "amd64"]
     timeout-minutes: 5
     steps:
-      - uses: actions/setup-go@v2
+      - uses: actions/setup-go@v3
         with:
-          go-version: "^1.15.4"
-      - uses: actions/checkout@v2
-      - uses: technote-space/get-diff-action@v4
+          go-version: "1.18"
+      - uses: actions/checkout@v3
+      - uses: technote-space/get-diff-action@v6
         with:
           PATTERNS: |
             **/**.go
@@ -64,17 +64,17 @@ jobs:
       matrix:
         part: ["00", "01", "02", "03"]
     steps:
-      - uses: actions/setup-go@v2
+      - uses: actions/setup-go@v3
        with:
-          go-version: "^1.15.4"
-      - uses: actions/checkout@v2
-      - uses: technote-space/get-diff-action@v4
+          go-version: "1.18"
+      - uses: actions/checkout@v3
+      - uses: technote-space/get-diff-action@v6
         with:
           PATTERNS: |
             **/**.go
             go.mod
             go.sum
-      - uses: actions/download-artifact@v2
+      - uses: actions/download-artifact@v3
         with:
           name: "${{ github.sha }}-${{ matrix.part }}"
        if: env.GIT_DIFF
@@ -82,7 +82,7 @@ jobs:
         run: |
           cat pkgs.txt.part.${{ matrix.part }} | xargs go test -mod=readonly -timeout 8m -race -coverprofile=${{ matrix.part }}profile.out -covermode=atomic
         if: env.GIT_DIFF
-      - uses: actions/upload-artifact@v2
+      - uses: actions/upload-artifact@v3
         with:
           name: "${{ github.sha }}-${{ matrix.part }}-coverage"
           path: ./${{ matrix.part }}profile.out
@@ -91,33 +91,33 @@ jobs:
     runs-on: ubuntu-latest
     needs: tests
     steps:
-      - uses: actions/checkout@v2
-      - uses: technote-space/get-diff-action@v4
+      - uses: actions/checkout@v3
+      - uses: technote-space/get-diff-action@v6
         with:
           PATTERNS: |
             **/**.go
             go.mod
             go.sum
-      - uses: actions/download-artifact@v2
+      - uses: actions/download-artifact@v3
         with:
           name: "${{ github.sha }}-00-coverage"
         if: env.GIT_DIFF
-      - uses: actions/download-artifact@v2
+      - uses: actions/download-artifact@v3
         with:
           name: "${{ github.sha }}-01-coverage"
         if: env.GIT_DIFF
-      - uses: actions/download-artifact@v2
+      - uses: actions/download-artifact@v3
         with:
           name: "${{ github.sha }}-02-coverage"
         if: env.GIT_DIFF
-      - uses: actions/download-artifact@v2
+      - uses: actions/download-artifact@v3
         with:
           name: "${{ github.sha }}-03-coverage"
         if: env.GIT_DIFF
      - run: |
          cat ./*profile.out | grep -v "mode: atomic" >> coverage.txt
        if: env.GIT_DIFF
-      - uses: codecov/codecov-action@v1.0.13
+      - uses: codecov/codecov-action@v3
        with:
          file: ./coverage.txt
        if: env.GIT_DIFF
```
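The same split-and-merge flow can be approximated locally in one shot; the package list, flags and merge step mirror the workflow, while the profile file names and the final `go tool cover` summary are only an illustration:

```sh
# Build the package list and split it the way the workflow does.
go list ./... > pkgs.txt
split -d -n l/4 pkgs.txt pkgs.txt.part.

# Run one slice (part 00) with the same test flags as the "tests" job.
cat pkgs.txt.part.00 | xargs go test -mod=readonly -timeout 8m -race \
  -coverprofile=00profile.out -covermode=atomic

# Merge slices for upload, and print a local coverage summary for one slice.
cat ./*profile.out | grep -v "mode: atomic" >> coverage.txt
go tool cover -func=00profile.out | tail -n 1
```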
.github/workflows/docker.yml (vendored, 8 lines changed):

```diff
@@ -14,7 +14,7 @@ jobs:
   build:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@master
+      - uses: actions/checkout@v3
       - name: Prepare
         id: prep
         run: |
@@ -40,17 +40,17 @@ jobs:
           platforms: all

       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v1
+        uses: docker/setup-buildx-action@v2

       - name: Login to DockerHub
         if: ${{ github.event_name != 'pull_request' }}
-        uses: docker/login-action@v1
+        uses: docker/login-action@v2
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}

       - name: Publish to Docker Hub
-        uses: docker/build-push-action@v2
+        uses: docker/build-push-action@v3
         with:
           context: .
           file: ./DOCKER/Dockerfile
```
.github/workflows/e2e-manual.yml (vendored, 6 lines changed):

```diff
@@ -14,11 +14,11 @@ jobs:
     runs-on: ubuntu-latest
     timeout-minutes: 60
     steps:
-      - uses: actions/setup-go@v2
+      - uses: actions/setup-go@v3
         with:
-          go-version: '1.17'
+          go-version: '1.18'

-      - uses: actions/checkout@v2.4.0
+      - uses: actions/checkout@v3

       - name: Build
         working-directory: test/e2e
```
.github/workflows/e2e-nightly-34x.yml (vendored, 10 lines changed):

```diff
@@ -21,11 +21,11 @@ jobs:
     runs-on: ubuntu-latest
     timeout-minutes: 60
     steps:
-      - uses: actions/setup-go@v2
+      - uses: actions/setup-go@v3
         with:
-          go-version: '^1.15.4'
+          go-version: '1.18'

-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
         with:
           ref: 'v0.34.x'

@@ -49,7 +49,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Notify Slack on failure
-        uses: rtCamp/action-slack-notify@e9db0ef
+        uses: rtCamp/action-slack-notify@f565a63638bd3615e76249bffab00fcb9dab90f7
         env:
           SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
           SLACK_CHANNEL: tendermint-internal
@@ -65,7 +65,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Notify Slack on success
-        uses: rtCamp/action-slack-notify@ae4223259071871559b6e9d08b24a63d71b3f0c0
+        uses: rtCamp/action-slack-notify@f565a63638bd3615e76249bffab00fcb9dab90f7
         env:
           SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
           SLACK_CHANNEL: tendermint-internal
```
.github/workflows/e2e-nightly-master.yml (vendored, 10 lines changed):

```diff
@@ -20,11 +20,11 @@ jobs:
     runs-on: ubuntu-latest
     timeout-minutes: 60
     steps:
-      - uses: actions/setup-go@v2
+      - uses: actions/setup-go@v3
         with:
-          go-version: '1.15'
+          go-version: '1.18'

-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3

       - name: Build
         working-directory: test/e2e
@@ -46,7 +46,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Notify Slack on failure
-        uses: rtCamp/action-slack-notify@ae4223259071871559b6e9d08b24a63d71b3f0c0
+        uses: rtCamp/action-slack-notify@f565a63638bd3615e76249bffab00fcb9dab90f7
         env:
           SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
           SLACK_CHANNEL: tendermint-internal
@@ -62,7 +62,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Notify Slack on success
-        uses: rtCamp/action-slack-notify@ae4223259071871559b6e9d08b24a63d71b3f0c0
+        uses: rtCamp/action-slack-notify@f565a63638bd3615e76249bffab00fcb9dab90f7
         env:
           SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
           SLACK_CHANNEL: tendermint-internal
```
.github/workflows/e2e.yml (vendored, 8 lines changed):

```diff
@@ -13,11 +13,11 @@ jobs:
     runs-on: ubuntu-latest
     timeout-minutes: 15
     steps:
-      - uses: actions/setup-go@v2
+      - uses: actions/setup-go@v3
         with:
-          go-version: '^1.15.4'
-      - uses: actions/checkout@v2
-      - uses: technote-space/get-diff-action@v4
+          go-version: '1.18'
+      - uses: actions/checkout@v3
+      - uses: technote-space/get-diff-action@v6
         with:
           PATTERNS: |
             **/**.go
```
.github/workflows/fuzz-nightly.yml (vendored, 12 lines changed):

```diff
@@ -9,11 +9,11 @@ jobs:
   fuzz-nightly-test:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/setup-go@v2
+      - uses: actions/setup-go@v3
         with:
-          go-version: '1.15'
+          go-version: '1.18'

-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3

       - name: Install go-fuzz
         working-directory: test/fuzz
@@ -45,14 +45,14 @@ jobs:
         continue-on-error: true

       - name: Archive crashers
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         with:
           name: crashers
           path: test/fuzz/**/crashers
           retention-days: 1

       - name: Archive suppressions
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         with:
           name: suppressions
           path: test/fuzz/**/suppressions
@@ -72,7 +72,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Notify Slack if any crashers
-        uses: rtCamp/action-slack-notify@ae4223259071871559b6e9d08b24a63d71b3f0c0
+        uses: rtCamp/action-slack-notify@f565a63638bd3615e76249bffab00fcb9dab90f7
         env:
           SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
           SLACK_CHANNEL: tendermint-internal
```
.github/workflows/linkchecker.yml (vendored, deleted, 12 lines):

```yaml
name: Check Markdown links
on:
  schedule:
    - cron: '* */24 * * *'
jobs:
  markdown-link-check:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@master
      - uses: gaurav-nelson/github-action-markdown-link-check@1.0.7
        with:
          folder-path: "docs"
```
.github/workflows/lint.yml (vendored, 15 lines changed):

```diff
@@ -13,17 +13,22 @@ jobs:
     runs-on: ubuntu-latest
     timeout-minutes: 8
     steps:
-      - uses: actions/checkout@v2.4.0
-      - uses: technote-space/get-diff-action@v5
+      - uses: actions/checkout@v3
+      - uses: actions/setup-go@v3
+        with:
+          go-version: '1.18'
+      - uses: technote-space/get-diff-action@v6
         with:
           PATTERNS: |
             **/**.go
             go.mod
             go.sum
-      - uses: golangci/golangci-lint-action@v2.5.2
+      - uses: golangci/golangci-lint-action@v3
         with:
-          # Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version.
-          version: v1.42.1
+          # Required: the version of golangci-lint is required and
+          # must be specified without patch version: we always use the
+          # latest patch version.
+          version: v1.50.1
           args: --timeout 10m
           github-token: ${{ secrets.github_token }}
         if: env.GIT_DIFF
```
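The same lint run can be reproduced locally; the version pin matches the workflow, and installing via `go install` is just one of the ways golangci-lint can be obtained:

```sh
# Install the pinned linter and run it with the same timeout as CI.
go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.50.1
golangci-lint run --timeout 10m
```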
.github/workflows/linter.yml (vendored, 2 lines changed):

```diff
@@ -19,7 +19,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout Code
-        uses: actions/checkout@v2.4.0
+        uses: actions/checkout@v3
       - name: Lint Code Base
         uses: docker://github/super-linter:v3
         env:
```
.github/workflows/pre-release.yml (vendored, new file, 65 lines):

```yaml
name: "Pre-release"

on:
  push:
    tags:
      - "v[0-9]+.[0-9]+.[0-9]+-alpha.[0-9]+" # e.g. v0.37.0-alpha.1, v0.38.0-alpha.10
      - "v[0-9]+.[0-9]+.[0-9]+-beta.[0-9]+"  # e.g. v0.37.0-beta.1, v0.38.0-beta.10
      - "v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+"     # e.g. v0.37.0-rc1, v0.38.0-rc10

jobs:
  prerelease:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - uses: actions/setup-go@v3
        with:
          go-version: '1.18'

      - name: Build
        uses: goreleaser/goreleaser-action@v3
        if: ${{ github.event_name == 'pull_request' }}
        with:
          version: latest
          args: build --skip-validate # skip validate skips initial sanity checks in order to be able to fully run

      # Link to CHANGELOG_PENDING.md as release notes.
      - run: echo https://github.com/tendermint/tendermint/blob/${GITHUB_REF#refs/tags/}/CHANGELOG_PENDING.md > ../release_notes.md

      - name: Release
        uses: goreleaser/goreleaser-action@v3
        if: startsWith(github.ref, 'refs/tags/')
        with:
          version: latest
          args: release --rm-dist --release-notes=../release_notes.md
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

  prerelease-success:
    needs: prerelease
    if: ${{ success() }}
    runs-on: ubuntu-latest
    steps:
      - name: Notify Slack upon pre-release
        uses: slackapi/slack-github-action@v1.23.0
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
          SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
          RELEASE_URL: "${{ github.server_url }}/${{ github.repository }}/releases/tag/${{ github.ref_name }}"
        with:
          payload: |
            {
              "blocks": [
                {
                  "type": "section",
                  "text": {
                    "type": "mrkdwn",
                    "text": ":sparkles: New Tendermint pre-release: <${{ env.RELEASE_URL }}|${{ github.ref_name }}>"
                  }
                }
              ]
            }
```
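For illustration, pushing a tag that matches one of the patterns above is what starts this job; the tag name below is only an example taken from the workflow's own comments:

```sh
# Cutting a release candidate: the tag must match one of the patterns above.
git tag -a v0.37.0-rc1 -m "v0.37.0-rc1"
git push origin v0.37.0-rc1   # the push event triggers the "Pre-release" workflow
```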
.github/workflows/proto-docker.yml (vendored, deleted, 51 lines):

```yaml
name: Build & Push TM Proto Builder
on:
  pull_request:
    paths:
      - "tools/proto/*"
  push:
    branches:
      - master
    paths:
      - "tools/proto/*"
  schedule:
    # run this job once a month to recieve any go or buf updates
    - cron: "* * 1 * *"

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@master
      - name: Prepare
        id: prep
        run: |
          DOCKER_IMAGE=tendermintdev/docker-build-proto
          VERSION=noop
          if [[ $GITHUB_REF == refs/tags/* ]]; then
            VERSION=${GITHUB_REF#refs/tags/}
          elif [[ $GITHUB_REF == refs/heads/* ]]; then
            VERSION=$(echo ${GITHUB_REF#refs/heads/} | sed -r 's#/+#-#g')
            if [ "${{ github.event.repository.default_branch }}" = "$VERSION" ]; then
              VERSION=latest
            fi
          fi
          TAGS="${DOCKER_IMAGE}:${VERSION}"
          echo ::set-output name=tags::${TAGS}

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1

      - name: Login to DockerHub
        uses: docker/login-action@v1
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Publish to Docker Hub
        uses: docker/build-push-action@v2
        with:
          context: ./tools/proto
          file: ./tools/proto/Dockerfile
          push: ${{ github.event_name != 'pull_request' }}
          tags: ${{ steps.prep.outputs.tags }}
```
.github/workflows/proto-lint.yml (vendored, new file, 21 lines):

```yaml
name: Protobuf Lint
on:
  pull_request:
    paths:
      - 'proto/**'
  push:
    branches:
      - v0.34.x
    paths:
      - 'proto/**'

jobs:
  lint:
    runs-on: ubuntu-latest
    timeout-minutes: 5
    steps:
      - uses: actions/checkout@v3
      - uses: bufbuild/buf-setup-action@v1.8.0
      - uses: bufbuild/buf-lint-action@v1
        with:
          input: 'proto'
```
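Assuming `buf` is installed locally, the equivalent of this workflow's lint step against the same input directory is:

```sh
# Lint the proto/ tree the same way bufbuild/buf-lint-action does in CI.
buf lint proto
```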
.github/workflows/proto.yml (vendored, deleted, 22 lines):

```yaml
name: Protobuf
# Protobuf runs buf (https://buf.build/) lint and check-breakage
# This workflow is only run when a .proto file has been modified
on:
  pull_request:
    paths:
      - "**.proto"
jobs:
  proto-lint:
    runs-on: ubuntu-latest
    timeout-minutes: 4
    steps:
      - uses: actions/checkout@master
      - name: lint
        run: make proto-lint
  proto-breakage:
    runs-on: ubuntu-latest
    timeout-minutes: 4
    steps:
      - uses: actions/checkout@master
      - name: check-breakage
        run: make proto-check-breaking-ci
```
.github/workflows/release.yml (vendored, 49 lines changed):

```diff
@@ -3,27 +3,60 @@ name: "Release"
 on:
   push:
     tags:
-      - "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10
+      - "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10

 jobs:
-  goreleaser:
+  release:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
         with:
           fetch-depth: 0

-      - uses: actions/setup-go@v2
+      - uses: actions/setup-go@v3
         with:
-          go-version: '^1.15.4'
+          go-version: '1.18'

-      - run: echo https://github.com/tendermint/tendermint/blob/${GITHUB_REF#refs/tags/}/CHANGELOG.md#${GITHUB_REF#refs/tags/} > ../release_notes.md
+      - name: Build
+        uses: goreleaser/goreleaser-action@v3
+        if: ${{ github.event_name == 'pull_request' }}
+        with:
+          version: latest
+          args: build --skip-validate # skip validate skips initial sanity checks in order to be able to fully run

-      - name: Run GoReleaser
-        uses: goreleaser/goreleaser-action@v2
+      - run: echo https://github.com/tendermint/tendermint/blob/${GITHUB_REF#refs/tags/}/CHANGELOG.md#${GITHUB_REF#refs/tags/} > ../release_notes.md
+
+      - name: Release
+        uses: goreleaser/goreleaser-action@v3
+        if: startsWith(github.ref, 'refs/tags/')
         with:
           version: latest
           args: release --rm-dist --release-notes=../release_notes.md
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+
+  release-success:
+    needs: release
+    if: ${{ success() }}
+    runs-on: ubuntu-latest
+    steps:
+      - name: Notify Slack upon release
+        uses: slackapi/slack-github-action@v1.23.0
+        env:
+          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
+          SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
+          RELEASE_URL: "${{ github.server_url }}/${{ github.repository }}/releases/tag/${{ github.ref_name }}"
+        with:
+          payload: |
+            {
+              "blocks": [
+                {
+                  "type": "section",
+                  "text": {
+                    "type": "mrkdwn",
+                    "text": ":rocket: New Tendermint release: <${{ env.RELEASE_URL }}|${{ github.ref_name }}>"
+                  }
+                }
+              ]
+            }
```
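The `${GITHUB_REF#refs/tags/}` expansions above strip the ref prefix to recover the tag name; a small illustration with a made-up tag value:

```sh
# How the release-notes link is derived from the pushed ref (example value only).
GITHUB_REF="refs/tags/v0.34.24"
echo "https://github.com/tendermint/tendermint/blob/${GITHUB_REF#refs/tags/}/CHANGELOG.md#${GITHUB_REF#refs/tags/}"
# prints: https://github.com/tendermint/tendermint/blob/v0.34.24/CHANGELOG.md#v0.34.24
```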
.github/workflows/stale.yml (vendored, 2 lines changed):

```diff
@@ -7,7 +7,7 @@ jobs:
   stale:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/stale@v3
+      - uses: actions/stale@v6
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
           stale-pr-message: "This pull request has been automatically marked as stale because it has not had
```
.github/workflows/tests.yml (vendored, 48 lines changed):

```diff
@@ -23,11 +23,11 @@ jobs:
     runs-on: ubuntu-latest
     timeout-minutes: 5
     steps:
-      - uses: actions/setup-go@v2
+      - uses: actions/setup-go@v3
         with:
-          go-version: "^1.15.4"
-      - uses: actions/checkout@v2
-      - uses: technote-space/get-diff-action@v4
+          go-version: "1.18"
+      - uses: actions/checkout@v3
+      - uses: technote-space/get-diff-action@v6
         with:
           PATTERNS: |
             **/**.go
@@ -36,7 +36,7 @@ jobs:
       - name: install
         run: make install install_abci
         if: "env.GIT_DIFF != ''"
-      - uses: actions/cache@v2.1.4
+      - uses: actions/cache@v3
         with:
           path: ~/go/pkg/mod
           key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
@@ -44,7 +44,7 @@ jobs:
             ${{ runner.os }}-go-
         if: env.GIT_DIFF
       # Cache binaries for use by other jobs
-      - uses: actions/cache@v2.1.4
+      - uses: actions/cache@v3
         with:
           path: ~/go/bin
           key: ${{ runner.os }}-${{ github.sha }}-tm-binary
@@ -55,24 +55,24 @@ jobs:
     needs: build
     timeout-minutes: 5
     steps:
-      - uses: actions/setup-go@v2
+      - uses: actions/setup-go@v3
         with:
-          go-version: "^1.15.4"
-      - uses: actions/checkout@v2
-      - uses: technote-space/get-diff-action@v4
+          go-version: "^1.18"
+      - uses: actions/checkout@v3
+      - uses: technote-space/get-diff-action@v6
         with:
           PATTERNS: |
             **/**.go
             go.mod
             go.sum
-      - uses: actions/cache@v2.1.4
+      - uses: actions/cache@v3
         with:
           path: ~/go/pkg/mod
           key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
           restore-keys: |
             ${{ runner.os }}-go-
         if: env.GIT_DIFF
-      - uses: actions/cache@v2.1.4
+      - uses: actions/cache@v3
         with:
           path: ~/go/bin
           key: ${{ runner.os }}-${{ github.sha }}-tm-binary
@@ -87,24 +87,24 @@ jobs:
     needs: build
     timeout-minutes: 5
     steps:
-      - uses: actions/setup-go@v2
+      - uses: actions/setup-go@v3
         with:
-          go-version: "^1.15.4"
-      - uses: actions/checkout@v2
-      - uses: technote-space/get-diff-action@v4
+          go-version: "^1.18"
+      - uses: actions/checkout@v3
+      - uses: technote-space/get-diff-action@v6
         with:
           PATTERNS: |
             **/**.go
             go.mod
             go.sum
-      - uses: actions/cache@v2.1.4
+      - uses: actions/cache@v3
         with:
           path: ~/go/pkg/mod
           key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
           restore-keys: |
             ${{ runner.os }}-go-
         if: env.GIT_DIFF
-      - uses: actions/cache@v2.1.4
+      - uses: actions/cache@v3
         with:
           path: ~/go/bin
           key: ${{ runner.os }}-${{ github.sha }}-tm-binary
@@ -118,24 +118,24 @@ jobs:
     needs: build
     timeout-minutes: 5
     steps:
-      - uses: actions/setup-go@v2
+      - uses: actions/setup-go@v3
         with:
-          go-version: "^1.15.4"
-      - uses: actions/checkout@v2
-      - uses: technote-space/get-diff-action@v4
+          go-version: "1.18"
+      - uses: actions/checkout@v3
+      - uses: technote-space/get-diff-action@v6
         with:
           PATTERNS: |
             **/**.go
             go.mod
             go.sum
-      - uses: actions/cache@v2.1.4
+      - uses: actions/cache@v3
         with:
           path: ~/go/pkg/mod
           key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
           restore-keys: |
             ${{ runner.os }}-go-
         if: env.GIT_DIFF
-      - uses: actions/cache@v2.1.4
+      - uses: actions/cache@v3
         with:
           path: ~/go/bin
           key: ${{ runner.os }}-${{ github.sha }}-tm-binary
```
.gitignore (vendored, 11 lines changed):

```diff
@@ -22,6 +22,7 @@ docs/.vuepress/dist
 *.log
 abci-cli
 docs/node_modules/
 docs/.vuepress/public/rpc
+index.html.md

 scripts/wal2json/wal2json
@@ -46,6 +47,8 @@ terraform.tfstate
 terraform.tfstate.backup
 terraform.tfstate.d
 profile\.out
 test/app/grpc_client
+test/loadtime/build
 test/e2e/build
+test/e2e/networks/*/
 test/logs
@@ -56,3 +59,11 @@ test/fuzz/**/corpus
 test/fuzz/**/crashers
 test/fuzz/**/suppressions
 test/fuzz/**/*.zip
+*.aux
+*.bbl
+*.blg
+*.pdf
+*.gz
+*.dvi
+# Python virtual environments
+.venv
```
.golangci.yml:

```diff
@@ -2,20 +2,12 @@ linters:
  enable:
    - asciicheck
    - bodyclose
    - deadcode
    - depguard
    - dogsled
    - dupl
    - errcheck
    - exportloopref
    # - funlen
    # - gochecknoglobals
    # - gochecknoinits
    # - gocognit
    - goconst
    # - gocritic
    # - gocyclo
    # - godox
    - gofmt
    - goimports
    - revive
@@ -23,48 +15,32 @@ linters:
    - gosimple
    - govet
    - ineffassign
    # - interfacer
    - lll
    # - maligned
    # - misspell
    - misspell
    - nakedret
    - nolintlint
    - prealloc
    - staticcheck
    - structcheck
    # - structcheck // to be fixed by golangci-lint
    - stylecheck
    # - typecheck
    - typecheck
    - unconvert
    # - unparam
    - unused
    - varcheck
    # - whitespace
    # - wsl

issues:
  exclude-rules:
    - path: _test\.go
      linters:
        - gosec
    - linters:
        - lll
      source: "https://"
  max-same-issues: 50

linters-settings:
  dogsled:
    max-blank-identifiers: 3
  maligned:
    suggest-new: true
  # govet:
  #   check-shadowing: true
  revive:
  golint:
    min-confidence: 0
  maligned:
    suggest-new: true
  misspell:
    locale: US
    ignore-words:
      - behaviour

run:
  skip-files:
    - libs/pubsub/query/query.peg.go
```
.goreleaser.yml:

```diff
@@ -25,4 +25,13 @@ checksum:
   algorithm: sha256

 release:
-  name_template: "{{.Version}} (WARNING: BETA SOFTWARE)"
+  prerelease: auto
+  name_template: "{{.Version}}"
+
+archives:
+  - files:
+      - LICENSE
+      - README.md
+      - UPGRADING.md
+      - SECURITY.md
+      - CHANGELOG.md
```
.markdownlint.yml:

```diff
@@ -1,8 +1,11 @@
-default: true,
-MD007: { "indent": 4 }
+default: true
+MD001: false
+MD007: {indent: 4}
 MD013: false
-MD024: { siblings_only: true }
+MD024: {siblings_only: true}
 MD025: false
-MD033: { no-inline-html: false }
-no-hard-tabs: false
-whitespace: false
+MD033: false
+MD036: false
+MD010: false
+MD012: false
+MD028: false
```
.vscode/settings.json (vendored, 1 line changed):

```diff
@@ -5,4 +5,5 @@
    "--proto_path=${workspaceRoot}/third_party/proto"
  ]
}

}
```
CHANGELOG.md (197 lines changed):

```diff
@@ -2,6 +2,175 @@
 
 Friendly reminder, we have a [bug bounty program](https://hackerone.com/cosmos).
 
+## v0.34.24
+
+*Nov 21, 2022*
+
+Apart from one minor bug fix, this release aims to optimize the output of the
+RPC (both HTTP and WebSocket endpoints). See our [upgrading
+guidelines](./UPGRADING.md#v03424) for more details.
+
+### IMPROVEMENTS
+
+- `[rpc]` [\#9724](https://github.com/tendermint/tendermint/issues/9724) Remove
+  useless whitespace in RPC output (@adizere, @thanethomson)
+
+### BUG FIXES
+
+- `[rpc]` [\#9692](https://github.com/tendermint/tendermint/issues/9692) Remove
+  `Cache-Control` header response from `/check_tx` endpoint (@JayT106)
+
+## v0.34.23
+
+*Nov 9, 2022*
+
+This release introduces some new Prometheus metrics to help in determining what
+kinds of messages are consuming the most P2P bandwidth. This builds towards our
+broader goal of optimizing Tendermint bandwidth consumption, and will give us
+meaningful insights once we can establish these metrics for a number of chains.
+
+We now also return `Cache-Control` headers for select RPC endpoints to help
+facilitate caching.
+
+Special thanks to external contributors on this release: @JayT106
+
+### IMPROVEMENTS
+- `[p2p]` [\#9641](https://github.com/tendermint/tendermint/issues/9641) Add new
+  Envelope type and associated methods for sending and receiving Envelopes
+  instead of raw bytes. This also adds new metrics,
+  `tendermint_p2p_message_send_bytes_total` and
+  `tendermint_p2p_message_receive_bytes_total`, that expose how many bytes of
+  each message type have been sent.
+- `[rpc]` [\#9666](https://github.com/tendermint/tendermint/issues/9666) Enable
+  caching of RPC responses (@JayT106)
+
+The following RPC endpoints will return `Cache-Control` headers with a maximum
+age of 1 day:
+
+- `/abci_info`
+- `/block`, if `height` is supplied
+- `/block_by_hash`
+- `/block_results`, if `height` is supplied
+- `/blockchain`
+- `/check_tx`
+- `/commit`, if `height` is supplied
+- `/consensus_params`, if `height` is supplied
+- `/genesis`
+- `/genesis_chunked`
+- `/tx`
+- `/validators`, if `height` is supplied
```
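A quick way to observe both v0.34.23 changes against a locally running node; the RPC and Prometheus addresses below are the conventional defaults and may differ per configuration:

```sh
# Cache-Control should now appear on cacheable endpoints (default RPC port assumed).
curl -sI http://localhost:26657/genesis | grep -i cache-control

# The new per-message-type byte counters (Prometheus must be enabled in config.toml;
# :26660 is the usual default listen address).
curl -s http://localhost:26660/metrics | grep tendermint_p2p_message_send_bytes_total
```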
```diff
+## v0.34.22
+
+This release includes several bug fixes, [one of
+which](https://github.com/tendermint/tendermint/pull/9518) we discovered while
+building up a baseline for v0.34 against which to compare our upcoming v0.37
+release during our [QA process](./docs/qa/).
+
+Special thanks to external contributors on this release: @RiccardoM
+
+### FEATURES
+
+- [rpc] [\#9423](https://github.com/tendermint/tendermint/pull/9423) Support
+  HTTPS URLs from the WebSocket client (@RiccardoM, @cmwaters)
+
+### BUG FIXES
+
+- [config] [\#9483](https://github.com/tendermint/tendermint/issues/9483)
+  Calling `tendermint init` would incorrectly leave out the new `[storage]`
+  section delimiter in the generated configuration file - this has now been
+  fixed
+- [p2p] [\#9500](https://github.com/tendermint/tendermint/issues/9500) Prevent
+  peers who have errored being added to the peer set (@jmalicevic)
+- [indexer] [\#9473](https://github.com/tendermint/tendermint/issues/9473) Fix
+  bug that caused the psql indexer to index empty blocks whenever one of the
+  transactions returned a non zero code. The relevant deduplication logic has
+  been moved within the kv indexer only (@cmwaters)
+- [blocksync] [\#9518](https://github.com/tendermint/tendermint/issues/9518) A
+  block sync stall was observed during our QA process whereby the node was
+  unable to make progress. Retrying block requests after a timeout fixes this.
+
+## v0.34.21
+
+Release highlights include:
+
+- A new `[storage]` configuration section and flag `discard_abci_responses`,
+  which, if enabled, discards all ABCI responses except the latest one in order
+  to reduce disk space usage in the state store. When enabled, the
+  `block_results` RPC endpoint can no longer function and will return an error.
+- A new CLI command, `reindex-event`, to re-index block and tx events to the
+  event sinks. You can run this command when the event store backend
+  dropped/disconnected or you want to replace the backend. When
+  `discard_abci_responses` is enabled, you will not be able to use this command.
+
+Special thanks to external contributors on this release: @rootwarp & @animart
+
+### FEATURES
+
+- [cli] [\#9083](https://github.com/tendermint/tendermint/issues/9083) Backport command to reindex missed events (@cmwaters)
+- [cli] [\#9107](https://github.com/tendermint/tendermint/issues/9107) Add the `p2p.external-address` argument to set the node P2P external address (@amimart)
+
+### IMPROVEMENTS
+
+- [config] [\#9054](https://github.com/tendermint/tendermint/issues/9054) `discard_abci_responses` flag added to discard all ABCI
+  responses except the last in order to save on storage space in the state
+  store (@samricotta)
+
+### BUG FIXES
+
+- [mempool] [\#9033](https://github.com/tendermint/tendermint/issues/9033) Rework lock discipline to mitigate callback deadlocks in the
+  priority mempool
+- [cli] [\#9103](https://github.com/tendermint/tendermint/issues/9103) fix unsafe-reset-all for working with home path (@rootwarp)
```
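A sketch of enabling the new storage option in an existing `config.toml`; the key and section names are as described above, the default home directory and the sed edit are only one way to flip it:

```sh
# Enable discarding of historical ABCI responses; note this disables /block_results.
sed -i 's/^discard_abci_responses = false/discard_abci_responses = true/' \
  "$HOME/.tendermint/config/config.toml"
```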
```diff
+## v0.34.20
+
+Special thanks to external contributors on this release: @joeabbey @yihuang
+
+This release introduces a prioritized mempool. Further notes can be found in UPGRADING.md.
+
+NOTE: There's a known issue when combining the prioritized mempool with the ABCI socket client, that the team are curently working to resolve. Read more about the issue [here](https://github.com/tendermint/tendermint/pull/9030).
+
+### BUG FIXES
+
+- [blocksync] [\#8496](https://github.com/tendermint/tendermint/pull/8496) validate block against state before persisting it to disk (@cmwaters)
+- [indexer] [#8625](https://github.com/tendermint/tendermint/pull/8625) Fix overriding tx index of duplicated txs. (@yihuang)
+- [mempool] [\#8962](https://github.com/tendermint/tendermint/issues/8962) Backport priority mempool fixes from v0.35.x to v0.34.x (@creachadair).
+
+### FEATURES
+
+- [cli] [\#8674] Add command to force compact goleveldb databases (@cmwaters)
+- [mempool] [\#8695] Port back the priority mempool. (@alexanderbez, @jmalicevic, @cmwaters)
+
+### IMPROVEMENTS
+
+- [logging] [\#8845](https://github.com/tendermint/tendermint/issues/8845) Add "Lazy" Stringers to defer Sprintf and Hash until logs print. (@joeabbey)
+
+## v0.34.19
+
+### BUG FIXES
+
+- [cli] [\#8270](https://github.com/tendermint/tendermint/issues/8270) fix reset commands (@alexanderbez).
+
+## v0.34.18
+
+### BREAKING CHANGES
+
+- CLI/RPC/Config
+  - [cli] [\#8258](https://github.com/tendermint/tendermint/pull/8258) Fix a bug in the cli that caused `unsafe-reset-all` to panic
+
+## v0.34.17
+
+### BREAKING CHANGES
+
+- CLI/RPC/Config
+
+  - [cli] [\#8081](https://github.com/tendermint/tendermint/issues/8081) make the reset command safe to use (@marbar3778).
+
+### BUG FIXES
+
+- [consensus] [\#8079](https://github.com/tendermint/tendermint/issues/8079) start the timeout ticker before relay (backport #7844) (@creachadair).
+- [consensus] [\#7992](https://github.com/tendermint/tendermint/issues/7992) [\#7994](https://github.com/tendermint/tendermint/issues/7994) change lock handling in handleMsg and reactor to alleviate issues gossiping during long ABCI calls (@williambanfield).
+
 ## v0.34.16
 
 Special thanks to external contributors on this release: @yihuang
@@ -389,7 +558,7 @@ Special thanks to external contributors on this release: @james-ray, @fedekunze,
 - [abci] [\#5174](https://github.com/tendermint/tendermint/pull/5174) Remove `MockEvidence` in favor of testing with actual evidence types (`DuplicateVoteEvidence` & `LightClientAttackEvidence`) (@cmwaters)
 - [abci] [\#5191](https://github.com/tendermint/tendermint/pull/5191) Add `InitChain.InitialHeight` field giving the initial block height (@erikgrinaker)
 - [abci] [\#5227](https://github.com/tendermint/tendermint/pull/5227) Add `ResponseInitChain.app_hash` which is recorded in genesis block (@erikgrinaker)
-- [config] [\#5147](https://github.com/tendermint/tendermint/pull/5147) Add `--consensus.double_sign_check_height` flag and `DoubleSignCheckHeight` config variable. See [ADR-51](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-051-double-signing-risk-reduction.md) (@dongsam)
+- [config] [\#5147](https://github.com/tendermint/tendermint/pull/5147) Add `--consensus.double_sign_check_height` flag and `DoubleSignCheckHeight` config variable. See [ADR-51](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-051-double-signing-risk-reduction.md) (@dongsam)
 - [db] [\#5233](https://github.com/tendermint/tendermint/pull/5233) Add support for `badgerdb` database backend (@erikgrinaker)
 - [evidence] [\#4532](https://github.com/tendermint/tendermint/pull/4532) Handle evidence from light clients (@melekes)
 - [evidence] [#4821](https://github.com/tendermint/tendermint/pull/4821) Amnesia (light client attack) evidence can be detected, verified and committed (@cmwaters)
@@ -403,7 +572,7 @@
 - [rpc] [\#5017](https://github.com/tendermint/tendermint/pull/5017) Add `/check_tx` endpoint to check transactions without executing them or adding them to the mempool (@melekes)
 - [rpc] [\#5108](https://github.com/tendermint/tendermint/pull/5108) Subscribe using the websocket for new evidence events (@cmwaters)
 - [statesync] Add state sync support, where a new node can be rapidly bootstrapped by fetching state snapshots from peers instead of replaying blocks. See the `[statesync]` config section.
-- [evidence] [\#5361](https://github.com/tendermint/tendermint/pull/5361) Add LightClientAttackEvidence and refactor evidence lifecycle - for more information see [ADR-059](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-059-evidence-composition-and-lifecycle.md) (@cmwaters)
+- [evidence] [\#5361](https://github.com/tendermint/tendermint/pull/5361) Add LightClientAttackEvidence and refactor evidence lifecycle - for more information see [ADR-059](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-059-evidence-composition-and-lifecycle.md) (@cmwaters)
 
 ### IMPROVEMENTS
 
@@ -483,7 +652,7 @@ This security release fixes:
 Tendermint 0.33.0 and above allow block proposers to include signatures for the
 wrong block. This may happen naturally if you start a network, have it run for
 some time and restart it **without changing the chainID**. (It is a
-[misconfiguration](https://docs.tendermint.com/master/tendermint-core/using-tendermint.html)
+[misconfiguration](https://docs.tendermint.com/v0.33/tendermint-core/using-tendermint.html)
 to reuse chainIDs.) Correct block proposers will accidentally include signatures
 for the wrong block if they see these signatures, and then commits won't validate,
 making all proposed blocks invalid. A malicious validator (even with a minimal
@@ -782,7 +951,7 @@ and a validator address plus a timestamp. Note we may remove the validator
 address & timestamp fields in the future (see ADR-25).
 
 `lite2` package has been added to solve `lite` issues and introduce weak
-subjectivity interface. Refer to the [spec](https://github.com/tendermint/spec/blob/master/spec/consensus/light-client.md) for complete details.
+subjectivity interface. Refer to the [spec](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/consensus/light-client.md) for complete details.
 `lite` package is now deprecated and will be removed in v0.34 release.
 
 ### BREAKING CHANGES:
@@ -1143,7 +1312,7 @@ Special thanks to external contributors on this release: @jon-certik, @gracenoah
 *August 28, 2019*
 
 @climber73 wrote the [Writing a Tendermint Core application in Java
-(gRPC)](https://github.com/tendermint/tendermint/blob/master/docs/guides/java.md)
+(gRPC)](https://github.com/tendermint/tendermint/blob/v0.32.x/docs/guides/java.md)
 guide.
 
 Special thanks to external contributors on this release:
@@ -1176,7 +1345,7 @@ Special thanks to external contributors on this release:
 
 ### FEATURES:
 
-- [blockchain] [\#3561](https://github.com/tendermint/tendermint/issues/3561) Add early version of the new blockchain reactor, which is supposed to be more modular and testable compared to the old version. To try it, you'll have to change `version` in the config file, [here](https://github.com/tendermint/tendermint/blob/master/config/toml.go#L303) NOTE: It's not ready for a production yet. For further information, see [ADR-40](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-040-blockchain-reactor-refactor.md) & [ADR-43](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-043-blockchain-riri-org.md)
+- [blockchain] [\#3561](https://github.com/tendermint/tendermint/issues/3561) Add early version of the new blockchain reactor, which is supposed to be more modular and testable compared to the old version. To try it, you'll have to change `version` in the config file, [here](https://github.com/tendermint/tendermint/blob/v0.34.x/config/toml.go#L303) NOTE: It's not ready for a production yet. For further information, see [ADR-40](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-040-blockchain-reactor-refactor.md) & [ADR-43](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-043-blockchain-riri-org.md)
 - [mempool] [\#3826](https://github.com/tendermint/tendermint/issues/3826) Make `max_msg_bytes` configurable(@bluele)
 - [node] [\#3846](https://github.com/tendermint/tendermint/pull/3846) Allow replacing existing p2p.Reactor(s) using [`CustomReactors`
   option](https://godoc.org/github.com/tendermint/tendermint/node#CustomReactors).
@@ -1493,7 +1662,7 @@ Special thanks to external contributors on this release:
 - [libs/db] [\#3611](https://github.com/tendermint/tendermint/issues/3611) Conditional compilation
   * Use `cleveldb` tag instead of `gcc` to compile Tendermint with CLevelDB or
     use `make build_c` / `make install_c` (full instructions can be found at
-    https://docs.tendermint.com/master/introduction/install.html#compile-with-cleveldb-support)
+    <https://docs.tendermint.com/>)
   * Use `boltdb` tag to compile Tendermint with bolt db
 - [node] [\#3362](https://github.com/tendermint/tendermint/issues/3362) Return an error if `persistent_peers` list is invalid (except
   when IP lookup fails)
@@ -1717,7 +1886,7 @@ more details.
 - [rpc] [\#3269](https://github.com/tendermint/tendermint/issues/2826) Limit number of unique clientIDs with open subscriptions. Configurable via `rpc.max_subscription_clients`
 - [rpc] [\#3269](https://github.com/tendermint/tendermint/issues/2826) Limit number of unique queries a given client can subscribe to at once. Configurable via `rpc.max_subscriptions_per_client`.
 - [rpc] [\#3435](https://github.com/tendermint/tendermint/issues/3435) Default ReadTimeout and WriteTimeout changed to 10s. WriteTimeout can increased by setting `rpc.timeout_broadcast_tx_commit` in the config.
-- [rpc/client] [\#3269](https://github.com/tendermint/tendermint/issues/3269) Update `EventsClient` interface to reflect new pubsub/eventBus API [ADR-33](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-033-pubsub.md). This includes `Subscribe`, `Unsubscribe`, and `UnsubscribeAll` methods.
+- [rpc/client] [\#3269](https://github.com/tendermint/tendermint/issues/3269) Update `EventsClient` interface to reflect new pubsub/eventBus API [ADR-33](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-033-pubsub.md). This includes `Subscribe`, `Unsubscribe`, and `UnsubscribeAll` methods.
 
 * Apps
   - [abci] [\#3403](https://github.com/tendermint/tendermint/issues/3403) Remove `time_iota_ms` from BlockParams. This is a
@@ -1770,7 +1939,7 @@ more details.
 - [blockchain] [\#3358](https://github.com/tendermint/tendermint/pull/3358) Fix timer leak in `BlockPool` (@guagualvcha)
 - [cmd] [\#3408](https://github.com/tendermint/tendermint/issues/3408) Fix `testnet` command's panic when creating non-validator configs (using `--n` flag) (@srmo)
 - [libs/db/remotedb/grpcdb] [\#3402](https://github.com/tendermint/tendermint/issues/3402) Close Iterator/ReverseIterator after use
-- [libs/pubsub] [\#951](https://github.com/tendermint/tendermint/issues/951), [\#1880](https://github.com/tendermint/tendermint/issues/1880) Use non-blocking send when dispatching messages [ADR-33](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-033-pubsub.md)
+- [libs/pubsub] [\#951](https://github.com/tendermint/tendermint/issues/951), [\#1880](https://github.com/tendermint/tendermint/issues/1880) Use non-blocking send when dispatching messages [ADR-33](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-033-pubsub.md)
 - [lite] [\#3364](https://github.com/tendermint/tendermint/issues/3364) Fix `/validators` and `/abci_query` proxy endpoints
   (@guagualvcha)
 - [p2p/conn] [\#3347](https://github.com/tendermint/tendermint/issues/3347) Reject all-zero shared secrets in the Diffie-Hellman step of secret-connection
@@ -2468,7 +2637,7 @@ Special thanks to external contributors on this release:
 This release is mostly about the ConsensusParams - removing fields and enforcing MaxGas.
 It also addresses some issues found via security audit, removes various unused
 functions from `libs/common`, and implements
-[ADR-012](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-012-peer-transport.md).
+[ADR-012](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-012-peer-transport.md).
 
 BREAKING CHANGES:
 
@@ -2549,7 +2718,7 @@ BREAKING CHANGES:
 - [abci] Added address of the original proposer of the block to Header
 - [abci] Change ABCI Header to match Tendermint exactly
 - [abci] [\#2159](https://github.com/tendermint/tendermint/issues/2159) Update use of `Validator` (see
-  [ADR-018](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-018-ABCI-Validators.md)):
+  [ADR-018](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-018-ABCI-Validators.md)):
   - Remove PubKey from `Validator` (so it's just Address and Power)
   - Introduce `ValidatorUpdate` (with just PubKey and Power)
   - InitChain and EndBlock use ValidatorUpdate
@@ -2571,7 +2740,7 @@ BREAKING CHANGES:
 - [state] [\#1815](https://github.com/tendermint/tendermint/issues/1815) Validator set changes are now delayed by one block (!)
   - Add NextValidatorSet to State, changes on-disk representation of state
 - [state] [\#2184](https://github.com/tendermint/tendermint/issues/2184) Enforce ConsensusParams.BlockSize.MaxBytes (See
-  [ADR-020](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-020-block-size.md)).
+  [ADR-020](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-020-block-size.md)).
   - Remove ConsensusParams.BlockSize.MaxTxs
   - Introduce maximum sizes for all components of a block, including ChainID
 - [types] Updates to the block Header:
@@ -2582,7 +2751,7 @@ BREAKING CHANGES:
 - [consensus] [\#2203](https://github.com/tendermint/tendermint/issues/2203) Implement BFT time
   - Timestamp in block must be monotonic and equal the median of timestamps in block's LastCommit
 - [crypto] [\#2239](https://github.com/tendermint/tendermint/issues/2239) Secp256k1 signature changes (See
-  [ADR-014](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-014-secp-malleability.md)):
+  [ADR-014](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-014-secp-malleability.md)):
   - format changed from DER to `r || s`, both little endian encoded as 32 bytes.
   - malleability removed by requiring `s` to be in canonical form.
 
@@ -3382,7 +3551,7 @@ Also includes the Grand Repo-Merge of 2017.
 BREAKING CHANGES:
 
 - Config and Flags:
-  - The `config` map is replaced with a [`Config` struct](https://github.com/tendermint/tendermint/blob/master/config/config.go#L11),
+  - The `config` map is replaced with a [`Config` struct](https://github.com/tendermint/tendermint/blob/v0.10.0/config/config.go#L11),
     containing substructs: `BaseConfig`, `P2PConfig`, `MempoolConfig`, `ConsensusConfig`, `RPCConfig`
   - This affects the following flags:
     - `--seeds` is now `--p2p.seeds`
```
@@ -1,10 +1,6 @@
|
||||
# Unreleased Changes
|
||||
|
||||
## v0.34.16
|
||||
|
||||
Special thanks to external contributors on this release:
|
||||
|
||||
Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).
|
||||
## v0.34.25
|
||||
|
||||
### BREAKING CHANGES
|
||||
|
||||
@@ -23,3 +19,4 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi
|
||||
### IMPROVEMENTS
|
||||
|
||||
### BUG FIXES
|
||||
|
||||
|
||||
@@ -26,7 +26,7 @@ will indicate their support with a heartfelt emoji.
|
||||
|
||||
If the issue would benefit from thorough discussion, maintainers may
|
||||
request that you create a [Request For
|
||||
Comment](https://github.com/tendermint/spec/tree/master/rfc). Discussion
|
||||
Comment](https://github.com/tendermint/tendermint/tree/main/rfc). Discussion
|
||||
at the RFC stage will build collective understanding of the dimensions
|
||||
of the problems and help structure conversations around trade-offs.
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# stage 1 Generate Tendermint Binary
|
||||
FROM golang:1.15-alpine as builder
|
||||
FROM golang:1.18-alpine as builder
|
||||
RUN apk update && \
|
||||
apk upgrade && \
|
||||
apk --no-cache add make
|
||||
@@ -8,7 +8,7 @@ WORKDIR /tendermint
|
||||
RUN make build-linux
|
||||
|
||||
# stage 2
|
||||
FROM golang:1.15-alpine
|
||||
FROM golang:1.18-alpine
|
||||
LABEL maintainer="hello@tendermint.com"
|
||||
|
||||
# Tendermint will be looking for the genesis file in /tendermint/config/genesis.json
|
||||
|
||||
@@ -6,7 +6,7 @@ DockerHub tags for official releases are [here](https://hub.docker.com/r/tenderm
|
||||
|
||||
Official releases can be found [here](https://github.com/tendermint/tendermint/releases).
|
||||
|
||||
The Dockerfile for tendermint is not expected to change in the near future. The master file used for all builds can be found [here](https://raw.githubusercontent.com/tendermint/tendermint/master/DOCKER/Dockerfile).
|
||||
The Dockerfile for tendermint is not expected to change in the near future. The master file used for all builds can be found [here](https://raw.githubusercontent.com/tendermint/tendermint/main/DOCKER/Dockerfile).
|
||||
|
||||
Respective versioned files can be found <https://raw.githubusercontent.com/tendermint/tendermint/vX.XX.XX/DOCKER/Dockerfile> (replace the Xs with the version number).
|
||||
|
||||
@@ -20,9 +20,9 @@ Respective versioned files can be found <https://raw.githubusercontent.com/tende
|
||||
|
||||
Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine, written in any programming language, and securely replicates it on many machines.
|
||||
|
||||
For more background, see the [the docs](https://docs.tendermint.com/master/introduction/#quick-start).
|
||||
For more background, see [the docs](https://docs.tendermint.com/v0.34/introduction/#quick-start).
|
||||
|
||||
To get started developing applications, see the [application developers guide](https://docs.tendermint.com/master/introduction/quick-start.html).
|
||||
To get started developing applications, see the [application developers guide](https://docs.tendermint.com/v0.34/introduction/quick-start.html).
|
||||
|
||||
## How to use this image
|
||||
|
||||
@@ -37,7 +37,7 @@ docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint node --proxy_app
|
||||
|
||||
## Local cluster
|
||||
|
||||
To run a 4-node network, see the `Makefile` in the root of [the repo](https://github.com/tendermint/tendermint/blob/master/Makefile) and run:
|
||||
To run a 4-node network, see the `Makefile` in the root of [the repo](https://github.com/tendermint/tendermint/blob/v0.34.x/Makefile) and run:
|
||||
|
||||
```sh
|
||||
make build-linux
|
||||
@@ -49,8 +49,8 @@ Note that this will build and use a different image than the ones provided here.
|
||||
|
||||
## License
|
||||
|
||||
- Tendermint's license is [Apache 2.0](https://github.com/tendermint/tendermint/blob/master/LICENSE).
|
||||
- Tendermint's license is [Apache 2.0](https://github.com/tendermint/tendermint/blob/main/LICENSE).
|
||||
|
||||
## Contributing
|
||||
|
||||
Contributions are most welcome! See the [contributing file](https://github.com/tendermint/tendermint/blob/master/CONTRIBUTING.md) for more information.
|
||||
Contributions are most welcome! See the [contributing file](https://github.com/tendermint/tendermint/blob/main/CONTRIBUTING.md) for more information.
|
||||
|
||||
LICENSE
@@ -1,5 +1,3 @@
|
||||
Tendermint Core
|
||||
License: Apache2.0
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
@@ -181,7 +179,7 @@ License: Apache2.0
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
@@ -189,7 +187,7 @@ License: Apache2.0
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright 2016 All in Bits, Inc
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
||||
Makefile
@@ -13,7 +13,6 @@ endif
|
||||
LD_FLAGS = -X github.com/tendermint/tendermint/version.TMCoreSemVer=$(VERSION)
|
||||
BUILD_FLAGS = -mod=readonly -ldflags "$(LD_FLAGS)"
|
||||
HTTPS_GIT := https://github.com/tendermint/tendermint.git
|
||||
DOCKER_BUF := docker run -v $(shell pwd):/workspace --workdir /workspace bufbuild/buf
|
||||
CGO_ENABLED ?= 0
|
||||
|
||||
# handle nostrip
|
||||
@@ -70,34 +69,59 @@ install:
|
||||
CGO_ENABLED=$(CGO_ENABLED) go install $(BUILD_FLAGS) -tags $(BUILD_TAGS) ./cmd/tendermint
|
||||
.PHONY: install
|
||||
|
||||
|
||||
###############################################################################
|
||||
### Mocks ###
|
||||
###############################################################################
|
||||
|
||||
mockery:
|
||||
go generate -run="./scripts/mockery_generate.sh" ./...
|
||||
.PHONY: mockery
|
||||
|
||||
###############################################################################
|
||||
### Protobuf ###
|
||||
###############################################################################
|
||||
|
||||
proto-all: proto-gen proto-lint proto-check-breaking
|
||||
.PHONY: proto-all
|
||||
check-proto-deps:
|
||||
ifeq (,$(shell which protoc-gen-gogofaster))
|
||||
@go install github.com/gogo/protobuf/protoc-gen-gogofaster@latest
|
||||
endif
|
||||
.PHONY: check-proto-deps
|
||||
|
||||
proto-gen:
|
||||
@docker pull -q tendermintdev/docker-build-proto
|
||||
check-proto-format-deps:
|
||||
ifeq (,$(shell which clang-format))
|
||||
$(error "clang-format is required for Protobuf formatting. See instructions for your platform on how to install it.")
|
||||
endif
|
||||
.PHONY: check-proto-format-deps
|
||||
|
||||
proto-gen: check-proto-deps
|
||||
@echo "Generating Protobuf files"
|
||||
@docker run -v $(shell pwd):/workspace --workdir /workspace tendermintdev/docker-build-proto sh ./scripts/protocgen.sh
|
||||
@go run github.com/bufbuild/buf/cmd/buf generate
|
||||
@mv ./proto/tendermint/abci/types.pb.go ./abci/types/
|
||||
.PHONY: proto-gen
|
||||
|
||||
proto-lint:
|
||||
@$(DOCKER_BUF) check lint --error-format=json
|
||||
# These targets are provided for convenience and are intended for local
|
||||
# execution only.
|
||||
proto-lint: check-proto-deps
|
||||
@echo "Linting Protobuf files"
|
||||
@go run github.com/bufbuild/buf/cmd/buf lint
|
||||
.PHONY: proto-lint
|
||||
|
||||
proto-format:
|
||||
proto-format: check-proto-format-deps
|
||||
@echo "Formatting Protobuf files"
|
||||
docker run -v $(shell pwd):/workspace --workdir /workspace tendermintdev/docker-build-proto find ./ -not -path "./third_party/*" -name *.proto -exec clang-format -i {} \;
|
||||
@find . -name '*.proto' -path "./proto/*" -exec clang-format -i {} \;
|
||||
.PHONY: proto-format
|
||||
|
||||
proto-check-breaking:
|
||||
@$(DOCKER_BUF) check breaking --against-input .git#branch=master
|
||||
proto-check-breaking: check-proto-deps
|
||||
@echo "Checking for breaking changes in Protobuf files against local branch"
|
||||
@echo "Note: This is only useful if your changes have not yet been committed."
|
||||
@echo " Otherwise read up on buf's \"breaking\" command usage:"
|
||||
@echo " https://docs.buf.build/breaking/usage"
|
||||
@go run github.com/bufbuild/buf/cmd/buf breaking --against ".git"
|
||||
.PHONY: proto-check-breaking
|
||||
|
||||
proto-check-breaking-ci:
|
||||
@$(DOCKER_BUF) check breaking --against-input $(HTTPS_GIT)#branch=master
|
||||
@go run github.com/bufbuild/buf/cmd/buf breaking --against $(HTTPS_GIT)#branch=v0.34.x
|
||||
.PHONY: proto-check-breaking-ci
|
||||
|
||||
###############################################################################
|
||||
@@ -176,7 +200,7 @@ format:
|
||||
|
||||
lint:
|
||||
@echo "--> Running linter"
|
||||
@golangci-lint run
|
||||
@go run github.com/golangci/golangci-lint/cmd/golangci-lint run
|
||||
.PHONY: lint
|
||||
|
||||
DESTINATION = ./index.html.md
|
||||
@@ -192,9 +216,7 @@ build-docs:
|
||||
mkdir -p ~/output/$${path_prefix} ; \
|
||||
cp -r .vuepress/dist/* ~/output/$${path_prefix}/ ; \
|
||||
cp ~/output/$${path_prefix}/index.html ~/output ; \
|
||||
done < versions ; \
|
||||
mkdir -p ~/output/master ; \
|
||||
cp -r .vuepress/dist/* ~/output/master/
|
||||
done < versions ;
|
||||
.PHONY: build-docs
|
||||
|
||||
sync-docs:
|
||||
|
||||
README.md
@@ -1,161 +1,170 @@
|
||||
# Tendermint
|
||||
|
||||
_UPDATE: The Tendermint Core feature set is frozen for LTS, see issue https://github.com/tendermint/tendermint/issues/9972_<br/>
|
||||
_This is the latest stable release used by cosmoshub-4, version 0.34.24_<br/>
|
||||
_The previous main branch (v0.38.xx) can now be found under "main_backup"_<br/>
|
||||
|
||||

|
||||
|
||||
[Byzantine-Fault Tolerant](https://en.wikipedia.org/wiki/Byzantine_fault_tolerance)
|
||||
[State Machines](https://en.wikipedia.org/wiki/State_machine_replication).
|
||||
Or [Blockchain](<https://en.wikipedia.org/wiki/Blockchain_(database)>), for short.
|
||||
[Byzantine-Fault Tolerant][bft] [State Machine Replication][smr]. Or
|
||||
[Blockchain], for short.
|
||||
|
||||
[](https://github.com/tendermint/tendermint/releases/latest)
|
||||
[](https://pkg.go.dev/github.com/tendermint/tendermint)
|
||||
[](https://github.com/moovweb/gvm)
|
||||
[](https://discord.gg/AzefAFd)
|
||||
[](https://github.com/tendermint/tendermint/blob/master/LICENSE)
|
||||
[](https://github.com/tendermint/tendermint)
|
||||
[](https://sourcegraph.com/github.com/tendermint/tendermint?badge)
|
||||
[![Version][version-badge]][version-url]
|
||||
[![API Reference][api-badge]][api-url]
|
||||
[![Go version][go-badge]][go-url]
|
||||
[![Discord chat][discord-badge]][discord-url]
|
||||
[![License][license-badge]][license-url]
|
||||
[![Sourcegraph][sg-badge]][sg-url]
|
||||
|
||||
| Branch | Tests | Coverage | Linting |
|
||||
| ------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------ | -------------------------------------------------------------------------- |
|
||||
| master | [](https://circleci.com/gh/tendermint/tendermint/tree/master) </br>  | [](https://codecov.io/gh/tendermint/tendermint) |  |
|
||||
| Branch | Tests | Linting |
|
||||
|--------|------------------------------------|---------------------------------|
|
||||
| main | [![Tests][tests-badge]][tests-url] | [![Lint][lint-badge]][lint-url] |
|
||||
|
||||
Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine - written in any programming language -
|
||||
and securely replicates it on many machines.
|
||||
Tendermint Core is a Byzantine Fault Tolerant (BFT) middleware that takes a
|
||||
state transition machine - written in any programming language - and securely
|
||||
replicates it on many machines.
|
||||
|
||||
For protocol details, see [the specification](https://github.com/tendermint/spec).
|
||||
For protocol details, refer to the [Tendermint Specification](./spec/README.md).
|
||||
|
||||
For detailed analysis of the consensus protocol, including safety and liveness proofs,
|
||||
see our recent paper, "[The latest gossip on BFT consensus](https://arxiv.org/abs/1807.04938)".
|
||||
|
||||
## Releases
|
||||
|
||||
Please do not depend on master as your production branch. Use [releases](https://github.com/tendermint/tendermint/releases) instead.
|
||||
|
||||
Tendermint is being used in production in both private and public environments,
|
||||
most notably the blockchains of the [Cosmos Network](https://cosmos.network/).
|
||||
However, we are still making breaking changes to the protocol and the APIs and have not yet released v1.0.
|
||||
See below for more details about [versioning](#versioning).
|
||||
|
||||
In any case, if you intend to run Tendermint in production, we're happy to help. You can
|
||||
contact us [over email](mailto:hello@interchain.berlin) or [join the chat](https://discord.gg/AzefAFd).
|
||||
|
||||
## Security
|
||||
|
||||
To report a security vulnerability, see our [bug bounty
|
||||
program](https://hackerone.com/tendermint).
|
||||
For examples of the kinds of bugs we're looking for, see [our security policy](SECURITY.md)
|
||||
|
||||
We also maintain a dedicated mailing list for security updates. We will only ever use this mailing list
|
||||
to notify you of vulnerabilities and fixes in Tendermint Core. You can subscribe [here](http://eepurl.com/gZ5hQD).
|
||||
|
||||
## Minimum requirements
|
||||
|
||||
| Requirement | Notes |
|
||||
| ----------- | ---------------- |
|
||||
| Go version | Go1.15 or higher |
|
||||
For detailed analysis of the consensus protocol, including safety and liveness
|
||||
proofs, read our paper, "[The latest gossip on BFT
|
||||
consensus](https://arxiv.org/abs/1807.04938)".
|
||||
|
||||
## Documentation
|
||||
|
||||
Complete documentation can be found on the [website](https://docs.tendermint.com/master/).
|
||||
Complete documentation can be found on the
|
||||
[website](https://docs.tendermint.com/).
|
||||
|
||||
## Releases
|
||||
|
||||
Please do not depend on `main` as your production branch. Use
|
||||
[releases](https://github.com/tendermint/tendermint/releases) instead.
|
||||
|
||||
Tendermint has been used in production in both private and public environments, most
notably the blockchains of the Cosmos Network. We haven't released v1.0 yet
since we are still making breaking changes to the protocol and the APIs. See below for
more details about [versioning](#versioning).
|
||||
|
||||
In any case, if you intend to run Tendermint in production, we're happy to help.
|
||||
You can contact us [over email](mailto:hello@newtendermint.org) or [join the
|
||||
chat](https://discord.gg/gnoland).
|
||||
|
||||
More on how releases are conducted can be found [here](./RELEASES.md).
|
||||
|
||||
## Security
|
||||
|
||||
To report a security vulnerability, please [email us](mailto:security@newtendermint.org).
|
||||
For examples of the kinds of bugs we're looking for, see [our security policy](SECURITY.md).
|
||||
|
||||
## Minimum requirements
|
||||
|
||||
| Requirement | Notes |
|
||||
|-------------|-------------------|
|
||||
| Go version | Go 1.18 or higher |
|
||||
|
||||
### Install
|
||||
|
||||
See the [install instructions](/docs/introduction/install.md).
|
||||
See the [install instructions](./docs/introduction/install.md).
|
||||
|
||||
### Quick Start
|
||||
|
||||
- [Single node](/docs/introduction/quick-start.md)
|
||||
- [Local cluster using docker-compose](/docs/networks/docker-compose.md)
|
||||
- [Remote cluster using Terraform and Ansible](/docs/networks/terraform-and-ansible.md)
|
||||
- [Join the Cosmos testnet](https://cosmos.network/testnet)
|
||||
- [Single node](./docs/introduction/quick-start.md)
|
||||
- [Local cluster using docker-compose](./docs/tools/docker-compose.md)
|
||||
- [Remote cluster using Terraform and Ansible](./docs/tools/terraform-and-ansible.md)
|
||||
|
||||
## Contributing
|
||||
|
||||
Please abide by the [Code of Conduct](CODE_OF_CONDUCT.md) in all interactions.
|
||||
|
||||
Before contributing to the project, please take a look at the [contributing guidelines](CONTRIBUTING.md)
|
||||
and the [style guide](STYLE_GUIDE.md). You may also find it helpful to read the
|
||||
[specifications](https://github.com/tendermint/spec), watch the [Developer Sessions](/docs/DEV_SESSIONS.md),
|
||||
and familiarize yourself with our
|
||||
[Architectural Decision Records](https://github.com/tendermint/tendermint/tree/master/docs/architecture).
|
||||
Before contributing to the project, please take a look at the [contributing
|
||||
guidelines](CONTRIBUTING.md) and the [style guide](STYLE_GUIDE.md). You may also
|
||||
find it helpful to read the [specifications](./spec/README.md), and familiarize
|
||||
yourself with our [Architectural Decision Records
|
||||
(ADRs)](./docs/architecture/README.md) and
|
||||
[Request For Comments (RFCs)](./docs/rfc/README.md).
|
||||
|
||||
## Versioning
|
||||
|
||||
### Semantic Versioning
|
||||
|
||||
Tendermint uses [Semantic Versioning](http://semver.org/) to determine when and how the version changes.
|
||||
According to SemVer, anything in the public API can change at any time before version 1.0.0
|
||||
Tendermint uses [Semantic Versioning](http://semver.org/) to determine when and
|
||||
how the version changes. According to SemVer, anything in the public API can
|
||||
change at any time before version 1.0.0.
|
||||
|
||||
To provide some stability to Tendermint users in these 0.X.X days, the MINOR version is used
|
||||
to signal breaking changes across a subset of the total public API. This subset includes all
|
||||
interfaces exposed to other processes (cli, rpc, p2p, etc.), but does not
|
||||
include the Go APIs.
|
||||
To provide some stability to users of 0.X.X versions of Tendermint, the MINOR
|
||||
version is used to signal breaking changes across Tendermint's API. This API
|
||||
includes all publicly exposed types, functions, and methods in non-internal Go
|
||||
packages as well as the types and methods accessible via the Tendermint RPC
|
||||
interface.
|
||||
|
||||
That said, breaking changes in the following packages will be documented in the
|
||||
CHANGELOG even if they don't lead to MINOR version bumps:
|
||||
|
||||
- crypto
|
||||
- config
|
||||
- libs
|
||||
- bech32
|
||||
- bits
|
||||
- bytes
|
||||
- json
|
||||
- log
|
||||
- math
|
||||
- net
|
||||
- os
|
||||
- protoio
|
||||
- rand
|
||||
- sync
|
||||
- strings
|
||||
- service
|
||||
- node
|
||||
- rpc/client
|
||||
- types
|
||||
Breaking changes to these public APIs will be documented in the CHANGELOG.
|
||||
|
||||
### Upgrades
|
||||
|
||||
In an effort to avoid accumulating technical debt prior to 1.0.0,
|
||||
we do not guarantee that breaking changes (ie. bumps in the MINOR version)
|
||||
will work with existing Tendermint blockchains. In these cases you will
|
||||
have to start a new blockchain, or write something custom to get the old
|
||||
data into the new chain. However, any bump in the PATCH version should be
|
||||
compatible with existing blockchain histories.
|
||||
|
||||
In an effort to avoid accumulating technical debt prior to 1.0.0, we do not
|
||||
guarantee that breaking changes (ie. bumps in the MINOR version) will work with
|
||||
existing Tendermint blockchains. In these cases you will have to start a new
|
||||
blockchain, or write something custom to get the old data into the new chain.
|
||||
However, any bump in the PATCH version should be compatible with existing
|
||||
blockchain histories.
|
||||
|
||||
For more information on upgrading, see [UPGRADING.md](./UPGRADING.md).
|
||||
|
||||
### Supported Versions
|
||||
|
||||
Because we are a small core team, we only ship patch updates, including security updates,
|
||||
to the most recent minor release and the second-most recent minor release. Consequently,
|
||||
we strongly recommend keeping Tendermint up-to-date. Upgrading instructions can be found
|
||||
in [UPGRADING.md](./UPGRADING.md).
|
||||
Because we are a small core team, we only ship patch updates, including security
|
||||
updates, to the most recent minor release and the second-most recent minor
|
||||
release. Consequently, we strongly recommend keeping Tendermint up-to-date.
|
||||
Upgrading instructions can be found in [UPGRADING.md](./UPGRADING.md).
|
||||
|
||||
## Resources
|
||||
|
||||
### Tendermint Core
|
||||
### Libraries
|
||||
|
||||
For details about the blockchain data structures and the p2p protocols, see the
|
||||
[Tendermint specification](https://docs.tendermint.com/master/spec/).
|
||||
|
||||
For details on using the software, see the [documentation](/docs/) which is also
|
||||
hosted at: <https://docs.tendermint.com/master/>
|
||||
|
||||
### Tools
|
||||
|
||||
Benchmarking is provided by [`tm-load-test`](https://github.com/informalsystems/tm-load-test).
|
||||
Additional tooling can be found in [/docs/tools](/docs/tools).
|
||||
- [Cosmos SDK](http://github.com/cosmos/cosmos-sdk): a framework for building
  applications in Go
|
||||
- [Tendermint in Rust](https://github.com/informalsystems/tendermint-rs)
|
||||
- [ABCI Tower](https://github.com/penumbra-zone/tower-abci)
|
||||
|
||||
### Applications
|
||||
|
||||
- [Cosmos SDK](http://github.com/cosmos/cosmos-sdk); a cryptocurrency application framework
|
||||
- [Ethermint](http://github.com/cosmos/ethermint); Ethereum on Tendermint
|
||||
- [Many more](https://tendermint.com/ecosystem)
|
||||
- [Cosmos Hub](https://hub.cosmos.network/)
|
||||
- [Terra](https://www.terra.money/)
|
||||
- [Celestia](https://celestia.org/)
|
||||
- [Anoma](https://anoma.network/)
|
||||
- [Vocdoni](https://docs.vocdoni.io/)
|
||||
|
||||
### Research
|
||||
|
||||
- [The latest gossip on BFT consensus](https://arxiv.org/abs/1807.04938)
|
||||
- [Master's Thesis on Tendermint](https://atrium.lib.uoguelph.ca/xmlui/handle/10214/9769)
|
||||
- [Original Whitepaper: "Tendermint: Consensus Without Mining"](https://tendermint.com/static/docs/tendermint.pdf)
|
||||
- [Blog](https://blog.cosmos.network/tendermint/home)
|
||||
- [Tendermint Core Blog](https://medium.com/tendermint/tagged/tendermint-core)
|
||||
- [Cosmos Blog](https://blog.cosmos.network/tendermint/home)
|
||||
|
||||
## Join us!
|
||||
|
||||
The development of Tendermint Core was led primarily by All in Bits, Inc. The
|
||||
Tendermint trademark is owned by New Tendermint, LLC. If you'd like to work
|
||||
full-time on Tendermint2 or [gno.land](https://gno.land), [we're
|
||||
hiring](mailto:hiring@newtendermint.org)!
|
||||
|
||||
[bft]: https://en.wikipedia.org/wiki/Byzantine_fault_tolerance
|
||||
[smr]: https://en.wikipedia.org/wiki/State_machine_replication
|
||||
[Blockchain]: https://en.wikipedia.org/wiki/Blockchain
|
||||
[version-badge]: https://img.shields.io/github/tag/tendermint/tendermint.svg
|
||||
[version-url]: https://github.com/tendermint/tendermint/releases/latest
|
||||
[api-badge]: https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667
|
||||
[api-url]: https://pkg.go.dev/github.com/tendermint/tendermint
|
||||
[go-badge]: https://img.shields.io/badge/go-1.18-blue.svg
|
||||
[go-url]: https://github.com/moovweb/gvm
|
||||
[discord-badge]: https://img.shields.io/discord/669268347736686612.svg
|
||||
[discord-url]: https://discord.gg/cosmosnetwork
|
||||
[license-badge]: https://img.shields.io/github/license/tendermint/tendermint.svg
|
||||
[license-url]: https://github.com/tendermint/tendermint/blob/main/LICENSE
|
||||
[sg-badge]: https://sourcegraph.com/github.com/tendermint/tendermint/-/badge.svg
|
||||
[sg-url]: https://sourcegraph.com/github.com/tendermint/tendermint?badge
|
||||
[tests-url]: https://github.com/tendermint/tendermint/actions/workflows/tests.yml
|
||||
[tests-badge]: https://github.com/tendermint/tendermint/actions/workflows/tests.yml/badge.svg?branch=main
|
||||
[lint-badge]: https://github.com/tendermint/tendermint/actions/workflows/lint.yml/badge.svg
|
||||
[lint-url]: https://github.com/tendermint/tendermint/actions/workflows/lint.yml
|
||||
|
||||
SECURITY.md
@@ -2,98 +2,146 @@
|
||||
|
||||
## Reporting a Bug
|
||||
|
||||
As part of our [Coordinated Vulnerability Disclosure
|
||||
Policy](https://tendermint.com/security), we operate a [bug
|
||||
bounty](https://hackerone.com/tendermint).
|
||||
See the policy for more details on submissions and rewards, and see "Example Vulnerabilities" (below) for examples of the kinds of bugs we're most interested in.
|
||||
As part of our [Coordinated Vulnerability Disclosure Policy](https://tendermint.com/security),
|
||||
we operate a [bug bounty][hackerone]. See the policy for more
|
||||
details on submissions and rewards, and see "Example Vulnerabilities" (below)
|
||||
for examples of the kinds of bugs we're most interested in.
|
||||
|
||||
### Guidelines
|
||||
### Guidelines
|
||||
|
||||
We require that all researchers:
|
||||
|
||||
* Use the bug bounty to disclose all vulnerabilities, and avoid posting vulnerability information in public places, including Github Issues, Discord channels, and Telegram groups
|
||||
* Make every effort to avoid privacy violations, degradation of user experience, disruption to production systems (including but not limited to the Cosmos Hub), and destruction of data
|
||||
* Keep any information about vulnerabilities that you’ve discovered confidential between yourself and the Tendermint Core engineering team until the issue has been resolved and disclosed
|
||||
* Use the bug bounty to disclose all vulnerabilities, and avoid posting
|
||||
vulnerability information in public places, including Github Issues, Discord
|
||||
channels, and Telegram groups
|
||||
* Make every effort to avoid privacy violations, degradation of user experience,
|
||||
disruption to production systems (including but not limited to the Cosmos
|
||||
Hub), and destruction of data
|
||||
* Keep any information about vulnerabilities that you’ve discovered confidential
|
||||
between yourself and the Tendermint Core engineering team until the issue has
|
||||
been resolved and disclosed
|
||||
* Avoid posting personally identifiable information, privately or publicly
|
||||
|
||||
If you follow these guidelines when reporting an issue to us, we commit to:
|
||||
|
||||
* Not pursue or support any legal action related to your research on this vulnerability
|
||||
* Work with you to understand, resolve and ultimately disclose the issue in a timely fashion
|
||||
* Not pursue or support any legal action related to your research on this
|
||||
vulnerability
|
||||
* Work with you to understand, resolve and ultimately disclose the issue in a
|
||||
timely fashion
|
||||
|
||||
## Disclosure Process
|
||||
## Disclosure Process
|
||||
|
||||
Tendermint Core uses the following disclosure process:
|
||||
|
||||
1. Once a security report is received, the Tendermint Core team works to verify the issue and confirm its severity level using CVSS.
|
||||
2. The Tendermint Core team collaborates with the Gaia team to determine the vulnerability’s potential impact on the Cosmos Hub.
|
||||
3. Patches are prepared for eligible releases of Tendermint in private repositories. See “Supported Releases” below for more information on which releases are considered eligible.
|
||||
4. If it is determined that a CVE-ID is required, we request a CVE through a CVE Numbering Authority.
|
||||
5. We notify the community that a security release is coming, to give users time to prepare their systems for the update. Notifications can include forum posts, tweets, and emails to partners and validators, including emails sent to the [Tendermint Security Mailing List](https://berlin.us4.list-manage.com/subscribe?u=431b35421ff7edcc77df5df10&id=3fe93307bc).
|
||||
6. 24 hours following this notification, the fixes are applied publicly and new releases are issued.
|
||||
7. Cosmos SDK and Gaia update their Tendermint Core dependencies to use these releases, and then themselves issue new releases.
|
||||
8. Once releases are available for Tendermint Core, Cosmos SDK and Gaia, we notify the community, again, through the same channels as above. We also publish a Security Advisory on Github and publish the CVE, as long as neither the Security Advisory nor the CVE include any information on how to exploit these vulnerabilities beyond what information is already available in the patch itself.
|
||||
9. Once the community is notified, we will pay out any relevant bug bounties to submitters.
|
||||
10. One week after the releases go out, we will publish a post with further details on the vulnerability as well as our response to it.
|
||||
1. Once a security report is received, the Tendermint Core team works to verify
|
||||
the issue and confirm its severity level using CVSS.
|
||||
2. The Tendermint Core team collaborates with the Gaia team to determine the
|
||||
vulnerability’s potential impact on the Cosmos Hub.
|
||||
3. Patches are prepared for eligible releases of Tendermint in private
|
||||
repositories. See “Supported Releases” below for more information on which
|
||||
releases are considered eligible.
|
||||
4. If it is determined that a CVE-ID is required, we request a CVE through a CVE
|
||||
Numbering Authority.
|
||||
5. We notify the community that a security release is coming, to give users time
|
||||
to prepare their systems for the update. Notifications can include forum
|
||||
posts, tweets, and emails to partners and validators, including emails sent
|
||||
to the [Tendermint Security Mailing List][tmsec-mailing].
|
||||
6. 24 hours following this notification, the fixes are applied publicly and new
|
||||
releases are issued.
|
||||
7. Cosmos SDK and Gaia update their Tendermint Core dependencies to use these
|
||||
releases, and then themselves issue new releases.
|
||||
8. Once releases are available for Tendermint Core, Cosmos SDK and Gaia, we
|
||||
notify the community, again, through the same channels as above. We also
|
||||
publish a Security Advisory on Github and publish the CVE, as long as neither
|
||||
the Security Advisory nor the CVE include any information on how to exploit
|
||||
these vulnerabilities beyond what information is already available in the
|
||||
patch itself.
|
||||
9. Once the community is notified, we will pay out any relevant bug bounties to
|
||||
submitters.
|
||||
10. One week after the releases go out, we will publish a post with further
|
||||
details on the vulnerability as well as our response to it.
|
||||
|
||||
This process can take some time. Every effort will be made to handle the bug in as timely a manner as possible, however it's important that we follow the process described above to ensure that disclosures are handled consistently and to keep Tendermint Core and its downstream dependent projects--including but not limited to Gaia and the Cosmos Hub--as secure as possible.
|
||||
This process can take some time. Every effort will be made to handle the bug in
|
||||
as timely a manner as possible, however it's important that we follow the
|
||||
process described above to ensure that disclosures are handled consistently and
|
||||
to keep Tendermint Core and its downstream dependent projects--including but not
|
||||
limited to Gaia and the Cosmos Hub--as secure as possible.
|
||||
|
||||
### Example Timeline
|
||||
### Example Timeline
|
||||
|
||||
The following is an example timeline for the triage and response. The required roles and team members are described in parentheses after each task; however, multiple people can play each role and each person may play multiple roles.
|
||||
The following is an example timeline for the triage and response. The required
|
||||
roles and team members are described in parentheses after each task; however,
|
||||
multiple people can play each role and each person may play multiple roles.
|
||||
|
||||
#### > 24 Hours Before Release Time
|
||||
#### 24+ Hours Before Release Time
|
||||
|
||||
1. Request CVE number (ADMIN)
|
||||
2. Gather emails and other contact info for validators (COMMS LEAD)
|
||||
3. Test fixes on a testnet (TENDERMINT ENG, COSMOS ENG)
|
||||
4. Write “Security Advisory” for forum (TENDERMINT LEAD)
|
||||
1. Request CVE number (ADMIN)
|
||||
2. Gather emails and other contact info for validators (COMMS LEAD)
|
||||
3. Create patches in a private security repo, and ensure that PRs are open
|
||||
targeting all relevant release branches (TENDERMINT ENG, TENDERMINT LEAD)
|
||||
4. Test fixes on a testnet (TENDERMINT ENG, COSMOS SDK ENG)
|
||||
5. Write “Security Advisory” for forum (TENDERMINT LEAD)
|
||||
|
||||
#### 24 Hours Before Release Time
|
||||
|
||||
1. Post “Security Advisory” pre-notification on forum (TENDERMINT LEAD)
|
||||
2. Post Tweet linking to forum post (COMMS LEAD)
|
||||
3. Announce security advisory/link to post in various other social channels (Telegram, Discord) (COMMS LEAD)
|
||||
4. Send emails to validators or other users (PARTNERSHIPS LEAD)
|
||||
1. Post “Security Advisory” pre-notification on forum (TENDERMINT LEAD)
|
||||
2. Post Tweet linking to forum post (COMMS LEAD)
|
||||
3. Announce security advisory/link to post in various other social channels
|
||||
(Telegram, Discord) (COMMS LEAD)
|
||||
4. Send emails to validators or other users (PARTNERSHIPS LEAD)
|
||||
|
||||
#### Release Time
|
||||
|
||||
1. Cut Tendermint releases for eligible versions (TENDERMINT ENG, TENDERMINT LEAD)
|
||||
1. Cut Tendermint releases for eligible versions (TENDERMINT ENG, TENDERMINT
|
||||
LEAD)
|
||||
2. Cut Cosmos SDK release for eligible versions (COSMOS ENG)
|
||||
3. Cut Gaia release for eligible versions (GAIA ENG)
|
||||
4. Post “Security releases” on forum (TENDERMINT LEAD)
|
||||
5. Post new Tweet linking to forum post (COMMS LEAD)
|
||||
6. Remind everyone via social channels (Telegram, Discord) that the release is out (COMMS LEAD)
|
||||
7. Send emails to validators or other users (COMMS LEAD)
|
||||
8. Publish Security Advisory and CVE, if CVE has no sensitive information (ADMIN)
|
||||
6. Remind everyone via social channels (Telegram, Discord) that the release is
|
||||
out (COMMS LEAD)
|
||||
7. Send emails to validators or other users (COMMS LEAD)
|
||||
8. Publish Security Advisory and CVE, if CVE has no sensitive information
|
||||
(ADMIN)
|
||||
|
||||
#### After Release Time
|
||||
|
||||
1. Write forum post with exploit details (TENDERMINT LEAD)
|
||||
2. Approve pay-out on HackerOne for submitter (ADMIN)
|
||||
2. Approve pay-out on HackerOne for submitter (ADMIN)
|
||||
|
||||
#### 7 Days After Release Time
|
||||
|
||||
1. Publish CVE if it has not yet been published (ADMIN)
|
||||
1. Publish CVE if it has not yet been published (ADMIN)
|
||||
2. Publish forum post with exploit details (TENDERMINT ENG, TENDERMINT LEAD)
|
||||
|
||||
## Supported Releases
|
||||
|
||||
The Tendermint Core team commits to releasing security patch releases for both the latest minor release as well for the major/minor release that the Cosmos Hub is running.
|
||||
The Tendermint Core team commits to releasing security patch releases for both
|
||||
the latest minor release as well for the major/minor release that the Cosmos Hub
|
||||
is running.
|
||||
|
||||
If you are running older versions of Tendermint Core, we encourage you to upgrade at your earliest opportunity so that you can receive security patches directly from the Tendermint repo. While you are welcome to backport security patches to older versions for your own use, we will not publish or promote these backports.
|
||||
If you are running older versions of Tendermint Core, we encourage you to
|
||||
upgrade at your earliest opportunity so that you can receive security patches
|
||||
directly from the Tendermint repo. While you are welcome to backport security
|
||||
patches to older versions for your own use, we will not publish or promote these
|
||||
backports.
|
||||
|
||||
## Scope
|
||||
|
||||
The full scope of our bug bounty program is outlined on our [Hacker One program page](https://hackerone.com/tendermint). Please also note that, in the interest of the safety of our users and staff, a few things are explicitly excluded from scope:
|
||||
The full scope of our bug bounty program is outlined on our
|
||||
[Hacker One program page][hackerone]. Please also note that, in the interest of
|
||||
the safety of our users and staff, a few things are explicitly excluded from
|
||||
scope:
|
||||
|
||||
* Any third-party services
|
||||
* Findings from physical testing, such as office access
|
||||
* Any third-party services
|
||||
* Findings from physical testing, such as office access
|
||||
* Findings derived from social engineering (e.g., phishing)
|
||||
|
||||
## Example Vulnerabilities
|
||||
## Example Vulnerabilities
|
||||
|
||||
The following is a list of examples of the kinds of vulnerabilities that we’re most interested in. It is not exhaustive: there are other kinds of issues we may also be interested in!
|
||||
The following is a list of examples of the kinds of vulnerabilities that we’re
|
||||
most interested in. It is not exhaustive: there are other kinds of issues we may
|
||||
also be interested in!
|
||||
|
||||
### Specification
|
||||
|
||||
@@ -105,7 +153,8 @@ The following is a list of examples of the kinds of vulnerabilities that we’re
|
||||
|
||||
Assuming less than 1/3 of the voting power is Byzantine (malicious):
|
||||
|
||||
* Validation of blockchain data structures, including blocks, block parts, votes, and so on
|
||||
* Validation of blockchain data structures, including blocks, block parts,
|
||||
votes, and so on
|
||||
* Execution of blocks
|
||||
* Validator set changes
|
||||
* Proposer round robin
|
||||
@@ -114,6 +163,9 @@ Assuming less than 1/3 of the voting power is Byzantine (malicious):
|
||||
* A node halting (liveness failure)
|
||||
* Syncing new and old nodes
|
||||
|
||||
Assuming more than 1/3 the voting power is Byzantine:
|
||||
|
||||
* Attacks that go unpunished (unhandled evidence)
|
||||
|
||||
### Networking
|
||||
|
||||
@@ -139,7 +191,7 @@ Attacks may come through the P2P network or the RPC layer:
|
||||
|
||||
### Libraries
|
||||
|
||||
* Serialization (Amino)
|
||||
* Serialization
|
||||
* Reading/Writing files and databases
|
||||
|
||||
### Cryptography
|
||||
@@ -150,5 +202,8 @@ Attacks may come through the P2P network or the RPC layer:
|
||||
|
||||
### Light Client
|
||||
|
||||
* Core verification
|
||||
* Core verification
|
||||
* Bisection/sequential algorithms
|
||||
|
||||
[hackerone]: https://hackerone.com/cosmos
|
||||
[tmsec-mailing]: https://berlin.us4.list-manage.com/subscribe?u=431b35421ff7edcc77df5df10&id=3fe93307bc
|
||||
|
||||
@@ -98,7 +98,7 @@ Sometimes it's necessary to rename libraries to avoid naming collisions or ambig
|
||||
* Make use of table driven testing where possible and not cumbersome (see the sketch after this list)
|
||||
* [Inspiration](https://dave.cheney.net/2013/06/09/writing-table-driven-tests-in-go)
|
||||
* Make use of [assert](https://godoc.org/github.com/stretchr/testify/assert) and [require](https://godoc.org/github.com/stretchr/testify/require)
|
||||
* When using mocks, it is recommended to use Testify [mock] (<https://pkg.go.dev/github.com/stretchr/testify/mock>
|
||||
* When using mocks, it is recommended to use Testify [mock](<https://pkg.go.dev/github.com/stretchr/testify/mock>
|
||||
) along with [Mockery](https://github.com/vektra/mockery) for autogeneration
|
||||
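The testing bullets above lend themselves to a concrete illustration. A minimal sketch of the table-driven style combined with `require`, assuming a placeholder `Add` function that is not part of the codebase:

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// Add is a placeholder function under test, used only for illustration.
func Add(a, b int) int { return a + b }

func TestAdd(t *testing.T) {
	testCases := []struct {
		name string
		a, b int
		want int
	}{
		{"zero", 0, 0, 0},
		{"positive", 2, 3, 5},
		{"negative", -2, -3, -5},
	}
	for _, tc := range testCases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			require.Equal(t, tc.want, Add(tc.a, tc.b))
		})
	}
}
```

Each case gets its own subtest name, so a failure points at the offending row rather than at the loop as a whole.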
|
||||
## Errors
|
||||
|
||||
UPGRADING.md
@@ -1,6 +1,30 @@
|
||||
# Upgrading Tendermint Core
|
||||
|
||||
This guide provides instructions for upgrading to specific versions of Tendermint Core.
|
||||
This guide provides instructions for upgrading to specific versions of
|
||||
Tendermint Core.
|
||||
|
||||
## v0.34.24
|
||||
|
||||
Note that in [\#9724](https://github.com/tendermint/tendermint/pull/9724) we
|
||||
un-prettified the JSON output (i.e. removed all indentation) of the HTTP and
|
||||
WebSocket RPC for performance and subscription stability reasons. We recommend
|
||||
using a tool such as [jq](https://github.com/stedolan/jq) to obtain prettified
|
||||
output if you rely on that prettified output in some way.
|
||||
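For readers who want a programmatic alternative to `jq`, a minimal Go sketch that re-indents the now-compact RPC output on the client side (the sample payload is illustrative only):

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

// prettify re-indents compact JSON, recovering the readable form that the
// RPC server no longer produces itself.
func prettify(compact []byte) (string, error) {
	var buf bytes.Buffer
	if err := json.Indent(&buf, compact, "", "  "); err != nil {
		return "", err
	}
	return buf.String(), nil
}

func main() {
	out, err := prettify([]byte(`{"jsonrpc":"2.0","id":-1,"result":{}}`))
	if err != nil {
		panic(err)
	}
	fmt.Println(out)
}
```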
|
||||
## v0.34.20
|
||||
|
||||
### Feature: Priority Mempool
|
||||
|
||||
This release backports an implementation of the Priority Mempool from the v0.35
|
||||
branch. This implementation of the mempool permits the application to set a
|
||||
priority on each transaction during CheckTx, and during block selection the
|
||||
highest-priority transactions are chosen (subject to the constraints on size
|
||||
and gas cost).
|
||||
|
||||
Operators can enable the priority mempool by setting `mempool.version` to
|
||||
`"v1"` in the `config.toml`. For more technical details about the priority
|
||||
mempool, see [ADR 067: Mempool
|
||||
Refactor](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-067-mempool-refactor.md).
|
||||
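As a sketch of what this looks like for operators or applications that build their configuration in Go rather than editing `config.toml` directly (assuming the v0.34.20+ `config` package, where `MempoolConfig` gained a `Version` field):

```go
package main

import (
	"fmt"

	cfg "github.com/tendermint/tendermint/config"
)

func main() {
	conf := cfg.DefaultConfig()
	// Equivalent to setting `version = "v1"` under `[mempool]` in config.toml,
	// which enables the backported priority mempool.
	conf.Mempool.Version = "v1"
	fmt.Println("mempool version:", conf.Mempool.Version)
}
```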
|
||||
## v0.34.0
|
||||
|
||||
@@ -8,7 +32,7 @@ This guide provides instructions for upgrading to specific versions of Tendermin
|
||||
This release is not compatible with previous blockchains due to changes to
|
||||
the encoding format (see "Protocol Buffers," below) and the block header (see "Blockchain Protocol").
|
||||
|
||||
Note also that Tendermint 0.34 also requires Go 1.15 or higher.
|
||||
Note also that Tendermint 0.34 requires Go 1.16 or higher.
|
||||
|
||||
### ABCI Changes
|
||||
|
||||
@@ -18,7 +42,7 @@ Note also that Tendermint 0.34 also requires Go 1.15 or higher.
|
||||
were added to support the new State Sync feature.
|
||||
Previously, syncing a new node to a preexisting network could take days; but with State Sync,
|
||||
new nodes are able to join a network in a matter of seconds.
|
||||
Read [the spec](https://docs.tendermint.com/master/spec/abci/apps.html#state-sync)
|
||||
Read [the spec](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/abci/apps.md#state-sync)
|
||||
if you want to learn more about State Sync, or if you'd like your application to use it.
|
||||
(If you don't want to support State Sync in your application, you can just implement these new
|
||||
ABCI methods as no-ops, leaving them empty.)
|
||||
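A minimal sketch of the no-op approach, assuming the four State Sync methods of the v0.34 ABCI `Application` interface (`ListSnapshots`, `OfferSnapshot`, `LoadSnapshotChunk`, `ApplySnapshotChunk`); `MyApp` is a hypothetical application, and embedding `types.BaseApplication` already supplies equivalent defaults:

```go
package main

import "github.com/tendermint/tendermint/abci/types"

// MyApp is a hypothetical application that does not support snapshots.
type MyApp struct {
	types.BaseApplication
}

// The explicit no-op methods below simply make the "leave them empty" advice concrete.

func (MyApp) ListSnapshots(types.RequestListSnapshots) types.ResponseListSnapshots {
	return types.ResponseListSnapshots{}
}

func (MyApp) OfferSnapshot(types.RequestOfferSnapshot) types.ResponseOfferSnapshot {
	return types.ResponseOfferSnapshot{}
}

func (MyApp) LoadSnapshotChunk(types.RequestLoadSnapshotChunk) types.ResponseLoadSnapshotChunk {
	return types.ResponseLoadSnapshotChunk{}
}

func (MyApp) ApplySnapshotChunk(types.RequestApplySnapshotChunk) types.ResponseApplySnapshotChunk {
	return types.ResponseApplySnapshotChunk{}
}

func main() {}
```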
@@ -34,7 +58,7 @@ Note also that Tendermint 0.34 also requires Go 1.15 or higher.
|
||||
Applications should be able to handle these evidence types
|
||||
(i.e., through slashing or other accountability measures).
|
||||
|
||||
* The [`PublicKey` type](https://github.com/tendermint/tendermint/blob/master/proto/tendermint/crypto/keys.proto#L13-L15)
|
||||
* The [`PublicKey` type](https://github.com/tendermint/tendermint/blob/v0.34.x/proto/tendermint/crypto/keys.proto#L13-L15)
|
||||
(used in ABCI as part of `ValidatorUpdate`) now uses a `oneof` protobuf type.
|
||||
Note that since Tendermint only supports ed25519 validator keys, there's only one
|
||||
option in the `oneof`. For more, see "Protocol Buffers," below.
|
||||
@@ -49,12 +73,9 @@ directory. For more, see "Protobuf," below.
|
||||
|
||||
### Blockchain Protocol
|
||||
|
||||
* `Header#LastResultsHash` previously was the root hash of a Merkle tree built from `ResponseDeliverTx(Code, Data)` responses.
|
||||
As of 0.34,`Header#LastResultsHash` is now the root hash of a Merkle tree built from:
|
||||
* `BeginBlock#Events`
|
||||
* Root hash of a Merkle tree built from `ResponseDeliverTx(Code, Data,
|
||||
GasWanted, GasUsed, Events)` responses
|
||||
* `BeginBlock#Events`
|
||||
* `Header#LastResultsHash`, which is the root hash of a Merkle tree built from
|
||||
`ResponseDeliverTx(Code, Data)` as of v0.34 also includes `GasWanted` and `GasUsed`
|
||||
fields.
|
||||
|
||||
* Merkle hashes of empty trees previously returned nothing, but now return the hash of an empty input,
|
||||
to conform with [RFC-6962](https://tools.ietf.org/html/rfc6962).
|
||||
@@ -112,7 +133,7 @@ Tendermint 0.34 includes new and updated consensus parameters.
|
||||
|
||||
#### Evidence Parameters
|
||||
|
||||
* `MaxBytes`, which caps the total amount of evidence. The default is 1048576 (1 MB).
|
||||
* `MaxBytes`, which caps the total amount of evidence. The default is 1048576 (1 MB).
|
||||
|
||||
### Crypto
|
||||
|
||||
@@ -144,7 +165,7 @@ The `bech32` package has moved to the Cosmos SDK:
|
||||
### CLI
|
||||
|
||||
The `tendermint lite` command has been renamed to `tendermint light` and has a slightly different API.
|
||||
See [the docs](https://docs.tendermint.com/master/tendermint-core/light-client-protocol.html#http-proxy) for details.
|
||||
See [the docs](https://docs.tendermint.com/v0.33/tendermint-core/light-client-protocol.html#http-proxy) for details.
|
||||
|
||||
### Light Client
|
||||
|
||||
@@ -158,6 +179,7 @@ Other user-relevant changes include:
|
||||
* The `Verifier` was broken up into two pieces:
|
||||
* Core verification logic (pure `VerifyX` functions)
|
||||
* `Client` object, which represents the complete light client
|
||||
* The new light client stores headers and validator sets as `LightBlock`s
|
||||
* The RPC client can be found in the `/rpc` directory.
|
||||
* The HTTP(S) proxy is located in the `/proxy` directory.
|
||||
|
||||
@@ -188,7 +210,7 @@ blockchains, we recommend that you check the chain ID.
|
||||
|
||||
### Version
|
||||
|
||||
Version is now set through Go linker flags `ld_flags`. Applications that are using tendermint as a library should set this at compile time.
|
||||
Version is now set through Go linker flags `ld_flags`. Applications that are using tendermint as a library should set this at compile time.
|
||||
|
||||
Example:
|
||||
|
||||
@@ -196,7 +218,7 @@ Example:
|
||||
go install -mod=readonly -ldflags "-X github.com/tendermint/tendermint/version.TMCoreSemVer=$(go list -m github.com/tendermint/tendermint | sed 's/ /\@/g') -s -w " -trimpath ./cmd
|
||||
```
|
||||
|
||||
Additionally, the exported constant `version.Version` is now `version.TMCoreSemVer`.
|
||||
Additionally, the exported constant `version.Version` is now `version.TMCoreSemVer`.
|
||||
|
||||
## v0.33.4
|
||||
|
||||
@@ -299,7 +321,7 @@ Evidence Params has been changed to include duration.
|
||||
### RPC Changes
|
||||
|
||||
* `/validators` is now paginated (default: 30 vals per page)
|
||||
* `/block_results` response format updated [see RPC docs for details](https://docs.tendermint.com/master/rpc/#/Info/block_results)
|
||||
* `/block_results` response format updated [see RPC docs for details](https://docs.tendermint.com/v0.33/rpc/#/Info/block_results)
|
||||
* Event suffix has been removed from the ID in event responses
|
||||
* IDs are now integers not `json-client-XYZ`
|
||||
|
||||
@@ -418,11 +440,11 @@ the compilation tag:
|
||||
|
||||
Use `cleveldb` tag instead of `gcc` to compile Tendermint with CLevelDB or
|
||||
use `make build_c` / `make install_c` (full instructions can be found at
|
||||
<https://tendermint.com/docs/introduction/install.html#compile-with-cleveldb-support>)
|
||||
<https://docs.tendermint.com/v0.33/introduction/install.html#compile-with-cleveldb-support>)
|
||||
|
||||
## v0.31.0
|
||||
|
||||
This release contains a breaking change to the behaviour of the pubsub system.
|
||||
This release contains a breaking change to the behavior of the pubsub system.
|
||||
It also contains some minor breaking changes in the Go API and ABCI.
|
||||
There are no changes to the block or p2p protocols, so v0.31.0 should work fine
|
||||
with blockchains created from the v0.30 series.
|
||||
@@ -440,7 +462,7 @@ In this case, the WS client will receive an error with description:
|
||||
"error": {
|
||||
"code": -32000,
|
||||
"msg": "Server error",
|
||||
"data": "subscription was cancelled (reason: client is not pulling messages fast enough)" // or "subscription was cancelled (reason: Tendermint exited)"
|
||||
"data": "subscription was canceled (reason: client is not pulling messages fast enough)" // or "subscription was canceled (reason: Tendermint exited)"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -493,14 +515,14 @@ due to changes in how various data structures are hashed.
|
||||
Any implementations of Tendermint blockchain verification, including lite clients,
|
||||
will need to be updated. For specific details:
|
||||
|
||||
* [Merkle tree](https://github.com/tendermint/spec/blob/master/spec/blockchain/encoding.md#merkle-trees)
|
||||
* [ConsensusParams](https://github.com/tendermint/spec/blob/master/spec/blockchain/state.md#consensusparams)
|
||||
* [Merkle tree](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/blockchain/encoding.md#merkle-trees)
|
||||
* [ConsensusParams](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/blockchain/state.md#consensusparams)
|
||||
|
||||
There was also a small change to field ordering in the vote struct. Any
|
||||
implementations of an out-of-process validator (like a Key-Management Server)
|
||||
will need to be updated. For specific details:
|
||||
|
||||
* [Vote](https://github.com/tendermint/spec/blob/master/spec/consensus/signing.md#votes)
|
||||
* [Vote](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/consensus/signing.md#votes)
|
||||
|
||||
Finally, the proposer selection algorithm continues to evolve. See the
|
||||
[work-in-progress
|
||||
@@ -621,7 +643,7 @@ to `timeout_propose = "3s"`.
|
||||
|
||||
### RPC Changes
|
||||
|
||||
The default behaviour of `/abci_query` has been changed to not return a proof,
|
||||
The default behavior of `/abci_query` has been changed to not return a proof,
|
||||
and the name of the parameter that controls this has been changed from `trusted`
|
||||
to `prove`. To get proofs with your queries, ensure you set `prove=true`.
|
||||
|
||||
|
||||
@@ -19,7 +19,7 @@ To get up and running quickly, see the [getting started guide](../docs/app-dev/g
|
||||
|
||||
A detailed description of the ABCI methods and message types is contained in:
|
||||
|
||||
- [The main spec](https://github.com/tendermint/spec/blob/master/spec/abci/abci.md)
|
||||
- [The main spec](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/abci/abci.md)
|
||||
- [A protobuf file](./types/types.proto)
|
||||
- [A Go interface](./types/application.go)
|
||||
|
||||
|
||||
@@ -81,9 +81,15 @@ type ReqRes struct {
|
||||
*sync.WaitGroup
|
||||
*types.Response // Not set atomically, so be sure to use WaitGroup.
|
||||
|
||||
mtx tmsync.Mutex
|
||||
done bool // Gets set to true once *after* WaitGroup.Done().
|
||||
cb func(*types.Response) // A single callback that may be set.
|
||||
mtx tmsync.Mutex
|
||||
|
||||
// callbackInvoked tracks whether the callback was already invoked during
// the regular execution of the request. This allows clients to set the
// callback concurrently with request completion without potentially
// invoking the callback twice by accident: once when 'SetCallback' is
// called and once during the normal request.
callbackInvoked bool
|
||||
cb func(*types.Response) // A single callback that may be set.
|
||||
}
|
||||
|
||||
func NewReqRes(req *types.Request) *ReqRes {
|
||||
@@ -92,8 +98,8 @@ func NewReqRes(req *types.Request) *ReqRes {
|
||||
WaitGroup: waitGroup1(),
|
||||
Response: nil,
|
||||
|
||||
done: false,
|
||||
cb: nil,
|
||||
callbackInvoked: false,
|
||||
cb: nil,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -103,7 +109,7 @@ func NewReqRes(req *types.Request) *ReqRes {
|
||||
func (r *ReqRes) SetCallback(cb func(res *types.Response)) {
|
||||
r.mtx.Lock()
|
||||
|
||||
if r.done {
|
||||
if r.callbackInvoked {
|
||||
r.mtx.Unlock()
|
||||
cb(r.Response)
|
||||
return
|
||||
@@ -122,6 +128,7 @@ func (r *ReqRes) InvokeCallback() {
|
||||
if r.cb != nil {
|
||||
r.cb(r.Response)
|
||||
}
|
||||
r.callbackInvoked = true
|
||||
}
|
||||
|
||||
// GetCallback returns the configured callback of the ReqRes object which may be
|
||||
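For context, a minimal sketch of how a caller typically consumes a `ReqRes` from an async ABCI call; with the `callbackInvoked` bookkeeping above, the callback fires exactly once whether it is set before or after the response arrives (the helper and package name are illustrative):

```go
package abciutil

import (
	"fmt"

	abcicli "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/types"
)

// checkTxWithCallback fires an async CheckTx and registers a completion
// callback on the returned ReqRes.
func checkTxWithCallback(c abcicli.Client, tx []byte) *abcicli.ReqRes {
	reqRes := c.CheckTxAsync(types.RequestCheckTx{Tx: tx})
	reqRes.SetCallback(func(res *types.Response) {
		fmt.Println("CheckTx code:", res.GetCheckTx().Code)
	})
	return reqRes
}
```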
@@ -136,13 +143,6 @@ func (r *ReqRes) GetCallback() func(*types.Response) {
|
||||
return r.cb
|
||||
}
|
||||
|
||||
// SetDone marks the ReqRes object as done.
|
||||
func (r *ReqRes) SetDone() {
|
||||
r.mtx.Lock()
|
||||
r.done = true
|
||||
r.mtx.Unlock()
|
||||
}
|
||||
|
||||
func waitGroup1() (wg *sync.WaitGroup) {
|
||||
wg = &sync.WaitGroup{}
|
||||
wg.Add(1)
|
||||
|
||||
@@ -66,7 +66,6 @@ func (cli *grpcClient) OnStart() error {
|
||||
cli.mtx.Lock()
|
||||
defer cli.mtx.Unlock()
|
||||
|
||||
reqres.SetDone()
|
||||
reqres.Done()
|
||||
|
||||
// Notify client listener if set
|
||||
@@ -75,9 +74,7 @@ func (cli *grpcClient) OnStart() error {
|
||||
}
|
||||
|
||||
// Notify reqRes listener if set
|
||||
if cb := reqres.GetCallback(); cb != nil {
|
||||
cb(reqres.Response)
|
||||
}
|
||||
reqres.InvokeCallback()
|
||||
}
|
||||
for reqres := range cli.chReqRes {
|
||||
if reqres != nil {
|
||||
@@ -343,7 +340,9 @@ func (cli *grpcClient) finishSyncCall(reqres *ReqRes) *types.Response {
|
||||
//----------------------------------------
|
||||
|
||||
func (cli *grpcClient) FlushSync() error {
|
||||
return nil
|
||||
reqres := cli.FlushAsync()
|
||||
cli.finishSyncCall(reqres).GetFlush()
|
||||
return cli.Error()
|
||||
}
|
||||
|
||||
func (cli *grpcClient) EchoSync(msg string) (*types.ResponseEcho, error) {
|
||||
|
||||
@@ -327,12 +327,13 @@ func (app *localClient) ApplySnapshotChunkSync(
|
||||
|
||||
func (app *localClient) callback(req *types.Request, res *types.Response) *ReqRes {
|
||||
app.Callback(req, res)
|
||||
return newLocalReqRes(req, res)
|
||||
rr := newLocalReqRes(req, res)
|
||||
rr.callbackInvoked = true
|
||||
return rr
|
||||
}
|
||||
|
||||
func newLocalReqRes(req *types.Request, res *types.Response) *ReqRes {
|
||||
reqRes := NewReqRes(req)
|
||||
reqRes.Response = res
|
||||
reqRes.SetDone()
|
||||
return reqRes
|
||||
}
|
||||
|
||||
@@ -2,6 +2,7 @@ package abcicli_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -118,3 +119,71 @@ func (slowApp) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock
|
||||
time.Sleep(200 * time.Millisecond)
|
||||
return types.ResponseBeginBlock{}
|
||||
}
|
||||
|
||||
// TestCallbackInvokedWhenSetLate ensures that the callback is invoked when
|
||||
// set after the client completes the call into the app. Currently this
|
||||
// test relies on the callback being allowed to be invoked twice if set multiple
|
||||
// times, once when set early and once when set late.
|
||||
func TestCallbackInvokedWhenSetLate(t *testing.T) {
|
||||
wg := &sync.WaitGroup{}
|
||||
wg.Add(1)
|
||||
app := blockedABCIApplication{
|
||||
wg: wg,
|
||||
}
|
||||
_, c := setupClientServer(t, app)
|
||||
reqRes := c.CheckTxAsync(types.RequestCheckTx{})
|
||||
|
||||
done := make(chan struct{})
|
||||
cb := func(_ *types.Response) {
|
||||
close(done)
|
||||
}
|
||||
reqRes.SetCallback(cb)
|
||||
app.wg.Done()
|
||||
<-done
|
||||
|
||||
var called bool
|
||||
cb = func(_ *types.Response) {
|
||||
called = true
|
||||
}
|
||||
reqRes.SetCallback(cb)
|
||||
require.True(t, called)
|
||||
}
|
||||
|
||||
type blockedABCIApplication struct {
|
||||
wg *sync.WaitGroup
|
||||
types.BaseApplication
|
||||
}
|
||||
|
||||
func (b blockedABCIApplication) CheckTx(r types.RequestCheckTx) types.ResponseCheckTx {
|
||||
b.wg.Wait()
|
||||
return b.BaseApplication.CheckTx(r)
|
||||
}
|
||||
|
||||
// TestCallbackInvokedWhenSetEarly ensures that the callback is invoked when
|
||||
// set before the client completes the call into the app.
|
||||
func TestCallbackInvokedWhenSetEarly(t *testing.T) {
|
||||
wg := &sync.WaitGroup{}
|
||||
wg.Add(1)
|
||||
app := blockedABCIApplication{
|
||||
wg: wg,
|
||||
}
|
||||
_, c := setupClientServer(t, app)
|
||||
reqRes := c.CheckTxAsync(types.RequestCheckTx{})
|
||||
|
||||
done := make(chan struct{})
|
||||
cb := func(_ *types.Response) {
|
||||
close(done)
|
||||
}
|
||||
reqRes.SetCallback(cb)
|
||||
app.wg.Done()
|
||||
|
||||
called := func() bool {
|
||||
select {
|
||||
case <-done:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
require.Eventually(t, called, time.Second, time.Millisecond*25)
|
||||
}
|
||||
|
||||
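Illustrative aside (not part of the diff): a minimal, hedged sketch of the ReqRes callback flow these tests exercise, wired together with the socket server and client from the abci/server and abci/client packages. The local address and the no-op BaseApplication are assumptions made for the example.

package main

import (
    "fmt"
    "log"

    abcicli "github.com/tendermint/tendermint/abci/client"
    "github.com/tendermint/tendermint/abci/server"
    "github.com/tendermint/tendermint/abci/types"
)

func main() {
    const addr = "tcp://127.0.0.1:26658" // assumed free local port

    // Serve a no-op application over a socket.
    srv := server.NewSocketServer(addr, types.NewBaseApplication())
    if err := srv.Start(); err != nil {
        log.Fatal(err)
    }
    defer srv.Stop() //nolint:errcheck

    cli := abcicli.NewSocketClient(addr, true)
    if err := cli.Start(); err != nil {
        log.Fatal(err)
    }
    defer cli.Stop() //nolint:errcheck

    // As the tests above show, the callback fires whether it is set before
    // or after the response has arrived.
    reqRes := cli.CheckTxAsync(types.RequestCheckTx{Tx: []byte("example")})
    reqRes.SetCallback(func(res *types.Response) {
        fmt.Println("CheckTx code:", res.GetCheckTx().Code)
    })
    reqRes.Wait()
}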
@@ -2,7 +2,7 @@ package kvstore
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
@@ -71,7 +71,7 @@ func TestKVStoreKV(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestPersistentKVStoreKV(t *testing.T) {
|
||||
dir, err := ioutil.TempDir("/tmp", "abci-kvstore-test") // TODO
|
||||
dir, err := os.MkdirTemp("/tmp", "abci-kvstore-test") // TODO
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -87,7 +87,7 @@ func TestPersistentKVStoreKV(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestPersistentKVStoreInfo(t *testing.T) {
|
||||
dir, err := ioutil.TempDir("/tmp", "abci-kvstore-test") // TODO
|
||||
dir, err := os.MkdirTemp("/tmp", "abci-kvstore-test") // TODO
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -114,12 +114,11 @@ func TestPersistentKVStoreInfo(t *testing.T) {
|
||||
if resInfo.LastBlockHeight != height {
|
||||
t.Fatalf("expected height of %d, got %d", height, resInfo.LastBlockHeight)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// add a validator, remove a validator, update a validator
|
||||
func TestValUpdates(t *testing.T) {
|
||||
dir, err := ioutil.TempDir("/tmp", "abci-kvstore-test") // TODO
|
||||
dir, err := os.MkdirTemp("/tmp", "abci-kvstore-test") // TODO
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -162,7 +161,7 @@ func TestValUpdates(t *testing.T) {
|
||||
|
||||
makeApplyBlock(t, kvstore, 2, diff, tx1, tx2, tx3)
|
||||
|
||||
vals1 = append(vals[:nInit-2], vals[nInit+1]) // nolint: gocritic
|
||||
vals1 = append(vals[:nInit-2], vals[nInit+1]) //nolint: gocritic
|
||||
vals2 = kvstore.Validators()
|
||||
valsEqual(t, vals1, vals2)
|
||||
|
||||
@@ -181,7 +180,6 @@ func TestValUpdates(t *testing.T) {
|
||||
vals1 = append([]types.ValidatorUpdate{v1}, vals1[1:]...)
|
||||
vals2 = kvstore.Validators()
|
||||
valsEqual(t, vals1, vals2)
|
||||
|
||||
}
|
||||
|
||||
func makeApplyBlock(
|
||||
@@ -189,7 +187,8 @@ func makeApplyBlock(
|
||||
kvstore types.Application,
|
||||
heightInt int,
|
||||
diff []types.ValidatorUpdate,
|
||||
txs ...[]byte) {
|
||||
txs ...[]byte,
|
||||
) {
|
||||
// make and apply block
|
||||
height := int64(heightInt)
|
||||
hash := []byte("foo")
|
||||
@@ -207,7 +206,6 @@ func makeApplyBlock(
|
||||
kvstore.Commit()
|
||||
|
||||
valsEqual(t, diff, resEndBlock.ValidatorUpdates)
|
||||
|
||||
}
|
||||
|
||||
// order doesn't matter
|
||||
|
||||
@@ -2,9 +2,8 @@
|
||||
Package server is used to start a new ABCI server.
|
||||
|
||||
It contains two server implementations:
|
||||
* gRPC server
|
||||
* socket server
|
||||
|
||||
- gRPC server
|
||||
- socket server
|
||||
*/
|
||||
package server
|
||||
|
||||
|
||||
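Illustrative aside (not part of the diff): a minimal sketch of starting one of the two server implementations. It assumes the package's NewServer helper, which picks the transport by name ("socket" or "grpc"); the listen address and logger wiring are example choices.

package main

import (
    "log"
    "os"

    "github.com/tendermint/tendermint/abci/server"
    "github.com/tendermint/tendermint/abci/types"
    tmlog "github.com/tendermint/tendermint/libs/log"
)

func main() {
    app := types.NewBaseApplication()

    // Transport is either "socket" or "grpc".
    srv, err := server.NewServer("tcp://0.0.0.0:26658", "socket", app)
    if err != nil {
        log.Fatal(err)
    }
    srv.SetLogger(tmlog.NewTMLogger(tmlog.NewSyncWriter(os.Stdout)))
    if err := srv.Start(); err != nil {
        log.Fatal(err)
    }

    // Block forever; a real binary would wait for a shutdown signal instead.
    select {}
}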
@@ -1967,6 +1967,11 @@ type ResponseCheckTx struct {
|
||||
GasUsed int64 `protobuf:"varint,6,opt,name=gas_used,proto3" json:"gas_used,omitempty"`
|
||||
Events []Event `protobuf:"bytes,7,rep,name=events,proto3" json:"events,omitempty"`
|
||||
Codespace string `protobuf:"bytes,8,opt,name=codespace,proto3" json:"codespace,omitempty"`
|
||||
Sender string `protobuf:"bytes,9,opt,name=sender,proto3" json:"sender,omitempty"`
|
||||
Priority int64 `protobuf:"varint,10,opt,name=priority,proto3" json:"priority,omitempty"`
|
||||
// mempool_error is set by Tendermint.
|
||||
// ABCI applications creating a ResponseCheckTx should not set mempool_error.
|
||||
MempoolError string `protobuf:"bytes,11,opt,name=mempool_error,json=mempoolError,proto3" json:"mempool_error,omitempty"`
|
||||
}
|
||||
|
||||
func (m *ResponseCheckTx) Reset() { *m = ResponseCheckTx{} }
|
||||
@@ -2058,6 +2063,27 @@ func (m *ResponseCheckTx) GetCodespace() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *ResponseCheckTx) GetSender() string {
|
||||
if m != nil {
|
||||
return m.Sender
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *ResponseCheckTx) GetPriority() int64 {
|
||||
if m != nil {
|
||||
return m.Priority
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *ResponseCheckTx) GetMempoolError() string {
|
||||
if m != nil {
|
||||
return m.MempoolError
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
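Illustrative aside (not part of the diff): a sketch of how an ABCI application might populate the new Sender and Priority fields from its CheckTx handler while leaving MempoolError for Tendermint to fill in. The package name and the sender-derivation helper are placeholders, not an established API.

package kvpriority

import (
    "github.com/tendermint/tendermint/abci/types"
)

// Application is a toy application that only overrides CheckTx.
type Application struct {
    types.BaseApplication
}

func (app *Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
    return types.ResponseCheckTx{
        Code:      0,
        GasWanted: 1,
        // Sender and Priority feed the priority mempool; how the sender is
        // derived is application-specific (this truncation is a stand-in).
        Sender:   placeholderSender(req.Tx),
        Priority: int64(len(req.Tx)),
        // MempoolError is set by Tendermint, so the application leaves it empty.
    }
}

func placeholderSender(tx []byte) string {
    if len(tx) > 8 {
        tx = tx[:8]
    }
    return string(tx)
}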
type ResponseDeliverTx struct {
|
||||
Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
|
||||
Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
|
||||
@@ -3199,179 +3225,181 @@ func init() {
|
||||
func init() { proto.RegisterFile("tendermint/abci/types.proto", fileDescriptor_252557cfdd89a31a) }
|
||||
|
||||
var fileDescriptor_252557cfdd89a31a = []byte{
|
||||
// 2741 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x5a, 0x4b, 0x73, 0x1b, 0xc7,
|
||||
0x11, 0xc6, 0xfb, 0xd1, 0x24, 0x1e, 0x1c, 0xd1, 0x12, 0xb4, 0x92, 0x48, 0x79, 0x55, 0x72, 0x2c,
|
||||
0xd9, 0x26, 0x63, 0xaa, 0xa4, 0x48, 0xb1, 0x13, 0x9b, 0x80, 0x20, 0x83, 0x26, 0x4d, 0x30, 0x4b,
|
||||
0x48, 0xce, 0xcb, 0x5a, 0x2f, 0xb0, 0x43, 0x60, 0x2d, 0x60, 0x77, 0x8d, 0x1d, 0x50, 0xa4, 0x8f,
|
||||
0x71, 0x72, 0x51, 0x2e, 0xce, 0x2d, 0x17, 0xff, 0x8f, 0x9c, 0x72, 0xc9, 0xc5, 0x55, 0xb9, 0xf8,
|
||||
0x98, 0x93, 0x93, 0x92, 0x2a, 0x97, 0xfc, 0x81, 0x9c, 0x52, 0x49, 0xcd, 0x63, 0x5f, 0x00, 0x16,
|
||||
0x00, 0xed, 0xdc, 0x7c, 0x9b, 0x99, 0xed, 0xee, 0xc5, 0xf4, 0x4e, 0x7f, 0xfd, 0x75, 0x0f, 0xe0,
|
||||
0x12, 0xc1, 0xa6, 0x8e, 0x87, 0x03, 0xc3, 0x24, 0x9b, 0x5a, 0xbb, 0x63, 0x6c, 0x92, 0x53, 0x1b,
|
||||
0x3b, 0x1b, 0xf6, 0xd0, 0x22, 0x16, 0x2a, 0xf9, 0x0f, 0x37, 0xe8, 0x43, 0xe9, 0x4a, 0x40, 0xba,
|
||||
0x33, 0x3c, 0xb5, 0x89, 0xb5, 0x69, 0x0f, 0x2d, 0xeb, 0x88, 0xcb, 0x4b, 0x97, 0x03, 0x8f, 0x99,
|
||||
0x9d, 0xa0, 0xb5, 0xd0, 0x53, 0xa1, 0xfc, 0x04, 0x9f, 0xba, 0x4f, 0xaf, 0x4c, 0xe8, 0xda, 0xda,
|
||||
0x50, 0x1b, 0xb8, 0x8f, 0xd7, 0xbb, 0x96, 0xd5, 0xed, 0xe3, 0x4d, 0x36, 0x6b, 0x8f, 0x8e, 0x36,
|
||||
0x89, 0x31, 0xc0, 0x0e, 0xd1, 0x06, 0xb6, 0x10, 0x58, 0xed, 0x5a, 0x5d, 0x8b, 0x0d, 0x37, 0xe9,
|
||||
0x88, 0xaf, 0xca, 0x7f, 0xc8, 0x41, 0x56, 0xc1, 0x9f, 0x8e, 0xb0, 0x43, 0xd0, 0x16, 0xa4, 0x70,
|
||||
0xa7, 0x67, 0x55, 0xe2, 0x57, 0xe3, 0xaf, 0x2e, 0x6d, 0x5d, 0xde, 0x18, 0xdb, 0xdc, 0x86, 0x90,
|
||||
0xab, 0x77, 0x7a, 0x56, 0x23, 0xa6, 0x30, 0x59, 0x74, 0x1b, 0xd2, 0x47, 0xfd, 0x91, 0xd3, 0xab,
|
||||
0x24, 0x98, 0xd2, 0x95, 0x28, 0xa5, 0x07, 0x54, 0xa8, 0x11, 0x53, 0xb8, 0x34, 0x7d, 0x95, 0x61,
|
||||
0x1e, 0x59, 0x95, 0xe4, 0xec, 0x57, 0xed, 0x98, 0x47, 0xec, 0x55, 0x54, 0x16, 0x55, 0x01, 0x1c,
|
||||
0x4c, 0x54, 0xcb, 0x26, 0x86, 0x65, 0x56, 0x52, 0x4c, 0xf3, 0xe5, 0x28, 0xcd, 0x43, 0x4c, 0x9a,
|
||||
0x4c, 0xb0, 0x11, 0x53, 0xf2, 0x8e, 0x3b, 0xa1, 0x36, 0x0c, 0xd3, 0x20, 0x6a, 0xa7, 0xa7, 0x19,
|
||||
0x66, 0x25, 0x3d, 0xdb, 0xc6, 0x8e, 0x69, 0x90, 0x1a, 0x15, 0xa4, 0x36, 0x0c, 0x77, 0x42, 0xb7,
|
||||
0xfc, 0xe9, 0x08, 0x0f, 0x4f, 0x2b, 0x99, 0xd9, 0x5b, 0xfe, 0x19, 0x15, 0xa2, 0x5b, 0x66, 0xd2,
|
||||
0xa8, 0x0e, 0x4b, 0x6d, 0xdc, 0x35, 0x4c, 0xb5, 0xdd, 0xb7, 0x3a, 0x4f, 0x2a, 0x59, 0xa6, 0x2c,
|
||||
0x47, 0x29, 0x57, 0xa9, 0x68, 0x95, 0x4a, 0x36, 0x62, 0x0a, 0xb4, 0xbd, 0x19, 0x7a, 0x1b, 0x72,
|
||||
0x9d, 0x1e, 0xee, 0x3c, 0x51, 0xc9, 0x49, 0x25, 0xc7, 0x6c, 0xac, 0x47, 0xd9, 0xa8, 0x51, 0xb9,
|
||||
0xd6, 0x49, 0x23, 0xa6, 0x64, 0x3b, 0x7c, 0x48, 0xf7, 0xaf, 0xe3, 0xbe, 0x71, 0x8c, 0x87, 0x54,
|
||||
0x3f, 0x3f, 0x7b, 0xff, 0xf7, 0xb9, 0x24, 0xb3, 0x90, 0xd7, 0xdd, 0x09, 0x7a, 0x07, 0xf2, 0xd8,
|
||||
0xd4, 0xc5, 0x36, 0x80, 0x99, 0xb8, 0x1a, 0x79, 0x56, 0x4c, 0xdd, 0xdd, 0x44, 0x0e, 0x8b, 0x31,
|
||||
0xba, 0x0b, 0x99, 0x8e, 0x35, 0x18, 0x18, 0xa4, 0xb2, 0xc4, 0xb4, 0xd7, 0x22, 0x37, 0xc0, 0xa4,
|
||||
0x1a, 0x31, 0x45, 0xc8, 0xa3, 0x7d, 0x28, 0xf6, 0x0d, 0x87, 0xa8, 0x8e, 0xa9, 0xd9, 0x4e, 0xcf,
|
||||
0x22, 0x4e, 0x65, 0x99, 0x59, 0xb8, 0x1e, 0x65, 0x61, 0xcf, 0x70, 0xc8, 0xa1, 0x2b, 0xdc, 0x88,
|
||||
0x29, 0x85, 0x7e, 0x70, 0x81, 0xda, 0xb3, 0x8e, 0x8e, 0xf0, 0xd0, 0x33, 0x58, 0x29, 0xcc, 0xb6,
|
||||
0xd7, 0xa4, 0xd2, 0xae, 0x3e, 0xb5, 0x67, 0x05, 0x17, 0xd0, 0xaf, 0xe0, 0x5c, 0xdf, 0xd2, 0x74,
|
||||
0xcf, 0x9c, 0xda, 0xe9, 0x8d, 0xcc, 0x27, 0x95, 0x22, 0x33, 0x7a, 0x23, 0xf2, 0x47, 0x5a, 0x9a,
|
||||
0xee, 0x9a, 0xa8, 0x51, 0x85, 0x46, 0x4c, 0x59, 0xe9, 0x8f, 0x2f, 0xa2, 0xc7, 0xb0, 0xaa, 0xd9,
|
||||
0x76, 0xff, 0x74, 0xdc, 0x7a, 0x89, 0x59, 0xbf, 0x19, 0x65, 0x7d, 0x9b, 0xea, 0x8c, 0x9b, 0x47,
|
||||
0xda, 0xc4, 0x6a, 0x35, 0x0b, 0xe9, 0x63, 0xad, 0x3f, 0xc2, 0xf2, 0x0f, 0x60, 0x29, 0x10, 0xea,
|
||||
0xa8, 0x02, 0xd9, 0x01, 0x76, 0x1c, 0xad, 0x8b, 0x19, 0x32, 0xe4, 0x15, 0x77, 0x2a, 0x17, 0x61,
|
||||
0x39, 0x18, 0xde, 0xf2, 0xc0, 0x53, 0xa4, 0x81, 0x4b, 0x15, 0x8f, 0xf1, 0xd0, 0xa1, 0xd1, 0x2a,
|
||||
0x14, 0xc5, 0x14, 0x5d, 0x83, 0x02, 0x3b, 0x3e, 0xaa, 0xfb, 0x9c, 0xa2, 0x47, 0x4a, 0x59, 0x66,
|
||||
0x8b, 0x8f, 0x84, 0xd0, 0x3a, 0x2c, 0xd9, 0x5b, 0xb6, 0x27, 0x92, 0x64, 0x22, 0x60, 0x6f, 0xd9,
|
||||
0x42, 0x40, 0xfe, 0x31, 0x94, 0xc7, 0xa3, 0x1d, 0x95, 0x21, 0xf9, 0x04, 0x9f, 0x8a, 0xf7, 0xd1,
|
||||
0x21, 0x5a, 0x15, 0xdb, 0x62, 0xef, 0xc8, 0x2b, 0x62, 0x8f, 0x7f, 0x4d, 0x78, 0xca, 0x5e, 0x98,
|
||||
0xa3, 0xbb, 0x90, 0xa2, 0xa8, 0x29, 0x00, 0x50, 0xda, 0xe0, 0x90, 0xba, 0xe1, 0x42, 0xea, 0x46,
|
||||
0xcb, 0x85, 0xd4, 0x6a, 0xee, 0xab, 0x6f, 0xd6, 0x63, 0x5f, 0xfc, 0x7d, 0x3d, 0xae, 0x30, 0x0d,
|
||||
0x74, 0x91, 0x46, 0xa5, 0x66, 0x98, 0xaa, 0xa1, 0x8b, 0xf7, 0x64, 0xd9, 0x7c, 0x47, 0x47, 0xbb,
|
||||
0x50, 0xee, 0x58, 0xa6, 0x83, 0x4d, 0x67, 0xe4, 0xa8, 0x1c, 0xb2, 0x05, 0xec, 0x4d, 0x46, 0x4d,
|
||||
0xcd, 0x15, 0x3c, 0x60, 0x72, 0x4a, 0xa9, 0x13, 0x5e, 0x40, 0x0f, 0x00, 0x8e, 0xb5, 0xbe, 0xa1,
|
||||
0x6b, 0xc4, 0x1a, 0x3a, 0x95, 0xd4, 0xd5, 0xe4, 0x54, 0x33, 0x8f, 0x5c, 0x91, 0x87, 0xb6, 0xae,
|
||||
0x11, 0x5c, 0x4d, 0xd1, 0x5f, 0xab, 0x04, 0x34, 0xd1, 0x2b, 0x50, 0xd2, 0x6c, 0x5b, 0x75, 0x88,
|
||||
0x46, 0xb0, 0xda, 0x3e, 0x25, 0xd8, 0x61, 0x60, 0xb8, 0xac, 0x14, 0x34, 0xdb, 0x3e, 0xa4, 0xab,
|
||||
0x55, 0xba, 0x88, 0xae, 0x43, 0x91, 0x02, 0x9f, 0xa1, 0xf5, 0xd5, 0x1e, 0x36, 0xba, 0x3d, 0xc2,
|
||||
0x40, 0x2f, 0xa9, 0x14, 0xc4, 0x6a, 0x83, 0x2d, 0xca, 0xba, 0x77, 0x10, 0x18, 0xe8, 0x21, 0x04,
|
||||
0x29, 0x5d, 0x23, 0x1a, 0x73, 0xe4, 0xb2, 0xc2, 0xc6, 0x74, 0xcd, 0xd6, 0x48, 0x4f, 0xb8, 0x87,
|
||||
0x8d, 0xd1, 0x79, 0xc8, 0x08, 0xb3, 0x49, 0x66, 0x56, 0xcc, 0xe8, 0x37, 0xb3, 0x87, 0xd6, 0x31,
|
||||
0x66, 0x28, 0x9f, 0x53, 0xf8, 0x44, 0xfe, 0x6d, 0x02, 0x56, 0x26, 0xe0, 0x91, 0xda, 0xed, 0x69,
|
||||
0x4e, 0xcf, 0x7d, 0x17, 0x1d, 0xa3, 0x3b, 0xd4, 0xae, 0xa6, 0xe3, 0xa1, 0x48, 0x4b, 0x95, 0xa0,
|
||||
0x8b, 0x78, 0xca, 0x6d, 0xb0, 0xe7, 0xc2, 0x35, 0x42, 0x1a, 0x35, 0xa1, 0xdc, 0xd7, 0x1c, 0xa2,
|
||||
0x72, 0xb8, 0x51, 0x03, 0x29, 0x6a, 0x12, 0x64, 0xf7, 0x34, 0x17, 0xa0, 0xe8, 0x61, 0x17, 0x86,
|
||||
0x8a, 0xfd, 0xd0, 0x2a, 0x52, 0x60, 0xb5, 0x7d, 0xfa, 0x99, 0x66, 0x12, 0xc3, 0xc4, 0xea, 0xc4,
|
||||
0x97, 0xbb, 0x38, 0x61, 0xb4, 0x7e, 0x6c, 0xe8, 0xd8, 0xec, 0xb8, 0x9f, 0xec, 0x9c, 0xa7, 0xec,
|
||||
0x7d, 0x52, 0x47, 0x56, 0xa0, 0x18, 0x06, 0x78, 0x54, 0x84, 0x04, 0x39, 0x11, 0x0e, 0x48, 0x90,
|
||||
0x13, 0xf4, 0x43, 0x48, 0xd1, 0x4d, 0xb2, 0xcd, 0x17, 0xa7, 0x64, 0x57, 0xa1, 0xd7, 0x3a, 0xb5,
|
||||
0xb1, 0xc2, 0x24, 0x65, 0xd9, 0x8b, 0x06, 0x0f, 0xf4, 0xc7, 0xad, 0xca, 0x37, 0xa0, 0x34, 0x86,
|
||||
0xea, 0x81, 0xef, 0x17, 0x0f, 0x7e, 0x3f, 0xb9, 0x04, 0x85, 0x10, 0x84, 0xcb, 0xe7, 0x61, 0x75,
|
||||
0x1a, 0x22, 0xcb, 0x3d, 0x6f, 0x3d, 0x84, 0xac, 0xe8, 0x36, 0xe4, 0x3c, 0x48, 0xe6, 0xd1, 0x38,
|
||||
0xe9, 0x2b, 0x57, 0x58, 0xf1, 0x44, 0x69, 0x18, 0xd2, 0x63, 0xcd, 0xce, 0x43, 0x82, 0xfd, 0xf0,
|
||||
0xac, 0x66, 0xdb, 0x0d, 0xcd, 0xe9, 0xc9, 0x1f, 0x43, 0x25, 0x0a, 0x6e, 0xc7, 0xb6, 0x91, 0xf2,
|
||||
0x8e, 0xe1, 0x79, 0xc8, 0x1c, 0x59, 0xc3, 0x81, 0x46, 0x98, 0xb1, 0x82, 0x22, 0x66, 0xf4, 0x78,
|
||||
0x72, 0xe8, 0x4d, 0xb2, 0x65, 0x3e, 0x91, 0x55, 0xb8, 0x18, 0x09, 0xb9, 0x54, 0xc5, 0x30, 0x75,
|
||||
0xcc, 0xfd, 0x59, 0x50, 0xf8, 0xc4, 0x37, 0xc4, 0x7f, 0x2c, 0x9f, 0xd0, 0xd7, 0x3a, 0x6c, 0xaf,
|
||||
0xcc, 0x7e, 0x5e, 0x11, 0x33, 0xf9, 0x9f, 0x39, 0xc8, 0x29, 0xd8, 0xb1, 0x29, 0x26, 0xa0, 0x2a,
|
||||
0xe4, 0xf1, 0x49, 0x07, 0x73, 0x32, 0x14, 0x8f, 0x24, 0x13, 0x5c, 0xba, 0xee, 0x4a, 0xd2, 0x4c,
|
||||
0xee, 0xa9, 0xa1, 0x5b, 0x82, 0xf0, 0x45, 0x73, 0x37, 0xa1, 0x1e, 0x64, 0x7c, 0x77, 0x5c, 0xc6,
|
||||
0x97, 0x8c, 0x4c, 0xde, 0x5c, 0x6b, 0x8c, 0xf2, 0xdd, 0x12, 0x94, 0x2f, 0x35, 0xe7, 0x65, 0x21,
|
||||
0xce, 0x57, 0x0b, 0x71, 0xbe, 0xf4, 0x9c, 0x6d, 0x46, 0x90, 0xbe, 0x5a, 0x88, 0xf4, 0x65, 0xe6,
|
||||
0x18, 0x89, 0x60, 0x7d, 0x77, 0x5c, 0xd6, 0x97, 0x9d, 0xb3, 0xed, 0x31, 0xda, 0xf7, 0x20, 0x4c,
|
||||
0xfb, 0x38, 0x65, 0xbb, 0x16, 0xa9, 0x1d, 0xc9, 0xfb, 0x7e, 0x12, 0xe0, 0x7d, 0xf9, 0x48, 0xd2,
|
||||
0xc5, 0x8d, 0x4c, 0x21, 0x7e, 0xb5, 0x10, 0xf1, 0x83, 0x39, 0x3e, 0x88, 0x60, 0x7e, 0xef, 0x06,
|
||||
0x99, 0xdf, 0x52, 0x24, 0x79, 0x14, 0x87, 0x66, 0x1a, 0xf5, 0xbb, 0xe7, 0x51, 0xbf, 0xe5, 0x48,
|
||||
0xee, 0x2a, 0xf6, 0x30, 0xce, 0xfd, 0x9a, 0x13, 0xdc, 0x8f, 0x73, 0xb5, 0x57, 0x22, 0x4d, 0xcc,
|
||||
0x21, 0x7f, 0xcd, 0x09, 0xf2, 0x57, 0x9c, 0x63, 0x70, 0x0e, 0xfb, 0xfb, 0xf5, 0x74, 0xf6, 0x17,
|
||||
0xcd, 0xcf, 0xc4, 0xcf, 0x5c, 0x8c, 0xfe, 0xa9, 0x11, 0xf4, 0xaf, 0xcc, 0xcc, 0xbf, 0x16, 0x69,
|
||||
0xfe, 0xec, 0xfc, 0xef, 0x06, 0x4d, 0xb3, 0x63, 0xc0, 0x41, 0xa1, 0x0a, 0x0f, 0x87, 0xd6, 0x50,
|
||||
0x50, 0x2b, 0x3e, 0x91, 0x5f, 0xa5, 0x89, 0xdf, 0x07, 0x89, 0x19, 0x5c, 0x91, 0xa5, 0x84, 0x00,
|
||||
0x30, 0xc8, 0x7f, 0x8a, 0xfb, 0xba, 0x2c, 0x57, 0x06, 0x49, 0x43, 0x5e, 0x90, 0x86, 0x00, 0x85,
|
||||
0x4c, 0x84, 0x29, 0xe4, 0x3a, 0x2c, 0x51, 0xa8, 0x1f, 0x63, 0x87, 0x9a, 0xed, 0xb2, 0x43, 0x74,
|
||||
0x13, 0x56, 0x58, 0x2e, 0xe7, 0x44, 0x53, 0xe0, 0x7b, 0x8a, 0xa5, 0xa9, 0x12, 0x7d, 0xc0, 0x0f,
|
||||
0x27, 0x07, 0xfa, 0x37, 0xe0, 0x5c, 0x40, 0xd6, 0x4b, 0x21, 0x9c, 0x12, 0x95, 0x3d, 0xe9, 0x6d,
|
||||
0x91, 0x4b, 0x3e, 0xf0, 0x1d, 0xe4, 0x33, 0x4f, 0x04, 0xa9, 0x8e, 0xa5, 0x63, 0x01, 0xf0, 0x6c,
|
||||
0x4c, 0xd9, 0x68, 0xdf, 0xea, 0x0a, 0x18, 0xa7, 0x43, 0x2a, 0xe5, 0xa1, 0x60, 0x9e, 0x83, 0x9c,
|
||||
0xfc, 0x97, 0xb8, 0x6f, 0xcf, 0x27, 0xa3, 0xd3, 0x78, 0x63, 0xfc, 0xff, 0xc3, 0x1b, 0x13, 0xdf,
|
||||
0x9a, 0x37, 0x06, 0x13, 0x6c, 0x32, 0x9c, 0x60, 0xff, 0x1d, 0xf7, 0xbf, 0xb0, 0xc7, 0x02, 0xbf,
|
||||
0x9d, 0x47, 0xfc, 0x6c, 0x99, 0x66, 0xdf, 0x4b, 0x64, 0x4b, 0xc1, 0xed, 0x33, 0xec, 0xbd, 0x61,
|
||||
0x6e, 0x9f, 0xe5, 0xf9, 0x93, 0x4d, 0xd0, 0x5d, 0xc8, 0xb3, 0xa6, 0x8b, 0x6a, 0xd9, 0x8e, 0x00,
|
||||
0xdc, 0x4b, 0xc1, 0xbd, 0xf2, 0xde, 0xca, 0xc6, 0x01, 0x95, 0x69, 0xda, 0x8e, 0x92, 0xb3, 0xc5,
|
||||
0x28, 0x40, 0x04, 0xf2, 0x21, 0x3e, 0x7a, 0x19, 0xf2, 0xf4, 0xd7, 0x3b, 0xb6, 0xd6, 0xc1, 0x0c,
|
||||
0x3c, 0xf3, 0x8a, 0xbf, 0x20, 0x3f, 0x06, 0x34, 0x09, 0xdf, 0xa8, 0x01, 0x19, 0x7c, 0x8c, 0x4d,
|
||||
0x42, 0xbf, 0x1a, 0x75, 0xf7, 0xf9, 0x29, 0x64, 0x0f, 0x9b, 0xa4, 0x5a, 0xa1, 0x4e, 0xfe, 0xd7,
|
||||
0x37, 0xeb, 0x65, 0x2e, 0xfd, 0xba, 0x35, 0x30, 0x08, 0x1e, 0xd8, 0xe4, 0x54, 0x11, 0xfa, 0xf2,
|
||||
0xe7, 0x09, 0xca, 0xbc, 0x42, 0xd0, 0x3e, 0xd5, 0xb7, 0x6e, 0x00, 0x25, 0x02, 0xac, 0x7b, 0x31,
|
||||
0x7f, 0xaf, 0x01, 0x74, 0x35, 0x47, 0x7d, 0xaa, 0x99, 0x04, 0xeb, 0xc2, 0xe9, 0x81, 0x15, 0x24,
|
||||
0x41, 0x8e, 0xce, 0x46, 0x0e, 0xd6, 0x45, 0x01, 0xe0, 0xcd, 0x03, 0xfb, 0xcc, 0x7e, 0xb7, 0x7d,
|
||||
0x86, 0xbd, 0x9c, 0x1b, 0xf7, 0xf2, 0xef, 0x12, 0x7e, 0x94, 0xf8, 0x24, 0xf5, 0xfb, 0xe7, 0x87,
|
||||
0xdf, 0xb3, 0xca, 0x35, 0x9c, 0x63, 0xd1, 0x21, 0xac, 0x78, 0x51, 0xaa, 0x8e, 0x58, 0xf4, 0xba,
|
||||
0xe7, 0x6e, 0xd1, 0x30, 0x2f, 0x1f, 0x87, 0x97, 0x1d, 0xf4, 0x73, 0xb8, 0x30, 0x86, 0x40, 0x9e,
|
||||
0xe9, 0xc4, 0x82, 0x40, 0xf4, 0x52, 0x18, 0x88, 0x5c, 0xcb, 0xbe, 0xaf, 0x92, 0xdf, 0x31, 0x36,
|
||||
0x76, 0x68, 0x31, 0x14, 0x64, 0x0c, 0x53, 0xbf, 0xfe, 0x35, 0x28, 0x0c, 0x31, 0xa1, 0xf5, 0x79,
|
||||
0xa8, 0xdc, 0x5c, 0xe6, 0x8b, 0xa2, 0x88, 0x3d, 0x80, 0x97, 0xa6, 0x32, 0x07, 0xf4, 0x23, 0xc8,
|
||||
0xfb, 0xa4, 0x23, 0x1e, 0x51, 0xb9, 0x79, 0xd5, 0x88, 0x2f, 0x2b, 0xff, 0x39, 0xee, 0x9b, 0x0c,
|
||||
0xd7, 0x37, 0x75, 0xc8, 0x0c, 0xb1, 0x33, 0xea, 0xf3, 0x8a, 0xa3, 0xb8, 0xf5, 0xc6, 0x62, 0x9c,
|
||||
0x83, 0xae, 0x8e, 0xfa, 0x44, 0x11, 0xca, 0xf2, 0x63, 0xc8, 0xf0, 0x15, 0xb4, 0x04, 0xd9, 0x87,
|
||||
0xfb, 0xbb, 0xfb, 0xcd, 0x0f, 0xf7, 0xcb, 0x31, 0x04, 0x90, 0xd9, 0xae, 0xd5, 0xea, 0x07, 0xad,
|
||||
0x72, 0x1c, 0xe5, 0x21, 0xbd, 0x5d, 0x6d, 0x2a, 0xad, 0x72, 0x82, 0x2e, 0x2b, 0xf5, 0xf7, 0xeb,
|
||||
0xb5, 0x56, 0x39, 0x89, 0x56, 0xa0, 0xc0, 0xc7, 0xea, 0x83, 0xa6, 0xf2, 0xc1, 0x76, 0xab, 0x9c,
|
||||
0x0a, 0x2c, 0x1d, 0xd6, 0xf7, 0xef, 0xd7, 0x95, 0x72, 0x5a, 0x7e, 0x93, 0x96, 0x34, 0x11, 0x2c,
|
||||
0xc5, 0x2f, 0x5e, 0xe2, 0x81, 0xe2, 0x45, 0xfe, 0x63, 0x02, 0xa4, 0x68, 0xea, 0x81, 0xde, 0x1f,
|
||||
0xdb, 0xf8, 0xd6, 0x19, 0x78, 0xcb, 0xd8, 0xee, 0xd1, 0x75, 0x28, 0x0e, 0xf1, 0x11, 0x26, 0x9d,
|
||||
0x1e, 0xa7, 0x42, 0x3c, 0xb1, 0x15, 0x94, 0x82, 0x58, 0x65, 0x4a, 0x0e, 0x17, 0xfb, 0x04, 0x77,
|
||||
0x88, 0xca, 0xeb, 0x28, 0x7e, 0xe8, 0xf2, 0x54, 0x8c, 0xae, 0x1e, 0xf2, 0x45, 0xf9, 0xe3, 0x33,
|
||||
0xf9, 0x32, 0x0f, 0x69, 0xa5, 0xde, 0x52, 0x7e, 0x51, 0x4e, 0x22, 0x04, 0x45, 0x36, 0x54, 0x0f,
|
||||
0xf7, 0xb7, 0x0f, 0x0e, 0x1b, 0x4d, 0xea, 0xcb, 0x73, 0x50, 0x72, 0x7d, 0xe9, 0x2e, 0xa6, 0xe5,
|
||||
0xff, 0xc6, 0xa1, 0x34, 0x16, 0x20, 0x68, 0x0b, 0xd2, 0x9c, 0x4e, 0x47, 0x35, 0xdd, 0x59, 0x7c,
|
||||
0x8b, 0x68, 0xe2, 0xa2, 0xe8, 0x6d, 0xc8, 0x61, 0xd1, 0x27, 0x98, 0x16, 0x88, 0xbc, 0xbf, 0xe1,
|
||||
0x76, 0x12, 0x84, 0xaa, 0xa7, 0x81, 0xde, 0x81, 0xbc, 0x17, 0xe9, 0xa2, 0x86, 0x7b, 0x79, 0x52,
|
||||
0xdd, 0xc3, 0x08, 0xa1, 0xef, 0xeb, 0xa0, 0x7b, 0x3e, 0x27, 0x4b, 0x4d, 0x92, 0x78, 0xa1, 0xce,
|
||||
0x05, 0x84, 0xb2, 0x2b, 0x2f, 0xd7, 0x60, 0x29, 0xb0, 0x1f, 0x74, 0x09, 0xf2, 0x03, 0xed, 0x44,
|
||||
0xf4, 0x9f, 0x78, 0x07, 0x21, 0x37, 0xd0, 0x4e, 0x78, 0xeb, 0xe9, 0x02, 0x64, 0xe9, 0xc3, 0xae,
|
||||
0xc6, 0xd1, 0x26, 0xa9, 0x64, 0x06, 0xda, 0xc9, 0x7b, 0x9a, 0x23, 0x7f, 0x04, 0xc5, 0x70, 0xef,
|
||||
0x85, 0x9e, 0xc4, 0xa1, 0x35, 0x32, 0x75, 0x66, 0x23, 0xad, 0xf0, 0x09, 0xba, 0x0d, 0xe9, 0x63,
|
||||
0x8b, 0x83, 0xd5, 0xf4, 0x90, 0x7d, 0x64, 0x11, 0x1c, 0xe8, 0xdd, 0x70, 0x69, 0xf9, 0x33, 0x48,
|
||||
0x33, 0xf0, 0xa1, 0x40, 0xc2, 0xba, 0x28, 0x82, 0x8f, 0xd2, 0x31, 0xfa, 0x08, 0x40, 0x23, 0x64,
|
||||
0x68, 0xb4, 0x47, 0xbe, 0xe1, 0xf5, 0xe9, 0xe0, 0xb5, 0xed, 0xca, 0x55, 0x2f, 0x0b, 0x14, 0x5b,
|
||||
0xf5, 0x55, 0x03, 0x48, 0x16, 0x30, 0x28, 0xef, 0x43, 0x31, 0xac, 0x1b, 0xec, 0x67, 0x2e, 0x4f,
|
||||
0xe9, 0x67, 0x7a, 0x9c, 0xc7, 0x63, 0x4c, 0x49, 0xde, 0x31, 0x63, 0x13, 0xf9, 0x59, 0x1c, 0x72,
|
||||
0xad, 0x13, 0x71, 0xac, 0x23, 0x9a, 0x35, 0xbe, 0x6a, 0x22, 0xd8, 0x9a, 0xe0, 0xdd, 0x9f, 0xa4,
|
||||
0xd7, 0x53, 0x7a, 0xd7, 0x0b, 0xdc, 0xd4, 0xa2, 0xc5, 0xa3, 0xdb, 0x5c, 0x13, 0x60, 0xf5, 0x16,
|
||||
0xe4, 0xbd, 0x53, 0x45, 0x89, 0xbd, 0xa6, 0xeb, 0x43, 0xec, 0x38, 0x62, 0x6f, 0xee, 0x94, 0xf5,
|
||||
0xfe, 0xac, 0xa7, 0xa2, 0xf9, 0x91, 0x54, 0xf8, 0x44, 0xd6, 0xa1, 0x34, 0x96, 0xb6, 0xd0, 0x5b,
|
||||
0x90, 0xb5, 0x47, 0x6d, 0xd5, 0x75, 0xcf, 0x58, 0xf0, 0xb8, 0x24, 0x6f, 0xd4, 0xee, 0x1b, 0x9d,
|
||||
0x5d, 0x7c, 0xea, 0xfe, 0x18, 0x7b, 0xd4, 0xde, 0xe5, 0x5e, 0xe4, 0x6f, 0x49, 0x04, 0xdf, 0x72,
|
||||
0x0c, 0x39, 0xf7, 0x50, 0xa0, 0x9f, 0x06, 0xe3, 0xc4, 0xed, 0x08, 0x47, 0xa6, 0x52, 0x61, 0x3e,
|
||||
0x10, 0x26, 0x37, 0x61, 0xc5, 0x31, 0xba, 0x26, 0xd6, 0x55, 0xbf, 0xb4, 0x60, 0x6f, 0xcb, 0x29,
|
||||
0x25, 0xfe, 0x60, 0xcf, 0xad, 0x2b, 0xe4, 0xff, 0xc4, 0x21, 0xe7, 0x06, 0x2c, 0x7a, 0x33, 0x70,
|
||||
0xee, 0x8a, 0x53, 0x1a, 0x25, 0xae, 0xa0, 0xdf, 0xbe, 0x0b, 0xff, 0xd6, 0xc4, 0xd9, 0x7f, 0x6b,
|
||||
0x54, 0x1f, 0xd6, 0x6d, 0x88, 0xa7, 0xce, 0xdc, 0x10, 0x7f, 0x1d, 0x10, 0xb1, 0x88, 0xd6, 0x57,
|
||||
0x8f, 0x2d, 0x62, 0x98, 0x5d, 0x95, 0x3b, 0x9b, 0x33, 0xaa, 0x32, 0x7b, 0xf2, 0x88, 0x3d, 0x38,
|
||||
0x60, 0x7e, 0xff, 0x4d, 0x1c, 0x72, 0x5e, 0x6e, 0x3c, 0x6b, 0x37, 0xee, 0x3c, 0x64, 0x04, 0xfc,
|
||||
0xf3, 0x76, 0x9c, 0x98, 0x79, 0x8d, 0xe1, 0x54, 0xa0, 0x31, 0x2c, 0x41, 0x6e, 0x80, 0x89, 0xc6,
|
||||
0x08, 0x02, 0xaf, 0xee, 0xbc, 0xf9, 0xcd, 0x7b, 0xb0, 0x14, 0x68, 0x8c, 0xd2, 0xc8, 0xdb, 0xaf,
|
||||
0x7f, 0x58, 0x8e, 0x49, 0xd9, 0x67, 0x5f, 0x5e, 0x4d, 0xee, 0xe3, 0xa7, 0xf4, 0xcc, 0x2a, 0xf5,
|
||||
0x5a, 0xa3, 0x5e, 0xdb, 0x2d, 0xc7, 0xa5, 0xa5, 0x67, 0x5f, 0x5e, 0xcd, 0x2a, 0x98, 0xf5, 0x57,
|
||||
0x6e, 0x36, 0x60, 0x39, 0xf8, 0x55, 0xc2, 0x19, 0x04, 0x41, 0xf1, 0xfe, 0xc3, 0x83, 0xbd, 0x9d,
|
||||
0xda, 0x76, 0xab, 0xae, 0x3e, 0x6a, 0xb6, 0xea, 0xe5, 0x38, 0xba, 0x00, 0xe7, 0xf6, 0x76, 0xde,
|
||||
0x6b, 0xb4, 0xd4, 0xda, 0xde, 0x4e, 0x7d, 0xbf, 0xa5, 0x6e, 0xb7, 0x5a, 0xdb, 0xb5, 0xdd, 0x72,
|
||||
0x62, 0xeb, 0x73, 0x80, 0xd2, 0x76, 0xb5, 0xb6, 0x43, 0xb3, 0x9f, 0xd1, 0xd1, 0x44, 0xff, 0x2a,
|
||||
0xc5, 0x8a, 0xeb, 0x99, 0x37, 0xb2, 0xd2, 0xec, 0xf6, 0x1d, 0x7a, 0x00, 0x69, 0x56, 0x77, 0xa3,
|
||||
0xd9, 0x57, 0xb4, 0xd2, 0x9c, 0x7e, 0x1e, 0xfd, 0x31, 0x2c, 0x3c, 0x66, 0xde, 0xd9, 0x4a, 0xb3,
|
||||
0xdb, 0x7b, 0x48, 0x81, 0xbc, 0x5f, 0x38, 0xcf, 0xbf, 0xc3, 0x95, 0x16, 0x68, 0xf9, 0x51, 0x9b,
|
||||
0x7e, 0x59, 0x30, 0xff, 0x4e, 0x53, 0x5a, 0x00, 0xc0, 0xd0, 0x1e, 0x64, 0xdd, 0x82, 0x6b, 0xde,
|
||||
0x2d, 0xab, 0x34, 0xb7, 0x1d, 0x47, 0x3f, 0x01, 0x2f, 0x8c, 0x67, 0x5f, 0x19, 0x4b, 0x73, 0x7a,
|
||||
0x8b, 0x68, 0x07, 0x32, 0x82, 0xeb, 0xce, 0xb9, 0x39, 0x95, 0xe6, 0xb5, 0xd7, 0xa8, 0xd3, 0xfc,
|
||||
0x8e, 0xc3, 0xfc, 0x8b, 0x70, 0x69, 0x81, 0xb6, 0x29, 0x7a, 0x08, 0x10, 0x28, 0x83, 0x17, 0xb8,
|
||||
0xe1, 0x96, 0x16, 0x69, 0x87, 0xa2, 0x26, 0xe4, 0xbc, 0x72, 0x67, 0xee, 0x7d, 0xb3, 0x34, 0xbf,
|
||||
0x2f, 0x89, 0x1e, 0x43, 0x21, 0xcc, 0xf3, 0x17, 0xbb, 0x45, 0x96, 0x16, 0x6c, 0x38, 0x52, 0xfb,
|
||||
0x61, 0xd2, 0xbf, 0xd8, 0xad, 0xb2, 0xb4, 0x60, 0xff, 0x11, 0x7d, 0x02, 0x2b, 0x93, 0xa4, 0x7c,
|
||||
0xf1, 0x4b, 0x66, 0xe9, 0x0c, 0x1d, 0x49, 0x34, 0x00, 0x34, 0x85, 0xcc, 0x9f, 0xe1, 0xce, 0x59,
|
||||
0x3a, 0x4b, 0x83, 0xb2, 0x5a, 0xff, 0xea, 0xf9, 0x5a, 0xfc, 0xeb, 0xe7, 0x6b, 0xf1, 0x7f, 0x3c,
|
||||
0x5f, 0x8b, 0x7f, 0xf1, 0x62, 0x2d, 0xf6, 0xf5, 0x8b, 0xb5, 0xd8, 0xdf, 0x5e, 0xac, 0xc5, 0x7e,
|
||||
0xf9, 0x5a, 0xd7, 0x20, 0xbd, 0x51, 0x7b, 0xa3, 0x63, 0x0d, 0x36, 0x83, 0x7f, 0x88, 0x99, 0xf6,
|
||||
0x27, 0x9d, 0x76, 0x86, 0x25, 0xaa, 0x5b, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x3b, 0xae, 0x48,
|
||||
0xb4, 0xc4, 0x23, 0x00, 0x00,
|
||||
// 2782 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x5a, 0x4b, 0x77, 0x23, 0xc5,
|
||||
0xf5, 0xd7, 0x5b, 0xea, 0x2b, 0xeb, 0xe1, 0x1a, 0x33, 0x08, 0x31, 0xd8, 0x43, 0x73, 0xe0, 0x0f,
|
||||
0x03, 0xd8, 0x7f, 0xcc, 0x81, 0x40, 0x20, 0x01, 0x4b, 0x68, 0x90, 0xb1, 0xb1, 0x9c, 0xb6, 0x66,
|
||||
0xc8, 0x8b, 0x69, 0x5a, 0xea, 0xb2, 0xd4, 0x8c, 0xd4, 0xdd, 0x74, 0x97, 0x8c, 0xc5, 0x32, 0x8f,
|
||||
0x0d, 0xd9, 0x90, 0x5d, 0x36, 0x7c, 0x8f, 0xac, 0xb2, 0xc9, 0x86, 0x73, 0xb2, 0x61, 0x99, 0x45,
|
||||
0x0e, 0xc9, 0x99, 0x39, 0xd9, 0xe4, 0x0b, 0x64, 0x95, 0x93, 0x9c, 0x7a, 0xf4, 0x4b, 0x52, 0x4b,
|
||||
0x32, 0x64, 0x97, 0x5d, 0xd5, 0xed, 0x7b, 0x6f, 0xab, 0xaa, 0xeb, 0xfe, 0xee, 0xef, 0xde, 0x12,
|
||||
0x3c, 0x4e, 0xb0, 0xa9, 0x63, 0x67, 0x6c, 0x98, 0x64, 0x4f, 0xeb, 0xf5, 0x8d, 0x3d, 0x32, 0xb5,
|
||||
0xb1, 0xbb, 0x6b, 0x3b, 0x16, 0xb1, 0x50, 0x25, 0x78, 0xb8, 0x4b, 0x1f, 0xd6, 0x9f, 0x08, 0x69,
|
||||
0xf7, 0x9d, 0xa9, 0x4d, 0xac, 0x3d, 0xdb, 0xb1, 0xac, 0x73, 0xae, 0x5f, 0xbf, 0x11, 0x7a, 0xcc,
|
||||
0xfc, 0x84, 0xbd, 0x45, 0x9e, 0x0a, 0xe3, 0xfb, 0x78, 0xea, 0x3d, 0x7d, 0x62, 0xce, 0xd6, 0xd6,
|
||||
0x1c, 0x6d, 0xec, 0x3d, 0xde, 0x19, 0x58, 0xd6, 0x60, 0x84, 0xf7, 0xd8, 0xac, 0x37, 0x39, 0xdf,
|
||||
0x23, 0xc6, 0x18, 0xbb, 0x44, 0x1b, 0xdb, 0x42, 0x61, 0x6b, 0x60, 0x0d, 0x2c, 0x36, 0xdc, 0xa3,
|
||||
0x23, 0x2e, 0x95, 0x7f, 0x5b, 0x80, 0xbc, 0x82, 0x3f, 0x99, 0x60, 0x97, 0xa0, 0x7d, 0xc8, 0xe0,
|
||||
0xfe, 0xd0, 0xaa, 0x25, 0x6f, 0x26, 0x9f, 0x2d, 0xee, 0xdf, 0xd8, 0x9d, 0x59, 0xdc, 0xae, 0xd0,
|
||||
0x6b, 0xf5, 0x87, 0x56, 0x3b, 0xa1, 0x30, 0x5d, 0xf4, 0x0a, 0x64, 0xcf, 0x47, 0x13, 0x77, 0x58,
|
||||
0x4b, 0x31, 0xa3, 0x27, 0xe2, 0x8c, 0x6e, 0x53, 0xa5, 0x76, 0x42, 0xe1, 0xda, 0xf4, 0x55, 0x86,
|
||||
0x79, 0x6e, 0xd5, 0xd2, 0xcb, 0x5f, 0x75, 0x68, 0x9e, 0xb3, 0x57, 0x51, 0x5d, 0xd4, 0x00, 0x70,
|
||||
0x31, 0x51, 0x2d, 0x9b, 0x18, 0x96, 0x59, 0xcb, 0x30, 0xcb, 0x27, 0xe3, 0x2c, 0xcf, 0x30, 0xe9,
|
||||
0x30, 0xc5, 0x76, 0x42, 0x91, 0x5c, 0x6f, 0x42, 0x7d, 0x18, 0xa6, 0x41, 0xd4, 0xfe, 0x50, 0x33,
|
||||
0xcc, 0x5a, 0x76, 0xb9, 0x8f, 0x43, 0xd3, 0x20, 0x4d, 0xaa, 0x48, 0x7d, 0x18, 0xde, 0x84, 0x2e,
|
||||
0xf9, 0x93, 0x09, 0x76, 0xa6, 0xb5, 0xdc, 0xf2, 0x25, 0xff, 0x88, 0x2a, 0xd1, 0x25, 0x33, 0x6d,
|
||||
0xd4, 0x82, 0x62, 0x0f, 0x0f, 0x0c, 0x53, 0xed, 0x8d, 0xac, 0xfe, 0xfd, 0x5a, 0x9e, 0x19, 0xcb,
|
||||
0x71, 0xc6, 0x0d, 0xaa, 0xda, 0xa0, 0x9a, 0xed, 0x84, 0x02, 0x3d, 0x7f, 0x86, 0xde, 0x84, 0x42,
|
||||
0x7f, 0x88, 0xfb, 0xf7, 0x55, 0x72, 0x59, 0x2b, 0x30, 0x1f, 0x3b, 0x71, 0x3e, 0x9a, 0x54, 0xaf,
|
||||
0x7b, 0xd9, 0x4e, 0x28, 0xf9, 0x3e, 0x1f, 0xd2, 0xf5, 0xeb, 0x78, 0x64, 0x5c, 0x60, 0x87, 0xda,
|
||||
0x4b, 0xcb, 0xd7, 0xff, 0x0e, 0xd7, 0x64, 0x1e, 0x24, 0xdd, 0x9b, 0xa0, 0xb7, 0x40, 0xc2, 0xa6,
|
||||
0x2e, 0x96, 0x01, 0xcc, 0xc5, 0xcd, 0xd8, 0xb3, 0x62, 0xea, 0xde, 0x22, 0x0a, 0x58, 0x8c, 0xd1,
|
||||
0x6b, 0x90, 0xeb, 0x5b, 0xe3, 0xb1, 0x41, 0x6a, 0x45, 0x66, 0xbd, 0x1d, 0xbb, 0x00, 0xa6, 0xd5,
|
||||
0x4e, 0x28, 0x42, 0x1f, 0x9d, 0x40, 0x79, 0x64, 0xb8, 0x44, 0x75, 0x4d, 0xcd, 0x76, 0x87, 0x16,
|
||||
0x71, 0x6b, 0x1b, 0xcc, 0xc3, 0xd3, 0x71, 0x1e, 0x8e, 0x0d, 0x97, 0x9c, 0x79, 0xca, 0xed, 0x84,
|
||||
0x52, 0x1a, 0x85, 0x05, 0xd4, 0x9f, 0x75, 0x7e, 0x8e, 0x1d, 0xdf, 0x61, 0xad, 0xb4, 0xdc, 0x5f,
|
||||
0x87, 0x6a, 0x7b, 0xf6, 0xd4, 0x9f, 0x15, 0x16, 0xa0, 0x9f, 0xc1, 0xb5, 0x91, 0xa5, 0xe9, 0xbe,
|
||||
0x3b, 0xb5, 0x3f, 0x9c, 0x98, 0xf7, 0x6b, 0x65, 0xe6, 0xf4, 0xb9, 0xd8, 0x1f, 0x69, 0x69, 0xba,
|
||||
0xe7, 0xa2, 0x49, 0x0d, 0xda, 0x09, 0x65, 0x73, 0x34, 0x2b, 0x44, 0xf7, 0x60, 0x4b, 0xb3, 0xed,
|
||||
0xd1, 0x74, 0xd6, 0x7b, 0x85, 0x79, 0xbf, 0x15, 0xe7, 0xfd, 0x80, 0xda, 0xcc, 0xba, 0x47, 0xda,
|
||||
0x9c, 0xb4, 0x91, 0x87, 0xec, 0x85, 0x36, 0x9a, 0x60, 0xf9, 0xff, 0xa0, 0x18, 0x0a, 0x75, 0x54,
|
||||
0x83, 0xfc, 0x18, 0xbb, 0xae, 0x36, 0xc0, 0x0c, 0x19, 0x24, 0xc5, 0x9b, 0xca, 0x65, 0xd8, 0x08,
|
||||
0x87, 0xb7, 0x3c, 0xf6, 0x0d, 0x69, 0xe0, 0x52, 0xc3, 0x0b, 0xec, 0xb8, 0x34, 0x5a, 0x85, 0xa1,
|
||||
0x98, 0xa2, 0xa7, 0xa0, 0xc4, 0x8e, 0x8f, 0xea, 0x3d, 0xa7, 0xe8, 0x91, 0x51, 0x36, 0x98, 0xf0,
|
||||
0xae, 0x50, 0xda, 0x81, 0xa2, 0xbd, 0x6f, 0xfb, 0x2a, 0x69, 0xa6, 0x02, 0xf6, 0xbe, 0x2d, 0x14,
|
||||
0xe4, 0xef, 0x43, 0x75, 0x36, 0xda, 0x51, 0x15, 0xd2, 0xf7, 0xf1, 0x54, 0xbc, 0x8f, 0x0e, 0xd1,
|
||||
0x96, 0x58, 0x16, 0x7b, 0x87, 0xa4, 0x88, 0x35, 0xfe, 0x29, 0xe5, 0x1b, 0xfb, 0x61, 0x8e, 0x5e,
|
||||
0x83, 0x0c, 0x45, 0x4d, 0x01, 0x80, 0xf5, 0x5d, 0x0e, 0xa9, 0xbb, 0x1e, 0xa4, 0xee, 0x76, 0x3d,
|
||||
0x48, 0x6d, 0x14, 0xbe, 0xfa, 0x66, 0x27, 0xf1, 0xc5, 0x5f, 0x77, 0x92, 0x0a, 0xb3, 0x40, 0x8f,
|
||||
0xd1, 0xa8, 0xd4, 0x0c, 0x53, 0x35, 0x74, 0xf1, 0x9e, 0x3c, 0x9b, 0x1f, 0xea, 0xe8, 0x08, 0xaa,
|
||||
0x7d, 0xcb, 0x74, 0xb1, 0xe9, 0x4e, 0x5c, 0x95, 0x43, 0xb6, 0x80, 0xbd, 0xf9, 0xa8, 0x69, 0x7a,
|
||||
0x8a, 0xa7, 0x4c, 0x4f, 0xa9, 0xf4, 0xa3, 0x02, 0x74, 0x1b, 0xe0, 0x42, 0x1b, 0x19, 0xba, 0x46,
|
||||
0x2c, 0xc7, 0xad, 0x65, 0x6e, 0xa6, 0x17, 0xba, 0xb9, 0xeb, 0xa9, 0xdc, 0xb1, 0x75, 0x8d, 0xe0,
|
||||
0x46, 0x86, 0xfe, 0x5a, 0x25, 0x64, 0x89, 0x9e, 0x81, 0x8a, 0x66, 0xdb, 0xaa, 0x4b, 0x34, 0x82,
|
||||
0xd5, 0xde, 0x94, 0x60, 0x97, 0x81, 0xe1, 0x86, 0x52, 0xd2, 0x6c, 0xfb, 0x8c, 0x4a, 0x1b, 0x54,
|
||||
0x88, 0x9e, 0x86, 0x32, 0x05, 0x3e, 0x43, 0x1b, 0xa9, 0x43, 0x6c, 0x0c, 0x86, 0x84, 0x81, 0x5e,
|
||||
0x5a, 0x29, 0x09, 0x69, 0x9b, 0x09, 0x65, 0xdd, 0x3f, 0x08, 0x0c, 0xf4, 0x10, 0x82, 0x8c, 0xae,
|
||||
0x11, 0x8d, 0x6d, 0xe4, 0x86, 0xc2, 0xc6, 0x54, 0x66, 0x6b, 0x64, 0x28, 0xb6, 0x87, 0x8d, 0xd1,
|
||||
0x75, 0xc8, 0x09, 0xb7, 0x69, 0xe6, 0x56, 0xcc, 0xe8, 0x37, 0xb3, 0x1d, 0xeb, 0x02, 0x33, 0x94,
|
||||
0x2f, 0x28, 0x7c, 0x22, 0xff, 0x2a, 0x05, 0x9b, 0x73, 0xf0, 0x48, 0xfd, 0x0e, 0x35, 0x77, 0xe8,
|
||||
0xbd, 0x8b, 0x8e, 0xd1, 0xab, 0xd4, 0xaf, 0xa6, 0x63, 0x47, 0xa4, 0xa5, 0x5a, 0x78, 0x8b, 0x78,
|
||||
0xca, 0x6d, 0xb3, 0xe7, 0x62, 0x6b, 0x84, 0x36, 0xea, 0x40, 0x75, 0xa4, 0xb9, 0x44, 0xe5, 0x70,
|
||||
0xa3, 0x86, 0x52, 0xd4, 0x3c, 0xc8, 0x1e, 0x6b, 0x1e, 0x40, 0xd1, 0xc3, 0x2e, 0x1c, 0x95, 0x47,
|
||||
0x11, 0x29, 0x52, 0x60, 0xab, 0x37, 0xfd, 0x4c, 0x33, 0x89, 0x61, 0x62, 0x75, 0xee, 0xcb, 0x3d,
|
||||
0x36, 0xe7, 0xb4, 0x75, 0x61, 0xe8, 0xd8, 0xec, 0x7b, 0x9f, 0xec, 0x9a, 0x6f, 0xec, 0x7f, 0x52,
|
||||
0x57, 0x56, 0xa0, 0x1c, 0x05, 0x78, 0x54, 0x86, 0x14, 0xb9, 0x14, 0x1b, 0x90, 0x22, 0x97, 0xe8,
|
||||
0xff, 0x21, 0x43, 0x17, 0xc9, 0x16, 0x5f, 0x5e, 0x90, 0x5d, 0x85, 0x5d, 0x77, 0x6a, 0x63, 0x85,
|
||||
0x69, 0xca, 0xb2, 0x1f, 0x0d, 0x3e, 0xe8, 0xcf, 0x7a, 0x95, 0x9f, 0x83, 0xca, 0x0c, 0xaa, 0x87,
|
||||
0xbe, 0x5f, 0x32, 0xfc, 0xfd, 0xe4, 0x0a, 0x94, 0x22, 0x10, 0x2e, 0x5f, 0x87, 0xad, 0x45, 0x88,
|
||||
0x2c, 0x0f, 0x7d, 0x79, 0x04, 0x59, 0xd1, 0x2b, 0x50, 0xf0, 0x21, 0x99, 0x47, 0xe3, 0xfc, 0x5e,
|
||||
0x79, 0xca, 0x8a, 0xaf, 0x4a, 0xc3, 0x90, 0x1e, 0x6b, 0x76, 0x1e, 0x52, 0xec, 0x87, 0xe7, 0x35,
|
||||
0xdb, 0x6e, 0x6b, 0xee, 0x50, 0xfe, 0x08, 0x6a, 0x71, 0x70, 0x3b, 0xb3, 0x8c, 0x8c, 0x7f, 0x0c,
|
||||
0xaf, 0x43, 0xee, 0xdc, 0x72, 0xc6, 0x1a, 0x61, 0xce, 0x4a, 0x8a, 0x98, 0xd1, 0xe3, 0xc9, 0xa1,
|
||||
0x37, 0xcd, 0xc4, 0x7c, 0x22, 0xab, 0xf0, 0x58, 0x2c, 0xe4, 0x52, 0x13, 0xc3, 0xd4, 0x31, 0xdf,
|
||||
0xcf, 0x92, 0xc2, 0x27, 0x81, 0x23, 0xfe, 0x63, 0xf9, 0x84, 0xbe, 0xd6, 0x65, 0x6b, 0x65, 0xfe,
|
||||
0x25, 0x45, 0xcc, 0xe4, 0xbf, 0x17, 0xa0, 0xa0, 0x60, 0xd7, 0xa6, 0x98, 0x80, 0x1a, 0x20, 0xe1,
|
||||
0xcb, 0x3e, 0xe6, 0x64, 0x28, 0x19, 0x4b, 0x26, 0xb8, 0x76, 0xcb, 0xd3, 0xa4, 0x99, 0xdc, 0x37,
|
||||
0x43, 0x2f, 0x0b, 0xc2, 0x17, 0xcf, 0xdd, 0x84, 0x79, 0x98, 0xf1, 0xbd, 0xea, 0x31, 0xbe, 0x74,
|
||||
0x6c, 0xf2, 0xe6, 0x56, 0x33, 0x94, 0xef, 0x65, 0x41, 0xf9, 0x32, 0x2b, 0x5e, 0x16, 0xe1, 0x7c,
|
||||
0xcd, 0x08, 0xe7, 0xcb, 0xae, 0x58, 0x66, 0x0c, 0xe9, 0x6b, 0x46, 0x48, 0x5f, 0x6e, 0x85, 0x93,
|
||||
0x18, 0xd6, 0xf7, 0xaa, 0xc7, 0xfa, 0xf2, 0x2b, 0x96, 0x3d, 0x43, 0xfb, 0x6e, 0x47, 0x69, 0x1f,
|
||||
0xa7, 0x6c, 0x4f, 0xc5, 0x5a, 0xc7, 0xf2, 0xbe, 0x1f, 0x84, 0x78, 0x9f, 0x14, 0x4b, 0xba, 0xb8,
|
||||
0x93, 0x05, 0xc4, 0xaf, 0x19, 0x21, 0x7e, 0xb0, 0x62, 0x0f, 0x62, 0x98, 0xdf, 0xdb, 0x61, 0xe6,
|
||||
0x57, 0x8c, 0x25, 0x8f, 0xe2, 0xd0, 0x2c, 0xa2, 0x7e, 0xaf, 0xfb, 0xd4, 0x6f, 0x23, 0x96, 0xbb,
|
||||
0x8a, 0x35, 0xcc, 0x72, 0xbf, 0xce, 0x1c, 0xf7, 0xe3, 0x5c, 0xed, 0x99, 0x58, 0x17, 0x2b, 0xc8,
|
||||
0x5f, 0x67, 0x8e, 0xfc, 0x95, 0x57, 0x38, 0x5c, 0xc1, 0xfe, 0x7e, 0xbe, 0x98, 0xfd, 0xc5, 0xf3,
|
||||
0x33, 0xf1, 0x33, 0xd7, 0xa3, 0x7f, 0x6a, 0x0c, 0xfd, 0xab, 0x32, 0xf7, 0xcf, 0xc7, 0xba, 0xbf,
|
||||
0x3a, 0xff, 0x7b, 0x8e, 0xa6, 0xd9, 0x19, 0xe0, 0xa0, 0x50, 0x85, 0x1d, 0xc7, 0x72, 0x04, 0xb5,
|
||||
0xe2, 0x13, 0xf9, 0x59, 0x9a, 0xf8, 0x03, 0x90, 0x58, 0xc2, 0x15, 0x59, 0x4a, 0x08, 0x01, 0x83,
|
||||
0xfc, 0xfb, 0x64, 0x60, 0xcb, 0x72, 0x65, 0x98, 0x34, 0x48, 0x82, 0x34, 0x84, 0x28, 0x64, 0x2a,
|
||||
0x4a, 0x21, 0x77, 0xa0, 0x48, 0xa1, 0x7e, 0x86, 0x1d, 0x6a, 0xb6, 0xc7, 0x0e, 0xd1, 0x2d, 0xd8,
|
||||
0x64, 0xb9, 0x9c, 0x13, 0x4d, 0x81, 0xef, 0x19, 0x96, 0xa6, 0x2a, 0xf4, 0x01, 0x3f, 0x9c, 0x1c,
|
||||
0xe8, 0x5f, 0x84, 0x6b, 0x21, 0x5d, 0x3f, 0x85, 0x70, 0x4a, 0x54, 0xf5, 0xb5, 0x0f, 0x44, 0x2e,
|
||||
0x79, 0x3f, 0xd8, 0xa0, 0x80, 0x79, 0x22, 0xc8, 0xf4, 0x2d, 0x1d, 0x0b, 0x80, 0x67, 0x63, 0xca,
|
||||
0x46, 0x47, 0xd6, 0x40, 0xc0, 0x38, 0x1d, 0x52, 0x2d, 0x1f, 0x05, 0x25, 0x0e, 0x72, 0xf2, 0x1f,
|
||||
0x93, 0x81, 0xbf, 0x80, 0x8c, 0x2e, 0xe2, 0x8d, 0xc9, 0xff, 0x0e, 0x6f, 0x4c, 0x7d, 0x6b, 0xde,
|
||||
0x18, 0x4e, 0xb0, 0xe9, 0x68, 0x82, 0xfd, 0x67, 0x32, 0xf8, 0xc2, 0x3e, 0x0b, 0xfc, 0x76, 0x3b,
|
||||
0x12, 0x64, 0xcb, 0x2c, 0xfb, 0x5e, 0x22, 0x5b, 0x0a, 0x6e, 0x9f, 0x63, 0xef, 0x8d, 0x72, 0xfb,
|
||||
0x3c, 0xcf, 0x9f, 0x6c, 0x82, 0x5e, 0x03, 0x89, 0x35, 0x5d, 0x54, 0xcb, 0x76, 0x05, 0xe0, 0x3e,
|
||||
0x1e, 0x5e, 0x2b, 0xef, 0xad, 0xec, 0x9e, 0x52, 0x9d, 0x8e, 0xed, 0x2a, 0x05, 0x5b, 0x8c, 0x42,
|
||||
0x44, 0x40, 0x8a, 0xf0, 0xd1, 0x1b, 0x20, 0xd1, 0x5f, 0xef, 0xda, 0x5a, 0x1f, 0x33, 0xf0, 0x94,
|
||||
0x94, 0x40, 0x20, 0xdf, 0x03, 0x34, 0x0f, 0xdf, 0xa8, 0x0d, 0x39, 0x7c, 0x81, 0x4d, 0x42, 0xbf,
|
||||
0x1a, 0xdd, 0xee, 0xeb, 0x0b, 0xc8, 0x1e, 0x36, 0x49, 0xa3, 0x46, 0x37, 0xf9, 0x1f, 0xdf, 0xec,
|
||||
0x54, 0xb9, 0xf6, 0x0b, 0xd6, 0xd8, 0x20, 0x78, 0x6c, 0x93, 0xa9, 0x22, 0xec, 0xe5, 0xbf, 0xa4,
|
||||
0x28, 0xf3, 0x8a, 0x40, 0xfb, 0xc2, 0xbd, 0xf5, 0x02, 0x28, 0x15, 0x62, 0xdd, 0xeb, 0xed, 0xf7,
|
||||
0x36, 0xc0, 0x40, 0x73, 0xd5, 0x4f, 0x35, 0x93, 0x60, 0x5d, 0x6c, 0x7a, 0x48, 0x82, 0xea, 0x50,
|
||||
0xa0, 0xb3, 0x89, 0x8b, 0x75, 0x51, 0x00, 0xf8, 0xf3, 0xd0, 0x3a, 0xf3, 0xdf, 0x6d, 0x9d, 0xd1,
|
||||
0x5d, 0x2e, 0xcc, 0xec, 0x72, 0x88, 0x15, 0x49, 0x61, 0x56, 0x44, 0x7f, 0x9b, 0xed, 0x18, 0x96,
|
||||
0x63, 0x90, 0x29, 0xfb, 0x34, 0x69, 0xc5, 0x9f, 0xd3, 0x3a, 0x73, 0x8c, 0xc7, 0xb6, 0x65, 0x8d,
|
||||
0x54, 0x0e, 0x5e, 0x45, 0x66, 0xba, 0x21, 0x84, 0x2d, 0x86, 0x61, 0xbf, 0x4e, 0x05, 0xe1, 0x17,
|
||||
0xb0, 0xdf, 0xff, 0xb9, 0x0d, 0x96, 0x7f, 0xc3, 0x4a, 0xe2, 0x68, 0xf2, 0x46, 0x67, 0xb0, 0xe9,
|
||||
0x87, 0xbf, 0x3a, 0x61, 0xb0, 0xe0, 0x1d, 0xe8, 0x75, 0xf1, 0xa3, 0x7a, 0x11, 0x15, 0xbb, 0xe8,
|
||||
0xc7, 0xf0, 0xe8, 0x0c, 0xb4, 0xf9, 0xae, 0x53, 0x6b, 0x22, 0xdc, 0x23, 0x51, 0x84, 0xf3, 0x3c,
|
||||
0x07, 0x7b, 0x95, 0xfe, 0x8e, 0x41, 0x77, 0x48, 0xab, 0xac, 0x30, 0x15, 0x59, 0xf8, 0xf5, 0x9f,
|
||||
0x82, 0x92, 0x83, 0x09, 0x2d, 0xfc, 0x23, 0x75, 0xec, 0x06, 0x17, 0x8a, 0xea, 0xf8, 0x14, 0x1e,
|
||||
0x59, 0x48, 0x49, 0xd0, 0xf7, 0x40, 0x0a, 0xd8, 0x4c, 0x32, 0xa6, 0x24, 0xf4, 0xcb, 0x9c, 0x40,
|
||||
0x57, 0xfe, 0x43, 0x32, 0x70, 0x19, 0x2d, 0x9c, 0x5a, 0x90, 0x73, 0xb0, 0x3b, 0x19, 0xf1, 0x52,
|
||||
0xa6, 0xbc, 0xff, 0xe2, 0x7a, 0x64, 0x86, 0x4a, 0x27, 0x23, 0xa2, 0x08, 0x63, 0xf9, 0x1e, 0xe4,
|
||||
0xb8, 0x04, 0x15, 0x21, 0x7f, 0xe7, 0xe4, 0xe8, 0xa4, 0xf3, 0xc1, 0x49, 0x35, 0x81, 0x00, 0x72,
|
||||
0x07, 0xcd, 0x66, 0xeb, 0xb4, 0x5b, 0x4d, 0x22, 0x09, 0xb2, 0x07, 0x8d, 0x8e, 0xd2, 0xad, 0xa6,
|
||||
0xa8, 0x58, 0x69, 0xbd, 0xd7, 0x6a, 0x76, 0xab, 0x69, 0xb4, 0x09, 0x25, 0x3e, 0x56, 0x6f, 0x77,
|
||||
0x94, 0xf7, 0x0f, 0xba, 0xd5, 0x4c, 0x48, 0x74, 0xd6, 0x3a, 0x79, 0xa7, 0xa5, 0x54, 0xb3, 0xf2,
|
||||
0x4b, 0xb4, 0x56, 0x8a, 0xa1, 0x3f, 0x41, 0x55, 0x94, 0x0c, 0x55, 0x45, 0xf2, 0xef, 0x52, 0x50,
|
||||
0x8f, 0xe7, 0x34, 0xe8, 0xbd, 0x99, 0x85, 0xef, 0x5f, 0x81, 0x10, 0xcd, 0xac, 0x1e, 0x3d, 0x0d,
|
||||
0x65, 0x07, 0x9f, 0x63, 0xd2, 0x1f, 0x72, 0x8e, 0xc5, 0x33, 0x66, 0x49, 0x29, 0x09, 0x29, 0x33,
|
||||
0x72, 0xb9, 0xda, 0xc7, 0xb8, 0x4f, 0x54, 0x0e, 0x45, 0xfc, 0xd0, 0x49, 0x54, 0x8d, 0x4a, 0xcf,
|
||||
0xb8, 0x50, 0xfe, 0xe8, 0x4a, 0x7b, 0x29, 0x41, 0x56, 0x69, 0x75, 0x95, 0x9f, 0x54, 0xd3, 0x08,
|
||||
0x41, 0x99, 0x0d, 0xd5, 0xb3, 0x93, 0x83, 0xd3, 0xb3, 0x76, 0x87, 0xee, 0xe5, 0x35, 0xa8, 0x78,
|
||||
0x7b, 0xe9, 0x09, 0xb3, 0xf2, 0xbf, 0x93, 0x50, 0x99, 0x09, 0x10, 0xb4, 0x0f, 0x59, 0xce, 0xd3,
|
||||
0xe3, 0xba, 0xf9, 0x2c, 0xbe, 0x45, 0x34, 0x71, 0x55, 0xf4, 0x26, 0x14, 0xb0, 0x68, 0x40, 0x2c,
|
||||
0x0a, 0x44, 0xde, 0x38, 0xf1, 0x5a, 0x14, 0xc2, 0xd4, 0xb7, 0x40, 0x6f, 0x81, 0xe4, 0x47, 0xba,
|
||||
0x28, 0x0e, 0x9f, 0x9c, 0x37, 0xf7, 0x31, 0x42, 0xd8, 0x07, 0x36, 0xe8, 0xf5, 0x80, 0xec, 0x65,
|
||||
0xe6, 0xab, 0x03, 0x61, 0xce, 0x15, 0x84, 0xb1, 0xa7, 0x2f, 0x37, 0xa1, 0x18, 0x5a, 0x0f, 0x7a,
|
||||
0x1c, 0xa4, 0xb1, 0x76, 0x29, 0x1a, 0x5b, 0xbc, 0x35, 0x51, 0x18, 0x6b, 0x97, 0xbc, 0xa7, 0xf5,
|
||||
0x28, 0xe4, 0xe9, 0xc3, 0x81, 0xc6, 0xd1, 0x26, 0xad, 0xe4, 0xc6, 0xda, 0xe5, 0xbb, 0x9a, 0x2b,
|
||||
0x7f, 0x08, 0xe5, 0x68, 0x53, 0x87, 0x9e, 0x44, 0xc7, 0x9a, 0x98, 0x3a, 0xf3, 0x91, 0x55, 0xf8,
|
||||
0x04, 0xbd, 0x02, 0xd9, 0x0b, 0x8b, 0x83, 0xd5, 0xe2, 0x90, 0xbd, 0x6b, 0x11, 0x1c, 0x6a, 0x0a,
|
||||
0x71, 0x6d, 0xf9, 0x33, 0xc8, 0x32, 0xf0, 0xa1, 0x40, 0xc2, 0xda, 0x33, 0x82, 0xe8, 0xd2, 0x31,
|
||||
0xfa, 0x10, 0x40, 0x23, 0xc4, 0x31, 0x7a, 0x93, 0xc0, 0xf1, 0xce, 0x62, 0xf0, 0x3a, 0xf0, 0xf4,
|
||||
0x1a, 0x37, 0x04, 0x8a, 0x6d, 0x05, 0xa6, 0x21, 0x24, 0x0b, 0x39, 0x94, 0x4f, 0xa0, 0x1c, 0xb5,
|
||||
0x0d, 0x37, 0x4a, 0x37, 0x16, 0x34, 0x4a, 0x7d, 0x32, 0xe5, 0x53, 0xb1, 0x34, 0x6f, 0xc5, 0xb1,
|
||||
0x89, 0xfc, 0x79, 0x12, 0x0a, 0xdd, 0x4b, 0x71, 0xac, 0x63, 0xba, 0x40, 0x81, 0x69, 0x2a, 0xdc,
|
||||
0xf3, 0xe0, 0x6d, 0xa5, 0xb4, 0xdf, 0xac, 0x7a, 0xdb, 0x0f, 0xdc, 0xcc, 0xba, 0x55, 0xa9, 0xd7,
|
||||
0xb5, 0x13, 0x60, 0xf5, 0x06, 0x48, 0xfe, 0xa9, 0xa2, 0x15, 0x83, 0xa6, 0xeb, 0x0e, 0x76, 0x5d,
|
||||
0xb1, 0x36, 0x6f, 0xca, 0x9a, 0x8a, 0xd6, 0xa7, 0xa2, 0xab, 0x92, 0x56, 0xf8, 0x44, 0xd6, 0xa1,
|
||||
0x32, 0x93, 0xb6, 0xd0, 0x1b, 0x90, 0xb7, 0x27, 0x3d, 0xd5, 0xdb, 0x9e, 0x99, 0xe0, 0xf1, 0xd8,
|
||||
0xe3, 0xa4, 0x37, 0x32, 0xfa, 0x47, 0x78, 0xea, 0xfd, 0x18, 0x7b, 0xd2, 0x3b, 0xe2, 0xbb, 0xc8,
|
||||
0xdf, 0x92, 0x0a, 0xbf, 0xe5, 0x02, 0x0a, 0xde, 0xa1, 0x40, 0x3f, 0x0c, 0xc7, 0x89, 0xd7, 0x6a,
|
||||
0x8e, 0x4d, 0xa5, 0xc2, 0x7d, 0x28, 0x4c, 0x6e, 0xc1, 0xa6, 0x6b, 0x0c, 0x4c, 0xac, 0xab, 0x41,
|
||||
0xcd, 0xc2, 0xde, 0x56, 0x50, 0x2a, 0xfc, 0xc1, 0xb1, 0x57, 0xb0, 0xc8, 0xff, 0x4a, 0x42, 0xc1,
|
||||
0x0b, 0x58, 0xf4, 0x52, 0xe8, 0xdc, 0x95, 0x17, 0x74, 0x60, 0x3c, 0xc5, 0xa0, 0x2f, 0x18, 0xfd,
|
||||
0xad, 0xa9, 0xab, 0xff, 0xd6, 0xb8, 0x06, 0xaf, 0xd7, 0x69, 0xcf, 0x5c, 0xb9, 0xd3, 0xfe, 0x02,
|
||||
0x20, 0x62, 0x11, 0x6d, 0xa4, 0x5e, 0x58, 0xc4, 0x30, 0x07, 0x2a, 0xdf, 0x6c, 0xce, 0xa8, 0xaa,
|
||||
0xec, 0xc9, 0x5d, 0xf6, 0xe0, 0x94, 0xed, 0xfb, 0x2f, 0x92, 0x50, 0xf0, 0x73, 0xe3, 0x55, 0xdb,
|
||||
0x7c, 0xd7, 0x21, 0x27, 0xe0, 0x9f, 0xf7, 0xf9, 0xc4, 0xcc, 0xef, 0x38, 0x67, 0x42, 0x1d, 0xe7,
|
||||
0x3a, 0x14, 0xc6, 0x98, 0x68, 0x8c, 0x20, 0xf0, 0xb2, 0xd1, 0x9f, 0xdf, 0x7a, 0x1d, 0x8a, 0xa1,
|
||||
0x8e, 0x2b, 0x8d, 0xbc, 0x93, 0xd6, 0x07, 0xd5, 0x44, 0x3d, 0xff, 0xf9, 0x97, 0x37, 0xd3, 0x27,
|
||||
0xf8, 0x53, 0x7a, 0x66, 0x95, 0x56, 0xb3, 0xdd, 0x6a, 0x1e, 0x55, 0x93, 0xf5, 0xe2, 0xe7, 0x5f,
|
||||
0xde, 0xcc, 0x2b, 0x98, 0x35, 0x6e, 0x6e, 0xb5, 0x61, 0x23, 0xfc, 0x55, 0xa2, 0x19, 0x04, 0x41,
|
||||
0xf9, 0x9d, 0x3b, 0xa7, 0xc7, 0x87, 0xcd, 0x83, 0x6e, 0x4b, 0xbd, 0xdb, 0xe9, 0xb6, 0xaa, 0x49,
|
||||
0xf4, 0x28, 0x5c, 0x3b, 0x3e, 0x7c, 0xb7, 0xdd, 0x55, 0x9b, 0xc7, 0x87, 0xad, 0x93, 0xae, 0x7a,
|
||||
0xd0, 0xed, 0x1e, 0x34, 0x8f, 0xaa, 0xa9, 0xfd, 0x5f, 0x02, 0x54, 0x0e, 0x1a, 0xcd, 0x43, 0x9a,
|
||||
0xfd, 0x8c, 0xbe, 0x26, 0x1a, 0x63, 0x19, 0x56, 0xb5, 0x2f, 0xbd, 0xea, 0xad, 0x2f, 0xef, 0x0b,
|
||||
0xa2, 0xdb, 0x90, 0x65, 0x05, 0x3d, 0x5a, 0x7e, 0xf7, 0x5b, 0x5f, 0xd1, 0x28, 0xa4, 0x3f, 0x86,
|
||||
0x85, 0xc7, 0xd2, 0xcb, 0xe0, 0xfa, 0xf2, 0xbe, 0x21, 0x52, 0x40, 0x0a, 0x2a, 0xf2, 0xd5, 0x97,
|
||||
0xc3, 0xf5, 0x35, 0x7a, 0x89, 0xd4, 0x67, 0x50, 0x16, 0xac, 0xbe, 0x2c, 0xad, 0xaf, 0x01, 0x60,
|
||||
0xe8, 0x18, 0xf2, 0x5e, 0x25, 0xb7, 0xea, 0xfa, 0xb6, 0xbe, 0xb2, 0xcf, 0x47, 0x3f, 0x01, 0xaf,
|
||||
0xb8, 0x97, 0xdf, 0x45, 0xd7, 0x57, 0x34, 0x2d, 0xd1, 0x21, 0xe4, 0x04, 0xd7, 0x5d, 0x71, 0x25,
|
||||
0x5b, 0x5f, 0xd5, 0xb7, 0xa3, 0x9b, 0x16, 0xb4, 0x32, 0x56, 0xdf, 0xb0, 0xd7, 0xd7, 0xe8, 0xc7,
|
||||
0xa2, 0x3b, 0x00, 0xa1, 0xfa, 0x7a, 0x8d, 0xab, 0xf3, 0xfa, 0x3a, 0x7d, 0x56, 0xd4, 0x81, 0x82,
|
||||
0x5f, 0xee, 0xac, 0xbc, 0xc8, 0xae, 0xaf, 0x6e, 0x78, 0xa2, 0x7b, 0x50, 0x8a, 0xf2, 0xfc, 0xf5,
|
||||
0xae, 0xa7, 0xeb, 0x6b, 0x76, 0x32, 0xa9, 0xff, 0x28, 0xe9, 0x5f, 0xef, 0xba, 0xba, 0xbe, 0x66,
|
||||
0x63, 0x13, 0x7d, 0x0c, 0x9b, 0xf3, 0xa4, 0x7c, 0xfd, 0xdb, 0xeb, 0xfa, 0x15, 0x5a, 0x9d, 0x68,
|
||||
0x0c, 0x68, 0x01, 0x99, 0xbf, 0xc2, 0x65, 0x76, 0xfd, 0x2a, 0x9d, 0xcf, 0x46, 0xeb, 0xab, 0x07,
|
||||
0xdb, 0xc9, 0xaf, 0x1f, 0x6c, 0x27, 0xff, 0xf6, 0x60, 0x3b, 0xf9, 0xc5, 0xc3, 0xed, 0xc4, 0xd7,
|
||||
0x0f, 0xb7, 0x13, 0x7f, 0x7e, 0xb8, 0x9d, 0xf8, 0xe9, 0xf3, 0x03, 0x83, 0x0c, 0x27, 0xbd, 0xdd,
|
||||
0xbe, 0x35, 0xde, 0x0b, 0xff, 0xd3, 0x66, 0xd1, 0xbf, 0x7f, 0x7a, 0x39, 0x96, 0xa8, 0x5e, 0xfe,
|
||||
0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x4e, 0x2d, 0x07, 0xd8, 0x1d, 0x24, 0x00, 0x00,
|
||||
}
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
@@ -5635,6 +5663,25 @@ func (m *ResponseCheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.MempoolError) > 0 {
|
||||
i -= len(m.MempoolError)
|
||||
copy(dAtA[i:], m.MempoolError)
|
||||
i = encodeVarintTypes(dAtA, i, uint64(len(m.MempoolError)))
|
||||
i--
|
||||
dAtA[i] = 0x5a
|
||||
}
|
||||
if m.Priority != 0 {
|
||||
i = encodeVarintTypes(dAtA, i, uint64(m.Priority))
|
||||
i--
|
||||
dAtA[i] = 0x50
|
||||
}
|
||||
if len(m.Sender) > 0 {
|
||||
i -= len(m.Sender)
|
||||
copy(dAtA[i:], m.Sender)
|
||||
i = encodeVarintTypes(dAtA, i, uint64(len(m.Sender)))
|
||||
i--
|
||||
dAtA[i] = 0x4a
|
||||
}
|
||||
if len(m.Codespace) > 0 {
|
||||
i -= len(m.Codespace)
|
||||
copy(dAtA[i:], m.Codespace)
|
||||
@@ -7390,6 +7437,17 @@ func (m *ResponseCheckTx) Size() (n int) {
|
||||
if l > 0 {
|
||||
n += 1 + l + sovTypes(uint64(l))
|
||||
}
|
||||
l = len(m.Sender)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovTypes(uint64(l))
|
||||
}
|
||||
if m.Priority != 0 {
|
||||
n += 1 + sovTypes(uint64(m.Priority))
|
||||
}
|
||||
l = len(m.MempoolError)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovTypes(uint64(l))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
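Aside for readers of the generated code (not part of the diff): the single-byte keys written above for the new fields, 0x4a, 0x50 and 0x5a, follow the protobuf rule key = field_number<<3 | wire_type. This small standalone program verifies them.

package main

import "fmt"

// key computes a protobuf field key byte: field_number<<3 | wire_type.
func key(fieldNumber, wireType uint64) uint64 {
    return fieldNumber<<3 | wireType
}

func main() {
    fmt.Printf("Sender       (field 9,  length-delimited): %#x\n", key(9, 2))  // 0x4a
    fmt.Printf("Priority     (field 10, varint):           %#x\n", key(10, 0)) // 0x50
    fmt.Printf("MempoolError (field 11, length-delimited): %#x\n", key(11, 2)) // 0x5a
}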
@@ -11929,6 +11987,89 @@ func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
m.Codespace = string(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
case 9:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Sender", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowTypes
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthTypes
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthTypes
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Sender = string(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
case 10:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Priority", wireType)
|
||||
}
|
||||
m.Priority = 0
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowTypes
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.Priority |= int64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
case 11:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field MempoolError", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowTypes
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthTypes
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthTypes
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.MempoolError = string(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipTypes(dAtA[iNdEx:])
|
||||
|
||||
@@ -8,35 +8,34 @@ There are four different behaviours a reactor can report.
|
||||
|
||||
1. bad message
|
||||
|
||||
type badMessage struct {
|
||||
explanation string
|
||||
}
|
||||
type badMessage struct {
|
||||
explanation string
|
||||
}
|
||||
|
||||
This message will request the peer be stopped for an error
|
||||
# This message will request the peer be stopped for an error
|
||||
|
||||
2. message out of order
|
||||
|
||||
type messageOutOfOrder struct {
|
||||
explanation string
|
||||
}
|
||||
type messageOutOfOrder struct {
|
||||
explanation string
|
||||
}
|
||||
|
||||
This message will request the peer be stopped for an error
|
||||
# This message will request the peer be stopped for an error
|
||||
|
||||
3. consensus Vote
|
||||
|
||||
type consensusVote struct {
|
||||
explanation string
|
||||
}
|
||||
type consensusVote struct {
|
||||
explanation string
|
||||
}
|
||||
|
||||
This message will request the peer be marked as good
|
||||
# This message will request the peer be marked as good
|
||||
|
||||
4. block part
|
||||
|
||||
type blockPart struct {
|
||||
explanation string
|
||||
}
|
||||
type blockPart struct {
|
||||
explanation string
|
||||
}
|
||||
|
||||
This message will request the peer be marked as good
|
||||
|
||||
*/
|
||||
package behaviour
|
||||
|
||||
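Illustrative aside (not part of the diff): a hedged sketch of reporting these behaviours through the package's Reporter. The constructor and reporter names (NewMockReporter, BadMessage, ConsensusVote) are assumed from the behaviour package; treat the exact signatures as assumptions.

package main

import (
    "fmt"

    "github.com/tendermint/tendermint/behaviour"
    "github.com/tendermint/tendermint/p2p"
)

func main() {
    reporter := behaviour.NewMockReporter()
    peerID := p2p.ID("deadbeefcafe")

    // Negative report: requests that the peer be stopped for an error.
    if err := reporter.Report(behaviour.BadMessage(peerID, "malformed block part")); err != nil {
        fmt.Println("report failed:", err)
    }

    // Positive report: requests that the peer be marked as good.
    if err := reporter.Report(behaviour.ConsensusVote(peerID, "relayed a valid vote")); err != nil {
        fmt.Println("report failed:", err)
    }
}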
@@ -6,6 +6,7 @@ import (
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
@@ -19,58 +20,6 @@ const (
|
||||
BlockResponseMessageFieldKeySize
|
||||
)
|
||||
|
||||
// EncodeMsg encodes a Protobuf message
|
||||
func EncodeMsg(pb proto.Message) ([]byte, error) {
|
||||
msg := bcproto.Message{}
|
||||
|
||||
switch pb := pb.(type) {
|
||||
case *bcproto.BlockRequest:
|
||||
msg.Sum = &bcproto.Message_BlockRequest{BlockRequest: pb}
|
||||
case *bcproto.BlockResponse:
|
||||
msg.Sum = &bcproto.Message_BlockResponse{BlockResponse: pb}
|
||||
case *bcproto.NoBlockResponse:
|
||||
msg.Sum = &bcproto.Message_NoBlockResponse{NoBlockResponse: pb}
|
||||
case *bcproto.StatusRequest:
|
||||
msg.Sum = &bcproto.Message_StatusRequest{StatusRequest: pb}
|
||||
case *bcproto.StatusResponse:
|
||||
msg.Sum = &bcproto.Message_StatusResponse{StatusResponse: pb}
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown message type %T", pb)
|
||||
}
|
||||
|
||||
bz, err := proto.Marshal(&msg)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to marshal %T: %w", pb, err)
|
||||
}
|
||||
|
||||
return bz, nil
|
||||
}
|
||||
|
||||
// DecodeMsg decodes a Protobuf message.
|
||||
func DecodeMsg(bz []byte) (proto.Message, error) {
|
||||
pb := &bcproto.Message{}
|
||||
|
||||
err := proto.Unmarshal(bz, pb)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch msg := pb.Sum.(type) {
|
||||
case *bcproto.Message_BlockRequest:
|
||||
return msg.BlockRequest, nil
|
||||
case *bcproto.Message_BlockResponse:
|
||||
return msg.BlockResponse, nil
|
||||
case *bcproto.Message_NoBlockResponse:
|
||||
return msg.NoBlockResponse, nil
|
||||
case *bcproto.Message_StatusRequest:
|
||||
return msg.StatusRequest, nil
|
||||
case *bcproto.Message_StatusResponse:
|
||||
return msg.StatusResponse, nil
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown message type %T", msg)
|
||||
}
|
||||
}
|
||||
|
||||
// ValidateMsg validates a message.
|
||||
func ValidateMsg(pb proto.Message) error {
|
||||
if pb == nil {
|
||||
@@ -108,3 +57,31 @@ func ValidateMsg(pb proto.Message) error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// EncodeMsg encodes a Protobuf message
|
||||
//
|
||||
// Deprecated: Will be removed in v0.37.
|
||||
func EncodeMsg(pb proto.Message) ([]byte, error) {
|
||||
if um, ok := pb.(p2p.Wrapper); ok {
|
||||
pb = um.Wrap()
|
||||
}
|
||||
bz, err := proto.Marshal(pb)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to marshal %T: %w", pb, err)
|
||||
}
|
||||
|
||||
return bz, nil
|
||||
}
|
||||
|
||||
// DecodeMsg decodes a Protobuf message.
|
||||
//
|
||||
// Deprecated: Will be removed in v0.37.
|
||||
func DecodeMsg(bz []byte) (proto.Message, error) {
|
||||
pb := &bcproto.Message{}
|
||||
|
||||
err := proto.Unmarshal(bz, pb)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return pb.Unwrap()
|
||||
}
|
||||
|
||||
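Illustrative aside (not part of the diff): round-tripping a blockchain message through the now-deprecated helpers above. It assumes that, after this change, bcproto.BlockRequest participates in the Wrap/Unwrap plumbing so DecodeMsg returns the concrete request type.

package main

import (
    "fmt"
    "log"

    bc "github.com/tendermint/tendermint/blockchain"
    bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
)

func main() {
    // EncodeMsg wraps the request in the bcproto.Message oneof before
    // marshalling; DecodeMsg unmarshals and hands back the unwrapped type.
    bz, err := bc.EncodeMsg(&bcproto.BlockRequest{Height: 10})
    if err != nil {
        log.Fatal(err)
    }

    msg, err := bc.DecodeMsg(bz)
    if err != nil {
        log.Fatal(err)
    }

    if req, ok := msg.(*bcproto.BlockRequest); ok {
        fmt.Println("decoded BlockRequest for height", req.Height)
    }
}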
@@ -78,7 +78,7 @@ func TestBcStatusResponseMessageValidateBasic(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// nolint:lll // ignore line length in tests
|
||||
//nolint:lll // ignore line length in tests
|
||||
func TestBlockchainMessageVectors(t *testing.T) {
|
||||
block := types.MakeBlock(int64(3), []types.Tx{types.Tx("Hello World")}, nil, nil)
|
||||
block.Version.Block = 11 // overwrite updated protocol version
|
||||
|
||||
@@ -32,6 +32,7 @@ const (
|
||||
maxTotalRequesters = 600
|
||||
maxPendingRequests = maxTotalRequesters
|
||||
maxPendingRequestsPerPeer = 20
|
||||
requestRetrySeconds = 30
|
||||
|
||||
// Minimum recv rate to ensure we're receiving blocks from a peer fast
|
||||
// enough. If a peer is not sending us data at least that rate, we
|
||||
@@ -410,6 +411,7 @@ func (pool *BlockPool) sendError(err error, peerID p2p.ID) {
|
||||
}
|
||||
|
||||
// for debugging purposes
|
||||
//
|
||||
//nolint:unused
|
||||
func (pool *BlockPool) debug() string {
|
||||
pool.mtx.Lock()
|
||||
@@ -601,7 +603,7 @@ OUTER_LOOP:
|
||||
}
|
||||
peer = bpr.pool.pickIncrAvailablePeer(bpr.height)
|
||||
if peer == nil {
|
||||
// log.Info("No peers available", "height", height)
|
||||
bpr.Logger.Debug("No peers currently available; will retry shortly", "height", bpr.height)
|
||||
time.Sleep(requestIntervalMS * time.Millisecond)
|
||||
continue PICK_PEER_LOOP
|
||||
}
|
||||
@@ -611,6 +613,7 @@ OUTER_LOOP:
|
||||
bpr.peerID = peer.id
|
||||
bpr.mtx.Unlock()
|
||||
|
||||
to := time.NewTimer(requestRetrySeconds * time.Second)
|
||||
// Send request and wait.
|
||||
bpr.pool.sendRequest(bpr.height, peer.id)
|
||||
WAIT_LOOP:
|
||||
@@ -623,6 +626,11 @@ OUTER_LOOP:
|
||||
return
|
||||
case <-bpr.Quit():
|
||||
return
|
||||
case <-to.C:
|
||||
bpr.Logger.Debug("Retrying block request after timeout", "height", bpr.height, "peer", bpr.peerID)
|
||||
// Simulate a redo
|
||||
bpr.reset()
|
||||
continue OUTER_LOOP
|
||||
case peerID := <-bpr.redoCh:
|
||||
if peerID == bpr.peerID {
|
||||
bpr.reset()
|
||||
|
||||
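Illustrative aside (not part of the diff): the retry behaviour the block requester gains above, reduced to a plain standard-library sketch. A timer is armed alongside each request; if it fires before a response or a redo signal arrives, the request is issued again.

package main

import (
    "fmt"
    "time"
)

// requestWithRetry (re)issues a request and waits for a response, retrying
// whenever the timer fires or a redo is requested, up to `attempts` times.
func requestWithRetry(send func(), responses <-chan string, redo <-chan struct{}, retryEvery time.Duration, attempts int) (string, bool) {
    for i := 0; i < attempts; i++ {
        send()
        to := time.NewTimer(retryEvery)
        select {
        case res := <-responses:
            to.Stop()
            return res, true
        case <-redo:
            to.Stop()
            // the pool asked for a redo; loop and send again
        case <-to.C:
            // timed out waiting; loop and send again
        }
    }
    return "", false
}

func main() {
    responses := make(chan string, 1)
    redo := make(chan struct{})
    send := func() { fmt.Println("requesting block...") }

    go func() {
        time.Sleep(75 * time.Millisecond)
        responses <- "block at height 42"
    }()

    if res, ok := requestWithRetry(send, responses, redo, 50*time.Millisecond, 5); ok {
        fmt.Println("received:", res)
    }
}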
@@ -5,6 +5,8 @@ import (
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
|
||||
bc "github.com/tendermint/tendermint/blockchain"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
@@ -144,21 +146,20 @@ func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
|
||||
SendQueueCapacity: 1000,
|
||||
RecvBufferCapacity: 50 * 4096,
|
||||
RecvMessageCapacity: bc.MaxMsgSize,
|
||||
MessageType: &bcproto.Message{},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// AddPeer implements Reactor by sending our state to peer.
|
||||
func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) {
|
||||
msgBytes, err := bc.EncodeMsg(&bcproto.StatusResponse{
|
||||
Base: bcR.store.Base(),
|
||||
Height: bcR.store.Height()})
|
||||
if err != nil {
|
||||
bcR.Logger.Error("could not convert msg to protobuf", "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
peer.Send(BlockchainChannel, msgBytes)
|
||||
p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
|
||||
ChannelID: BlockchainChannel,
|
||||
Message: &bcproto.StatusResponse{
|
||||
Base: bcR.store.Base(),
|
||||
Height: bcR.store.Height(),
|
||||
},
|
||||
}, bcR.Logger)
|
||||
// it's OK if send fails. will try later in poolRoutine
|
||||
|
||||
// peer is added to the pool once we receive the first
|
||||
@@ -182,75 +183,73 @@ func (bcR *BlockchainReactor) respondToPeer(msg *bcproto.BlockRequest,
|
||||
bcR.Logger.Error("could not convert msg to protobuf", "err", err)
|
||||
return false
|
||||
}
|
||||
|
||||
msgBytes, err := bc.EncodeMsg(&bcproto.BlockResponse{Block: bl})
|
||||
if err != nil {
|
||||
bcR.Logger.Error("could not marshal msg", "err", err)
|
||||
return false
|
||||
}
|
||||
|
||||
return src.TrySend(BlockchainChannel, msgBytes)
|
||||
return p2p.TrySendEnvelopeShim(src, p2p.Envelope{ //nolint: staticcheck
|
||||
ChannelID: BlockchainChannel,
|
||||
Message: &bcproto.BlockResponse{Block: bl},
|
||||
}, bcR.Logger)
|
||||
}
|
||||
|
||||
bcR.Logger.Info("Peer asking for a block we don't have", "src", src, "height", msg.Height)
|
||||
|
||||
msgBytes, err := bc.EncodeMsg(&bcproto.NoBlockResponse{Height: msg.Height})
|
||||
if err != nil {
|
||||
bcR.Logger.Error("could not convert msg to protobuf", "err", err)
|
||||
return false
|
||||
}
|
||||
|
||||
return src.TrySend(BlockchainChannel, msgBytes)
|
||||
return p2p.TrySendEnvelopeShim(src, p2p.Envelope{ //nolint: staticcheck
|
||||
ChannelID: BlockchainChannel,
|
||||
Message: &bcproto.NoBlockResponse{Height: msg.Height},
|
||||
}, bcR.Logger)
|
||||
}
|
||||
|
||||
// Receive implements Reactor by handling 4 types of messages (look below).
|
||||
func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
|
||||
msg, err := bc.DecodeMsg(msgBytes)
|
||||
if err != nil {
|
||||
bcR.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err)
|
||||
bcR.Switch.StopPeerForError(src, err)
|
||||
func (bcR *BlockchainReactor) ReceiveEnvelope(e p2p.Envelope) {
|
||||
if err := bc.ValidateMsg(e.Message); err != nil {
|
||||
bcR.Logger.Error("Peer sent us invalid msg", "peer", e.Src, "msg", e.Message, "err", err)
|
||||
bcR.Switch.StopPeerForError(e.Src, err)
|
||||
return
|
||||
}
|
||||
|
||||
if err = bc.ValidateMsg(msg); err != nil {
|
||||
bcR.Logger.Error("Peer sent us invalid msg", "peer", src, "msg", msg, "err", err)
|
||||
bcR.Switch.StopPeerForError(src, err)
|
||||
return
|
||||
}
|
||||
bcR.Logger.Debug("Receive", "e.Src", e.Src, "chID", e.ChannelID, "msg", e.Message)
|
||||
|
||||
bcR.Logger.Debug("Receive", "src", src, "chID", chID, "msg", msg)
|
||||
|
||||
switch msg := msg.(type) {
|
||||
switch msg := e.Message.(type) {
|
||||
case *bcproto.BlockRequest:
|
||||
bcR.respondToPeer(msg, src)
|
||||
bcR.respondToPeer(msg, e.Src)
|
||||
case *bcproto.BlockResponse:
|
||||
bi, err := types.BlockFromProto(msg.Block)
|
||||
if err != nil {
|
||||
bcR.Logger.Error("Block content is invalid", "err", err)
|
||||
return
|
||||
}
|
||||
bcR.pool.AddBlock(src.ID(), bi, len(msgBytes))
|
||||
bcR.pool.AddBlock(e.Src.ID(), bi, msg.Block.Size())
|
||||
case *bcproto.StatusRequest:
|
||||
// Send peer our state.
|
||||
msgBytes, err := bc.EncodeMsg(&bcproto.StatusResponse{
|
||||
Height: bcR.store.Height(),
|
||||
Base: bcR.store.Base(),
|
||||
})
|
||||
if err != nil {
|
||||
bcR.Logger.Error("could not convert msg to protobuf", "err", err)
|
||||
return
|
||||
}
|
||||
src.TrySend(BlockchainChannel, msgBytes)
|
||||
p2p.TrySendEnvelopeShim(e.Src, p2p.Envelope{ //nolint: staticcheck
|
||||
ChannelID: BlockchainChannel,
|
||||
Message: &bcproto.StatusResponse{
|
||||
Height: bcR.store.Height(),
|
||||
Base: bcR.store.Base(),
|
||||
},
|
||||
}, bcR.Logger)
|
||||
case *bcproto.StatusResponse:
|
||||
// Got a peer status. Unverified.
|
||||
bcR.pool.SetPeerRange(src.ID(), msg.Base, msg.Height)
|
||||
bcR.pool.SetPeerRange(e.Src.ID(), msg.Base, msg.Height)
|
||||
case *bcproto.NoBlockResponse:
|
||||
bcR.Logger.Debug("Peer does not have requested block", "peer", src, "height", msg.Height)
|
||||
bcR.Logger.Debug("Peer does not have requested block", "peer", e.Src, "height", msg.Height)
|
||||
default:
|
||||
bcR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
|
||||
}
|
||||
}
|
||||
|
||||
func (bcR *BlockchainReactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {
|
||||
msg := &bcproto.Message{}
|
||||
err := proto.Unmarshal(msgBytes, msg)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
uw, err := msg.Unwrap()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
bcR.ReceiveEnvelope(p2p.Envelope{
|
||||
ChannelID: chID,
|
||||
Src: peer,
|
||||
Message: uw,
|
||||
})
|
||||
}
|
||||
|
||||
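Illustrative aside (not part of the diff): the migration pattern the reactor follows above, reduced to a skeleton. ReceiveEnvelope is the typed entry point, and the legacy byte-based Receive survives only as a thin shim that unmarshals the channel's wrapper message, unwraps it, and forwards. The reactor name is a placeholder.

package myreactor

import (
    "github.com/gogo/protobuf/proto"

    "github.com/tendermint/tendermint/p2p"
    bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
)

// Reactor is a stripped-down stand-in; only the two receive paths are shown.
type Reactor struct {
    p2p.BaseReactor
}

// ReceiveEnvelope handles the already-decoded message.
func (r *Reactor) ReceiveEnvelope(e p2p.Envelope) {
    switch msg := e.Message.(type) {
    case *bcproto.BlockRequest:
        r.Logger.Debug("block request", "peer", e.Src.ID(), "height", msg.Height)
    case *bcproto.StatusRequest:
        r.Logger.Debug("status request", "peer", e.Src.ID())
    default:
        r.Logger.Error("unknown message type", "msg", msg)
    }
}

// Receive keeps the legacy byte API working by unmarshalling into the
// channel's wrapper message and forwarding the unwrapped payload.
func (r *Reactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {
    wrapper := &bcproto.Message{}
    if err := proto.Unmarshal(msgBytes, wrapper); err != nil {
        r.Logger.Error("failed to unmarshal message", "err", err)
        return
    }
    msg, err := wrapper.Unwrap()
    if err != nil {
        r.Logger.Error("failed to unwrap message", "err", err)
        return
    }
    r.ReceiveEnvelope(p2p.Envelope{ChannelID: chID, Src: peer, Message: msg})
}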
// Handle messages from the poolReactor telling the reactor what to do.
|
||||
// NOTE: Don't sleep in the FOR_LOOP or otherwise slow it down!
|
||||
func (bcR *BlockchainReactor) poolRoutine(stateSynced bool) {
|
||||
@@ -286,13 +285,10 @@ func (bcR *BlockchainReactor) poolRoutine(stateSynced bool) {
|
||||
if peer == nil {
|
||||
continue
|
||||
}
|
||||
msgBytes, err := bc.EncodeMsg(&bcproto.BlockRequest{Height: request.Height})
|
||||
if err != nil {
|
||||
bcR.Logger.Error("could not convert msg to proto", "err", err)
|
||||
continue
|
||||
}
|
||||
|
||||
queued := peer.TrySend(BlockchainChannel, msgBytes)
|
||||
queued := p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
|
||||
ChannelID: BlockchainChannel,
|
||||
Message: &bcproto.BlockRequest{Height: request.Height},
|
||||
}, bcR.Logger)
|
||||
if !queued {
|
||||
bcR.Logger.Debug("Send queue is full, drop block request", "peer", peer.ID(), "height", request.Height)
|
||||
}
|
||||
@@ -304,7 +300,7 @@ func (bcR *BlockchainReactor) poolRoutine(stateSynced bool) {
|
||||
|
||||
case <-statusUpdateTicker.C:
|
||||
// ask for status updates
|
||||
go bcR.BroadcastStatusRequest() // nolint: errcheck
|
||||
go bcR.BroadcastStatusRequest() //nolint: errcheck
|
||||
|
||||
}
|
||||
}
|
||||
@@ -369,6 +365,12 @@ FOR_LOOP:
|
||||
// currently necessary.
|
||||
err := state.Validators.VerifyCommitLight(
|
||||
chainID, firstID, first.Height, second.LastCommit)
|
||||
|
||||
if err == nil {
|
||||
// validate the block before we persist it
|
||||
err = bcR.blockExec.ValidateBlock(state, first)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
bcR.Logger.Error("Error in validation", "err", err)
|
||||
peerID := bcR.pool.RedoRequest(first.Height)
|
||||
@@ -386,29 +388,29 @@ FOR_LOOP:
|
||||
bcR.Switch.StopPeerForError(peer2, fmt.Errorf("blockchainReactor validation error: %v", err))
|
||||
}
|
||||
continue FOR_LOOP
|
||||
} else {
|
||||
bcR.pool.PopRequest()
|
||||
|
||||
// TODO: batch saves so we dont persist to disk every block
|
||||
bcR.store.SaveBlock(first, firstParts, second.LastCommit)
|
||||
|
||||
// TODO: same thing for app - but we would need a way to
|
||||
// get the hash without persisting the state
|
||||
var err error
|
||||
state, _, err = bcR.blockExec.ApplyBlock(state, firstID, first)
|
||||
if err != nil {
|
||||
// TODO This is bad, are we zombie?
|
||||
panic(fmt.Sprintf("Failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
|
||||
}
|
||||
blocksSynced++
|
||||
|
||||
if blocksSynced%100 == 0 {
|
||||
lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
|
||||
bcR.Logger.Info("Fast Sync Rate", "height", bcR.pool.height,
|
||||
"max_peer_height", bcR.pool.MaxPeerHeight(), "blocks/s", lastRate)
|
||||
lastHundred = time.Now()
|
||||
}
|
||||
}
|
||||
|
||||
bcR.pool.PopRequest()
|
||||
|
||||
// TODO: batch saves so we dont persist to disk every block
|
||||
bcR.store.SaveBlock(first, firstParts, second.LastCommit)
|
||||
|
||||
// TODO: same thing for app - but we would need a way to
|
||||
// get the hash without persisting the state
|
||||
state, _, err = bcR.blockExec.ApplyBlock(state, firstID, first)
|
||||
if err != nil {
|
||||
// TODO This is bad, are we zombie?
|
||||
panic(fmt.Sprintf("Failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
|
||||
}
|
||||
blocksSynced++
|
||||
|
||||
if blocksSynced%100 == 0 {
|
||||
lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
|
||||
bcR.Logger.Info("Fast Sync Rate", "height", bcR.pool.height,
|
||||
"max_peer_height", bcR.pool.MaxPeerHeight(), "blocks/s", lastRate)
|
||||
lastHundred = time.Now()
|
||||
}
|
||||
|
||||
continue FOR_LOOP
|
||||
|
||||
case <-bcR.Quit():
|
||||
@@ -419,13 +421,9 @@ FOR_LOOP:
|
||||
|
||||
// BroadcastStatusRequest broadcasts `BlockStore` base and height.
|
||||
func (bcR *BlockchainReactor) BroadcastStatusRequest() error {
|
||||
bm, err := bc.EncodeMsg(&bcproto.StatusRequest{})
|
||||
if err != nil {
|
||||
bcR.Logger.Error("could not convert msg to proto", "err", err)
|
||||
return fmt.Errorf("could not convert msg to proto: %w", err)
|
||||
}
|
||||
|
||||
bcR.Switch.Broadcast(BlockchainChannel, bm)
|
||||
|
||||
bcR.Switch.BroadcastEnvelope(p2p.Envelope{
|
||||
ChannelID: BlockchainChannel,
|
||||
Message: &bcproto.StatusRequest{},
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
@@ -17,6 +18,7 @@ import (
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/mempool/mock"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
|
||||
"github.com/tendermint/tendermint/proxy"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/store"
|
||||
@@ -70,7 +72,9 @@ func newBlockchainReactor(
|
||||
|
||||
blockDB := dbm.NewMemDB()
|
||||
stateDB := dbm.NewMemDB()
|
||||
stateStore := sm.NewStore(stateDB)
|
||||
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
|
||||
DiscardABCIResponses: false,
|
||||
})
|
||||
blockStore := store.NewBlockStore(blockDB)
|
||||
|
||||
state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
|
||||
@@ -83,7 +87,9 @@ func newBlockchainReactor(
|
||||
// pool.height is determined from the store.
|
||||
fastSync := true
|
||||
db := dbm.NewMemDB()
|
||||
stateStore = sm.NewStore(db)
|
||||
stateStore = sm.NewStore(db, sm.StoreOptions{
|
||||
DiscardABCIResponses: false,
|
||||
})
|
||||
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(),
|
||||
mock.Mempool{}, sm.EmptyEvidencePool{})
|
||||
if err = stateStore.Save(state); err != nil {
|
||||
@@ -188,6 +194,25 @@ func TestNoBlockResponse(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestLegacyReactorReceiveBasic(t *testing.T) {
|
||||
config = cfg.ResetTestRoot("blockchain_reactor_test")
|
||||
defer os.RemoveAll(config.RootDir)
|
||||
genDoc, privVals := randGenesisDoc(1, false, 30)
|
||||
reactor := newBlockchainReactor(log.TestingLogger(), genDoc, privVals, 10).reactor
|
||||
peer := p2p.CreateRandomPeer(false)
|
||||
|
||||
reactor.InitPeer(peer)
|
||||
reactor.AddPeer(peer)
|
||||
m := &bcproto.StatusRequest{}
|
||||
wm := m.Wrap()
|
||||
msg, err := proto.Marshal(wm)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.NotPanics(t, func() {
|
||||
reactor.Receive(BlockchainChannel, peer, msg)
|
||||
})
|
||||
}
|
||||
|
||||
// NOTE: This is too hard to test without
|
||||
// an easy way to add test peer to switch
|
||||
// or without significant refactoring of the module.
|
||||
|
||||
@@ -2,9 +2,10 @@ package v1
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
|
||||
"github.com/tendermint/tendermint/behaviour"
|
||||
bc "github.com/tendermint/tendermint/blockchain"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
@@ -172,21 +173,20 @@ func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
|
||||
SendQueueCapacity: 2000,
|
||||
RecvBufferCapacity: 50 * 4096,
|
||||
RecvMessageCapacity: bc.MaxMsgSize,
|
||||
MessageType: &bcproto.Message{},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// AddPeer implements Reactor by sending our state to peer.
|
||||
func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) {
|
||||
msgBytes, err := bc.EncodeMsg(&bcproto.StatusResponse{
|
||||
Base: bcR.store.Base(),
|
||||
Height: bcR.store.Height(),
|
||||
})
|
||||
if err != nil {
|
||||
bcR.Logger.Error("could not convert msg to protobuf", "err", err)
|
||||
return
|
||||
}
|
||||
peer.Send(BlockchainChannel, msgBytes)
|
||||
p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
|
||||
ChannelID: BlockchainChannel,
|
||||
Message: &bcproto.StatusResponse{
|
||||
Base: bcR.store.Base(),
|
||||
Height: bcR.store.Height(),
|
||||
},
|
||||
}, bcR.Logger)
|
||||
// it's OK if send fails. will try later in poolRoutine
|
||||
|
||||
// peer is added to the pool once we receive the first
|
||||
@@ -206,35 +206,28 @@ func (bcR *BlockchainReactor) sendBlockToPeer(msg *bcproto.BlockRequest,
|
||||
bcR.Logger.Error("Could not send block message to peer", "err", err)
|
||||
return false
|
||||
}
|
||||
msgBytes, err := bc.EncodeMsg(&bcproto.BlockResponse{Block: pbbi})
|
||||
if err != nil {
|
||||
bcR.Logger.Error("unable to marshal msg", "err", err)
|
||||
return false
|
||||
}
|
||||
return src.TrySend(BlockchainChannel, msgBytes)
|
||||
return p2p.TrySendEnvelopeShim(src, p2p.Envelope{ //nolint: staticcheck
|
||||
ChannelID: BlockchainChannel,
|
||||
Message: &bcproto.BlockResponse{Block: pbbi},
|
||||
}, bcR.Logger)
|
||||
}
|
||||
|
||||
bcR.Logger.Info("peer asking for a block we don't have", "src", src, "height", msg.Height)
|
||||
|
||||
msgBytes, err := bc.EncodeMsg(&bcproto.NoBlockResponse{Height: msg.Height})
|
||||
if err != nil {
|
||||
bcR.Logger.Error("unable to marshal msg", "err", err)
|
||||
return false
|
||||
}
|
||||
return src.TrySend(BlockchainChannel, msgBytes)
|
||||
return p2p.TrySendEnvelopeShim(src, p2p.Envelope{ //nolint: staticcheck
|
||||
ChannelID: BlockchainChannel,
|
||||
Message: &bcproto.NoBlockResponse{Height: msg.Height},
|
||||
}, bcR.Logger)
|
||||
}
|
||||
|
||||
func (bcR *BlockchainReactor) sendStatusResponseToPeer(msg *bcproto.StatusRequest, src p2p.Peer) (queued bool) {
|
||||
msgBytes, err := bc.EncodeMsg(&bcproto.StatusResponse{
|
||||
Base: bcR.store.Base(),
|
||||
Height: bcR.store.Height(),
|
||||
})
|
||||
if err != nil {
|
||||
bcR.Logger.Error("unable to marshal msg", "err", err)
|
||||
return false
|
||||
}
|
||||
|
||||
return src.TrySend(BlockchainChannel, msgBytes)
|
||||
return p2p.TrySendEnvelopeShim(src, p2p.Envelope{ //nolint: staticcheck
|
||||
ChannelID: BlockchainChannel,
|
||||
Message: &bcproto.StatusResponse{
|
||||
Base: bcR.store.Base(),
|
||||
Height: bcR.store.Height(),
|
||||
},
|
||||
}, bcR.Logger)
|
||||
}
|
||||
|
||||
// RemovePeer implements Reactor by removing peer from the pool.
|
||||
@@ -250,34 +243,27 @@ func (bcR *BlockchainReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
|
||||
}
|
||||
|
||||
// Receive implements Reactor by handling 4 types of messages (look below).
|
||||
func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
|
||||
msg, err := bc.DecodeMsg(msgBytes)
|
||||
if err != nil {
|
||||
bcR.Logger.Error("error decoding message", "src", src, "chId", chID, "err", err)
|
||||
_ = bcR.swReporter.Report(behaviour.BadMessage(src.ID(), err.Error()))
|
||||
func (bcR *BlockchainReactor) ReceiveEnvelope(e p2p.Envelope) {
|
||||
if err := bc.ValidateMsg(e.Message); err != nil {
|
||||
bcR.Logger.Error("peer sent us invalid msg", "peer", e.Src, "msg", e.Message, "err", err)
|
||||
_ = bcR.swReporter.Report(behaviour.BadMessage(e.Src.ID(), err.Error()))
|
||||
return
|
||||
}
|
||||
|
||||
if err = bc.ValidateMsg(msg); err != nil {
|
||||
bcR.Logger.Error("peer sent us invalid msg", "peer", src, "msg", msg, "err", err)
|
||||
_ = bcR.swReporter.Report(behaviour.BadMessage(src.ID(), err.Error()))
|
||||
return
|
||||
}
|
||||
bcR.Logger.Debug("Receive", "src", e.Src, "chID", e.ChannelID, "msg", e.Message)
|
||||
|
||||
bcR.Logger.Debug("Receive", "src", src, "chID", chID, "msg", msg)
|
||||
|
||||
switch msg := msg.(type) {
|
||||
switch msg := e.Message.(type) {
|
||||
case *bcproto.BlockRequest:
|
||||
if queued := bcR.sendBlockToPeer(msg, src); !queued {
|
||||
if queued := bcR.sendBlockToPeer(msg, e.Src); !queued {
|
||||
// Unfortunately not queued since the queue is full.
|
||||
bcR.Logger.Error("Could not send block message to peer", "src", src, "height", msg.Height)
|
||||
bcR.Logger.Error("Could not send block message to peer", "src", e.Src, "height", msg.Height)
|
||||
}
|
||||
|
||||
case *bcproto.StatusRequest:
|
||||
// Send peer our state.
|
||||
if queued := bcR.sendStatusResponseToPeer(msg, src); !queued {
|
||||
if queued := bcR.sendStatusResponseToPeer(msg, e.Src); !queued {
|
||||
// Unfortunately not queued since the queue is full.
|
||||
bcR.Logger.Error("Could not send status message to peer", "src", src)
|
||||
bcR.Logger.Error("Could not send status message to peer", "src", e.Src)
|
||||
}
|
||||
|
||||
case *bcproto.BlockResponse:
|
||||
@@ -289,23 +275,23 @@ func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
|
||||
msgForFSM := bcReactorMessage{
|
||||
event: blockResponseEv,
|
||||
data: bReactorEventData{
|
||||
peerID: src.ID(),
|
||||
peerID: e.Src.ID(),
|
||||
height: bi.Height,
|
||||
block: bi,
|
||||
length: len(msgBytes),
|
||||
length: msg.Size(),
|
||||
},
|
||||
}
|
||||
bcR.Logger.Info("Received", "src", src, "height", bi.Height)
|
||||
bcR.Logger.Info("Received", "src", e.Src, "height", bi.Height)
|
||||
bcR.messagesForFSMCh <- msgForFSM
|
||||
case *bcproto.NoBlockResponse:
|
||||
msgForFSM := bcReactorMessage{
|
||||
event: noBlockResponseEv,
|
||||
data: bReactorEventData{
|
||||
peerID: src.ID(),
|
||||
peerID: e.Src.ID(),
|
||||
height: msg.Height,
|
||||
},
|
||||
}
|
||||
bcR.Logger.Debug("Peer does not have requested block", "peer", src, "height", msg.Height)
|
||||
bcR.Logger.Debug("Peer does not have requested block", "peer", e.Src, "height", msg.Height)
|
||||
bcR.messagesForFSMCh <- msgForFSM
|
||||
|
||||
case *bcproto.StatusResponse:
|
||||
@@ -313,18 +299,35 @@ func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
|
||||
msgForFSM := bcReactorMessage{
|
||||
event: statusResponseEv,
|
||||
data: bReactorEventData{
|
||||
peerID: src.ID(),
|
||||
peerID: e.Src.ID(),
|
||||
height: msg.Height,
|
||||
length: len(msgBytes),
|
||||
length: msg.Size(),
|
||||
},
|
||||
}
|
||||
bcR.messagesForFSMCh <- msgForFSM
|
||||
|
||||
default:
|
||||
bcR.Logger.Error(fmt.Sprintf("unknown message type %v", reflect.TypeOf(msg)))
|
||||
bcR.Logger.Error(fmt.Sprintf("unknown message type %T", msg))
|
||||
}
|
||||
}
|
||||
|
||||
func (bcR *BlockchainReactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {
|
||||
msg := &bcproto.Message{}
|
||||
err := proto.Unmarshal(msgBytes, msg)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
uw, err := msg.Unwrap()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
bcR.ReceiveEnvelope(p2p.Envelope{
|
||||
ChannelID: chID,
|
||||
Src: peer,
|
||||
Message: uw,
|
||||
})
|
||||
}
|
||||
|
||||
// processBlocksRoutine processes blocks until signaled to stop over the stopProcessing channel
|
||||
func (bcR *BlockchainReactor) processBlocksRoutine(stopProcessing chan struct{}) {
|
||||
|
||||
@@ -492,11 +495,10 @@ func (bcR *BlockchainReactor) processBlock() error {
|
||||
// Implements bcRNotifier
|
||||
// sendStatusRequest broadcasts `BlockStore` height.
|
||||
func (bcR *BlockchainReactor) sendStatusRequest() {
|
||||
msgBytes, err := bc.EncodeMsg(&bcproto.StatusRequest{})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
bcR.Switch.Broadcast(BlockchainChannel, msgBytes)
|
||||
bcR.Switch.BroadcastEnvelope(p2p.Envelope{
|
||||
ChannelID: BlockchainChannel,
|
||||
Message: &bcproto.StatusRequest{},
|
||||
})
|
||||
}
|
||||
|
||||
// Implements bcRNotifier
|
||||
@@ -507,11 +509,10 @@ func (bcR *BlockchainReactor) sendBlockRequest(peerID p2p.ID, height int64) erro
|
||||
return errNilPeerForBlockRequest
|
||||
}
|
||||
|
||||
msgBytes, err := bc.EncodeMsg(&bcproto.BlockRequest{Height: height})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
queued := peer.TrySend(BlockchainChannel, msgBytes)
|
||||
queued := p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
|
||||
ChannelID: BlockchainChannel,
|
||||
Message: &bcproto.BlockRequest{Height: height},
|
||||
}, bcR.Logger)
|
||||
if !queued {
|
||||
return errSendQueueFull
|
||||
}
|
||||
@@ -534,8 +535,8 @@ func (bcR *BlockchainReactor) switchToConsensus() {
|
||||
// Called by FSM and pool:
|
||||
// - pool calls when it detects slow peer or when peer times out
|
||||
// - FSM calls when:
|
||||
// - adding a block (addBlock) fails
|
||||
// - reactor processing of a block reports failure and FSM sends back the peers of first and second blocks
|
||||
// - adding a block (addBlock) fails
|
||||
// - reactor processing of a block reports failure and FSM sends back the peers of first and second blocks
|
||||
func (bcR *BlockchainReactor) sendPeerError(err error, peerID p2p.ID) {
|
||||
bcR.Logger.Info("sendPeerError:", "peer", peerID, "error", err)
|
||||
msgData := bcFsmMessage{
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
@@ -18,6 +19,7 @@ import (
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/mempool/mock"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
|
||||
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
|
||||
"github.com/tendermint/tendermint/proxy"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
@@ -102,7 +104,9 @@ func newBlockchainReactor(
|
||||
|
||||
blockDB := dbm.NewMemDB()
|
||||
stateDB := dbm.NewMemDB()
|
||||
stateStore := sm.NewStore(stateDB)
|
||||
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
|
||||
DiscardABCIResponses: false,
|
||||
})
|
||||
blockStore := store.NewBlockStore(blockDB)
|
||||
|
||||
state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
|
||||
@@ -115,7 +119,9 @@ func newBlockchainReactor(
|
||||
// pool.height is determined from the store.
|
||||
fastSync := true
|
||||
db := dbm.NewMemDB()
|
||||
stateStore = sm.NewStore(db)
|
||||
stateStore = sm.NewStore(db, sm.StoreOptions{
|
||||
DiscardABCIResponses: false,
|
||||
})
|
||||
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(),
|
||||
mock.Mempool{}, sm.EmptyEvidencePool{})
|
||||
if err = stateStore.Save(state); err != nil {
|
||||
@@ -345,6 +351,25 @@ outerFor:
|
||||
assert.True(t, lastReactorPair.bcR.Switch.Peers().Size() < len(reactorPairs)-1)
|
||||
}
|
||||
|
||||
func TestLegacyReactorReceiveBasic(t *testing.T) {
|
||||
config = cfg.ResetTestRoot("blockchain_reactor_test")
|
||||
defer os.RemoveAll(config.RootDir)
|
||||
genDoc, privVals := randGenesisDoc(1, false, 30)
|
||||
reactor := newBlockchainReactor(t, log.TestingLogger(), genDoc, privVals, 10)
|
||||
peer := p2p.CreateRandomPeer(false)
|
||||
|
||||
reactor.InitPeer(peer)
|
||||
reactor.AddPeer(peer)
|
||||
m := &bcproto.StatusRequest{}
|
||||
wm := m.Wrap()
|
||||
msg, err := proto.Marshal(wm)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.NotPanics(t, func() {
|
||||
reactor.Receive(BlockchainChannel, peer, msg)
|
||||
})
|
||||
}
|
||||
|
||||
//----------------------------------------------
|
||||
// utility funcs
|
||||
|
||||
|
||||
@@ -3,7 +3,6 @@ package v2
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
bc "github.com/tendermint/tendermint/blockchain"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
|
||||
"github.com/tendermint/tendermint/state"
|
||||
@@ -16,7 +15,7 @@ type iIO interface {
|
||||
sendBlockNotFound(height int64, peerID p2p.ID) error
|
||||
sendStatusResponse(base, height int64, peerID p2p.ID) error
|
||||
|
||||
broadcastStatusRequest() error
|
||||
broadcastStatusRequest()
|
||||
|
||||
trySwitchToConsensus(state state.State, skipWAL bool) bool
|
||||
}
|
||||
@@ -47,13 +46,10 @@ func (sio *switchIO) sendBlockRequest(peerID p2p.ID, height int64) error {
|
||||
if peer == nil {
|
||||
return fmt.Errorf("peer not found")
|
||||
}
|
||||
msgBytes, err := bc.EncodeMsg(&bcproto.BlockRequest{Height: height})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
queued := peer.TrySend(BlockchainChannel, msgBytes)
|
||||
if !queued {
|
||||
if queued := p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
|
||||
ChannelID: BlockchainChannel,
|
||||
Message: &bcproto.BlockRequest{Height: height},
|
||||
}, sio.sw.Logger); !queued {
|
||||
return fmt.Errorf("send queue full")
|
||||
}
|
||||
return nil
|
||||
@@ -65,12 +61,10 @@ func (sio *switchIO) sendStatusResponse(base int64, height int64, peerID p2p.ID)
|
||||
return fmt.Errorf("peer not found")
|
||||
}
|
||||
|
||||
msgBytes, err := bc.EncodeMsg(&bcproto.StatusResponse{Height: height, Base: base})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued {
|
||||
if queued := p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
|
||||
ChannelID: BlockchainChannel,
|
||||
Message: &bcproto.StatusRequest{},
|
||||
}, sio.sw.Logger); !queued {
|
||||
return fmt.Errorf("peer queue full")
|
||||
}
|
||||
|
||||
@@ -91,11 +85,10 @@ func (sio *switchIO) sendBlockToPeer(block *types.Block, peerID p2p.ID) error {
|
||||
return err
|
||||
}
|
||||
|
||||
msgBytes, err := bc.EncodeMsg(&bcproto.BlockResponse{Block: bpb})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued {
|
||||
if queued := p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
|
||||
ChannelID: BlockchainChannel,
|
||||
Message: &bcproto.BlockResponse{Block: bpb},
|
||||
}, sio.sw.Logger); !queued {
|
||||
return fmt.Errorf("peer queue full")
|
||||
}
|
||||
|
||||
@@ -107,12 +100,10 @@ func (sio *switchIO) sendBlockNotFound(height int64, peerID p2p.ID) error {
|
||||
if peer == nil {
|
||||
return fmt.Errorf("peer not found")
|
||||
}
|
||||
msgBytes, err := bc.EncodeMsg(&bcproto.NoBlockResponse{Height: height})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued {
|
||||
if queued := p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
|
||||
ChannelID: BlockchainChannel,
|
||||
Message: &bcproto.NoBlockResponse{Height: height},
|
||||
}, sio.sw.Logger); !queued {
|
||||
return fmt.Errorf("peer queue full")
|
||||
}
|
||||
|
||||
@@ -127,14 +118,10 @@ func (sio *switchIO) trySwitchToConsensus(state state.State, skipWAL bool) bool
|
||||
return ok
|
||||
}
|
||||
|
||||
func (sio *switchIO) broadcastStatusRequest() error {
|
||||
msgBytes, err := bc.EncodeMsg(&bcproto.StatusRequest{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
func (sio *switchIO) broadcastStatusRequest() {
|
||||
// XXX: maybe we should use an io specific peer list here
|
||||
sio.sw.Broadcast(BlockchainChannel, msgBytes)
|
||||
|
||||
return nil
|
||||
sio.sw.BroadcastEnvelope(p2p.Envelope{
|
||||
ChannelID: BlockchainChannel,
|
||||
Message: &bcproto.StatusRequest{},
|
||||
})
|
||||
}
|
||||
|
||||
@@ -5,6 +5,8 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
|
||||
"github.com/tendermint/tendermint/behaviour"
|
||||
bc "github.com/tendermint/tendermint/blockchain"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
@@ -215,7 +217,7 @@ type bcBlockResponse struct {
|
||||
priorityNormal
|
||||
time time.Time
|
||||
peerID p2p.ID
|
||||
size int64
|
||||
size int
|
||||
block *types.Block
|
||||
}
|
||||
|
||||
@@ -349,9 +351,7 @@ func (r *BlockchainReactor) demux(events <-chan Event) {
|
||||
case <-doProcessBlockCh:
|
||||
r.processor.send(rProcessBlock{})
|
||||
case <-doStatusCh:
|
||||
if err := r.io.broadcastStatusRequest(); err != nil {
|
||||
r.logger.Error("Error broadcasting status request", "err", err)
|
||||
}
|
||||
r.io.broadcastStatusRequest()
|
||||
|
||||
// Events from peers. Closing the channel signals event loop termination.
|
||||
case event, ok := <-events:
|
||||
@@ -455,39 +455,31 @@ func (r *BlockchainReactor) Stop() error {
|
||||
}
|
||||
|
||||
// Receive implements Reactor by handling different message types.
|
||||
func (r *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
|
||||
msg, err := bc.DecodeMsg(msgBytes)
|
||||
if err != nil {
|
||||
r.logger.Error("error decoding message",
|
||||
"src", src.ID(), "chId", chID, "msg", msg, "err", err)
|
||||
_ = r.reporter.Report(behaviour.BadMessage(src.ID(), err.Error()))
|
||||
func (r *BlockchainReactor) ReceiveEnvelope(e p2p.Envelope) {
|
||||
if err := bc.ValidateMsg(e.Message); err != nil {
|
||||
r.logger.Error("peer sent us invalid msg", "peer", e.Src, "msg", e.Message, "err", err)
|
||||
_ = r.reporter.Report(behaviour.BadMessage(e.Src.ID(), err.Error()))
|
||||
return
|
||||
}
|
||||
|
||||
if err = bc.ValidateMsg(msg); err != nil {
|
||||
r.logger.Error("peer sent us invalid msg", "peer", src, "msg", msg, "err", err)
|
||||
_ = r.reporter.Report(behaviour.BadMessage(src.ID(), err.Error()))
|
||||
return
|
||||
}
|
||||
r.logger.Debug("Receive", "src", e.Src.ID(), "chID", e.ChannelID, "msg", e.Message)
|
||||
|
||||
r.logger.Debug("Receive", "src", src.ID(), "chID", chID, "msg", msg)
|
||||
|
||||
switch msg := msg.(type) {
|
||||
switch msg := e.Message.(type) {
|
||||
case *bcproto.StatusRequest:
|
||||
if err := r.io.sendStatusResponse(r.store.Base(), r.store.Height(), src.ID()); err != nil {
|
||||
r.logger.Error("Could not send status message to peer", "src", src)
|
||||
if err := r.io.sendStatusResponse(r.store.Base(), r.store.Height(), e.Src.ID()); err != nil {
|
||||
r.logger.Error("Could not send status message to peer", "src", e.Src)
|
||||
}
|
||||
|
||||
case *bcproto.BlockRequest:
|
||||
block := r.store.LoadBlock(msg.Height)
|
||||
if block != nil {
|
||||
if err = r.io.sendBlockToPeer(block, src.ID()); err != nil {
|
||||
if err := r.io.sendBlockToPeer(block, e.Src.ID()); err != nil {
|
||||
r.logger.Error("Could not send block message to peer: ", err)
|
||||
}
|
||||
} else {
|
||||
r.logger.Info("peer asking for a block we don't have", "src", src, "height", msg.Height)
|
||||
peerID := src.ID()
|
||||
if err = r.io.sendBlockNotFound(msg.Height, peerID); err != nil {
|
||||
r.logger.Info("peer asking for a block we don't have", "src", e.Src, "height", msg.Height)
|
||||
peerID := e.Src.ID()
|
||||
if err := r.io.sendBlockNotFound(msg.Height, peerID); err != nil {
|
||||
r.logger.Error("Couldn't send block not found: ", err)
|
||||
}
|
||||
}
|
||||
@@ -495,7 +487,7 @@ func (r *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
|
||||
case *bcproto.StatusResponse:
|
||||
r.mtx.RLock()
|
||||
if r.events != nil {
|
||||
r.events <- bcStatusResponse{peerID: src.ID(), base: msg.Base, height: msg.Height}
|
||||
r.events <- bcStatusResponse{peerID: e.Src.ID(), base: msg.Base, height: msg.Height}
|
||||
}
|
||||
r.mtx.RUnlock()
|
||||
|
||||
@@ -508,10 +500,10 @@ func (r *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
|
||||
r.mtx.RLock()
|
||||
if r.events != nil {
|
||||
r.events <- bcBlockResponse{
|
||||
peerID: src.ID(),
|
||||
peerID: e.Src.ID(),
|
||||
block: bi,
|
||||
size: int64(len(msgBytes)),
|
||||
time: time.Now(),
|
||||
size: msg.Size(),
|
||||
}
|
||||
}
|
||||
r.mtx.RUnlock()
|
||||
@@ -519,12 +511,29 @@ func (r *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
|
||||
case *bcproto.NoBlockResponse:
|
||||
r.mtx.RLock()
|
||||
if r.events != nil {
|
||||
r.events <- bcNoBlockResponse{peerID: src.ID(), height: msg.Height, time: time.Now()}
|
||||
r.events <- bcNoBlockResponse{peerID: e.Src.ID(), height: msg.Height, time: time.Now()}
|
||||
}
|
||||
r.mtx.RUnlock()
|
||||
}
|
||||
}
|
||||
|
||||
func (r *BlockchainReactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {
|
||||
msg := &bcproto.Message{}
|
||||
err := proto.Unmarshal(msgBytes, msg)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
uw, err := msg.Unwrap()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
r.ReceiveEnvelope(p2p.Envelope{
|
||||
ChannelID: chID,
|
||||
Src: peer,
|
||||
Message: uw,
|
||||
})
|
||||
}
|
||||
|
||||
// AddPeer implements Reactor interface
|
||||
func (r *BlockchainReactor) AddPeer(peer p2p.Peer) {
|
||||
err := r.io.sendStatusResponse(r.store.Base(), r.store.Height(), peer.ID())
|
||||
@@ -559,6 +568,7 @@ func (r *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
|
||||
SendQueueCapacity: 2000,
|
||||
RecvBufferCapacity: 50 * 4096,
|
||||
RecvMessageCapacity: bc.MaxMsgSize,
|
||||
MessageType: &bcproto.Message{},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -9,13 +9,13 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
dbm "github.com/tendermint/tm-db"
|
||||
|
||||
abci "github.com/tendermint/tendermint/abci/types"
|
||||
"github.com/tendermint/tendermint/behaviour"
|
||||
bc "github.com/tendermint/tendermint/blockchain"
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/libs/service"
|
||||
@@ -53,34 +53,19 @@ func (mp mockPeer) NodeInfo() p2p.NodeInfo {
|
||||
func (mp mockPeer) Status() conn.ConnectionStatus { return conn.ConnectionStatus{} }
|
||||
func (mp mockPeer) SocketAddr() *p2p.NetAddress { return &p2p.NetAddress{} }
|
||||
|
||||
func (mp mockPeer) SendEnvelope(e p2p.Envelope) bool { return true }
|
||||
func (mp mockPeer) TrySendEnvelope(e p2p.Envelope) bool { return true }
|
||||
|
||||
func (mp mockPeer) Send(byte, []byte) bool { return true }
|
||||
func (mp mockPeer) TrySend(byte, []byte) bool { return true }
|
||||
|
||||
func (mp mockPeer) Set(string, interface{}) {}
|
||||
func (mp mockPeer) Get(string) interface{} { return struct{}{} }
|
||||
|
||||
// nolint:unused // ignore
|
||||
type mockBlockStore struct {
|
||||
blocks map[int64]*types.Block
|
||||
}
|
||||
func (mp mockPeer) SetRemovalFailed() {}
|
||||
func (mp mockPeer) GetRemovalFailed() bool { return false }
|
||||
|
||||
// nolint:unused // ignore
|
||||
func (ml *mockBlockStore) Height() int64 {
|
||||
return int64(len(ml.blocks))
|
||||
}
|
||||
|
||||
// nolint:unused // ignore
|
||||
func (ml *mockBlockStore) LoadBlock(height int64) *types.Block {
|
||||
return ml.blocks[height]
|
||||
}
|
||||
|
||||
// nolint:unused // ignore
|
||||
func (ml *mockBlockStore) SaveBlock(block *types.Block, part *types.PartSet, commit *types.Commit) {
|
||||
ml.blocks[block.Height] = block
|
||||
}
|
||||
|
||||
type mockBlockApplier struct {
|
||||
}
|
||||
type mockBlockApplier struct{}
|
||||
|
||||
// XXX: Add whitelist/blacklist?
|
||||
func (mba *mockBlockApplier) ApplyBlock(
|
||||
@@ -130,8 +115,7 @@ func (sio *mockSwitchIo) trySwitchToConsensus(state sm.State, skipWAL bool) bool
|
||||
return true
|
||||
}
|
||||
|
||||
func (sio *mockSwitchIo) broadcastStatusRequest() error {
|
||||
return nil
|
||||
func (sio *mockSwitchIo) broadcastStatusRequest() {
|
||||
}
|
||||
|
||||
type testReactorParams struct {
|
||||
@@ -159,7 +143,9 @@ func newTestReactor(p testReactorParams) *BlockchainReactor {
|
||||
panic(fmt.Errorf("error start app: %w", err))
|
||||
}
|
||||
db := dbm.NewMemDB()
|
||||
stateStore := sm.NewStore(db)
|
||||
stateStore := sm.NewStore(db, sm.StoreOptions{
|
||||
DiscardABCIResponses: false,
|
||||
})
|
||||
appl = sm.NewBlockExecutor(stateStore, p.logger, proxyApp.Consensus(), mock.Mempool{}, sm.EmptyEvidencePool{})
|
||||
if err = stateStore.Save(state); err != nil {
|
||||
panic(err)
|
||||
@@ -350,9 +336,7 @@ func newTestReactor(p testReactorParams) *BlockchainReactor {
|
||||
// }
|
||||
|
||||
func TestReactorHelperMode(t *testing.T) {
|
||||
var (
|
||||
channelID = byte(0x40)
|
||||
)
|
||||
channelID := byte(0x40)
|
||||
|
||||
config := cfg.ResetTestRoot("blockchain_reactor_v2_test")
|
||||
defer os.RemoveAll(config.RootDir)
|
||||
@@ -368,7 +352,7 @@ func TestReactorHelperMode(t *testing.T) {
|
||||
|
||||
type testEvent struct {
|
||||
peer string
|
||||
event interface{}
|
||||
event proto.Message
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
@@ -380,10 +364,10 @@ func TestReactorHelperMode(t *testing.T) {
|
||||
name: "status request",
|
||||
params: params,
|
||||
msgs: []testEvent{
|
||||
{"P1", bcproto.StatusRequest{}},
|
||||
{"P1", bcproto.BlockRequest{Height: 13}},
|
||||
{"P1", bcproto.BlockRequest{Height: 20}},
|
||||
{"P1", bcproto.BlockRequest{Height: 22}},
|
||||
{"P1", &bcproto.StatusRequest{}},
|
||||
{"P1", &bcproto.BlockRequest{Height: 13}},
|
||||
{"P1", &bcproto.BlockRequest{Height: 20}},
|
||||
{"P1", &bcproto.BlockRequest{Height: 22}},
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -400,25 +384,27 @@ func TestReactorHelperMode(t *testing.T) {
|
||||
for i := 0; i < len(tt.msgs); i++ {
|
||||
step := tt.msgs[i]
|
||||
switch ev := step.event.(type) {
|
||||
case bcproto.StatusRequest:
|
||||
case *bcproto.StatusRequest:
|
||||
old := mockSwitch.numStatusResponse
|
||||
msg, err := bc.EncodeMsg(&ev)
|
||||
assert.NoError(t, err)
|
||||
reactor.Receive(channelID, mockPeer{id: p2p.ID(step.peer)}, msg)
|
||||
reactor.ReceiveEnvelope(p2p.Envelope{
|
||||
ChannelID: channelID,
|
||||
Src: mockPeer{id: p2p.ID(step.peer)},
|
||||
Message: ev})
|
||||
assert.Equal(t, old+1, mockSwitch.numStatusResponse)
|
||||
case bcproto.BlockRequest:
|
||||
case *bcproto.BlockRequest:
|
||||
if ev.Height > params.startHeight {
|
||||
old := mockSwitch.numNoBlockResponse
|
||||
msg, err := bc.EncodeMsg(&ev)
|
||||
assert.NoError(t, err)
|
||||
reactor.Receive(channelID, mockPeer{id: p2p.ID(step.peer)}, msg)
|
||||
reactor.ReceiveEnvelope(p2p.Envelope{
|
||||
ChannelID: channelID,
|
||||
Src: mockPeer{id: p2p.ID(step.peer)},
|
||||
Message: ev})
|
||||
assert.Equal(t, old+1, mockSwitch.numNoBlockResponse)
|
||||
} else {
|
||||
old := mockSwitch.numBlockResponse
|
||||
msg, err := bc.EncodeMsg(&ev)
|
||||
assert.NoError(t, err)
|
||||
reactor.Receive(channelID, mockPeer{id: p2p.ID(step.peer)}, msg)
|
||||
reactor.ReceiveEnvelope(p2p.Envelope{
|
||||
ChannelID: channelID,
|
||||
Src: mockPeer{id: p2p.ID(step.peer)},
|
||||
Message: ev})
|
||||
assert.Equal(t, old+1, mockSwitch.numBlockResponse)
|
||||
}
|
||||
}
|
||||
@@ -429,6 +415,34 @@ func TestReactorHelperMode(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestLegacyReactorReceiveBasic(t *testing.T) {
|
||||
config := cfg.ResetTestRoot("blockchain_reactor_v2_test")
|
||||
defer os.RemoveAll(config.RootDir)
|
||||
genDoc, privVals := randGenesisDoc(config.ChainID(), 1, false, 30)
|
||||
params := testReactorParams{
|
||||
logger: log.TestingLogger(),
|
||||
genDoc: genDoc,
|
||||
privVals: privVals,
|
||||
startHeight: 20,
|
||||
mockA: true,
|
||||
}
|
||||
reactor := newTestReactor(params)
|
||||
mockSwitch := &mockSwitchIo{switchedToConsensus: false}
|
||||
reactor.io = mockSwitch
|
||||
peer := p2p.CreateRandomPeer(false)
|
||||
|
||||
reactor.InitPeer(peer)
|
||||
reactor.AddPeer(peer)
|
||||
m := &bcproto.StatusRequest{}
|
||||
wm := m.Wrap()
|
||||
msg, err := proto.Marshal(wm)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.NotPanics(t, func() {
|
||||
reactor.Receive(BlockchainChannel, peer, msg)
|
||||
})
|
||||
}
|
||||
|
||||
func TestReactorSetSwitchNil(t *testing.T) {
|
||||
config := cfg.ResetTestRoot("blockchain_reactor_v2_test")
|
||||
defer os.RemoveAll(config.RootDir)
|
||||
@@ -465,7 +479,8 @@ type testApp struct {
|
||||
}
|
||||
|
||||
func randGenesisDoc(chainID string, numValidators int, randPower bool, minPower int64) (
|
||||
*types.GenesisDoc, []types.PrivValidator) {
|
||||
*types.GenesisDoc, []types.PrivValidator,
|
||||
) {
|
||||
validators := make([]types.GenesisValidator, numValidators)
|
||||
privValidators := make([]types.PrivValidator, numValidators)
|
||||
for i := 0; i < numValidators; i++ {
|
||||
@@ -490,7 +505,8 @@ func randGenesisDoc(chainID string, numValidators int, randPower bool, minPower
|
||||
func newReactorStore(
|
||||
genDoc *types.GenesisDoc,
|
||||
privVals []types.PrivValidator,
|
||||
maxBlockHeight int64) (*store.BlockStore, sm.State, *sm.BlockExecutor) {
|
||||
maxBlockHeight int64,
|
||||
) (*store.BlockStore, sm.State, *sm.BlockExecutor) {
|
||||
if len(privVals) != 1 {
|
||||
panic("only support one validator")
|
||||
}
|
||||
@@ -504,14 +520,19 @@ func newReactorStore(
|
||||
|
||||
stateDB := dbm.NewMemDB()
|
||||
blockStore := store.NewBlockStore(dbm.NewMemDB())
|
||||
stateStore := sm.NewStore(stateDB)
|
||||
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
|
||||
DiscardABCIResponses: false,
|
||||
})
|
||||
state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("error constructing state from genesis file: %w", err))
|
||||
}
|
||||
|
||||
db := dbm.NewMemDB()
|
||||
stateStore = sm.NewStore(db)
|
||||
stateStore = sm.NewStore(db, sm.StoreOptions{
|
||||
DiscardABCIResponses: false,
|
||||
},
|
||||
)
|
||||
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(),
|
||||
mock.Mempool{}, sm.EmptyEvidencePool{})
|
||||
if err = stateStore.Save(state); err != nil {
|
||||
|
||||
@@ -52,13 +52,8 @@ func (rt *Routine) setLogger(logger log.Logger) {
|
||||
rt.logger = logger
|
||||
}
|
||||
|
||||
// nolint:unused
|
||||
func (rt *Routine) setMetrics(metrics *Metrics) {
|
||||
rt.metrics = metrics
|
||||
}
|
||||
|
||||
func (rt *Routine) start() {
|
||||
rt.logger.Info(fmt.Sprintf("%s: run", rt.name))
|
||||
rt.logger.Info("routine start", "msg", log.NewLazySprintf("%s: run", rt.name))
|
||||
running := atomic.CompareAndSwapUint32(rt.running, uint32(0), uint32(1))
|
||||
if !running {
|
||||
panic(fmt.Sprintf("%s is already running", rt.name))
|
||||
@@ -98,7 +93,7 @@ func (rt *Routine) start() {
|
||||
return
|
||||
}
|
||||
rt.metrics.EventsOut.With("routine", rt.name).Add(1)
|
||||
rt.logger.Debug(fmt.Sprintf("%s: produced %T %+v", rt.name, oEvent, oEvent))
|
||||
rt.logger.Debug("routine start", "msg", log.NewLazySprintf("%s: produced %T %+v", rt.name, oEvent, oEvent))
|
||||
|
||||
// Skip rTrySchedule and rProcessBlock events as they clutter the history
|
||||
// due to their frequency.
|
||||
@@ -118,7 +113,7 @@ func (rt *Routine) start() {
|
||||
|
||||
// XXX: look into returning OpError in the net package
|
||||
func (rt *Routine) send(event Event) bool {
|
||||
rt.logger.Debug(fmt.Sprintf("%s: received %T %+v", rt.name, event, event))
|
||||
rt.logger.Debug("routine send", "msg", log.NewLazySprintf("%s: received %T %+v", rt.name, event, event))
|
||||
if !rt.isRunning() {
|
||||
return false
|
||||
}
|
||||
@@ -150,7 +145,7 @@ func (rt *Routine) stop() {
|
||||
return
|
||||
}
|
||||
|
||||
rt.logger.Info(fmt.Sprintf("%s: stop", rt.name))
|
||||
rt.logger.Info("routine stop", "msg", log.NewLazySprintf("%s: stop", rt.name))
|
||||
rt.queue.Dispose() // this should block until all queue items are free?
|
||||
}
|
||||
|
||||
|
||||
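The routine changes above replace eager fmt.Sprintf calls with log.NewLazySprintf so that formatting work is only done when the entry is actually rendered. A minimal sketch of the pattern, assuming NewLazySprintf defers the Sprintf until the logger prints the value:

func lazyLogSketch() {
	logger := log.TestingLogger()
	name, event := "scheduler", struct{}{}

	// Eager: the string is built even if debug output is discarded.
	logger.Debug(fmt.Sprintf("%s: received %T", name, event))

	// Lazy: formatting is deferred until the logger renders the entry.
	logger.Debug("routine send", "msg", log.NewLazySprintf("%s: received %T", name, event))
}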
@@ -366,7 +366,7 @@ func (sc *scheduler) setStateAtHeight(height int64, state blockState) {
|
||||
}
|
||||
|
||||
// CONTRACT: peer exists and in Ready state.
|
||||
func (sc *scheduler) markReceived(peerID p2p.ID, height int64, size int64, now time.Time) error {
|
||||
func (sc *scheduler) markReceived(peerID p2p.ID, height int64, size int, now time.Time) error {
|
||||
peer := sc.peers[peerID]
|
||||
|
||||
if state := sc.getStateAtHeight(height); state != blockStatePending || sc.pendingBlocks[height] != peerID {
|
||||
@@ -379,7 +379,7 @@ func (sc *scheduler) markReceived(peerID p2p.ID, height int64, size int64, now t
|
||||
height, pendingTime, now)
|
||||
}
|
||||
|
||||
peer.lastRate = size / now.Sub(pendingTime).Nanoseconds()
|
||||
peer.lastRate = int64(size) / now.Sub(pendingTime).Nanoseconds()
|
||||
|
||||
sc.setStateAtHeight(height, blockStateReceived)
|
||||
delete(sc.pendingBlocks, height)
|
||||
@@ -532,7 +532,7 @@ func (sc *scheduler) handleBlockResponse(event bcBlockResponse) (Event, error) {
|
||||
return noOp, nil
|
||||
}
|
||||
|
||||
err = sc.markReceived(event.peerID, event.block.Height, event.size, event.time)
|
||||
err = sc.markReceived(event.peerID, event.block.Height, event.block.Size(), event.time)
|
||||
if err != nil {
|
||||
sc.removePeer(event.peerID)
|
||||
return scPeerError{peerID: event.peerID, reason: err}, nil
|
||||
|
||||
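After this change the block size travels through the scheduler as an int (gogo protobuf's generated Size() returns int) and is widened to int64 only for the rate arithmetic. A tiny helper mirroring the division in markReceived, for reference:

// lastRateSketch reproduces the rate computation above: bytes divided by
// elapsed nanoseconds, using integer division.
func lastRateSketch(size int, pendingTime, now time.Time) int64 {
	return int64(size) / now.Sub(pendingTime).Nanoseconds()
}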
@@ -853,7 +853,7 @@ func TestScMarkReceived(t *testing.T) {
|
||||
type args struct {
|
||||
peerID p2p.ID
|
||||
height int64
|
||||
size int64
|
||||
size int
|
||||
tm time.Time
|
||||
}
|
||||
tests := []struct {
|
||||
|
||||
9
buf.gen.yaml
Normal file
@@ -0,0 +1,9 @@
|
||||
version: v1
|
||||
plugins:
|
||||
- name: gogofaster
|
||||
out: ./proto/
|
||||
opt:
|
||||
- Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types
|
||||
- Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration
|
||||
- plugins=grpc
|
||||
- paths=source_relative
|
||||
3
buf.work.yaml
Normal file
@@ -0,0 +1,3 @@
|
||||
version: v1
|
||||
directories:
|
||||
- proto
|
||||
66
cmd/tendermint/commands/compact.go
Normal file
@@ -0,0 +1,66 @@
|
||||
package commands
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/syndtr/goleveldb/leveldb"
|
||||
"github.com/syndtr/goleveldb/leveldb/opt"
|
||||
"github.com/syndtr/goleveldb/leveldb/util"
|
||||
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
)
|
||||
|
||||
var CompactGoLevelDBCmd = &cobra.Command{
|
||||
Use: "experimental-compact-goleveldb",
|
||||
Short: "force compacts the tendermint storage engine (only GoLevelDB supported)",
|
||||
Long: `
|
||||
This is a temporary utility command that performs a force compaction on the state
|
||||
and blockstores to reduce disk space for a pruning node. This should only be run
|
||||
once the node has stopped. This command will likely be omitted in the future after
|
||||
the planned refactor to the storage engine.
|
||||
|
||||
Currently, only GoLevelDB is supported.
|
||||
`,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
if config.DBBackend != "goleveldb" {
|
||||
return errors.New("compaction is currently only supported with goleveldb")
|
||||
}
|
||||
|
||||
compactGoLevelDBs(config.RootDir, logger)
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
func compactGoLevelDBs(rootDir string, logger log.Logger) {
|
||||
dbNames := []string{"state", "blockstore"}
|
||||
o := &opt.Options{
|
||||
DisableSeeksCompaction: true,
|
||||
}
|
||||
wg := sync.WaitGroup{}
|
||||
|
||||
for _, dbName := range dbNames {
|
||||
dbName := dbName
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
dbPath := filepath.Join(rootDir, "data", dbName+".db")
|
||||
store, err := leveldb.OpenFile(dbPath, o)
|
||||
if err != nil {
|
||||
logger.Error("failed to initialize tendermint db", "path", dbPath, "err", err)
|
||||
return
|
||||
}
|
||||
defer store.Close()
|
||||
|
||||
logger.Info("starting compaction...", "db", dbPath)
|
||||
|
||||
err = store.CompactRange(util.Range{Start: nil, Limit: nil})
|
||||
if err != nil {
|
||||
logger.Error("failed to compact tendermint db", "path", dbPath, "err", err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
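As a side note, the same GoLevelDB compaction pattern the command uses can be exercised against a single database directory. A self-contained sketch follows; the path is hypothetical and nothing here depends on Tendermint itself.

package main

import (
	"log"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/opt"
	"github.com/syndtr/goleveldb/leveldb/util"
)

func main() {
	dbPath := "/tmp/tmhome/data/blockstore.db" // hypothetical path
	store, err := leveldb.OpenFile(dbPath, &opt.Options{DisableSeeksCompaction: true})
	if err != nil {
		log.Fatalf("open %s: %v", dbPath, err)
	}
	defer store.Close()

	// Compact the full key range, as compactGoLevelDBs does for each store.
	if err := store.CompactRange(util.Range{Start: nil, Limit: nil}); err != nil {
		log.Fatalf("compact %s: %v", dbPath, err)
	}
}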
@@ -3,7 +3,6 @@ package debug
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
@@ -82,7 +81,7 @@ func dumpCmdHandler(_ *cobra.Command, args []string) error {
|
||||
func dumpDebugData(outDir string, conf *cfg.Config, rpc *rpchttp.HTTP) {
|
||||
start := time.Now().UTC()
|
||||
|
||||
tmpDir, err := ioutil.TempDir(outDir, "tendermint_debug_tmp")
|
||||
tmpDir, err := os.MkdirTemp(outDir, "tendermint_debug_tmp")
|
||||
if err != nil {
|
||||
logger.Error("failed to create temporary directory", "dir", tmpDir, "error", err)
|
||||
return
|
||||
|
||||
@@ -5,7 +5,6 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
@@ -68,7 +67,6 @@ func zipDir(src, dest string) error {
|
||||
_, err = io.Copy(headerWriter, file)
|
||||
return err
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
// copyFile copies a file from src to dest and returns an error upon failure. The
|
||||
@@ -111,5 +109,5 @@ func writeStateJSONToFile(state interface{}, dir, filename string) error {
|
||||
return fmt.Errorf("failed to encode state dump: %w", err)
|
||||
}
|
||||
|
||||
return ioutil.WriteFile(path.Join(dir, filename), stateJSON, os.ModePerm)
|
||||
return os.WriteFile(path.Join(dir, filename), stateJSON, os.ModePerm)
|
||||
}
|
||||
|
||||
@@ -3,7 +3,6 @@ package debug
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
@@ -56,7 +55,7 @@ func killCmdHandler(cmd *cobra.Command, args []string) error {
|
||||
|
||||
// Create a temporary directory which will contain all the state dumps and
|
||||
// relevant files and directories that will be compressed into a file.
|
||||
tmpDir, err := ioutil.TempDir(os.TempDir(), "tendermint_debug_tmp")
|
||||
tmpDir, err := os.MkdirTemp(os.TempDir(), "tendermint_debug_tmp")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create temporary directory: %w", err)
|
||||
}
|
||||
@@ -105,7 +104,7 @@ func killProc(pid uint64, dir string) error {
|
||||
// pipe STDERR output from tailing the Tendermint process to a file
|
||||
//
|
||||
// NOTE: This will only work on UNIX systems.
|
||||
cmd := exec.Command("tail", "-f", fmt.Sprintf("/proc/%d/fd/2", pid)) // nolint: gosec
|
||||
cmd := exec.Command("tail", "-f", fmt.Sprintf("/proc/%d/fd/2", pid)) //nolint: gosec
|
||||
|
||||
outFile, err := os.Create(filepath.Join(dir, "stacktrace.out"))
|
||||
if err != nil {
|
||||
|
||||
@@ -3,7 +3,7 @@ package debug
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
@@ -67,16 +67,17 @@ func copyConfig(home, dir string) error {
|
||||
func dumpProfile(dir, addr, profile string, debug int) error {
|
||||
endpoint := fmt.Sprintf("%s/debug/pprof/%s?debug=%d", addr, profile, debug)
|
||||
|
||||
resp, err := http.Get(endpoint) // nolint: gosec
|
||||
//nolint:gosec,nolintlint
|
||||
resp, err := http.Get(endpoint)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to query for %s profile: %w", profile, err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read %s profile response body: %w", profile, err)
|
||||
}
|
||||
|
||||
return ioutil.WriteFile(path.Join(dir, fmt.Sprintf("%s.out", profile)), body, os.ModePerm)
|
||||
return os.WriteFile(path.Join(dir, fmt.Sprintf("%s.out", profile)), body, os.ModePerm)
|
||||
}
|
||||
|
||||
232
cmd/tendermint/commands/reindex_event.go
Normal file
@@ -0,0 +1,232 @@
|
||||
package commands
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
dbm "github.com/tendermint/tm-db"
|
||||
|
||||
abcitypes "github.com/tendermint/tendermint/abci/types"
|
||||
tmcfg "github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/libs/progressbar"
|
||||
"github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/state/indexer"
|
||||
blockidxkv "github.com/tendermint/tendermint/state/indexer/block/kv"
|
||||
"github.com/tendermint/tendermint/state/indexer/sink/psql"
|
||||
"github.com/tendermint/tendermint/state/txindex"
|
||||
"github.com/tendermint/tendermint/state/txindex/kv"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
const (
|
||||
reindexFailed = "event re-index failed: "
|
||||
)
|
||||
|
||||
var (
|
||||
ErrHeightNotAvailable = errors.New("height is not available")
|
||||
ErrInvalidRequest = errors.New("invalid request")
|
||||
)
|
||||
|
||||
// ReIndexEventCmd constructs a command to re-index events in a block height interval.
|
||||
var ReIndexEventCmd = &cobra.Command{
|
||||
Use: "reindex-event",
|
||||
Short: "Re-index events to the event store backends",
|
||||
Long: `
|
||||
reindex-event is an offline tool that re-indexes block and tx events into the configured event sinks.
Run it when the event store backend was dropped or disconnected, or when you want to replace the
backend. The default start-height is 0, meaning the re-index starts from the base block height
(inclusive); the default end-height is 0, meaning it runs until the latest block height (inclusive).
Either or both arguments can be omitted.
|
||||
|
||||
Note: This operation requires ABCIResponses. Do not set DiscardABCIResponses to true if you
|
||||
want to use this command.
|
||||
`,
|
||||
Example: `
|
||||
tendermint reindex-event
|
||||
tendermint reindex-event --start-height 2
|
||||
tendermint reindex-event --end-height 10
|
||||
tendermint reindex-event --start-height 2 --end-height 10
|
||||
`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
bs, ss, err := loadStateAndBlockStore(config)
|
||||
if err != nil {
|
||||
fmt.Println(reindexFailed, err)
|
||||
return
|
||||
}
|
||||
|
||||
if err := checkValidHeight(bs); err != nil {
|
||||
fmt.Println(reindexFailed, err)
|
||||
return
|
||||
}
|
||||
|
||||
bi, ti, err := loadEventSinks(config)
|
||||
if err != nil {
|
||||
fmt.Println(reindexFailed, err)
|
||||
return
|
||||
}
|
||||
|
||||
riArgs := eventReIndexArgs{
|
||||
startHeight: startHeight,
|
||||
endHeight: endHeight,
|
||||
blockIndexer: bi,
|
||||
txIndexer: ti,
|
||||
blockStore: bs,
|
||||
stateStore: ss,
|
||||
}
|
||||
if err := eventReIndex(cmd, riArgs); err != nil {
|
||||
panic(fmt.Errorf("%s: %w", reindexFailed, err))
|
||||
}
|
||||
|
||||
fmt.Println("event re-index finished")
|
||||
},
|
||||
}
|
||||
|
||||
var (
|
||||
startHeight int64
|
||||
endHeight int64
|
||||
)
|
||||
|
||||
func init() {
|
||||
ReIndexEventCmd.Flags().Int64Var(&startHeight, "start-height", 0, "the block height would like to start for re-index")
|
||||
ReIndexEventCmd.Flags().Int64Var(&endHeight, "end-height", 0, "the block height would like to finish for re-index")
|
||||
}
|
||||
|
||||
func loadEventSinks(cfg *tmcfg.Config) (indexer.BlockIndexer, txindex.TxIndexer, error) {
|
||||
switch strings.ToLower(cfg.TxIndex.Indexer) {
|
||||
case "null":
|
||||
return nil, nil, errors.New("found null event sink, please check the tx-index section in the config.toml")
|
||||
case "psql":
|
||||
conn := cfg.TxIndex.PsqlConn
|
||||
if conn == "" {
|
||||
return nil, nil, errors.New("the psql connection settings cannot be empty")
|
||||
}
|
||||
es, err := psql.NewEventSink(conn, cfg.ChainID())
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return es.BlockIndexer(), es.TxIndexer(), nil
|
||||
case "kv":
|
||||
store, err := dbm.NewDB("tx_index", dbm.BackendType(cfg.DBBackend), cfg.DBDir())
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
txIndexer := kv.NewTxIndex(store)
|
||||
blockIndexer := blockidxkv.New(dbm.NewPrefixDB(store, []byte("block_events")))
|
||||
return blockIndexer, txIndexer, nil
|
||||
default:
|
||||
return nil, nil, fmt.Errorf("unsupported event sink type: %s", cfg.TxIndex.Indexer)
|
||||
}
|
||||
}
|
||||
|
||||
type eventReIndexArgs struct {
|
||||
startHeight int64
|
||||
endHeight int64
|
||||
blockIndexer indexer.BlockIndexer
|
||||
txIndexer txindex.TxIndexer
|
||||
blockStore state.BlockStore
|
||||
stateStore state.Store
|
||||
}
|
||||
|
||||
func eventReIndex(cmd *cobra.Command, args eventReIndexArgs) error {
|
||||
var bar progressbar.Bar
|
||||
bar.NewOption(args.startHeight-1, args.endHeight)
|
||||
|
||||
fmt.Println("start re-indexing events:")
|
||||
defer bar.Finish()
|
||||
for i := args.startHeight; i <= args.endHeight; i++ {
|
||||
select {
|
||||
case <-cmd.Context().Done():
|
||||
return fmt.Errorf("event re-index terminated at height %d: %w", i, cmd.Context().Err())
|
||||
default:
|
||||
b := args.blockStore.LoadBlock(i)
|
||||
if b == nil {
|
||||
return fmt.Errorf("not able to load block at height %d from the blockstore", i)
|
||||
}
|
||||
|
||||
r, err := args.stateStore.LoadABCIResponses(i)
|
||||
if err != nil {
|
||||
return fmt.Errorf("not able to load ABCI Response at height %d from the statestore", i)
|
||||
}
|
||||
|
||||
e := types.EventDataNewBlockHeader{
|
||||
Header: b.Header,
|
||||
NumTxs: int64(len(b.Txs)),
|
||||
ResultBeginBlock: *r.BeginBlock,
|
||||
ResultEndBlock: *r.EndBlock,
|
||||
}
|
||||
|
||||
var batch *txindex.Batch
|
||||
if e.NumTxs > 0 {
|
||||
batch = txindex.NewBatch(e.NumTxs)
|
||||
|
||||
for i := range b.Data.Txs {
|
||||
tr := abcitypes.TxResult{
|
||||
Height: b.Height,
|
||||
Index: uint32(i),
|
||||
Tx: b.Data.Txs[i],
|
||||
Result: *(r.DeliverTxs[i]),
|
||||
}
|
||||
|
||||
if err = batch.Add(&tr); err != nil {
|
||||
return fmt.Errorf("adding tx to batch: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := args.txIndexer.AddBatch(batch); err != nil {
|
||||
return fmt.Errorf("tx event re-index at height %d failed: %w", i, err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := args.blockIndexer.Index(e); err != nil {
|
||||
return fmt.Errorf("block event re-index at height %d failed: %w", i, err)
|
||||
}
|
||||
}
|
||||
|
||||
bar.Play(i)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkValidHeight(bs state.BlockStore) error {
|
||||
base := bs.Base()
|
||||
|
||||
if startHeight == 0 {
|
||||
startHeight = base
|
||||
fmt.Printf("set the start block height to the base height of the blockstore %d \n", base)
|
||||
}
|
||||
|
||||
if startHeight < base {
|
||||
return fmt.Errorf("%s (requested start height: %d, base height: %d)",
|
||||
ErrHeightNotAvailable, startHeight, base)
|
||||
}
|
||||
|
||||
height := bs.Height()
|
||||
|
||||
if startHeight > height {
|
||||
return fmt.Errorf(
|
||||
"%s (requested start height: %d, store height: %d)", ErrHeightNotAvailable, startHeight, height)
|
||||
}
|
||||
|
||||
if endHeight == 0 || endHeight > height {
|
||||
endHeight = height
|
||||
fmt.Printf("set the end block height to the latest height of the blockstore %d \n", height)
|
||||
}
|
||||
|
||||
if endHeight < base {
|
||||
return fmt.Errorf(
|
||||
"%s (requested end height: %d, base height: %d)", ErrHeightNotAvailable, endHeight, base)
|
||||
}
|
||||
|
||||
if endHeight < startHeight {
|
||||
return fmt.Errorf(
|
||||
"%s (requested the end height: %d is less than the start height: %d)",
|
||||
ErrInvalidRequest, startHeight, endHeight)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
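A rough sketch (not part of the diff) of how the helpers above compose when driven directly rather than through the cobra Run hook; the function name is illustrative, and the command argument only needs to carry a context, as setupReIndexEventCmd does in the test file below.

func reindexAllSketch(cmd *cobra.Command, cfg *tmcfg.Config) error {
	bs, ss, err := loadStateAndBlockStore(cfg)
	if err != nil {
		return err
	}
	bi, ti, err := loadEventSinks(cfg)
	if err != nil {
		return err
	}
	// Re-index the full range held by the block store.
	return eventReIndex(cmd, eventReIndexArgs{
		startHeight:  bs.Base(),
		endHeight:    bs.Height(),
		blockIndexer: bi,
		txIndexer:    ti,
		blockStore:   bs,
		stateStore:   ss,
	})
}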
193
cmd/tendermint/commands/reindex_event_test.go
Normal file
@@ -0,0 +1,193 @@
|
||||
package commands
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
dbm "github.com/tendermint/tm-db"
|
||||
|
||||
abcitypes "github.com/tendermint/tendermint/abci/types"
|
||||
tmcfg "github.com/tendermint/tendermint/config"
|
||||
prototmstate "github.com/tendermint/tendermint/proto/tendermint/state"
|
||||
blockmocks "github.com/tendermint/tendermint/state/indexer/mocks"
|
||||
"github.com/tendermint/tendermint/state/mocks"
|
||||
txmocks "github.com/tendermint/tendermint/state/txindex/mocks"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
const (
|
||||
height int64 = 10
|
||||
base int64 = 2
|
||||
)
|
||||
|
||||
func setupReIndexEventCmd() *cobra.Command {
|
||||
reIndexEventCmd := &cobra.Command{
|
||||
Use: ReIndexEventCmd.Use,
|
||||
Run: func(cmd *cobra.Command, args []string) {},
|
||||
}
|
||||
|
||||
_ = reIndexEventCmd.ExecuteContext(context.Background())
|
||||
|
||||
return reIndexEventCmd
|
||||
}
|
||||
|
||||
func TestReIndexEventCheckHeight(t *testing.T) {
|
||||
mockBlockStore := &mocks.BlockStore{}
|
||||
mockBlockStore.
|
||||
On("Base").Return(base).
|
||||
On("Height").Return(height)
|
||||
|
||||
testCases := []struct {
|
||||
startHeight int64
|
||||
endHeight int64
|
||||
validHeight bool
|
||||
}{
|
||||
{0, 0, true},
|
||||
{0, base, true},
|
||||
{0, base - 1, false},
|
||||
{0, height, true},
|
||||
{0, height + 1, true},
|
||||
{0, 0, true},
|
||||
{base - 1, 0, false},
|
||||
{base, 0, true},
|
||||
{base, base, true},
|
||||
{base, base - 1, false},
|
||||
{base, height, true},
|
||||
{base, height + 1, true},
|
||||
{height, 0, true},
|
||||
{height, base, false},
|
||||
{height, height - 1, false},
|
||||
{height, height, true},
|
||||
{height, height + 1, true},
|
||||
{height + 1, 0, false},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
startHeight = tc.startHeight
|
||||
endHeight = tc.endHeight
|
||||
|
||||
err := checkValidHeight(mockBlockStore)
|
||||
if tc.validHeight {
|
||||
require.NoError(t, err)
|
||||
} else {
|
||||
require.Error(t, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadEventSink(t *testing.T) {
|
||||
testCases := []struct {
|
||||
sinks string
|
||||
connURL string
|
||||
loadErr bool
|
||||
}{
|
||||
{"", "", true},
|
||||
{"NULL", "", true},
|
||||
{"KV", "", false},
|
||||
{"PSQL", "", true}, // true because empty connect url
|
||||
// a PSQL sink with a valid connection URL is not exercised here
{"UnsupportedSinkType", "wrongUrl", true},
|
||||
}
|
||||
|
||||
for idx, tc := range testCases {
|
||||
cfg := tmcfg.TestConfig()
|
||||
cfg.TxIndex.Indexer = tc.sinks
|
||||
cfg.TxIndex.PsqlConn = tc.connURL
|
||||
_, _, err := loadEventSinks(cfg)
|
||||
if tc.loadErr {
|
||||
require.Error(t, err, idx)
|
||||
} else {
|
||||
require.NoError(t, err, idx)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadBlockStore(t *testing.T) {
|
||||
cfg := tmcfg.TestConfig()
|
||||
cfg.DBPath = t.TempDir()
|
||||
_, _, err := loadStateAndBlockStore(cfg)
|
||||
require.Error(t, err)
|
||||
|
||||
_, err = dbm.NewDB("blockstore", dbm.GoLevelDBBackend, cfg.DBDir())
|
||||
require.NoError(t, err)
|
||||
|
||||
// Get StateStore
|
||||
_, err = dbm.NewDB("state", dbm.GoLevelDBBackend, cfg.DBDir())
|
||||
require.NoError(t, err)
|
||||
|
||||
bs, ss, err := loadStateAndBlockStore(cfg)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, bs)
|
||||
require.NotNil(t, ss)
|
||||
|
||||
}
|
||||
|
||||
func TestReIndexEvent(t *testing.T) {
|
||||
mockBlockStore := &mocks.BlockStore{}
|
||||
mockStateStore := &mocks.Store{}
|
||||
mockBlockIndexer := &blockmocks.BlockIndexer{}
|
||||
mockTxIndexer := &txmocks.TxIndexer{}
|
||||
|
||||
mockBlockStore.
|
||||
On("Base").Return(base).
|
||||
On("Height").Return(height).
|
||||
On("LoadBlock", base).Return(nil).Once().
|
||||
On("LoadBlock", base).Return(&types.Block{Data: types.Data{Txs: types.Txs{make(types.Tx, 1)}}}).
|
||||
On("LoadBlock", height).Return(&types.Block{Data: types.Data{Txs: types.Txs{make(types.Tx, 1)}}})
|
||||
|
||||
dtx := abcitypes.ResponseDeliverTx{}
|
||||
abciResp := &prototmstate.ABCIResponses{
|
||||
DeliverTxs: []*abcitypes.ResponseDeliverTx{&dtx},
|
||||
EndBlock: &abcitypes.ResponseEndBlock{},
|
||||
BeginBlock: &abcitypes.ResponseBeginBlock{},
|
||||
}
|
||||
|
||||
mockBlockIndexer.
|
||||
On("Index", mock.AnythingOfType("types.EventDataNewBlockHeader")).Return(errors.New("")).Once().
|
||||
On("Index", mock.AnythingOfType("types.EventDataNewBlockHeader")).Return(nil)
|
||||
|
||||
mockTxIndexer.
|
||||
On("AddBatch", mock.AnythingOfType("*txindex.Batch")).Return(errors.New("")).Once().
|
||||
On("AddBatch", mock.AnythingOfType("*txindex.Batch")).Return(nil)
|
||||
|
||||
mockStateStore.
|
||||
On("LoadABCIResponses", base).Return(nil, errors.New("")).Once().
|
||||
On("LoadABCIResponses", base).Return(abciResp, nil).
|
||||
On("LoadABCIResponses", height).Return(abciResp, nil)
|
||||
|
||||
testCases := []struct {
|
||||
startHeight int64
|
||||
endHeight int64
|
||||
reIndexErr bool
|
||||
}{
|
||||
{base, height, true}, // LoadBlock error
|
||||
{base, height, true}, // LoadABCIResponses error
|
||||
{base, height, true}, // index block event error
|
||||
{base, height, true}, // index tx event error
|
||||
{base, base, false},
|
||||
{height, height, false},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
args := eventReIndexArgs{
|
||||
startHeight: tc.startHeight,
|
||||
endHeight: tc.endHeight,
|
||||
blockIndexer: mockBlockIndexer,
|
||||
txIndexer: mockTxIndexer,
|
||||
blockStore: mockBlockStore,
|
||||
stateStore: mockStateStore,
|
||||
}
|
||||
|
||||
err := eventReIndex(setupReIndexEventCmd(), args)
|
||||
if tc.reIndexErr {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
186
cmd/tendermint/commands/reset.go
Normal file
@@ -0,0 +1,186 @@
|
||||
package commands
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmos "github.com/tendermint/tendermint/libs/os"
|
||||
"github.com/tendermint/tendermint/privval"
|
||||
)
|
||||
|
||||
// ResetAllCmd removes the database of this Tendermint core
|
||||
// instance.
|
||||
var ResetAllCmd = &cobra.Command{
|
||||
Use: "unsafe-reset-all",
|
||||
Aliases: []string{"unsafe_reset_all"},
|
||||
Short: "(unsafe) Remove all the data and WAL, reset this node's validator to genesis state",
|
||||
RunE: resetAllCmd,
|
||||
PreRun: deprecateSnakeCase,
|
||||
}
|
||||
|
||||
var keepAddrBook bool
|
||||
|
||||
// ResetStateCmd removes the database of the specified Tendermint core instance.
|
||||
var ResetStateCmd = &cobra.Command{
|
||||
Use: "reset-state",
|
||||
Short: "Remove all the data and WAL",
|
||||
PreRun: deprecateSnakeCase,
|
||||
RunE: func(cmd *cobra.Command, args []string) (err error) {
|
||||
config, err = ParseConfig(cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return resetState(config.DBDir(), logger)
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
ResetAllCmd.Flags().BoolVar(&keepAddrBook, "keep-addr-book", false, "keep the address book intact")
|
||||
}
|
||||
|
||||
// ResetPrivValidatorCmd resets the private validator files.
|
||||
var ResetPrivValidatorCmd = &cobra.Command{
|
||||
Use: "unsafe-reset-priv-validator",
|
||||
Aliases: []string{"unsafe_reset_priv_validator"},
|
||||
Short: "(unsafe) Reset this node's validator to genesis state",
|
||||
PreRun: deprecateSnakeCase,
|
||||
RunE: resetPrivValidator,
|
||||
}
|
||||
|
||||
// XXX: this is totally unsafe.
|
||||
// it's only suitable for testnets.
|
||||
func resetAllCmd(cmd *cobra.Command, args []string) (err error) {
|
||||
config, err = ParseConfig(cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return resetAll(
|
||||
config.DBDir(),
|
||||
config.P2P.AddrBookFile(),
|
||||
config.PrivValidatorKeyFile(),
|
||||
config.PrivValidatorStateFile(),
|
||||
logger,
|
||||
)
|
||||
}
|
||||
|
||||
// XXX: this is totally unsafe.
|
||||
// it's only suitable for testnets.
|
||||
func resetPrivValidator(cmd *cobra.Command, args []string) (err error) {
|
||||
config, err = ParseConfig(cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
resetFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile(), logger)
|
||||
return nil
|
||||
}
|
||||
|
||||
// resetAll removes address book files plus all data, and resets the privValidator data.
func resetAll(dbDir, addrBookFile, privValKeyFile, privValStateFile string, logger log.Logger) error {
|
||||
if keepAddrBook {
|
||||
logger.Info("The address book remains intact")
|
||||
} else {
|
||||
removeAddrBook(addrBookFile, logger)
|
||||
}
|
||||
|
||||
if err := os.RemoveAll(dbDir); err == nil {
|
||||
logger.Info("Removed all blockchain history", "dir", dbDir)
|
||||
} else {
|
||||
logger.Error("Error removing all blockchain history", "dir", dbDir, "err", err)
|
||||
}
|
||||
|
||||
if err := tmos.EnsureDir(dbDir, 0700); err != nil {
|
||||
logger.Error("unable to recreate dbDir", "err", err)
|
||||
}
|
||||
|
||||
// recreate the dbDir since the privVal state needs to live there
|
||||
resetFilePV(privValKeyFile, privValStateFile, logger)
|
||||
return nil
|
||||
}
|
||||
|
||||
// resetState removes the blockstore, state, evidence, and tx index databases plus the consensus WAL; it does not touch the address book or the private validator files.
func resetState(dbDir string, logger log.Logger) error {
|
||||
blockdb := filepath.Join(dbDir, "blockstore.db")
|
||||
state := filepath.Join(dbDir, "state.db")
|
||||
wal := filepath.Join(dbDir, "cs.wal")
|
||||
evidence := filepath.Join(dbDir, "evidence.db")
|
||||
txIndex := filepath.Join(dbDir, "tx_index.db")
|
||||
|
||||
if tmos.FileExists(blockdb) {
|
||||
if err := os.RemoveAll(blockdb); err == nil {
|
||||
logger.Info("Removed all blockstore.db", "dir", blockdb)
|
||||
} else {
|
||||
logger.Error("error removing all blockstore.db", "dir", blockdb, "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
if tmos.FileExists(state) {
|
||||
if err := os.RemoveAll(state); err == nil {
|
||||
logger.Info("Removed all state.db", "dir", state)
|
||||
} else {
|
||||
logger.Error("error removing all state.db", "dir", state, "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
if tmos.FileExists(wal) {
|
||||
if err := os.RemoveAll(wal); err == nil {
|
||||
logger.Info("Removed all cs.wal", "dir", wal)
|
||||
} else {
|
||||
logger.Error("error removing all cs.wal", "dir", wal, "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
if tmos.FileExists(evidence) {
|
||||
if err := os.RemoveAll(evidence); err == nil {
|
||||
logger.Info("Removed all evidence.db", "dir", evidence)
|
||||
} else {
|
||||
logger.Error("error removing all evidence.db", "dir", evidence, "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
if tmos.FileExists(txIndex) {
|
||||
if err := os.RemoveAll(txIndex); err == nil {
|
||||
logger.Info("Removed tx_index.db", "dir", txIndex)
|
||||
} else {
|
||||
logger.Error("error removing tx_index.db", "dir", txIndex, "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := tmos.EnsureDir(dbDir, 0700); err != nil {
|
||||
logger.Error("unable to recreate dbDir", "err", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func resetFilePV(privValKeyFile, privValStateFile string, logger log.Logger) {
|
||||
if _, err := os.Stat(privValKeyFile); err == nil {
|
||||
pv := privval.LoadFilePVEmptyState(privValKeyFile, privValStateFile)
|
||||
pv.Reset()
|
||||
logger.Info(
|
||||
"Reset private validator file to genesis state",
|
||||
"keyFile", privValKeyFile,
|
||||
"stateFile", privValStateFile,
|
||||
)
|
||||
} else {
|
||||
pv := privval.GenFilePV(privValKeyFile, privValStateFile)
|
||||
pv.Save()
|
||||
logger.Info(
|
||||
"Generated private validator file",
|
||||
"keyFile", privValKeyFile,
|
||||
"stateFile", privValStateFile,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
func removeAddrBook(addrBookFile string, logger log.Logger) {
|
||||
if err := os.Remove(addrBookFile); err == nil {
|
||||
logger.Info("Removed existing address book", "file", addrBookFile)
|
||||
} else if !os.IsNotExist(err) {
|
||||
logger.Info("Error removing address book", "file", addrBookFile, "err", err)
|
||||
}
|
||||
}
|
||||
@@ -1,91 +0,0 @@
|
||||
package commands
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmos "github.com/tendermint/tendermint/libs/os"
|
||||
"github.com/tendermint/tendermint/privval"
|
||||
)
|
||||
|
||||
// ResetAllCmd removes the database of this Tendermint core
|
||||
// instance.
|
||||
var ResetAllCmd = &cobra.Command{
|
||||
Use: "unsafe-reset-all",
|
||||
Aliases: []string{"unsafe_reset_all"},
|
||||
Short: "(unsafe) Remove all the data and WAL, reset this node's validator to genesis state",
|
||||
Run: resetAll,
|
||||
PreRun: deprecateSnakeCase,
|
||||
}
|
||||
|
||||
var keepAddrBook bool
|
||||
|
||||
func init() {
|
||||
ResetAllCmd.Flags().BoolVar(&keepAddrBook, "keep-addr-book", false, "keep the address book intact")
|
||||
}
|
||||
|
||||
// ResetPrivValidatorCmd resets the private validator files.
|
||||
var ResetPrivValidatorCmd = &cobra.Command{
|
||||
Use: "unsafe-reset-priv-validator",
|
||||
Aliases: []string{"unsafe_reset_priv_validator"},
|
||||
Short: "(unsafe) Reset this node's validator to genesis state",
|
||||
Run: resetPrivValidator,
|
||||
PreRun: deprecateSnakeCase,
|
||||
}
|
||||
|
||||
// XXX: this is totally unsafe.
|
||||
// it's only suitable for testnets.
|
||||
func resetAll(cmd *cobra.Command, args []string) {
|
||||
ResetAll(config.DBDir(), config.P2P.AddrBookFile(), config.PrivValidatorKeyFile(),
|
||||
config.PrivValidatorStateFile(), logger)
|
||||
}
|
||||
|
||||
// XXX: this is totally unsafe.
|
||||
// it's only suitable for testnets.
|
||||
func resetPrivValidator(cmd *cobra.Command, args []string) {
|
||||
resetFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile(), logger)
|
||||
}
|
||||
|
||||
// ResetAll removes address book files plus all data, and resets the privValdiator data.
|
||||
// Exported so other CLI tools can use it.
|
||||
func ResetAll(dbDir, addrBookFile, privValKeyFile, privValStateFile string, logger log.Logger) {
|
||||
if keepAddrBook {
|
||||
logger.Info("The address book remains intact")
|
||||
} else {
|
||||
removeAddrBook(addrBookFile, logger)
|
||||
}
|
||||
if err := os.RemoveAll(dbDir); err == nil {
|
||||
logger.Info("Removed all blockchain history", "dir", dbDir)
|
||||
} else {
|
||||
logger.Error("Error removing all blockchain history", "dir", dbDir, "err", err)
|
||||
}
|
||||
// recreate the dbDir since the privVal state needs to live there
|
||||
if err := tmos.EnsureDir(dbDir, 0700); err != nil {
|
||||
logger.Error("unable to recreate dbDir", "err", err)
|
||||
}
|
||||
resetFilePV(privValKeyFile, privValStateFile, logger)
|
||||
}
|
||||
|
||||
func resetFilePV(privValKeyFile, privValStateFile string, logger log.Logger) {
|
||||
if _, err := os.Stat(privValKeyFile); err == nil {
|
||||
pv := privval.LoadFilePVEmptyState(privValKeyFile, privValStateFile)
|
||||
pv.Reset()
|
||||
logger.Info("Reset private validator file to genesis state", "keyFile", privValKeyFile,
|
||||
"stateFile", privValStateFile)
|
||||
} else {
|
||||
pv := privval.GenFilePV(privValKeyFile, privValStateFile)
|
||||
pv.Save()
|
||||
logger.Info("Generated private validator file", "keyFile", privValKeyFile,
|
||||
"stateFile", privValStateFile)
|
||||
}
|
||||
}
|
||||
|
||||
func removeAddrBook(addrBookFile string, logger log.Logger) {
|
||||
if err := os.Remove(addrBookFile); err == nil {
|
||||
logger.Info("Removed existing address book", "file", addrBookFile)
|
||||
} else if !os.IsNotExist(err) {
|
||||
logger.Info("Error removing address book", "file", addrBookFile, "err", err)
|
||||
}
|
||||
}
|
||||
53
cmd/tendermint/commands/reset_test.go
Normal file
@@ -0,0 +1,53 @@
|
||||
package commands
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/privval"
|
||||
)
|
||||
|
||||
func Test_ResetAll(t *testing.T) {
|
||||
config := cfg.TestConfig()
|
||||
dir := t.TempDir()
|
||||
config.SetRoot(dir)
|
||||
cfg.EnsureRoot(dir)
|
||||
require.NoError(t, initFilesWithConfig(config))
|
||||
pv := privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile())
|
||||
pv.LastSignState.Height = 10
|
||||
pv.Save()
|
||||
require.NoError(t, resetAll(config.DBDir(), config.P2P.AddrBookFile(), config.PrivValidatorKeyFile(),
|
||||
config.PrivValidatorStateFile(), logger))
|
||||
require.DirExists(t, config.DBDir())
|
||||
require.NoFileExists(t, filepath.Join(config.DBDir(), "block.db"))
|
||||
require.NoFileExists(t, filepath.Join(config.DBDir(), "state.db"))
|
||||
require.NoFileExists(t, filepath.Join(config.DBDir(), "evidence.db"))
|
||||
require.NoFileExists(t, filepath.Join(config.DBDir(), "tx_index.db"))
|
||||
require.FileExists(t, config.PrivValidatorStateFile())
|
||||
pv = privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile())
|
||||
require.Equal(t, int64(0), pv.LastSignState.Height)
|
||||
}
|
||||
|
||||
func Test_ResetState(t *testing.T) {
|
||||
config := cfg.TestConfig()
|
||||
dir := t.TempDir()
|
||||
config.SetRoot(dir)
|
||||
cfg.EnsureRoot(dir)
|
||||
require.NoError(t, initFilesWithConfig(config))
|
||||
pv := privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile())
|
||||
pv.LastSignState.Height = 10
|
||||
pv.Save()
|
||||
require.NoError(t, resetState(config.DBDir(), logger))
|
||||
require.DirExists(t, config.DBDir())
|
||||
require.NoFileExists(t, filepath.Join(config.DBDir(), "block.db"))
|
||||
require.NoFileExists(t, filepath.Join(config.DBDir(), "state.db"))
|
||||
require.NoFileExists(t, filepath.Join(config.DBDir(), "evidence.db"))
|
||||
require.NoFileExists(t, filepath.Join(config.DBDir(), "tx_index.db"))
|
||||
require.FileExists(t, config.PrivValidatorStateFile())
|
||||
pv = privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile())
|
||||
// private validator state should still be intact.
require.Equal(t, int64(10), pv.LastSignState.Height)
|
||||
}
|
||||
@@ -2,12 +2,14 @@ package commands
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
dbm "github.com/tendermint/tm-db"
|
||||
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/libs/os"
|
||||
"github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/store"
|
||||
)
|
||||
@@ -55,6 +57,10 @@ func RollbackState(config *cfg.Config) (int64, []byte, error) {
|
||||
func loadStateAndBlockStore(config *cfg.Config) (*store.BlockStore, state.Store, error) {
|
||||
dbType := dbm.BackendType(config.DBBackend)
|
||||
|
||||
if !os.FileExists(filepath.Join(config.DBDir(), "blockstore.db")) {
|
||||
return nil, nil, fmt.Errorf("no blockstore found in %v", config.DBDir())
|
||||
}
|
||||
|
||||
// Get BlockStore
|
||||
blockStoreDB, err := dbm.NewDB("blockstore", dbType, config.DBDir())
|
||||
if err != nil {
|
||||
@@ -62,12 +68,18 @@ func loadStateAndBlockStore(config *cfg.Config) (*store.BlockStore, state.Store,
|
||||
}
|
||||
blockStore := store.NewBlockStore(blockStoreDB)
|
||||
|
||||
if !os.FileExists(filepath.Join(config.DBDir(), "state.db")) {
|
||||
return nil, nil, fmt.Errorf("no statestore found in %v", config.DBDir())
|
||||
}
|
||||
|
||||
// Get StateStore
|
||||
stateDB, err := dbm.NewDB("state", dbType, config.DBDir())
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
stateStore := state.NewStore(stateDB)
|
||||
stateStore := state.NewStore(stateDB, state.StoreOptions{
|
||||
DiscardABCIResponses: config.Storage.DiscardABCIResponses,
|
||||
})
|
||||
|
||||
return blockStore, stateStore, nil
|
||||
}
|
||||
|
||||
@@ -29,12 +29,25 @@ func registerFlagsRootCmd(cmd *cobra.Command) {
|
||||
|
||||
// ParseConfig retrieves the default environment configuration,
|
||||
// sets up the Tendermint root and ensures that the root exists
|
||||
func ParseConfig() (*cfg.Config, error) {
|
||||
func ParseConfig(cmd *cobra.Command) (*cfg.Config, error) {
|
||||
conf := cfg.DefaultConfig()
|
||||
err := viper.Unmarshal(conf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var home string
|
||||
if os.Getenv("TMHOME") != "" {
|
||||
home = os.Getenv("TMHOME")
|
||||
} else {
|
||||
home, err = cmd.Flags().GetString(cli.HomeFlag)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
conf.RootDir = home
|
||||
|
||||
conf.SetRoot(conf.RootDir)
|
||||
cfg.EnsureRoot(conf.RootDir)
|
||||
if err := conf.ValidateBasic(); err != nil {
|
||||
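For clarity, here is a hypothetical sketch (the paths are invented for illustration and are not part of this change) of the precedence the new ParseConfig(cmd) signature introduces: the TMHOME environment variable overrides the --home flag.

package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"

	"github.com/tendermint/tendermint/cmd/tendermint/commands"
	"github.com/tendermint/tendermint/libs/cli"
)

func main() {
	// TMHOME is consulted first; the flag is only read when the variable is empty.
	os.Setenv("TMHOME", "/tmp/tmhome-env")

	cmd := &cobra.Command{Use: "demo"}
	cmd.Flags().String(cli.HomeFlag, "/tmp/tmhome-flag", "node home directory")

	conf, err := commands.ParseConfig(cmd)
	if err != nil {
		panic(err)
	}
	fmt.Println(conf.RootDir) // prints /tmp/tmhome-env
}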
@@ -52,7 +65,7 @@ var RootCmd = &cobra.Command{
|
||||
return nil
|
||||
}
|
||||
|
||||
config, err = ParseConfig()
|
||||
config, err = ParseConfig(cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -2,7 +2,6 @@ package commands
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
@@ -18,9 +17,7 @@ import (
|
||||
tmos "github.com/tendermint/tendermint/libs/os"
|
||||
)
|
||||
|
||||
var (
|
||||
defaultRoot = os.ExpandEnv("$HOME/.some/test/dir")
|
||||
)
|
||||
var defaultRoot = os.ExpandEnv("$HOME/.some/test/dir")
|
||||
|
||||
// clearConfig clears env vars, the given root dir, and resets viper.
|
||||
func clearConfig(dir string) {
|
||||
@@ -88,7 +85,6 @@ func TestRootHome(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestRootFlagsEnv(t *testing.T) {
|
||||
|
||||
// defaults
|
||||
defaults := cfg.DefaultConfig()
|
||||
defaultLogLvl := defaults.LogLevel
|
||||
@@ -116,7 +112,6 @@ func TestRootFlagsEnv(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestRootConfig(t *testing.T) {
|
||||
|
||||
// write non-default config
|
||||
nonDefaultLogLvl := "abc:debug"
|
||||
cvals := map[string]string{
|
||||
@@ -140,7 +135,7 @@ func TestRootConfig(t *testing.T) {
|
||||
|
||||
// XXX: path must match cfg.defaultConfigPath
|
||||
configFilePath := filepath.Join(defaultRoot, "config")
|
||||
err := tmos.EnsureDir(configFilePath, 0700)
|
||||
err := tmos.EnsureDir(configFilePath, 0o700)
|
||||
require.Nil(t, err)
|
||||
|
||||
// write the non-defaults to a different path
|
||||
@@ -168,5 +163,5 @@ func WriteConfigVals(dir string, vals map[string]string) error {
|
||||
data += fmt.Sprintf("%s = \"%s\"\n", k, v)
|
||||
}
|
||||
cfile := filepath.Join(dir, "config.toml")
|
||||
return ioutil.WriteFile(cfile, []byte(data), 0600)
|
||||
return os.WriteFile(cfile, []byte(data), 0o600)
|
||||
}
|
||||
|
||||
@@ -63,6 +63,8 @@ func AddNodeFlags(cmd *cobra.Command) {
|
||||
"p2p.laddr",
|
||||
config.P2P.ListenAddress,
|
||||
"node listen address. (0.0.0.0:0 means any interface, any port)")
|
||||
cmd.Flags().String("p2p.external-address",
|
||||
config.P2P.ExternalAddress, "ip:port address to advertise to peers for them to dial")
|
||||
cmd.Flags().String("p2p.seeds", config.P2P.Seeds, "comma-delimited ID@host:port seed nodes")
|
||||
cmd.Flags().String("p2p.persistent_peers", config.P2P.PersistentPeers, "comma-delimited ID@host:port persistent peers")
|
||||
cmd.Flags().String("p2p.unconditional_peer_ids",
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package commands
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
@@ -13,6 +14,25 @@ var VersionCmd = &cobra.Command{
|
||||
Use: "version",
|
||||
Short: "Show version info",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
fmt.Println(version.TMCoreSemVer)
|
||||
if verbose {
|
||||
values, _ := json.MarshalIndent(struct {
|
||||
Tendermint string `json:"tendermint"`
|
||||
ABCI string `json:"abci"`
|
||||
BlockProtocol uint64 `json:"block_protocol"`
|
||||
P2PProtocol uint64 `json:"p2p_protocol"`
|
||||
}{
|
||||
Tendermint: version.TMCoreSemVer,
|
||||
ABCI: version.ABCIVersion,
|
||||
BlockProtocol: version.BlockProtocol,
|
||||
P2PProtocol: version.P2PProtocol,
|
||||
}, "", " ")
|
||||
fmt.Println(string(values))
|
||||
} else {
|
||||
fmt.Println(version.TMCoreSemVer)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
VersionCmd.Flags().BoolVarP(&verbose, "verbose", "v", false, "Show protocol and library versions")
|
||||
}
|
||||
|
||||
@@ -18,16 +18,19 @@ func main() {
|
||||
cmd.InitFilesCmd,
|
||||
cmd.ProbeUpnpCmd,
|
||||
cmd.LightCmd,
|
||||
cmd.ReIndexEventCmd,
|
||||
cmd.ReplayCmd,
|
||||
cmd.ReplayConsoleCmd,
|
||||
cmd.ResetAllCmd,
|
||||
cmd.ResetPrivValidatorCmd,
|
||||
cmd.ResetStateCmd,
|
||||
cmd.ShowValidatorCmd,
|
||||
cmd.TestnetFilesCmd,
|
||||
cmd.ShowNodeIDCmd,
|
||||
cmd.GenNodeKeyCmd,
|
||||
cmd.VersionCmd,
|
||||
cmd.RollbackStateCmd,
|
||||
cmd.CompactGoLevelDBCmd,
|
||||
debug.DebugCmd,
|
||||
cli.NewCompletionCmd(rootCmd, true),
|
||||
)
|
||||
|
||||
@@ -23,6 +23,11 @@ const (
|
||||
|
||||
// DefaultLogLevel defines a default log level as INFO.
|
||||
DefaultLogLevel = "info"
|
||||
|
||||
// Mempool versions. V1 is prioritized mempool, v0 is regular mempool.
|
||||
// Default is v0.
|
||||
MempoolV0 = "v0"
|
||||
MempoolV1 = "v1"
|
||||
)
|
||||
|
||||
// NOTE: Most of the structs & relevant comments + the
|
||||
@@ -69,6 +74,7 @@ type Config struct {
|
||||
StateSync *StateSyncConfig `mapstructure:"statesync"`
|
||||
FastSync *FastSyncConfig `mapstructure:"fastsync"`
|
||||
Consensus *ConsensusConfig `mapstructure:"consensus"`
|
||||
Storage *StorageConfig `mapstructure:"storage"`
|
||||
TxIndex *TxIndexConfig `mapstructure:"tx_index"`
|
||||
Instrumentation *InstrumentationConfig `mapstructure:"instrumentation"`
|
||||
}
|
||||
@@ -83,6 +89,7 @@ func DefaultConfig() *Config {
|
||||
StateSync: DefaultStateSyncConfig(),
|
||||
FastSync: DefaultFastSyncConfig(),
|
||||
Consensus: DefaultConsensusConfig(),
|
||||
Storage: DefaultStorageConfig(),
|
||||
TxIndex: DefaultTxIndexConfig(),
|
||||
Instrumentation: DefaultInstrumentationConfig(),
|
||||
}
|
||||
@@ -98,6 +105,7 @@ func TestConfig() *Config {
|
||||
StateSync: TestStateSyncConfig(),
|
||||
FastSync: TestFastSyncConfig(),
|
||||
Consensus: TestConsensusConfig(),
|
||||
Storage: TestStorageConfig(),
|
||||
TxIndex: TestTxIndexConfig(),
|
||||
Instrumentation: TestInstrumentationConfig(),
|
||||
}
|
||||
@@ -676,6 +684,13 @@ func DefaultFuzzConnConfig() *FuzzConnConfig {
|
||||
|
||||
// MempoolConfig defines the configuration options for the Tendermint mempool
|
||||
type MempoolConfig struct {
|
||||
// Mempool version to use:
|
||||
// 1) "v0" - (default) FIFO mempool.
|
||||
// 2) "v1" - prioritized mempool.
|
||||
// WARNING: There's a known memory leak with the prioritized mempool
|
||||
// that the team is working on. Read more here:
// https://github.com/tendermint/tendermint/issues/8775
|
||||
Version string `mapstructure:"version"`
|
||||
RootDir string `mapstructure:"home"`
|
||||
Recheck bool `mapstructure:"recheck"`
|
||||
Broadcast bool `mapstructure:"broadcast"`
|
||||
@@ -699,20 +714,39 @@ type MempoolConfig struct {
|
||||
// Including space needed by encoding (one varint per transaction).
|
||||
// XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796
|
||||
MaxBatchBytes int `mapstructure:"max_batch_bytes"`
|
||||
|
||||
// TTLDuration, if non-zero, defines the maximum amount of time a transaction
// can exist in the mempool.
//
// Note, if TTLNumBlocks is also defined, a transaction will be removed if it
// has existed in the mempool for at least TTLNumBlocks blocks or if its
// insertion time into the mempool is beyond TTLDuration.
|
||||
TTLDuration time.Duration `mapstructure:"ttl-duration"`
|
||||
|
||||
// TTLNumBlocks, if non-zero, defines the maximum number of blocks a transaction
// can exist in the mempool.
//
// Note, if TTLDuration is also defined, a transaction will be removed if it
// has existed in the mempool for at least TTLNumBlocks blocks or if
// its insertion time into the mempool is beyond TTLDuration.
|
||||
TTLNumBlocks int64 `mapstructure:"ttl-num-blocks"`
|
||||
}
|
||||
|
||||
// DefaultMempoolConfig returns a default configuration for the Tendermint mempool
|
||||
func DefaultMempoolConfig() *MempoolConfig {
|
||||
return &MempoolConfig{
|
||||
Version: MempoolV0,
|
||||
Recheck: true,
|
||||
Broadcast: true,
|
||||
WalPath: "",
|
||||
// Each signature verification takes .5ms, Size reduced until we implement
|
||||
// ABCI Recheck
|
||||
Size: 5000,
|
||||
MaxTxsBytes: 1024 * 1024 * 1024, // 1GB
|
||||
CacheSize: 10000,
|
||||
MaxTxBytes: 1024 * 1024, // 1MB
|
||||
Size: 5000,
|
||||
MaxTxsBytes: 1024 * 1024 * 1024, // 1GB
|
||||
CacheSize: 10000,
|
||||
MaxTxBytes: 1024 * 1024, // 1MB
|
||||
TTLDuration: 0 * time.Second,
|
||||
TTLNumBlocks: 0,
|
||||
}
|
||||
}
|
||||
|
||||
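To make the new mempool knobs concrete, here is a minimal, hypothetical sketch (not part of this diff) that starts from DefaultMempoolConfig; a transaction is evicted once either limit is exceeded, whichever comes first.

package main

import (
	"fmt"
	"time"

	cfg "github.com/tendermint/tendermint/config"
)

func main() {
	mc := cfg.DefaultMempoolConfig()
	mc.Version = cfg.MempoolV1        // opt in to the prioritized mempool
	mc.TTLDuration = 10 * time.Minute // evict txs that have sat in the mempool for 10 minutes...
	mc.TTLNumBlocks = 100             // ...or that have outlived 100 blocks
	fmt.Println(mc.Version, mc.TTLDuration, mc.TTLNumBlocks)
}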
@@ -1040,11 +1074,41 @@ func (cfg *ConsensusConfig) ValidateBasic() error {
|
||||
}
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
// StorageConfig
|
||||
|
||||
// StorageConfig allows more fine-grained control over certain storage-related
|
||||
// behavior.
|
||||
type StorageConfig struct {
|
||||
// Set to false to ensure ABCI responses are persisted. ABCI responses are
|
||||
// required for `/block_results` RPC queries, and to reindex events in the
|
||||
// command-line tool.
|
||||
DiscardABCIResponses bool `mapstructure:"discard_abci_responses"`
|
||||
}
|
||||
|
||||
// DefaultStorageConfig returns the default configuration options relating to
|
||||
// Tendermint storage optimization.
|
||||
func DefaultStorageConfig() *StorageConfig {
|
||||
return &StorageConfig{
|
||||
DiscardABCIResponses: false,
|
||||
}
|
||||
}
|
||||
|
||||
// TestStorageConfig returns storage configuration that can be used for
|
||||
// testing.
|
||||
func TestStorageConfig() *StorageConfig {
|
||||
return &StorageConfig{
|
||||
DiscardABCIResponses: false,
|
||||
}
|
||||
}
|
||||
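As a quick illustration of the trade-off described in the comments above, a hypothetical sketch (not part of this diff): enabling the option saves disk space, but /block_results queries and event re-indexing can no longer be served for the affected heights.

package main

import (
	"fmt"

	cfg "github.com/tendermint/tendermint/config"
)

func main() {
	c := cfg.DefaultConfig()
	// Drop ABCI responses from the state store after each block is committed.
	c.Storage.DiscardABCIResponses = true
	fmt.Println("discard_abci_responses =", c.Storage.DiscardABCIResponses)
}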
|
||||
// -----------------------------------------------------------------------------
|
||||
// TxIndexConfig
|
||||
// Remember that Event has the following structure:
|
||||
// type: [
|
||||
// key: value,
|
||||
// ...
|
||||
//
|
||||
// key: value,
|
||||
// ...
|
||||
//
|
||||
// ]
|
||||
//
|
||||
// CompositeKeys are constructed by `type.key`
|
||||
|
||||
@@ -27,7 +27,6 @@ func TestDefaultConfig(t *testing.T) {
|
||||
assert.Equal("/foo/bar", cfg.GenesisFile())
|
||||
assert.Equal("/opt/data", cfg.DBDir())
|
||||
assert.Equal("/foo/wal/mem", cfg.Mempool.WalDir())
|
||||
|
||||
}
|
||||
|
||||
func TestConfigValidateBasic(t *testing.T) {
|
||||
@@ -140,8 +139,8 @@ func TestFastSyncConfigValidateBasic(t *testing.T) {
|
||||
assert.Error(t, cfg.ValidateBasic())
|
||||
}
|
||||
|
||||
//nolint:lll
|
||||
func TestConsensusConfig_ValidateBasic(t *testing.T) {
|
||||
// nolint: lll
|
||||
testcases := map[string]struct {
|
||||
modify func(*ConsensusConfig)
|
||||
expectErr bool
|
||||
@@ -166,6 +165,7 @@ func TestConsensusConfig_ValidateBasic(t *testing.T) {
|
||||
"PeerQueryMaj23SleepDuration negative": {func(c *ConsensusConfig) { c.PeerQueryMaj23SleepDuration = -1 }, true},
|
||||
"DoubleSignCheckHeight negative": {func(c *ConsensusConfig) { c.DoubleSignCheckHeight = -1 }, true},
|
||||
}
|
||||
|
||||
for desc, tc := range testcases {
|
||||
tc := tc // appease linter
|
||||
t.Run(desc, func(t *testing.T) {
|
||||
|
||||
@@ -3,7 +3,7 @@ package config
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"text/template"
|
||||
@@ -12,7 +12,7 @@ import (
|
||||
)
|
||||
|
||||
// DefaultDirPerm is the default permissions used when creating directories.
|
||||
const DefaultDirPerm = 0700
|
||||
const DefaultDirPerm = 0o700
|
||||
|
||||
var configTemplate *template.Template
|
||||
|
||||
@@ -63,7 +63,7 @@ func WriteConfigFile(configFilePath string, config *Config) {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
tmos.MustWriteFile(configFilePath, buffer.Bytes(), 0644)
|
||||
tmos.MustWriteFile(configFilePath, buffer.Bytes(), 0o644)
|
||||
}
|
||||
|
||||
// Note: any changes to the comments/variables/mapstructure
|
||||
@@ -342,6 +342,11 @@ dial_timeout = "{{ .P2P.DialTimeout }}"
|
||||
#######################################################
|
||||
[mempool]
|
||||
|
||||
# Mempool version to use:
|
||||
# 1) "v0" - (default) FIFO mempool.
|
||||
# 2) "v1" - prioritized mempool.
|
||||
version = "{{ .Mempool.Version }}"
|
||||
|
||||
recheck = {{ .Mempool.Recheck }}
|
||||
broadcast = {{ .Mempool.Broadcast }}
|
||||
wal_dir = "{{ js .Mempool.WalPath }}"
|
||||
@@ -371,6 +376,22 @@ max_tx_bytes = {{ .Mempool.MaxTxBytes }}
|
||||
# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796
|
||||
max_batch_bytes = {{ .Mempool.MaxBatchBytes }}
|
||||
|
||||
# ttl-duration, if non-zero, defines the maximum amount of time a transaction
# can exist in the mempool.
#
# Note, if ttl-num-blocks is also defined, a transaction will be removed if it
# has existed in the mempool for at least ttl-num-blocks blocks or if its
# insertion time into the mempool is beyond ttl-duration.
|
||||
ttl-duration = "{{ .Mempool.TTLDuration }}"
|
||||
|
||||
# ttl-num-blocks, if non-zero, defines the maximum number of blocks a transaction
# can exist in the mempool.
#
# Note, if ttl-duration is also defined, a transaction will be removed if it
# has existed in the mempool for at least ttl-num-blocks blocks or if
# its insertion time into the mempool is beyond ttl-duration.
|
||||
ttl-num-blocks = {{ .Mempool.TTLNumBlocks }}
|
||||
|
||||
#######################################################
|
||||
### State Sync Configuration Options ###
|
||||
#######################################################
|
||||
@@ -459,6 +480,17 @@ create_empty_blocks_interval = "{{ .Consensus.CreateEmptyBlocksInterval }}"
|
||||
peer_gossip_sleep_duration = "{{ .Consensus.PeerGossipSleepDuration }}"
|
||||
peer_query_maj23_sleep_duration = "{{ .Consensus.PeerQueryMaj23SleepDuration }}"
|
||||
|
||||
#######################################################
|
||||
### Storage Configuration Options ###
|
||||
#######################################################
|
||||
[storage]
|
||||
|
||||
# Set to true to discard ABCI responses from the state store, which can save a
|
||||
# considerable amount of disk space. Set to false to ensure ABCI responses are
|
||||
# persisted. ABCI responses are required for /block_results RPC queries, and to
|
||||
# reindex events in the command-line tool.
|
||||
discard_abci_responses = {{ .Storage.DiscardABCIResponses}}
|
||||
|
||||
#######################################################
|
||||
### Transaction Indexer Configuration Options ###
|
||||
#######################################################
|
||||
@@ -473,8 +505,14 @@ peer_query_maj23_sleep_duration = "{{ .Consensus.PeerQueryMaj23SleepDuration }}"
|
||||
# 1) "null"
|
||||
# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
|
||||
# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed.
|
||||
# 3) "psql" - the indexer services backed by PostgreSQL.
|
||||
# When "kv" or "psql" is chosen "tx.height" and "tx.hash" will always be indexed.
|
||||
indexer = "{{ .TxIndex.Indexer }}"
|
||||
|
||||
# The PostgreSQL connection configuration, the connection format:
|
||||
# postgresql://<user>:<password>@<host>:<port>/<db>?<opts>
|
||||
psql-conn = "{{ .TxIndex.PsqlConn }}"
|
||||
|
||||
#######################################################
|
||||
### Instrumentation Configuration Options ###
|
||||
#######################################################
|
||||
@@ -506,7 +544,7 @@ func ResetTestRoot(testName string) *Config {
|
||||
|
||||
func ResetTestRootWithChainID(testName string, chainID string) *Config {
|
||||
// create a unique, concurrency-safe test directory under os.TempDir()
|
||||
rootDir, err := ioutil.TempDir("", fmt.Sprintf("%s-%s_", chainID, testName))
|
||||
rootDir, err := os.MkdirTemp("", fmt.Sprintf("%s-%s_", chainID, testName))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@@ -533,11 +571,11 @@ func ResetTestRootWithChainID(testName string, chainID string) *Config {
|
||||
chainID = "tendermint_test"
|
||||
}
|
||||
testGenesis := fmt.Sprintf(testGenesisFmt, chainID)
|
||||
tmos.MustWriteFile(genesisFilePath, []byte(testGenesis), 0644)
|
||||
tmos.MustWriteFile(genesisFilePath, []byte(testGenesis), 0o644)
|
||||
}
|
||||
// we always overwrite the priv val
|
||||
tmos.MustWriteFile(privKeyFilePath, []byte(testPrivValidatorKey), 0644)
|
||||
tmos.MustWriteFile(privStateFilePath, []byte(testPrivValidatorState), 0644)
|
||||
tmos.MustWriteFile(privKeyFilePath, []byte(testPrivValidatorKey), 0o644)
|
||||
tmos.MustWriteFile(privStateFilePath, []byte(testPrivValidatorState), 0o644)
|
||||
|
||||
config := TestConfig().SetRoot(rootDir)
|
||||
return config
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
@@ -23,7 +22,7 @@ func TestEnsureRoot(t *testing.T) {
|
||||
require := require.New(t)
|
||||
|
||||
// setup temp dir for test
|
||||
tmpDir, err := ioutil.TempDir("", "config-test")
|
||||
tmpDir, err := os.MkdirTemp("", "config-test")
|
||||
require.Nil(err)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
@@ -31,7 +30,7 @@ func TestEnsureRoot(t *testing.T) {
|
||||
EnsureRoot(tmpDir)
|
||||
|
||||
// make sure config is set properly
|
||||
data, err := ioutil.ReadFile(filepath.Join(tmpDir, defaultConfigFilePath))
|
||||
data, err := os.ReadFile(filepath.Join(tmpDir, defaultConfigFilePath))
|
||||
require.Nil(err)
|
||||
|
||||
if !checkConfig(string(data)) {
|
||||
@@ -52,7 +51,7 @@ func TestEnsureTestRoot(t *testing.T) {
|
||||
rootDir := cfg.RootDir
|
||||
|
||||
// make sure config is set properly
|
||||
data, err := ioutil.ReadFile(filepath.Join(rootDir, defaultConfigFilePath))
|
||||
data, err := os.ReadFile(filepath.Join(rootDir, defaultConfigFilePath))
|
||||
require.Nil(err)
|
||||
|
||||
if !checkConfig(string(data)) {
|
||||
@@ -68,7 +67,7 @@ func checkConfig(configFile string) bool {
|
||||
var valid bool
|
||||
|
||||
// list of words we expect in the config
|
||||
var elems = []string{
|
||||
elems := []string{
|
||||
"moniker",
|
||||
"seeds",
|
||||
"proxy_app",
|
||||
|
||||
@@ -1,3 +1,3 @@
|
||||
# Consensus
|
||||
|
||||
See the [consensus spec](https://github.com/tendermint/spec/tree/master/spec/consensus) and the [reactor consensus spec](https://github.com/tendermint/spec/tree/master/spec/reactors/consensus) for more information.
|
||||
See the [consensus spec](https://github.com/tendermint/tendermint/tree/v0.34.x/spec/consensus) and the [reactor consensus spec](https://github.com/tendermint/tendermint/tree/v0.34.x/spec/reactors/consensus) for more information.
|
||||
|
||||
@@ -21,7 +21,12 @@ import (
|
||||
"github.com/tendermint/tendermint/libs/service"
|
||||
tmsync "github.com/tendermint/tendermint/libs/sync"
|
||||
mempl "github.com/tendermint/tendermint/mempool"
|
||||
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
mempoolv0 "github.com/tendermint/tendermint/mempool/v0"
|
||||
mempoolv1 "github.com/tendermint/tendermint/mempool/v1"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus"
|
||||
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/store"
|
||||
@@ -46,7 +51,9 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
|
||||
for i := 0; i < nValidators; i++ {
|
||||
logger := consensusLogger().With("test", "byzantine", "validator", i)
|
||||
stateDB := dbm.NewMemDB() // each state needs its own db
|
||||
stateStore := sm.NewStore(stateDB)
|
||||
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
|
||||
DiscardABCIResponses: false,
|
||||
})
|
||||
state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc)
|
||||
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
|
||||
defer os.RemoveAll(thisConfig.RootDir)
|
||||
@@ -58,14 +65,31 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
|
||||
blockDB := dbm.NewMemDB()
|
||||
blockStore := store.NewBlockStore(blockDB)
|
||||
|
||||
// one for mempool, one for consensus
|
||||
mtx := new(tmsync.Mutex)
|
||||
proxyAppConnMem := abcicli.NewLocalClient(mtx, app)
|
||||
// one for mempool, one for consensus
|
||||
proxyAppConnCon := abcicli.NewLocalClient(mtx, app)
|
||||
proxyAppConnConMem := abcicli.NewLocalClient(mtx, app)
|
||||
|
||||
// Make Mempool
|
||||
mempool := mempl.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0)
|
||||
mempool.SetLogger(log.TestingLogger().With("module", "mempool"))
|
||||
var mempool mempl.Mempool
|
||||
|
||||
switch thisConfig.Mempool.Version {
|
||||
case cfg.MempoolV0:
|
||||
mempool = mempoolv0.NewCListMempool(config.Mempool,
|
||||
proxyAppConnConMem,
|
||||
state.LastBlockHeight,
|
||||
mempoolv0.WithPreCheck(sm.TxPreCheck(state)),
|
||||
mempoolv0.WithPostCheck(sm.TxPostCheck(state)))
|
||||
case cfg.MempoolV1:
|
||||
mempool = mempoolv1.NewTxMempool(logger,
|
||||
config.Mempool,
|
||||
proxyAppConnConMem,
|
||||
state.LastBlockHeight,
|
||||
mempoolv1.WithPreCheck(sm.TxPreCheck(state)),
|
||||
mempoolv1.WithPostCheck(sm.TxPostCheck(state)),
|
||||
)
|
||||
}
|
||||
|
||||
if thisConfig.Consensus.WaitForTxs() {
|
||||
mempool.EnableTxsAvailable()
|
||||
}
|
||||
@@ -142,10 +166,16 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
|
||||
for i, peer := range peerList {
|
||||
if i < len(peerList)/2 {
|
||||
bcs.Logger.Info("Signed and pushed vote", "vote", prevote1, "peer", peer)
|
||||
peer.Send(VoteChannel, MustEncode(&VoteMessage{prevote1}))
|
||||
p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
|
||||
Message: &tmcons.Vote{Vote: prevote1.ToProto()},
|
||||
ChannelID: VoteChannel,
|
||||
}, bcs.Logger)
|
||||
} else {
|
||||
bcs.Logger.Info("Signed and pushed vote", "vote", prevote2, "peer", peer)
|
||||
peer.Send(VoteChannel, MustEncode(&VoteMessage{prevote2}))
|
||||
p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
|
||||
Message: &tmcons.Vote{Vote: prevote2.ToProto()},
|
||||
ChannelID: VoteChannel,
|
||||
}, bcs.Logger)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
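Every replacement in this file follows the same shape: the legacy peer.Send(channel, MustEncode(msg)) call becomes a protobuf envelope handed to the shim. Below is a hedged sketch of that pattern with a hypothetical helper name; the peer, vote, and logger are assumed to be supplied by the caller, as in the surrounding test.

package consensus

import (
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/p2p"
	tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus"
	"github.com/tendermint/tendermint/types"
)

// sendVoteEnvelope is a hypothetical helper illustrating the envelope-based send:
// wrap the vote in its proto message and route it over the vote channel.
func sendVoteEnvelope(peer p2p.Peer, vote *types.Vote, logger log.Logger) {
	p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
		ChannelID: VoteChannel,
		Message:   &tmcons.Vote{Vote: vote.ToProto()},
	}, logger)
}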
@@ -399,7 +429,7 @@ func TestByzantineConflictingProposalsWithPartition(t *testing.T) {
|
||||
// wait for someone in the big partition (B) to make a block
|
||||
<-blocksSubs[ind2].Out()
|
||||
|
||||
t.Log("A block has been committed. Healing partition")
|
||||
t.Logf("A block has been committed. Healing partition")
|
||||
p2p.Connect2Switches(switches, ind0, ind1)
|
||||
p2p.Connect2Switches(switches, ind0, ind2)
|
||||
|
||||
@@ -425,8 +455,8 @@ func TestByzantineConflictingProposalsWithPartition(t *testing.T) {
|
||||
case <-done:
|
||||
case <-tick.C:
|
||||
for i, reactor := range reactors {
|
||||
t.Log(fmt.Sprintf("Consensus Reactor %v", i))
|
||||
t.Log(fmt.Sprintf("%v", reactor))
|
||||
t.Logf(fmt.Sprintf("Consensus Reactor %v", i))
|
||||
t.Logf(fmt.Sprintf("%v", reactor))
|
||||
}
|
||||
t.Fatalf("Timed out waiting for all validators to commit first block")
|
||||
}
|
||||
@@ -489,18 +519,26 @@ func sendProposalAndParts(
|
||||
parts *types.PartSet,
|
||||
) {
|
||||
// proposal
|
||||
msg := &ProposalMessage{Proposal: proposal}
|
||||
peer.Send(DataChannel, MustEncode(msg))
|
||||
p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
|
||||
ChannelID: DataChannel,
|
||||
Message: &tmcons.Proposal{Proposal: *proposal.ToProto()},
|
||||
}, cs.Logger)
|
||||
|
||||
// parts
|
||||
for i := 0; i < int(parts.Total()); i++ {
|
||||
part := parts.GetPart(i)
|
||||
msg := &BlockPartMessage{
|
||||
Height: height, // This tells peer that this part applies to us.
|
||||
Round: round, // This tells peer that this part applies to us.
|
||||
Part: part,
|
||||
pp, err := part.ToProto()
|
||||
if err != nil {
|
||||
panic(err) // TODO: wbanfield better error handling
|
||||
}
|
||||
peer.Send(DataChannel, MustEncode(msg))
|
||||
p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
|
||||
ChannelID: DataChannel,
|
||||
Message: &tmcons.BlockPart{
|
||||
Height: height, // This tells peer that this part applies to us.
|
||||
Round: round, // This tells peer that this part applies to us.
|
||||
Part: *pp,
|
||||
},
|
||||
}, cs.Logger)
|
||||
}
|
||||
|
||||
// votes
|
||||
@@ -508,9 +546,14 @@ func sendProposalAndParts(
|
||||
prevote, _ := cs.signVote(tmproto.PrevoteType, blockHash, parts.Header())
|
||||
precommit, _ := cs.signVote(tmproto.PrecommitType, blockHash, parts.Header())
|
||||
cs.mtx.Unlock()
|
||||
|
||||
peer.Send(VoteChannel, MustEncode(&VoteMessage{prevote}))
|
||||
peer.Send(VoteChannel, MustEncode(&VoteMessage{precommit}))
|
||||
p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
|
||||
ChannelID: VoteChannel,
|
||||
Message: &tmcons.Vote{Vote: prevote.ToProto()},
|
||||
}, cs.Logger)
|
||||
p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
|
||||
ChannelID: VoteChannel,
|
||||
Message: &tmcons.Vote{Vote: precommit.ToProto()},
|
||||
}, cs.Logger)
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
@@ -548,7 +591,10 @@ func (br *ByzantineReactor) AddPeer(peer p2p.Peer) {
|
||||
func (br *ByzantineReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
|
||||
br.reactor.RemovePeer(peer, reason)
|
||||
}
|
||||
func (br *ByzantineReactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {
|
||||
br.reactor.Receive(chID, peer, msgBytes)
|
||||
func (br *ByzantineReactor) ReceiveEnvelope(e p2p.Envelope) {
|
||||
br.reactor.ReceiveEnvelope(e)
|
||||
}
|
||||
func (br *ByzantineReactor) Receive(chID byte, p p2p.Peer, m []byte) {
|
||||
br.reactor.Receive(chID, p, m)
|
||||
}
|
||||
func (br *ByzantineReactor) InitPeer(peer p2p.Peer) p2p.Peer { return peer }
|
||||
|
||||
@@ -4,8 +4,8 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"sync"
|
||||
@@ -15,8 +15,6 @@ import (
|
||||
"github.com/go-kit/log/term"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"path"
|
||||
|
||||
dbm "github.com/tendermint/tm-db"
|
||||
|
||||
abcicli "github.com/tendermint/tendermint/abci/client"
|
||||
@@ -31,6 +29,8 @@ import (
|
||||
tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
|
||||
tmsync "github.com/tendermint/tendermint/libs/sync"
|
||||
mempl "github.com/tendermint/tendermint/mempool"
|
||||
mempoolv0 "github.com/tendermint/tendermint/mempool/v0"
|
||||
mempoolv1 "github.com/tendermint/tendermint/mempool/v1"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
"github.com/tendermint/tendermint/privval"
|
||||
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
|
||||
@@ -90,8 +90,8 @@ func newValidatorStub(privValidator types.PrivValidator, valIndex int32) *valida
|
||||
func (vs *validatorStub) signVote(
|
||||
voteType tmproto.SignedMsgType,
|
||||
hash []byte,
|
||||
header types.PartSetHeader) (*types.Vote, error) {
|
||||
|
||||
header types.PartSetHeader,
|
||||
) (*types.Vote, error) {
|
||||
pubKey, err := vs.PrivValidator.GetPubKey()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("can't get pubkey: %w", err)
|
||||
@@ -139,7 +139,8 @@ func signVotes(
|
||||
voteType tmproto.SignedMsgType,
|
||||
hash []byte,
|
||||
header types.PartSetHeader,
|
||||
vss ...*validatorStub) []*types.Vote {
|
||||
vss ...*validatorStub,
|
||||
) []*types.Vote {
|
||||
votes := make([]*types.Vote, len(vss))
|
||||
for i, vs := range vss {
|
||||
votes[i] = signVote(vs, voteType, hash, header)
|
||||
@@ -390,12 +391,34 @@ func newStateWithConfigAndBlockStore(
|
||||
|
||||
// one for mempool, one for consensus
|
||||
mtx := new(tmsync.Mutex)
|
||||
proxyAppConnMem := abcicli.NewLocalClient(mtx, app)
|
||||
|
||||
proxyAppConnCon := abcicli.NewLocalClient(mtx, app)
|
||||
proxyAppConnConMem := abcicli.NewLocalClient(mtx, app)
|
||||
// Make Mempool
|
||||
memplMetrics := mempl.NopMetrics()
|
||||
|
||||
// Make Mempool
|
||||
mempool := mempl.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0)
|
||||
mempool.SetLogger(log.TestingLogger().With("module", "mempool"))
|
||||
var mempool mempl.Mempool
|
||||
|
||||
switch config.Mempool.Version {
|
||||
case cfg.MempoolV0:
|
||||
mempool = mempoolv0.NewCListMempool(config.Mempool,
|
||||
proxyAppConnConMem,
|
||||
state.LastBlockHeight,
|
||||
mempoolv0.WithMetrics(memplMetrics),
|
||||
mempoolv0.WithPreCheck(sm.TxPreCheck(state)),
|
||||
mempoolv0.WithPostCheck(sm.TxPostCheck(state)))
|
||||
case cfg.MempoolV1:
|
||||
logger := consensusLogger()
|
||||
mempool = mempoolv1.NewTxMempool(logger,
|
||||
config.Mempool,
|
||||
proxyAppConnConMem,
|
||||
state.LastBlockHeight,
|
||||
mempoolv1.WithMetrics(memplMetrics),
|
||||
mempoolv1.WithPreCheck(sm.TxPreCheck(state)),
|
||||
mempoolv1.WithPostCheck(sm.TxPostCheck(state)),
|
||||
)
|
||||
}
|
||||
if thisConfig.Consensus.WaitForTxs() {
|
||||
mempool.EnableTxsAvailable()
|
||||
}
|
||||
@@ -404,7 +427,9 @@ func newStateWithConfigAndBlockStore(
|
||||
|
||||
// Make State
|
||||
stateDB := blockDB
|
||||
stateStore := sm.NewStore(stateDB)
|
||||
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
|
||||
DiscardABCIResponses: false,
|
||||
})
|
||||
if err := stateStore.Save(state); err != nil { // for save height 1's validators info
|
||||
panic(err)
|
||||
}
|
||||
@@ -426,7 +451,7 @@ func newStateWithConfigAndBlockStore(
|
||||
|
||||
func loadPrivValidator(config *cfg.Config) *privval.FilePV {
|
||||
privValidatorKeyFile := config.PrivValidatorKeyFile()
|
||||
ensureDir(filepath.Dir(privValidatorKeyFile), 0700)
|
||||
ensureDir(filepath.Dir(privValidatorKeyFile), 0o700)
|
||||
privValidatorStateFile := config.PrivValidatorStateFile()
|
||||
privValidator := privval.LoadOrGenFilePV(privValidatorKeyFile, privValidatorStateFile)
|
||||
privValidator.Reset()
|
||||
@@ -453,7 +478,8 @@ func randState(nValidators int) (*State, []*validatorStub) {
|
||||
//-------------------------------------------------------------------------------
|
||||
|
||||
func ensureNoNewEvent(ch <-chan tmpubsub.Message, timeout time.Duration,
|
||||
errorMessage string) {
|
||||
errorMessage string,
|
||||
) {
|
||||
select {
|
||||
case <-time.After(timeout):
|
||||
break
|
||||
@@ -631,7 +657,8 @@ func ensurePrevote(voteCh <-chan tmpubsub.Message, height int64, round int32) {
|
||||
}
|
||||
|
||||
func ensureVote(voteCh <-chan tmpubsub.Message, height int64, round int32,
|
||||
voteType tmproto.SignedMsgType) {
|
||||
voteType tmproto.SignedMsgType,
|
||||
) {
|
||||
select {
|
||||
case <-time.After(ensureTimeout):
|
||||
panic("Timeout expired while waiting for NewVote event")
|
||||
@@ -687,21 +714,24 @@ func consensusLogger() log.Logger {
|
||||
}
|
||||
|
||||
func randConsensusNet(nValidators int, testName string, tickerFunc func() TimeoutTicker,
|
||||
appFunc func() abci.Application, configOpts ...func(*cfg.Config)) ([]*State, cleanupFunc) {
|
||||
appFunc func() abci.Application, configOpts ...func(*cfg.Config),
|
||||
) ([]*State, cleanupFunc) {
|
||||
genDoc, privVals := randGenesisDoc(nValidators, false, 30)
|
||||
css := make([]*State, nValidators)
|
||||
logger := consensusLogger()
|
||||
configRootDirs := make([]string, 0, nValidators)
|
||||
for i := 0; i < nValidators; i++ {
|
||||
stateDB := dbm.NewMemDB() // each state needs its own db
|
||||
stateStore := sm.NewStore(stateDB)
|
||||
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
|
||||
DiscardABCIResponses: false,
|
||||
})
|
||||
state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc)
|
||||
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
|
||||
configRootDirs = append(configRootDirs, thisConfig.RootDir)
|
||||
for _, opt := range configOpts {
|
||||
opt(thisConfig)
|
||||
}
|
||||
ensureDir(filepath.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
|
||||
ensureDir(filepath.Dir(thisConfig.Consensus.WalFile()), 0o700) // dir for wal
|
||||
app := appFunc()
|
||||
vals := types.TM2PB.ValidatorUpdates(state.Validators)
|
||||
app.InitChain(abci.RequestInitChain{Validators: vals})
|
||||
@@ -732,11 +762,13 @@ func randConsensusNetWithPeers(
|
||||
configRootDirs := make([]string, 0, nPeers)
|
||||
for i := 0; i < nPeers; i++ {
|
||||
stateDB := dbm.NewMemDB() // each state needs its own db
|
||||
stateStore := sm.NewStore(stateDB)
|
||||
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
|
||||
DiscardABCIResponses: false,
|
||||
})
|
||||
state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc)
|
||||
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
|
||||
configRootDirs = append(configRootDirs, thisConfig.RootDir)
|
||||
ensureDir(filepath.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
|
||||
ensureDir(filepath.Dir(thisConfig.Consensus.WalFile()), 0o700) // dir for wal
|
||||
if i == 0 {
|
||||
peer0Config = thisConfig
|
||||
}
|
||||
@@ -744,11 +776,11 @@ func randConsensusNetWithPeers(
|
||||
if i < nValidators {
|
||||
privVal = privVals[i]
|
||||
} else {
|
||||
tempKeyFile, err := ioutil.TempFile("", "priv_validator_key_")
|
||||
tempKeyFile, err := os.CreateTemp("", "priv_validator_key_")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
tempStateFile, err := ioutil.TempFile("", "priv_validator_state_")
|
||||
tempStateFile, err := os.CreateTemp("", "priv_validator_state_")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@@ -870,7 +902,7 @@ func newCounter() abci.Application {
|
||||
}
|
||||
|
||||
func newPersistentKVStore() abci.Application {
|
||||
dir, err := ioutil.TempDir("", "persistent-kvstore")
|
||||
dir, err := os.MkdirTemp("", "persistent-kvstore")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmrand "github.com/tendermint/tendermint/libs/rand"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus"
|
||||
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
@@ -94,7 +95,10 @@ func invalidDoPrevoteFunc(t *testing.T, height int64, round int32, cs *State, sw
|
||||
peers := sw.Peers().List()
|
||||
for _, peer := range peers {
|
||||
cs.Logger.Info("Sending bad vote", "block", blockHash, "peer", peer)
|
||||
peer.Send(VoteChannel, MustEncode(&VoteMessage{precommit}))
|
||||
p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
|
||||
Message: &tmcons.Vote{Vote: precommit.ToProto()},
|
||||
ChannelID: VoteChannel,
|
||||
}, cs.Logger)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
@@ -113,7 +113,7 @@ func deliverTxsRange(cs *State, start, end int) {
|
||||
func TestMempoolTxConcurrentWithCommit(t *testing.T) {
|
||||
state, privVals := randGenesisState(1, false, 10)
|
||||
blockDB := dbm.NewMemDB()
|
||||
stateStore := sm.NewStore(blockDB)
|
||||
stateStore := sm.NewStore(blockDB, sm.StoreOptions{DiscardABCIResponses: false})
|
||||
cs := newStateWithConfigAndBlockStore(config, state, privVals[0], NewCounterApplication(), blockDB)
|
||||
err := stateStore.Save(state)
|
||||
require.NoError(t, err)
|
||||
@@ -138,7 +138,7 @@ func TestMempoolRmBadTx(t *testing.T) {
|
||||
state, privVals := randGenesisState(1, false, 10)
|
||||
app := NewCounterApplication()
|
||||
blockDB := dbm.NewMemDB()
|
||||
stateStore := sm.NewStore(blockDB)
|
||||
stateStore := sm.NewStore(blockDB, sm.StoreOptions{DiscardABCIResponses: false})
|
||||
cs := newStateWithConfigAndBlockStore(config, state, privVals[0], app, blockDB)
|
||||
err := stateStore.Save(state)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -5,7 +5,6 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
|
||||
cstypes "github.com/tendermint/tendermint/consensus/types"
|
||||
"github.com/tendermint/tendermint/libs/bits"
|
||||
tmmath "github.com/tendermint/tendermint/libs/math"
|
||||
@@ -15,173 +14,155 @@ import (
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
// MsgToProto takes a consensus message type and returns the proto defined consensus message
|
||||
// MsgToProto takes a consensus message type and returns the proto defined consensus message.
|
||||
//
|
||||
// TODO: This needs to be removed, but WALToProto depends on this.
|
||||
func MsgToProto(msg Message) (*tmcons.Message, error) {
|
||||
if msg == nil {
|
||||
return nil, errors.New("consensus: message is nil")
|
||||
}
|
||||
var pb tmcons.Message
|
||||
|
||||
switch msg := msg.(type) {
|
||||
case *NewRoundStepMessage:
|
||||
pb = tmcons.Message{
|
||||
Sum: &tmcons.Message_NewRoundStep{
|
||||
NewRoundStep: &tmcons.NewRoundStep{
|
||||
Height: msg.Height,
|
||||
Round: msg.Round,
|
||||
Step: uint32(msg.Step),
|
||||
SecondsSinceStartTime: msg.SecondsSinceStartTime,
|
||||
LastCommitRound: msg.LastCommitRound,
|
||||
},
|
||||
},
|
||||
m := &tmcons.NewRoundStep{
|
||||
Height: msg.Height,
|
||||
Round: msg.Round,
|
||||
Step: uint32(msg.Step),
|
||||
SecondsSinceStartTime: msg.SecondsSinceStartTime,
|
||||
LastCommitRound: msg.LastCommitRound,
|
||||
}
|
||||
return m.Wrap().(*tmcons.Message), nil
|
||||
|
||||
case *NewValidBlockMessage:
|
||||
pbPartSetHeader := msg.BlockPartSetHeader.ToProto()
|
||||
pbBits := msg.BlockParts.ToProto()
|
||||
pb = tmcons.Message{
|
||||
Sum: &tmcons.Message_NewValidBlock{
|
||||
NewValidBlock: &tmcons.NewValidBlock{
|
||||
Height: msg.Height,
|
||||
Round: msg.Round,
|
||||
BlockPartSetHeader: pbPartSetHeader,
|
||||
BlockParts: pbBits,
|
||||
IsCommit: msg.IsCommit,
|
||||
},
|
||||
},
|
||||
m := &tmcons.NewValidBlock{
|
||||
Height: msg.Height,
|
||||
Round: msg.Round,
|
||||
BlockPartSetHeader: pbPartSetHeader,
|
||||
BlockParts: pbBits,
|
||||
IsCommit: msg.IsCommit,
|
||||
}
|
||||
return m.Wrap().(*tmcons.Message), nil
|
||||
|
||||
case *ProposalMessage:
|
||||
pbP := msg.Proposal.ToProto()
|
||||
pb = tmcons.Message{
|
||||
Sum: &tmcons.Message_Proposal{
|
||||
Proposal: &tmcons.Proposal{
|
||||
Proposal: *pbP,
|
||||
},
|
||||
},
|
||||
m := &tmcons.Proposal{
|
||||
Proposal: *pbP,
|
||||
}
|
||||
return m.Wrap().(*tmcons.Message), nil
|
||||
|
||||
case *ProposalPOLMessage:
|
||||
pbBits := msg.ProposalPOL.ToProto()
|
||||
pb = tmcons.Message{
|
||||
Sum: &tmcons.Message_ProposalPol{
|
||||
ProposalPol: &tmcons.ProposalPOL{
|
||||
Height: msg.Height,
|
||||
ProposalPolRound: msg.ProposalPOLRound,
|
||||
ProposalPol: *pbBits,
|
||||
},
|
||||
},
|
||||
m := &tmcons.ProposalPOL{
|
||||
Height: msg.Height,
|
||||
ProposalPolRound: msg.ProposalPOLRound,
|
||||
ProposalPol: *pbBits,
|
||||
}
|
||||
return m.Wrap().(*tmcons.Message), nil
|
||||
|
||||
case *BlockPartMessage:
|
||||
parts, err := msg.Part.ToProto()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("msg to proto error: %w", err)
|
||||
}
|
||||
pb = tmcons.Message{
|
||||
Sum: &tmcons.Message_BlockPart{
|
||||
BlockPart: &tmcons.BlockPart{
|
||||
Height: msg.Height,
|
||||
Round: msg.Round,
|
||||
Part: *parts,
|
||||
},
|
||||
},
|
||||
m := &tmcons.BlockPart{
|
||||
Height: msg.Height,
|
||||
Round: msg.Round,
|
||||
Part: *parts,
|
||||
}
|
||||
return m.Wrap().(*tmcons.Message), nil
|
||||
|
||||
case *VoteMessage:
|
||||
vote := msg.Vote.ToProto()
|
||||
pb = tmcons.Message{
|
||||
Sum: &tmcons.Message_Vote{
|
||||
Vote: &tmcons.Vote{
|
||||
Vote: vote,
|
||||
},
|
||||
},
|
||||
m := &tmcons.Vote{
|
||||
Vote: vote,
|
||||
}
|
||||
return m.Wrap().(*tmcons.Message), nil
|
||||
|
||||
case *HasVoteMessage:
|
||||
pb = tmcons.Message{
|
||||
Sum: &tmcons.Message_HasVote{
|
||||
HasVote: &tmcons.HasVote{
|
||||
Height: msg.Height,
|
||||
Round: msg.Round,
|
||||
Type: msg.Type,
|
||||
Index: msg.Index,
|
||||
},
|
||||
},
|
||||
m := &tmcons.HasVote{
|
||||
Height: msg.Height,
|
||||
Round: msg.Round,
|
||||
Type: msg.Type,
|
||||
Index: msg.Index,
|
||||
}
|
||||
return m.Wrap().(*tmcons.Message), nil
|
||||
|
||||
case *VoteSetMaj23Message:
|
||||
bi := msg.BlockID.ToProto()
|
||||
pb = tmcons.Message{
|
||||
Sum: &tmcons.Message_VoteSetMaj23{
|
||||
VoteSetMaj23: &tmcons.VoteSetMaj23{
|
||||
Height: msg.Height,
|
||||
Round: msg.Round,
|
||||
Type: msg.Type,
|
||||
BlockID: bi,
|
||||
},
|
||||
},
|
||||
m := &tmcons.VoteSetMaj23{
|
||||
Height: msg.Height,
|
||||
Round: msg.Round,
|
||||
Type: msg.Type,
|
||||
BlockID: bi,
|
||||
}
|
||||
return m.Wrap().(*tmcons.Message), nil
|
||||
|
||||
case *VoteSetBitsMessage:
|
||||
bi := msg.BlockID.ToProto()
|
||||
bits := msg.Votes.ToProto()
|
||||
|
||||
vsb := &tmcons.Message_VoteSetBits{
|
||||
VoteSetBits: &tmcons.VoteSetBits{
|
||||
Height: msg.Height,
|
||||
Round: msg.Round,
|
||||
Type: msg.Type,
|
||||
BlockID: bi,
|
||||
},
|
||||
m := &tmcons.VoteSetBits{
|
||||
Height: msg.Height,
|
||||
Round: msg.Round,
|
||||
Type: msg.Type,
|
||||
BlockID: bi,
|
||||
}
|
||||
|
||||
if bits != nil {
|
||||
vsb.VoteSetBits.Votes = *bits
|
||||
m.Votes = *bits
|
||||
}
|
||||
|
||||
pb = tmcons.Message{
|
||||
Sum: vsb,
|
||||
}
|
||||
return m.Wrap().(*tmcons.Message), nil
|
||||
|
||||
default:
|
||||
return nil, fmt.Errorf("consensus: message not recognized: %T", msg)
|
||||
}
|
||||
|
||||
return &pb, nil
|
||||
}
|
||||
|
||||
// MsgFromProto takes a consensus proto message and returns the native go type
|
||||
func MsgFromProto(msg *tmcons.Message) (Message, error) {
|
||||
if msg == nil {
|
||||
func MsgFromProto(p *tmcons.Message) (Message, error) {
|
||||
if p == nil {
|
||||
return nil, errors.New("consensus: nil message")
|
||||
}
|
||||
var pb Message
|
||||
um, err := p.Unwrap()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch msg := msg.Sum.(type) {
|
||||
case *tmcons.Message_NewRoundStep:
|
||||
rs, err := tmmath.SafeConvertUint8(int64(msg.NewRoundStep.Step))
|
||||
switch msg := um.(type) {
|
||||
case *tmcons.NewRoundStep:
|
||||
rs, err := tmmath.SafeConvertUint8(int64(msg.Step))
|
||||
// deny message based on possible overflow
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("denying message due to possible overflow: %w", err)
|
||||
}
|
||||
pb = &NewRoundStepMessage{
|
||||
Height: msg.NewRoundStep.Height,
|
||||
Round: msg.NewRoundStep.Round,
|
||||
Height: msg.Height,
|
||||
Round: msg.Round,
|
||||
Step: cstypes.RoundStepType(rs),
|
||||
SecondsSinceStartTime: msg.NewRoundStep.SecondsSinceStartTime,
|
||||
LastCommitRound: msg.NewRoundStep.LastCommitRound,
|
||||
SecondsSinceStartTime: msg.SecondsSinceStartTime,
|
||||
LastCommitRound: msg.LastCommitRound,
|
||||
}
|
||||
case *tmcons.Message_NewValidBlock:
|
||||
pbPartSetHeader, err := types.PartSetHeaderFromProto(&msg.NewValidBlock.BlockPartSetHeader)
|
||||
case *tmcons.NewValidBlock:
|
||||
pbPartSetHeader, err := types.PartSetHeaderFromProto(&msg.BlockPartSetHeader)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("parts to proto error: %w", err)
|
||||
}
|
||||
|
||||
pbBits := new(bits.BitArray)
|
||||
pbBits.FromProto(msg.NewValidBlock.BlockParts)
|
||||
pbBits.FromProto(msg.BlockParts)
|
||||
|
||||
pb = &NewValidBlockMessage{
|
||||
Height: msg.NewValidBlock.Height,
|
||||
Round: msg.NewValidBlock.Round,
|
||||
Height: msg.Height,
|
||||
Round: msg.Round,
|
||||
BlockPartSetHeader: *pbPartSetHeader,
|
||||
BlockParts: pbBits,
|
||||
IsCommit: msg.NewValidBlock.IsCommit,
|
||||
IsCommit: msg.IsCommit,
|
||||
}
|
||||
case *tmcons.Message_Proposal:
|
||||
pbP, err := types.ProposalFromProto(&msg.Proposal.Proposal)
|
||||
case *tmcons.Proposal:
|
||||
pbP, err := types.ProposalFromProto(&msg.Proposal)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("proposal msg to proto error: %w", err)
|
||||
}
|
||||
@@ -189,26 +170,26 @@ func MsgFromProto(msg *tmcons.Message) (Message, error) {
|
||||
pb = &ProposalMessage{
|
||||
Proposal: pbP,
|
||||
}
|
||||
case *tmcons.Message_ProposalPol:
|
||||
case *tmcons.ProposalPOL:
|
||||
pbBits := new(bits.BitArray)
|
||||
pbBits.FromProto(&msg.ProposalPol.ProposalPol)
|
||||
pbBits.FromProto(&msg.ProposalPol)
|
||||
pb = &ProposalPOLMessage{
|
||||
Height: msg.ProposalPol.Height,
|
||||
ProposalPOLRound: msg.ProposalPol.ProposalPolRound,
|
||||
Height: msg.Height,
|
||||
ProposalPOLRound: msg.ProposalPolRound,
|
||||
ProposalPOL: pbBits,
|
||||
}
|
||||
case *tmcons.Message_BlockPart:
|
||||
parts, err := types.PartFromProto(&msg.BlockPart.Part)
|
||||
case *tmcons.BlockPart:
|
||||
parts, err := types.PartFromProto(&msg.Part)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("blockpart msg to proto error: %w", err)
|
||||
}
|
||||
pb = &BlockPartMessage{
|
||||
Height: msg.BlockPart.Height,
|
||||
Round: msg.BlockPart.Round,
|
||||
Height: msg.Height,
|
||||
Round: msg.Round,
|
||||
Part: parts,
|
||||
}
|
||||
case *tmcons.Message_Vote:
|
||||
vote, err := types.VoteFromProto(msg.Vote.Vote)
|
||||
case *tmcons.Vote:
|
||||
vote, err := types.VoteFromProto(msg.Vote)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("vote msg to proto error: %w", err)
|
||||
}
|
||||
@@ -216,36 +197,36 @@ func MsgFromProto(msg *tmcons.Message) (Message, error) {
|
||||
pb = &VoteMessage{
|
||||
Vote: vote,
|
||||
}
|
||||
case *tmcons.Message_HasVote:
|
||||
case *tmcons.HasVote:
|
||||
pb = &HasVoteMessage{
|
||||
Height: msg.HasVote.Height,
|
||||
Round: msg.HasVote.Round,
|
||||
Type: msg.HasVote.Type,
|
||||
Index: msg.HasVote.Index,
|
||||
Height: msg.Height,
|
||||
Round: msg.Round,
|
||||
Type: msg.Type,
|
||||
Index: msg.Index,
|
||||
}
|
||||
case *tmcons.Message_VoteSetMaj23:
|
||||
bi, err := types.BlockIDFromProto(&msg.VoteSetMaj23.BlockID)
|
||||
case *tmcons.VoteSetMaj23:
|
||||
bi, err := types.BlockIDFromProto(&msg.BlockID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("voteSetMaj23 msg to proto error: %w", err)
|
||||
}
|
||||
pb = &VoteSetMaj23Message{
|
||||
Height: msg.VoteSetMaj23.Height,
|
||||
Round: msg.VoteSetMaj23.Round,
|
||||
Type: msg.VoteSetMaj23.Type,
|
||||
Height: msg.Height,
|
||||
Round: msg.Round,
|
||||
Type: msg.Type,
|
||||
BlockID: *bi,
|
||||
}
|
||||
case *tmcons.Message_VoteSetBits:
|
||||
bi, err := types.BlockIDFromProto(&msg.VoteSetBits.BlockID)
|
||||
case *tmcons.VoteSetBits:
|
||||
bi, err := types.BlockIDFromProto(&msg.BlockID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("voteSetBits msg to proto error: %w", err)
|
||||
}
|
||||
bits := new(bits.BitArray)
|
||||
bits.FromProto(&msg.VoteSetBits.Votes)
|
||||
bits.FromProto(&msg.Votes)
|
||||
|
||||
pb = &VoteSetBitsMessage{
|
||||
Height: msg.VoteSetBits.Height,
|
||||
Round: msg.VoteSetBits.Round,
|
||||
Type: msg.VoteSetBits.Type,
|
||||
Height: msg.Height,
|
||||
Round: msg.Round,
|
||||
Type: msg.Type,
|
||||
BlockID: *bi,
|
||||
Votes: bits,
|
||||
}
|
||||
@@ -262,6 +243,8 @@ func MsgFromProto(msg *tmcons.Message) (Message, error) {
|
||||
|
||||
// MustEncode takes the reactors msg, makes it proto and marshals it
|
||||
// this mimics `MustMarshalBinaryBare` in that is panics on error
|
||||
//
|
||||
// Deprecated: Will be removed in v0.37.
|
||||
func MustEncode(msg Message) []byte {
|
||||
pb, err := MsgToProto(msg)
|
||||
if err != nil {
|
||||
|
||||
@@ -80,17 +80,15 @@ func TestMsgToProto(t *testing.T) {
|
||||
Step: 1,
|
||||
SecondsSinceStartTime: 1,
|
||||
LastCommitRound: 2,
|
||||
}, &tmcons.Message{
|
||||
Sum: &tmcons.Message_NewRoundStep{
|
||||
NewRoundStep: &tmcons.NewRoundStep{
|
||||
Height: 2,
|
||||
Round: 1,
|
||||
Step: 1,
|
||||
SecondsSinceStartTime: 1,
|
||||
LastCommitRound: 2,
|
||||
},
|
||||
},
|
||||
}, false},
|
||||
}, (&tmcons.NewRoundStep{
|
||||
Height: 2,
|
||||
Round: 1,
|
||||
Step: 1,
|
||||
SecondsSinceStartTime: 1,
|
||||
LastCommitRound: 2,
|
||||
}).Wrap().(*tmcons.Message),
|
||||
|
||||
false},
|
||||
|
||||
{"successful NewValidBlockMessage", &NewValidBlockMessage{
|
||||
Height: 1,
|
||||
@@ -98,92 +96,78 @@ func TestMsgToProto(t *testing.T) {
|
||||
BlockPartSetHeader: psh,
|
||||
BlockParts: bits,
|
||||
IsCommit: false,
|
||||
}, &tmcons.Message{
|
||||
Sum: &tmcons.Message_NewValidBlock{
|
||||
NewValidBlock: &tmcons.NewValidBlock{
|
||||
Height: 1,
|
||||
Round: 1,
|
||||
BlockPartSetHeader: pbPsh,
|
||||
BlockParts: pbBits,
|
||||
IsCommit: false,
|
||||
},
|
||||
},
|
||||
}, false},
|
||||
}, (&tmcons.NewValidBlock{
|
||||
Height: 1,
|
||||
Round: 1,
|
||||
BlockPartSetHeader: pbPsh,
|
||||
BlockParts: pbBits,
|
||||
IsCommit: false,
|
||||
}).Wrap().(*tmcons.Message),
|
||||
|
||||
false},
|
||||
{"successful BlockPartMessage", &BlockPartMessage{
|
||||
Height: 100,
|
||||
Round: 1,
|
||||
Part: &parts,
|
||||
}, &tmcons.Message{
|
||||
Sum: &tmcons.Message_BlockPart{
|
||||
BlockPart: &tmcons.BlockPart{
|
||||
Height: 100,
|
||||
Round: 1,
|
||||
Part: *pbParts,
|
||||
},
|
||||
},
|
||||
}, false},
|
||||
}, (&tmcons.BlockPart{
|
||||
Height: 100,
|
||||
Round: 1,
|
||||
Part: *pbParts,
|
||||
}).Wrap().(*tmcons.Message),
|
||||
|
||||
false},
|
||||
{"successful ProposalPOLMessage", &ProposalPOLMessage{
|
||||
Height: 1,
|
||||
ProposalPOLRound: 1,
|
||||
ProposalPOL: bits,
|
||||
}, &tmcons.Message{
|
||||
Sum: &tmcons.Message_ProposalPol{
|
||||
ProposalPol: &tmcons.ProposalPOL{
|
||||
Height: 1,
|
||||
ProposalPolRound: 1,
|
||||
ProposalPol: *pbBits,
|
||||
},
|
||||
}}, false},
|
||||
}, (&tmcons.ProposalPOL{
|
||||
Height: 1,
|
||||
ProposalPolRound: 1,
|
||||
ProposalPol: *pbBits,
|
||||
}).Wrap().(*tmcons.Message),
|
||||
false},
|
||||
{"successful ProposalMessage", &ProposalMessage{
|
||||
Proposal: &proposal,
|
||||
}, &tmcons.Message{
|
||||
Sum: &tmcons.Message_Proposal{
|
||||
Proposal: &tmcons.Proposal{
|
||||
Proposal: *pbProposal,
|
||||
},
|
||||
},
|
||||
}, false},
|
||||
}, (&tmcons.Proposal{
|
||||
Proposal: *pbProposal,
|
||||
}).Wrap().(*tmcons.Message),
|
||||
|
||||
false},
|
||||
{"successful VoteMessage", &VoteMessage{
|
||||
Vote: vote,
|
||||
}, &tmcons.Message{
|
||||
Sum: &tmcons.Message_Vote{
|
||||
Vote: &tmcons.Vote{
|
||||
Vote: pbVote,
|
||||
},
|
||||
},
|
||||
}, false},
|
||||
}, (&tmcons.Vote{
|
||||
Vote: pbVote,
|
||||
}).Wrap().(*tmcons.Message),
|
||||
|
||||
false},
|
||||
{"successful VoteSetMaj23", &VoteSetMaj23Message{
|
||||
Height: 1,
|
||||
Round: 1,
|
||||
Type: 1,
|
||||
BlockID: bi,
|
||||
}, &tmcons.Message{
|
||||
Sum: &tmcons.Message_VoteSetMaj23{
|
||||
VoteSetMaj23: &tmcons.VoteSetMaj23{
|
||||
Height: 1,
|
||||
Round: 1,
|
||||
Type: 1,
|
||||
BlockID: pbBi,
|
||||
},
|
||||
},
|
||||
}, false},
|
||||
}, (&tmcons.VoteSetMaj23{
|
||||
Height: 1,
|
||||
Round: 1,
|
||||
Type: 1,
|
||||
BlockID: pbBi,
|
||||
}).Wrap().(*tmcons.Message),
|
||||
|
||||
false},
|
||||
{"successful VoteSetBits", &VoteSetBitsMessage{
|
||||
Height: 1,
|
||||
Round: 1,
|
||||
Type: 1,
|
||||
BlockID: bi,
|
||||
Votes: bits,
|
||||
}, &tmcons.Message{
|
||||
Sum: &tmcons.Message_VoteSetBits{
|
||||
VoteSetBits: &tmcons.VoteSetBits{
|
||||
Height: 1,
|
||||
Round: 1,
|
||||
Type: 1,
|
||||
BlockID: pbBi,
|
||||
Votes: *pbBits,
|
||||
},
|
||||
},
|
||||
}, false},
|
||||
}, (&tmcons.VoteSetBits{
|
||||
Height: 1,
|
||||
Round: 1,
|
||||
Type: 1,
|
||||
BlockID: pbBi,
|
||||
Votes: *pbBits,
|
||||
}).Wrap().(*tmcons.Message),
|
||||
|
||||
false},
|
||||
{"failure", nil, &tmcons.Message{}, true},
|
||||
}
|
||||
for _, tt := range testsCases {
|
||||
@@ -314,7 +298,7 @@ func TestWALMsgProto(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// nolint:lll //ignore line length for tests
|
||||
//nolint:lll //ignore line length for tests
|
||||
func TestConsMsgsVectors(t *testing.T) {
|
||||
date := time.Date(2018, 8, 30, 12, 0, 0, 0, time.UTC)
|
||||
psh := types.PartSetHeader{
|
||||
|
||||
@@ -8,7 +8,6 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
|
||||
cstypes "github.com/tendermint/tendermint/consensus/types"
|
||||
"github.com/tendermint/tendermint/libs/bits"
|
||||
tmevents "github.com/tendermint/tendermint/libs/events"
|
||||
@@ -46,6 +45,7 @@ type Reactor struct {
|
||||
mtx tmsync.RWMutex
|
||||
waitSync bool
|
||||
eventBus *types.EventBus
|
||||
rs *cstypes.RoundState
|
||||
|
||||
Metrics *Metrics
|
||||
}
|
||||
@@ -58,6 +58,7 @@ func NewReactor(consensusState *State, waitSync bool, options ...ReactorOption)
|
||||
conR := &Reactor{
|
||||
conS: consensusState,
|
||||
waitSync: waitSync,
|
||||
rs: consensusState.GetRoundState(),
|
||||
Metrics: NopMetrics(),
|
||||
}
|
||||
conR.BaseReactor = *p2p.NewBaseReactor("Consensus", conR)
|
||||
@@ -78,6 +79,7 @@ func (conR *Reactor) OnStart() error {
|
||||
go conR.peerStatsRoutine()
|
||||
|
||||
conR.subscribeToBroadcastEvents()
|
||||
go conR.updateRoundStateRoutine()
|
||||
|
||||
if !conR.WaitSync() {
|
||||
err := conR.conS.Start()
|
||||
@@ -145,6 +147,7 @@ func (conR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
|
||||
Priority: 6,
|
||||
SendQueueCapacity: 100,
|
||||
RecvMessageCapacity: maxMsgSize,
|
||||
MessageType: &tmcons.Message{},
|
||||
},
|
||||
{
|
||||
ID: DataChannel, // maybe split between gossiping current block and catchup stuff
|
||||
@@ -153,6 +156,7 @@ func (conR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
|
||||
SendQueueCapacity: 100,
|
||||
RecvBufferCapacity: 50 * 4096,
|
||||
RecvMessageCapacity: maxMsgSize,
|
||||
MessageType: &tmcons.Message{},
|
||||
},
|
||||
{
|
||||
ID: VoteChannel,
|
||||
@@ -160,6 +164,7 @@ func (conR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
|
||||
SendQueueCapacity: 100,
|
||||
RecvBufferCapacity: 100 * 100,
|
||||
RecvMessageCapacity: maxMsgSize,
|
||||
MessageType: &tmcons.Message{},
|
||||
},
|
||||
{
|
||||
ID: VoteSetBitsChannel,
|
||||
@@ -167,6 +172,7 @@ func (conR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
|
||||
SendQueueCapacity: 2,
|
||||
RecvBufferCapacity: 1024,
|
||||
RecvMessageCapacity: maxMsgSize,
|
||||
MessageType: &tmcons.Message{},
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -220,34 +226,37 @@ func (conR *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) {
|
||||
// Peer state updates can happen in parallel, but processing of
|
||||
// proposals, block parts, and votes are ordered by the receiveRoutine
|
||||
// NOTE: blocks on consensus state for proposals, block parts, and votes
|
||||
func (conR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
|
||||
func (conR *Reactor) ReceiveEnvelope(e p2p.Envelope) {
|
||||
if !conR.IsRunning() {
|
||||
conR.Logger.Debug("Receive", "src", src, "chId", chID, "bytes", msgBytes)
|
||||
conR.Logger.Debug("Receive", "src", e.Src, "chId", e.ChannelID)
|
||||
return
|
||||
}
|
||||
|
||||
msg, err := decodeMsg(msgBytes)
|
||||
m := e.Message
|
||||
if wm, ok := m.(p2p.Wrapper); ok {
|
||||
m = wm.Wrap()
|
||||
}
|
||||
msg, err := MsgFromProto(m.(*tmcons.Message))
|
||||
if err != nil {
|
||||
conR.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err)
|
||||
conR.Switch.StopPeerForError(src, err)
|
||||
conR.Logger.Error("Error decoding message", "src", e.Src, "chId", e.ChannelID, "err", err)
|
||||
conR.Switch.StopPeerForError(e.Src, err)
|
||||
return
|
||||
}
|
||||
|
||||
if err = msg.ValidateBasic(); err != nil {
|
||||
conR.Logger.Error("Peer sent us invalid msg", "peer", src, "msg", msg, "err", err)
|
||||
conR.Switch.StopPeerForError(src, err)
|
||||
conR.Logger.Error("Peer sent us invalid msg", "peer", e.Src, "msg", e.Message, "err", err)
|
||||
conR.Switch.StopPeerForError(e.Src, err)
|
||||
return
|
||||
}
|
||||
|
||||
conR.Logger.Debug("Receive", "src", src, "chId", chID, "msg", msg)
|
||||
conR.Logger.Debug("Receive", "src", e.Src, "chId", e.ChannelID, "msg", msg)
|
||||
|
||||
// Get peer states
|
||||
ps, ok := src.Get(types.PeerStateKey).(*PeerState)
|
||||
ps, ok := e.Src.Get(types.PeerStateKey).(*PeerState)
|
||||
if !ok {
|
||||
panic(fmt.Sprintf("Peer %v has no state", src))
|
||||
panic(fmt.Sprintf("Peer %v has no state", e.Src))
|
||||
}
|
||||
|
||||
switch chID {
|
||||
switch e.ChannelID {
|
||||
case StateChannel:
|
||||
switch msg := msg.(type) {
|
||||
case *NewRoundStepMessage:
|
||||
@@ -255,8 +264,8 @@ func (conR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
|
||||
initialHeight := conR.conS.state.InitialHeight
|
||||
conR.conS.mtx.Unlock()
|
||||
if err = msg.ValidateHeight(initialHeight); err != nil {
|
||||
conR.Logger.Error("Peer sent us invalid msg", "peer", src, "msg", msg, "err", err)
|
||||
conR.Switch.StopPeerForError(src, err)
|
||||
conR.Logger.Error("Peer sent us invalid msg", "peer", e.Src, "msg", msg, "err", err)
|
||||
conR.Switch.StopPeerForError(e.Src, err)
|
||||
return
|
||||
}
|
||||
ps.ApplyNewRoundStepMessage(msg)
|
||||
@@ -275,7 +284,7 @@ func (conR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
|
||||
// Peer claims to have a maj23 for some BlockID at H,R,S,
|
||||
err := votes.SetPeerMaj23(msg.Round, msg.Type, ps.peer.ID(), msg.BlockID)
|
||||
if err != nil {
|
||||
conR.Switch.StopPeerForError(src, err)
|
||||
conR.Switch.StopPeerForError(e.Src, err)
|
||||
return
|
||||
}
|
||||
// Respond with a VoteSetBitsMessage showing which votes we have.
|
||||
@@ -289,13 +298,19 @@ func (conR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
|
||||
default:
|
||||
panic("Bad VoteSetBitsMessage field Type. Forgot to add a check in ValidateBasic?")
|
||||
}
|
||||
src.TrySend(VoteSetBitsChannel, MustEncode(&VoteSetBitsMessage{
|
||||
eMsg := &tmcons.VoteSetBits{
|
||||
Height: msg.Height,
|
||||
Round: msg.Round,
|
||||
Type: msg.Type,
|
||||
BlockID: msg.BlockID,
|
||||
Votes: ourVotes,
|
||||
}))
|
||||
BlockID: msg.BlockID.ToProto(),
|
||||
}
|
||||
if votes := ourVotes.ToProto(); votes != nil {
|
||||
eMsg.Votes = *votes
|
||||
}
|
||||
p2p.TrySendEnvelopeShim(e.Src, p2p.Envelope{ //nolint: staticcheck
|
||||
ChannelID: VoteSetBitsChannel,
|
||||
Message: eMsg,
|
||||
}, conR.Logger)
|
||||
default:
|
||||
conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
|
||||
}
|
||||
@@ -308,13 +323,13 @@ func (conR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
|
||||
switch msg := msg.(type) {
|
||||
case *ProposalMessage:
|
||||
ps.SetHasProposal(msg.Proposal)
|
||||
conR.conS.peerMsgQueue <- msgInfo{msg, src.ID()}
|
||||
conR.conS.peerMsgQueue <- msgInfo{msg, e.Src.ID()}
|
||||
case *ProposalPOLMessage:
|
||||
ps.ApplyProposalPOLMessage(msg)
|
||||
case *BlockPartMessage:
|
||||
ps.SetHasProposalBlockPart(msg.Height, msg.Round, int(msg.Part.Index))
|
||||
conR.Metrics.BlockParts.With("peer_id", string(src.ID())).Add(1)
|
||||
conR.conS.peerMsgQueue <- msgInfo{msg, src.ID()}
|
||||
conR.Metrics.BlockParts.With("peer_id", string(e.Src.ID())).Add(1)
|
||||
conR.conS.peerMsgQueue <- msgInfo{msg, e.Src.ID()}
|
||||
default:
|
||||
conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
|
||||
}
|
||||
@@ -334,7 +349,7 @@ func (conR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
|
||||
ps.EnsureVoteBitArrays(height-1, lastCommitSize)
|
||||
ps.SetHasVote(msg.Vote)
|
||||
|
||||
cs.peerMsgQueue <- msgInfo{msg, src.ID()}
|
||||
cs.peerMsgQueue <- msgInfo{msg, e.Src.ID()}
|
||||
|
||||
default:
|
||||
// don't punish (leave room for soft upgrades)
|
||||
@@ -373,10 +388,27 @@ func (conR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
|
||||
}
|
||||
|
||||
default:
|
||||
conR.Logger.Error(fmt.Sprintf("Unknown chId %X", chID))
|
||||
conR.Logger.Error(fmt.Sprintf("Unknown chId %X", e.ChannelID))
|
||||
}
|
||||
}
|
||||
|
||||
func (conR *Reactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {
|
||||
msg := &tmcons.Message{}
|
||||
err := proto.Unmarshal(msgBytes, msg)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
uw, err := msg.Unwrap()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
conR.ReceiveEnvelope(p2p.Envelope{
|
||||
ChannelID: chID,
|
||||
Src: peer,
|
||||
Message: uw,
|
||||
})
|
||||
}
|
||||
|
||||
// SetEventBus sets event bus.
|
||||
func (conR *Reactor) SetEventBus(b *types.EventBus) {
|
||||
conR.eventBus = b
|
||||
@@ -427,29 +459,39 @@ func (conR *Reactor) unsubscribeFromBroadcastEvents() {
|
||||
|
||||
func (conR *Reactor) broadcastNewRoundStepMessage(rs *cstypes.RoundState) {
|
||||
nrsMsg := makeRoundStepMessage(rs)
|
||||
conR.Switch.Broadcast(StateChannel, MustEncode(nrsMsg))
|
||||
conR.Switch.BroadcastEnvelope(p2p.Envelope{
|
||||
ChannelID: StateChannel,
|
||||
Message: nrsMsg,
|
||||
})
|
||||
}
|
||||
|
||||
func (conR *Reactor) broadcastNewValidBlockMessage(rs *cstypes.RoundState) {
|
||||
csMsg := &NewValidBlockMessage{
|
||||
psh := rs.ProposalBlockParts.Header()
|
||||
csMsg := &tmcons.NewValidBlock{
|
||||
Height: rs.Height,
|
||||
Round: rs.Round,
|
||||
BlockPartSetHeader: rs.ProposalBlockParts.Header(),
|
||||
BlockParts: rs.ProposalBlockParts.BitArray(),
|
||||
BlockPartSetHeader: psh.ToProto(),
|
||||
BlockParts: rs.ProposalBlockParts.BitArray().ToProto(),
|
||||
IsCommit: rs.Step == cstypes.RoundStepCommit,
|
||||
}
|
||||
conR.Switch.Broadcast(StateChannel, MustEncode(csMsg))
|
||||
conR.Switch.BroadcastEnvelope(p2p.Envelope{
|
||||
ChannelID: StateChannel,
|
||||
Message: csMsg,
|
||||
})
|
||||
}
|
||||
|
||||
// Broadcasts HasVoteMessage to peers that care.
|
||||
func (conR *Reactor) broadcastHasVoteMessage(vote *types.Vote) {
|
||||
msg := &HasVoteMessage{
|
||||
msg := &tmcons.HasVote{
|
||||
Height: vote.Height,
|
||||
Round: vote.Round,
|
||||
Type: vote.Type,
|
||||
Index: vote.ValidatorIndex,
|
||||
}
|
||||
conR.Switch.Broadcast(StateChannel, MustEncode(msg))
|
||||
conR.Switch.BroadcastEnvelope(p2p.Envelope{
|
||||
ChannelID: StateChannel,
|
||||
Message: msg,
|
||||
})
|
||||
/*
|
||||
// TODO: Make this broadcast more selective.
|
||||
for _, peer := range conR.Switch.Peers().List() {
|
||||
@@ -460,7 +502,11 @@ func (conR *Reactor) broadcastHasVoteMessage(vote *types.Vote) {
|
||||
prs := ps.GetRoundState()
|
||||
if prs.Height == vote.Height {
|
||||
// TODO: Also filter on round?
|
||||
peer.TrySend(StateChannel, struct{ ConsensusMessage }{msg})
|
||||
e := p2p.Envelope{
|
||||
ChannelID: StateChannel, struct{ ConsensusMessage }{msg},
|
||||
Message: p,
|
||||
}
|
||||
p2p.TrySendEnvelopeShim(peer, e) //nolint: staticcheck
|
||||
} else {
|
||||
// Height doesn't match
|
||||
// TODO: check a field, maybe CatchupCommitRound?
|
||||
@@ -470,11 +516,11 @@ func (conR *Reactor) broadcastHasVoteMessage(vote *types.Vote) {
|
||||
*/
|
||||
}
|
||||
|
||||
func makeRoundStepMessage(rs *cstypes.RoundState) (nrsMsg *NewRoundStepMessage) {
|
||||
nrsMsg = &NewRoundStepMessage{
|
||||
func makeRoundStepMessage(rs *cstypes.RoundState) (nrsMsg *tmcons.NewRoundStep) {
|
||||
nrsMsg = &tmcons.NewRoundStep{
|
||||
Height: rs.Height,
|
||||
Round: rs.Round,
|
||||
Step: rs.Step,
|
||||
Step: uint32(rs.Step),
|
||||
SecondsSinceStartTime: int64(time.Since(rs.StartTime).Seconds()),
|
||||
LastCommitRound: rs.LastCommit.GetRound(),
|
||||
}
|
||||
@@ -482,9 +528,32 @@ func makeRoundStepMessage(rs *cstypes.RoundState) (nrsMsg *NewRoundStepMessage)
|
||||
}
|
||||
|
||||
func (conR *Reactor) sendNewRoundStepMessage(peer p2p.Peer) {
|
||||
rs := conR.conS.GetRoundState()
|
||||
rs := conR.getRoundState()
|
||||
nrsMsg := makeRoundStepMessage(rs)
|
||||
peer.Send(StateChannel, MustEncode(nrsMsg))
|
||||
p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
|
||||
ChannelID: StateChannel,
|
||||
Message: nrsMsg,
|
||||
}, conR.Logger)
|
||||
}
|
||||
|
||||
func (conR *Reactor) updateRoundStateRoutine() {
|
||||
t := time.NewTicker(100 * time.Microsecond)
|
||||
defer t.Stop()
|
||||
for range t.C {
|
||||
if !conR.IsRunning() {
|
||||
return
|
||||
}
|
||||
rs := conR.conS.GetRoundState()
|
||||
conR.mtx.Lock()
|
||||
conR.rs = rs
|
||||
conR.mtx.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
func (conR *Reactor) getRoundState() *cstypes.RoundState {
|
||||
conR.mtx.RLock()
|
||||
defer conR.mtx.RUnlock()
|
||||
return conR.rs
|
||||
}
|
||||
|
||||
func (conR *Reactor) gossipDataRoutine(peer p2p.Peer, ps *PeerState) {
|
||||
@@ -496,20 +565,26 @@ OUTER_LOOP:
|
||||
if !peer.IsRunning() || !conR.IsRunning() {
|
||||
return
|
||||
}
|
||||
rs := conR.conS.GetRoundState()
|
||||
rs := conR.getRoundState()
|
||||
prs := ps.GetRoundState()
|
||||
|
||||
// Send proposal Block parts?
|
||||
if rs.ProposalBlockParts.HasHeader(prs.ProposalBlockPartSetHeader) {
|
||||
if index, ok := rs.ProposalBlockParts.BitArray().Sub(prs.ProposalBlockParts.Copy()).PickRandom(); ok {
|
||||
part := rs.ProposalBlockParts.GetPart(index)
|
||||
msg := &BlockPartMessage{
|
||||
Height: rs.Height, // This tells peer that this part applies to us.
|
||||
Round: rs.Round, // This tells peer that this part applies to us.
|
||||
Part: part,
|
||||
parts, err := part.ToProto()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
logger.Debug("Sending block part", "height", prs.Height, "round", prs.Round)
|
||||
if peer.Send(DataChannel, MustEncode(msg)) {
|
||||
if p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
|
||||
ChannelID: DataChannel,
|
||||
Message: &tmcons.BlockPart{
|
||||
Height: rs.Height, // This tells peer that this part applies to us.
|
||||
Round: rs.Round, // This tells peer that this part applies to us.
|
||||
Part: *parts,
|
||||
},
|
||||
}, logger) {
|
||||
ps.SetHasProposalBlockPart(prs.Height, prs.Round, index)
|
||||
}
|
||||
continue OUTER_LOOP
|
||||
@@ -555,9 +630,11 @@ OUTER_LOOP:
|
||||
if rs.Proposal != nil && !prs.Proposal {
|
||||
// Proposal: share the proposal metadata with peer.
|
||||
{
|
||||
msg := &ProposalMessage{Proposal: rs.Proposal}
|
||||
logger.Debug("Sending proposal", "height", prs.Height, "round", prs.Round)
|
||||
if peer.Send(DataChannel, MustEncode(msg)) {
|
||||
if p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
|
||||
ChannelID: DataChannel,
|
||||
Message: &tmcons.Proposal{Proposal: *rs.Proposal.ToProto()},
|
||||
}, logger) {
|
||||
// NOTE[ZM]: A peer might have received different proposal msg so this Proposal msg will be rejected!
|
||||
ps.SetHasProposal(rs.Proposal)
|
||||
}
|
||||
@@ -567,13 +644,15 @@ OUTER_LOOP:
|
||||
// rs.Proposal was validated, so rs.Proposal.POLRound <= rs.Round,
|
||||
// so we definitely have rs.Votes.Prevotes(rs.Proposal.POLRound).
|
||||
if 0 <= rs.Proposal.POLRound {
|
||||
msg := &ProposalPOLMessage{
|
||||
Height: rs.Height,
|
||||
ProposalPOLRound: rs.Proposal.POLRound,
|
||||
ProposalPOL: rs.Votes.Prevotes(rs.Proposal.POLRound).BitArray(),
|
||||
}
|
||||
logger.Debug("Sending POL", "height", prs.Height, "round", prs.Round)
|
||||
peer.Send(DataChannel, MustEncode(msg))
|
||||
p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
|
||||
ChannelID: DataChannel,
|
||||
Message: &tmcons.ProposalPOL{
|
||||
Height: rs.Height,
|
||||
ProposalPolRound: rs.Proposal.POLRound,
|
||||
ProposalPol: *rs.Votes.Prevotes(rs.Proposal.POLRound).BitArray().ToProto(),
|
||||
},
|
||||
}, logger)
|
||||
}
|
||||
continue OUTER_LOOP
|
||||
}
|
||||
@@ -610,13 +689,20 @@ func (conR *Reactor) gossipDataForCatchup(logger log.Logger, rs *cstypes.RoundSt
|
||||
return
|
||||
}
|
||||
// Send the part
|
||||
msg := &BlockPartMessage{
|
||||
Height: prs.Height, // Not our height, so it doesn't matter.
|
||||
Round: prs.Round, // Not our height, so it doesn't matter.
|
||||
Part: part,
|
||||
}
|
||||
logger.Debug("Sending block part for catchup", "round", prs.Round, "index", index)
|
||||
if peer.Send(DataChannel, MustEncode(msg)) {
|
||||
pp, err := part.ToProto()
|
||||
if err != nil {
|
||||
logger.Error("Could not convert part to proto", "index", index, "error", err)
|
||||
return
|
||||
}
|
||||
if p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
|
||||
ChannelID: DataChannel,
|
||||
Message: &tmcons.BlockPart{
|
||||
Height: prs.Height, // Not our height, so it doesn't matter.
|
||||
Round: prs.Round, // Not our height, so it doesn't matter.
|
||||
Part: *pp,
|
||||
},
|
||||
}, logger) {
|
||||
ps.SetHasProposalBlockPart(prs.Height, prs.Round, index)
|
||||
} else {
|
||||
logger.Debug("Sending block part for catchup failed")
|
||||
@@ -639,7 +725,7 @@ OUTER_LOOP:
|
||||
if !peer.IsRunning() || !conR.IsRunning() {
|
||||
return
|
||||
}
|
||||
rs := conR.conS.GetRoundState()
|
||||
rs := conR.getRoundState()
|
||||
prs := ps.GetRoundState()
|
||||
|
||||
switch sleeping {
|
||||
@@ -771,16 +857,20 @@ OUTER_LOOP:
|
||||
|
||||
// Maybe send Height/Round/Prevotes
|
||||
{
|
||||
rs := conR.conS.GetRoundState()
|
||||
rs := conR.getRoundState()
|
||||
prs := ps.GetRoundState()
|
||||
if rs.Height == prs.Height {
|
||||
if maj23, ok := rs.Votes.Prevotes(prs.Round).TwoThirdsMajority(); ok {
|
||||
peer.TrySend(StateChannel, MustEncode(&VoteSetMaj23Message{
|
||||
Height: prs.Height,
|
||||
Round: prs.Round,
|
||||
Type: tmproto.PrevoteType,
|
||||
BlockID: maj23,
|
||||
}))
|
||||
|
||||
p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
|
||||
ChannelID: StateChannel,
|
||||
Message: &tmcons.VoteSetMaj23{
|
||||
Height: prs.Height,
|
||||
Round: prs.Round,
|
||||
Type: tmproto.PrevoteType,
|
||||
BlockID: maj23.ToProto(),
|
||||
},
|
||||
}, ps.logger)
|
||||
time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
|
||||
}
|
||||
}
|
||||
@@ -788,16 +878,19 @@ OUTER_LOOP:
|
||||
|
||||
// Maybe send Height/Round/Precommits
|
||||
{
|
||||
rs := conR.conS.GetRoundState()
|
||||
rs := conR.getRoundState()
|
||||
prs := ps.GetRoundState()
|
||||
if rs.Height == prs.Height {
|
||||
if maj23, ok := rs.Votes.Precommits(prs.Round).TwoThirdsMajority(); ok {
|
||||
peer.TrySend(StateChannel, MustEncode(&VoteSetMaj23Message{
|
||||
Height: prs.Height,
|
||||
Round: prs.Round,
|
||||
Type: tmproto.PrecommitType,
|
||||
BlockID: maj23,
|
||||
}))
|
||||
p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
|
||||
ChannelID: StateChannel,
|
||||
Message: &tmcons.VoteSetMaj23{
|
||||
Height: prs.Height,
|
||||
Round: prs.Round,
|
||||
Type: tmproto.PrecommitType,
|
||||
BlockID: maj23.ToProto(),
|
||||
},
|
||||
}, ps.logger)
|
||||
time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
|
||||
}
|
||||
}
|
||||
@@ -805,16 +898,20 @@ OUTER_LOOP:
|
||||
|
||||
// Maybe send Height/Round/ProposalPOL
|
||||
{
|
||||
rs := conR.conS.GetRoundState()
|
||||
rs := conR.getRoundState()
|
||||
prs := ps.GetRoundState()
|
||||
if rs.Height == prs.Height && prs.ProposalPOLRound >= 0 {
|
||||
if maj23, ok := rs.Votes.Prevotes(prs.ProposalPOLRound).TwoThirdsMajority(); ok {
|
||||
peer.TrySend(StateChannel, MustEncode(&VoteSetMaj23Message{
|
||||
Height: prs.Height,
|
||||
Round: prs.ProposalPOLRound,
|
||||
Type: tmproto.PrevoteType,
|
||||
BlockID: maj23,
|
||||
}))
|
||||
|
||||
p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
|
||||
ChannelID: StateChannel,
|
||||
Message: &tmcons.VoteSetMaj23{
|
||||
Height: prs.Height,
|
||||
Round: prs.ProposalPOLRound,
|
||||
Type: tmproto.PrevoteType,
|
||||
BlockID: maj23.ToProto(),
|
||||
},
|
||||
}, ps.logger)
|
||||
time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
|
||||
}
|
||||
}
|
||||
@@ -829,12 +926,15 @@ OUTER_LOOP:
|
||||
if prs.CatchupCommitRound != -1 && prs.Height > 0 && prs.Height <= conR.conS.blockStore.Height() &&
|
||||
prs.Height >= conR.conS.blockStore.Base() {
|
||||
if commit := conR.conS.LoadCommit(prs.Height); commit != nil {
|
||||
peer.TrySend(StateChannel, MustEncode(&VoteSetMaj23Message{
|
||||
Height: prs.Height,
|
||||
Round: commit.Round,
|
||||
Type: tmproto.PrecommitType,
|
||||
BlockID: commit.BlockID,
|
||||
}))
|
||||
p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
|
||||
ChannelID: StateChannel,
|
||||
Message: &tmcons.VoteSetMaj23{
|
||||
Height: prs.Height,
|
||||
Round: commit.Round,
|
||||
Type: tmproto.PrecommitType,
|
||||
BlockID: commit.BlockID.ToProto(),
|
||||
},
|
||||
}, ps.logger)
|
||||
time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
|
||||
}
|
||||
}
|
||||
@@ -1048,9 +1148,13 @@ func (ps *PeerState) SetHasProposalBlockPart(height int64, round int32, index in
|
||||
// Returns true if vote was sent.
|
||||
func (ps *PeerState) PickSendVote(votes types.VoteSetReader) bool {
|
||||
if vote, ok := ps.PickVoteToSend(votes); ok {
|
||||
msg := &VoteMessage{vote}
|
||||
ps.logger.Debug("Sending vote message", "ps", ps, "vote", vote)
|
||||
if ps.peer.Send(VoteChannel, MustEncode(msg)) {
|
||||
if p2p.SendEnvelopeShim(ps.peer, p2p.Envelope{ //nolint: staticcheck
|
||||
ChannelID: VoteChannel,
|
||||
Message: &tmcons.Vote{
|
||||
Vote: vote.ToProto(),
|
||||
},
|
||||
}, ps.logger) {
|
||||
ps.SetHasVote(vote)
|
||||
return true
|
||||
}
|
||||
@@ -1242,12 +1346,12 @@ func (ps *PeerState) SetHasVote(vote *types.Vote) {
|
||||
}
|
||||
|
||||
func (ps *PeerState) setHasVote(height int64, round int32, voteType tmproto.SignedMsgType, index int32) {
|
||||
logger := ps.logger.With(
|
||||
ps.logger.Debug("setHasVote",
|
||||
"peerH/R",
|
||||
fmt.Sprintf("%d/%d", ps.PRS.Height, ps.PRS.Round),
|
||||
log.NewLazySprintf("%d/%d", ps.PRS.Height, ps.PRS.Round),
|
||||
"H/R",
|
||||
fmt.Sprintf("%d/%d", height, round))
|
||||
logger.Debug("setHasVote", "type", voteType, "index", index)
|
||||
log.NewLazySprintf("%d/%d", height, round),
|
||||
"type", voteType, "index", index)
|
||||
|
||||
// NOTE: some may be nil BitArrays -> no side effects.
|
||||
psVotes := ps.getVoteBitArray(height, round, voteType)
|
||||
@@ -1416,15 +1520,6 @@ func init() {
|
||||
tmjson.RegisterType(&VoteSetBitsMessage{}, "tendermint/VoteSetBits")
|
||||
}
|
||||
|
||||
func decodeMsg(bz []byte) (msg Message, err error) {
|
||||
pb := &tmcons.Message{}
|
||||
if err = proto.Unmarshal(bz, pb); err != nil {
|
||||
return msg, err
|
||||
}
|
||||
|
||||
return MsgFromProto(pb)
|
||||
}
|
||||
|
||||
//-------------------------------------
|
||||
|
||||
// NewRoundStepMessage is sent for every step taken in the ConsensusState.
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
@@ -29,8 +30,11 @@ import (
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmsync "github.com/tendermint/tendermint/libs/sync"
|
||||
mempl "github.com/tendermint/tendermint/mempool"
|
||||
mempoolv0 "github.com/tendermint/tendermint/mempool/v0"
|
||||
mempoolv1 "github.com/tendermint/tendermint/mempool/v1"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
p2pmock "github.com/tendermint/tendermint/p2p/mock"
|
||||
tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus"
|
||||
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
statemocks "github.com/tendermint/tendermint/state/mocks"
|
||||
@@ -136,7 +140,9 @@ func TestReactorWithEvidence(t *testing.T) {
|
||||
logger := consensusLogger()
|
||||
for i := 0; i < nValidators; i++ {
|
||||
stateDB := dbm.NewMemDB() // each state needs its own db
|
||||
stateStore := sm.NewStore(stateDB)
|
||||
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
|
||||
DiscardABCIResponses: false,
|
||||
})
|
||||
state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc)
|
||||
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
|
||||
defer os.RemoveAll(thisConfig.RootDir)
|
||||
@@ -152,14 +158,33 @@ func TestReactorWithEvidence(t *testing.T) {
|
||||
blockDB := dbm.NewMemDB()
|
||||
blockStore := store.NewBlockStore(blockDB)
|
||||
|
||||
// one for mempool, one for consensus
|
||||
mtx := new(tmsync.Mutex)
|
||||
proxyAppConnMem := abcicli.NewLocalClient(mtx, app)
|
||||
memplMetrics := mempl.NopMetrics()
|
||||
// one for mempool, one for consensus
|
||||
proxyAppConnCon := abcicli.NewLocalClient(mtx, app)
|
||||
proxyAppConnConMem := abcicli.NewLocalClient(mtx, app)
|
||||
|
||||
// Make Mempool
|
||||
mempool := mempl.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0)
|
||||
mempool.SetLogger(log.TestingLogger().With("module", "mempool"))
|
||||
var mempool mempl.Mempool
|
||||
|
||||
switch config.Mempool.Version {
|
||||
case cfg.MempoolV0:
|
||||
mempool = mempoolv0.NewCListMempool(config.Mempool,
|
||||
proxyAppConnConMem,
|
||||
state.LastBlockHeight,
|
||||
mempoolv0.WithMetrics(memplMetrics),
|
||||
mempoolv0.WithPreCheck(sm.TxPreCheck(state)),
|
||||
mempoolv0.WithPostCheck(sm.TxPostCheck(state)))
|
||||
case cfg.MempoolV1:
|
||||
mempool = mempoolv1.NewTxMempool(logger,
|
||||
config.Mempool,
|
||||
proxyAppConnConMem,
|
||||
state.LastBlockHeight,
|
||||
mempoolv1.WithMetrics(memplMetrics),
|
||||
mempoolv1.WithPreCheck(sm.TxPreCheck(state)),
|
||||
mempoolv1.WithPostCheck(sm.TxPostCheck(state)),
|
||||
)
|
||||
}
|
||||
if thisConfig.Consensus.WaitForTxs() {
|
||||
mempool.EnableTxsAvailable()
|
||||
}
|
||||
@@ -231,7 +256,7 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {
|
||||
}, css)
|
||||
}
|
||||
|
||||
func TestReactorReceiveDoesNotPanicIfAddPeerHasntBeenCalledYet(t *testing.T) {
|
||||
func TestLegacyReactorReceiveBasicIfAddPeerHasntBeenCalledYet(t *testing.T) {
|
||||
N := 1
|
||||
css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
|
||||
defer cleanup()
|
||||
@@ -241,13 +266,45 @@ func TestReactorReceiveDoesNotPanicIfAddPeerHasntBeenCalledYet(t *testing.T) {
|
||||
var (
|
||||
reactor = reactors[0]
|
||||
peer = p2pmock.NewPeer(nil)
|
||||
msg = MustEncode(&HasVoteMessage{Height: 1,
|
||||
Round: 1, Index: 1, Type: tmproto.PrevoteType})
|
||||
)
|
||||
|
||||
reactor.InitPeer(peer)
|
||||
|
||||
// simulate switch calling Receive before AddPeer
|
||||
assert.NotPanics(t, func() {
|
||||
reactor.ReceiveEnvelope(p2p.Envelope{
|
||||
ChannelID: StateChannel,
|
||||
Src: peer,
|
||||
Message: &tmcons.HasVote{Height: 1,
|
||||
Round: 1, Index: 1, Type: tmproto.PrevoteType},
|
||||
})
|
||||
reactor.AddPeer(peer)
|
||||
})
|
||||
}
|
||||
|
||||
func TestLegacyReactorReceiveBasic(t *testing.T) {
|
||||
N := 1
|
||||
css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
|
||||
defer cleanup()
|
||||
reactors, _, eventBuses := startConsensusNet(t, css, N)
|
||||
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
|
||||
|
||||
var (
|
||||
reactor = reactors[0]
|
||||
peer = p2pmock.NewPeer(nil)
|
||||
)
|
||||
|
||||
reactor.InitPeer(peer)
|
||||
v := &tmcons.HasVote{
|
||||
Height: 1,
|
||||
Round: 1,
|
||||
Index: 1,
|
||||
Type: tmproto.PrevoteType,
|
||||
}
|
||||
w := v.Wrap()
|
||||
msg, err := proto.Marshal(w)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.NotPanics(t, func() {
|
||||
reactor.Receive(StateChannel, peer, msg)
|
||||
reactor.AddPeer(peer)
|
||||
@@ -264,15 +321,18 @@ func TestReactorReceivePanicsIfInitPeerHasntBeenCalledYet(t *testing.T) {
|
||||
var (
|
||||
reactor = reactors[0]
|
||||
peer = p2pmock.NewPeer(nil)
|
||||
msg = MustEncode(&HasVoteMessage{Height: 1,
|
||||
Round: 1, Index: 1, Type: tmproto.PrevoteType})
|
||||
)
|
||||
|
||||
// we should call InitPeer here
|
||||
|
||||
// simulate switch calling Receive before AddPeer
|
||||
assert.Panics(t, func() {
|
||||
reactor.Receive(StateChannel, peer, msg)
|
||||
reactor.ReceiveEnvelope(p2p.Envelope{
|
||||
ChannelID: StateChannel,
|
||||
Src: peer,
|
||||
Message: &tmcons.HasVote{Height: 1,
|
||||
Round: 1, Index: 1, Type: tmproto.PrevoteType},
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
@@ -668,7 +728,7 @@ func capture() {
|
||||
// Ensure basic validation of structs is functioning
|
||||
|
||||
func TestNewRoundStepMessageValidateBasic(t *testing.T) {
|
||||
testCases := []struct { // nolint: maligned
|
||||
testCases := []struct {
|
||||
expectErr bool
|
||||
messageRound int32
|
||||
messageLastCommitRound int32
|
||||
@@ -707,7 +767,7 @@ func TestNewRoundStepMessageValidateBasic(t *testing.T) {
|
||||
|
||||
func TestNewRoundStepMessageValidateHeight(t *testing.T) {
|
||||
initialHeight := int64(10)
|
||||
testCases := []struct { // nolint: maligned
|
||||
testCases := []struct { //nolint: maligned
|
||||
expectErr bool
|
||||
messageLastCommitRound int32
|
||||
messageHeight int64
|
||||
@@ -857,7 +917,7 @@ func TestHasVoteMessageValidateBasic(t *testing.T) {
|
||||
invalidSignedMsgType tmproto.SignedMsgType = 0x03
|
||||
)
|
||||
|
||||
testCases := []struct { // nolint: maligned
|
||||
testCases := []struct { //nolint: maligned
|
||||
expectErr bool
|
||||
messageRound int32
|
||||
messageIndex int32
|
||||
@@ -902,7 +962,7 @@ func TestVoteSetMaj23MessageValidateBasic(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
testCases := []struct { // nolint: maligned
|
||||
testCases := []struct { //nolint: maligned
|
||||
expectErr bool
|
||||
messageRound int32
|
||||
messageHeight int64
|
||||
|
||||
@@ -418,7 +418,7 @@ func (h *Handshaker) ReplayBlocks(
|
||||
|
||||
case appBlockHeight == storeBlockHeight:
|
||||
// We ran Commit, but didn't save the state, so replayBlock with mock app.
|
||||
abciResponses, err := h.stateStore.LoadABCIResponses(storeBlockHeight)
|
||||
abciResponses, err := h.stateStore.LoadLastABCIResponse(storeBlockHeight)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -297,7 +297,9 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo
|
||||
if err != nil {
|
||||
tmos.Exit(err.Error())
|
||||
}
|
||||
stateStore := sm.NewStore(stateDB)
|
||||
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
|
||||
DiscardABCIResponses: false,
|
||||
})
|
||||
gdoc, err := sm.MakeGenesisDocFromFile(config.GenesisFile())
|
||||
if err != nil {
|
||||
tmos.Exit(err.Error())
|
||||
|
||||
@@ -15,12 +15,18 @@ type emptyMempool struct{}
|
||||
|
||||
var _ mempl.Mempool = emptyMempool{}
|
||||
|
||||
func (emptyMempool) Lock() {}
|
||||
func (emptyMempool) Unlock() {}
|
||||
func (emptyMempool) Size() int { return 0 }
|
||||
func (emptyMempool) Lock() {}
|
||||
func (emptyMempool) Unlock() {}
|
||||
func (emptyMempool) Size() int { return 0 }
|
||||
func (emptyMempool) SizeBytes() int64 { return 0 }
|
||||
func (emptyMempool) CheckTx(_ types.Tx, _ func(*abci.Response), _ mempl.TxInfo) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (txmp emptyMempool) RemoveTxByKey(txKey types.TxKey) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (emptyMempool) ReapMaxBytesMaxGas(_, _ int64) types.Txs { return types.Txs{} }
|
||||
func (emptyMempool) ReapMaxTxs(n int) types.Txs { return types.Txs{} }
|
||||
func (emptyMempool) Update(
|
||||
|
||||
@@ -5,7 +5,6 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
@@ -65,7 +64,8 @@ func TestMain(m *testing.M) {
|
||||
// wal writer when we need to, instead of with every message.
|
||||
|
||||
func startNewStateAndWaitForBlock(t *testing.T, consensusReplayConfig *cfg.Config,
|
||||
lastBlockHeight int64, blockDB dbm.DB, stateStore sm.Store) {
|
||||
lastBlockHeight int64, blockDB dbm.DB, stateStore sm.Store,
|
||||
) {
|
||||
logger := log.TestingLogger()
|
||||
state, _ := stateStore.LoadFromDBOrGenesisFile(consensusReplayConfig.GenesisFile())
|
||||
privValidator := loadPrivValidator(consensusReplayConfig)
|
||||
@@ -78,7 +78,7 @@ func startNewStateAndWaitForBlock(t *testing.T, consensusReplayConfig *cfg.Confi
|
||||
)
|
||||
cs.SetLogger(logger)
|
||||
|
||||
bytes, _ := ioutil.ReadFile(cs.config.WalFile())
|
||||
bytes, _ := os.ReadFile(cs.config.WalFile())
|
||||
t.Logf("====== WAL: \n\r%X\n", bytes)
|
||||
|
||||
err := cs.Start()
|
||||
@@ -126,14 +126,18 @@ func TestWALCrash(t *testing.T) {
|
||||
initFn func(dbm.DB, *State, context.Context)
|
||||
heightToStop int64
|
||||
}{
|
||||
{"empty block",
|
||||
{
|
||||
"empty block",
|
||||
func(stateDB dbm.DB, cs *State, ctx context.Context) {},
|
||||
1},
|
||||
{"many non-empty blocks",
|
||||
1,
|
||||
},
|
||||
{
|
||||
"many non-empty blocks",
|
||||
func(stateDB dbm.DB, cs *State, ctx context.Context) {
|
||||
go sendTxs(ctx, cs)
|
||||
},
|
||||
3},
|
||||
3,
|
||||
},
|
||||
}
|
||||
|
||||
for i, tc := range testCases {
|
||||
@@ -146,7 +150,8 @@ func TestWALCrash(t *testing.T) {
|
||||
}
|
||||
|
||||
func crashWALandCheckLiveness(t *testing.T, consensusReplayConfig *cfg.Config,
|
||||
initFn func(dbm.DB, *State, context.Context), heightToStop int64) {
|
||||
initFn func(dbm.DB, *State, context.Context), heightToStop int64,
|
||||
) {
|
||||
walPanicked := make(chan error)
|
||||
crashingWal := &crashingWAL{panicCh: walPanicked, heightToStop: heightToStop}
|
||||
|
||||
@@ -159,7 +164,9 @@ LOOP:
|
||||
logger := log.NewNopLogger()
|
||||
blockDB := dbm.NewMemDB()
|
||||
stateDB := blockDB
|
||||
stateStore := sm.NewStore(stateDB)
|
||||
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
|
||||
DiscardABCIResponses: false,
|
||||
})
|
||||
state, err := sm.MakeGenesisStateFromFile(consensusReplayConfig.GenesisFile())
|
||||
require.NoError(t, err)
|
||||
privValidator := loadPrivValidator(consensusReplayConfig)
|
||||
@@ -282,7 +289,8 @@ func (w *crashingWAL) FlushAndSync() error { return w.next.FlushAndSync() }
|
||||
|
||||
func (w *crashingWAL) SearchForEndHeight(
|
||||
height int64,
|
||||
options *WALSearchOptions) (rd io.ReadCloser, found bool, err error) {
|
||||
options *WALSearchOptions,
|
||||
) (rd io.ReadCloser, found bool, err error) {
|
||||
return w.next.SearchForEndHeight(height, options)
|
||||
}
|
||||
|
||||
@@ -290,7 +298,7 @@ func (w *crashingWAL) Start() error { return w.next.Start() }
|
||||
func (w *crashingWAL) Stop() error { return w.next.Stop() }
|
||||
func (w *crashingWAL) Wait() { w.next.Wait() }
|
||||
|
||||
//------------------------------------------------------------------------------------------
|
||||
// ------------------------------------------------------------------------------------------
|
||||
type testSim struct {
|
||||
GenesisState sm.State
|
||||
Config *cfg.Config
|
||||
@@ -586,7 +594,7 @@ func TestHandshakeReplayNone(t *testing.T) {
|
||||
func TestMockProxyApp(t *testing.T) {
|
||||
sim.CleanupFunc() // clean the test env created in TestSimulateValidatorsChange
|
||||
logger := log.TestingLogger()
|
||||
var validTxs, invalidTxs = 0, 0
|
||||
validTxs, invalidTxs := 0, 0
|
||||
txIndex := 0
|
||||
|
||||
assert.NotPanics(t, func() {
|
||||
@@ -634,7 +642,7 @@ func TestMockProxyApp(t *testing.T) {
|
||||
}
|
||||
|
||||
func tempWALWithData(data []byte) string {
|
||||
walFile, err := ioutil.TempFile("", "wal")
|
||||
walFile, err := os.CreateTemp("", "wal")
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("failed to create temp WAL file: %v", err))
|
||||
}
|
||||
@@ -693,7 +701,9 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin
|
||||
stateDB, genesisState, store = stateAndStore(config, pubKey, kvstore.ProtocolVersion)
|
||||
|
||||
}
|
||||
stateStore := sm.NewStore(stateDB)
|
||||
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
|
||||
DiscardABCIResponses: false,
|
||||
})
|
||||
store.chain = chain
|
||||
store.commits = commits
|
||||
|
||||
@@ -712,7 +722,9 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin
|
||||
// use a throwaway tendermint state
|
||||
proxyApp := proxy.NewAppConns(clientCreator2)
|
||||
stateDB1 := dbm.NewMemDB()
|
||||
stateStore := sm.NewStore(stateDB1)
|
||||
stateStore := sm.NewStore(stateDB1, sm.StoreOptions{
|
||||
DiscardABCIResponses: false,
|
||||
})
|
||||
err := stateStore.Save(genesisState)
|
||||
require.NoError(t, err)
|
||||
buildAppStateFromChain(proxyApp, stateStore, genesisState, chain, nBlocks, mode)
|
||||
@@ -788,7 +800,8 @@ func applyBlock(stateStore sm.Store, st sm.State, blk *types.Block, proxyApp pro
|
||||
}
|
||||
|
||||
func buildAppStateFromChain(proxyApp proxy.AppConns, stateStore sm.Store,
|
||||
state sm.State, chain []*types.Block, nBlocks int, mode uint) {
|
||||
state sm.State, chain []*types.Block, nBlocks int, mode uint,
|
||||
) {
|
||||
// start a new app without handshake, play nBlocks blocks
|
||||
if err := proxyApp.Start(); err != nil {
|
||||
panic(err)
|
||||
@@ -825,7 +838,6 @@ func buildAppStateFromChain(proxyApp proxy.AppConns, stateStore sm.Store,
|
||||
default:
|
||||
panic(fmt.Sprintf("unknown mode %v", mode))
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func buildTMStateFromChain(
|
||||
@@ -834,7 +846,8 @@ func buildTMStateFromChain(
|
||||
state sm.State,
|
||||
chain []*types.Block,
|
||||
nBlocks int,
|
||||
mode uint) sm.State {
|
||||
mode uint,
|
||||
) sm.State {
|
||||
// run the whole chain against this client to build up the tendermint state
|
||||
clientCreator := proxy.NewLocalClientCreator(
|
||||
kvstore.NewPersistentKVStoreApplication(
|
||||
@@ -891,7 +904,9 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) {
|
||||
pubKey, err := privVal.GetPubKey()
|
||||
require.NoError(t, err)
|
||||
stateDB, state, store := stateAndStore(config, pubKey, appVersion)
|
||||
stateStore := sm.NewStore(stateDB)
|
||||
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
|
||||
DiscardABCIResponses: false,
|
||||
})
|
||||
genDoc, _ := sm.MakeGenesisDocFromFile(config.GenesisFile())
|
||||
state.LastValidators = state.Validators.Copy()
|
||||
// mode = 0 for committing all the blocks
|
||||
@@ -975,8 +990,8 @@ func makeBlocks(n int, state *sm.State, privVal types.PrivValidator) []*types.Bl
|
||||
}
|
||||
|
||||
func makeBlock(state sm.State, lastBlock *types.Block, lastBlockMeta *types.BlockMeta,
|
||||
privVal types.PrivValidator, height int64) (*types.Block, *types.PartSet) {
|
||||
|
||||
privVal types.PrivValidator, height int64,
|
||||
) (*types.Block, *types.PartSet) {
|
||||
lastCommit := types.NewCommit(height-1, 0, types.BlockID{}, nil)
|
||||
if height > 1 {
|
||||
vote, _ := types.MakeVote(
|
||||
@@ -1058,8 +1073,8 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) {
|
||||
case EndHeightMessage:
|
||||
// if its not the first one, we have a full block
|
||||
if thisBlockParts != nil {
|
||||
var pbb = new(tmproto.Block)
|
||||
bz, err := ioutil.ReadAll(thisBlockParts.GetReader())
|
||||
pbb := new(tmproto.Block)
|
||||
bz, err := io.ReadAll(thisBlockParts.GetReader())
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@@ -1098,11 +1113,11 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) {
|
||||
}
|
||||
}
|
||||
// grab the last block too
|
||||
bz, err := ioutil.ReadAll(thisBlockParts.GetReader())
|
||||
bz, err := io.ReadAll(thisBlockParts.GetReader())
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
var pbb = new(tmproto.Block)
|
||||
pbb := new(tmproto.Block)
|
||||
err = proto.Unmarshal(bz, pbb)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
@@ -1146,9 +1161,12 @@ func readPieceFromWAL(msg *TimedWALMessage) interface{} {
|
||||
func stateAndStore(
|
||||
config *cfg.Config,
|
||||
pubKey crypto.PubKey,
|
||||
appVersion uint64) (dbm.DB, sm.State, *mockBlockStore) {
|
||||
appVersion uint64,
|
||||
) (dbm.DB, sm.State, *mockBlockStore) {
|
||||
stateDB := dbm.NewMemDB()
|
||||
stateStore := sm.NewStore(stateDB)
|
||||
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
|
||||
DiscardABCIResponses: false,
|
||||
})
|
||||
state, _ := sm.MakeGenesisStateFromFile(config.GenesisFile())
|
||||
state.Version.Consensus.App = appVersion
|
||||
store := newMockBlockStore(config, state.ConsensusParams)
|
||||
@@ -1182,6 +1200,7 @@ func (bs *mockBlockStore) LoadBlock(height int64) *types.Block { return bs.chain
|
||||
func (bs *mockBlockStore) LoadBlockByHash(hash []byte) *types.Block {
|
||||
return bs.chain[int64(len(bs.chain))-1]
|
||||
}
|
||||
|
||||
func (bs *mockBlockStore) LoadBlockMeta(height int64) *types.BlockMeta {
|
||||
block := bs.chain[height-1]
|
||||
return &types.BlockMeta{
|
||||
@@ -1192,9 +1211,11 @@ func (bs *mockBlockStore) LoadBlockMeta(height int64) *types.BlockMeta {
|
||||
func (bs *mockBlockStore) LoadBlockPart(height int64, index int) *types.Part { return nil }
|
||||
func (bs *mockBlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) {
|
||||
}
|
||||
|
||||
func (bs *mockBlockStore) LoadBlockCommit(height int64) *types.Commit {
|
||||
return bs.commits[height-1]
|
||||
}
|
||||
|
||||
func (bs *mockBlockStore) LoadSeenCommit(height int64) *types.Commit {
|
||||
return bs.commits[height-1]
|
||||
}
|
||||
@@ -1225,7 +1246,9 @@ func TestHandshakeUpdatesValidators(t *testing.T) {
|
||||
pubKey, err := privVal.GetPubKey()
|
||||
require.NoError(t, err)
|
||||
stateDB, state, store := stateAndStore(config, pubKey, 0x0)
|
||||
stateStore := sm.NewStore(stateDB)
|
||||
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
|
||||
DiscardABCIResponses: false,
|
||||
})
|
||||
|
||||
oldValAddr := state.Validators.Validators[0].Address
|
||||
|
||||
|
||||
@@ -4,7 +4,7 @@ import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"os"
|
||||
"runtime/debug"
|
||||
"sort"
|
||||
@@ -305,6 +305,15 @@ func (cs *State) OnStart() error {
|
||||
}
|
||||
}
|
||||
|
||||
// we need the timeoutRoutine for replay so
|
||||
// we don't block on the tick chan.
|
||||
// NOTE: we will get a build up of garbage go routines
|
||||
// firing on the tockChan until the receiveRoutine is started
|
||||
// to deal with them (by that point, at most one will be valid)
|
||||
if err := cs.timeoutTicker.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// We may have lost some votes if the process crashed reload from consensus
|
||||
// log to catchup.
|
||||
if cs.doWALCatchup {
|
||||
@@ -361,15 +370,6 @@ func (cs *State) OnStart() error {
|
||||
return err
|
||||
}
|
||||
|
||||
// we need the timeoutRoutine for replay so
|
||||
// we don't block on the tick chan.
|
||||
// NOTE: we will get a build up of garbage go routines
|
||||
// firing on the tockChan until the receiveRoutine is started
|
||||
// to deal with them (by that point, at most one will be valid)
|
||||
if err := cs.timeoutTicker.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Double Signing Risk Reduction
|
||||
if err := cs.checkDoubleSigningRisk(cs.Height); err != nil {
|
||||
return err
|
||||
@@ -468,7 +468,6 @@ func (cs *State) AddVote(vote *types.Vote, peerID p2p.ID) (added bool, err error

// SetProposal inputs a proposal.
func (cs *State) SetProposal(proposal *types.Proposal, peerID p2p.ID) error {

if peerID == "" {
cs.internalMsgQueue <- msgInfo{&ProposalMessage{proposal}, ""}
} else {
@@ -481,7 +480,6 @@ func (cs *State) SetProposal(proposal *types.Proposal, peerID p2p.ID) error {

// AddProposalBlockPart inputs a part of the proposal block.
func (cs *State) AddProposalBlockPart(height int64, round int32, part *types.Part, peerID p2p.ID) error {

if peerID == "" {
cs.internalMsgQueue <- msgInfo{&BlockPartMessage{height, round, part}, ""}
} else {
@@ -499,7 +497,6 @@ func (cs *State) SetProposalAndBlock(
parts *types.PartSet,
peerID p2p.ID,
) error {

if err := cs.SetProposal(proposal, peerID); err != nil {
return err
}
@@ -802,7 +799,6 @@ func (cs *State) receiveRoutine(maxSteps int) {
func (cs *State) handleMsg(mi msgInfo) {
cs.mtx.Lock()
defer cs.mtx.Unlock()

var (
added bool
err error
@@ -819,6 +815,24 @@ func (cs *State) handleMsg(mi msgInfo) {
case *BlockPartMessage:
// if the proposal is complete, we'll enterPrevote or tryFinalizeCommit
added, err = cs.addProposalBlockPart(msg, peerID)

// We unlock here to yield to any routines that need to read the the RoundState.
// Previously, this code held the lock from the point at which the final block
// part was received until the block executed against the application.
// This prevented the reactor from being able to retrieve the most updated
// version of the RoundState. The reactor needs the updated RoundState to
// gossip the now completed block.
//
// This code can be further improved by either always operating on a copy
// of RoundState and only locking when switching out State's copy of
// RoundState with the updated copy or by emitting RoundState events in
// more places for routines depending on it to listen for.
cs.mtx.Unlock()

cs.mtx.Lock()
if added && cs.ProposalBlockParts.IsComplete() {
cs.handleCompleteProposal(msg.Height)
}
if added {
cs.statsMsgQueue <- mi
}
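The comment block above documents why `handleMsg` briefly releases `cs.mtx` after a block part is added: readers such as the reactor's gossip routine can then observe the updated RoundState before the completed proposal is handled under the lock again. A self-contained sketch of that unlock-to-yield pattern, with illustrative names rather than Tendermint APIs:

```go
package main

import (
	"fmt"
	"sync"
)

type state struct {
	mtx      sync.Mutex
	complete bool
}

// handlePart mirrors the shape of handleMsg above: perform the update, then
// drop the lock for a moment so concurrent readers can see the new state, then
// re-acquire it before acting on the completed proposal.
func (s *state) handlePart(markComplete bool) {
	s.mtx.Lock()
	defer s.mtx.Unlock()

	s.complete = markComplete // analogous to addProposalBlockPart

	s.mtx.Unlock() // yield to readers of the shared state
	s.mtx.Lock()   // re-acquire before continuing

	if s.complete {
		fmt.Println("handling complete proposal")
	}
}

func main() {
	s := &state{}
	s.handlePart(true)
}
```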
@@ -920,7 +934,6 @@ func (cs *State) handleTimeout(ti timeoutInfo, rs cstypes.RoundState) {
default:
panic(fmt.Sprintf("invalid timeout step: %v", ti.Step))
}

}

func (cs *State) handleTxsAvailable() {
@@ -953,7 +966,9 @@ func (cs *State) handleTxsAvailable() {
// Used internally by handleTimeout and handleMsg to make state transitions

// Enter: `timeoutNewHeight` by startTime (commitTime+timeoutCommit),
// or, if SkipTimeoutCommit==true, after receiving all precommits from (height,round-1)
//
// or, if SkipTimeoutCommit==true, after receiving all precommits from (height,round-1)
//
// Enter: `timeoutPrecommits` after any +2/3 precommits from (height,round-1)
// Enter: +2/3 precommits for nil at (height,round-1)
// Enter: +2/3 prevotes any or +2/3 precommits for block or any from (height, round)
@@ -964,7 +979,7 @@ func (cs *State) enterNewRound(height int64, round int32) {
if cs.Height != height || round < cs.Round || (cs.Round == round && cs.Step != cstypes.RoundStepNewHeight) {
logger.Debug(
"entering new round with invalid args",
"current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step),
"current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step),
)
return
}
@@ -973,7 +988,7 @@ func (cs *State) enterNewRound(height int64, round int32) {
logger.Debug("need to set a buffer and log message here for sanity", "start_time", cs.StartTime, "now", now)
}

logger.Debug("entering new round", "current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step))
logger.Debug("entering new round", "current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step))

// increment validators if necessary
validators := cs.Validators
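The `fmt.Sprintf` to `log.NewLazySprintf` substitutions throughout these hunks defer string formatting until the logger actually renders the value, so disabled debug lines no longer pay the formatting cost. A rough sketch of the underlying idea, using `fmt.Stringer`; this is an illustration, not the library's implementation:

```go
package main

import "fmt"

// lazySprintf wraps a format call so the string is only built if the value is
// actually formatted (for example, when debug logging is enabled).
type lazySprintf struct {
	format string
	args   []interface{}
}

func (l *lazySprintf) String() string { return fmt.Sprintf(l.format, l.args...) }

func newLazySprintf(format string, args ...interface{}) fmt.Stringer {
	return &lazySprintf{format: format, args: args}
}

func main() {
	height, round, step := int64(10), int32(0), "NewHeight"
	// The %v verb calls String() only when the argument is rendered.
	fmt.Printf("entering new round, current=%v\n", newLazySprintf("%v/%v/%v", height, round, step))
}
```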
@@ -1038,7 +1053,9 @@ func (cs *State) needProofBlock(height int64) bool {

// Enter (CreateEmptyBlocks): from enterNewRound(height,round)
// Enter (CreateEmptyBlocks, CreateEmptyBlocksInterval > 0 ):
// after enterNewRound(height,round), after timeout of CreateEmptyBlocksInterval
//
// after enterNewRound(height,round), after timeout of CreateEmptyBlocksInterval
//
// Enter (!CreateEmptyBlocks) : after enterNewRound(height,round), once txs are in the mempool
func (cs *State) enterPropose(height int64, round int32) {
logger := cs.Logger.With("height", height, "round", round)
@@ -1046,12 +1063,12 @@ func (cs *State) enterPropose(height int64, round int32) {
if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPropose <= cs.Step) {
logger.Debug(
"entering propose step with invalid args",
"current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step),
"current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step),
)
return
}

logger.Debug("entering propose step", "current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step))
logger.Debug("entering propose step", "current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step))

defer func() {
// Done enterPropose:
@@ -1160,7 +1177,6 @@ func (cs *State) isProposalComplete() bool {
}
// if this is false the proposer is lying or we haven't received the POL yet
return cs.Votes.Prevotes(cs.Proposal.POLRound).HasTwoThirdsMajority()

}

// Create the next block to propose and return it. Returns nil block upon error.
@@ -1213,7 +1229,7 @@ func (cs *State) enterPrevote(height int64, round int32) {
if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrevote <= cs.Step) {
logger.Debug(
"entering prevote step with invalid args",
"current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step),
"current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step),
)
return
}
@@ -1224,7 +1240,7 @@ func (cs *State) enterPrevote(height int64, round int32) {
cs.newStep()
}()

logger.Debug("entering prevote step", "current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step))
logger.Debug("entering prevote step", "current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step))

// Sign and broadcast vote as necessary
cs.doPrevote(height, round)
@@ -1273,7 +1289,7 @@ func (cs *State) enterPrevoteWait(height int64, round int32) {
if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrevoteWait <= cs.Step) {
logger.Debug(
"entering prevote wait step with invalid args",
"current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step),
"current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step),
)
return
}
@@ -1285,7 +1301,7 @@ func (cs *State) enterPrevoteWait(height int64, round int32) {
))
}

logger.Debug("entering prevote wait step", "current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step))
logger.Debug("entering prevote wait step", "current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step))

defer func() {
// Done enterPrevoteWait:
@@ -1309,12 +1325,12 @@ func (cs *State) enterPrecommit(height int64, round int32) {
if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrecommit <= cs.Step) {
logger.Debug(
"entering precommit step with invalid args",
"current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step),
"current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step),
)
return
}

logger.Debug("entering precommit step", "current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step))
logger.Debug("entering precommit step", "current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step))

defer func() {
// Done enterPrecommit:
@@ -1432,7 +1448,7 @@ func (cs *State) enterPrecommitWait(height int64, round int32) {
logger.Debug(
"entering precommit wait step with invalid args",
"triggered_timeout", cs.TriggeredTimeoutPrecommit,
"current", fmt.Sprintf("%v/%v", cs.Height, cs.Round),
"current", log.NewLazySprintf("%v/%v", cs.Height, cs.Round),
)
return
}
@@ -1444,7 +1460,7 @@ func (cs *State) enterPrecommitWait(height int64, round int32) {
))
}

logger.Debug("entering precommit wait step", "current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step))
logger.Debug("entering precommit wait step", "current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step))

defer func() {
// Done enterPrecommitWait:
@@ -1463,12 +1479,12 @@ func (cs *State) enterCommit(height int64, commitRound int32) {
if cs.Height != height || cstypes.RoundStepCommit <= cs.Step {
logger.Debug(
"entering commit step with invalid args",
"current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step),
"current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step),
)
return
}

logger.Debug("entering commit step", "current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step))
logger.Debug("entering commit step", "current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step))

defer func() {
// Done enterCommit:
@@ -1501,7 +1517,7 @@ func (cs *State) enterCommit(height int64, commitRound int32) {
if !cs.ProposalBlockParts.HasHeader(blockID.PartSetHeader) {
logger.Info(
"commit is for a block we do not know about; set ProposalBlock=nil",
"proposal", cs.ProposalBlock.Hash(),
"proposal", log.NewLazyBlockHash(cs.ProposalBlock),
"commit", blockID.Hash,
)

@@ -1538,7 +1554,7 @@ func (cs *State) tryFinalizeCommit(height int64) {
// TODO: ^^ wait, why does it matter that we're a validator?
logger.Debug(
"failed attempt to finalize commit; we do not have the commit block",
"proposal_block", cs.ProposalBlock.Hash(),
"proposal_block", log.NewLazyBlockHash(cs.ProposalBlock),
"commit_block", blockID.Hash,
)
return
@@ -1554,7 +1570,7 @@ func (cs *State) finalizeCommit(height int64) {
if cs.Height != height || cs.Step != cstypes.RoundStepCommit {
logger.Debug(
"entering finalize commit step",
"current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step),
"current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step),
)
return
}
@@ -1580,11 +1596,11 @@ func (cs *State) finalizeCommit(height int64) {

logger.Info(
"finalizing commit of block",
"hash", block.Hash(),
"hash", log.NewLazyBlockHash(block),
"root", block.AppHash,
"num_txs", len(block.Txs),
)
logger.Debug(fmt.Sprintf("%v", block))
logger.Debug("committed block", "block", log.NewLazySprintf("%v", block))

fail.Fail() // XXX
@@ -1864,12 +1880,12 @@ func (cs *State) addProposalBlockPart(msg *BlockPartMessage, peerID p2p.ID) (add
)
}
if added && cs.ProposalBlockParts.IsComplete() {
bz, err := ioutil.ReadAll(cs.ProposalBlockParts.GetReader())
bz, err := io.ReadAll(cs.ProposalBlockParts.GetReader())
if err != nil {
return added, err
}

var pbb = new(tmproto.Block)
pbb := new(tmproto.Block)
err = proto.Unmarshal(bz, pbb)
if err != nil {
return added, err
@@ -1888,44 +1904,43 @@ func (cs *State) addProposalBlockPart(msg *BlockPartMessage, peerID p2p.ID) (add
if err := cs.eventBus.PublishEventCompleteProposal(cs.CompleteProposalEvent()); err != nil {
cs.Logger.Error("failed publishing event complete proposal", "err", err)
}
}
return added, nil
}

// Update Valid* if we can.
prevotes := cs.Votes.Prevotes(cs.Round)
blockID, hasTwoThirds := prevotes.TwoThirdsMajority()
if hasTwoThirds && !blockID.IsZero() && (cs.ValidRound < cs.Round) {
if cs.ProposalBlock.HashesTo(blockID.Hash) {
cs.Logger.Debug(
"updating valid block to new proposal block",
"valid_round", cs.Round,
"valid_block_hash", cs.ProposalBlock.Hash(),
)
func (cs *State) handleCompleteProposal(blockHeight int64) {
// Update Valid* if we can.
prevotes := cs.Votes.Prevotes(cs.Round)
blockID, hasTwoThirds := prevotes.TwoThirdsMajority()
if hasTwoThirds && !blockID.IsZero() && (cs.ValidRound < cs.Round) {
if cs.ProposalBlock.HashesTo(blockID.Hash) {
cs.Logger.Debug(
"updating valid block to new proposal block",
"valid_round", cs.Round,
"valid_block_hash", log.NewLazyBlockHash(cs.ProposalBlock),
)

cs.ValidRound = cs.Round
cs.ValidBlock = cs.ProposalBlock
cs.ValidBlockParts = cs.ProposalBlockParts
}
// TODO: In case there is +2/3 majority in Prevotes set for some
// block and cs.ProposalBlock contains different block, either
// proposer is faulty or voting power of faulty processes is more
// than 1/3. We should trigger in the future accountability
// procedure at this point.
cs.ValidRound = cs.Round
cs.ValidBlock = cs.ProposalBlock
cs.ValidBlockParts = cs.ProposalBlockParts
}

if cs.Step <= cstypes.RoundStepPropose && cs.isProposalComplete() {
// Move onto the next step
cs.enterPrevote(height, cs.Round)
if hasTwoThirds { // this is optimisation as this will be triggered when prevote is added
cs.enterPrecommit(height, cs.Round)
}
} else if cs.Step == cstypes.RoundStepCommit {
// If we're waiting on the proposal block...
cs.tryFinalizeCommit(height)
}

return added, nil
// TODO: In case there is +2/3 majority in Prevotes set for some
// block and cs.ProposalBlock contains different block, either
// proposer is faulty or voting power of faulty processes is more
// than 1/3. We should trigger in the future accountability
// procedure at this point.
}

return added, nil
if cs.Step <= cstypes.RoundStepPropose && cs.isProposalComplete() {
// Move onto the next step
cs.enterPrevote(blockHeight, cs.Round)
if hasTwoThirds { // this is optimisation as this will be triggered when prevote is added
cs.enterPrecommit(blockHeight, cs.Round)
}
} else if cs.Step == cstypes.RoundStepCommit {
// If we're waiting on the proposal block...
cs.tryFinalizeCommit(blockHeight)
}
}

// Attempt to add the vote. if its a duplicate signature, dupeout the validator
@@ -1935,7 +1950,7 @@ func (cs *State) tryAddVote(vote *types.Vote, peerID p2p.ID) (bool, error) {
// If the vote height is off, we'll just ignore it,
// But if it's a conflicting sig, add it to the cs.evpool.
// If it's otherwise invalid, punish peer.
// nolint: gocritic
//nolint: gocritic
if voteErr, ok := err.(*types.ErrVoteConflictingVotes); ok {
if cs.privValidatorPubKey == nil {
return false, errPubKeyIsNotSet
@@ -2076,7 +2091,7 @@ func (cs *State) addVote(vote *types.Vote, peerID p2p.ID) (added bool, err error
} else {
cs.Logger.Debug(
"valid block we do not know about; set ProposalBlock=nil",
"proposal", cs.ProposalBlock.Hash(),
"proposal", log.NewLazyBlockHash(cs.ProposalBlock),
"block_id", blockID.Hash,
)
@@ -2192,10 +2207,11 @@ func (cs *State) voteTime() time.Time {
now := tmtime.Now()
minVoteTime := now
// TODO: We should remove next line in case we don't vote for v in case cs.ProposalBlock == nil,
// even if cs.LockedBlock != nil. See https://docs.tendermint.com/master/spec/.
// even if cs.LockedBlock != nil. See https://github.com/tendermint/tendermint/tree/v0.34.x/spec/.
timeIota := time.Duration(cs.state.ConsensusParams.Block.TimeIotaMs) * time.Millisecond
if cs.LockedBlock != nil {
// See the BFT time spec https://docs.tendermint.com/master/spec/consensus/bft-time.html
// See the BFT time spec
// https://github.com/tendermint/tendermint/blob/v0.34.x/spec/consensus/bft-time.md
minVoteTime = cs.LockedBlock.Time.Add(timeIota)
} else if cs.ProposalBlock != nil {
minVoteTime = cs.ProposalBlock.Time.Add(timeIota)
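For context on the hunk above: `voteTime` derives a lower bound for the validator's vote timestamp from the locked (or proposal) block time plus the consensus parameter `TimeIotaMs`. A hedged sketch of that bound follows; the final clamp against `now` is an assumption about the rest of the function, which the hunk cuts off:

```go
package main

import (
	"fmt"
	"time"
)

// minVoteTime returns blockTime + timeIota, but never earlier than now.
// The clamp to "now" is assumed, not shown in the hunk above.
func minVoteTime(now, blockTime time.Time, timeIotaMs int64) time.Time {
	timeIota := time.Duration(timeIotaMs) * time.Millisecond
	lower := blockTime.Add(timeIota)
	if now.After(lower) {
		return now
	}
	return lower
}

func main() {
	now := time.Now()
	oldBlock := now.Add(-2 * time.Second)
	// true: an old reference block does not hold the vote time back
	fmt.Println(minVoteTime(now, oldBlock, 1000).Equal(now))
}
```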
@@ -47,7 +47,9 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) {
}
blockStoreDB := db.NewMemDB()
stateDB := blockStoreDB
stateStore := sm.NewStore(stateDB)
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
state, err := sm.MakeGenesisState(genDoc)
if err != nil {
return fmt.Errorf("failed to make genesis state: %w", err)
@@ -3,7 +3,6 @@ package consensus
import (
"bytes"
"crypto/rand"
"io/ioutil"
"os"
"path/filepath"

@@ -27,7 +26,7 @@ const (
)

func TestWALTruncate(t *testing.T) {
walDir, err := ioutil.TempDir("", "wal")
walDir, err := os.MkdirTemp("", "wal")
require.NoError(t, err)
defer os.RemoveAll(walDir)

@@ -109,7 +108,7 @@ func TestWALEncoderDecoder(t *testing.T) {
}

func TestWALWrite(t *testing.T) {
walDir, err := ioutil.TempDir("", "wal")
walDir, err := os.MkdirTemp("", "wal")
require.NoError(t, err)
defer os.RemoveAll(walDir)
walFile := filepath.Join(walDir, "wal")
@@ -177,7 +176,7 @@ func TestWALSearchForEndHeight(t *testing.T) {
}

func TestWALPeriodicSync(t *testing.T) {
walDir, err := ioutil.TempDir("", "wal")
walDir, err := os.MkdirTemp("", "wal")
require.NoError(t, err)
defer os.RemoveAll(walDir)
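These WAL test hunks, together with the `io.ReadAll` change earlier in state.go, are part of the Go 1.16+ migration away from the deprecated `io/ioutil` package: `ioutil.ReadAll` becomes `io.ReadAll` and `ioutil.TempDir` becomes `os.MkdirTemp`. A small, self-contained illustration of the replacements:

```go
package main

import (
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strings"
)

func main() {
	dir, err := os.MkdirTemp("", "wal") // was: ioutil.TempDir("", "wal")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	path := filepath.Join(dir, "wal")
	if err := os.WriteFile(path, []byte("hello"), 0o600); err != nil { // was: ioutil.WriteFile
		panic(err)
	}

	data, err := io.ReadAll(strings.NewReader("hello")) // was: ioutil.ReadAll
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data), path)
}
```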
@@ -269,18 +268,23 @@ func BenchmarkWalDecode512B(b *testing.B) {
func BenchmarkWalDecode10KB(b *testing.B) {
benchmarkWalDecode(b, 10*1024)
}

func BenchmarkWalDecode100KB(b *testing.B) {
benchmarkWalDecode(b, 100*1024)
}

func BenchmarkWalDecode1MB(b *testing.B) {
benchmarkWalDecode(b, 1024*1024)
}

func BenchmarkWalDecode10MB(b *testing.B) {
benchmarkWalDecode(b, 10*1024*1024)
}

func BenchmarkWalDecode100MB(b *testing.B) {
benchmarkWalDecode(b, 100*1024*1024)
}

func BenchmarkWalDecode1GB(b *testing.B) {
benchmarkWalDecode(b, 1024*1024*1024)
}
@@ -12,7 +12,7 @@ For any specific algorithm, use its specific module e.g.

## Binary encoding

For Binary encoding, please refer to the [Tendermint encoding specification](https://docs.tendermint.com/master/spec/blockchain/encoding.html).
For Binary encoding, please refer to the [Tendermint encoding specification](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/core/encoding.md).

## JSON Encoding
@@ -3,9 +3,9 @@ package armor
import (
"bytes"
"fmt"
"io/ioutil"
"io"

"golang.org/x/crypto/openpgp/armor" // nolint: staticcheck
"golang.org/x/crypto/openpgp/armor" //nolint: staticcheck
)

func EncodeArmor(blockType string, headers map[string]string, data []byte) string {
@@ -31,7 +31,7 @@ func DecodeArmor(armorStr string) (blockType string, headers map[string]string,
if err != nil {
return "", nil, nil, err
}
data, err = ioutil.ReadAll(block.Body)
data, err = io.ReadAll(block.Body)
if err != nil {
return "", nil, nil, err
}
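A usage sketch for the two functions touched in this hunk, based only on the signatures visible above (`EncodeArmor(blockType, headers, data)` and `DecodeArmor(armorStr)`); the `crypto/armor` import path is an assumption about where `package armor` lives in the tree, and the block type string is arbitrary:

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/crypto/armor"
)

func main() {
	// Encode arbitrary bytes into an ASCII-armored block with one header.
	armored := armor.EncodeArmor("TENDERMINT PRIVATE KEY", map[string]string{"kdf": "bcrypt"}, []byte("secret"))

	// Decode it back; the headers map and payload round-trip.
	blockType, headers, data, err := armor.DecodeArmor(armored)
	if err != nil {
		panic(err)
	}
	fmt.Println(blockType, headers["kdf"], string(data))
}
```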
@@ -12,20 +12,19 @@ second pre-image attacks. Hence, use this library with caution.
Otherwise you might run into similar issues as, e.g., in early Bitcoin:
https://bitcointalk.org/?topic=102395

                        *
                       / \
                     /     \
                   /         \
                 /             \
                *               *
               / \             / \
              /   \           /   \
             /     \         /     \
            *       *       *       h6
           / \     / \     / \
          h0  h1  h2  h3  h4  h5

TODO(ismail): add 2nd pre-image protection or clarify further on how we use this and why this secure.

*/
package merkle
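The doc comment above describes a deterministic "simple" Merkle tree in which, with seven items, the odd leaf `h6` sits one level higher than the rest. A hedged sketch of computing a root with that shape by splitting at the largest power of two below `n`; this is only an illustration, not the package's exact hashing scheme (which may, for instance, domain-separate leaf and inner nodes):

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// hash concatenates its inputs and hashes them with SHA-256.
func hash(bs ...[]byte) []byte {
	h := sha256.New()
	for _, b := range bs {
		h.Write(b)
	}
	return h.Sum(nil)
}

// splitPoint returns the largest power of two strictly less than n (n > 1),
// which reproduces the tree shape in the diagram above.
func splitPoint(n int) int {
	p := 1
	for p*2 < n {
		p *= 2
	}
	return p
}

func merkleRoot(items [][]byte) []byte {
	switch len(items) {
	case 0:
		return hash() // empty-tree convention for this sketch, not necessarily Tendermint's
	case 1:
		return hash(items[0])
	default:
		k := splitPoint(len(items))
		return hash(merkleRoot(items[:k]), merkleRoot(items[k:]))
	}
}

func main() {
	items := [][]byte{
		[]byte("h0"), []byte("h1"), []byte("h2"), []byte("h3"),
		[]byte("h4"), []byte("h5"), []byte("h6"),
	}
	fmt.Printf("%x\n", merkleRoot(items))
}
```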
Some files were not shown because too many files have changed in this diff.