Mirror of https://github.com/tendermint/tendermint.git (synced 2026-01-22 20:52:50 +00:00).

Compare commits: wb/abc-cli...wb/v034-e2 (151 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 00dd3a6801 |  |
|  | 223ece93c8 |  |
|  | ba1711e706 |  |
|  | 8df725f92f |  |
|  | 2b3737333f |  |
|  | bbb5f3bfef |  |
|  | d6b413ff8e |  |
|  | 7b615f8123 |  |
|  | 7d9447198c |  |
|  | 5276400c30 |  |
|  | 493dd69f31 |  |
|  | 1d9d947d88 |  |
|  | 479bdd71e6 |  |
|  | 17f4ea3680 |  |
|  | 5c32cfa00e |  |
|  | 5e354a3a57 |  |
|  | 9e14e954f9 |  |
|  | 6b7d30cf37 |  |
|  | 25101d1116 |  |
|  | b83cc0aeda |  |
|  | 4a1df4911d |  |
|  | a3cc3d98b9 |  |
|  | fe024521ef |  |
|  | 02def9ca64 |  |
|  | ce2409f3ff |  |
|  | 30915e9337 |  |
|  | 8a7affe3a0 |  |
|  | 851f404305 |  |
|  | f63496dcd6 |  |
|  | 044b12585f |  |
|  | ac2e7fab3d |  |
|  | e6f0711648 |  |
|  | 603a1d6610 |  |
|  | ad72896ca5 |  |
|  | 9afdac6b52 |  |
|  | a694dad540 |  |
|  | 9d1556a5bc |  |
|  | 7ae73be891 |  |
|  | bbb1506f5e |  |
|  | 7cb014cf27 |  |
|  | 004967f962 |  |
|  | 6670c24e5f |  |
|  | b685ec7f2d |  |
|  | 016c91b50f |  |
|  | 34f23ab88a |  |
|  | aef0bd874d |  |
|  | d2bd4471bc |  |
|  | c7006af6fd |  |
|  | 972eee6ebc |  |
|  | b06540b5ff |  |
|  | d1213f7e5f |  |
|  | 624bbac8f6 |  |
|  | 874c9a0951 |  |
|  | 986f8d6532 |  |
|  | e40e7ea46e |  |
|  | 7fa34c63af |  |
|  | da6ec8f082 |  |
|  | efddab0734 |  |
|  | 3454f8cb89 |  |
|  | 2f231ceb95 |  |
|  | 6e85f46d9a |  |
|  | 89139784c5 |  |
|  | ecf19029b4 |  |
|  | 9ae5797866 |  |
|  | 07670318a9 |  |
|  | 1e32a149dd |  |
|  | 2c553d735a |  |
|  | 799489e474 |  |
|  | 66b1a3ee4c |  |
|  | 06e8620621 |  |
|  | b2dd100a76 |  |
|  | 314b139ac3 |  |
|  | 551072c962 |  |
|  | 4b5472c387 |  |
|  | fd3bfb38e7 |  |
|  | 186e0e4df2 |  |
|  | a97bb37d44 |  |
|  | 9e8837ad63 |  |
|  | 6b4e9078de |  |
|  | 1d25a3f0bc |  |
|  | 96085df7c1 |  |
|  | cb6baad5ac |  |
|  | db60bbad54 |  |
|  | 5487718cff |  |
|  | cf58c4191b |  |
|  | ce70b10f81 |  |
|  | 98c75c9429 |  |
|  | 9fe245025f |  |
|  | de423678eb |  |
|  | 6a14fc2105 |  |
|  | 89bb82617a |  |
|  | df9b1676f9 |  |
|  | f88aad5903 |  |
|  | 75c6af7dcf |  |
|  | bc63f213da |  |
|  | 80f656d8d7 |  |
|  | f36cc80568 |  |
|  | c477c810f3 |  |
|  | 3757810247 |  |
|  | 3b467f951d |  |
|  | b14dc70664 |  |
|  | 2cbb35f980 |  |
|  | c9c570e151 |  |
|  | e6700355f6 |  |
|  | 40f18b8d8f |  |
|  | 4d0b6e7c5a |  |
|  | 6695e525f9 |  |
|  | 6eeb1b3a5d |  |
|  | cac59a7677 |  |
|  | dfd5bae784 |  |
|  | 41c176ccc6 |  |
|  | 05340ca069 |  |
|  | 9994396e59 |  |
|  | c4834df3f3 |  |
|  | 12e3419f2b |  |
|  | 9ec863f948 |  |
|  | d0031b0503 |  |
|  | d35b50b528 |  |
|  | bd48acb2ca |  |
|  | 0b835bea7a |  |
|  | 12ecfb0383 |  |
|  | 3e7fc468e4 |  |
|  | 113118ec00 |  |
|  | 4ef140f6ca |  |
|  | 61831cf5ef |  |
|  | 8a2dcbafae |  |
|  | 3e119fc6c4 |  |
|  | f721bf5154 |  |
|  | 3567d3ab38 |  |
|  | 46a6691e11 |  |
|  | 876b3c0dbe |  |
|  | 31b3e279fc |  |
|  | 85870def7b |  |
|  | ff2758b32e |  |
|  | a82cb7dcda |  |
|  | 1dfb3451ea |  |
|  | 9f13b9b083 |  |
|  | 16ba782fa6 |  |
|  | 474ed04273 |  |
|  | 2d8287d0f7 |  |
|  | 294a9695b4 |  |
|  | 849461aab2 |  |
|  | 8ba6d218e4 |  |
|  | 0f8932f4ef |  |
|  | 73ef2675ce |  |
|  | e0c6199aae |  |
|  | 0c05841902 |  |
|  | 4023580a25 |  |
|  | 2db1e422d8 |  |
|  | 093961ae2d |  |
|  | d030cddca0 |  |
.github/CODEOWNERS (vendored, 2 lines changed)
@@ -7,4 +7,4 @@
# global owners are only requested if there isn't a more specific
# codeowner specified below. For this reason, the global codeowners
# are often repeated in package-level definitions.
* @alexanderbez @cmwaters @ebuchman @marbar3778 @tessr @tychoish
* @ebuchman @cmwaters @tychoish @williambanfield @creachadair

.github/dependabot.yml (vendored, 27 lines changed)
@@ -1,27 +0,0 @@
version: 2
updates:
- package-ecosystem: github-actions
directory: "/"
schedule:
interval: daily
time: "11:00"
open-pull-requests-limit: 10
- package-ecosystem: npm
directory: "/docs"
schedule:
interval: daily
time: "11:00"
open-pull-requests-limit: 10
reviewers:
- fadeev
- package-ecosystem: gomod
directory: "/"
schedule:
interval: daily
time: "11:00"
open-pull-requests-limit: 10
reviewers:
- melekes
- tessr
labels:
- T:dependencies

.github/mergify.yml (vendored, new file, 19 lines)
@@ -0,0 +1,19 @@
queue_rules:
- name: default
conditions:
- base=v0.34.x
- label=S:automerge

pull_request_rules:
- name: Automerge to v0.34.x
conditions:
- base=v0.34.x
- label=S:automerge
actions:
queue:
method: squash
name: default
commit_message_template: |
{{ title }} (#{{ number }})

{{ body }}

.github/workflows/check-generated.yml (vendored, new file, 75 lines)
@@ -0,0 +1,75 @@
# Verify that generated code is up-to-date.
#
# Note that we run these checks regardless whether the input files have
# changed, because generated code can change in response to toolchain updates
# even if no files in the repository are modified.
name: Check generated code
on:
pull_request:
branches:
- v0.34.x

permissions:
contents: read

jobs:
check-mocks:
runs-on: ubuntu-latest
steps:
- uses: actions/setup-go@v3
with:
go-version: '1.17'

- uses: actions/checkout@v3

- name: "Check generated mocks"
run: |
set -euo pipefail

readonly MOCKERY=2.12.3 # N.B. no leading "v"
curl -sL "https://github.com/vektra/mockery/releases/download/v${MOCKERY}/mockery_${MOCKERY}_Linux_x86_64.tar.gz" | tar -C /usr/local/bin -xzf -
make mockery 2>/dev/null

if ! git diff --stat --exit-code ; then
echo ">> ERROR:"
echo ">>"
echo ">> Generated mocks require update (either Mockery or source files may have changed)."
echo ">> Ensure your tools are up-to-date, re-run 'make mockery' and update this PR."
echo ">>"
exit 1
fi

check-proto:
runs-on: ubuntu-latest
steps:
- uses: actions/setup-go@v3
with:
go-version: '1.17'

- uses: actions/checkout@v3
with:
fetch-depth: 1 # we need a .git directory to run git diff

- name: "Check protobuf generated code"
run: |
set -euo pipefail

# Install buf and gogo tools, so that differences that arise from
# toolchain differences are also caught.
readonly tools="$(mktemp -d)"
export PATH="${PATH}:${tools}/bin"
export GOBIN="${tools}/bin"

go install github.com/bufbuild/buf/cmd/buf
go install github.com/gogo/protobuf/protoc-gen-gogofaster@latest

make proto-gen

if ! git diff --stat --exit-code ; then
echo ">> ERROR:"
echo ">>"
echo ">> Protobuf generated code requires update (either tools or .proto files may have changed)."
echo ">> Ensure your tools are up-to-date, re-run 'make proto-gen' and update this PR."
echo ">>"
exit 1
fi

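For contributors, the two checks above boil down to regenerating code and diffing the work tree. A minimal local sketch, assuming a checkout where the `make mockery` and `make proto-gen` targets shown in the workflow exist and their tools (mockery, buf, protoc-gen-gogofaster) are already installed:

```bash
#!/usr/bin/env bash
# Hypothetical local equivalent of the check-generated workflow above:
# regenerate mocks and protobuf code, then fail if anything is stale.
set -euo pipefail

make mockery
make proto-gen

# A non-empty diff means generated code in the repository is out of date.
if ! git diff --stat --exit-code; then
  echo ">> Generated code is stale; commit the regenerated files." >&2
  exit 1
fi
```
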
.github/workflows/coverage.yml (vendored, 40 lines changed)
@@ -10,25 +10,25 @@ jobs:
split-test-files:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: Create a file with all the pkgs
run: go list ./... > pkgs.txt
- name: Split pkgs into 4 files
run: split -d -n l/4 pkgs.txt pkgs.txt.part.
# cache multiple
- uses: actions/upload-artifact@v2
- uses: actions/upload-artifact@v3
with:
name: "${{ github.sha }}-00"
path: ./pkgs.txt.part.00
- uses: actions/upload-artifact@v2
- uses: actions/upload-artifact@v3
with:
name: "${{ github.sha }}-01"
path: ./pkgs.txt.part.01
- uses: actions/upload-artifact@v2
- uses: actions/upload-artifact@v3
with:
name: "${{ github.sha }}-02"
path: ./pkgs.txt.part.02
- uses: actions/upload-artifact@v2
- uses: actions/upload-artifact@v3
with:
name: "${{ github.sha }}-03"
path: ./pkgs.txt.part.03
@@ -42,11 +42,11 @@ jobs:
goarch: ["arm", "amd64"]
timeout-minutes: 5
steps:
- uses: actions/setup-go@v2
- uses: actions/setup-go@v3
with:
go-version: "^1.15.4"
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
with:
PATTERNS: |
**/**.go
@@ -64,17 +64,17 @@ jobs:
matrix:
part: ["00", "01", "02", "03"]
steps:
- uses: actions/setup-go@v2
- uses: actions/setup-go@v3
with:
go-version: "^1.15.4"
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
with:
PATTERNS: |
**/**.go
go.mod
go.sum
- uses: actions/download-artifact@v2
- uses: actions/download-artifact@v3
with:
name: "${{ github.sha }}-${{ matrix.part }}"
if: env.GIT_DIFF
@@ -82,7 +82,7 @@ jobs:
run: |
cat pkgs.txt.part.${{ matrix.part }} | xargs go test -mod=readonly -timeout 8m -race -coverprofile=${{ matrix.part }}profile.out -covermode=atomic
if: env.GIT_DIFF
- uses: actions/upload-artifact@v2
- uses: actions/upload-artifact@v3
with:
name: "${{ github.sha }}-${{ matrix.part }}-coverage"
path: ./${{ matrix.part }}profile.out
@@ -91,33 +91,33 @@ jobs:
runs-on: ubuntu-latest
needs: tests
steps:
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
with:
PATTERNS: |
**/**.go
go.mod
go.sum
- uses: actions/download-artifact@v2
- uses: actions/download-artifact@v3
with:
name: "${{ github.sha }}-00-coverage"
if: env.GIT_DIFF
- uses: actions/download-artifact@v2
- uses: actions/download-artifact@v3
with:
name: "${{ github.sha }}-01-coverage"
if: env.GIT_DIFF
- uses: actions/download-artifact@v2
- uses: actions/download-artifact@v3
with:
name: "${{ github.sha }}-02-coverage"
if: env.GIT_DIFF
- uses: actions/download-artifact@v2
- uses: actions/download-artifact@v3
with:
name: "${{ github.sha }}-03-coverage"
if: env.GIT_DIFF
- run: |
cat ./*profile.out | grep -v "mode: atomic" >> coverage.txt
if: env.GIT_DIFF
- uses: codecov/codecov-action@v1.0.13
- uses: codecov/codecov-action@v3
with:
file: ./coverage.txt
if: env.GIT_DIFF

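The split-and-test scheme in this workflow can be approximated locally. A rough sketch, assuming GNU `split` (for the `-n l/4` option) and a Go toolchain, and using part `00` as an example:

```bash
#!/usr/bin/env bash
# Split the package list into four parts and run one part with race
# detection and coverage, mirroring the coverage workflow's matrix jobs.
set -euo pipefail

go list ./... > pkgs.txt
split -d -n l/4 pkgs.txt pkgs.txt.part.

# Run the first quarter; repeat with .01, .02, .03 for the other parts.
xargs go test -mod=readonly -timeout 8m -race \
  -coverprofile=00profile.out -covermode=atomic < pkgs.txt.part.00
```
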
.github/workflows/docker.yml (vendored, 8 lines changed)
@@ -14,7 +14,7 @@ jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@master
- uses: actions/checkout@v3
- name: Prepare
id: prep
run: |
@@ -40,17 +40,17 @@ jobs:
platforms: all

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
uses: docker/setup-buildx-action@v2

- name: Login to DockerHub
if: ${{ github.event_name != 'pull_request' }}
uses: docker/login-action@v1
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}

- name: Publish to Docker Hub
uses: docker/build-push-action@v2
uses: docker/build-push-action@v3
with:
context: .
file: ./DOCKER/Dockerfile

.github/workflows/e2e-manual.yml (vendored, new file, 35 lines)
@@ -0,0 +1,35 @@
# Manually run randomly generated E2E testnets (as nightly).
name: e2e-manual
on:
workflow_dispatch:

jobs:
e2e-nightly-test:
# Run parallel jobs for the listed testnet groups (must match the
# ./build/generator -g flag)
strategy:
fail-fast: false
matrix:
group: ['00', '01', '02', '03']
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/setup-go@v3
with:
go-version: '1.17'

- uses: actions/checkout@v3

- name: Build
working-directory: test/e2e
# Run make jobs in parallel, since we can't run steps in parallel.
run: make -j2 docker generator runner tests

- name: Generate testnets
working-directory: test/e2e
# When changing -g, also change the matrix groups above
run: ./build/generator -g 4 -d networks/nightly/

- name: Run ${{ matrix.p2p }} p2p testnets
working-directory: test/e2e
run: ./run-multiple.sh networks/nightly/*-group${{ matrix.group }}-*.toml

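The same three steps can be run from a local checkout to reproduce one testnet group. A sketch, assuming Docker is available and using group `00` as an example:

```bash
#!/usr/bin/env bash
# Build the E2E tooling, generate nightly testnet manifests, and run one
# group of them, as the e2e-manual workflow above does per matrix entry.
set -euo pipefail

cd test/e2e
make -j2 docker generator runner tests

# -g 4 must match the number of matrix groups defined in the workflow.
./build/generator -g 4 -d networks/nightly/
./run-multiple.sh networks/nightly/*-group00-*.toml
```
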
.github/workflows/e2e-nightly-34x.yml (vendored, 8 lines changed)
@@ -21,11 +21,11 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/setup-go@v2
- uses: actions/setup-go@v3
with:
go-version: '^1.15.4'

- uses: actions/checkout@v2
- uses: actions/checkout@v3
with:
ref: 'v0.34.x'

@@ -49,7 +49,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Notify Slack on failure
uses: rtCamp/action-slack-notify@e9db0ef
uses: rtCamp/action-slack-notify@f565a63638bd3615e76249bffab00fcb9dab90f7
env:
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACK_CHANNEL: tendermint-internal
@@ -65,7 +65,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Notify Slack on success
uses: rtCamp/action-slack-notify@ae4223259071871559b6e9d08b24a63d71b3f0c0
uses: rtCamp/action-slack-notify@f565a63638bd3615e76249bffab00fcb9dab90f7
env:
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACK_CHANNEL: tendermint-internal

.github/workflows/e2e-nightly-master.yml (vendored, 8 lines changed)
@@ -20,11 +20,11 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/setup-go@v2
- uses: actions/setup-go@v3
with:
go-version: '1.15'

- uses: actions/checkout@v2
- uses: actions/checkout@v3

- name: Build
working-directory: test/e2e
@@ -46,7 +46,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Notify Slack on failure
uses: rtCamp/action-slack-notify@ae4223259071871559b6e9d08b24a63d71b3f0c0
uses: rtCamp/action-slack-notify@f565a63638bd3615e76249bffab00fcb9dab90f7
env:
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACK_CHANNEL: tendermint-internal
@@ -62,7 +62,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Notify Slack on success
uses: rtCamp/action-slack-notify@ae4223259071871559b6e9d08b24a63d71b3f0c0
uses: rtCamp/action-slack-notify@f565a63638bd3615e76249bffab00fcb9dab90f7
env:
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACK_CHANNEL: tendermint-internal

.github/workflows/e2e.yml (vendored, 6 lines changed)
@@ -13,11 +13,11 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 15
steps:
- uses: actions/setup-go@v2
- uses: actions/setup-go@v3
with:
go-version: '^1.15.4'
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
with:
PATTERNS: |
**/**.go

.github/workflows/fuzz-nightly.yml (vendored, 10 lines changed)
@@ -9,11 +9,11 @@ jobs:
fuzz-nightly-test:
runs-on: ubuntu-latest
steps:
- uses: actions/setup-go@v2
- uses: actions/setup-go@v3
with:
go-version: '1.15'

- uses: actions/checkout@v2
- uses: actions/checkout@v3

- name: Install go-fuzz
working-directory: test/fuzz
@@ -45,14 +45,14 @@ jobs:
continue-on-error: true

- name: Archive crashers
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: crashers
path: test/fuzz/**/crashers
retention-days: 1

- name: Archive suppressions
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: suppressions
path: test/fuzz/**/suppressions
@@ -72,7 +72,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Notify Slack if any crashers
uses: rtCamp/action-slack-notify@ae4223259071871559b6e9d08b24a63d71b3f0c0
uses: rtCamp/action-slack-notify@f565a63638bd3615e76249bffab00fcb9dab90f7
env:
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACK_CHANNEL: tendermint-internal

.github/workflows/linkchecker.yml (vendored, 6 lines changed)
@@ -1,12 +1,12 @@
name: Check Markdown links
on:
on:
schedule:
- cron: '* */24 * * *'
jobs:
markdown-link-check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@master
- uses: gaurav-nelson/github-action-markdown-link-check@1.0.7
- uses: actions/checkout@v3
- uses: creachadair/github-action-markdown-link-check@master
with:
folder-path: "docs"

@@ -11,19 +11,22 @@ jobs:
golangci:
name: golangci-lint
runs-on: ubuntu-latest
timeout-minutes: 4
timeout-minutes: 8
steps:
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
- uses: actions/checkout@v3
- uses: actions/setup-go@v3
with:
go-version: '^1.16'
- uses: technote-space/get-diff-action@v6
with:
PATTERNS: |
**/**.go
go.mod
go.sum
- uses: golangci/golangci-lint-action@v2.5.1
- uses: golangci/golangci-lint-action@v3
with:
# Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version.
version: v1.38
version: v1.45
args: --timeout 10m
github-token: ${{ secrets.github_token }}
if: env.GIT_DIFF

.github/workflows/linter.yml (vendored, 6 lines changed)
@@ -11,6 +11,7 @@ on:
branches: [master]
paths:
- "**.md"
- "**.yml"

jobs:
build:
@@ -18,7 +19,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout Code
uses: actions/checkout@v2
uses: actions/checkout@v3
- name: Lint Code Base
uses: docker://github/super-linter:v3
env:
@@ -27,6 +28,5 @@ jobs:
DEFAULT_BRANCH: master
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
VALIDATE_MD: true
MARKDOWN_CONFIG_FILE: .markdownlint.yml
VALIDATE_OPAENAPI: true
VALIDATE_OPENAPI: true
VALIDATE_YAML: true

.github/workflows/proto-docker.yml (vendored, 51 lines changed)
@@ -1,51 +0,0 @@
name: Build & Push TM Proto Builder
on:
pull_request:
paths:
- "tools/proto/*"
push:
branches:
- master
paths:
- "tools/proto/*"
schedule:
# run this job once a month to recieve any go or buf updates
- cron: "* * 1 * *"

jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@master
- name: Prepare
id: prep
run: |
DOCKER_IMAGE=tendermintdev/docker-build-proto
VERSION=noop
if [[ $GITHUB_REF == refs/tags/* ]]; then
VERSION=${GITHUB_REF#refs/tags/}
elif [[ $GITHUB_REF == refs/heads/* ]]; then
VERSION=$(echo ${GITHUB_REF#refs/heads/} | sed -r 's#/+#-#g')
if [ "${{ github.event.repository.default_branch }}" = "$VERSION" ]; then
VERSION=latest
fi
fi
TAGS="${DOCKER_IMAGE}:${VERSION}"
echo ::set-output name=tags::${TAGS}

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1

- name: Login to DockerHub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}

- name: Publish to Docker Hub
uses: docker/build-push-action@v2
with:
context: ./tools/proto
file: ./tools/proto/Dockerfile
push: ${{ github.event_name != 'pull_request' }}
tags: ${{ steps.prep.outputs.tags }}

.github/workflows/proto-lint.yml (vendored, new file, 21 lines)
@@ -0,0 +1,21 @@
name: Protobuf Lint
on:
pull_request:
paths:
- 'proto/**'
push:
branches:
- v0.34.x
paths:
- 'proto/**'

jobs:
lint:
runs-on: ubuntu-latest
timeout-minutes: 5
steps:
- uses: actions/checkout@v3
- uses: bufbuild/buf-setup-action@v1.6.0
- uses: bufbuild/buf-lint-action@v1
with:
input: 'proto'

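Locally, the same lint can be run with the buf CLI directly; a one-line sketch, assuming buf v1.x is installed:

```bash
# Lint the proto definitions, matching the buf-lint-action input above.
buf lint proto
```
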
.github/workflows/proto.yml (vendored, 4 lines changed)
@@ -10,13 +10,13 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 4
steps:
- uses: actions/checkout@master
- uses: actions/checkout@v3
- name: lint
run: make proto-lint
proto-breakage:
runs-on: ubuntu-latest
timeout-minutes: 4
steps:
- uses: actions/checkout@master
- uses: actions/checkout@v3
- name: check-breakage
run: make proto-check-breaking-ci

.github/workflows/release.yml (vendored, 6 lines changed)
@@ -10,18 +10,18 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
uses: actions/checkout@v3
with:
fetch-depth: 0

- uses: actions/setup-go@v2
- uses: actions/setup-go@v3
with:
go-version: '^1.15.4'

- run: echo https://github.com/tendermint/tendermint/blob/${GITHUB_REF#refs/tags/}/CHANGELOG.md#${GITHUB_REF#refs/tags/} > ../release_notes.md

- name: Run GoReleaser
uses: goreleaser/goreleaser-action@v2
uses: goreleaser/goreleaser-action@v3
with:
version: latest
args: release --rm-dist --release-notes=../release_notes.md

.github/workflows/stale.yml (vendored, 2 lines changed)
@@ -7,7 +7,7 @@ jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v3
- uses: actions/stale@v5
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
stale-pr-message: "This pull request has been automatically marked as stale because it has not had

.github/workflows/tests.yml (vendored, 40 lines changed)
@@ -23,11 +23,11 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 5
steps:
- uses: actions/setup-go@v2
- uses: actions/setup-go@v3
with:
go-version: "^1.15.4"
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
with:
PATTERNS: |
**/**.go
@@ -36,7 +36,7 @@ jobs:
- name: install
run: make install install_abci
if: "env.GIT_DIFF != ''"
- uses: actions/cache@v2.1.4
- uses: actions/cache@v3
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
@@ -44,7 +44,7 @@ jobs:
${{ runner.os }}-go-
if: env.GIT_DIFF
# Cache binaries for use by other jobs
- uses: actions/cache@v2.1.4
- uses: actions/cache@v3
with:
path: ~/go/bin
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
@@ -55,24 +55,24 @@ jobs:
needs: build
timeout-minutes: 5
steps:
- uses: actions/setup-go@v2
- uses: actions/setup-go@v3
with:
go-version: "^1.15.4"
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
with:
PATTERNS: |
**/**.go
go.mod
go.sum
- uses: actions/cache@v2.1.4
- uses: actions/cache@v3
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
if: env.GIT_DIFF
- uses: actions/cache@v2.1.4
- uses: actions/cache@v3
with:
path: ~/go/bin
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
@@ -87,24 +87,24 @@ jobs:
needs: build
timeout-minutes: 5
steps:
- uses: actions/setup-go@v2
- uses: actions/setup-go@v3
with:
go-version: "^1.15.4"
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
with:
PATTERNS: |
**/**.go
go.mod
go.sum
- uses: actions/cache@v2.1.4
- uses: actions/cache@v3
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
if: env.GIT_DIFF
- uses: actions/cache@v2.1.4
- uses: actions/cache@v3
with:
path: ~/go/bin
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
@@ -118,24 +118,24 @@ jobs:
needs: build
timeout-minutes: 5
steps:
- uses: actions/setup-go@v2
- uses: actions/setup-go@v3
with:
go-version: "^1.15.4"
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
with:
PATTERNS: |
**/**.go
go.mod
go.sum
- uses: actions/cache@v2.1.4
- uses: actions/cache@v3
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
if: env.GIT_DIFF
- uses: actions/cache@v2.1.4
- uses: actions/cache@v3
with:
path: ~/go/bin
key: ${{ runner.os }}-${{ github.sha }}-tm-binary

@@ -1,44 +1,45 @@
linters:
enable:
- asciicheck
- bodyclose
- deadcode
- depguard
- dogsled
- dupl
- errcheck
- exportloopref
# - funlen
# - gochecknoglobals
# - gochecknoinits
# - gocognit
- goconst
- gocritic
# - gocritic
# - gocyclo
# - godox
- gofmt
- goimports
- golint
- revive
- gosec
- gosimple
- govet
- ineffassign
# - interfacer
- lll
- misspell
# - maligned
# - misspell
- nakedret
- nolintlint
- prealloc
- scopelint
- staticcheck
- structcheck
- stylecheck
- typecheck
# - typecheck
- unconvert
# - unparam
- unused
- varcheck
# - whitespace
# - wsl
# - gocognit
- nolintlint

issues:
exclude-rules:
@@ -57,5 +58,13 @@ linters-settings:
suggest-new: true
# govet:
# check-shadowing: true
golint:
revive:
min-confidence: 0
maligned:
suggest-new: true
misspell:
locale: US
ignore-words:
- behaviour

CHANGELOG.md (240 lines changed)
@@ -1,11 +1,121 @@
# Changelog

Friendly reminder, we have a [bug bounty program](https://hackerone.com/cosmos).

## v0.34.20-rc1

Special thanks to external contributors on this release: @joeabbey @yihuang

### BUG FIXES

- [indexer] [#8625](https://github.com/tendermint/tendermint/pull/8625) Fix overriding tx index of duplicated txs. (@yihuang)
- [mempool] [\#8962](https://github.com/tendermint/tendermint/issues/8962) Backport priority mempool fixes from v0.35.x to v0.34.x (@creachadair).

## v0.34.20-rc0

This RC introduces the prioritized mempool.

NOTE: There's a known memory leak with the prioritized mempool that the team are currently working on resolving. We will cut v0.34.20 when this has been resolved. This release candidate is to provide the SDK with the new APIs. Read more about the issue [here](https://github.com/tendermint/tendermint/issues/8775)

### FEATURES

- [cli] [\#8674] Add command to force compact goleveldb databases (@cmwaters)
- [mempool] [\#8695] Port back the priority mempool. (@alexanderbez, @jmalicevic, @cmwaters)

### IMPROVEMENTS

- [logging] [\#8845](https://github.com/tendermint/tendermint/issues/8845) Add "Lazy" Stringers to defer Sprintf and Hash until logs print. (@joeabbey)

### BUG FIXES

- [blocksync] [\#8496](https://github.com/tendermint/tendermint/pull/8496) validate block against state before persisting it to disk (@cmwaters)

## v0.34.19

### BUG FIXES

- [cli] [\#8270](https://github.com/tendermint/tendermint/issues/8270) fix reset commands (@alexanderbez).

## v0.34.18

### BREAKING CHANGES

- CLI/RPC/Config
- [cli] [\#8258](https://github.com/tendermint/tendermint/pull/8258) Fix a bug in the cli that caused `unsafe-reset-all` to panic

## v0.34.17

### BREAKING CHANGES

- CLI/RPC/Config

- [cli] [\#8081](https://github.com/tendermint/tendermint/issues/8081) make the reset command safe to use (@marbar3778).

### BUG FIXES

- [consensus] [\#8079](https://github.com/tendermint/tendermint/issues/8079) start the timeout ticker before relay (backport #7844) (@creachadair).
- [consensus] [\#7992](https://github.com/tendermint/tendermint/issues/7992) [\#7994](https://github.com/tendermint/tendermint/issues/7994) change lock handling in handleMsg and reactor to alleviate issues gossiping during long ABCI calls (@williambanfield).

## v0.34.16

Special thanks to external contributors on this release: @yihuang

### BUG FIXES

- [consensus] [\#7617](https://github.com/tendermint/tendermint/issues/7617) calculate prevote message delay metric (backport #7551) (@williambanfield).
- [consensus] [\#7631](https://github.com/tendermint/tendermint/issues/7631) check proposal non-nil in prevote message delay metric (backport #7625) (@williambanfield).
- [statesync] [\#7885](https://github.com/tendermint/tendermint/issues/7885) statesync: assert app version matches (backport #7856) (@cmwaters).
- [statesync] [\#7881](https://github.com/tendermint/tendermint/issues/7881) fix app hash in state rollback (backport #7837) (@cmwaters).
- [cli] [#7837](https://github.com/tendermint/tendermint/pull/7837) fix app hash in state rollback. (@yihuang).

## v0.34.15

Special thanks to external contributors on this release: @thanethomson

### BUG FIXES

- [\#7368](https://github.com/tendermint/tendermint/issues/7368) cmd: add integration test for rollback functionality (@cmwaters).
- [\#7309](https://github.com/tendermint/tendermint/issues/7309) pubsub: Report a non-nil error when shutting down (fixes #7306).
- [\#7057](https://github.com/tendermint/tendermint/pull/7057) Import Postgres driver support for the psql indexer (@creachadair).
- [\#7106](https://github.com/tendermint/tendermint/pull/7106) Revert mutex change to ABCI Clients (@tychoish).

### IMPROVEMENTS

- [config] [\#7230](https://github.com/tendermint/tendermint/issues/7230) rpc: Add experimental config params to allow for subscription buffer size control (@thanethomson).

## v0.34.14

This release backports the `rollback` feature to allow recovery in the event of an incorrect app hash.

### FEATURES

- [\#6982](https://github.com/tendermint/tendermint/pull/6982) The tendermint binary now has built-in suppport for running the end-to-end test application (with state sync support) (@cmwaters).
- [cli] [#7033](https://github.com/tendermint/tendermint/pull/7033) Add a `rollback` command to rollback to the previous tendermint state. This may be useful in the event of non-determinstic app hash or when reverting an upgrade. @cmwaters

### IMPROVEMENTS

- [\#7103](https://github.com/tendermint/tendermint/pull/7104) Remove IAVL dependency (backport of #6550) (@cmwaters)

### BUG FIXES

- [\#7057](https://github.com/tendermint/tendermint/pull/7057) Import Postgres driver support for the psql indexer (@creachadair).
- [ABCI] [\#7110](https://github.com/tendermint/tendermint/issues/7110) Revert "change client to use multi-reader mutexes (#6873)" (@tychoish).

## v0.34.13

This release backports improvements to state synchronization and ABCI
performance under concurrent load, and the PostgreSQL event indexer.

### IMPROVEMENTS

- [statesync] [\#6881](https://github.com/tendermint/tendermint/issues/6881) improvements to stateprovider logic (@cmwaters)
- [ABCI] [\#6873](https://github.com/tendermint/tendermint/issues/6873) change client to use multi-reader mutexes (@tychoish)
- [indexing] [\#6906](https://github.com/tendermint/tendermint/issues/6906) enable the PostgreSQL indexer sink (@creachadair)

## v0.34.12

Special thanks to external contributors on this release: @JayT106.

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### FEATURES

- [rpc] [\#6717](https://github.com/tendermint/tendermint/pull/6717) introduce
@@ -28,8 +138,6 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi
This release improves the robustness of statesync; tweaking channel priorities and timeouts and
adding two new parameters to the state sync config.

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES

- Apps
@@ -53,8 +161,6 @@ This release fixes a bug where peers would sometimes try to send messages
on incorrect channels. Special thanks to our friends at Oasis Labs for surfacing
this issue!

Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermint).

- [p2p/node] [\#6339](https://github.com/tendermint/tendermint/issues/6339) Fix bug with using custom channels (@cmwaters)
- [light] [\#6346](https://github.com/tendermint/tendermint/issues/6346) Correctly handle too high errors to improve client robustness (@cmwaters)

@@ -70,8 +176,6 @@ This release also includes a small Go API-breaking change, to reduce panics in t

Special thanks to our external contributors on this release: @gchaincl

Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES

- Go API
@@ -94,8 +198,6 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi
This release, in conjunction with [a fix in the Cosmos SDK](https://github.com/cosmos/cosmos-sdk/pull/8641),
introduces changes that should mean the logs are much, much quieter. 🎉

Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermint).

### IMPROVEMENTS

- [libs/log] [\#6174](https://github.com/tendermint/tendermint/issues/6174) Include timestamp (`ts` field; `time.RFC3339Nano` format) in JSON logger output (@melekes)
@@ -133,8 +235,6 @@ use remote signer implementations instead of `FilePV` in production.
Thank you to @joe-bowman for his assistance with this vulnerability and a particular
shout-out to @marbar3778 for diagnosing it quickly.

Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermint).

### BUG FIXES

- [consensus] [\#6128](https://github.com/tendermint/tendermint/pull/6128) Remove privValidator from log call (@tessr)
@@ -155,8 +255,6 @@ Thank you to our friends at Crypto.com for the initial report of this memory lea

Special thanks to other external contributors on this release: @yayajacky, @odidev, @laniehei, and @c29r3!

Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermint).

### BUG FIXES

- [light] [\#6022](https://github.com/tendermint/tendermint/pull/6022) Fix a bug when the number of validators equals 100 (@melekes)
@@ -176,8 +274,6 @@ or https://nvd.nist.gov/vuln/detail/CVE-2021-21271.
Tendermint Core v0.34.3 also updates GoGo Protobuf to 1.3.2 in order to pick up the fix for
https://nvd.nist.gov/vuln/detail/CVE-2021-3121.

Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermint).

### BUG FIXES

- [evidence] [[security fix]](https://github.com/tendermint/tendermint/security/advisories/GHSA-p658-8693-mhvg) Use correct source of evidence time (@cmwaters)
@@ -191,8 +287,6 @@ This release fixes a substantial bug in evidence handling where evidence could
sometimes be broadcast before the block containing that evidence was fully committed,
resulting in some nodes panicking when trying to verify said evidence.

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES

- Go API
@@ -216,8 +310,6 @@ disconnecting from this node. As a temporary remedy (until the mempool package
is refactored), the `max-batch-bytes` was disabled. Transactions will be sent
one by one without batching.

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES

- CLI/RPC/Config
@@ -246,8 +338,6 @@ Holy smokes, this is a big one! For a more reader-friendly overview of the chang
Special thanks to external contributors on this release: @james-ray, @fedekunze, @favadi, @alessio,
@joe-bowman, @cuonglm, @SadPencil and @dongsam.

And as always, friendly reminder, that we have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES

- CLI/RPC/Config
@@ -488,9 +578,6 @@ as 2/3+ of the signatures are checked._

Special thanks to @njmurarka at Bluzelle Networks for reporting this.

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### SECURITY:

- [consensus] Do not allow signatures for a wrong block in commits (@ebuchman)
@@ -506,8 +593,6 @@ need to update your code.**

Special thanks to external contributors on this release: @tau3,

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

- Go API
@@ -567,8 +652,6 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi

Special thanks to external contributors on this release: @whylee259, @greg-szabo

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

- Go API
@@ -655,9 +738,6 @@ Notes:
Special thanks to [fudongbai](https://hackerone.com/fudongbai) for finding
and reporting this.

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### SECURITY:

- [mempool] Reserve IDs in InitPeer instead of AddPeer (@tessr)
@@ -670,8 +750,6 @@ program](https://hackerone.com/tendermint).
Special thanks to external contributors on this release:
@antho1404, @michaelfig, @gterzian, @tau3, @Shivani912

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

- CLI/RPC/Config
@@ -722,9 +800,6 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi
Special thanks to external contributors on this release:
@princesinha19

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### FEATURES:

- [rpc] [\#3333](https://github.com/tendermint/tendermint/issues/3333) Add `order_by` to `/tx_search` endpoint, allowing to change default ordering from asc to desc (@princesinha19)
@@ -743,9 +818,6 @@ program](https://hackerone.com/tendermint).

Special thanks to external contributors on this release: @mrekucci, @PSalant726, @princesinha19, @greg-szabo, @dongsam, @cuonglm, @jgimeno, @yenkhoon

Friendly reminder, we have a [bug bounty
program.](https://hackerone.com/tendermint).

*January 14, 2020*

This release contains breaking changes to the `Block#Header`, specifically
@@ -974,9 +1046,6 @@ Notes:
Special thanks to [fudongbai](https://hackerone.com/fudongbai) for finding
and reporting this.

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### SECURITY:

- [mempool] Reserve IDs in InitPeer instead of AddPeer (@tessr)
@@ -988,9 +1057,6 @@ _January, 9, 2020_

Special thanks to external contributors on this release: @greg-szabo, @gregzaitsev, @yenkhoon

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### FEATURES:

- [rpc/lib] [\#4248](https://github.com/tendermint/tendermint/issues/4248) RPC client basic authentication support (@greg-szabo)
@@ -1012,9 +1078,6 @@ program](https://hackerone.com/tendermint).

Special thanks to external contributors on this release: @erikgrinaker, @guagualvcha, @hsyis, @cosmostuba, @whunmr, @austinabell

Friendly reminder, we have a [bug bounty
program.](https://hackerone.com/tendermint).

### BREAKING CHANGES:

@@ -1054,9 +1117,6 @@ identified and fixed here.
Special thanks to [elvishacker](https://hackerone.com/elvishacker) for finding
and reporting this.

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

- Go API
@@ -1083,9 +1143,6 @@ accepting new peers and only allowing `ed25519` pubkeys.
Special thanks to [fudongbai](https://hackerone.com/fudongbai) for pointing
this out.

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### SECURITY:

- [p2p] [\#4030](https://github.com/tendermint/tendermint/issues/4030) Only allow ed25519 pubkeys when connecting
@@ -1101,9 +1158,6 @@ All clients are recommended to upgrade. See
Special thanks to [fudongbai](https://hackerone.com/fudongbai) for discovering
and reporting this issue.

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### SECURITY:

- [p2p] [\#4030](https://github.com/tendermint/tendermint/issues/4030) Fix for panic on nil public key send to a peer
@@ -1114,9 +1168,6 @@ program](https://hackerone.com/tendermint).

Special thanks to external contributors on this release: @jon-certik, @gracenoah, @PSalant726, @gchaincl

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

- CLI/RPC/Config
@@ -1152,9 +1203,6 @@ guide.
Special thanks to external contributors on this release:
@gchaincl, @bluele, @climber73

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### IMPROVEMENTS:

- [consensus] [\#3839](https://github.com/tendermint/tendermint/issues/3839) Reduce "Error attempting to add vote" message severity (Error -> Info)
@@ -1175,9 +1223,6 @@ program](https://hackerone.com/tendermint).
Special thanks to external contributors on this release:
@ruseinov, @bluele, @guagualvcha

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

- Go API
@@ -1217,9 +1262,6 @@ This release contains a minor enhancement to the ABCI and some breaking changes
- CheckTx requests include a `CheckTxType` enum that can be set to `Recheck` to indicate to the application that this transaction was already checked/validated and certain expensive operations (like checking signatures) can be skipped
- Removed various functions from `libs` pkgs

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

- Go API
@@ -1265,9 +1307,6 @@ and the RPC, namely:
[docs](https://github.com/tendermint/tendermint/blob/60827f75623b92eff132dc0eff5b49d2025c591e/docs/spec/abci/abci.md#events)
- Bind RPC to localhost by default, not to the public interface [UPGRADING/RPC_Changes](./UPGRADING.md#rpc_changes)

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

* CLI/RPC/Config
@@ -1368,9 +1407,6 @@ Notes:
Special thanks to [fudongbai](https://hackerone.com/fudongbai) for finding
and reporting this.

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### SECURITY:

- [mempool] Reserve IDs in InitPeer instead of AddPeer (@tessr)
@@ -1390,9 +1426,6 @@ identified and fixed here.
Special thanks to [elvishacker](https://hackerone.com/elvishacker) for finding
and reporting this.

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

- Go API
@@ -1419,9 +1452,6 @@ accepting new peers and only allowing `ed25519` pubkeys.
Special thanks to [fudongbai](https://hackerone.com/fudongbai) for pointing
this out.

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### SECURITY:

- [p2p] [\#4030](https://github.com/tendermint/tendermint/issues/4030) Only allow ed25519 pubkeys when connecting
@@ -1437,9 +1467,6 @@ All clients are recommended to upgrade. See
Special thanks to [fudongbai](https://hackerone.com/fudongbai) for discovering
and reporting this issue.

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### SECURITY:

- [p2p] [\#4030](https://github.com/tendermint/tendermint/issues/4030) Fix for panic on nil public key send to a peer
@@ -1734,9 +1761,6 @@ See the [v0.31.0
Milestone](https://github.com/tendermint/tendermint/milestone/19?closed=1) for
more details.

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

* CLI/RPC/Config
@@ -1747,7 +1771,7 @@ program](https://hackerone.com/tendermint).
- [rpc] [\#3269](https://github.com/tendermint/tendermint/issues/2826) Limit number of unique clientIDs with open subscriptions. Configurable via `rpc.max_subscription_clients`
- [rpc] [\#3269](https://github.com/tendermint/tendermint/issues/2826) Limit number of unique queries a given client can subscribe to at once. Configurable via `rpc.max_subscriptions_per_client`.
- [rpc] [\#3435](https://github.com/tendermint/tendermint/issues/3435) Default ReadTimeout and WriteTimeout changed to 10s. WriteTimeout can increased by setting `rpc.timeout_broadcast_tx_commit` in the config.
- [rpc/client] [\#3269](https://github.com/tendermint/tendermint/issues/3269) Update `EventsClient` interface to reflect new pubsub/eventBus API [ADR-33](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-033-pubsub.md). This includes `Subscribe`, `Unsubscribe`, and `UnsubscribeAll` methods.
- [rpc/client] [\#3269](https://github.com/tendermint/tendermint/issues/3269) Update `EventsClient` interface to reflect new pubsub/eventBus API [ADR-33](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-033-pubsub.md). This includes `Subscribe`, `Unsubscribe`, and `UnsubscribeAll` methods.

* Apps
- [abci] [\#3403](https://github.com/tendermint/tendermint/issues/3403) Remove `time_iota_ms` from BlockParams. This is a
@@ -1800,7 +1824,7 @@ program](https://hackerone.com/tendermint).
- [blockchain] [\#3358](https://github.com/tendermint/tendermint/pull/3358) Fix timer leak in `BlockPool` (@guagualvcha)
- [cmd] [\#3408](https://github.com/tendermint/tendermint/issues/3408) Fix `testnet` command's panic when creating non-validator configs (using `--n` flag) (@srmo)
- [libs/db/remotedb/grpcdb] [\#3402](https://github.com/tendermint/tendermint/issues/3402) Close Iterator/ReverseIterator after use
- [libs/pubsub] [\#951](https://github.com/tendermint/tendermint/issues/951), [\#1880](https://github.com/tendermint/tendermint/issues/1880) Use non-blocking send when dispatching messages [ADR-33](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-033-pubsub.md)
- [libs/pubsub] [\#951](https://github.com/tendermint/tendermint/issues/951), [\#1880](https://github.com/tendermint/tendermint/issues/1880) Use non-blocking send when dispatching messages [ADR-33](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-033-pubsub.md)
- [lite] [\#3364](https://github.com/tendermint/tendermint/issues/3364) Fix `/validators` and `/abci_query` proxy endpoints
(@guagualvcha)
- [p2p/conn] [\#3347](https://github.com/tendermint/tendermint/issues/3347) Reject all-zero shared secrets in the Diffie-Hellman step of secret-connection
@@ -1867,7 +1891,7 @@ For more, see issues marked

This release also includes a fix to prevent Tendermint from including the same
piece of evidence in more than one block. This issue was reported by @chengwenxi in our
[bug bounty program](https://hackerone.com/tendermint).
[bug bounty program](https://hackerone.com/cosmos).

### BREAKING CHANGES:

@@ -1956,9 +1980,6 @@ This release contains two important fixes: one for p2p layer where we sometimes
were not closing connections and one for consensus layer where consensus with
no empty blocks (`create_empty_blocks = false`) could halt.

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### IMPROVEMENTS:
- [pex] [\#3037](https://github.com/tendermint/tendermint/issues/3037) Only log "Reached max attempts to dial" once
- [rpc] [\#3159](https://github.com/tendermint/tendermint/issues/3159) Expose
@@ -1997,9 +2018,6 @@ While we are trying to stabilize the Block protocol to preserve compatibility
with old chains, there may be some final changes yet to come before Cosmos
launch as we continue to audit and test the software.

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

* CLI/RPC/Config
@@ -2047,9 +2065,6 @@ program](https://hackerone.com/tendermint).
Special thanks to external contributors on this release:
@HaoyangLiu

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### BUG FIXES:
- [consensus] Fix consensus halt from proposing blocks with too much evidence

@@ -2177,9 +2192,6 @@ Special thanks to @dlguddus for discovering a [major
issue](https://github.com/tendermint/tendermint/issues/2718#issuecomment-440888677)
in the proposer selection algorithm.

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

This release is primarily about fixes to the proposer selection algorithm
in preparation for the [Cosmos Game of
Stakes](https://blog.cosmos.network/the-game-of-stakes-is-open-for-registration-83a404746ee6).
@@ -2241,9 +2253,6 @@ Special thanks to external contributors on this release:
@ackratos, @goolAdapter, @james-ray, @joe-bowman, @kostko,
@nagarajmanjunath, @tomtau

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### FEATURES:

- [rpc] [\#2747](https://github.com/tendermint/tendermint/issues/2747) Enable subscription to tags emitted from `BeginBlock`/`EndBlock` (@kostko)
@@ -2282,9 +2291,6 @@ program](https://hackerone.com/tendermint).
Special thanks to external contributors on this release:
@danil-lashin, @kevlubkcm, @krhubert, @srmo

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

* Go API
@@ -2328,8 +2334,6 @@ program](https://hackerone.com/tendermint).

Special thanks to external contributors on this release: @hleb-albau, @zhuzeyu

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### FEATURES:

- [rpc] [\#2582](https://github.com/tendermint/tendermint/issues/2582) Enable CORS on RPC API (@hleb-albau)
@@ -2347,8 +2351,6 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi

Special thanks to external contributors on this release: @katakonst

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### IMPROVEMENTS:

- [consensus] [\#2704](https://github.com/tendermint/tendermint/issues/2704) Simplify valid POL round logic
@@ -2376,7 +2378,7 @@ Special thanks to external contributors on this release:
@james-ray, @overbool, @phymbert, @Slamper, @Uzair1995, @yutianwu.

Special thanks to @Slamper for a series of bug reports in our [bug bounty
program](https://hackerone.com/tendermint) which are fixed in this release.
program](https://hackerone.com/cosmos) which are fixed in this release.

This release is primarily about adding Version fields to various data structures,
optimizing consensus messages for signing and verification in
@@ -2520,9 +2522,7 @@ Special thanks to external contributors on this release:
This release is mostly about the ConsensusParams - removing fields and enforcing MaxGas.
It also addresses some issues found via security audit, removes various unused
functions from `libs/common`, and implements
[ADR-012](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-012-peer-transport.md).

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).
[ADR-012](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-012-peer-transport.md).

BREAKING CHANGES:

@@ -2603,7 +2603,7 @@ BREAKING CHANGES:
- [abci] Added address of the original proposer of the block to Header
- [abci] Change ABCI Header to match Tendermint exactly
- [abci] [\#2159](https://github.com/tendermint/tendermint/issues/2159) Update use of `Validator` (see
[ADR-018](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-018-ABCI-Validators.md)):
[ADR-018](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-018-ABCI-Validators.md)):
- Remove PubKey from `Validator` (so it's just Address and Power)
- Introduce `ValidatorUpdate` (with just PubKey and Power)
- InitChain and EndBlock use ValidatorUpdate
@@ -2625,7 +2625,7 @@ BREAKING CHANGES:
- [state] [\#1815](https://github.com/tendermint/tendermint/issues/1815) Validator set changes are now delayed by one block (!)
- Add NextValidatorSet to State, changes on-disk representation of state
- [state] [\#2184](https://github.com/tendermint/tendermint/issues/2184) Enforce ConsensusParams.BlockSize.MaxBytes (See
[ADR-020](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-020-block-size.md)).
[ADR-020](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-020-block-size.md)).
- Remove ConsensusParams.BlockSize.MaxTxs
- Introduce maximum sizes for all components of a block, including ChainID
- [types] Updates to the block Header:
@@ -2636,7 +2636,7 @@ BREAKING CHANGES:
- [consensus] [\#2203](https://github.com/tendermint/tendermint/issues/2203) Implement BFT time
- Timestamp in block must be monotonic and equal the median of timestamps in block's LastCommit
- [crypto] [\#2239](https://github.com/tendermint/tendermint/issues/2239) Secp256k1 signature changes (See
[ADR-014](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-014-secp-malleability.md)):
[ADR-014](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-014-secp-malleability.md)):
- format changed from DER to `r || s`, both little endian encoded as 32 bytes.
- malleability removed by requiring `s` to be in canonical form.

@@ -1,6 +1,6 @@
# Unreleased Changes

## v0.34.13
## v0.34.20

Special thanks to external contributors on this release:

@@ -22,5 +22,4 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi

### IMPROVEMENTS

### BUG FIXES
Makefile (62 changes)
@@ -13,7 +13,6 @@ endif
|
||||
LD_FLAGS = -X github.com/tendermint/tendermint/version.TMCoreSemVer=$(VERSION)
|
||||
BUILD_FLAGS = -mod=readonly -ldflags "$(LD_FLAGS)"
|
||||
HTTPS_GIT := https://github.com/tendermint/tendermint.git
|
||||
DOCKER_BUF := docker run -v $(shell pwd):/workspace --workdir /workspace bufbuild/buf
|
||||
CGO_ENABLED ?= 0
|
||||
|
||||
# handle nostrip
|
||||
@@ -70,34 +69,59 @@ install:
|
||||
CGO_ENABLED=$(CGO_ENABLED) go install $(BUILD_FLAGS) -tags $(BUILD_TAGS) ./cmd/tendermint
|
||||
.PHONY: install
|
||||
|
||||
|
||||
###############################################################################
|
||||
### Mocks ###
|
||||
###############################################################################
|
||||
|
||||
mockery:
|
||||
go generate -run="./scripts/mockery_generate.sh" ./...
|
||||
.PHONY: mockery
|
||||
|
||||
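The new mockery target leans on `go generate`'s `-run` filter: only `//go:generate` directives whose command text matches `./scripts/mockery_generate.sh` are executed. A minimal sketch of how such a directive might sit on an interface; the package name, interface, and relative script path here are illustrative, not the repo's actual files:

```go
package example

// Client is a hypothetical interface used only to illustrate the directive.
// Running `go generate -run="./scripts/mockery_generate.sh" ./...` executes the
// line below (its command text matches the -run pattern) and skips any other
// //go:generate directives in the tree.
//go:generate ../scripts/mockery_generate.sh Client
type Client interface {
	Start() error
	Stop() error
}
```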
###############################################################################
|
||||
### Protobuf ###
|
||||
###############################################################################
|
||||
|
||||
proto-all: proto-gen proto-lint proto-check-breaking
|
||||
.PHONY: proto-all
|
||||
check-proto-deps:
|
||||
ifeq (,$(shell which protoc-gen-gogofaster))
|
||||
@go install github.com/gogo/protobuf/protoc-gen-gogofaster@latest
|
||||
endif
|
||||
.PHONY: check-proto-deps
|
||||
|
||||
proto-gen:
|
||||
@docker pull -q tendermintdev/docker-build-proto
|
||||
check-proto-format-deps:
|
||||
ifeq (,$(shell which clang-format))
|
||||
$(error "clang-format is required for Protobuf formatting. See instructions for your platform on how to install it.")
|
||||
endif
|
||||
.PHONY: check-proto-format-deps
|
||||
|
||||
proto-gen: check-proto-deps
|
||||
@echo "Generating Protobuf files"
|
||||
@docker run -v $(shell pwd):/workspace --workdir /workspace tendermintdev/docker-build-proto sh ./scripts/protocgen.sh
|
||||
@go run github.com/bufbuild/buf/cmd/buf generate
|
||||
@mv ./proto/tendermint/abci/types.pb.go ./abci/types/
|
||||
.PHONY: proto-gen
|
||||
|
||||
proto-lint:
|
||||
@$(DOCKER_BUF) check lint --error-format=json
|
||||
# These targets are provided for convenience and are intended for local
|
||||
# execution only.
|
||||
proto-lint: check-proto-deps
|
||||
@echo "Linting Protobuf files"
|
||||
@go run github.com/bufbuild/buf/cmd/buf lint
|
||||
.PHONY: proto-lint
|
||||
|
||||
proto-format:
|
||||
proto-format: check-proto-format-deps
|
||||
@echo "Formatting Protobuf files"
|
||||
docker run -v $(shell pwd):/workspace --workdir /workspace tendermintdev/docker-build-proto find ./ -not -path "./third_party/*" -name *.proto -exec clang-format -i {} \;
|
||||
@find . -name '*.proto' -path "./proto/*" -exec clang-format -i {} \;
|
||||
.PHONY: proto-format
|
||||
|
||||
proto-check-breaking:
|
||||
@$(DOCKER_BUF) check breaking --against-input .git#branch=master
|
||||
proto-check-breaking: check-proto-deps
|
||||
@echo "Checking for breaking changes in Protobuf files against local branch"
|
||||
@echo "Note: This is only useful if your changes have not yet been committed."
|
||||
@echo " Otherwise read up on buf's \"breaking\" command usage:"
|
||||
@echo " https://docs.buf.build/breaking/usage"
|
||||
@go run github.com/bufbuild/buf/cmd/buf breaking --against ".git"
|
||||
.PHONY: proto-check-breaking
|
||||
|
||||
proto-check-breaking-ci:
|
||||
@$(DOCKER_BUF) check breaking --against-input $(HTTPS_GIT)#branch=master
|
||||
@go run github.com/bufbuild/buf/cmd/buf breaking --against $(HTTPS_GIT)#branch=v0.34.x
|
||||
.PHONY: proto-check-breaking-ci
|
||||
|
||||
###############################################################################
|
||||
@@ -186,12 +210,12 @@ DESTINATION = ./index.html.md
|
||||
###############################################################################
|
||||
|
||||
build-docs:
|
||||
cd docs && \
|
||||
while read p; do \
|
||||
(git checkout $${p} . && npm install && VUEPRESS_BASE="/$${p}/" npm run build) ; \
|
||||
mkdir -p ~/output/$${p} ; \
|
||||
cp -r .vuepress/dist/* ~/output/$${p}/ ; \
|
||||
cp ~/output/$${p}/index.html ~/output ; \
|
||||
@cd docs && \
|
||||
while read -r branch path_prefix; do \
|
||||
(git checkout $${branch} && npm ci && VUEPRESS_BASE="/$${path_prefix}/" npm run build) ; \
|
||||
mkdir -p ~/output/$${path_prefix} ; \
|
||||
cp -r .vuepress/dist/* ~/output/$${path_prefix}/ ; \
|
||||
cp ~/output/$${path_prefix}/index.html ~/output ; \
|
||||
done < versions ;
|
||||
.PHONY: build-docs
|
||||
|
||||
|
||||
@@ -81,9 +81,15 @@ type ReqRes struct {
|
||||
*sync.WaitGroup
|
||||
*types.Response // Not set atomically, so be sure to use WaitGroup.
|
||||
|
||||
mtx tmsync.Mutex
|
||||
done bool // Gets set to true once *after* WaitGroup.Done().
|
||||
cb func(*types.Response) // A single callback that may be set.
|
||||
mtx tmsync.Mutex
|
||||
|
||||
// callbackInvoked tracks whether the callback was already
|
||||
// invoked during the regular execution of the request. This variable
|
||||
// allows clients to set the callback simultaneously without potentially
|
||||
// invoking the callback twice by accident, once when 'SetCallback' is
|
||||
// called and once during the normal request.
|
||||
callbackInvoked bool
|
||||
cb func(*types.Response) // A single callback that may be set.
|
||||
}
|
||||
|
||||
func NewReqRes(req *types.Request) *ReqRes {
|
||||
@@ -92,8 +98,8 @@ func NewReqRes(req *types.Request) *ReqRes {
|
||||
WaitGroup: waitGroup1(),
|
||||
Response: nil,
|
||||
|
||||
done: false,
|
||||
cb: nil,
|
||||
callbackInvoked: false,
|
||||
cb: nil,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -103,7 +109,7 @@ func NewReqRes(req *types.Request) *ReqRes {
|
||||
func (r *ReqRes) SetCallback(cb func(res *types.Response)) {
|
||||
r.mtx.Lock()
|
||||
|
||||
if r.done {
|
||||
if r.callbackInvoked {
|
||||
r.mtx.Unlock()
|
||||
cb(r.Response)
|
||||
return
|
||||
@@ -122,6 +128,7 @@ func (r *ReqRes) InvokeCallback() {
|
||||
if r.cb != nil {
|
||||
r.cb(r.Response)
|
||||
}
|
||||
r.callbackInvoked = true
|
||||
}
|
||||
|
||||
// GetCallback returns the configured callback of the ReqRes object which may be
|
||||
@@ -136,13 +143,6 @@ func (r *ReqRes) GetCallback() func(*types.Response) {
|
||||
return r.cb
|
||||
}
|
||||
|
||||
// SetDone marks the ReqRes object as done.
|
||||
func (r *ReqRes) SetDone() {
|
||||
r.mtx.Lock()
|
||||
r.done = true
|
||||
r.mtx.Unlock()
|
||||
}
|
||||
|
||||
func waitGroup1() (wg *sync.WaitGroup) {
|
||||
wg = &sync.WaitGroup{}
|
||||
wg.Add(1)
|
||||
|
||||
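The `done` flag on `ReqRes` is replaced by `callbackInvoked`, so a callback registered after the response has already been delivered is still fired exactly once from `SetCallback`, while an earlier registration is fired by `InvokeCallback`. A minimal usage sketch under that assumption, using the v0.34-style async API with error handling omitted:

```go
package example

import (
	"fmt"

	abcicli "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/types"
)

// checkTxWithCallback shows the intent of the callbackInvoked change: the
// callback runs exactly once whether it is set before or after the response lands.
func checkTxWithCallback(c abcicli.Client, tx []byte) {
	reqRes := c.CheckTxAsync(types.RequestCheckTx{Tx: tx})
	reqRes.SetCallback(func(res *types.Response) {
		fmt.Println("check tx code:", res.GetCheckTx().Code)
	})
}
```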
@@ -66,7 +66,6 @@ func (cli *grpcClient) OnStart() error {
|
||||
cli.mtx.Lock()
|
||||
defer cli.mtx.Unlock()
|
||||
|
||||
reqres.SetDone()
|
||||
reqres.Done()
|
||||
|
||||
// Notify client listener if set
|
||||
@@ -75,9 +74,7 @@ func (cli *grpcClient) OnStart() error {
|
||||
}
|
||||
|
||||
// Notify reqRes listener if set
|
||||
if cb := reqres.GetCallback(); cb != nil {
|
||||
cb(reqres.Response)
|
||||
}
|
||||
reqres.InvokeCallback()
|
||||
}
|
||||
for reqres := range cli.chReqRes {
|
||||
if reqres != nil {
|
||||
@@ -90,6 +87,7 @@ func (cli *grpcClient) OnStart() error {
|
||||
|
||||
RETRY_LOOP:
|
||||
for {
|
||||
//nolint:staticcheck // SA1019 Existing use of deprecated but supported dial option.
|
||||
conn, err := grpc.Dial(cli.addr, grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc))
|
||||
if err != nil {
|
||||
if cli.mustConnect {
|
||||
@@ -342,7 +340,9 @@ func (cli *grpcClient) finishSyncCall(reqres *ReqRes) *types.Response {
|
||||
//----------------------------------------
|
||||
|
||||
func (cli *grpcClient) FlushSync() error {
|
||||
return nil
|
||||
reqres := cli.FlushAsync()
|
||||
cli.finishSyncCall(reqres).GetFlush()
|
||||
return cli.Error()
|
||||
}
|
||||
|
||||
func (cli *grpcClient) EchoSync(msg string) (*types.ResponseEcho, error) {
|
||||
|
||||
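`FlushSync` goes from a no-op to a real round trip: it enqueues a flush via `FlushAsync` and blocks in `finishSyncCall` until the matching response arrives. A hedged sketch of how a caller might rely on that to drain a batch of async requests:

```go
package example

import (
	abcicli "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/types"
)

// flushBatch queues CheckTx requests asynchronously and then blocks until the
// server has answered everything queued so far. With this change FlushSync
// actually waits for the flush response instead of returning immediately.
func flushBatch(c abcicli.Client, txs [][]byte) error {
	for _, tx := range txs {
		c.CheckTxAsync(types.RequestCheckTx{Tx: tx})
	}
	return c.FlushSync()
}
```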
@@ -20,6 +20,12 @@ type localClient struct {
|
||||
Callback
|
||||
}
|
||||
|
||||
var _ Client = (*localClient)(nil)
|
||||
|
||||
// NewLocalClient creates a local client, which will be directly calling the
|
||||
// methods of the given app.
|
||||
//
|
||||
// Both Async and Sync methods ignore the given context.Context parameter.
|
||||
func NewLocalClient(mtx *tmsync.Mutex, app types.Application) Client {
|
||||
if mtx == nil {
|
||||
mtx = new(tmsync.Mutex)
|
||||
@@ -321,12 +327,13 @@ func (app *localClient) ApplySnapshotChunkSync(
|
||||
|
||||
func (app *localClient) callback(req *types.Request, res *types.Response) *ReqRes {
|
||||
app.Callback(req, res)
|
||||
return newLocalReqRes(req, res)
|
||||
rr := newLocalReqRes(req, res)
|
||||
rr.callbackInvoked = true
|
||||
return rr
|
||||
}
|
||||
|
||||
func newLocalReqRes(req *types.Request, res *types.Response) *ReqRes {
|
||||
reqRes := NewReqRes(req)
|
||||
reqRes.Response = res
|
||||
reqRes.SetDone()
|
||||
return reqRes
|
||||
}
|
||||
|
||||
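`NewLocalClient` now documents that it calls straight into the application and ignores any context, and its `callback` helper marks `callbackInvoked` so a late `SetCallback` on a locally completed request fires immediately. A small construction sketch; the kvstore example app stands in for a real application:

```go
package example

import (
	abcicli "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/example/kvstore"
)

// newInProcessClient wires a local (in-process) ABCI client to an application.
// Passing nil lets NewLocalClient allocate its own mutex; pass a shared
// *tmsync.Mutex instead when several local clients must serialize on one app.
func newInProcessClient() abcicli.Client {
	app := kvstore.NewApplication()
	return abcicli.NewLocalClient(nil, app)
}
```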
@@ -2,6 +2,7 @@ package abcicli_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -118,3 +119,71 @@ func (slowApp) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock
|
||||
time.Sleep(200 * time.Millisecond)
|
||||
return types.ResponseBeginBlock{}
|
||||
}
|
||||
|
||||
// TestCallbackInvokedWhenSetLate ensures that the callback is invoked when
|
||||
// set after the client completes the call into the app. Currently this
|
||||
// test relies on the callback being allowed to be invoked twice if set multiple
|
||||
// times, once when set early and once when set late.
|
||||
func TestCallbackInvokedWhenSetLate(t *testing.T) {
|
||||
wg := &sync.WaitGroup{}
|
||||
wg.Add(1)
|
||||
app := blockedABCIApplication{
|
||||
wg: wg,
|
||||
}
|
||||
_, c := setupClientServer(t, app)
|
||||
reqRes := c.CheckTxAsync(types.RequestCheckTx{})
|
||||
|
||||
done := make(chan struct{})
|
||||
cb := func(_ *types.Response) {
|
||||
close(done)
|
||||
}
|
||||
reqRes.SetCallback(cb)
|
||||
app.wg.Done()
|
||||
<-done
|
||||
|
||||
var called bool
|
||||
cb = func(_ *types.Response) {
|
||||
called = true
|
||||
}
|
||||
reqRes.SetCallback(cb)
|
||||
require.True(t, called)
|
||||
}
|
||||
|
||||
type blockedABCIApplication struct {
|
||||
wg *sync.WaitGroup
|
||||
types.BaseApplication
|
||||
}
|
||||
|
||||
func (b blockedABCIApplication) CheckTx(r types.RequestCheckTx) types.ResponseCheckTx {
|
||||
b.wg.Wait()
|
||||
return b.BaseApplication.CheckTx(r)
|
||||
}
|
||||
|
||||
// TestCallbackInvokedWhenSetEarly ensures that the callback is invoked when
|
||||
// set before the client completes the call into the app.
|
||||
func TestCallbackInvokedWhenSetEarly(t *testing.T) {
|
||||
wg := &sync.WaitGroup{}
|
||||
wg.Add(1)
|
||||
app := blockedABCIApplication{
|
||||
wg: wg,
|
||||
}
|
||||
_, c := setupClientServer(t, app)
|
||||
reqRes := c.CheckTxAsync(types.RequestCheckTx{})
|
||||
|
||||
done := make(chan struct{})
|
||||
cb := func(_ *types.Response) {
|
||||
close(done)
|
||||
}
|
||||
reqRes.SetCallback(cb)
|
||||
app.wg.Done()
|
||||
|
||||
called := func() bool {
|
||||
select {
|
||||
case <-done:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
require.Eventually(t, called, time.Second, time.Millisecond*25)
|
||||
}
|
||||
|
||||
@@ -130,7 +130,7 @@ func dialerFunc(ctx context.Context, addr string) (net.Conn, error) {
|
||||
|
||||
func testGRPCSync(t *testing.T, app types.ABCIApplicationServer) {
|
||||
numDeliverTxs := 2000
|
||||
socketFile := fmt.Sprintf("test-%08x.sock", rand.Int31n(1<<30))
|
||||
socketFile := fmt.Sprintf("/tmp/test-%08x.sock", rand.Int31n(1<<30))
|
||||
defer os.Remove(socketFile)
|
||||
socket := fmt.Sprintf("unix://%v", socketFile)
|
||||
|
||||
@@ -148,6 +148,7 @@ func testGRPCSync(t *testing.T, app types.ABCIApplicationServer) {
|
||||
})
|
||||
|
||||
// Connect to the socket
|
||||
//nolint:staticcheck // SA1019 Existing use of deprecated but supported dial option.
|
||||
conn, err := grpc.Dial(socket, grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc))
|
||||
if err != nil {
|
||||
t.Fatalf("Error dialing GRPC server: %v", err.Error())
|
||||
|
||||
@@ -293,7 +293,7 @@ func TestClientServer(t *testing.T) {
|
||||
|
||||
// set up grpc app
|
||||
kvstore = NewApplication()
|
||||
gclient, gserver, err := makeGRPCClientServer(kvstore, "kvstore-grpc")
|
||||
gclient, gserver, err := makeGRPCClientServer(kvstore, "/tmp/kvstore-grpc")
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Cleanup(func() {
|
||||
|
||||
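The tests now create sockets under `/tmp` and dial them with a `unix://`-prefixed target plus a custom context dialer over the deprecated-but-supported `grpc.WithInsecure` path. A hedged sketch of that dialing pattern; the scheme stripping below is an assumption about the address format rather than the repo's own `tmnet.Connect` helper:

```go
package example

import (
	"context"
	"net"
	"strings"

	"google.golang.org/grpc"
)

// dialUnix connects a gRPC client to a unix-domain socket such as
// "unix:///tmp/test-00000000.sock" using a custom context dialer.
func dialUnix(target string) (*grpc.ClientConn, error) {
	dialer := func(ctx context.Context, addr string) (net.Conn, error) {
		// Strip the scheme if it is still present and dial the filesystem path.
		path := strings.TrimPrefix(addr, "unix://")
		return (&net.Dialer{}).DialContext(ctx, "unix", path)
	}
	//nolint:staticcheck // grpc.WithInsecure is deprecated but still supported.
	return grpc.Dial(target, grpc.WithInsecure(), grpc.WithContextDialer(dialer))
}
```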
@@ -42,7 +42,7 @@ func (r ResponseQuery) IsErr() bool {
|
||||
}
|
||||
|
||||
//---------------------------------------------------------------------------
|
||||
// override JSON marshalling so we emit defaults (ie. disable omitempty)
|
||||
// override JSON marshaling so we emit defaults (ie. disable omitempty)
|
||||
|
||||
var (
|
||||
jsonpbMarshaller = jsonpb.Marshaler{
|
||||
|
||||
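Beyond the spelling fix, this block configures a `jsonpb.Marshaler` so that zero-valued fields are still emitted rather than dropped by `omitempty`. A hedged sketch of the same idea applied to a single response:

```go
package example

import (
	"bytes"

	"github.com/gogo/protobuf/jsonpb"
	"github.com/tendermint/tendermint/abci/types"
)

// marshalWithDefaults emits every field of the response, including zero values,
// which is the behavior the overridden marshaling in result.go is after.
func marshalWithDefaults(res *types.ResponseQuery) (string, error) {
	m := jsonpb.Marshaler{EmitDefaults: true}
	var buf bytes.Buffer
	if err := m.Marshal(&buf, res); err != nil {
		return "", err
	}
	return buf.String(), nil
}
```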
@@ -1967,6 +1967,11 @@ type ResponseCheckTx struct {
|
||||
GasUsed int64 `protobuf:"varint,6,opt,name=gas_used,proto3" json:"gas_used,omitempty"`
|
||||
Events []Event `protobuf:"bytes,7,rep,name=events,proto3" json:"events,omitempty"`
|
||||
Codespace string `protobuf:"bytes,8,opt,name=codespace,proto3" json:"codespace,omitempty"`
|
||||
Sender string `protobuf:"bytes,9,opt,name=sender,proto3" json:"sender,omitempty"`
|
||||
Priority int64 `protobuf:"varint,10,opt,name=priority,proto3" json:"priority,omitempty"`
|
||||
// mempool_error is set by Tendermint.
|
||||
// ABCI applications creating a ResponseCheckTx should not set mempool_error.
|
||||
MempoolError string `protobuf:"bytes,11,opt,name=mempool_error,json=mempoolError,proto3" json:"mempool_error,omitempty"`
|
||||
}
|
||||
|
||||
func (m *ResponseCheckTx) Reset() { *m = ResponseCheckTx{} }
|
||||
@@ -2058,6 +2063,27 @@ func (m *ResponseCheckTx) GetCodespace() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *ResponseCheckTx) GetSender() string {
|
||||
if m != nil {
|
||||
return m.Sender
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *ResponseCheckTx) GetPriority() int64 {
|
||||
if m != nil {
|
||||
return m.Priority
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *ResponseCheckTx) GetMempoolError() string {
|
||||
if m != nil {
|
||||
return m.MempoolError
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type ResponseDeliverTx struct {
|
||||
Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
|
||||
Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
|
||||
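`ResponseCheckTx` gains `Sender`, `Priority`, and `MempoolError`. The first two are meant to be filled in by the application so the mempool can order transactions; `mempool_error` is set by Tendermint itself. A hedged sketch of an application populating the new fields; the sender and priority logic below is a placeholder, not the repo's kvstore app:

```go
package example

import "github.com/tendermint/tendermint/abci/types"

// prioritizedApp is a hypothetical ABCI application that reports a priority and
// sender for each transaction so the priority mempool can use them.
type prioritizedApp struct {
	types.BaseApplication
}

func (app prioritizedApp) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
	return types.ResponseCheckTx{
		Code:     0,
		Sender:   deriveSender(req.Tx),
		Priority: int64(len(req.Tx)), // toy priority; a real app would use fees
		// MempoolError is reserved for Tendermint and left unset here.
	}
}

// deriveSender is a stand-in for real transaction decoding.
func deriveSender(tx []byte) string {
	if len(tx) == 0 {
		return ""
	}
	return string(tx[:1])
}
```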
@@ -3199,179 +3225,181 @@ func init() {
|
||||
func init() { proto.RegisterFile("tendermint/abci/types.proto", fileDescriptor_252557cfdd89a31a) }
|
||||
|
||||
var fileDescriptor_252557cfdd89a31a = []byte{
|
||||
// 2741 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x5a, 0x4b, 0x73, 0x1b, 0xc7,
|
||||
0x11, 0xc6, 0xfb, 0xd1, 0x24, 0x1e, 0x1c, 0xd1, 0x12, 0xb4, 0x92, 0x48, 0x79, 0x55, 0x72, 0x2c,
|
||||
0xd9, 0x26, 0x63, 0xaa, 0xa4, 0x48, 0xb1, 0x13, 0x9b, 0x80, 0x20, 0x83, 0x26, 0x4d, 0x30, 0x4b,
|
||||
0x48, 0xce, 0xcb, 0x5a, 0x2f, 0xb0, 0x43, 0x60, 0x2d, 0x60, 0x77, 0x8d, 0x1d, 0x50, 0xa4, 0x8f,
|
||||
0x71, 0x72, 0x51, 0x2e, 0xce, 0x2d, 0x17, 0xff, 0x8f, 0x9c, 0x72, 0xc9, 0xc5, 0x55, 0xb9, 0xf8,
|
||||
0x98, 0x93, 0x93, 0x92, 0x2a, 0x97, 0xfc, 0x81, 0x9c, 0x52, 0x49, 0xcd, 0x63, 0x5f, 0x00, 0x16,
|
||||
0x00, 0xed, 0xdc, 0x7c, 0x9b, 0x99, 0xed, 0xee, 0xc5, 0xf4, 0x4e, 0x7f, 0xfd, 0x75, 0x0f, 0xe0,
|
||||
0x12, 0xc1, 0xa6, 0x8e, 0x87, 0x03, 0xc3, 0x24, 0x9b, 0x5a, 0xbb, 0x63, 0x6c, 0x92, 0x53, 0x1b,
|
||||
0x3b, 0x1b, 0xf6, 0xd0, 0x22, 0x16, 0x2a, 0xf9, 0x0f, 0x37, 0xe8, 0x43, 0xe9, 0x4a, 0x40, 0xba,
|
||||
0x33, 0x3c, 0xb5, 0x89, 0xb5, 0x69, 0x0f, 0x2d, 0xeb, 0x88, 0xcb, 0x4b, 0x97, 0x03, 0x8f, 0x99,
|
||||
0x9d, 0xa0, 0xb5, 0xd0, 0x53, 0xa1, 0xfc, 0x04, 0x9f, 0xba, 0x4f, 0xaf, 0x4c, 0xe8, 0xda, 0xda,
|
||||
0x50, 0x1b, 0xb8, 0x8f, 0xd7, 0xbb, 0x96, 0xd5, 0xed, 0xe3, 0x4d, 0x36, 0x6b, 0x8f, 0x8e, 0x36,
|
||||
0x89, 0x31, 0xc0, 0x0e, 0xd1, 0x06, 0xb6, 0x10, 0x58, 0xed, 0x5a, 0x5d, 0x8b, 0x0d, 0x37, 0xe9,
|
||||
0x88, 0xaf, 0xca, 0x7f, 0xc8, 0x41, 0x56, 0xc1, 0x9f, 0x8e, 0xb0, 0x43, 0xd0, 0x16, 0xa4, 0x70,
|
||||
0xa7, 0x67, 0x55, 0xe2, 0x57, 0xe3, 0xaf, 0x2e, 0x6d, 0x5d, 0xde, 0x18, 0xdb, 0xdc, 0x86, 0x90,
|
||||
0xab, 0x77, 0x7a, 0x56, 0x23, 0xa6, 0x30, 0x59, 0x74, 0x1b, 0xd2, 0x47, 0xfd, 0x91, 0xd3, 0xab,
|
||||
0x24, 0x98, 0xd2, 0x95, 0x28, 0xa5, 0x07, 0x54, 0xa8, 0x11, 0x53, 0xb8, 0x34, 0x7d, 0x95, 0x61,
|
||||
0x1e, 0x59, 0x95, 0xe4, 0xec, 0x57, 0xed, 0x98, 0x47, 0xec, 0x55, 0x54, 0x16, 0x55, 0x01, 0x1c,
|
||||
0x4c, 0x54, 0xcb, 0x26, 0x86, 0x65, 0x56, 0x52, 0x4c, 0xf3, 0xe5, 0x28, 0xcd, 0x43, 0x4c, 0x9a,
|
||||
0x4c, 0xb0, 0x11, 0x53, 0xf2, 0x8e, 0x3b, 0xa1, 0x36, 0x0c, 0xd3, 0x20, 0x6a, 0xa7, 0xa7, 0x19,
|
||||
0x66, 0x25, 0x3d, 0xdb, 0xc6, 0x8e, 0x69, 0x90, 0x1a, 0x15, 0xa4, 0x36, 0x0c, 0x77, 0x42, 0xb7,
|
||||
0xfc, 0xe9, 0x08, 0x0f, 0x4f, 0x2b, 0x99, 0xd9, 0x5b, 0xfe, 0x19, 0x15, 0xa2, 0x5b, 0x66, 0xd2,
|
||||
0xa8, 0x0e, 0x4b, 0x6d, 0xdc, 0x35, 0x4c, 0xb5, 0xdd, 0xb7, 0x3a, 0x4f, 0x2a, 0x59, 0xa6, 0x2c,
|
||||
0x47, 0x29, 0x57, 0xa9, 0x68, 0x95, 0x4a, 0x36, 0x62, 0x0a, 0xb4, 0xbd, 0x19, 0x7a, 0x1b, 0x72,
|
||||
0x9d, 0x1e, 0xee, 0x3c, 0x51, 0xc9, 0x49, 0x25, 0xc7, 0x6c, 0xac, 0x47, 0xd9, 0xa8, 0x51, 0xb9,
|
||||
0xd6, 0x49, 0x23, 0xa6, 0x64, 0x3b, 0x7c, 0x48, 0xf7, 0xaf, 0xe3, 0xbe, 0x71, 0x8c, 0x87, 0x54,
|
||||
0x3f, 0x3f, 0x7b, 0xff, 0xf7, 0xb9, 0x24, 0xb3, 0x90, 0xd7, 0xdd, 0x09, 0x7a, 0x07, 0xf2, 0xd8,
|
||||
0xd4, 0xc5, 0x36, 0x80, 0x99, 0xb8, 0x1a, 0x79, 0x56, 0x4c, 0xdd, 0xdd, 0x44, 0x0e, 0x8b, 0x31,
|
||||
0xba, 0x0b, 0x99, 0x8e, 0x35, 0x18, 0x18, 0xa4, 0xb2, 0xc4, 0xb4, 0xd7, 0x22, 0x37, 0xc0, 0xa4,
|
||||
0x1a, 0x31, 0x45, 0xc8, 0xa3, 0x7d, 0x28, 0xf6, 0x0d, 0x87, 0xa8, 0x8e, 0xa9, 0xd9, 0x4e, 0xcf,
|
||||
0x22, 0x4e, 0x65, 0x99, 0x59, 0xb8, 0x1e, 0x65, 0x61, 0xcf, 0x70, 0xc8, 0xa1, 0x2b, 0xdc, 0x88,
|
||||
0x29, 0x85, 0x7e, 0x70, 0x81, 0xda, 0xb3, 0x8e, 0x8e, 0xf0, 0xd0, 0x33, 0x58, 0x29, 0xcc, 0xb6,
|
||||
0xd7, 0xa4, 0xd2, 0xae, 0x3e, 0xb5, 0x67, 0x05, 0x17, 0xd0, 0xaf, 0xe0, 0x5c, 0xdf, 0xd2, 0x74,
|
||||
0xcf, 0x9c, 0xda, 0xe9, 0x8d, 0xcc, 0x27, 0x95, 0x22, 0x33, 0x7a, 0x23, 0xf2, 0x47, 0x5a, 0x9a,
|
||||
0xee, 0x9a, 0xa8, 0x51, 0x85, 0x46, 0x4c, 0x59, 0xe9, 0x8f, 0x2f, 0xa2, 0xc7, 0xb0, 0xaa, 0xd9,
|
||||
0x76, 0xff, 0x74, 0xdc, 0x7a, 0x89, 0x59, 0xbf, 0x19, 0x65, 0x7d, 0x9b, 0xea, 0x8c, 0x9b, 0x47,
|
||||
0xda, 0xc4, 0x6a, 0x35, 0x0b, 0xe9, 0x63, 0xad, 0x3f, 0xc2, 0xf2, 0x0f, 0x60, 0x29, 0x10, 0xea,
|
||||
0xa8, 0x02, 0xd9, 0x01, 0x76, 0x1c, 0xad, 0x8b, 0x19, 0x32, 0xe4, 0x15, 0x77, 0x2a, 0x17, 0x61,
|
||||
0x39, 0x18, 0xde, 0xf2, 0xc0, 0x53, 0xa4, 0x81, 0x4b, 0x15, 0x8f, 0xf1, 0xd0, 0xa1, 0xd1, 0x2a,
|
||||
0x14, 0xc5, 0x14, 0x5d, 0x83, 0x02, 0x3b, 0x3e, 0xaa, 0xfb, 0x9c, 0xa2, 0x47, 0x4a, 0x59, 0x66,
|
||||
0x8b, 0x8f, 0x84, 0xd0, 0x3a, 0x2c, 0xd9, 0x5b, 0xb6, 0x27, 0x92, 0x64, 0x22, 0x60, 0x6f, 0xd9,
|
||||
0x42, 0x40, 0xfe, 0x31, 0x94, 0xc7, 0xa3, 0x1d, 0x95, 0x21, 0xf9, 0x04, 0x9f, 0x8a, 0xf7, 0xd1,
|
||||
0x21, 0x5a, 0x15, 0xdb, 0x62, 0xef, 0xc8, 0x2b, 0x62, 0x8f, 0x7f, 0x4d, 0x78, 0xca, 0x5e, 0x98,
|
||||
0xa3, 0xbb, 0x90, 0xa2, 0xa8, 0x29, 0x00, 0x50, 0xda, 0xe0, 0x90, 0xba, 0xe1, 0x42, 0xea, 0x46,
|
||||
0xcb, 0x85, 0xd4, 0x6a, 0xee, 0xab, 0x6f, 0xd6, 0x63, 0x5f, 0xfc, 0x7d, 0x3d, 0xae, 0x30, 0x0d,
|
||||
0x74, 0x91, 0x46, 0xa5, 0x66, 0x98, 0xaa, 0xa1, 0x8b, 0xf7, 0x64, 0xd9, 0x7c, 0x47, 0x47, 0xbb,
|
||||
0x50, 0xee, 0x58, 0xa6, 0x83, 0x4d, 0x67, 0xe4, 0xa8, 0x1c, 0xb2, 0x05, 0xec, 0x4d, 0x46, 0x4d,
|
||||
0xcd, 0x15, 0x3c, 0x60, 0x72, 0x4a, 0xa9, 0x13, 0x5e, 0x40, 0x0f, 0x00, 0x8e, 0xb5, 0xbe, 0xa1,
|
||||
0x6b, 0xc4, 0x1a, 0x3a, 0x95, 0xd4, 0xd5, 0xe4, 0x54, 0x33, 0x8f, 0x5c, 0x91, 0x87, 0xb6, 0xae,
|
||||
0x11, 0x5c, 0x4d, 0xd1, 0x5f, 0xab, 0x04, 0x34, 0xd1, 0x2b, 0x50, 0xd2, 0x6c, 0x5b, 0x75, 0x88,
|
||||
0x46, 0xb0, 0xda, 0x3e, 0x25, 0xd8, 0x61, 0x60, 0xb8, 0xac, 0x14, 0x34, 0xdb, 0x3e, 0xa4, 0xab,
|
||||
0x55, 0xba, 0x88, 0xae, 0x43, 0x91, 0x02, 0x9f, 0xa1, 0xf5, 0xd5, 0x1e, 0x36, 0xba, 0x3d, 0xc2,
|
||||
0x40, 0x2f, 0xa9, 0x14, 0xc4, 0x6a, 0x83, 0x2d, 0xca, 0xba, 0x77, 0x10, 0x18, 0xe8, 0x21, 0x04,
|
||||
0x29, 0x5d, 0x23, 0x1a, 0x73, 0xe4, 0xb2, 0xc2, 0xc6, 0x74, 0xcd, 0xd6, 0x48, 0x4f, 0xb8, 0x87,
|
||||
0x8d, 0xd1, 0x79, 0xc8, 0x08, 0xb3, 0x49, 0x66, 0x56, 0xcc, 0xe8, 0x37, 0xb3, 0x87, 0xd6, 0x31,
|
||||
0x66, 0x28, 0x9f, 0x53, 0xf8, 0x44, 0xfe, 0x6d, 0x02, 0x56, 0x26, 0xe0, 0x91, 0xda, 0xed, 0x69,
|
||||
0x4e, 0xcf, 0x7d, 0x17, 0x1d, 0xa3, 0x3b, 0xd4, 0xae, 0xa6, 0xe3, 0xa1, 0x48, 0x4b, 0x95, 0xa0,
|
||||
0x8b, 0x78, 0xca, 0x6d, 0xb0, 0xe7, 0xc2, 0x35, 0x42, 0x1a, 0x35, 0xa1, 0xdc, 0xd7, 0x1c, 0xa2,
|
||||
0x72, 0xb8, 0x51, 0x03, 0x29, 0x6a, 0x12, 0x64, 0xf7, 0x34, 0x17, 0xa0, 0xe8, 0x61, 0x17, 0x86,
|
||||
0x8a, 0xfd, 0xd0, 0x2a, 0x52, 0x60, 0xb5, 0x7d, 0xfa, 0x99, 0x66, 0x12, 0xc3, 0xc4, 0xea, 0xc4,
|
||||
0x97, 0xbb, 0x38, 0x61, 0xb4, 0x7e, 0x6c, 0xe8, 0xd8, 0xec, 0xb8, 0x9f, 0xec, 0x9c, 0xa7, 0xec,
|
||||
0x7d, 0x52, 0x47, 0x56, 0xa0, 0x18, 0x06, 0x78, 0x54, 0x84, 0x04, 0x39, 0x11, 0x0e, 0x48, 0x90,
|
||||
0x13, 0xf4, 0x43, 0x48, 0xd1, 0x4d, 0xb2, 0xcd, 0x17, 0xa7, 0x64, 0x57, 0xa1, 0xd7, 0x3a, 0xb5,
|
||||
0xb1, 0xc2, 0x24, 0x65, 0xd9, 0x8b, 0x06, 0x0f, 0xf4, 0xc7, 0xad, 0xca, 0x37, 0xa0, 0x34, 0x86,
|
||||
0xea, 0x81, 0xef, 0x17, 0x0f, 0x7e, 0x3f, 0xb9, 0x04, 0x85, 0x10, 0x84, 0xcb, 0xe7, 0x61, 0x75,
|
||||
0x1a, 0x22, 0xcb, 0x3d, 0x6f, 0x3d, 0x84, 0xac, 0xe8, 0x36, 0xe4, 0x3c, 0x48, 0xe6, 0xd1, 0x38,
|
||||
0xe9, 0x2b, 0x57, 0x58, 0xf1, 0x44, 0x69, 0x18, 0xd2, 0x63, 0xcd, 0xce, 0x43, 0x82, 0xfd, 0xf0,
|
||||
0xac, 0x66, 0xdb, 0x0d, 0xcd, 0xe9, 0xc9, 0x1f, 0x43, 0x25, 0x0a, 0x6e, 0xc7, 0xb6, 0x91, 0xf2,
|
||||
0x8e, 0xe1, 0x79, 0xc8, 0x1c, 0x59, 0xc3, 0x81, 0x46, 0x98, 0xb1, 0x82, 0x22, 0x66, 0xf4, 0x78,
|
||||
0x72, 0xe8, 0x4d, 0xb2, 0x65, 0x3e, 0x91, 0x55, 0xb8, 0x18, 0x09, 0xb9, 0x54, 0xc5, 0x30, 0x75,
|
||||
0xcc, 0xfd, 0x59, 0x50, 0xf8, 0xc4, 0x37, 0xc4, 0x7f, 0x2c, 0x9f, 0xd0, 0xd7, 0x3a, 0x6c, 0xaf,
|
||||
0xcc, 0x7e, 0x5e, 0x11, 0x33, 0xf9, 0x9f, 0x39, 0xc8, 0x29, 0xd8, 0xb1, 0x29, 0x26, 0xa0, 0x2a,
|
||||
0xe4, 0xf1, 0x49, 0x07, 0x73, 0x32, 0x14, 0x8f, 0x24, 0x13, 0x5c, 0xba, 0xee, 0x4a, 0xd2, 0x4c,
|
||||
0xee, 0xa9, 0xa1, 0x5b, 0x82, 0xf0, 0x45, 0x73, 0x37, 0xa1, 0x1e, 0x64, 0x7c, 0x77, 0x5c, 0xc6,
|
||||
0x97, 0x8c, 0x4c, 0xde, 0x5c, 0x6b, 0x8c, 0xf2, 0xdd, 0x12, 0x94, 0x2f, 0x35, 0xe7, 0x65, 0x21,
|
||||
0xce, 0x57, 0x0b, 0x71, 0xbe, 0xf4, 0x9c, 0x6d, 0x46, 0x90, 0xbe, 0x5a, 0x88, 0xf4, 0x65, 0xe6,
|
||||
0x18, 0x89, 0x60, 0x7d, 0x77, 0x5c, 0xd6, 0x97, 0x9d, 0xb3, 0xed, 0x31, 0xda, 0xf7, 0x20, 0x4c,
|
||||
0xfb, 0x38, 0x65, 0xbb, 0x16, 0xa9, 0x1d, 0xc9, 0xfb, 0x7e, 0x12, 0xe0, 0x7d, 0xf9, 0x48, 0xd2,
|
||||
0xc5, 0x8d, 0x4c, 0x21, 0x7e, 0xb5, 0x10, 0xf1, 0x83, 0x39, 0x3e, 0x88, 0x60, 0x7e, 0xef, 0x06,
|
||||
0x99, 0xdf, 0x52, 0x24, 0x79, 0x14, 0x87, 0x66, 0x1a, 0xf5, 0xbb, 0xe7, 0x51, 0xbf, 0xe5, 0x48,
|
||||
0xee, 0x2a, 0xf6, 0x30, 0xce, 0xfd, 0x9a, 0x13, 0xdc, 0x8f, 0x73, 0xb5, 0x57, 0x22, 0x4d, 0xcc,
|
||||
0x21, 0x7f, 0xcd, 0x09, 0xf2, 0x57, 0x9c, 0x63, 0x70, 0x0e, 0xfb, 0xfb, 0xf5, 0x74, 0xf6, 0x17,
|
||||
0xcd, 0xcf, 0xc4, 0xcf, 0x5c, 0x8c, 0xfe, 0xa9, 0x11, 0xf4, 0xaf, 0xcc, 0xcc, 0xbf, 0x16, 0x69,
|
||||
0xfe, 0xec, 0xfc, 0xef, 0x06, 0x4d, 0xb3, 0x63, 0xc0, 0x41, 0xa1, 0x0a, 0x0f, 0x87, 0xd6, 0x50,
|
||||
0x50, 0x2b, 0x3e, 0x91, 0x5f, 0xa5, 0x89, 0xdf, 0x07, 0x89, 0x19, 0x5c, 0x91, 0xa5, 0x84, 0x00,
|
||||
0x30, 0xc8, 0x7f, 0x8a, 0xfb, 0xba, 0x2c, 0x57, 0x06, 0x49, 0x43, 0x5e, 0x90, 0x86, 0x00, 0x85,
|
||||
0x4c, 0x84, 0x29, 0xe4, 0x3a, 0x2c, 0x51, 0xa8, 0x1f, 0x63, 0x87, 0x9a, 0xed, 0xb2, 0x43, 0x74,
|
||||
0x13, 0x56, 0x58, 0x2e, 0xe7, 0x44, 0x53, 0xe0, 0x7b, 0x8a, 0xa5, 0xa9, 0x12, 0x7d, 0xc0, 0x0f,
|
||||
0x27, 0x07, 0xfa, 0x37, 0xe0, 0x5c, 0x40, 0xd6, 0x4b, 0x21, 0x9c, 0x12, 0x95, 0x3d, 0xe9, 0x6d,
|
||||
0x91, 0x4b, 0x3e, 0xf0, 0x1d, 0xe4, 0x33, 0x4f, 0x04, 0xa9, 0x8e, 0xa5, 0x63, 0x01, 0xf0, 0x6c,
|
||||
0x4c, 0xd9, 0x68, 0xdf, 0xea, 0x0a, 0x18, 0xa7, 0x43, 0x2a, 0xe5, 0xa1, 0x60, 0x9e, 0x83, 0x9c,
|
||||
0xfc, 0x97, 0xb8, 0x6f, 0xcf, 0x27, 0xa3, 0xd3, 0x78, 0x63, 0xfc, 0xff, 0xc3, 0x1b, 0x13, 0xdf,
|
||||
0x9a, 0x37, 0x06, 0x13, 0x6c, 0x32, 0x9c, 0x60, 0xff, 0x1d, 0xf7, 0xbf, 0xb0, 0xc7, 0x02, 0xbf,
|
||||
0x9d, 0x47, 0xfc, 0x6c, 0x99, 0x66, 0xdf, 0x4b, 0x64, 0x4b, 0xc1, 0xed, 0x33, 0xec, 0xbd, 0x61,
|
||||
0x6e, 0x9f, 0xe5, 0xf9, 0x93, 0x4d, 0xd0, 0x5d, 0xc8, 0xb3, 0xa6, 0x8b, 0x6a, 0xd9, 0x8e, 0x00,
|
||||
0xdc, 0x4b, 0xc1, 0xbd, 0xf2, 0xde, 0xca, 0xc6, 0x01, 0x95, 0x69, 0xda, 0x8e, 0x92, 0xb3, 0xc5,
|
||||
0x28, 0x40, 0x04, 0xf2, 0x21, 0x3e, 0x7a, 0x19, 0xf2, 0xf4, 0xd7, 0x3b, 0xb6, 0xd6, 0xc1, 0x0c,
|
||||
0x3c, 0xf3, 0x8a, 0xbf, 0x20, 0x3f, 0x06, 0x34, 0x09, 0xdf, 0xa8, 0x01, 0x19, 0x7c, 0x8c, 0x4d,
|
||||
0x42, 0xbf, 0x1a, 0x75, 0xf7, 0xf9, 0x29, 0x64, 0x0f, 0x9b, 0xa4, 0x5a, 0xa1, 0x4e, 0xfe, 0xd7,
|
||||
0x37, 0xeb, 0x65, 0x2e, 0xfd, 0xba, 0x35, 0x30, 0x08, 0x1e, 0xd8, 0xe4, 0x54, 0x11, 0xfa, 0xf2,
|
||||
0xe7, 0x09, 0xca, 0xbc, 0x42, 0xd0, 0x3e, 0xd5, 0xb7, 0x6e, 0x00, 0x25, 0x02, 0xac, 0x7b, 0x31,
|
||||
0x7f, 0xaf, 0x01, 0x74, 0x35, 0x47, 0x7d, 0xaa, 0x99, 0x04, 0xeb, 0xc2, 0xe9, 0x81, 0x15, 0x24,
|
||||
0x41, 0x8e, 0xce, 0x46, 0x0e, 0xd6, 0x45, 0x01, 0xe0, 0xcd, 0x03, 0xfb, 0xcc, 0x7e, 0xb7, 0x7d,
|
||||
0x86, 0xbd, 0x9c, 0x1b, 0xf7, 0xf2, 0xef, 0x12, 0x7e, 0x94, 0xf8, 0x24, 0xf5, 0xfb, 0xe7, 0x87,
|
||||
0xdf, 0xb3, 0xca, 0x35, 0x9c, 0x63, 0xd1, 0x21, 0xac, 0x78, 0x51, 0xaa, 0x8e, 0x58, 0xf4, 0xba,
|
||||
0xe7, 0x6e, 0xd1, 0x30, 0x2f, 0x1f, 0x87, 0x97, 0x1d, 0xf4, 0x73, 0xb8, 0x30, 0x86, 0x40, 0x9e,
|
||||
0xe9, 0xc4, 0x82, 0x40, 0xf4, 0x52, 0x18, 0x88, 0x5c, 0xcb, 0xbe, 0xaf, 0x92, 0xdf, 0x31, 0x36,
|
||||
0x76, 0x68, 0x31, 0x14, 0x64, 0x0c, 0x53, 0xbf, 0xfe, 0x35, 0x28, 0x0c, 0x31, 0xa1, 0xf5, 0x79,
|
||||
0xa8, 0xdc, 0x5c, 0xe6, 0x8b, 0xa2, 0x88, 0x3d, 0x80, 0x97, 0xa6, 0x32, 0x07, 0xf4, 0x23, 0xc8,
|
||||
0xfb, 0xa4, 0x23, 0x1e, 0x51, 0xb9, 0x79, 0xd5, 0x88, 0x2f, 0x2b, 0xff, 0x39, 0xee, 0x9b, 0x0c,
|
||||
0xd7, 0x37, 0x75, 0xc8, 0x0c, 0xb1, 0x33, 0xea, 0xf3, 0x8a, 0xa3, 0xb8, 0xf5, 0xc6, 0x62, 0x9c,
|
||||
0x83, 0xae, 0x8e, 0xfa, 0x44, 0x11, 0xca, 0xf2, 0x63, 0xc8, 0xf0, 0x15, 0xb4, 0x04, 0xd9, 0x87,
|
||||
0xfb, 0xbb, 0xfb, 0xcd, 0x0f, 0xf7, 0xcb, 0x31, 0x04, 0x90, 0xd9, 0xae, 0xd5, 0xea, 0x07, 0xad,
|
||||
0x72, 0x1c, 0xe5, 0x21, 0xbd, 0x5d, 0x6d, 0x2a, 0xad, 0x72, 0x82, 0x2e, 0x2b, 0xf5, 0xf7, 0xeb,
|
||||
0xb5, 0x56, 0x39, 0x89, 0x56, 0xa0, 0xc0, 0xc7, 0xea, 0x83, 0xa6, 0xf2, 0xc1, 0x76, 0xab, 0x9c,
|
||||
0x0a, 0x2c, 0x1d, 0xd6, 0xf7, 0xef, 0xd7, 0x95, 0x72, 0x5a, 0x7e, 0x93, 0x96, 0x34, 0x11, 0x2c,
|
||||
0xc5, 0x2f, 0x5e, 0xe2, 0x81, 0xe2, 0x45, 0xfe, 0x63, 0x02, 0xa4, 0x68, 0xea, 0x81, 0xde, 0x1f,
|
||||
0xdb, 0xf8, 0xd6, 0x19, 0x78, 0xcb, 0xd8, 0xee, 0xd1, 0x75, 0x28, 0x0e, 0xf1, 0x11, 0x26, 0x9d,
|
||||
0x1e, 0xa7, 0x42, 0x3c, 0xb1, 0x15, 0x94, 0x82, 0x58, 0x65, 0x4a, 0x0e, 0x17, 0xfb, 0x04, 0x77,
|
||||
0x88, 0xca, 0xeb, 0x28, 0x7e, 0xe8, 0xf2, 0x54, 0x8c, 0xae, 0x1e, 0xf2, 0x45, 0xf9, 0xe3, 0x33,
|
||||
0xf9, 0x32, 0x0f, 0x69, 0xa5, 0xde, 0x52, 0x7e, 0x51, 0x4e, 0x22, 0x04, 0x45, 0x36, 0x54, 0x0f,
|
||||
0xf7, 0xb7, 0x0f, 0x0e, 0x1b, 0x4d, 0xea, 0xcb, 0x73, 0x50, 0x72, 0x7d, 0xe9, 0x2e, 0xa6, 0xe5,
|
||||
0xff, 0xc6, 0xa1, 0x34, 0x16, 0x20, 0x68, 0x0b, 0xd2, 0x9c, 0x4e, 0x47, 0x35, 0xdd, 0x59, 0x7c,
|
||||
0x8b, 0x68, 0xe2, 0xa2, 0xe8, 0x6d, 0xc8, 0x61, 0xd1, 0x27, 0x98, 0x16, 0x88, 0xbc, 0xbf, 0xe1,
|
||||
0x76, 0x12, 0x84, 0xaa, 0xa7, 0x81, 0xde, 0x81, 0xbc, 0x17, 0xe9, 0xa2, 0x86, 0x7b, 0x79, 0x52,
|
||||
0xdd, 0xc3, 0x08, 0xa1, 0xef, 0xeb, 0xa0, 0x7b, 0x3e, 0x27, 0x4b, 0x4d, 0x92, 0x78, 0xa1, 0xce,
|
||||
0x05, 0x84, 0xb2, 0x2b, 0x2f, 0xd7, 0x60, 0x29, 0xb0, 0x1f, 0x74, 0x09, 0xf2, 0x03, 0xed, 0x44,
|
||||
0xf4, 0x9f, 0x78, 0x07, 0x21, 0x37, 0xd0, 0x4e, 0x78, 0xeb, 0xe9, 0x02, 0x64, 0xe9, 0xc3, 0xae,
|
||||
0xc6, 0xd1, 0x26, 0xa9, 0x64, 0x06, 0xda, 0xc9, 0x7b, 0x9a, 0x23, 0x7f, 0x04, 0xc5, 0x70, 0xef,
|
||||
0x85, 0x9e, 0xc4, 0xa1, 0x35, 0x32, 0x75, 0x66, 0x23, 0xad, 0xf0, 0x09, 0xba, 0x0d, 0xe9, 0x63,
|
||||
0x8b, 0x83, 0xd5, 0xf4, 0x90, 0x7d, 0x64, 0x11, 0x1c, 0xe8, 0xdd, 0x70, 0x69, 0xf9, 0x33, 0x48,
|
||||
0x33, 0xf0, 0xa1, 0x40, 0xc2, 0xba, 0x28, 0x82, 0x8f, 0xd2, 0x31, 0xfa, 0x08, 0x40, 0x23, 0x64,
|
||||
0x68, 0xb4, 0x47, 0xbe, 0xe1, 0xf5, 0xe9, 0xe0, 0xb5, 0xed, 0xca, 0x55, 0x2f, 0x0b, 0x14, 0x5b,
|
||||
0xf5, 0x55, 0x03, 0x48, 0x16, 0x30, 0x28, 0xef, 0x43, 0x31, 0xac, 0x1b, 0xec, 0x67, 0x2e, 0x4f,
|
||||
0xe9, 0x67, 0x7a, 0x9c, 0xc7, 0x63, 0x4c, 0x49, 0xde, 0x31, 0x63, 0x13, 0xf9, 0x59, 0x1c, 0x72,
|
||||
0xad, 0x13, 0x71, 0xac, 0x23, 0x9a, 0x35, 0xbe, 0x6a, 0x22, 0xd8, 0x9a, 0xe0, 0xdd, 0x9f, 0xa4,
|
||||
0xd7, 0x53, 0x7a, 0xd7, 0x0b, 0xdc, 0xd4, 0xa2, 0xc5, 0xa3, 0xdb, 0x5c, 0x13, 0x60, 0xf5, 0x16,
|
||||
0xe4, 0xbd, 0x53, 0x45, 0x89, 0xbd, 0xa6, 0xeb, 0x43, 0xec, 0x38, 0x62, 0x6f, 0xee, 0x94, 0xf5,
|
||||
0xfe, 0xac, 0xa7, 0xa2, 0xf9, 0x91, 0x54, 0xf8, 0x44, 0xd6, 0xa1, 0x34, 0x96, 0xb6, 0xd0, 0x5b,
|
||||
0x90, 0xb5, 0x47, 0x6d, 0xd5, 0x75, 0xcf, 0x58, 0xf0, 0xb8, 0x24, 0x6f, 0xd4, 0xee, 0x1b, 0x9d,
|
||||
0x5d, 0x7c, 0xea, 0xfe, 0x18, 0x7b, 0xd4, 0xde, 0xe5, 0x5e, 0xe4, 0x6f, 0x49, 0x04, 0xdf, 0x72,
|
||||
0x0c, 0x39, 0xf7, 0x50, 0xa0, 0x9f, 0x06, 0xe3, 0xc4, 0xed, 0x08, 0x47, 0xa6, 0x52, 0x61, 0x3e,
|
||||
0x10, 0x26, 0x37, 0x61, 0xc5, 0x31, 0xba, 0x26, 0xd6, 0x55, 0xbf, 0xb4, 0x60, 0x6f, 0xcb, 0x29,
|
||||
0x25, 0xfe, 0x60, 0xcf, 0xad, 0x2b, 0xe4, 0xff, 0xc4, 0x21, 0xe7, 0x06, 0x2c, 0x7a, 0x33, 0x70,
|
||||
0xee, 0x8a, 0x53, 0x1a, 0x25, 0xae, 0xa0, 0xdf, 0xbe, 0x0b, 0xff, 0xd6, 0xc4, 0xd9, 0x7f, 0x6b,
|
||||
0x54, 0x1f, 0xd6, 0x6d, 0x88, 0xa7, 0xce, 0xdc, 0x10, 0x7f, 0x1d, 0x10, 0xb1, 0x88, 0xd6, 0x57,
|
||||
0x8f, 0x2d, 0x62, 0x98, 0x5d, 0x95, 0x3b, 0x9b, 0x33, 0xaa, 0x32, 0x7b, 0xf2, 0x88, 0x3d, 0x38,
|
||||
0x60, 0x7e, 0xff, 0x4d, 0x1c, 0x72, 0x5e, 0x6e, 0x3c, 0x6b, 0x37, 0xee, 0x3c, 0x64, 0x04, 0xfc,
|
||||
0xf3, 0x76, 0x9c, 0x98, 0x79, 0x8d, 0xe1, 0x54, 0xa0, 0x31, 0x2c, 0x41, 0x6e, 0x80, 0x89, 0xc6,
|
||||
0x08, 0x02, 0xaf, 0xee, 0xbc, 0xf9, 0xcd, 0x7b, 0xb0, 0x14, 0x68, 0x8c, 0xd2, 0xc8, 0xdb, 0xaf,
|
||||
0x7f, 0x58, 0x8e, 0x49, 0xd9, 0x67, 0x5f, 0x5e, 0x4d, 0xee, 0xe3, 0xa7, 0xf4, 0xcc, 0x2a, 0xf5,
|
||||
0x5a, 0xa3, 0x5e, 0xdb, 0x2d, 0xc7, 0xa5, 0xa5, 0x67, 0x5f, 0x5e, 0xcd, 0x2a, 0x98, 0xf5, 0x57,
|
||||
0x6e, 0x36, 0x60, 0x39, 0xf8, 0x55, 0xc2, 0x19, 0x04, 0x41, 0xf1, 0xfe, 0xc3, 0x83, 0xbd, 0x9d,
|
||||
0xda, 0x76, 0xab, 0xae, 0x3e, 0x6a, 0xb6, 0xea, 0xe5, 0x38, 0xba, 0x00, 0xe7, 0xf6, 0x76, 0xde,
|
||||
0x6b, 0xb4, 0xd4, 0xda, 0xde, 0x4e, 0x7d, 0xbf, 0xa5, 0x6e, 0xb7, 0x5a, 0xdb, 0xb5, 0xdd, 0x72,
|
||||
0x62, 0xeb, 0x73, 0x80, 0xd2, 0x76, 0xb5, 0xb6, 0x43, 0xb3, 0x9f, 0xd1, 0xd1, 0x44, 0xff, 0x2a,
|
||||
0xc5, 0x8a, 0xeb, 0x99, 0x37, 0xb2, 0xd2, 0xec, 0xf6, 0x1d, 0x7a, 0x00, 0x69, 0x56, 0x77, 0xa3,
|
||||
0xd9, 0x57, 0xb4, 0xd2, 0x9c, 0x7e, 0x1e, 0xfd, 0x31, 0x2c, 0x3c, 0x66, 0xde, 0xd9, 0x4a, 0xb3,
|
||||
0xdb, 0x7b, 0x48, 0x81, 0xbc, 0x5f, 0x38, 0xcf, 0xbf, 0xc3, 0x95, 0x16, 0x68, 0xf9, 0x51, 0x9b,
|
||||
0x7e, 0x59, 0x30, 0xff, 0x4e, 0x53, 0x5a, 0x00, 0xc0, 0xd0, 0x1e, 0x64, 0xdd, 0x82, 0x6b, 0xde,
|
||||
0x2d, 0xab, 0x34, 0xb7, 0x1d, 0x47, 0x3f, 0x01, 0x2f, 0x8c, 0x67, 0x5f, 0x19, 0x4b, 0x73, 0x7a,
|
||||
0x8b, 0x68, 0x07, 0x32, 0x82, 0xeb, 0xce, 0xb9, 0x39, 0x95, 0xe6, 0xb5, 0xd7, 0xa8, 0xd3, 0xfc,
|
||||
0x8e, 0xc3, 0xfc, 0x8b, 0x70, 0x69, 0x81, 0xb6, 0x29, 0x7a, 0x08, 0x10, 0x28, 0x83, 0x17, 0xb8,
|
||||
0xe1, 0x96, 0x16, 0x69, 0x87, 0xa2, 0x26, 0xe4, 0xbc, 0x72, 0x67, 0xee, 0x7d, 0xb3, 0x34, 0xbf,
|
||||
0x2f, 0x89, 0x1e, 0x43, 0x21, 0xcc, 0xf3, 0x17, 0xbb, 0x45, 0x96, 0x16, 0x6c, 0x38, 0x52, 0xfb,
|
||||
0x61, 0xd2, 0xbf, 0xd8, 0xad, 0xb2, 0xb4, 0x60, 0xff, 0x11, 0x7d, 0x02, 0x2b, 0x93, 0xa4, 0x7c,
|
||||
0xf1, 0x4b, 0x66, 0xe9, 0x0c, 0x1d, 0x49, 0x34, 0x00, 0x34, 0x85, 0xcc, 0x9f, 0xe1, 0xce, 0x59,
|
||||
0x3a, 0x4b, 0x83, 0xb2, 0x5a, 0xff, 0xea, 0xf9, 0x5a, 0xfc, 0xeb, 0xe7, 0x6b, 0xf1, 0x7f, 0x3c,
|
||||
0x5f, 0x8b, 0x7f, 0xf1, 0x62, 0x2d, 0xf6, 0xf5, 0x8b, 0xb5, 0xd8, 0xdf, 0x5e, 0xac, 0xc5, 0x7e,
|
||||
0xf9, 0x5a, 0xd7, 0x20, 0xbd, 0x51, 0x7b, 0xa3, 0x63, 0x0d, 0x36, 0x83, 0x7f, 0x88, 0x99, 0xf6,
|
||||
0x27, 0x9d, 0x76, 0x86, 0x25, 0xaa, 0x5b, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x3b, 0xae, 0x48,
|
||||
0xb4, 0xc4, 0x23, 0x00, 0x00,
|
||||
// 2782 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x5a, 0x4b, 0x77, 0x23, 0xc5,
|
||||
0xf5, 0xd7, 0x5b, 0xea, 0x2b, 0xeb, 0xe1, 0x1a, 0x33, 0x08, 0x31, 0xd8, 0x43, 0x73, 0xe0, 0x0f,
|
||||
0x03, 0xd8, 0x7f, 0xcc, 0x81, 0x40, 0x20, 0x01, 0x4b, 0x68, 0x90, 0xb1, 0xb1, 0x9c, 0xb6, 0x66,
|
||||
0xc8, 0x8b, 0x69, 0x5a, 0xea, 0xb2, 0xd4, 0x8c, 0xd4, 0xdd, 0x74, 0x97, 0x8c, 0xc5, 0x32, 0x8f,
|
||||
0x0d, 0xd9, 0x90, 0x5d, 0x36, 0x7c, 0x8f, 0xac, 0xb2, 0xc9, 0x86, 0x73, 0xb2, 0x61, 0x99, 0x45,
|
||||
0x0e, 0xc9, 0x99, 0x39, 0xd9, 0xe4, 0x0b, 0x64, 0x95, 0x93, 0x9c, 0x7a, 0xf4, 0x4b, 0x52, 0x4b,
|
||||
0x32, 0x64, 0x97, 0x5d, 0xd5, 0xed, 0x7b, 0x6f, 0xab, 0xaa, 0xeb, 0xfe, 0xee, 0xef, 0xde, 0x12,
|
||||
0x3c, 0x4e, 0xb0, 0xa9, 0x63, 0x67, 0x6c, 0x98, 0x64, 0x4f, 0xeb, 0xf5, 0x8d, 0x3d, 0x32, 0xb5,
|
||||
0xb1, 0xbb, 0x6b, 0x3b, 0x16, 0xb1, 0x50, 0x25, 0x78, 0xb8, 0x4b, 0x1f, 0xd6, 0x9f, 0x08, 0x69,
|
||||
0xf7, 0x9d, 0xa9, 0x4d, 0xac, 0x3d, 0xdb, 0xb1, 0xac, 0x73, 0xae, 0x5f, 0xbf, 0x11, 0x7a, 0xcc,
|
||||
0xfc, 0x84, 0xbd, 0x45, 0x9e, 0x0a, 0xe3, 0xfb, 0x78, 0xea, 0x3d, 0x7d, 0x62, 0xce, 0xd6, 0xd6,
|
||||
0x1c, 0x6d, 0xec, 0x3d, 0xde, 0x19, 0x58, 0xd6, 0x60, 0x84, 0xf7, 0xd8, 0xac, 0x37, 0x39, 0xdf,
|
||||
0x23, 0xc6, 0x18, 0xbb, 0x44, 0x1b, 0xdb, 0x42, 0x61, 0x6b, 0x60, 0x0d, 0x2c, 0x36, 0xdc, 0xa3,
|
||||
0x23, 0x2e, 0x95, 0x7f, 0x5b, 0x80, 0xbc, 0x82, 0x3f, 0x99, 0x60, 0x97, 0xa0, 0x7d, 0xc8, 0xe0,
|
||||
0xfe, 0xd0, 0xaa, 0x25, 0x6f, 0x26, 0x9f, 0x2d, 0xee, 0xdf, 0xd8, 0x9d, 0x59, 0xdc, 0xae, 0xd0,
|
||||
0x6b, 0xf5, 0x87, 0x56, 0x3b, 0xa1, 0x30, 0x5d, 0xf4, 0x0a, 0x64, 0xcf, 0x47, 0x13, 0x77, 0x58,
|
||||
0x4b, 0x31, 0xa3, 0x27, 0xe2, 0x8c, 0x6e, 0x53, 0xa5, 0x76, 0x42, 0xe1, 0xda, 0xf4, 0x55, 0x86,
|
||||
0x79, 0x6e, 0xd5, 0xd2, 0xcb, 0x5f, 0x75, 0x68, 0x9e, 0xb3, 0x57, 0x51, 0x5d, 0xd4, 0x00, 0x70,
|
||||
0x31, 0x51, 0x2d, 0x9b, 0x18, 0x96, 0x59, 0xcb, 0x30, 0xcb, 0x27, 0xe3, 0x2c, 0xcf, 0x30, 0xe9,
|
||||
0x30, 0xc5, 0x76, 0x42, 0x91, 0x5c, 0x6f, 0x42, 0x7d, 0x18, 0xa6, 0x41, 0xd4, 0xfe, 0x50, 0x33,
|
||||
0xcc, 0x5a, 0x76, 0xb9, 0x8f, 0x43, 0xd3, 0x20, 0x4d, 0xaa, 0x48, 0x7d, 0x18, 0xde, 0x84, 0x2e,
|
||||
0xf9, 0x93, 0x09, 0x76, 0xa6, 0xb5, 0xdc, 0xf2, 0x25, 0xff, 0x88, 0x2a, 0xd1, 0x25, 0x33, 0x6d,
|
||||
0xd4, 0x82, 0x62, 0x0f, 0x0f, 0x0c, 0x53, 0xed, 0x8d, 0xac, 0xfe, 0xfd, 0x5a, 0x9e, 0x19, 0xcb,
|
||||
0x71, 0xc6, 0x0d, 0xaa, 0xda, 0xa0, 0x9a, 0xed, 0x84, 0x02, 0x3d, 0x7f, 0x86, 0xde, 0x84, 0x42,
|
||||
0x7f, 0x88, 0xfb, 0xf7, 0x55, 0x72, 0x59, 0x2b, 0x30, 0x1f, 0x3b, 0x71, 0x3e, 0x9a, 0x54, 0xaf,
|
||||
0x7b, 0xd9, 0x4e, 0x28, 0xf9, 0x3e, 0x1f, 0xd2, 0xf5, 0xeb, 0x78, 0x64, 0x5c, 0x60, 0x87, 0xda,
|
||||
0x4b, 0xcb, 0xd7, 0xff, 0x0e, 0xd7, 0x64, 0x1e, 0x24, 0xdd, 0x9b, 0xa0, 0xb7, 0x40, 0xc2, 0xa6,
|
||||
0x2e, 0x96, 0x01, 0xcc, 0xc5, 0xcd, 0xd8, 0xb3, 0x62, 0xea, 0xde, 0x22, 0x0a, 0x58, 0x8c, 0xd1,
|
||||
0x6b, 0x90, 0xeb, 0x5b, 0xe3, 0xb1, 0x41, 0x6a, 0x45, 0x66, 0xbd, 0x1d, 0xbb, 0x00, 0xa6, 0xd5,
|
||||
0x4e, 0x28, 0x42, 0x1f, 0x9d, 0x40, 0x79, 0x64, 0xb8, 0x44, 0x75, 0x4d, 0xcd, 0x76, 0x87, 0x16,
|
||||
0x71, 0x6b, 0x1b, 0xcc, 0xc3, 0xd3, 0x71, 0x1e, 0x8e, 0x0d, 0x97, 0x9c, 0x79, 0xca, 0xed, 0x84,
|
||||
0x52, 0x1a, 0x85, 0x05, 0xd4, 0x9f, 0x75, 0x7e, 0x8e, 0x1d, 0xdf, 0x61, 0xad, 0xb4, 0xdc, 0x5f,
|
||||
0x87, 0x6a, 0x7b, 0xf6, 0xd4, 0x9f, 0x15, 0x16, 0xa0, 0x9f, 0xc1, 0xb5, 0x91, 0xa5, 0xe9, 0xbe,
|
||||
0x3b, 0xb5, 0x3f, 0x9c, 0x98, 0xf7, 0x6b, 0x65, 0xe6, 0xf4, 0xb9, 0xd8, 0x1f, 0x69, 0x69, 0xba,
|
||||
0xe7, 0xa2, 0x49, 0x0d, 0xda, 0x09, 0x65, 0x73, 0x34, 0x2b, 0x44, 0xf7, 0x60, 0x4b, 0xb3, 0xed,
|
||||
0xd1, 0x74, 0xd6, 0x7b, 0x85, 0x79, 0xbf, 0x15, 0xe7, 0xfd, 0x80, 0xda, 0xcc, 0xba, 0x47, 0xda,
|
||||
0x9c, 0xb4, 0x91, 0x87, 0xec, 0x85, 0x36, 0x9a, 0x60, 0xf9, 0xff, 0xa0, 0x18, 0x0a, 0x75, 0x54,
|
||||
0x83, 0xfc, 0x18, 0xbb, 0xae, 0x36, 0xc0, 0x0c, 0x19, 0x24, 0xc5, 0x9b, 0xca, 0x65, 0xd8, 0x08,
|
||||
0x87, 0xb7, 0x3c, 0xf6, 0x0d, 0x69, 0xe0, 0x52, 0xc3, 0x0b, 0xec, 0xb8, 0x34, 0x5a, 0x85, 0xa1,
|
||||
0x98, 0xa2, 0xa7, 0xa0, 0xc4, 0x8e, 0x8f, 0xea, 0x3d, 0xa7, 0xe8, 0x91, 0x51, 0x36, 0x98, 0xf0,
|
||||
0xae, 0x50, 0xda, 0x81, 0xa2, 0xbd, 0x6f, 0xfb, 0x2a, 0x69, 0xa6, 0x02, 0xf6, 0xbe, 0x2d, 0x14,
|
||||
0xe4, 0xef, 0x43, 0x75, 0x36, 0xda, 0x51, 0x15, 0xd2, 0xf7, 0xf1, 0x54, 0xbc, 0x8f, 0x0e, 0xd1,
|
||||
0x96, 0x58, 0x16, 0x7b, 0x87, 0xa4, 0x88, 0x35, 0xfe, 0x29, 0xe5, 0x1b, 0xfb, 0x61, 0x8e, 0x5e,
|
||||
0x83, 0x0c, 0x45, 0x4d, 0x01, 0x80, 0xf5, 0x5d, 0x0e, 0xa9, 0xbb, 0x1e, 0xa4, 0xee, 0x76, 0x3d,
|
||||
0x48, 0x6d, 0x14, 0xbe, 0xfa, 0x66, 0x27, 0xf1, 0xc5, 0x5f, 0x77, 0x92, 0x0a, 0xb3, 0x40, 0x8f,
|
||||
0xd1, 0xa8, 0xd4, 0x0c, 0x53, 0x35, 0x74, 0xf1, 0x9e, 0x3c, 0x9b, 0x1f, 0xea, 0xe8, 0x08, 0xaa,
|
||||
0x7d, 0xcb, 0x74, 0xb1, 0xe9, 0x4e, 0x5c, 0x95, 0x43, 0xb6, 0x80, 0xbd, 0xf9, 0xa8, 0x69, 0x7a,
|
||||
0x8a, 0xa7, 0x4c, 0x4f, 0xa9, 0xf4, 0xa3, 0x02, 0x74, 0x1b, 0xe0, 0x42, 0x1b, 0x19, 0xba, 0x46,
|
||||
0x2c, 0xc7, 0xad, 0x65, 0x6e, 0xa6, 0x17, 0xba, 0xb9, 0xeb, 0xa9, 0xdc, 0xb1, 0x75, 0x8d, 0xe0,
|
||||
0x46, 0x86, 0xfe, 0x5a, 0x25, 0x64, 0x89, 0x9e, 0x81, 0x8a, 0x66, 0xdb, 0xaa, 0x4b, 0x34, 0x82,
|
||||
0xd5, 0xde, 0x94, 0x60, 0x97, 0x81, 0xe1, 0x86, 0x52, 0xd2, 0x6c, 0xfb, 0x8c, 0x4a, 0x1b, 0x54,
|
||||
0x88, 0x9e, 0x86, 0x32, 0x05, 0x3e, 0x43, 0x1b, 0xa9, 0x43, 0x6c, 0x0c, 0x86, 0x84, 0x81, 0x5e,
|
||||
0x5a, 0x29, 0x09, 0x69, 0x9b, 0x09, 0x65, 0xdd, 0x3f, 0x08, 0x0c, 0xf4, 0x10, 0x82, 0x8c, 0xae,
|
||||
0x11, 0x8d, 0x6d, 0xe4, 0x86, 0xc2, 0xc6, 0x54, 0x66, 0x6b, 0x64, 0x28, 0xb6, 0x87, 0x8d, 0xd1,
|
||||
0x75, 0xc8, 0x09, 0xb7, 0x69, 0xe6, 0x56, 0xcc, 0xe8, 0x37, 0xb3, 0x1d, 0xeb, 0x02, 0x33, 0x94,
|
||||
0x2f, 0x28, 0x7c, 0x22, 0xff, 0x2a, 0x05, 0x9b, 0x73, 0xf0, 0x48, 0xfd, 0x0e, 0x35, 0x77, 0xe8,
|
||||
0xbd, 0x8b, 0x8e, 0xd1, 0xab, 0xd4, 0xaf, 0xa6, 0x63, 0x47, 0xa4, 0xa5, 0x5a, 0x78, 0x8b, 0x78,
|
||||
0xca, 0x6d, 0xb3, 0xe7, 0x62, 0x6b, 0x84, 0x36, 0xea, 0x40, 0x75, 0xa4, 0xb9, 0x44, 0xe5, 0x70,
|
||||
0xa3, 0x86, 0x52, 0xd4, 0x3c, 0xc8, 0x1e, 0x6b, 0x1e, 0x40, 0xd1, 0xc3, 0x2e, 0x1c, 0x95, 0x47,
|
||||
0x11, 0x29, 0x52, 0x60, 0xab, 0x37, 0xfd, 0x4c, 0x33, 0x89, 0x61, 0x62, 0x75, 0xee, 0xcb, 0x3d,
|
||||
0x36, 0xe7, 0xb4, 0x75, 0x61, 0xe8, 0xd8, 0xec, 0x7b, 0x9f, 0xec, 0x9a, 0x6f, 0xec, 0x7f, 0x52,
|
||||
0x57, 0x56, 0xa0, 0x1c, 0x05, 0x78, 0x54, 0x86, 0x14, 0xb9, 0x14, 0x1b, 0x90, 0x22, 0x97, 0xe8,
|
||||
0xff, 0x21, 0x43, 0x17, 0xc9, 0x16, 0x5f, 0x5e, 0x90, 0x5d, 0x85, 0x5d, 0x77, 0x6a, 0x63, 0x85,
|
||||
0x69, 0xca, 0xb2, 0x1f, 0x0d, 0x3e, 0xe8, 0xcf, 0x7a, 0x95, 0x9f, 0x83, 0xca, 0x0c, 0xaa, 0x87,
|
||||
0xbe, 0x5f, 0x32, 0xfc, 0xfd, 0xe4, 0x0a, 0x94, 0x22, 0x10, 0x2e, 0x5f, 0x87, 0xad, 0x45, 0x88,
|
||||
0x2c, 0x0f, 0x7d, 0x79, 0x04, 0x59, 0xd1, 0x2b, 0x50, 0xf0, 0x21, 0x99, 0x47, 0xe3, 0xfc, 0x5e,
|
||||
0x79, 0xca, 0x8a, 0xaf, 0x4a, 0xc3, 0x90, 0x1e, 0x6b, 0x76, 0x1e, 0x52, 0xec, 0x87, 0xe7, 0x35,
|
||||
0xdb, 0x6e, 0x6b, 0xee, 0x50, 0xfe, 0x08, 0x6a, 0x71, 0x70, 0x3b, 0xb3, 0x8c, 0x8c, 0x7f, 0x0c,
|
||||
0xaf, 0x43, 0xee, 0xdc, 0x72, 0xc6, 0x1a, 0x61, 0xce, 0x4a, 0x8a, 0x98, 0xd1, 0xe3, 0xc9, 0xa1,
|
||||
0x37, 0xcd, 0xc4, 0x7c, 0x22, 0xab, 0xf0, 0x58, 0x2c, 0xe4, 0x52, 0x13, 0xc3, 0xd4, 0x31, 0xdf,
|
||||
0xcf, 0x92, 0xc2, 0x27, 0x81, 0x23, 0xfe, 0x63, 0xf9, 0x84, 0xbe, 0xd6, 0x65, 0x6b, 0x65, 0xfe,
|
||||
0x25, 0x45, 0xcc, 0xe4, 0xbf, 0x17, 0xa0, 0xa0, 0x60, 0xd7, 0xa6, 0x98, 0x80, 0x1a, 0x20, 0xe1,
|
||||
0xcb, 0x3e, 0xe6, 0x64, 0x28, 0x19, 0x4b, 0x26, 0xb8, 0x76, 0xcb, 0xd3, 0xa4, 0x99, 0xdc, 0x37,
|
||||
0x43, 0x2f, 0x0b, 0xc2, 0x17, 0xcf, 0xdd, 0x84, 0x79, 0x98, 0xf1, 0xbd, 0xea, 0x31, 0xbe, 0x74,
|
||||
0x6c, 0xf2, 0xe6, 0x56, 0x33, 0x94, 0xef, 0x65, 0x41, 0xf9, 0x32, 0x2b, 0x5e, 0x16, 0xe1, 0x7c,
|
||||
0xcd, 0x08, 0xe7, 0xcb, 0xae, 0x58, 0x66, 0x0c, 0xe9, 0x6b, 0x46, 0x48, 0x5f, 0x6e, 0x85, 0x93,
|
||||
0x18, 0xd6, 0xf7, 0xaa, 0xc7, 0xfa, 0xf2, 0x2b, 0x96, 0x3d, 0x43, 0xfb, 0x6e, 0x47, 0x69, 0x1f,
|
||||
0xa7, 0x6c, 0x4f, 0xc5, 0x5a, 0xc7, 0xf2, 0xbe, 0x1f, 0x84, 0x78, 0x9f, 0x14, 0x4b, 0xba, 0xb8,
|
||||
0x93, 0x05, 0xc4, 0xaf, 0x19, 0x21, 0x7e, 0xb0, 0x62, 0x0f, 0x62, 0x98, 0xdf, 0xdb, 0x61, 0xe6,
|
||||
0x57, 0x8c, 0x25, 0x8f, 0xe2, 0xd0, 0x2c, 0xa2, 0x7e, 0xaf, 0xfb, 0xd4, 0x6f, 0x23, 0x96, 0xbb,
|
||||
0x8a, 0x35, 0xcc, 0x72, 0xbf, 0xce, 0x1c, 0xf7, 0xe3, 0x5c, 0xed, 0x99, 0x58, 0x17, 0x2b, 0xc8,
|
||||
0x5f, 0x67, 0x8e, 0xfc, 0x95, 0x57, 0x38, 0x5c, 0xc1, 0xfe, 0x7e, 0xbe, 0x98, 0xfd, 0xc5, 0xf3,
|
||||
0x33, 0xf1, 0x33, 0xd7, 0xa3, 0x7f, 0x6a, 0x0c, 0xfd, 0xab, 0x32, 0xf7, 0xcf, 0xc7, 0xba, 0xbf,
|
||||
0x3a, 0xff, 0x7b, 0x8e, 0xa6, 0xd9, 0x19, 0xe0, 0xa0, 0x50, 0x85, 0x1d, 0xc7, 0x72, 0x04, 0xb5,
|
||||
0xe2, 0x13, 0xf9, 0x59, 0x9a, 0xf8, 0x03, 0x90, 0x58, 0xc2, 0x15, 0x59, 0x4a, 0x08, 0x01, 0x83,
|
||||
0xfc, 0xfb, 0x64, 0x60, 0xcb, 0x72, 0x65, 0x98, 0x34, 0x48, 0x82, 0x34, 0x84, 0x28, 0x64, 0x2a,
|
||||
0x4a, 0x21, 0x77, 0xa0, 0x48, 0xa1, 0x7e, 0x86, 0x1d, 0x6a, 0xb6, 0xc7, 0x0e, 0xd1, 0x2d, 0xd8,
|
||||
0x64, 0xb9, 0x9c, 0x13, 0x4d, 0x81, 0xef, 0x19, 0x96, 0xa6, 0x2a, 0xf4, 0x01, 0x3f, 0x9c, 0x1c,
|
||||
0xe8, 0x5f, 0x84, 0x6b, 0x21, 0x5d, 0x3f, 0x85, 0x70, 0x4a, 0x54, 0xf5, 0xb5, 0x0f, 0x44, 0x2e,
|
||||
0x79, 0x3f, 0xd8, 0xa0, 0x80, 0x79, 0x22, 0xc8, 0xf4, 0x2d, 0x1d, 0x0b, 0x80, 0x67, 0x63, 0xca,
|
||||
0x46, 0x47, 0xd6, 0x40, 0xc0, 0x38, 0x1d, 0x52, 0x2d, 0x1f, 0x05, 0x25, 0x0e, 0x72, 0xf2, 0x1f,
|
||||
0x93, 0x81, 0xbf, 0x80, 0x8c, 0x2e, 0xe2, 0x8d, 0xc9, 0xff, 0x0e, 0x6f, 0x4c, 0x7d, 0x6b, 0xde,
|
||||
0x18, 0x4e, 0xb0, 0xe9, 0x68, 0x82, 0xfd, 0x67, 0x32, 0xf8, 0xc2, 0x3e, 0x0b, 0xfc, 0x76, 0x3b,
|
||||
0x12, 0x64, 0xcb, 0x2c, 0xfb, 0x5e, 0x22, 0x5b, 0x0a, 0x6e, 0x9f, 0x63, 0xef, 0x8d, 0x72, 0xfb,
|
||||
0x3c, 0xcf, 0x9f, 0x6c, 0x82, 0x5e, 0x03, 0x89, 0x35, 0x5d, 0x54, 0xcb, 0x76, 0x05, 0xe0, 0x3e,
|
||||
0x1e, 0x5e, 0x2b, 0xef, 0xad, 0xec, 0x9e, 0x52, 0x9d, 0x8e, 0xed, 0x2a, 0x05, 0x5b, 0x8c, 0x42,
|
||||
0x44, 0x40, 0x8a, 0xf0, 0xd1, 0x1b, 0x20, 0xd1, 0x5f, 0xef, 0xda, 0x5a, 0x1f, 0x33, 0xf0, 0x94,
|
||||
0x94, 0x40, 0x20, 0xdf, 0x03, 0x34, 0x0f, 0xdf, 0xa8, 0x0d, 0x39, 0x7c, 0x81, 0x4d, 0x42, 0xbf,
|
||||
0x1a, 0xdd, 0xee, 0xeb, 0x0b, 0xc8, 0x1e, 0x36, 0x49, 0xa3, 0x46, 0x37, 0xf9, 0x1f, 0xdf, 0xec,
|
||||
0x54, 0xb9, 0xf6, 0x0b, 0xd6, 0xd8, 0x20, 0x78, 0x6c, 0x93, 0xa9, 0x22, 0xec, 0xe5, 0xbf, 0xa4,
|
||||
0x28, 0xf3, 0x8a, 0x40, 0xfb, 0xc2, 0xbd, 0xf5, 0x02, 0x28, 0x15, 0x62, 0xdd, 0xeb, 0xed, 0xf7,
|
||||
0x36, 0xc0, 0x40, 0x73, 0xd5, 0x4f, 0x35, 0x93, 0x60, 0x5d, 0x6c, 0x7a, 0x48, 0x82, 0xea, 0x50,
|
||||
0xa0, 0xb3, 0x89, 0x8b, 0x75, 0x51, 0x00, 0xf8, 0xf3, 0xd0, 0x3a, 0xf3, 0xdf, 0x6d, 0x9d, 0xd1,
|
||||
0x5d, 0x2e, 0xcc, 0xec, 0x72, 0x88, 0x15, 0x49, 0x61, 0x56, 0x44, 0x7f, 0x9b, 0xed, 0x18, 0x96,
|
||||
0x63, 0x90, 0x29, 0xfb, 0x34, 0x69, 0xc5, 0x9f, 0xd3, 0x3a, 0x73, 0x8c, 0xc7, 0xb6, 0x65, 0x8d,
|
||||
0x54, 0x0e, 0x5e, 0x45, 0x66, 0xba, 0x21, 0x84, 0x2d, 0x86, 0x61, 0xbf, 0x4e, 0x05, 0xe1, 0x17,
|
||||
0xb0, 0xdf, 0xff, 0xb9, 0x0d, 0x96, 0x7f, 0xc3, 0x4a, 0xe2, 0x68, 0xf2, 0x46, 0x67, 0xb0, 0xe9,
|
||||
0x87, 0xbf, 0x3a, 0x61, 0xb0, 0xe0, 0x1d, 0xe8, 0x75, 0xf1, 0xa3, 0x7a, 0x11, 0x15, 0xbb, 0xe8,
|
||||
0xc7, 0xf0, 0xe8, 0x0c, 0xb4, 0xf9, 0xae, 0x53, 0x6b, 0x22, 0xdc, 0x23, 0x51, 0x84, 0xf3, 0x3c,
|
||||
0x07, 0x7b, 0x95, 0xfe, 0x8e, 0x41, 0x77, 0x48, 0xab, 0xac, 0x30, 0x15, 0x59, 0xf8, 0xf5, 0x9f,
|
||||
0x82, 0x92, 0x83, 0x09, 0x2d, 0xfc, 0x23, 0x75, 0xec, 0x06, 0x17, 0x8a, 0xea, 0xf8, 0x14, 0x1e,
|
||||
0x59, 0x48, 0x49, 0xd0, 0xf7, 0x40, 0x0a, 0xd8, 0x4c, 0x32, 0xa6, 0x24, 0xf4, 0xcb, 0x9c, 0x40,
|
||||
0x57, 0xfe, 0x43, 0x32, 0x70, 0x19, 0x2d, 0x9c, 0x5a, 0x90, 0x73, 0xb0, 0x3b, 0x19, 0xf1, 0x52,
|
||||
0xa6, 0xbc, 0xff, 0xe2, 0x7a, 0x64, 0x86, 0x4a, 0x27, 0x23, 0xa2, 0x08, 0x63, 0xf9, 0x1e, 0xe4,
|
||||
0xb8, 0x04, 0x15, 0x21, 0x7f, 0xe7, 0xe4, 0xe8, 0xa4, 0xf3, 0xc1, 0x49, 0x35, 0x81, 0x00, 0x72,
|
||||
0x07, 0xcd, 0x66, 0xeb, 0xb4, 0x5b, 0x4d, 0x22, 0x09, 0xb2, 0x07, 0x8d, 0x8e, 0xd2, 0xad, 0xa6,
|
||||
0xa8, 0x58, 0x69, 0xbd, 0xd7, 0x6a, 0x76, 0xab, 0x69, 0xb4, 0x09, 0x25, 0x3e, 0x56, 0x6f, 0x77,
|
||||
0x94, 0xf7, 0x0f, 0xba, 0xd5, 0x4c, 0x48, 0x74, 0xd6, 0x3a, 0x79, 0xa7, 0xa5, 0x54, 0xb3, 0xf2,
|
||||
0x4b, 0xb4, 0x56, 0x8a, 0xa1, 0x3f, 0x41, 0x55, 0x94, 0x0c, 0x55, 0x45, 0xf2, 0xef, 0x52, 0x50,
|
||||
0x8f, 0xe7, 0x34, 0xe8, 0xbd, 0x99, 0x85, 0xef, 0x5f, 0x81, 0x10, 0xcd, 0xac, 0x1e, 0x3d, 0x0d,
|
||||
0x65, 0x07, 0x9f, 0x63, 0xd2, 0x1f, 0x72, 0x8e, 0xc5, 0x33, 0x66, 0x49, 0x29, 0x09, 0x29, 0x33,
|
||||
0x72, 0xb9, 0xda, 0xc7, 0xb8, 0x4f, 0x54, 0x0e, 0x45, 0xfc, 0xd0, 0x49, 0x54, 0x8d, 0x4a, 0xcf,
|
||||
0xb8, 0x50, 0xfe, 0xe8, 0x4a, 0x7b, 0x29, 0x41, 0x56, 0x69, 0x75, 0x95, 0x9f, 0x54, 0xd3, 0x08,
|
||||
0x41, 0x99, 0x0d, 0xd5, 0xb3, 0x93, 0x83, 0xd3, 0xb3, 0x76, 0x87, 0xee, 0xe5, 0x35, 0xa8, 0x78,
|
||||
0x7b, 0xe9, 0x09, 0xb3, 0xf2, 0xbf, 0x93, 0x50, 0x99, 0x09, 0x10, 0xb4, 0x0f, 0x59, 0xce, 0xd3,
|
||||
0xe3, 0xba, 0xf9, 0x2c, 0xbe, 0x45, 0x34, 0x71, 0x55, 0xf4, 0x26, 0x14, 0xb0, 0x68, 0x40, 0x2c,
|
||||
0x0a, 0x44, 0xde, 0x38, 0xf1, 0x5a, 0x14, 0xc2, 0xd4, 0xb7, 0x40, 0x6f, 0x81, 0xe4, 0x47, 0xba,
|
||||
0x28, 0x0e, 0x9f, 0x9c, 0x37, 0xf7, 0x31, 0x42, 0xd8, 0x07, 0x36, 0xe8, 0xf5, 0x80, 0xec, 0x65,
|
||||
0xe6, 0xab, 0x03, 0x61, 0xce, 0x15, 0x84, 0xb1, 0xa7, 0x2f, 0x37, 0xa1, 0x18, 0x5a, 0x0f, 0x7a,
|
||||
0x1c, 0xa4, 0xb1, 0x76, 0x29, 0x1a, 0x5b, 0xbc, 0x35, 0x51, 0x18, 0x6b, 0x97, 0xbc, 0xa7, 0xf5,
|
||||
0x28, 0xe4, 0xe9, 0xc3, 0x81, 0xc6, 0xd1, 0x26, 0xad, 0xe4, 0xc6, 0xda, 0xe5, 0xbb, 0x9a, 0x2b,
|
||||
0x7f, 0x08, 0xe5, 0x68, 0x53, 0x87, 0x9e, 0x44, 0xc7, 0x9a, 0x98, 0x3a, 0xf3, 0x91, 0x55, 0xf8,
|
||||
0x04, 0xbd, 0x02, 0xd9, 0x0b, 0x8b, 0x83, 0xd5, 0xe2, 0x90, 0xbd, 0x6b, 0x11, 0x1c, 0x6a, 0x0a,
|
||||
0x71, 0x6d, 0xf9, 0x33, 0xc8, 0x32, 0xf0, 0xa1, 0x40, 0xc2, 0xda, 0x33, 0x82, 0xe8, 0xd2, 0x31,
|
||||
0xfa, 0x10, 0x40, 0x23, 0xc4, 0x31, 0x7a, 0x93, 0xc0, 0xf1, 0xce, 0x62, 0xf0, 0x3a, 0xf0, 0xf4,
|
||||
0x1a, 0x37, 0x04, 0x8a, 0x6d, 0x05, 0xa6, 0x21, 0x24, 0x0b, 0x39, 0x94, 0x4f, 0xa0, 0x1c, 0xb5,
|
||||
0x0d, 0x37, 0x4a, 0x37, 0x16, 0x34, 0x4a, 0x7d, 0x32, 0xe5, 0x53, 0xb1, 0x34, 0x6f, 0xc5, 0xb1,
|
||||
0x89, 0xfc, 0x79, 0x12, 0x0a, 0xdd, 0x4b, 0x71, 0xac, 0x63, 0xba, 0x40, 0x81, 0x69, 0x2a, 0xdc,
|
||||
0xf3, 0xe0, 0x6d, 0xa5, 0xb4, 0xdf, 0xac, 0x7a, 0xdb, 0x0f, 0xdc, 0xcc, 0xba, 0x55, 0xa9, 0xd7,
|
||||
0xb5, 0x13, 0x60, 0xf5, 0x06, 0x48, 0xfe, 0xa9, 0xa2, 0x15, 0x83, 0xa6, 0xeb, 0x0e, 0x76, 0x5d,
|
||||
0xb1, 0x36, 0x6f, 0xca, 0x9a, 0x8a, 0xd6, 0xa7, 0xa2, 0xab, 0x92, 0x56, 0xf8, 0x44, 0xd6, 0xa1,
|
||||
0x32, 0x93, 0xb6, 0xd0, 0x1b, 0x90, 0xb7, 0x27, 0x3d, 0xd5, 0xdb, 0x9e, 0x99, 0xe0, 0xf1, 0xd8,
|
||||
0xe3, 0xa4, 0x37, 0x32, 0xfa, 0x47, 0x78, 0xea, 0xfd, 0x18, 0x7b, 0xd2, 0x3b, 0xe2, 0xbb, 0xc8,
|
||||
0xdf, 0x92, 0x0a, 0xbf, 0xe5, 0x02, 0x0a, 0xde, 0xa1, 0x40, 0x3f, 0x0c, 0xc7, 0x89, 0xd7, 0x6a,
|
||||
0x8e, 0x4d, 0xa5, 0xc2, 0x7d, 0x28, 0x4c, 0x6e, 0xc1, 0xa6, 0x6b, 0x0c, 0x4c, 0xac, 0xab, 0x41,
|
||||
0xcd, 0xc2, 0xde, 0x56, 0x50, 0x2a, 0xfc, 0xc1, 0xb1, 0x57, 0xb0, 0xc8, 0xff, 0x4a, 0x42, 0xc1,
|
||||
0x0b, 0x58, 0xf4, 0x52, 0xe8, 0xdc, 0x95, 0x17, 0x74, 0x60, 0x3c, 0xc5, 0xa0, 0x2f, 0x18, 0xfd,
|
||||
0xad, 0xa9, 0xab, 0xff, 0xd6, 0xb8, 0x06, 0xaf, 0xd7, 0x69, 0xcf, 0x5c, 0xb9, 0xd3, 0xfe, 0x02,
|
||||
0x20, 0x62, 0x11, 0x6d, 0xa4, 0x5e, 0x58, 0xc4, 0x30, 0x07, 0x2a, 0xdf, 0x6c, 0xce, 0xa8, 0xaa,
|
||||
0xec, 0xc9, 0x5d, 0xf6, 0xe0, 0x94, 0xed, 0xfb, 0x2f, 0x92, 0x50, 0xf0, 0x73, 0xe3, 0x55, 0xdb,
|
||||
0x7c, 0xd7, 0x21, 0x27, 0xe0, 0x9f, 0xf7, 0xf9, 0xc4, 0xcc, 0xef, 0x38, 0x67, 0x42, 0x1d, 0xe7,
|
||||
0x3a, 0x14, 0xc6, 0x98, 0x68, 0x8c, 0x20, 0xf0, 0xb2, 0xd1, 0x9f, 0xdf, 0x7a, 0x1d, 0x8a, 0xa1,
|
||||
0x8e, 0x2b, 0x8d, 0xbc, 0x93, 0xd6, 0x07, 0xd5, 0x44, 0x3d, 0xff, 0xf9, 0x97, 0x37, 0xd3, 0x27,
|
||||
0xf8, 0x53, 0x7a, 0x66, 0x95, 0x56, 0xb3, 0xdd, 0x6a, 0x1e, 0x55, 0x93, 0xf5, 0xe2, 0xe7, 0x5f,
|
||||
0xde, 0xcc, 0x2b, 0x98, 0x35, 0x6e, 0x6e, 0xb5, 0x61, 0x23, 0xfc, 0x55, 0xa2, 0x19, 0x04, 0x41,
|
||||
0xf9, 0x9d, 0x3b, 0xa7, 0xc7, 0x87, 0xcd, 0x83, 0x6e, 0x4b, 0xbd, 0xdb, 0xe9, 0xb6, 0xaa, 0x49,
|
||||
0xf4, 0x28, 0x5c, 0x3b, 0x3e, 0x7c, 0xb7, 0xdd, 0x55, 0x9b, 0xc7, 0x87, 0xad, 0x93, 0xae, 0x7a,
|
||||
0xd0, 0xed, 0x1e, 0x34, 0x8f, 0xaa, 0xa9, 0xfd, 0x5f, 0x02, 0x54, 0x0e, 0x1a, 0xcd, 0x43, 0x9a,
|
||||
0xfd, 0x8c, 0xbe, 0x26, 0x1a, 0x63, 0x19, 0x56, 0xb5, 0x2f, 0xbd, 0xea, 0xad, 0x2f, 0xef, 0x0b,
|
||||
0xa2, 0xdb, 0x90, 0x65, 0x05, 0x3d, 0x5a, 0x7e, 0xf7, 0x5b, 0x5f, 0xd1, 0x28, 0xa4, 0x3f, 0x86,
|
||||
0x85, 0xc7, 0xd2, 0xcb, 0xe0, 0xfa, 0xf2, 0xbe, 0x21, 0x52, 0x40, 0x0a, 0x2a, 0xf2, 0xd5, 0x97,
|
||||
0xc3, 0xf5, 0x35, 0x7a, 0x89, 0xd4, 0x67, 0x50, 0x16, 0xac, 0xbe, 0x2c, 0xad, 0xaf, 0x01, 0x60,
|
||||
0xe8, 0x18, 0xf2, 0x5e, 0x25, 0xb7, 0xea, 0xfa, 0xb6, 0xbe, 0xb2, 0xcf, 0x47, 0x3f, 0x01, 0xaf,
|
||||
0xb8, 0x97, 0xdf, 0x45, 0xd7, 0x57, 0x34, 0x2d, 0xd1, 0x21, 0xe4, 0x04, 0xd7, 0x5d, 0x71, 0x25,
|
||||
0x5b, 0x5f, 0xd5, 0xb7, 0xa3, 0x9b, 0x16, 0xb4, 0x32, 0x56, 0xdf, 0xb0, 0xd7, 0xd7, 0xe8, 0xc7,
|
||||
0xa2, 0x3b, 0x00, 0xa1, 0xfa, 0x7a, 0x8d, 0xab, 0xf3, 0xfa, 0x3a, 0x7d, 0x56, 0xd4, 0x81, 0x82,
|
||||
0x5f, 0xee, 0xac, 0xbc, 0xc8, 0xae, 0xaf, 0x6e, 0x78, 0xa2, 0x7b, 0x50, 0x8a, 0xf2, 0xfc, 0xf5,
|
||||
0xae, 0xa7, 0xeb, 0x6b, 0x76, 0x32, 0xa9, 0xff, 0x28, 0xe9, 0x5f, 0xef, 0xba, 0xba, 0xbe, 0x66,
|
||||
0x63, 0x13, 0x7d, 0x0c, 0x9b, 0xf3, 0xa4, 0x7c, 0xfd, 0xdb, 0xeb, 0xfa, 0x15, 0x5a, 0x9d, 0x68,
|
||||
0x0c, 0x68, 0x01, 0x99, 0xbf, 0xc2, 0x65, 0x76, 0xfd, 0x2a, 0x9d, 0xcf, 0x46, 0xeb, 0xab, 0x07,
|
||||
0xdb, 0xc9, 0xaf, 0x1f, 0x6c, 0x27, 0xff, 0xf6, 0x60, 0x3b, 0xf9, 0xc5, 0xc3, 0xed, 0xc4, 0xd7,
|
||||
0x0f, 0xb7, 0x13, 0x7f, 0x7e, 0xb8, 0x9d, 0xf8, 0xe9, 0xf3, 0x03, 0x83, 0x0c, 0x27, 0xbd, 0xdd,
|
||||
0xbe, 0x35, 0xde, 0x0b, 0xff, 0xd3, 0x66, 0xd1, 0xbf, 0x7f, 0x7a, 0x39, 0x96, 0xa8, 0x5e, 0xfe,
|
||||
0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x4e, 0x2d, 0x07, 0xd8, 0x1d, 0x24, 0x00, 0x00,
|
||||
}
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
@@ -5635,6 +5663,25 @@ func (m *ResponseCheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.MempoolError) > 0 {
|
||||
i -= len(m.MempoolError)
|
||||
copy(dAtA[i:], m.MempoolError)
|
||||
i = encodeVarintTypes(dAtA, i, uint64(len(m.MempoolError)))
|
||||
i--
|
||||
dAtA[i] = 0x5a
|
||||
}
|
||||
if m.Priority != 0 {
|
||||
i = encodeVarintTypes(dAtA, i, uint64(m.Priority))
|
||||
i--
|
||||
dAtA[i] = 0x50
|
||||
}
|
||||
if len(m.Sender) > 0 {
|
||||
i -= len(m.Sender)
|
||||
copy(dAtA[i:], m.Sender)
|
||||
i = encodeVarintTypes(dAtA, i, uint64(len(m.Sender)))
|
||||
i--
|
||||
dAtA[i] = 0x4a
|
||||
}
|
||||
if len(m.Codespace) > 0 {
|
||||
i -= len(m.Codespace)
|
||||
copy(dAtA[i:], m.Codespace)
|
||||
@@ -7390,6 +7437,17 @@ func (m *ResponseCheckTx) Size() (n int) {
|
||||
if l > 0 {
|
||||
n += 1 + l + sovTypes(uint64(l))
|
||||
}
|
||||
l = len(m.Sender)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovTypes(uint64(l))
|
||||
}
|
||||
if m.Priority != 0 {
|
||||
n += 1 + sovTypes(uint64(m.Priority))
|
||||
}
|
||||
l = len(m.MempoolError)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovTypes(uint64(l))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
@@ -11929,6 +11987,89 @@ func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
m.Codespace = string(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
case 9:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Sender", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowTypes
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthTypes
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthTypes
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Sender = string(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
case 10:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Priority", wireType)
|
||||
}
|
||||
m.Priority = 0
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowTypes
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.Priority |= int64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
case 11:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field MempoolError", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowTypes
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthTypes
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthTypes
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.MempoolError = string(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipTypes(dAtA[iNdEx:])
|
||||
|
||||
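The generated hunks above extend `ResponseCheckTx` with three new fields used by the prioritized mempool: `Sender` (field 9), `Priority` (field 10) and `MempoolError` (field 11). The sketch below shows how an ABCI application might populate them; the `PriorityApp` type and its length-based priority rule are invented for illustration, and only the field names, `abci.BaseApplication` and `abci.CodeTypeOK` come from the ABCI types package.

```go
package example

import (
	abci "github.com/tendermint/tendermint/abci/types"
)

// PriorityApp is a hypothetical ABCI application that fills in the new
// ResponseCheckTx fields added in this diff.
type PriorityApp struct {
	abci.BaseApplication
}

func (app *PriorityApp) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx {
	// Toy rule: longer transactions get lower priority.
	priority := int64(1000 - len(req.Tx))
	if priority < 0 {
		// Reject and surface a mempool-specific error message,
		// purely to exercise the new fields.
		return abci.ResponseCheckTx{
			Code:         1,
			MempoolError: "transaction too large for this toy app",
		}
	}
	return abci.ResponseCheckTx{
		Code:     abci.CodeTypeOK,
		Sender:   "placeholder-sender", // e.g. an account address derived from the tx
		Priority: priority,
	}
}
```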
@@ -369,6 +369,12 @@ FOR_LOOP:
|
||||
// currently necessary.
|
||||
err := state.Validators.VerifyCommitLight(
|
||||
chainID, firstID, first.Height, second.LastCommit)
|
||||
|
||||
if err == nil {
|
||||
// validate the block before we persist it
|
||||
err = bcR.blockExec.ValidateBlock(state, first)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
bcR.Logger.Error("Error in validation", "err", err)
|
||||
peerID := bcR.pool.RedoRequest(first.Height)
|
||||
@@ -386,29 +392,29 @@ FOR_LOOP:
|
||||
bcR.Switch.StopPeerForError(peer2, fmt.Errorf("blockchainReactor validation error: %v", err))
|
||||
}
|
||||
continue FOR_LOOP
|
||||
} else {
|
||||
bcR.pool.PopRequest()
|
||||
|
||||
// TODO: batch saves so we dont persist to disk every block
|
||||
bcR.store.SaveBlock(first, firstParts, second.LastCommit)
|
||||
|
||||
// TODO: same thing for app - but we would need a way to
|
||||
// get the hash without persisting the state
|
||||
var err error
|
||||
state, _, err = bcR.blockExec.ApplyBlock(state, firstID, first)
|
||||
if err != nil {
|
||||
// TODO This is bad, are we zombie?
|
||||
panic(fmt.Sprintf("Failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
|
||||
}
|
||||
blocksSynced++
|
||||
|
||||
if blocksSynced%100 == 0 {
|
||||
lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
|
||||
bcR.Logger.Info("Fast Sync Rate", "height", bcR.pool.height,
|
||||
"max_peer_height", bcR.pool.MaxPeerHeight(), "blocks/s", lastRate)
|
||||
lastHundred = time.Now()
|
||||
}
|
||||
}
|
||||
|
||||
bcR.pool.PopRequest()
|
||||
|
||||
// TODO: batch saves so we dont persist to disk every block
|
||||
bcR.store.SaveBlock(first, firstParts, second.LastCommit)
|
||||
|
||||
// TODO: same thing for app - but we would need a way to
|
||||
// get the hash without persisting the state
|
||||
state, _, err = bcR.blockExec.ApplyBlock(state, firstID, first)
|
||||
if err != nil {
|
||||
// TODO This is bad, are we zombie?
|
||||
panic(fmt.Sprintf("Failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
|
||||
}
|
||||
blocksSynced++
|
||||
|
||||
if blocksSynced%100 == 0 {
|
||||
lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
|
||||
bcR.Logger.Info("Fast Sync Rate", "height", bcR.pool.height,
|
||||
"max_peer_height", bcR.pool.MaxPeerHeight(), "blocks/s", lastRate)
|
||||
lastHundred = time.Now()
|
||||
}
|
||||
|
||||
continue FOR_LOOP
|
||||
|
||||
case <-bcR.Quit():
|
||||
|
||||
@@ -58,7 +58,7 @@ func (rt *Routine) setMetrics(metrics *Metrics) {
}

func (rt *Routine) start() {
	rt.logger.Info(fmt.Sprintf("%s: run", rt.name))
	rt.logger.Info("routine start", "msg", log.NewLazySprintf("%s: run", rt.name))
	running := atomic.CompareAndSwapUint32(rt.running, uint32(0), uint32(1))
	if !running {
		panic(fmt.Sprintf("%s is already running", rt.name))
@@ -98,7 +98,7 @@ func (rt *Routine) start() {
			return
		}
		rt.metrics.EventsOut.With("routine", rt.name).Add(1)
		rt.logger.Debug(fmt.Sprintf("%s: produced %T %+v", rt.name, oEvent, oEvent))
		rt.logger.Debug("routine start", "msg", log.NewLazySprintf("%s: produced %T %+v", rt.name, oEvent, oEvent))

		// Skip rTrySchedule and rProcessBlock events as they clutter the history
		// due to their frequency.
@@ -118,7 +118,7 @@ func (rt *Routine) start() {

// XXX: look into returning OpError in the net package
func (rt *Routine) send(event Event) bool {
	rt.logger.Debug(fmt.Sprintf("%s: received %T %+v", rt.name, event, event))
	rt.logger.Debug("routine send", "msg", log.NewLazySprintf("%s: received %T %+v", rt.name, event, event))
	if !rt.isRunning() {
		return false
	}
@@ -150,7 +150,7 @@ func (rt *Routine) stop() {
		return
	}

	rt.logger.Info(fmt.Sprintf("%s: stop", rt.name))
	rt.logger.Info("routine stop", "msg", log.NewLazySprintf("%s: stop", rt.name))
	rt.queue.Dispose() // this should block until all queue items are free?
}
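These hunks swap eager `fmt.Sprintf` calls for `log.NewLazySprintf`, so the message is only formatted if the log line is actually emitted. The stand-alone sketch below reimplements the idea to show why it saves work at suppressed log levels; it is not the library's code, and `lazySprintf`/`newLazySprintf` are names invented for the example.

```go
package main

import "fmt"

// lazySprintf defers fmt.Sprintf until the value is actually rendered,
// e.g. when a logger decides the line passes its level filter.
type lazySprintf struct {
	format string
	args   []interface{}
}

// newLazySprintf mirrors the idea behind log.NewLazySprintf in this diff:
// capture format and args now, format later.
func newLazySprintf(format string, args ...interface{}) *lazySprintf {
	return &lazySprintf{format: format, args: args}
}

// String is only called when the value is printed, so a suppressed debug
// line never pays the Sprintf cost.
func (l *lazySprintf) String() string {
	return fmt.Sprintf(l.format, l.args...)
}

func main() {
	name := "scheduler"
	// The %v verb invokes String() lazily at print time.
	fmt.Printf("routine start: %v\n", newLazySprintf("%s: run", name))
}
```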
9  buf.gen.yaml  Normal file
@@ -0,0 +1,9 @@
version: v1
plugins:
  - name: gogofaster
    out: ./proto/
    opt:
      - Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types
      - Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration
      - plugins=grpc
      - paths=source_relative
3  buf.work.yaml  Normal file
@@ -0,0 +1,3 @@
version: v1
directories:
  - proto
66  cmd/tendermint/commands/compact.go  Normal file
@@ -0,0 +1,66 @@
package commands

import (
	"errors"
	"path/filepath"
	"sync"

	"github.com/spf13/cobra"
	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/opt"
	"github.com/syndtr/goleveldb/leveldb/util"

	"github.com/tendermint/tendermint/libs/log"
)

var CompactGoLevelDBCmd = &cobra.Command{
	Use:   "experimental-compact-goleveldb",
	Short: "force compacts the tendermint storage engine (only GoLevelDB supported)",
	Long: `
This is a temporary utility command that performs a force compaction on the state
and blockstores to reduce disk space for a pruning node. This should only be run
once the node has stopped. This command will likely be omitted in the future after
the planned refactor to the storage engine.

Currently, only GoLevelDB is supported.
`,
	RunE: func(cmd *cobra.Command, args []string) error {
		if config.DBBackend != "goleveldb" {
			return errors.New("compaction is currently only supported with goleveldb")
		}

		compactGoLevelDBs(config.RootDir, logger)
		return nil
	},
}

func compactGoLevelDBs(rootDir string, logger log.Logger) {
	dbNames := []string{"state", "blockstore"}
	o := &opt.Options{
		DisableSeeksCompaction: true,
	}
	wg := sync.WaitGroup{}

	for _, dbName := range dbNames {
		dbName := dbName
		wg.Add(1)
		go func() {
			defer wg.Done()
			dbPath := filepath.Join(rootDir, "data", dbName+".db")
			store, err := leveldb.OpenFile(dbPath, o)
			if err != nil {
				logger.Error("failed to initialize tendermint db", "path", dbPath, "err", err)
				return
			}
			defer store.Close()

			logger.Info("starting compaction...", "db", dbPath)

			err = store.CompactRange(util.Range{Start: nil, Limit: nil})
			if err != nil {
				logger.Error("failed to compact tendermint db", "path", dbPath, "err", err)
			}
		}()
	}
	wg.Wait()
}
186  cmd/tendermint/commands/reset.go  Normal file
@@ -0,0 +1,186 @@
|
||||
package commands
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmos "github.com/tendermint/tendermint/libs/os"
|
||||
"github.com/tendermint/tendermint/privval"
|
||||
)
|
||||
|
||||
// ResetAllCmd removes the database of this Tendermint core
|
||||
// instance.
|
||||
var ResetAllCmd = &cobra.Command{
|
||||
Use: "unsafe-reset-all",
|
||||
Aliases: []string{"unsafe_reset_all"},
|
||||
Short: "(unsafe) Remove all the data and WAL, reset this node's validator to genesis state",
|
||||
RunE: resetAllCmd,
|
||||
PreRun: deprecateSnakeCase,
|
||||
}
|
||||
|
||||
var keepAddrBook bool
|
||||
|
||||
// ResetStateCmd removes the database of the specified Tendermint core instance.
|
||||
var ResetStateCmd = &cobra.Command{
|
||||
Use: "reset-state",
|
||||
Short: "Remove all the data and WAL",
|
||||
PreRun: deprecateSnakeCase,
|
||||
RunE: func(cmd *cobra.Command, args []string) (err error) {
|
||||
config, err = ParseConfig()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return resetState(config.DBDir(), logger)
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
ResetAllCmd.Flags().BoolVar(&keepAddrBook, "keep-addr-book", false, "keep the address book intact")
|
||||
}
|
||||
|
||||
// ResetPrivValidatorCmd resets the private validator files.
|
||||
var ResetPrivValidatorCmd = &cobra.Command{
|
||||
Use: "unsafe-reset-priv-validator",
|
||||
Aliases: []string{"unsafe_reset_priv_validator"},
|
||||
Short: "(unsafe) Reset this node's validator to genesis state",
|
||||
PreRun: deprecateSnakeCase,
|
||||
RunE: resetPrivValidator,
|
||||
}
|
||||
|
||||
// XXX: this is totally unsafe.
|
||||
// it's only suitable for testnets.
|
||||
func resetAllCmd(cmd *cobra.Command, args []string) (err error) {
|
||||
config, err = ParseConfig()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return resetAll(
|
||||
config.DBDir(),
|
||||
config.P2P.AddrBookFile(),
|
||||
config.PrivValidatorKeyFile(),
|
||||
config.PrivValidatorStateFile(),
|
||||
logger,
|
||||
)
|
||||
}
|
||||
|
||||
// XXX: this is totally unsafe.
|
||||
// it's only suitable for testnets.
|
||||
func resetPrivValidator(cmd *cobra.Command, args []string) (err error) {
|
||||
config, err = ParseConfig()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
resetFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile(), logger)
|
||||
return nil
|
||||
}
|
||||
|
||||
// resetAll removes address book files plus all data, and resets the privValidator data.
|
||||
func resetAll(dbDir, addrBookFile, privValKeyFile, privValStateFile string, logger log.Logger) error {
|
||||
if keepAddrBook {
|
||||
logger.Info("The address book remains intact")
|
||||
} else {
|
||||
removeAddrBook(addrBookFile, logger)
|
||||
}
|
||||
|
||||
if err := os.RemoveAll(dbDir); err == nil {
|
||||
logger.Info("Removed all blockchain history", "dir", dbDir)
|
||||
} else {
|
||||
logger.Error("Error removing all blockchain history", "dir", dbDir, "err", err)
|
||||
}
|
||||
|
||||
if err := tmos.EnsureDir(dbDir, 0700); err != nil {
|
||||
logger.Error("unable to recreate dbDir", "err", err)
|
||||
}
|
||||
|
||||
// recreate the dbDir since the privVal state needs to live there
|
||||
resetFilePV(privValKeyFile, privValStateFile, logger)
|
||||
return nil
|
||||
}
|
||||
|
||||
// resetState removes address book files plus all databases.
|
||||
func resetState(dbDir string, logger log.Logger) error {
|
||||
blockdb := filepath.Join(dbDir, "blockstore.db")
|
||||
state := filepath.Join(dbDir, "state.db")
|
||||
wal := filepath.Join(dbDir, "cs.wal")
|
||||
evidence := filepath.Join(dbDir, "evidence.db")
|
||||
txIndex := filepath.Join(dbDir, "tx_index.db")
|
||||
|
||||
if tmos.FileExists(blockdb) {
|
||||
if err := os.RemoveAll(blockdb); err == nil {
|
||||
logger.Info("Removed all blockstore.db", "dir", blockdb)
|
||||
} else {
|
||||
logger.Error("error removing all blockstore.db", "dir", blockdb, "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
if tmos.FileExists(state) {
|
||||
if err := os.RemoveAll(state); err == nil {
|
||||
logger.Info("Removed all state.db", "dir", state)
|
||||
} else {
|
||||
logger.Error("error removing all state.db", "dir", state, "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
if tmos.FileExists(wal) {
|
||||
if err := os.RemoveAll(wal); err == nil {
|
||||
logger.Info("Removed all cs.wal", "dir", wal)
|
||||
} else {
|
||||
logger.Error("error removing all cs.wal", "dir", wal, "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
if tmos.FileExists(evidence) {
|
||||
if err := os.RemoveAll(evidence); err == nil {
|
||||
logger.Info("Removed all evidence.db", "dir", evidence)
|
||||
} else {
|
||||
logger.Error("error removing all evidence.db", "dir", evidence, "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
if tmos.FileExists(txIndex) {
|
||||
if err := os.RemoveAll(txIndex); err == nil {
|
||||
logger.Info("Removed tx_index.db", "dir", txIndex)
|
||||
} else {
|
||||
logger.Error("error removing tx_index.db", "dir", txIndex, "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := tmos.EnsureDir(dbDir, 0700); err != nil {
|
||||
logger.Error("unable to recreate dbDir", "err", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func resetFilePV(privValKeyFile, privValStateFile string, logger log.Logger) {
|
||||
if _, err := os.Stat(privValKeyFile); err == nil {
|
||||
pv := privval.LoadFilePVEmptyState(privValKeyFile, privValStateFile)
|
||||
pv.Reset()
|
||||
logger.Info(
|
||||
"Reset private validator file to genesis state",
|
||||
"keyFile", privValKeyFile,
|
||||
"stateFile", privValStateFile,
|
||||
)
|
||||
} else {
|
||||
pv := privval.GenFilePV(privValKeyFile, privValStateFile)
|
||||
pv.Save()
|
||||
logger.Info(
|
||||
"Generated private validator file",
|
||||
"keyFile", privValKeyFile,
|
||||
"stateFile", privValStateFile,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
func removeAddrBook(addrBookFile string, logger log.Logger) {
|
||||
if err := os.Remove(addrBookFile); err == nil {
|
||||
logger.Info("Removed existing address book", "file", addrBookFile)
|
||||
} else if !os.IsNotExist(err) {
|
||||
logger.Info("Error removing address book", "file", addrBookFile, "err", err)
|
||||
}
|
||||
}
|
||||
@@ -1,91 +0,0 @@
|
||||
package commands
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmos "github.com/tendermint/tendermint/libs/os"
|
||||
"github.com/tendermint/tendermint/privval"
|
||||
)
|
||||
|
||||
// ResetAllCmd removes the database of this Tendermint core
|
||||
// instance.
|
||||
var ResetAllCmd = &cobra.Command{
|
||||
Use: "unsafe-reset-all",
|
||||
Aliases: []string{"unsafe_reset_all"},
|
||||
Short: "(unsafe) Remove all the data and WAL, reset this node's validator to genesis state",
|
||||
Run: resetAll,
|
||||
PreRun: deprecateSnakeCase,
|
||||
}
|
||||
|
||||
var keepAddrBook bool
|
||||
|
||||
func init() {
|
||||
ResetAllCmd.Flags().BoolVar(&keepAddrBook, "keep-addr-book", false, "keep the address book intact")
|
||||
}
|
||||
|
||||
// ResetPrivValidatorCmd resets the private validator files.
|
||||
var ResetPrivValidatorCmd = &cobra.Command{
|
||||
Use: "unsafe-reset-priv-validator",
|
||||
Aliases: []string{"unsafe_reset_priv_validator"},
|
||||
Short: "(unsafe) Reset this node's validator to genesis state",
|
||||
Run: resetPrivValidator,
|
||||
PreRun: deprecateSnakeCase,
|
||||
}
|
||||
|
||||
// XXX: this is totally unsafe.
|
||||
// it's only suitable for testnets.
|
||||
func resetAll(cmd *cobra.Command, args []string) {
|
||||
ResetAll(config.DBDir(), config.P2P.AddrBookFile(), config.PrivValidatorKeyFile(),
|
||||
config.PrivValidatorStateFile(), logger)
|
||||
}
|
||||
|
||||
// XXX: this is totally unsafe.
|
||||
// it's only suitable for testnets.
|
||||
func resetPrivValidator(cmd *cobra.Command, args []string) {
|
||||
resetFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile(), logger)
|
||||
}
|
||||
|
||||
// ResetAll removes address book files plus all data, and resets the privValdiator data.
|
||||
// Exported so other CLI tools can use it.
|
||||
func ResetAll(dbDir, addrBookFile, privValKeyFile, privValStateFile string, logger log.Logger) {
|
||||
if keepAddrBook {
|
||||
logger.Info("The address book remains intact")
|
||||
} else {
|
||||
removeAddrBook(addrBookFile, logger)
|
||||
}
|
||||
if err := os.RemoveAll(dbDir); err == nil {
|
||||
logger.Info("Removed all blockchain history", "dir", dbDir)
|
||||
} else {
|
||||
logger.Error("Error removing all blockchain history", "dir", dbDir, "err", err)
|
||||
}
|
||||
// recreate the dbDir since the privVal state needs to live there
|
||||
if err := tmos.EnsureDir(dbDir, 0700); err != nil {
|
||||
logger.Error("unable to recreate dbDir", "err", err)
|
||||
}
|
||||
resetFilePV(privValKeyFile, privValStateFile, logger)
|
||||
}
|
||||
|
||||
func resetFilePV(privValKeyFile, privValStateFile string, logger log.Logger) {
|
||||
if _, err := os.Stat(privValKeyFile); err == nil {
|
||||
pv := privval.LoadFilePVEmptyState(privValKeyFile, privValStateFile)
|
||||
pv.Reset()
|
||||
logger.Info("Reset private validator file to genesis state", "keyFile", privValKeyFile,
|
||||
"stateFile", privValStateFile)
|
||||
} else {
|
||||
pv := privval.GenFilePV(privValKeyFile, privValStateFile)
|
||||
pv.Save()
|
||||
logger.Info("Generated private validator file", "keyFile", privValKeyFile,
|
||||
"stateFile", privValStateFile)
|
||||
}
|
||||
}
|
||||
|
||||
func removeAddrBook(addrBookFile string, logger log.Logger) {
|
||||
if err := os.Remove(addrBookFile); err == nil {
|
||||
logger.Info("Removed existing address book", "file", addrBookFile)
|
||||
} else if !os.IsNotExist(err) {
|
||||
logger.Info("Error removing address book", "file", addrBookFile, "err", err)
|
||||
}
|
||||
}
|
||||
53  cmd/tendermint/commands/reset_test.go  Normal file
@@ -0,0 +1,53 @@
package commands

import (
	"path/filepath"
	"testing"

	"github.com/stretchr/testify/require"

	cfg "github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/privval"
)

func Test_ResetAll(t *testing.T) {
	config := cfg.TestConfig()
	dir := t.TempDir()
	config.SetRoot(dir)
	cfg.EnsureRoot(dir)
	require.NoError(t, initFilesWithConfig(config))
	pv := privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile())
	pv.LastSignState.Height = 10
	pv.Save()
	require.NoError(t, resetAll(config.DBDir(), config.P2P.AddrBookFile(), config.PrivValidatorKeyFile(),
		config.PrivValidatorStateFile(), logger))
	require.DirExists(t, config.DBDir())
	require.NoFileExists(t, filepath.Join(config.DBDir(), "block.db"))
	require.NoFileExists(t, filepath.Join(config.DBDir(), "state.db"))
	require.NoFileExists(t, filepath.Join(config.DBDir(), "evidence.db"))
	require.NoFileExists(t, filepath.Join(config.DBDir(), "tx_index.db"))
	require.FileExists(t, config.PrivValidatorStateFile())
	pv = privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile())
	require.Equal(t, int64(0), pv.LastSignState.Height)
}

func Test_ResetState(t *testing.T) {
	config := cfg.TestConfig()
	dir := t.TempDir()
	config.SetRoot(dir)
	cfg.EnsureRoot(dir)
	require.NoError(t, initFilesWithConfig(config))
	pv := privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile())
	pv.LastSignState.Height = 10
	pv.Save()
	require.NoError(t, resetState(config.DBDir(), logger))
	require.DirExists(t, config.DBDir())
	require.NoFileExists(t, filepath.Join(config.DBDir(), "block.db"))
	require.NoFileExists(t, filepath.Join(config.DBDir(), "state.db"))
	require.NoFileExists(t, filepath.Join(config.DBDir(), "evidence.db"))
	require.NoFileExists(t, filepath.Join(config.DBDir(), "tx_index.db"))
	require.FileExists(t, config.PrivValidatorStateFile())
	pv = privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile())
	// private validator state should still be intact.
	require.Equal(t, int64(10), pv.LastSignState.Height)
}
73  cmd/tendermint/commands/rollback.go  Normal file
@@ -0,0 +1,73 @@
package commands

import (
	"fmt"

	"github.com/spf13/cobra"

	dbm "github.com/tendermint/tm-db"

	cfg "github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/store"
)

var RollbackStateCmd = &cobra.Command{
	Use:   "rollback",
	Short: "rollback tendermint state by one height",
	Long: `
A state rollback is performed to recover from an incorrect application state transition,
when Tendermint has persisted an incorrect app hash and is thus unable to make
progress. Rollback overwrites a state at height n with the state at height n - 1.
The application should also roll back to height n - 1. No blocks are removed, so upon
restarting Tendermint the transactions in block n will be re-executed against the
application.
`,
	RunE: func(cmd *cobra.Command, args []string) error {
		height, hash, err := RollbackState(config)
		if err != nil {
			return fmt.Errorf("failed to rollback state: %w", err)
		}

		fmt.Printf("Rolled back state to height %d and hash %v", height, hash)
		return nil
	},
}

// RollbackState takes the state at the current height n and overwrites it with the state
// at height n - 1. Note state here refers to tendermint state not application state.
// Returns the latest state height and app hash alongside an error if there was one.
func RollbackState(config *cfg.Config) (int64, []byte, error) {
	// use the parsed config to load the block and state store
	blockStore, stateStore, err := loadStateAndBlockStore(config)
	if err != nil {
		return -1, nil, err
	}
	defer func() {
		_ = blockStore.Close()
		_ = stateStore.Close()
	}()

	// rollback the last state
	return state.Rollback(blockStore, stateStore)
}

func loadStateAndBlockStore(config *cfg.Config) (*store.BlockStore, state.Store, error) {
	dbType := dbm.BackendType(config.DBBackend)

	// Get BlockStore
	blockStoreDB, err := dbm.NewDB("blockstore", dbType, config.DBDir())
	if err != nil {
		return nil, nil, err
	}
	blockStore := store.NewBlockStore(blockStoreDB)

	// Get StateStore
	stateDB, err := dbm.NewDB("state", dbType, config.DBDir())
	if err != nil {
		return nil, nil, err
	}
	stateStore := state.NewStore(stateDB)

	return blockStore, stateStore, nil
}
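For reference, a minimal sketch of driving the exported `RollbackState` helper from another program, assuming a hypothetical node home directory and a stopped node; `cfg.DefaultConfig` and `SetRoot` are the standard config helpers, and everything else follows the signature shown above.

```go
package main

import (
	"fmt"
	"log"

	"github.com/tendermint/tendermint/cmd/tendermint/commands"
	cfg "github.com/tendermint/tendermint/config"
)

func main() {
	// Hypothetical node home directory; the node must be stopped first.
	conf := cfg.DefaultConfig()
	conf.SetRoot("/home/user/.tendermint")

	// RollbackState overwrites the state at height n with the state at
	// height n-1; blocks are left untouched, as described above.
	height, appHash, err := commands.RollbackState(conf)
	if err != nil {
		log.Fatalf("failed to rollback state: %v", err)
	}
	fmt.Printf("rolled back to height %d, app hash %X\n", height, appHash)
}
```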
@@ -46,9 +46,7 @@ func AddNodeFlags(cmd *cobra.Command) {
		"proxy_app",
		config.ProxyApp,
		"proxy app address, or one of: 'kvstore',"+
			" 'persistent_kvstore',"+
			" 'counter',"+
			" 'counter_serial' or 'noop' for local testing.")
			" 'persistent_kvstore', 'counter', 'e2e' or 'noop' for local testing.")
	cmd.Flags().String("abci", config.ABCI, "specify abci transport (socket | grpc)")

	// rpc flags

@@ -22,11 +22,14 @@ func main() {
		cmd.ReplayConsoleCmd,
		cmd.ResetAllCmd,
		cmd.ResetPrivValidatorCmd,
		cmd.ResetStateCmd,
		cmd.ShowValidatorCmd,
		cmd.TestnetFilesCmd,
		cmd.ShowNodeIDCmd,
		cmd.GenNodeKeyCmd,
		cmd.VersionCmd,
		cmd.RollbackStateCmd,
		cmd.CompactGoLevelDBCmd,
		debug.DebugCmd,
		cli.NewCompletionCmd(rootCmd, true),
	)
@@ -23,6 +23,11 @@ const (
|
||||
|
||||
// DefaultLogLevel defines a default log level as INFO.
|
||||
DefaultLogLevel = "info"
|
||||
|
||||
// Mempool versions. V1 is prioritized mempool, v0 is regular mempool.
|
||||
// Default is v0.
|
||||
MempoolV0 = "v0"
|
||||
MempoolV1 = "v1"
|
||||
)
|
||||
|
||||
// NOTE: Most of the structs & relevant comments + the
|
||||
@@ -52,6 +57,9 @@ var (
|
||||
|
||||
defaultNodeKeyPath = filepath.Join(defaultConfigDir, defaultNodeKeyName)
|
||||
defaultAddrBookPath = filepath.Join(defaultConfigDir, defaultAddrBookName)
|
||||
|
||||
minSubscriptionBufferSize = 100
|
||||
defaultSubscriptionBufferSize = 200
|
||||
)
|
||||
|
||||
// Config defines the top level configuration for a Tendermint node
|
||||
@@ -342,6 +350,29 @@ type RPCConfig struct {
|
||||
// to the estimated maximum number of broadcast_tx_commit calls per block.
|
||||
MaxSubscriptionsPerClient int `mapstructure:"max_subscriptions_per_client"`
|
||||
|
||||
// The number of events that can be buffered per subscription before
|
||||
// returning `ErrOutOfCapacity`.
|
||||
SubscriptionBufferSize int `mapstructure:"experimental_subscription_buffer_size"`
|
||||
|
||||
// The maximum number of responses that can be buffered per WebSocket
|
||||
// client. If clients cannot read from the WebSocket endpoint fast enough,
|
||||
// they will be disconnected, so increasing this parameter may reduce the
|
||||
// chances of them being disconnected (but will cause the node to use more
|
||||
// memory).
|
||||
//
|
||||
// Must be at least the same as `SubscriptionBufferSize`, otherwise
|
||||
// connections may be dropped unnecessarily.
|
||||
WebSocketWriteBufferSize int `mapstructure:"experimental_websocket_write_buffer_size"`
|
||||
|
||||
// If a WebSocket client cannot read fast enough, at present we may
|
||||
// silently drop events instead of generating an error or disconnecting the
|
||||
// client.
|
||||
//
|
||||
// Enabling this parameter will cause the WebSocket connection to be closed
|
||||
// instead if it cannot read fast enough, allowing for greater
|
||||
// predictability in subscription behaviour.
|
||||
CloseOnSlowClient bool `mapstructure:"experimental_close_on_slow_client"`
|
||||
|
||||
// How long to wait for a tx to be committed during /broadcast_tx_commit
|
||||
// WARNING: Using a value larger than 10s will result in increasing the
|
||||
// global HTTP write timeout, which applies to all connections and endpoints.
|
||||
@@ -391,7 +422,9 @@ func DefaultRPCConfig() *RPCConfig {
|
||||
|
||||
MaxSubscriptionClients: 100,
|
||||
MaxSubscriptionsPerClient: 5,
|
||||
SubscriptionBufferSize: defaultSubscriptionBufferSize,
|
||||
TimeoutBroadcastTxCommit: 10 * time.Second,
|
||||
WebSocketWriteBufferSize: defaultSubscriptionBufferSize,
|
||||
|
||||
MaxBodyBytes: int64(1000000), // 1MB
|
||||
MaxHeaderBytes: 1 << 20, // same as the net/http default
|
||||
@@ -425,6 +458,18 @@ func (cfg *RPCConfig) ValidateBasic() error {
|
||||
if cfg.MaxSubscriptionsPerClient < 0 {
|
||||
return errors.New("max_subscriptions_per_client can't be negative")
|
||||
}
|
||||
if cfg.SubscriptionBufferSize < minSubscriptionBufferSize {
|
||||
return fmt.Errorf(
|
||||
"experimental_subscription_buffer_size must be >= %d",
|
||||
minSubscriptionBufferSize,
|
||||
)
|
||||
}
|
||||
if cfg.WebSocketWriteBufferSize < cfg.SubscriptionBufferSize {
|
||||
return fmt.Errorf(
|
||||
"experimental_websocket_write_buffer_size must be >= experimental_subscription_buffer_size (%d)",
|
||||
cfg.SubscriptionBufferSize,
|
||||
)
|
||||
}
|
||||
if cfg.TimeoutBroadcastTxCommit < 0 {
|
||||
return errors.New("timeout_broadcast_tx_commit can't be negative")
|
||||
}
|
||||
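The new `experimental_subscription_buffer_size` and `experimental_websocket_write_buffer_size` settings are tied together by `ValidateBasic`: the write buffer must be at least as large as the subscription buffer. A small sketch of tuning them together, using only the constructor and fields shown in this hunk (the concrete numbers are arbitrary):

```go
package main

import (
	"fmt"

	cfg "github.com/tendermint/tendermint/config"
)

func main() {
	rpc := cfg.DefaultRPCConfig()

	// Raise the per-subscription event buffer for a high-throughput node...
	rpc.SubscriptionBufferSize = 800
	// ...and keep the WebSocket write buffer at least as large, as required
	// by ValidateBasic (ideally somewhat larger, per the config comments).
	rpc.WebSocketWriteBufferSize = 1000
	rpc.CloseOnSlowClient = true

	if err := rpc.ValidateBasic(); err != nil {
		fmt.Println("invalid RPC config:", err)
		return
	}
	fmt.Println("RPC config OK")
}
```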
@@ -636,6 +681,13 @@ func DefaultFuzzConnConfig() *FuzzConnConfig {
|
||||
|
||||
// MempoolConfig defines the configuration options for the Tendermint mempool
|
||||
type MempoolConfig struct {
|
||||
// Mempool version to use:
|
||||
// 1) "v0" - (default) FIFO mempool.
|
||||
// 2) "v1" - prioritized mempool.
|
||||
// WARNING: There's a known memory leak with the prioritized mempool
|
||||
// that the team are working on. Read more here:
|
||||
// https://github.com/tendermint/tendermint/issues/8775
|
||||
Version string `mapstructure:"version"`
|
||||
RootDir string `mapstructure:"home"`
|
||||
Recheck bool `mapstructure:"recheck"`
|
||||
Broadcast bool `mapstructure:"broadcast"`
|
||||
@@ -659,20 +711,39 @@ type MempoolConfig struct {
|
||||
// Including space needed by encoding (one varint per transaction).
|
||||
// XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796
|
||||
MaxBatchBytes int `mapstructure:"max_batch_bytes"`
|
||||
|
||||
	// TTLDuration, if non-zero, defines the maximum amount of time a transaction
	// can exist for in the mempool.
	//
	// Note, if TTLNumBlocks is also defined, a transaction will be removed if it
	// has existed in the mempool at least TTLNumBlocks number of blocks or if its
	// insertion time into the mempool is beyond TTLDuration.
	TTLDuration time.Duration `mapstructure:"ttl-duration"`

	// TTLNumBlocks, if non-zero, defines the maximum number of blocks a transaction
	// can exist for in the mempool.
	//
	// Note, if TTLDuration is also defined, a transaction will be removed if it
	// has existed in the mempool at least TTLNumBlocks number of blocks or if
	// its insertion time into the mempool is beyond TTLDuration.
	TTLNumBlocks int64 `mapstructure:"ttl-num-blocks"`
}
|
||||
|
||||
// DefaultMempoolConfig returns a default configuration for the Tendermint mempool
|
||||
func DefaultMempoolConfig() *MempoolConfig {
|
||||
return &MempoolConfig{
|
||||
Version: MempoolV0,
|
||||
Recheck: true,
|
||||
Broadcast: true,
|
||||
WalPath: "",
|
||||
// Each signature verification takes .5ms, Size reduced until we implement
|
||||
// ABCI Recheck
|
||||
Size: 5000,
|
||||
MaxTxsBytes: 1024 * 1024 * 1024, // 1GB
|
||||
CacheSize: 10000,
|
||||
MaxTxBytes: 1024 * 1024, // 1MB
|
||||
Size: 5000,
|
||||
MaxTxsBytes: 1024 * 1024 * 1024, // 1GB
|
||||
CacheSize: 10000,
|
||||
MaxTxBytes: 1024 * 1024, // 1MB
|
||||
TTLDuration: 0 * time.Second,
|
||||
TTLNumBlocks: 0,
|
||||
}
|
||||
}
|
||||
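The TTL semantics above boil down to an eviction predicate: a transaction goes once it has been in the mempool for at least `ttl-num-blocks` blocks or longer than `ttl-duration`, with a zero value disabling either check. A self-contained sketch of that predicate follows; the `txEntry` type and thresholds are invented for illustration and this is not the mempool's actual implementation.

```go
package main

import (
	"fmt"
	"time"
)

// txEntry is a simplified record of a transaction sitting in a mempool.
type txEntry struct {
	insertedAt     time.Time
	insertedHeight int64
}

// expired mirrors the TTL semantics described in the config comments:
// with both limits set, a transaction is removed when either one is exceeded;
// a zero value disables that particular limit.
func expired(tx txEntry, now time.Time, height int64, ttl time.Duration, ttlBlocks int64) bool {
	if ttl > 0 && now.Sub(tx.insertedAt) > ttl {
		return true
	}
	if ttlBlocks > 0 && height-tx.insertedHeight >= ttlBlocks {
		return true
	}
	return false
}

func main() {
	tx := txEntry{insertedAt: time.Now().Add(-90 * time.Second), insertedHeight: 100}
	// ttl-duration = 1m, ttl-num-blocks = 10: the age check already trips.
	fmt.Println(expired(tx, time.Now(), 105, time.Minute, 10)) // true
}
```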
|
||||
@@ -1017,7 +1088,12 @@ type TxIndexConfig struct {
|
||||
// 1) "null"
|
||||
// 2) "kv" (default) - the simplest possible indexer,
|
||||
// backed by key-value storage (defaults to levelDB; see DBBackend).
|
||||
// 3) "psql" - the indexer services backed by PostgreSQL.
|
||||
Indexer string `mapstructure:"indexer"`
|
||||
|
||||
// The PostgreSQL connection configuration, the connection format:
|
||||
// postgresql://<user>:<password>@<host>:<port>/<db>?<opts>
|
||||
PsqlConn string `mapstructure:"psql-conn"`
|
||||
}
|
||||
|
||||
// DefaultTxIndexConfig returns a default configuration for the transaction indexer.
|
||||
|
||||
@@ -206,6 +206,33 @@ max_subscription_clients = {{ .RPC.MaxSubscriptionClients }}
|
||||
# the estimated maximum number of broadcast_tx_commit calls per block.
|
||||
max_subscriptions_per_client = {{ .RPC.MaxSubscriptionsPerClient }}
|
||||
|
||||
# Experimental parameter to specify the maximum number of events a node will
|
||||
# buffer, per subscription, before returning an error and closing the
|
||||
# subscription. Must be set to at least 100, but higher values will accommodate
|
||||
# higher event throughput rates (and will use more memory).
|
||||
experimental_subscription_buffer_size = {{ .RPC.SubscriptionBufferSize }}
|
||||
|
||||
# Experimental parameter to specify the maximum number of RPC responses that
|
||||
# can be buffered per WebSocket client. If clients cannot read from the
|
||||
# WebSocket endpoint fast enough, they will be disconnected, so increasing this
|
||||
# parameter may reduce the chances of them being disconnected (but will cause
|
||||
# the node to use more memory).
|
||||
#
|
||||
# Must be at least the same as "experimental_subscription_buffer_size",
|
||||
# otherwise connections could be dropped unnecessarily. This value should
|
||||
# ideally be somewhat higher than "experimental_subscription_buffer_size" to
|
||||
# accommodate non-subscription-related RPC responses.
|
||||
experimental_websocket_write_buffer_size = {{ .RPC.WebSocketWriteBufferSize }}
|
||||
|
||||
# If a WebSocket client cannot read fast enough, at present we may
|
||||
# silently drop events instead of generating an error or disconnecting the
|
||||
# client.
|
||||
#
|
||||
# Enabling this experimental parameter will cause the WebSocket connection to
|
||||
# be closed instead if it cannot read fast enough, allowing for greater
|
||||
# predictability in subscription behaviour.
|
||||
experimental_close_on_slow_client = {{ .RPC.CloseOnSlowClient }}
|
||||
|
||||
# How long to wait for a tx to be committed during /broadcast_tx_commit.
|
||||
# WARNING: Using a value larger than 10s will result in increasing the
|
||||
# global HTTP write timeout, which applies to all connections and endpoints.
|
||||
@@ -315,6 +342,11 @@ dial_timeout = "{{ .P2P.DialTimeout }}"
|
||||
#######################################################
|
||||
[mempool]
|
||||
|
||||
# Mempool version to use:
|
||||
# 1) "v0" - (default) FIFO mempool.
|
||||
# 2) "v1" - prioritized mempool.
|
||||
version = "{{ .Mempool.Version }}"
|
||||
|
||||
recheck = {{ .Mempool.Recheck }}
|
||||
broadcast = {{ .Mempool.Broadcast }}
|
||||
wal_dir = "{{ js .Mempool.WalPath }}"
|
||||
@@ -344,6 +376,22 @@ max_tx_bytes = {{ .Mempool.MaxTxBytes }}
|
||||
# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796
|
||||
max_batch_bytes = {{ .Mempool.MaxBatchBytes }}
|
||||
|
||||
# ttl-duration, if non-zero, defines the maximum amount of time a transaction
|
||||
# can exist for in the mempool.
|
||||
#
|
||||
# Note, if ttl-num-blocks is also defined, a transaction will be removed if it
|
||||
# has existed in the mempool at least ttl-num-blocks number of blocks or if it's
|
||||
# insertion time into the mempool is beyond ttl-duration.
|
||||
ttl-duration = "{{ .Mempool.TTLDuration }}"
|
||||
|
||||
# ttl-num-blocks, if non-zero, defines the maximum number of blocks a transaction
|
||||
# can exist for in the mempool.
|
||||
#
|
||||
# Note, if ttl-duration is also defined, a transaction will be removed if it
|
||||
# has existed in the mempool at least ttl-num-blocks number of blocks or if
|
||||
# its insertion time into the mempool is beyond ttl-duration.
|
||||
ttl-num-blocks = {{ .Mempool.TTLNumBlocks }}
|
||||
|
||||
#######################################################
|
||||
### State Sync Configuration Options ###
|
||||
#######################################################
|
||||
@@ -446,8 +494,14 @@ peer_query_maj23_sleep_duration = "{{ .Consensus.PeerQueryMaj23SleepDuration }}"
|
||||
# 1) "null"
|
||||
# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
|
||||
# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed.
|
||||
# 3) "psql" - the indexer services backed by PostgreSQL.
|
||||
# When "kv" or "psql" is chosen "tx.height" and "tx.hash" will always be indexed.
|
||||
indexer = "{{ .TxIndex.Indexer }}"
|
||||
|
||||
# The PostgreSQL connection configuration, the connection format:
|
||||
# postgresql://<user>:<password>@<host>:<port>/<db>?<opts>
|
||||
psql-conn = "{{ .TxIndex.PsqlConn }}"
|
||||
|
||||
#######################################################
|
||||
### Instrumentation Configuration Options ###
|
||||
#######################################################
|
||||
|
||||
@@ -21,6 +21,10 @@ import (
|
||||
"github.com/tendermint/tendermint/libs/service"
|
||||
tmsync "github.com/tendermint/tendermint/libs/sync"
|
||||
mempl "github.com/tendermint/tendermint/mempool"
|
||||
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
mempoolv0 "github.com/tendermint/tendermint/mempool/v0"
|
||||
mempoolv1 "github.com/tendermint/tendermint/mempool/v1"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
@@ -58,14 +62,31 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
|
||||
blockDB := dbm.NewMemDB()
|
||||
blockStore := store.NewBlockStore(blockDB)
|
||||
|
||||
// one for mempool, one for consensus
|
||||
mtx := new(tmsync.Mutex)
|
||||
proxyAppConnMem := abcicli.NewLocalClient(mtx, app)
|
||||
// one for mempool, one for consensus
|
||||
proxyAppConnCon := abcicli.NewLocalClient(mtx, app)
|
||||
proxyAppConnConMem := abcicli.NewLocalClient(mtx, app)
|
||||
|
||||
// Make Mempool
|
||||
mempool := mempl.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0)
|
||||
mempool.SetLogger(log.TestingLogger().With("module", "mempool"))
|
||||
var mempool mempl.Mempool
|
||||
|
||||
switch thisConfig.Mempool.Version {
|
||||
case cfg.MempoolV0:
|
||||
mempool = mempoolv0.NewCListMempool(config.Mempool,
|
||||
proxyAppConnConMem,
|
||||
state.LastBlockHeight,
|
||||
mempoolv0.WithPreCheck(sm.TxPreCheck(state)),
|
||||
mempoolv0.WithPostCheck(sm.TxPostCheck(state)))
|
||||
case cfg.MempoolV1:
|
||||
mempool = mempoolv1.NewTxMempool(logger,
|
||||
config.Mempool,
|
||||
proxyAppConnConMem,
|
||||
state.LastBlockHeight,
|
||||
mempoolv1.WithPreCheck(sm.TxPreCheck(state)),
|
||||
mempoolv1.WithPostCheck(sm.TxPostCheck(state)),
|
||||
)
|
||||
}
|
||||
|
||||
if thisConfig.Consensus.WaitForTxs() {
|
||||
mempool.EnableTxsAvailable()
|
||||
}
|
||||
|
||||
@@ -12,7 +12,7 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/go-kit/kit/log/term"
|
||||
"github.com/go-kit/log/term"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"path"
|
||||
@@ -31,6 +31,8 @@ import (
|
||||
tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
|
||||
tmsync "github.com/tendermint/tendermint/libs/sync"
|
||||
mempl "github.com/tendermint/tendermint/mempool"
|
||||
mempoolv0 "github.com/tendermint/tendermint/mempool/v0"
|
||||
mempoolv1 "github.com/tendermint/tendermint/mempool/v1"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
"github.com/tendermint/tendermint/privval"
|
||||
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
|
||||
@@ -390,12 +392,34 @@ func newStateWithConfigAndBlockStore(
|
||||
|
||||
// one for mempool, one for consensus
|
||||
mtx := new(tmsync.Mutex)
|
||||
proxyAppConnMem := abcicli.NewLocalClient(mtx, app)
|
||||
|
||||
proxyAppConnCon := abcicli.NewLocalClient(mtx, app)
|
||||
proxyAppConnConMem := abcicli.NewLocalClient(mtx, app)
|
||||
// Make Mempool
|
||||
memplMetrics := mempl.NopMetrics()
|
||||
|
||||
// Make Mempool
|
||||
mempool := mempl.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0)
|
||||
mempool.SetLogger(log.TestingLogger().With("module", "mempool"))
|
||||
var mempool mempl.Mempool
|
||||
|
||||
switch config.Mempool.Version {
|
||||
case cfg.MempoolV0:
|
||||
mempool = mempoolv0.NewCListMempool(config.Mempool,
|
||||
proxyAppConnConMem,
|
||||
state.LastBlockHeight,
|
||||
mempoolv0.WithMetrics(memplMetrics),
|
||||
mempoolv0.WithPreCheck(sm.TxPreCheck(state)),
|
||||
mempoolv0.WithPostCheck(sm.TxPostCheck(state)))
|
||||
case cfg.MempoolV1:
|
||||
logger := consensusLogger()
|
||||
mempool = mempoolv1.NewTxMempool(logger,
|
||||
config.Mempool,
|
||||
proxyAppConnConMem,
|
||||
state.LastBlockHeight,
|
||||
mempoolv1.WithMetrics(memplMetrics),
|
||||
mempoolv1.WithPreCheck(sm.TxPreCheck(state)),
|
||||
mempoolv1.WithPostCheck(sm.TxPostCheck(state)),
|
||||
)
|
||||
}
|
||||
if thisConfig.Consensus.WaitForTxs() {
|
||||
mempool.EnableTxsAvailable()
|
||||
}
|
||||
|
||||
@@ -60,6 +60,22 @@ type Metrics struct {
|
||||
|
||||
// Number of blockparts transmitted by peer.
|
||||
BlockParts metrics.Counter
|
||||
|
||||
	// QuorumPrevoteMessageDelay is the interval in seconds between the proposal
	// timestamp and the timestamp of the earliest prevote that achieved a quorum
	// during the prevote step.
	//
	// To compute it, sum the voting power over each prevote received, in increasing
	// order of timestamp. The timestamp of the first prevote to increase the sum to
	// be above 2/3 of the total voting power of the network defines the endpoint of
	// the interval. Subtract the proposal timestamp from this endpoint to obtain the
	// quorum delay.
	QuorumPrevoteMessageDelay metrics.Gauge

	// FullPrevoteMessageDelay is the interval in seconds between the proposal
	// timestamp and the timestamp of the latest prevote in a round where 100%
	// of the voting power on the network issued prevotes.
	FullPrevoteMessageDelay metrics.Gauge
}
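The comment above describes how the quorum prevote delay is computed. Below is a stand-alone sketch of that calculation, with an invented `prevote` type and example numbers; it illustrates the described procedure and is not the consensus package's code.

```go
package main

import (
	"fmt"
	"sort"
	"time"
)

// prevote is a simplified stand-in for a received prevote: its arrival
// timestamp and the voting power of the validator that sent it.
type prevote struct {
	timestamp   time.Time
	votingPower int64
}

// quorumPrevoteDelay walks the prevotes in increasing timestamp order,
// accumulates voting power, and returns the interval between the proposal
// timestamp and the first prevote that pushes the sum above 2/3 of the
// total voting power.
func quorumPrevoteDelay(proposalTime time.Time, votes []prevote, totalPower int64) (time.Duration, bool) {
	sorted := append([]prevote(nil), votes...)
	sort.Slice(sorted, func(i, j int) bool { return sorted[i].timestamp.Before(sorted[j].timestamp) })

	var sum int64
	for _, v := range sorted {
		sum += v.votingPower
		if sum*3 > totalPower*2 { // strictly more than 2/3
			return v.timestamp.Sub(proposalTime), true
		}
	}
	return 0, false // no quorum among the given prevotes
}

func main() {
	proposal := time.Now()
	votes := []prevote{
		{proposal.Add(40 * time.Millisecond), 30},
		{proposal.Add(120 * time.Millisecond), 40},
		{proposal.Add(200 * time.Millisecond), 30},
	}
	if d, ok := quorumPrevoteDelay(proposal, votes, 100); ok {
		fmt.Println("quorum prevote delay:", d) // 120ms: the second vote crosses 2/3
	}
}
```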
|
||||
|
||||
// PrometheusMetrics returns Metrics build using Prometheus client library.
|
||||
@@ -186,6 +202,20 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
|
||||
Name: "block_parts",
|
||||
Help: "Number of blockparts transmitted by peer.",
|
||||
}, append(labels, "peer_id")).With(labelsAndValues...),
|
||||
QuorumPrevoteMessageDelay: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "quorum_prevote_message_delay",
|
||||
Help: "Difference in seconds between the proposal timestamp and the timestamp " +
|
||||
"of the latest prevote that achieved a quorum in the prevote step.",
|
||||
}, labels).With(labelsAndValues...),
|
||||
FullPrevoteMessageDelay: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "full_prevote_message_delay",
|
||||
Help: "Difference in seconds between the proposal timestamp and the timestamp " +
|
||||
"of the latest prevote that achieved 100% of the voting power in the prevote step.",
|
||||
}, labels).With(labelsAndValues...),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -209,12 +239,14 @@ func NopMetrics() *Metrics {
|
||||
|
||||
BlockIntervalSeconds: discard.NewHistogram(),
|
||||
|
||||
NumTxs: discard.NewGauge(),
|
||||
BlockSizeBytes: discard.NewGauge(),
|
||||
TotalTxs: discard.NewGauge(),
|
||||
CommittedHeight: discard.NewGauge(),
|
||||
FastSyncing: discard.NewGauge(),
|
||||
StateSyncing: discard.NewGauge(),
|
||||
BlockParts: discard.NewCounter(),
|
||||
NumTxs: discard.NewGauge(),
|
||||
BlockSizeBytes: discard.NewGauge(),
|
||||
TotalTxs: discard.NewGauge(),
|
||||
CommittedHeight: discard.NewGauge(),
|
||||
FastSyncing: discard.NewGauge(),
|
||||
StateSyncing: discard.NewGauge(),
|
||||
BlockParts: discard.NewCounter(),
|
||||
QuorumPrevoteMessageDelay: discard.NewGauge(),
|
||||
FullPrevoteMessageDelay: discard.NewGauge(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -46,6 +46,7 @@ type Reactor struct {
|
||||
mtx tmsync.RWMutex
|
||||
waitSync bool
|
||||
eventBus *types.EventBus
|
||||
rs *cstypes.RoundState
|
||||
|
||||
Metrics *Metrics
|
||||
}
|
||||
@@ -58,6 +59,7 @@ func NewReactor(consensusState *State, waitSync bool, options ...ReactorOption)
|
||||
conR := &Reactor{
|
||||
conS: consensusState,
|
||||
waitSync: waitSync,
|
||||
rs: consensusState.GetRoundState(),
|
||||
Metrics: NopMetrics(),
|
||||
}
|
||||
conR.BaseReactor = *p2p.NewBaseReactor("Consensus", conR)
|
||||
@@ -78,6 +80,7 @@ func (conR *Reactor) OnStart() error {
|
||||
go conR.peerStatsRoutine()
|
||||
|
||||
conR.subscribeToBroadcastEvents()
|
||||
go conR.updateRoundStateRoutine()
|
||||
|
||||
if !conR.WaitSync() {
|
||||
err := conR.conS.Start()
|
||||
@@ -482,11 +485,31 @@ func makeRoundStepMessage(rs *cstypes.RoundState) (nrsMsg *NewRoundStepMessage)
|
||||
}
|
||||
|
||||
func (conR *Reactor) sendNewRoundStepMessage(peer p2p.Peer) {
|
||||
rs := conR.conS.GetRoundState()
|
||||
rs := conR.getRoundState()
|
||||
nrsMsg := makeRoundStepMessage(rs)
|
||||
peer.Send(StateChannel, MustEncode(nrsMsg))
|
||||
}
|
||||
|
||||
func (conR *Reactor) updateRoundStateRoutine() {
|
||||
t := time.NewTicker(100 * time.Microsecond)
|
||||
defer t.Stop()
|
||||
for range t.C {
|
||||
if !conR.IsRunning() {
|
||||
return
|
||||
}
|
||||
rs := conR.conS.GetRoundState()
|
||||
conR.mtx.Lock()
|
||||
conR.rs = rs
|
||||
conR.mtx.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
func (conR *Reactor) getRoundState() *cstypes.RoundState {
|
||||
conR.mtx.RLock()
|
||||
defer conR.mtx.RUnlock()
|
||||
return conR.rs
|
||||
}
|
||||
|
||||
func (conR *Reactor) gossipDataRoutine(peer p2p.Peer, ps *PeerState) {
|
||||
logger := conR.Logger.With("peer", peer)
|
||||
|
||||
@@ -494,10 +517,9 @@ OUTER_LOOP:
|
||||
for {
|
||||
// Manage disconnects from self or peer.
|
||||
if !peer.IsRunning() || !conR.IsRunning() {
|
||||
logger.Info("Stopping gossipDataRoutine for peer")
|
||||
return
|
||||
}
|
||||
rs := conR.conS.GetRoundState()
|
||||
rs := conR.getRoundState()
|
||||
prs := ps.GetRoundState()
|
||||
|
||||
// Send proposal Block parts?
|
||||
@@ -638,10 +660,9 @@ OUTER_LOOP:
|
||||
for {
|
||||
// Manage disconnects from self or peer.
|
||||
if !peer.IsRunning() || !conR.IsRunning() {
|
||||
logger.Info("Stopping gossipVotesRoutine for peer")
|
||||
return
|
||||
}
|
||||
rs := conR.conS.GetRoundState()
|
||||
rs := conR.getRoundState()
|
||||
prs := ps.GetRoundState()
|
||||
|
||||
switch sleeping {
|
||||
@@ -763,19 +784,17 @@ func (conR *Reactor) gossipVotesForHeight(
|
||||
// NOTE: `queryMaj23Routine` has a simple crude design since it only comes
|
||||
// into play for liveness when there's a signature DDoS attack happening.
|
||||
func (conR *Reactor) queryMaj23Routine(peer p2p.Peer, ps *PeerState) {
|
||||
logger := conR.Logger.With("peer", peer)
|
||||
|
||||
OUTER_LOOP:
|
||||
for {
|
||||
// Manage disconnects from self or peer.
|
||||
if !peer.IsRunning() || !conR.IsRunning() {
|
||||
logger.Info("Stopping queryMaj23Routine for peer")
|
||||
return
|
||||
}
|
||||
|
||||
// Maybe send Height/Round/Prevotes
|
||||
{
|
||||
rs := conR.conS.GetRoundState()
|
||||
rs := conR.getRoundState()
|
||||
prs := ps.GetRoundState()
|
||||
if rs.Height == prs.Height {
|
||||
if maj23, ok := rs.Votes.Prevotes(prs.Round).TwoThirdsMajority(); ok {
|
||||
@@ -792,7 +811,7 @@ OUTER_LOOP:
|
||||
|
||||
// Maybe send Height/Round/Precommits
|
||||
{
|
||||
rs := conR.conS.GetRoundState()
|
||||
rs := conR.getRoundState()
|
||||
prs := ps.GetRoundState()
|
||||
if rs.Height == prs.Height {
|
||||
if maj23, ok := rs.Votes.Precommits(prs.Round).TwoThirdsMajority(); ok {
|
||||
@@ -809,7 +828,7 @@ OUTER_LOOP:
|
||||
|
||||
// Maybe send Height/Round/ProposalPOL
|
||||
{
|
||||
rs := conR.conS.GetRoundState()
|
||||
rs := conR.getRoundState()
|
||||
prs := ps.GetRoundState()
|
||||
if rs.Height == prs.Height && prs.ProposalPOLRound >= 0 {
|
||||
if maj23, ok := rs.Votes.Prevotes(prs.ProposalPOLRound).TwoThirdsMajority(); ok {
|
||||
@@ -1246,12 +1265,12 @@ func (ps *PeerState) SetHasVote(vote *types.Vote) {
|
||||
}
|
||||
|
||||
func (ps *PeerState) setHasVote(height int64, round int32, voteType tmproto.SignedMsgType, index int32) {
|
||||
logger := ps.logger.With(
|
||||
ps.logger.Debug("setHasVote",
|
||||
"peerH/R",
|
||||
fmt.Sprintf("%d/%d", ps.PRS.Height, ps.PRS.Round),
|
||||
log.NewLazySprintf("%d/%d", ps.PRS.Height, ps.PRS.Round),
|
||||
"H/R",
|
||||
fmt.Sprintf("%d/%d", height, round))
|
||||
logger.Debug("setHasVote", "type", voteType, "index", index)
|
||||
log.NewLazySprintf("%d/%d", height, round),
|
||||
"type", voteType, "index", index)
|
||||
|
||||
// NOTE: some may be nil BitArrays -> no side effects.
|
||||
psVotes := ps.getVoteBitArray(height, round, voteType)
|
||||
@@ -1476,7 +1495,7 @@ func (m *NewRoundStepMessage) ValidateHeight(initialHeight int64) error {
|
||||
m.LastCommitRound, initialHeight)
|
||||
}
|
||||
if m.Height > initialHeight && m.LastCommitRound < 0 {
|
||||
return fmt.Errorf("LastCommitRound can only be negative for initial height %v", // nolint
|
||||
return fmt.Errorf("LastCommitRound can only be negative for initial height %v",
|
||||
initialHeight)
|
||||
}
|
||||
return nil
|
||||
|
||||
@@ -29,6 +29,8 @@ import (
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmsync "github.com/tendermint/tendermint/libs/sync"
|
||||
mempl "github.com/tendermint/tendermint/mempool"
|
||||
mempoolv0 "github.com/tendermint/tendermint/mempool/v0"
|
||||
mempoolv1 "github.com/tendermint/tendermint/mempool/v1"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
p2pmock "github.com/tendermint/tendermint/p2p/mock"
|
||||
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
|
||||
@@ -152,14 +154,33 @@ func TestReactorWithEvidence(t *testing.T) {
|
||||
blockDB := dbm.NewMemDB()
|
||||
blockStore := store.NewBlockStore(blockDB)
|
||||
|
||||
// one for mempool, one for consensus
|
||||
mtx := new(tmsync.Mutex)
|
||||
proxyAppConnMem := abcicli.NewLocalClient(mtx, app)
|
||||
memplMetrics := mempl.NopMetrics()
|
||||
// one for mempool, one for consensus
|
||||
proxyAppConnCon := abcicli.NewLocalClient(mtx, app)
|
||||
proxyAppConnConMem := abcicli.NewLocalClient(mtx, app)
|
||||
|
||||
// Make Mempool
|
||||
mempool := mempl.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0)
|
||||
mempool.SetLogger(log.TestingLogger().With("module", "mempool"))
|
||||
var mempool mempl.Mempool
|
||||
|
||||
switch config.Mempool.Version {
|
||||
case cfg.MempoolV0:
|
||||
mempool = mempoolv0.NewCListMempool(config.Mempool,
|
||||
proxyAppConnConMem,
|
||||
state.LastBlockHeight,
|
||||
mempoolv0.WithMetrics(memplMetrics),
|
||||
mempoolv0.WithPreCheck(sm.TxPreCheck(state)),
|
||||
mempoolv0.WithPostCheck(sm.TxPostCheck(state)))
|
||||
case cfg.MempoolV1:
|
||||
mempool = mempoolv1.NewTxMempool(logger,
|
||||
config.Mempool,
|
||||
proxyAppConnConMem,
|
||||
state.LastBlockHeight,
|
||||
mempoolv1.WithMetrics(memplMetrics),
|
||||
mempoolv1.WithPreCheck(sm.TxPreCheck(state)),
|
||||
mempoolv1.WithPostCheck(sm.TxPostCheck(state)),
|
||||
)
|
||||
}
|
||||
if thisConfig.Consensus.WaitForTxs() {
|
||||
mempool.EnableTxsAvailable()
|
||||
}
|
||||
|
||||
@@ -15,12 +15,18 @@ type emptyMempool struct{}
|
||||
|
||||
var _ mempl.Mempool = emptyMempool{}
|
||||
|
||||
func (emptyMempool) Lock() {}
|
||||
func (emptyMempool) Unlock() {}
|
||||
func (emptyMempool) Size() int { return 0 }
|
||||
func (emptyMempool) Lock() {}
|
||||
func (emptyMempool) Unlock() {}
|
||||
func (emptyMempool) Size() int { return 0 }
|
||||
func (emptyMempool) SizeBytes() int64 { return 0 }
|
||||
func (emptyMempool) CheckTx(_ types.Tx, _ func(*abci.Response), _ mempl.TxInfo) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (txmp emptyMempool) RemoveTxByKey(txKey types.TxKey) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (emptyMempool) ReapMaxBytesMaxGas(_, _ int64) types.Txs { return types.Txs{} }
|
||||
func (emptyMempool) ReapMaxTxs(n int) types.Txs { return types.Txs{} }
|
||||
func (emptyMempool) Update(
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"runtime/debug"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
@@ -304,6 +305,15 @@ func (cs *State) OnStart() error {
|
||||
}
|
||||
}
|
||||
|
||||
// we need the timeoutRoutine for replay so
|
||||
// we don't block on the tick chan.
|
||||
// NOTE: we will get a build up of garbage go routines
|
||||
// firing on the tockChan until the receiveRoutine is started
|
||||
// to deal with them (by that point, at most one will be valid)
|
||||
if err := cs.timeoutTicker.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// We may have lost some votes if the process crashed reload from consensus
|
||||
// log to catchup.
|
||||
if cs.doWALCatchup {
|
||||
@@ -360,15 +370,6 @@ func (cs *State) OnStart() error {
|
||||
return err
|
||||
}
|
||||
|
||||
// we need the timeoutRoutine for replay so
|
||||
// we don't block on the tick chan.
|
||||
// NOTE: we will get a build up of garbage go routines
|
||||
// firing on the tockChan until the receiveRoutine is started
|
||||
// to deal with them (by that point, at most one will be valid)
|
||||
if err := cs.timeoutTicker.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Double Signing Risk Reduction
|
||||
if err := cs.checkDoubleSigningRisk(cs.Height); err != nil {
|
||||
return err
|
||||
@@ -801,7 +802,6 @@ func (cs *State) receiveRoutine(maxSteps int) {
|
||||
func (cs *State) handleMsg(mi msgInfo) {
|
||||
cs.mtx.Lock()
|
||||
defer cs.mtx.Unlock()
|
||||
|
||||
var (
|
||||
added bool
|
||||
err error
|
||||
@@ -818,6 +818,24 @@ func (cs *State) handleMsg(mi msgInfo) {
|
||||
case *BlockPartMessage:
|
||||
// if the proposal is complete, we'll enterPrevote or tryFinalizeCommit
|
||||
added, err = cs.addProposalBlockPart(msg, peerID)
|
||||
|
||||
		// We unlock here to yield to any routines that need to read the RoundState.
		// Previously, this code held the lock from the point at which the final block
		// part was received until the block executed against the application.
		// This prevented the reactor from being able to retrieve the most updated
		// version of the RoundState. The reactor needs the updated RoundState to
		// gossip the now completed block.
		//
		// This code can be further improved by either always operating on a copy
		// of RoundState and only locking when switching out State's copy of
		// RoundState with the updated copy or by emitting RoundState events in
		// more places for routines depending on it to listen for.
		cs.mtx.Unlock()
|
||||
|
||||
cs.mtx.Lock()
|
||||
if added && cs.ProposalBlockParts.IsComplete() {
|
||||
cs.handleCompleteProposal(msg.Height)
|
||||
}
|
||||
if added {
|
||||
cs.statsMsgQueue <- mi
|
||||
}
|
||||
@@ -866,8 +884,8 @@ func (cs *State) handleMsg(mi msgInfo) {
|
||||
"height", cs.Height,
|
||||
"round", cs.Round,
|
||||
"peer", peerID,
|
||||
"msg_type", fmt.Sprintf("%T", msg),
|
||||
"err", err,
|
||||
"msg", msg,
|
||||
)
|
||||
}
|
||||
}
|
||||
@@ -963,7 +981,7 @@ func (cs *State) enterNewRound(height int64, round int32) {
|
||||
if cs.Height != height || round < cs.Round || (cs.Round == round && cs.Step != cstypes.RoundStepNewHeight) {
|
||||
logger.Debug(
|
||||
"entering new round with invalid args",
|
||||
"current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step),
|
||||
"current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step),
|
||||
)
|
||||
return
|
||||
}
|
||||
@@ -972,7 +990,7 @@ func (cs *State) enterNewRound(height int64, round int32) {
|
||||
logger.Debug("need to set a buffer and log message here for sanity", "start_time", cs.StartTime, "now", now)
|
||||
}
|
||||
|
||||
logger.Debug("entering new round", "current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step))
|
||||
logger.Debug("entering new round", "current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step))
|
||||
|
||||
// increment validators if necessary
|
||||
validators := cs.Validators
|
||||
@@ -1045,12 +1063,12 @@ func (cs *State) enterPropose(height int64, round int32) {
|
||||
if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPropose <= cs.Step) {
|
||||
logger.Debug(
|
||||
"entering propose step with invalid args",
|
||||
"current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step),
|
||||
"current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step),
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
logger.Debug("entering propose step", "current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step))
|
||||
logger.Debug("entering propose step", "current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step))
|
||||
|
||||
defer func() {
|
||||
// Done enterPropose:
|
||||
@@ -1212,7 +1230,7 @@ func (cs *State) enterPrevote(height int64, round int32) {
|
||||
if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrevote <= cs.Step) {
|
||||
logger.Debug(
|
||||
"entering prevote step with invalid args",
|
||||
"current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step),
|
||||
"current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step),
|
||||
)
|
||||
return
|
||||
}
|
||||
@@ -1223,7 +1241,7 @@ func (cs *State) enterPrevote(height int64, round int32) {
|
||||
cs.newStep()
|
||||
}()
|
||||
|
||||
logger.Debug("entering prevote step", "current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step))
|
||||
logger.Debug("entering prevote step", "current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step))
|
||||
|
||||
// Sign and broadcast vote as necessary
|
||||
cs.doPrevote(height, round)
|
||||
@@ -1272,7 +1290,7 @@ func (cs *State) enterPrevoteWait(height int64, round int32) {
|
||||
if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrevoteWait <= cs.Step) {
|
||||
logger.Debug(
|
||||
"entering prevote wait step with invalid args",
|
||||
"current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step),
|
||||
"current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step),
|
||||
)
|
||||
return
|
||||
}
|
||||
@@ -1284,7 +1302,7 @@ func (cs *State) enterPrevoteWait(height int64, round int32) {
|
||||
))
|
||||
}
|
||||
|
||||
logger.Debug("entering prevote wait step", "current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step))
|
||||
logger.Debug("entering prevote wait step", "current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step))
|
||||
|
||||
defer func() {
|
||||
// Done enterPrevoteWait:
|
||||
@@ -1308,12 +1326,12 @@ func (cs *State) enterPrecommit(height int64, round int32) {
|
||||
if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrecommit <= cs.Step) {
|
||||
logger.Debug(
|
||||
"entering precommit step with invalid args",
|
||||
"current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step),
|
||||
"current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step),
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
logger.Debug("entering precommit step", "current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step))
|
||||
logger.Debug("entering precommit step", "current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step))
|
||||
|
||||
defer func() {
|
||||
// Done enterPrecommit:
|
||||
@@ -1431,7 +1449,7 @@ func (cs *State) enterPrecommitWait(height int64, round int32) {
|
||||
logger.Debug(
|
||||
"entering precommit wait step with invalid args",
|
||||
"triggered_timeout", cs.TriggeredTimeoutPrecommit,
|
||||
"current", fmt.Sprintf("%v/%v", cs.Height, cs.Round),
|
||||
"current", log.NewLazySprintf("%v/%v", cs.Height, cs.Round),
|
||||
)
|
||||
return
|
||||
}
|
||||
@@ -1443,7 +1461,7 @@ func (cs *State) enterPrecommitWait(height int64, round int32) {
|
||||
))
|
||||
}
|
||||
|
||||
logger.Debug("entering precommit wait step", "current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step))
|
||||
logger.Debug("entering precommit wait step", "current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step))
|
||||
|
||||
defer func() {
|
||||
// Done enterPrecommitWait:
|
||||
@@ -1462,12 +1480,12 @@ func (cs *State) enterCommit(height int64, commitRound int32) {
|
||||
if cs.Height != height || cstypes.RoundStepCommit <= cs.Step {
|
||||
logger.Debug(
|
||||
"entering commit step with invalid args",
|
||||
"current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step),
|
||||
"current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step),
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
logger.Debug("entering commit step", "current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step))
|
||||
logger.Debug("entering commit step", "current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step))
|
||||
|
||||
defer func() {
|
||||
// Done enterCommit:
|
||||
@@ -1500,7 +1518,7 @@ func (cs *State) enterCommit(height int64, commitRound int32) {
|
||||
if !cs.ProposalBlockParts.HasHeader(blockID.PartSetHeader) {
|
||||
logger.Info(
|
||||
"commit is for a block we do not know about; set ProposalBlock=nil",
|
||||
"proposal", cs.ProposalBlock.Hash(),
|
||||
"proposal", log.NewLazyBlockHash(cs.ProposalBlock),
|
||||
"commit", blockID.Hash,
|
||||
)
|
||||
|
||||
@@ -1537,7 +1555,7 @@ func (cs *State) tryFinalizeCommit(height int64) {
|
||||
// TODO: ^^ wait, why does it matter that we're a validator?
|
||||
logger.Debug(
|
||||
"failed attempt to finalize commit; we do not have the commit block",
|
||||
"proposal_block", cs.ProposalBlock.Hash(),
|
||||
"proposal_block", log.NewLazyBlockHash(cs.ProposalBlock),
|
||||
"commit_block", blockID.Hash,
|
||||
)
|
||||
return
|
||||
@@ -1553,11 +1571,13 @@ func (cs *State) finalizeCommit(height int64) {
|
||||
if cs.Height != height || cs.Step != cstypes.RoundStepCommit {
|
||||
logger.Debug(
|
||||
"entering finalize commit step",
|
||||
"current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step),
|
||||
"current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step),
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
cs.calculatePrevoteMessageDelayMetrics()
|
||||
|
||||
blockID, ok := cs.Votes.Precommits(cs.CommitRound).TwoThirdsMajority()
|
||||
block, blockParts := cs.ProposalBlock, cs.ProposalBlockParts
|
||||
|
||||
@@ -1577,11 +1597,11 @@ func (cs *State) finalizeCommit(height int64) {
|
||||
|
||||
logger.Info(
|
||||
"finalizing commit of block",
|
||||
"hash", block.Hash(),
|
||||
"hash", log.NewLazyBlockHash(block),
|
||||
"root", block.AppHash,
|
||||
"num_txs", len(block.Txs),
|
||||
)
|
||||
logger.Debug(fmt.Sprintf("%v", block))
|
||||
logger.Debug("committed block", "block", log.NewLazySprintf("%v", block))
|
||||
|
||||
fail.Fail() // XXX
|
||||
|
||||
@@ -1885,44 +1905,43 @@ func (cs *State) addProposalBlockPart(msg *BlockPartMessage, peerID p2p.ID) (add
|
||||
if err := cs.eventBus.PublishEventCompleteProposal(cs.CompleteProposalEvent()); err != nil {
|
||||
cs.Logger.Error("failed publishing event complete proposal", "err", err)
|
||||
}
|
||||
}
|
||||
return added, nil
|
||||
}
|
||||
|
||||
// Update Valid* if we can.
|
||||
prevotes := cs.Votes.Prevotes(cs.Round)
|
||||
blockID, hasTwoThirds := prevotes.TwoThirdsMajority()
|
||||
if hasTwoThirds && !blockID.IsZero() && (cs.ValidRound < cs.Round) {
|
||||
if cs.ProposalBlock.HashesTo(blockID.Hash) {
|
||||
cs.Logger.Debug(
|
||||
"updating valid block to new proposal block",
|
||||
"valid_round", cs.Round,
|
||||
"valid_block_hash", cs.ProposalBlock.Hash(),
|
||||
)
|
||||
func (cs *State) handleCompleteProposal(blockHeight int64) {
|
||||
// Update Valid* if we can.
|
||||
prevotes := cs.Votes.Prevotes(cs.Round)
|
||||
blockID, hasTwoThirds := prevotes.TwoThirdsMajority()
|
||||
if hasTwoThirds && !blockID.IsZero() && (cs.ValidRound < cs.Round) {
|
||||
if cs.ProposalBlock.HashesTo(blockID.Hash) {
|
||||
cs.Logger.Debug(
|
||||
"updating valid block to new proposal block",
|
||||
"valid_round", cs.Round,
|
||||
"valid_block_hash", log.NewLazyBlockHash(cs.ProposalBlock),
|
||||
)
|
||||
|
||||
cs.ValidRound = cs.Round
|
||||
cs.ValidBlock = cs.ProposalBlock
|
||||
cs.ValidBlockParts = cs.ProposalBlockParts
|
||||
}
|
||||
// TODO: In case there is +2/3 majority in Prevotes set for some
|
||||
// block and cs.ProposalBlock contains different block, either
|
||||
// proposer is faulty or voting power of faulty processes is more
|
||||
// than 1/3. We should trigger in the future accountability
|
||||
// procedure at this point.
|
||||
cs.ValidRound = cs.Round
|
||||
cs.ValidBlock = cs.ProposalBlock
|
||||
cs.ValidBlockParts = cs.ProposalBlockParts
|
||||
}
|
||||
|
||||
if cs.Step <= cstypes.RoundStepPropose && cs.isProposalComplete() {
|
||||
// Move onto the next step
|
||||
cs.enterPrevote(height, cs.Round)
|
||||
if hasTwoThirds { // this is optimisation as this will be triggered when prevote is added
|
||||
cs.enterPrecommit(height, cs.Round)
|
||||
}
|
||||
} else if cs.Step == cstypes.RoundStepCommit {
|
||||
// If we're waiting on the proposal block...
|
||||
cs.tryFinalizeCommit(height)
|
||||
}
|
||||
|
||||
return added, nil
|
||||
// TODO: In case there is +2/3 majority in Prevotes set for some
|
||||
// block and cs.ProposalBlock contains different block, either
|
||||
// proposer is faulty or voting power of faulty processes is more
|
||||
// than 1/3. We should trigger in the future accountability
|
||||
// procedure at this point.
|
||||
}
|
||||
|
||||
return added, nil
|
||||
if cs.Step <= cstypes.RoundStepPropose && cs.isProposalComplete() {
|
||||
// Move onto the next step
|
||||
cs.enterPrevote(blockHeight, cs.Round)
|
||||
if hasTwoThirds { // this is optimisation as this will be triggered when prevote is added
|
||||
cs.enterPrecommit(blockHeight, cs.Round)
|
||||
}
|
||||
} else if cs.Step == cstypes.RoundStepCommit {
|
||||
// If we're waiting on the proposal block...
|
||||
cs.tryFinalizeCommit(blockHeight)
|
||||
}
|
||||
}
|
||||
|
||||
// Attempt to add the vote. if its a duplicate signature, dupeout the validator
|
||||
@@ -2073,7 +2092,7 @@ func (cs *State) addVote(vote *types.Vote, peerID p2p.ID) (added bool, err error
|
||||
} else {
|
||||
cs.Logger.Debug(
|
||||
"valid block we do not know about; set ProposalBlock=nil",
|
||||
"proposal", cs.ProposalBlock.Hash(),
|
||||
"proposal", log.NewLazyBlockHash(cs.ProposalBlock),
|
||||
"block_id", blockID.Hash,
|
||||
)
|
||||
|
||||
@@ -2274,6 +2293,32 @@ func (cs *State) checkDoubleSigningRisk(height int64) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cs *State) calculatePrevoteMessageDelayMetrics() {
|
||||
if cs.Proposal == nil {
|
||||
return
|
||||
}
|
||||
|
||||
ps := cs.Votes.Prevotes(cs.Round)
|
||||
pl := ps.List()
|
||||
|
||||
sort.Slice(pl, func(i, j int) bool {
|
||||
return pl[i].Timestamp.Before(pl[j].Timestamp)
|
||||
})
|
||||
|
||||
var votingPowerSeen int64
|
||||
for _, v := range pl {
|
||||
_, val := cs.Validators.GetByAddress(v.ValidatorAddress)
|
||||
votingPowerSeen += val.VotingPower
|
||||
if votingPowerSeen >= cs.Validators.TotalVotingPower()*2/3+1 {
|
||||
cs.metrics.QuorumPrevoteMessageDelay.Set(v.Timestamp.Sub(cs.Proposal.Timestamp).Seconds())
|
||||
break
|
||||
}
|
||||
}
|
||||
if ps.HasAll() {
|
||||
cs.metrics.FullPrevoteMessageDelay.Set(pl[len(pl)-1].Timestamp.Sub(cs.Proposal.Timestamp).Seconds())
|
||||
}
|
||||
}
|
||||
|
||||
//---------------------------------------------------------
|
||||
|
||||
func CompareHRS(h1 int64, r1 int32, s1 cstypes.RoundStepType, h2 int64, r2 int32, s2 cstypes.RoundStepType) int {
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
//go:build gofuzz
|
||||
// +build gofuzz
|
||||
|
||||
package consensus
|
||||
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
|
||||
"golang.org/x/crypto/openpgp/armor"
|
||||
"golang.org/x/crypto/openpgp/armor" // nolint: staticcheck
|
||||
)
|
||||
|
||||
func EncodeArmor(blockType string, headers map[string]string, data []byte) string {
|
||||
|
||||
@@ -122,6 +122,26 @@ func GenPrivKeySecp256k1(secret []byte) PrivKey {
|
||||
return PrivKey(privKey32)
|
||||
}
|
||||
|
||||
// used to reject malleable signatures
|
||||
// see:
|
||||
// - https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/signature_nocgo.go#L90-L93
|
||||
// - https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/crypto.go#L39
|
||||
var secp256k1halfN = new(big.Int).Rsh(secp256k1.S256().N, 1)
|
||||
|
||||
// Sign creates an ECDSA signature on curve Secp256k1, using SHA256 on the msg.
|
||||
// The returned signature will be of the form R || S (in lower-S form).
|
||||
func (privKey PrivKey) Sign(msg []byte) ([]byte, error) {
|
||||
priv, _ := secp256k1.PrivKeyFromBytes(secp256k1.S256(), privKey)
|
||||
|
||||
sig, err := priv.Sign(crypto.Sha256(msg))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sigBytes := serializeSig(sig)
|
||||
return sigBytes, nil
|
||||
}
|
||||
|
||||
//-------------------------------------
|
||||
|
||||
var _ crypto.PubKey = PubKey{}
|
||||
@@ -152,7 +172,7 @@ func (pubKey PubKey) Address() crypto.Address {
|
||||
return crypto.Address(hasherRIPEMD160.Sum(nil))
|
||||
}
|
||||
|
||||
// Bytes returns the pubkey marshalled with amino encoding.
|
||||
// Bytes returns the pubkey marshaled with amino encoding.
|
||||
func (pubKey PubKey) Bytes() []byte {
|
||||
return []byte(pubKey)
|
||||
}
|
||||
@@ -171,3 +191,47 @@ func (pubKey PubKey) Equals(other crypto.PubKey) bool {
|
||||
func (pubKey PubKey) Type() string {
|
||||
return KeyType
|
||||
}
|
||||
|
||||
// VerifySignature verifies a signature of the form R || S.
|
||||
// It rejects signatures which are not in lower-S form.
|
||||
func (pubKey PubKey) VerifySignature(msg []byte, sigStr []byte) bool {
|
||||
if len(sigStr) != 64 {
|
||||
return false
|
||||
}
|
||||
|
||||
pub, err := secp256k1.ParsePubKey(pubKey, secp256k1.S256())
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// parse the signature:
|
||||
signature := signatureFromBytes(sigStr)
|
||||
// Reject malleable signatures. libsecp256k1 does this check but btcec doesn't.
|
||||
// see: https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/signature_nocgo.go#L90-L93
|
||||
if signature.S.Cmp(secp256k1halfN) > 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
return signature.Verify(crypto.Sha256(msg), pub)
|
||||
}
|
||||
|
||||
// Read Signature struct from R || S. Caller needs to ensure
|
||||
// that len(sigStr) == 64.
|
||||
func signatureFromBytes(sigStr []byte) *secp256k1.Signature {
|
||||
return &secp256k1.Signature{
|
||||
R: new(big.Int).SetBytes(sigStr[:32]),
|
||||
S: new(big.Int).SetBytes(sigStr[32:64]),
|
||||
}
|
||||
}
|
||||
|
||||
// Serialize signature to R || S.
|
||||
// R, S are padded to 32 bytes respectively.
|
||||
func serializeSig(sig *secp256k1.Signature) []byte {
|
||||
rBytes := sig.R.Bytes()
|
||||
sBytes := sig.S.Bytes()
|
||||
sigBytes := make([]byte, 64)
|
||||
// 0 pad the byte arrays from the left if they aren't big enough.
|
||||
copy(sigBytes[32-len(rBytes):32], rBytes)
|
||||
copy(sigBytes[64-len(sBytes):64], sBytes)
|
||||
return sigBytes
|
||||
}
|
||||
|
||||
@@ -1,75 +0,0 @@
|
||||
// +build !libsecp256k1
|
||||
|
||||
package secp256k1
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
|
||||
secp256k1 "github.com/btcsuite/btcd/btcec"
|
||||
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
)
|
||||
|
||||
// used to reject malleable signatures
|
||||
// see:
|
||||
// - https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/signature_nocgo.go#L90-L93
|
||||
// - https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/crypto.go#L39
|
||||
var secp256k1halfN = new(big.Int).Rsh(secp256k1.S256().N, 1)
|
||||
|
||||
// Sign creates an ECDSA signature on curve Secp256k1, using SHA256 on the msg.
|
||||
// The returned signature will be of the form R || S (in lower-S form).
|
||||
func (privKey PrivKey) Sign(msg []byte) ([]byte, error) {
|
||||
priv, _ := secp256k1.PrivKeyFromBytes(secp256k1.S256(), privKey)
|
||||
|
||||
sig, err := priv.Sign(crypto.Sha256(msg))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sigBytes := serializeSig(sig)
|
||||
return sigBytes, nil
|
||||
}
|
||||
|
||||
// VerifySignature verifies a signature of the form R || S.
|
||||
// It rejects signatures which are not in lower-S form.
|
||||
func (pubKey PubKey) VerifySignature(msg []byte, sigStr []byte) bool {
|
||||
if len(sigStr) != 64 {
|
||||
return false
|
||||
}
|
||||
|
||||
pub, err := secp256k1.ParsePubKey(pubKey, secp256k1.S256())
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// parse the signature:
|
||||
signature := signatureFromBytes(sigStr)
|
||||
// Reject malleable signatures. libsecp256k1 does this check but btcec doesn't.
|
||||
// see: https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/signature_nocgo.go#L90-L93
|
||||
if signature.S.Cmp(secp256k1halfN) > 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
return signature.Verify(crypto.Sha256(msg), pub)
|
||||
}
|
||||
|
||||
// Read Signature struct from R || S. Caller needs to ensure
|
||||
// that len(sigStr) == 64.
|
||||
func signatureFromBytes(sigStr []byte) *secp256k1.Signature {
|
||||
return &secp256k1.Signature{
|
||||
R: new(big.Int).SetBytes(sigStr[:32]),
|
||||
S: new(big.Int).SetBytes(sigStr[32:64]),
|
||||
}
|
||||
}
|
||||
|
||||
// Serialize signature to R || S.
|
||||
// R, S are padded to 32 bytes respectively.
|
||||
func serializeSig(sig *secp256k1.Signature) []byte {
|
||||
rBytes := sig.R.Bytes()
|
||||
sBytes := sig.S.Bytes()
|
||||
sigBytes := make([]byte, 64)
|
||||
// 0 pad the byte arrays from the left if they aren't big enough.
|
||||
copy(sigBytes[32-len(rBytes):32], rBytes)
|
||||
copy(sigBytes[64-len(sBytes):64], sBytes)
|
||||
return sigBytes
|
||||
}
|
||||
@@ -36,7 +36,7 @@ func TestPubKeySecp256k1Address(t *testing.T) {
|
||||
addrBbz, _, _ := base58.CheckDecode(d.addr)
|
||||
addrB := crypto.Address(addrBbz)
|
||||
|
||||
var priv secp256k1.PrivKey = secp256k1.PrivKey(privB)
|
||||
priv := secp256k1.PrivKey(privB)
|
||||
|
||||
pubKey := priv.PubKey()
|
||||
pubT, _ := pubKey.(secp256k1.PubKey)
|
||||
|
||||
@@ -25,19 +25,19 @@ func TestRandom(t *testing.T) {
|
||||
plaintext := make([]byte, pl)
|
||||
_, err := cr.Read(key[:])
|
||||
if err != nil {
|
||||
t.Errorf("error on read: %w", err)
|
||||
t.Errorf("error on read: %v", err)
|
||||
}
|
||||
_, err = cr.Read(nonce[:])
|
||||
if err != nil {
|
||||
t.Errorf("error on read: %w", err)
|
||||
t.Errorf("error on read: %v", err)
|
||||
}
|
||||
_, err = cr.Read(ad)
|
||||
if err != nil {
|
||||
t.Errorf("error on read: %w", err)
|
||||
t.Errorf("error on read: %v", err)
|
||||
}
|
||||
_, err = cr.Read(plaintext)
|
||||
if err != nil {
|
||||
t.Errorf("error on read: %w", err)
|
||||
t.Errorf("error on read: %v", err)
|
||||
}
|
||||
|
||||
aead, err := New(key[:])
|
||||
@@ -83,7 +83,7 @@ func TestRandom(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// AFOREMENTIONED LICENCE
|
||||
// AFOREMENTIONED LICENSE
|
||||
// Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
|
||||
@@ -22,10 +22,6 @@ module.exports = {
|
||||
index: "tendermint"
|
||||
},
|
||||
versions: [
|
||||
{
|
||||
"label": "v0.32",
|
||||
"key": "v0.32"
|
||||
},
|
||||
{
|
||||
"label": "v0.33",
|
||||
"key": "v0.33"
|
||||
@@ -35,8 +31,8 @@ module.exports = {
|
||||
"key": "v0.34"
|
||||
},
|
||||
{
|
||||
"label": "master",
|
||||
"key": "master"
|
||||
"label": "v0.35",
|
||||
"key": "v0.35"
|
||||
}
|
||||
],
|
||||
topbar: {
|
||||
@@ -49,12 +45,10 @@ module.exports = {
|
||||
title: 'Resources',
|
||||
children: [
|
||||
{
|
||||
title: 'Developer Sessions',
|
||||
path: '/DEV_SESSIONS.html'
|
||||
},
|
||||
{
|
||||
// TODO(creachadair): Figure out how to make this per-branch.
|
||||
// See: https://github.com/tendermint/tendermint/issues/7908
|
||||
title: 'RPC',
|
||||
path: 'https://docs.tendermint.com/master/rpc/',
|
||||
path: 'https://docs.tendermint.com/v0.35/rpc/',
|
||||
static: true
|
||||
},
|
||||
]
|
||||
@@ -166,6 +160,12 @@ module.exports = {
|
||||
{
|
||||
ga: 'UA-51029217-11'
|
||||
}
|
||||
],
|
||||
[
|
||||
'@vuepress/plugin-html-redirect',
|
||||
{
|
||||
countdown: 0
|
||||
}
|
||||
]
|
||||
]
|
||||
};
|
||||
|
||||
docs/.vuepress/redirects (new file, 1 line)
@@ -0,0 +1 @@
|
||||
/master/ /v0.35/
|
||||
@@ -63,7 +63,7 @@ as `abci-cli` above. The kvstore just stores transactions in a merkle
|
||||
tree.
|
||||
|
||||
Its code can be found
|
||||
[here](https://github.com/tendermint/tendermint/blob/master/abci/cmd/abci-cli/abci-cli.go)
|
||||
[here](https://github.com/tendermint/tendermint/blob/v0.34.x/abci/cmd/abci-cli/abci-cli.go)
|
||||
and looks like:
|
||||
|
||||
```go
|
||||
@@ -220,7 +220,7 @@ Now that we've got the hang of it, let's try another application, the
|
||||
"counter" app.
|
||||
|
||||
Like the kvstore app, its code can be found
|
||||
[here](https://github.com/tendermint/tendermint/blob/master/abci/cmd/abci-cli/abci-cli.go)
|
||||
[here](https://github.com/tendermint/tendermint/blob/v0.34.x/abci/cmd/abci-cli/abci-cli.go)
|
||||
and looks like:
|
||||
|
||||
```go
|
||||
|
||||
@@ -31,24 +31,61 @@ For example:
|
||||
|
||||
would be equal to the composite key of `jack.account.number`.
|
||||
|
||||
Let's take a look at the `[tx_index]` config section:
|
||||
By default, Tendermint will index all transactions by their respective hashes
|
||||
and height and blocks by their height.
|
||||
|
||||
## Configuration
|
||||
|
||||
Operators can configure indexing via the `[tx_index]` section. The `indexer`
|
||||
field takes a series of supported indexers. If `null` is included, indexing will
|
||||
be turned off regardless of other values provided.
|
||||
|
||||
```toml
|
||||
##### transactions indexer configuration options #####
|
||||
[tx_index]
|
||||
[tx-index]
|
||||
|
||||
# What indexer to use for transactions
|
||||
# The backend database to back the indexer.
|
||||
# If indexer is "null", no indexer service will be used.
|
||||
#
|
||||
# The application will set which txs to index. In some cases a node operator will be able
|
||||
# to decide which txs to index based on configuration set in the application.
|
||||
#
|
||||
# Options:
|
||||
# 1) "null"
|
||||
# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
|
||||
indexer = "kv"
|
||||
# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed.
|
||||
# 3) "psql" - the indexer services backed by PostgreSQL.
|
||||
# indexer = "kv"
|
||||
```
|
||||
|
||||
By default, Tendermint will index all transactions by their respective hashes
|
||||
and height and blocks by their height.
|
||||
### Supported Indexers
|
||||
|
||||
You can turn off indexing completely by setting `tx_index` to `null`.
|
||||
#### KV
|
||||
|
||||
The `kv` indexer type is an embedded key-value store supported by the main
|
||||
underlying Tendermint database. Using the `kv` indexer type allows you to query
|
||||
for block and transaction events directly against Tendermint's RPC. However, the
|
||||
query syntax is limited and so this indexer type might be deprecated or removed
|
||||
entirely in the future.
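For example, once a transaction has been indexed by the `kv` indexer, it can be looked up
through the `tx_search` RPC endpoint. The query below is only an illustration and assumes
the default RPC port:

```shell
curl -s 'localhost:26657/tx_search?query="tx.height=1000"&prove=true'
```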
|
||||
|
||||
#### PostgreSQL
|
||||
|
||||
The `psql` indexer type allows an operator to enable block and transaction event
|
||||
indexing by proxying it to an external PostgreSQL instance, allowing the events
|
||||
to be stored in relational models. Since the events are stored in an RDBMS, operators
|
||||
can leverage SQL to perform a series of rich and complex queries that are not
|
||||
supported by the `kv` indexer type. Since operators can leverage SQL directly,
|
||||
searching is not enabled for the `psql` indexer type via Tendermint's RPC -- any
|
||||
such query will fail.
|
||||
|
||||
Note: the SQL schema is stored in `state/indexer/sink/psql/schema.sql`, and operators
|
||||
must explicitly create the relations prior to starting Tendermint and enabling
|
||||
the `psql` indexer type.
|
||||
|
||||
Example:
|
||||
|
||||
```shell
|
||||
$ psql ... -f state/indexer/sink/psql/schema.sql
|
||||
```
|
||||
|
||||
## Default Indexes
|
||||
|
||||
|
||||
@@ -1,81 +0,0 @@
|
||||
---
|
||||
order: 1
|
||||
parent:
|
||||
order: false
|
||||
---
|
||||
|
||||
# Architecture Decision Records (ADR)
|
||||
|
||||
This is a location to record all high-level architecture decisions in the tendermint project.
|
||||
|
||||
You can read more about the ADR concept in this [blog post](https://product.reverb.com/documenting-architecture-decisions-the-reverb-way-a3563bb24bd0#.78xhdix6t).
|
||||
|
||||
An ADR should provide:
|
||||
|
||||
- Context on the relevant goals and the current state
|
||||
- Proposed changes to achieve the goals
|
||||
- Summary of pros and cons
|
||||
- References
|
||||
- Changelog
|
||||
|
||||
Note the distinction between an ADR and a spec. The ADR provides the context, intuition, reasoning, and
|
||||
justification for a change in architecture, or for the architecture of something
|
||||
new. The spec is a much more compressed and streamlined summary of everything as
|
||||
it stands today.
|
||||
|
||||
If recorded decisions turned out to be lacking, convene a discussion, record the new decisions here, and then modify the code to match.
|
||||
|
||||
Note the context/background should be written in the present tense.
|
||||
|
||||
### Table of Contents:
|
||||
|
||||
- [ADR-001-Logging](./adr-001-logging.md)
|
||||
- [ADR-002-Event-Subscription](./adr-002-event-subscription.md)
|
||||
- [ADR-003-ABCI-APP-RPC](./adr-003-abci-app-rpc.md)
|
||||
- [ADR-004-Historical-Validators](./adr-004-historical-validators.md)
|
||||
- [ADR-005-Consensus-Params](./adr-005-consensus-params.md)
|
||||
- [ADR-006-Trust-Metric](./adr-006-trust-metric.md)
|
||||
- [ADR-007-Trust-Metric-Usage](./adr-007-trust-metric-usage.md)
|
||||
- [ADR-008-Priv-Validator](./adr-008-priv-validator.md)
|
||||
- [ADR-009-ABCI-Design](./adr-009-ABCI-design.md)
|
||||
- [ADR-010-Crypto-Changes](./adr-010-crypto-changes.md)
|
||||
- [ADR-011-Monitoring](./adr-011-monitoring.md)
|
||||
- [ADR-012-Peer-Transport](./adr-012-peer-transport.md)
|
||||
- [ADR-013-Symmetric-Crypto](./adr-013-symmetric-crypto.md)
|
||||
- [ADR-014-Secp-Malleability](./adr-014-secp-malleability.md)
|
||||
- [ADR-015-Crypto-Encoding](./adr-015-crypto-encoding.md)
|
||||
- [ADR-016-Protocol-Versions](./adr-016-protocol-versions.md)
|
||||
- [ADR-017-Chain-Versions](./adr-017-chain-versions.md)
|
||||
- [ADR-018-ABCI-Validators](./adr-018-ABCI-Validators.md)
|
||||
- [ADR-019-Multisigs](./adr-019-multisigs.md)
|
||||
- [ADR-020-Block-Size](./adr-020-block-size.md)
|
||||
- [ADR-021-ABCI-Events](./adr-021-abci-events.md)
|
||||
- [ADR-022-ABCI-Errors](./adr-022-abci-errors.md)
|
||||
- [ADR-023-ABCI-Propose-tx](./adr-023-ABCI-propose-tx.md)
|
||||
- [ADR-024-Sign-Bytes](./adr-024-sign-bytes.md)
|
||||
- [ADR-025-Commit](./adr-025-commit.md)
|
||||
- [ADR-026-General-Merkle-Proof](./adr-026-general-merkle-proof.md)
|
||||
- [ADR-028-libp2p](./adr-028-libp2p.md)
|
||||
- [ADR-029-Check-Tx-Consensus](./adr-029-check-tx-consensus.md)
|
||||
- [ADR-030-Consensus-Refactor](./adr-030-consensus-refactor.md)
|
||||
- [ADR-030-Changelog-structure](./adr-031-changelog.md)
|
||||
- [ADR-033-Pubsub](./adr-033-pubsub.md)
|
||||
- [ADR-034-Priv-Validator-File-Structure](./adr-034-priv-validator-file-structure.md)
|
||||
- [ADR-035-Documentation](./adr-035-documentation.md)
|
||||
- [ADR-037-Deliver-Block](./adr-037-deliver-block.md)
|
||||
- [ADR-038-non-zero-start-height](./adr-038-non-zero-start-height.md)
|
||||
- [ADR-039-Peer-Behaviour](./adr-039-peer-behaviour.md)
|
||||
- [ADR-041-Proposer-Selection-via-ABCI](./adr-041-proposer-selection-via-abci.md)
|
||||
- [ADR-043-Blockchain-RiRi-Org](./adr-043-blockchain-riri-org.md)
|
||||
- [ADR-044-Lite-Client-With-Weak-Subjectivity](./adr-044-lite-client-with-weak-subjectivity.md)
|
||||
- [ADR-045-ABCI-Evidence](./adr-045-abci-evidence.md)
|
||||
- [ADR-046-Light-Client-Implementation](./adr-046-light-client-implementation.md)
|
||||
- [ADR-047-Handling-Evidence-From-Light-Client](./adr-047-handling-evidence-from-light-client.md)
|
||||
- [ADR-051-Double-Signing-Risk-Reduction](./adr-051-double-signing-risk-reduction.md)
|
||||
- [ADR-052-Tendermint-Mode](./adr-052-tendermint-mode.md)
|
||||
- [ADR-053-State-Sync-Prototype](./adr-053-state-sync-prototype.md)
|
||||
- [ADR-054-crypto-encoding-2](./adr-054-crypto-encoding-2.md)
|
||||
- [ADR-055-protobuf-design](./adr-055-protobuf-design.md)
|
||||
- [ADR-056-proving-amnesia-attacks](./adr-056-proving-amnesia-attacks.md)
|
||||
- [ADR-057-RPC](./adr-057-RPC.md)
|
||||
- [ADR-058-event-hashing](./adr-058-event-hashing.md)
|
||||
@@ -1,216 +0,0 @@
|
||||
# ADR 1: Logging
|
||||
|
||||
## Context
|
||||
|
||||
The current logging system in Tendermint is very static and not flexible enough.
|
||||
|
||||
Issues: [358](https://github.com/tendermint/tendermint/issues/358), [375](https://github.com/tendermint/tendermint/issues/375).
|
||||
|
||||
What we want from the new system:
|
||||
|
||||
- per package dynamic log levels
|
||||
- dynamic logger setting (logger tied to the processing struct)
|
||||
- conventions
|
||||
- be more visually appealing
|
||||
|
||||
"dynamic" here means the ability to set smth in runtime.
|
||||
|
||||
## Decision
|
||||
|
||||
### 1) An interface
|
||||
|
||||
First, we will need an interface for all of our libraries (`tmlibs`, Tendermint, etc.). My personal preference is the go-kit `Logger` interface (see Appendix A), but that is too big a change. Plus we will still need levels.
|
||||
|
||||
```go
|
||||
# log.go
|
||||
type Logger interface {
|
||||
Debug(msg string, keyvals ...interface{}) error
|
||||
Info(msg string, keyvals ...interface{}) error
|
||||
Error(msg string, keyvals ...interface{}) error
|
||||
|
||||
With(keyvals ...interface{}) Logger
|
||||
}
|
||||
```
|
||||
|
||||
On a side note: the difference between `Info` and `Notice` is subtle. We probably
|
||||
could do without `Notice`. I don't think we need `Panic` or `Fatal` as part of
|
||||
the interface. These funcs could be implemented as helpers. In fact, we already
|
||||
have some in `tmlibs/common`.
|
||||
|
||||
- `Debug` - extended output for devs
|
||||
- `Info` - all that is useful for a user
|
||||
- `Error` - errors
|
||||
|
||||
`Notice` should become `Info`, `Warn` either `Error` or `Debug` depending on the message, `Crit` -> `Error`.
|
||||
|
||||
This interface should go into `tmlibs/log`. All libraries which are part of the core (tendermint/tendermint) should obey it.
|
||||
|
||||
### 2) Logger with our current formatting
|
||||
|
||||
On top of this interface, we will need to implement a stdout logger, which will be used when Tendermint is configured to output logs to STDOUT.
|
||||
|
||||
Many people say that they like the current output, so let's stick with it.
|
||||
|
||||
```
|
||||
NOTE[2017-04-25|14:45:08] ABCI Replay Blocks module=consensus appHeight=0 storeHeight=0 stateHeight=0
|
||||
```
|
||||
|
||||
Couple of minor changes:
|
||||
|
||||
```
|
||||
I[2017-04-25|14:45:08.322] ABCI Replay Blocks module=consensus appHeight=0 storeHeight=0 stateHeight=0
|
||||
```
|
||||
|
||||
Notice that the level is encoded using only one character and the timestamp now includes milliseconds.
|
||||
|
||||
Note: there are many other formats out there like [logfmt](https://brandur.org/logfmt).
|
||||
|
||||
This logger could be implemented using any logger - [logrus](https://github.com/sirupsen/logrus), [go-kit/log](https://github.com/go-kit/kit/tree/master/log), [zap](https://github.com/uber-go/zap), log15 - so long as it
|
||||
|
||||
a) supports coloring output<br>
|
||||
b) is moderately fast (buffering) <br>
|
||||
c) conforms to the new interface or adapter could be written for it <br>
|
||||
d) is somewhat configurable<br>
|
||||
|
||||
go-kit is my favorite so far. Check out how easy it is to color errors in red https://github.com/go-kit/kit/blob/master/log/term/example_test.go#L12. Although, coloring can only be applied to the whole string :(
|
||||
|
||||
```
|
||||
go-kit +: flexible, modular
|
||||
go-kit “-”: logfmt format https://brandur.org/logfmt
|
||||
|
||||
logrus +: popular, feature rich (hooks), API and output is more like what we want
|
||||
logrus -: not so flexible
|
||||
```
|
||||
|
||||
```go
|
||||
# tm_logger.go
|
||||
// NewTmLogger returns a logger that encodes keyvals to the Writer in
|
||||
// tm format.
|
||||
func NewTmLogger(w io.Writer) Logger {
|
||||
return &tmLogger{kitlog.NewLogfmtLogger(w)}
|
||||
}
|
||||
|
||||
func (l tmLogger) SetLevel(level string) {
|
||||
switch level {
|
||||
case "debug":
|
||||
l.sourceLogger = level.NewFilter(l.sourceLogger, level.AllowDebug())
|
||||
}
|
||||
}
|
||||
|
||||
func (l tmLogger) Info(msg string, keyvals ...interface{}) error {
|
||||
return l.sourceLogger.Log("msg", msg, keyvals...)
|
||||
}
|
||||
|
||||
# log.go
|
||||
func With(logger Logger, keyvals ...interface{}) Logger {
|
||||
return kitlog.With(logger.sourceLogger, keyvals...)
|
||||
}
|
||||
```
|
||||
|
||||
Usage:
|
||||
|
||||
```go
|
||||
logger := log.NewTmLogger(os.Stdout)
|
||||
logger.SetLevel(config.GetString("log_level"))
|
||||
node.SetLogger(log.With(logger, "node", Name))
|
||||
```
|
||||
|
||||
**Other log formatters**
|
||||
|
||||
In the future, we may want other formatters like JSONFormatter.
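As a rough, illustrative sketch (not part of this proposal), such a formatter could be built directly on go-kit's `NewJSONLogger`:

```go
package main

import (
	"os"

	kitlog "github.com/go-kit/kit/log"
)

func main() {
	// Every Log call is emitted as one JSON object.
	logger := kitlog.NewJSONLogger(kitlog.NewSyncWriter(os.Stdout))
	logger = kitlog.With(logger, "module", "consensus")
	_ = logger.Log("level", "notice", "msg", "ABCI Replay Blocks",
		"appHeight", 0, "storeHeight", 0, "stateHeight", 0)
}
```

Its output would look roughly like: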
|
||||
|
||||
```
|
||||
{ "level": "notice", "time": "2017-04-25 14:45:08.562471297 -0400 EDT", "module": "consensus", "msg": "ABCI Replay Blocks", "appHeight": 0, "storeHeight": 0, "stateHeight": 0 }
|
||||
```
|
||||
|
||||
### 3) Dynamic logger setting
|
||||
|
||||
https://dave.cheney.net/2017/01/23/the-package-level-logger-anti-pattern
|
||||
|
||||
This is the hardest part and where the most work will be done. The logger should be tied to the processing struct, or to the context if it adds some fields to the logger.
|
||||
|
||||
```go
|
||||
type BaseService struct {
|
||||
log log15.Logger
|
||||
name string
|
||||
started uint32 // atomic
|
||||
stopped uint32 // atomic
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
BaseService already contains a `log` field, so most of the structs embedding it should be fine. We should rename it to `logger`.
|
||||
|
||||
The only thing missing is the ability to set logger:
|
||||
|
||||
```
|
||||
func (bs *BaseService) SetLogger(l log.Logger) {
|
||||
bs.logger = l
|
||||
}
|
||||
```
|
||||
|
||||
### 4) Conventions
|
||||
|
||||
Important keyvals should go first. Example:
|
||||
|
||||
```
|
||||
correct
|
||||
I[2017-04-25|14:45:08.322] ABCI Replay Blocks module=consensus instance=1 appHeight=0 storeHeight=0 stateHeight=0
|
||||
```
|
||||
|
||||
not
|
||||
|
||||
```
|
||||
wrong
|
||||
I[2017-04-25|14:45:08.322] ABCI Replay Blocks module=consensus appHeight=0 storeHeight=0 stateHeight=0 instance=1
|
||||
```
|
||||
|
||||
For that, in most cases, you'll need to add the `instance` field to a logger when creating it, not when you log a particular message:
|
||||
|
||||
```go
|
||||
colorFn := func(keyvals ...interface{}) term.FgBgColor {
|
||||
for i := 1; i < len(keyvals); i += 2 {
|
||||
if keyvals[i] == "instance" && keyvals[i+1] == "1" {
|
||||
return term.FgBgColor{Fg: term.Blue}
|
||||
} else if keyvals[i] == "instance" && keyvals[i+1] == "2" {
|
||||
return term.FgBgColor{Fg: term.Red}
|
||||
}
|
||||
}
|
||||
return term.FgBgColor{}
|
||||
}
|
||||
logger := term.NewLogger(os.Stdout, log.NewTmLogger, colorFn)
|
||||
|
||||
c1 := NewConsensusReactor(...)
|
||||
c1.SetLogger(log.With(logger, "instance", 1))
|
||||
|
||||
c2 := NewConsensusReactor(...)
|
||||
c2.SetLogger(log.With(logger, "instance", 2))
|
||||
```
|
||||
|
||||
## Status
|
||||
|
||||
proposed
|
||||
|
||||
## Consequences
|
||||
|
||||
### Positive
|
||||
|
||||
Dynamic logger, which could be turned off for some modules at runtime. Public interface for other projects using Tendermint libraries.
|
||||
|
||||
### Negative
|
||||
|
||||
We may lose the ability to color keys in key-value pairs. go-kit allows you to easily change the foreground / background colors of the whole string, but not its parts.
|
||||
|
||||
### Neutral
|
||||
|
||||
## Appendix A.
|
||||
|
||||
I really like the minimalistic approach go-kit took with its logger https://github.com/go-kit/kit/tree/master/log:
|
||||
|
||||
```
|
||||
type Logger interface {
|
||||
Log(keyvals ...interface{}) error
|
||||
}
|
||||
```
|
||||
|
||||
See [The Hunt for a Logger Interface](https://go-talks.appspot.com/github.com/ChrisHines/talks/structured-logging/structured-logging.slide). The advantage is greater composability (check out how go-kit defines colored logging or log-leveled logging on top of this interface https://github.com/go-kit/kit/tree/master/log).
|
||||
@@ -1,88 +0,0 @@
|
||||
# ADR 2: Event Subscription
|
||||
|
||||
## Context
|
||||
|
||||
In the light client (or any other client), the user may want to **subscribe to
|
||||
a subset of transactions** (rather than all of them) using `/subscribe?event=X`. For
|
||||
example, I want to subscribe for all transactions associated with a particular
|
||||
account. Same for fetching. The user may want to **fetch transactions based on
|
||||
some filter** (rather than fetching all the blocks). For example, I want to get
|
||||
all transactions for a particular account in the last two weeks (`tx's block time >= '2017-06-05'`).
|
||||
|
||||
Now you can't even subscribe to "all txs" in Tendermint.
|
||||
|
||||
The goal is a simple and easy to use API for doing that.
|
||||
|
||||

|
||||
|
||||
## Decision
|
||||
|
||||
The ABCI app returns tags with a `DeliverTx` response inside the `data` field (_for
|
||||
now, later we may create a separate field_). Tags are a list of key-value pairs,
|
||||
protobuf encoded.
|
||||
|
||||
Example data:
|
||||
|
||||
```json
|
||||
{
|
||||
"abci.account.name": "Igor",
|
||||
"abci.account.address": "0xdeadbeef",
|
||||
"tx.gas": 7
|
||||
}
|
||||
```
|
||||
|
||||
### Subscribing for transactions events
|
||||
|
||||
If the user wants to receive only a subset of transactions, the ABCI app must
|
||||
return a list of tags with a `DeliverTx` response. These tags will be parsed and
|
||||
matched with the current queries (subscribers). If the query matches the tags,
|
||||
the subscriber will get the transaction event.
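A deliberately simplified sketch of that matching step is shown below; the helper is hypothetical, and the real implementation would live in the new events package and support the full query language (`CONTAINS`, comparisons, and so on):

```go
package main

import "fmt"

// matchQuery treats a query as a set of exact key = value conditions and
// checks them against the tags returned with DeliverTx.
func matchQuery(conditions, tags map[string]string) bool {
	for k, want := range conditions {
		if got, ok := tags[k]; !ok || got != want {
			return false
		}
	}
	return true
}

func main() {
	tags := map[string]string{
		"abci.account.name":    "Igor",
		"abci.account.address": "0xdeadbeef",
		"tx.gas":               "7",
	}
	// A subscriber whose query requires abci.account.name = Igor gets this tx event.
	fmt.Println(matchQuery(map[string]string{"abci.account.name": "Igor"}, tags))
}
```

A subscription that would feed such a matcher looks like: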
|
||||
|
||||
```
|
||||
/subscribe?query="tm.event = Tx AND tx.hash = AB0023433CF0334223212243BDD AND abci.account.invoice.number = 22"
|
||||
```
|
||||
|
||||
A new package must be developed to replace the current `events` package. It
|
||||
will allow clients to subscribe to different types of events in the future:
|
||||
|
||||
```
|
||||
/subscribe?query="abci.account.invoice.number = 22"
|
||||
/subscribe?query="abci.account.invoice.owner CONTAINS Igor"
|
||||
```
|
||||
|
||||
### Fetching transactions
|
||||
|
||||
This is a bit tricky because a) we want to support a number of indexers, all of
|
||||
which have a different API, and b) we don't know whether tags will be sufficient
|
||||
for most apps (I guess we'll see).
|
||||
|
||||
```
|
||||
/txs/search?query="tx.hash = AB0023433CF0334223212243BDD AND abci.account.owner CONTAINS Igor"
|
||||
/txs/search?query="abci.account.owner = Igor"
|
||||
```
|
||||
|
||||
For historic queries we will need an indexing storage (Postgres, SQLite, ...).
|
||||
|
||||
### Issues
|
||||
|
||||
- https://github.com/tendermint/tendermint/issues/376
|
||||
- https://github.com/tendermint/tendermint/issues/287
|
||||
- https://github.com/tendermint/tendermint/issues/525 (related)
|
||||
|
||||
## Status
|
||||
|
||||
proposed
|
||||
|
||||
## Consequences
|
||||
|
||||
### Positive
|
||||
|
||||
- same format for event notifications and search APIs
|
||||
- powerful enough query
|
||||
|
||||
### Negative
|
||||
|
||||
- performance of the `match` function (where we have too many queries / subscribers)
|
||||
- there is an issue where there are too many txs in the DB
|
||||
|
||||
### Neutral
|
||||
@@ -1,34 +0,0 @@
|
||||
# ADR 3: Must an ABCI-app have an RPC server?
|
||||
|
||||
## Context
|
||||
|
||||
ABCI-server could expose its own RPC-server and act as a proxy to Tendermint.
|
||||
|
||||
The idea was for the Tendermint RPC to just be a transparent proxy to the app.
|
||||
Clients need to talk to Tendermint for proofs, unless we burden all app devs
|
||||
with exposing Tendermint proof stuff. Also seems less complex to lock down one
|
||||
server than two, but granted it makes querying a bit more kludgy since it needs
|
||||
to be passed as a `Query`. Also, **having a very standard rpc interface means
|
||||
the light-client can work with all apps and handle proofs**. The only
|
||||
app-specific logic is decoding the binary data to a more readable form (eg.
|
||||
json). This is a huge advantage for code-reuse and standardization.
|
||||
|
||||
## Decision
|
||||
|
||||
We don't expose an RPC server on any of our ABCI apps.
|
||||
|
||||
## Status
|
||||
|
||||
accepted
|
||||
|
||||
## Consequences
|
||||
|
||||
### Positive
|
||||
|
||||
- Unified interface for all apps
|
||||
|
||||
### Negative
|
||||
|
||||
- `Query` interface
|
||||
|
||||
### Neutral
|
||||
@@ -1,38 +0,0 @@
|
||||
# ADR 004: Historical Validators
|
||||
|
||||
## Context
|
||||
|
||||
Right now, we can query the present validator set, but there is no history.
|
||||
If you were offline for a long time, there is no way to reconstruct past validators. This is needed for the light client, and we agreed it requires an enhancement of the API.
|
||||
|
||||
## Decision
|
||||
|
||||
For every block, store a new structure that contains either the latest validator set,
|
||||
or the height of the last block for which the validator set changed. Note this is not
|
||||
the height of the block which returned the validator set change itself, but the next block,
|
||||
i.e. the first block it comes into effect for.
|
||||
|
||||
Storing the validators will be handled by the `state` package.
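A minimal sketch of what such a structure could look like (the names are illustrative, not the actual `state` package API): each height stores either the full validator set or only the height at which it last changed.

```go
package state

// ValidatorSet stands in for the real types.ValidatorSet.
type ValidatorSet struct {
	Validators []string
}

// validatorsInfo is the per-height record: Set is non-nil only at heights
// where the validator set changed; otherwise LastHeightChanged points back
// to the height that holds the full set.
type validatorsInfo struct {
	Set               *ValidatorSet
	LastHeightChanged int64
}

// loadValidators resolves the indirection for a given height.
func loadValidators(store map[int64]validatorsInfo, height int64) (*ValidatorSet, bool) {
	info, ok := store[height]
	if !ok {
		return nil, false
	}
	if info.Set != nil {
		return info.Set, true
	}
	info, ok = store[info.LastHeightChanged]
	if !ok || info.Set == nil {
		return nil, false
	}
	return info.Set, true
}
```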
|
||||
|
||||
At some point in the future, we may consider more efficient storage in the case where the validators
|
||||
are updated frequently - for instance by only saving the diffs, rather than the whole set.
|
||||
|
||||
An alternative approach suggested keeping the validator set, or diffs of it, in a merkle IAVL tree.
|
||||
While it might afford cheaper proofs that a validator set has not changed, it would be more complex,
|
||||
and likely less efficient.
|
||||
|
||||
## Status
|
||||
|
||||
Accepted.
|
||||
|
||||
## Consequences
|
||||
|
||||
### Positive
|
||||
|
||||
- Can query old validator sets, with proof.
|
||||
|
||||
### Negative
|
||||
|
||||
- Writes an extra structure to disk with every block.
|
||||
|
||||
### Neutral
|
||||
@@ -1,85 +0,0 @@
|
||||
# ADR 005: Consensus Params
|
||||
|
||||
## Context
|
||||
|
||||
Consensus critical parameters controlling blockchain capacity have until now been hard coded, loaded from a local config, or neglected.
|
||||
Since they may need to be different in different networks, and potentially to evolve over time within
|
||||
networks, we seek to initialize them in a genesis file, and expose them through the ABCI.
|
||||
|
||||
While we have some specific parameters now, like maximum block and transaction size, we expect to have more in the future,
|
||||
such as a period over which evidence is valid, or the frequency of checkpoints.
|
||||
|
||||
## Decision
|
||||
|
||||
### ConsensusParams
|
||||
|
||||
No consensus critical parameters should ever be found in the `config.toml`.
|
||||
|
||||
A new `ConsensusParams` is optionally included in the `genesis.json` file,
|
||||
and loaded into the `State`. Any items not included are set to their default value.
|
||||
A value of 0 is undefined (see ABCI, below). A value of -1 is used to indicate the parameter does not apply.
|
||||
The parameters are used to determine the validity of a block (and tx) via the union of all relevant parameters.
|
||||
|
||||
```
|
||||
type ConsensusParams struct {
|
||||
BlockSize
|
||||
TxSize
|
||||
BlockGossip
|
||||
}
|
||||
|
||||
type BlockSize struct {
|
||||
MaxBytes int
|
||||
MaxTxs int
|
||||
MaxGas int
|
||||
}
|
||||
|
||||
type TxSize struct {
|
||||
MaxBytes int
|
||||
MaxGas int
|
||||
}
|
||||
|
||||
type BlockGossip struct {
|
||||
BlockPartSizeBytes int
|
||||
}
|
||||
```
|
||||
|
||||
The `ConsensusParams` can evolve over time by adding new structs that cover different aspects of the consensus rules.
|
||||
|
||||
The `BlockPartSizeBytes` and the `BlockSize.MaxBytes` are enforced to be greater than 0.
|
||||
The former because we need a part size, the latter so that we always have at least some sanity check over the size of blocks.
|
||||
|
||||
### ABCI
|
||||
|
||||
#### InitChain
|
||||
|
||||
InitChain currently takes the initial validator set. It should be extended to also take parts of the ConsensusParams.
|
||||
There is some case to be made for it to take the entire Genesis, except there may be things in the genesis,
|
||||
like the BlockPartSize, that the app shouldn't really know about.
|
||||
|
||||
#### EndBlock
|
||||
|
||||
The EndBlock response includes a `ConsensusParams`, which includes BlockSize and TxSize, but not BlockGossip.
|
||||
Other param struct can be added to `ConsensusParams` in the future.
|
||||
The `0` value is used to denote no change.
|
||||
Any other value will update that parameter in the `State.ConsensusParams`, to be applied for the next block.
|
||||
Tendermint should have hard-coded upper limits as sanity checks.
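A small sketch of the "0 means no change" rule follows; the helper name is hypothetical and only `BlockSize` is shown:

```go
package consensus

// BlockSize and ConsensusParams mirror the structs sketched above.
type BlockSize struct {
	MaxBytes int
	MaxTxs   int
	MaxGas   int
}

type ConsensusParams struct {
	BlockSize BlockSize
}

// applyUpdates copies cur and overwrites only the fields that the EndBlock
// response set to a non-zero value; zero fields leave the current value as is.
func applyUpdates(cur, upd ConsensusParams) ConsensusParams {
	res := cur
	if upd.BlockSize.MaxBytes != 0 {
		res.BlockSize.MaxBytes = upd.BlockSize.MaxBytes
	}
	if upd.BlockSize.MaxTxs != 0 {
		res.BlockSize.MaxTxs = upd.BlockSize.MaxTxs
	}
	if upd.BlockSize.MaxGas != 0 {
		res.BlockSize.MaxGas = upd.BlockSize.MaxGas
	}
	return res
}
```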
|
||||
|
||||
## Status
|
||||
|
||||
Proposed.
|
||||
|
||||
## Consequences
|
||||
|
||||
### Positive
|
||||
|
||||
- Alternative capacity limits and consensus parameters can be specified without re-compiling the software.
|
||||
- They can also change over time under the control of the application
|
||||
|
||||
### Negative
|
||||
|
||||
- More exposed parameters is more complexity
|
||||
- Different rules at different heights in the blockchain complicates fast sync
|
||||
|
||||
### Neutral
|
||||
|
||||
- The TxSize, which checks validity, may be in conflict with the config's `max_block_size_tx`, which determines proposal sizes
|
||||
@@ -1,229 +0,0 @@
|
||||
# ADR 006: Trust Metric Design
|
||||
|
||||
## Context
|
||||
|
||||
The proposed trust metric will allow Tendermint to maintain local trust rankings for peers it has directly interacted with, which can then be used to implement soft security controls. The calculations were obtained from the [TrustGuard](https://dl.acm.org/citation.cfm?id=1060808) project.
|
||||
|
||||
### Background
|
||||
|
||||
The Tendermint Core project developers would like to improve Tendermint security and reliability by keeping track of the level of trustworthiness peers have demonstrated within the peer-to-peer network. This way, undesirable outcomes from peers will not immediately result in them being dropped from the network (potentially causing drastic changes to take place). Instead, peers' behavior can be monitored with appropriate metrics, and they can be removed from the network once Tendermint Core is certain the peer is a threat. For example, when the PEXReactor makes a request for peers' network addresses from an already known peer, and the returned network addresses are unreachable, this untrustworthy behavior should be tracked. Returning a few bad network addresses probably shouldn't cause a peer to be dropped, while excessive amounts of this behavior do qualify the peer for being dropped.
|
||||
|
||||
Trust metrics can be circumvented by malicious nodes through the use of strategic oscillation techniques, which adapts the malicious node’s behavior pattern in order to maximize its goals. For instance, if the malicious node learns that the time interval of the Tendermint trust metric is _X_ hours, then it could wait _X_ hours in-between malicious activities. We could try to combat this issue by increasing the interval length, yet this will make the system less adaptive to recent events.
|
||||
|
||||
Instead, having shorter intervals, but keeping a history of interval values, will give our metric the flexibility needed in order to keep the network stable, while also making it resilient against a strategic malicious node in the Tendermint peer-to-peer network. Also, the metric can access trust data over a rather long period of time while not greatly increasing its history size by aggregating older history values over a larger number of intervals, and at the same time, maintain great precision for the recent intervals. This approach is referred to as fading memories, and closely resembles the way human beings remember their experiences. The trade-off to using history data is that the interval values should be preserved in-between executions of the node.
|
||||
|
||||
### References
|
||||
|
||||
S. Mudhakar, L. Xiong, and L. Liu, “TrustGuard: Countering Vulnerabilities in Reputation Management for Decentralized Overlay Networks,” in _Proceedings of the 14th international conference on World Wide Web, pp. 422-431_, May 2005.
|
||||
|
||||
## Decision
|
||||
|
||||
The proposed trust metric will allow a developer to inform the trust metric store of all good and bad events relevant to a peer's behavior, and at any time, the metric can be queried for a peer's current trust ranking.
|
||||
|
||||
The three subsections below will cover the process being considered for calculating the trust ranking, the concept of the trust metric store, and the interface for the trust metric.
|
||||
|
||||
### Proposed Process
|
||||
|
||||
The proposed trust metric will count good and bad events relevant to the object, and calculate the percent of counters that are good over an interval with a predefined duration. This is the procedure that will continue for the life of the trust metric. When the trust metric is queried for the current **trust value**, a resilient equation will be utilized to perform the calculation.
|
||||
|
||||
The equation being proposed resembles a Proportional-Integral-Derivative (PID) controller used in control systems. The proportional component allows us to be sensitive to the value of the most recent interval, while the integral component allows us to incorporate trust values stored in the history data, and the derivative component allows us to give weight to sudden changes in the behavior of a peer. We compute the trust value of a peer in interval i based on its current trust ranking, its trust rating history prior to interval _i_ (over the past _maxH_ number of intervals) and its trust ranking fluctuation. We will break up the equation into the three components.
|
||||
|
||||
```math
|
||||
(1) Proportional Value = a * R[i]
|
||||
```
|
||||
|
||||
where _R_[*i*] denotes the raw trust value at time interval _i_ (where _i_ == 0 is the current time) and _a_ is the weight applied to the contribution of the current reports. The next component of our equation uses a weighted sum over the last _maxH_ intervals to calculate the history value for time _i_:
|
||||
|
||||
`H[i] = (Σ_{k=1..maxH} W_k · R[i+k]) / (Σ_{k=1..maxH} W_k)`
|
||||
|
||||
The weights can be chosen either optimistically or pessimistically. An optimistic weight creates larger weights for newer history data values, while the pessimistic weight creates larger weights for time intervals with lower scores. The default weights used during the calculation of the history value are optimistic and calculated as _Wk_ = 0.8^_k_, for time interval _k_. With the history value available, we can now finish calculating the integral value:
|
||||
|
||||
```math
|
||||
(2) Integral Value = b * H[i]
|
||||
```
|
||||
|
||||
Where _H_[*i*] denotes the history value at time interval _i_ and _b_ is the weight applied to the contribution of past performance for the object being measured. The derivative component will be calculated as follows:
|
||||
|
||||
```math
|
||||
D[i] = R[i] – H[i]
|
||||
|
||||
(3) Derivative Value = c(D[i]) * D[i]
|
||||
```
|
||||
|
||||
Where the value of _c_ is selected based on the _D_[*i*] value relative to zero. The default selection process makes _c_ equal to 0 unless _D_[*i*] is a negative value, in which case c is equal to 1. The result is that the maximum penalty is applied when current behavior is lower than previously experienced behavior. If the current behavior is better than the previously experienced behavior, then the Derivative Value has no impact on the trust value. With the three components brought together, our trust value equation is calculated as follows:
|
||||
|
||||
```math
|
||||
TrustValue[i] = a * R[i] + b * H[i] + c(D[i]) * D[i]
|
||||
```
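As a small sketch of the equation above (illustrative only, not the actual `TrustMetric` implementation):

```go
package trust

// trustValue combines the three components: r is the raw value R[i] for the
// current interval, h is the history value H[i], and a, b are the
// proportional and integral weights. The derivative weight c is 1 only when
// current behavior is worse than history (d < 0), so only drops are penalized.
func trustValue(a, b, r, h float64) float64 {
	d := r - h
	c := 0.0
	if d < 0 {
		c = 1
	}
	return a*r + b*h + c*d
}
```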
|
||||
|
||||
As a performance optimization that will keep the amount of raw interval data being saved to a reasonable size of _m_, while allowing us to represent 2^_m_ - 1 history intervals, we can employ the fading memories technique that will trade space and time complexity for the precision of the history data values by summarizing larger quantities of less recent values. While our equation above attempts to access up to _maxH_ (which can be 2^_m_ - 1), we will map those requests down to _m_ values using equation 4 below:
|
||||
|
||||
```math
|
||||
(4) j = ⌊log2(index)⌋, where index > 0
|
||||
```
|
||||
|
||||
Where _j_ is one of _(0, 1, 2, … , m – 1)_ indices used to access history interval data. Now we can access the raw intervals using the following calculations:
|
||||
|
||||
```math
|
||||
R[0] = raw data for current time interval
|
||||
```
|
||||
|
||||
`R[j]` = the average of the raw interval data over the `2^j` older intervals summarized by slot _j_ (for _j_ > 0)
|
||||
|
||||
### Trust Metric Store
|
||||
|
||||
Similar to the P2P subsystem AddrBook, the trust metric store will maintain information relevant to Tendermint peers. Additionally, the trust metric store will ensure that trust metrics will only be active for peers that a node is currently and directly engaged with.
|
||||
|
||||
Reactors will provide a peer key to the trust metric store in order to retrieve the associated trust metric. The trust metric can then record new positive and negative events experienced by the reactor, as well as provide the current trust score calculated by the metric.
|
||||
|
||||
When the node is shutting down, the trust metric store will save history data for trust metrics associated with all known peers. This saved information allows experiences with a peer to be preserved across node executions, which can span a tracking window of days or weeks. The trust history data is loaded automatically during OnStart.
|
||||
|
||||
### Interface Detailed Design
|
||||
|
||||
Each trust metric allows for the recording of positive/negative events, querying the current trust value/score, and the stopping/pausing of tracking over time intervals. This can be seen below:
|
||||
|
||||
```go
|
||||
// TrustMetric - keeps track of peer reliability
|
||||
type TrustMetric struct {
|
||||
// Private elements.
|
||||
}
|
||||
|
||||
// Pause tells the metric to pause recording data over time intervals.
|
||||
// All method calls that indicate events will unpause the metric
|
||||
func (tm *TrustMetric) Pause() {}
|
||||
|
||||
// Stop tells the metric to stop recording data over time intervals
|
||||
func (tm *TrustMetric) Stop() {}
|
||||
|
||||
// BadEvents indicates that an undesirable event(s) took place
|
||||
func (tm *TrustMetric) BadEvents(num int) {}
|
||||
|
||||
// GoodEvents indicates that a desirable event(s) took place
|
||||
func (tm *TrustMetric) GoodEvents(num int) {}
|
||||
|
||||
// TrustValue gets the dependable trust value; always between 0 and 1
|
||||
func (tm *TrustMetric) TrustValue() float64 {}
|
||||
|
||||
// TrustScore gets a score based on the trust value always between 0 and 100
|
||||
func (tm *TrustMetric) TrustScore() int {}
|
||||
|
||||
// NewMetric returns a trust metric with the default configuration
|
||||
func NewMetric() *TrustMetric {}
|
||||
|
||||
//------------------------------------------------------------------------------------------------
|
||||
// For example
|
||||
|
||||
tm := NewMetric()
|
||||
|
||||
tm.BadEvents(1)
|
||||
score := tm.TrustScore()
|
||||
|
||||
tm.Stop()
|
||||
```
|
||||
|
||||
Some of the trust metric parameters can be configured. The weight values should probably be left alone in most cases, yet the time durations for the tracking window and individual time interval should be considered.
|
||||
|
||||
```go
|
||||
// TrustMetricConfig - Configures the weight functions and time intervals for the metric
|
||||
type TrustMetricConfig struct {
|
||||
// Determines the percentage given to current behavior
|
||||
ProportionalWeight float64
|
||||
|
||||
// Determines the percentage given to prior behavior
|
||||
IntegralWeight float64
|
||||
|
||||
// The window of time that the trust metric will track events across.
|
||||
// This can be set to cover many days without issue
|
||||
TrackingWindow time.Duration
|
||||
|
||||
// Each interval should be short for adaptability.
|
||||
// Less than 30 seconds is too sensitive,
|
||||
// and greater than 5 minutes will make the metric numb
|
||||
IntervalLength time.Duration
|
||||
}
|
||||
|
||||
// DefaultConfig returns a config with values that have been tested and produce desirable results
|
||||
func DefaultConfig() TrustMetricConfig {}
|
||||
|
||||
// NewMetricWithConfig returns a trust metric with a custom configuration
|
||||
func NewMetricWithConfig(tmc TrustMetricConfig) *TrustMetric {}
|
||||
|
||||
//------------------------------------------------------------------------------------------------
|
||||
// For example
|
||||
|
||||
config := TrustMetricConfig{
|
||||
TrackingWindow: time.Minute * 60 * 24, // one day
|
||||
IntervalLength: time.Minute * 2,
|
||||
}
|
||||
|
||||
tm := NewMetricWithConfig(config)
|
||||
|
||||
tm.BadEvents(10)
|
||||
tm.Pause()
|
||||
tm.GoodEvents(1) // becomes active again
|
||||
```
|
||||
|
||||
A trust metric store should be created with a DB that has persistent storage so it can save history data across node executions. All trust metrics instantiated by the store will be created with the provided TrustMetricConfig configuration.
|
||||
|
||||
When you attempt to fetch the trust metric for a peer, and an entry does not exist in the trust metric store, a new metric is automatically created and the entry made within the store.
|
||||
|
||||
In addition to the fetching method, GetPeerTrustMetric, the trust metric store provides a method to call when a peer has disconnected from the node. This is so the metric can be paused (history data will not be saved) for periods of time when the node is not having direct experiences with the peer.
|
||||
|
||||
```go
|
||||
// TrustMetricStore - Manages all trust metrics for peers
|
||||
type TrustMetricStore struct {
|
||||
cmn.BaseService
|
||||
|
||||
// Private elements
|
||||
}
|
||||
|
||||
// OnStart implements Service
|
||||
func (tms *TrustMetricStore) OnStart() error {}
|
||||
|
||||
// OnStop implements Service
|
||||
func (tms *TrustMetricStore) OnStop() {}
|
||||
|
||||
// NewTrustMetricStore returns a store that saves data to the DB
|
||||
// and uses the config when creating new trust metrics
|
||||
func NewTrustMetricStore(db dbm.DB, tmc TrustMetricConfig) *TrustMetricStore {}
|
||||
|
||||
// Size returns the number of entries in the trust metric store
|
||||
func (tms *TrustMetricStore) Size() int {}
|
||||
|
||||
// GetPeerTrustMetric returns a trust metric by peer key
|
||||
func (tms *TrustMetricStore) GetPeerTrustMetric(key string) *TrustMetric {}
|
||||
|
||||
// PeerDisconnected pauses the trust metric associated with the peer identified by the key
|
||||
func (tms *TrustMetricStore) PeerDisconnected(key string) {}
|
||||
|
||||
//------------------------------------------------------------------------------------------------
|
||||
// For example
|
||||
|
||||
db := dbm.NewDB("trusthistory", "goleveldb", dirPathStr)
|
||||
tms := NewTrustMetricStore(db, DefaultConfig())
|
||||
|
||||
tm := tms.GetPeerTrustMetric(key)
|
||||
tm.BadEvents(1)
|
||||
|
||||
tms.PeerDisconnected(key)
|
||||
```
|
||||
|
||||
## Status
|
||||
|
||||
Approved.
|
||||
|
||||
## Consequences
|
||||
|
||||
### Positive
|
||||
|
||||
- The trust metric will allow Tendermint to make non-binary security and reliability decisions
|
||||
- Will help Tendermint implement deterrents that provide soft security controls, yet avoid disruption on the network
|
||||
- Will provide useful profiling information when analyzing performance over time related to peer interaction
|
||||
|
||||
### Negative
|
||||
|
||||
- Requires saving the trust metric history data across node executions
|
||||
|
||||
### Neutral
|
||||
|
||||
- Keep in mind that good events need to be recorded just as bad events do with this implementation
|
||||
@@ -1,106 +0,0 @@
|
||||
# ADR 007: Trust Metric Usage Guide
|
||||
|
||||
## Context
|
||||
|
||||
Tendermint is required to monitor peer quality in order to inform its peer dialing and peer exchange strategies.
|
||||
|
||||
When a node first connects to the network, it is important that it can quickly find good peers.
|
||||
Thus, while a node has fewer connections, it should prioritize connecting to higher quality peers.
|
||||
As the node becomes well connected to the rest of the network, it can dial lesser known or lower
|
||||
quality peers and help assess their quality. Similarly, when queried for peers, a node should make
|
||||
sure it doesn't return low quality peers.
|
||||
|
||||
Peer quality can be tracked using a trust metric that flags certain behaviours as good or bad. When enough
|
||||
bad behaviour accumulates, we can mark the peer as bad and disconnect.
|
||||
For example, when the PEXReactor makes a request for peers network addresses from an already known peer, and the returned network addresses are unreachable, this undesirable behavior should be tracked. Returning a few bad network addresses probably shouldn’t cause a peer to be dropped, while excessive amounts of this behavior does qualify the peer for removal. The originally proposed approach and design document for the trust metric can be found in the [ADR 006](adr-006-trust-metric.md) document.
|
||||
|
||||
The trust metric implementation allows a developer to obtain a peer's trust metric from a trust metric store, and track good and bad events relevant to a peer's behavior, and at any time, the peer's metric can be queried for a current trust value. The current trust value is calculated with a formula that utilizes current behavior, previous behavior, and change between the two. Current behavior is calculated as the percentage of good behavior within a time interval. The time interval is short; probably set between 30 seconds and 5 minutes. On the other hand, the historic data can estimate a peer's behavior over days worth of tracking. At the end of a time interval, the current behavior becomes part of the historic data, and a new time interval begins with the good and bad counters reset to zero.
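To make that combination concrete, here is a minimal, illustrative sketch of a weighted trust-value calculation in the spirit described above. The function name, the handling of the derivative term, and the clamping are assumptions for this sketch, not the actual implementation.

```go
// Illustrative only: combine current behavior, historic behavior, and the
// change between them using the proportional/integral weights from ADR 006.
// The derivative weight and the clamping to [0, 1] are assumptions.
func trustValue(current, history, derivative, propWeight, intWeight, derivWeight float64) float64 {
	v := propWeight*current + intWeight*history + derivWeight*derivative
	if v < 0 {
		v = 0
	} else if v > 1 {
		v = 1
	}
	return v
}
```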
|
||||
|
||||
These are some important things to keep in mind regarding how the trust metrics handle time intervals and scoring:
|
||||
|
||||
- Each new time interval begins with a perfect score
|
||||
- Bad events quickly bring the score down and good events cause the score to slowly rise
|
||||
- When the time interval is over, the percentage of good events becomes historic data.
|
||||
|
||||
Some useful information about the inner workings of the trust metric:
|
||||
|
||||
- When a trust metric is first instantiated, a timer (ticker) periodically fires in order to handle transitions between trust metric time intervals
|
||||
- If a peer is disconnected from a node, the timer should be paused, since the node is no longer connected to that peer
|
||||
- The ability to pause the metric is supported with the store **PeerDisconnected** method and the metric **Pause** method
|
||||
- After a pause, if a good or bad event method is called on a metric, it automatically becomes unpaused and begins a new time interval.
|
||||
|
||||
## Decision
|
||||
|
||||
The trust metric capability is now available, yet it still leaves the question of how it should be applied throughout Tendermint in order to properly track the quality of peers.
|
||||
|
||||
### Proposed Process
|
||||
|
||||
Peers are managed using an address book and a trust metric:
|
||||
|
||||
- The address book keeps a record of peers and provides selection methods
|
||||
- The trust metric tracks the quality of the peers
|
||||
|
||||
#### Presence in Address Book
|
||||
|
||||
Outbound peers are added to the address book before they are dialed,
|
||||
and inbound peers are added once the peer connection is set up.
|
||||
Peers are also added to the address book when they are received in response to
|
||||
a pexRequestMessage.
|
||||
|
||||
While a node has fewer than `needAddressThreshold` addresses, it will periodically request more,
|
||||
via pexRequestMessage, from randomly selected peers and from newly dialed outbound peers.
|
||||
|
||||
When a new address is added to an address book that has more than `0.5*needAddressThreshold` addresses,
|
||||
then with some low probability, a randomly chosen low quality peer is removed.
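A hedged sketch of that eviction rule follows; the address book type, `needAddressThreshold`, the eviction probability constant, and the helper methods are assumptions used only for illustration.

```go
import "math/rand"

// maybeEvictOnAdd is called after a new address is added. Names and the
// probability constant are illustrative, not the real address book API.
func (a *addrBook) maybeEvictOnAdd() {
	if a.Size() > needAddressThreshold/2 && rand.Float64() < lowQualityEvictProb {
		a.removeAddress(a.pickRandomLowQualityPeer())
	}
}
```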
|
||||
|
||||
#### Outbound Peers
|
||||
|
||||
Peers attempt to maintain a minimum number of outbound connections by
|
||||
repeatedly querying the address book for peers to connect to.
|
||||
While a node has few to no outbound connections, the address book is biased to return
|
||||
higher quality peers. As the node increases the number of outbound connections,
|
||||
the address book is biased to return less-vetted or lower-quality peers.
|
||||
|
||||
#### Inbound Peers
|
||||
|
||||
Peers also maintain a maximum number of total connections, MaxNumPeers.
|
||||
If a peer has MaxNumPeers, new incoming connections will be accepted with low probability.
|
||||
When such a new connection is accepted, the peer disconnects from a probabilistically chosen low ranking peer
|
||||
so it does not exceed MaxNumPeers.
|
||||
|
||||
#### Peer Exchange
|
||||
|
||||
When a peer receives a pexRequestMessage, it returns a random sample of high quality peers from the address book. Peers with no score or a low score should not be included in a response to pexRequestMessage.
|
||||
|
||||
#### Peer Quality
|
||||
|
||||
Peer quality is tracked in the connection and across the reactors by storing the TrustMetric in the peer's
|
||||
thread-safe Data store.
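As a rough illustration, assuming the peer exposes simple Get/Set accessors over its Data store and that the metric is stored under a "trust_metric" key (both assumptions for this sketch):

```go
// Store the metric once the peer connection is set up ...
peer.Set("trust_metric", tm)

// ... and record events from any reactor that holds the peer.
if v := peer.Get("trust_metric"); v != nil {
	v.(*trust.TrustMetric).GoodEvents(1)
}
```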
|
||||
|
||||
Peer behaviour is then defined as one of the following:
|
||||
|
||||
- Fatal - something outright malicious that causes us to disconnect the peer and ban it from the address book for some amount of time
|
||||
- Bad - Any kind of timeout, messages that don't unmarshal, fail other validity checks, or messages we didn't ask for or aren't expecting (usually worth one bad event)
|
||||
- Neutral - Unknown channels/message types/version upgrades (no good or bad events recorded)
|
||||
- Correct - Normal correct behavior (worth one good event)
|
||||
- Good - some random majority of peers per reactor sending us useful messages (worth more than one good event).
|
||||
|
||||
Note that Fatal behaviour causes us to remove the peer, and neutral behaviour does not affect the score.
|
||||
|
||||
## Status
|
||||
|
||||
Proposed.
|
||||
|
||||
## Consequences
|
||||
|
||||
### Positive
|
||||
|
||||
- Bringing the address book and trust metric store together will cause the network to be built in a way that encourages greater security and reliability.
|
||||
|
||||
### Negative
|
||||
|
||||
- TBD
|
||||
|
||||
### Neutral
|
||||
|
||||
- Keep in mind that good events need to be recorded just as bad events do with this implementation.
|
||||
@@ -1,35 +0,0 @@
|
||||
# ADR 008: SocketPV
|
||||
|
||||
Tendermint nodes should support only two in-process PrivValidator
|
||||
implementations:
|
||||
|
||||
- FilePV uses an unencrypted private key in a "priv_validator.json" file - no
|
||||
configuration required (just `tendermint init`).
|
||||
- TCPVal and IPCVal use TCP and Unix sockets respectively to send signing requests
|
||||
to another process - the user is responsible for starting that process themselves.
|
||||
|
||||
Both TCPVal and IPCVal addresses can be provided via flags at the command line
|
||||
or in the configuration file; TCPVal addresses must be of the form
|
||||
`tcp://<ip_address>:<port>` and IPCVal addresses `unix:///path/to/file.sock` -
|
||||
doing so will cause Tendermint to ignore any private validator files.
|
||||
|
||||
TCPVal will listen on the given address for incoming connections from an external
|
||||
private validator process. It will halt any operation until at least one external
|
||||
process has successfully connected.
|
||||
|
||||
The external priv_validator process will dial the address to connect to
|
||||
Tendermint, and then Tendermint will send requests on the ensuing connection to
|
||||
sign votes and proposals. Thus the external process initiates the connection,
|
||||
but the Tendermint process makes all requests. In a later stage we're going to
|
||||
support multiple validators for fault tolerance. To prevent double signing they
|
||||
need to be synced, which is deferred to an external solution (see #1185).
|
||||
|
||||
Conversely, IPCVal will make an outbound connection to an existing socket opened
|
||||
by the external validator process.
|
||||
|
||||
In addition, Tendermint will provide implementations that can be run in that
|
||||
external process. These include:
|
||||
|
||||
- FilePV will encrypt the private key, and the user must enter password to
|
||||
decrypt key when process is started.
|
||||
- LedgerPV uses a Ledger Nano S to handle all signing.
|
||||
@@ -1,271 +0,0 @@
|
||||
# ADR 009: ABCI UX Improvements
|
||||
|
||||
## Changelog
|
||||
|
||||
23-06-2018: Some minor fixes from review
|
||||
07-06-2018: Some updates based on discussion with Jae
|
||||
07-06-2018: Initial draft to match what was released in ABCI v0.11
|
||||
|
||||
## Context
|
||||
|
||||
The ABCI was first introduced in late 2015. Its purpose is to be:
|
||||
|
||||
- a generic interface between state machines and their replication engines
|
||||
- agnostic to the language the state machine is written in
|
||||
- agnostic to the replication engine that drives it
|
||||
|
||||
This means ABCI should provide an interface for both pluggable applications and
|
||||
pluggable consensus engines.
|
||||
|
||||
To achieve this, it uses Protocol Buffers (proto3) for message types. The dominant
|
||||
implementation is in Go.
|
||||
|
||||
After some recent discussions with the community on github, the following were
|
||||
identified as pain points:
|
||||
|
||||
- Amino encoded types
|
||||
- Managing validator sets
|
||||
- Imports in the protobuf file
|
||||
|
||||
See the [references](#references) for more.
|
||||
|
||||
### Imports
|
||||
|
||||
The native proto library in Go generates inflexible and verbose code.
|
||||
Many in the Go community have adopted a fork called
|
||||
[gogoproto](https://github.com/gogo/protobuf) that provides a
|
||||
variety of features aimed to improve the developer experience.
|
||||
While `gogoproto` is nice, it creates an additional dependency, and compiling
|
||||
the protobuf types for other languages has been reported to fail when `gogoproto` is used.
|
||||
|
||||
### Amino
|
||||
|
||||
Amino is an encoding protocol designed to improve over insufficiencies of protobuf.
|
||||
Its goal is to be proto4.
|
||||
|
||||
Many people are frustrated by incompatibility with protobuf,
|
||||
and with the requirement for Amino to be used at all within ABCI.
|
||||
|
||||
We intend to make Amino successful enough that we can eventually use it for ABCI
|
||||
message types directly. By then it should be called proto4. In the meantime,
|
||||
we want it to be easy to use.
|
||||
|
||||
### PubKey
|
||||
|
||||
PubKeys are encoded using Amino (and before that, go-wire).
|
||||
Ideally, PubKeys are an interface type where we don't know all the
|
||||
implementation types, so it's unfitting to use `oneof` or `enum`.
|
||||
|
||||
### Addresses
|
||||
|
||||
The address for ED25519 pubkey is the RIPEMD160 of the Amino
|
||||
encoded pubkey. This introduces an Amino dependency in the address generation,
|
||||
a functionality that is widely required and should be as easy to compute as
|
||||
possible.
|
||||
|
||||
### Validators
|
||||
|
||||
To change the validator set, applications can return a list of validator updates
|
||||
with ResponseEndBlock. In these updates, the public key _must_ be included,
|
||||
because Tendermint requires the public key to verify validator signatures. This
|
||||
means ABCI developers have to work with PubKeys. That said, it would also be
|
||||
convenient to work with address information, and for it to be simple to do so.
|
||||
|
||||
### AbsentValidators
|
||||
|
||||
Tendermint also provides a list of validators in BeginBlock who did not sign the
|
||||
last block. This allows applications to reflect availability behaviour in the
|
||||
application, for instance by punishing validators for not having votes included
|
||||
in commits.
|
||||
|
||||
### InitChain
|
||||
|
||||
Tendermint passes in a list of validators here, and nothing else. It would
|
||||
benefit the application to be able to control the initial validator set. For
|
||||
instance the genesis file could include application-based information about the
|
||||
initial validator set that the application could process to determine the
|
||||
initial validator set. Additionally, InitChain would benefit from getting all
|
||||
the genesis information.
|
||||
|
||||
### Header
|
||||
|
||||
ABCI provides the Header in RequestBeginBlock so the application can have
|
||||
important information about the latest state of the blockchain.
|
||||
|
||||
## Decision
|
||||
|
||||
### Imports
|
||||
|
||||
Move away from gogoproto. In the short term, we will just maintain a second
|
||||
protobuf file without the gogoproto annotations. In the medium term, we will
|
||||
make copies of all the structs in Golang and shuttle back and forth. In the long
|
||||
term, we will use Amino.
|
||||
|
||||
### Amino
|
||||
|
||||
To simplify ABCI application development in the short term,
|
||||
Amino will be completely removed from the ABCI:
|
||||
|
||||
- It will not be required for PubKey encoding
|
||||
- It will not be required for computing PubKey addresses
|
||||
|
||||
That said, we are working to make Amino a huge success, and to become proto4.
|
||||
To facilitate adoption and cross-language compatibility in the near-term, Amino
|
||||
v1 will:
|
||||
|
||||
- be fully compatible with the subset of proto3 that excludes `oneof`
|
||||
- use the Amino prefix system to provide interface types, as opposed to `oneof`
|
||||
style union types.
|
||||
|
||||
That said, an Amino v2 will be worked on to improve the performance of the
|
||||
format and its useability in cryptographic applications.
|
||||
|
||||
### PubKey
|
||||
|
||||
Encoding schemes infect software. As a generic middleware, ABCI aims to have
|
||||
some cross scheme compatibility. For this it has no choice but to include opaque
|
||||
bytes from time to time. While we will not enforce Amino encoding for these
|
||||
bytes yet, we need to provide a type system. The simplest way to do this is to
|
||||
use a type string.
|
||||
|
||||
PubKey will now look like:
|
||||
|
||||
```
|
||||
message PubKey {
|
||||
string type
|
||||
bytes data
|
||||
}
|
||||
```
|
||||
|
||||
where `type` can be:
|
||||
|
||||
- "ed25519", with `data = <raw 32-byte pubkey>`
|
||||
- "secp256k1", with `data = <33-byte OpenSSL compressed pubkey>`
|
||||
|
||||
As we want to retain flexibility here, and since ideally, PubKey would be an
|
||||
interface type, we do not use `enum` or `oneof`.
|
||||
|
||||
### Addresses
|
||||
|
||||
To simplify and improve computing addresses, we change it to the first 20-bytes of the SHA256
|
||||
of the raw 32-byte public key.
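For illustration, the proposed derivation can be sketched as follows (the helper name is an assumption):

```go
import "crypto/sha256"

// ed25519Address returns the first 20 bytes of SHA256(raw 32-byte pubkey).
func ed25519Address(pubKey [32]byte) []byte {
	h := sha256.Sum256(pubKey[:])
	return h[:20]
}
```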
|
||||
|
||||
We continue to use the Bitcoin address scheme for secp256k1 keys.
|
||||
|
||||
### Validators
|
||||
|
||||
Add a `bytes address` field:
|
||||
|
||||
```
|
||||
message Validator {
|
||||
bytes address
|
||||
PubKey pub_key
|
||||
int64 power
|
||||
}
|
||||
```
|
||||
|
||||
### RequestBeginBlock and AbsentValidators
|
||||
|
||||
To simplify this, RequestBeginBlock will include the complete validator set,
|
||||
including the address, and voting power of each validator, along
|
||||
with a boolean for whether or not they voted:
|
||||
|
||||
```
|
||||
message RequestBeginBlock {
|
||||
bytes hash
|
||||
Header header
|
||||
LastCommitInfo last_commit_info
|
||||
repeated Evidence byzantine_validators
|
||||
}
|
||||
|
||||
message LastCommitInfo {
|
||||
int32 CommitRound
|
||||
repeated SigningValidator validators
|
||||
}
|
||||
|
||||
message SigningValidator {
|
||||
Validator validator
|
||||
bool signed_last_block
|
||||
}
|
||||
```
|
||||
|
||||
Note that in Validators in RequestBeginBlock, we DO NOT include public keys. Public keys are
|
||||
larger than addresses and in the future, with quantum computers, will be much
|
||||
larger. The overhead of passing them, especially during fast-sync, is
|
||||
significant.
|
||||
|
||||
Additionally, addresses are changing to be simpler to compute, further removing
|
||||
the need to include pubkeys here.
|
||||
|
||||
In short, ABCI developers must be aware of both addresses and public keys.
|
||||
|
||||
### ResponseEndBlock
|
||||
|
||||
Since ResponseEndBlock includes Validator, it must now include their address.
|
||||
|
||||
### InitChain
|
||||
|
||||
Change RequestInitChain to give the app all the information from the genesis file:
|
||||
|
||||
```
|
||||
message RequestInitChain {
|
||||
int64 time
|
||||
string chain_id
|
||||
ConsensusParams consensus_params
|
||||
repeated Validator validators
|
||||
bytes app_state_bytes
|
||||
}
|
||||
```
|
||||
|
||||
Change ResponseInitChain to allow the app to specify the initial validator set
|
||||
and consensus parameters.
|
||||
|
||||
```
|
||||
message ResponseInitChain {
|
||||
ConsensusParams consensus_params
|
||||
repeated Validator validators
|
||||
}
|
||||
```
|
||||
|
||||
### Header
|
||||
|
||||
Now that Tendermint Amino will be compatible with proto3, the Header in ABCI
|
||||
should exactly match the Tendermint header - they will then be encoded
|
||||
identically in ABCI and in Tendermint Core.
|
||||
|
||||
## Status
|
||||
|
||||
Accepted.
|
||||
|
||||
## Consequences
|
||||
|
||||
### Positive
|
||||
|
||||
- Easier for developers to build on the ABCI
|
||||
- ABCI and Tendermint headers are identically serialized
|
||||
|
||||
### Negative
|
||||
|
||||
- Maintenance overhead of alternative type encoding scheme
|
||||
- Performance overhead of passing all validator info every block (at least it's
|
||||
only addresses, and not also pubkeys)
|
||||
- Maintenance overhead of duplicate types
|
||||
|
||||
### Neutral
|
||||
|
||||
- ABCI developers must know about validator addresses
|
||||
|
||||
## References
|
||||
|
||||
- [ABCI v0.10.3 Specification (before this
|
||||
proposal)](https://github.com/tendermint/abci/blob/v0.10.3/specification.rst)
|
||||
- [ABCI v0.11.0 Specification (implementing first draft of this
|
||||
proposal)](https://github.com/tendermint/abci/blob/v0.11.0/specification.md)
|
||||
- [Ed25519 addresses](https://github.com/tendermint/go-crypto/issues/103)
|
||||
- [InitChain contains the
|
||||
Genesis](https://github.com/tendermint/abci/issues/216)
|
||||
- [PubKeys](https://github.com/tendermint/tendermint/issues/1524)
|
||||
- [Notes on
|
||||
Header](https://github.com/tendermint/tendermint/issues/1605)
|
||||
- [Gogoproto issues](https://github.com/tendermint/abci/issues/256)
|
||||
- [Absent Validators](https://github.com/tendermint/abci/issues/231)
|
||||
@@ -1,75 +0,0 @@
|
||||
# ADR 010: Crypto Changes
|
||||
|
||||
## Context
|
||||
|
||||
Tendermint is a cryptographic protocol that uses and composes a variety of cryptographic primitives.
|
||||
|
||||
After nearly 4 years of development, Tendermint has recently undergone multiple security reviews to search for vulnerabilities and to assess the use and composition of cryptographic primitives.
|
||||
|
||||
### Hash Functions
|
||||
|
||||
Tendermint uses RIPEMD160 universally as a hash function, most notably in its Merkle tree implementation.
|
||||
|
||||
RIPEMD160 was chosen because it provides the shortest fingerprint that is long enough to be considered secure (ie. birthday bound of 80-bits).
|
||||
It was also developed in the open academic community, unlike NSA-designed algorithms like SHA256.
|
||||
|
||||
That said, the cryptographic community appears to unanimously agree on the security of SHA256. It has become a universal standard, especially now that SHA1 is broken, being required in TLS connections and having optimized support in hardware.
|
||||
|
||||
### Merkle Trees
|
||||
|
||||
Tendermint uses a simple Merkle tree to compute digests of large structures like transaction batches
|
||||
and even blockchain headers. The Merkle tree length prefixes byte arrays before concatenating and hashing them.
|
||||
It uses RIPEMD160.
|
||||
|
||||
### Addresses
|
||||
|
||||
ED25519 addresses are computed using the RIPEMD160 of the Amino encoding of the public key.
|
||||
RIPEMD160 is generally considered an outdated hash function, and is much slower
|
||||
than more modern functions like SHA256 or Blake2.
|
||||
|
||||
### Authenticated Encryption
|
||||
|
||||
Tendermint P2P connections use authenticated encryption to provide privacy and authentication in the communications.
|
||||
This is done using the simple Station-to-Station protocol with the NaCL Ed25519 library.
|
||||
|
||||
While there have been no vulnerabilities found in the implementation, there are some concerns:
|
||||
|
||||
- NaCL uses Salsa20, a stream cipher that is not widely used, is relatively outdated, and has been obsoleted by ChaCha20
|
||||
- Connections use RIPEMD160 to compute a value that is used for the encryption nonce with subtle requirements on how it's used
|
||||
|
||||
## Decision
|
||||
|
||||
### Hash Functions
|
||||
|
||||
Use the first 20-bytes of the SHA256 hash instead of RIPEMD160 for everything
|
||||
|
||||
### Merkle Trees
|
||||
|
||||
TODO
|
||||
|
||||
### Addresses
|
||||
|
||||
Compute ED25519 addresses as the first 20-bytes of the SHA256 of the raw 32-byte public key
|
||||
|
||||
### Authenticated Encryption
|
||||
|
||||
Make the following changes:
|
||||
|
||||
- Use xChaCha20 instead of xSalsa20 - https://github.com/tendermint/tendermint/issues/1124
|
||||
- Use an HKDF instead of RIPEMD160 to compute nonces - https://github.com/tendermint/tendermint/issues/1165
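A minimal sketch of the HKDF-based derivation suggested above, assuming the `golang.org/x/crypto/hkdf` package over SHA256; the info label, output layout, and function name are illustrative assumptions rather than the final design:

```go
import (
	"crypto/sha256"
	"io"

	"golang.org/x/crypto/hkdf"
)

// deriveSecrets expands the shared secret into per-direction secrets
// without involving RIPEMD160. Labels and sizes are illustrative.
func deriveSecrets(sharedSecret []byte) (recv, send [32]byte, err error) {
	r := hkdf.New(sha256.New, sharedSecret, nil, []byte("SECRET_CONNECTION_KEY_GEN"))
	if _, err = io.ReadFull(r, recv[:]); err != nil {
		return
	}
	_, err = io.ReadFull(r, send[:])
	return
}
```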
|
||||
|
||||
## Status
|
||||
|
||||
## Consequences
|
||||
|
||||
### Positive
|
||||
|
||||
- More modern and standard cryptographic functions with wider adoption and hardware acceleration
|
||||
|
||||
### Negative
|
||||
|
||||
- The exact authenticated encryption construction isn't already provided in a well-used library
|
||||
|
||||
### Neutral
|
||||
|
||||
## References
|
||||
@@ -1,116 +0,0 @@
|
||||
# ADR 011: Monitoring
|
||||
|
||||
## Changelog
|
||||
|
||||
08-06-2018: Initial draft
|
||||
11-06-2018: Reorg after @xla comments
|
||||
13-06-2018: Clarification about usage of labels
|
||||
|
||||
## Context
|
||||
|
||||
In order to bring more visibility into Tendermint, we would like it to report
|
||||
metrics and, maybe later, traces of transactions and RPC queries. See
|
||||
https://github.com/tendermint/tendermint/issues/986.
|
||||
|
||||
A few solutions were considered:
|
||||
|
||||
1. [Prometheus](https://prometheus.io)
|
||||
a) Prometheus API
|
||||
b) [go-kit metrics package](https://github.com/go-kit/kit/tree/master/metrics) as an interface plus Prometheus
|
||||
c) [telegraf](https://github.com/influxdata/telegraf)
|
||||
d) new service, which will listen to events emitted by pubsub and report metrics
|
||||
2. [OpenCensus](https://opencensus.io/introduction/)
|
||||
|
||||
### 1. Prometheus
|
||||
|
||||
Prometheus seems to be the most popular product out there for monitoring. It has
|
||||
a Go client library, powerful queries, alerts.
|
||||
|
||||
**a) Prometheus API**
|
||||
|
||||
We can commit to using Prometheus in Tendermint, but I think Tendermint users
|
||||
should be free to choose whatever monitoring tool they feel will better suit
|
||||
their needs (if they don't have existing one already). So we should try to
|
||||
abstract interface enough so people can switch between Prometheus and other
|
||||
similar tools.
|
||||
|
||||
**b) go-kit metrics package as an interface**
|
||||
|
||||
metrics package provides a set of uniform interfaces for service
|
||||
instrumentation and offers adapters to popular metrics packages:
|
||||
|
||||
https://godoc.org/github.com/go-kit/kit/metrics#pkg-subdirectories
|
||||
|
||||
Compared to the Prometheus API, we're losing customisability and control, but gaining
|
||||
freedom in choosing any instrument from the above list given we will extract
|
||||
metrics creation into a separate function (see "providers" in node/node.go).
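As a rough sketch of option (b), assuming the go-kit Prometheus adapter; the `Metrics` struct and the provider function are illustrative names, not an existing Tendermint API:

```go
import (
	"github.com/go-kit/kit/metrics"
	prometheus "github.com/go-kit/kit/metrics/prometheus"
	stdprometheus "github.com/prometheus/client_golang/prometheus"
)

// Metrics groups a couple of the proposed gauges behind go-kit interfaces.
type Metrics struct {
	Height metrics.Gauge
	Peers  metrics.Gauge
}

// PrometheusMetrics is a "provider" that wires the interfaces to Prometheus;
// a no-op provider could be swapped in for users of other backends.
func PrometheusMetrics() *Metrics {
	return &Metrics{
		Height: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
			Subsystem: "consensus",
			Name:      "height",
			Help:      "Height of the chain.",
		}, nil),
		Peers: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
			Subsystem: "p2p",
			Name:      "peers",
			Help:      "Number of connected peers.",
		}, nil),
	}
}
```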
|
||||
|
||||
**c) telegraf**
|
||||
|
||||
Unlike the already discussed options, telegraf does not require modifying the Tendermint
|
||||
source code. You create something called an input plugin, which polls
|
||||
Tendermint RPC every second and calculates the metrics itself.
|
||||
|
||||
While it may sound good, some metrics we want to report are not exposed via
|
||||
RPC or pubsub, and therefore can't be accessed externally.
|
||||
|
||||
**d) service, listening to pubsub**
|
||||
|
||||
Same issue as the above.
|
||||
|
||||
### 2. opencensus
|
||||
|
||||
opencensus provides both metrics and tracing, which may be important in the
|
||||
future. Its API looks different from go-kit and Prometheus, but looks like it
|
||||
covers everything we need.
|
||||
|
||||
Unfortunately, the OpenCensus Go client does not define any
|
||||
interfaces, so if we want to abstract away metrics we
|
||||
will need to write interfaces ourselves.
|
||||
|
||||
### List of metrics
|
||||
|
||||
| | Name | Type | Description |
|
||||
| --- | ------------------------------------ | ------ | ----------------------------------------------------------------------------- |
|
||||
| A | consensus_height | Gauge | |
|
||||
| A | consensus_validators | Gauge | Number of validators who signed |
|
||||
| A | consensus_validators_power | Gauge | Total voting power of all validators |
|
||||
| A | consensus_missing_validators | Gauge | Number of validators who did not sign |
|
||||
| A | consensus_missing_validators_power | Gauge | Total voting power of the missing validators |
|
||||
| A | consensus_byzantine_validators | Gauge | Number of validators who tried to double sign |
|
||||
| A | consensus_byzantine_validators_power | Gauge | Total voting power of the byzantine validators |
|
||||
| A | consensus_block_interval | Timing | Time between this and last block (Block.Header.Time) |
|
||||
| | consensus_block_time | Timing | Time to create a block (from creating a proposal to commit) |
|
||||
| | consensus_time_between_blocks | Timing | Time between committing the last block and receiving/creating the next proposal |
|
||||
| A | consensus_rounds | Gauge | Number of rounds |
|
||||
| | consensus_prevotes | Gauge | |
|
||||
| | consensus_precommits | Gauge | |
|
||||
| | consensus_prevotes_total_power | Gauge | |
|
||||
| | consensus_precommits_total_power | Gauge | |
|
||||
| A | consensus_num_txs | Gauge | |
|
||||
| A | mempool_size | Gauge | |
|
||||
| A | consensus_total_txs | Gauge | |
|
||||
| A | consensus_block_size | Gauge | In bytes |
|
||||
| A | p2p_peers | Gauge | Number of peers node's connected to |
|
||||
|
||||
`A` - will be implemented first.
|
||||
|
||||
**Proposed solution**
|
||||
|
||||
## Status
|
||||
|
||||
Proposed.
|
||||
|
||||
## Consequences
|
||||
|
||||
### Positive
|
||||
|
||||
Better visibility, support of variety of monitoring backends
|
||||
|
||||
### Negative
|
||||
|
||||
One more library to audit, and metrics reporting code gets mixed in with the business domain code.
|
||||
|
||||
### Neutral
|
||||
|
||||
-
|
||||
@@ -1,113 +0,0 @@
|
||||
# ADR 012: PeerTransport
|
||||
|
||||
## Context
|
||||
|
||||
One of the more apparent problems with the current architecture in the p2p
|
||||
package is that there is no clear separation of concerns between different
|
||||
components. Most notably the `Switch` is currently doing physical connection
|
||||
handling. An artifact is the dependency of the Switch on
|
||||
[`config.P2PConfig`](https://github.com/tendermint/tendermint/blob/05a76fb517f50da27b4bfcdc7b4cf185fc61eff6/config/config.go#L272-L339).
|
||||
|
||||
Addresses:
|
||||
|
||||
- [#2046](https://github.com/tendermint/tendermint/issues/2046)
|
||||
- [#2047](https://github.com/tendermint/tendermint/issues/2047)
|
||||
|
||||
First iteration in [#2067](https://github.com/tendermint/tendermint/issues/2067)
|
||||
|
||||
## Decision
|
||||
|
||||
Transport concerns will be handled by a new component (`PeerTransport`) which
|
||||
will provide Peers at its boundary to the caller. In turn `Switch` will use
|
||||
this new component to accept new `Peer`s and dial them based on `NetAddress`.
|
||||
|
||||
### PeerTransport
|
||||
|
||||
Responsible for emitting and connecting to Peers. The implementation of `Peer`
|
||||
is left to the transport, which implies that the chosen transport dictates the
|
||||
characteristics of the implementation handed back to the `Switch`. Each
|
||||
transport implementation is responsible for filtering establishing peers specific
|
||||
to its domain; for the default multiplexed implementation the following will
|
||||
apply:
|
||||
|
||||
- connections from our own node
|
||||
- handshake fails
|
||||
- upgrade to secret connection fails
|
||||
- prevent duplicate ip
|
||||
- prevent duplicate id
|
||||
- nodeinfo incompatibility
|
||||
|
||||
```go
|
||||
// PeerTransport proxies incoming and outgoing peer connections.
|
||||
type PeerTransport interface {
|
||||
// Accept returns a newly connected Peer.
|
||||
Accept() (Peer, error)
|
||||
|
||||
// Dial connects to a Peer.
|
||||
Dial(NetAddress) (Peer, error)
|
||||
}
|
||||
|
||||
// EXAMPLE OF DEFAULT IMPLEMENTATION
|
||||
|
||||
// multiplexTransport accepts tcp connections and upgrades to multiplexed
|
||||
// peers.
|
||||
type multiplexTransport struct {
|
||||
listener net.Listener
|
||||
|
||||
acceptc chan accept
|
||||
closec <-chan struct{}
|
||||
listenc <-chan struct{}
|
||||
|
||||
dialTimeout time.Duration
|
||||
handshakeTimeout time.Duration
|
||||
nodeAddr NetAddress
|
||||
nodeInfo NodeInfo
|
||||
nodeKey NodeKey
|
||||
|
||||
// TODO(xla): Remove when MConnection is refactored into mPeer.
|
||||
mConfig conn.MConnConfig
|
||||
}
|
||||
|
||||
var _ PeerTransport = (*multiplexTransport)(nil)
|
||||
|
||||
// NewMTransport returns network connected multiplexed peers.
|
||||
func NewMTransport(
|
||||
nodeAddr NetAddress,
|
||||
nodeInfo NodeInfo,
|
||||
nodeKey NodeKey,
|
||||
) *multiplexTransport
|
||||
```
|
||||
|
||||
### Switch
|
||||
|
||||
From now on, the Switch will depend on a fully set up `PeerTransport` to
|
||||
retrieve/reach out to its peers. As the more low-level concerns are pushed to
|
||||
the transport, we can omit passing the `config.P2PConfig` to the Switch.
|
||||
|
||||
```go
|
||||
func NewSwitch(transport PeerTransport, opts ...SwitchOption) *Switch
|
||||
```
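For illustration, a Switch built this way might consume the transport roughly as follows; the `acceptLoop` and `addPeer` names are assumptions for the sketch, not the actual Switch internals:

```go
// acceptLoop drains the transport and hands accepted peers to the Switch.
func (sw *Switch) acceptLoop(transport PeerTransport) {
	for {
		p, err := transport.Accept()
		if err != nil {
			return // transport was closed or failed permanently
		}
		sw.addPeer(p) // hypothetical helper that starts reactors for the peer
	}
}
```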
|
||||
|
||||
## Status
|
||||
|
||||
In Review.
|
||||
|
||||
## Consequences
|
||||
|
||||
### Positive
|
||||
|
||||
- free Switch from transport concerns - simpler implementation
|
||||
- pluggable transport implementation - simpler test setup
|
||||
- remove Switch dependency on P2PConfig - easier to test
|
||||
|
||||
### Negative
|
||||
|
||||
- more setup for tests which depend on Switches
|
||||
|
||||
### Neutral
|
||||
|
||||
- multiplexed will be the default implementation
|
||||
|
||||
[0] These guards could be potentially extended to be pluggable much like
|
||||
middlewares to express different concerns required by differently configured
|
||||
environments.
|
||||
@@ -1,99 +0,0 @@
|
||||
# ADR 013: Need for symmetric cryptography
|
||||
|
||||
## Context
|
||||
|
||||
We require symmetric ciphers to handle how we encrypt keys in the sdk,
|
||||
and to potentially encrypt `priv_validator.json` in tendermint.
|
||||
|
||||
Currently we use AEAD's to support symmetric encryption,
|
||||
which is great since we want data integrity in addition to privacy and authenticity.
|
||||
We don't currently have a scenario where we want to encrypt without data integrity,
|
||||
so it is fine to optimize our code to just use AEAD's.
|
||||
Currently there is not a way to switch out AEAD's easily; this ADR outlines a way
|
||||
to easily swap these out.
|
||||
|
||||
### How do we encrypt with AEAD's
|
||||
|
||||
AEAD's typically require a nonce in addition to the key.
|
||||
For the purposes we require symmetric cryptography for,
|
||||
we need encryption to be stateless.
|
||||
Because of this we use random nonces.
|
||||
(Thus the AEAD must support random nonces)
|
||||
|
||||
We currently construct a random nonce, and encrypt the data with it.
|
||||
The returned value is `nonce || encrypted data`.
|
||||
The limitation of this is that it does not provide a way to identify
|
||||
which algorithm was used in encryption.
|
||||
Consequently, decryption with multiple algorithms is sub-optimal.
|
||||
(You have to try them all)
|
||||
|
||||
## Decision
|
||||
|
||||
We should create the following two methods in a new `crypto/encoding/symmetric` package:
|
||||
|
||||
```golang
|
||||
func Encrypt(aead cipher.AEAD, plaintext []byte) (ciphertext []byte, err error)
|
||||
func Decrypt(key []byte, ciphertext []byte) (plaintext []byte, err error)
|
||||
func Register(aead cipher.AEAD, algo_name string, NewAead func(key []byte) (cipher.AEAD, error)) error
|
||||
```
|
||||
|
||||
This allows you to specify the algorithm in encryption, but not have to specify
|
||||
it in decryption.
|
||||
This is intended for ease of use in downstream applications, in addition to people
|
||||
looking at the file directly.
|
||||
One downside is that for the encrypt function you must have already initialized an AEAD,
|
||||
but I don't really see this as an issue.
|
||||
|
||||
If there is no error in encryption, Encrypt will return `algo_name || nonce || aead_ciphertext`.
|
||||
`algo_name` should be length prefixed, using standard varuint encoding.
|
||||
This will be binary data, but that's not a problem considering the nonce and ciphertext are also binary.
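A sketch of that framing, assuming the caller already has the sealed AEAD output and the nonce; the helper name and the shape of its inputs are illustrative:

```golang
import "encoding/binary"

// frameCiphertext lays out the proposed output: a varuint length prefix for
// algo_name, then the name, the nonce, and the AEAD ciphertext.
func frameCiphertext(algoName string, nonce, sealed []byte) []byte {
	prefix := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(prefix, uint64(len(algoName)))

	out := make([]byte, 0, n+len(algoName)+len(nonce)+len(sealed))
	out = append(out, prefix[:n]...)
	out = append(out, algoName...)
	out = append(out, nonce...)
	out = append(out, sealed...)
	return out
}
```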
|
||||
|
||||
This solution requires a mapping from aead type to name.
|
||||
We can achieve this via reflection.
|
||||
|
||||
```golang
|
||||
func getType(myvar interface{}) string {
|
||||
if t := reflect.TypeOf(myvar); t.Kind() == reflect.Ptr {
|
||||
return "*" + t.Elem().Name()
|
||||
} else {
|
||||
return t.Name()
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Then we maintain a map from the name returned from `getType(aead)` to `algo_name`.
|
||||
|
||||
In decryption, we read the `algo_name`, and then instantiate a new AEAD with the key.
|
||||
Then we call the AEAD's decrypt method on the provided nonce/ciphertext.
|
||||
|
||||
`Register` allows a downstream user to add their own desired AEAD to the symmetric package.
|
||||
It will error if the AEAD name is already registered.
|
||||
This prevents a malicious import from modifying / nullifying an AEAD at runtime.
|
||||
|
||||
## Implementation strategy
|
||||
|
||||
The golang implementation of what is proposed is rather straightforward.
|
||||
The concern is that we will break existing private keys if we just switch to this.
|
||||
If this is concerning, we can make a simple script which doesn't require decoding privkeys,
|
||||
for converting from the old format to the new one.
|
||||
|
||||
## Status
|
||||
|
||||
Proposed.
|
||||
|
||||
## Consequences
|
||||
|
||||
### Positive
|
||||
|
||||
- Allows us to support new AEAD's, in a way that makes decryption easier
|
||||
- Allows downstream users to add their own AEAD
|
||||
|
||||
### Negative
|
||||
|
||||
- We will have to break all private keys stored on disk.
|
||||
They can be recovered using seed words, and upgrade scripts are simple.
|
||||
|
||||
### Neutral
|
||||
|
||||
- Caller has to instantiate the AEAD with the private key.
|
||||
However it forces them to be aware of what signing algorithm they are using, which is a positive.
|
||||
@@ -1,63 +0,0 @@
|
||||
# ADR 014: Secp256k1 Signature Malleability
|
||||
|
||||
## Context
|
||||
|
||||
Secp256k1 has two layers of malleability.
|
||||
The signer has a random nonce, and thus can produce many different valid signatures.
|
||||
This ADR is not concerned with that.
|
||||
The second layer of malleability basically allows one who is given a signature
|
||||
to produce exactly one more valid signature for the same message from the same public key.
|
||||
(They don't even have to know the message!)
|
||||
The math behind this will be explained in the subsequent section.
|
||||
|
||||
Note that in many downstream applications, signatures will appear in a transaction, and therefore in the tx hash.
|
||||
This means that if someone broadcasts a transaction with secp256k1 signature, the signature can be altered into the other form by anyone in the p2p network.
|
||||
Thus the tx hash will change, and this altered tx hash may be committed instead.
|
||||
This breaks the assumption that you can broadcast a valid transaction and just wait for its hash to be included on chain.
|
||||
One example is if you are broadcasting a tx in cosmos,
|
||||
and you wait for it to appear on chain before incrementing your sequence number.
|
||||
You may never increment your sequence number if a different tx hash got committed.
|
||||
Removing this second layer of signature malleability concerns could ease downstream development.
|
||||
|
||||
### ECDSA context
|
||||
|
||||
Secp256k1 is ECDSA over a particular curve.
|
||||
The signature is of the form `(r, s)`, where `s` is a field element.
|
||||
(The particular field is the `Z_n`, where the elliptic curve has order `n`)
|
||||
However `(r, -s)` is also another valid solution.
|
||||
Note that anyone can negate a group element, and therefore can get this second signature.
|
||||
|
||||
## Decision
|
||||
|
||||
We can just distinguish a canonical form for the ECDSA signatures.
|
||||
Then we require that all ECDSA signatures be in the form which we defined as canonical.
|
||||
We reject signatures in non-canonical form.
|
||||
|
||||
A canonical form is rather easy to define and check.
|
||||
It would just be the smaller of the two values for `s`, defined lexicographically.
|
||||
This is a simple check: instead of checking if `s < n`, check `s <= (n - 1)/2`.
|
||||
An example of another cryptosystem using this
|
||||
is the parity definition here https://github.com/zkcrypto/pairing/pull/30#issuecomment-372910663.
|
||||
|
||||
This is the same solution Ethereum has chosen for solving secp malleability.
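A minimal sketch of the canonicality check, assuming `curveN` holds the secp256k1 group order:

```go
import "math/big"

// halfOrder is (n - 1) / 2 for the (odd) group order n, computed as n >> 1.
var halfOrder = new(big.Int).Rsh(curveN, 1)

// isCanonicalS reports whether s is in the lower half of the range,
// i.e. s <= (n - 1) / 2, as described above.
func isCanonicalS(s *big.Int) bool {
	return s.Cmp(halfOrder) <= 0
}
```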
|
||||
|
||||
## Proposed Implementation
|
||||
|
||||
Fork https://github.com/btcsuite/btcd, and just update the [parse sig method](https://github.com/btcsuite/btcd/blob/11fcd83963ab0ecd1b84b429b1efc1d2cdc6d5c5/btcec/signature.go#L195) and serialize functions to enforce our canonical form.
|
||||
|
||||
## Status
|
||||
|
||||
Implemented
|
||||
|
||||
## Consequences
|
||||
|
||||
### Positive
|
||||
|
||||
- Lets us maintain the ability to expect a tx hash to appear in the blockchain.
|
||||
|
||||
### Negative
|
||||
|
||||
- More work in all future implementations (Though this is a very simple check)
|
||||
- Requires us to maintain another fork
|
||||
|
||||
### Neutral
|
||||
@@ -1,84 +0,0 @@
|
||||
# ADR 015: Crypto encoding
|
||||
|
||||
## Context
|
||||
|
||||
We must standardize our method for encoding public keys and signatures on chain.
|
||||
Currently we amino encode the public keys and signatures.
|
||||
The reason we are using amino here is primarily due to ease of support in
|
||||
parsing for other languages.
|
||||
We don't need its upgradability properties in cryptosystems, as a change in
|
||||
the crypto that requires adapting the encoding, likely warrants being deemed
|
||||
a new cryptosystem.
|
||||
(I.e. using new public parameters)
|
||||
|
||||
## Decision
|
||||
|
||||
### Public keys
|
||||
|
||||
For public keys, we will continue to use amino encoding on the canonical
|
||||
representation of the pubkey.
|
||||
(Canonical as defined by the cryptosystem itself)
|
||||
This has two significant drawbacks.
|
||||
Amino encoding is less space-efficient, due to requiring support for upgradability.
|
||||
Amino encoding support requires forking protobuf and adding this new interface support
|
||||
option in the language of choice.
|
||||
|
||||
The reason for continuing to use amino however is that people can create code
|
||||
more easily in languages that already have an up to date amino library.
|
||||
It is possible that this will change in the future, if it is deemed that
|
||||
requiring amino for interacting with Tendermint cryptography is unnecessary.
|
||||
|
||||
The arguments for space efficiency here are refuted on the basis that there are
|
||||
far more egregious wastages of space in the SDK.
|
||||
The space requirement of the public keys doesn't cause many problems beyond
|
||||
increasing the space attached to each validator / account.
|
||||
|
||||
The alternative to using amino here would be for us to create an enum type.
|
||||
Switching to just an enum type is worthy of investigation post-launch.
|
||||
For reference, part of amino encoding interfaces is basically a 4 byte enum
|
||||
type definition.
|
||||
Enum types would just change that 4 bytes to be a variant, and it would remove
|
||||
the protobuf overhead, but it would be hard to integrate into the existing API.
|
||||
|
||||
### Signatures
|
||||
|
||||
Signatures should be switched to be `[]byte`.
|
||||
Spatial efficiency in the signatures is quite important,
|
||||
as it directly affects the gas cost of every transaction,
|
||||
and the throughput of the chain.
|
||||
Signatures don't need to encode what type they are for (unlike public keys)
|
||||
since public keys must already be known.
|
||||
Therefore we can validate the signature without needing to encode its type.
|
||||
|
||||
When placed in state, signatures will still be amino encoded, but it will be the
|
||||
primitive type `[]byte` getting encoded.
|
||||
|
||||
#### Ed25519
|
||||
|
||||
Use the canonical representation for signatures.
|
||||
|
||||
#### Secp256k1
|
||||
|
||||
There isn't a clear canonical representation here.
|
||||
Signatures have two elements `r,s`.
|
||||
These bytes are encoded as `r || s`, where `r` and `s` are both exactly
|
||||
32 bytes long, encoded big-endian.
|
||||
This is basically Ethereum's encoding, but without the leading recovery bit.
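A short sketch of that layout, assuming `r` and `s` each fit in 32 bytes; the function name is illustrative:

```go
import "math/big"

// encodeSecp256k1Sig writes r and s big-endian, each left-padded to 32 bytes.
func encodeSecp256k1Sig(r, s *big.Int) []byte {
	sig := make([]byte, 64)
	r.FillBytes(sig[:32])
	s.FillBytes(sig[32:])
	return sig
}
```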
|
||||
|
||||
## Status
|
||||
|
||||
Implemented
|
||||
|
||||
## Consequences
|
||||
|
||||
### Positive
|
||||
|
||||
- More space efficient signatures
|
||||
|
||||
### Negative
|
||||
|
||||
- We have an amino dependency for cryptography.
|
||||
|
||||
### Neutral
|
||||
|
||||
- No change to public keys
|
||||
@@ -1,308 +0,0 @@
|
||||
# ADR 016: Protocol Versions
|
||||
|
||||
## TODO
|
||||
|
||||
- How to / should we version the authenticated encryption handshake itself (ie.
|
||||
upfront protocol negotiation for the P2PVersion)
|
||||
- How to / should we version ABCI itself? Should it just be absorbed by the
|
||||
BlockVersion?
|
||||
|
||||
## Changelog
|
||||
|
||||
- 18-09-2018: Updates after working a bit on implementation
|
||||
- ABCI Handshake needs to happen independently of starting the app
|
||||
conns so we can see the result
|
||||
- Add question about ABCI protocol version
|
||||
- 16-08-2018: Updates after discussion with SDK team
|
||||
- Remove signalling for next version from Header/ABCI
|
||||
- 03-08-2018: Updates from discussion with Jae:
|
||||
- ProtocolVersion contains Block/AppVersion, not Current/Next
|
||||
- signal upgrades to Tendermint using EndBlock fields
|
||||
- don't restrict peer compatibility by version to simplify syncing old nodes
|
||||
- 28-07-2018: Updates from review
|
||||
- split into two ADRs - one for protocol, one for chains
|
||||
- include signalling for upgrades in header
|
||||
- 16-07-2018: Initial draft - was originally joint ADR for protocol and chain
|
||||
versions
|
||||
|
||||
## Context
|
||||
|
||||
Here we focus on software-agnostic protocol versions.
|
||||
|
||||
The Software Version is covered by SemVer and described elsewhere.
|
||||
It is not relevant to the protocol description; suffice it to say that if any protocol version
|
||||
changes, the software version changes, but not necessarily vice versa.
|
||||
|
||||
Software version should be included in NodeInfo for convenience/diagnostics.
|
||||
|
||||
We are also interested in versioning across different blockchains in a
|
||||
meaningful way, for instance to differentiate branches of a contentious
|
||||
hard-fork. We leave that for a later ADR.
|
||||
|
||||
## Requirements
|
||||
|
||||
We need to version components of the blockchain that may be independently upgraded.
|
||||
We need to do it in a way that is scalable and maintainable - we can't just litter
|
||||
the code with conditionals.
|
||||
|
||||
We can consider the complete version of the protocol to contain the following sub-versions:
|
||||
BlockVersion, P2PVersion, AppVersion. These versions reflect the major sub-components
|
||||
of the software that are likely to evolve together, at different rates, and in different ways,
|
||||
as described below.
|
||||
|
||||
The BlockVersion defines the core of the blockchain data structures and
|
||||
should change infrequently.
|
||||
|
||||
The P2PVersion defines how peers connect and communicate with each other - it's
|
||||
not part of the blockchain data structures, but defines the protocols used to build the
|
||||
blockchain. It may change gradually.
|
||||
|
||||
The AppVersion determines how we compute app specific information, like the
|
||||
AppHash and the Results.
|
||||
|
||||
All of these versions may change over the life of a blockchain, and we need to
|
||||
be able to help new nodes sync up across version changes. This means we must be willing
|
||||
to connect to peers with older versions.
|
||||
|
||||
### BlockVersion
|
||||
|
||||
- All tendermint hashed data-structures (headers, votes, txs, responses, etc.).
|
||||
- Note the semantic meaning of a transaction may change according to the AppVersion, but the way txs are merklized into the header is part of the BlockVersion
|
||||
- It should be the least frequent/likely to change.
|
||||
- Tendermint should be stabilizing - it's just Atomic Broadcast.
|
||||
- We can start considering for Tendermint v2.0 in a year
|
||||
- It's easy to determine the version of a block from its serialized form
|
||||
|
||||
### P2PVersion
|
||||
|
||||
- All p2p and reactor messaging (messages, detectable behaviour)
|
||||
- Will change gradually as reactors evolve to improve performance and support new features - eg proposed new message types BatchTx in the mempool and HasBlockPart in the consensus
|
||||
- It's easy to determine the version of a peer from its first serialized message/s
|
||||
- New versions must be compatible with at least one old version to allow gradual upgrades
|
||||
|
||||
### AppVersion
|
||||
|
||||
- The ABCI state machine (txs, begin/endblock behaviour, commit hashing)
|
||||
- Behaviour and message types will change abruptly in the course of the life of a chain
|
||||
- Need to minimize complexity of the code for supporting different AppVersions at different heights
|
||||
- Ideally, each version of the software supports only a _single_ AppVersion at one time
|
||||
- this means we checkout different versions of the software at different heights instead of littering the code
|
||||
with conditionals
|
||||
- minimize the number of data migrations required across AppVersion (ie. most AppVersion should be able to read the same state from disk as previous AppVersion).
|
||||
|
||||
## Ideal
|
||||
|
||||
Each component of the software is independently versioned in a modular way and it's easy to mix and match and upgrade.
|
||||
|
||||
## Proposal
|
||||
|
||||
Each of BlockVersion, AppVersion, P2PVersion, is a monotonically increasing uint64.
|
||||
|
||||
To use these versions, we need to update the block Header, the p2p NodeInfo, and the ABCI.
|
||||
|
||||
### Header
|
||||
|
||||
Block Header should include a `Version` struct as its first field like:
|
||||
|
||||
```
|
||||
type Version struct {
|
||||
Block uint64
|
||||
App uint64
|
||||
}
|
||||
```
|
||||
|
||||
Here, `Version.Block` defines the rules for the current block, while
|
||||
`Version.App` defines the app version that processed the last block and computed
|
||||
the `AppHash` in the current block. Together they provide a complete description
|
||||
of the consensus-critical protocol.
|
||||
|
||||
Since we have settled on a proto3 header, the ability to read the BlockVersion out of the serialized header is unambiguous.
|
||||
|
||||
Using a Version struct gives us more flexibility to add fields without breaking
|
||||
the header.
|
||||
|
||||
The ProtocolVersion struct includes both the Block and App versions - it should
|
||||
serve as a complete description of the consensus-critical protocol.
|
||||
|
||||
### NodeInfo
|
||||
|
||||
NodeInfo should include a Version struct as its first field like:
|
||||
|
||||
```
|
||||
type Version struct {
|
||||
P2P uint64
|
||||
Block uint64
|
||||
App uint64
|
||||
|
||||
Other []string
|
||||
}
|
||||
```
|
||||
|
||||
Note this effectively makes `Version.P2P` the first field in the NodeInfo, so it
|
||||
should be easy to read this out of the serialized NodeInfo if need be to facilitate an upgrade.
|
||||
|
||||
The `Version.Other` here should include additional information like the name of the software client and
|
||||
its SemVer version - this is for convenience only. Eg.
|
||||
`tendermint-core/v0.22.8`. It's a `[]string` so it can include information about
|
||||
the version of Tendermint, of the app, of Tendermint libraries, etc.
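Purely as an illustration of the shape (the version numbers and the second `Other` entry are made up for this example):

```go
// Illustrative values only; real numbers depend on the chain and software.
v := Version{
	P2P:   5,
	Block: 8,
	App:   1,
	Other: []string{"tendermint-core/v0.22.8", "myapp/v0.3.0"},
}
```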
|
||||
|
||||
### ABCI
|
||||
|
||||
Since the ABCI is responsible for keeping Tendermint and the App in sync, we
|
||||
need to communicate version information through it.
|
||||
|
||||
On startup, we use Info to perform a basic handshake. It should include all the
|
||||
version information.
|
||||
|
||||
We also need to be able to update versions in the life of a blockchain. The
|
||||
natural place to do this is EndBlock.
|
||||
|
||||
Note that currently the result of the Handshake isn't exposed anywhere, as the
|
||||
handshaking happens inside the `proxy.AppConns` abstraction. We will need to
|
||||
remove the handshaking from the `proxy` package so we can call it independently
|
||||
and get the result, which should contain the application version.
|
||||
|
||||
#### Info
|
||||
|
||||
RequestInfo should add support for protocol versions like:
|
||||
|
||||
```
|
||||
message RequestInfo {
|
||||
string version
|
||||
uint64 block_version
|
||||
uint64 p2p_version
|
||||
}
|
||||
```
|
||||
|
||||
Similarly, ResponseInfo should return the versions:
|
||||
|
||||
```
|
||||
message ResponseInfo {
|
||||
string data
|
||||
|
||||
string version
|
||||
uint64 app_version
|
||||
|
||||
int64 last_block_height
|
||||
bytes last_block_app_hash
|
||||
}
|
||||
```
|
||||
|
||||
The existing `version` fields should be called `software_version` but we leave
|
||||
them for now to reduce the number of breaking changes.
|
||||
|
||||
#### EndBlock
|
||||
|
||||
Updating the version could be done either with new fields or by using the
|
||||
existing `tags`. Since we're trying to communicate information that will be
|
||||
included in Tendermint block Headers, it should be native to the ABCI, and not
|
||||
something embedded through some scheme in the tags. Thus, version updates should
|
||||
be communicated through EndBlock.
|
||||
|
||||
EndBlock already contains `ConsensusParams`. We can add version information to
|
||||
the ConsensusParams as well:
|
||||
|
||||
```
|
||||
message ConsensusParams {
|
||||
|
||||
BlockSize block_size
|
||||
EvidenceParams evidence_params
|
||||
VersionParams version
|
||||
}
|
||||
|
||||
message VersionParams {
|
||||
uint64 block_version
|
||||
uint64 app_version
|
||||
}
|
||||
```
|
||||
|
||||
For now, the `block_version` will be ignored, as we do not allow block version
|
||||
to be updated live. If the `app_version` is set, it signals that the app's
|
||||
protocol version has changed, and the new `app_version` will be included in the
|
||||
`Block.Header.Version.App` for the next block.
|
||||
|
||||
### BlockVersion
|
||||
|
||||
BlockVersion is included in both the Header and the NodeInfo.
|
||||
|
||||
Changing BlockVersion should happen quite infrequently and ideally only for
|
||||
critical upgrades. For now, it is not encoded in ABCI, though it's always
|
||||
possible to use tags to signal an external process to co-ordinate an upgrade.
|
||||
|
||||
Note Ethereum has not had to make an upgrade like this (everything has been at state machine level, AFAIK).
|
||||
|
||||
### P2PVersion
|
||||
|
||||
P2PVersion is not included in the block Header, just the NodeInfo.
|
||||
|
||||
P2PVersion is the first field in the NodeInfo. NodeInfo is also proto3 so this is easy to read out.
|
||||
|
||||
Note we need the peer/reactor protocols to take the versions of peers into account when sending messages:
|
||||
|
||||
- don't send messages they don't understand
|
||||
- don't send messages they don't expect
|
||||
|
||||
Doing this will be specific to the upgrades being made.
|
||||
|
||||
Note we also include the list of reactor channels in the NodeInfo and already don't send messages for channels the peer doesn't understand.
|
||||
If upgrades always use new channels, this simplifies the development cost of backwards compatibility.
|
||||
|
||||
Note NodeInfo is only exchanged after the authenticated encryption handshake to ensure that it's private.
|
||||
Doing any version exchange before encrypting could be considered information leakage, though I'm not sure
|
||||
how much that matters compared to being able to upgrade the protocol.
|
||||
|
||||
XXX: if needed, can we change the meaning of the first byte of the first message to encode a handshake version?
|
||||
this is the first byte of a 32-byte ed25519 pubkey.
|
||||
|
||||
### AppVersion
|
||||
|
||||
AppVersion is also included in the block Header and the NodeInfo.
|
||||
|
||||
AppVersion essentially defines how the AppHash and LastResults are computed.
|
||||
|
||||
### Peer Compatibility
|
||||
|
||||
Restricting peer compatibility based on version is complicated by the need to
|
||||
help old peers, possibly on older versions, sync the blockchain.
|
||||
|
||||
We might be tempted to say that we only connect to peers with the same
|
||||
AppVersion and BlockVersion (since these define the consensus critical
|
||||
computations), and a select list of P2PVersions (ie. those compatible with
|
||||
ours), but then we'd need to make accommodations for connecting to peers with the
|
||||
right Block/AppVersion for the height they're on.
|
||||
|
||||
For now, we will connect to peers with any version and restrict compatibility
|
||||
solely based on the ChainID. We leave more restrictive rules on peer
|
||||
compatibility to a future proposal.
|
||||
|
||||
### Future Changes
|
||||
|
||||
It may be valuable to support an `/unsafe_stop?height=_` endpoint to tell Tendermint to shutdown at a given height.
|
||||
This could be used by an external manager process that oversees upgrades by
|
||||
checking out and installing new software versions and restarting the process. It
|
||||
would subscribe to the relevant upgrade event (needs to be implemented) and call `/unsafe_stop` at
|
||||
the correct height (of course, only after getting approval from its user!).
|
||||
|
||||
## Consequences
|
||||
|
||||
### Positive
|
||||
|
||||
- Make tendermint and application versions native to the ABCI to more clearly
|
||||
communicate about them
|
||||
- Distinguish clearly between protocol versions and software version to
|
||||
facilitate implementations in other languages
|
||||
- Versions are included in key data structures in an easy-to-discern way
|
||||
- Allows proposers to signal for upgrades and apps to decide when to actually change the
|
||||
version (and start signalling for a new version)
|
||||
|
||||
### Neutral
|
||||
|
||||
- Unclear how to version the initial P2P handshake itself
|
||||
- Versions aren't being used (yet) to restrict peer compatibility
|
||||
- Signalling for a new version happens through the proposer and must be
|
||||
tallied/tracked in the app.
|
||||
|
||||
### Negative
|
||||
|
||||
- Adds more fields to the ABCI
|
||||
- Implies that a single codebase must be able to handle multiple versions
|
||||
@@ -1,99 +0,0 @@
|
||||
# ADR 017: Chain Versions
|
||||
|
||||
## TODO
|
||||
|
||||
- clarify how to handle slashing when ChainID changes
|
||||
|
||||
## Changelog
|
||||
|
||||
- 28-07-2018: Updates from review
|
||||
- split into two ADRs - one for protocol, one for chains
|
||||
- 16-07-2018: Initial draft - was originally joint ADR for protocol and chain
|
||||
versions
|
||||
|
||||
## Context
|
||||
|
||||
Software and Protocol versions are covered in a separate ADR.
|
||||
|
||||
Here we focus on chain versions.
|
||||
|
||||
## Requirements
|
||||
|
||||
We need to version blockchains across protocols, networks, forks, etc.
|
||||
We need chain identifiers and descriptions so we can talk about a multitude of chains,
|
||||
and especially the differences between them, in a meaningful way.
|
||||
|
||||
### Networks
|
||||
|
||||
We need to support many independent networks running the same version of the software,
|
||||
even possibly starting from the same initial state.
|
||||
They must have distinct identifiers so that peers know which one they are joining and so
|
||||
validators and users can prevent replay attacks.
|
||||
|
||||
Call this the `NetworkName` (note we currently call this `ChainID` in the software. In this
|
||||
ADR, ChainID has a different meaning).
|
||||
It represents both the application being run and the community or intention
|
||||
of running it.
|
||||
|
||||
Peers only connect to other peers with the same NetworkName.
|
||||
|
||||
### Forks
|
||||
|
||||
We need to support existing networks upgrading and forking, wherein they may do any of:
|
||||
|
||||
- revert back to some height, continue with the same versions but new blocks
|
||||
- arbitrarily mutate state at some height, continue with the same versions (eg. Dao Fork)
|
||||
- change the AppVersion at some height
|
||||
|
||||
Note that because of Tendermint's voting power threshold rules, a chain can only be extended under both the "original" rules and the new rules
if 1/3 or more of the voting power is double signing, which is expressly prohibited and is supposed to result in punishment on both chains. Since the offenders can censor
the punishment, the chain is expected to be hard-forked to remove those validators. Thus, if both branches are to continue after a fork,
they will each require a new identifier, and the old chain identifier will be retired (ie. only useful for syncing history, not for new blocks).
|
||||
|
||||
TODO: explain how to handle slashing when chain id changed!
|
||||
|
||||
We need a consistent way to describe forks.
|
||||
|
||||
## Proposal
|
||||
|
||||
### ChainDescription
|
||||
|
||||
ChainDescription is a complete immutable description of a blockchain. It takes the following form:
|
||||
|
||||
```
|
||||
ChainDescription = <NetworkName>/<BlockVersion>/<AppVersion>/<StateHash>/<ValHash>/<ConsensusParamsHash>
|
||||
```
|
||||
|
||||
Here, StateHash is the merkle root of the initial state, ValHash is the merkle root of the initial Tendermint validator set,
|
||||
and ConsensusParamsHash is the merkle root of the initial Tendermint consensus parameters.
|
||||
|
||||
The `genesis.json` file must contain enough information to compute this value. It need not contain the StateHash or ValHash itself,
|
||||
but it must contain the state from which they can be computed with the given protocol versions.
|
||||
|
||||
NOTE: consider splitting NetworkName into NetworkName and AppName - this allows
|
||||
folks to independently use the same application for different networks (ie we
|
||||
could imagine multiple communities of validators wanting to put up a Hub using
|
||||
the same app but having a distinct network name. Arguably not needed if
|
||||
differences will come via different initial state / validators).
|
||||
|
||||
#### ChainID
|
||||
|
||||
Define `ChainID = TMHASH(ChainDescriptor)`. It's the unique ID of a blockchain.
|
||||
|
||||
It should be Bech32 encoded when handled by users, eg. with `cosmoschain` prefix.
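
A small Go sketch of deriving a ChainID from a ChainDescription string follows. SHA-256 stands in for TMHASH, and the output is shown as hex for brevity; the user-facing form would additionally be Bech32-encoded with a prefix such as `cosmoschain`. The function and field names are illustrative, not part of the proposal.

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"strings"
)

// buildChainDescription assembles the immutable description string in the form
// proposed above; the hash inputs are assumed to already be hex-encoded merkle roots.
func buildChainDescription(network string, blockVer, appVer uint64, stateHash, valHash, paramsHash string) string {
	return strings.Join([]string{
		network,
		fmt.Sprintf("%d", blockVer),
		fmt.Sprintf("%d", appVer),
		stateHash,
		valHash,
		paramsHash,
	}, "/")
}

// chainID hashes the description. SHA-256 stands in for TMHASH here; a
// user-facing ChainID would be Bech32 encoded (e.g. "cosmoschain...") rather than hex.
func chainID(desc string) string {
	h := sha256.Sum256([]byte(desc))
	return hex.EncodeToString(h[:])
}

func main() {
	desc := buildChainDescription("gaia-hub", 10, 1, "ab...", "cd...", "ef...")
	fmt.Println(desc, "->", chainID(desc))
}
```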
|
||||
|
||||
#### Forks and Upgrades
|
||||
|
||||
When a chain forks or upgrades but continues the same history, it takes a new ChainDescription as follows:
|
||||
|
||||
```
|
||||
ChainDescription = <ChainID>/x/<Height>/<ForkDescription>
|
||||
```
|
||||
|
||||
Where
|
||||
|
||||
- ChainID is the ChainID from the previous ChainDescription (ie. its hash)
|
||||
- `x` denotes that a change occurred
- `Height` is the height at which the change occurred
|
||||
- ForkDescription has the same form as ChainDescription but for the fork
|
||||
- this allows forks to specify new versions for tendermint or the app, as well as arbitrary changes to the state or validator set
|
||||
@@ -1,100 +0,0 @@
|
||||
# ADR 018: ABCI Validator Improvements
|
||||
|
||||
## Changelog
|
||||
|
||||
16-08-2018: Follow up from review:
  - Revert changes to commit round
  - Remind about justification for removing pubkey
  - Update pros/cons
|
||||
05-08-2018: Initial draft
|
||||
|
||||
## Context
|
||||
|
||||
ADR 009 introduced major improvements to the ABCI around validators and the use
|
||||
of Amino. Here we follow up with some additional changes to improve the naming
|
||||
and expected use of Validator messages.
|
||||
|
||||
## Decision
|
||||
|
||||
### Validator
|
||||
|
||||
Currently a Validator contains `address` and `pub_key`, and one or the other is
|
||||
optional/not-sent depending on the use case. Instead, we should have a
|
||||
`Validator` (with just the address, used for RequestBeginBlock)
|
||||
and a `ValidatorUpdate` (with the pubkey, used for ResponseEndBlock):
|
||||
|
||||
```
|
||||
message Validator {
|
||||
bytes address
|
||||
int64 power
|
||||
}
|
||||
|
||||
message ValidatorUpdate {
|
||||
PubKey pub_key
|
||||
int64 power
|
||||
}
|
||||
```
|
||||
|
||||
As noted in [ADR-009](adr-009-ABCI-design.md),
|
||||
the `Validator` does not contain a pubkey because quantum public keys are
|
||||
quite large and it would be wasteful to send them all over ABCI with every block.
|
||||
Thus, applications that want to take advantage of the information in BeginBlock
|
||||
are _required_ to store pubkeys in state (or use much less efficient lazy means
|
||||
of verifying BeginBlock data).
|
||||
|
||||
### RequestBeginBlock
|
||||
|
||||
LastCommitInfo currently has an array of `SigningValidator` that contains
|
||||
information for each validator in the entire validator set.
|
||||
Instead, this should be called `VoteInfo`, since it is information about the
|
||||
validator votes.
|
||||
|
||||
Note that all votes in a commit must be from the same round.
|
||||
|
||||
```
|
||||
message LastCommitInfo {
|
||||
int64 round
|
||||
repeated VoteInfo commit_votes
|
||||
}
|
||||
|
||||
message VoteInfo {
|
||||
Validator validator
|
||||
bool signed_last_block
|
||||
}
|
||||
```
|
||||
|
||||
### ResponseEndBlock
|
||||
|
||||
Use ValidatorUpdates instead of Validators. Then it's clear we don't need an
|
||||
address, and we do need a pubkey.
|
||||
|
||||
We could require the address here as well as a sanity check, but it doesn't seem
|
||||
necessary.
|
||||
|
||||
### InitChain
|
||||
|
||||
Use ValidatorUpdates for both Request and Response. InitChain
|
||||
is about setting/updating the initial validator set, unlike BeginBlock
|
||||
which is just informational.
|
||||
|
||||
## Status
|
||||
|
||||
Proposal.
|
||||
|
||||
## Consequences
|
||||
|
||||
### Positive
|
||||
|
||||
- Clarifies the distinction between the different uses of validator information
|
||||
|
||||
### Negative
|
||||
|
||||
- Apps must still store the public keys in state to utilize the RequestBeginBlock info
|
||||
|
||||
### Neutral
|
||||
|
||||
- ResponseEndBlock does not require an address
|
||||
|
||||
## References
|
||||
|
||||
- [Latest ABCI Spec](https://github.com/tendermint/tendermint/blob/v0.22.8/docs/app-dev/abci-spec.md)
|
||||
- [ADR-009](https://github.com/tendermint/tendermint/blob/v0.22.8/docs/architecture/adr-009-ABCI-design.md)
|
||||
- [Issue #1712 - Don't send PubKey in
|
||||
RequestBeginBlock](https://github.com/tendermint/tendermint/issues/1712)
|
||||
@@ -1,160 +0,0 @@
|
||||
# ADR 019: Encoding standard for Multisignatures
|
||||
|
||||
## Changelog
|
||||
|
||||
06-08-2018: Minor updates
|
||||
|
||||
27-07-2018: Update draft to use amino encoding
|
||||
|
||||
11-07-2018: Initial Draft
|
||||
|
||||
## Context
|
||||
|
||||
Multisignatures, or technically _Accountable Subgroup Multisignatures_ (ASM),
|
||||
are signature schemes which enable any subgroup of a set of signers to sign any message,
|
||||
and reveal to the verifier exactly who the signers were.
|
||||
This allows for complex conditionals of when to validate a signature.
|
||||
|
||||
Suppose the set of signers is of size _n_.
|
||||
If we validate a signature if any subgroup of size _k_ signs a message,
|
||||
this becomes what is commonly referred to as a _k of n multisig_ in Bitcoin.
|
||||
|
||||
This ADR specifies the encoding standard for general accountable subgroup multisignatures,
|
||||
k of n accountable subgroup multisignatures, and its weighted variant.
|
||||
|
||||
In the future, we can also allow for more complex conditionals on the accountable subgroup.
|
||||
|
||||
## Proposed Solution
|
||||
|
||||
### New structs
|
||||
|
||||
Every ASM will then have its own struct, implementing the crypto.Pubkey interface.
|
||||
|
||||
This ADR assumes that [replacing crypto.Signature with []bytes](https://github.com/tendermint/tendermint/issues/1957) has been accepted.
|
||||
|
||||
#### K of N threshold signature
|
||||
|
||||
The pubkey is the following struct:
|
||||
|
||||
```golang
|
||||
type ThresholdMultiSignaturePubKey struct { // K of N threshold multisig
|
||||
K uint `json:"threshold"`
|
||||
Pubkeys []crypto.Pubkey `json:"pubkeys"`
|
||||
}
|
||||
```
|
||||
|
||||
We will derive N from the length of pubkeys. (For spatial efficiency in encoding)
|
||||
|
||||
`Verify` will expect an `[]byte` encoded version of the Multisignature
(the Multisignature is described in the next section).
The multisignature will be rejected if the bitmap has fewer than k indices set,
or if the signature at any set index is not a valid signature from
the corresponding public key on the message.
(If more than k signatures are included, all must be valid.)
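
A hedged Go sketch of that verification rule: walk the bitmap, require at least k set bits, and check each included signature against the public key at the same index. The `PubKey` interface and the plain `[]bool` bitmap are simplified stand-ins for the real crypto types.

```go
package multisig

import "errors"

// PubKey is a simplified stand-in for crypto.PubKey.
type PubKey interface {
	VerifyBytes(msg, sig []byte) bool
}

// verifyThreshold checks a K-of-N multisignature: bitmap[i] marks whether
// pubkeys[i] contributed a signature, and sigs holds the included signatures
// in index order. Every included signature must verify, and at least k
// signers must be present.
func verifyThreshold(k uint, pubkeys []PubKey, bitmap []bool, sigs [][]byte, msg []byte) error {
	if len(bitmap) != len(pubkeys) {
		return errors.New("bitmap length must equal number of pubkeys")
	}
	sigIdx, included := 0, uint(0)
	for i, set := range bitmap {
		if !set {
			continue
		}
		if sigIdx >= len(sigs) || !pubkeys[i].VerifyBytes(msg, sigs[sigIdx]) {
			return errors.New("invalid signature in multisig")
		}
		sigIdx++
		included++
	}
	if included < k {
		return errors.New("not enough signers")
	}
	return nil
}
```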
|
||||
|
||||
`Bytes` will be the amino encoded version of the pubkey.
|
||||
|
||||
Address will be `Hash(amino_encoded_pubkey)`
|
||||
|
||||
The reason this doesn't use `log_8(n)` bytes per signer is because that heavily optimizes for the case where a very small number of signers are required.
|
||||
e.g. for `n` of size `24`, that would only be more space efficient for `k < 3`.
|
||||
This seems less likely, and should not be the case that is optimized for.
|
||||
|
||||
#### Weighted threshold signature
|
||||
|
||||
The pubkey is the following struct:
|
||||
|
||||
```golang
|
||||
type WeightedThresholdMultiSignaturePubKey struct {
|
||||
Weights []uint `json:"weights"`
|
||||
Threshold uint `json:"threshold"`
|
||||
Pubkeys []crypto.Pubkey `json:"pubkeys"`
|
||||
}
|
||||
```
|
||||
|
||||
Weights and Pubkeys must be of the same length.
|
||||
Everything else proceeds identically to the K of N multisig,
|
||||
except the multisig fails if the sum of the weights is less than the threshold.
|
||||
|
||||
#### Multisignature
|
||||
|
||||
The intermediate form of the signature (as it accrues more signatures) will be the following struct:
|
||||
|
||||
```golang
|
||||
type Multisignature struct {
|
||||
BitArray CryptoBitArray // Documented later
|
||||
Sigs [][]byte
|
||||
}
```
|
||||
|
||||
It is important to recall that each private key will output a signature on the provided message itself.
|
||||
So no signing algorithm ever outputs the multisignature.
|
||||
The UI will take a signature, cast into a multisignature, and then keep adding
|
||||
new signatures into it, and when done marshal into `[]byte`.
|
||||
This will require the following helper methods:
|
||||
|
||||
```golang
|
||||
func SigToMultisig(sig []byte, n int)
|
||||
func GetIndex(pk crypto.Pubkey, []crypto.Pubkey)
|
||||
func AddSignature(sig Signature, index int, multiSig *Multisignature)
|
||||
```
|
||||
|
||||
The multisignature will be converted to an `[]byte` using amino.MarshalBinaryBare. \*
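
As a sketch of the `AddSignature` helper listed above, assuming each signer is added at most once and using a plain `[]bool` in place of the `CryptoBitArray` described below for readability; the real implementation would marshal the result with amino.

```go
package multisig

// Multisignature accrues signatures one at a time; the bit array is
// simplified to a []bool here for readability.
type Multisignature struct {
	BitArray []bool
	Sigs     [][]byte
}

// AddSignature inserts sig at the slot corresponding to the signer's index in
// the pubkey set, keeping Sigs ordered by signer index. It assumes the signer
// at `index` has not already been added.
func AddSignature(sig []byte, index int, mSig *Multisignature) {
	mSig.BitArray[index] = true
	// Count how many earlier signers already contributed, to find the slot.
	slot := 0
	for i := 0; i < index; i++ {
		if mSig.BitArray[i] {
			slot++
		}
	}
	mSig.Sigs = append(mSig.Sigs, nil)
	copy(mSig.Sigs[slot+1:], mSig.Sigs[slot:])
	mSig.Sigs[slot] = sig
}
```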
|
||||
|
||||
#### Bit Array
|
||||
|
||||
We would be using a new implementation of a bitarray. The struct it would be encoded/decoded from is
|
||||
|
||||
```golang
|
||||
type CryptoBitArray struct {
|
||||
ExtraBitsStored byte `json:"extra_bits"` // The number of extra bits in elems.
|
||||
Elems []byte `json:"elems"`
|
||||
}
|
||||
```
|
||||
|
||||
The reason for not using the BitArray currently implemented in `libs/common/bit_array.go`
|
||||
is that it is less space efficient, due to a space / time trade-off.
|
||||
Evidence for this is outlined in [this issue](https://github.com/tendermint/tendermint/issues/2077).
|
||||
|
||||
In the multisig, we will not be performing arithmetic operations,
|
||||
so there is no performance increase with the current implementation,
|
||||
and just loss of spatial efficiency.
|
||||
Implementing this new bit array with `[]byte` _should_ be simple, as no
|
||||
arithmetic operations between bit arrays are required, and it saves a couple of bytes.
|
||||
(Explained in that same issue)
|
||||
|
||||
When this bit array is encoded, the number of elements is encoded due to amino.
However, we may be encoding a full byte for what actually only needs 1-7 bits.
We store that difference in ExtraBitsStored.
This allows us to have an unbounded number of signers, and is more space efficient than what is currently used in `libs/common`.
Again, the implementation of this space-saving feature is straightforward.
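
One way the packing could look, as an assumed sketch rather than the final implementation: bits are packed into `Elems`, and `ExtraBitsStored` records how many bits of the last byte are actually in use so the true length survives encoding.

```go
package multisig

// CryptoBitArray packs one bit per signer into Elems. ExtraBitsStored records
// how many bits of the final byte are actually used (0 meaning the last byte
// is full), so the exact bit count survives an amino round-trip.
type CryptoBitArray struct {
	ExtraBitsStored byte   `json:"extra_bits"`
	Elems           []byte `json:"elems"`
}

// NewCryptoBitArray allocates space for nBits signers.
func NewCryptoBitArray(nBits int) CryptoBitArray {
	return CryptoBitArray{
		ExtraBitsStored: byte(nBits % 8),
		Elems:           make([]byte, (nBits+7)/8),
	}
}

// SetIndex sets or clears bit i.
func (b *CryptoBitArray) SetIndex(i int, v bool) {
	if v {
		b.Elems[i/8] |= 1 << uint(i%8)
		return
	}
	b.Elems[i/8] &^= 1 << uint(i%8)
}

// GetIndex reports whether bit i is set.
func (b CryptoBitArray) GetIndex(i int) bool {
	return b.Elems[i/8]&(1<<uint(i%8)) != 0
}
```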
|
||||
|
||||
### Encoding the structs
|
||||
|
||||
We will use straightforward amino encoding. This is chosen for ease of compatibility with other languages.
|
||||
|
||||
### Future points of discussion
|
||||
|
||||
If desired, we can use ed25519 batch verification for all ed25519 keys.
|
||||
This is a future point of discussion, but would be backwards compatible as this information won't need to be marshalled.
|
||||
(There may even be cofactor concerns without ristretto)
|
||||
Aggregation of pubkeys / sigs in Schnorr sigs / BLS sigs is not backwards compatible, and would need to be a new ASM type.
|
||||
|
||||
## Status
|
||||
|
||||
Proposed.
|
||||
|
||||
## Consequences
|
||||
|
||||
### Positive
|
||||
|
||||
- Supports multisignatures, in a way that won't require any special cases in our downstream verification code.
|
||||
- Easy to serialize / deserialize
|
||||
- Unbounded number of signers
|
||||
|
||||
### Negative
|
||||
|
||||
- Larger codebase; however, this should reside in a subfolder of tendermint/crypto, as it provides no new interfaces. (Ref https://github.com/tendermint/go-crypto/issues/136)
|
||||
- Space inefficient due to utilization of amino encoding
|
||||
- Suggested implementation requires a new struct for every ASM.
|
||||
|
||||
### Neutral
|
||||
@@ -1,104 +0,0 @@
|
||||
# ADR 020: Limiting txs size inside a block
|
||||
|
||||
## Changelog
|
||||
|
||||
13-08-2018: Initial Draft
|
||||
15-08-2018: Second version after Dev's comments
|
||||
28-08-2018: Third version after Ethan's comments
|
||||
30-08-2018: AminoOverheadForBlock => MaxAminoOverheadForBlock
|
||||
31-08-2018: Bounding evidence and chain ID
|
||||
13-01-2019: Add section on MaxBytes vs MaxDataBytes
|
||||
|
||||
## Context
|
||||
|
||||
We currently use MaxTxs to reap txs from the mempool when proposing a block,
|
||||
but enforce MaxBytes when unmarshalling a block, so we could easily propose a
|
||||
block that's too large to be valid.
|
||||
|
||||
We should just remove MaxTxs altogether and stick with MaxBytes, and have a
|
||||
`mempool.ReapMaxBytes`.
|
||||
|
||||
But we can't just reap BlockSize.MaxBytes, since MaxBytes is for the entire block,
|
||||
not for the txs inside the block. There's extra amino overhead + the actual
|
||||
headers on top of the actual transactions + evidence + last commit.
|
||||
We could also consider using a MaxDataBytes instead of or in addition to MaxBytes.
|
||||
|
||||
## MaxBytes vs MaxDataBytes
|
||||
|
||||
The [PR #3045](https://github.com/tendermint/tendermint/pull/3045) suggested
|
||||
additional clarity/justification was necessary here, with respect to the use
|
||||
of MaxDataBytes in addition to, or instead of, MaxBytes.
|
||||
|
||||
MaxBytes provides a clear limit on the total size of a block that requires no
|
||||
additional calculation if you want to use it to bound resource usage, and there
|
||||
has been considerable discussion about optimizing tendermint around 1MB blocks.
|
||||
Regardless, we need some maximum on the size of a block so we can avoid
|
||||
unmarshalling blocks that are too big during the consensus, and it seems more
|
||||
straightforward to provide a single fixed number for this rather than a
|
||||
computation of "MaxDataBytes + everything else you need to make room for
|
||||
(signatures, evidence, header)". MaxBytes provides a simple bound so we can
|
||||
always say "blocks are less than X MB".
|
||||
|
||||
Having both MaxBytes and MaxDataBytes feels like unnecessary complexity. It's
|
||||
not particularly surprising for MaxBytes to imply the maximum size of the
|
||||
entire block (not just txs), one just has to know that a block includes header,
|
||||
txs, evidence, votes. For more fine grained control over the txs included in the
|
||||
block, there is the MaxGas. In practice, the MaxGas may be expected to do most of
|
||||
the tx throttling, and the MaxBytes to just serve as an upper bound on the total
|
||||
size. Applications can use MaxGas as a MaxDataBytes by just taking the gas for
|
||||
every tx to be its size in bytes.
|
||||
|
||||
## Proposed solution
|
||||
|
||||
Therefore, we should
|
||||
|
||||
1) Get rid of MaxTxs.
|
||||
2) Rename MaxTxsBytes to MaxBytes.
|
||||
|
||||
When we need to ReapMaxBytes from the mempool, we calculate the upper bound as follows:
|
||||
|
||||
```
|
||||
ExactLastCommitBytes = {number of validators currently enabled} * {MaxVoteBytes}
|
||||
MaxEvidenceBytesPerBlock = MaxBytes / 10
|
||||
ExactEvidenceBytes = cs.evpool.PendingEvidence(MaxEvidenceBytesPerBlock) * MaxEvidenceBytes
|
||||
|
||||
mempool.ReapMaxBytes(MaxBytes - MaxAminoOverheadForBlock - ExactLastCommitBytes - ExactEvidenceBytes - MaxHeaderBytes)
|
||||
```
|
||||
|
||||
where MaxVoteBytes, MaxEvidenceBytes, MaxHeaderBytes and MaxAminoOverheadForBlock
|
||||
are constants defined inside the `types` package:
|
||||
|
||||
- MaxVoteBytes - 170 bytes
|
||||
- MaxEvidenceBytes - 364 bytes
|
||||
- MaxHeaderBytes - 476 bytes (~276 bytes of hashes + 200 bytes for a chain ID of up to 50 UTF-8
  encoded symbols at 4 bytes each in the worst case + amino overhead)
|
||||
- MaxAminoOverheadForBlock - 8 bytes (assuming MaxHeaderBytes includes amino
|
||||
overhead for encoding header, MaxVoteBytes - for encoding vote, etc.)
|
||||
|
||||
ChainID needs to be bounded to 50 symbols max.
|
||||
|
||||
When reaping evidence, we use MaxBytes to calculate the upper bound (e.g. 1/10)
|
||||
to save some space for transactions.
|
||||
|
||||
NOTE: while reaping the `max int` bytes in mempool, we should account for the fact that every
transaction will take `len(tx)+aminoOverhead`, where aminoOverhead=1-4 bytes.
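
The upper-bound computation above can be sketched in Go as follows. The constants use the illustrative values listed (170/364/476/8 bytes); the real constants would live in the `types` package and must track the underlying structs.

```go
package types

const (
	MaxVoteBytes             int64 = 170 // illustrative value from the list above
	MaxEvidenceBytes         int64 = 364
	MaxHeaderBytes           int64 = 476
	MaxAminoOverheadForBlock int64 = 8
)

// MaxDataBytes returns the byte budget left for transactions after reserving
// room for the header, last commit, and evidence, following the formula above.
// valsCount is the number of validators signing the last commit and
// evidenceCount the number of pending evidence items being included.
func MaxDataBytes(maxBytes int64, valsCount, evidenceCount int) int64 {
	return maxBytes -
		MaxAminoOverheadForBlock -
		MaxHeaderBytes -
		int64(valsCount)*MaxVoteBytes -
		int64(evidenceCount)*MaxEvidenceBytes
}
```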
|
||||
|
||||
We should write a test that fails if the underlying structs change but the
MaxXXX constants stay the same.
|
||||
|
||||
## Status
|
||||
|
||||
Accepted.
|
||||
|
||||
## Consequences
|
||||
|
||||
### Positive
|
||||
|
||||
* one way to limit the size of a block
|
||||
* less variables to configure
|
||||
|
||||
### Negative
|
||||
|
||||
* constants that need to be adjusted if the underlying structs change
|
||||
|
||||
### Neutral
|
||||
@@ -1,52 +0,0 @@
|
||||
# ADR 012: ABCI Events
|
||||
|
||||
## Changelog
|
||||
|
||||
- *2018-09-02* Remove ABCI errors component. Update description for events
|
||||
- *2018-07-12* Initial version
|
||||
|
||||
## Context
|
||||
|
||||
ABCI tags were first described in [ADR 002](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-002-event-subscription.md).
|
||||
They are key-value pairs that can be used to index transactions.
|
||||
|
||||
Currently, ABCI messages return a list of tags to describe an
|
||||
"event" that took place during the Check/DeliverTx/Begin/EndBlock,
|
||||
where each tag refers to a different property of the event, like the sending and receiving account addresses.
|
||||
|
||||
Since there is only one list of tags, recording data for multiple such events in
|
||||
a single Check/DeliverTx/Begin/EndBlock must be done using prefixes in the key
|
||||
space.
|
||||
|
||||
Alternatively, groups of tags that constitute an event can be separated by a
|
||||
special tag that denotes a break between the events. This would allow
|
||||
straightforward encoding of multiple events into a single list of tags without
|
||||
prefixing, at the cost of these "special" tags to separate the different events.
|
||||
|
||||
TODO: brief description of how the indexing works
|
||||
|
||||
## Decision
|
||||
|
||||
Instead of returning a list of tags, return a list of events, where
|
||||
each event is a list of tags. This way we naturally capture the concept of
|
||||
multiple events happening during a single ABCI message.
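
As a rough Go sketch of the proposed shape (the concrete protobuf messages would be defined in the ABCI types; the names here are stand-ins):

```go
package abci

// KVPair is a simplified stand-in for the existing tag type.
type KVPair struct {
	Key   []byte
	Value []byte
}

// Event groups the tags that describe one logical occurrence (e.g. a single
// transfer) during Check/DeliverTx/Begin/EndBlock.
type Event struct {
	Attributes []KVPair // e.g. sender, recipient, amount
}

// A response then carries a list of events instead of one flat tag list, so
// two transfers in a single DeliverTx stay cleanly separated without key
// prefixing or special separator tags.
type ResponseDeliverTx struct {
	Code   uint32
	Events []Event
}
```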
|
||||
|
||||
TODO: describe impact on indexing and querying
|
||||
|
||||
## Status
|
||||
|
||||
Proposed
|
||||
|
||||
## Consequences
|
||||
|
||||
### Positive
|
||||
|
||||
- Ability to track distinct events separate from ABCI calls (DeliverTx/BeginBlock/EndBlock)
|
||||
- More powerful query abilities
|
||||
|
||||
### Negative
|
||||
|
||||
- More complex query syntax
|
||||
- More complex search implementation
|
||||
|
||||
### Neutral
|
||||
@@ -1,64 +0,0 @@
|
||||
# ADR 023: ABCI Codespaces
|
||||
|
||||
## Changelog
|
||||
|
||||
- *2018-09-01* Initial version
|
||||
|
||||
## Context
|
||||
|
||||
ABCI errors should provide an abstraction between application details
|
||||
and the client interface responsible for formatting & displaying errors to the user.
|
||||
|
||||
Currently, this abstraction consists of a single integer (the `code`), where any
|
||||
`code > 0` is considered an error (ie. invalid transaction) and all type
|
||||
information about the error is contained in the code. This integer is
|
||||
expected to be decoded by the client into a known error string, where any
|
||||
more specific data is contained in the `data`.
|
||||
|
||||
In a [previous conversation](https://github.com/tendermint/abci/issues/165#issuecomment-353704015),
|
||||
it was suggested that not all non-zero codes need to be errors, hence why it's called `code` and not `error code`.
|
||||
It is unclear exactly how the semantics of the `code` field will evolve, though
|
||||
better lite-client proofs (like discussed for tags
|
||||
[here](https://github.com/tendermint/tendermint/issues/1007#issuecomment-413917763))
|
||||
may play a role.
|
||||
|
||||
Note that having all type information in a single integer
|
||||
precludes an easy coordination method between "module implementers" and "client
|
||||
implementers", especially for apps with many "modules". With an unbounded error domain (such as a string), module
|
||||
implementers can pick a globally unique prefix & error code set, so client
|
||||
implementers could easily implement support for "module A" regardless of which
|
||||
particular blockchain network it was running in and which other modules were running with it. With
|
||||
only error codes, globally unique codes are difficult/impossible, as the space
|
||||
is finite and collisions are likely without an easy way to coordinate.
|
||||
|
||||
For instance, while trying to build an ecosystem of modules that can be composed into a single
|
||||
ABCI application, the Cosmos-SDK had to hack a higher level "codespace" into the
|
||||
single integer so that each module could have its own space to express its
|
||||
errors.
|
||||
|
||||
## Decision
|
||||
|
||||
Include a `string code_space` in all ABCI messages that have a `code`.
|
||||
This allows applications to namespace the codes so they can experiment with
|
||||
their own code schemes.
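
A small Go sketch of how a client might dispatch on `(codespace, code)` pairs; the module names and codes are invented purely for illustration.

```go
package main

import "fmt"

// errorKey namespaces a raw code with the module's codespace string.
type errorKey struct {
	Codespace string
	Code      uint32
}

// registry maps (codespace, code) to human-readable errors; each module can
// register its own entries without coordinating a global integer space.
var registry = map[errorKey]string{
	{"bank", 1}:    "insufficient funds",  // illustrative module + code
	{"staking", 1}: "validator not found", // illustrative module + code
}

func describe(codespace string, code uint32) string {
	if msg, ok := registry[errorKey{codespace, code}]; ok {
		return msg
	}
	return fmt.Sprintf("unknown error %d in codespace %q", code, codespace)
}

func main() {
	fmt.Println(describe("bank", 1))
}
```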
|
||||
|
||||
It is the responsibility of applications to limit the size of the `code_space`
|
||||
string.
|
||||
|
||||
How the codespace is hashed into block headers (ie. so it can be queried
|
||||
efficiently by lite clients) is left for a separate ADR.
|
||||
|
||||
## Consequences
|
||||
|
||||
### Positive
|
||||
|
||||
- No need for complex codespacing on a single integer
|
||||
- More expressive type system for errors
|
||||
|
||||
### Negative
|
||||
|
||||
- Another field in the response needs to be accounted for
|
||||
- Some redundancy with `code` field
|
||||
- May encourage more error/code type info to move to the `codespace` string, which
|
||||
could impact lite clients.
|
||||
|
||||
@@ -1,183 +0,0 @@
|
||||
# ADR 012: ABCI `ProposeTx` Method
|
||||
|
||||
## Changelog
|
||||
|
||||
25-06-2018: Initial draft based on [#1776](https://github.com/tendermint/tendermint/issues/1776)
|
||||
|
||||
## Context
|
||||
|
||||
[#1776](https://github.com/tendermint/tendermint/issues/1776) was
|
||||
opened in relation to implementation of a Plasma child chain using Tendermint
|
||||
Core as consensus/replication engine.
|
||||
|
||||
Due to the requirements of [Minimal Viable Plasma (MVP)](https://ethresear.ch/t/minimal-viable-plasma/426) and [Plasma Cash](https://ethresear.ch/t/plasma-cash-plasma-with-much-less-per-user-data-checking/1298), it is necessary for ABCI apps to have a mechanism to handle the following cases (more may emerge in the near future):
|
||||
|
||||
1. `deposit` transactions on the Root Chain, which must consist of a block
|
||||
with a single transaction, where there are no inputs and only one output
|
||||
made in favour of the depositor. In this case, a `block` consists of
|
||||
a transaction with the following shape:
|
||||
|
||||
```
|
||||
[0, 0, 0, 0, #input1 - zeroed out
|
||||
0, 0, 0, 0, #input2 - zeroed out
|
||||
<depositor_address>, <amount>, #output1 - in favour of depositor
|
||||
0, 0, #output2 - zeroed out
|
||||
<fee>,
|
||||
]
|
||||
```
|
||||
|
||||
`exit` transactions may also be treated in a similar manner, wherein the
|
||||
input is the UTXO being exited on the Root Chain, and the output belongs to
|
||||
a reserved "burn" address, e.g., `0x0`. In such cases, it is favourable for
|
||||
the containing block to only hold a single transaction that may receive
|
||||
special treatment.
|
||||
|
||||
2. Other "internal" transactions on the child chain, which may be initiated
|
||||
unilaterally. The most basic example is a coinbase transaction
|
||||
implementing validator node incentives, but may also be app-specific. In
|
||||
these cases, it may be favourable for such transactions to
|
||||
be ordered in a specific manner, e.g., coinbase transactions will always be
|
||||
at index 0. In general, such strategies increase the determinism and
|
||||
predictability of blockchain applications.
|
||||
|
||||
While it is possible to deal with the cases enumerated above using the
|
||||
existing ABCI, the currently available approaches result in suboptimal workarounds. Two are
|
||||
explained in greater detail below.
|
||||
|
||||
### Solution 1: App state-based Plasma chain
|
||||
|
||||
In this work around, the app maintains a `PlasmaStore` with a corresponding
|
||||
`Keeper`. The PlasmaStore is responsible for maintaining a second, separate
|
||||
blockchain that complies with the MVP specification, including `deposit`
|
||||
blocks and other "internal" transactions. These "virtual" blocks are then broadcasted
|
||||
to the Root Chain.
|
||||
|
||||
This naive approach is, however, fundamentally flawed, as it by definition
|
||||
diverges from the canonical chain maintained by Tendermint. This is further
|
||||
exacerbated if the business logic for generating such transactions is
|
||||
potentially non-deterministic, as this should not even be done in
|
||||
`Begin/EndBlock`, which may, as a result, break consensus guarantees.
|
||||
|
||||
Additionally, this has serious implications for "watchers" - independent third parties,
or even an auxiliary blockchain, responsible for ensuring that blocks recorded
|
||||
on the Root Chain are consistent with the Plasma chain's. Since, in this case,
|
||||
the Plasma chain is inconsistent with the canonical one maintained by Tendermint
|
||||
Core, it seems that there exists no compact means of verifying the legitimacy of
|
||||
the Plasma chain without replaying every state transition from genesis (!).
|
||||
|
||||
### Solution 2: Broadcast to Tendermint Core from ABCI app
|
||||
|
||||
This approach is inspired by `ethermint`, in which Ethereum transactions are
|
||||
relayed to Tendermint Core. It requires the app to maintain a client connection
|
||||
to the consensus engine.
|
||||
|
||||
Whenever an "internal" transaction needs to be created, the proposer of the
|
||||
current block broadcasts the transaction or transactions to Tendermint as
|
||||
needed in order to ensure that the Tendermint chain and Plasma chain are
|
||||
completely consistent.
|
||||
|
||||
This allows "internal" transactions to pass through the full consensus
|
||||
process, and can be validated in methods like `CheckTx`, i.e., signed by the
|
||||
proposer, are semantically correct, etc. Note that this involves informing
|
||||
the ABCI app of the block proposer, which was temporarily hacked in as a means
|
||||
of conducting this experiment, although this should not be necessary when the
|
||||
current proposer is passed to `BeginBlock`.
|
||||
|
||||
It is much easier to relay these transactions directly to the Root
|
||||
Chain smart contract and/or maintain a "compressed" auxiliary chain comprised
|
||||
of Plasma-friendly blocks that 100% reflect the canonical (Tendermint)
|
||||
blockchain. Unfortunately, this approach is not idiomatic (i.e., it utilises the
|
||||
Tendermint consensus engine in unintended ways). Additionally, it does not
|
||||
allow the application developer to:
|
||||
|
||||
- Control the _ordering_ of transactions in the proposed block (e.g., index 0,
|
||||
or 0 to `n` for coinbase transactions)
|
||||
- Control the _number_ of transactions in the block (e.g., when a `deposit`
|
||||
block is required)
|
||||
|
||||
Since determinism is of utmost importance in blockchain engineering, this approach,
|
||||
while more viable, should also not be considered as fit for production.
|
||||
|
||||
## Decision
|
||||
|
||||
### `ProposeTx`
|
||||
|
||||
In order to address the difficulties described above, the ABCI interface must
|
||||
expose an additional method, tentatively named `ProposeTx`.
|
||||
|
||||
It should have the following signature:
|
||||
|
||||
```
|
||||
ProposeTx(RequestProposeTx) ResponseProposeTx
|
||||
```
|
||||
|
||||
Where `RequestProposeTx` and `ResponseProposeTx` are `message`s with the
|
||||
following shapes:
|
||||
|
||||
```
|
||||
message RequestProposeTx {
|
||||
int64 next_block_height = 1; // height of the block the proposed tx would be part of
|
||||
Validator proposer = 2; // the proposer details
|
||||
}
|
||||
|
||||
message ResponseProposeTx {
|
||||
int64 num_tx = 1; // the number of tx to include in proposed block
|
||||
repeated bytes txs = 2; // ordered transaction data to include in block
|
||||
bool exclusive = 3; // whether the block should include other transactions (from `mempool`)
|
||||
}
|
||||
```
|
||||
|
||||
`ProposeTx` would be called before `mempool.Reap` at this
|
||||
[line](https://github.com/tendermint/tendermint/blob/9cd9f3338bc80a12590631632c23c8dbe3ff5c34/consensus/state.go#L935).
|
||||
Depending on whether `exclusive` is `true` or `false`, the proposed
|
||||
transactions are then pushed on top of the transactions received from
|
||||
`mempool.Reap`.
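
A hedged sketch of how the proposal path might combine `ProposeTx` output with `mempool.Reap` under the `exclusive` semantics described above; the interfaces here are simplified stand-ins for the real consensus and mempool types, not the actual implementation.

```go
package consensus

// Tx is raw transaction bytes.
type Tx []byte

// ResponseProposeTx mirrors the message proposed above, in Go form.
type ResponseProposeTx struct {
	Txs       []Tx
	Exclusive bool
}

type proposer interface {
	ProposeTx(nextHeight int64) ResponseProposeTx
}

type mempool interface {
	ReapMaxBytes(max int64) []Tx
}

// gatherProposalTxs sketches the ordering rule: app-proposed txs go first,
// and mempool txs are appended only when the app did not ask for exclusivity.
func gatherProposalTxs(app proposer, mp mempool, height, maxDataBytes int64) []Tx {
	res := app.ProposeTx(height)
	txs := append([]Tx{}, res.Txs...)
	if !res.Exclusive {
		txs = append(txs, mp.ReapMaxBytes(maxDataBytes)...)
	}
	return txs
}
```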
|
||||
|
||||
### `DeliverTx`
|
||||
|
||||
Since the list of `tx` received from `ProposeTx` is _not_ passed through `CheckTx`,
it is probably a good idea to provide a means of differentiating "internal" transactions
|
||||
from user-generated ones, in case the app developer needs/wants to take extra measures to
|
||||
ensure validity of the proposed transactions.
|
||||
|
||||
Therefore, the `RequestDeliverTx` message should be changed to provide an additional flag, like so:
|
||||
|
||||
```
|
||||
message RequestDeliverTx {
|
||||
bytes tx = 1;
|
||||
bool internal = 2;
|
||||
}
|
||||
```
|
||||
|
||||
Alternatively, an additional method `DeliverProposeTx` may be added as an accompaniment to
|
||||
`ProposeTx`. However, it is not clear at this stage if this additional overhead is necessary
|
||||
to preserve consensus guarantees given that a simple flag may suffice for now.
|
||||
|
||||
## Status
|
||||
|
||||
Pending
|
||||
|
||||
## Consequences
|
||||
|
||||
### Positive
|
||||
|
||||
- Tendermint ABCI apps will be able to function as minimally viable Plasma chains.
|
||||
- It will thereby become possible to add an extension to `cosmos-sdk` to enable
|
||||
ABCI apps to support both IBC and Plasma, maximising interop.
|
||||
- ABCI apps will have great control and flexibility in managing blockchain state,
|
||||
without having to resort to non-deterministic hacks and/or unsafe workarounds
|
||||
|
||||
### Negative
|
||||
|
||||
- Maintenance overhead of exposing additional ABCI method
|
||||
- Potential security issues that may have been overlooked and must now be tested extensively
|
||||
|
||||
### Neutral
|
||||
|
||||
- ABCI developers must deal with increased (albeit nominal) API surface area.
|
||||
|
||||
## References
|
||||
|
||||
- [#1776 Plasma and "Internal" Transactions in ABCI Apps](https://github.com/tendermint/tendermint/issues/1776)
|
||||
- [Minimal Viable Plasma](https://ethresear.ch/t/minimal-viable-plasma/426)
|
||||
- [Plasma Cash: Plasma with much less per-user data checking](https://ethresear.ch/t/plasma-cash-plasma-with-much-less-per-user-data-checking/1298)
|
||||
@@ -1,234 +0,0 @@
|
||||
# ADR 024: SignBytes and validator types in privval
|
||||
|
||||
## Context
|
||||
|
||||
Currently, the messages exchanged between tendermint and a (potentially remote) signer/validator,
|
||||
namely votes, proposals, and heartbeats, are encoded as a JSON string
|
||||
(e.g., via `Vote.SignBytes(...)`) and then signed. JSON encoding is sub-optimal for both
hardware wallets and for usage in Ethereum smart contracts. Both issues are laid out in detail in [issue#1622].
|
||||
|
||||
Also, there is currently no distinction between sign requests and replies, and there is no way
for a remote signer to include an error code or message in case something went wrong.
|
||||
The messages exchanged between tendermint and a remote signer currently live in
|
||||
[privval/socket.go] and encapsulate the corresponding types in [types].
|
||||
|
||||
|
||||
[privval/socket.go]: https://github.com/tendermint/tendermint/blob/d419fffe18531317c28c29a292ad7d253f6cafdf/privval/socket.go#L496-L502
|
||||
[issue#1622]: https://github.com/tendermint/tendermint/issues/1622
|
||||
[types]: https://github.com/tendermint/tendermint/tree/master/types
|
||||
|
||||
|
||||
## Decision
|
||||
|
||||
- restructure vote, proposal, and heartbeat such that their encoding is easily parseable by
|
||||
hardware devices and smart contracts using a binary encoding format ([amino] in this case)
|
||||
- split up the messages exchanged between tendermint and remote signers into requests and
|
||||
responses (see details below)
|
||||
- include an error type in responses
|
||||
|
||||
### Overview
|
||||
```
|
||||
+--------------+ +----------------+
|
||||
| | SignXRequest | |
|
||||
|Remote signer |<---------------------+ tendermint |
|
||||
| (e.g. KMS) | | |
|
||||
| +--------------------->| |
|
||||
+--------------+ SignedXReply +----------------+
|
||||
|
||||
|
||||
SignXRequest {
|
||||
x: X
|
||||
}
|
||||
|
||||
SignedXReply {
|
||||
x: X
|
||||
sig: Signature // []byte
|
||||
err: Error{
|
||||
code: int
|
||||
desc: string
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
TODO: Alternatively, the type `X` might directly include the signature. A lot of places expect a vote with a
|
||||
signature and do not necessarily deal with "Replies".
|
||||
Still exploring what would work best here.
|
||||
This would look like (exemplified using X = Vote):
|
||||
```
|
||||
Vote {
|
||||
// all fields besides signature
|
||||
}
|
||||
|
||||
SignedVote {
|
||||
Vote Vote
|
||||
Signature []byte
|
||||
}
|
||||
|
||||
SignVoteRequest {
|
||||
Vote Vote
|
||||
}
|
||||
|
||||
SignedVoteReply {
|
||||
Vote SignedVote
|
||||
Err Error
|
||||
}
|
||||
```
|
||||
|
||||
**Note:** There was a related discussion around including a fingerprint of, or, the whole public-key
|
||||
into each sign-request to tell the signer which corresponding private-key to
|
||||
use to sign the message. This is particularly relevant in the context of the KMS
|
||||
but is currently not considered in this ADR.
|
||||
|
||||
|
||||
[amino]: https://github.com/tendermint/go-amino/
|
||||
|
||||
### Vote
|
||||
|
||||
As explained in [issue#1622] `Vote` will be changed to contain the following fields
|
||||
(notation in protobuf-like syntax for easy readability):
|
||||
|
||||
```proto
|
||||
// vanilla protobuf / amino encoded
|
||||
message Vote {
|
||||
Version fixed32
|
||||
Height sfixed64
|
||||
Round sfixed32
|
||||
VoteType fixed32
|
||||
Timestamp Timestamp // << using protobuf definition
|
||||
BlockID BlockID // << as already defined
|
||||
ChainID string // at the end because length could vary a lot
|
||||
}
|
||||
|
||||
// this is an amino registered type; like currently privval.SignVoteMsg:
|
||||
// registered with "tendermint/socketpv/SignVoteRequest"
|
||||
message SignVoteRequest {
|
||||
Vote vote
|
||||
}
|
||||
|
||||
// amino registered type
|
||||
// registered with "tendermint/socketpv/SignedVoteReply"
|
||||
message SignedVoteReply {
|
||||
Vote Vote
|
||||
Signature Signature
|
||||
Err Error
|
||||
}
|
||||
|
||||
// we will use this type everywhere below
|
||||
message Error {
|
||||
Type uint // error code
|
||||
Description string // optional description
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
The `ChainID` gets moved into the vote message directly. Previously, it was injected
|
||||
using the [Signable] interface method `SignBytes(chainID string) []byte`. Also, the
|
||||
signature won't be included directly, only in the corresponding `SignedVoteReply` message.
|
||||
|
||||
[Signable]: https://github.com/tendermint/tendermint/blob/d419fffe18531317c28c29a292ad7d253f6cafdf/types/signable.go#L9-L11
|
||||
|
||||
### Proposal
|
||||
|
||||
```proto
|
||||
// vanilla protobuf / amino encoded
|
||||
message Proposal {
|
||||
Height sfixed64
|
||||
Round sfixed32
|
||||
Timestamp Timestamp // << using protobuf definition
|
||||
BlockPartsHeader PartSetHeader // as already defined
|
||||
POLRound sfixed32
|
||||
POLBlockID BlockID // << as already defined
|
||||
}
|
||||
|
||||
// amino registered with "tendermint/socketpv/SignProposalRequest"
|
||||
message SignProposalRequest {
|
||||
Proposal proposal
|
||||
}
|
||||
|
||||
// amino registered with "tendermint/socketpv/SignProposalReply"
|
||||
message SignProposalReply {
|
||||
Prop Proposal
|
||||
Sig Signature
|
||||
Err Error // as defined above
|
||||
}
|
||||
```
|
||||
|
||||
### Heartbeat
|
||||
|
||||
**TODO**: clarify if heartbeat also needs a fixed offset and update the fields accordingly:
|
||||
|
||||
```proto
|
||||
message Heartbeat {
|
||||
ValidatorAddress Address
|
||||
ValidatorIndex int
|
||||
Height int64
|
||||
Round int
|
||||
Sequence int
|
||||
}
|
||||
// amino registered with "tendermint/socketpv/SignHeartbeatRequest"
|
||||
message SignHeartbeatRequest {
|
||||
Hb Heartbeat
|
||||
}
|
||||
|
||||
// amino registered with "tendermint/socketpv/SignHeartbeatReply"
|
||||
message SignHeartbeatReply {
|
||||
Hb Heartbeat
|
||||
Sig Signature
|
||||
Err Error // as defined above
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
## PubKey
|
||||
|
||||
TBA - this needs further thought: e.g. what to do in the case of the KMS, which holds
several keys? How does it know with which key to reply?
|
||||
|
||||
## SignBytes
|
||||
`SignBytes` will not require a `ChainID` parameter:
|
||||
|
||||
```golang
|
||||
type Signable interface {
|
||||
SignBytes() []byte
|
||||
}
|
||||
|
||||
```
|
||||
And the implementation for vote, heartbeat, proposal will look like:
|
||||
```golang
|
||||
// type T is one of vote, heartbeat, proposal
|
||||
func (tp *T) SignBytes() []byte {
|
||||
bz, err := cdc.MarshalBinary(tp)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return bz
|
||||
}
|
||||
```
|
||||
|
||||
## Status
|
||||
|
||||
DRAFT
|
||||
|
||||
## Consequences
|
||||
|
||||
|
||||
|
||||
### Positive
|
||||
|
||||
The most relevant positive effect is that the signing bytes can easily be parsed by a
|
||||
hardware module and a smart contract. Besides that:
|
||||
|
||||
- clearer separation between requests and responses
|
||||
- added error messages enable better error handling
|
||||
|
||||
|
||||
### Negative
|
||||
|
||||
- relatively large change / refactoring touching quite a lot of code
- lots of places assume a `Vote` with a signature included -> they will need to be updated
- need to modify some interfaces
|
||||
|
||||
### Neutral
|
||||
|
||||
not even the swiss are neutral
|
||||
@@ -1,150 +0,0 @@
|
||||
# ADR 025 Commit
|
||||
|
||||
## Context
|
||||
|
||||
Currently the `Commit` structure contains a lot of potentially redundant or unnecessary data.
|
||||
It contains a list of precommits from every validator, where the precommit
|
||||
includes the whole `Vote` structure. Thus each of the commit height, round,
|
||||
type, and blockID are repeated for every validator, and could be deduplicated,
|
||||
leading to very significant savings in block size.
|
||||
|
||||
```
|
||||
type Commit struct {
|
||||
BlockID BlockID `json:"block_id"`
|
||||
Precommits []*Vote `json:"precommits"`
|
||||
}
|
||||
|
||||
type Vote struct {
|
||||
ValidatorAddress Address `json:"validator_address"`
|
||||
ValidatorIndex int `json:"validator_index"`
|
||||
Height int64 `json:"height"`
|
||||
Round int `json:"round"`
|
||||
Timestamp time.Time `json:"timestamp"`
|
||||
Type byte `json:"type"`
|
||||
BlockID BlockID `json:"block_id"`
|
||||
Signature []byte `json:"signature"`
|
||||
}
|
||||
```
|
||||
|
||||
The original tracking issue for this is [#1648](https://github.com/tendermint/tendermint/issues/1648).
|
||||
We have discussed replacing the `Vote` type in `Commit` with a new `CommitSig`
|
||||
type, which includes at minimum the vote signature. The `Vote` type will
|
||||
continue to be used in the consensus reactor and elsewhere.
|
||||
|
||||
A primary question is what should be included in the `CommitSig` beyond the
|
||||
signature. One current constraint is that we must include a timestamp, since
|
||||
this is how we calculate BFT time, though we may be able to change this [in the
|
||||
future](https://github.com/tendermint/tendermint/issues/2840).
|
||||
|
||||
Other concerns here include:
|
||||
|
||||
- Validator Address [#3596](https://github.com/tendermint/tendermint/issues/3596) -
|
||||
Should the CommitSig include the validator address? It is very convenient to
|
||||
do so, but likely not necessary. This was also discussed in [#2226](https://github.com/tendermint/tendermint/issues/2226).
|
||||
- Absent Votes [#3591](https://github.com/tendermint/tendermint/issues/3591) -
|
||||
How to represent absent votes? Currently they are just present as `nil` in the
|
||||
Precommits list, which is actually problematic for serialization
|
||||
- Other BlockIDs [#3485](https://github.com/tendermint/tendermint/issues/3485) -
|
||||
How to represent votes for nil and for other block IDs? We currently allow
|
||||
votes for nil and votes for alternative block ids, but just ignore them
|
||||
|
||||
|
||||
## Decision
|
||||
|
||||
Deduplicate the fields and introduce `CommitSig`:
|
||||
|
||||
```
|
||||
type Commit struct {
|
||||
Height int64
|
||||
Round int
|
||||
BlockID BlockID `json:"block_id"`
|
||||
Precommits []CommitSig `json:"precommits"`
|
||||
}
|
||||
|
||||
type CommitSig struct {
|
||||
BlockID BlockIDFlag
|
||||
ValidatorAddress Address
|
||||
Timestamp time.Time
|
||||
Signature []byte
|
||||
}
|
||||
|
||||
|
||||
// indicate which BlockID the signature is for
|
||||
type BlockIDFlag int
|
||||
|
||||
const (
|
||||
BlockIDFlagAbsent BlockIDFlag = iota // vote is not included in the Commit.Precommits
|
||||
BlockIDFlagCommit // voted for the Commit.BlockID
|
||||
BlockIDFlagNil // voted for nil
|
||||
)
|
||||
|
||||
```
|
||||
|
||||
Re the concerns outlined in the context:
|
||||
|
||||
**Timestamp**: Leave the timestamp for now. Removing it and switching to
|
||||
proposer based time will take more analysis and work, and will be left for a
|
||||
future breaking change. In the meantime, the concerns with the current approach to
|
||||
BFT time [can be
|
||||
mitigated](https://github.com/tendermint/tendermint/issues/2840#issuecomment-529122431).
|
||||
|
||||
**ValidatorAddress**: we include it in the `CommitSig` for now. While this
|
||||
does increase the block size unnecessarily (20 bytes per validator), it has some ergonomic and debugging advantages:
|
||||
|
||||
- `Commit` contains everything necessary to reconstruct `[]Vote`, and doesn't depend on additional access to a `ValidatorSet`
|
||||
- Lite clients can check if they know the validators in a commit without
|
||||
re-downloading the validator set
|
||||
- Easy to see directly in a commit which validators signed what without having
|
||||
to fetch the validator set
|
||||
|
||||
If and when we change the `CommitSig` again, for instance to remove the timestamp,
|
||||
we can reconsider whether the ValidatorAddress should be removed.
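
As a sketch of the reconstruction point above, here is how a full `Vote` could be rebuilt from a `Commit` entry under the struct definitions in the Decision; the `Vote` fields, `PrecommitType` value, and helper types are simplified stand-ins, not the actual code.

```go
package types

import "time"

// Local stand-ins so the sketch is self-contained; Commit, CommitSig, and
// BlockIDFlag follow the Decision above.
type (
	Address []byte
	BlockID struct{ Hash []byte }
)

type BlockIDFlag int

const (
	BlockIDFlagAbsent BlockIDFlag = iota
	BlockIDFlagCommit
	BlockIDFlagNil
)

type CommitSig struct {
	BlockID          BlockIDFlag
	ValidatorAddress Address
	Timestamp        time.Time
	Signature        []byte
}

type Commit struct {
	Height     int64
	Round      int
	BlockID    BlockID
	Precommits []CommitSig
}

type Vote struct {
	ValidatorAddress Address
	ValidatorIndex   int
	Height           int64
	Round            int
	Timestamp        time.Time
	Type             byte
	BlockID          BlockID
	Signature        []byte
}

const PrecommitType byte = 2 // illustrative value for the precommit vote type

// GetVote rebuilds the idx-th precommit as a full Vote: the shared fields come
// from the Commit, the per-validator fields from the CommitSig, and the
// ValidatorIndex falls out of the slice position. Absent votes return nil and
// nil votes get an empty BlockID.
func (c *Commit) GetVote(idx int) *Vote {
	cs := c.Precommits[idx]
	if cs.BlockID == BlockIDFlagAbsent {
		return nil
	}
	blockID := BlockID{}
	if cs.BlockID == BlockIDFlagCommit {
		blockID = c.BlockID
	}
	return &Vote{
		ValidatorAddress: cs.ValidatorAddress,
		ValidatorIndex:   idx,
		Height:           c.Height,
		Round:            c.Round,
		Timestamp:        cs.Timestamp,
		Type:             PrecommitType,
		BlockID:          blockID,
		Signature:        cs.Signature,
	}
}
```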
|
||||
|
||||
**Absent Votes**: we include absent votes explicitly with no Signature or
|
||||
Timestamp but with the ValidatorAddress. This should resolve the serialization
|
||||
issues and make it easy to see which validator's votes failed to be included.
|
||||
|
||||
**Other BlockIDs**: We use a single byte to indicate which blockID a `CommitSig`
|
||||
is for. The only options are:
|
||||
- `Absent` - no vote received from this validator, so no signature
|
||||
- `Nil` - validator voted Nil - meaning they did not see a polka in time
|
||||
- `Commit` - validator voted for this block
|
||||
|
||||
Note this means we don't allow votes for any other blockIDs. If a signature is
|
||||
included in a commit, it is either for nil or the correct blockID. According to
|
||||
the Tendermint protocol and assumptions, there is no way for a correct validator to
|
||||
precommit for a conflicting blockID in the same round an actual commit was
|
||||
created. This was the consensus from
|
||||
[#3485](https://github.com/tendermint/tendermint/issues/3485)
|
||||
|
||||
We may want to consider supporting other blockIDs later, as a way to capture
|
||||
evidence that might be helpful. We should clarify if/when/how doing so would
|
||||
actually help first. To implement it, we could change the `Commit.BlockID`
|
||||
field to a slice, where the first entry is the correct block ID and the other
|
||||
entries are other BlockIDs that validators precommited before. The BlockIDFlag
|
||||
enum can be extended to represent these additional block IDs on a per block
|
||||
basis.
|
||||
|
||||
## Status
|
||||
|
||||
Accepted
|
||||
|
||||
## Consequences
|
||||
|
||||
### Positive
|
||||
|
||||
Removing the Type/Height/Round/Index and the BlockID saves roughly 80 bytes per precommit.
|
||||
It varies because some integers are varint. The BlockID contains two 32-byte hashes and an integer,
and the Height is 8 bytes.
|
||||
|
||||
For a chain with 100 validators, that's up to 8kB in savings per block!
|
||||
|
||||
|
||||
### Negative
|
||||
|
||||
- Large breaking change to the block and commit structure
|
||||
- Requires differentiating in code between the Vote and CommitSig objects, which may add some complexity (votes need to be reconstructed to be verified and gossiped)
|
||||
|
||||
### Neutral
|
||||
|
||||
- Commit.Precommits no longer contains nil values
|
||||
@@ -1,47 +0,0 @@
|
||||
# ADR 026: General Merkle Proof
|
||||
|
||||
## Context
|
||||
|
||||
We are using raw `[]byte` for merkle proofs in `abci.ResponseQuery`. This makes it hard to handle multilayer merkle proofs and general cases. Here, a new interface `ProofOperator` is defined. Users can define their own Merkle proof formats and layer them easily.
|
||||
|
||||
Goals:
|
||||
- Layer Merkle proofs without decoding/reencoding
|
||||
- Provide general way to chain proofs
|
||||
- Make the proof format extensible, allowing third-party proof types
|
||||
|
||||
## Decision
|
||||
|
||||
### ProofOperator
|
||||
|
||||
`type ProofOperator` is an interface for Merkle proofs. The definition is:
|
||||
|
||||
```go
|
||||
type ProofOperator interface {
|
||||
Run([][]byte) ([][]byte, error)
|
||||
GetKey() []byte
|
||||
ProofOp() ProofOp
|
||||
}
|
||||
```
|
||||
|
||||
Since a proof can treat various data types, `Run()` takes `[][]byte` as its argument, not `[]byte`. For example, a range proof's `Run()` can take multiple key-values as its argument. It will then return the root of the tree for further processing, calculated from the input values.
|
||||
|
||||
`ProofOperator` does not have to be a Merkle proof - it can be a function that transforms the argument for an intermediate step, e.g. prepending the length to the `[]byte`.
|
||||
|
||||
### ProofOp
|
||||
|
||||
`type ProofOp` is a protobuf message which is a triple of `Type string`, `Key []byte`, and `Data []byte`. `ProofOperator` and `ProofOp` are interconvertible, using `ProofOperator.ProofOp()` and `OpDecoder()`, where `OpDecoder` is a function that each proof type can register for its own encoding scheme. For example, we can add a byte before the serialized proof indicating the encoding scheme, supporting JSON decoding.
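
A sketch of how layered proofs chain through `Run()`, using the interface definition above and a trivial length-prefix operator as the non-Merkle example just mentioned; the operator and helper names are illustrative.

```go
package merkle

import "encoding/binary"

type ProofOp struct {
	Type string
	Key  []byte
	Data []byte
}

type ProofOperator interface {
	Run([][]byte) ([][]byte, error)
	GetKey() []byte
	ProofOp() ProofOp
}

// lengthOp is the kind of non-Merkle operator mentioned above: it just
// prepends the length of its single input before handing it to the next layer.
type lengthOp struct{}

func (lengthOp) Run(args [][]byte) ([][]byte, error) {
	buf := make([]byte, 8)
	binary.BigEndian.PutUint64(buf, uint64(len(args[0])))
	return [][]byte{append(buf, args[0]...)}, nil
}
func (lengthOp) GetKey() []byte   { return nil }
func (lengthOp) ProofOp() ProofOp { return ProofOp{Type: "length-prefix"} }

// runProofOps chains operators: the output of one layer (e.g. a sub-tree root)
// becomes the input of the next, with no decoding/re-encoding in between.
// It assumes each operator returns at least one output.
func runProofOps(ops []ProofOperator, leaf []byte) ([]byte, error) {
	args := [][]byte{leaf}
	var err error
	for _, op := range ops {
		if args, err = op.Run(args); err != nil {
			return nil, err
		}
	}
	return args[0], nil
}
```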
|
||||
|
||||
## Status
|
||||
|
||||
## Consequences
|
||||
|
||||
### Positive
|
||||
|
||||
- Layering becomes easier (no encoding/decoding at each step)
|
||||
- Third-party proof formats are available
|
||||
|
||||
### Negative
|
||||
|
||||
- Larger size for abci.ResponseQuery
|
||||
- Unintuitive proof chaining (it is not clear what `Run()` is doing)
- Additional code for registering `OpDecoder`s
|
||||
@@ -1,38 +0,0 @@
|
||||
# ADR 028: LibP2P Integration
|
||||
|
||||
## Changelog
|
||||
|
||||
- {date}: {changelog}
|
||||
|
||||
## Context
|
||||
|
||||
> This section contains all the context one needs to understand the current state, and why there is a problem. It should be as succinct as possible and introduce the high level idea behind the solution.
|
||||
|
||||
## Decision
|
||||
|
||||
> This section explains all of the details of the proposed solution, including implementation details.
|
||||
> It should also describe affects / corollary items that may need to be changed as a part of this.
|
||||
> If the proposed change will be large, please also indicate a way to do the change to maximize ease of review.
|
||||
> (e.g. the optimal split of things to do between separate PR's)
|
||||
|
||||
## Status
|
||||
|
||||
> A decision may be "proposed" if it hasn't been agreed upon yet, or "accepted" once it is agreed upon. If a later ADR changes or reverses a decision, it may be marked as "deprecated" or "superseded" with a reference to its replacement.
|
||||
|
||||
{Deprecated|Proposed|Accepted|Declined}
|
||||
|
||||
## Consequences
|
||||
|
||||
> This section describes the consequences, after applying the decision. All consequences should be summarized here, not just the "positive" ones.
|
||||
|
||||
### Positive
|
||||
|
||||
### Negative
|
||||
|
||||
### Neutral
|
||||
|
||||
## References
|
||||
|
||||
> Are there any relevant PR comments, issues that led up to this, or articles referenced for why we made the given design choice? If so link them here!
|
||||
|
||||
- {reference link}
|
||||
@@ -1,128 +0,0 @@
|
||||
# ADR 029: Check block txs before prevote
|
||||
|
||||
## Changelog
|
||||
|
||||
04-10-2018: Update with link to issue
|
||||
[#2384](https://github.com/tendermint/tendermint/issues/2384) and reason for rejection
|
||||
19-09-2018: Initial Draft
|
||||
|
||||
## Context
|
||||
|
||||
We currently check a tx's validity through 2 ways.
|
||||
|
||||
1. Through checkTx in mempool connection.
|
||||
2. Through deliverTx in consensus connection.
|
||||
|
||||
The 1st is called when an external tx comes in, so the node should be a proposer at that time. The 2nd is called when an external block comes in and reaches the commit phase; the node doesn't need to be the proposer of the block, but it should still check the txs in that block.
|
||||
|
||||
In the 2nd situation, if there are many invalid txs in the block, it would be too late for all nodes to discover that most txs in the block are invalid, and we would rather not record invalid txs in the blockchain at all.
|
||||
|
||||
## Proposed solution
|
||||
|
||||
Therefore, we should find a way to check the txs' validity before sending out a prevote. Currently we have cs.isProposalComplete() to judge whether a block is complete. We can have
|
||||
|
||||
```
|
||||
func (blockExec *BlockExecutor) CheckBlock(block *types.Block) error {
|
||||
// check txs of block.
|
||||
for _, tx := range block.Txs {
|
||||
reqRes := blockExec.proxyApp.CheckTxAsync(tx)
|
||||
reqRes.Wait()
|
||||
if reqRes.Response == nil || reqRes.Response.GetCheckTx() == nil || reqRes.Response.GetCheckTx().Code != abci.CodeTypeOK {
|
||||
return errors.Errorf("tx %v check failed. response: %v", tx, reqRes.Response)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
```
|
||||
|
||||
such a method in BlockExecutor to check all txs' validity in that block.
|
||||
|
||||
However, this method should not be implemented like that, because checkTx shares the same state that the mempool uses in the app. So we should define a new interface method checkBlock in Application to indicate that it should use the same state as deliverTx.
|
||||
|
||||
```go
type Application interface {
    // Info/Query Connection
    Info(RequestInfo) ResponseInfo                // Return application info
    SetOption(RequestSetOption) ResponseSetOption // Set application option
    Query(RequestQuery) ResponseQuery             // Query for state

    // Mempool Connection
    CheckTx(tx []byte) ResponseCheckTx // Validate a tx for the mempool

    // Consensus Connection
    InitChain(RequestInitChain) ResponseInitChain    // Initialize blockchain with validators and other info from TendermintCore
    CheckBlock(RequestCheckBlock) ResponseCheckBlock
    BeginBlock(RequestBeginBlock) ResponseBeginBlock // Signals the beginning of a block
    DeliverTx(tx []byte) ResponseDeliverTx           // Deliver a tx for full processing
    EndBlock(RequestEndBlock) ResponseEndBlock       // Signals the end of a block, returns changes to the validator set
    Commit() ResponseCommit                          // Commit the state and return the application Merkle root hash
}
```
|
||||
|
||||
All apps should implement that method. For example, the counter app:
|
||||
|
||||
```go
func (app *CounterApplication) CheckBlock(block types.Request_CheckBlock) types.ResponseCheckBlock {
    if app.serial {
        app.originalTxCount = app.txCount // backup the txCount state
        for _, tx := range block.CheckBlock.Block.Txs {
            if len(tx) > 8 {
                return types.ResponseCheckBlock{
                    Code: code.CodeTypeEncodingError,
                    Log:  fmt.Sprintf("Max tx size is 8 bytes, got %d", len(tx))}
            }
            tx8 := make([]byte, 8)
            copy(tx8[len(tx8)-len(tx):], tx)
            txValue := binary.BigEndian.Uint64(tx8)
            if txValue < uint64(app.txCount) {
                return types.ResponseCheckBlock{
                    Code: code.CodeTypeBadNonce,
                    Log:  fmt.Sprintf("Invalid nonce. Expected >= %v, got %v", app.txCount, txValue)}
            }
            app.txCount++
        }
    }
    return types.ResponseCheckBlock{Code: code.CodeTypeOK}
}
```
|
||||
|
||||
In BeginBlock, the app should restore the state to the original state before checking the block:
|
||||
|
||||
```go
func (app *CounterApplication) DeliverTx(tx []byte) types.ResponseDeliverTx {
    if app.serial {
        app.txCount = app.originalTxCount // restore the txCount state
    }
    app.txCount++
    return types.ResponseDeliverTx{Code: code.CodeTypeOK}
}
```
|
||||
|
||||
The txCount is like the nonce in ethermint; it should be restored when entering the deliverTx phase. Some operations, like checking the tx signature, need not be done again, so deliverTx can focus on how a tx is applied and skip the tx checks, because all the checking has already been done in the checkBlock phase.
|
||||
|
||||
An optional optimization is to alter deliverTx into deliverBlock. Since the block has already been checked by checkBlock, all the txs in it are valid, so the app can cache the block and, in the deliverBlock phase, simply apply the cached block. This optimization also saves network traffic compared to sending each tx via deliverTx.
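For illustration only, here is a minimal sketch of that caching idea; the `CachedApp` type, the `Tx`/`Block` stand-ins, and the `applyTx` helper are invented for the example and are not part of the proposal:

```go
package adr029sketch

// Tx and Block are minimal stand-ins for types.Tx and types.Block.
type Tx []byte

type Block struct {
    Txs []Tx
}

// CachedApp caches the block validated in checkBlock so that deliverBlock
// can apply it without the txs being sent over ABCI a second time.
type CachedApp struct {
    checkedBlock *Block
}

// CheckBlock validates every tx (elided here) and remembers the block.
func (app *CachedApp) CheckBlock(b *Block) {
    // ... validate txs, as in the CounterApplication example above ...
    app.checkedBlock = b
}

// DeliverBlock applies the previously checked block from the cache.
func (app *CachedApp) DeliverBlock() {
    for _, tx := range app.checkedBlock.Txs {
        app.applyTx(tx) // apply only; validity was established in CheckBlock
    }
    app.checkedBlock = nil
}

func (app *CachedApp) applyTx(tx Tx) {
    // application-specific state transition
}
```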
|
||||
|
||||
|
||||
|
||||
## Status
|
||||
|
||||
Rejected
|
||||
|
||||
## Decision
|
||||
|
||||
Performance impact is considered too great. See [#2384](https://github.com/tendermint/tendermint/issues/2384)
|
||||
|
||||
## Consequences
|
||||
|
||||
### Positive
|
||||
|
||||
- more robust defense against an adversary proposing a block full of invalid txs.
|
||||
|
||||
### Negative
|
||||
|
||||
- adds a new interface method; app logic needs to adjust to accommodate it.
|
||||
- sending all the tx data over the ABCI twice
|
||||
- potentially redundant validations (eg. signature checks in both CheckBlock and
|
||||
DeliverTx)
|
||||
|
||||
### Neutral
|
||||
@@ -1,458 +0,0 @@
|
||||
# ADR 030: Consensus Refactor
|
||||
|
||||
## Context
|
||||
|
||||
One of the biggest challenges this project faces is to prove that the
implementations of the specifications are correct. Much like we strive to
formally verify our algorithms and protocols, we should work towards high
confidence in the correctness of our program code. One such component is the
core of Tendermint - Consensus - which currently resides in the `consensus`
package. Over time there has been high friction when making changes to the
package due to the algorithm being scattered across a side-effectful container
(the current `ConsensusState`). In order to test the algorithm, a large object
graph needs to be set up, and even then the non-deterministic parts of the
container prevent high certainty. Ideally we would have a 1-to-1
representation of the [spec](https://github.com/tendermint/spec), ready and
easy to test for domain experts.
|
||||
|
||||
Addresses:
|
||||
|
||||
- [#1495](https://github.com/tendermint/tendermint/issues/1495)
|
||||
- [#1692](https://github.com/tendermint/tendermint/issues/1692)
|
||||
|
||||
## Decision
|
||||
|
||||
To remedy these issues we plan a gradual, non-invasive refactoring of the
|
||||
`consensus` package. We start by isolating the consensus algorithm into
a pure function and a finite state machine to address the most pressing issue,
the lack of confidence, while leaving the rest of the package intact,
with optional follow-up changes to improve the separation of concerns.
|
||||
|
||||
### Implementation changes
|
||||
|
||||
The core of Consensus can be modelled as a function with clearly defined inputs:
|
||||
|
||||
* `State` - data container for current round, height, etc.
|
||||
* `Event` - significant events in the network
|
||||
|
||||
producing clear outputs:
|
||||
|
||||
* `State` - updated input
|
||||
* `Message` - signal what actions to perform
|
||||
|
||||
```go
|
||||
type Event int
|
||||
|
||||
const (
|
||||
EventUnknown Event = iota
|
||||
EventProposal
|
||||
Majority23PrevotesBlock
|
||||
Majority23PrecommitBlock
|
||||
Majority23PrevotesAny
|
||||
Majority23PrecommitAny
|
||||
TimeoutNewRound
|
||||
TimeoutPropose
|
||||
TimeoutPrevotes
|
||||
TimeoutPrecommit
|
||||
)
|
||||
|
||||
type Message int
|
||||
|
||||
const (
|
||||
MessageUnknown Message = iota
|
||||
MessageProposal
|
||||
MessageVotes
|
||||
MessageDecision
|
||||
)
|
||||
|
||||
type State struct {
|
||||
height uint64
|
||||
round uint64
|
||||
step uint64
|
||||
lockedValue interface{} // TODO: Define proper type.
|
||||
lockedRound interface{} // TODO: Define proper type.
|
||||
validValue interface{} // TODO: Define proper type.
|
||||
validRound interface{} // TODO: Define proper type.
|
||||
// From the original notes: valid(v)
|
||||
valid interface{} // TODO: Define proper type.
|
||||
// From the original notes: proposer(h, r)
|
||||
proposer interface{} // TODO: Define proper type.
|
||||
}
|
||||
|
||||
func Consensus(Event, State) (State, Message) {
|
||||
// Consolidate implementation.
|
||||
}
|
||||
```
|
||||
|
||||
Tracking of relevant information to feed `Event` into the function and act on
|
||||
the output is left to the `ConsensusExecutor` (formerly `ConsensusState`).
|
||||
|
||||
The benefits for testing surface nicely, as testing a sequence of events
against the algorithm could be as simple as the following example:
|
||||
|
||||
``` go
|
||||
func TestConsensusXXX(t *testing.T) {
|
||||
type expected struct {
|
||||
message Message
|
||||
state State
|
||||
}
|
||||
|
||||
// Setup order of events, initial state and expectation.
|
||||
var (
|
||||
events = []struct {
|
||||
event Event
|
||||
want expected
|
||||
}{
|
||||
// ...
|
||||
}
|
||||
state = State{
|
||||
// ...
|
||||
}
|
||||
)
|
||||
|
||||
for _, e := range events {
|
||||
state, msg = Consensus(e.event, state)
|
||||
|
||||
// Test message expectation.
|
||||
if msg != e.want.message {
|
||||
t.Fatalf("have %v, want %v", msg, e.want.message)
|
||||
}
|
||||
|
||||
// Test state expectation.
|
||||
if !reflect.DeepEqual(state, e.want.state) {
|
||||
t.Fatalf("have %v, want %v", state, e.want.state)
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
## Consensus Executor
|
||||
|
||||
## Consensus Core
|
||||
|
||||
```go
|
||||
type Event interface{}
|
||||
|
||||
type EventNewHeight struct {
|
||||
Height int64
|
||||
ValidatorId int
|
||||
}
|
||||
|
||||
type EventNewRound HeightAndRound
|
||||
|
||||
type EventProposal struct {
|
||||
Height int64
|
||||
Round int
|
||||
Timestamp Time
|
||||
BlockID BlockID
|
||||
POLRound int
|
||||
Sender int
|
||||
}
|
||||
|
||||
type Majority23PrevotesBlock struct {
|
||||
Height int64
|
||||
Round int
|
||||
BlockID BlockID
|
||||
}
|
||||
|
||||
type Majority23PrecommitBlock struct {
|
||||
Height int64
|
||||
Round int
|
||||
BlockID BlockID
|
||||
}
|
||||
|
||||
type HeightAndRound struct {
|
||||
Height int64
|
||||
Round int
|
||||
}
|
||||
|
||||
type Majority23PrevotesAny HeightAndRound
|
||||
type Majority23PrecommitAny HeightAndRound
|
||||
type TimeoutPropose HeightAndRound
|
||||
type TimeoutPrevotes HeightAndRound
|
||||
type TimeoutPrecommit HeightAndRound
|
||||
|
||||
|
||||
type Message interface{}
|
||||
|
||||
type MessageProposal struct {
|
||||
Height int64
|
||||
Round int
|
||||
BlockID BlockID
|
||||
POLRound int
|
||||
}
|
||||
|
||||
type VoteType int
|
||||
|
||||
const (
|
||||
VoteTypeUnknown VoteType = iota
|
||||
Prevote
|
||||
Precommit
|
||||
)
|
||||
|
||||
|
||||
type MessageVote struct {
|
||||
Height int64
|
||||
Round int
|
||||
BlockID BlockID
|
||||
Type VoteType
|
||||
}
|
||||
|
||||
|
||||
type MessageDecision struct {
|
||||
Height int64
|
||||
Round int
|
||||
BlockID BlockID
|
||||
}
|
||||
|
||||
type TriggerTimeout struct {
|
||||
Height int64
|
||||
Round int
|
||||
Duration Duration
|
||||
}
|
||||
|
||||
|
||||
type RoundStep int
|
||||
|
||||
const (
|
||||
RoundStepUnknown RoundStep = iota
|
||||
RoundStepPropose
|
||||
RoundStepPrevote
|
||||
RoundStepPrecommit
|
||||
RoundStepCommit
|
||||
)
|
||||
|
||||
type State struct {
|
||||
Height int64
|
||||
Round int
|
||||
Step RoundStep
|
||||
LockedValue BlockID
|
||||
LockedRound int
|
||||
ValidValue BlockID
|
||||
ValidRound int
|
||||
ValidatorId int
|
||||
ValidatorSetSize int
|
||||
}
|
||||
|
||||
func proposer(height int64, round int) int {}
|
||||
func getValue() BlockID {}
|
||||
|
||||
func Consensus(event Event, state State) (State, Message, TriggerTimeout) {
|
||||
msg = nil
|
||||
timeout = nil
|
||||
switch event := event.(type) {
|
||||
case EventNewHeight:
|
||||
if event.Height > state.Height {
|
||||
state.Height = event.Height
|
||||
state.Round = -1
|
||||
state.Step = RoundStepPropose
|
||||
state.LockedValue = nil
|
||||
state.LockedRound = -1
|
||||
state.ValidValue = nil
|
||||
state.ValidRound = -1
|
||||
state.ValidatorId = event.ValidatorId
|
||||
}
|
||||
return state, msg, timeout
|
||||
|
||||
case EventNewRound:
|
||||
if event.Height == state.Height and event.Round > state.Round {
|
||||
state.Round = event.Round
|
||||
state.Step = RoundStepPropose
|
||||
if proposer(state.Height, state.Round) == state.ValidatorId {
|
||||
proposal = state.ValidValue
|
||||
if proposal == nil {
|
||||
proposal = getValue()
|
||||
}
|
||||
msg = MessageProposal { state.Height, state.Round, proposal, state.ValidRound }
|
||||
}
|
||||
timeout = TriggerTimeout { state.Height, state.Round, timeoutPropose(state.Round) }
|
||||
}
|
||||
return state, msg, timeout
|
||||
|
||||
case EventProposal:
|
||||
if event.Height == state.Height and event.Round == state.Round and
|
||||
event.Sender == proposer(state.Height, state.Round) and state.Step == RoundStepPropose {
|
||||
if event.POLRound >= state.LockedRound or event.BlockID == state.LockedValue or state.LockedRound == -1 {
|
||||
msg = MessageVote { state.Height, state.Round, event.BlockID, Prevote }
|
||||
}
|
||||
state.Step = RoundStepPrevote
|
||||
}
|
||||
return state, msg, timeout
|
||||
|
||||
case TimeoutPropose:
|
||||
if event.Height == state.Height and event.Round == state.Round and state.Step == RoundStepPropose {
|
||||
msg = MessageVote { state.Height, state.Round, nil, Prevote }
|
||||
state.Step = RoundStepPrevote
|
||||
}
|
||||
return state, msg, timeout
|
||||
|
||||
case Majority23PrevotesBlock:
|
||||
if event.Height == state.Height and event.Round == state.Round and state.Step >= RoundStepPrevote and event.Round > state.ValidRound {
|
||||
state.ValidRound = event.Round
|
||||
state.ValidValue = event.BlockID
|
||||
if state.Step == RoundStepPrevote {
|
||||
state.LockedRound = event.Round
|
||||
state.LockedValue = event.BlockID
|
||||
msg = MessageVote { state.Height, state.Round, event.BlockID, Precommit }
|
||||
state.Step = RoundStepPrecommit
|
||||
}
|
||||
}
|
||||
return state, msg, timeout
|
||||
|
||||
case Majority23PrevotesAny:
|
||||
if event.Height == state.Height and event.Round == state.Round and state.Step == RoundStepPrevote {
|
||||
timeout = TriggerTimeout { state.Height, state.Round, timeoutPrevote(state.Round) }
|
||||
}
|
||||
return state, msg, timeout
|
||||
|
||||
case TimeoutPrevote:
|
||||
if event.Height == state.Height and event.Round == state.Round and state.Step == RoundStepPrevote {
|
||||
msg = MessageVote { state.Height, state.Round, nil, Precommit }
|
||||
state.Step = RoundStepPrecommit
|
||||
}
|
||||
return state, msg, timeout
|
||||
|
||||
case Majority23PrecommitBlock:
|
||||
if event.Height == state.Height {
|
||||
state.Step = RoundStepCommit
|
||||
state.LockedValue = event.BlockID
|
||||
}
|
||||
return state, msg, timeout
|
||||
|
||||
case Majority23PrecommitAny:
|
||||
if event.Height == state.Height and event.Round == state.Round {
|
||||
timeout = TriggerTimeout { state.Height, state.Round, timeoutPrecommit(state.Round) }
|
||||
}
|
||||
return state, msg, timeout
|
||||
|
||||
case TimeoutPrecommit:
|
||||
if event.Height == state.Height and event.Round == state.Round {
|
||||
state.Round = state.Round + 1
|
||||
}
|
||||
return state, msg, timeout
|
||||
}
|
||||
}
|
||||
|
||||
func ConsensusExecutor() {
|
||||
proposal = nil
|
||||
votes = HeightVoteSet { Height: 1 }
|
||||
state = State {
|
||||
Height: 1
|
||||
Round: 0
|
||||
Step: RoundStepPropose
|
||||
LockedValue: nil
|
||||
LockedRound: -1
|
||||
ValidValue: nil
|
||||
ValidRound: -1
|
||||
}
|
||||
|
||||
event = EventNewHeight {1, id}
|
||||
state, msg, timeout = Consensus(event, state)
|
||||
|
||||
event = EventNewRound {state.Height, 0}
|
||||
state, msg, timeout = Consensus(event, state)
|
||||
|
||||
if msg != nil {
|
||||
send msg
|
||||
}
|
||||
|
||||
if timeout != nil {
|
||||
trigger timeout
|
||||
}
|
||||
|
||||
for {
|
||||
select {
|
||||
case message := <- msgCh:
|
||||
switch msg := message.(type) {
|
||||
case MessageProposal:
|
||||
|
||||
case MessageVote:
|
||||
if msg.Height == state.Height {
|
||||
newVote = votes.AddVote(msg)
|
||||
if newVote {
|
||||
switch msg.Type {
|
||||
case Prevote:
|
||||
prevotes = votes.Prevotes(msg.Round)
|
||||
if prevotes.WeakCertificate() and msg.Round > state.Round {
|
||||
event = EventNewRound { msg.Height, msg.Round }
|
||||
state, msg, timeout = Consensus(event, state)
|
||||
state = handleStateChange(state, msg, timeout)
|
||||
}
|
||||
|
||||
if blockID, ok = prevotes.TwoThirdsMajority(); ok and blockID != nil {
|
||||
if msg.Round == state.Round and hasBlock(blockID) {
|
||||
event = Majority23PrevotesBlock { msg.Height, msg.Round, blockID }
|
||||
state, msg, timeout = Consensus(event, state)
|
||||
state = handleStateChange(state, msg, timeout)
|
||||
}
|
||||
if proposal != nil and proposal.POLRound == msg.Round and hasBlock(blockID) {
|
||||
event = EventProposal {
|
||||
Height: state.Height
|
||||
Round: state.Round
|
||||
BlockID: blockID
|
||||
POLRound: proposal.POLRound
|
||||
Sender: message.Sender
|
||||
}
|
||||
state, msg, timeout = Consensus(event, state)
|
||||
state = handleStateChange(state, msg, timeout)
|
||||
}
|
||||
}
|
||||
|
||||
if prevotes.HasTwoThirdsAny() and msg.Round == state.Round {
|
||||
event = Majority23PrevotesAny { msg.Height, msg.Round, blockID }
|
||||
state, msg, timeout = Consensus(event, state)
|
||||
state = handleStateChange(state, msg, timeout)
|
||||
}
|
||||
|
||||
case Precommit:
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
case timeout := <- timeoutCh:
|
||||
|
||||
case block := <- blockCh:
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func handleStateChange(state, msg, timeout) State {
|
||||
if state.Step == Commit {
|
||||
state = ExecuteBlock(state.LockedValue)
|
||||
}
|
||||
if msg != nil {
|
||||
send msg
|
||||
}
|
||||
if timeout != nil {
|
||||
trigger timeout
|
||||
}
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
### Implementation roadmap
|
||||
|
||||
* implement the proposed design
|
||||
* replace currently scattered calls in `ConsensusState` with calls to the new
|
||||
`Consensus` function
|
||||
* rename `ConsensusState` to `ConsensusExecutor` to avoid confusion
|
||||
* propose design for improved separation and clear information flow between
|
||||
`ConsensusExecutor` and `ConsensusReactor`
|
||||
|
||||
## Status
|
||||
|
||||
Draft.
|
||||
|
||||
## Consequences
|
||||
|
||||
### Positive
|
||||
|
||||
- isolated implementation of the algorithm
|
||||
- improved testability - simpler to prove correctness
- clearer separation of concerns - easier to reason about
|
||||
|
||||
### Negative
|
||||
|
||||
### Neutral
|
||||
@@ -1,38 +0,0 @@
|
||||
# ADR 031: Changelog Structure
|
||||
|
||||
## Changelog
|
||||
|
||||
- {date}: {changelog}
|
||||
|
||||
## Context
|
||||
|
||||
> This section contains all the context one needs to understand the current state, and why there is a problem. It should be as succinct as possible and introduce the high level idea behind the solution.
|
||||
|
||||
## Decision
|
||||
|
||||
> This section explains all of the details of the proposed solution, including implementation details.
|
||||
> It should also describe effects / corollary items that may need to be changed as a part of this.
|
||||
> If the proposed change will be large, please also indicate a way to do the change to maximize ease of review.
|
||||
> (e.g. the optimal split of things to do between separate PR's)
|
||||
|
||||
## Status
|
||||
|
||||
> A decision may be "proposed" if it hasn't been agreed upon yet, or "accepted" once it is agreed upon. If a later ADR changes or reverses a decision, it may be marked as "deprecated" or "superseded" with a reference to its replacement.
|
||||
|
||||
{Deprecated|Proposed|Accepted|Declined}
|
||||
|
||||
## Consequences
|
||||
|
||||
> This section describes the consequences, after applying the decision. All consequences should be summarized here, not just the "positive" ones.
|
||||
|
||||
### Positive
|
||||
|
||||
### Negative
|
||||
|
||||
### Neutral
|
||||
|
||||
## References
|
||||
|
||||
> Are there any relevant PR comments, issues that led up to this, or articles referenced for why we made the given design choice? If so link them here!
|
||||
|
||||
- {reference link}
|
||||
@@ -1,247 +0,0 @@
|
||||
# ADR 033: pubsub 2.0
|
||||
|
||||
Author: Anton Kaliaev (@melekes)
|
||||
|
||||
## Changelog
|
||||
|
||||
02-10-2018: Initial draft
|
||||
|
||||
16-01-2019: Second version based on our conversation with Jae
|
||||
|
||||
17-01-2019: Third version explaining how new design solves current issues
|
||||
|
||||
25-01-2019: Fourth version to treat buffered and unbuffered channels differently
|
||||
|
||||
## Context
|
||||
|
||||
Since the initial version of the pubsub, there have been a number of issues
|
||||
raised: [#951], [#1879], [#1880]. Some of them are high-level issues questioning the
|
||||
core design choices made. Others are minor and mostly about the interface of
|
||||
`Subscribe()` / `Publish()` functions.
|
||||
|
||||
### Sync vs Async
|
||||
|
||||
Now, when publishing a message to subscribers, we can do it in a goroutine:
|
||||
|
||||
_using channels for data transmission_
|
||||
```go
|
||||
for each subscriber {
|
||||
out := subscriber.outc
|
||||
go func() {
|
||||
out <- msg
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
_by invoking callback functions_
|
||||
```go
|
||||
for each subscriber {
|
||||
go subscriber.callbackFn()
|
||||
}
|
||||
```
|
||||
|
||||
This gives us greater performance and allows us to avoid the "slow client problem"
|
||||
(when other subscribers have to wait for a slow subscriber). A pool of
|
||||
goroutines can be used to avoid uncontrolled memory growth.
|
||||
|
||||
In certain cases, this is what you want. But in our case, because we need
|
||||
strict ordering of events (if event A was published before B, the guaranteed
|
||||
delivery order will be A -> B), we can't publish msg in a new goroutine every time.
|
||||
|
||||
We can also have a goroutine per subscriber, although we'd need to be careful
|
||||
with the number of subscribers. It's also more difficult to implement, and it's
unclear if we'd benefit from it (because we'd be forced to create N additional
|
||||
channels to distribute msg to these goroutines).
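For comparison, a minimal sketch (not from the ADR) of the goroutine-per-subscriber alternative: each subscriber gets one extra channel feeding a dedicated sender goroutine, so per-subscriber ordering is preserved. All names are illustrative:

```go
package pubsubsketch

// startSender launches a dedicated goroutine for one subscriber. The publisher
// only ever writes into `in`; the goroutine forwards messages to the
// subscriber's out channel, so a slow subscriber only blocks its own goroutine
// while per-subscriber delivery order is preserved.
func startSender(in <-chan interface{}, out chan<- interface{}) {
    go func() {
        for msg := range in {
            out <- msg
        }
    }()
}
```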
|
||||
|
||||
### Non-blocking send
|
||||
|
||||
There is also a question of whether we should have a non-blocking send.
|
||||
Currently, sends are blocking, so publishing to one client can block on
|
||||
publishing to another. This means a slow or unresponsive client can halt the
|
||||
system. Instead, we can use a non-blocking send:
|
||||
|
||||
```go
|
||||
for each subscriber {
|
||||
out := subscriber.outc
|
||||
select {
|
||||
case out <- msg:
|
||||
default:
|
||||
log("subscriber %v buffer is full, skipping...")
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
This fixes the "slow client problem", but there is no way for a slow client to
|
||||
know if it had missed a message. We could return a second channel and close it
|
||||
to indicate subscription termination. On the other hand, if we're going to
|
||||
stick with a blocking send, **devs must always ensure the subscriber's handling code
|
||||
does not block**, which is a hard task to put on their shoulders.
|
||||
|
||||
The interim option is to run a pool of goroutines for a single message and wait for all
goroutines to finish. This will solve the "slow client problem", but we'd still
|
||||
have to wait `max(goroutine_X_time)` before we can publish the next message.
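A minimal sketch of that interim option, assuming a `subscribers` slice of output channels (the names are illustrative, not part of the ADR):

```go
package pubsubsketch

import "sync"

// publish fans one message out to every subscriber concurrently and then
// waits for the slowest send, so the next message cannot be published before
// max(goroutine_X_time) has elapsed.
func publish(msg interface{}, subscribers []chan<- interface{}) {
    var wg sync.WaitGroup
    for _, out := range subscribers {
        wg.Add(1)
        go func(out chan<- interface{}) {
            defer wg.Done()
            out <- msg // a slow subscriber only delays this one goroutine
        }(out)
    }
    wg.Wait()
}
```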
|
||||
|
||||
### Channels vs Callbacks
|
||||
|
||||
Yet another question is whether we should use channels for message transmission or
|
||||
call subscriber-defined callback functions. Callback functions give subscribers
|
||||
more flexibility - you can use mutexes in there, channels, spawn goroutines,
|
||||
anything you really want. But they also carry local scope, which can result in
|
||||
memory leaks and/or memory usage increase.
|
||||
|
||||
Go channels are the de-facto standard for carrying data between goroutines.
|
||||
|
||||
### Why does `Subscribe()` accept an `out` channel?
|
||||
|
||||
Because in our tests, we create buffered channels (cap: 1). Alternatively, we
|
||||
can make capacity an argument and return a channel.
|
||||
|
||||
## Decision
|
||||
|
||||
### MsgAndTags
|
||||
|
||||
Use a `MsgAndTags` struct on the subscription channel to indicate what tags the
|
||||
msg matched.
|
||||
|
||||
```go
|
||||
type MsgAndTags struct {
|
||||
Msg interface{}
|
||||
Tags TagMap
|
||||
}
|
||||
```
|
||||
|
||||
### Subscription Struct
|
||||
|
||||
|
||||
Change `Subscribe()` function to return a `Subscription` struct:
|
||||
|
||||
```go
|
||||
type Subscription struct {
|
||||
// private fields
|
||||
}
|
||||
|
||||
func (s *Subscription) Out() <-chan MsgAndTags
|
||||
func (s *Subscription) Cancelled() <-chan struct{}
|
||||
func (s *Subscription) Err() error
|
||||
```
|
||||
|
||||
`Out()` returns a channel onto which messages and tags are published.
|
||||
`Unsubscribe`/`UnsubscribeAll` does not close the channel, to prevent clients from
|
||||
receiving a nil message.
|
||||
|
||||
`Cancelled()` returns a channel that's closed when the subscription is terminated
|
||||
and is supposed to be used in a select statement.
|
||||
|
||||
If the channel returned by `Cancelled()` is not closed yet, `Err()` returns nil.
|
||||
If the channel is closed, `Err()` returns a non-nil error explaining why:
|
||||
`ErrUnsubscribed` if the subscriber chose to unsubscribe,
|
||||
`ErrOutOfCapacity` if the subscriber is not pulling messages fast enough and the channel returned by `Out()` became full.
|
||||
After `Err()` returns a non-nil error, successive calls to `Err()` return the same error.
|
||||
|
||||
```go
subscription, err := pubsub.Subscribe(...)
if err != nil {
    // ...
}
for {
    select {
    case msgAndTags := <-subscription.Out():
        // ...
    case <-subscription.Cancelled():
        return subscription.Err()
    }
}
```
|
||||
|
||||
### Capacity and Subscriptions
|
||||
|
||||
Make the `Out()` channel buffered (with capacity 1) by default. In most cases, we want to
|
||||
terminate the slow subscriber. Only in rare cases do we want to block the pubsub
|
||||
(e.g. when debugging consensus). This should lower the chances of the pubsub
|
||||
being frozen.
|
||||
|
||||
```go
|
||||
// outCap can be used to set capacity of Out channel
|
||||
// (1 by default, must be greater than 0).
|
||||
Subscribe(ctx context.Context, clientID string, query Query, outCap ...int) (Subscription, error) {
|
||||
```
|
||||
|
||||
Use a different function for an unbuffered channel:
|
||||
|
||||
```go
|
||||
// Subscription uses an unbuffered channel. Publishing will block.
|
||||
SubscribeUnbuffered(ctx context.Context, clientID string, query Query) (Subscription, error) {
|
||||
```
|
||||
|
||||
SubscribeUnbuffered should not be exposed to users.
|
||||
|
||||
### Blocking/Nonblocking
|
||||
|
||||
The publisher should treat these kinds of channels separately.
|
||||
It should block on unbuffered channels (for use with internal consensus events
|
||||
in the consensus tests) and not block on the buffered ones. If a client is too
|
||||
slow to keep up with its messages, its subscription is terminated:
|
||||
|
||||
    for each subscription {
        out := subscription.outChan
        if cap(out) == 0 {
            // block on unbuffered channel
            out <- msg
        } else {
            // don't block on buffered channels
            select {
            case out <- msg:
            default:
                // set the error, notify on the cancel chan
                subscription.err = fmt.Errorf("client is too slow for msg")
                close(subscription.cancelChan)

                // ... unsubscribe and close out
            }
        }
    }
|
||||
|
||||
### How this new design solves the current issues
|
||||
|
||||
[#951] ([#1880]):
|
||||
|
||||
Because of the non-blocking send, the situation where we'll deadlock is not possible
|
||||
anymore. If the client stops reading messages, it will be removed.
|
||||
|
||||
[#1879]:
|
||||
|
||||
`MsgAndTags` is now used instead of a plain message.
|
||||
|
||||
### Future problems and their possible solutions
|
||||
|
||||
[#2826]
|
||||
|
||||
One question I am still pondering: how to prevent the pubsub from slowing
|
||||
down consensus. We can increase the pubsub queue size (which is 0 now). Also,
|
||||
it's probably a good idea to limit the total number of subscribers.
|
||||
|
||||
This can be done automatically. Say we set the queue size to 1000 and, when it's >=
|
||||
80% full, refuse new subscriptions.
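A minimal sketch of that guard, assuming a hypothetical server type with an internal `queue` channel and a configurable fullness threshold (none of these names come from the ADR):

```go
package pubsubsketch

import "errors"

// ErrServerOverloaded is returned when the internal queue is close to full
// and new subscriptions are refused.
var ErrServerOverloaded = errors.New("pubsub queue almost full; refusing new subscriptions")

type server struct {
    queue     chan interface{} // internal message queue, e.g. cap 1000
    fullRatio float64          // e.g. 0.8 to refuse new subscriptions at 80% full
}

// canSubscribe reports whether a new subscription should be accepted.
func (s *server) canSubscribe() error {
    if float64(len(s.queue)) >= s.fullRatio*float64(cap(s.queue)) {
        return ErrServerOverloaded
    }
    return nil
}
```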
|
||||
|
||||
## Status
|
||||
|
||||
In review
|
||||
|
||||
## Consequences
|
||||
|
||||
### Positive
|
||||
|
||||
- more idiomatic interface
|
||||
- subscribers know what tags msg was published with
|
||||
- subscribers aware of the reason their subscription was cancelled
|
||||
|
||||
### Negative
|
||||
|
||||
- (since v1) no concurrency when it comes to publishing messages
|
||||
|
||||
### Neutral
|
||||
|
||||
|
||||
[#951]: https://github.com/tendermint/tendermint/issues/951
|
||||
[#1879]: https://github.com/tendermint/tendermint/issues/1879
|
||||
[#1880]: https://github.com/tendermint/tendermint/issues/1880
|
||||
[#2826]: https://github.com/tendermint/tendermint/issues/2826
|
||||
@@ -1,72 +0,0 @@
|
||||
# ADR 034: PrivValidator file structure
|
||||
|
||||
## Changelog
|
||||
|
||||
03-11-2018: Initial Draft
|
||||
|
||||
## Context
|
||||
|
||||
For now, the PrivValidator file `priv_validator.json` contains mutable and immutable parts.
|
||||
Even in an insecure mode which does not encrypt the private key on disk, it is reasonable to separate
|
||||
the mutable part and immutable part.
|
||||
|
||||
References:
|
||||
[#1181](https://github.com/tendermint/tendermint/issues/1181)
|
||||
[#2657](https://github.com/tendermint/tendermint/issues/2657)
|
||||
[#2313](https://github.com/tendermint/tendermint/issues/2313)
|
||||
|
||||
## Proposed Solution
|
||||
|
||||
We can split the mutable and immutable parts into two structs:
|
||||
```go
|
||||
// FilePVKey stores the immutable part of PrivValidator
|
||||
type FilePVKey struct {
|
||||
Address types.Address `json:"address"`
|
||||
PubKey crypto.PubKey `json:"pub_key"`
|
||||
PrivKey crypto.PrivKey `json:"priv_key"`
|
||||
|
||||
filePath string
|
||||
}
|
||||
|
||||
// FilePVLastSignState stores the mutable part of PrivValidator
|
||||
type FilePVLastSignState struct {
|
||||
Height int64 `json:"height"`
|
||||
Round int `json:"round"`
|
||||
Step int8 `json:"step"`
|
||||
Signature []byte `json:"signature,omitempty"`
|
||||
SignBytes cmn.HexBytes `json:"signbytes,omitempty"`
|
||||
|
||||
filePath string
|
||||
mtx sync.Mutex
|
||||
}
|
||||
```
|
||||
|
||||
Then we can combine `FilePVKey` with `FilePVLastSignState` to get back the original `FilePV`.
|
||||
|
||||
```go
|
||||
type FilePV struct {
|
||||
Key FilePVKey
|
||||
LastSignState FilePVLastSignState
|
||||
}
|
||||
```
|
||||
|
||||
As discussed, `FilePV` should be located in `config`, and `FilePVLastSignState` should be stored in `data`. The
|
||||
store path of each file should be specified in `config.yml`.
|
||||
|
||||
What we need to do next is change the methods of `FilePV`.
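As a rough illustration of what such a change might look like, building on the structs above (the `Save` helpers on the two parts and the file names are assumptions, not part of this ADR):

```go
// Save persists each part to its own file, so the immutable key material can
// live under config/ while the frequently changing sign state lives under data/.
func (pv *FilePV) Save() {
    pv.Key.Save()           // e.g. config/priv_validator_key.json
    pv.LastSignState.Save() // e.g. data/priv_validator_state.json
}
```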
|
||||
|
||||
## Status
|
||||
|
||||
Draft.
|
||||
|
||||
## Consequences
|
||||
|
||||
### Positive
|
||||
|
||||
- separates the mutable and immutable parts of PrivValidator
|
||||
|
||||
### Negative
|
||||
|
||||
- need to add more config entries for the file paths
|
||||
|
||||
### Neutral
|
||||
@@ -1,40 +0,0 @@
|
||||
# ADR 035: Documentation
|
||||
|
||||
Author: @zramsay (Zach Ramsay)
|
||||
|
||||
## Changelog
|
||||
|
||||
### November 2nd 2018
|
||||
|
||||
- initial write-up
|
||||
|
||||
## Context
|
||||
|
||||
The Tendermint documentation underwent several changes before settling on the current model. Originally, the documentation was hosted on the website and had to be updated asynchronously from the code. Along with the other repositories requiring documentation, the whole stack moved to using Read The Docs to automatically generate, publish, and host the documentation. This, however, was insufficient: the RTD site had advertisements, it wasn't easily accessible to devs, didn't collect metrics, was another set of external links, etc.
|
||||
|
||||
## Decision
|
||||
|
||||
For two reasons, the decision was made to use VuePress:
|
||||
|
||||
1) ability to get metrics (implemented on both Tendermint and SDK)
|
||||
2) host the documentation on the website as a `/docs` endpoint.
|
||||
|
||||
This is done while maintaining synchrony between the docs and code, i.e., the website is built whenever the docs are updated.
|
||||
|
||||
## Status
|
||||
|
||||
The two points above have been implemented; the `config.js` has a Google Analytics identifier and the documentation workflow has been up and running largely without problems for several months. Details about the documentation build & workflow can be found [here](../DOCS_README.md).
|
||||
|
||||
## Consequences
|
||||
|
||||
Because of the organizational separation between Tendermint & Cosmos, there is a challenge of "what goes where" for certain aspects of documentation.
|
||||
|
||||
### Positive
|
||||
|
||||
This architecture is largely positive relative to prior docs arrangements.
|
||||
|
||||
### Negative
|
||||
|
||||
A significant portion of the docs automation / build process is in private repos with limited access/visibility to devs. However, these tasks are handled by the SRE team.
|
||||
|
||||
### Neutral
|
||||
@@ -1,38 +0,0 @@
|
||||
# ADR 036: Empty Blocks via ABCI
|
||||
|
||||
## Changelog
|
||||
|
||||
- {date}: {changelog}
|
||||
|
||||
## Context
|
||||
|
||||
> This section contains all the context one needs to understand the current state, and why there is a problem. It should be as succinct as possible and introduce the high level idea behind the solution.
|
||||
|
||||
## Decision
|
||||
|
||||
> This section explains all of the details of the proposed solution, including implementation details.
|
||||
> It should also describe effects / corollary items that may need to be changed as a part of this.
|
||||
> If the proposed change will be large, please also indicate a way to do the change to maximize ease of review.
|
||||
> (e.g. the optimal split of things to do between separate PR's)
|
||||
|
||||
## Status
|
||||
|
||||
> A decision may be "proposed" if it hasn't been agreed upon yet, or "accepted" once it is agreed upon. If a later ADR changes or reverses a decision, it may be marked as "deprecated" or "superseded" with a reference to its replacement.
|
||||
|
||||
{Deprecated|Proposed|Accepted|Declined}
|
||||
|
||||
## Consequences
|
||||
|
||||
> This section describes the consequences, after applying the decision. All consequences should be summarized here, not just the "positive" ones.
|
||||
|
||||
### Positive
|
||||
|
||||
### Negative
|
||||
|
||||
### Neutral
|
||||
|
||||
## References
|
||||
|
||||
> Are there any relevant PR comments, issues that led up to this, or articles referenced for why we made the given design choice? If so link them here!
|
||||
|
||||
- {reference link}
|
||||
@@ -1,100 +0,0 @@
|
||||
# ADR 037: Deliver Block
|
||||
|
||||
Author: Daniil Lashin (@danil-lashin)
|
||||
|
||||
## Changelog
|
||||
|
||||
13-03-2019: Initial draft
|
||||
|
||||
## Context
|
||||
|
||||
Initial conversation: https://github.com/tendermint/tendermint/issues/2901
|
||||
|
||||
Some applications can handle transactions in parallel, or at least some
|
||||
part of tx processing can be parallelized. Currently it is not possible for a developer
to execute txs in parallel because Tendermint delivers them sequentially.
|
||||
|
||||
## Decision
|
||||
|
||||
Tendermint currently has `BeginBlock`, `EndBlock`, `Commit` and `DeliverTx` steps
while executing a block. This doc proposes merging these steps into one `DeliverBlock`
step. It will allow application developers to decide how they want to
execute transactions (in parallel or sequentially). It will also simplify and
speed up communication between the application and Tendermint.
|
||||
|
||||
As @jaekwon [mentioned](https://github.com/tendermint/tendermint/issues/2901#issuecomment-477746128)
|
||||
in the discussion, not all applications will benefit from this solution. In some cases,
when an application handles transactions sequentially, it may slow down the blockchain,
because it needs to wait until the full block is transmitted to the application before it
can start processing it. Also, in the case of a complete change of ABCI, we would need to force all the apps
to change their implementation completely. That's why I propose to introduce one more ABCI
type.
|
||||
|
||||
## Implementation Changes
|
||||
|
||||
In addition to the default application interface, which now has this structure
|
||||
|
||||
```go
|
||||
type Application interface {
|
||||
// Info and Mempool methods...
|
||||
|
||||
// Consensus Connection
|
||||
InitChain(RequestInitChain) ResponseInitChain // Initialize blockchain with validators and other info from TendermintCore
|
||||
BeginBlock(RequestBeginBlock) ResponseBeginBlock // Signals the beginning of a block
|
||||
DeliverTx(tx []byte) ResponseDeliverTx // Deliver a tx for full processing
|
||||
EndBlock(RequestEndBlock) ResponseEndBlock // Signals the end of a block, returns changes to the validator set
|
||||
Commit() ResponseCommit // Commit the state and return the application Merkle root hash
|
||||
}
|
||||
```
|
||||
|
||||
this doc proposes to add one more:
|
||||
|
||||
```go
|
||||
type Application interface {
|
||||
// Info and Mempool methods...
|
||||
|
||||
// Consensus Connection
|
||||
InitChain(RequestInitChain) ResponseInitChain // Initialize blockchain with validators and other info from TendermintCore
|
||||
DeliverBlock(RequestDeliverBlock) ResponseDeliverBlock // Deliver full block
|
||||
Commit() ResponseCommit // Commit the state and return the application Merkle root hash
|
||||
}
|
||||
|
||||
type RequestDeliverBlock struct {
|
||||
Hash []byte
|
||||
Header Header
|
||||
Txs Txs
|
||||
LastCommitInfo LastCommitInfo
|
||||
ByzantineValidators []Evidence
|
||||
}
|
||||
|
||||
type ResponseDeliverBlock struct {
|
||||
ValidatorUpdates []ValidatorUpdate
|
||||
ConsensusParamUpdates *ConsensusParams
|
||||
Tags []kv.Pair
|
||||
TxResults []ResponseDeliverTx
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
Also, we will need to add a new config param, which will specify which kind of ABCI the application uses.
For example, it can be `abci_type`. Then we will have 2 types (a small validation sketch follows the list):
|
||||
- `advanced` - current ABCI
|
||||
- `simple` - proposed implementation
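As a small sketch of how the node might validate such a hypothetical param (the constant names and the function below are illustrative, not part of the proposal):

```go
package abcitype

import "fmt"

// Possible values for the proposed abci_type config param.
const (
    ABCITypeAdvanced = "advanced" // current ABCI (BeginBlock/DeliverTx/EndBlock/Commit)
    ABCITypeSimple   = "simple"   // proposed DeliverBlock-based ABCI
)

// validateABCIType checks that the configured value is one of the two types.
func validateABCIType(abciType string) error {
    switch abciType {
    case ABCITypeAdvanced, ABCITypeSimple:
        return nil
    default:
        return fmt.Errorf("unknown abci_type %q (must be %q or %q)",
            abciType, ABCITypeAdvanced, ABCITypeSimple)
    }
}
```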
|
||||
|
||||
## Status
|
||||
|
||||
In review
|
||||
|
||||
## Consequences
|
||||
|
||||
### Positive
|
||||
|
||||
- much simpler introduction and tutorials for new developers (instead of implementing 5 methods they
|
||||
will need to implement only 3)
|
||||
- txs can be handled in parallel
|
||||
- simpler interface
|
||||
- faster communications between Tendermint and application
|
||||
|
||||
### Negative
|
||||
|
||||
- Tendermint should now support 2 kinds of ABCI
|
||||
@@ -1,38 +0,0 @@
|
||||
# ADR 038: Non-zero start height
|
||||
|
||||
## Changelog
|
||||
|
||||
- {date}: {changelog}
|
||||
|
||||
## Context
|
||||
|
||||
> This section contains all the context one needs to understand the current state, and why there is a problem. It should be as succinct as possible and introduce the high level idea behind the solution.
|
||||
|
||||
## Decision
|
||||
|
||||
> This section explains all of the details of the proposed solution, including implementation details.
|
||||
> It should also describe effects / corollary items that may need to be changed as a part of this.
|
||||
> If the proposed change will be large, please also indicate a way to do the change to maximize ease of review.
|
||||
> (e.g. the optimal split of things to do between separate PR's)
|
||||
|
||||
## Status
|
||||
|
||||
> A decision may be "proposed" if it hasn't been agreed upon yet, or "accepted" once it is agreed upon. If a later ADR changes or reverses a decision, it may be marked as "deprecated" or "superseded" with a reference to its replacement.
|
||||
|
||||
{Deprecated|Proposed|Accepted|Declined}
|
||||
|
||||
## Consequences
|
||||
|
||||
> This section describes the consequences, after applying the decision. All consequences should be summarized here, not just the "positive" ones.
|
||||
|
||||
### Positive
|
||||
|
||||
### Negative
|
||||
|
||||
### Neutral
|
||||
|
||||
## References
|
||||
|
||||
> Are there any relevant PR comments, issues that led up to this, or articles referenced for why we made the given design choice? If so link them here!
|
||||
|
||||
- {reference link}
|
||||
@@ -1,159 +0,0 @@
|
||||
# ADR 039: Peer Behaviour Interface
|
||||
|
||||
## Changelog
|
||||
* 07-03-2019: Initial draft
|
||||
* 14-03-2019: Updates from feedback
|
||||
|
||||
## Context
|
||||
|
||||
The responsibility for signaling and acting upon peer behaviour lacks a single
|
||||
owning component and is heavily coupled with the network stack[<sup>1</sup>](#references). Reactors
|
||||
maintain a reference to the `p2p.Switch` which they use to call
|
||||
`switch.StopPeerForError(...)` when a peer misbehaves and
|
||||
`switch.MarkAsGood(...)` when a peer contributes in some meaningful way.
|
||||
While the switch handles `StopPeerForError` internally, the `MarkAsGood`
|
||||
method delegates to another component, `p2p.AddrBook`. This scheme of delegation
|
||||
across the Switch obscures the responsibility for handling peer behaviour
|
||||
and ties up the reactors in a larger dependency graph when testing.
|
||||
|
||||
## Decision
|
||||
|
||||
Introduce a `PeerBehaviour` interface and concrete implementations which
|
||||
provide methods for reactors to signal peer behaviour without directly
coupling to `p2p.Switch`. Introduce an `ErrorBehaviourPeer` to provide
concrete reasons for stopping peers. Introduce a `GoodBehaviourPeer` to provide
concrete ways in which a peer contributes.
|
||||
|
||||
### Implementation Changes
|
||||
|
||||
PeerBehaviour then becomes an interface for signaling peer errors as well
|
||||
as for marking peers as `good`.
|
||||
|
||||
```go
|
||||
type PeerBehaviour interface {
|
||||
Behaved(peer Peer, reason GoodBehaviourPeer)
|
||||
Errored(peer Peer, reason ErrorBehaviourPeer)
|
||||
}
|
||||
```
|
||||
|
||||
Instead of signaling peers to stop with arbitrary reasons:
|
||||
`reason interface{}`
|
||||
|
||||
We introduce a concrete error type ErrorBehaviourPeer:
|
||||
```go
|
||||
type ErrorBehaviourPeer int
|
||||
|
||||
const (
|
||||
ErrorBehaviourUnknown = iota
|
||||
ErrorBehaviourBadMessage
|
||||
ErrorBehaviourMessageOutofOrder
|
||||
...
|
||||
)
|
||||
```
|
||||
|
||||
To provide additional information on the ways a peer contributed, we introduce
|
||||
the GoodBehaviourPeer type.
|
||||
|
||||
```go
|
||||
type GoodBehaviourPeer int
|
||||
|
||||
const (
|
||||
GoodBehaviourVote = iota
|
||||
GoodBehaviourBlockPart
|
||||
...
|
||||
)
|
||||
```
|
||||
|
||||
As a first iteration we provide a concrete implementation which wraps
|
||||
the switch:
|
||||
```go
|
||||
type SwitchedPeerBehaviour struct {
|
||||
sw *Switch
|
||||
}
|
||||
|
||||
func (spb *SwitchedPeerBehaviour) Errored(peer Peer, reason ErrorBehaviourPeer) {
|
||||
spb.sw.StopPeerForError(peer, reason)
|
||||
}
|
||||
|
||||
func (spb *SwitchedPeerBehaviour) Behaved(peer Peer, reason GoodBehaviourPeer) {
|
||||
spb.sw.MarkPeerAsGood(peer)
|
||||
}
|
||||
|
||||
func NewSwitchedPeerBehaviour(sw *Switch) *SwitchedPeerBehaviour {
|
||||
return &SwitchedPeerBehaviour{
|
||||
sw: sw,
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Reactors, which are often difficult to unit test[<sup>2</sup>](#references), could use an implementation which exposes the signals produced by the reactor in
|
||||
manufactured scenarios:
|
||||
|
||||
```go
|
||||
type ErrorBehaviours map[Peer][]ErrorBehaviourPeer
|
||||
type GoodBehaviours map[Peer][]GoodBehaviourPeer
|
||||
|
||||
type StorePeerBehaviour struct {
|
||||
eb ErrorBehaviours
|
||||
gb GoodBehaviours
|
||||
}
|
||||
|
||||
func NewStorePeerBehaviour() *StorePeerBehaviour{
|
||||
return &StorePeerBehaviour{
|
||||
eb: make(ErrorBehaviours),
|
||||
gb: make(GoodBehaviours),
|
||||
}
|
||||
}
|
||||
|
||||
func (spb StorePeerBehaviour) Errored(peer Peer, reason ErrorBehaviourPeer) {
|
||||
if _, ok := spb.eb[peer]; !ok {
|
||||
spb.eb[peer] = []ErrorBehaviourPeer{reason}
|
||||
} else {
|
||||
spb.eb[peer] = append(spb.eb[peer], reason)
|
||||
}
|
||||
}
|
||||
|
||||
func (spb *StorePeerBehaviour) GetErrored() ErrorBehaviours {
return spb.eb
|
||||
}
|
||||
|
||||
|
||||
func (spb StorePeerBehaviour) Behaved(peer Peer, reason GoodBehaviourPeer) {
|
||||
if _, ok := spb.gb[peer]; !ok {
|
||||
spb.gb[peer] = []GoodBehaviourPeer{reason}
|
||||
} else {
|
||||
spb.gb[peer] = append(spb.gb[peer], reason)
|
||||
}
|
||||
}
|
||||
|
||||
func (spb *StorePeerBehaviour) GetBehaved() GoodBehaviours {
|
||||
return spb.gb
|
||||
}
|
||||
```
|
||||
|
||||
## Status
|
||||
|
||||
Accepted
|
||||
|
||||
## Consequences
|
||||
|
||||
### Positive
|
||||
|
||||
* De-couple signaling from acting upon peer behaviour.
|
||||
* Reduce the coupling between reactors, the Switch, and the network
|
||||
stack
|
||||
* The responsibility of managing peer behaviour can be migrated to
|
||||
a single component instead of split between the switch and the
|
||||
address book.
|
||||
|
||||
### Negative
|
||||
|
||||
* The first iteration will simply wrap the Switch and introduce a
|
||||
level of indirection.
|
||||
|
||||
### Neutral
|
||||
|
||||
## References
|
||||
|
||||
1. Issue [#2067](https://github.com/tendermint/tendermint/issues/2067): P2P Refactor
|
||||
2. PR: [#3506](https://github.com/tendermint/tendermint/pull/3506): ADR 036: Blockchain Reactor Refactor
|
||||