Mirror of https://github.com/tendermint/tendermint.git (synced 2026-01-17 18:22:50 +00:00)

Comparing `abci_remov…`...`main_backu…` — 253 commits
| SHA1 |
|---|
| a9feb1c023 |
| 188069b1d5 |
| b1fbe26ffa |
| 9dd34ce4a1 |
| bbcd128846 |
| 8b7ae932ff |
| 82ec855be4 |
| 6878b38812 |
| 1a5d5ed63b |
| 189d120c9f |
| 8895854e95 |
| de29f6e46d |
| 9998dff847 |
| 2f4f224faa |
| dfb6589d8c |
| 365af06517 |
| 3324f49fb7 |
| ac48630fdb |
| 49502dae92 |
| c3302b0dc9 |
| 5ba0d131c4 |
| 654e5652e4 |
| 0952a99f45 |
| d09f4f503d |
| ba84060b07 |
| 739b92bf01 |
| 1c0995c809 |
| 17c94bb0dc |
| 21b2801c60 |
| 18d38dd409 |
| b8975ccff6 |
| 4290ad2982 |
| 0cbecba3d6 |
| 7efdb6362d |
| fee53a074d |
| 790132a7d8 |
| fd49683920 |
| da204d371d |
| 9d01a6880e |
| 8fd66a6e8d |
| f7bb0659be |
| 4af7568f99 |
| 3e766984a0 |
| 2d036c59fe |
| 12f0c4a624 |
| 7769467012 |
| d16f17569f |
| cc0c478c14 |
| b9dcddd07a |
| 34ca3fb474 |
| fc8df9a151 |
| b85e13aa0c |
| 20ffa4fd32 |
| f9bfdf4ce2 |
| 2b4436d1b4 |
| 627b77693f |
| 755e1474b1 |
| b07e1fae89 |
| ae164bf533 |
| eb14a9564a |
| a7dc8aaf91 |
| d324430f82 |
| 99a7ac84dc |
| f12588aab1 |
| d534285bfe |
| ffae184b62 |
| c6a0dc8559 |
| 3aa6c816e5 |
| ff0f98892f |
| 0beac722b0 |
| 45071d1f23 |
| 5a9a84eb02 |
| f008a275d1 |
| 816c6bac00 |
| 9ec9085678 |
| f58ba4d2f9 |
| 6bde634be2 |
| d704c0a0b6 |
| 629cdc7f3d |
| c8f9f061fd |
| f138cb9c0c |
| 83b7f4ad5b |
| 09b8708314 |
| d95e423756 |
| 95bd4b6701 |
| 98ad5f1bf3 |
| 82c29db2bc |
| 160a33fdb1 |
| 13bd4b63f8 |
| bc15531e1d |
| 716a624d57 |
| 58b9e4f03d |
| 6a46a76163 |
| 0d4db7aae6 |
| 40a59d1e10 |
| f6709208b0 |
| 2c40ca52c1 |
| 3136b7a084 |
| af2981a2f7 |
| 3bd2153136 |
| 301211c2cb |
| 58ee42ca52 |
| 6e38fff9ed |
| 93ab364abc |
| 1c60efc0bc |
| 6768b98568 |
| 3cdfbda2eb |
| 4552cfc271 |
| 91fba07e49 |
| 5df9c410ff |
| c8f203293d |
| b06e1cea54 |
| 6ea968d576 |
| b42c439776 |
| 387bf6795a |
| 4f3e87b2e4 |
| a371b1e3a8 |
| 9dd99e9294 |
| 4fd19a275e |
| 8d26460f9d |
| c0bdb2423a |
| cdd3479f20 |
| b1dc5a6def |
| abbeb919df |
| a02cc30e41 |
| 5c23ffb05b |
| 45518db3d0 |
| f76f6535b4 |
| 20ffe9e101 |
| af5281d704 |
| ed68aadd2b |
| e8ec611ed4 |
| 5fe1a72416 |
| b7f1e1f218 |
| a0ed437942 |
| 561440a56d |
| f1dc5811c3 |
| e48d5a0294 |
| f2c32c9b3e |
| 84bc77cb1f |
| fbcfecbc3a |
| fe0aa4d30e |
| 080dfab992 |
| ab4238a0e2 |
| bfdeccd649 |
| e84d43ec93 |
| 2d1ada4d52 |
| c69ab68848 |
| 10d1156add |
| 0f857047c5 |
| db26cff58f |
| a8efef1854 |
| 21a3bbda3f |
| 0c96f0b434 |
| 3293ce6f94 |
| d67be51ef4 |
| 10f3626e6f |
| 7bd84cd8cc |
| 7feb484765 |
| 2d77374d4f |
| c1f163f39b |
| e80dd00894 |
| 93ead3d0e5 |
| 8e90d294ca |
| dfefd837e7 |
| ac866e38d4 |
| 0f45086c5f |
| 5013bc3f4a |
| 59a711eabe |
| 43ebbed9c2 |
| ffce253273 |
| 2ae117ef62 |
| 1944dfd873 |
| d54ed6423f |
| 1067ba1571 |
| 101bf50e71 |
| d7645628f1 |
| e667b855c7 |
| c71efd1283 |
| 6bdc970377 |
| 8655080a0f |
| bf73c7985e |
| 6371f02810 |
| b055535397 |
| 2ff11e5bc2 |
| 70fd943bdf |
| f1835338d9 |
| bdf0217bd8 |
| cceea4de22 |
| 7b40167f58 |
| 2313f35800 |
| 3233568cee |
| 9f76e8da15 |
| 28cfb039c9 |
| 5f932a53f0 |
| 50b5c23d88 |
| b37f062619 |
| aa303edaef |
| 2d8df1bd4e |
| d886bc8fdd |
| daaf5d6441 |
| 3cc976482d |
| 0ca3a89c90 |
| 1a4d9397e9 |
| c8302c5fcb |
| fb9afcc969 |
| 622b930e3a |
| 1069ffc6aa |
| 7bd86ec004 |
| 9993514893 |
| 3003e05581 |
| 498657f128 |
| c322b89b2a |
| cbc7a1abcf |
| 670abbc330 |
| 7dc4f934b0 |
| cb570f6672 |
| f36999e484 |
| ae1fc74f80 |
| c4eb6113a8 |
| 25b0c7c78e |
| a6dde14ec4 |
| 152a2fa5c9 |
| f295b4d431 |
| 0bea0647fe |
| f861062ee2 |
| d5ec276052 |
| bff63aec83 |
| 69845bb44e |
| 10e1ac8fea |
| 74dd21eb89 |
| ad1f9b49bc |
| ef4e37b532 |
| 03c79b666d |
| 1148759a94 |
| 608933b73e |
| b92a19b2ce |
| 6b499aeb31 |
| 88186f76fc |
| b2409b3345 |
| d268e56383 |
| 4f30c90e62 |
| ea271c534a |
| 2164883501 |
| 61619ab072 |
| 4206a0e9b7 |
| eb762cf5d7 |
| b6a515a818 |
| 07d242c461 |
| 1e9d81fb8a |
| 5305fa79cf |
| 49ec3b9780 |
| 58effdd8b3 |

.github/CODEOWNERS (vendored, 4 changes)
@@ -7,6 +7,6 @@
 # global owners are only requested if there isn't a more specific
 # codeowner specified below. For this reason, the global codeowners
 # are often repeated in package-level definitions.
-* @ebuchman @tendermint/tendermint-engineering
+* @ebuchman @tendermint/tendermint-engineering @adizere @lasarojc
 
-/spec @ebuchman @tendermint/tendermint-research @tendermint/tendermint-engineering
+/spec @ebuchman @tendermint/tendermint-research @tendermint/tendermint-engineering @adizere @lasarojc

.github/dependabot.yml (vendored, 30 changes)
@@ -14,17 +14,21 @@ updates:
     directory: "/"
     schedule:
       interval: weekly
-    target-branch: "v0.34.x"
+    target-branch: "v0.37.x"
     open-pull-requests-limit: 10
     labels:
       - T:dependencies
      - S:automerge
 
-  - package-ecosystem: npm
-    directory: "/docs"
+  - package-ecosystem: github-actions
+    directory: "/"
+    schedule:
+      interval: weekly
+    target-branch: "v0.34.x"
+    open-pull-requests-limit: 10
     labels:
       - T:dependencies
       - S:automerge
 
   ###################################
   ##
@@ -43,9 +47,23 @@ updates:
   - package-ecosystem: gomod
     directory: "/"
     schedule:
-      interval: weekly
-    target-branch: "v0.34.x"
-    open-pull-requests-limit: 10
+      interval: daily
+    target-branch: "v0.37.x"
+    # Only allow automated security-related dependency updates on release
+    # branches.
+    open-pull-requests-limit: 0
     labels:
       - T:dependencies
       - S:automerge
+
+  - package-ecosystem: gomod
+    directory: "/"
+    schedule:
+      interval: daily
+    target-branch: "v0.34.x"
+    # Only allow automated security-related dependency updates on release
+    # branches.
+    open-pull-requests-limit: 0
+    labels:
+      - T:dependencies
+      - S:automerge

.github/mergify.yml (vendored, 8 changes)
@@ -17,6 +17,14 @@ pull_request_rules:
         {{ title }} (#{{ number }})
 
         {{ body }}
+  - name: backport patches to v0.37.x branch
+    conditions:
+      - base=main
+      - label=S:backport-to-v0.37.x
+    actions:
+      backport:
+        branches:
+          - v0.37.x
   - name: backport patches to v0.34.x branch
     conditions:
       - base=main

.github/workflows/build.yml (vendored, 6 changes)
@@ -22,7 +22,7 @@ jobs:
     steps:
       - uses: actions/setup-go@v3
         with:
-          go-version: "1.17"
+          go-version: "1.18"
       - uses: actions/checkout@v3
       - uses: technote-space/get-diff-action@v6
         with:
@@ -43,7 +43,7 @@ jobs:
     steps:
       - uses: actions/setup-go@v3
         with:
-          go-version: "1.17"
+          go-version: "1.18"
       - uses: actions/checkout@v3
       - uses: technote-space/get-diff-action@v6
         with:
@@ -65,7 +65,7 @@ jobs:
     steps:
       - uses: actions/setup-go@v3
         with:
-          go-version: "1.17"
+          go-version: "1.18"
       - uses: actions/checkout@v3
      - uses: technote-space/get-diff-action@v6
        with:

.github/workflows/check-generated.yml (vendored, 10 changes)
@@ -18,7 +18,7 @@ jobs:
     steps:
       - uses: actions/setup-go@v3
         with:
-          go-version: '1.17'
+          go-version: "1.18"
 
       - uses: actions/checkout@v3
 
@@ -26,8 +26,6 @@ jobs:
         run: |
           set -euo pipefail
 
-          readonly MOCKERY=2.12.3 # N.B. no leading "v"
-          curl -sL "https://github.com/vektra/mockery/releases/download/v${MOCKERY}/mockery_${MOCKERY}_Linux_x86_64.tar.gz" | tar -C /usr/local/bin -xzf -
           make mockery 2>/dev/null
 
           if ! git diff --stat --exit-code ; then
@@ -44,11 +42,11 @@ jobs:
     steps:
       - uses: actions/setup-go@v3
         with:
-          go-version: '1.17'
+          go-version: "1.18"
 
       - uses: actions/checkout@v3
         with:
-          fetch-depth: 1 # we need a .git directory to run git diff
+          fetch-depth: 1  # we need a .git directory to run git diff
 
       - name: "Check protobuf generated code"
         run: |
@@ -61,7 +59,7 @@ jobs:
           export GOBIN="${tools}/bin"
 
           go install github.com/bufbuild/buf/cmd/buf
-          go install github.com/gogo/protobuf/protoc-gen-gogofaster@latest
+          go install github.com/cosmos/gogoproto/protoc-gen-gogofaster@latest
 
           make proto-gen

.github/workflows/docs-deployment.yml (vendored, 5 changes)
@@ -8,6 +8,7 @@ on:
   push:
     branches:
       - main
+      - v0.34.x
     paths:
       - docs/**
       - spec/**
@@ -34,13 +35,15 @@ jobs:
           # versions will be available for the build.
           fetch-depth: 0
       - name: Build documentation
+        env:
+          NODE_OPTIONS: "--openssl-legacy-provider"
         run: |
           git config --global --add safe.directory "$PWD"
           make build-docs
       - uses: actions/upload-artifact@v3
         with:
           name: build-output
-          path: ~/output/
+          path: /tmp/tendermint-core-docs
 
   deploy:
     name: Deploy to GitHub Pages

.github/workflows/docs-toc.yml (vendored, 39 changes)
@@ -1,21 +1,20 @@
-# TODO(thane): Re-enable once we've pulled in the ADRs and RFCs from master.
 # Verify that important design docs have ToC entries.
-#name: Check documentation ToC
-#on:
-#  pull_request:
-#  push:
-#    branches:
-#      - main
-#
-#jobs:
-#  check:
-#    runs-on: ubuntu-latest
-#    steps:
-#      - uses: actions/checkout@v3
-#      - uses: technote-space/get-diff-action@v6
-#        with:
-#          PATTERNS: |
-#            docs/architecture/**
-#            docs/rfc/**
-#      - run: ./docs/presubmit.sh
-#        if: env.GIT_DIFF
+name: Check documentation ToC
+on:
+  pull_request:
+  push:
+    branches:
+      - main
+
+jobs:
+  check:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - uses: technote-space/get-diff-action@v6
+        with:
+          PATTERNS: |
+            docs/architecture/**
+            docs/rfc/**
+      - run: make check-docs-toc
+        if: env.GIT_DIFF

.github/workflows/e2e-manual-multiversion.yml (vendored, new file, 38 additions)
@@ -0,0 +1,38 @@
+# Manually run the nightly E2E tests for a particular branch, but test with
+# multiple versions.
+name: e2e-manual-multiversion
+on:
+  workflow_dispatch:
+
+jobs:
+  e2e-manual-multiversion-test:
+    # Run parallel jobs for the listed testnet groups (must match the
+    # ./build/generator -g flag)
+    strategy:
+      fail-fast: false
+      matrix:
+        group: ['00', '01', '02', '03', '04']
+    runs-on: ubuntu-latest
+    timeout-minutes: 60
+    steps:
+      - uses: actions/setup-go@v3
+        with:
+          go-version: '1.18'
+
+      - uses: actions/checkout@v3
+
+      - name: Build
+        working-directory: test/e2e
+        # Run make jobs in parallel, since we can't run steps in parallel.
+        run: make -j2 docker generator runner tests
+
+      - name: Generate testnets
+        working-directory: test/e2e
+        # When changing -g, also change the matrix groups above
+        # Generate multi-version tests with double the quantity of E2E nodes
+        # based on the current branch as compared to the latest version.
+        run: ./build/generator -g 5 -m "latest:1,local:2" -d networks/nightly/
+
+      - name: Run ${{ matrix.p2p }} p2p testnets
+        working-directory: test/e2e
+        run: ./run-multiple.sh networks/nightly/*-group${{ matrix.group }}-*.toml

.github/workflows/e2e-manual.yml (vendored, 4 changes)
@@ -11,13 +11,13 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        group: ['00', '01', '02', '03']
+        group: ['00', '01', '02', '03', '04']
     runs-on: ubuntu-latest
     timeout-minutes: 60
     steps:
       - uses: actions/setup-go@v3
         with:
-          go-version: '1.17'
+          go-version: '1.18'
 
       - uses: actions/checkout@v3

.github/workflows/e2e-nightly-34x.yml (vendored, 39 changes)
@@ -15,18 +15,23 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        group: ['00', '01', '02', '03', "04"]
+        group: ['00', '01']
     runs-on: ubuntu-latest
     timeout-minutes: 60
     steps:
       - uses: actions/setup-go@v3
         with:
-          go-version: '1.17'
+          go-version: '1.18'
 
       - uses: actions/checkout@v3
         with:
           ref: 'v0.34.x'
 
+      - name: Capture git repo info
+        id: git-info
+        run: |
+          echo "branch=`git branch --show-current`" >> $GITHUB_OUTPUT
+
       - name: Build
         working-directory: test/e2e
         # Run make jobs in parallel, since we can't run steps in parallel.
@@ -41,18 +46,32 @@ jobs:
         working-directory: test/e2e
         run: ./run-multiple.sh networks/nightly/*-group${{ matrix.group }}-*.toml
 
+    outputs:
+      git-branch: ${{ steps.git-info.outputs.branch }}
+
   e2e-nightly-fail:
     needs: e2e-nightly-test
     if: ${{ failure() }}
     runs-on: ubuntu-latest
     steps:
       - name: Notify Slack on failure
-        uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
+        uses: slackapi/slack-github-action@v1.23.0
         env:
-          SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
-          SLACK_CHANNEL: tendermint-internal
-          SLACK_USERNAME: Nightly E2E Tests
-          SLACK_ICON_EMOJI: ':skull:'
-          SLACK_COLOR: danger
-          SLACK_MESSAGE: Nightly E2E tests failed on v0.34.x
-          SLACK_FOOTER: ''
+          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
+          SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
+          BRANCH: ${{ needs.e2e-nightly-test.outputs.git-branch }}
+          RUN_URL: "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
+          COMMITS_URL: "${{ github.server_url }}/${{ github.repository }}/commits/${{ needs.e2e-nightly-test.outputs.git-branch }}"
+        with:
+          payload: |
+            {
+              "blocks": [
+                {
+                  "type": "section",
+                  "text": {
+                    "type": "mrkdwn",
+                    "text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMITS_URL }}|latest commits> possibly related to the failure."
+                  }
+                }
+              ]
+            }

.github/workflows/e2e-nightly-37x.yml (vendored, new file, 77 additions)
@@ -0,0 +1,77 @@
+# Runs randomly generated E2E testnets nightly on the v0.37.x branch.
+
+# !! This file should be kept in sync with the e2e-nightly-main.yml file,
+# modulo changes to the version labels.
+
+name: e2e-nightly-37x
+on:
+  schedule:
+    - cron: '0 2 * * *'
+
+jobs:
+  e2e-nightly-test:
+    # Run parallel jobs for the listed testnet groups (must match the
+    # ./build/generator -g flag)
+    strategy:
+      fail-fast: false
+      matrix:
+        group: ['00', '01', '02', '03', "04"]
+    runs-on: ubuntu-latest
+    timeout-minutes: 60
+    steps:
+      - uses: actions/setup-go@v3
+        with:
+          go-version: '1.18'
+
+      - uses: actions/checkout@v3
+        with:
+          ref: 'v0.37.x'
+
+      - name: Capture git repo info
+        id: git-info
+        run: |
+          echo "branch=`git branch --show-current`" >> $GITHUB_OUTPUT
+
+      - name: Build
+        working-directory: test/e2e
+        # Run make jobs in parallel, since we can't run steps in parallel.
+        run: make -j2 docker generator runner tests
+
+      - name: Generate testnets
+        working-directory: test/e2e
+        # When changing -g, also change the matrix groups above
+        run: ./build/generator -g 5 -d networks/nightly/
+
+      - name: Run ${{ matrix.p2p }} p2p testnets
+        working-directory: test/e2e
+        run: ./run-multiple.sh networks/nightly/*-group${{ matrix.group }}-*.toml
+
+    outputs:
+      git-branch: ${{ steps.git-info.outputs.branch }}
+
+  e2e-nightly-fail:
+    needs: e2e-nightly-test
+    if: ${{ failure() }}
+    runs-on: ubuntu-latest
+    steps:
+      - name: Notify Slack on failure
+        uses: slackapi/slack-github-action@v1.23.0
+        env:
+          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
+          SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
+          BRANCH: ${{ needs.e2e-nightly-test.outputs.git-branch }}
+          RUN_URL: "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
+          COMMITS_URL: "${{ github.server_url }}/${{ github.repository }}/commits/${{ needs.e2e-nightly-test.outputs.git-branch }}"
+        with:
+          payload: |
+            {
+              "blocks": [
+                {
+                  "type": "section",
+                  "text": {
+                    "type": "mrkdwn",
+                    "text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMITS_URL }}|latest commits> possibly related to the failure."
+                  }
+                }
+              ]
+            }

.github/workflows/e2e-nightly-main.yml (vendored, 45 changes)
@@ -22,7 +22,7 @@ jobs:
     steps:
       - uses: actions/setup-go@v3
         with:
-          go-version: '1.17'
+          go-version: '1.18'
 
       - uses: actions/checkout@v3
 
@@ -46,28 +46,23 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Notify Slack on failure
-        uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
+        uses: slackapi/slack-github-action@v1.23.0
         env:
-          SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
-          SLACK_CHANNEL: tendermint-internal
-          SLACK_USERNAME: Nightly E2E Tests
-          SLACK_ICON_EMOJI: ':skull:'
-          SLACK_COLOR: danger
-          SLACK_MESSAGE: Nightly E2E tests failed on main
-          SLACK_FOOTER: ''
-
-  e2e-nightly-success:  # may turn this off once they seem to pass consistently
-    needs: e2e-nightly-test
-    if: ${{ success() }}
-    runs-on: ubuntu-latest
-    steps:
-      - name: Notify Slack on success
-        uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
-        env:
-          SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
-          SLACK_CHANNEL: tendermint-internal
-          SLACK_USERNAME: Nightly E2E Tests
-          SLACK_ICON_EMOJI: ':white_check_mark:'
-          SLACK_COLOR: good
-          SLACK_MESSAGE: Nightly E2E tests passed on main
-          SLACK_FOOTER: ''
+          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
+          SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
+          BRANCH: ${{ github.ref_name }}
+          RUN_URL: "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
+          COMMITS_URL: "${{ github.server_url }}/${{ github.repository }}/commits/${{ github.ref_name }}"
+        with:
+          payload: |
+            {
+              "blocks": [
+                {
+                  "type": "section",
+                  "text": {
+                    "type": "mrkdwn",
+                    "text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMITS_URL }}|latest commits> possibly related to the failure."
+                  }
+                }
+              ]
+            }

.github/workflows/e2e.yml (vendored, 2 changes)
@@ -16,7 +16,7 @@ jobs:
     steps:
       - uses: actions/setup-go@v3
         with:
-          go-version: '1.17'
+          go-version: '1.18'
       - uses: actions/checkout@v3
       - uses: technote-space/get-diff-action@v6
         with:

.github/workflows/fuzz-nightly.yml (vendored, 33 changes)
@@ -15,7 +15,7 @@ jobs:
     steps:
       - uses: actions/setup-go@v3
         with:
-          go-version: '1.17'
+          go-version: '1.18'
 
       - uses: actions/checkout@v3
 
@@ -64,7 +64,7 @@ jobs:
 
       - name: Set crashers count
         working-directory: test/fuzz
-        run: echo "::set-output name=count::$(find . -type d -name 'crashers' | xargs -I % sh -c 'ls % | wc -l' | awk '{total += $1} END {print total}')"
+        run: echo "count=$(find . -type d -name 'crashers' | xargs -I % sh -c 'ls % | wc -l' | awk '{total += $1} END {print total}')" >> $GITHUB_OUTPUT
         id: set-crashers-count
 
     outputs:
@@ -75,13 +75,24 @@ jobs:
     if: ${{ needs.fuzz-nightly-test.outputs.crashers-count != 0 }}
     runs-on: ubuntu-latest
     steps:
-      - name: Notify Slack if any crashers
-        uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
+      - name: Notify Slack on failure
+        uses: slackapi/slack-github-action@v1.23.0
         env:
-          SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
-          SLACK_CHANNEL: tendermint-internal
-          SLACK_USERNAME: Nightly Fuzz Tests
-          SLACK_ICON_EMOJI: ':firecracker:'
-          SLACK_COLOR: danger
-          SLACK_MESSAGE: Crashers found in Nightly Fuzz tests
-          SLACK_FOOTER: ''
+          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
+          SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
+          BRANCH: ${{ github.ref_name }}
+          CRASHERS: ${{ needs.fuzz-nightly-test.outputs.crashers-count }}
+          RUN_URL: "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
+        with:
+          payload: |
+            {
+              "blocks": [
+                {
+                  "type": "section",
+                  "text": {
+                    "type": "mrkdwn",
+                    "text": ":skull: Nightly fuzz tests for `${{ env.BRANCH }}` failed with ${{ env.CRASHERS }} crasher(s). See the <${{ env.RUN_URL }}|run details>."
+                  }
+                }
+              ]
+            }

.github/workflows/govulncheck.yml (vendored, new file, 31 additions)
@@ -0,0 +1,31 @@
+name: Check for Go vulnerabilities
+# Runs https://pkg.go.dev/golang.org/x/vuln/cmd/govulncheck to proactively
+# check for vulnerabilities in code packages if there were any changes made to
+# any Go code or dependencies.
+#
+# Run `make vulncheck` from the root of the repo to run this workflow locally.
+on:
+  pull_request:
+  push:
+    branches:
+      - main
+      - release/**
+
+jobs:
+  govulncheck:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/setup-go@v3
+        with:
+          go-version: "1.18"
+      - uses: actions/checkout@v3
+      - uses: technote-space/get-diff-action@v6
+        with:
+          PATTERNS: |
+            **/*.go
+            go.mod
+            go.sum
+            Makefile
+      - name: govulncheck
+        run: make vulncheck
+        if: "env.GIT_DIFF != ''"

.github/workflows/janitor.yml (vendored, 2 changes)
@@ -10,7 +10,7 @@ jobs:
     runs-on: ubuntu-latest
     timeout-minutes: 3
     steps:
-      - uses: styfle/cancel-workflow-action@0.10.0
+      - uses: styfle/cancel-workflow-action@0.11.0
         with:
           workflow_id: 1041851,1401230,2837803
           access_token: ${{ github.token }}

.github/workflows/lint.yml (vendored, 7 changes)
@@ -22,7 +22,7 @@ jobs:
       - uses: actions/checkout@v3
       - uses: actions/setup-go@v3
         with:
-          go-version: '1.17'
+          go-version: '1.18'
       - uses: technote-space/get-diff-action@v6
         with:
           PATTERNS: |
@@ -31,10 +31,7 @@ jobs:
             go.sum
       - uses: golangci/golangci-lint-action@v3
         with:
-          # Required: the version of golangci-lint is required and
-          # must be specified without patch version: we always use the
-          # latest patch version.
-          version: v1.45
+          version: v1.50.1
           args: --timeout 10m
           github-token: ${{ secrets.github_token }}
        if: env.GIT_DIFF

.github/workflows/markdown-links.yml (vendored, 19 changes)
@@ -1,23 +1,20 @@
 name: Check Markdown links
 
 on:
-  push:
-    branches:
-      - main
-  pull_request:
-    branches: [main]
+  schedule:
+    # 2am UTC daily
+    - cron: '0 2 * * *'
 
 jobs:
   markdown-link-check:
+    strategy:
+      matrix:
+        branch: ['main', 'v0.37.x', 'v0.34.x']
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v3
-      - uses: technote-space/get-diff-action@v6
         with:
-          PATTERNS: |
-            **/**.md
-      - uses: creachadair/github-action-markdown-link-check@master
+          ref: ${{ matrix.branch }}
+      - uses: informalsystems/github-action-markdown-link-check@main
         with:
-          check-modified-files-only: 'yes'
           config-file: '.md-link-check.json'
-        if: env.GIT_DIFF

.github/workflows/pre-release.yml (vendored, new file, 65 additions)
@@ -0,0 +1,65 @@
+name: "Pre-release"
+
+on:
+  push:
+    tags:
+      - "v[0-9]+.[0-9]+.[0-9]+-alpha.[0-9]+" # e.g. v0.37.0-alpha.1, v0.38.0-alpha.10
+      - "v[0-9]+.[0-9]+.[0-9]+-beta.[0-9]+"  # e.g. v0.37.0-beta.1, v0.38.0-beta.10
+      - "v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+"     # e.g. v0.37.0-rc1, v0.38.0-rc10
+
+jobs:
+  prerelease:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
+
+      - uses: actions/setup-go@v3
+        with:
+          go-version: '1.18'
+
+      - name: Build
+        uses: goreleaser/goreleaser-action@v4
+        if: ${{ github.event_name == 'pull_request' }}
+        with:
+          version: latest
+          args: build --skip-validate # skip validate skips initial sanity checks in order to be able to fully run
+
+      # Link to CHANGELOG_PENDING.md as release notes.
+      - run: echo https://github.com/tendermint/tendermint/blob/${GITHUB_REF#refs/tags/}/CHANGELOG_PENDING.md > ../release_notes.md
+
+      - name: Release
+        uses: goreleaser/goreleaser-action@v4
+        if: startsWith(github.ref, 'refs/tags/')
+        with:
+          version: latest
+          args: release --rm-dist --release-notes=../release_notes.md
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+
+  prerelease-success:
+    needs: prerelease
+    if: ${{ success() }}
+    runs-on: ubuntu-latest
+    steps:
+      - name: Notify Slack upon pre-release
+        uses: slackapi/slack-github-action@v1.23.0
+        env:
+          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
+          SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
+          RELEASE_URL: "${{ github.server_url }}/${{ github.repository }}/releases/tag/${{ github.ref_name }}"
+        with:
+          payload: |
+            {
+              "blocks": [
+                {
+                  "type": "section",
+                  "text": {
+                    "type": "mrkdwn",
+                    "text": ":sparkles: New Tendermint pre-release: <${{ env.RELEASE_URL }}|${{ github.ref_name }}>"
+                  }
+                }
+              ]
+            }
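
The pre-release workflow above fires only on tags matching the three globs under `on.push.tags`. For a quick local sanity check, here is a minimal Go sketch; translating GitHub's tag globs into regular expressions, and the `isPreReleaseTag` helper itself, are our assumptions for illustration, not anything the workflow defines:

```go
package main

import (
	"fmt"
	"regexp"
)

// Hand-translated from the workflow's tag globs (an assumption of this
// sketch; the workflow only uses the glob syntax shown above).
var preReleaseTag = []*regexp.Regexp{
	regexp.MustCompile(`^v\d+\.\d+\.\d+-alpha\.\d+$`), // e.g. v0.37.0-alpha.1
	regexp.MustCompile(`^v\d+\.\d+\.\d+-beta\.\d+$`),  // e.g. v0.38.0-beta.10
	regexp.MustCompile(`^v\d+\.\d+\.\d+-rc\d+$`),      // e.g. v0.37.0-rc1
}

// isPreReleaseTag reports whether a tag would trigger the pre-release job.
func isPreReleaseTag(tag string) bool {
	for _, re := range preReleaseTag {
		if re.MatchString(tag) {
			return true
		}
	}
	return false
}

func main() {
	for _, tag := range []string{"v0.37.0-alpha.1", "v0.38.0-rc10", "v0.34.24"} {
		fmt.Printf("%-16s pre-release trigger: %v\n", tag, isPreReleaseTag(tag))
	}
}
```

A plain `v0.34.24` tag deliberately falls through: it is handled by the separate release workflow further down instead.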

.github/workflows/proto-lint.yml (vendored, 2 changes)
@@ -15,7 +15,7 @@ jobs:
     timeout-minutes: 5
     steps:
       - uses: actions/checkout@v3
-      - uses: bufbuild/buf-setup-action@v1.7.0
+      - uses: bufbuild/buf-setup-action@v1.10.0
       - uses: bufbuild/buf-lint-action@v1
         with:
           input: 'proto'

.github/workflows/release.yml (vendored, 33 changes)
@@ -6,7 +6,7 @@ on:
       - "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10
 
 jobs:
-  goreleaser:
+  release:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
@@ -16,10 +16,10 @@ jobs:
 
       - uses: actions/setup-go@v3
         with:
-          go-version: '1.17'
+          go-version: '1.18'
 
       - name: Build
-        uses: goreleaser/goreleaser-action@v3
+        uses: goreleaser/goreleaser-action@v4
         if: ${{ github.event_name == 'pull_request' }}
         with:
           version: latest
@@ -28,10 +28,35 @@ jobs:
       - run: echo https://github.com/tendermint/tendermint/blob/${GITHUB_REF#refs/tags/}/CHANGELOG.md#${GITHUB_REF#refs/tags/} > ../release_notes.md
 
       - name: Release
-        uses: goreleaser/goreleaser-action@v3
+        uses: goreleaser/goreleaser-action@v4
         if: startsWith(github.ref, 'refs/tags/')
         with:
           version: latest
           args: release --rm-dist --release-notes=../release_notes.md
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+
+  release-success:
+    needs: release
+    if: ${{ success() }}
+    runs-on: ubuntu-latest
+    steps:
+      - name: Notify Slack upon release
+        uses: slackapi/slack-github-action@v1.23.0
+        env:
+          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
+          SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
+          RELEASE_URL: "${{ github.server_url }}/${{ github.repository }}/releases/tag/${{ github.ref_name }}"
+        with:
+          payload: |
+            {
+              "blocks": [
+                {
+                  "type": "section",
+                  "text": {
+                    "type": "mrkdwn",
+                    "text": ":rocket: New Tendermint release: <${{ env.RELEASE_URL }}|${{ github.ref_name }}>"
+                  }
+                }
+              ]
+            }

.github/workflows/stale.yml (vendored, 2 changes)
@@ -7,7 +7,7 @@ jobs:
   stale:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/stale@v5
+      - uses: actions/stale@v6
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
          stale-pr-message: "This pull request has been automatically marked as stale because it has not had

.github/workflows/docker.yml (vendored)

@@ -6,8 +6,10 @@ on:
     branches:
       - main
     tags:
-      - "v[0-9]+.[0-9]+.[0-9]+"              # Push events to matching v*, i.e. v1.0, v20.15.10
-      - "v[0-9]+.[0-9]+.[0-9]+-rc*"          # Push events to matching v*, i.e. v1.0-rc1, v20.15.10-rc5
+      - "v[0-9]+.[0-9]+.[0-9]+"              # Push events to matching v*, i.e. v1.0, v20.15.10
+      - "v[0-9]+.[0-9]+.[0-9]+-alpha.[0-9]+" # e.g. v0.37.0-alpha.1, v0.38.0-alpha.10
+      - "v[0-9]+.[0-9]+.[0-9]+-beta.[0-9]+"  # e.g. v0.37.0-beta.1, v0.38.0-beta.10
+      - "v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+"     # e.g. v0.37.0-rc1, v0.38.0-rc10
 
 jobs:
   build:
@@ -31,7 +33,7 @@ jobs:
           if [[ $VERSION =~ ^v[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then
             TAGS="$TAGS,${DOCKER_IMAGE}:${VERSION}"
           fi
-          echo ::set-output name=tags::${TAGS}
+          echo "tags=${TAGS}" >> $GITHUB_OUTPUT
 
       - name: Set up QEMU
         uses: docker/setup-qemu-action@master
@@ -39,17 +41,17 @@ jobs:
           platforms: all
 
       - name: Set up Docker Build
-        uses: docker/setup-buildx-action@v2.0.0
+        uses: docker/setup-buildx-action@v2.2.1
 
       - name: Login to DockerHub
         if: ${{ github.event_name != 'pull_request' }}
-        uses: docker/login-action@v2.0.0
+        uses: docker/login-action@v2.1.0
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}
 
       - name: Publish to Docker Hub
-        uses: docker/build-push-action@v3.1.0
+        uses: docker/build-push-action@v3.2.0
         with:
           context: .
           file: ./DOCKER/Dockerfile

.github/workflows/testapp-docker.yml (vendored, new file, 60 additions)
@@ -0,0 +1,60 @@
+name: Docker E2E Node
+# Build & Push rebuilds the e2e Testapp docker image on every push to main and creation of tags
+# and pushes the image to https://hub.docker.com/r/tendermint/e2e-node
+on:
+  push:
+    branches:
+      - main
+    tags:
+      - "v[0-9]+.[0-9]+.[0-9]+"              # Push events to matching v*, i.e. v1.0, v20.15.10
+      - "v[0-9]+.[0-9]+.[0-9]+-alpha.[0-9]+" # e.g. v0.37.0-alpha.1, v0.38.0-alpha.10
+      - "v[0-9]+.[0-9]+.[0-9]+-beta.[0-9]+"  # e.g. v0.37.0-beta.1, v0.38.0-beta.10
+      - "v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+"     # e.g. v0.37.0-rc1, v0.38.0-rc10
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - name: Prepare
+        id: prep
+        run: |
+          DOCKER_IMAGE=tendermint/e2e-node
+          VERSION=noop
+          if [[ $GITHUB_REF == refs/tags/* ]]; then
+            VERSION=${GITHUB_REF#refs/tags/}
+          elif [[ $GITHUB_REF == refs/heads/* ]]; then
+            VERSION=$(echo ${GITHUB_REF#refs/heads/} | sed -r 's#/+#-#g')
+            if [ "${{ github.event.repository.default_branch }}" = "$VERSION" ]; then
+              VERSION=latest
+            fi
+          fi
+          TAGS="${DOCKER_IMAGE}:${VERSION}"
+          if [[ $VERSION =~ ^v[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then
+            TAGS="$TAGS,${DOCKER_IMAGE}:${VERSION}"
+          fi
+          echo "tags=${TAGS}" >> $GITHUB_OUTPUT
+
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@master
+        with:
+          platforms: all
+
+      - name: Set up Docker Build
+        uses: docker/setup-buildx-action@v2.2.1
+
+      - name: Login to DockerHub
+        if: ${{ github.event_name != 'pull_request' }}
+        uses: docker/login-action@v2.1.0
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+
+      - name: Publish to Docker Hub
+        uses: docker/build-push-action@v3.2.0
+        with:
+          context: .
+          file: ./test/e2e/docker/Dockerfile
+          platforms: linux/amd64,linux/arm64
+          push: ${{ github.event_name != 'beep_boop' }}
+          tags: ${{ steps.prep.outputs.tags }}
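
The `Prepare` step in the new testapp-docker workflow derives the Docker tag list from `GITHUB_REF` in shell. The same logic re-expressed in Go, as a hedged sketch (the `deriveTags` helper is hypothetical; note it reproduces the script's quirk of appending the same `image:version` tag a second time for plain `vX.Y.Z` versions):

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

var (
	slashRun   = regexp.MustCompile(`/+`)                        // sed -r 's#/+#-#g'
	releaseVer = regexp.MustCompile(`^v\d{1,3}\.\d{1,3}\.\d{1,3}$`) // shell's =~ test
)

// deriveTags mirrors the workflow's shell step: tag refs pass through
// unchanged, branch names are slugged, the default branch maps to
// "latest", and anything else stays "noop".
func deriveTags(githubRef, defaultBranch, image string) string {
	version := "noop"
	switch {
	case strings.HasPrefix(githubRef, "refs/tags/"):
		version = strings.TrimPrefix(githubRef, "refs/tags/")
	case strings.HasPrefix(githubRef, "refs/heads/"):
		version = slashRun.ReplaceAllString(strings.TrimPrefix(githubRef, "refs/heads/"), "-")
		if version == defaultBranch {
			version = "latest"
		}
	}
	tags := image + ":" + version
	if releaseVer.MatchString(version) {
		// The shell appends ${DOCKER_IMAGE}:${VERSION} again here,
		// producing a duplicate tag; reproduced as-is.
		tags += "," + image + ":" + version
	}
	return tags
}

func main() {
	fmt.Println(deriveTags("refs/tags/v0.37.0", "main", "tendermint/e2e-node"))
	fmt.Println(deriveTags("refs/heads/feature/x", "main", "tendermint/e2e-node"))
}
```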

.github/workflows/tests.yml (vendored, 2 changes)
@@ -18,7 +18,7 @@ jobs:
     steps:
       - uses: actions/setup-go@v3
         with:
-          go-version: "1.17"
+          go-version: "1.18"
       - uses: actions/checkout@v3
       - uses: technote-space/get-diff-action@v6
         with:

.gitignore (vendored, 4 changes)
@@ -38,6 +38,7 @@ terraform.tfstate
 terraform.tfstate.backup
 terraform.tfstate.d
 test/app/grpc_client
+test/loadtime/build
 test/e2e/build
 test/e2e/networks/*/
 test/logs
@@ -51,7 +52,8 @@ proto/spec/**/*.pb.go
 *.aux
 *.bbl
 *.blg
-*.log
 *.pdf
 *.gz
 *.dvi
+# Python virtual environments
+.venv

.golangci.yml

@@ -2,7 +2,6 @@ linters:
 enable:
   - asciicheck
   - bodyclose
-  - deadcode
   - depguard
   - dogsled
   - dupl
@@ -21,12 +20,11 @@
   - nolintlint
   - prealloc
   - staticcheck
-  - structcheck
+  # - structcheck // to be fixed by golangci-lint
   - stylecheck
   - typecheck
   - unconvert
   - unused
-  - varcheck
 
 issues:
   exclude-rules:

.goreleaser.yml

@@ -25,7 +25,8 @@ checksum:
   algorithm: sha256
 
 release:
-  name_template: "{{.Version}} (WARNING: BETA SOFTWARE)"
+  prerelease: auto
+  name_template: "{{.Version}}"
 
 archives:
   - files:

.md-link-check.json

@@ -2,5 +2,16 @@
   "retryOn429": true,
   "retryCount": 5,
   "fallbackRetryDelay": "30s",
-  "aliveStatusCodes": [200, 206, 503]
+  "aliveStatusCodes": [200, 206, 503],
+  "httpHeaders": [
+    {
+      "urls": [
+        "https://docs.github.com/",
+        "https://help.github.com/"
+      ],
+      "headers": {
+        "Accept-Encoding": "zstd, br, gzip, deflate"
+      }
+    }
+  ]
 }

CHANGELOG.md (170 changes)
@@ -2,6 +2,126 @@
 
 Friendly reminder, we have a [bug bounty program](https://hackerone.com/cosmos).
 
+## v0.34.24
+
+*Nov 22, 2022*
+
+Apart from one minor bug fix, this release aims to optimize the output of the
+RPC (both HTTP and WebSocket endpoints). See our [upgrading
+guidelines](./UPGRADING.md#v03424) for more details.
+
+### IMPROVEMENTS
+
+- `[rpc]` [\#9724](https://github.com/tendermint/tendermint/issues/9724) Remove
+  useless whitespace in RPC output (@adizere, @thanethomson)
+
+### BUG FIXES
+
+- `[rpc]` [\#9692](https://github.com/tendermint/tendermint/issues/9692) Remove
+  `Cache-Control` header response from `/check_tx` endpoint (@JayT106)
+
+## v0.34.23
+
+*Nov 9, 2022*
+
+This release introduces some new Prometheus metrics to help in determining what
+kinds of messages are consuming the most P2P bandwidth. This builds towards our
+broader goal of optimizing Tendermint bandwidth consumption, and will give us
+meaningful insights once we can establish these metrics for a number of chains.
+
+We now also return `Cache-Control` headers for select RPC endpoints to help
+facilitate caching.
+
+Special thanks to external contributors on this release: @JayT106
+
+### IMPROVEMENTS
+
+- `[p2p]` [\#9641](https://github.com/tendermint/tendermint/issues/9641) Add new
+  Envelope type and associated methods for sending and receiving Envelopes
+  instead of raw bytes. This also adds new metrics,
+  `tendermint_p2p_message_send_bytes_total` and
+  `tendermint_p2p_message_receive_bytes_total`, that expose how many bytes of
+  each message type have been sent.
+- `[rpc]` [\#9666](https://github.com/tendermint/tendermint/issues/9666) Enable
+  caching of RPC responses (@JayT106)
+
+  The following RPC endpoints will return `Cache-Control` headers with a maximum
+  age of 1 day:
+
+  - `/abci_info`
+  - `/block`, if `height` is supplied
+  - `/block_by_hash`
+  - `/block_results`, if `height` is supplied
+  - `/blockchain`
+  - `/check_tx`
+  - `/commit`, if `height` is supplied
+  - `/consensus_params`, if `height` is supplied
+  - `/genesis`
+  - `/genesis_chunked`
+  - `/tx`
+  - `/validators`, if `height` is supplied
+
+## v0.34.22
+
+This release includes several bug fixes, [one of
+which](https://github.com/tendermint/tendermint/pull/9518) we discovered while
+building up a baseline for v0.34 against which to compare our upcoming v0.37
+release during our [QA process](./docs/qa/).
+
+Special thanks to external contributors on this release: @RiccardoM
+
+### FEATURES
+
+- [rpc] [\#9423](https://github.com/tendermint/tendermint/pull/9423) Support
+  HTTPS URLs from the WebSocket client (@RiccardoM, @cmwaters)
+
+### BUG FIXES
+
+- [config] [\#9483](https://github.com/tendermint/tendermint/issues/9483)
+  Calling `tendermint init` would incorrectly leave out the new `[storage]`
+  section delimiter in the generated configuration file - this has now been
+  fixed
+- [p2p] [\#9500](https://github.com/tendermint/tendermint/issues/9500) Prevent
+  peers who have errored being added to the peer set (@jmalicevic)
+- [indexer] [\#9473](https://github.com/tendermint/tendermint/issues/9473) Fix
+  bug that caused the psql indexer to index empty blocks whenever one of the
+  transactions returned a non zero code. The relevant deduplication logic has
+  been moved within the kv indexer only (@cmwaters)
+- [blocksync] [\#9518](https://github.com/tendermint/tendermint/issues/9518) A
+  block sync stall was observed during our QA process whereby the node was
+  unable to make progress. Retrying block requests after a timeout fixes this.
+
+## v0.34.21
+
+Release highlights include:
+
+- A new `[storage]` configuration section and flag `discard_abci_responses`,
+  which, if enabled, discards all ABCI responses except the latest one in order
+  to reduce disk space usage in the state store. When enabled, the
+  `block_results` RPC endpoint can no longer function and will return an error.
+- A new CLI command, `reindex-event`, to re-index block and tx events to the
+  event sinks. You can run this command when the event store backend
+  dropped/disconnected or you want to replace the backend. When
+  `discard_abci_responses` is enabled, you will not be able to use this command.
+
+Special thanks to external contributors on this release: @rootwarp & @animart
+
+### FEATURES
+
+- [cli] [\#9083](https://github.com/tendermint/tendermint/issues/9083) Backport command to reindex missed events (@cmwaters)
+- [cli] [\#9107](https://github.com/tendermint/tendermint/issues/9107) Add the `p2p.external-address` argument to set the node P2P external address (@amimart)
+
+### IMPROVEMENTS
+
+- [config] [\#9054](https://github.com/tendermint/tendermint/issues/9054) `discard_abci_responses` flag added to discard all ABCI
+  responses except the last in order to save on storage space in the state
+  store (@samricotta)
+
+### BUG FIXES
+
+- [mempool] [\#9033](https://github.com/tendermint/tendermint/issues/9033) Rework lock discipline to mitigate callback deadlocks in the
+  priority mempool
+- [cli] [\#9103](https://github.com/tendermint/tendermint/issues/9103) fix unsafe-reset-all for working with home path (@rootwarp)
+
 ## v0.34.20
 
 Special thanks to external contributors on this release: @joeabbey @yihuang
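
The RPC caching feature described in the v0.34.23 notes above is easy to observe from a client. A minimal Go sketch, assuming a node is running locally with the RPC server on its default port (26657); the endpoint paths are the ones listed in the changelog entry:

```go
package main

import (
	"fmt"
	"log"
	"net/http"
)

func main() {
	// Endpoints taken from the v0.34.23 entry: /abci_info is always
	// cacheable, and /block is cacheable when a height is supplied.
	for _, url := range []string{
		"http://localhost:26657/abci_info",
		"http://localhost:26657/block?height=1",
	} {
		resp, err := http.Get(url)
		if err != nil {
			log.Fatal(err)
		}
		resp.Body.Close()
		fmt.Printf("%s -> Cache-Control: %q\n", url, resp.Header.Get("Cache-Control"))
	}
}
```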
@@ -438,7 +558,7 @@ Special thanks to external contributors on this release: @james-ray, @fedekunze,
|
||||
- [abci] [\#5174](https://github.com/tendermint/tendermint/pull/5174) Remove `MockEvidence` in favor of testing with actual evidence types (`DuplicateVoteEvidence` & `LightClientAttackEvidence`) (@cmwaters)
|
||||
- [abci] [\#5191](https://github.com/tendermint/tendermint/pull/5191) Add `InitChain.InitialHeight` field giving the initial block height (@erikgrinaker)
|
||||
- [abci] [\#5227](https://github.com/tendermint/tendermint/pull/5227) Add `ResponseInitChain.app_hash` which is recorded in genesis block (@erikgrinaker)
|
||||
- [config] [\#5147](https://github.com/tendermint/tendermint/pull/5147) Add `--consensus.double_sign_check_height` flag and `DoubleSignCheckHeight` config variable. See [ADR-51](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-051-double-signing-risk-reduction.md) (@dongsam)
|
||||
- [config] [\#5147](https://github.com/tendermint/tendermint/pull/5147) Add `--consensus.double_sign_check_height` flag and `DoubleSignCheckHeight` config variable. See [ADR-51](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-051-double-signing-risk-reduction.md) (@dongsam)
|
||||
- [db] [\#5233](https://github.com/tendermint/tendermint/pull/5233) Add support for `badgerdb` database backend (@erikgrinaker)
|
||||
- [evidence] [\#4532](https://github.com/tendermint/tendermint/pull/4532) Handle evidence from light clients (@melekes)
|
||||
- [evidence] [#4821](https://github.com/tendermint/tendermint/pull/4821) Amnesia (light client attack) evidence can be detected, verified and committed (@cmwaters)
|
||||
@@ -452,7 +572,7 @@ Special thanks to external contributors on this release: @james-ray, @fedekunze,
|
||||
- [rpc] [\#5017](https://github.com/tendermint/tendermint/pull/5017) Add `/check_tx` endpoint to check transactions without executing them or adding them to the mempool (@melekes)
|
||||
- [rpc] [\#5108](https://github.com/tendermint/tendermint/pull/5108) Subscribe using the websocket for new evidence events (@cmwaters)
|
||||
- [statesync] Add state sync support, where a new node can be rapidly bootstrapped by fetching state snapshots from peers instead of replaying blocks. See the `[statesync]` config section.
|
||||
- [evidence] [\#5361](https://github.com/tendermint/tendermint/pull/5361) Add LightClientAttackEvidence and refactor evidence lifecycle - for more information see [ADR-059](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-059-evidence-composition-and-lifecycle.md) (@cmwaters)
|
||||
- [evidence] [\#5361](https://github.com/tendermint/tendermint/pull/5361) Add LightClientAttackEvidence and refactor evidence lifecycle - for more information see [ADR-059](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-059-evidence-composition-and-lifecycle.md) (@cmwaters)
|
||||
|
||||
### IMPROVEMENTS
|
||||
|
||||
@@ -532,7 +652,7 @@ This security release fixes:
|
||||
Tendermint 0.33.0 and above allow block proposers to include signatures for the
|
||||
wrong block. This may happen naturally if you start a network, have it run for
|
||||
some time and restart it **without changing the chainID**. (It is a
|
||||
[misconfiguration](https://docs.tendermint.com/master/tendermint-core/using-tendermint.html)
|
||||
[misconfiguration](https://docs.tendermint.com/v0.33/tendermint-core/using-tendermint.html)
|
||||
to reuse chainIDs.) Correct block proposers will accidentally include signatures
|
||||
for the wrong block if they see these signatures, and then commits won't validate,
|
||||
making all proposed blocks invalid. A malicious validator (even with a minimal
|
||||
@@ -831,7 +951,7 @@ and a validator address plus a timestamp. Note we may remove the validator
|
||||
address & timestamp fields in the future (see ADR-25).
|
||||
|
||||
`lite2` package has been added to solve `lite` issues and introduce weak
|
||||
subjectivity interface. Refer to the [spec](https://github.com/tendermint/spec/blob/master/spec/consensus/light-client.md) for complete details.
|
||||
subjectivity interface. Refer to the [spec](https://github.com/tendermint/tendermint/tree/main/spec/consensus/light-client) for complete details.
|
||||
`lite` package is now deprecated and will be removed in v0.34 release.
|
||||
|
||||
### BREAKING CHANGES:
|
||||
@@ -1191,8 +1311,8 @@ Special thanks to external contributors on this release: @jon-certik, @gracenoah
|
||||
|
||||
*August 28, 2019*
|
||||
|
||||
@climber73 wrote the [Writing a Tendermint Core application in Java
|
||||
(gRPC)](https://github.com/tendermint/tendermint/blob/master/docs/guides/java.md)
|
||||
@climber73 wrote the [Writing a Tendermint Core application in Java
|
||||
(gRPC)](https://docs.tendermint.com/v0.34/tutorials/java.html)
|
||||
guide.
|
||||
|
||||
Special thanks to external contributors on this release:
|
||||
@@ -1225,7 +1345,7 @@ Special thanks to external contributors on this release:
|
||||
|
||||
### FEATURES:
|
||||
|
||||
- [blockchain] [\#3561](https://github.com/tendermint/tendermint/issues/3561) Add early version of the new blockchain reactor, which is supposed to be more modular and testable compared to the old version. To try it, you'll have to change `version` in the config file, [here](https://github.com/tendermint/tendermint/blob/master/config/toml.go#L303) NOTE: It's not ready for a production yet. For further information, see [ADR-40](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-040-blockchain-reactor-refactor.md) & [ADR-43](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-043-blockchain-riri-org.md)
|
||||
- [blockchain] [\#3561](https://github.com/tendermint/tendermint/issues/3561) Add early version of the new blockchain reactor, which is supposed to be more modular and testable compared to the old version. To try it, you'll have to change `version` in the config file, [here](https://github.com/tendermint/tendermint/blob/main/config/toml.go#L303) NOTE: It's not ready for a production yet. For further information, see [ADR-40](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-040-blockchain-reactor-refactor.md) & [ADR-43](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-043-blockchain-riri-org.md)
|
||||
- [mempool] [\#3826](https://github.com/tendermint/tendermint/issues/3826) Make `max_msg_bytes` configurable(@bluele)
|
||||
- [node] [\#3846](https://github.com/tendermint/tendermint/pull/3846) Allow replacing existing p2p.Reactor(s) using [`CustomReactors`
|
||||
option](https://godoc.org/github.com/tendermint/tendermint/node#CustomReactors).
|
||||
@@ -1283,7 +1403,7 @@ This release contains a minor enhancement to the ABCI and some breaking changes
|
||||
|
||||
- [p2p] [\#3338](https://github.com/tendermint/tendermint/issues/3338) Prevent "sent next PEX request too soon" errors by not calling
|
||||
ensurePeers outside of ensurePeersRoutine
|
||||
- [behaviour] [\3772](https://github.com/tendermint/tendermint/pull/3772) Return correct reason in MessageOutOfOrder (@jim380)
|
||||
- [behavior] [\3772](https://github.com/tendermint/tendermint/pull/3772) Return correct reason in MessageOutOfOrder (@jim380)
|
||||
- [config] [\#3723](https://github.com/tendermint/tendermint/issues/3723) Add consensus_params to testnet config generation; document time_iota_ms (@ashleyvega)
|
||||
|
||||
|
||||
@@ -1542,7 +1662,7 @@ Special thanks to external contributors on this release:
|
||||
- [libs/db] [\#3611](https://github.com/tendermint/tendermint/issues/3611) Conditional compilation
|
||||
* Use `cleveldb` tag instead of `gcc` to compile Tendermint with CLevelDB or
|
||||
use `make build_c` / `make install_c` (full instructions can be found at
|
||||
https://docs.tendermint.com/master/introduction/install.html#compile-with-cleveldb-support)
|
||||
<https://docs.tendermint.com>)
|
||||
* Use `boltdb` tag to compile Tendermint with bolt db
|
||||
- [node] [\#3362](https://github.com/tendermint/tendermint/issues/3362) Return an error if `persistent_peers` list is invalid (except
|
||||
when IP lookup fails)
|
||||
@@ -1603,7 +1723,7 @@ It brings back `NetAddress()` to `NodeInfo` and uses it instead of `SocketAddr`
|
||||
Additionally, it improves response time on the `/validators` or `/status` RPC endpoints.
|
||||
As a side-effect it makes these RPC endpoint more difficult to DoS and fixes a performance degradation in `ExecCommitBlock`.
|
||||
Also, it contains an [ADR](https://github.com/tendermint/tendermint/pull/3539) that proposes decoupling the
|
||||
responsibility for peer behaviour from the `p2p.Switch` (by @brapse).
|
||||
responsibility for peer behavior from the `p2p.Switch` (by @brapse).
|
||||
|
||||
Special thanks to external contributors on this release:
|
||||
@brapse, @guagualvcha, @mydring
|
||||
@@ -1766,7 +1886,7 @@ more details.
|
||||
- [rpc] [\#3269](https://github.com/tendermint/tendermint/issues/2826) Limit number of unique clientIDs with open subscriptions. Configurable via `rpc.max_subscription_clients`
|
||||
- [rpc] [\#3269](https://github.com/tendermint/tendermint/issues/2826) Limit number of unique queries a given client can subscribe to at once. Configurable via `rpc.max_subscriptions_per_client`.
|
||||
- [rpc] [\#3435](https://github.com/tendermint/tendermint/issues/3435) Default ReadTimeout and WriteTimeout changed to 10s. WriteTimeout can increased by setting `rpc.timeout_broadcast_tx_commit` in the config.
|
||||
- [rpc/client] [\#3269](https://github.com/tendermint/tendermint/issues/3269) Update `EventsClient` interface to reflect new pubsub/eventBus API [ADR-33](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-033-pubsub.md). This includes `Subscribe`, `Unsubscribe`, and `UnsubscribeAll` methods.
|
||||
- [rpc/client] [\#3269](https://github.com/tendermint/tendermint/issues/3269) Update `EventsClient` interface to reflect new pubsub/eventBus API [ADR-33](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-033-pubsub.md). This includes `Subscribe`, `Unsubscribe`, and `UnsubscribeAll` methods.
|
||||
|
||||
* Apps
|
||||
- [abci] [\#3403](https://github.com/tendermint/tendermint/issues/3403) Remove `time_iota_ms` from BlockParams. This is a
|
||||
@@ -1819,7 +1939,7 @@ more details.
|
||||
- [blockchain] [\#3358](https://github.com/tendermint/tendermint/pull/3358) Fix timer leak in `BlockPool` (@guagualvcha)
|
||||
- [cmd] [\#3408](https://github.com/tendermint/tendermint/issues/3408) Fix `testnet` command's panic when creating non-validator configs (using `--n` flag) (@srmo)
|
||||
- [libs/db/remotedb/grpcdb] [\#3402](https://github.com/tendermint/tendermint/issues/3402) Close Iterator/ReverseIterator after use
|
||||
- [libs/pubsub] [\#951](https://github.com/tendermint/tendermint/issues/951), [\#1880](https://github.com/tendermint/tendermint/issues/1880) Use non-blocking send when dispatching messages [ADR-33](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-033-pubsub.md)
|
||||
- [libs/pubsub] [\#951](https://github.com/tendermint/tendermint/issues/951), [\#1880](https://github.com/tendermint/tendermint/issues/1880) Use non-blocking send when dispatching messages [ADR-33](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-033-pubsub.md)
|
||||
- [lite] [\#3364](https://github.com/tendermint/tendermint/issues/3364) Fix `/validators` and `/abci_query` proxy endpoints
|
||||
(@guagualvcha)
|
||||
- [p2p/conn] [\#3347](https://github.com/tendermint/tendermint/issues/3347) Reject all-zero shared secrets in the Diffie-Hellman step of secret-connection
|
||||
@@ -2271,8 +2391,8 @@ Special thanks to external contributors on this release:
|
||||
- [blockchain] [\#2731](https://github.com/tendermint/tendermint/issues/2731) Retry both blocks if either is bad to avoid getting stuck during fast sync (@goolAdapter)
|
||||
- [consensus] [\#2893](https://github.com/tendermint/tendermint/issues/2893) Use genDoc.Validators instead of state.NextValidators on replay when appHeight==0 (@james-ray)
|
||||
- [log] [\#2868](https://github.com/tendermint/tendermint/issues/2868) Fix `module=main` setting overriding all others
|
||||
- NOTE: this changes the default logging behaviour to be much less verbose.
|
||||
Set `log_level="info"` to restore the previous behaviour.
|
||||
- NOTE: this changes the default logging behavior to be much less verbose.
|
||||
Set `log_level="info"` to restore the previous behavior.
- [rpc] [\#2808](https://github.com/tendermint/tendermint/issues/2808) Fix `accum` field in `/validators` by calling `IncrementAccum` if necessary
- [rpc] [\#2811](https://github.com/tendermint/tendermint/issues/2811) Allow integer IDs in JSON-RPC requests (@tomtau)
- [txindex/kv] [\#2759](https://github.com/tendermint/tendermint/issues/2759) Fix tx.height range queries

* [state] [\#2644](https://github.com/tendermint/tendermint/issues/2644) Add Version field to State, breaking the format of State as encoded on disk.
* [rpc] [\#2298](https://github.com/tendermint/tendermint/issues/2298) `/abci_query` takes `prove` argument instead of `trusted` and switches the default behavior to `prove=false`
* [rpc] [\#2654](https://github.com/tendermint/tendermint/issues/2654) Remove all `node_info.other.*_version` fields in `/status` and `/net_info`
* [rpc] [\#2636](https://github.com/tendermint/tendermint/issues/2636) Remove

This release is mostly about the ConsensusParams - removing fields and enforcing MaxGas.
It also addresses some issues found via security audit, removes various unused
functions from `libs/common`, and implements
[ADR-012](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-012-peer-transport.md).

BREAKING CHANGES:

FEATURES:

- [libs] [\#2286](https://github.com/tendermint/tendermint/issues/2286) Panic if `autofile` or `db/fsdb` permissions change from 0600.

IMPROVEMENTS:

- [libs/db] [\#2371](https://github.com/tendermint/tendermint/issues/2371) Output error instead of panic when the given `db_backend` is not initialized (@bradyjoestar)
- [mempool] [\#2399](https://github.com/tendermint/tendermint/issues/2399) Make mempool cache a proper LRU (@bradyjoestar)
- [p2p] [\#2126](https://github.com/tendermint/tendermint/issues/2126) Introduce PeerTransport interface to improve isolation of concerns
- [libs/common] [\#2326](https://github.com/tendermint/tendermint/issues/2326) Service returns ErrNotStarted

A few more breaking changes are in the works - each will come with a clear
Architecture Decision Record (ADR) explaining the change. You can review ADRs
[here](https://github.com/tendermint/tendermint/tree/main/docs/architecture)
or in the [open Pull Requests](https://github.com/tendermint/tendermint/pulls).
You can also check in on the [issues marked as
breaking](https://github.com/tendermint/tendermint/issues?q=is%3Aopen+is%3Aissue+label%3Abreaking).

BREAKING CHANGES:

- [abci] Added address of the original proposer of the block to Header
- [abci] Change ABCI Header to match Tendermint exactly
- [abci] [\#2159](https://github.com/tendermint/tendermint/issues/2159) Update use of `Validator` (see
  [ADR-018](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-018-ABCI-Validators.md)):
  - Remove PubKey from `Validator` (so it's just Address and Power)
  - Introduce `ValidatorUpdate` (with just PubKey and Power)
  - InitChain and EndBlock use ValidatorUpdate
- [state] [\#1815](https://github.com/tendermint/tendermint/issues/1815) Validator set changes are now delayed by one block (!)
  - Add NextValidatorSet to State, changes on-disk representation of state
- [state] [\#2184](https://github.com/tendermint/tendermint/issues/2184) Enforce ConsensusParams.BlockSize.MaxBytes (See
  [ADR-020](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-020-block-size.md)).
  - Remove ConsensusParams.BlockSize.MaxTxs
  - Introduce maximum sizes for all components of a block, including ChainID
- [types] Updates to the block Header:
- [consensus] [\#2203](https://github.com/tendermint/tendermint/issues/2203) Implement BFT time
  - Timestamp in block must be monotonic and equal the median of timestamps in block's LastCommit
- [crypto] [\#2239](https://github.com/tendermint/tendermint/issues/2239) Secp256k1 signature changes (See
  [ADR-014](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-014-secp-malleability.md)):
  - format changed from DER to `r || s`, both little endian encoded as 32 bytes.
  - malleability removed by requiring `s` to be in canonical form.
FEATURES

- [cmd] Added metrics (served under `/metrics` using a Prometheus client;
  disabled by default). See the new `instrumentation` section in the config and
  [metrics](https://github.com/tendermint/tendermint/blob/main/docs/tendermint-core/metrics.md)
  guide.
- [p2p] Add IPv6 support to peering.
- [p2p] Add `external_address` to config to allow specifying the address for

FEATURES

- [rpc] the RPC documentation is now published to https://github.com/tendermint/tendermint/tree/main/spec/rpc
- [p2p] AllowDuplicateIP config option to refuse connections from same IP.
  - true by default for now, false by default in next breaking release
- [docs] Add docs for query, tx indexing, events, pubsub

BREAKING CHANGES:

- Config and Flags:
  - The `config` map is replaced with a [`Config` struct](https://github.com/tendermint/tendermint/blob/main/config/config.go#L11),
    containing substructs: `BaseConfig`, `P2PConfig`, `MempoolConfig`, `ConsensusConfig`, `RPCConfig`
  - This affects the following flags:
    - `--seeds` is now `--p2p.seeds`
- Logger
  - Replace static `log15` logger with a simple interface, and provide a new implementation using `go-kit`.
    See our new [logging library](https://github.com/tendermint/tendermint/blob/main/libs/log/logger.go) and [blog post](https://blog.cosmos.network/abstracting-the-logger-interface-in-go-4cf96bf90bb7) for more details
  - Levels `warn` and `notice` are removed (you may need to change them in your `config.toml`!)
  - Change some [function and method signatures](https://gist.github.com/ebuchman/640d5fc6c2605f73497992fe107ebe0b) to accept a logger

# Unreleased Changes

## v0.38.0

### BREAKING CHANGES

- P2P Protocol

- Go API
  - [p2p] \#9625 Remove unused p2p/trust package (@cmwaters)
  - [rpc] \#9655 Remove global environment and replace with constructor. (@williambanfield, @tychoish)
  - [node] \#9655 Move DBContext and DBProvider from the node package to the config package. (@williambanfield, @tychoish)

- Blockchain Protocol

- Data Storage
  - [state] \#6541 Move pruneBlocks from consensus/state to state/execution. (@JayT106)

- Tooling
  - [tools/tm-signer-harness] \#6498 Use the OS home directory instead of the hardcoded PATH. (@JayT106)
  - [metrics] \#9682 Move state-syncing and block-syncing metrics to their respective packages (@cmwaters);
    labels have moved from block_syncing -> blocksync_syncing and state_syncing -> statesync_syncing
  - [inspect] \#9655 Add a new `inspect` command for introspecting the state and block store of a crashed tendermint node. (@williambanfield)

### FEATURES

- [proxy] \#9830 Introduce `NewUnsyncLocalClientCreator`, which allows local
  ABCI clients to have the same concurrency model as remote clients (i.e. one
  mutex per client "connection", for each of the four ABCI "connections");
  see the sketch after this list.
- [config] \#9680 Introduce `BootstrapPeers` to the config to allow nodes to list peers to be added to
  the addressbook upon start up (@cmwaters)
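
A minimal sketch of wiring up `NewUnsyncLocalClientCreator`, assuming its signature mirrors `proxy.NewLocalClientCreator` (taking an `abci/types.Application` and returning a `ClientCreator`); this is an assumption based on the entry above, not a confirmed API:

```go
package main

import (
	"github.com/tendermint/tendermint/abci/example/kvstore"
	"github.com/tendermint/tendermint/proxy"
)

func main() {
	app := kvstore.NewApplication()

	// Assumed signature: NewUnsyncLocalClientCreator(types.Application) ClientCreator.
	// Unlike the classic local client, each of the four ABCI "connections"
	// gets its own mutex, matching the concurrency model of a remote
	// (socket/gRPC) client.
	clientCreator := proxy.NewUnsyncLocalClientCreator(app)

	_ = clientCreator // in a real node, pass this into node.NewNode(...)
}
```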

### IMPROVEMENTS

- [pubsub] \#7319 Performance improvements for the event query API (@creachadair)
- [p2p/pex] \#6509 Improve addrBook.hash performance (@cuonglm)
- [crypto/merkle] \#6443 & \#6513 Improve HashAlternatives performance (@cuonglm, @marbar3778)
- [rpc] \#9650 Enable caching of RPC responses (@JayT106)
- [consensus] \#9760 Save peer LastCommit correctly to achieve 50% reduction in gossiped precommits. (@williambanfield)

### BUG FIXES

- [docker] \#9462 Ensure Docker image uses consistent version of Go
- [abci-cli] \#9717 Fix broken abci-cli help command

## v0.37.0

Special thanks to external contributors on this release:

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES

- CLI/RPC/Config
  - [config] \#9259 Rename the fastsync section and the fast_sync key to blocksync and block_sync respectively

- Apps
  - [abci/counter] \#6684 Delete counter example app
  - [abci] \#5783 Make length delimiter encoding consistent (`uint64`) between ABCI and P2P wire-level protocols
  - [abci] \#9145 Remove unused Response/Request `SetOption` from ABCI (@samricotta)
  - [abci/params] \#9287 Deduplicate `ConsensusParams` and `BlockParams` so only `types` proto definitions are used (@cmwaters)
    - Remove `TimeIotaMs` and use a hard-coded 1 millisecond value to ensure monotonically increasing block times.
    - Rename `AppVersion` to `App` so as to not stutter.
  - [types] \#9287 Reduce the use of protobuf types in core logic. (@cmwaters)
    - `ConsensusParams`, `BlockParams`, `ValidatorParams`, `EvidenceParams`, `VersionParams` have become native types.
      They still utilize protobuf when being sent over the wire or written to disk.
    - Moved `ValidateConsensusParams` inside (now native type) `ConsensusParams`, and renamed it to `ValidateBasic`.
  - [abci] \#9301 New ABCI methods `PrepareProposal` and `ProcessProposal`, which give the app control over the transactions proposed and allow for verification of proposed blocks (see the sketch after this list).
  - [abci] \#8216 Renamed `EvidenceType` to `MisbehaviorType` and `Evidence` to `Misbehavior` as a more accurate label of their contents. (@williambanfield, @sergio-mena)
  - [abci] \#9122 Renamed `LastCommitInfo` to `CommitInfo` in preparation for vote extensions. (@cmwaters)
  - [abci] \#8656, \#8901 Added cli commands for `PrepareProposal` and `ProcessProposal`. (@jmalicevic, @hvanz)
  - [abci] \#6403 Change the `key` and `value` fields from `[]byte` to `string` in the `EventAttribute` type. (@alexanderbez)
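
A minimal sketch of an application using the two new methods, assuming the v0.37-style Go ABCI interface (value-type Request/Response structs in `abci/types` and a `BaseApplication` to embed); treat this as an illustration under those assumptions, not canonical example code:

```go
package myapp

import (
	abci "github.com/tendermint/tendermint/abci/types"
)

type App struct {
	abci.BaseApplication // provides no-op defaults for the rest of the interface
}

// PrepareProposal lets the app reorder, add, or drop transactions before the
// block is proposed. This sketch keeps transactions as-is while respecting
// the size budget in req.MaxTxBytes.
func (app *App) PrepareProposal(req abci.RequestPrepareProposal) abci.ResponsePrepareProposal {
	var (
		txs   [][]byte
		total int64
	)
	for _, tx := range req.Txs {
		if total+int64(len(tx)) > req.MaxTxBytes {
			break
		}
		txs = append(txs, tx)
		total += int64(len(tx))
	}
	return abci.ResponsePrepareProposal{Txs: txs}
}

// ProcessProposal lets the app verify a proposed block before voting on it.
func (app *App) ProcessProposal(req abci.RequestProcessProposal) abci.ResponseProcessProposal {
	// Application-specific validation of req.Txs would go here.
	return abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_ACCEPT}
}
```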

- P2P Protocol

- Go API
  - [all] \#9144 Change spelling from British English to American (@cmwaters)
    - Rename "Subscription.Cancelled()" to "Subscription.Canceled()" in libs/pubsub
  - [crypto/sr25519] \#6526 Do not re-execute the Ed25519-style key derivation step when doing signing and verification. The derivation is now done once and only once. This breaks `sr25519.GenPrivKeyFromSecret` output compatibility. (@Yawning)

- Blockchain Protocol

### FEATURES

- [\#9083](https://github.com/tendermint/tendermint/issues/9083) Backport cli command to reindex missed events (@cmwaters)
- [abci] \#9301 New ABCI methods `PrepareProposal` and `ProcessProposal`, which give the app control over the transactions proposed and allow for verification of proposed blocks.

### IMPROVEMENTS

- [crypto] \#9250 Update to use btcec v2 and the latest btcutil. (@wcsiu)
- [cli] \#9171 Add `--hard` flag to rollback command (and a boolean to the `RollbackState` method). This will rollback
  state and remove the last block. This command can be triggered multiple times. The application must also rollback
  state to the same height. (@tsutsu, @cmwaters)
- [proto] \#9356 Migrate from `gogo/protobuf` to `cosmos/gogoproto` (@julienrbrt)
- [rpc] \#9276 Added `header` and `header_by_hash` queries to the RPC client (@samricotta)
- [abci] \#5706 Added `AbciVersion` to `RequestInfo` allowing applications to check ABCI version when connecting to Tendermint. (@marbar3778)
- [node] \#6059 Validate and complete genesis doc before saving to state store (@silasdavis)
- [crypto/ed25519] \#5632 Adopt zip215 `ed25519` verification. (@marbar3778)
- [crypto/ed25519] \#6526 Use [curve25519-voi](https://github.com/oasisprotocol/curve25519-voi) for `ed25519` signing and verification. (@Yawning)
- [crypto/sr25519] \#6526 Use [curve25519-voi](https://github.com/oasisprotocol/curve25519-voi) for `sr25519` signing and verification. (@Yawning)
- [crypto] \#6120 Implement batch verification interface for ed25519 and sr25519. (@marbar3778 & @Yawning)
- [types] \#6120 Use batch verification for verifying commit signatures. (@marbar3778 & @cmwaters & @Yawning)
  - If the key type supports the batch verification API, we try to batch verify; if that fails, we fall back to verifying each signature individually (see the sketch after this list).
- [state] \#9505 Added logic so when pruning, the evidence period is taken into consideration and only deletes unnecessary data (@samricotta)
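
A minimal sketch of the batch-verify-with-fallback pattern described above, assuming the `crypto.BatchVerifier` interface (`Add` + `Verify`) and `ed25519.NewBatchVerifier` as exposed by the tendermint crypto packages; the helper itself is illustrative, not the actual commit-verification code:

```go
package example

import (
	"github.com/tendermint/tendermint/crypto"
	"github.com/tendermint/tendermint/crypto/ed25519"
)

type sigEntry struct {
	pubKey crypto.PubKey
	msg    []byte
	sig    []byte
}

// verifyAll batch-verifies all signatures in one pass and, if the batch
// fails, falls back to verifying each signature individually.
func verifyAll(entries []sigEntry) bool {
	bv := ed25519.NewBatchVerifier()
	for _, e := range entries {
		if err := bv.Add(e.pubKey, e.msg, e.sig); err != nil {
			return false
		}
	}
	if ok, _ := bv.Verify(); ok {
		return true // the whole batch checked out in one pass
	}
	// Fallback: single-verify to confirm (or pinpoint) the failure.
	for _, e := range entries {
		if !e.pubKey.VerifySignature(e.msg, e.sig) {
			return false
		}
	}
	return true
}
```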

### BUG FIXES

- [consensus] \#9229 Fix round number of `enterPropose` when handling `RoundStepNewRound` timeout. (@fatcat22)
- [docker] \#9073 Enable cross platform build using docker buildx
- [blocksync] \#9518 Handle the case when the sending queue is full: retry block request after a timeout

* We are committed to providing a friendly, safe and welcoming environment for
  all, regardless of level of experience, gender, gender identity and
  expression, sexual orientation, disability, personal appearance, body size,
  race, ethnicity, age, religion, nationality, or other similar characteristics.

* On Slack, please avoid using overtly sexual nicknames or other nicknames that
  might detract from a friendly, safe and welcoming environment for all.

  want to experiment with, make a fork and see how it works.

* We will exclude you from interaction if you insult, demean or harass anyone.
  That is not welcome behavior. We interpret the term “harassment” as including
  the definition in the [Citizen Code of Conduct][ccoc]; if you have any lack of
  clarity about what might be included in that concept, please read their
  definition. In particular, we don’t tolerate behavior that excludes people in

  making this community a safe place for you and we’ve got your back.

* Likewise any spamming, trolling, flaming, baiting or other attention-stealing
  behavior is not welcome.

----

support permissionless value-carrying networks. While all contributions are
welcome, contributors should bear this goal in mind in deciding if they should
target the main Tendermint project or a potential fork. When targeting the
main Tendermint project, the following process leads to the best chance of
landing changes in `main`.

All work on the code base should be motivated by a [Github
Issue](https://github.com/tendermint/tendermint/issues).
[Search](https://github.com/tendermint/tendermint/issues?q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22)
is a good place to start when looking for places to contribute. If you
would like to work on an issue which already exists, please indicate so
by leaving a comment.

If the issue would benefit from thorough discussion, maintainers may
request that you create a [Request For
Comment](https://github.com/tendermint/tendermint/tree/main/docs/rfc)
in the Tendermint repo. Discussion
at the RFC stage will build collective understanding of the dimensions
of the problems and help structure conversations around trade-offs.

When the problem as well as proposed solution are well understood,
changes should start with a [draft
pull request](https://github.blog/2019-02-14-introducing-draft-pull-requests/)
against `main`. The draft signals that work is underway. When the work
is ready for feedback, hitting "Ready for Review" will signal to the
maintainers to take a look.

Each stage of the process is aimed at creating feedback cycles which align contributors and maintainers to make sure:

- Contributors don’t waste their time implementing/proposing features which won’t land in `main`.
- Maintainers have the necessary context in order to support and review contributions.

## Forking

For instance, to create a fork and work on a branch of it, I would:

- `git remote add origin git@github.com:ebuchman/basecoin.git`

Now `origin` refers to my fork and `upstream` refers to the Tendermint version.
So I can `git push -u origin main` to update my fork, and make pull requests to tendermint from there.
Of course, replace `ebuchman` with your git handle.

To pull in updates from the upstream repo, run

- `git fetch upstream`
- `git rebase upstream/main` (or whatever branch you want)

## Dependencies

We use [go modules](https://github.com/golang/go/wiki/Modules) to manage dependencies.

That said, the `main` branch of every Tendermint repository should just build
with `go get`, which means they should be kept up-to-date with their
dependencies so we can get away with telling people they can just `go get` our
software.

## Protobuf

We use [Protocol Buffers](https://developers.google.com/protocol-buffers) along
with [`gogoproto`](https://github.com/cosmos/gogoproto) to generate code for use
across Tendermint Core.

To generate proto stubs, lint, and check protos for breaking changes, you will

If you are a VS Code user, you may want to add the following to your `.vscode/settings.json`:

{
  "protoc": {
    "options": [
      "--proto_path=${workspaceRoot}/proto"
    ]
  }
}

Every fix, improvement, feature, or breaking change should be made in a
pull-request that includes an update to the `CHANGELOG_PENDING.md` file.

A feature can also be worked on a feature branch, if its size and/or risk
justifies it (see [Branching Model and Release](#branching-model-and-release) below).

### What does a good changelog entry look like?

Changelog entries should answer the question: "what is important about this
change for users to know?" or "what problem does this solve for users?". It
should not simply be a reiteration of the title of the associated PR, unless the
title of the PR _very_ clearly explains the benefit of a change to a user.

Some good examples of changelog entry descriptions:

```md
- [consensus] \#1111 Small transaction throughput improvement (approximately
  3-5\% from preliminary tests) through refactoring the way we use channels
- [mempool] \#1112 Refactor Go API to be able to easily swap out the current
  mempool implementation in Tendermint forks
- [p2p] \#1113 Automatically ban peers when their messages are unsolicited or
  are received too frequently
```

Some bad examples of changelog entry descriptions:

```md
- [consensus] \#1111 Refactor channel usage
- [mempool] \#1112 Make API generic
- [p2p] \#1113 Ban for PEX message abuse
```

For more on how to write good changelog entries, see:

- <https://keepachangelog.com>
- <https://docs.gitlab.com/ee/development/changelog.html#writing-good-changelog-entries>
- <https://depfu.com/blog/what-makes-a-good-changelog>

### Changelog entry format

Changelog entries should be formatted as follows:

```md
- [module] \#xxx Some description of the change (@contributor)
```

Here, `module` is the part of the code that changed (typically a

Changes with multiple classifications should be doubly included (eg. a bug fix
that is also a breaking change should be recorded under both).

Breaking changes are further subdivided according to the APIs/users they impact.
Any change that affects multiple APIs/users should be recorded multiply - for
instance, a change to the `Blockchain Protocol` that removes a field from the
header should also be recorded under `CLI/RPC/Config` since the field will be
removed from the header in RPC responses as well.

## Branching Model and Release

The main development branch is `main`.

Every release is maintained in a release branch named `vX.Y.Z`.

Pending minor releases have long-lived release candidate ("RC") branches. Minor release changes should be merged to these long-lived RC branches at the same time that the changes are merged to `main`.

If a feature's size is big and/or its risk is high, it can be implemented in a feature branch.
While the feature work is in progress,
pull requests are open and squash merged against the feature branch.
Branch `main` is periodically merged (merge commit) into the feature branch,
to reduce branch divergence.
When the feature is complete, the feature branch is merged back (merge commit) into `main`.
The moment of the final merge can be carefully chosen
so as to land different features in different releases.

Note all pull requests should be squash merged except for merging to a release branch (named `vX.Y`). This keeps the commit history clean and makes it
easy to reference the pull request where a change was introduced.

### Development Procedure

The latest state of development is on `main`, which must never fail `make test`. _Never_ force push `main`, unless fixing broken git history (which we rarely do anyways).

To begin contributing, create a development branch either on `github.com/tendermint/tendermint`, or your fork (using `git remote add origin`).

Make changes, and before submitting a pull request, update the `CHANGELOG_PENDING.md` to record your change. Also, run either `git rebase` or `git merge` on top of the latest `main`. (Since pull requests are squash-merged, either is fine!)

Update the `UPGRADING.md` if the change you've made is breaking and the
instructions should be in place for a user on how he/she can upgrade its
software (ABCI application, Tendermint-based blockchain, light client, wallet).

Once you have submitted a pull request, label it with either `R:minor`, if the change should be included in the next minor release, or `R:major`, if the change is meant for a major release.

Sometimes (often!) pull requests get out-of-date with `main`, as other people merge different pull requests to `main`. It is our convention that pull request authors are responsible for updating their branches with `main`. (This also means that you shouldn't update someone else's branch for them; even if it seems like you're doing them a favor, you may be interfering with their git flow in some way!)

#### Merging Pull Requests

It is also our convention that authors merge their own pull requests, when possible.

Before merging a pull request:

- Ensure pull branch is up-to-date with a recent `main` (GitHub won't let you merge without this!)
- Run `make test` to ensure that all tests pass
- [Squash](https://stackoverflow.com/questions/5189560/squash-my-last-x-commits-together-using-git) merge pull request

#### Pull Requests for Minor Releases

If your change should be included in a minor release, please also open a PR against the long-lived minor release candidate branch (e.g., `rc1/v0.33.5`) _immediately after your change has been merged to main_.

You can do this by cherry-picking your commit off `main`:

```sh
$ git checkout rc1/v0.33.5
$ git checkout -b {new branch name}
$ git cherry-pick {commit SHA from main}
# may need to fix conflicts, and then use git add and git cherry-pick --continue
$ git push origin {new branch name}
```

```
cmd/debug: execute p.Signal only when p is not nil

Fixes #nnnn
```

Each PR should have one commit once it lands on `main`; this can be accomplished by using the "squash and merge" button on Github. Be sure to edit your commit message, though!

## Testing

# Use a build arg to ensure that both stages use the same,
# hopefully current, go version.
ARG GOLANG_BASE_IMAGE=golang:1.18-alpine

# stage 1 Generate Tendermint Binary
FROM --platform=$BUILDPLATFORM $GOLANG_BASE_IMAGE as builder
RUN apk update && \
    apk upgrade && \
    apk --no-cache add make
COPY / /tendermint
WORKDIR /tendermint
RUN TARGETPLATFORM=$TARGETPLATFORM make build-linux

# stage 2
FROM $GOLANG_BASE_IMAGE
LABEL maintainer="hello@tendermint.com"

# Tendermint will be looking for the genesis file in /tendermint/config/genesis.json

Official releases can be found [here](https://github.com/tendermint/tendermint/releases).

The Dockerfile for Tendermint is not expected to change in the near future. The main file used for all builds can be found [here](https://raw.githubusercontent.com/tendermint/tendermint/main/DOCKER/Dockerfile).

Respective versioned files can be found at `https://raw.githubusercontent.com/tendermint/tendermint/vX.XX.XX/DOCKER/Dockerfile` (replace the Xs with the version number).

## Quick reference

Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine, written in any programming language, and securely replicates it on many machines.

For more background, see [the docs](https://docs.tendermint.com/main/introduction/#quick-start).

To get started developing applications, see the [application developers guide](https://docs.tendermint.com/main/introduction/quick-start.html).

## How to use this image

## Local cluster

To run a 4-node network, see the `Makefile` in the root of [the repo](https://github.com/tendermint/tendermint/blob/main/Makefile) and run:

```sh
make build-linux
```

Note that this will build and use a different image than the ones provided here.

## License

- Tendermint's license is [Apache 2.0](https://github.com/tendermint/tendermint/blob/main/LICENSE).

## Contributing

Contributions are most welcome! See the [contributing file](https://github.com/tendermint/tendermint/blob/main/CONTRIBUTING.md) for more information.

LICENSE

Apache License
Version 2.0, January 2004

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a

same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

Makefile

OUTPUT?=$(BUILDDIR)/tendermint

BUILD_TAGS?=tendermint

COMMIT_HASH := $(shell git rev-parse --short HEAD)
LD_FLAGS = -X github.com/tendermint/tendermint/version.TMGitCommitHash=$(COMMIT_HASH)
BUILD_FLAGS = -mod=readonly -ldflags "$(LD_FLAGS)"
HTTPS_GIT := https://github.com/tendermint/tendermint.git
CGO_ENABLED ?= 0
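
The `-X` flag in `LD_FLAGS` above uses the Go linker to stamp a package-level string variable at build time. A minimal sketch of the Go side of that mechanism (the real package lives at `github.com/tendermint/tendermint/version`; this reconstruction only illustrates how `-X` works):

```go
// Package version holds build-time metadata.
package version

// TMGitCommitHash is empty by default and is overridden at link time, e.g.:
//
//	go build -ldflags "-X github.com/tendermint/tendermint/version.TMGitCommitHash=<short-sha>"
//
// Only uninitialized or string-literal package-level string variables can be
// set this way.
var TMGitCommitHash = ""
```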

# allow users to pass additional flags via the conventional LDFLAGS variable
LD_FLAGS += $(LDFLAGS)

# Process the Docker environment variable TARGETPLATFORM
# in order to build the binary with the corresponding ARCH;
# by default we always build for linux/amd64.
TARGETPLATFORM ?=
GOOS ?= linux
GOARCH ?= amd64
GOARM ?=

ifeq (linux/arm,$(findstring linux/arm,$(TARGETPLATFORM)))
GOOS=linux
GOARCH=arm
GOARM=7
endif

ifeq (linux/arm/v6,$(findstring linux/arm/v6,$(TARGETPLATFORM)))
GOOS=linux
GOARCH=arm
GOARM=6
endif

ifeq (linux/arm64,$(findstring linux/arm64,$(TARGETPLATFORM)))
GOOS=linux
GOARCH=arm64
GOARM=7
endif

ifeq (linux/386,$(findstring linux/386,$(TARGETPLATFORM)))
GOOS=linux
GOARCH=386
endif

ifeq (linux/amd64,$(findstring linux/amd64,$(TARGETPLATFORM)))
GOOS=linux
GOARCH=amd64
endif

ifeq (linux/mips,$(findstring linux/mips,$(TARGETPLATFORM)))
GOOS=linux
GOARCH=mips
endif

ifeq (linux/mipsle,$(findstring linux/mipsle,$(TARGETPLATFORM)))
GOOS=linux
GOARCH=mipsle
endif

ifeq (linux/mips64,$(findstring linux/mips64,$(TARGETPLATFORM)))
GOOS=linux
GOARCH=mips64
endif

ifeq (linux/mips64le,$(findstring linux/mips64le,$(TARGETPLATFORM)))
GOOS=linux
GOARCH=mips64le
endif

ifeq (linux/riscv64,$(findstring linux/riscv64,$(TARGETPLATFORM)))
GOOS=linux
GOARCH=riscv64
endif

all: check build test install
.PHONY: all

install:
	CGO_ENABLED=$(CGO_ENABLED) go install $(BUILD_FLAGS) -tags $(BUILD_TAGS) ./cmd/tendermint
.PHONY: install

###############################################################################
###                                Metrics                                  ###
###############################################################################

metrics: testdata-metrics
	go generate -run="scripts/metricsgen" ./...
.PHONY: metrics

# By convention, the go tool ignores subdirectories of directories named
# 'testdata'. This command invokes the generate command on the folder directly
# to avoid this.
testdata-metrics:
	ls ./scripts/metricsgen/testdata | xargs -I{} go generate -v -run="scripts/metricsgen" ./scripts/metricsgen/testdata/{}
.PHONY: testdata-metrics

###############################################################################
###                                 Mocks                                   ###
###############################################################################

check-proto-deps:
ifeq (,$(shell which protoc-gen-gogofaster))
	@go install github.com/cosmos/gogoproto/protoc-gen-gogofaster@latest
endif
.PHONY: check-proto-deps

proto-gen: check-proto-deps
	@echo "Generating Protobuf files"
	@go run github.com/bufbuild/buf/cmd/buf generate
	@mv ./proto/tendermint/abci/types.pb.go ./abci/types/
	@cp ./proto/tendermint/rpc/grpc/types.pb.go ./rpc/grpc
.PHONY: proto-gen

# These targets are provided for convenience and are intended for local

lint:
	@echo "--> Running linter"
	@go run github.com/golangci/golangci-lint/cmd/golangci-lint run
.PHONY: lint

vulncheck:
	@go run golang.org/x/vuln/cmd/govulncheck@latest ./...
.PHONY: vulncheck

###############################################################################
###                            Documentation                                ###
###############################################################################

DOCS_OUTPUT?=/tmp/tendermint-core-docs

# This builds a docs site for each branch/tag in `./docs/versions` and copies
# each site to a version prefixed path. The last entry inside the `versions`
# file will be the default root index.html. Only redirects that are built into
# the "redirects" folder of each of the branches will be copied out to the root
# of the build at the end.
build-docs:
	@cd docs && \
	while read -r branch path_prefix; do \
		(git checkout $${branch} && npm ci && VUEPRESS_BASE="/$${path_prefix}/" npm run build) ; \
		mkdir -p $(DOCS_OUTPUT)/$${path_prefix} ; \
		cp -r .vuepress/dist/* $(DOCS_OUTPUT)/$${path_prefix}/ ; \
		cp $(DOCS_OUTPUT)/$${path_prefix}/index.html $(DOCS_OUTPUT) ; \
		cp $(DOCS_OUTPUT)/$${path_prefix}/404.html $(DOCS_OUTPUT) ; \
		cp -r $(DOCS_OUTPUT)/$${path_prefix}/redirects/* $(DOCS_OUTPUT) || true ; \
	done < versions ;
.PHONY: build-docs

# Build and serve the local version of the docs on the current branch from
# http://0.0.0.0:8080
serve-docs:
	@cd docs && \
	npm ci && \
	npm run serve
.PHONY: serve-docs

sync-docs:
	cd ~/output && \
	echo "role_arn = ${DEPLOYMENT_ROLE_ARN}" >> /root/.aws/config ; \
	aws cloudfront create-invalidation --distribution-id ${CF_DISTRIBUTION_ID} --profile terraform --path "/*" ;
.PHONY: sync-docs

# Verify that important design docs have ToC entries.
check-docs-toc:
	@./docs/presubmit.sh
.PHONY: check-docs-toc

###############################################################################
###                            Docker image                                 ###
###############################################################################

# Build linux binary on other platforms
build-linux:
	GOOS=$(GOOS) GOARCH=$(GOARCH) GOARM=$(GOARM) $(MAKE) build
.PHONY: build-linux

build-docker-localnode:

split-test-packages:$(BUILDDIR)/packages.txt
	split -d -n l/$(NUM_SPLIT) $< $<.
test-group-%:split-test-packages
	cat $(BUILDDIR)/packages.txt.$* | xargs go test -mod=readonly -timeout=15m -race -coverprofile=$(BUILDDIR)/$*.profile.out

README.md

[![Go version][go-badge]][go-url]
[![Discord chat][discord-badge]][discord-url]
[![License][license-badge]][license-url]
[![tendermint/tendermint][loc-badge]][loc-url]
[![Sourcegraph][sg-badge]][sg-url]

| Branch | Tests                              | Linting                         |
|--------|------------------------------------|---------------------------------|
| main   | [![Tests][tests-badge]][tests-url] | [![Lint][lint-badge]][lint-url] |

Tendermint Core is a Byzantine Fault Tolerant (BFT) middleware that takes a
state transition machine - written in any programming language - and securely

| Requirement | Notes             |
|-------------|-------------------|
| Go version  | Go 1.18 or higher |

### Install

See the [install instructions](./docs/introduction/install.md).

### Quick Start

- [Single node](./docs/introduction/quick-start.md)
- [Local cluster using docker-compose](./docs/networks/docker-compose.md)
- [Remote cluster using Terraform and Ansible](./docs/networks/terraform-and-ansible.md)

## Contributing

For more information on upgrading, see [UPGRADING.md](./UPGRADING.md).

### Supported Versions

Because we are a small core team, we have limited capacity to ship patch
updates, including security updates. Consequently, we strongly recommend keeping
Tendermint up-to-date. Upgrading instructions can be found in
[UPGRADING.md](./UPGRADING.md).

Currently supported versions include:

- v0.34.x
- v0.37.x (release candidate)

## Resources

- [Tendermint Core Blog](https://medium.com/tendermint/tagged/tendermint-core)
- [Cosmos Blog](https://blog.cosmos.network/tendermint/home)

## Join us

Tendermint Core is maintained by [Interchain GmbH](https://interchain.io).
If you'd like to work full-time on Tendermint Core,
[we're hiring](https://interchain-gmbh.breezy.hr/)!

Tendermint trademark is owned by [Tendermint Inc.](https://tendermint.com), the
for-profit entity that also maintains [tendermint.com](https://tendermint.com).

[bft]: https://en.wikipedia.org/wiki/Byzantine_fault_tolerance
[smr]: https://en.wikipedia.org/wiki/State_machine_replication
[Blockchain]: https://en.wikipedia.org/wiki/Blockchain
[version-badge]: https://img.shields.io/github/v/release/tendermint/tendermint.svg
[version-url]: https://github.com/tendermint/tendermint/releases/latest
[api-badge]: https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667
[api-url]: https://pkg.go.dev/github.com/tendermint/tendermint
[go-badge]: https://img.shields.io/badge/go-1.18-blue.svg
[go-url]: https://github.com/moovweb/gvm
[discord-badge]: https://img.shields.io/discord/669268347736686612.svg
[discord-url]: https://discord.gg/cosmosnetwork
[license-badge]: https://img.shields.io/github/license/tendermint/tendermint.svg
[license-url]: https://github.com/tendermint/tendermint/blob/main/LICENSE
[loc-badge]: https://tokei.rs/b1/github/tendermint/tendermint?category=lines
[loc-url]: https://github.com/tendermint/tendermint
[sg-badge]: https://sourcegraph.com/github.com/tendermint/tendermint/-/badge.svg
[sg-url]: https://sourcegraph.com/github.com/tendermint/tendermint?badge
[tests-url]: https://github.com/tendermint/tendermint/actions/workflows/tests.yml
[tests-badge]: https://github.com/tendermint/tendermint/actions/workflows/tests.yml/badge.svg?branch=main
[lint-badge]: https://github.com/tendermint/tendermint/actions/workflows/lint.yml/badge.svg
[lint-url]: https://github.com/tendermint/tendermint/actions/workflows/lint.yml

RELEASES.md

We use Mergify's [backport feature](https://mergify.io/features/backports) to
automatically backport to the needed branch. There should be a label for any
backport branch that you'll be targeting. To notify the bot to backport a pull
request, mark the pull request with the label corresponding to the correct
backport branch. For example, to backport to v0.38.x, add the label
`S:backport-to-v0.38.x`. Once the original pull request is merged, the bot will
try to cherry-pick the pull request to the backport branch. If the bot fails to
backport, it will open a pull request. The author of the original pull request
is responsible for solving the conflicts and merging the pull request.

See [#6072](https://github.com/tendermint/tendermint/pull/6072) for more context.

In the following example, we'll assume that we're making a backport branch for
the 0.38.x line.

1. Start on `main`

2. Ensure that there is a [branch protection
   rule](https://docs.github.com/en/repositories/configuring-branches-and-merges-in-your-repository/defining-the-mergeability-of-pull-requests/managing-a-branch-protection-rule) for the
   branch you are about to create (you will need admin access to the repository
   in order to do this).

3. Create and push the backport branch:

   ```sh
   git checkout -b v0.38.x
   git push origin v0.38.x
   ```

4. Create a PR to update the documentation directory for the backport branch.

   We rewrite any URLs pointing to `main` to point to the backport branch,
   so that generated documentation will link to the correct versions of files
   elsewhere in the repository. The following files are to be excluded from this
   search:

   * [`README.md`](./README.md)
   * [`CHANGELOG.md`](./CHANGELOG.md)
   * [`UPGRADING.md`](./UPGRADING.md)

   The following links are to always point to `main`, regardless of where they
   occur in the codebase:

   * `https://github.com/tendermint/tendermint/blob/main/LICENSE`

   Be sure to search for all of the following links and replace `main` with your
   corresponding branch label or version (e.g. `v0.38.x` or `v0.38`):

   * `github.com/tendermint/tendermint/blob/main` ->
     `github.com/tendermint/tendermint/blob/v0.38.x`
   * `github.com/tendermint/tendermint/tree/main` ->
     `github.com/tendermint/tendermint/tree/v0.38.x`
   * `docs.tendermint.com/main` -> `docs.tendermint.com/v0.38`

   Once you have updated all of the relevant documentation:

   ```sh
   # Create and push the PR.
   git checkout -b update-docs-v038x
   git commit -m "Update docs for v0.38.x backport branch."
   git push -u origin update-docs-v038x
   ```

Be sure to merge this PR before making other changes on the newly-created
backport branch.

After doing these steps, go back to `main` and do the following:

1. Create a new workflow to run e2e nightlies for the new backport branch. (See
   [e2e-nightly-main.yml][e2e] for an example.)

2. Add a new section to the Mergify config (`.github/mergify.yml`) to enable the
   backport bot to work on this branch, and add a corresponding `S:backport-to-v0.38.x`
   [label](https://github.com/tendermint/tendermint/labels) so the bot can be triggered.

3. Add a new section to the Dependabot config (`.github/dependabot.yml`) to
   enable automatic update of Go dependencies on this branch. Copy and edit one
   of the existing branch configurations to set the correct `target-branch`.

[e2e]: https://github.com/tendermint/tendermint/blob/main/.github/workflows/e2e-nightly-main.yml

## Pre-releases

Before creating an official release, especially a minor release, we may want to
create an alpha or beta version, or release candidate (RC), for our friends and
partners to test out. We use git tags to create pre-releases, and we build them
off of backport branches, for example:

* `v0.38.0-alpha.1` - The first alpha release of `v0.38.0`. Subsequent alpha
  releases will be numbered `v0.38.0-alpha.2`, `v0.38.0-alpha.3`, etc.

  Alpha releases are to be considered the _most_ unstable of pre-releases, and
  are most likely not yet properly QA'd. These are made available to allow early
  adopters to start integrating and testing new functionality before we're done
  with QA.

* `v0.38.0-beta.1` - The first beta release of `v0.38.0`. Subsequent beta
  releases will be numbered `v0.38.0-beta.2`, `v0.38.0-beta.3`, etc.

  Beta releases can be considered more stable than alpha releases in that we
  will have QA'd them better than alpha releases, but there still may be
  minor breaking API changes if users have strong demands for such changes.

* `v0.38.0-rc1` - The first release candidate (RC) of `v0.38.0`. Subsequent RCs
  will be numbered `v0.38.0-rc2`, `v0.38.0-rc3`, etc.

  RCs are considered more stable than beta releases in that we will have
  completed our QA on them. APIs will most likely be stable at this point. The
  difference between an RC and a release is that there may still be small
  changes (bug fixes, features) that may make their way into the series before
  cutting a final release.

(Note that branches and tags _cannot_ have the same names, so it's important
that these branches have distinct names from the tags/release names.)

If this is the first pre-release for a minor release, you'll have to make a new
backport branch (see above). Otherwise:

1. Start from the backport branch (e.g. `v0.38.x`).
2. Run the integration tests and the E2E nightlies
   (which can be triggered from the GitHub UI;
   e.g., <https://github.com/tendermint/tendermint/actions/workflows/e2e-nightly-37x.yml>).
3. Prepare the pre-release documentation:
   * Ensure that all relevant changes are in the `CHANGELOG_PENDING.md` file.
     This file's contents must only be included in the `CHANGELOG.md` when we
     cut final releases.
   * Ensure that `UPGRADING.md` is up-to-date and includes notes on any breaking changes
     or other upgrading flows.
4. Prepare the versioning:
   * Bump TMVersionDefault version in `version.go`
   * Bump P2P and block protocol versions in `version.go`, if necessary.
     Check the changelog for breaking changes in these components.
   * Bump ABCI protocol version in `version.go`, if necessary
5. Open a PR with these changes against the backport branch.
6. Once these changes have landed on the backport branch, be sure to pull them back down locally.
7. Once you have the changes locally, create the new tag, specifying a name and a tag "message":
   `git tag -a v0.38.0-rc1 -m "Release Candidate v0.38.0-rc1"`
8. Push the tag back up to origin:
   `git push origin v0.38.0-rc1`
   Now the tag should be available on the repo's releases page.
9. Future pre-releases will continue to be built off of this branch.

## Minor release

@@ -151,35 +178,36 @@ branch, as described above.

Before performing these steps, be sure the
[Minor Release Checklist](#minor-release-checklist) has been completed.

1. Start on the backport branch (e.g. `v0.38.x`)
2. Run integration tests (`make test_integrations`) and the e2e nightlies.
3. Prepare the release:
   * "Squash" changes from the changelog entries for the pre-releases into a
     single entry, and add all changes included in `CHANGELOG_PENDING.md`.
     (Squashing includes both combining all entries, as well as removing or
     simplifying any intra-pre-release changes. It may also help to alphabetize
     the entries by package name.)
   * Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
     all PRs.
   * Ensure that `UPGRADING.md` is up-to-date and includes notes on any breaking
     changes or other upgrading flows.
   * Bump the TMVersionDefault version in `version.go`.
   * Bump the P2P and block protocol versions in `version.go`, if necessary.
   * Bump the ABCI protocol version in `version.go`, if necessary.
4. Open a PR with these changes against the backport branch.
5. Once these changes are on the backport branch, push a tag with the prepared
   release details. This will trigger the actual release `v0.38.0`:
   * `git tag -a v0.38.0 -m 'Release v0.38.0'`
   * `git push origin v0.38.0`
6. Make sure that `main` is updated with the latest `CHANGELOG.md`, `CHANGELOG_PENDING.md`, and `UPGRADING.md`.
7. Add the release to the documentation site generator config (see
   [DOCS\_README.md](./docs/DOCS_README.md) for more details). In summary:
   * Start on branch `main`.
   * Add a new line at the bottom of [`docs/versions`](./docs/versions) to
     ensure the newest release is the default for the landing page.
   * Add a new entry to `themeConfig.versions` in
     [`docs/.vuepress/config.js`](./docs/.vuepress/config.js) to include the
     release in the dropdown versions menu.
   * Commit these changes to `main` and backport them into the backport
     branch for this release.

## Patch release

@@ -193,24 +221,24 @@ changes may merit a release candidate.

To create a patch release:

1. Check out the long-lived backport branch: `git checkout v0.38.x`
2. Run integration tests (`make test_integrations`) and the nightlies.
3. Check out a new branch and prepare the release:
   * Copy `CHANGELOG_PENDING.md` to the top of `CHANGELOG.md`.
   * Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for all issues.
   * Run `bash ./scripts/authors.sh` to get a list of authors since the latest
     release, and add the GitHub aliases of external contributors to the top of
     the CHANGELOG. To look up an alias from an email, try `bash ./scripts/authors.sh <email>`.
   * Reset `CHANGELOG_PENDING.md`.
   * Bump the TMDefaultVersion in `version.go`.
   * Bump the ABCI version number, if necessary.
     (Note that ABCI follows semver, that ABCI versions are the only versions
     which can change during patch releases, and that only field additions are
     valid patch changes.)
4. Open a PR with these changes that will land them back on `v0.38.x`.
5. Once this change has landed on the backport branch, make sure to pull it locally, then push a tag.
   * `git tag -a v0.38.1 -m 'Release v0.38.1'`
   * `git push origin v0.38.1`
6. Create a pull request back to main with the CHANGELOG & version changes from the latest release.
   * Remove all `R:patch` labels from the pull requests that were included in the release.
   * Do not merge the backport branch into main.

## Minor Release Checklist

@@ -98,7 +98,7 @@ Sometimes it's necessary to rename libraries to avoid naming collisions or ambig

* Make use of table driven testing where possible and not-cumbersome
  * [Inspiration](https://dave.cheney.net/2013/06/09/writing-table-driven-tests-in-go)
* Make use of [assert](https://godoc.org/github.com/stretchr/testify/assert) and [require](https://godoc.org/github.com/stretchr/testify/require)
* When using mocks, it is recommended to use Testify [mock](https://pkg.go.dev/github.com/stretchr/testify/mock) along with [Mockery](https://github.com/vektra/mockery) for autogeneration
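
As an illustration of these guidelines, here is a minimal, self-contained
sketch combining a table-driven test, `require`, and a hand-written stand-in
for the kind of mock Mockery generates. The `Greeter` interface and all other
names here are hypothetical, not part of this repository:

```go
package example_test

import (
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
)

// Greeter is a hypothetical interface used only for illustration.
type Greeter interface {
	Greet(name string) string
}

// MockGreeter approximates what Mockery would generate for Greeter.
type MockGreeter struct{ mock.Mock }

func (m *MockGreeter) Greet(name string) string {
	args := m.Called(name)
	return args.String(0)
}

func TestGreet(t *testing.T) {
	cases := []struct {
		name string
		in   string
		want string
	}{
		{"simple", "world", "hello world"},
		{"empty", "", "hello "},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			g := new(MockGreeter)
			g.On("Greet", tc.in).Return("hello " + tc.in)
			require.Equal(t, tc.want, g.Greet(tc.in))
			g.AssertExpectations(t)
		})
	}
}
```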

## Errors

UPGRADING.md

@@ -1,6 +1,53 @@

# Upgrading Tendermint Core

This guide provides instructions for upgrading to specific versions of
Tendermint Core.

## Unreleased

### Config Changes

* A new config field, `BootstrapPeers`, has been introduced as a means of
  adding a list of addresses to the address book upon initializing a node. This
  is an alternative to `PersistentPeers`. `PersistentPeers` should only be used
  for nodes that you want to keep a constant connection with, i.e. sentry nodes
  (see the sketch below).
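
For example, an operator might set the two fields programmatically. This is a
minimal sketch; `config.DefaultConfig` and `PersistentPeers` exist today, while
the `BootstrapPeers` field name on `config.P2PConfig` is an assumption based on
the description above:

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/config"
)

func main() {
	cfg := config.DefaultConfig()
	// Assumed field: BootstrapPeers seeds the address book once, at init time.
	cfg.P2P.BootstrapPeers = "nodeid1@192.0.2.1:26656,nodeid2@192.0.2.2:26656"
	// PersistentPeers remains reserved for always-connected peers, e.g. sentries.
	cfg.P2P.PersistentPeers = "sentryid@192.0.2.3:26656"
	fmt.Println(cfg.P2P.BootstrapPeers)
}
```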

----

### ABCI Changes

* The `ABCIVersion` is now `1.0.0`.

* Added new ABCI methods `PrepareProposal` and `ProcessProposal`. For details,
  please see the [spec](spec/abci/README.md). Applications upgrading to
  v0.37.0 must implement these methods, at the very minimum, as described
  [here](./spec/abci/abci++_app_requirements.md) (a minimal sketch follows this
  list).
* Deduplicated `ConsensusParams` and `BlockParams`.
  In the v0.34 branch they are defined in both `abci/types.proto` and `types/params.proto`.
  The definitions in `abci/types.proto` have been removed.
  In-process applications should make sure they are not using the deleted
  version of those structures.
* In v0.34, messages on the wire used to be length-delimited with `int64` varint
  values, which was inconsistent with the `uint64` varint length delimiters used
  in the P2P layer. Both now consistently use `uint64` varint length delimiters.
* Added `AbciVersion` to `RequestInfo`.
  Applications should check that Tendermint's ABCI version matches the one they
  expect in order to ensure compatibility.
* The `SetOption` method has been removed from the ABCI `Client` interface.
  The corresponding Protobuf types have been deprecated.
* The `key` and `value` fields in the `EventAttribute` type have been changed
  from type `bytes` to `string`. As per the [Protocol Buffers updating
  guidelines](https://developers.google.com/protocol-buffers/docs/proto3#updating),
  this should have no effect on the wire-level encoding for UTF8-encoded
  strings.
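
To make the upgrade concrete, here is a minimal sketch of the simplest
conforming implementation of the two new methods, using the v0.37-style Go
types from `abci/types` that appear elsewhere in this diff. An application
with real proposal logic would do more than this:

```go
package app

import (
	"github.com/tendermint/tendermint/abci/types"
)

// Application embeds BaseApplication and overrides only the new methods.
type Application struct {
	types.BaseApplication
}

// PrepareProposal echoes the transactions selected by Tendermint unchanged,
// which is the minimal conforming behavior.
func (app *Application) PrepareProposal(req types.RequestPrepareProposal) types.ResponsePrepareProposal {
	return types.ResponsePrepareProposal{Txs: req.Txs}
}

// ProcessProposal accepts every proposal, i.e. performs no extra validation.
func (app *Application) ProcessProposal(req types.RequestProcessProposal) types.ResponseProcessProposal {
	return types.ResponseProcessProposal{Status: types.ResponseProcessProposal_ACCEPT}
}
```

Echoing `req.Txs` back unchanged and accepting every proposal preserves
pre-v0.37 behavior.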

## v0.34.24

Note that in [\#9724](https://github.com/tendermint/tendermint/pull/9724) we
un-prettified the JSON output (i.e. removed all indentation) of the HTTP and
WebSocket RPC for performance and subscription stability reasons. We recommend
using a tool such as [jq](https://github.com/stedolan/jq) if you rely on
prettified output in some way.

## v0.34.20

@@ -15,7 +62,7 @@ and gas cost).

Operators can enable the priority mempool by setting `mempool.version` to
`"v1"` in the `config.toml`. For more technical details about the priority
mempool, see [ADR 067: Mempool
Refactor](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-067-mempool-refactor.md).

## v0.34.0

@@ -23,7 +70,7 @@

This release is not compatible with previous blockchains due to changes to
the encoding format (see "Protocol Buffers," below) and the block header (see
"Blockchain Protocol").

Note that Tendermint 0.34 also requires Go 1.16 or higher.

### ABCI Changes

@@ -33,7 +80,7 @@

were added to support the new State Sync feature.
Previously, syncing a new node to a preexisting network could take days; but
with State Sync, new nodes are able to join a network in a matter of seconds.
Read [the spec](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/abci/apps.md#state-sync)
if you want to learn more about State Sync, or if you'd like your application
to use it.
(If you don't want to support State Sync in your application, you can just
implement these new ABCI methods as no-ops, leaving them empty.)
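
A minimal sketch of such no-op implementations, assuming the v0.34 Go types;
note that embedding `types.BaseApplication` already provides equivalent stubs:

```go
// State Sync no-ops: empty responses tell Tendermint that the application
// offers no snapshots and accepts none.
func (app *Application) ListSnapshots(req types.RequestListSnapshots) types.ResponseListSnapshots {
	return types.ResponseListSnapshots{}
}

func (app *Application) OfferSnapshot(req types.RequestOfferSnapshot) types.ResponseOfferSnapshot {
	return types.ResponseOfferSnapshot{}
}

func (app *Application) LoadSnapshotChunk(req types.RequestLoadSnapshotChunk) types.ResponseLoadSnapshotChunk {
	return types.ResponseLoadSnapshotChunk{}
}

func (app *Application) ApplySnapshotChunk(req types.RequestApplySnapshotChunk) types.ResponseApplySnapshotChunk {
	return types.ResponseApplySnapshotChunk{}
}
```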

@@ -49,7 +96,7 @@

Applications should be able to handle these evidence types
(i.e., through slashing or other accountability measures).

* The [`PublicKey` type](https://github.com/tendermint/tendermint/blob/main/proto/tendermint/crypto/keys.proto#L13-L15)
  (used in ABCI as part of `ValidatorUpdate`) now uses a `oneof` protobuf type.
  Note that since Tendermint only supports ed25519 validator keys, there's only
  one option in the `oneof`. For more, see "Protocol Buffers," below.

@@ -64,12 +111,9 @@ directory. For more, see "Protobuf," below.

### Blockchain Protocol

* `Header#LastResultsHash`, which is the root hash of a Merkle tree built from
  `ResponseDeliverTx(Code, Data)`, as of v0.34 also includes the `GasWanted`
  and `GasUsed` fields.

* Merkle hashes of empty trees previously returned nothing, but now return the
  hash of an empty input, to conform with
  [RFC-6962](https://tools.ietf.org/html/rfc6962).
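
Concretely, under RFC-6962 the Merkle tree hash of an empty list is defined as
the hash of the empty string. A quick check using only the Go standard library
(Tendermint's Merkle trees use SHA-256):

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

func main() {
	// MTH({}) = SHA-256() per RFC-6962, section 2.1.
	empty := sha256.Sum256([]byte{})
	fmt.Printf("%x\n", empty[:])
	// Prints e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855.
}
```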

@@ -159,7 +203,7 @@ The `bech32` package has moved to the Cosmos SDK:

### CLI

The `tendermint lite` command has been renamed to `tendermint light` and has a
slightly different API.
See [the docs](https://docs.tendermint.com/v0.33/tendermint-core/light-client-protocol.html#http-proxy) for details.

### Light Client

@@ -173,6 +217,7 @@ Other user-relevant changes include:

* The `Verifier` was broken up into two pieces:
  * Core verification logic (pure `VerifyX` functions)
  * `Client` object, which represents the complete light client
* The new light client stores headers and validator sets as `LightBlock`s
* The RPC client can be found in the `/rpc` directory.
* The HTTP(S) proxy is located in the `/proxy` directory.

@@ -314,7 +359,7 @@ Evidence Params has been changed to include duration.

### RPC Changes

* `/validators` is now paginated (default: 30 validators per page; see the
  example below)
* `/block_results` response format updated ([see RPC docs for details](https://docs.tendermint.com/v0.33/rpc/#/Info/block_results))
* The `Event` suffix has been removed from the ID in event responses
* IDs are now integers, not `json-client-XYZ`
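
With the Go RPC client, the page parameters are passed explicitly. A sketch
assuming the v0.34-era `Validators` signature on `rpc/client/http`; the
endpoint address is a placeholder:

```go
package main

import (
	"context"
	"fmt"

	rpchttp "github.com/tendermint/tendermint/rpc/client/http"
)

func main() {
	c, err := rpchttp.New("tcp://localhost:26657", "/websocket")
	if err != nil {
		panic(err)
	}
	page, perPage := 1, 30 // 30 is the default page size after this change
	// A nil height asks for the latest validator set.
	res, err := c.Validators(context.Background(), nil, &page, &perPage)
	if err != nil {
		panic(err)
	}
	fmt.Println("total validators:", res.Total, "on this page:", len(res.Validators))
}
```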

@@ -433,11 +478,11 @@ the compilation tag:

Use the `cleveldb` tag instead of `gcc` to compile Tendermint with CLevelDB, or
use `make build_c` / `make install_c` (full instructions can be found at
<https://docs.tendermint.com/v0.33/introduction/install.html#compile-with-cleveldb-support>)

## v0.31.0

This release contains a breaking change to the behavior of the pubsub system.
It also contains some minor breaking changes in the Go API and ABCI.
There are no changes to the block or p2p protocols, so v0.31.0 should work fine
with blockchains created from the v0.30 series.

@@ -455,7 +500,7 @@ In this case, the WS client will receive an error with description:

    "error": {
        "code": -32000,
        "msg": "Server error",
        "data": "subscription was canceled (reason: client is not pulling messages fast enough)" // or "subscription was canceled (reason: Tendermint exited)"
    }
}

@@ -508,14 +553,14 @@ due to changes in how various data structures are hashed.

Any implementations of Tendermint blockchain verification, including lite clients,
will need to be updated. For specific details:

* [Merkle tree](https://github.com/tendermint/tendermint/blob/main/spec/blockchain/encoding.md#merkle-trees)
* [ConsensusParams](https://github.com/tendermint/tendermint/blob/main/spec/blockchain/state.md#consensusparams)

There was also a small change to field ordering in the vote struct. Any
implementations of an out-of-process validator (like a Key-Management Server)
will need to be updated. For specific details:

* [Vote](https://github.com/tendermint/tendermint/blob/main/spec/consensus/signing.md#votes)

Finally, the proposer selection algorithm continues to evolve. See the
[work-in-progress

@@ -636,7 +681,7 @@ to `timeout_propose = "3s"`.

### RPC Changes

The default behavior of `/abci_query` has been changed to not return a proof,
and the name of the parameter that controls this has been changed from `trusted`
to `prove`. To get proofs with your queries, ensure you set `prove=true`.
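
With the Go RPC client the same option is exposed as a struct field rather
than a query parameter. A sketch assuming the v0.34-era `ABCIQueryWithOptions`
signature from `rpc/client`; the query path and key are placeholders:

```go
package main

import (
	"context"
	"fmt"

	tmbytes "github.com/tendermint/tendermint/libs/bytes"
	rpcclient "github.com/tendermint/tendermint/rpc/client"
	rpchttp "github.com/tendermint/tendermint/rpc/client/http"
)

func main() {
	c, err := rpchttp.New("tcp://localhost:26657", "/websocket")
	if err != nil {
		panic(err)
	}
	// Prove: true corresponds to setting prove=true on the RPC endpoint.
	opts := rpcclient.ABCIQueryOptions{Prove: true}
	res, err := c.ABCIQueryWithOptions(context.Background(), "/key", tmbytes.HexBytes("mykey"), opts)
	if err != nil {
		panic(err)
	}
	fmt.Printf("value: %s, proof present: %v\n", res.Response.Value, res.Response.ProofOps != nil)
}
```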

@@ -19,8 +19,8 @@ To get up and running quickly, see the [getting started guide](../docs/app-dev/g

A detailed description of the ABCI methods and message types is contained in:

- [The main spec](https://github.com/tendermint/tendermint/blob/main/spec/abci/README.md)
- [A protobuf file](../proto/tendermint/types/types.proto)
- [A Go interface](./types/application.go)

## Protocol Buffers

@@ -14,6 +14,8 @@ const (
	echoRetryIntervalSeconds = 1
)

//go:generate ../../scripts/mockery_generate.sh Client

// Client defines an interface for an ABCI client.
// All `Async` methods return a `ReqRes` object.
// All `Sync` methods return the appropriate protobuf ResponseXxx struct and an error.
@@ -28,34 +30,36 @@ type Client interface {
	FlushAsync() *ReqRes
	EchoAsync(msg string) *ReqRes
	InfoAsync(types.RequestInfo) *ReqRes
	SetOptionAsync(types.RequestSetOption) *ReqRes
	DeliverTxAsync(types.RequestDeliverTx) *ReqRes
	CheckTxAsync(types.RequestCheckTx) *ReqRes
	QueryAsync(types.RequestQuery) *ReqRes
	CommitAsync() *ReqRes
	InitChainAsync(types.RequestInitChain) *ReqRes
	PrepareProposalAsync(types.RequestPrepareProposal) *ReqRes
	BeginBlockAsync(types.RequestBeginBlock) *ReqRes
	EndBlockAsync(types.RequestEndBlock) *ReqRes
	ListSnapshotsAsync(types.RequestListSnapshots) *ReqRes
	OfferSnapshotAsync(types.RequestOfferSnapshot) *ReqRes
	LoadSnapshotChunkAsync(types.RequestLoadSnapshotChunk) *ReqRes
	ApplySnapshotChunkAsync(types.RequestApplySnapshotChunk) *ReqRes
	ProcessProposalAsync(types.RequestProcessProposal) *ReqRes

	FlushSync() error
	EchoSync(msg string) (*types.ResponseEcho, error)
	InfoSync(types.RequestInfo) (*types.ResponseInfo, error)
	SetOptionSync(types.RequestSetOption) (*types.ResponseSetOption, error)
	DeliverTxSync(types.RequestDeliverTx) (*types.ResponseDeliverTx, error)
	CheckTxSync(types.RequestCheckTx) (*types.ResponseCheckTx, error)
	QuerySync(types.RequestQuery) (*types.ResponseQuery, error)
	CommitSync() (*types.ResponseCommit, error)
	InitChainSync(types.RequestInitChain) (*types.ResponseInitChain, error)
	PrepareProposalSync(types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error)
	BeginBlockSync(types.RequestBeginBlock) (*types.ResponseBeginBlock, error)
	EndBlockSync(types.RequestEndBlock) (*types.ResponseEndBlock, error)
	ListSnapshotsSync(types.RequestListSnapshots) (*types.ResponseListSnapshots, error)
	OfferSnapshotSync(types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error)
	LoadSnapshotChunkSync(types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error)
	ApplySnapshotChunkSync(types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error)
	ProcessProposalSync(types.RequestProcessProposal) (*types.ResponseProcessProposal, error)
}

//----------------------------------------

@@ -192,15 +192,6 @@ func (cli *grpcClient) InfoAsync(params types.RequestInfo) *ReqRes {
	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Info{Info: res}})
}

func (cli *grpcClient) SetOptionAsync(params types.RequestSetOption) *ReqRes {
	req := types.ToRequestSetOption(params)
	res, err := cli.client.SetOption(context.Background(), req.GetSetOption(), grpc.WaitForReady(true))
	if err != nil {
		cli.StopForError(err)
	}
	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_SetOption{SetOption: res}})
}

func (cli *grpcClient) DeliverTxAsync(params types.RequestDeliverTx) *ReqRes {
	req := types.ToRequestDeliverTx(params)
	res, err := cli.client.DeliverTx(context.Background(), req.GetDeliverTx(), grpc.WaitForReady(true))

@@ -300,6 +291,25 @@ func (cli *grpcClient) ApplySnapshotChunkAsync(params types.RequestApplySnapshot
	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_ApplySnapshotChunk{ApplySnapshotChunk: res}})
}

func (cli *grpcClient) PrepareProposalAsync(params types.RequestPrepareProposal) *ReqRes {
	req := types.ToRequestPrepareProposal(params)
	res, err := cli.client.PrepareProposal(context.Background(), req.GetPrepareProposal(), grpc.WaitForReady(true))
	if err != nil {
		cli.StopForError(err)
	}
	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_PrepareProposal{PrepareProposal: res}})
}

func (cli *grpcClient) ProcessProposalAsync(params types.RequestProcessProposal) *ReqRes {
	req := types.ToRequestProcessProposal(params)
	res, err := cli.client.ProcessProposal(context.Background(), req.GetProcessProposal(), grpc.WaitForReady(true))
	if err != nil {
		cli.StopForError(err)
	}

	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_ProcessProposal{ProcessProposal: res}})
}

// finishAsyncCall creates a ReqRes for an async call, and immediately populates it
// with the response. We don't complete it until it's been ordered via the channel.
func (cli *grpcClient) finishAsyncCall(req *types.Request, res *types.Response) *ReqRes {

@@ -356,11 +366,6 @@ func (cli *grpcClient) InfoSync(req types.RequestInfo) (*types.ResponseInfo, err
	return cli.finishSyncCall(reqres).GetInfo(), cli.Error()
}

func (cli *grpcClient) SetOptionSync(req types.RequestSetOption) (*types.ResponseSetOption, error) {
	reqres := cli.SetOptionAsync(req)
	return reqres.Response.GetSetOption(), cli.Error()
}

func (cli *grpcClient) DeliverTxSync(params types.RequestDeliverTx) (*types.ResponseDeliverTx, error) {
	reqres := cli.DeliverTxAsync(params)
	return cli.finishSyncCall(reqres).GetDeliverTx(), cli.Error()

@@ -417,3 +422,14 @@ func (cli *grpcClient) ApplySnapshotChunkSync(
	reqres := cli.ApplySnapshotChunkAsync(params)
	return cli.finishSyncCall(reqres).GetApplySnapshotChunk(), cli.Error()
}

func (cli *grpcClient) PrepareProposalSync(
	params types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
	reqres := cli.PrepareProposalAsync(params)
	return cli.finishSyncCall(reqres).GetPrepareProposal(), cli.Error()
}

func (cli *grpcClient) ProcessProposalSync(params types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
	reqres := cli.ProcessProposalAsync(params)
	return cli.finishSyncCall(reqres).GetProcessProposal(), cli.Error()
}

@@ -6,8 +6,6 @@ import (
	tmsync "github.com/tendermint/tendermint/libs/sync"
)

var _ Client = (*localClient)(nil)

// NOTE: use defer to unlock mutex because Application might panic (e.g., in
// case of malicious tx or query). It only makes sense for publicly exposed
// methods like CheckTx (/broadcast_tx_* RPC endpoint) or Query (/abci_query

@@ -24,8 +22,6 @@ var _ Client = (*localClient)(nil)

// NewLocalClient creates a local client, which will be directly calling the
// methods of the given app.
//
// Both Async and Sync methods ignore the given context.Context parameter.
func NewLocalClient(mtx *tmsync.Mutex, app types.Application) Client {
	if mtx == nil {
		mtx = new(tmsync.Mutex)

@@ -75,17 +71,6 @@ func (app *localClient) InfoAsync(req types.RequestInfo) *ReqRes {
	)
}

func (app *localClient) SetOptionAsync(req types.RequestSetOption) *ReqRes {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.SetOption(req)
	return app.callback(
		types.ToRequestSetOption(req),
		types.ToResponseSetOption(res),
	)
}

func (app *localClient) DeliverTxAsync(params types.RequestDeliverTx) *ReqRes {
	app.mtx.Lock()
	defer app.mtx.Unlock()

@@ -207,6 +192,28 @@ func (app *localClient) ApplySnapshotChunkAsync(req types.RequestApplySnapshotCh
	)
}

func (app *localClient) PrepareProposalAsync(req types.RequestPrepareProposal) *ReqRes {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.PrepareProposal(req)
	return app.callback(
		types.ToRequestPrepareProposal(req),
		types.ToResponsePrepareProposal(res),
	)
}

func (app *localClient) ProcessProposalAsync(req types.RequestProcessProposal) *ReqRes {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.ProcessProposal(req)
	return app.callback(
		types.ToRequestProcessProposal(req),
		types.ToResponseProcessProposal(res),
	)
}

//-------------------------------------------------------

func (app *localClient) FlushSync() error {

@@ -225,14 +232,6 @@ func (app *localClient) InfoSync(req types.RequestInfo) (*types.ResponseInfo, er
	return &res, nil
}

func (app *localClient) SetOptionSync(req types.RequestSetOption) (*types.ResponseSetOption, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.SetOption(req)
	return &res, nil
}

func (app *localClient) DeliverTxSync(req types.RequestDeliverTx) (*types.ResponseDeliverTx, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

@@ -306,7 +305,8 @@ func (app *localClient) OfferSnapshotSync(req types.RequestOfferSnapshot) (*type
}

func (app *localClient) LoadSnapshotChunkSync(
	req types.RequestLoadSnapshotChunk,
) (*types.ResponseLoadSnapshotChunk, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

@@ -315,7 +315,8 @@ func (app *localClient) LoadSnapshotChunkSync(
}

func (app *localClient) ApplySnapshotChunkSync(
	req types.RequestApplySnapshotChunk,
) (*types.ResponseApplySnapshotChunk, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

@@ -323,6 +324,22 @@ func (app *localClient) ApplySnapshotChunkSync(
	return &res, nil
}

func (app *localClient) PrepareProposalSync(req types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.PrepareProposal(req)
	return &res, nil
}

func (app *localClient) ProcessProposalSync(req types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.ProcessProposal(req)
	return &res, nil
}

//-------------------------------------------------------

func (app *localClient) callback(req *types.Request, res *types.Response) *ReqRes {

@@ -1,4 +1,4 @@
// Code generated by mockery. DO NOT EDIT.

package mocks

@@ -575,6 +575,84 @@ func (_m *Client) OnStop() {
	_m.Called()
}

// PrepareProposalAsync provides a mock function with given fields: _a0
func (_m *Client) PrepareProposalAsync(_a0 types.RequestPrepareProposal) *abcicli.ReqRes {
	ret := _m.Called(_a0)

	var r0 *abcicli.ReqRes
	if rf, ok := ret.Get(0).(func(types.RequestPrepareProposal) *abcicli.ReqRes); ok {
		r0 = rf(_a0)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*abcicli.ReqRes)
		}
	}

	return r0
}

// PrepareProposalSync provides a mock function with given fields: _a0
func (_m *Client) PrepareProposalSync(_a0 types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
	ret := _m.Called(_a0)

	var r0 *types.ResponsePrepareProposal
	if rf, ok := ret.Get(0).(func(types.RequestPrepareProposal) *types.ResponsePrepareProposal); ok {
		r0 = rf(_a0)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponsePrepareProposal)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(types.RequestPrepareProposal) error); ok {
		r1 = rf(_a0)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// ProcessProposalAsync provides a mock function with given fields: _a0
func (_m *Client) ProcessProposalAsync(_a0 types.RequestProcessProposal) *abcicli.ReqRes {
	ret := _m.Called(_a0)

	var r0 *abcicli.ReqRes
	if rf, ok := ret.Get(0).(func(types.RequestProcessProposal) *abcicli.ReqRes); ok {
		r0 = rf(_a0)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*abcicli.ReqRes)
		}
	}

	return r0
}

// ProcessProposalSync provides a mock function with given fields: _a0
func (_m *Client) ProcessProposalSync(_a0 types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
	ret := _m.Called(_a0)

	var r0 *types.ResponseProcessProposal
	if rf, ok := ret.Get(0).(func(types.RequestProcessProposal) *types.ResponseProcessProposal); ok {
		r0 = rf(_a0)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseProcessProposal)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(types.RequestProcessProposal) error); ok {
		r1 = rf(_a0)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// QueryAsync provides a mock function with given fields: _a0
func (_m *Client) QueryAsync(_a0 types.RequestQuery) *abcicli.ReqRes {
	ret := _m.Called(_a0)

@@ -649,45 +727,6 @@ func (_m *Client) SetLogger(_a0 log.Logger) {
	_m.Called(_a0)
}

// SetOptionAsync provides a mock function with given fields: _a0
func (_m *Client) SetOptionAsync(_a0 types.RequestSetOption) *abcicli.ReqRes {
	ret := _m.Called(_a0)

	var r0 *abcicli.ReqRes
	if rf, ok := ret.Get(0).(func(types.RequestSetOption) *abcicli.ReqRes); ok {
		r0 = rf(_a0)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*abcicli.ReqRes)
		}
	}

	return r0
}

// SetOptionSync provides a mock function with given fields: _a0
func (_m *Client) SetOptionSync(_a0 types.RequestSetOption) (*types.ResponseSetOption, error) {
	ret := _m.Called(_a0)

	var r0 *types.ResponseSetOption
	if rf, ok := ret.Get(0).(func(types.RequestSetOption) *types.ResponseSetOption); ok {
		r0 = rf(_a0)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseSetOption)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(types.RequestSetOption) error); ok {
		r1 = rf(_a0)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// SetResponseCallback provides a mock function with given fields: _a0
func (_m *Client) SetResponseCallback(_a0 abcicli.Callback) {
	_m.Called(_a0)

@@ -734,3 +773,18 @@ func (_m *Client) String() string {

	return r0
}

type mockConstructorTestingTNewClient interface {
	mock.TestingT
	Cleanup(func())
}

// NewClient creates a new instance of Client. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
func NewClient(t mockConstructorTestingTNewClient) *Client {
	mock := &Client{}
	mock.Mock.Test(t)

	t.Cleanup(func() { mock.AssertExpectations(t) })

	return mock
}

@@ -231,10 +231,6 @@ func (cli *socketClient) InfoAsync(req types.RequestInfo) *ReqRes {
	return cli.queueRequest(types.ToRequestInfo(req))
}

func (cli *socketClient) SetOptionAsync(req types.RequestSetOption) *ReqRes {
	return cli.queueRequest(types.ToRequestSetOption(req))
}

func (cli *socketClient) DeliverTxAsync(req types.RequestDeliverTx) *ReqRes {
	return cli.queueRequest(types.ToRequestDeliverTx(req))
}

@@ -279,6 +275,14 @@ func (cli *socketClient) ApplySnapshotChunkAsync(req types.RequestApplySnapshotC
	return cli.queueRequest(types.ToRequestApplySnapshotChunk(req))
}

func (cli *socketClient) PrepareProposalAsync(req types.RequestPrepareProposal) *ReqRes {
	return cli.queueRequest(types.ToRequestPrepareProposal(req))
}

func (cli *socketClient) ProcessProposalAsync(req types.RequestProcessProposal) *ReqRes {
	return cli.queueRequest(types.ToRequestProcessProposal(req))
}

//----------------------------------------

func (cli *socketClient) FlushSync() error {

@@ -308,15 +312,6 @@ func (cli *socketClient) InfoSync(req types.RequestInfo) (*types.ResponseInfo, e
	return reqres.Response.GetInfo(), cli.Error()
}

func (cli *socketClient) SetOptionSync(req types.RequestSetOption) (*types.ResponseSetOption, error) {
	reqres := cli.queueRequest(types.ToRequestSetOption(req))
	if err := cli.FlushSync(); err != nil {
		return nil, err
	}

	return reqres.Response.GetSetOption(), cli.Error()
}

func (cli *socketClient) DeliverTxSync(req types.RequestDeliverTx) (*types.ResponseDeliverTx, error) {
	reqres := cli.queueRequest(types.ToRequestDeliverTx(req))
	if err := cli.FlushSync(); err != nil {

@@ -417,6 +412,24 @@ func (cli *socketClient) ApplySnapshotChunkSync(
	return reqres.Response.GetApplySnapshotChunk(), cli.Error()
}

func (cli *socketClient) PrepareProposalSync(req types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
	reqres := cli.queueRequest(types.ToRequestPrepareProposal(req))
	if err := cli.FlushSync(); err != nil {
		return nil, err
	}

	return reqres.Response.GetPrepareProposal(), cli.Error()
}

func (cli *socketClient) ProcessProposalSync(req types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
	reqres := cli.queueRequest(types.ToRequestProcessProposal(req))
	if err := cli.FlushSync(); err != nil {
		return nil, err
	}

	return reqres.Response.GetProcessProposal(), cli.Error()
}

//----------------------------------------

func (cli *socketClient) queueRequest(req *types.Request) *ReqRes {

@@ -468,8 +481,6 @@ func resMatchesReq(req *types.Request, res *types.Response) (ok bool) {
		_, ok = res.Value.(*types.Response_Flush)
	case *types.Request_Info:
		_, ok = res.Value.(*types.Response_Info)
	case *types.Request_SetOption:
		_, ok = res.Value.(*types.Response_SetOption)
	case *types.Request_DeliverTx:
		_, ok = res.Value.(*types.Response_DeliverTx)
	case *types.Request_CheckTx:

@@ -492,6 +503,10 @@ func resMatchesReq(req *types.Request, res *types.Response) (ok bool) {
		_, ok = res.Value.(*types.Response_ListSnapshots)
	case *types.Request_OfferSnapshot:
		_, ok = res.Value.(*types.Response_OfferSnapshot)
	case *types.Request_PrepareProposal:
		_, ok = res.Value.(*types.Response_PrepareProposal)
	case *types.Request_ProcessProposal:
		_, ok = res.Value.(*types.Response_ProcessProposal)
	}
	return ok
}

@@ -16,7 +16,6 @@ import (

	abcicli "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/example/code"
	"github.com/tendermint/tendermint/abci/example/counter"
	"github.com/tendermint/tendermint/abci/example/kvstore"
	"github.com/tendermint/tendermint/abci/server"
	servertest "github.com/tendermint/tendermint/abci/tests/server"

@@ -44,9 +43,6 @@ var (
	flagHeight int
	flagProve  bool

	// counter
	flagSerial bool

	// kvstore
	flagPersist string
)

@@ -58,9 +54,7 @@ var RootCmd = &cobra.Command{
	PersistentPreRunE: func(cmd *cobra.Command, args []string) error {

		switch cmd.Use {
		case "counter", "kvstore": // for the examples apps, don't pre-run
			return nil
		case "version": // skip running for version command
		case "kvstore", "version", "help [command]":
			return nil
		}

@@ -89,10 +83,11 @@ var RootCmd = &cobra.Command{
// Structure for data passed to print response.
type response struct {
	// generic abci response
	Data   []byte
	Code   uint32
	Info   string
	Log    string
	Status int32

	Query *queryResponse
}

@@ -135,10 +130,6 @@ func addQueryFlags() {
		"whether or not to return a merkle proof of the query result")
}

func addCounterFlags() {
	counterCmd.PersistentFlags().BoolVarP(&flagSerial, "serial", "", false, "enforce incrementing (serial) transactions")
}

func addKVStoreFlags() {
	kvstoreCmd.PersistentFlags().StringVarP(&flagPersist, "persist", "", "", "directory to use for a database")
}

@@ -148,18 +139,17 @@ func addCommands() {
	RootCmd.AddCommand(consoleCmd)
	RootCmd.AddCommand(echoCmd)
	RootCmd.AddCommand(infoCmd)
	RootCmd.AddCommand(setOptionCmd)
	RootCmd.AddCommand(deliverTxCmd)
	RootCmd.AddCommand(checkTxCmd)
	RootCmd.AddCommand(commitCmd)
	RootCmd.AddCommand(versionCmd)
	RootCmd.AddCommand(testCmd)
	RootCmd.AddCommand(prepareProposalCmd)
	RootCmd.AddCommand(processProposalCmd)
	addQueryFlags()
	RootCmd.AddCommand(queryCmd)

	// examples
	addCounterFlags()
	RootCmd.AddCommand(counterCmd)
	addKVStoreFlags()
	RootCmd.AddCommand(kvstoreCmd)
}

@@ -176,7 +166,6 @@ you'd like to run:

where example.file looks something like:

set_option serial on
check_tx 0x00
check_tx 0xff
deliver_tx 0x00

@@ -198,7 +187,7 @@ This command opens an interactive console for running any of the other commands
without opening a new connection each time
`,
	Args:      cobra.ExactArgs(0),
	ValidArgs: []string{"echo", "info", "set_option", "deliver_tx", "check_tx", "commit", "query"},
	ValidArgs: []string{"echo", "info", "deliver_tx", "check_tx", "prepare_proposal", "process_proposal", "commit", "query"},
	RunE:      cmdConsole,
}

@@ -216,13 +205,6 @@ var infoCmd = &cobra.Command{
	Args: cobra.ExactArgs(0),
	RunE: cmdInfo,
}

var setOptionCmd = &cobra.Command{
	Use:   "set_option",
	Short: "set an option on the application",
	Long:  "set an option on the application",
	Args:  cobra.ExactArgs(2),
	RunE:  cmdSetOption,
}

var deliverTxCmd = &cobra.Command{
	Use: "deliver_tx",

@@ -259,6 +241,22 @@ var versionCmd = &cobra.Command{
	},
}

var prepareProposalCmd = &cobra.Command{
	Use:   "prepare_proposal",
	Short: "prepare proposal",
	Long:  "prepare proposal",
	Args:  cobra.MinimumNArgs(0),
	RunE:  cmdPrepareProposal,
}

var processProposalCmd = &cobra.Command{
	Use:   "process_proposal",
	Short: "process proposal",
	Long:  "process proposal",
	Args:  cobra.MinimumNArgs(0),
	RunE:  cmdProcessProposal,
}

var queryCmd = &cobra.Command{
	Use:   "query",
	Short: "query the application state",

@@ -267,14 +265,6 @@ var queryCmd = &cobra.Command{
	RunE: cmdQuery,
}

var counterCmd = &cobra.Command{
	Use:   "counter",
	Short: "ABCI demo example",
	Long:  "ABCI demo example",
	Args:  cobra.ExactArgs(0),
	RunE:  cmdCounter,
}

var kvstoreCmd = &cobra.Command{
	Use:   "kvstore",
	Short: "ABCI demo example",

@@ -324,7 +314,6 @@ func cmdTest(cmd *cobra.Command, args []string) error {
	return compose(
		[]func() error{
			func() error { return servertest.InitChain(client) },
			func() error { return servertest.SetOption(client, "serial", "on") },
			func() error { return servertest.Commit(client, nil) },
			func() error { return servertest.DeliverTx(client, []byte("abc"), code.CodeTypeBadNonce, nil) },
			func() error { return servertest.Commit(client, nil) },

@@ -339,6 +328,16 @@ func cmdTest(cmd *cobra.Command, args []string) error {
				return servertest.DeliverTx(client, []byte{0x00, 0x00, 0x06}, code.CodeTypeBadNonce, nil)
			},
			func() error { return servertest.Commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 5}) },
			func() error {
				return servertest.PrepareProposal(client, [][]byte{
					{0x01},
				}, [][]byte{{0x01}}, nil)
			},
			func() error {
				return servertest.ProcessProposal(client, [][]byte{
					{0x01},
				}, types.ResponseProcessProposal_ACCEPT)
			},
		})
}

@@ -439,8 +438,10 @@ func muxOnCommands(cmd *cobra.Command, pArgs []string) error {
		return cmdInfo(cmd, actualArgs)
	case "query":
		return cmdQuery(cmd, actualArgs)
	case "set_option":
		return cmdSetOption(cmd, actualArgs)
	case "prepare_proposal":
		return cmdPrepareProposal(cmd, actualArgs)
	case "process_proposal":
		return cmdProcessProposal(cmd, actualArgs)
	default:
		return cmdUnimplemented(cmd, pArgs)
	}

@@ -459,12 +460,14 @@ func cmdUnimplemented(cmd *cobra.Command, args []string) error {

	fmt.Println("Available commands:")
	fmt.Printf("%s: %s\n", echoCmd.Use, echoCmd.Short)
	fmt.Printf("%s: %s\n", checkTxCmd.Use, checkTxCmd.Short)
	fmt.Printf("%s: %s\n", commitCmd.Use, commitCmd.Short)
	fmt.Printf("%s: %s\n", deliverTxCmd.Use, deliverTxCmd.Short)
	fmt.Printf("%s: %s\n", infoCmd.Use, infoCmd.Short)
	fmt.Printf("%s: %s\n", queryCmd.Use, queryCmd.Short)
	fmt.Printf("%s: %s\n", prepareProposalCmd.Use, prepareProposalCmd.Short)
	fmt.Printf("%s: %s\n", processProposalCmd.Use, processProposalCmd.Short)

	fmt.Println("Use \"[command] --help\" for more information about a command.")

	return nil

@@ -504,25 +507,6 @@ func cmdInfo(cmd *cobra.Command, args []string) error {

const codeBad uint32 = 10

// Set an option on the application
func cmdSetOption(cmd *cobra.Command, args []string) error {
	if len(args) < 2 {
		printResponse(cmd, args, response{
			Code: codeBad,
			Log:  "want at least arguments of the form: <key> <value>",
		})
		return nil
	}

	key, val := args[0], args[1]
	_, err := client.SetOptionSync(types.RequestSetOption{Key: key, Value: val})
	if err != nil {
		return err
	}
	printResponse(cmd, args, response{Log: "OK (SetOption doesn't return anything.)"}) // NOTE: Nothing to show...
	return nil
}

// Append a new tx to application
func cmdDeliverTx(cmd *cobra.Command, args []string) error {
	if len(args) == 0 {

@@ -625,30 +609,59 @@ func cmdQuery(cmd *cobra.Command, args []string) error {
	return nil
}

func cmdCounter(cmd *cobra.Command, args []string) error {
	app := counter.NewApplication(flagSerial)
	logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))

	// Start the listener
	srv, err := server.NewServer(flagAddress, flagAbci, app)
	if err != nil {
		return err
	}
	srv.SetLogger(logger.With("module", "abci-server"))
	if err := srv.Start(); err != nil {
		return err
	}

	// Stop upon receiving SIGTERM or CTRL-C.
	tmos.TrapSignal(logger, func() {
		// Cleanup
		if err := srv.Stop(); err != nil {
			logger.Error("Error while stopping server", "err", err)
		}
	})

	// Run forever.
	select {}
}

func cmdPrepareProposal(cmd *cobra.Command, args []string) error {
	txsBytesArray := make([][]byte, len(args))

	for i, arg := range args {
		txBytes, err := stringOrHexToBytes(arg)
		if err != nil {
			return err
		}
		txsBytesArray[i] = txBytes
	}

	res, err := client.PrepareProposalSync(types.RequestPrepareProposal{
		Txs: txsBytesArray,
		// kvstore has to have this parameter in order not to reject a tx as the default value is 0
		MaxTxBytes: 65536,
	})
	if err != nil {
		return err
	}
	resps := make([]response, 0, len(res.Txs))
	for _, tx := range res.Txs {
		resps = append(resps, response{
			Code: code.CodeTypeOK,
			Log:  "Succeeded. Tx: " + string(tx),
		})
	}

	printResponse(cmd, args, resps...)
	return nil
}

func cmdProcessProposal(cmd *cobra.Command, args []string) error {
	txsBytesArray := make([][]byte, len(args))

	for i, arg := range args {
		txBytes, err := stringOrHexToBytes(arg)
		if err != nil {
			return err
		}
		txsBytesArray[i] = txBytes
	}

	res, err := client.ProcessProposalSync(types.RequestProcessProposal{
		Txs: txsBytesArray,
	})
	if err != nil {
		return err
	}

	printResponse(cmd, args, response{
		Status: int32(res.Status),
	})
	return nil
}

func cmdKVStore(cmd *cobra.Command, args []string) error {

@@ -657,11 +670,14 @@ func cmdKVStore(cmd *cobra.Command, args []string) error {
	// Create the application - in memory or persisted to disk
	var app types.Application
	if flagPersist == "" {
		var err error
		flagPersist, err = os.MkdirTemp("", "persistent_kvstore_tmp")
		if err != nil {
			return err
		}
	}
	app = kvstore.NewPersistentKVStoreApplication(flagPersist)
	app.(*kvstore.PersistentKVStoreApplication).SetLogger(logger.With("module", "kvstore"))

	// Start the listener
	srv, err := server.NewServer(flagAddress, flagAbci, app)

@@ -687,44 +703,49 @@ func cmdKVStore(cmd *cobra.Command, args []string) error {

//--------------------------------------------------------------------------------

func printResponse(cmd *cobra.Command, args []string, rsps ...response) {
	if flagVerbose {
		fmt.Println(">", cmd.Use, strings.Join(args, " "))
	}

	for _, rsp := range rsps {
		// Always print the status code.
		if rsp.Code == types.CodeTypeOK {
			fmt.Printf("-> code: OK\n")
		} else {
			fmt.Printf("-> code: %d\n", rsp.Code)
		}

		if len(rsp.Data) != 0 {
			// Do not print this line when using the commit command
			// because the string comes out as gibberish.
			if cmd.Use != "commit" {
				fmt.Printf("-> data: %s\n", rsp.Data)
			}
			fmt.Printf("-> data.hex: 0x%X\n", rsp.Data)
		}
		if rsp.Log != "" {
			fmt.Printf("-> log: %s\n", rsp.Log)
		}
		if cmd.Use == "process_proposal" {
			fmt.Printf("-> status: %s\n", types.ResponseProcessProposal_ProposalStatus_name[rsp.Status])
		}

		if rsp.Query != nil {
			fmt.Printf("-> height: %d\n", rsp.Query.Height)
			if rsp.Query.Key != nil {
				fmt.Printf("-> key: %s\n", rsp.Query.Key)
				fmt.Printf("-> key.hex: %X\n", rsp.Query.Key)
			}
			if rsp.Query.Value != nil {
				fmt.Printf("-> value: %s\n", rsp.Query.Value)
				fmt.Printf("-> value.hex: %X\n", rsp.Query.Value)
			}
			if rsp.Query.ProofOps != nil {
				fmt.Printf("-> proof: %#v\n", rsp.Query.ProofOps)
			}
		}
	}
}

@@ -7,4 +7,6 @@ const (
	CodeTypeBadNonce     uint32 = 2
	CodeTypeUnauthorized uint32 = 3
	CodeTypeUnknownError uint32 = 4
	CodeTypeExecuted     uint32 = 5
	CodeTypeRejected     uint32 = 6
)

@@ -1,103 +0,0 @@
package counter

import (
	"encoding/binary"
	"fmt"

	"github.com/tendermint/tendermint/abci/example/code"
	"github.com/tendermint/tendermint/abci/types"
)

type Application struct {
	types.BaseApplication

	hashCount int
	txCount   int
	serial    bool
}

func NewApplication(serial bool) *Application {
	return &Application{serial: serial}
}

func (app *Application) Info(req types.RequestInfo) types.ResponseInfo {
	return types.ResponseInfo{Data: fmt.Sprintf("{\"hashes\":%v,\"txs\":%v}", app.hashCount, app.txCount)}
}

func (app *Application) SetOption(req types.RequestSetOption) types.ResponseSetOption {
	key, value := req.Key, req.Value
	if key == "serial" && value == "on" {
		app.serial = true
	} else {
		/*
			TODO Panic and have the ABCI server pass an exception.
			The client can call SetOptionSync() and get an `error`.
			return types.ResponseSetOption{
				Error: fmt.Sprintf("Unknown key (%s) or value (%s)", key, value),
			}
		*/
		return types.ResponseSetOption{}
	}

	return types.ResponseSetOption{}
}

func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
	if app.serial {
		if len(req.Tx) > 8 {
			return types.ResponseDeliverTx{
				Code: code.CodeTypeEncodingError,
				Log:  fmt.Sprintf("Max tx size is 8 bytes, got %d", len(req.Tx))}
		}
		tx8 := make([]byte, 8)
		copy(tx8[len(tx8)-len(req.Tx):], req.Tx)
		txValue := binary.BigEndian.Uint64(tx8)
		if txValue != uint64(app.txCount) {
			return types.ResponseDeliverTx{
				Code: code.CodeTypeBadNonce,
				Log:  fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.txCount, txValue)}
		}
	}
	app.txCount++
	return types.ResponseDeliverTx{Code: code.CodeTypeOK}
}

func (app *Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
	if app.serial {
		if len(req.Tx) > 8 {
			return types.ResponseCheckTx{
				Code: code.CodeTypeEncodingError,
				Log:  fmt.Sprintf("Max tx size is 8 bytes, got %d", len(req.Tx))}
		}
		tx8 := make([]byte, 8)
		copy(tx8[len(tx8)-len(req.Tx):], req.Tx)
		txValue := binary.BigEndian.Uint64(tx8)
		if txValue < uint64(app.txCount) {
			return types.ResponseCheckTx{
				Code: code.CodeTypeBadNonce,
				Log:  fmt.Sprintf("Invalid nonce. Expected >= %v, got %v", app.txCount, txValue)}
		}
	}
	return types.ResponseCheckTx{Code: code.CodeTypeOK}
}

func (app *Application) Commit() (resp types.ResponseCommit) {
	app.hashCount++
	if app.txCount == 0 {
		return types.ResponseCommit{}
	}
	hash := make([]byte, 8)
	binary.BigEndian.PutUint64(hash, uint64(app.txCount))
	return types.ResponseCommit{Data: hash}
}

func (app *Application) Query(reqQuery types.RequestQuery) types.ResponseQuery {
	switch reqQuery.Path {
	case "hash":
		return types.ResponseQuery{Value: []byte(fmt.Sprintf("%v", app.hashCount))}
	case "tx":
		return types.ResponseQuery{Value: []byte(fmt.Sprintf("%v", app.txCount))}
	default:
		return types.ResponseQuery{Log: fmt.Sprintf("Invalid query path. Expected hash or tx, got %v", reqQuery.Path)}
	}
}
|
||||
@@ -4,7 +4,7 @@ There are two apps here: the KVStoreApplication and the PersistentKVStoreApplic

## KVStoreApplication

The KVStoreApplication is a simple merkle key-value store.
Transactions of the form `key=value` are stored as key-value pairs in the tree.
Transactions without an `=` sign set the value to the key.
The app has no replay protection (other than what the mempool provides).

@@ -27,4 +27,4 @@ Validator set changes are effected using the following transaction format:

where `pubkeyN` is a base64-encoded 32-byte ed25519 key and `powerN` is a new voting power for the validator with `pubkeyN` (possibly a new one).
To remove a validator from the validator set, set power to `0`.
There is no sybil protection against new validators joining.
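For illustration, a minimal abci-cli console session against the kvstore app (mirroring the test scripts further below; output abbreviated) could look like:

> deliver_tx "name=satoshi"
-> code: OK

> query "name"
-> code: OK
-> key: name
-> value: satoshi

Validator-change transactions instead use the "val:pubkey!power" form handled by PersistentKVStoreApplication.DeliverTx in the hunks below.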
@@ -68,6 +68,7 @@ type Application struct {

	state        State
	RetainBlocks int64 // blocks to retain after commit (via ResponseCommit.RetainHeight)
	txToRemove   map[string]struct{}
}

func NewApplication() *Application {
@@ -78,7 +79,7 @@ func NewApplication() *Application {
func (app *Application) Info(req types.RequestInfo) (resInfo types.ResponseInfo) {
	return types.ResponseInfo{
		Data:             fmt.Sprintf("{\"size\":%v}", app.state.Size),
		Version:          version.ABCIVersion,
		Version:          version.ABCISemVer,
		AppVersion:       ProtocolVersion,
		LastBlockHeight:  app.state.Height,
		LastBlockAppHash: app.state.AppHash,
@@ -87,15 +88,19 @@ func (app *Application) Info(req types.RequestInfo) (resInfo types.ResponseInfo)

// tx is either "key=value" or just arbitrary bytes
func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
	var key, value []byte
	if isReplacedTx(req.Tx) {
		app.txToRemove[string(req.Tx)] = struct{}{}
	}
	var key, value string

	parts := bytes.Split(req.Tx, []byte("="))
	if len(parts) == 2 {
		key, value = parts[0], parts[1]
		key, value = string(parts[0]), string(parts[1])
	} else {
		key, value = req.Tx, req.Tx
		key, value = string(req.Tx), string(req.Tx)
	}

	err := app.state.db.Set(prefixKey(key), value)
	err := app.state.db.Set(prefixKey([]byte(key)), []byte(value))
	if err != nil {
		panic(err)
	}
@@ -105,10 +110,10 @@ func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeli
	{
		Type: "app",
		Attributes: []types.EventAttribute{
			{Key: []byte("creator"), Value: []byte("Cosmoshi Netowoko"), Index: true},
			{Key: []byte("key"), Value: key, Index: true},
			{Key: []byte("index_key"), Value: []byte("index is working"), Index: true},
			{Key: []byte("noindex_key"), Value: []byte("index is working"), Index: false},
			{Key: "creator", Value: "Cosmoshi Netowoko", Index: true},
			{Key: "key", Value: key, Index: true},
			{Key: "index_key", Value: "index is working", Index: true},
			{Key: "noindex_key", Value: "index is working", Index: false},
		},
	},
}
@@ -117,6 +122,15 @@ func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeli
}

func (app *Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
	if len(req.Tx) == 0 {
		return types.ResponseCheckTx{Code: code.CodeTypeRejected}
	}

	if req.Type == types.CheckTxType_Recheck {
		if _, ok := app.txToRemove[string(req.Tx)]; ok {
			return types.ResponseCheckTx{Code: code.CodeTypeExecuted, GasWanted: 1}
		}
	}
	return types.ResponseCheckTx{Code: code.CodeTypeOK, GasWanted: 1}
}

@@ -126,6 +140,8 @@ func (app *Application) Commit() types.ResponseCommit {
	binary.PutVarint(appHash, app.state.Size)
	app.state.AppHash = appHash
	app.state.Height++

	// empty out the set of transactions to remove via rechecktx
	saveState(app.state)

	resp := types.ResponseCommit{Data: appHash}
@@ -170,3 +186,18 @@ func (app *Application) Query(reqQuery types.RequestQuery) (resQuery types.Respo

	return resQuery
}

func (app *Application) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock {
	app.txToRemove = map[string]struct{}{}
	return types.ResponseBeginBlock{}
}

func (app *Application) ProcessProposal(
	req types.RequestProcessProposal) types.ResponseProcessProposal {
	for _, tx := range req.Txs {
		if len(tx) == 0 {
			return types.ResponseProcessProposal{Status: types.ResponseProcessProposal_REJECT}
		}
	}
	return types.ResponseProcessProposal{Status: types.ResponseProcessProposal_ACCEPT}
}
@@ -2,7 +2,7 @@ package kvstore

import (
	"fmt"
	"io/ioutil"
	"os"
	"sort"
	"testing"

@@ -70,8 +70,26 @@ func TestKVStoreKV(t *testing.T) {
	testKVStore(t, kvstore, tx, key, value)
}

func TestPersistentKVStoreEmptyTX(t *testing.T) {
	dir, err := os.MkdirTemp("/tmp", "abci-kvstore-test") // TODO
	if err != nil {
		t.Fatal(err)
	}
	kvstore := NewPersistentKVStoreApplication(dir)
	tx := []byte("")
	reqCheck := types.RequestCheckTx{Tx: tx}
	resCheck := kvstore.CheckTx(reqCheck)
	require.Equal(t, resCheck.Code, code.CodeTypeRejected)

	txs := make([][]byte, 0, 4)
	txs = append(txs, []byte("key=value"), []byte("key"), []byte(""), []byte("kee=value"))
	reqPrepare := types.RequestPrepareProposal{Txs: txs, MaxTxBytes: 10 * 1024}
	resPrepare := kvstore.PrepareProposal(reqPrepare)
	require.Equal(t, len(reqPrepare.Txs), len(resPrepare.Txs)+1, "Empty transaction not properly removed")
}

func TestPersistentKVStoreKV(t *testing.T) {
	dir, err := ioutil.TempDir("/tmp", "abci-kvstore-test") // TODO
	dir, err := os.MkdirTemp("/tmp", "abci-kvstore-test") // TODO
	if err != nil {
		t.Fatal(err)
	}
@@ -87,7 +105,7 @@ func TestPersistentKVStoreKV(t *testing.T) {
}

func TestPersistentKVStoreInfo(t *testing.T) {
	dir, err := ioutil.TempDir("/tmp", "abci-kvstore-test") // TODO
	dir, err := os.MkdirTemp("/tmp", "abci-kvstore-test") // TODO
	if err != nil {
		t.Fatal(err)
	}
@@ -119,7 +137,7 @@ func TestPersistentKVStoreInfo(t *testing.T) {

// add a validator, remove a validator, update a validator
func TestValUpdates(t *testing.T) {
	dir, err := ioutil.TempDir("/tmp", "abci-kvstore-test") // TODO
	dir, err := os.MkdirTemp("/tmp", "abci-kvstore-test") // TODO
	if err != nil {
		t.Fatal(err)
	}
@@ -162,7 +180,7 @@ func TestValUpdates(t *testing.T) {

	makeApplyBlock(t, kvstore, 2, diff, tx1, tx2, tx3)

	vals1 = append(vals[:nInit-2], vals[nInit+1]) // nolint: gocritic
	vals1 = append(vals[:nInit-2], vals[nInit+1]) //nolint: gocritic
	vals2 = kvstore.Validators()
	valsEqual(t, vals1, vals2)

@@ -45,7 +45,10 @@ func NewPersistentKVStoreApplication(dbDir string) *PersistentKVStoreApplication
	state := loadState(db)

	return &PersistentKVStoreApplication{
		app: &Application{state: state},
		app: &Application{
			state:      state,
			txToRemove: map[string]struct{}{},
		},
		valAddrToPubKeyMap: make(map[string]pc.PublicKey),
		logger:             log.NewNopLogger(),
	}
@@ -62,10 +65,6 @@ func (app *PersistentKVStoreApplication) Info(req types.RequestInfo) types.Respo
	return res
}

func (app *PersistentKVStoreApplication) SetOption(req types.RequestSetOption) types.ResponseSetOption {
	return app.app.SetOption(req)
}

// tx is either "val:pubkey!power" or "key=value" or just arbitrary bytes
func (app *PersistentKVStoreApplication) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
	// if it starts with "val:", update the validator set
@@ -76,6 +75,10 @@ func (app *PersistentKVStoreApplication) DeliverTx(req types.RequestDeliverTx) t
	return app.execValidatorTx(req.Tx)
}

	if isPrepareTx(req.Tx) {
		return app.execPrepareTx(req.Tx)
	}

	// otherwise, update the key-value store
	return app.app.DeliverTx(req)
}
@@ -126,7 +129,7 @@ func (app *PersistentKVStoreApplication) BeginBlock(req types.RequestBeginBlock)

	// Punish validators who committed equivocation.
	for _, ev := range req.ByzantineValidators {
		if ev.Type == types.EvidenceType_DUPLICATE_VOTE {
		if ev.Type == types.MisbehaviorType_DUPLICATE_VOTE {
			addr := string(ev.Validator.Address)
			if pubKey, ok := app.valAddrToPubKeyMap[addr]; ok {
				app.updateValidator(types.ValidatorUpdate{
@@ -142,7 +145,7 @@ func (app *PersistentKVStoreApplication) BeginBlock(req types.RequestBeginBlock)
		}
	}

	return types.ResponseBeginBlock{}
	return app.app.BeginBlock(req)
}

// Update the validator set
@@ -170,6 +173,21 @@ func (app *PersistentKVStoreApplication) ApplySnapshotChunk(
	return types.ResponseApplySnapshotChunk{Result: types.ResponseApplySnapshotChunk_ABORT}
}

func (app *PersistentKVStoreApplication) PrepareProposal(
	req types.RequestPrepareProposal) types.ResponsePrepareProposal {
	return types.ResponsePrepareProposal{Txs: app.substPrepareTx(req.Txs, req.MaxTxBytes)}
}

func (app *PersistentKVStoreApplication) ProcessProposal(
	req types.RequestProcessProposal) types.ResponseProcessProposal {
	for _, tx := range req.Txs {
		if len(tx) == 0 || isPrepareTx(tx) {
			return types.ResponseProcessProposal{Status: types.ResponseProcessProposal_REJECT}
		}
	}
	return types.ResponseProcessProposal{Status: types.ResponseProcessProposal_ACCEPT}
}

//---------------------------------------------
// update validators

@@ -284,3 +302,46 @@ func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate

	return types.ResponseDeliverTx{Code: code.CodeTypeOK}
}

// -----------------------------

const (
	PreparePrefix = "prepare"
	ReplacePrefix = "replace"
)

func isPrepareTx(tx []byte) bool { return bytes.HasPrefix(tx, []byte(PreparePrefix)) }

func isReplacedTx(tx []byte) bool {
	return bytes.HasPrefix(tx, []byte(ReplacePrefix))
}

// execPrepareTx is a no-op; the tx data is treated as a placeholder
// and is substituted during PrepareProposal.
func (app *PersistentKVStoreApplication) execPrepareTx(tx []byte) types.ResponseDeliverTx {
	// noop
	return types.ResponseDeliverTx{}
}

// substPrepareTx substitutes all the transactions prefixed with 'prepare' in the
// proposal for transactions with the prefix stripped, while discarding invalid empty transactions.
func (app *PersistentKVStoreApplication) substPrepareTx(blockData [][]byte, maxTxBytes int64) [][]byte {
	txs := make([][]byte, 0, len(blockData))
	var totalBytes int64
	for _, tx := range blockData {
		if len(tx) == 0 {
			continue
		}

		txMod := tx
		if isPrepareTx(tx) {
			txMod = bytes.Replace(tx, []byte(PreparePrefix), []byte(ReplacePrefix), 1)
		}
		totalBytes += int64(len(txMod))
		if totalBytes > maxTxBytes {
			break
		}
		txs = append(txs, txMod)
	}
	return txs
}

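To make the substitution concrete, a hypothetical test fragment (assuming a temp `dir` for the persistent store) would exercise substPrepareTx through PrepareProposal like this:

app := kvstore.NewPersistentKVStoreApplication(dir)
res := app.PrepareProposal(types.RequestPrepareProposal{
	Txs:        [][]byte{[]byte("preparedef"), []byte("")},
	MaxTxBytes: 10 * 1024,
})
// The "prepare" prefix is rewritten to "replace" and the empty tx is dropped:
// res.Txs == [][]byte{[]byte("replacedef")}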
@@ -2,9 +2,8 @@
Package server is used to start a new ABCI server.

It contains two server implementations:
* gRPC server
* socket server

- gRPC server
- socket server
*/
package server

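A rough usage sketch, assuming the package's NewServer constructor and an application value such as types.NewBaseApplication() (both appear elsewhere in this diff):

app := types.NewBaseApplication()
srv, err := server.NewServer("tcp://127.0.0.1:26658", "socket", app)
if err != nil {
	log.Fatal(err)
}
if err := srv.Start(); err != nil {
	log.Fatal(err)
}
// ... serve until shutdown, then:
if err := srv.Stop(); err != nil {
	log.Printf("error stopping server: %v", err)
}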
@@ -200,9 +200,6 @@ func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types
	case *types.Request_Info:
		res := s.app.Info(*r.Info)
		responses <- types.ToResponseInfo(res)
	case *types.Request_SetOption:
		res := s.app.SetOption(*r.SetOption)
		responses <- types.ToResponseSetOption(res)
	case *types.Request_DeliverTx:
		res := s.app.DeliverTx(*r.DeliverTx)
		responses <- types.ToResponseDeliverTx(res)
@@ -230,6 +227,12 @@ func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types
	case *types.Request_OfferSnapshot:
		res := s.app.OfferSnapshot(*r.OfferSnapshot)
		responses <- types.ToResponseOfferSnapshot(res)
	case *types.Request_PrepareProposal:
		res := s.app.PrepareProposal(*r.PrepareProposal)
		responses <- types.ToResponsePrepareProposal(res)
	case *types.Request_ProcessProposal:
		res := s.app.ProcessProposal(*r.ProcessProposal)
		responses <- types.ToResponseProcessProposal(res)
	case *types.Request_LoadSnapshotChunk:
		res := s.app.LoadSnapshotChunk(*r.LoadSnapshotChunk)
		responses <- types.ToResponseLoadSnapshotChunk(res)

@@ -29,17 +29,6 @@ func InitChain(client abcicli.Client) error {
	return nil
}

func SetOption(client abcicli.Client, key, value string) error {
	_, err := client.SetOptionSync(types.RequestSetOption{Key: key, Value: value})
	if err != nil {
		fmt.Println("Failed test: SetOption")
		fmt.Printf("error while setting %v=%v: \nerror: %v\n", key, value, err)
		return err
	}
	fmt.Println("Passed test: SetOption")
	return nil
}

func Commit(client abcicli.Client, hashExp []byte) error {
	res, err := client.CommitSync()
	data := res.Data
@@ -76,6 +65,32 @@ func DeliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []
	return nil
}

func PrepareProposal(client abcicli.Client, txBytes [][]byte, txExpected [][]byte, dataExp []byte) error {
	res, _ := client.PrepareProposalSync(types.RequestPrepareProposal{Txs: txBytes})
	for i, tx := range res.Txs {
		if !bytes.Equal(tx, txExpected[i]) {
			fmt.Println("Failed test: PrepareProposal")
			fmt.Printf("PrepareProposal transaction was unexpected. Got %x expected %x.",
				tx, txExpected[i])
			return errors.New("PrepareProposal error")
		}
	}
	fmt.Println("Passed test: PrepareProposal")
	return nil
}

func ProcessProposal(client abcicli.Client, txBytes [][]byte, statusExp types.ResponseProcessProposal_ProposalStatus) error {
	res, _ := client.ProcessProposalSync(types.RequestProcessProposal{Txs: txBytes})
	if res.Status != statusExp {
		fmt.Println("Failed test: ProcessProposal")
		fmt.Printf("ProcessProposal response status was unexpected. Got %v expected %v.",
			res.Status, statusExp)
		return errors.New("ProcessProposal error")
	}
	fmt.Println("Passed test: ProcessProposal")
	return nil
}

func CheckTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
	res, _ := client.CheckTxSync(types.RequestCheckTx{Tx: txBytes})
	code, data, log := res.Code, res.Data, res.Log

@@ -1,78 +0,0 @@
package main

import (
	"bytes"
	"fmt"
	"os"

	abcicli "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/libs/log"
)

func startClient(abciType string) abcicli.Client {
	// Start client
	client, err := abcicli.NewClient("tcp://127.0.0.1:26658", abciType, true)
	if err != nil {
		panic(err.Error())
	}
	logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
	client.SetLogger(logger.With("module", "abcicli"))
	if err := client.Start(); err != nil {
		panicf("connecting to abci_app: %v", err.Error())
	}

	return client
}

func setOption(client abcicli.Client, key, value string) {
	_, err := client.SetOptionSync(types.RequestSetOption{Key: key, Value: value})
	if err != nil {
		panicf("setting %v=%v: \nerr: %v", key, value, err)
	}
}

func commit(client abcicli.Client, hashExp []byte) {
	res, err := client.CommitSync()
	if err != nil {
		panicf("client error: %v", err)
	}
	if !bytes.Equal(res.Data, hashExp) {
		panicf("Commit hash was unexpected. Got %X expected %X", res.Data, hashExp)
	}
}

func deliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) {
	res, err := client.DeliverTxSync(types.RequestDeliverTx{Tx: txBytes})
	if err != nil {
		panicf("client error: %v", err)
	}
	if res.Code != codeExp {
		panicf("DeliverTx response code was unexpected. Got %v expected %v. Log: %v", res.Code, codeExp, res.Log)
	}
	if !bytes.Equal(res.Data, dataExp) {
		panicf("DeliverTx response data was unexpected. Got %X expected %X", res.Data, dataExp)
	}
}

/*func checkTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) {
	res, err := client.CheckTxSync(txBytes)
	if err != nil {
		panicf("client error: %v", err)
	}
	if res.IsErr() {
		panicf("checking tx %X: %v\nlog: %v", txBytes, res.Log)
	}
	if res.Code != codeExp {
		panicf("CheckTx response code was unexpected. Got %v expected %v. Log: %v",
			res.Code, codeExp, res.Log)
	}
	if !bytes.Equal(res.Data, dataExp) {
		panicf("CheckTx response data was unexpected. Got %X expected %X",
			res.Data, dataExp)
	}
}*/

func panicf(format string, a ...interface{}) {
	panic(fmt.Sprintf(format, a...))
}
@@ -1,95 +0,0 @@
package main

import (
	"fmt"
	"log"
	"os"
	"os/exec"
	"time"

	"github.com/tendermint/tendermint/abci/example/code"
	"github.com/tendermint/tendermint/abci/types"
)

var abciType string

func init() {
	abciType = os.Getenv("ABCI")
	if abciType == "" {
		abciType = "socket"
	}
}

func main() {
	testCounter()
}

const (
	maxABCIConnectTries = 10
)

func ensureABCIIsUp(typ string, n int) error {
	var err error
	cmdString := "abci-cli echo hello"
	if typ == "grpc" {
		cmdString = "abci-cli --abci grpc echo hello"
	}

	for i := 0; i < n; i++ {
		cmd := exec.Command("bash", "-c", cmdString)
		_, err = cmd.CombinedOutput()
		if err == nil {
			break
		}
		<-time.After(500 * time.Millisecond)
	}
	return err
}

func testCounter() {
	abciApp := os.Getenv("ABCI_APP")
	if abciApp == "" {
		panic("No ABCI_APP specified")
	}

	fmt.Printf("Running %s test with abci=%s\n", abciApp, abciType)
	subCommand := fmt.Sprintf("abci-cli %s", abciApp)
	cmd := exec.Command("bash", "-c", subCommand)
	cmd.Stdout = os.Stdout
	if err := cmd.Start(); err != nil {
		log.Fatalf("starting %q err: %v", abciApp, err)
	}
	defer func() {
		if err := cmd.Process.Kill(); err != nil {
			log.Printf("error on process kill: %v", err)
		}
		if err := cmd.Wait(); err != nil {
			log.Printf("error while waiting for cmd to exit: %v", err)
		}
	}()

	if err := ensureABCIIsUp(abciType, maxABCIConnectTries); err != nil {
		log.Fatalf("echo failed: %v", err) //nolint:gocritic
	}

	client := startClient(abciType)
	defer func() {
		if err := client.Stop(); err != nil {
			log.Printf("error trying client stop: %v", err)
		}
	}()

	setOption(client, "serial", "on")
	commit(client, nil)
	deliverTx(client, []byte("abc"), code.CodeTypeBadNonce, nil)
	commit(client, nil)
	deliverTx(client, []byte{0x00}, types.CodeTypeOK, nil)
	commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 1})
	deliverTx(client, []byte{0x00}, code.CodeTypeBadNonce, nil)
	deliverTx(client, []byte{0x01}, types.CodeTypeOK, nil)
	deliverTx(client, []byte{0x00, 0x02}, types.CodeTypeOK, nil)
	deliverTx(client, []byte{0x00, 0x03}, types.CodeTypeOK, nil)
	deliverTx(client, []byte{0x00, 0x00, 0x04}, types.CodeTypeOK, nil)
	deliverTx(client, []byte{0x00, 0x00, 0x06}, code.CodeTypeBadNonce, nil)
	commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 5})
}
@@ -1,28 +0,0 @@
#! /bin/bash
set -e

# These tests spawn the counter app and server by execing the ABCI_APP command and run some simple client tests against it

# Get the directory of where this script is.
export PATH="$GOBIN:$PATH"
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"

# Change into that dir because we expect that.
cd "$DIR"

echo "RUN COUNTER OVER SOCKET"
# test golang counter
ABCI_APP="counter" go run -mod=readonly ./*.go
echo "----------------------"

echo "RUN COUNTER OVER GRPC"
# test golang counter via grpc
ABCI_APP="counter --abci=grpc" ABCI="grpc" go run -mod=readonly ./*.go
echo "----------------------"

# test nodejs counter
# TODO: fix node app
#ABCI_APP="node $GOPATH/src/github.com/tendermint/js-abci/example/app.js" go test -test.run TestCounter
@@ -1,5 +1,7 @@
echo hello
info
prepare_proposal "abc"
process_proposal "abc"
commit
deliver_tx "abc"
info
@@ -8,3 +10,9 @@ query "abc"
deliver_tx "def=xyz"
commit
query "def"
prepare_proposal "preparedef"
process_proposal "replacedef"
process_proposal "preparedef"
prepare_proposal
process_proposal
commit

@@ -8,6 +8,14 @@
-> data: {"size":0}
-> data.hex: 0x7B2273697A65223A307D

> prepare_proposal "abc"
-> code: OK
-> log: Succeeded. Tx: abc

> process_proposal "abc"
-> code: OK
-> status: ACCEPT

> commit
-> code: OK
-> data.hex: 0x0000000000000000
@@ -49,3 +57,25 @@
-> value: xyz
-> value.hex: 78797A

> prepare_proposal "preparedef"
-> code: OK
-> log: Succeeded. Tx: replacedef

> process_proposal "replacedef"
-> code: OK
-> status: ACCEPT

> process_proposal "preparedef"
-> code: OK
-> status: REJECT

> prepare_proposal

> process_proposal
-> code: OK
-> status: ACCEPT

> commit
-> code: OK
-> data.hex: 0x0400000000000000

@@ -1,4 +1,3 @@
set_option serial on
check_tx 0x00
check_tx 0xff
deliver_tx 0x00

@@ -1,7 +1,3 @@
> set_option serial on
-> code: OK
-> log: OK (SetOption doesn't return anything.)

> check_tx 0x00
-> code: OK

@@ -12,18 +8,16 @@
-> code: OK

> check_tx 0x00
-> code: 2
-> log: Invalid nonce. Expected >= 1, got 0
-> code: OK

> deliver_tx 0x01
-> code: OK

> deliver_tx 0x04
-> code: 2
-> log: Invalid nonce. Expected 2, got 4
-> code: OK

> info
-> code: OK
-> data: {"hashes":0,"txs":2}
-> data.hex: 0x7B22686173686573223A302C22747873223A327D
-> data: {"size":3}
-> data.hex: 0x7B2273697A65223A337D

@@ -30,14 +30,42 @@ function testExample() {
		cat "${INPUT}.out.new"
		echo "Expected:"
		cat "${INPUT}.out"
		echo "Diff:"
		diff "${INPUT}.out" "${INPUT}.out.new"
		exit 1
	fi

	rm "${INPUT}".out.new
}

function testHelp() {
	INPUT=$1
	APP="$2 $3"

	echo "Test: $APP"
	$APP &> "${INPUT}.new" &
	sleep 2

	pre=$(shasum < "${INPUT}")
	post=$(shasum < "${INPUT}.new")

	if [[ "$pre" != "$post" ]]; then
		echo "You broke the tutorial"
		echo "Got:"
		cat "${INPUT}.new"
		echo "Expected:"
		cat "${INPUT}"
		echo "Diff:"
		diff "${INPUT}" "${INPUT}.new"
		exit 1
	fi

	rm "${INPUT}".new
}

testExample 1 tests/test_cli/ex1.abci abci-cli kvstore
testExample 2 tests/test_cli/ex2.abci abci-cli counter
testExample 2 tests/test_cli/ex2.abci abci-cli kvstore
testHelp tests/test_cli/testHelp.out abci-cli help

echo ""
echo "PASS"
abci/tests/test_cli/testHelp.out (new file, 30 lines)
@@ -0,0 +1,30 @@
the ABCI CLI tool wraps an ABCI client and is used for testing ABCI servers

Usage:
  abci-cli [command]

Available Commands:
  batch             run a batch of abci commands against an application
  check_tx          validate a transaction
  commit            commit the application state and return the Merkle root hash
  completion        Generate the autocompletion script for the specified shell
  console           start an interactive ABCI console for multiple commands
  deliver_tx        deliver a new transaction to the application
  echo              have the application echo a message
  help              Help about any command
  info              get some info about the application
  kvstore           ABCI demo example
  prepare_proposal  prepare proposal
  process_proposal  process proposal
  query             query the application state
  test              run integration tests
  version           print ABCI console version

Flags:
      --abci string        either socket or grpc (default "socket")
      --address string     address of application socket (default "tcp://0.0.0.0:26658")
  -h, --help               help for abci-cli
      --log_level string   set the logger level (default "debug")
  -v, --verbose            print the command and results as if it were a console session

Use "abci-cli [command] --help" for more information about a command.
@@ -4,21 +4,24 @@ import (
	context "golang.org/x/net/context"
)

//go:generate ../../scripts/mockery_generate.sh Application

// Application is an interface that enables any finite, deterministic state machine
// to be driven by a blockchain-based replication engine via the ABCI.
// All methods take a RequestXxx argument and return a ResponseXxx argument,
// except CheckTx/DeliverTx, which take `tx []byte`, and `Commit`, which takes nothing.
type Application interface {
	// Info/Query Connection
	Info(RequestInfo) ResponseInfo                // Return application info
	SetOption(RequestSetOption) ResponseSetOption // Set application option
	Query(RequestQuery) ResponseQuery             // Query for state
	Info(RequestInfo) ResponseInfo    // Return application info
	Query(RequestQuery) ResponseQuery // Query for state

	// Mempool Connection
	CheckTx(RequestCheckTx) ResponseCheckTx // Validate a tx for the mempool

	// Consensus Connection
	InitChain(RequestInitChain) ResponseInitChain // Initialize blockchain w validators/other info from TendermintCore
	InitChain(RequestInitChain) ResponseInitChain // Initialize blockchain w validators/other info from TendermintCore
	PrepareProposal(RequestPrepareProposal) ResponsePrepareProposal
	ProcessProposal(RequestProcessProposal) ResponseProcessProposal
	BeginBlock(RequestBeginBlock) ResponseBeginBlock // Signals the beginning of a block
	DeliverTx(RequestDeliverTx) ResponseDeliverTx    // Deliver a tx for full processing
	EndBlock(RequestEndBlock) ResponseEndBlock       // Signals the end of a block, returns changes to the validator set
@@ -47,10 +50,6 @@ func (BaseApplication) Info(req RequestInfo) ResponseInfo {
	return ResponseInfo{}
}

func (BaseApplication) SetOption(req RequestSetOption) ResponseSetOption {
	return ResponseSetOption{}
}

func (BaseApplication) DeliverTx(req RequestDeliverTx) ResponseDeliverTx {
	return ResponseDeliverTx{Code: CodeTypeOK}
}
@@ -95,6 +94,24 @@ func (BaseApplication) ApplySnapshotChunk(req RequestApplySnapshotChunk) Respons
	return ResponseApplySnapshotChunk{}
}

func (BaseApplication) PrepareProposal(req RequestPrepareProposal) ResponsePrepareProposal {
	txs := make([][]byte, 0, len(req.Txs))
	var totalBytes int64
	for _, tx := range req.Txs {
		totalBytes += int64(len(tx))
		if totalBytes > req.MaxTxBytes {
			break
		}
		txs = append(txs, tx)
	}
	return ResponsePrepareProposal{Txs: txs}
}

func (BaseApplication) ProcessProposal(req RequestProcessProposal) ResponseProcessProposal {
	return ResponseProcessProposal{
		Status: ResponseProcessProposal_ACCEPT}
}

//-------------------------------------------------------

// GRPCApplication is a GRPC wrapper for Application
@@ -119,11 +136,6 @@ func (app *GRPCApplication) Info(ctx context.Context, req *RequestInfo) (*Respon
	return &res, nil
}

func (app *GRPCApplication) SetOption(ctx context.Context, req *RequestSetOption) (*ResponseSetOption, error) {
	res := app.app.SetOption(*req)
	return &res, nil
}

func (app *GRPCApplication) DeliverTx(ctx context.Context, req *RequestDeliverTx) (*ResponseDeliverTx, error) {
	res := app.app.DeliverTx(*req)
	return &res, nil
@@ -182,3 +194,15 @@ func (app *GRPCApplication) ApplySnapshotChunk(
	res := app.app.ApplySnapshotChunk(*req)
	return &res, nil
}

func (app *GRPCApplication) PrepareProposal(
	ctx context.Context, req *RequestPrepareProposal) (*ResponsePrepareProposal, error) {
	res := app.app.PrepareProposal(*req)
	return &res, nil
}

func (app *GRPCApplication) ProcessProposal(
	ctx context.Context, req *RequestProcessProposal) (*ResponseProcessProposal, error) {
	res := app.app.ProcessProposal(*req)
	return &res, nil
}

@@ -1,11 +1,11 @@
package types

import (
	"bufio"
	"encoding/binary"
	"io"

	"github.com/gogo/protobuf/proto"
	"github.com/cosmos/gogoproto/proto"

	"github.com/tendermint/tendermint/libs/protoio"
)

const (
@@ -14,57 +14,19 @@ const (

// WriteMessage writes a varint length-delimited protobuf message.
func WriteMessage(msg proto.Message, w io.Writer) error {
	bz, err := proto.Marshal(msg)
	protoWriter := protoio.NewDelimitedWriter(w)
	_, err := protoWriter.WriteMsg(msg)
	if err != nil {
		return err
	}
	return encodeByteSlice(w, bz)

	return nil
}

// ReadMessage reads a varint length-delimited protobuf message.
func ReadMessage(r io.Reader, msg proto.Message) error {
	return readProtoMsg(r, msg, maxMsgSize)
	_, err := protoio.NewDelimitedReader(r, maxMsgSize).ReadMsg(msg)
	return err
}

func readProtoMsg(r io.Reader, msg proto.Message, maxSize int) error {
	// binary.ReadVarint takes an io.ByteReader, eg. a bufio.Reader
	reader, ok := r.(*bufio.Reader)
	if !ok {
		reader = bufio.NewReader(r)
	}
	length64, err := binary.ReadVarint(reader)
	if err != nil {
		return err
	}
	length := int(length64)
	if length < 0 || length > maxSize {
		return io.ErrShortBuffer
	}
	buf := make([]byte, length)
	if _, err := io.ReadFull(reader, buf); err != nil {
		return err
	}
	return proto.Unmarshal(buf, msg)
}

//-----------------------------------------------------------------------
// NOTE: we copied wire.EncodeByteSlice from go-wire rather than keep
// go-wire as a dep

func encodeByteSlice(w io.Writer, bz []byte) (err error) {
	err = encodeVarint(w, int64(len(bz)))
	if err != nil {
		return
	}
	_, err = w.Write(bz)
	return
}

func encodeVarint(w io.Writer, i int64) (err error) {
	var buf [10]byte
	n := binary.PutVarint(buf[:], i)
	_, err = w.Write(buf[0:n])
	return
}

//----------------------------------------
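A small round-trip sketch of the protoio-based helpers above (using the RequestEcho message; imports of bytes/fmt assumed):

var buf bytes.Buffer
in := &types.RequestEcho{Message: "hello"}
if err := types.WriteMessage(in, &buf); err != nil {
	panic(err)
}
out := &types.RequestEcho{}
if err := types.ReadMessage(&buf, out); err != nil {
	panic(err)
}
fmt.Println(out.Message) // prints "hello"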
@@ -87,12 +49,6 @@ func ToRequestInfo(req RequestInfo) *Request {
	}
}

func ToRequestSetOption(req RequestSetOption) *Request {
	return &Request{
		Value: &Request_SetOption{&req},
	}
}

func ToRequestDeliverTx(req RequestDeliverTx) *Request {
	return &Request{
		Value: &Request_DeliverTx{&req},
@@ -159,6 +115,18 @@ func ToRequestApplySnapshotChunk(req RequestApplySnapshotChunk) *Request {
	}
}

func ToRequestPrepareProposal(req RequestPrepareProposal) *Request {
	return &Request{
		Value: &Request_PrepareProposal{&req},
	}
}

func ToRequestProcessProposal(req RequestProcessProposal) *Request {
	return &Request{
		Value: &Request_ProcessProposal{&req},
	}
}

//----------------------------------------

func ToResponseException(errStr string) *Response {
@@ -185,12 +153,6 @@ func ToResponseInfo(res ResponseInfo) *Response {
	}
}

func ToResponseSetOption(res ResponseSetOption) *Response {
	return &Response{
		Value: &Response_SetOption{&res},
	}
}

func ToResponseDeliverTx(res ResponseDeliverTx) *Response {
	return &Response{
		Value: &Response_DeliverTx{&res},
@@ -256,3 +218,15 @@ func ToResponseApplySnapshotChunk(res ResponseApplySnapshotChunk) *Response {
		Value: &Response_ApplySnapshotChunk{&res},
	}
}

func ToResponsePrepareProposal(res ResponsePrepareProposal) *Response {
	return &Response{
		Value: &Response_PrepareProposal{&res},
	}
}

func ToResponseProcessProposal(res ResponseProcessProposal) *Response {
	return &Response{
		Value: &Response_ProcessProposal{&res},
	}
}

@@ -6,7 +6,7 @@ import (
	"strings"
	"testing"

	"github.com/gogo/protobuf/proto"
	"github.com/cosmos/gogoproto/proto"
	"github.com/stretchr/testify/assert"

	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
@@ -25,7 +25,7 @@ func TestMarshalJSON(t *testing.T) {
	{
		Type: "testEvent",
		Attributes: []EventAttribute{
			{Key: []byte("pho"), Value: []byte("bo")},
			{Key: "pho", Value: "bo"},
		},
	},
},
@@ -92,7 +92,7 @@ func TestWriteReadMessage2(t *testing.T) {
	{
		Type: "testEvent",
		Attributes: []EventAttribute{
			{Key: []byte("abc"), Value: []byte("def")},
			{Key: "abc", Value: "def"},
		},
	},
},

abci/types/mocks/application.go (new file, 224 lines)
@@ -0,0 +1,224 @@
// Code generated by mockery. DO NOT EDIT.

package mocks

import (
	mock "github.com/stretchr/testify/mock"
	types "github.com/tendermint/tendermint/abci/types"
)

// Application is an autogenerated mock type for the Application type
type Application struct {
	mock.Mock
}

// ApplySnapshotChunk provides a mock function with given fields: _a0
func (_m *Application) ApplySnapshotChunk(_a0 types.RequestApplySnapshotChunk) types.ResponseApplySnapshotChunk {
	ret := _m.Called(_a0)

	var r0 types.ResponseApplySnapshotChunk
	if rf, ok := ret.Get(0).(func(types.RequestApplySnapshotChunk) types.ResponseApplySnapshotChunk); ok {
		r0 = rf(_a0)
	} else {
		r0 = ret.Get(0).(types.ResponseApplySnapshotChunk)
	}

	return r0
}

// BeginBlock provides a mock function with given fields: _a0
func (_m *Application) BeginBlock(_a0 types.RequestBeginBlock) types.ResponseBeginBlock {
	ret := _m.Called(_a0)

	var r0 types.ResponseBeginBlock
	if rf, ok := ret.Get(0).(func(types.RequestBeginBlock) types.ResponseBeginBlock); ok {
		r0 = rf(_a0)
	} else {
		r0 = ret.Get(0).(types.ResponseBeginBlock)
	}

	return r0
}

// CheckTx provides a mock function with given fields: _a0
func (_m *Application) CheckTx(_a0 types.RequestCheckTx) types.ResponseCheckTx {
	ret := _m.Called(_a0)

	var r0 types.ResponseCheckTx
	if rf, ok := ret.Get(0).(func(types.RequestCheckTx) types.ResponseCheckTx); ok {
		r0 = rf(_a0)
	} else {
		r0 = ret.Get(0).(types.ResponseCheckTx)
	}

	return r0
}

// Commit provides a mock function with given fields:
func (_m *Application) Commit() types.ResponseCommit {
	ret := _m.Called()

	var r0 types.ResponseCommit
	if rf, ok := ret.Get(0).(func() types.ResponseCommit); ok {
		r0 = rf()
	} else {
		r0 = ret.Get(0).(types.ResponseCommit)
	}

	return r0
}

// DeliverTx provides a mock function with given fields: _a0
func (_m *Application) DeliverTx(_a0 types.RequestDeliverTx) types.ResponseDeliverTx {
	ret := _m.Called(_a0)

	var r0 types.ResponseDeliverTx
	if rf, ok := ret.Get(0).(func(types.RequestDeliverTx) types.ResponseDeliverTx); ok {
		r0 = rf(_a0)
	} else {
		r0 = ret.Get(0).(types.ResponseDeliverTx)
	}

	return r0
}

// EndBlock provides a mock function with given fields: _a0
func (_m *Application) EndBlock(_a0 types.RequestEndBlock) types.ResponseEndBlock {
	ret := _m.Called(_a0)

	var r0 types.ResponseEndBlock
	if rf, ok := ret.Get(0).(func(types.RequestEndBlock) types.ResponseEndBlock); ok {
		r0 = rf(_a0)
	} else {
		r0 = ret.Get(0).(types.ResponseEndBlock)
	}

	return r0
}

// Info provides a mock function with given fields: _a0
func (_m *Application) Info(_a0 types.RequestInfo) types.ResponseInfo {
	ret := _m.Called(_a0)

	var r0 types.ResponseInfo
	if rf, ok := ret.Get(0).(func(types.RequestInfo) types.ResponseInfo); ok {
		r0 = rf(_a0)
	} else {
		r0 = ret.Get(0).(types.ResponseInfo)
	}

	return r0
}

// InitChain provides a mock function with given fields: _a0
func (_m *Application) InitChain(_a0 types.RequestInitChain) types.ResponseInitChain {
	ret := _m.Called(_a0)

	var r0 types.ResponseInitChain
	if rf, ok := ret.Get(0).(func(types.RequestInitChain) types.ResponseInitChain); ok {
		r0 = rf(_a0)
	} else {
		r0 = ret.Get(0).(types.ResponseInitChain)
	}

	return r0
}

// ListSnapshots provides a mock function with given fields: _a0
func (_m *Application) ListSnapshots(_a0 types.RequestListSnapshots) types.ResponseListSnapshots {
	ret := _m.Called(_a0)

	var r0 types.ResponseListSnapshots
	if rf, ok := ret.Get(0).(func(types.RequestListSnapshots) types.ResponseListSnapshots); ok {
		r0 = rf(_a0)
	} else {
		r0 = ret.Get(0).(types.ResponseListSnapshots)
	}

	return r0
}

// LoadSnapshotChunk provides a mock function with given fields: _a0
func (_m *Application) LoadSnapshotChunk(_a0 types.RequestLoadSnapshotChunk) types.ResponseLoadSnapshotChunk {
	ret := _m.Called(_a0)

	var r0 types.ResponseLoadSnapshotChunk
	if rf, ok := ret.Get(0).(func(types.RequestLoadSnapshotChunk) types.ResponseLoadSnapshotChunk); ok {
		r0 = rf(_a0)
	} else {
		r0 = ret.Get(0).(types.ResponseLoadSnapshotChunk)
	}

	return r0
}

// OfferSnapshot provides a mock function with given fields: _a0
func (_m *Application) OfferSnapshot(_a0 types.RequestOfferSnapshot) types.ResponseOfferSnapshot {
	ret := _m.Called(_a0)

	var r0 types.ResponseOfferSnapshot
	if rf, ok := ret.Get(0).(func(types.RequestOfferSnapshot) types.ResponseOfferSnapshot); ok {
		r0 = rf(_a0)
	} else {
		r0 = ret.Get(0).(types.ResponseOfferSnapshot)
	}

	return r0
}

// PrepareProposal provides a mock function with given fields: _a0
func (_m *Application) PrepareProposal(_a0 types.RequestPrepareProposal) types.ResponsePrepareProposal {
	ret := _m.Called(_a0)

	var r0 types.ResponsePrepareProposal
	if rf, ok := ret.Get(0).(func(types.RequestPrepareProposal) types.ResponsePrepareProposal); ok {
		r0 = rf(_a0)
	} else {
		r0 = ret.Get(0).(types.ResponsePrepareProposal)
	}

	return r0
}

// ProcessProposal provides a mock function with given fields: _a0
func (_m *Application) ProcessProposal(_a0 types.RequestProcessProposal) types.ResponseProcessProposal {
	ret := _m.Called(_a0)

	var r0 types.ResponseProcessProposal
	if rf, ok := ret.Get(0).(func(types.RequestProcessProposal) types.ResponseProcessProposal); ok {
		r0 = rf(_a0)
	} else {
		r0 = ret.Get(0).(types.ResponseProcessProposal)
	}

	return r0
}

// Query provides a mock function with given fields: _a0
func (_m *Application) Query(_a0 types.RequestQuery) types.ResponseQuery {
	ret := _m.Called(_a0)

	var r0 types.ResponseQuery
	if rf, ok := ret.Get(0).(func(types.RequestQuery) types.ResponseQuery); ok {
		r0 = rf(_a0)
	} else {
		r0 = ret.Get(0).(types.ResponseQuery)
	}

	return r0
}

type mockConstructorTestingTNewApplication interface {
	mock.TestingT
	Cleanup(func())
}

// NewApplication creates a new instance of Application. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
func NewApplication(t mockConstructorTestingTNewApplication) *Application {
	mock := &Application{}
	mock.Mock.Test(t)

	t.Cleanup(func() { mock.AssertExpectations(t) })

	return mock
}
abci/types/mocks/base.go (new file, 187 lines)
@@ -0,0 +1,187 @@
package mocks

import (
	types "github.com/tendermint/tendermint/abci/types"
)

// BaseMock provides a wrapper around the generated Application mock and a BaseApplication.
// BaseMock first tries to use the mock's implementation of the method.
// If no functionality was provided for the mock by the user, BaseMock dispatches
// to the BaseApplication and uses its functionality.
// BaseMock allows users to provide mocked functionality for only the methods that matter
// for their test while avoiding a panic if the code calls Application methods that are
// not relevant to the test.
type BaseMock struct {
	base *types.BaseApplication
	*Application
}

func NewBaseMock() BaseMock {
	return BaseMock{
		base:        types.NewBaseApplication(),
		Application: new(Application),
	}
}

// Info/Query Connection
// Return application info
func (m BaseMock) Info(input types.RequestInfo) types.ResponseInfo {
	var ret types.ResponseInfo
	defer func() {
		if r := recover(); r != nil {
			ret = m.base.Info(input)
		}
	}()
	ret = m.Application.Info(input)
	return ret
}

func (m BaseMock) Query(input types.RequestQuery) types.ResponseQuery {
	var ret types.ResponseQuery
	defer func() {
		if r := recover(); r != nil {
			ret = m.base.Query(input)
		}
	}()
	ret = m.Application.Query(input)
	return ret
}

// Mempool Connection
// Validate a tx for the mempool
func (m BaseMock) CheckTx(input types.RequestCheckTx) types.ResponseCheckTx {
	var ret types.ResponseCheckTx
	defer func() {
		if r := recover(); r != nil {
			ret = m.base.CheckTx(input)
		}
	}()
	ret = m.Application.CheckTx(input)
	return ret
}

// Consensus Connection
// Initialize blockchain w validators/other info from TendermintCore
func (m BaseMock) InitChain(input types.RequestInitChain) types.ResponseInitChain {
	var ret types.ResponseInitChain
	defer func() {
		if r := recover(); r != nil {
			ret = m.base.InitChain(input)
		}
	}()
	ret = m.Application.InitChain(input)
	return ret
}

func (m BaseMock) PrepareProposal(input types.RequestPrepareProposal) types.ResponsePrepareProposal {
	var ret types.ResponsePrepareProposal
	defer func() {
		if r := recover(); r != nil {
			ret = m.base.PrepareProposal(input)
		}
	}()
	ret = m.Application.PrepareProposal(input)
	return ret
}

func (m BaseMock) ProcessProposal(input types.RequestProcessProposal) types.ResponseProcessProposal {
	var ret types.ResponseProcessProposal
	defer func() {
		if r := recover(); r != nil {
			ret = m.base.ProcessProposal(input)
		}
	}()
	ret = m.Application.ProcessProposal(input)
	return ret
}

// Commit the state and return the application Merkle root hash
func (m BaseMock) Commit() types.ResponseCommit {
	var ret types.ResponseCommit
	defer func() {
		if r := recover(); r != nil {
			ret = m.base.Commit()
		}
	}()
	ret = m.Application.Commit()
	return ret
}

// State Sync Connection
// List available snapshots
func (m BaseMock) ListSnapshots(input types.RequestListSnapshots) types.ResponseListSnapshots {
	var ret types.ResponseListSnapshots
	defer func() {
		if r := recover(); r != nil {
			ret = m.base.ListSnapshots(input)
		}
	}()
	ret = m.Application.ListSnapshots(input)
	return ret
}

func (m BaseMock) OfferSnapshot(input types.RequestOfferSnapshot) types.ResponseOfferSnapshot {
	var ret types.ResponseOfferSnapshot
	defer func() {
		if r := recover(); r != nil {
			ret = m.base.OfferSnapshot(input)
		}
	}()
	ret = m.Application.OfferSnapshot(input)
	return ret
}

func (m BaseMock) LoadSnapshotChunk(input types.RequestLoadSnapshotChunk) types.ResponseLoadSnapshotChunk {
	var ret types.ResponseLoadSnapshotChunk
	defer func() {
		if r := recover(); r != nil {
			ret = m.base.LoadSnapshotChunk(input)
		}
	}()
	ret = m.Application.LoadSnapshotChunk(input)
	return ret
}

func (m BaseMock) ApplySnapshotChunk(input types.RequestApplySnapshotChunk) types.ResponseApplySnapshotChunk {
	var ret types.ResponseApplySnapshotChunk
	defer func() {
		if r := recover(); r != nil {
			ret = m.base.ApplySnapshotChunk(input)
		}
	}()
	ret = m.Application.ApplySnapshotChunk(input)
	return ret
}

func (m BaseMock) BeginBlock(input types.RequestBeginBlock) types.ResponseBeginBlock {
	var ret types.ResponseBeginBlock
	defer func() {
		if r := recover(); r != nil {
			ret = m.base.BeginBlock(input)
		}
	}()
	ret = m.Application.BeginBlock(input)
	return ret
}

func (m BaseMock) DeliverTx(input types.RequestDeliverTx) types.ResponseDeliverTx {
	var ret types.ResponseDeliverTx
	defer func() {
		if r := recover(); r != nil {
			ret = m.base.DeliverTx(input)
		}
	}()
	ret = m.Application.DeliverTx(input)
	return ret
}

func (m BaseMock) EndBlock(input types.RequestEndBlock) types.ResponseEndBlock {
	var ret types.ResponseEndBlock
	defer func() {
		if r := recover(); r != nil {
			ret = m.base.EndBlock(input)
		}
	}()
	ret = m.Application.EndBlock(input)
	return ret
}
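As a hypothetical test sketch (method names from the generated mock above; testify's mock/require packages and the usual imports assumed):

func TestInfoOnly(t *testing.T) {
	app := mocks.NewBaseMock()
	// Only Info is stubbed; every other method falls through to BaseApplication.
	app.Application.On("Info", mock.Anything).Return(types.ResponseInfo{Data: "test"})

	require.Equal(t, "test", app.Info(types.RequestInfo{}).Data)
	// CheckTx was never stubbed, so the mock panics internally and the
	// BaseApplication default (CodeTypeOK) is returned instead.
	require.Equal(t, types.CodeTypeOK, app.CheckTx(types.RequestCheckTx{}).Code)
}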
@@ -4,7 +4,7 @@ import (
	"bytes"
	"encoding/json"

	"github.com/gogo/protobuf/jsonpb"
	"github.com/cosmos/gogoproto/jsonpb"
)

const (
@@ -41,6 +41,16 @@ func (r ResponseQuery) IsErr() bool {
	return r.Code != CodeTypeOK
}

// IsAccepted returns true if Status is ACCEPT
func (r ResponseProcessProposal) IsAccepted() bool {
	return r.Status == ResponseProcessProposal_ACCEPT
}

// IsStatusUnknown returns true if Status is UNKNOWN
func (r ResponseProcessProposal) IsStatusUnknown() bool {
	return r.Status == ResponseProcessProposal_UNKNOWN
}

//---------------------------------------------------------------------------
// override JSON marshaling so we emit defaults (ie. disable omitempty)

@@ -52,16 +62,6 @@ var (
	jsonpbUnmarshaller = jsonpb.Unmarshaler{}
)

func (r *ResponseSetOption) MarshalJSON() ([]byte, error) {
	s, err := jsonpbMarshaller.MarshalToString(r)
	return []byte(s), err
}

func (r *ResponseSetOption) UnmarshalJSON(b []byte) error {
	reader := bytes.NewBuffer(b)
	return jsonpbUnmarshaller.Unmarshal(reader, r)
}

func (r *ResponseCheckTx) MarshalJSON() ([]byte, error) {
	s, err := jsonpbMarshaller.MarshalToString(r)
	return []byte(s), err
@@ -126,6 +126,5 @@ var _ jsonRoundTripper = (*ResponseCommit)(nil)
var _ jsonRoundTripper = (*ResponseQuery)(nil)
var _ jsonRoundTripper = (*ResponseDeliverTx)(nil)
var _ jsonRoundTripper = (*ResponseCheckTx)(nil)
var _ jsonRoundTripper = (*ResponseSetOption)(nil)

var _ jsonRoundTripper = (*EventAttribute)(nil)
(File diff suppressed because it is too large.)
@@ -6,4 +6,4 @@ import (

// TODO: eliminate this after some version refactor

const Version = version.ABCIVersion
const Version = version.ABCISemVer

@@ -1,42 +0,0 @@
/*
Package behaviour provides a mechanism for reactors to report behaviour of peers.

Instead of a reactor calling the switch directly, it will call the behaviour module, which will
handle stopping and marking a peer as good on behalf of the reactor.

There are four different behaviours a reactor can report.

1. bad message

	type badMessage struct {
		explanation string
	}

This message will request the peer be stopped for an error.

2. message out of order

	type messageOutOfOrder struct {
		explanation string
	}

This message will request the peer be stopped for an error.

3. consensus vote

	type consensusVote struct {
		explanation string
	}

This message will request the peer be marked as good.

4. block part

	type blockPart struct {
		explanation string
	}

This message will request the peer be marked as good.
*/
package behaviour
@@ -1,49 +0,0 @@
package behaviour

import (
	"github.com/tendermint/tendermint/p2p"
)

// PeerBehaviour is a struct describing a behaviour a peer performed.
// `peerID` identifies the peer and reason characterizes the specific
// behaviour performed by the peer.
type PeerBehaviour struct {
	peerID p2p.ID
	reason interface{}
}

type badMessage struct {
	explanation string
}

// BadMessage returns a badMessage PeerBehaviour.
func BadMessage(peerID p2p.ID, explanation string) PeerBehaviour {
	return PeerBehaviour{peerID: peerID, reason: badMessage{explanation}}
}

type messageOutOfOrder struct {
	explanation string
}

// MessageOutOfOrder returns a messageOutOfOrder PeerBehaviour.
func MessageOutOfOrder(peerID p2p.ID, explanation string) PeerBehaviour {
	return PeerBehaviour{peerID: peerID, reason: messageOutOfOrder{explanation}}
}

type consensusVote struct {
	explanation string
}

// ConsensusVote returns a consensusVote PeerBehaviour.
func ConsensusVote(peerID p2p.ID, explanation string) PeerBehaviour {
	return PeerBehaviour{peerID: peerID, reason: consensusVote{explanation}}
}

type blockPart struct {
	explanation string
}

// BlockPart returns a blockPart PeerBehaviour.
func BlockPart(peerID p2p.ID, explanation string) PeerBehaviour {
	return PeerBehaviour{peerID: peerID, reason: blockPart{explanation}}
}
@@ -1,86 +0,0 @@
package behaviour

import (
	"errors"

	tmsync "github.com/tendermint/tendermint/libs/sync"
	"github.com/tendermint/tendermint/p2p"
)

// Reporter provides an interface for reactors to report the behaviour
// of peers synchronously to other components.
type Reporter interface {
	Report(behaviour PeerBehaviour) error
}

// SwitchReporter reports peer behaviour to an internal Switch.
type SwitchReporter struct {
	sw *p2p.Switch
}

// NewSwitchReporter returns a new SwitchReporter instance which wraps the Switch.
func NewSwitchReporter(sw *p2p.Switch) *SwitchReporter {
	return &SwitchReporter{
		sw: sw,
	}
}

// Report reports the behaviour of a peer to the Switch.
func (spbr *SwitchReporter) Report(behaviour PeerBehaviour) error {
	peer := spbr.sw.Peers().Get(behaviour.peerID)
	if peer == nil {
		return errors.New("peer not found")
	}

	switch reason := behaviour.reason.(type) {
	case consensusVote, blockPart:
		spbr.sw.MarkPeerAsGood(peer)
	case badMessage:
		spbr.sw.StopPeerForError(peer, reason.explanation)
	case messageOutOfOrder:
		spbr.sw.StopPeerForError(peer, reason.explanation)
	default:
		return errors.New("unknown reason reported")
	}

	return nil
}

// MockReporter is a concrete implementation of the Reporter
// interface used in reactor tests to ensure reactors report the correct
// behaviour in manufactured scenarios.
type MockReporter struct {
	mtx tmsync.RWMutex
	pb  map[p2p.ID][]PeerBehaviour
}

// NewMockReporter returns a Reporter which records all reported
// behaviours in memory.
func NewMockReporter() *MockReporter {
	return &MockReporter{
		pb: map[p2p.ID][]PeerBehaviour{},
	}
}

// Report stores the PeerBehaviour produced by the peer identified by peerID.
func (mpbr *MockReporter) Report(behaviour PeerBehaviour) error {
	mpbr.mtx.Lock()
	defer mpbr.mtx.Unlock()
	mpbr.pb[behaviour.peerID] = append(mpbr.pb[behaviour.peerID], behaviour)

	return nil
}

// GetBehaviours returns all behaviours reported on the peer identified by peerID.
func (mpbr *MockReporter) GetBehaviours(peerID p2p.ID) []PeerBehaviour {
	mpbr.mtx.RLock()
	defer mpbr.mtx.RUnlock()
	if items, ok := mpbr.pb[peerID]; ok {
		result := make([]PeerBehaviour, len(items))
		copy(result, items)

		return result
	}

	return []PeerBehaviour{}
}
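To show how the SwitchReporter above is meant to be wired in, here is a hedged sketch of a reactor holding a Reporter rather than a switch; the reactor type and handler names are hypothetical:

package behaviourexample // hypothetical package for illustration

import (
	bh "github.com/tendermint/tendermint/behaviour"
	"github.com/tendermint/tendermint/p2p"
)

// exampleReactor reports peer behaviour through the Reporter interface
// rather than calling the switch directly.
type exampleReactor struct {
	reporter bh.Reporter
}

func newExampleReactor(sw *p2p.Switch) *exampleReactor {
	return &exampleReactor{reporter: bh.NewSwitchReporter(sw)}
}

// onBadMessage asks for the sending peer to be stopped for an error.
func (r *exampleReactor) onBadMessage(peerID p2p.ID, reason string) error {
	return r.reporter.Report(bh.BadMessage(peerID, reason))
}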
@@ -1,205 +0,0 @@
package behaviour_test

import (
	"sync"
	"testing"

	bh "github.com/tendermint/tendermint/behaviour"
	"github.com/tendermint/tendermint/p2p"
)

// TestMockReporter tests the MockReporter's ability to store reported
// peer behaviour in memory indexed by the peerID.
func TestMockReporter(t *testing.T) {
	var peerID p2p.ID = "MockPeer"
	pr := bh.NewMockReporter()

	behaviours := pr.GetBehaviours(peerID)
	if len(behaviours) != 0 {
		t.Error("Expected to have no behaviours reported")
	}

	badMessage := bh.BadMessage(peerID, "bad message")
	if err := pr.Report(badMessage); err != nil {
		t.Error(err)
	}
	behaviours = pr.GetBehaviours(peerID)
	if len(behaviours) != 1 {
		t.Error("Expected the peer to have one reported behaviour")
	}

	if behaviours[0] != badMessage {
		t.Error("Expected Bad Message to have been reported")
	}
}

type scriptItem struct {
	peerID    p2p.ID
	behaviour bh.PeerBehaviour
}

// equalBehaviours returns true if a and b contain the same PeerBehaviours with
// the same frequencies, and false otherwise.
func equalBehaviours(a []bh.PeerBehaviour, b []bh.PeerBehaviour) bool {
	aHistogram := map[bh.PeerBehaviour]int{}
	bHistogram := map[bh.PeerBehaviour]int{}

	for _, behaviour := range a {
		aHistogram[behaviour]++
	}

	for _, behaviour := range b {
		bHistogram[behaviour]++
	}

	if len(aHistogram) != len(bHistogram) {
		return false
	}

	for _, behaviour := range a {
		if aHistogram[behaviour] != bHistogram[behaviour] {
			return false
		}
	}

	for _, behaviour := range b {
		if bHistogram[behaviour] != aHistogram[behaviour] {
			return false
		}
	}

	return true
}

// TestEqualPeerBehaviours tests that equalBehaviours compares two slices
// of peer behaviours by the behaviours they contain and the frequencies
// with which those behaviours occur.
func TestEqualPeerBehaviours(t *testing.T) {
	var (
		peerID        p2p.ID = "MockPeer"
		consensusVote        = bh.ConsensusVote(peerID, "voted")
		blockPart            = bh.BlockPart(peerID, "blocked")
		equals               = []struct {
			left  []bh.PeerBehaviour
			right []bh.PeerBehaviour
		}{
			// Empty sets
			{[]bh.PeerBehaviour{}, []bh.PeerBehaviour{}},
			// Single behaviours
			{[]bh.PeerBehaviour{consensusVote}, []bh.PeerBehaviour{consensusVote}},
			// Equal frequencies
			{[]bh.PeerBehaviour{consensusVote, consensusVote},
				[]bh.PeerBehaviour{consensusVote, consensusVote}},
			// Equal frequencies, different orders
			{[]bh.PeerBehaviour{consensusVote, blockPart},
				[]bh.PeerBehaviour{blockPart, consensusVote}},
		}
		unequals = []struct {
			left  []bh.PeerBehaviour
			right []bh.PeerBehaviour
		}{
			// Comparing empty sets to non-empty sets
			{[]bh.PeerBehaviour{}, []bh.PeerBehaviour{consensusVote}},
			// Different behaviours
			{[]bh.PeerBehaviour{consensusVote}, []bh.PeerBehaviour{blockPart}},
			// Same behaviour with different frequencies
			{[]bh.PeerBehaviour{consensusVote},
				[]bh.PeerBehaviour{consensusVote, consensusVote}},
		}
	)

	for _, test := range equals {
		if !equalBehaviours(test.left, test.right) {
			t.Errorf("expected %#v and %#v to be equal", test.left, test.right)
		}
	}

	for _, test := range unequals {
		if equalBehaviours(test.left, test.right) {
			t.Errorf("expected %#v and %#v to be unequal", test.left, test.right)
		}
	}
}

// TestMockPeerBehaviourReporterConcurrency constructs a scenario in which
// multiple goroutines use the same MockReporter instance.
// This test reproduces the conditions under which MockReporter will
// be used within a Reactor's `Receive` method in tests, to ensure thread safety.
func TestMockPeerBehaviourReporterConcurrency(t *testing.T) {
	var (
		behaviourScript = []struct {
			peerID     p2p.ID
			behaviours []bh.PeerBehaviour
		}{
			{"1", []bh.PeerBehaviour{bh.ConsensusVote("1", "")}},
			{"2", []bh.PeerBehaviour{bh.ConsensusVote("2", ""), bh.ConsensusVote("2", ""), bh.ConsensusVote("2", "")}},
			{
				"3",
				[]bh.PeerBehaviour{bh.BlockPart("3", ""),
					bh.ConsensusVote("3", ""),
					bh.BlockPart("3", ""),
					bh.ConsensusVote("3", "")}},
			{
				"4",
				[]bh.PeerBehaviour{bh.ConsensusVote("4", ""),
					bh.ConsensusVote("4", ""),
					bh.ConsensusVote("4", ""),
					bh.ConsensusVote("4", "")}},
			{
				"5",
				[]bh.PeerBehaviour{bh.BlockPart("5", ""),
					bh.ConsensusVote("5", ""),
					bh.BlockPart("5", ""),
					bh.ConsensusVote("5", "")}},
		}
	)

	var receiveWg sync.WaitGroup
	pr := bh.NewMockReporter()
	scriptItems := make(chan scriptItem)
	done := make(chan int)
	numConsumers := 3
	for i := 0; i < numConsumers; i++ {
		receiveWg.Add(1)
		go func() {
			defer receiveWg.Done()
			for {
				select {
				case pb := <-scriptItems:
					if err := pr.Report(pb.behaviour); err != nil {
						t.Error(err)
					}
				case <-done:
					return
				}
			}
		}()
	}

	var sendingWg sync.WaitGroup
	sendingWg.Add(1)
	go func() {
		defer sendingWg.Done()
		for _, item := range behaviourScript {
			for _, reason := range item.behaviours {
				scriptItems <- scriptItem{item.peerID, reason}
			}
		}
	}()

	sendingWg.Wait()

	for i := 0; i < numConsumers; i++ {
		done <- 1
	}

	receiveWg.Wait()

	for _, items := range behaviourScript {
		reported := pr.GetBehaviours(items.peerID)
		if !equalBehaviours(reported, items.behaviours) {
			t.Errorf("expected peer %s to have behaved \nExpected: %#v \nGot %#v \n",
				items.peerID, items.behaviours, reported)
		}
	}
}
@@ -1,110 +0,0 @@
package blockchain

import (
	"errors"
	"fmt"

	"github.com/gogo/protobuf/proto"

	bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
	"github.com/tendermint/tendermint/types"
)

const (
	// NOTE: keep up to date with bcproto.BlockResponse
	BlockResponseMessagePrefixSize   = 4
	BlockResponseMessageFieldKeySize = 1
	MaxMsgSize                       = types.MaxBlockSizeBytes +
		BlockResponseMessagePrefixSize +
		BlockResponseMessageFieldKeySize
)

// EncodeMsg encodes a Protobuf message.
func EncodeMsg(pb proto.Message) ([]byte, error) {
	msg := bcproto.Message{}

	switch pb := pb.(type) {
	case *bcproto.BlockRequest:
		msg.Sum = &bcproto.Message_BlockRequest{BlockRequest: pb}
	case *bcproto.BlockResponse:
		msg.Sum = &bcproto.Message_BlockResponse{BlockResponse: pb}
	case *bcproto.NoBlockResponse:
		msg.Sum = &bcproto.Message_NoBlockResponse{NoBlockResponse: pb}
	case *bcproto.StatusRequest:
		msg.Sum = &bcproto.Message_StatusRequest{StatusRequest: pb}
	case *bcproto.StatusResponse:
		msg.Sum = &bcproto.Message_StatusResponse{StatusResponse: pb}
	default:
		return nil, fmt.Errorf("unknown message type %T", pb)
	}

	bz, err := proto.Marshal(&msg)
	if err != nil {
		return nil, fmt.Errorf("unable to marshal %T: %w", pb, err)
	}

	return bz, nil
}

// DecodeMsg decodes a Protobuf message.
func DecodeMsg(bz []byte) (proto.Message, error) {
	pb := &bcproto.Message{}

	err := proto.Unmarshal(bz, pb)
	if err != nil {
		return nil, err
	}

	switch msg := pb.Sum.(type) {
	case *bcproto.Message_BlockRequest:
		return msg.BlockRequest, nil
	case *bcproto.Message_BlockResponse:
		return msg.BlockResponse, nil
	case *bcproto.Message_NoBlockResponse:
		return msg.NoBlockResponse, nil
	case *bcproto.Message_StatusRequest:
		return msg.StatusRequest, nil
	case *bcproto.Message_StatusResponse:
		return msg.StatusResponse, nil
	default:
		return nil, fmt.Errorf("unknown message type %T", msg)
	}
}

// ValidateMsg validates a message.
func ValidateMsg(pb proto.Message) error {
	if pb == nil {
		return errors.New("message cannot be nil")
	}

	switch msg := pb.(type) {
	case *bcproto.BlockRequest:
		if msg.Height < 0 {
			return errors.New("negative Height")
		}
	case *bcproto.BlockResponse:
		_, err := types.BlockFromProto(msg.Block)
		if err != nil {
			return err
		}
	case *bcproto.NoBlockResponse:
		if msg.Height < 0 {
			return errors.New("negative Height")
		}
	case *bcproto.StatusResponse:
		if msg.Base < 0 {
			return errors.New("negative Base")
		}
		if msg.Height < 0 {
			return errors.New("negative Height")
		}
		if msg.Base > msg.Height {
			return fmt.Errorf("base %v cannot be greater than height %v", msg.Base, msg.Height)
		}
	case *bcproto.StatusRequest:
		return nil
	default:
		return fmt.Errorf("unknown message type %T", msg)
	}
	return nil
}
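As a quick illustration of the codec above, a minimal, hypothetical round trip through EncodeMsg, ValidateMsg and DecodeMsg could look like the following; it assumes the package is importable at github.com/tendermint/tendermint/blockchain, and the height is invented:

package main

import (
	"fmt"

	"github.com/tendermint/tendermint/blockchain"
	bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
)

func main() {
	// Wrap a BlockRequest in the Message oneof and marshal it.
	bz, err := blockchain.EncodeMsg(&bcproto.BlockRequest{Height: 42})
	if err != nil {
		panic(err)
	}

	// Unmarshal and unwrap; msg is the concrete *bcproto.BlockRequest again.
	msg, err := blockchain.DecodeMsg(bz)
	if err != nil {
		panic(err)
	}
	if err := blockchain.ValidateMsg(msg); err != nil {
		panic(err)
	}
	fmt.Printf("decoded %T at height %d\n", msg, msg.(*bcproto.BlockRequest).Height)
}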
@@ -1,211 +0,0 @@
package v1

import (
	"fmt"
	"math"
	"time"

	flow "github.com/tendermint/tendermint/libs/flowrate"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/types"
)

//--------
// Peer

// BpPeerParams stores the peer parameters that are used when creating a peer.
type BpPeerParams struct {
	timeout     time.Duration
	minRecvRate int64
	sampleRate  time.Duration
	windowSize  time.Duration
}

// BpPeer is the data structure associated with a fast sync peer.
type BpPeer struct {
	logger log.Logger
	ID     p2p.ID

	Base                    int64                  // the peer reported base
	Height                  int64                  // the peer reported height
	NumPendingBlockRequests int                    // number of requests still waiting for block responses
	blocks                  map[int64]*types.Block // blocks received or expected to be received from this peer
	blockResponseTimer      *time.Timer
	recvMonitor             *flow.Monitor
	params                  *BpPeerParams // parameters for timer and monitor

	onErr func(err error, peerID p2p.ID) // function to call on error
}

// NewBpPeer creates a new peer.
func NewBpPeer(peerID p2p.ID, base int64, height int64,
	onErr func(err error, peerID p2p.ID), params *BpPeerParams) *BpPeer {

	if params == nil {
		params = BpPeerDefaultParams()
	}
	return &BpPeer{
		ID:     peerID,
		Base:   base,
		Height: height,
		blocks: make(map[int64]*types.Block, maxRequestsPerPeer),
		logger: log.NewNopLogger(),
		onErr:  onErr,
		params: params,
	}
}

// String returns a string representation of a peer.
func (peer *BpPeer) String() string {
	return fmt.Sprintf("peer: %v height: %v pending: %v", peer.ID, peer.Height, peer.NumPendingBlockRequests)
}

// SetLogger sets the logger of the peer.
func (peer *BpPeer) SetLogger(l log.Logger) {
	peer.logger = l
}

// Cleanup performs cleanup of the peer: removes blocks and requests, stops the timer and monitor.
func (peer *BpPeer) Cleanup() {
	if peer.blockResponseTimer != nil {
		peer.blockResponseTimer.Stop()
	}
	if peer.NumPendingBlockRequests != 0 {
		peer.logger.Info("peer with pending requests is being cleaned", "peer", peer.ID)
	}
	if len(peer.blocks)-peer.NumPendingBlockRequests != 0 {
		peer.logger.Info("peer with pending blocks is being cleaned", "peer", peer.ID)
	}
	for h := range peer.blocks {
		delete(peer.blocks, h)
	}
	peer.NumPendingBlockRequests = 0
	peer.recvMonitor = nil
}

// BlockAtHeight returns the block at a given height if available and errMissingBlock otherwise.
func (peer *BpPeer) BlockAtHeight(height int64) (*types.Block, error) {
	block, ok := peer.blocks[height]
	if !ok {
		return nil, errMissingBlock
	}
	if block == nil {
		return nil, errMissingBlock
	}
	return peer.blocks[height], nil
}

// AddBlock adds a block at peer level. Block must be non-nil and recvSize a non-negative integer.
// The peer must have a pending request for this block.
func (peer *BpPeer) AddBlock(block *types.Block, recvSize int) error {
	if block == nil || recvSize < 0 {
		panic("bad parameters")
	}
	existingBlock, ok := peer.blocks[block.Height]
	if !ok {
		peer.logger.Error("unsolicited block", "blockHeight", block.Height, "peer", peer.ID)
		return errMissingBlock
	}
	if existingBlock != nil {
		peer.logger.Error("already have a block for height", "height", block.Height)
		return errDuplicateBlock
	}
	if peer.NumPendingBlockRequests == 0 {
		panic("peer does not have pending requests")
	}
	peer.blocks[block.Height] = block
	peer.NumPendingBlockRequests--
	if peer.NumPendingBlockRequests == 0 {
		peer.stopMonitor()
		peer.stopBlockResponseTimer()
	} else {
		peer.recvMonitor.Update(recvSize)
		peer.resetBlockResponseTimer()
	}
	return nil
}

// RemoveBlock removes the block of given height.
func (peer *BpPeer) RemoveBlock(height int64) {
	delete(peer.blocks, height)
}

// RequestSent records that a request was sent, and starts the peer timer and monitor if needed.
func (peer *BpPeer) RequestSent(height int64) {
	peer.blocks[height] = nil

	if peer.NumPendingBlockRequests == 0 {
		peer.startMonitor()
		peer.resetBlockResponseTimer()
	}
	peer.NumPendingBlockRequests++
}

// CheckRate verifies that the response rate of the peer is acceptable (higher than the minimum allowed).
func (peer *BpPeer) CheckRate() error {
	if peer.NumPendingBlockRequests == 0 {
		return nil
	}
	curRate := peer.recvMonitor.Status().CurRate
	// curRate can be 0 on start
	if curRate != 0 && curRate < peer.params.minRecvRate {
		err := errSlowPeer
		peer.logger.Error("SendTimeout", "peer", peer,
			"reason", err,
			"curRate", fmt.Sprintf("%d KB/s", curRate/1024),
			"minRate", fmt.Sprintf("%d KB/s", peer.params.minRecvRate/1024))
		return err
	}
	return nil
}

func (peer *BpPeer) onTimeout() {
	peer.onErr(errNoPeerResponse, peer.ID)
}

func (peer *BpPeer) stopMonitor() {
	peer.recvMonitor.Done()
	peer.recvMonitor = nil
}

func (peer *BpPeer) startMonitor() {
	peer.recvMonitor = flow.New(peer.params.sampleRate, peer.params.windowSize)
	initialValue := float64(peer.params.minRecvRate) * math.E
	peer.recvMonitor.SetREMA(initialValue)
}

func (peer *BpPeer) resetBlockResponseTimer() {
	if peer.blockResponseTimer == nil {
		peer.blockResponseTimer = time.AfterFunc(peer.params.timeout, peer.onTimeout)
	} else {
		peer.blockResponseTimer.Reset(peer.params.timeout)
	}
}

func (peer *BpPeer) stopBlockResponseTimer() bool {
	if peer.blockResponseTimer == nil {
		return false
	}
	return peer.blockResponseTimer.Stop()
}

// BpPeerDefaultParams returns the default peer parameters.
func BpPeerDefaultParams() *BpPeerParams {
	return &BpPeerParams{
		// Timeout for a peer to respond to a block request.
		timeout: 15 * time.Second,

		// Minimum recv rate to ensure we're receiving blocks from a peer fast
		// enough. If a peer is not sending data at least at that rate, we
		// consider them to have timed out and we disconnect.
		//
		// Assuming a DSL connection (not a good choice) 128 Kbps (upload) ~ 15 KB/s,
		// sending data across the Atlantic ~ 7.5 KB/s.
		minRecvRate: int64(7680),

		// Monitor parameters
		sampleRate: time.Second,
		windowSize: 40 * time.Second,
	}
}
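To make the BpPeer request/response lifecycle above concrete, here is a minimal, hedged sketch written as if it lived inside package v1 (so it can reach the unexported timer and monitor machinery); the peer ID, heights and callback body are invented for illustration:

// A hypothetical helper inside package v1.
func examplePeerLifecycle() error {
	peer := NewBpPeer(
		"examplePeer", 0, 100, // hypothetical peer ID, base and height
		func(err error, peerID p2p.ID) {
			// Invoked if the block response timer expires (errNoPeerResponse).
		},
		nil) // nil selects BpPeerDefaultParams()

	// Recording a request starts the rate monitor and the response timer.
	peer.RequestSent(10)

	// Delivering the matching block drops the pending count back to zero,
	// which stops the monitor and the timer.
	block := types.MakeBlock(10, []types.Tx{types.Tx("tx")}, nil, nil)
	if err := peer.AddBlock(block, 100); err != nil {
		return err
	}

	_, err := peer.BlockAtHeight(10)
	return err
}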
@@ -1,280 +0,0 @@
package v1

import (
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/tendermint/tendermint/libs/log"
	tmrand "github.com/tendermint/tendermint/libs/rand"
	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/types"
)

func TestPeerMonitor(t *testing.T) {
	peer := NewBpPeer(
		p2p.ID(tmrand.Str(12)), 0, 10,
		func(err error, _ p2p.ID) {},
		nil)
	peer.SetLogger(log.TestingLogger())
	peer.startMonitor()
	assert.NotNil(t, peer.recvMonitor)
	peer.stopMonitor()
	assert.Nil(t, peer.recvMonitor)
}

func TestPeerResetBlockResponseTimer(t *testing.T) {
	var (
		numErrFuncCalls int        // number of calls to the errFunc
		lastErr         error      // last generated error
		peerTestMtx     sync.Mutex // modifications of the variables above are also done from the timer handler goroutine
	)
	params := &BpPeerParams{timeout: 20 * time.Millisecond}

	peer := NewBpPeer(
		p2p.ID(tmrand.Str(12)), 0, 10,
		func(err error, _ p2p.ID) {
			peerTestMtx.Lock()
			defer peerTestMtx.Unlock()
			lastErr = err
			numErrFuncCalls++
		},
		params)

	peer.SetLogger(log.TestingLogger())
	checkByStoppingPeerTimer(t, peer, false)

	// initial reset call with peer having a nil timer
	peer.resetBlockResponseTimer()
	assert.NotNil(t, peer.blockResponseTimer)
	// make sure timer is running and stop it
	checkByStoppingPeerTimer(t, peer, true)

	// reset with running timer
	peer.resetBlockResponseTimer()
	time.Sleep(5 * time.Millisecond)
	peer.resetBlockResponseTimer()
	assert.NotNil(t, peer.blockResponseTimer)

	// let the timer expire and ...
	time.Sleep(50 * time.Millisecond)
	// ... check timer is not running
	checkByStoppingPeerTimer(t, peer, false)

	peerTestMtx.Lock()
	// ... check errNoPeerResponse has been sent
	assert.Equal(t, 1, numErrFuncCalls)
	assert.Equal(t, lastErr, errNoPeerResponse)
	peerTestMtx.Unlock()
}

func TestPeerRequestSent(t *testing.T) {
	params := &BpPeerParams{timeout: 2 * time.Millisecond}

	peer := NewBpPeer(
		p2p.ID(tmrand.Str(12)), 0, 10,
		func(err error, _ p2p.ID) {},
		params)

	peer.SetLogger(log.TestingLogger())

	peer.RequestSent(1)
	assert.NotNil(t, peer.recvMonitor)
	assert.NotNil(t, peer.blockResponseTimer)
	assert.Equal(t, 1, peer.NumPendingBlockRequests)

	peer.RequestSent(1)
	assert.NotNil(t, peer.recvMonitor)
	assert.NotNil(t, peer.blockResponseTimer)
	assert.Equal(t, 2, peer.NumPendingBlockRequests)
}

func TestPeerGetAndRemoveBlock(t *testing.T) {
	peer := NewBpPeer(
		p2p.ID(tmrand.Str(12)), 0, 100,
		func(err error, _ p2p.ID) {},
		nil)

	// Change peer height
	peer.Height = int64(10)
	assert.Equal(t, int64(10), peer.Height)

	// request some blocks and receive a few of them
	for i := 1; i <= 10; i++ {
		peer.RequestSent(int64(i))
		if i > 5 {
			// only receive blocks 1..5
			continue
		}
		_ = peer.AddBlock(makeSmallBlock(i), 10)
	}

	tests := []struct {
		name         string
		height       int64
		wantErr      error
		blockPresent bool
	}{
		{"no request", 100, errMissingBlock, false},
		{"no block", 6, errMissingBlock, false},
		{"block 1 present", 1, nil, true},
		{"block max present", 5, nil, true},
	}

	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			// try to get the block
			b, err := peer.BlockAtHeight(tt.height)
			assert.Equal(t, tt.wantErr, err)
			assert.Equal(t, tt.blockPresent, b != nil)

			// remove the block
			peer.RemoveBlock(tt.height)
			_, err = peer.BlockAtHeight(tt.height)
			assert.Equal(t, errMissingBlock, err)
		})
	}
}

func TestPeerAddBlock(t *testing.T) {
	peer := NewBpPeer(
		p2p.ID(tmrand.Str(12)), 0, 100,
		func(err error, _ p2p.ID) {},
		nil)

	// request some blocks, receive one
	for i := 1; i <= 10; i++ {
		peer.RequestSent(int64(i))
		if i == 5 {
			// receive block 5
			_ = peer.AddBlock(makeSmallBlock(i), 10)
		}
	}

	tests := []struct {
		name         string
		height       int64
		wantErr      error
		blockPresent bool
	}{
		{"no request", 50, errMissingBlock, false},
		{"duplicate block", 5, errDuplicateBlock, true},
		{"block 1 successfully received", 1, nil, true},
		{"block max successfully received", 10, nil, true},
	}

	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			// try to add the block
			err := peer.AddBlock(makeSmallBlock(int(tt.height)), 10)
			assert.Equal(t, tt.wantErr, err)
			_, err = peer.BlockAtHeight(tt.height)
			assert.Equal(t, tt.blockPresent, err == nil)
		})
	}
}

func TestPeerOnErrFuncCalledDueToExpiration(t *testing.T) {

	params := &BpPeerParams{timeout: 10 * time.Millisecond}
	var (
		numErrFuncCalls int        // number of calls to the onErr function
		lastErr         error      // last generated error
		peerTestMtx     sync.Mutex // modifications of the variables above are also done from the timer handler goroutine
	)

	peer := NewBpPeer(
		p2p.ID(tmrand.Str(12)), 0, 10,
		func(err error, _ p2p.ID) {
			peerTestMtx.Lock()
			defer peerTestMtx.Unlock()
			lastErr = err
			numErrFuncCalls++
		},
		params)

	peer.SetLogger(log.TestingLogger())

	peer.RequestSent(1)
	time.Sleep(50 * time.Millisecond)
	// timer should have expired by now, check that the on error function was called
	peerTestMtx.Lock()
	assert.Equal(t, 1, numErrFuncCalls)
	assert.Equal(t, errNoPeerResponse, lastErr)
	peerTestMtx.Unlock()
}

func TestPeerCheckRate(t *testing.T) {
	params := &BpPeerParams{
		timeout:     time.Second,
		minRecvRate: int64(100), // 100 bytes/sec exponential moving average
	}
	peer := NewBpPeer(
		p2p.ID(tmrand.Str(12)), 0, 10,
		func(err error, _ p2p.ID) {},
		params)
	peer.SetLogger(log.TestingLogger())

	require.Nil(t, peer.CheckRate())

	for i := 0; i < 40; i++ {
		peer.RequestSent(int64(i))
	}

	// monitor starts with a higher rEMA (~ 2*minRecvRate), wait for it to go down
	time.Sleep(900 * time.Millisecond)

	// normal peer - send a bit more than 100 bytes/sec, > 10 bytes/100msec, check peer is not considered slow
	for i := 0; i < 10; i++ {
		_ = peer.AddBlock(makeSmallBlock(i), 11)
		time.Sleep(100 * time.Millisecond)
		require.Nil(t, peer.CheckRate())
	}

	// slow peer - send a bit less than 10 bytes/100msec
	for i := 10; i < 20; i++ {
		_ = peer.AddBlock(makeSmallBlock(i), 9)
		time.Sleep(100 * time.Millisecond)
	}
	// check peer is considered slow
	assert.Equal(t, errSlowPeer, peer.CheckRate())
}

func TestPeerCleanup(t *testing.T) {
	params := &BpPeerParams{timeout: 2 * time.Millisecond}

	peer := NewBpPeer(
		p2p.ID(tmrand.Str(12)), 0, 10,
		func(err error, _ p2p.ID) {},
		params)
	peer.SetLogger(log.TestingLogger())

	assert.Nil(t, peer.blockResponseTimer)
	peer.RequestSent(1)
	assert.NotNil(t, peer.blockResponseTimer)

	peer.Cleanup()
	checkByStoppingPeerTimer(t, peer, false)
}

// Check if peer timer is running or not (a running timer can be successfully stopped).
// Note: stops the timer.
func checkByStoppingPeerTimer(t *testing.T, peer *BpPeer, running bool) {
	assert.NotPanics(t, func() {
		stopped := peer.stopBlockResponseTimer()
		if running {
			assert.True(t, stopped)
		} else {
			assert.False(t, stopped)
		}
	})
}

func makeSmallBlock(height int) *types.Block {
	return types.MakeBlock(int64(height), []types.Tx{types.Tx("foo")}, nil, nil)
}
@@ -1,370 +0,0 @@
package v1

import (
	"sort"

	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/types"
)

// BlockPool keeps track of the fast sync peers, block requests and block responses.
type BlockPool struct {
	logger log.Logger
	// Set of peers that have sent status responses, with height bigger than pool.Height
	peers map[p2p.ID]*BpPeer
	// Set of block heights and the corresponding peers from where a block response is expected or has been received.
	blocks map[int64]p2p.ID

	plannedRequests   map[int64]struct{} // list of blocks to be assigned peers for blockRequest
	nextRequestHeight int64              // next height to be added to plannedRequests

	Height        int64 // height of next block to execute
	MaxPeerHeight int64 // maximum height of all peers
	toBcR         bcReactor
}

// NewBlockPool creates a new BlockPool.
func NewBlockPool(height int64, toBcR bcReactor) *BlockPool {
	return &BlockPool{
		Height:            height,
		MaxPeerHeight:     0,
		peers:             make(map[p2p.ID]*BpPeer),
		blocks:            make(map[int64]p2p.ID),
		plannedRequests:   make(map[int64]struct{}),
		nextRequestHeight: height,
		toBcR:             toBcR,
	}
}

// SetLogger sets the logger of the pool.
func (pool *BlockPool) SetLogger(l log.Logger) {
	pool.logger = l
}

// ReachedMaxHeight checks whether the pool has reached the maximum peer height.
func (pool *BlockPool) ReachedMaxHeight() bool {
	return pool.Height >= pool.MaxPeerHeight
}

func (pool *BlockPool) rescheduleRequest(peerID p2p.ID, height int64) {
	pool.logger.Info("reschedule requests made to peer for height", "peerID", peerID, "height", height)
	pool.plannedRequests[height] = struct{}{}
	delete(pool.blocks, height)
	pool.peers[peerID].RemoveBlock(height)
}

// Updates the pool's max height. If no peers are left, MaxPeerHeight is set to 0.
func (pool *BlockPool) updateMaxPeerHeight() {
	var newMax int64
	for _, peer := range pool.peers {
		peerHeight := peer.Height
		if peerHeight > newMax {
			newMax = peerHeight
		}
	}
	pool.MaxPeerHeight = newMax
}

// UpdatePeer adds a new peer or updates an existing peer with a new base and height.
// If a peer is short it is not added.
func (pool *BlockPool) UpdatePeer(peerID p2p.ID, base int64, height int64) error {

	peer := pool.peers[peerID]

	if peer == nil {
		if height < pool.Height {
			pool.logger.Info("Peer height too small",
				"peer", peerID, "height", height, "fsm_height", pool.Height)
			return errPeerTooShort
		}
		// Add new peer.
		peer = NewBpPeer(peerID, base, height, pool.toBcR.sendPeerError, nil)
		peer.SetLogger(pool.logger.With("peer", peerID))
		pool.peers[peerID] = peer
		pool.logger.Info("added peer", "peerID", peerID, "base", base, "height", height, "num_peers", len(pool.peers))
	} else {
		// Check if peer is lowering its height. This is not allowed.
		if height < peer.Height {
			pool.RemovePeer(peerID, errPeerLowersItsHeight)
			return errPeerLowersItsHeight
		}
		// Update existing peer.
		peer.Base = base
		peer.Height = height
	}

	// Update the pool's MaxPeerHeight if needed.
	pool.updateMaxPeerHeight()

	return nil
}

// Cleans and deletes the peer. Recomputes the max peer height.
func (pool *BlockPool) deletePeer(peer *BpPeer) {
	if peer == nil {
		return
	}
	peer.Cleanup()
	delete(pool.peers, peer.ID)

	if peer.Height == pool.MaxPeerHeight {
		pool.updateMaxPeerHeight()
	}
}

// RemovePeer removes the blocks and requests from the peer, reschedules them and deletes the peer.
func (pool *BlockPool) RemovePeer(peerID p2p.ID, err error) {
	peer := pool.peers[peerID]
	if peer == nil {
		return
	}
	pool.logger.Info("removing peer", "peerID", peerID, "error", err)

	// Reschedule the block requests made to the peer, or received and not processed yet.
	// Note that some of the requests may be removed further down.
	for h := range pool.peers[peerID].blocks {
		pool.rescheduleRequest(peerID, h)
	}

	oldMaxPeerHeight := pool.MaxPeerHeight
	// Delete the peer. This operation may result in the pool's MaxPeerHeight being lowered.
	pool.deletePeer(peer)

	// Check if the pool's MaxPeerHeight has been lowered.
	// This may happen if the tallest peer has been removed.
	if oldMaxPeerHeight > pool.MaxPeerHeight {
		// Remove any planned requests for heights over the new MaxPeerHeight.
		for h := range pool.plannedRequests {
			if h > pool.MaxPeerHeight {
				delete(pool.plannedRequests, h)
			}
		}
		// Adjust the nextRequestHeight to the new max plus one.
		if pool.nextRequestHeight > pool.MaxPeerHeight {
			pool.nextRequestHeight = pool.MaxPeerHeight + 1
		}
	}
}

func (pool *BlockPool) removeShortPeers() {
	for _, peer := range pool.peers {
		if peer.Height < pool.Height {
			pool.RemovePeer(peer.ID, nil)
		}
	}
}

func (pool *BlockPool) removeBadPeers() {
	pool.removeShortPeers()
	for _, peer := range pool.peers {
		if err := peer.CheckRate(); err != nil {
			pool.RemovePeer(peer.ID, err)
			pool.toBcR.sendPeerError(err, peer.ID)
		}
	}
}

// MakeNextRequests creates more requests if the block pool is running low.
func (pool *BlockPool) MakeNextRequests(maxNumRequests int) {
	heights := pool.makeRequestBatch(maxNumRequests)
	if len(heights) != 0 {
		pool.logger.Info("makeNextRequests will make following requests",
			"number", len(heights), "heights", heights)
	}

	for _, height := range heights {
		h := int64(height)
		if !pool.sendRequest(h) {
			// If a good peer was not found for sending the request at height h then return,
			// as it shouldn't be possible to find a peer for h+1.
			return
		}
		delete(pool.plannedRequests, h)
	}
}

// Makes a batch of requests sorted by height such that the block pool has up to maxNumRequests entries.
func (pool *BlockPool) makeRequestBatch(maxNumRequests int) []int {
	pool.removeBadPeers()
	// At this point pool.requests may include heights for requests to be redone due to removal of peers:
	// - peers timed out or were removed by switch
	// - FSM timed out on waiting to advance the block execution due to missing blocks at h or h+1
	// Determine the number of requests needed by subtracting the number of requests already made from the
	// maximum allowed.
	numNeeded := maxNumRequests - len(pool.blocks)
	for len(pool.plannedRequests) < numNeeded {
		if pool.nextRequestHeight > pool.MaxPeerHeight {
			break
		}
		pool.plannedRequests[pool.nextRequestHeight] = struct{}{}
		pool.nextRequestHeight++
	}

	heights := make([]int, 0, len(pool.plannedRequests))
	for k := range pool.plannedRequests {
		heights = append(heights, int(k))
	}
	sort.Ints(heights)
	return heights
}

func (pool *BlockPool) sendRequest(height int64) bool {
	for _, peer := range pool.peers {
		if peer.NumPendingBlockRequests >= maxRequestsPerPeer {
			continue
		}
		if peer.Base > height || peer.Height < height {
			continue
		}

		err := pool.toBcR.sendBlockRequest(peer.ID, height)
		if err == errNilPeerForBlockRequest {
			// Switch does not have this peer, remove it and continue to look for another peer.
			pool.logger.Error("switch does not have peer, removing peer selected for height", "peer",
				peer.ID, "height", height)
			pool.RemovePeer(peer.ID, err)
			continue
		}

		if err == errSendQueueFull {
			pool.logger.Error("peer queue is full", "peer", peer.ID, "height", height)
			continue
		}

		pool.logger.Info("assigned request to peer", "peer", peer.ID, "height", height)

		pool.blocks[height] = peer.ID
		peer.RequestSent(height)

		return true
	}
	pool.logger.Error("could not find peer to send request for block at height", "height", height)
	return false
}

// AddBlock validates that the block comes from the peer it was expected from and stores it in the 'blocks' map.
func (pool *BlockPool) AddBlock(peerID p2p.ID, block *types.Block, blockSize int) error {
	peer, ok := pool.peers[peerID]
	if !ok {
		pool.logger.Error("block from unknown peer", "height", block.Height, "peer", peerID)
		return errBadDataFromPeer
	}
	if wantPeerID, ok := pool.blocks[block.Height]; ok && wantPeerID != peerID {
		pool.logger.Error("block received from wrong peer", "height", block.Height,
			"peer", peerID, "expected_peer", wantPeerID)
		return errBadDataFromPeer
	}

	return peer.AddBlock(block, blockSize)
}

// BlockData stores the peer responsible to deliver a block and the actual block if delivered.
type BlockData struct {
	block *types.Block
	peer  *BpPeer
}

// BlockAndPeerAtHeight retrieves the block and delivery peer at the specified height.
// Returns errMissingBlock if a block was not found.
func (pool *BlockPool) BlockAndPeerAtHeight(height int64) (bData *BlockData, err error) {
	peerID := pool.blocks[height]
	peer := pool.peers[peerID]
	if peer == nil {
		return nil, errMissingBlock
	}

	block, err := peer.BlockAtHeight(height)
	if err != nil {
		return nil, err
	}

	return &BlockData{peer: peer, block: block}, nil
}

// FirstTwoBlocksAndPeers returns the blocks and the delivery peers at pool's height H and H+1.
func (pool *BlockPool) FirstTwoBlocksAndPeers() (first, second *BlockData, err error) {
	first, err = pool.BlockAndPeerAtHeight(pool.Height)
	second, err2 := pool.BlockAndPeerAtHeight(pool.Height + 1)
	if err == nil {
		err = err2
	}
	return
}

// InvalidateFirstTwoBlocks removes the peers that sent us the first two blocks; blocks are removed by RemovePeer().
func (pool *BlockPool) InvalidateFirstTwoBlocks(err error) {
	first, err1 := pool.BlockAndPeerAtHeight(pool.Height)
	second, err2 := pool.BlockAndPeerAtHeight(pool.Height + 1)

	if err1 == nil {
		pool.RemovePeer(first.peer.ID, err)
	}
	if err2 == nil {
		pool.RemovePeer(second.peer.ID, err)
	}
}

// ProcessedCurrentHeightBlock performs cleanup after a block is processed. It removes the block at pool height and
// the peers that are now short.
func (pool *BlockPool) ProcessedCurrentHeightBlock() {
	peerID, peerOk := pool.blocks[pool.Height]
	if peerOk {
		pool.peers[peerID].RemoveBlock(pool.Height)
	}
	delete(pool.blocks, pool.Height)
	pool.logger.Debug("removed block at height", "height", pool.Height)
	pool.Height++
	pool.removeShortPeers()
}

// RemovePeerAtCurrentHeights checks if a block at pool's height H exists and if not, it removes the
// delivery peer and returns. If a block at height H exists then the check and peer removal is done for H+1.
// This function is called when the FSM is not able to make progress for some time.
// This happens if either the block H or H+1 has not been delivered.
func (pool *BlockPool) RemovePeerAtCurrentHeights(err error) {
	peerID := pool.blocks[pool.Height]
	peer, ok := pool.peers[peerID]
	if ok {
		if _, err := peer.BlockAtHeight(pool.Height); err != nil {
			pool.logger.Info("remove peer that hasn't sent block at pool.Height",
				"peer", peerID, "height", pool.Height)
			pool.RemovePeer(peerID, err)
			return
		}
	}
	peerID = pool.blocks[pool.Height+1]
	peer, ok = pool.peers[peerID]
	if ok {
		if _, err := peer.BlockAtHeight(pool.Height + 1); err != nil {
			pool.logger.Info("remove peer that hasn't sent block at pool.Height+1",
				"peer", peerID, "height", pool.Height+1)
			pool.RemovePeer(peerID, err)
			return
		}
	}
}

// Cleanup performs pool and peer cleanup.
func (pool *BlockPool) Cleanup() {
	for id, peer := range pool.peers {
		peer.Cleanup()
		delete(pool.peers, id)
	}
	pool.plannedRequests = make(map[int64]struct{})
	pool.blocks = make(map[int64]p2p.ID)
	pool.nextRequestHeight = 0
	pool.Height = 0
	pool.MaxPeerHeight = 0
}

// NumPeers returns the number of peers in the pool.
func (pool *BlockPool) NumPeers() int {
	return len(pool.peers)
}

// NeedsBlocks returns true if more blocks are required.
func (pool *BlockPool) NeedsBlocks() bool {
	return len(pool.blocks) < maxNumRequests
}
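A short, hedged sketch of driving the BlockPool above from a bcReactor implementation; the peer ID and heights are invented, and maxNumRequests and maxRequestsPerPeer are the package-level values assumed by the surrounding code:

// A hypothetical helper inside package v1.
func examplePoolUsage(toBcR bcReactor) {
	pool := NewBlockPool(1, toBcR)
	pool.SetLogger(log.NewNopLogger())

	// Record a peer from a status response; this raises MaxPeerHeight.
	if err := pool.UpdatePeer("examplePeer", 0, 100); err != nil {
		return
	}

	// Schedule block requests: each height is assigned to a peer whose
	// [Base, Height] range covers it, up to maxRequestsPerPeer per peer.
	if pool.NeedsBlocks() && !pool.ReachedMaxHeight() {
		pool.MakeNextRequests(maxNumRequests)
	}
}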
@@ -1,691 +0,0 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
type testPeer struct {
|
||||
id p2p.ID
|
||||
base int64
|
||||
height int64
|
||||
}
|
||||
|
||||
type testBcR struct {
|
||||
logger log.Logger
|
||||
}
|
||||
|
||||
type testValues struct {
|
||||
numRequestsSent int
|
||||
}
|
||||
|
||||
var testResults testValues
|
||||
|
||||
func resetPoolTestResults() {
|
||||
testResults.numRequestsSent = 0
|
||||
}
|
||||
|
||||
func (testR *testBcR) sendPeerError(err error, peerID p2p.ID) {
|
||||
}
|
||||
|
||||
func (testR *testBcR) sendStatusRequest() {
|
||||
}
|
||||
|
||||
func (testR *testBcR) sendBlockRequest(peerID p2p.ID, height int64) error {
|
||||
testResults.numRequestsSent++
|
||||
return nil
|
||||
}
|
||||
|
||||
func (testR *testBcR) resetStateTimer(name string, timer **time.Timer, timeout time.Duration) {
|
||||
}
|
||||
|
||||
func (testR *testBcR) switchToConsensus() {
|
||||
|
||||
}
|
||||
|
||||
func newTestBcR() *testBcR {
|
||||
testBcR := &testBcR{logger: log.TestingLogger()}
|
||||
return testBcR
|
||||
}
|
||||
|
||||
type tPBlocks struct {
|
||||
id p2p.ID
|
||||
create bool
|
||||
}
|
||||
|
||||
// Makes a block pool with specified current height, list of peers, block requests and block responses
|
||||
func makeBlockPool(bcr *testBcR, height int64, peers []BpPeer, blocks map[int64]tPBlocks) *BlockPool {
|
||||
bPool := NewBlockPool(height, bcr)
|
||||
bPool.SetLogger(bcr.logger)
|
||||
|
||||
txs := []types.Tx{types.Tx("foo"), types.Tx("bar")}
|
||||
|
||||
var maxH int64
|
||||
for _, p := range peers {
|
||||
if p.Height > maxH {
|
||||
maxH = p.Height
|
||||
}
|
||||
bPool.peers[p.ID] = NewBpPeer(p.ID, p.Base, p.Height, bcr.sendPeerError, nil)
|
||||
bPool.peers[p.ID].SetLogger(bcr.logger)
|
||||
|
||||
}
|
||||
bPool.MaxPeerHeight = maxH
|
||||
for h, p := range blocks {
|
||||
bPool.blocks[h] = p.id
|
||||
bPool.peers[p.id].RequestSent(h)
|
||||
if p.create {
|
||||
// simulate that a block at height h has been received
|
||||
_ = bPool.peers[p.id].AddBlock(types.MakeBlock(h, txs, nil, nil), 100)
|
||||
}
|
||||
}
|
||||
return bPool
|
||||
}
|
||||
|
||||
func assertPeerSetsEquivalent(t *testing.T, set1 map[p2p.ID]*BpPeer, set2 map[p2p.ID]*BpPeer) {
|
||||
assert.Equal(t, len(set1), len(set2))
|
||||
for peerID, peer1 := range set1 {
|
||||
peer2 := set2[peerID]
|
||||
assert.NotNil(t, peer2)
|
||||
assert.Equal(t, peer1.NumPendingBlockRequests, peer2.NumPendingBlockRequests)
|
||||
assert.Equal(t, peer1.Height, peer2.Height)
|
||||
assert.Equal(t, peer1.Base, peer2.Base)
|
||||
assert.Equal(t, len(peer1.blocks), len(peer2.blocks))
|
||||
for h, block1 := range peer1.blocks {
|
||||
block2 := peer2.blocks[h]
|
||||
// block1 and block2 could be nil if a request was made but no block was received
|
||||
assert.Equal(t, block1, block2)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func assertBlockPoolEquivalent(t *testing.T, poolWanted, pool *BlockPool) {
|
||||
assert.Equal(t, poolWanted.blocks, pool.blocks)
|
||||
assertPeerSetsEquivalent(t, poolWanted.peers, pool.peers)
|
||||
assert.Equal(t, poolWanted.MaxPeerHeight, pool.MaxPeerHeight)
|
||||
assert.Equal(t, poolWanted.Height, pool.Height)
|
||||
|
||||
}
|
||||
|
||||
func TestBlockPoolUpdatePeer(t *testing.T) {
|
||||
testBcR := newTestBcR()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
pool *BlockPool
|
||||
args testPeer
|
||||
poolWanted *BlockPool
|
||||
errWanted error
|
||||
}{
|
||||
{
|
||||
name: "add a first short peer",
|
||||
pool: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
|
||||
args: testPeer{"P1", 0, 50},
|
||||
errWanted: errPeerTooShort,
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
|
||||
},
|
||||
{
|
||||
name: "add a first good peer",
|
||||
pool: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
|
||||
args: testPeer{"P1", 0, 101},
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 101}}, map[int64]tPBlocks{}),
|
||||
},
|
||||
{
|
||||
name: "add a first good peer with base",
|
||||
pool: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
|
||||
args: testPeer{"P1", 10, 101},
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Base: 10, Height: 101}}, map[int64]tPBlocks{}),
|
||||
},
|
||||
{
|
||||
name: "increase the height of P1 from 120 to 123",
|
||||
pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}}, map[int64]tPBlocks{}),
|
||||
args: testPeer{"P1", 0, 123},
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 123}}, map[int64]tPBlocks{}),
|
||||
},
|
||||
{
|
||||
name: "decrease the height of P1 from 120 to 110",
|
||||
pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}}, map[int64]tPBlocks{}),
|
||||
args: testPeer{"P1", 0, 110},
|
||||
errWanted: errPeerLowersItsHeight,
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
|
||||
},
|
||||
{
|
||||
name: "decrease the height of P1 from 105 to 102 with blocks",
|
||||
pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 105}},
|
||||
map[int64]tPBlocks{
|
||||
100: {"P1", true}, 101: {"P1", true}, 102: {"P1", true}}),
|
||||
args: testPeer{"P1", 0, 102},
|
||||
errWanted: errPeerLowersItsHeight,
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{},
|
||||
map[int64]tPBlocks{}),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
pool := tt.pool
|
||||
err := pool.UpdatePeer(tt.args.id, tt.args.base, tt.args.height)
|
||||
assert.Equal(t, tt.errWanted, err)
|
||||
assert.Equal(t, tt.poolWanted.blocks, tt.pool.blocks)
|
||||
assertPeerSetsEquivalent(t, tt.poolWanted.peers, tt.pool.peers)
|
||||
assert.Equal(t, tt.poolWanted.MaxPeerHeight, tt.pool.MaxPeerHeight)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlockPoolRemovePeer(t *testing.T) {
|
||||
testBcR := newTestBcR()
|
||||
|
||||
type args struct {
|
||||
peerID p2p.ID
|
||||
err error
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
pool *BlockPool
|
||||
args args
|
||||
poolWanted *BlockPool
|
||||
}{
|
||||
{
|
||||
name: "attempt to delete non-existing peer",
|
||||
pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}}, map[int64]tPBlocks{}),
|
||||
args: args{"P99", nil},
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}}, map[int64]tPBlocks{}),
|
||||
},
|
||||
{
|
||||
name: "delete the only peer without blocks",
|
||||
pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}}, map[int64]tPBlocks{}),
|
||||
args: args{"P1", nil},
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
|
||||
},
|
||||
{
|
||||
name: "delete the shortest of two peers without blocks",
|
||||
pool: makeBlockPool(
|
||||
testBcR,
|
||||
100,
|
||||
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 120}},
|
||||
map[int64]tPBlocks{}),
|
||||
args: args{"P1", nil},
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P2", Height: 120}}, map[int64]tPBlocks{}),
|
||||
},
|
||||
{
|
||||
name: "delete the tallest of two peers without blocks",
|
||||
pool: makeBlockPool(
|
||||
testBcR,
|
||||
100,
|
||||
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 120}},
|
||||
map[int64]tPBlocks{}),
|
||||
args: args{"P2", nil},
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 100}}, map[int64]tPBlocks{}),
|
||||
},
|
||||
{
|
||||
name: "delete the only peer with block requests sent and blocks received",
|
||||
pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}},
|
||||
map[int64]tPBlocks{100: {"P1", true}, 101: {"P1", false}}),
|
||||
args: args{"P1", nil},
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
|
||||
},
|
||||
{
|
||||
name: "delete the shortest of two peers with block requests sent and blocks received",
|
||||
pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}, {ID: "P2", Height: 200}},
|
||||
map[int64]tPBlocks{100: {"P1", true}, 101: {"P1", false}}),
|
||||
args: args{"P1", nil},
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P2", Height: 200}}, map[int64]tPBlocks{}),
|
||||
},
|
||||
{
|
||||
name: "delete the tallest of two peers with block requests sent and blocks received",
|
||||
pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}, {ID: "P2", Height: 110}},
|
||||
map[int64]tPBlocks{100: {"P1", true}, 101: {"P1", false}}),
|
||||
args: args{"P1", nil},
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P2", Height: 110}}, map[int64]tPBlocks{}),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
tt.pool.RemovePeer(tt.args.peerID, tt.args.err)
|
||||
assertBlockPoolEquivalent(t, tt.poolWanted, tt.pool)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlockPoolRemoveShortPeers(t *testing.T) {
|
||||
testBcR := newTestBcR()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
pool *BlockPool
|
||||
poolWanted *BlockPool
|
||||
}{
|
||||
{
|
||||
name: "no short peers",
|
||||
pool: makeBlockPool(testBcR, 100,
|
||||
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 110}, {ID: "P3", Height: 120}}, map[int64]tPBlocks{}),
|
||||
poolWanted: makeBlockPool(testBcR, 100,
|
||||
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 110}, {ID: "P3", Height: 120}}, map[int64]tPBlocks{}),
|
||||
},
|
||||
|
||||
{
|
||||
name: "one short peer",
|
||||
pool: makeBlockPool(testBcR, 100,
|
||||
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 90}, {ID: "P3", Height: 120}}, map[int64]tPBlocks{}),
|
||||
poolWanted: makeBlockPool(testBcR, 100,
|
||||
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P3", Height: 120}}, map[int64]tPBlocks{}),
|
||||
},
|
||||
|
||||
{
|
||||
name: "all short peers",
|
||||
pool: makeBlockPool(testBcR, 100,
|
||||
[]BpPeer{{ID: "P1", Height: 90}, {ID: "P2", Height: 91}, {ID: "P3", Height: 92}}, map[int64]tPBlocks{}),
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
pool := tt.pool
|
||||
pool.removeShortPeers()
|
||||
assertBlockPoolEquivalent(t, tt.poolWanted, tt.pool)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlockPoolSendRequestBatch(t *testing.T) {
|
||||
type testPeerResult struct {
|
||||
id p2p.ID
|
||||
numPendingBlockRequests int
|
||||
}
|
||||
|
||||
testBcR := newTestBcR()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
pool *BlockPool
|
||||
maxRequestsPerPeer int
|
||||
expRequests map[int64]bool
|
||||
expRequestsSent int
|
||||
expPeerResults []testPeerResult
|
||||
}{
|
||||
{
|
||||
name: "one peer - send up to maxRequestsPerPeer block requests",
|
||||
pool: makeBlockPool(testBcR, 10, []BpPeer{{ID: "P1", Height: 100}}, map[int64]tPBlocks{}),
|
||||
maxRequestsPerPeer: 2,
|
||||
expRequests: map[int64]bool{10: true, 11: true},
|
||||
expRequestsSent: 2,
|
||||
expPeerResults: []testPeerResult{{id: "P1", numPendingBlockRequests: 2}},
|
||||
},
|
||||
{
|
||||
name: "multiple peers - stops at gap between height and base",
|
||||
pool: makeBlockPool(testBcR, 10, []BpPeer{
|
||||
{ID: "P1", Base: 1, Height: 12},
|
||||
{ID: "P2", Base: 15, Height: 100},
|
||||
}, map[int64]tPBlocks{}),
|
||||
maxRequestsPerPeer: 10,
|
||||
expRequests: map[int64]bool{10: true, 11: true, 12: true},
|
||||
expRequestsSent: 3,
|
||||
expPeerResults: []testPeerResult{
|
||||
{id: "P1", numPendingBlockRequests: 3},
|
||||
{id: "P2", numPendingBlockRequests: 0},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "n peers - send n*maxRequestsPerPeer block requests",
|
||||
pool: makeBlockPool(
|
||||
testBcR,
|
||||
10,
|
||||
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
|
||||
map[int64]tPBlocks{}),
|
||||
maxRequestsPerPeer: 2,
|
||||
expRequests: map[int64]bool{10: true, 11: true},
|
||||
expRequestsSent: 4,
|
||||
expPeerResults: []testPeerResult{
|
||||
{id: "P1", numPendingBlockRequests: 2},
|
||||
{id: "P2", numPendingBlockRequests: 2}},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
resetPoolTestResults()
|
||||
|
||||
var pool = tt.pool
|
||||
maxRequestsPerPeer = tt.maxRequestsPerPeer
|
||||
pool.MakeNextRequests(10)
|
||||
|
||||
assert.Equal(t, tt.expRequestsSent, testResults.numRequestsSent)
|
||||
for _, tPeer := range tt.expPeerResults {
|
||||
var peer = pool.peers[tPeer.id]
|
||||
assert.NotNil(t, peer)
|
||||
assert.Equal(t, tPeer.numPendingBlockRequests, peer.NumPendingBlockRequests)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlockPoolAddBlock(t *testing.T) {
	testBcR := newTestBcR()
	txs := []types.Tx{types.Tx("foo"), types.Tx("bar")}

	type args struct {
		peerID    p2p.ID
		block     *types.Block
		blockSize int
	}
	tests := []struct {
		name       string
		pool       *BlockPool
		args       args
		poolWanted *BlockPool
		errWanted  error
	}{
		{name: "block from unknown peer",
			pool: makeBlockPool(testBcR, 10, []BpPeer{{ID: "P1", Height: 100}}, map[int64]tPBlocks{}),
			args: args{
				peerID:    "P2",
				block:     types.MakeBlock(int64(10), txs, nil, nil),
				blockSize: 100,
			},
			poolWanted: makeBlockPool(testBcR, 10, []BpPeer{{ID: "P1", Height: 100}}, map[int64]tPBlocks{}),
			errWanted:  errBadDataFromPeer,
		},
		{name: "unexpected block 11 from known peer - waiting for 10",
			pool: makeBlockPool(testBcR, 10,
				[]BpPeer{{ID: "P1", Height: 100}},
				map[int64]tPBlocks{10: {"P1", false}}),
			args: args{
				peerID:    "P1",
				block:     types.MakeBlock(int64(11), txs, nil, nil),
				blockSize: 100,
			},
			poolWanted: makeBlockPool(testBcR, 10,
				[]BpPeer{{ID: "P1", Height: 100}},
				map[int64]tPBlocks{10: {"P1", false}}),
			errWanted: errMissingBlock,
		},
		{name: "unexpected block 10 from known peer - already have 10",
			pool: makeBlockPool(testBcR, 10,
				[]BpPeer{{ID: "P1", Height: 100}},
				map[int64]tPBlocks{10: {"P1", true}, 11: {"P1", false}}),
			args: args{
				peerID:    "P1",
				block:     types.MakeBlock(int64(10), txs, nil, nil),
				blockSize: 100,
			},
			poolWanted: makeBlockPool(testBcR, 10,
				[]BpPeer{{ID: "P1", Height: 100}},
				map[int64]tPBlocks{10: {"P1", true}, 11: {"P1", false}}),
			errWanted: errDuplicateBlock,
		},
		{name: "unexpected block 10 from known peer P2 - expected 10 to come from P1",
			pool: makeBlockPool(testBcR, 10,
				[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
				map[int64]tPBlocks{10: {"P1", false}}),
			args: args{
				peerID:    "P2",
				block:     types.MakeBlock(int64(10), txs, nil, nil),
				blockSize: 100,
			},
			poolWanted: makeBlockPool(testBcR, 10,
				[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
				map[int64]tPBlocks{10: {"P1", false}}),
			errWanted: errBadDataFromPeer,
		},
		{name: "expected block from known peer",
			pool: makeBlockPool(testBcR, 10,
				[]BpPeer{{ID: "P1", Height: 100}},
				map[int64]tPBlocks{10: {"P1", false}}),
			args: args{
				peerID:    "P1",
				block:     types.MakeBlock(int64(10), txs, nil, nil),
				blockSize: 100,
			},
			poolWanted: makeBlockPool(testBcR, 10,
				[]BpPeer{{ID: "P1", Height: 100}},
				map[int64]tPBlocks{10: {"P1", true}}),
			errWanted: nil,
		},
	}

	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			err := tt.pool.AddBlock(tt.args.peerID, tt.args.block, tt.args.blockSize)
			assert.Equal(t, tt.errWanted, err)
			assertBlockPoolEquivalent(t, tt.poolWanted, tt.pool)
		})
	}
}

func TestBlockPoolFirstTwoBlocksAndPeers(t *testing.T) {
	testBcR := newTestBcR()

	tests := []struct {
		name         string
		pool         *BlockPool
		firstWanted  int64
		secondWanted int64
		errWanted    error
	}{
		{
			name: "both blocks missing",
			pool: makeBlockPool(testBcR, 10,
				[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
				map[int64]tPBlocks{15: {"P1", true}, 16: {"P2", true}}),
			errWanted: errMissingBlock,
		},
		{
			name: "second block missing",
			pool: makeBlockPool(testBcR, 15,
				[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
				map[int64]tPBlocks{15: {"P1", true}, 18: {"P2", true}}),
			firstWanted: 15,
			errWanted:   errMissingBlock,
		},
		{
			name: "first block missing",
			pool: makeBlockPool(testBcR, 15,
				[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
				map[int64]tPBlocks{16: {"P2", true}, 18: {"P2", true}}),
			secondWanted: 16,
			errWanted:    errMissingBlock,
		},
		{
			name: "both blocks present",
			pool: makeBlockPool(testBcR, 10,
				[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
				map[int64]tPBlocks{10: {"P1", true}, 11: {"P2", true}}),
			firstWanted:  10,
			secondWanted: 11,
		},
	}

	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			pool := tt.pool
			gotFirst, gotSecond, err := pool.FirstTwoBlocksAndPeers()
			assert.Equal(t, tt.errWanted, err)

			if tt.firstWanted != 0 {
				peer := pool.blocks[tt.firstWanted]
				block := pool.peers[peer].blocks[tt.firstWanted]
				assert.Equal(t, block, gotFirst.block,
					"BlockPool.FirstTwoBlocksAndPeers() gotFirst = %v, want %v",
					gotFirst.block.Height, tt.firstWanted)
			}

			if tt.secondWanted != 0 {
				peer := pool.blocks[tt.secondWanted]
				block := pool.peers[peer].blocks[tt.secondWanted]
				assert.Equal(t, block, gotSecond.block,
					"BlockPool.FirstTwoBlocksAndPeers() gotSecond = %v, want %v",
					gotSecond.block.Height, tt.secondWanted)
			}
		})
	}
}

func TestBlockPoolInvalidateFirstTwoBlocks(t *testing.T) {
	testBcR := newTestBcR()

	tests := []struct {
		name       string
		pool       *BlockPool
		poolWanted *BlockPool
	}{
		{
			name: "both blocks missing",
			pool: makeBlockPool(testBcR, 10,
				[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
				map[int64]tPBlocks{15: {"P1", true}, 16: {"P2", true}}),
			poolWanted: makeBlockPool(testBcR, 10,
				[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
				map[int64]tPBlocks{15: {"P1", true}, 16: {"P2", true}}),
		},
		{
			name: "second block missing",
			pool: makeBlockPool(testBcR, 15,
				[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
				map[int64]tPBlocks{15: {"P1", true}, 18: {"P2", true}}),
			poolWanted: makeBlockPool(testBcR, 15,
				[]BpPeer{{ID: "P2", Height: 100}},
				map[int64]tPBlocks{18: {"P2", true}}),
		},
		{
			name: "first block missing",
			pool: makeBlockPool(testBcR, 15,
				[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
				map[int64]tPBlocks{18: {"P1", true}, 16: {"P2", true}}),
			poolWanted: makeBlockPool(testBcR, 15,
				[]BpPeer{{ID: "P1", Height: 100}},
				map[int64]tPBlocks{18: {"P1", true}}),
		},
		{
			name: "both blocks present",
			pool: makeBlockPool(testBcR, 10,
				[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
				map[int64]tPBlocks{10: {"P1", true}, 11: {"P2", true}}),
			poolWanted: makeBlockPool(testBcR, 10,
				[]BpPeer{},
				map[int64]tPBlocks{}),
		},
	}

	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			tt.pool.InvalidateFirstTwoBlocks(errNoPeerResponse)
			assertBlockPoolEquivalent(t, tt.poolWanted, tt.pool)
		})
	}
}

func TestProcessedCurrentHeightBlock(t *testing.T) {
	testBcR := newTestBcR()

	tests := []struct {
		name       string
		pool       *BlockPool
		poolWanted *BlockPool
	}{
		{
			name: "one peer",
			pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}},
				map[int64]tPBlocks{100: {"P1", true}, 101: {"P1", true}}),
			poolWanted: makeBlockPool(testBcR, 101, []BpPeer{{ID: "P1", Height: 120}},
				map[int64]tPBlocks{101: {"P1", true}}),
		},
		{
			name: "multiple peers",
			pool: makeBlockPool(testBcR, 100,
				[]BpPeer{{ID: "P1", Height: 120}, {ID: "P2", Height: 120}, {ID: "P3", Height: 130}},
				map[int64]tPBlocks{
					100: {"P1", true}, 104: {"P1", true}, 105: {"P1", false},
					101: {"P2", true}, 103: {"P2", false},
					102: {"P3", true}, 106: {"P3", true}}),
			poolWanted: makeBlockPool(testBcR, 101,
				[]BpPeer{{ID: "P1", Height: 120}, {ID: "P2", Height: 120}, {ID: "P3", Height: 130}},
				map[int64]tPBlocks{
					104: {"P1", true}, 105: {"P1", false},
					101: {"P2", true}, 103: {"P2", false},
					102: {"P3", true}, 106: {"P3", true}}),
		},
	}

	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			tt.pool.ProcessedCurrentHeightBlock()
			assertBlockPoolEquivalent(t, tt.poolWanted, tt.pool)
		})
	}
}

func TestRemovePeerAtCurrentHeight(t *testing.T) {
	testBcR := newTestBcR()

	tests := []struct {
		name       string
		pool       *BlockPool
		poolWanted *BlockPool
	}{
		{
			name: "one peer, remove peer for block at H",
			pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}},
				map[int64]tPBlocks{100: {"P1", false}, 101: {"P1", true}}),
			poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
		},
		{
			name: "one peer, remove peer for block at H+1",
			pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}},
				map[int64]tPBlocks{100: {"P1", true}, 101: {"P1", false}}),
			poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
		},
		{
			name: "multiple peers, remove peer for block at H",
			pool: makeBlockPool(testBcR, 100,
				[]BpPeer{{ID: "P1", Height: 120}, {ID: "P2", Height: 120}, {ID: "P3", Height: 130}},
				map[int64]tPBlocks{
					100: {"P1", false}, 104: {"P1", true}, 105: {"P1", false},
					101: {"P2", true}, 103: {"P2", false},
					102: {"P3", true}, 106: {"P3", true}}),
			poolWanted: makeBlockPool(testBcR, 100,
				[]BpPeer{{ID: "P2", Height: 120}, {ID: "P3", Height: 130}},
				map[int64]tPBlocks{
					101: {"P2", true}, 103: {"P2", false},
					102: {"P3", true}, 106: {"P3", true}}),
		},
		{
			name: "multiple peers, remove peer for block at H+1",
			pool: makeBlockPool(testBcR, 100,
				[]BpPeer{{ID: "P1", Height: 120}, {ID: "P2", Height: 120}, {ID: "P3", Height: 130}},
				map[int64]tPBlocks{
					100: {"P1", true}, 104: {"P1", true}, 105: {"P1", false},
					101: {"P2", false}, 103: {"P2", false},
					102: {"P3", true}, 106: {"P3", true}}),
			poolWanted: makeBlockPool(testBcR, 100,
				[]BpPeer{{ID: "P1", Height: 120}, {ID: "P3", Height: 130}},
				map[int64]tPBlocks{
					100: {"P1", true}, 104: {"P1", true}, 105: {"P1", false},
					102: {"P3", true}, 106: {"P3", true}}),
		},
	}

	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			tt.pool.RemovePeerAtCurrentHeights(errNoPeerResponse)
			assertBlockPoolEquivalent(t, tt.poolWanted, tt.pool)
		})
	}
}

@@ -1,569 +0,0 @@
package v1

import (
	"fmt"
	"reflect"
	"time"

	"github.com/tendermint/tendermint/behaviour"
	bc "github.com/tendermint/tendermint/blockchain"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/p2p"
	bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
	sm "github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/store"
	"github.com/tendermint/tendermint/types"
)

const (
	// BlockchainChannel is a channel for blocks and status updates (`BlockStore` height)
	BlockchainChannel = byte(0x40)

	trySyncIntervalMS = 10
	trySendIntervalMS = 10

	// ask for best height every 10s
	statusUpdateIntervalSeconds = 10
)

var (
	// Maximum number of requests that can be pending per peer, i.e. for which requests have been sent but blocks
	// have not been received.
	maxRequestsPerPeer = 20
	// Maximum number of block requests for the reactor, pending or for which blocks have been received.
	maxNumRequests = 64
)

type consensusReactor interface {
	// for when we switch from the blockchain reactor and fast sync to
	// the consensus machine
	SwitchToConsensus(state sm.State, skipWAL bool)
}

// BlockchainReactor handles long-term catchup syncing.
type BlockchainReactor struct {
	p2p.BaseReactor

	initialState sm.State // immutable
	state        sm.State

	blockExec *sm.BlockExecutor
	store     *store.BlockStore

	fastSync    bool
	stateSynced bool

	fsm          *BcReactorFSM
	blocksSynced uint64

	// Receive goroutine forwards messages to this channel to be processed in the context of the poolRoutine.
	messagesForFSMCh chan bcReactorMessage

	// Switch goroutine may send RemovePeer to the blockchain reactor. This is an error message that is relayed
	// to this channel to be processed in the context of the poolRoutine.
	errorsForFSMCh chan bcReactorMessage

	// This channel is used by the FSM and indirectly the block pool to report errors to the blockchain reactor and
	// the switch.
	eventsFromFSMCh chan bcFsmMessage

	swReporter *behaviour.SwitchReporter
}

// NewBlockchainReactor returns a new reactor instance.
func NewBlockchainReactor(state sm.State, blockExec *sm.BlockExecutor, store *store.BlockStore,
	fastSync bool) *BlockchainReactor {

	if state.LastBlockHeight != store.Height() {
		panic(fmt.Sprintf("state (%v) and store (%v) height mismatch", state.LastBlockHeight,
			store.Height()))
	}

	const capacity = 1000
	eventsFromFSMCh := make(chan bcFsmMessage, capacity)
	messagesForFSMCh := make(chan bcReactorMessage, capacity)
	errorsForFSMCh := make(chan bcReactorMessage, capacity)

	startHeight := store.Height() + 1
	if startHeight == 1 {
		startHeight = state.InitialHeight
	}
	bcR := &BlockchainReactor{
		initialState:     state,
		state:            state,
		blockExec:        blockExec,
		fastSync:         fastSync,
		store:            store,
		messagesForFSMCh: messagesForFSMCh,
		eventsFromFSMCh:  eventsFromFSMCh,
		errorsForFSMCh:   errorsForFSMCh,
	}
	fsm := NewFSM(startHeight, bcR)
	bcR.fsm = fsm
	bcR.BaseReactor = *p2p.NewBaseReactor("BlockchainReactor", bcR)
	// bcR.swReporter = behaviour.NewSwitchReporter(bcR.BaseReactor.Switch)

	return bcR
}

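// Illustrative usage sketch (added for exposition, not part of the original
// file): wiring the reactor at node startup. The identifiers state, blockExec,
// blockStore and logger are assumed to be supplied by the node setup code.
//
//	bcR := NewBlockchainReactor(state, blockExec, blockStore, true) // fastSync enabled
//	bcR.SetLogger(logger.With("module", "blockchain"))
//	// The p2p switch then starts the reactor; OnStart launches poolRoutine
//	// when fast sync is enabled.
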
// bcReactorMessage is used by the reactor to send messages to the FSM.
type bcReactorMessage struct {
	event bReactorEvent
	data  bReactorEventData
}

type bFsmEvent uint

const (
	// message type events
	peerErrorEv = iota + 1
	syncFinishedEv
)

type bFsmEventData struct {
	peerID p2p.ID
	err    error
}

// bcFsmMessage is used by the FSM to send messages to the reactor.
type bcFsmMessage struct {
	event bFsmEvent
	data  bFsmEventData
}

// SetLogger implements service.Service by setting the logger on reactor and pool.
func (bcR *BlockchainReactor) SetLogger(l log.Logger) {
	bcR.BaseService.Logger = l
	bcR.fsm.SetLogger(l)
}

// OnStart implements service.Service.
func (bcR *BlockchainReactor) OnStart() error {
	bcR.swReporter = behaviour.NewSwitchReporter(bcR.BaseReactor.Switch)
	if bcR.fastSync {
		go bcR.poolRoutine()
	}
	return nil
}

// OnStop implements service.Service.
func (bcR *BlockchainReactor) OnStop() {
	_ = bcR.Stop()
}

// SwitchToFastSync is called by the state sync reactor when switching to fast sync.
func (bcR *BlockchainReactor) SwitchToFastSync(state sm.State) error {
	bcR.fastSync = true
	bcR.initialState = state
	bcR.state = state
	bcR.stateSynced = true

	bcR.fsm = NewFSM(state.LastBlockHeight+1, bcR)
	bcR.fsm.SetLogger(bcR.Logger)
	go bcR.poolRoutine()
	return nil
}

// GetChannels implements Reactor.
func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
	return []*p2p.ChannelDescriptor{
		{
			ID:                  BlockchainChannel,
			Priority:            10,
			SendQueueCapacity:   2000,
			RecvBufferCapacity:  50 * 4096,
			RecvMessageCapacity: bc.MaxMsgSize,
		},
	}
}

// AddPeer implements Reactor by sending our state to the peer.
func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) {
	msgBytes, err := bc.EncodeMsg(&bcproto.StatusResponse{
		Base:   bcR.store.Base(),
		Height: bcR.store.Height(),
	})
	if err != nil {
		bcR.Logger.Error("could not convert msg to protobuf", "err", err)
		return
	}
	peer.Send(BlockchainChannel, msgBytes)
	// It's OK if the send fails; we will try again later in poolRoutine.

	// The peer is added to the pool once we receive the first
	// bcStatusResponseMessage from it and call pool.updatePeer().
}

// sendBlockToPeer loads a block and sends it to the requesting peer.
// If the block doesn't exist, a bcNoBlockResponseMessage is sent.
// If all nodes are honest, no node should be requesting a block that doesn't exist.
func (bcR *BlockchainReactor) sendBlockToPeer(msg *bcproto.BlockRequest,
	src p2p.Peer) (queued bool) {

	block := bcR.store.LoadBlock(msg.Height)
	if block != nil {
		pbbi, err := block.ToProto()
		if err != nil {
			bcR.Logger.Error("Could not send block message to peer", "err", err)
			return false
		}
		msgBytes, err := bc.EncodeMsg(&bcproto.BlockResponse{Block: pbbi})
		if err != nil {
			bcR.Logger.Error("unable to marshal msg", "err", err)
			return false
		}
		return src.TrySend(BlockchainChannel, msgBytes)
	}

	bcR.Logger.Info("peer asking for a block we don't have", "src", src, "height", msg.Height)

	msgBytes, err := bc.EncodeMsg(&bcproto.NoBlockResponse{Height: msg.Height})
	if err != nil {
		bcR.Logger.Error("unable to marshal msg", "err", err)
		return false
	}
	return src.TrySend(BlockchainChannel, msgBytes)
}

func (bcR *BlockchainReactor) sendStatusResponseToPeer(msg *bcproto.StatusRequest, src p2p.Peer) (queued bool) {
	msgBytes, err := bc.EncodeMsg(&bcproto.StatusResponse{
		Base:   bcR.store.Base(),
		Height: bcR.store.Height(),
	})
	if err != nil {
		bcR.Logger.Error("unable to marshal msg", "err", err)
		return false
	}

	return src.TrySend(BlockchainChannel, msgBytes)
}

// RemovePeer implements Reactor by removing the peer from the pool.
func (bcR *BlockchainReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
	msgData := bcReactorMessage{
		event: peerRemoveEv,
		data: bReactorEventData{
			peerID: peer.ID(),
			err:    errSwitchRemovesPeer,
		},
	}
	bcR.errorsForFSMCh <- msgData
}

// Receive implements Reactor by handling the message types below.
func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
	msg, err := bc.DecodeMsg(msgBytes)
	if err != nil {
		bcR.Logger.Error("error decoding message", "src", src, "chId", chID, "err", err)
		_ = bcR.swReporter.Report(behaviour.BadMessage(src.ID(), err.Error()))
		return
	}

	if err = bc.ValidateMsg(msg); err != nil {
		bcR.Logger.Error("peer sent us invalid msg", "peer", src, "msg", msg, "err", err)
		_ = bcR.swReporter.Report(behaviour.BadMessage(src.ID(), err.Error()))
		return
	}

	bcR.Logger.Debug("Receive", "src", src, "chID", chID, "msg", msg)

	switch msg := msg.(type) {
	case *bcproto.BlockRequest:
		if queued := bcR.sendBlockToPeer(msg, src); !queued {
			// Unfortunately not queued since the queue is full.
			bcR.Logger.Error("Could not send block message to peer", "src", src, "height", msg.Height)
		}

	case *bcproto.StatusRequest:
		// Send peer our state.
		if queued := bcR.sendStatusResponseToPeer(msg, src); !queued {
			// Unfortunately not queued since the queue is full.
			bcR.Logger.Error("Could not send status message to peer", "src", src)
		}

	case *bcproto.BlockResponse:
		bi, err := types.BlockFromProto(msg.Block)
		if err != nil {
			bcR.Logger.Error("error converting block from protobuf", "err", err)
			return
		}
		msgForFSM := bcReactorMessage{
			event: blockResponseEv,
			data: bReactorEventData{
				peerID: src.ID(),
				height: bi.Height,
				block:  bi,
				length: len(msgBytes),
			},
		}
		bcR.Logger.Info("Received", "src", src, "height", bi.Height)
		bcR.messagesForFSMCh <- msgForFSM

	case *bcproto.NoBlockResponse:
		msgForFSM := bcReactorMessage{
			event: noBlockResponseEv,
			data: bReactorEventData{
				peerID: src.ID(),
				height: msg.Height,
			},
		}
		bcR.Logger.Debug("Peer does not have requested block", "peer", src, "height", msg.Height)
		bcR.messagesForFSMCh <- msgForFSM

	case *bcproto.StatusResponse:
		// Got a peer status. Unverified.
		msgForFSM := bcReactorMessage{
			event: statusResponseEv,
			data: bReactorEventData{
				peerID: src.ID(),
				height: msg.Height,
				length: len(msgBytes),
			},
		}
		bcR.messagesForFSMCh <- msgForFSM

	default:
		bcR.Logger.Error(fmt.Sprintf("unknown message type %v", reflect.TypeOf(msg)))
	}
}

// processBlocksRoutine processes blocks until signaled to stop over the stopProcessing channel.
func (bcR *BlockchainReactor) processBlocksRoutine(stopProcessing chan struct{}) {

	processReceivedBlockTicker := time.NewTicker(trySyncIntervalMS * time.Millisecond)
	doProcessBlockCh := make(chan struct{}, 1)

	lastHundred := time.Now()
	lastRate := 0.0

ForLoop:
	for {
		select {
		case <-stopProcessing:
			bcR.Logger.Info("finishing block execution")
			break ForLoop
		case <-processReceivedBlockTicker.C: // try to execute blocks
			select {
			case doProcessBlockCh <- struct{}{}:
			default:
			}
		case <-doProcessBlockCh:
			for {
				err := bcR.processBlock()
				if err == errMissingBlock {
					break
				}
				// Notify FSM of block processing result.
				msgForFSM := bcReactorMessage{
					event: processedBlockEv,
					data: bReactorEventData{
						err: err,
					},
				}
				_ = bcR.fsm.Handle(&msgForFSM)

				if err != nil {
					break
				}

				bcR.blocksSynced++
				if bcR.blocksSynced%100 == 0 {
					lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
					height, maxPeerHeight := bcR.fsm.Status()
					bcR.Logger.Info("Fast Sync Rate", "height", height,
						"max_peer_height", maxPeerHeight, "blocks/s", lastRate)
					lastHundred = time.Now()
				}
			}
		}
	}
}

// poolRoutine receives and handles messages from the Receive() routine and from the FSM.
func (bcR *BlockchainReactor) poolRoutine() {

	bcR.fsm.Start()

	sendBlockRequestTicker := time.NewTicker(trySendIntervalMS * time.Millisecond)
	statusUpdateTicker := time.NewTicker(statusUpdateIntervalSeconds * time.Second)

	stopProcessing := make(chan struct{}, 1)
	go bcR.processBlocksRoutine(stopProcessing)

ForLoop:
	for {
		select {

		case <-sendBlockRequestTicker.C:
			if !bcR.fsm.NeedsBlocks() {
				continue
			}
			_ = bcR.fsm.Handle(&bcReactorMessage{
				event: makeRequestsEv,
				data: bReactorEventData{
					maxNumRequests: maxNumRequests}})

		case <-statusUpdateTicker.C:
			// Ask for status updates.
			go bcR.sendStatusRequest()

		case msg := <-bcR.messagesForFSMCh:
			// Sent from the Receive() routine when status (statusResponseEv) and
			// block (blockResponseEv) response events are received.
			_ = bcR.fsm.Handle(&msg)

		case msg := <-bcR.errorsForFSMCh:
			// Sent from the switch.RemovePeer() routine (RemovePeerEv) and
			// FSM state timer expiry routine (stateTimeoutEv).
			_ = bcR.fsm.Handle(&msg)

		case msg := <-bcR.eventsFromFSMCh:
			switch msg.event {
			case syncFinishedEv:
				stopProcessing <- struct{}{}
				// Sent from the FSM when it enters the finished state.
				break ForLoop
			case peerErrorEv:
				// Sent from the FSM when it detects a peer error.
				bcR.reportPeerErrorToSwitch(msg.data.err, msg.data.peerID)
				if msg.data.err == errNoPeerResponse {
					// Sent from the peer timeout handler routine.
					_ = bcR.fsm.Handle(&bcReactorMessage{
						event: peerRemoveEv,
						data: bReactorEventData{
							peerID: msg.data.peerID,
							err:    msg.data.err,
						},
					})
				}
				// else {
				//	For slow peers, or errors due to blocks received from the wrong peer,
				//	the FSM has already removed the peers.
				// }
			default:
				bcR.Logger.Error("Event from FSM not supported", "type", msg.event)
			}

		case <-bcR.Quit():
			break ForLoop
		}
	}
}

func (bcR *BlockchainReactor) reportPeerErrorToSwitch(err error, peerID p2p.ID) {
	peer := bcR.Switch.Peers().Get(peerID)
	if peer != nil {
		_ = bcR.swReporter.Report(behaviour.BadMessage(peerID, err.Error()))
	}
}

func (bcR *BlockchainReactor) processBlock() error {

	first, second, err := bcR.fsm.FirstTwoBlocks()
	if err != nil {
		// We need both blocks to sync the first one.
		return err
	}

	chainID := bcR.initialState.ChainID

	firstParts := first.MakePartSet(types.BlockPartSizeBytes)
	firstPartSetHeader := firstParts.Header()
	firstID := types.BlockID{Hash: first.Hash(), PartSetHeader: firstPartSetHeader}
	// Finally, verify the first block using the second's commit.
	// NOTE: we can probably make this more efficient, but note that calling
	// first.Hash() doesn't verify the tx contents, so MakePartSet() is
	// currently necessary.
	err = bcR.state.Validators.VerifyCommitLight(chainID, firstID, first.Height, second.LastCommit)
	if err != nil {
		bcR.Logger.Error("error during commit verification", "err", err,
			"first", first.Height, "second", second.Height)
		return errBlockVerificationFailure
	}

	bcR.store.SaveBlock(first, firstParts, second.LastCommit)

	bcR.state, _, err = bcR.blockExec.ApplyBlock(bcR.state, firstID, first)
	if err != nil {
		panic(fmt.Sprintf("failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
	}

	return nil
}

// Implements bcRNotifier.
// sendStatusRequest broadcasts the `BlockStore` height.
func (bcR *BlockchainReactor) sendStatusRequest() {
	msgBytes, err := bc.EncodeMsg(&bcproto.StatusRequest{})
	if err != nil {
		panic(err)
	}
	bcR.Switch.Broadcast(BlockchainChannel, msgBytes)
}

// Implements bcRNotifier.
// sendBlockRequest sends a `BlockRequest` for the given height.
func (bcR *BlockchainReactor) sendBlockRequest(peerID p2p.ID, height int64) error {
	peer := bcR.Switch.Peers().Get(peerID)
	if peer == nil {
		return errNilPeerForBlockRequest
	}

	msgBytes, err := bc.EncodeMsg(&bcproto.BlockRequest{Height: height})
	if err != nil {
		return err
	}
	queued := peer.TrySend(BlockchainChannel, msgBytes)
	if !queued {
		return errSendQueueFull
	}
	return nil
}

// Implements bcRNotifier.
func (bcR *BlockchainReactor) switchToConsensus() {
	conR, ok := bcR.Switch.Reactor("CONSENSUS").(consensusReactor)
	if ok {
		conR.SwitchToConsensus(bcR.state, bcR.blocksSynced > 0 || bcR.stateSynced)
		bcR.eventsFromFSMCh <- bcFsmMessage{event: syncFinishedEv}
	}
	// else {
	//	Should only happen during testing.
	// }
}

// Implements bcRNotifier.
// Called by the FSM and the pool:
// - the pool calls it when it detects a slow peer or when a peer times out
// - the FSM calls it when:
//   - adding a block (addBlock) fails
//   - reactor processing of a block reports failure and the FSM sends back the peers of the first and second blocks
func (bcR *BlockchainReactor) sendPeerError(err error, peerID p2p.ID) {
	bcR.Logger.Info("sendPeerError:", "peer", peerID, "error", err)
	msgData := bcFsmMessage{
		event: peerErrorEv,
		data: bFsmEventData{
			peerID: peerID,
			err:    err,
		},
	}
	bcR.eventsFromFSMCh <- msgData
}

// Implements bcRNotifier.
func (bcR *BlockchainReactor) resetStateTimer(name string, timer **time.Timer, timeout time.Duration) {
	if timer == nil {
		panic("nil timer pointer parameter")
	}
	if *timer == nil {
		*timer = time.AfterFunc(timeout, func() {
			msg := bcReactorMessage{
				event: stateTimeoutEv,
				data: bReactorEventData{
					stateName: name,
				},
			}
			bcR.errorsForFSMCh <- msg
		})
	} else {
		(*timer).Reset(timeout)
	}
}

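// Sketch of the lazily-initialized timer pattern above (added for exposition,
// not part of the original file): the **time.Timer parameter lets the first
// call allocate the timer with time.AfterFunc, while later calls merely Reset
// it, so the FSM keeps a single stateTimer field across states.
//
//	var stateTimer *time.Timer
//	bcR.resetStateTimer("waitForBlock", &stateTimer, 10*time.Second) // allocates the timer
//	bcR.resetStateTimer("waitForBlock", &stateTimer, 10*time.Second) // resets the same timer
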
@@ -1,462 +0,0 @@
package v1

import (
	"errors"
	"fmt"
	"sync"
	"time"

	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/types"
)

// Blockchain Reactor State
type bcReactorFSMState struct {
	name string

	// handle processes an event in the current state and returns the next state
	handle func(*BcReactorFSM, bReactorEvent, bReactorEventData) (next *bcReactorFSMState, err error)
	// called when entering the state
	enter func(fsm *BcReactorFSM)

	// timeout to ensure the FSM is not stuck in a state forever;
	// the timer is owned and run by the fsm instance
	timeout time.Duration
}

func (s *bcReactorFSMState) String() string {
	return s.name
}

// BcReactorFSM is the data structure for the Blockchain Reactor State Machine.
type BcReactorFSM struct {
	logger log.Logger
	mtx    sync.Mutex

	startTime time.Time

	state      *bcReactorFSMState
	stateTimer *time.Timer
	pool       *BlockPool

	// interface used to call the Blockchain reactor to send StatusRequest, BlockRequest, report errors, etc.
	toBcR bcReactor
}

// NewFSM creates a new reactor FSM.
func NewFSM(height int64, toBcR bcReactor) *BcReactorFSM {
	return &BcReactorFSM{
		state:     unknown,
		startTime: time.Now(),
		pool:      NewBlockPool(height, toBcR),
		toBcR:     toBcR,
	}
}

// bReactorEventData is part of the message sent by the reactor to the FSM and used by the state handlers.
type bReactorEventData struct {
	peerID         p2p.ID
	err            error        // for peer error: timeout, slow; for processed block event if an error occurred
	base           int64        // for status response
	height         int64        // for status response; for processed block event
	block          *types.Block // for block response
	stateName      string       // for state timeout events
	length         int          // for block response event, length of received block, used to detect slow peers
	maxNumRequests int          // for request needed event, maximum number of pending requests
}

// Blockchain Reactor Events (the input to the state machine)
type bReactorEvent uint

const (
	// message type events
	startFSMEv = iota + 1
	statusResponseEv
	blockResponseEv
	noBlockResponseEv
	processedBlockEv
	makeRequestsEv
	stopFSMEv

	// other events
	peerRemoveEv = iota + 256
	stateTimeoutEv
)

func (msg *bcReactorMessage) String() string {
	var dataStr string

	switch msg.event {
	case startFSMEv:
		dataStr = ""
	case statusResponseEv:
		dataStr = fmt.Sprintf("peer=%v base=%v height=%v", msg.data.peerID, msg.data.base, msg.data.height)
	case blockResponseEv:
		dataStr = fmt.Sprintf("peer=%v block.height=%v length=%v",
			msg.data.peerID, msg.data.block.Height, msg.data.length)
	case noBlockResponseEv:
		dataStr = fmt.Sprintf("peer=%v requested height=%v",
			msg.data.peerID, msg.data.height)
	case processedBlockEv:
		dataStr = fmt.Sprintf("error=%v", msg.data.err)
	case makeRequestsEv:
		dataStr = ""
	case stopFSMEv:
		dataStr = ""
	case peerRemoveEv:
		dataStr = fmt.Sprintf("peer: %v is being removed by the switch", msg.data.peerID)
	case stateTimeoutEv:
		dataStr = fmt.Sprintf("state=%v", msg.data.stateName)
	default:
		dataStr = "cannot interpret message data"
	}

	return fmt.Sprintf("%v: %v", msg.event, dataStr)
}

func (ev bReactorEvent) String() string {
	switch ev {
	case startFSMEv:
		return "startFSMEv"
	case statusResponseEv:
		return "statusResponseEv"
	case blockResponseEv:
		return "blockResponseEv"
	case noBlockResponseEv:
		return "noBlockResponseEv"
	case processedBlockEv:
		return "processedBlockEv"
	case makeRequestsEv:
		return "makeRequestsEv"
	case stopFSMEv:
		return "stopFSMEv"
	case peerRemoveEv:
		return "peerRemoveEv"
	case stateTimeoutEv:
		return "stateTimeoutEv"
	default:
		return "event unknown"
	}
}

// states
var (
	unknown      *bcReactorFSMState
	waitForPeer  *bcReactorFSMState
	waitForBlock *bcReactorFSMState
	finished     *bcReactorFSMState
)

// timeouts for state timers
const (
	waitForPeerTimeout                 = 3 * time.Second
	waitForBlockAtCurrentHeightTimeout = 10 * time.Second
)

// errors
var (
	// internal to the package
	errNoErrorFinished        = errors.New("fast sync is finished")
	errInvalidEvent           = errors.New("invalid event in current state")
	errMissingBlock           = errors.New("missing blocks")
	errNilPeerForBlockRequest = errors.New("peer for block request does not exist in the switch")
	errSendQueueFull          = errors.New("block request not made, send-queue is full")
	errPeerTooShort           = errors.New("peer height too low, old peer removed / new peer not added")
	errSwitchRemovesPeer      = errors.New("switch is removing peer")
	errTimeoutEventWrongState = errors.New("timeout event for a state different than the current one")
	errNoTallerPeer           = errors.New("fast sync timed out on waiting for a peer taller than this node")

	// reported eventually to the switch
	// on handle return
	errPeerLowersItsHeight = errors.New("fast sync peer reports a height lower than previous")
	// on handle return
	errNoPeerResponseForCurrentHeights = errors.New("fast sync timed out on peer block response for current heights")
	errNoPeerResponse                  = errors.New("fast sync timed out on peer block response")
	errBadDataFromPeer                 = errors.New("fast sync received block from wrong peer or block is bad")
	errDuplicateBlock                  = errors.New("fast sync received duplicate block from peer")
	errBlockVerificationFailure        = errors.New("fast sync block verification failure")
	errSlowPeer                        = errors.New("fast sync peer is not sending us data fast enough")
)

func init() {
	unknown = &bcReactorFSMState{
		name: "unknown",
		handle: func(fsm *BcReactorFSM, ev bReactorEvent, data bReactorEventData) (*bcReactorFSMState, error) {
			switch ev {
			case startFSMEv:
				// Broadcast Status message. Currently doesn't return a non-nil error.
				fsm.toBcR.sendStatusRequest()
				return waitForPeer, nil

			case stopFSMEv:
				return finished, errNoErrorFinished

			default:
				return unknown, errInvalidEvent
			}
		},
	}

	waitForPeer = &bcReactorFSMState{
		name:    "waitForPeer",
		timeout: waitForPeerTimeout,
		enter: func(fsm *BcReactorFSM) {
			// Start (or reset) the state timeout timer when entering the state.
			fsm.resetStateTimer()
		},
		handle: func(fsm *BcReactorFSM, ev bReactorEvent, data bReactorEventData) (*bcReactorFSMState, error) {
			switch ev {
			case stateTimeoutEv:
				if data.stateName != "waitForPeer" {
					fsm.logger.Error("received a state timeout event for different state",
						"state", data.stateName)
					return waitForPeer, errTimeoutEventWrongState
				}
				// There was no statusResponse received from any peer.
				// Should we send the status request again?
				return finished, errNoTallerPeer

			case statusResponseEv:
				if err := fsm.pool.UpdatePeer(data.peerID, data.base, data.height); err != nil {
					if fsm.pool.NumPeers() == 0 {
						return waitForPeer, err
					}
				}
				if fsm.stateTimer != nil {
					fsm.stateTimer.Stop()
				}
				return waitForBlock, nil

			case stopFSMEv:
				if fsm.stateTimer != nil {
					fsm.stateTimer.Stop()
				}
				return finished, errNoErrorFinished

			default:
				return waitForPeer, errInvalidEvent
			}
		},
	}

	waitForBlock = &bcReactorFSMState{
		name:    "waitForBlock",
		timeout: waitForBlockAtCurrentHeightTimeout,
		enter: func(fsm *BcReactorFSM) {
			// Start (or reset) the state timeout timer when entering the state.
			fsm.resetStateTimer()
		},
		handle: func(fsm *BcReactorFSM, ev bReactorEvent, data bReactorEventData) (*bcReactorFSMState, error) {
			switch ev {

			case statusResponseEv:
				err := fsm.pool.UpdatePeer(data.peerID, data.base, data.height)
				if fsm.pool.NumPeers() == 0 {
					return waitForPeer, err
				}
				if fsm.pool.ReachedMaxHeight() {
					return finished, err
				}
				return waitForBlock, err

			case blockResponseEv:
				fsm.logger.Debug("blockResponseEv", "H", data.block.Height)
				err := fsm.pool.AddBlock(data.peerID, data.block, data.length)
				if err != nil {
					// A block was received that was unsolicited, came from an unexpected
					// peer, or we already have it. Ignore the block, remove the peer and
					// send an error to the switch.
					fsm.pool.RemovePeer(data.peerID, err)
					fsm.toBcR.sendPeerError(err, data.peerID)
				}
				if fsm.pool.NumPeers() == 0 {
					return waitForPeer, err
				}
				return waitForBlock, err

			case noBlockResponseEv:
				fsm.logger.Error("peer does not have requested block", "peer", data.peerID)

				return waitForBlock, nil

			case processedBlockEv:
				if data.err != nil {
					first, second, _ := fsm.pool.FirstTwoBlocksAndPeers()
					fsm.logger.Error("error processing block", "err", data.err,
						"first", first.block.Height, "second", second.block.Height)
					fsm.logger.Error("send peer error for", "peer", first.peer.ID)
					fsm.toBcR.sendPeerError(data.err, first.peer.ID)
					fsm.logger.Error("send peer error for", "peer", second.peer.ID)
					fsm.toBcR.sendPeerError(data.err, second.peer.ID)
					// Remove the first two blocks. This will also remove the peers.
					fsm.pool.InvalidateFirstTwoBlocks(data.err)
				} else {
					fsm.pool.ProcessedCurrentHeightBlock()
					// Since we advanced one block, reset the state timer.
					fsm.resetStateTimer()
				}

				// Both cases above may result in achieving the maximum height.
				if fsm.pool.ReachedMaxHeight() {
					return finished, nil
				}

				return waitForBlock, data.err

			case peerRemoveEv:
				// This event is sent by the switch to remove disconnected and errored peers.
				fsm.pool.RemovePeer(data.peerID, data.err)
				if fsm.pool.NumPeers() == 0 {
					return waitForPeer, nil
				}
				if fsm.pool.ReachedMaxHeight() {
					return finished, nil
				}
				return waitForBlock, nil

			case makeRequestsEv:
				fsm.makeNextRequests(data.maxNumRequests)
				return waitForBlock, nil

			case stateTimeoutEv:
				if data.stateName != "waitForBlock" {
					fsm.logger.Error("received a state timeout event for different state",
						"state", data.stateName)
					return waitForBlock, errTimeoutEventWrongState
				}
				// We haven't received the block at the current height or height+1. Remove the peer.
				fsm.pool.RemovePeerAtCurrentHeights(errNoPeerResponseForCurrentHeights)
				fsm.resetStateTimer()
				if fsm.pool.NumPeers() == 0 {
					return waitForPeer, errNoPeerResponseForCurrentHeights
				}
				if fsm.pool.ReachedMaxHeight() {
					return finished, nil
				}
				return waitForBlock, errNoPeerResponseForCurrentHeights

			case stopFSMEv:
				if fsm.stateTimer != nil {
					fsm.stateTimer.Stop()
				}
				return finished, errNoErrorFinished

			default:
				return waitForBlock, errInvalidEvent
			}
		},
	}

	finished = &bcReactorFSMState{
		name: "finished",
		enter: func(fsm *BcReactorFSM) {
			fsm.logger.Info("Time to switch to consensus reactor!", "height", fsm.pool.Height)
			fsm.toBcR.switchToConsensus()
			fsm.cleanup()
		},
		handle: func(fsm *BcReactorFSM, ev bReactorEvent, data bReactorEventData) (*bcReactorFSMState, error) {
			return finished, nil
		},
	}
}

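// Informal transition summary, derived from the handlers above (added for
// exposition, not part of the original file):
//
//	unknown      --startFSMEv----------------------------> waitForPeer
//	waitForPeer  --statusResponseEv----------------------> waitForBlock
//	waitForPeer  --stateTimeoutEv------------------------> finished (no taller peer)
//	waitForBlock --statusResponseEv / processedBlockEv /
//	               peerRemoveEv / stateTimeoutEv---------> finished (pool reached max height)
//	waitForBlock --last peer removed---------------------> waitForPeer
//	any state    --stopFSMEv-----------------------------> finished
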
// bcReactor is the interface used by the FSM for sending Block and Status requests,
// and for informing of peer errors and state timeouts.
// Implemented by the BlockchainReactor and by tests.
type bcReactor interface {
	sendStatusRequest()
	sendBlockRequest(peerID p2p.ID, height int64) error
	sendPeerError(err error, peerID p2p.ID)
	resetStateTimer(name string, timer **time.Timer, timeout time.Duration)
	switchToConsensus()
}

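// Minimal sketch of a test double satisfying bcReactor (added for exposition,
// not part of the original file; the real test double used by this package is
// the testReactor defined in the FSM tests):
//
//	type nopReactor struct{}
//
//	func (nopReactor) sendStatusRequest()                                                   {}
//	func (nopReactor) sendBlockRequest(peerID p2p.ID, height int64) error                   { return nil }
//	func (nopReactor) sendPeerError(err error, peerID p2p.ID)                               {}
//	func (nopReactor) resetStateTimer(name string, timer **time.Timer, timeout time.Duration) {}
//	func (nopReactor) switchToConsensus()                                                   {}
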
// SetLogger sets the FSM logger.
func (fsm *BcReactorFSM) SetLogger(l log.Logger) {
	fsm.logger = l
	fsm.pool.SetLogger(l)
}

// Start starts the FSM.
func (fsm *BcReactorFSM) Start() {
	_ = fsm.Handle(&bcReactorMessage{event: startFSMEv})
}

// Handle processes messages and events sent to the FSM.
func (fsm *BcReactorFSM) Handle(msg *bcReactorMessage) error {
	fsm.mtx.Lock()
	defer fsm.mtx.Unlock()
	fsm.logger.Debug("FSM received", "event", msg, "state", fsm.state)

	if fsm.state == nil {
		fsm.state = unknown
	}
	next, err := fsm.state.handle(fsm, msg.event, msg.data)
	if err != nil {
		fsm.logger.Error("FSM event handler returned", "err", err,
			"state", fsm.state, "event", msg.event)
	}

	oldState := fsm.state.name
	fsm.transition(next)
	if oldState != fsm.state.name {
		fsm.logger.Info("FSM changed state", "new_state", fsm.state)
	}
	return err
}

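// Example of driving the FSM directly (added for exposition, not part of the
// original file; assumes a bcReactor implementation `r`, e.g. the nopReactor
// sketched above):
//
//	fsm := NewFSM(1, r)
//	fsm.SetLogger(log.TestingLogger())
//	fsm.Start() // unknown -> waitForPeer; broadcasts a StatusRequest via r
//	_ = fsm.Handle(&bcReactorMessage{
//		event: statusResponseEv,
//		data:  bReactorEventData{peerID: "P1", height: 10},
//	}) // waitForPeer -> waitForBlock
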
func (fsm *BcReactorFSM) transition(next *bcReactorFSMState) {
	if next == nil {
		return
	}
	if fsm.state != next {
		fsm.state = next
		if next.enter != nil {
			next.enter(fsm)
		}
	}
}

// Called when entering an FSM state in order to detect lack of progress in the state machine.
// Note the use of the bcReactor interface to facilitate testing without the timer expiring.
func (fsm *BcReactorFSM) resetStateTimer() {
	fsm.toBcR.resetStateTimer(fsm.state.name, &fsm.stateTimer, fsm.state.timeout)
}

func (fsm *BcReactorFSM) isCaughtUp() bool {
	return fsm.state == finished
}

func (fsm *BcReactorFSM) makeNextRequests(maxNumRequests int) {
	fsm.pool.MakeNextRequests(maxNumRequests)
}

func (fsm *BcReactorFSM) cleanup() {
	fsm.pool.Cleanup()
}

// NeedsBlocks checks if more block requests are required.
func (fsm *BcReactorFSM) NeedsBlocks() bool {
	fsm.mtx.Lock()
	defer fsm.mtx.Unlock()
	return fsm.state.name == "waitForBlock" && fsm.pool.NeedsBlocks()
}

// FirstTwoBlocks returns the two blocks at pool height and height+1.
func (fsm *BcReactorFSM) FirstTwoBlocks() (first, second *types.Block, err error) {
	fsm.mtx.Lock()
	defer fsm.mtx.Unlock()
	firstBP, secondBP, err := fsm.pool.FirstTwoBlocksAndPeers()
	if err == nil {
		first = firstBP.block
		second = secondBP.block
	}
	return
}

// Status returns the pool's height and the maximum peer height.
func (fsm *BcReactorFSM) Status() (height, maxPeerHeight int64) {
	fsm.mtx.Lock()
	defer fsm.mtx.Unlock()
	return fsm.pool.Height, fsm.pool.MaxPeerHeight
}

@@ -1,944 +0,0 @@
package v1

import (
	"fmt"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"

	"github.com/tendermint/tendermint/libs/log"
	tmmath "github.com/tendermint/tendermint/libs/math"
	tmrand "github.com/tendermint/tendermint/libs/rand"
	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/types"
)

type lastBlockRequestT struct {
	peerID p2p.ID
	height int64
}

type lastPeerErrorT struct {
	peerID p2p.ID
	err    error
}

// reactor for FSM testing
type testReactor struct {
	logger            log.Logger
	fsm               *BcReactorFSM
	numStatusRequests int
	numBlockRequests  int
	lastBlockRequest  lastBlockRequestT
	lastPeerError     lastPeerErrorT
	stateTimerStarts  map[string]int
}

func sendEventToFSM(fsm *BcReactorFSM, ev bReactorEvent, data bReactorEventData) error {
	return fsm.Handle(&bcReactorMessage{event: ev, data: data})
}

type fsmStepTestValues struct {
	currentState string
	event        bReactorEvent
	data         bReactorEventData

	wantErr           error
	wantState         string
	wantStatusReqSent bool
	wantReqIncreased  bool
	wantNewBlocks     []int64
	wantRemovedPeers  []p2p.ID
}

// ---------------------------------------------------------------------------
// helper test functions for different FSM events, states and expected behavior
func sStopFSMEv(current, expected string) fsmStepTestValues {
	return fsmStepTestValues{
		currentState: current,
		event:        stopFSMEv,
		wantState:    expected,
		wantErr:      errNoErrorFinished}
}

func sUnknownFSMEv(current string) fsmStepTestValues {
	return fsmStepTestValues{
		currentState: current,
		event:        1234,
		wantState:    current,
		wantErr:      errInvalidEvent}
}

func sStartFSMEv() fsmStepTestValues {
	return fsmStepTestValues{
		currentState:      "unknown",
		event:             startFSMEv,
		wantState:         "waitForPeer",
		wantStatusReqSent: true}
}

func sStateTimeoutEv(current, expected string, timedoutState string, wantErr error) fsmStepTestValues {
	return fsmStepTestValues{
		currentState: current,
		event:        stateTimeoutEv,
		data: bReactorEventData{
			stateName: timedoutState,
		},
		wantState: expected,
		wantErr:   wantErr,
	}
}

func sProcessedBlockEv(current, expected string, reactorError error) fsmStepTestValues {
	return fsmStepTestValues{
		currentState: current,
		event:        processedBlockEv,
		data: bReactorEventData{
			err: reactorError,
		},
		wantState: expected,
		wantErr:   reactorError,
	}
}

func sStatusEv(current, expected string, peerID p2p.ID, height int64, err error) fsmStepTestValues {
	return fsmStepTestValues{
		currentState: current,
		event:        statusResponseEv,
		data:         bReactorEventData{peerID: peerID, height: height},
		wantState:    expected,
		wantErr:      err}
}

func sMakeRequestsEv(current, expected string, maxPendingRequests int) fsmStepTestValues {
	return fsmStepTestValues{
		currentState:     current,
		event:            makeRequestsEv,
		data:             bReactorEventData{maxNumRequests: maxPendingRequests},
		wantState:        expected,
		wantReqIncreased: true,
	}
}

func sMakeRequestsEvErrored(current, expected string,
	maxPendingRequests int, err error, peersRemoved []p2p.ID) fsmStepTestValues {
	return fsmStepTestValues{
		currentState:     current,
		event:            makeRequestsEv,
		data:             bReactorEventData{maxNumRequests: maxPendingRequests},
		wantState:        expected,
		wantErr:          err,
		wantRemovedPeers: peersRemoved,
		wantReqIncreased: true,
	}
}

func sBlockRespEv(current, expected string, peerID p2p.ID, height int64, prevBlocks []int64) fsmStepTestValues {
	txs := []types.Tx{types.Tx("foo"), types.Tx("bar")}
	return fsmStepTestValues{
		currentState: current,
		event:        blockResponseEv,
		data: bReactorEventData{
			peerID: peerID,
			height: height,
			block:  types.MakeBlock(height, txs, nil, nil),
			length: 100},
		wantState:     expected,
		wantNewBlocks: append(prevBlocks, height),
	}
}

func sBlockRespEvErrored(current, expected string,
	peerID p2p.ID, height int64, prevBlocks []int64, wantErr error, peersRemoved []p2p.ID) fsmStepTestValues {
	txs := []types.Tx{types.Tx("foo"), types.Tx("bar")}

	return fsmStepTestValues{
		currentState: current,
		event:        blockResponseEv,
		data: bReactorEventData{
			peerID: peerID,
			height: height,
			block:  types.MakeBlock(height, txs, nil, nil),
			length: 100},
		wantState:        expected,
		wantErr:          wantErr,
		wantRemovedPeers: peersRemoved,
		wantNewBlocks:    prevBlocks,
	}
}

func sPeerRemoveEv(current, expected string, peerID p2p.ID, err error, peersRemoved []p2p.ID) fsmStepTestValues {
	return fsmStepTestValues{
		currentState: current,
		event:        peerRemoveEv,
		data: bReactorEventData{
			peerID: peerID,
			err:    err,
		},
		wantState:        expected,
		wantRemovedPeers: peersRemoved,
	}
}

// --------------------------------------------

func newTestReactor(height int64) *testReactor {
	testBcR := &testReactor{logger: log.TestingLogger(), stateTimerStarts: make(map[string]int)}
	testBcR.fsm = NewFSM(height, testBcR)
	testBcR.fsm.SetLogger(testBcR.logger)
	return testBcR
}

func fixBlockResponseEvStep(step *fsmStepTestValues, testBcR *testReactor) {
	// There is currently no good way to know to which peer a block request was sent.
	// So in some cases where it does not matter, before we simulate a block response
	// we cheat and look up which peer the block is expected from.
	if step.event == blockResponseEv {
		height := step.data.height
		peerID, ok := testBcR.fsm.pool.blocks[height]
		if ok {
			step.data.peerID = peerID
		}
	}
}

type testFields struct {
	name               string
	startingHeight     int64
	maxRequestsPerPeer int
	maxPendingRequests int
	steps              []fsmStepTestValues
}

func executeFSMTests(t *testing.T, tests []testFields, matchRespToReq bool) {
	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			// Create test reactor
			testBcR := newTestReactor(tt.startingHeight)

			if tt.maxRequestsPerPeer != 0 {
				maxRequestsPerPeer = tt.maxRequestsPerPeer
			}

			for _, step := range tt.steps {
				step := step
				assert.Equal(t, step.currentState, testBcR.fsm.state.name)

				var heightBefore int64
				if step.event == processedBlockEv && step.data.err == errBlockVerificationFailure {
					heightBefore = testBcR.fsm.pool.Height
				}
				oldNumStatusRequests := testBcR.numStatusRequests
				oldNumBlockRequests := testBcR.numBlockRequests
				if matchRespToReq {
					fixBlockResponseEvStep(&step, testBcR)
				}

				fsmErr := sendEventToFSM(testBcR.fsm, step.event, step.data)
				assert.Equal(t, step.wantErr, fsmErr)

				if step.wantStatusReqSent {
					assert.Equal(t, oldNumStatusRequests+1, testBcR.numStatusRequests)
				} else {
					assert.Equal(t, oldNumStatusRequests, testBcR.numStatusRequests)
				}

				if step.wantReqIncreased {
					assert.True(t, oldNumBlockRequests < testBcR.numBlockRequests)
				} else {
					assert.Equal(t, oldNumBlockRequests, testBcR.numBlockRequests)
				}

				for _, height := range step.wantNewBlocks {
					_, err := testBcR.fsm.pool.BlockAndPeerAtHeight(height)
					assert.Nil(t, err)
				}
				if step.event == processedBlockEv && step.data.err == errBlockVerificationFailure {
					heightAfter := testBcR.fsm.pool.Height
					assert.Equal(t, heightBefore, heightAfter)
					firstAfter, err1 := testBcR.fsm.pool.BlockAndPeerAtHeight(testBcR.fsm.pool.Height)
					secondAfter, err2 := testBcR.fsm.pool.BlockAndPeerAtHeight(testBcR.fsm.pool.Height + 1)
					assert.NotNil(t, err1)
					assert.NotNil(t, err2)
					assert.Nil(t, firstAfter)
					assert.Nil(t, secondAfter)
				}

				assert.Equal(t, step.wantState, testBcR.fsm.state.name)

				if step.wantState == "finished" {
					assert.True(t, testBcR.fsm.isCaughtUp())
				}
			}
		})
	}
}

func TestFSMBasic(t *testing.T) {
	tests := []testFields{
		{
			name:               "one block, one peer - TS2",
			startingHeight:     1,
			maxRequestsPerPeer: 2,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				sStatusEv("waitForPeer", "waitForBlock", "P1", 2, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 1, []int64{}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P2", 2, []int64{1}),
				sProcessedBlockEv("waitForBlock", "finished", nil),
			},
		},
		{
			name:               "multi block, multi peer - TS2",
			startingHeight:     1,
			maxRequestsPerPeer: 2,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				sStatusEv("waitForPeer", "waitForBlock", "P1", 4, nil),
				sStatusEv("waitForBlock", "waitForBlock", "P2", 4, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),

				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 1, []int64{}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 2, []int64{1}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P2", 3, []int64{1, 2}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P2", 4, []int64{1, 2, 3}),

				sProcessedBlockEv("waitForBlock", "waitForBlock", nil),
				sProcessedBlockEv("waitForBlock", "waitForBlock", nil),
				sProcessedBlockEv("waitForBlock", "finished", nil),
			},
		},
	}

	executeFSMTests(t, tests, true)
}

func TestFSMBlockVerificationFailure(t *testing.T) {
	tests := []testFields{
		{
			name:               "block verification failure - TS2 variant",
			startingHeight:     1,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),

				// add P1 and get blocks 1-3 from it
				sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 1, []int64{}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 2, []int64{1}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 3, []int64{1, 2}),

				// add P2
				sStatusEv("waitForBlock", "waitForBlock", "P2", 3, nil),

				// block processing fails; P1 and all its blocks should be removed
				sProcessedBlockEv("waitForBlock", "waitForBlock", errBlockVerificationFailure),

				// get blocks 1-3 from P2
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
				sBlockRespEv("waitForBlock", "waitForBlock", "P2", 1, []int64{}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P2", 2, []int64{1}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P2", 3, []int64{1, 2}),

				// finish after processing blocks 1 and 2
				sProcessedBlockEv("waitForBlock", "waitForBlock", nil),
				sProcessedBlockEv("waitForBlock", "finished", nil),
			},
		},
	}

	executeFSMTests(t, tests, false)
}
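// TestFSMBadBlockFromPeer covers block responses that must be rejected:
// heights that were never requested, duplicates, and blocks arriving from
// unknown or wrong peers.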
func TestFSMBadBlockFromPeer(t *testing.T) {
	tests := []testFields{
		{
			name:               "block we haven't asked for",
			startingHeight:     1,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				// add P1 and ask for blocks 1-3
				sStatusEv("waitForPeer", "waitForBlock", "P1", 300, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),

				// blockResponseEv for height 100 should cause an error
				sBlockRespEvErrored("waitForBlock", "waitForPeer",
					"P1", 100, []int64{}, errMissingBlock, []p2p.ID{}),
			},
		},
		{
			name:               "block we already have",
			startingHeight:     1,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				// add P1 and get block 1
				sStatusEv("waitForPeer", "waitForBlock", "P1", 100, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
				sBlockRespEv("waitForBlock", "waitForBlock",
					"P1", 1, []int64{}),

				// Receive block 1 again. The peer is removed together with block 1,
				// so the pool should be left with no blocks.
				sBlockRespEvErrored("waitForBlock", "waitForPeer",
					"P1", 1, []int64{}, errDuplicateBlock, []p2p.ID{"P1"}),
			},
		},
		{
			name:               "block from unknown peer",
			startingHeight:     1,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				// add P1
				sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),

				// get block 1 from unknown peer P2
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
				sBlockRespEvErrored("waitForBlock", "waitForBlock",
					"P2", 1, []int64{}, errBadDataFromPeer, []p2p.ID{"P2"}),
			},
		},
		{
			name:               "block from wrong peer",
			startingHeight:     1,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				// add P1, make requests for blocks 1-3 to P1
				sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),

				// add P2
				sStatusEv("waitForBlock", "waitForBlock", "P2", 3, nil),

				// receive block 1 from P2
				sBlockRespEvErrored("waitForBlock", "waitForBlock",
					"P2", 1, []int64{}, errBadDataFromPeer, []p2p.ID{"P2"}),
			},
		},
	}

	executeFSMTests(t, tests, false)
}
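// TestFSMBlockAtCurrentHeightDoesNotArriveInTime checks the timeout handling
// for the block at the current pool height.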
func TestFSMBlockAtCurrentHeightDoesNotArriveInTime(t *testing.T) {
	tests := []testFields{
		{
			name:               "block at current height undelivered - TS5",
			startingHeight:     1,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				// add P1, get blocks 1 and 2, process block 1
				sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
				sBlockRespEv("waitForBlock", "waitForBlock",
					"P1", 1, []int64{}),
				sBlockRespEv("waitForBlock", "waitForBlock",
					"P1", 2, []int64{1}),
				sProcessedBlockEv("waitForBlock", "waitForBlock", nil),

				// add P2
				sStatusEv("waitForBlock", "waitForBlock", "P2", 3, nil),

				// timeout on block 3, P1 should be removed
				sStateTimeoutEv("waitForBlock", "waitForBlock", "waitForBlock", errNoPeerResponseForCurrentHeights),

				// make requests and finish by receiving blocks 2 and 3 from P2
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
				sBlockRespEv("waitForBlock", "waitForBlock", "P2", 2, []int64{}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P2", 3, []int64{2}),
				sProcessedBlockEv("waitForBlock", "finished", nil),
			},
		},
		{
			name:               "block at current height undelivered, at maxPeerHeight after peer removal - TS3",
			startingHeight:     1,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				// add P1, request blocks 1-3 from P1
				sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),

				// add P2 (tallest)
				sStatusEv("waitForBlock", "waitForBlock", "P2", 30, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),

				// receive blocks 1-3 from P1
				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 1, []int64{}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 2, []int64{1}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 3, []int64{1, 2}),

				// process blocks at heights 1 and 2
				sProcessedBlockEv("waitForBlock", "waitForBlock", nil),
				sProcessedBlockEv("waitForBlock", "waitForBlock", nil),

				// timeout on block at height 4
				sStateTimeoutEv("waitForBlock", "finished", "waitForBlock", nil),
			},
		},
	}

	executeFSMTests(t, tests, true)
}
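// TestFSMPeerRelatedEvents covers peer removal, peers lowering their reported
// height, and peers that are too short or missing from the switch.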
func TestFSMPeerRelatedEvents(t *testing.T) {
	tests := []testFields{
		{
			name:           "peer remove event with no blocks",
			startingHeight: 1,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				// add P1, P2, P3
				sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
				sStatusEv("waitForBlock", "waitForBlock", "P2", 3, nil),
				sStatusEv("waitForBlock", "waitForBlock", "P3", 3, nil),

				// switch removes P2
				sPeerRemoveEv("waitForBlock", "waitForBlock", "P2", errSwitchRemovesPeer, []p2p.ID{"P2"}),
			},
		},
		{
			name:           "only peer removed while in waitForBlock state",
			startingHeight: 100,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				// add P1
				sStatusEv("waitForPeer", "waitForBlock", "P1", 200, nil),

				// switch removes P1
				sPeerRemoveEv("waitForBlock", "waitForPeer", "P1", errSwitchRemovesPeer, []p2p.ID{"P1"}),
			},
		},
		{
			name:               "highest peer removed while in waitForBlock state, node reaches maxPeerHeight - TS4",
			startingHeight:     100,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				// add P1 and make requests
				sStatusEv("waitForPeer", "waitForBlock", "P1", 101, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
				// add P2
				sStatusEv("waitForBlock", "waitForBlock", "P2", 200, nil),

				// get blocks 100 and 101 from P1 and process block at height 100
				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 100, []int64{}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 101, []int64{100}),
				sProcessedBlockEv("waitForBlock", "waitForBlock", nil),

				// switch removes P2, the highest peer; the node is then at maxPeerHeight and should finish
				sPeerRemoveEv("waitForBlock", "finished", "P2", errSwitchRemovesPeer, []p2p.ID{"P2"}),
			},
		},
		{
			name:               "highest peer lowers its height in waitForBlock state, node reaches maxPeerHeight - TS4",
			startingHeight:     100,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				// add P1 and make requests
				sStatusEv("waitForPeer", "waitForBlock", "P1", 101, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),

				// add P2
				sStatusEv("waitForBlock", "waitForBlock", "P2", 200, nil),

				// get blocks 100 and 101 from P1
				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 100, []int64{}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 101, []int64{100}),

				// process block at height 100
				sProcessedBlockEv("waitForBlock", "waitForBlock", nil),

				// P2 becomes short
				sStatusEv("waitForBlock", "finished", "P2", 100, errPeerLowersItsHeight),
			},
		},
		{
			name:           "new short peer while in waitForPeer state",
			startingHeight: 100,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				sStatusEv("waitForPeer", "waitForPeer", "P1", 3, errPeerTooShort),
			},
		},
		{
			name:           "new short peer while in waitForBlock state",
			startingHeight: 100,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				sStatusEv("waitForPeer", "waitForBlock", "P1", 200, nil),
				sStatusEv("waitForBlock", "waitForBlock", "P2", 3, errPeerTooShort),
			},
		},
		{
			name:           "only peer updated with low height while in waitForBlock state",
			startingHeight: 100,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				sStatusEv("waitForPeer", "waitForBlock", "P1", 200, nil),
				sStatusEv("waitForBlock", "waitForPeer", "P1", 3, errPeerLowersItsHeight),
			},
		},
		{
			name:               "peer does not exist in the switch",
			startingHeight:     9999999,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				// add P1
				sStatusEv("waitForPeer", "waitForBlock", "P1", 20000000, nil),
				// send request for block 9999999
				// Note: for this block request the "switch missing the peer" error is simulated;
				// see the implementation of the bcReactor interface, sendBlockRequest(), in this file.
				sMakeRequestsEvErrored("waitForBlock", "waitForBlock",
					maxNumRequests, nil, []p2p.ID{"P1"}),
			},
		},
	}

	executeFSMTests(t, tests, true)
}
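// TestFSMStopFSM checks that stopFSMEv moves the FSM to "finished" from every state.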
func TestFSMStopFSM(t *testing.T) {
	tests := []testFields{
		{
			name: "stopFSMEv in unknown",
			steps: []fsmStepTestValues{
				sStopFSMEv("unknown", "finished"),
			},
		},
		{
			name:           "stopFSMEv in waitForPeer",
			startingHeight: 1,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				sStopFSMEv("waitForPeer", "finished"),
			},
		},
		{
			name:           "stopFSMEv in waitForBlock",
			startingHeight: 1,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
				sStopFSMEv("waitForBlock", "finished"),
			},
		},
	}

	executeFSMTests(t, tests, false)
}
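// TestFSMUnknownElements checks how the FSM reacts to unknown events in each state.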
func TestFSMUnknownElements(t *testing.T) {
	tests := []testFields{
		{
			name: "unknown event for state unknown",
			steps: []fsmStepTestValues{
				sUnknownFSMEv("unknown"),
			},
		},
		{
			name: "unknown event for state waitForPeer",
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				sUnknownFSMEv("waitForPeer"),
			},
		},
		{
			name:           "unknown event for state waitForBlock",
			startingHeight: 1,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
				sUnknownFSMEv("waitForBlock"),
			},
		},
	}

	executeFSMTests(t, tests, false)
}
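// TestFSMPeerStateTimeoutEvent checks the handling of state timeout events,
// including timeouts that arrive for a state other than the current one.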
func TestFSMPeerStateTimeoutEvent(t *testing.T) {
	tests := []testFields{
		{
			name:               "timeout event for state waitForPeer while in state waitForPeer - TS1",
			startingHeight:     1,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				sStateTimeoutEv("waitForPeer", "finished", "waitForPeer", errNoTallerPeer),
			},
		},
		{
			name:               "timeout event for state waitForPeer while in a state != waitForPeer",
			startingHeight:     1,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				sStateTimeoutEv("waitForPeer", "waitForPeer", "waitForBlock", errTimeoutEventWrongState),
			},
		},
		{
			name:               "timeout event for state waitForBlock while in state waitForBlock",
			startingHeight:     1,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
				sStateTimeoutEv("waitForBlock", "waitForPeer", "waitForBlock", errNoPeerResponseForCurrentHeights),
			},
		},
		{
			name:               "timeout event for state waitForBlock while in a state != waitForBlock",
			startingHeight:     1,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
				sStateTimeoutEv("waitForBlock", "waitForBlock", "waitForPeer", errTimeoutEventWrongState),
			},
		},
		{
			name:               "timeout event for state waitForBlock with multiple peers",
			startingHeight:     1,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
				sStatusEv("waitForBlock", "waitForBlock", "P2", 3, nil),
				sStateTimeoutEv("waitForBlock", "waitForBlock", "waitForBlock", errNoPeerResponseForCurrentHeights),
			},
		},
	}

	executeFSMTests(t, tests, false)
}
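// makeCorrectTransitionSequence builds a testFields value with a correct
// sequence of FSM events: start, one status response per peer, then
// interleaved block requests, block responses, and block processing steps
// until numBlocks is reached.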
func makeCorrectTransitionSequence(startingHeight int64, numBlocks int64, numPeers int, randomPeerHeights bool,
	maxRequestsPerPeer int, maxPendingRequests int) testFields {

	// Generate numPeers peers whose heights are either random or equal to
	// numBlocks, depending on the randomPeerHeights flag.
	peerHeights := make([]int64, numPeers)
	for i := 0; i < numPeers; i++ {
		if i == 0 {
			peerHeights[0] = numBlocks
			continue
		}
		if randomPeerHeights {
			peerHeights[i] = int64(tmmath.MaxInt(tmrand.Intn(int(numBlocks)), int(startingHeight)+1))
		} else {
			peerHeights[i] = numBlocks
		}
	}

	// Approximate the slice capacity to save time on appends.
	testSteps := make([]fsmStepTestValues, 0, 3*numBlocks+int64(numPeers))

	testName := fmt.Sprintf("%v-blocks %v-startingHeight %v-peers %v-maxRequestsPerPeer %v-maxNumRequests",
		numBlocks, startingHeight, numPeers, maxRequestsPerPeer, maxPendingRequests)

	// Add the startFSMEv step.
	testSteps = append(testSteps, sStartFSMEv())

	// For each peer, add a statusResponseEv step.
	for i := 0; i < numPeers; i++ {
		peerName := fmt.Sprintf("P%d", i)
		if i == 0 {
			testSteps = append(
				testSteps,
				sStatusEv("waitForPeer", "waitForBlock", p2p.ID(peerName), peerHeights[i], nil))
		} else {
			testSteps = append(testSteps,
				sStatusEv("waitForBlock", "waitForBlock", p2p.ID(peerName), peerHeights[i], nil))
		}
	}

	height := startingHeight
	numBlocksReceived := 0
	prevBlocks := make([]int64, 0, maxPendingRequests)

forLoop:
	for i := 0; i < int(numBlocks); i++ {

		// Add the makeRequestEv step periodically.
		if i%maxRequestsPerPeer == 0 {
			testSteps = append(
				testSteps,
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
			)
		}

		// Add the blockRespEv step.
		testSteps = append(
			testSteps,
			sBlockRespEv("waitForBlock", "waitForBlock",
				"P0", height, prevBlocks))
		prevBlocks = append(prevBlocks, height)
		height++
		numBlocksReceived++

		// Add the processedBlockEv step periodically.
		if numBlocksReceived >= maxRequestsPerPeer || height >= numBlocks {
			for j := int(height) - numBlocksReceived; j < int(height); j++ {
				if j >= int(numBlocks) {
					// This is the last block to be processed; we should end up in the "finished" state.
					testSteps = append(
						testSteps,
						sProcessedBlockEv("waitForBlock", "finished", nil))
					break forLoop
				}
				testSteps = append(
					testSteps,
					sProcessedBlockEv("waitForBlock", "waitForBlock", nil))
			}
			numBlocksReceived = 0
			prevBlocks = make([]int64, 0, maxPendingRequests)
		}
	}

	return testFields{
		name:               testName,
		startingHeight:     startingHeight,
		maxRequestsPerPeer: maxRequestsPerPeer,
		maxPendingRequests: maxPendingRequests,
		steps:              testSteps,
	}
}

const (
	maxStartingHeightTest       = 100
	maxRequestsPerPeerTest      = 20
	maxTotalPendingRequestsTest = 600
	maxNumPeersTest             = 1000
	maxNumBlocksInChainTest     = 10000 // should be smaller than 9999999
)
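// makeCorrectTransitionSequenceWithRandomParameters builds a correct transition
// sequence with randomized starting height, request limits, number of blocks,
// and number of peers.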
func makeCorrectTransitionSequenceWithRandomParameters() testFields {
	// Generate a starting height for fast sync.
	startingHeight := int64(tmrand.Intn(maxStartingHeightTest) + 1)

	// Generate the number of requests per peer.
	maxRequestsPerPeer := tmrand.Intn(maxRequestsPerPeerTest) + 1

	// Generate the maximum number of total pending requests, >= maxRequestsPerPeer.
	maxPendingRequests := tmrand.Intn(maxTotalPendingRequestsTest-maxRequestsPerPeer) + maxRequestsPerPeer

	// Generate the number of blocks to be synced.
	numBlocks := int64(tmrand.Intn(maxNumBlocksInChainTest)) + startingHeight

	// Generate a number of peers.
	numPeers := tmrand.Intn(maxNumPeersTest) + 1

	return makeCorrectTransitionSequence(startingHeight, numBlocks, numPeers, true, maxRequestsPerPeer, maxPendingRequests)
}
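// shouldApplyProcessedBlockEvStep reports whether a processedBlockEv step can
// be applied, i.e. whether the pool holds the two blocks needed for processing.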
func shouldApplyProcessedBlockEvStep(step *fsmStepTestValues, testBcR *testReactor) bool {
	if step.event == processedBlockEv {
		_, err := testBcR.fsm.pool.BlockAndPeerAtHeight(testBcR.fsm.pool.Height)
		if err == errMissingBlock {
			return false
		}
		_, err = testBcR.fsm.pool.BlockAndPeerAtHeight(testBcR.fsm.pool.Height + 1)
		if err == errMissingBlock {
			return false
		}
	}
	return true
}
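// TestFSMCorrectTransitionSequences runs generated sequences of correct FSM
// events and checks states and status requests at every step.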
func TestFSMCorrectTransitionSequences(t *testing.T) {
	tests := []testFields{
		makeCorrectTransitionSequence(1, 100, 10, true, 10, 40),
		makeCorrectTransitionSequenceWithRandomParameters(),
	}

	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			// Create test reactor
			testBcR := newTestReactor(tt.startingHeight)

			if tt.maxRequestsPerPeer != 0 {
				maxRequestsPerPeer = tt.maxRequestsPerPeer
			}

			for _, step := range tt.steps {
				step := step
				assert.Equal(t, step.currentState, testBcR.fsm.state.name)

				oldNumStatusRequests := testBcR.numStatusRequests
				fixBlockResponseEvStep(&step, testBcR)
				if !shouldApplyProcessedBlockEvStep(&step, testBcR) {
					continue
				}

				fsmErr := sendEventToFSM(testBcR.fsm, step.event, step.data)
				assert.Equal(t, step.wantErr, fsmErr)

				if step.wantStatusReqSent {
					assert.Equal(t, oldNumStatusRequests+1, testBcR.numStatusRequests)
				} else {
					assert.Equal(t, oldNumStatusRequests, testBcR.numStatusRequests)
				}

				assert.Equal(t, step.wantState, testBcR.fsm.state.name)
				if step.wantState == "finished" {
					assert.True(t, testBcR.fsm.isCaughtUp())
				}
			}
		})
	}
}
// ----------------------------------------
// testReactor implements the bcRNotifier interface
func (testR *testReactor) sendPeerError(err error, peerID p2p.ID) {
	testR.logger.Info("Reactor received sendPeerError call from FSM", "peer", peerID, "err", err)
	testR.lastPeerError.peerID = peerID
	testR.lastPeerError.err = err
}

func (testR *testReactor) sendStatusRequest() {
	testR.logger.Info("Reactor received sendStatusRequest call from FSM")
	testR.numStatusRequests++
}

func (testR *testReactor) sendBlockRequest(peerID p2p.ID, height int64) error {
	testR.logger.Info("Reactor received sendBlockRequest call from FSM", "peer", peerID, "height", height)
	testR.numBlockRequests++
	testR.lastBlockRequest.peerID = peerID
	testR.lastBlockRequest.height = height
	if height == 9999999 {
		// simulate a switch that does not have the peer
		return errNilPeerForBlockRequest
	}
	return nil
}

func (testR *testReactor) resetStateTimer(name string, timer **time.Timer, timeout time.Duration) {
	testR.logger.Info("Reactor received resetStateTimer call from FSM", "state", name, "timeout", timeout)
	if _, ok := testR.stateTimerStarts[name]; !ok {
		testR.stateTimerStarts[name] = 1
	} else {
		testR.stateTimerStarts[name]++
	}
}

func (testR *testReactor) switchToConsensus() {
}

// ----------------------------------------
@@ -1,365 +0,0 @@
package v1

import (
	"fmt"
	"os"
	"sort"
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	dbm "github.com/tendermint/tm-db"

	abci "github.com/tendermint/tendermint/abci/types"
	cfg "github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/mempool/mock"
	"github.com/tendermint/tendermint/p2p"
	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
	"github.com/tendermint/tendermint/proxy"
	sm "github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/store"
	"github.com/tendermint/tendermint/types"
	tmtime "github.com/tendermint/tendermint/types/time"
)

var config *cfg.Config

func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.GenesisDoc, []types.PrivValidator) {
	validators := make([]types.GenesisValidator, numValidators)
	privValidators := make([]types.PrivValidator, numValidators)
	for i := 0; i < numValidators; i++ {
		val, privVal := types.RandValidator(randPower, minPower)
		validators[i] = types.GenesisValidator{
			PubKey: val.PubKey,
			Power:  val.VotingPower,
		}
		privValidators[i] = privVal
	}
	sort.Sort(types.PrivValidatorsByAddress(privValidators))

	return &types.GenesisDoc{
		GenesisTime: tmtime.Now(),
		ChainID:     config.ChainID(),
		Validators:  validators,
	}, privValidators
}

func makeVote(
	t *testing.T,
	header *types.Header,
	blockID types.BlockID,
	valset *types.ValidatorSet,
	privVal types.PrivValidator) *types.Vote {

	pubKey, err := privVal.GetPubKey()
	require.NoError(t, err)

	valIdx, _ := valset.GetByAddress(pubKey.Address())
	vote := &types.Vote{
		ValidatorAddress: pubKey.Address(),
		ValidatorIndex:   valIdx,
		Height:           header.Height,
		Round:            1,
		Timestamp:        tmtime.Now(),
		Type:             tmproto.PrecommitType,
		BlockID:          blockID,
	}

	vpb := vote.ToProto()

	_ = privVal.SignVote(header.ChainID, vpb)
	vote.Signature = vpb.Signature

	return vote
}

type BlockchainReactorPair struct {
	bcR  *BlockchainReactor
	conR *consensusReactorTest
}

func newBlockchainReactor(
	t *testing.T,
	logger log.Logger,
	genDoc *types.GenesisDoc,
	privVals []types.PrivValidator,
	maxBlockHeight int64) *BlockchainReactor {
	if len(privVals) != 1 {
		panic("only support one validator")
	}

	app := &testApp{}
	cc := proxy.NewLocalClientCreator(app)
	proxyApp := proxy.NewAppConns(cc)
	err := proxyApp.Start()
	if err != nil {
		panic(fmt.Errorf("error starting app: %w", err))
	}

	blockDB := dbm.NewMemDB()
	stateDB := dbm.NewMemDB()
	stateStore := sm.NewStore(stateDB)
	blockStore := store.NewBlockStore(blockDB)

	state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
	if err != nil {
		panic(fmt.Errorf("error constructing state from genesis file: %w", err))
	}

	// Make the BlockchainReactor itself.
	// NOTE we have to create and commit the blocks first because
	// pool.height is determined from the store.
	fastSync := true
	db := dbm.NewMemDB()
	stateStore = sm.NewStore(db)
	blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(),
		mock.Mempool{}, sm.EmptyEvidencePool{})
	if err = stateStore.Save(state); err != nil {
		panic(err)
	}

	// let's add some blocks in
	for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ {
		lastCommit := types.NewCommit(blockHeight-1, 1, types.BlockID{}, nil)
		if blockHeight > 1 {
			lastBlockMeta := blockStore.LoadBlockMeta(blockHeight - 1)
			lastBlock := blockStore.LoadBlock(blockHeight - 1)

			vote := makeVote(t, &lastBlock.Header, lastBlockMeta.BlockID, state.Validators, privVals[0])
			lastCommit = types.NewCommit(vote.Height, vote.Round, lastBlockMeta.BlockID, []types.CommitSig{vote.CommitSig()})
		}

		thisBlock := makeBlock(blockHeight, state, lastCommit)

		thisParts := thisBlock.MakePartSet(types.BlockPartSizeBytes)
		blockID := types.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()}

		state, _, err = blockExec.ApplyBlock(state, blockID, thisBlock)
		if err != nil {
			panic(fmt.Errorf("error applying block: %w", err))
		}

		blockStore.SaveBlock(thisBlock, thisParts, lastCommit)
	}

	bcReactor := NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
	bcReactor.SetLogger(logger.With("module", "blockchain"))

	return bcReactor
}

func newBlockchainReactorPair(
	t *testing.T,
	logger log.Logger,
	genDoc *types.GenesisDoc,
	privVals []types.PrivValidator,
	maxBlockHeight int64) BlockchainReactorPair {

	consensusReactor := &consensusReactorTest{}
	consensusReactor.BaseReactor = *p2p.NewBaseReactor("Consensus reactor", consensusReactor)

	return BlockchainReactorPair{
		newBlockchainReactor(t, logger, genDoc, privVals, maxBlockHeight),
		consensusReactor}
}

type consensusReactorTest struct {
	p2p.BaseReactor     // BaseService + p2p.Switch
	switchedToConsensus bool
	mtx                 sync.Mutex
}

func (conR *consensusReactorTest) SwitchToConsensus(state sm.State, blocksSynced bool) {
	conR.mtx.Lock()
	defer conR.mtx.Unlock()
	conR.switchedToConsensus = true
}

func TestFastSyncNoBlockResponse(t *testing.T) {
	config = cfg.ResetTestRoot("blockchain_new_reactor_test")
	defer os.RemoveAll(config.RootDir)
	genDoc, privVals := randGenesisDoc(1, false, 30)

	maxBlockHeight := int64(65)

	reactorPairs := make([]BlockchainReactorPair, 2)

	logger := log.TestingLogger()
	reactorPairs[0] = newBlockchainReactorPair(t, logger, genDoc, privVals, maxBlockHeight)
	reactorPairs[1] = newBlockchainReactorPair(t, logger, genDoc, privVals, 0)

	p2p.MakeConnectedSwitches(config.P2P, 2, func(i int, s *p2p.Switch) *p2p.Switch {
		s.AddReactor("BLOCKCHAIN", reactorPairs[i].bcR)
		s.AddReactor("CONSENSUS", reactorPairs[i].conR)
		moduleName := fmt.Sprintf("blockchain-%v", i)
		reactorPairs[i].bcR.SetLogger(logger.With("module", moduleName))

		return s
	}, p2p.Connect2Switches)

	defer func() {
		for _, r := range reactorPairs {
			_ = r.bcR.Stop()
			_ = r.conR.Stop()
		}
	}()

	tests := []struct {
		height   int64
		existent bool
	}{
		{maxBlockHeight + 2, false},
		{10, true},
		{1, true},
		{maxBlockHeight + 100, false},
	}

	for {
		time.Sleep(10 * time.Millisecond)
		reactorPairs[1].conR.mtx.Lock()
		if reactorPairs[1].conR.switchedToConsensus {
			reactorPairs[1].conR.mtx.Unlock()
			break
		}
		reactorPairs[1].conR.mtx.Unlock()
	}

	assert.Equal(t, maxBlockHeight, reactorPairs[0].bcR.store.Height())

	for _, tt := range tests {
		block := reactorPairs[1].bcR.store.LoadBlock(tt.height)
		if tt.existent {
			assert.True(t, block != nil)
		} else {
			assert.True(t, block == nil)
		}
	}
}

// NOTE: This is too hard to test without
// an easy way to add a test peer to the switch
// or without significant refactoring of the module.
// Alternatively we could actually dial a TCP conn but
// that seems extreme.
func TestFastSyncBadBlockStopsPeer(t *testing.T) {
	numNodes := 4
	maxBlockHeight := int64(148)

	config = cfg.ResetTestRoot("blockchain_reactor_test")
	defer os.RemoveAll(config.RootDir)
	genDoc, privVals := randGenesisDoc(1, false, 30)

	otherChain := newBlockchainReactorPair(t, log.TestingLogger(), genDoc, privVals, maxBlockHeight)
	defer func() {
		_ = otherChain.bcR.Stop()
		_ = otherChain.conR.Stop()
	}()

	reactorPairs := make([]BlockchainReactorPair, numNodes)
	logger := make([]log.Logger, numNodes)

	for i := 0; i < numNodes; i++ {
		logger[i] = log.TestingLogger()
		height := int64(0)
		if i == 0 {
			height = maxBlockHeight
		}
		reactorPairs[i] = newBlockchainReactorPair(t, logger[i], genDoc, privVals, height)
	}

	switches := p2p.MakeConnectedSwitches(config.P2P, numNodes, func(i int, s *p2p.Switch) *p2p.Switch {
		reactorPairs[i].conR.mtx.Lock()
		s.AddReactor("BLOCKCHAIN", reactorPairs[i].bcR)
		s.AddReactor("CONSENSUS", reactorPairs[i].conR)
		moduleName := fmt.Sprintf("blockchain-%v", i)
		reactorPairs[i].bcR.SetLogger(logger[i].With("module", moduleName))
		reactorPairs[i].conR.mtx.Unlock()
		return s
	}, p2p.Connect2Switches)

	defer func() {
		for _, r := range reactorPairs {
			_ = r.bcR.Stop()
			_ = r.conR.Stop()
		}
	}()

outerFor:
	for {
		time.Sleep(10 * time.Millisecond)
		for i := 0; i < numNodes; i++ {
			reactorPairs[i].conR.mtx.Lock()
			if !reactorPairs[i].conR.switchedToConsensus {
				reactorPairs[i].conR.mtx.Unlock()
				continue outerFor
			}
			reactorPairs[i].conR.mtx.Unlock()
		}
		break
	}

	// at this point all reactor pairs have caught up to the newest height
	assert.Equal(t, numNodes-1, reactorPairs[1].bcR.Switch.Peers().Size())

	// mark the last reactor pair as an invalid peer
	reactorPairs[numNodes-1].bcR.store = otherChain.bcR.store

	lastLogger := log.TestingLogger()
	lastReactorPair := newBlockchainReactorPair(t, lastLogger, genDoc, privVals, 0)
	reactorPairs = append(reactorPairs, lastReactorPair)

	switches = append(switches, p2p.MakeConnectedSwitches(config.P2P, 1, func(i int, s *p2p.Switch) *p2p.Switch {
		s.AddReactor("BLOCKCHAIN", reactorPairs[len(reactorPairs)-1].bcR)
		s.AddReactor("CONSENSUS", reactorPairs[len(reactorPairs)-1].conR)
		moduleName := fmt.Sprintf("blockchain-%v", len(reactorPairs)-1)
		reactorPairs[len(reactorPairs)-1].bcR.SetLogger(lastLogger.With("module", moduleName))
		return s
	}, p2p.Connect2Switches)...)

	for i := 0; i < len(reactorPairs)-1; i++ {
		p2p.Connect2Switches(switches, i, len(reactorPairs)-1)
	}

	for {
		time.Sleep(1 * time.Second)
		lastReactorPair.conR.mtx.Lock()
		if lastReactorPair.conR.switchedToConsensus {
			lastReactorPair.conR.mtx.Unlock()
			break
		}
		lastReactorPair.conR.mtx.Unlock()

		if lastReactorPair.bcR.Switch.Peers().Size() == 0 {
			break
		}
	}

	assert.True(t, lastReactorPair.bcR.Switch.Peers().Size() < len(reactorPairs)-1)
}

//----------------------------------------------
// utility funcs

func makeTxs(height int64) (txs []types.Tx) {
	for i := 0; i < 10; i++ {
		txs = append(txs, types.Tx([]byte{byte(height), byte(i)}))
	}
	return txs
}

func makeBlock(height int64, state sm.State, lastCommit *types.Commit) *types.Block {
	block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, nil, state.Validators.GetProposer().Address)
	return block
}

type testApp struct {
	abci.BaseApplication
}
@@ -1,140 +0,0 @@
package v2

import (
	"fmt"

	bc "github.com/tendermint/tendermint/blockchain"
	"github.com/tendermint/tendermint/p2p"
	bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
	"github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/types"
)

type iIO interface {
	sendBlockRequest(peerID p2p.ID, height int64) error
	sendBlockToPeer(block *types.Block, peerID p2p.ID) error
	sendBlockNotFound(height int64, peerID p2p.ID) error
	sendStatusResponse(base, height int64, peerID p2p.ID) error

	broadcastStatusRequest() error

	trySwitchToConsensus(state state.State, skipWAL bool) bool
}
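// A minimal sketch of an iIO test double, assuming only the method set above
// (the mockIO name and the recording field are illustrative, not defined in
// this package); recording calls instead of hitting the network makes the
// scheduler and processor routines testable in isolation:
//
//	type mockIO struct{ requestedHeights []int64 }
//
//	func (m *mockIO) sendBlockRequest(peerID p2p.ID, height int64) error {
//		m.requestedHeights = append(m.requestedHeights, height) // record instead of sending
//		return nil
//	}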
type switchIO struct {
	sw *p2p.Switch
}

func newSwitchIo(sw *p2p.Switch) *switchIO {
	return &switchIO{
		sw: sw,
	}
}

const (
	// BlockchainChannel is a channel for blocks and status updates (`BlockStore` height)
	BlockchainChannel = byte(0x40)
)

type consensusReactor interface {
	// for when we switch from the blockchain reactor and fast sync to
	// the consensus machine
	SwitchToConsensus(state state.State, skipWAL bool)
}

func (sio *switchIO) sendBlockRequest(peerID p2p.ID, height int64) error {
	peer := sio.sw.Peers().Get(peerID)
	if peer == nil {
		return fmt.Errorf("peer not found")
	}
	msgBytes, err := bc.EncodeMsg(&bcproto.BlockRequest{Height: height})
	if err != nil {
		return err
	}

	queued := peer.TrySend(BlockchainChannel, msgBytes)
	if !queued {
		return fmt.Errorf("send queue full")
	}
	return nil
}

func (sio *switchIO) sendStatusResponse(base int64, height int64, peerID p2p.ID) error {
	peer := sio.sw.Peers().Get(peerID)
	if peer == nil {
		return fmt.Errorf("peer not found")
	}

	msgBytes, err := bc.EncodeMsg(&bcproto.StatusResponse{Height: height, Base: base})
	if err != nil {
		return err
	}

	if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued {
		return fmt.Errorf("peer queue full")
	}

	return nil
}

func (sio *switchIO) sendBlockToPeer(block *types.Block, peerID p2p.ID) error {
	peer := sio.sw.Peers().Get(peerID)
	if peer == nil {
		return fmt.Errorf("peer not found")
	}
	if block == nil {
		panic("trying to send nil block")
	}

	bpb, err := block.ToProto()
	if err != nil {
		return err
	}

	msgBytes, err := bc.EncodeMsg(&bcproto.BlockResponse{Block: bpb})
	if err != nil {
		return err
	}
	if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued {
		return fmt.Errorf("peer queue full")
	}

	return nil
}

func (sio *switchIO) sendBlockNotFound(height int64, peerID p2p.ID) error {
	peer := sio.sw.Peers().Get(peerID)
	if peer == nil {
		return fmt.Errorf("peer not found")
	}
	msgBytes, err := bc.EncodeMsg(&bcproto.NoBlockResponse{Height: height})
	if err != nil {
		return err
	}

	if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued {
		return fmt.Errorf("peer queue full")
	}

	return nil
}

func (sio *switchIO) trySwitchToConsensus(state state.State, skipWAL bool) bool {
	conR, ok := sio.sw.Reactor("CONSENSUS").(consensusReactor)
	if ok {
		conR.SwitchToConsensus(state, skipWAL)
	}
	return ok
}

func (sio *switchIO) broadcastStatusRequest() error {
	msgBytes, err := bc.EncodeMsg(&bcproto.StatusRequest{})
	if err != nil {
		return err
	}

	// XXX: maybe we should use an io specific peer list here
	sio.sw.Broadcast(BlockchainChannel, msgBytes)

	return nil
}
@@ -1,125 +0,0 @@
package v2

import (
	"github.com/go-kit/kit/metrics"
	"github.com/go-kit/kit/metrics/discard"
	"github.com/go-kit/kit/metrics/prometheus"
	stdprometheus "github.com/prometheus/client_golang/prometheus"
)

const (
	// MetricsSubsystem is a subsystem shared by all metrics exposed by this
	// package.
	MetricsSubsystem = "blockchain"
)

// Metrics contains metrics exposed by this package.
type Metrics struct {
	// events_in
	EventsIn metrics.Counter
	// events_handled
	EventsHandled metrics.Counter
	// events_out
	EventsOut metrics.Counter
	// errors_in
	ErrorsIn metrics.Counter
	// errors_handled
	ErrorsHandled metrics.Counter
	// errors_out
	ErrorsOut metrics.Counter
	// events_shed
	EventsShed metrics.Counter
	// events_sent
	EventsSent metrics.Counter
	// errors_sent
	ErrorsSent metrics.Counter
	// errors_shed
	ErrorsShed metrics.Counter
}

// PrometheusMetrics returns metrics for in and out events, errors, etc. handled by routines.
// Can we burn in the routine name here?
func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
	labels := []string{}
	for i := 0; i < len(labelsAndValues); i += 2 {
		labels = append(labels, labelsAndValues[i])
	}
	return &Metrics{
		EventsIn: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: MetricsSubsystem,
			Name:      "events_in",
			Help:      "Events read from the channel.",
		}, labels).With(labelsAndValues...),
		EventsHandled: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: MetricsSubsystem,
			Name:      "events_handled",
			Help:      "Events handled.",
		}, labels).With(labelsAndValues...),
		EventsOut: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: MetricsSubsystem,
			Name:      "events_out",
			Help:      "Events output from routine.",
		}, labels).With(labelsAndValues...),
		ErrorsIn: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: MetricsSubsystem,
			Name:      "errors_in",
			Help:      "Errors read from the channel.",
		}, labels).With(labelsAndValues...),
		ErrorsHandled: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: MetricsSubsystem,
			Name:      "errors_handled",
			Help:      "Errors handled.",
		}, labels).With(labelsAndValues...),
		ErrorsOut: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: MetricsSubsystem,
			Name:      "errors_out",
			Help:      "Errors output from routine.",
		}, labels).With(labelsAndValues...),
		ErrorsSent: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: MetricsSubsystem,
			Name:      "errors_sent",
			Help:      "Errors sent to routine.",
		}, labels).With(labelsAndValues...),
		ErrorsShed: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: MetricsSubsystem,
			Name:      "errors_shed",
			Help:      "Errors dropped from sending.",
		}, labels).With(labelsAndValues...),
		EventsSent: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: MetricsSubsystem,
			Name:      "events_sent",
			Help:      "Events sent to routine.",
		}, labels).With(labelsAndValues...),
		EventsShed: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: MetricsSubsystem,
			Name:      "events_shed",
			Help:      "Events dropped from sending.",
		}, labels).With(labelsAndValues...),
	}
}

// NopMetrics returns no-op Metrics.
func NopMetrics() *Metrics {
	return &Metrics{
		EventsIn:      discard.NewCounter(),
		EventsHandled: discard.NewCounter(),
		EventsOut:     discard.NewCounter(),
		ErrorsIn:      discard.NewCounter(),
		ErrorsHandled: discard.NewCounter(),
		ErrorsOut:     discard.NewCounter(),
		EventsShed:    discard.NewCounter(),
		EventsSent:    discard.NewCounter(),
		ErrorsSent:    discard.NewCounter(),
		ErrorsShed:    discard.NewCounter(),
	}
}
@@ -1,192 +0,0 @@
package v2

import (
	"fmt"

	"github.com/tendermint/tendermint/p2p"
	tmState "github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/types"
)

// Events generated by the processor:
// block execution failure; the event indicates the peer(s) that caused the error
type pcBlockVerificationFailure struct {
	priorityNormal
	height       int64
	firstPeerID  p2p.ID
	secondPeerID p2p.ID
}

func (e pcBlockVerificationFailure) String() string {
	return fmt.Sprintf("pcBlockVerificationFailure{%d 1st peer: %v, 2nd peer: %v}",
		e.height, e.firstPeerID, e.secondPeerID)
}

// successful block execution
type pcBlockProcessed struct {
	priorityNormal
	height int64
	peerID p2p.ID
}

func (e pcBlockProcessed) String() string {
	return fmt.Sprintf("pcBlockProcessed{%d peer: %v}", e.height, e.peerID)
}

// the processor has finished
type pcFinished struct {
	priorityNormal
	blocksSynced int
	tmState      tmState.State
}

func (p pcFinished) Error() string {
	return "finished"
}

type queueItem struct {
	block  *types.Block
	peerID p2p.ID
}

type blockQueue map[int64]queueItem

type pcState struct {
	// blocks waiting to be processed
	queue blockQueue

	// draining indicates that the next rProcessBlock event with a queue miss constitutes completion
	draining bool

	// the number of blocks successfully synced by the processor
	blocksSynced int

	// the processorContext which contains the processor dependencies
	context processorContext
}

func (state *pcState) String() string {
	return fmt.Sprintf("height: %d queue length: %d draining: %v blocks synced: %d",
		state.height(), len(state.queue), state.draining, state.blocksSynced)
}

// newPcState returns a pcState initialized with the last verified block enqueued
func newPcState(context processorContext) *pcState {
	return &pcState{
		queue:        blockQueue{},
		draining:     false,
		blocksSynced: 0,
		context:      context,
	}
}

// nextTwo returns the next two unverified blocks
func (state *pcState) nextTwo() (queueItem, queueItem, error) {
	if first, ok := state.queue[state.height()+1]; ok {
		if second, ok := state.queue[state.height()+2]; ok {
			return first, second, nil
		}
	}
	return queueItem{}, queueItem{}, fmt.Errorf("not found")
}

// synced returns true when at most the last verified block remains in the queue
func (state *pcState) synced() bool {
	return len(state.queue) <= 1
}

func (state *pcState) enqueue(peerID p2p.ID, block *types.Block, height int64) {
	if item, ok := state.queue[height]; ok {
		panic(fmt.Sprintf(
			"duplicate block %d (%X) enqueued by processor (sent by %v; existing block %X from %v)",
			height, block.Hash(), peerID, item.block.Hash(), item.peerID))
	}

	state.queue[height] = queueItem{block: block, peerID: peerID}
}

func (state *pcState) height() int64 {
	return state.context.tmState().LastBlockHeight
}

// purgePeer removes from the queue all unprocessed blocks received from peerID
func (state *pcState) purgePeer(peerID p2p.ID) {
	// what if height is less than state.height?
	for height, item := range state.queue {
		if item.peerID == peerID {
			delete(state.queue, height)
		}
	}
}

// handle processes FSM events
func (state *pcState) handle(event Event) (Event, error) {
	switch event := event.(type) {
	case bcResetState:
		state.context.setState(event.state)
		return noOp, nil

	case scFinishedEv:
		if state.synced() {
			return pcFinished{tmState: state.context.tmState(), blocksSynced: state.blocksSynced}, nil
		}
		state.draining = true
		return noOp, nil

	case scPeerError:
		state.purgePeer(event.peerID)
		return noOp, nil

	case scBlockReceived:
		if event.block == nil {
			return noOp, nil
		}

		// enqueue the block if its height is higher than the state height, else ignore it
		if event.block.Height > state.height() {
			state.enqueue(event.peerID, event.block, event.block.Height)
		}
		return noOp, nil

	case rProcessBlock:
		tmState := state.context.tmState()
		firstItem, secondItem, err := state.nextTwo()
		if err != nil {
			if state.draining {
				return pcFinished{tmState: tmState, blocksSynced: state.blocksSynced}, nil
			}
			return noOp, nil
		}

		var (
			first, second = firstItem.block, secondItem.block
			firstParts    = first.MakePartSet(types.BlockPartSizeBytes)
			firstID       = types.BlockID{Hash: first.Hash(), PartSetHeader: firstParts.Header()}
		)

		// verify that the last commit of +second+ "confirms" the +first+ block
		err = state.context.verifyCommit(tmState.ChainID, firstID, first.Height, second.LastCommit)
		if err != nil {
			state.purgePeer(firstItem.peerID)
			if firstItem.peerID != secondItem.peerID {
				state.purgePeer(secondItem.peerID)
			}
			return pcBlockVerificationFailure{
					height: first.Height, firstPeerID: firstItem.peerID, secondPeerID: secondItem.peerID},
				nil
		}

		state.context.saveBlock(first, firstParts, second.LastCommit)

		if err := state.context.applyBlock(firstID, first); err != nil {
			panic(fmt.Sprintf("failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
		}

		delete(state.queue, first.Height)
		state.blocksSynced++

		return pcBlockProcessed{height: first.Height, peerID: firstItem.peerID}, nil
	}

	return noOp, nil
}
@@ -1,100 +0,0 @@
package v2

import (
	"fmt"

	"github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/types"
)

type processorContext interface {
	applyBlock(blockID types.BlockID, block *types.Block) error
	verifyCommit(chainID string, blockID types.BlockID, height int64, commit *types.Commit) error
	saveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit)
	tmState() state.State
	setState(state.State)
}

type pContext struct {
	store   blockStore
	applier blockApplier
	state   state.State
}

func newProcessorContext(st blockStore, ex blockApplier, s state.State) *pContext {
	return &pContext{
		store:   st,
		applier: ex,
		state:   s,
	}
}

func (pc *pContext) applyBlock(blockID types.BlockID, block *types.Block) error {
	newState, _, err := pc.applier.ApplyBlock(pc.state, blockID, block)
	pc.state = newState
	return err
}

func (pc pContext) tmState() state.State {
	return pc.state
}

func (pc *pContext) setState(state state.State) {
	pc.state = state
}

func (pc pContext) verifyCommit(chainID string, blockID types.BlockID, height int64, commit *types.Commit) error {
	return pc.state.Validators.VerifyCommitLight(chainID, blockID, height, commit)
}

func (pc *pContext) saveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) {
	pc.store.SaveBlock(block, blockParts, seenCommit)
}

type mockPContext struct {
	applicationBL  []int64
	verificationBL []int64
	state          state.State
}

func newMockProcessorContext(
	state state.State,
	verificationBlackList []int64,
	applicationBlackList []int64) *mockPContext {
	return &mockPContext{
		applicationBL:  applicationBlackList,
		verificationBL: verificationBlackList,
		state:          state,
	}
}

func (mpc *mockPContext) applyBlock(blockID types.BlockID, block *types.Block) error {
	for _, h := range mpc.applicationBL {
		if h == block.Height {
			return fmt.Errorf("generic application error")
		}
	}
	mpc.state.LastBlockHeight = block.Height
	return nil
}

func (mpc *mockPContext) verifyCommit(chainID string, blockID types.BlockID, height int64, commit *types.Commit) error {
	for _, h := range mpc.verificationBL {
		if h == height {
			return fmt.Errorf("generic verification error")
		}
	}
	return nil
}

func (mpc *mockPContext) saveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) {
}

func (mpc *mockPContext) setState(state state.State) {
	mpc.state = state
}

func (mpc *mockPContext) tmState() state.State {
	return mpc.state
}
@@ -1,306 +0,0 @@
package v2

import (
	"testing"

	"github.com/stretchr/testify/assert"

	"github.com/tendermint/tendermint/p2p"
	tmState "github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/types"
)

// pcBlock is a test helper structure with simple types. Its purpose is to help with test readability.
type pcBlock struct {
	pid    string
	height int64
}

// params is a test structure used to create processor state.
type params struct {
	height       int64
	items        []pcBlock
	blocksSynced int
	verBL        []int64
	appBL        []int64
	draining     bool
}

// makePcBlock makes an empty block.
func makePcBlock(height int64) *types.Block {
	return &types.Block{Header: types.Header{Height: height}}
}

// makeState takes test parameters and creates a specific processor state.
func makeState(p *params) *pcState {
	var (
		tmState = tmState.State{LastBlockHeight: p.height}
		context = newMockProcessorContext(tmState, p.verBL, p.appBL)
	)
	state := newPcState(context)

	for _, item := range p.items {
		state.enqueue(p2p.ID(item.pid), makePcBlock(item.height), item.height)
	}

	state.blocksSynced = p.blocksSynced
	state.draining = p.draining
	return state
}

func mBlockResponse(peerID p2p.ID, height int64) scBlockReceived {
	return scBlockReceived{
		peerID: peerID,
		block:  makePcBlock(height),
	}
}

type pcFsmMakeStateValues struct {
	currentState  *params
	event         Event
	wantState     *params
	wantNextEvent Event
	wantErr       error
	wantPanic     bool
}

type testFields struct {
	name  string
	steps []pcFsmMakeStateValues
}
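// executeProcessorTests runs each test's steps through pcState.handle,
// checking the returned event, the error, and the resulting state at every step.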
func executeProcessorTests(t *testing.T, tests []testFields) {
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
var state *pcState
|
||||
for _, step := range tt.steps {
|
||||
defer func() {
|
||||
r := recover()
|
||||
if (r != nil) != step.wantPanic {
|
||||
t.Errorf("recover = %v, wantPanic = %v", r, step.wantPanic)
|
||||
}
|
||||
}()
|
||||
|
||||
// First step must always initialise the currentState as state.
|
||||
if step.currentState != nil {
|
||||
state = makeState(step.currentState)
|
||||
}
|
||||
if state == nil {
|
||||
panic("Bad (initial?) step")
|
||||
}
|
||||
|
||||
nextEvent, err := state.handle(step.event)
|
||||
t.Log(state)
|
||||
assert.Equal(t, step.wantErr, err)
|
||||
assert.Equal(t, makeState(step.wantState), state)
|
||||
assert.Equal(t, step.wantNextEvent, nextEvent)
|
||||
// Next step may use the wantedState as their currentState.
|
||||
state = makeState(step.wantState)
|
||||
}
|
||||
})
|
||||
}
|
||||
}

func TestRProcessPeerError(t *testing.T) {
    tests := []testFields{
        {
            name: "error for existing peer",
            steps: []pcFsmMakeStateValues{
                {
                    currentState:  &params{items: []pcBlock{{"P1", 1}, {"P2", 2}}},
                    event:         scPeerError{peerID: "P2"},
                    wantState:     &params{items: []pcBlock{{"P1", 1}}},
                    wantNextEvent: noOp,
                },
            },
        },
        {
            name: "error for unknown peer",
            steps: []pcFsmMakeStateValues{
                {
                    currentState:  &params{items: []pcBlock{{"P1", 1}, {"P2", 2}}},
                    event:         scPeerError{peerID: "P3"},
                    wantState:     &params{items: []pcBlock{{"P1", 1}, {"P2", 2}}},
                    wantNextEvent: noOp,
                },
            },
        },
    }

    executeProcessorTests(t, tests)
}

func TestPcBlockResponse(t *testing.T) {
    tests := []testFields{
        {
            name: "add one block",
            steps: []pcFsmMakeStateValues{
                {
                    currentState: &params{}, event: mBlockResponse("P1", 1),
                    wantState: &params{items: []pcBlock{{"P1", 1}}}, wantNextEvent: noOp,
                },
            },
        },

        {
            name: "add two blocks",
            steps: []pcFsmMakeStateValues{
                {
                    currentState: &params{}, event: mBlockResponse("P1", 3),
                    wantState: &params{items: []pcBlock{{"P1", 3}}}, wantNextEvent: noOp,
                },
                { // use previous wantState as currentState
                    event:     mBlockResponse("P1", 4),
                    wantState: &params{items: []pcBlock{{"P1", 3}, {"P1", 4}}}, wantNextEvent: noOp,
                },
            },
        },
    }

    executeProcessorTests(t, tests)
}

func TestRProcessBlockSuccess(t *testing.T) {
    tests := []testFields{
        {
            name: "noop - no blocks over current height",
            steps: []pcFsmMakeStateValues{
                {
                    currentState: &params{}, event: rProcessBlock{},
                    wantState: &params{}, wantNextEvent: noOp,
                },
            },
        },
        {
            name: "noop - high new blocks",
            steps: []pcFsmMakeStateValues{
                {
                    currentState: &params{height: 5, items: []pcBlock{{"P1", 30}, {"P2", 31}}}, event: rProcessBlock{},
                    wantState: &params{height: 5, items: []pcBlock{{"P1", 30}, {"P2", 31}}}, wantNextEvent: noOp,
                },
            },
        },
        {
            name: "blocks H+1 and H+2 present",
            steps: []pcFsmMakeStateValues{
                {
                    currentState: &params{items: []pcBlock{{"P1", 1}, {"P2", 2}}}, event: rProcessBlock{},
                    wantState:     &params{height: 1, items: []pcBlock{{"P2", 2}}, blocksSynced: 1},
                    wantNextEvent: pcBlockProcessed{height: 1, peerID: "P1"},
                },
            },
        },
        {
            name: "blocks H+1 and H+2 present after draining",
            steps: []pcFsmMakeStateValues{
                { // some contiguous blocks - on stop, check that draining is set
                    currentState:  &params{items: []pcBlock{{"P1", 1}, {"P2", 2}, {"P1", 4}}},
                    event:         scFinishedEv{},
                    wantState:     &params{items: []pcBlock{{"P1", 1}, {"P2", 2}, {"P1", 4}}, draining: true},
                    wantNextEvent: noOp,
                },
                {
                    event:         rProcessBlock{},
                    wantState:     &params{height: 1, items: []pcBlock{{"P2", 2}, {"P1", 4}}, blocksSynced: 1, draining: true},
                    wantNextEvent: pcBlockProcessed{height: 1, peerID: "P1"},
                },
                { // finish when H+1 and/or H+2 are missing
                    event:         rProcessBlock{},
                    wantState:     &params{height: 1, items: []pcBlock{{"P2", 2}, {"P1", 4}}, blocksSynced: 1, draining: true},
                    wantNextEvent: pcFinished{tmState: tmState.State{LastBlockHeight: 1}, blocksSynced: 1},
                },
            },
        },
    }

    executeProcessorTests(t, tests)
}

func TestRProcessBlockFailures(t *testing.T) {
    tests := []testFields{
        {
            name: "blocks H+1 and H+2 present from different peers - H+1 verification fails",
            steps: []pcFsmMakeStateValues{
                {
                    currentState: &params{items: []pcBlock{{"P1", 1}, {"P2", 2}}, verBL: []int64{1}}, event: rProcessBlock{},
                    wantState:     &params{items: []pcBlock{}, verBL: []int64{1}},
                    wantNextEvent: pcBlockVerificationFailure{height: 1, firstPeerID: "P1", secondPeerID: "P2"},
                },
            },
        },
        {
            name: "blocks H+1 and H+2 present from same peer - H+1 applyBlock fails",
            steps: []pcFsmMakeStateValues{
                {
                    currentState: &params{items: []pcBlock{{"P1", 1}, {"P2", 2}}, appBL: []int64{1}}, event: rProcessBlock{},
                    wantState: &params{items: []pcBlock{}, appBL: []int64{1}}, wantPanic: true,
                },
            },
        },
        {
            name: "blocks H+1 and H+2 present from same peers - H+1 verification fails",
            steps: []pcFsmMakeStateValues{
                {
                    currentState: &params{height: 0, items: []pcBlock{{"P1", 1}, {"P1", 2}, {"P2", 3}},
                        verBL: []int64{1}}, event: rProcessBlock{},
                    wantState:     &params{height: 0, items: []pcBlock{{"P2", 3}}, verBL: []int64{1}},
                    wantNextEvent: pcBlockVerificationFailure{height: 1, firstPeerID: "P1", secondPeerID: "P1"},
                },
            },
        },
        {
            name: "blocks H+1 and H+2 present from different peers - H+1 applyBlock fails",
            steps: []pcFsmMakeStateValues{
                {
                    currentState: &params{items: []pcBlock{{"P1", 1}, {"P2", 2}, {"P2", 3}}, appBL: []int64{1}},
                    event:        rProcessBlock{},
                    wantState:    &params{items: []pcBlock{{"P2", 3}}, appBL: []int64{1}}, wantPanic: true,
                },
            },
        },
    }

    executeProcessorTests(t, tests)
}

func TestScFinishedEv(t *testing.T) {
    tests := []testFields{
        {
            name: "no blocks",
            steps: []pcFsmMakeStateValues{
                {
                    currentState: &params{height: 100, items: []pcBlock{}, blocksSynced: 100}, event: scFinishedEv{},
                    wantState:     &params{height: 100, items: []pcBlock{}, blocksSynced: 100},
                    wantNextEvent: pcFinished{tmState: tmState.State{LastBlockHeight: 100}, blocksSynced: 100},
                },
            },
        },
        {
            name: "maxHeight+1 block present",
            steps: []pcFsmMakeStateValues{
                {
                    currentState: &params{height: 100, items: []pcBlock{
                        {"P1", 101}}, blocksSynced: 100}, event: scFinishedEv{},
                    wantState:     &params{height: 100, items: []pcBlock{{"P1", 101}}, blocksSynced: 100},
                    wantNextEvent: pcFinished{tmState: tmState.State{LastBlockHeight: 100}, blocksSynced: 100},
                },
            },
        },
        {
            name: "more blocks present",
            steps: []pcFsmMakeStateValues{
                {
                    currentState: &params{height: 100, items: []pcBlock{
                        {"P1", 101}, {"P1", 102}}, blocksSynced: 100}, event: scFinishedEv{},
                    wantState: &params{height: 100, items: []pcBlock{
                        {"P1", 101}, {"P1", 102}}, blocksSynced: 100, draining: true},
                    wantNextEvent: noOp,
                    wantErr:       nil,
                },
            },
        },
    }

    executeProcessorTests(t, tests)
}

@@ -1,564 +0,0 @@
package v2

import (
    "errors"
    "fmt"
    "time"

    "github.com/tendermint/tendermint/behaviour"
    bc "github.com/tendermint/tendermint/blockchain"
    "github.com/tendermint/tendermint/libs/log"
    tmsync "github.com/tendermint/tendermint/libs/sync"
    "github.com/tendermint/tendermint/p2p"
    bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
    "github.com/tendermint/tendermint/state"
    "github.com/tendermint/tendermint/types"
)

const (
    // chBufferSize is the buffer size of all event channels.
    chBufferSize int = 1000
)

type blockStore interface {
    LoadBlock(height int64) *types.Block
    SaveBlock(*types.Block, *types.PartSet, *types.Commit)
    Base() int64
    Height() int64
}

// BlockchainReactor handles the fast sync protocol.
type BlockchainReactor struct {
    p2p.BaseReactor

    fastSync    bool // if true, enable fast sync on start
    stateSynced bool // set to true when SwitchToFastSync is called by state sync
    scheduler   *Routine
    processor   *Routine
    logger      log.Logger

    mtx           tmsync.RWMutex
    maxPeerHeight int64
    syncHeight    int64
    events        chan Event // non-nil during a fast sync

    reporter behaviour.Reporter
    io       iIO
    store    blockStore
}
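// Added note (not part of the original file): a rough sketch of the event
// flow wired up by demux further below, for orientation.
//
//    peers -> Receive -> r.events -> demux -> scheduler <-> processor -> io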

//nolint:unused,deadcode
type blockVerifier interface {
    VerifyCommit(chainID string, blockID types.BlockID, height int64, commit *types.Commit) error
}

type blockApplier interface {
    ApplyBlock(state state.State, blockID types.BlockID, block *types.Block) (state.State, int64, error)
}

// XXX: unify naming in this package around tmState
func newReactor(state state.State, store blockStore, reporter behaviour.Reporter,
    blockApplier blockApplier, fastSync bool) *BlockchainReactor {
    initHeight := state.LastBlockHeight + 1
    if initHeight == 1 {
        initHeight = state.InitialHeight
    }
    scheduler := newScheduler(initHeight, time.Now())
    pContext := newProcessorContext(store, blockApplier, state)
    // TODO: Fix naming to just newProcessor
    // newPcState requires a processorContext
    processor := newPcState(pContext)

    return &BlockchainReactor{
        scheduler: newRoutine("scheduler", scheduler.handle, chBufferSize),
        processor: newRoutine("processor", processor.handle, chBufferSize),
        store:     store,
        reporter:  reporter,
        logger:    log.NewNopLogger(),
        fastSync:  fastSync,
    }
}

// NewBlockchainReactor creates a new reactor instance.
func NewBlockchainReactor(
    state state.State,
    blockApplier blockApplier,
    store blockStore,
    fastSync bool) *BlockchainReactor {
    reporter := behaviour.NewMockReporter()
    return newReactor(state, store, reporter, blockApplier, fastSync)
}

// SetSwitch implements the Reactor interface.
func (r *BlockchainReactor) SetSwitch(sw *p2p.Switch) {
    r.Switch = sw
    if sw != nil {
        r.io = newSwitchIo(sw)
    } else {
        r.io = nil
    }
}

func (r *BlockchainReactor) setMaxPeerHeight(height int64) {
    r.mtx.Lock()
    defer r.mtx.Unlock()
    if height > r.maxPeerHeight {
        r.maxPeerHeight = height
    }
}

func (r *BlockchainReactor) setSyncHeight(height int64) {
    r.mtx.Lock()
    defer r.mtx.Unlock()
    r.syncHeight = height
}

// SyncHeight returns the height to which the BlockchainReactor has synced.
func (r *BlockchainReactor) SyncHeight() int64 {
    r.mtx.RLock()
    defer r.mtx.RUnlock()
    return r.syncHeight
}

// SetLogger sets the logger of the reactor.
func (r *BlockchainReactor) SetLogger(logger log.Logger) {
    r.logger = logger
    r.scheduler.setLogger(logger)
    r.processor.setLogger(logger)
}

// Start implements the cmn.Service interface.
func (r *BlockchainReactor) Start() error {
    r.reporter = behaviour.NewSwitchReporter(r.BaseReactor.Switch)
    if r.fastSync {
        err := r.startSync(nil)
        if err != nil {
            return fmt.Errorf("failed to start fast sync: %w", err)
        }
    }
    return nil
}

// startSync begins a fast sync, signalled by r.events being non-nil. If state is non-nil,
// the scheduler and processor are updated with this state on startup.
func (r *BlockchainReactor) startSync(state *state.State) error {
    r.mtx.Lock()
    defer r.mtx.Unlock()
    if r.events != nil {
        return errors.New("fast sync already in progress")
    }
    r.events = make(chan Event, chBufferSize)
    go r.scheduler.start()
    go r.processor.start()
    if state != nil {
        <-r.scheduler.ready()
        <-r.processor.ready()
        r.scheduler.send(bcResetState{state: *state})
        r.processor.send(bcResetState{state: *state})
    }
    go r.demux(r.events)
    return nil
}

// endSync ends a fast sync.
func (r *BlockchainReactor) endSync() {
    r.mtx.Lock()
    defer r.mtx.Unlock()
    if r.events != nil {
        close(r.events)
    }
    r.events = nil
    r.scheduler.stop()
    r.processor.stop()
}

// SwitchToFastSync is called by the state sync reactor when switching to fast sync.
func (r *BlockchainReactor) SwitchToFastSync(state state.State) error {
    r.stateSynced = true
    state = state.Copy()
    return r.startSync(&state)
}

// reactor generated ticker events:
// ticker for cleaning peers
type rTryPrunePeer struct {
    priorityHigh
    time time.Time
}

func (e rTryPrunePeer) String() string {
    return fmt.Sprintf("rTryPrunePeer{%v}", e.time)
}

// ticker event for scheduling block requests
type rTrySchedule struct {
    priorityHigh
    time time.Time
}

func (e rTrySchedule) String() string {
    return fmt.Sprintf("rTrySchedule{%v}", e.time)
}

// ticker for block processing
type rProcessBlock struct {
    priorityNormal
}

func (e rProcessBlock) String() string {
    return "rProcessBlock"
}

// reactor generated events based on blockchain related messages from peers:
// blockResponse message received from a peer
type bcBlockResponse struct {
    priorityNormal
    time   time.Time
    peerID p2p.ID
    size   int64
    block  *types.Block
}

func (resp bcBlockResponse) String() string {
    return fmt.Sprintf("bcBlockResponse{%d#%X (size: %d bytes) from %v at %v}",
        resp.block.Height, resp.block.Hash(), resp.size, resp.peerID, resp.time)
}

// noBlockResponse message received from a peer
type bcNoBlockResponse struct {
    priorityNormal
    time   time.Time
    peerID p2p.ID
    height int64
}

func (resp bcNoBlockResponse) String() string {
    return fmt.Sprintf("bcNoBlockResponse{%v has no block at height %d at %v}",
        resp.peerID, resp.height, resp.time)
}

// statusResponse message received from a peer
type bcStatusResponse struct {
    priorityNormal
    time   time.Time
    peerID p2p.ID
    base   int64
    height int64
}

func (resp bcStatusResponse) String() string {
    return fmt.Sprintf("bcStatusResponse{%v is at height %d (base: %d) at %v}",
        resp.peerID, resp.height, resp.base, resp.time)
}

// new peer is connected
type bcAddNewPeer struct {
    priorityNormal
    peerID p2p.ID
}

func (resp bcAddNewPeer) String() string {
    return fmt.Sprintf("bcAddNewPeer{%v}", resp.peerID)
}

// existing peer is removed
type bcRemovePeer struct {
    priorityHigh
    peerID p2p.ID
    reason interface{}
}

func (resp bcRemovePeer) String() string {
    return fmt.Sprintf("bcRemovePeer{%v due to %v}", resp.peerID, resp.reason)
}

// resets the scheduler and processor state, e.g. following a switch from state syncing
type bcResetState struct {
    priorityHigh
    state state.State
}

func (e bcResetState) String() string {
    return fmt.Sprintf("bcResetState{%v}", e.state)
}

// Takes the channel as a parameter to avoid race conditions on r.events.
func (r *BlockchainReactor) demux(events <-chan Event) {
    var lastRate = 0.0
    var lastHundred = time.Now()

    var (
        processBlockFreq = 20 * time.Millisecond
        doProcessBlockCh = make(chan struct{}, 1)
        doProcessBlockTk = time.NewTicker(processBlockFreq)
    )
    defer doProcessBlockTk.Stop()

    var (
        prunePeerFreq = 1 * time.Second
        doPrunePeerCh = make(chan struct{}, 1)
        doPrunePeerTk = time.NewTicker(prunePeerFreq)
    )
    defer doPrunePeerTk.Stop()

    var (
        scheduleFreq = 20 * time.Millisecond
        doScheduleCh = make(chan struct{}, 1)
        doScheduleTk = time.NewTicker(scheduleFreq)
    )
    defer doScheduleTk.Stop()

    var (
        statusFreq = 10 * time.Second
        doStatusCh = make(chan struct{}, 1)
        doStatusTk = time.NewTicker(statusFreq)
    )
    defer doStatusTk.Stop()
    doStatusCh <- struct{}{} // immediately broadcast to get status of existing peers

    // XXX: Extract timers to make testing atemporal
    for {
        select {
        // Pacers: send at most per frequency but don't saturate
        case <-doProcessBlockTk.C:
            select {
            case doProcessBlockCh <- struct{}{}:
            default:
            }
        case <-doPrunePeerTk.C:
            select {
            case doPrunePeerCh <- struct{}{}:
            default:
            }
        case <-doScheduleTk.C:
            select {
            case doScheduleCh <- struct{}{}:
            default:
            }
        case <-doStatusTk.C:
            select {
            case doStatusCh <- struct{}{}:
            default:
            }

        // Tickers: perform tasks periodically
        case <-doScheduleCh:
            r.scheduler.send(rTrySchedule{time: time.Now()})
        case <-doPrunePeerCh:
            r.scheduler.send(rTryPrunePeer{time: time.Now()})
        case <-doProcessBlockCh:
            r.processor.send(rProcessBlock{})
        case <-doStatusCh:
            if err := r.io.broadcastStatusRequest(); err != nil {
                r.logger.Error("Error broadcasting status request", "err", err)
            }

        // Events from peers. Closing the channel signals event loop termination.
        case event, ok := <-events:
            if !ok {
                r.logger.Info("Stopping event processing")
                return
            }
            switch event := event.(type) {
            case bcStatusResponse:
                r.setMaxPeerHeight(event.height)
                r.scheduler.send(event)
            case bcAddNewPeer, bcRemovePeer, bcBlockResponse, bcNoBlockResponse:
                r.scheduler.send(event)
            default:
                r.logger.Error("Received unexpected event", "event", fmt.Sprintf("%T", event))
            }

        // Incremental events from scheduler
        case event := <-r.scheduler.next():
            switch event := event.(type) {
            case scBlockReceived:
                r.processor.send(event)
            case scPeerError:
                r.processor.send(event)
                if err := r.reporter.Report(behaviour.BadMessage(event.peerID, "scPeerError")); err != nil {
                    r.logger.Error("Error reporting peer", "err", err)
                }
            case scBlockRequest:
                if err := r.io.sendBlockRequest(event.peerID, event.height); err != nil {
                    r.logger.Error("Error sending block request", "err", err)
                }
            case scFinishedEv:
                r.processor.send(event)
                r.scheduler.stop()
            case scSchedulerFail:
                r.logger.Error("Scheduler failure", "err", event.reason.Error())
            case scPeersPruned:
                // Remove peers from the processor.
                for _, peerID := range event.peers {
                    r.processor.send(scPeerError{peerID: peerID, reason: errors.New("peer was pruned")})
                }
                r.logger.Debug("Pruned peers", "count", len(event.peers))
            case noOpEvent:
            default:
                r.logger.Error("Received unexpected scheduler event", "event", fmt.Sprintf("%T", event))
            }

        // Incremental events from processor
        case event := <-r.processor.next():
            switch event := event.(type) {
            case pcBlockProcessed:
                r.setSyncHeight(event.height)
                if r.syncHeight%100 == 0 {
                    lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
                    r.logger.Info("Fast Sync Rate", "height", r.syncHeight,
                        "max_peer_height", r.maxPeerHeight, "blocks/s", lastRate)
                    lastHundred = time.Now()
                }
                r.scheduler.send(event)
            case pcBlockVerificationFailure:
                r.scheduler.send(event)
            case pcFinished:
                r.logger.Info("Fast sync complete, switching to consensus")
                if !r.io.trySwitchToConsensus(event.tmState, event.blocksSynced > 0 || r.stateSynced) {
                    r.logger.Error("Failed to switch to consensus reactor")
                }
                r.endSync()
                return
            case noOpEvent:
            default:
                r.logger.Error("Received unexpected processor event", "event", fmt.Sprintf("%T", event))
            }

        // Terminal event from scheduler
        case err := <-r.scheduler.final():
            switch err {
            case nil:
                r.logger.Info("Scheduler stopped")
            default:
                r.logger.Error("Scheduler aborted with error", "err", err)
            }

        // Terminal event from processor
        case err := <-r.processor.final():
            switch err {
            case nil:
                r.logger.Info("Processor stopped")
            default:
                r.logger.Error("Processor aborted with error", "err", err)
            }
        }
    }
}
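// Sketch of the pacer idiom used above (added for illustration): a ticker
// feeds a 1-buffered channel through a non-blocking send, so a slow consumer
// sees at most one pending "do work" signal instead of a backlog of ticks.
//
//    tk := time.NewTicker(freq)
//    ch := make(chan struct{}, 1)
//    ...
//    case <-tk.C:
//        select {
//        case ch <- struct{}{}: // arm the pacer
//        default:              // already armed; drop the tick
//        }
//    case <-ch:
//        doWork()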

// Stop implements the cmn.Service interface.
func (r *BlockchainReactor) Stop() error {
    r.logger.Info("reactor stopping")
    r.endSync()
    r.logger.Info("reactor stopped")
    return nil
}

// Receive implements Reactor by handling different message types.
func (r *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
    msg, err := bc.DecodeMsg(msgBytes)
    if err != nil {
        r.logger.Error("error decoding message",
            "src", src.ID(), "chId", chID, "msg", msg, "err", err)
        _ = r.reporter.Report(behaviour.BadMessage(src.ID(), err.Error()))
        return
    }

    if err = bc.ValidateMsg(msg); err != nil {
        r.logger.Error("peer sent us an invalid msg", "peer", src, "msg", msg, "err", err)
        _ = r.reporter.Report(behaviour.BadMessage(src.ID(), err.Error()))
        return
    }

    r.logger.Debug("Receive", "src", src.ID(), "chID", chID, "msg", msg)

    switch msg := msg.(type) {
    case *bcproto.StatusRequest:
        if err := r.io.sendStatusResponse(r.store.Base(), r.store.Height(), src.ID()); err != nil {
            r.logger.Error("Could not send status message to peer", "src", src)
        }

    case *bcproto.BlockRequest:
        block := r.store.LoadBlock(msg.Height)
        if block != nil {
            if err = r.io.sendBlockToPeer(block, src.ID()); err != nil {
                r.logger.Error("Could not send block message to peer", "err", err)
            }
        } else {
            r.logger.Info("peer asking for a block we don't have", "src", src, "height", msg.Height)
            peerID := src.ID()
            if err = r.io.sendBlockNotFound(msg.Height, peerID); err != nil {
                r.logger.Error("Couldn't send block not found", "err", err)
            }
        }

    case *bcproto.StatusResponse:
        r.mtx.RLock()
        if r.events != nil {
            r.events <- bcStatusResponse{peerID: src.ID(), base: msg.Base, height: msg.Height}
        }
        r.mtx.RUnlock()

    case *bcproto.BlockResponse:
        bi, err := types.BlockFromProto(msg.Block)
        if err != nil {
            r.logger.Error("error converting block from protobuf", "err", err)
            return
        }
        r.mtx.RLock()
        if r.events != nil {
            r.events <- bcBlockResponse{
                peerID: src.ID(),
                block:  bi,
                size:   int64(len(msgBytes)),
                time:   time.Now(),
            }
        }
        r.mtx.RUnlock()

    case *bcproto.NoBlockResponse:
        r.mtx.RLock()
        if r.events != nil {
            r.events <- bcNoBlockResponse{peerID: src.ID(), height: msg.Height, time: time.Now()}
        }
        r.mtx.RUnlock()
    }
}

// AddPeer implements the Reactor interface.
func (r *BlockchainReactor) AddPeer(peer p2p.Peer) {
    err := r.io.sendStatusResponse(r.store.Base(), r.store.Height(), peer.ID())
    if err != nil {
        r.logger.Error("Could not send status message to new peer", "src", peer.ID(), "height", r.SyncHeight())
    }
    r.mtx.RLock()
    defer r.mtx.RUnlock()
    if r.events != nil {
        r.events <- bcAddNewPeer{peerID: peer.ID()}
    }
}

// RemovePeer implements the Reactor interface.
func (r *BlockchainReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
    r.mtx.RLock()
    defer r.mtx.RUnlock()
    if r.events != nil {
        r.events <- bcRemovePeer{
            peerID: peer.ID(),
            reason: reason,
        }
    }
}

// GetChannels implements the Reactor interface.
func (r *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
    return []*p2p.ChannelDescriptor{
        {
            ID:                  BlockchainChannel,
            Priority:            5,
            SendQueueCapacity:   2000,
            RecvBufferCapacity:  50 * 4096,
            RecvMessageCapacity: bc.MaxMsgSize,
        },
    }
}

@@ -1,555 +0,0 @@
package v2

import (
    "fmt"
    "net"
    "os"
    "sort"
    "sync"
    "testing"
    "time"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    dbm "github.com/tendermint/tm-db"

    abci "github.com/tendermint/tendermint/abci/types"
    "github.com/tendermint/tendermint/behaviour"
    bc "github.com/tendermint/tendermint/blockchain"
    cfg "github.com/tendermint/tendermint/config"
    "github.com/tendermint/tendermint/libs/log"
    "github.com/tendermint/tendermint/libs/service"
    "github.com/tendermint/tendermint/mempool/mock"
    "github.com/tendermint/tendermint/p2p"
    "github.com/tendermint/tendermint/p2p/conn"
    bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
    "github.com/tendermint/tendermint/proxy"
    sm "github.com/tendermint/tendermint/state"
    "github.com/tendermint/tendermint/store"
    "github.com/tendermint/tendermint/types"
    tmtime "github.com/tendermint/tendermint/types/time"
)

type mockPeer struct {
    service.Service
    id p2p.ID
}

func (mp mockPeer) FlushStop()           {}
func (mp mockPeer) ID() p2p.ID           { return mp.id }
func (mp mockPeer) RemoteIP() net.IP     { return net.IP{} }
func (mp mockPeer) RemoteAddr() net.Addr { return &net.TCPAddr{IP: mp.RemoteIP(), Port: 8800} }

func (mp mockPeer) IsOutbound() bool   { return true }
func (mp mockPeer) IsPersistent() bool { return true }
func (mp mockPeer) CloseConn() error   { return nil }

func (mp mockPeer) NodeInfo() p2p.NodeInfo {
    return p2p.DefaultNodeInfo{
        DefaultNodeID: "",
        ListenAddr:    "",
    }
}
func (mp mockPeer) Status() conn.ConnectionStatus { return conn.ConnectionStatus{} }
func (mp mockPeer) SocketAddr() *p2p.NetAddress   { return &p2p.NetAddress{} }

func (mp mockPeer) Send(byte, []byte) bool    { return true }
func (mp mockPeer) TrySend(byte, []byte) bool { return true }

func (mp mockPeer) Set(string, interface{}) {}
func (mp mockPeer) Get(string) interface{}  { return struct{}{} }

// nolint:unused // ignore
type mockBlockStore struct {
    blocks map[int64]*types.Block
}

// nolint:unused // ignore
func (ml *mockBlockStore) Height() int64 {
    return int64(len(ml.blocks))
}

// nolint:unused // ignore
func (ml *mockBlockStore) LoadBlock(height int64) *types.Block {
    return ml.blocks[height]
}

// nolint:unused // ignore
func (ml *mockBlockStore) SaveBlock(block *types.Block, part *types.PartSet, commit *types.Commit) {
    ml.blocks[block.Height] = block
}

type mockBlockApplier struct {
}

// XXX: Add whitelist/blacklist?
func (mba *mockBlockApplier) ApplyBlock(
    state sm.State, blockID types.BlockID, block *types.Block,
) (sm.State, int64, error) {
    state.LastBlockHeight++
    return state, 0, nil
}

type mockSwitchIo struct {
    mtx                 sync.Mutex
    switchedToConsensus bool
    numStatusResponse   int
    numBlockResponse    int
    numNoBlockResponse  int
}

func (sio *mockSwitchIo) sendBlockRequest(peerID p2p.ID, height int64) error {
    return nil
}

func (sio *mockSwitchIo) sendStatusResponse(base, height int64, peerID p2p.ID) error {
    sio.mtx.Lock()
    defer sio.mtx.Unlock()
    sio.numStatusResponse++
    return nil
}

func (sio *mockSwitchIo) sendBlockToPeer(block *types.Block, peerID p2p.ID) error {
    sio.mtx.Lock()
    defer sio.mtx.Unlock()
    sio.numBlockResponse++
    return nil
}

func (sio *mockSwitchIo) sendBlockNotFound(height int64, peerID p2p.ID) error {
    sio.mtx.Lock()
    defer sio.mtx.Unlock()
    sio.numNoBlockResponse++
    return nil
}

func (sio *mockSwitchIo) trySwitchToConsensus(state sm.State, skipWAL bool) bool {
    sio.mtx.Lock()
    defer sio.mtx.Unlock()
    sio.switchedToConsensus = true
    return true
}

func (sio *mockSwitchIo) broadcastStatusRequest() error {
    return nil
}

type testReactorParams struct {
    logger      log.Logger
    genDoc      *types.GenesisDoc
    privVals    []types.PrivValidator
    startHeight int64
    mockA       bool
}

func newTestReactor(p testReactorParams) *BlockchainReactor {
    store, state, _ := newReactorStore(p.genDoc, p.privVals, p.startHeight)
    reporter := behaviour.NewMockReporter()

    var appl blockApplier

    if p.mockA {
        appl = &mockBlockApplier{}
    } else {
        app := &testApp{}
        cc := proxy.NewLocalClientCreator(app)
        proxyApp := proxy.NewAppConns(cc)
        err := proxyApp.Start()
        if err != nil {
            panic(fmt.Errorf("error starting app: %w", err))
        }
        db := dbm.NewMemDB()
        stateStore := sm.NewStore(db)
        appl = sm.NewBlockExecutor(stateStore, p.logger, proxyApp.Consensus(), mock.Mempool{}, sm.EmptyEvidencePool{})
        if err = stateStore.Save(state); err != nil {
            panic(err)
        }
    }

    r := newReactor(state, store, reporter, appl, true)
    logger := log.TestingLogger()
    r.SetLogger(logger.With("module", "blockchain"))

    return r
}

// This test is left here and not deleted to retain the termination cases for
// future improvement in [#4482](https://github.com/tendermint/tendermint/issues/4482).
// func TestReactorTerminationScenarios(t *testing.T) {

//     config := cfg.ResetTestRoot("blockchain_reactor_v2_test")
//     defer os.RemoveAll(config.RootDir)
//     genDoc, privVals := randGenesisDoc(config.ChainID(), 1, false, 30)
//     refStore, _, _ := newReactorStore(genDoc, privVals, 20)

//     params := testReactorParams{
//         logger:      log.TestingLogger(),
//         genDoc:      genDoc,
//         privVals:    privVals,
//         startHeight: 10,
//         bufferSize:  100,
//         mockA:       true,
//     }

//     type testEvent struct {
//         evType string
//         peer   string
//         height int64
//     }

//     tests := []struct {
//         name   string
//         params testReactorParams
//         msgs   []testEvent
//     }{
//         {
//             name:   "simple termination on max peer height - one peer",
//             params: params,
//             msgs: []testEvent{
//                 {evType: "AddPeer", peer: "P1"},
//                 {evType: "ReceiveS", peer: "P1", height: 13},
//                 {evType: "BlockReq"},
//                 {evType: "ReceiveB", peer: "P1", height: 11},
//                 {evType: "BlockReq"},
//                 {evType: "BlockReq"},
//                 {evType: "ReceiveB", peer: "P1", height: 12},
//                 {evType: "Process"},
//                 {evType: "ReceiveB", peer: "P1", height: 13},
//                 {evType: "Process"},
//             },
//         },
//         {
//             name:   "simple termination on max peer height - two peers",
//             params: params,
//             msgs: []testEvent{
//                 {evType: "AddPeer", peer: "P1"},
//                 {evType: "AddPeer", peer: "P2"},
//                 {evType: "ReceiveS", peer: "P1", height: 13},
//                 {evType: "ReceiveS", peer: "P2", height: 15},
//                 {evType: "BlockReq"},
//                 {evType: "BlockReq"},
//                 {evType: "ReceiveB", peer: "P1", height: 11},
//                 {evType: "ReceiveB", peer: "P2", height: 12},
//                 {evType: "Process"},
//                 {evType: "BlockReq"},
//                 {evType: "BlockReq"},
//                 {evType: "ReceiveB", peer: "P1", height: 13},
//                 {evType: "Process"},
//                 {evType: "ReceiveB", peer: "P2", height: 14},
//                 {evType: "Process"},
//                 {evType: "BlockReq"},
//                 {evType: "ReceiveB", peer: "P2", height: 15},
//                 {evType: "Process"},
//             },
//         },
//         {
//             name:   "termination on max peer height - two peers, noBlock error",
//             params: params,
//             msgs: []testEvent{
//                 {evType: "AddPeer", peer: "P1"},
//                 {evType: "AddPeer", peer: "P2"},
//                 {evType: "ReceiveS", peer: "P1", height: 13},
//                 {evType: "ReceiveS", peer: "P2", height: 15},
//                 {evType: "BlockReq"},
//                 {evType: "BlockReq"},
//                 {evType: "ReceiveNB", peer: "P1", height: 11},
//                 {evType: "BlockReq"},
//                 {evType: "ReceiveB", peer: "P2", height: 12},
//                 {evType: "ReceiveB", peer: "P2", height: 11},
//                 {evType: "Process"},
//                 {evType: "BlockReq"},
//                 {evType: "BlockReq"},
//                 {evType: "ReceiveB", peer: "P2", height: 13},
//                 {evType: "Process"},
//                 {evType: "ReceiveB", peer: "P2", height: 14},
//                 {evType: "Process"},
//                 {evType: "BlockReq"},
//                 {evType: "ReceiveB", peer: "P2", height: 15},
//                 {evType: "Process"},
//             },
//         },
//         {
//             name:   "termination on max peer height - two peers, remove one peer",
//             params: params,
//             msgs: []testEvent{
//                 {evType: "AddPeer", peer: "P1"},
//                 {evType: "AddPeer", peer: "P2"},
//                 {evType: "ReceiveS", peer: "P1", height: 13},
//                 {evType: "ReceiveS", peer: "P2", height: 15},
//                 {evType: "BlockReq"},
//                 {evType: "BlockReq"},
//                 {evType: "RemovePeer", peer: "P1"},
//                 {evType: "BlockReq"},
//                 {evType: "ReceiveB", peer: "P2", height: 12},
//                 {evType: "ReceiveB", peer: "P2", height: 11},
//                 {evType: "Process"},
//                 {evType: "BlockReq"},
//                 {evType: "BlockReq"},
//                 {evType: "ReceiveB", peer: "P2", height: 13},
//                 {evType: "Process"},
//                 {evType: "ReceiveB", peer: "P2", height: 14},
//                 {evType: "Process"},
//                 {evType: "BlockReq"},
//                 {evType: "ReceiveB", peer: "P2", height: 15},
//                 {evType: "Process"},
//             },
//         },
//     }

//     for _, tt := range tests {
//         tt := tt
//         t.Run(tt.name, func(t *testing.T) {
//             reactor := newTestReactor(params)
//             reactor.Start()
//             reactor.reporter = behaviour.NewMockReporter()
//             mockSwitch := &mockSwitchIo{switchedToConsensus: false}
//             reactor.io = mockSwitch
//             // time for go routines to start
//             time.Sleep(time.Millisecond)

//             for _, step := range tt.msgs {
//                 switch step.evType {
//                 case "AddPeer":
//                     reactor.scheduler.send(bcAddNewPeer{peerID: p2p.ID(step.peer)})
//                 case "RemovePeer":
//                     reactor.scheduler.send(bcRemovePeer{peerID: p2p.ID(step.peer)})
//                 case "ReceiveS":
//                     reactor.scheduler.send(bcStatusResponse{
//                         peerID: p2p.ID(step.peer),
//                         height: step.height,
//                         time:   time.Now(),
//                     })
//                 case "ReceiveB":
//                     reactor.scheduler.send(bcBlockResponse{
//                         peerID: p2p.ID(step.peer),
//                         block:  refStore.LoadBlock(step.height),
//                         size:   10,
//                         time:   time.Now(),
//                     })
//                 case "ReceiveNB":
//                     reactor.scheduler.send(bcNoBlockResponse{
//                         peerID: p2p.ID(step.peer),
//                         height: step.height,
//                         time:   time.Now(),
//                     })
//                 case "BlockReq":
//                     reactor.scheduler.send(rTrySchedule{time: time.Now()})
//                 case "Process":
//                     reactor.processor.send(rProcessBlock{})
//                 }
//                 // give time for messages to propagate between routines
//                 time.Sleep(time.Millisecond)
//             }

//             // time for processor to finish and reactor to switch to consensus
//             time.Sleep(20 * time.Millisecond)
//             assert.True(t, mockSwitch.hasSwitchedToConsensus())
//             reactor.Stop()
//         })
//     }
// }

func TestReactorHelperMode(t *testing.T) {
    var (
        channelID = byte(0x40)
    )

    config := cfg.ResetTestRoot("blockchain_reactor_v2_test")
    defer os.RemoveAll(config.RootDir)
    genDoc, privVals := randGenesisDoc(config.ChainID(), 1, false, 30)

    params := testReactorParams{
        logger:      log.TestingLogger(),
        genDoc:      genDoc,
        privVals:    privVals,
        startHeight: 20,
        mockA:       true,
    }

    type testEvent struct {
        peer  string
        event interface{}
    }

    tests := []struct {
        name   string
        params testReactorParams
        msgs   []testEvent
    }{
        {
            name:   "status request",
            params: params,
            msgs: []testEvent{
                {"P1", bcproto.StatusRequest{}},
                {"P1", bcproto.BlockRequest{Height: 13}},
                {"P1", bcproto.BlockRequest{Height: 20}},
                {"P1", bcproto.BlockRequest{Height: 22}},
            },
        },
    }

    for _, tt := range tests {
        tt := tt
        t.Run(tt.name, func(t *testing.T) {
            reactor := newTestReactor(params)
            mockSwitch := &mockSwitchIo{switchedToConsensus: false}
            reactor.io = mockSwitch
            err := reactor.Start()
            require.NoError(t, err)

            for i := 0; i < len(tt.msgs); i++ {
                step := tt.msgs[i]
                switch ev := step.event.(type) {
                case bcproto.StatusRequest:
                    old := mockSwitch.numStatusResponse
                    msg, err := bc.EncodeMsg(&ev)
                    assert.NoError(t, err)
                    reactor.Receive(channelID, mockPeer{id: p2p.ID(step.peer)}, msg)
                    assert.Equal(t, old+1, mockSwitch.numStatusResponse)
                case bcproto.BlockRequest:
                    if ev.Height > params.startHeight {
                        old := mockSwitch.numNoBlockResponse
                        msg, err := bc.EncodeMsg(&ev)
                        assert.NoError(t, err)
                        reactor.Receive(channelID, mockPeer{id: p2p.ID(step.peer)}, msg)
                        assert.Equal(t, old+1, mockSwitch.numNoBlockResponse)
                    } else {
                        old := mockSwitch.numBlockResponse
                        msg, err := bc.EncodeMsg(&ev)
                        assert.NoError(t, err)
                        reactor.Receive(channelID, mockPeer{id: p2p.ID(step.peer)}, msg)
                        assert.Equal(t, old+1, mockSwitch.numBlockResponse)
                    }
                }
            }
            err = reactor.Stop()
            require.NoError(t, err)
        })
    }
}

func TestReactorSetSwitchNil(t *testing.T) {
    config := cfg.ResetTestRoot("blockchain_reactor_v2_test")
    defer os.RemoveAll(config.RootDir)
    genDoc, privVals := randGenesisDoc(config.ChainID(), 1, false, 30)

    reactor := newTestReactor(testReactorParams{
        logger:   log.TestingLogger(),
        genDoc:   genDoc,
        privVals: privVals,
    })
    reactor.SetSwitch(nil)

    assert.Nil(t, reactor.Switch)
    assert.Nil(t, reactor.io)
}

//----------------------------------------------
// utility funcs

func makeTxs(height int64) (txs []types.Tx) {
    for i := 0; i < 10; i++ {
        txs = append(txs, types.Tx([]byte{byte(height), byte(i)}))
    }
    return txs
}

func makeBlock(height int64, state sm.State, lastCommit *types.Commit) *types.Block {
    block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, nil, state.Validators.GetProposer().Address)
    return block
}

type testApp struct {
    abci.BaseApplication
}

func randGenesisDoc(chainID string, numValidators int, randPower bool, minPower int64) (
    *types.GenesisDoc, []types.PrivValidator) {
    validators := make([]types.GenesisValidator, numValidators)
    privValidators := make([]types.PrivValidator, numValidators)
    for i := 0; i < numValidators; i++ {
        val, privVal := types.RandValidator(randPower, minPower)
        validators[i] = types.GenesisValidator{
            PubKey: val.PubKey,
            Power:  val.VotingPower,
        }
        privValidators[i] = privVal
    }
    sort.Sort(types.PrivValidatorsByAddress(privValidators))

    return &types.GenesisDoc{
        GenesisTime: tmtime.Now(),
        ChainID:     chainID,
        Validators:  validators,
    }, privValidators
}

// Why are we importing the entire blockExecutor dependency graph here
// when we have the facilities to
func newReactorStore(
    genDoc *types.GenesisDoc,
    privVals []types.PrivValidator,
    maxBlockHeight int64) (*store.BlockStore, sm.State, *sm.BlockExecutor) {
    if len(privVals) != 1 {
        panic("only support one validator")
    }
    app := &testApp{}
    cc := proxy.NewLocalClientCreator(app)
    proxyApp := proxy.NewAppConns(cc)
    err := proxyApp.Start()
    if err != nil {
        panic(fmt.Errorf("error starting app: %w", err))
    }

    stateDB := dbm.NewMemDB()
    blockStore := store.NewBlockStore(dbm.NewMemDB())
    stateStore := sm.NewStore(stateDB)
    state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
    if err != nil {
        panic(fmt.Errorf("error constructing state from genesis file: %w", err))
    }

    db := dbm.NewMemDB()
    stateStore = sm.NewStore(db)
    blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(),
        mock.Mempool{}, sm.EmptyEvidencePool{})
    if err = stateStore.Save(state); err != nil {
        panic(err)
    }

    // add blocks in
    for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ {
        lastCommit := types.NewCommit(blockHeight-1, 0, types.BlockID{}, nil)
        if blockHeight > 1 {
            lastBlockMeta := blockStore.LoadBlockMeta(blockHeight - 1)
            lastBlock := blockStore.LoadBlock(blockHeight - 1)
            vote, err := types.MakeVote(
                lastBlock.Header.Height,
                lastBlockMeta.BlockID,
                state.Validators,
                privVals[0],
                lastBlock.Header.ChainID,
                time.Now(),
            )
            if err != nil {
                panic(err)
            }
            lastCommit = types.NewCommit(vote.Height, vote.Round,
                lastBlockMeta.BlockID, []types.CommitSig{vote.CommitSig()})
        }

        thisBlock := makeBlock(blockHeight, state, lastCommit)

        thisParts := thisBlock.MakePartSet(types.BlockPartSizeBytes)
        blockID := types.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()}

        state, _, err = blockExec.ApplyBlock(state, blockID, thisBlock)
        if err != nil {
            panic(fmt.Errorf("error applying block: %w", err))
        }

        blockStore.SaveBlock(thisBlock, thisParts, lastCommit)
    }
    return blockStore, state, blockExec
}

@@ -1,166 +0,0 @@
package v2

import (
    "fmt"
    "strings"
    "sync/atomic"

    "github.com/Workiva/go-datastructures/queue"

    "github.com/tendermint/tendermint/libs/log"
)

type handleFunc = func(event Event) (Event, error)

const historySize = 25

// Routine is a structure that models a finite state machine as a serialized
// stream of events processed by a handle function. The Routine structure
// handles the concurrency and messaging guarantees. Events sent via `send`
// are handled by the `handle` function to produce an iterator `next()`.
// Calling `stop()` on a routine will conclude processing of all sent events
// and produce a `final()` event representing the terminal state.
type Routine struct {
    name    string
    handle  handleFunc
    queue   *queue.PriorityQueue
    history []Event
    out     chan Event
    fin     chan error
    rdy     chan struct{}
    running *uint32
    logger  log.Logger
    metrics *Metrics
}

func newRoutine(name string, handleFunc handleFunc, bufferSize int) *Routine {
    return &Routine{
        name:    name,
        handle:  handleFunc,
        queue:   queue.NewPriorityQueue(bufferSize, true),
        history: make([]Event, 0, historySize),
        out:     make(chan Event, bufferSize),
        rdy:     make(chan struct{}, 1),
        fin:     make(chan error, 1),
        running: new(uint32),
        logger:  log.NewNopLogger(),
        metrics: NopMetrics(),
    }
}
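// Minimal usage sketch (added for illustration; someHandler and someEvent are
// hypothetical stand-ins for a handleFunc and an Event): start the routine,
// wait for readiness, feed it events, then stop and collect the terminal error.
//
//    rt := newRoutine("example", someHandler, 10)
//    go rt.start()
//    <-rt.ready()        // the routine now accepts events
//    rt.send(someEvent)  // returns false if the routine is stopped or saturated
//    rt.stop()           // disposes the queue; the event loop then terminates
//    err := <-rt.final() // nil on a clean stop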

func (rt *Routine) setLogger(logger log.Logger) {
    rt.logger = logger
}

// nolint:unused
func (rt *Routine) setMetrics(metrics *Metrics) {
    rt.metrics = metrics
}

func (rt *Routine) start() {
    rt.logger.Info("routine start", "msg", log.NewLazySprintf("%s: run", rt.name))
    running := atomic.CompareAndSwapUint32(rt.running, uint32(0), uint32(1))
    if !running {
        panic(fmt.Sprintf("%s is already running", rt.name))
    }
    close(rt.rdy)
    defer func() {
        if r := recover(); r != nil {
            var (
                b strings.Builder
                j int
            )
            for i := len(rt.history) - 1; i >= 0; i-- {
                fmt.Fprintf(&b, "%d: %+v\n", j, rt.history[i])
                j++
            }
            panic(fmt.Sprintf("%v\nlast events:\n%v", r, b.String()))
        }
        stopped := atomic.CompareAndSwapUint32(rt.running, uint32(1), uint32(0))
        if !stopped {
            panic(fmt.Sprintf("%s failed to stop", rt.name))
        }
    }()

    for {
        events, err := rt.queue.Get(1)
        if err == queue.ErrDisposed {
            rt.terminate(nil)
            return
        } else if err != nil {
            rt.terminate(err)
            return
        }
        oEvent, err := rt.handle(events[0].(Event))
        rt.metrics.EventsHandled.With("routine", rt.name).Add(1)
        if err != nil {
            rt.terminate(err)
            return
        }
        rt.metrics.EventsOut.With("routine", rt.name).Add(1)
        rt.logger.Debug("routine start", "msg", log.NewLazySprintf("%s: produced %T %+v", rt.name, oEvent, oEvent))

        // Skip rTrySchedule and rProcessBlock events as they clutter the history
        // due to their frequency.
        switch events[0].(type) {
        case rTrySchedule:
        case rProcessBlock:
        default:
            rt.history = append(rt.history, events[0].(Event))
            if len(rt.history) > historySize {
                rt.history = rt.history[1:]
            }
        }

        rt.out <- oEvent
    }
}

// XXX: look into returning OpError in the net package
func (rt *Routine) send(event Event) bool {
    rt.logger.Debug("routine send", "msg", log.NewLazySprintf("%s: received %T %+v", rt.name, event, event))
    if !rt.isRunning() {
        return false
    }
    err := rt.queue.Put(event)
    if err != nil {
        rt.metrics.EventsShed.With("routine", rt.name).Add(1)
        rt.logger.Error(fmt.Sprintf("%s: send failed, queue was full/stopped", rt.name))
        return false
    }

    rt.metrics.EventsSent.With("routine", rt.name).Add(1)
    return true
}

func (rt *Routine) isRunning() bool {
    return atomic.LoadUint32(rt.running) == 1
}

func (rt *Routine) next() chan Event {
    return rt.out
}

func (rt *Routine) ready() chan struct{} {
    return rt.rdy
}

func (rt *Routine) stop() {
    if !rt.isRunning() { // XXX: this should check rt.queue.Disposed()
        return
    }

    rt.logger.Info("routine stop", "msg", log.NewLazySprintf("%s: stop", rt.name))
    rt.queue.Dispose() // this should block until all queue items are free?
}

func (rt *Routine) final() chan error {
    return rt.fin
}

// XXX: Maybe get rid of this
func (rt *Routine) terminate(reason error) {
    // We don't close the rt.out channel here, to avoid spinning on the closed channel
    // in the event loop.
    rt.fin <- reason
}

@@ -1,163 +0,0 @@
package v2

import (
    "fmt"
    "testing"
    "time"

    "github.com/stretchr/testify/assert"
)

type eventA struct {
    priorityNormal
}

var errDone = fmt.Errorf("done")

func simpleHandler(event Event) (Event, error) {
    if _, ok := event.(eventA); ok {
        return noOp, errDone
    }
    return noOp, nil
}

func TestRoutineFinal(t *testing.T) {
    var (
        bufferSize = 10
        routine    = newRoutine("simpleRoutine", simpleHandler, bufferSize)
    )

    assert.False(t, routine.isRunning(),
        "expected an initialized routine to not be running")
    go routine.start()
    <-routine.ready()
    assert.True(t, routine.isRunning(),
        "expected a started routine to be running")

    assert.True(t, routine.send(eventA{}),
        "expected sending to a ready routine to succeed")

    assert.Equal(t, errDone, <-routine.final(),
        "expected the final event to be done")

    assert.False(t, routine.isRunning(),
        "expected a completed routine to no longer be running")
}

func TestRoutineStop(t *testing.T) {
    var (
        bufferSize = 10
        routine    = newRoutine("simpleRoutine", simpleHandler, bufferSize)
    )

    assert.False(t, routine.send(eventA{}),
        "expected sending to an unstarted routine to fail")

    go routine.start()
    <-routine.ready()

    assert.True(t, routine.send(eventA{}),
        "expected sending to a running routine to succeed")

    routine.stop()

    assert.False(t, routine.send(eventA{}),
        "expected sending to a stopped routine to fail")
}

type finalCount struct {
    count int
}

func (f finalCount) Error() string {
    return "end"
}

func genStatefulHandler(maxCount int) handleFunc {
    counter := 0
    return func(event Event) (Event, error) {
        if _, ok := event.(eventA); ok {
            counter++
            if counter >= maxCount {
                return noOp, finalCount{counter}
            }

            return eventA{}, nil
        }
        return noOp, nil
    }
}

func feedback(r *Routine) {
    for event := range r.next() {
        r.send(event)
    }
}

func TestStatefulRoutine(t *testing.T) {
    var (
        count      = 10
        handler    = genStatefulHandler(count)
        bufferSize = 20
        routine    = newRoutine("statefulRoutine", handler, bufferSize)
    )

    go routine.start()
    go feedback(routine)
    <-routine.ready()

    assert.True(t, routine.send(eventA{}),
        "expected sending to a started routine to succeed")

    final := <-routine.final()
    if fnl, ok := final.(finalCount); ok {
        assert.Equal(t, count, fnl.count,
            "expected the routine to count to 10")
    } else {
        t.Fail()
    }
}

type lowPriorityEvent struct {
    priorityLow
}

type highPriorityEvent struct {
    priorityHigh
}

func handleWithPriority(event Event) (Event, error) {
    switch event.(type) {
    case lowPriorityEvent:
        return noOp, nil
    case highPriorityEvent:
        return noOp, errDone
    }
    return noOp, nil
}

func TestPriority(t *testing.T) {
    var (
        bufferSize = 20
        routine    = newRoutine("priorityRoutine", handleWithPriority, bufferSize)
    )

    go routine.start()
    <-routine.ready()
    go func() {
        for {
            routine.send(lowPriorityEvent{})
            time.Sleep(1 * time.Millisecond)
        }
    }()
    time.Sleep(10 * time.Millisecond)

    assert.True(t, routine.isRunning(),
        "expected a started routine to be running")
    assert.True(t, routine.send(highPriorityEvent{}),
        "expected send to succeed even when saturated")

    assert.Equal(t, errDone, <-routine.final())
    assert.False(t, routine.isRunning(),
        "expected the routine to have stopped")
}

@@ -1,712 +0,0 @@
package v2

import (
    "bytes"
    "errors"
    "fmt"
    "math"
    "sort"
    "time"

    "github.com/tendermint/tendermint/p2p"
    "github.com/tendermint/tendermint/types"
)

// Events generated by the scheduler:
// all blocks have been processed
type scFinishedEv struct {
    priorityNormal
    reason string
}

func (e scFinishedEv) String() string {
    return fmt.Sprintf("scFinishedEv{%v}", e.reason)
}

// send a blockRequest message
type scBlockRequest struct {
    priorityNormal
    peerID p2p.ID
    height int64
}

func (e scBlockRequest) String() string {
    return fmt.Sprintf("scBlockRequest{%d from %v}", e.height, e.peerID)
}

// a block has been received and validated by the scheduler
type scBlockReceived struct {
    priorityNormal
    peerID p2p.ID
    block  *types.Block
}

func (e scBlockReceived) String() string {
    return fmt.Sprintf("scBlockReceived{%d#%X from %v}", e.block.Height, e.block.Hash(), e.peerID)
}

// scheduler detected a peer error
type scPeerError struct {
    priorityHigh
    peerID p2p.ID
    reason error
}

func (e scPeerError) String() string {
    return fmt.Sprintf("scPeerError{%v errored with %v}", e.peerID, e.reason)
}

// scheduler removed a set of peers (timed out or slow peer)
type scPeersPruned struct {
    priorityHigh
    peers []p2p.ID
}

func (e scPeersPruned) String() string {
    return fmt.Sprintf("scPeersPruned{%v}", e.peers)
}

// XXX: make this fatal?
// scheduler encountered a fatal error
type scSchedulerFail struct {
    priorityHigh
    reason error
}

func (e scSchedulerFail) String() string {
    return fmt.Sprintf("scSchedulerFail{%v}", e.reason)
}

type blockState int

const (
    blockStateUnknown   blockState = iota + 1 // no known peer has this block
    blockStateNew                             // indicates that a peer has reported having this block
    blockStatePending                         // indicates that this block has been requested from a peer
    blockStateReceived                        // indicates that this block has been received from a peer
    blockStateProcessed                       // indicates that this block has been applied
)

func (e blockState) String() string {
    switch e {
    case blockStateUnknown:
        return "Unknown"
    case blockStateNew:
        return "New"
    case blockStatePending:
        return "Pending"
    case blockStateReceived:
        return "Received"
    case blockStateProcessed:
        return "Processed"
    default:
        return fmt.Sprintf("invalid blockState: %d", e)
    }
}
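// Added note (not in the original file): the states above form a simple
// per-height lifecycle, driven by the scheduler handlers further below:
//
//    Unknown -> New (peer reports height) -> Pending (request sent)
//            -> Received (block arrived)  -> Processed (block applied)
//
// removePeer (below) moves Pending/Received blocks back to New when the
// responsible peer disappears, so another peer can be scheduled for that height.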
|
||||
|
||||
type peerState int
|
||||
|
||||
const (
|
||||
peerStateNew = iota + 1
|
||||
peerStateReady
|
||||
peerStateRemoved
|
||||
)
|
||||
|
||||
func (e peerState) String() string {
|
||||
switch e {
|
||||
case peerStateNew:
|
||||
return "New"
|
||||
case peerStateReady:
|
||||
return "Ready"
|
||||
case peerStateRemoved:
|
||||
return "Removed"
|
||||
default:
|
||||
panic(fmt.Sprintf("unknown peerState: %d", e))
|
||||
}
|
||||
}
|
||||
|
||||
type scPeer struct {
|
||||
peerID p2p.ID
|
||||
|
||||
// initialized as New when peer is added, updated to Ready when statusUpdate is received,
|
||||
// updated to Removed when peer is removed
|
||||
state peerState
|
||||
|
||||
base int64 // updated when statusResponse is received
|
||||
height int64 // updated when statusResponse is received
|
||||
lastTouched time.Time
|
||||
lastRate int64 // last receive rate in bytes
|
||||
}
|
||||
|
||||
func (p scPeer) String() string {
|
||||
return fmt.Sprintf("{state %v, base %d, height %d, lastTouched %v, lastRate %d, id %v}",
|
||||
p.state, p.base, p.height, p.lastTouched, p.lastRate, p.peerID)
|
||||
}
|
||||
|
||||
func newScPeer(peerID p2p.ID) *scPeer {
|
||||
return &scPeer{
|
||||
peerID: peerID,
|
||||
state: peerStateNew,
|
||||
base: -1,
|
||||
height: -1,
|
||||
lastTouched: time.Time{},
|
||||
}
|
||||
}

// The scheduler keeps track of the state of each block and each peer. The
// scheduler will attempt to schedule new block requests with `trySchedule`
// events and remove slow peers with `tryPrune` events.
type scheduler struct {
	initHeight int64

	// next block that needs to be processed. All blocks with smaller height are
	// in Processed state.
	height int64

	// lastAdvance tracks the last time a block execution happened.
	// syncTimeout is the maximum time the scheduler waits to advance in the fast sync process before finishing.
	// This covers the cases where there are no peers or all peers have a lower height.
	lastAdvance time.Time
	syncTimeout time.Duration

	// a map from peerID to the scheduler-specific peer struct `scPeer`, used to keep
	// track of peer-specific state
	peers       map[p2p.ID]*scPeer
	peerTimeout time.Duration // maximum response time from a peer, otherwise prune
	minRecvRate int64         // minimum receive rate from a peer, otherwise prune

	// the maximum number of blocks that should be New, Received or Pending at any point
	// in time. This is used to enforce a limit on the blockStates map.
	targetPending int
	// a list of blocks to be scheduled (New), Pending or Received. Its length should
	// not exceed targetPending.
	blockStates map[int64]blockState

	// a map from heights to the peer we are waiting for a response from
	pendingBlocks map[int64]p2p.ID

	// the time at which a block was put in blockStatePending
	pendingTime map[int64]time.Time

	// a map from heights to the peers that put the block in blockStateReceived
	receivedBlocks map[int64]p2p.ID
}

func (sc scheduler) String() string {
	return fmt.Sprintf("ih: %d, bst: %v, peers: %v, pblks: %v, ptm %v, rblks: %v",
		sc.initHeight, sc.blockStates, sc.peers, sc.pendingBlocks, sc.pendingTime, sc.receivedBlocks)
}

func newScheduler(initHeight int64, startTime time.Time) *scheduler {
	sc := scheduler{
		initHeight:     initHeight,
		lastAdvance:    startTime,
		syncTimeout:    60 * time.Second,
		height:         initHeight,
		blockStates:    make(map[int64]blockState),
		peers:          make(map[p2p.ID]*scPeer),
		pendingBlocks:  make(map[int64]p2p.ID),
		pendingTime:    make(map[int64]time.Time),
		receivedBlocks: make(map[int64]p2p.ID),
		targetPending:  10,               // TODO - pass as param
		peerTimeout:    15 * time.Second, // TODO - pass as param
		minRecvRate:    0,                // int64(7680), TODO - pass as param
	}

	return &sc
}
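
A minimal sketch of how a caller might wire these pieces together. This function is not part of the original file; the peer ID is invented, and the sketch assumes it lives in the same package since the scheduler and its methods are unexported:

// exampleSchedulerSetup is illustrative only.
func exampleSchedulerSetup() {
	sc := newScheduler(1, time.Now())

	// A status response moves the peer from New to Ready and seeds up to
	// targetPending heights into the New state via addNewBlocks.
	if err := sc.setPeerRange("example-peer", 1, 100); err != nil {
		fmt.Println("rejected status update:", err)
	}
}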

func (sc *scheduler) ensurePeer(peerID p2p.ID) *scPeer {
	if _, ok := sc.peers[peerID]; !ok {
		sc.peers[peerID] = newScPeer(peerID)
	}
	return sc.peers[peerID]
}

func (sc *scheduler) touchPeer(peerID p2p.ID, now time.Time) error {
	peer, ok := sc.peers[peerID]
	if !ok {
		return fmt.Errorf("couldn't find peer %s", peerID)
	}

	if peer.state != peerStateReady {
		return fmt.Errorf("tried to touch peer in state %s, must be Ready", peer.state)
	}

	peer.lastTouched = now

	return nil
}

func (sc *scheduler) removePeer(peerID p2p.ID) {
	peer, ok := sc.peers[peerID]
	if !ok {
		return
	}
	if peer.state == peerStateRemoved {
		return
	}

	// blocks pending from or received from this peer must be rescheduled
	for height, pendingPeerID := range sc.pendingBlocks {
		if pendingPeerID == peerID {
			sc.setStateAtHeight(height, blockStateNew)
			delete(sc.pendingTime, height)
			delete(sc.pendingBlocks, height)
		}
	}

	for height, rcvPeerID := range sc.receivedBlocks {
		if rcvPeerID == peerID {
			sc.setStateAtHeight(height, blockStateNew)
			delete(sc.receivedBlocks, height)
		}
	}

	// remove blocks from blockStates if the peer removal causes the max peer height to be lower.
	peer.state = peerStateRemoved
	maxPeerHeight := int64(0)
	for _, otherPeer := range sc.peers {
		if otherPeer.state != peerStateReady {
			continue
		}
		if otherPeer.peerID != peer.peerID && otherPeer.height > maxPeerHeight {
			maxPeerHeight = otherPeer.height
		}
	}
	for h := range sc.blockStates {
		if h > maxPeerHeight {
			delete(sc.blockStates, h)
		}
	}
}
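
A small invented illustration of the rescheduling above: blocks that were pending from the removed peer fall back to New so another peer can be asked. "slow-peer" and the height are hypothetical:

// exampleRemoveReschedules is illustrative only.
func exampleRemoveReschedules(sc *scheduler) {
	// Suppose height 7 is Pending from "slow-peer". After removal it is New
	// again, or Unknown if no remaining Ready peer reaches height 7 and the
	// entry was dropped from blockStates.
	sc.removePeer("slow-peer")
	fmt.Println(sc.getStateAtHeight(7))
}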

// addNewBlocks checks if the block pool is running low and adds new blocks in New
// state to be requested. This function is called when there is an increase in the
// maximum peer height or when blocks are processed.
func (sc *scheduler) addNewBlocks() {
	if len(sc.blockStates) >= sc.targetPending {
		return
	}

	for i := sc.height; i < int64(sc.targetPending)+sc.height; i++ {
		if i > sc.maxHeight() {
			break
		}
		if sc.getStateAtHeight(i) == blockStateUnknown {
			sc.setStateAtHeight(i, blockStateNew)
		}
	}
}

func (sc *scheduler) setPeerRange(peerID p2p.ID, base int64, height int64) error {
	peer := sc.ensurePeer(peerID)

	if peer.state == peerStateRemoved {
		return nil // noop
	}

	if height < peer.height {
		sc.removePeer(peerID)
		return fmt.Errorf("cannot move peer height lower, from %d to %d", peer.height, height)
	}

	if base > height {
		sc.removePeer(peerID)
		return fmt.Errorf("cannot set peer base higher than its height")
	}

	peer.base = base
	peer.height = height
	peer.state = peerStateReady

	sc.addNewBlocks()
	return nil
}
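
A hedged sketch of the rejection path above; the peer ID and heights are invented for illustration:

// exampleStatusValidation is illustrative only.
func exampleStatusValidation() {
	sc := newScheduler(1, time.Now())
	_ = sc.setPeerRange("p", 1, 50) // peer becomes Ready at height 50

	// A later report with a lower height removes the peer and returns an error.
	if err := sc.setPeerRange("p", 1, 40); err != nil {
		fmt.Println(err) // cannot move peer height lower, from 50 to 40
	}
}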

func (sc *scheduler) getStateAtHeight(height int64) blockState {
	if height < sc.height {
		return blockStateProcessed
	}
	if state, ok := sc.blockStates[height]; ok {
		return state
	}
	return blockStateUnknown
}

func (sc *scheduler) getPeersWithHeight(height int64) []p2p.ID {
	peers := make([]p2p.ID, 0)
	for _, peer := range sc.peers {
		if peer.state != peerStateReady {
			continue
		}
		if peer.base <= height && peer.height >= height {
			peers = append(peers, peer.peerID)
		}
	}
	return peers
}

func (sc *scheduler) prunablePeers(peerTimeout time.Duration, minRecvRate int64, now time.Time) []p2p.ID {
	prunable := make([]p2p.ID, 0)
	for peerID, peer := range sc.peers {
		if peer.state != peerStateReady {
			continue
		}
		if now.Sub(peer.lastTouched) > peerTimeout || peer.lastRate < minRecvRate {
			prunable = append(prunable, peerID)
		}
	}
	// Tests for handleTryPrunePeer() may fail without sort due to range non-determinism
	sort.Sort(PeerByID(prunable))
	return prunable
}
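
A brief sketch of how pruning might be invoked; the timeout and rate values mirror the defaults set in newScheduler but are otherwise arbitrary, and this function is not part of the original file:

// examplePrune is illustrative only.
func examplePrune(sc *scheduler) {
	stale := sc.prunablePeers(15*time.Second, 0, time.Now())
	for _, peerID := range stale {
		sc.removePeer(peerID)
	}
}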

func (sc *scheduler) setStateAtHeight(height int64, state blockState) {
	sc.blockStates[height] = state
}

// CONTRACT: peer exists and is in Ready state.
func (sc *scheduler) markReceived(peerID p2p.ID, height int64, size int64, now time.Time) error {
	peer := sc.peers[peerID]

	if state := sc.getStateAtHeight(height); state != blockStatePending || sc.pendingBlocks[height] != peerID {
		return fmt.Errorf("received block %d from peer %s without being requested", height, peerID)
	}

	pendingTime, ok := sc.pendingTime[height]
	if !ok || now.Sub(pendingTime) <= 0 {
		return fmt.Errorf("clock error: block %d received at %s but requested at %s",
			height, now, pendingTime)
	}

	peer.lastRate = size / now.Sub(pendingTime).Nanoseconds()

	sc.setStateAtHeight(height, blockStateReceived)
	delete(sc.pendingBlocks, height)
	delete(sc.pendingTime, height)

	sc.receivedBlocks[height] = peerID

	return nil
}
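
Note the unit of lastRate computed above: bytes divided by elapsed nanoseconds, using integer division. A small illustration with invented figures shows how easily that truncates to zero:

// exampleRate is illustrative only; the block size and elapsed time are made up.
func exampleRate() {
	elapsed := 2 * time.Second
	var size int64 = 4_000_000 // a hypothetical 4 MB block
	rate := size / elapsed.Nanoseconds()
	fmt.Println(rate) // 0 — any rate below 1 byte per nanosecond truncates to zero
}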

func (sc *scheduler) markPending(peerID p2p.ID, height int64, now time.Time) error {
	state := sc.getStateAtHeight(height)
	if state != blockStateNew {
		return fmt.Errorf("block %d should be in blockStateNew but is %s", height, state)
	}

	peer, ok := sc.peers[peerID]
	if !ok {
		return fmt.Errorf("cannot find peer %s", peerID)
	}

	if peer.state != peerStateReady {
		return fmt.Errorf("cannot schedule %d from %s in %s", height, peerID, peer.state)
	}

	if height > peer.height {
		return fmt.Errorf("cannot request height %d from peer %s that is at height %d",
			height, peerID, peer.height)
	}

	if height < peer.base {
		return fmt.Errorf("cannot request height %d for peer %s with base %d",
			height, peerID, peer.base)
	}

	sc.setStateAtHeight(height, blockStatePending)
	sc.pendingBlocks[height] = peerID
	sc.pendingTime[height] = now

	return nil
}

func (sc *scheduler) markProcessed(height int64) error {
	// It is possible that a peer error or timeout is handled after the processor
	// has processed the block but before the scheduler received this event, so
	// by the time the pcBlockProcessed event arrives the block may have been
	// requested again => don't check the block state.
	sc.lastAdvance = time.Now()
	sc.height = height + 1
	delete(sc.pendingBlocks, height)
	delete(sc.pendingTime, height)
	delete(sc.receivedBlocks, height)
	delete(sc.blockStates, height)
	sc.addNewBlocks()
	return nil
}

func (sc *scheduler) allBlocksProcessed() bool {
	if len(sc.peers) == 0 {
		return false
	}
	return sc.height >= sc.maxHeight()
}

// returns the maximum height of any Ready peer or, if there are none,
// the height of the last processed block, i.e. sc.height - 1
func (sc *scheduler) maxHeight() int64 {
	max := sc.height - 1
	for _, peer := range sc.peers {
		if peer.state != peerStateReady {
			continue
		}
		if max < peer.height {
			max = peer.height
		}
	}
	return max
}

// returns the lowest height in sc.blockStates with state == blockStateNew, or -1 if there are no new blocks
func (sc *scheduler) nextHeightToSchedule() int64 {
	var min int64 = math.MaxInt64
	for height, state := range sc.blockStates {
		if state == blockStateNew && height < min {
			min = height
		}
	}
	if min == math.MaxInt64 {
		min = -1
	}
	return min
}

func (sc *scheduler) pendingFrom(peerID p2p.ID) []int64 {
	var heights []int64
	for height, pendingPeerID := range sc.pendingBlocks {
		if pendingPeerID == peerID {
			heights = append(heights, height)
		}
	}
	return heights
}

func (sc *scheduler) selectPeer(height int64) (p2p.ID, error) {
	peers := sc.getPeersWithHeight(height)
	if len(peers) == 0 {
		return "", fmt.Errorf("cannot find peer for height %d", height)
	}

	// create a map from the number of pending requests to the list
	// of peers with that number of pending requests.
	pendingFrom := make(map[int][]p2p.ID)
	for _, peerID := range peers {
		numPending := len(sc.pendingFrom(peerID))
		pendingFrom[numPending] = append(pendingFrom[numPending], peerID)
	}

	// find the set of peers with the minimum number of pending requests.
	var minPending int64 = math.MaxInt64
	for mp := range pendingFrom {
		if int64(mp) < minPending {
			minPending = int64(mp)
		}
	}

	sort.Sort(PeerByID(pendingFrom[int(minPending)]))
	return pendingFrom[int(minPending)][0], nil
}
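
A small invented illustration of the selection rule above: among peers that can serve the height, the one with the fewest in-flight requests wins, with ties broken by lexicographic peer ID. Not part of the original file:

// exampleSelect assumes two Ready peers covering height 5; neither has pending
// requests, so the lexicographically smaller ID is returned deterministically.
func exampleSelect() {
	sc := newScheduler(1, time.Now())
	_ = sc.setPeerRange("b", 1, 10)
	_ = sc.setPeerRange("a", 1, 10)

	peerID, err := sc.selectPeer(5)
	fmt.Println(peerID, err) // a <nil>
}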

// PeerByID is a list of peers sorted by peerID.
type PeerByID []p2p.ID

func (peers PeerByID) Len() int {
	return len(peers)
}

func (peers PeerByID) Less(i, j int) bool {
	return bytes.Compare([]byte(peers[i]), []byte(peers[j])) == -1
}

func (peers PeerByID) Swap(i, j int) {
	peers[i], peers[j] = peers[j], peers[i]
}
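
Implementing sort.Interface keeps the element type explicit; on Go 1.8+ the same ordering could be expressed inline with sort.Slice. A hypothetical equivalent, not part of the original file:

// sortPeersInline is an illustrative alternative; it assumes p2p.ID has an
// underlying string type, so < gives the same lexicographic order as
// bytes.Compare above.
func sortPeersInline(peers []p2p.ID) {
	sort.Slice(peers, func(i, j int) bool {
		return peers[i] < peers[j]
	})
}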

// Handlers

// handleBlockResponse gets the block, performs some validation and then passes it on to the processor.
func (sc *scheduler) handleBlockResponse(event bcBlockResponse) (Event, error) {
	err := sc.touchPeer(event.peerID, event.time)
	if err != nil {
		// the peer either does not exist or is not Ready
		return noOp, nil
	}

	err = sc.markReceived(event.peerID, event.block.Height, event.size, event.time)
	if err != nil {
		sc.removePeer(event.peerID)
		return scPeerError{peerID: event.peerID, reason: err}, nil
	}

	return scBlockReceived{peerID: event.peerID, block: event.block}, nil
}

func (sc *scheduler) handleNoBlockResponse(event bcNoBlockResponse) (Event, error) {
	// No such peer, or the peer was already removed.
	peer, ok := sc.peers[event.peerID]
	if !ok || peer.state == peerStateRemoved {
		return noOp, nil
	}

	// The peer may have just been removed due to errors, low speed or timeouts.
	sc.removePeer(event.peerID)

	return scPeerError{peerID: event.peerID,
		reason: fmt.Errorf("peer %v with base %d height %d claims no block for %d",
			event.peerID, peer.base, peer.height, event.height)}, nil
}

func (sc *scheduler) handleBlockProcessed(event pcBlockProcessed) (Event, error) {
	if event.height != sc.height {
		panic(fmt.Sprintf("processed height %d, but expected height %d", event.height, sc.height))
	}

	err := sc.markProcessed(event.height)
	if err != nil {
		return scSchedulerFail{reason: err}, nil
	}

	if sc.allBlocksProcessed() {
		return scFinishedEv{reason: "processed all blocks"}, nil
	}

	return noOp, nil
}

// handleBlockProcessError handles an error from the processor. The processor has already
// cleaned the blocks from the peers included in this event, so just attempt to remove the peers.
func (sc *scheduler) handleBlockProcessError(event pcBlockVerificationFailure) (Event, error) {
	// The peers may have just been removed due to errors, low speed or timeouts.
	sc.removePeer(event.firstPeerID)
	if event.firstPeerID != event.secondPeerID {
		sc.removePeer(event.secondPeerID)
	}

	if sc.allBlocksProcessed() {
		return scFinishedEv{reason: "error on last block"}, nil
	}

	return noOp, nil
}

func (sc *scheduler) handleAddNewPeer(event bcAddNewPeer) (Event, error) {
	sc.ensurePeer(event.peerID)
	return noOp, nil
}

func (sc *scheduler) handleRemovePeer(event bcRemovePeer) (Event, error) {
	sc.removePeer(event.peerID)

	if sc.allBlocksProcessed() {
		return scFinishedEv{reason: "removed peer"}, nil
	}

	// Return scPeerError so the peer (and all associated blocks) is removed from
	// the processor.
	return scPeerError{peerID: event.peerID, reason: errors.New("peer was stopped")}, nil
}

func (sc *scheduler) handleTryPrunePeer(event rTryPrunePeer) (Event, error) {
	// Check the behavior of the peer responsible for delivering the block at sc.height.
	timeHeightAsked, ok := sc.pendingTime[sc.height]
	if ok && time.Since(timeHeightAsked) > sc.peerTimeout {
		// A request was sent to a peer for the block at sc.height but a response was not
		// received from that peer within sc.peerTimeout. Remove the peer. This ensures that
		// a peer is timed out even if it keeps sending blocks at higher heights while
		// blocking progress by withholding the block at the current height.
		sc.removePeer(sc.pendingBlocks[sc.height])
	}

	prunablePeers := sc.prunablePeers(sc.peerTimeout, sc.minRecvRate, event.time)
	if len(prunablePeers) == 0 {
		return noOp, nil
	}
	for _, peerID := range prunablePeers {
		sc.removePeer(peerID)
	}

	// If all blocks are processed we should finish.
	if sc.allBlocksProcessed() {
		return scFinishedEv{reason: "after try prune"}, nil
	}

	return scPeersPruned{peers: prunablePeers}, nil
}

func (sc *scheduler) handleResetState(event bcResetState) (Event, error) {
	initHeight := event.state.LastBlockHeight + 1
	if initHeight == 1 {
		initHeight = event.state.InitialHeight
	}
	sc.initHeight = initHeight
	sc.height = initHeight
	sc.lastAdvance = time.Now()
	sc.addNewBlocks()
	return noOp, nil
}

func (sc *scheduler) handleTrySchedule(event rTrySchedule) (Event, error) {
	if time.Since(sc.lastAdvance) > sc.syncTimeout {
		return scFinishedEv{reason: "timeout, no advance"}, nil
	}

	nextHeight := sc.nextHeightToSchedule()
	if nextHeight == -1 {
		return noOp, nil
	}

	bestPeerID, err := sc.selectPeer(nextHeight)
	if err != nil {
		return scSchedulerFail{reason: err}, nil
	}
	if err := sc.markPending(bestPeerID, nextHeight, event.time); err != nil {
		return scSchedulerFail{reason: err}, nil // XXX: peerError might be more appropriate
	}
	return scBlockRequest{peerID: bestPeerID, height: nextHeight}, nil
}

func (sc *scheduler) handleStatusResponse(event bcStatusResponse) (Event, error) {
	err := sc.setPeerRange(event.peerID, event.base, event.height)
	if err != nil {
		return scPeerError{peerID: event.peerID, reason: err}, nil
	}
	return noOp, nil
}

func (sc *scheduler) handle(event Event) (Event, error) {
	switch event := event.(type) {
	case bcResetState:
		return sc.handleResetState(event)
	case bcStatusResponse:
		return sc.handleStatusResponse(event)
	case bcBlockResponse:
		return sc.handleBlockResponse(event)
	case bcNoBlockResponse:
		return sc.handleNoBlockResponse(event)
	case rTrySchedule:
		return sc.handleTrySchedule(event)
	case bcAddNewPeer:
		return sc.handleAddNewPeer(event)
	case bcRemovePeer:
		return sc.handleRemovePeer(event)
	case rTryPrunePeer:
		return sc.handleTryPrunePeer(event)
	case pcBlockProcessed:
		return sc.handleBlockProcessed(event)
	case pcBlockVerificationFailure:
		return sc.handleBlockProcessError(event)
	default:
		return scSchedulerFail{reason: fmt.Errorf("unknown event %v", event)}, nil
	}
}
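
A hypothetical sketch of a routine driving the dispatcher above; eventsIn and eventsOut are invented names and this loop is not part of the original file:

// exampleEventLoop is illustrative only: it feeds incoming events to the
// scheduler and forwards every non-noOp result downstream.
func exampleEventLoop(sc *scheduler, eventsIn <-chan Event, eventsOut chan<- Event) {
	for event := range eventsIn {
		next, err := sc.handle(event)
		if err != nil {
			// the handlers above currently always return a nil error, but guard anyway
			return
		}
		if _, ok := next.(noOpEvent); !ok {
			eventsOut <- next
		}
	}
}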
@@ -1,65 +0,0 @@
package v2

import (
	"github.com/Workiva/go-datastructures/queue"
)

// Event is the type that can be added to the priority queue.
type Event queue.Item

type priority interface {
	Compare(other queue.Item) int
	Priority() int
}

type priorityLow struct{}
type priorityNormal struct{}
type priorityHigh struct{}

func (p priorityLow) Priority() int {
	return 1
}

func (p priorityNormal) Priority() int {
	return 2
}

func (p priorityHigh) Priority() int {
	return 3
}

func (p priorityLow) Compare(other queue.Item) int {
	op := other.(priority)
	if p.Priority() > op.Priority() {
		return 1
	} else if p.Priority() == op.Priority() {
		return 0
	}
	return -1
}

func (p priorityNormal) Compare(other queue.Item) int {
	op := other.(priority)
	if p.Priority() > op.Priority() {
		return 1
	} else if p.Priority() == op.Priority() {
		return 0
	}
	return -1
}

func (p priorityHigh) Compare(other queue.Item) int {
	op := other.(priority)
	if p.Priority() > op.Priority() {
		return 1
	} else if p.Priority() == op.Priority() {
		return 0
	}
	return -1
}

type noOpEvent struct {
	priorityLow
}

var noOp = noOpEvent{}
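
Events get a priority by embedding one of the marker structs, as noOpEvent does with priorityLow; the embedded Compare and Priority methods then satisfy queue.Item. A hypothetical event type built under that assumption, not part of the original file:

// exampleUrgentEvent is illustrative only: embedding priorityHigh gives it
// Priority() == 3, so the queue orders it ahead of normal- and low-priority items.
type exampleUrgentEvent struct {
	priorityHigh
	reason string
}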
blocksync/metrics.gen.go (new file, 30 lines)
@@ -0,0 +1,30 @@
// Code generated by metricsgen. DO NOT EDIT.

package blocksync

import (
	"github.com/go-kit/kit/metrics/discard"
	prometheus "github.com/go-kit/kit/metrics/prometheus"
	stdprometheus "github.com/prometheus/client_golang/prometheus"
)

func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
	labels := []string{}
	for i := 0; i < len(labelsAndValues); i += 2 {
		labels = append(labels, labelsAndValues[i])
	}
	return &Metrics{
		Syncing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
			Namespace: namespace,
			Subsystem: MetricsSubsystem,
			Name:      "syncing",
			Help:      "Whether or not a node is block syncing. 1 if yes, 0 if no.",
		}, labels).With(labelsAndValues...),
	}
}

func NopMetrics() *Metrics {
	return &Metrics{
		Syncing: discard.NewGauge(),
	}
}
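
A hedged sketch of how these constructors might be wired up. Note that labelsAndValues must arrive as alternating label names and values, since only every other element becomes a label name. The namespace and chain_id label are invented for illustration:

// exampleMetricsWiring is illustrative only and not part of the generated file.
func exampleMetricsWiring(prometheusEnabled bool) *Metrics {
	if prometheusEnabled {
		return PrometheusMetrics("tendermint", "chain_id", "test-chain")
	}
	return NopMetrics()
}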