Mirror of https://github.com/tendermint/tendermint.git, synced 2026-01-12 07:42:48 +00:00.

Compare commits: 150 commits, from `wb/add-tim` to `marko/brin`.
Commit SHAs (the author and date columns were empty in this listing):

5f6dc0502a  dd4fee88ef  c8e336f2e9  9a028b7d8a  37287ead94  6c40ad39b2
ceca73a873  e31c1e3622  a49325e4e0  161496bfca  e7451a43e7  cf2a00b398
89196596f7  eee19e42db  97b39770e0  297fcc0a7c  3d448e1ef8  6faf506540
d69bd64702  47d52fc78d  2a58ea3ab2  1121698757  8670678291  da1b871808
e741d01231  b626b0a719  c44e6d1799  dcfbd9bc3d  e36052c80e  02c1c8ecb4
701eff8968  a4190208a3  5b38bc17b1  2e5d53ea9a  674908b130  001449d536
a5ecd418a6  20f8485080  b5e6cf50d1  8345dc4f7c  6970c9177b  383f9ea180
ef314708e7  f0db0ff260  9d49c4b9af  fd3c397c69  b4da12815f  cbae5f9f53
d56392cee9  29e5fbcc64  e4991fd862  ad249ca178  d919a42f53  82585e1ce6
a1104b98d2  9ea0e5efa7  efd4f4a40b  c372390fea  889341152a  c430624e1b
7243ef71e2  bebfd8663b  851c0dc4f3  767e2ec8a2  43f92806fd  34e727676c
a0f51d0370  be7cb50bb3  69874c2050  68c9efdb82  2a0147515f  6bbf5b6d0f
024f6117ee  9296717433  6dc15b2f50  0e4b18806e  c45367e22c  29ad4dcb3b
80b8c0057b  5d4d01b4e4  571d26fbb0  cf337cc3f2  969690d81c  9ec30ecc0c
0e32ad9e5c  b7e87eef05  0ab2f31239  9a46f575f5  8c4e982616  fa531d3da9
ce3b48febd  5235913e12  4743a7ad0d  f504089273  c53b71821c  b91501cecf
322bb460dd  26b5672a54  44988943ba  e6550f8686  e565a4a1f5  6ff07a1633
631ef7aae0  3e3a934818  90b951af72  9089b2aed5  5119d16d5c  5df277caca
53b7dbe285  f3858e52de  9d20e06900  6ed3f2d98d  681cdf8347  14f41ac5e3
7678ab8850  025894c11d  85364a9ba8  d153388446  2304ea70f7  60f88194ec
0a23b1e51d  9d1e8eaad4  97f7021712  cb8e6b1c1a  8df38db82e  9fe25a1ed1
99f9ee0f63  4a504c0687  b68424be47  a79dd42d24  6af23ff757  d1722c9c10
6ed2c42d7b  185f15d645  dfb322e68b  3d0d89e505  9e643f3628  1949095c51
41a1bf539b  462c475abc  9d56520f76  14f5588ce2  3945915920  0af58409bf
7d39f639f6  e4ae922c33  cbce877480  b29cc95920  0c9558a742  e2fc50ec9f
.github/workflows/build.yml (vendored): 12 changes

@@ -20,11 +20,11 @@ jobs:
goos: ["linux"]
timeout-minutes: 5
steps:
- uses: actions/setup-go@v2
- uses: actions/setup-go@v3
with:
go-version: "1.17"
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6.0.1
- uses: technote-space/get-diff-action@v6
with:
PATTERNS: |
**/**.go

@@ -41,11 +41,11 @@ jobs:
needs: build
timeout-minutes: 5
steps:
- uses: actions/setup-go@v2
- uses: actions/setup-go@v3
with:
go-version: "1.17"
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6.0.1
- uses: technote-space/get-diff-action@v6
with:
PATTERNS: |
**/**.go

@@ -63,11 +63,11 @@ jobs:
needs: build
timeout-minutes: 5
steps:
- uses: actions/setup-go@v2
- uses: actions/setup-go@v3
with:
go-version: "1.17"
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6.0.1
- uses: technote-space/get-diff-action@v6
with:
PATTERNS: |
**/**.go
.github/workflows/docker.yml (vendored): 2 changes

@@ -39,7 +39,7 @@ jobs:
platforms: all

- name: Set up Docker Build
uses: docker/setup-buildx-action@v1.6.0
uses: docker/setup-buildx-action@v1.7.0

- name: Login to DockerHub
if: ${{ github.event_name != 'pull_request' }}
.github/workflows/docs-toc.yml (vendored, new file): 20 changes

@@ -0,0 +1,20 @@
# Verify that important design docs have ToC entries.
name: Check documentation ToC
on:
pull_request:
push:
branches:
- master

jobs:
check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
with:
PATTERNS: |
docs/architecture/**
docs/rfc/**
- run: ./docs/presubmit.sh
if: env.GIT_DIFF
.github/workflows/docs.yaml (vendored, new file): 32 changes

@@ -0,0 +1,32 @@
name: Documentation
# This workflow builds the static documentation site, and publishes the results to GitHub Pages.
# It runs on every push to the main branch, with changes in the docs and spec directories
on:
workflow_dispatch: # allow manual updates
push:
branches:
- master
paths:
- "docs/**"
- "spec/**"

jobs:
build-and-deploy:
runs-on: ubuntu-latest
container:
image: tendermintdev/docker-website-deployment
steps:
- name: Checkout 🛎️
uses: actions/checkout@v3

- name: Install and Build 🔧
run: |
apk add rsync
make build-docs
- name: Deploy 🚀
uses: JamesIves/github-pages-deploy-action@v4.3.0
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
BRANCH: gh-pages
FOLDER: ~/output
single-commit: true
.github/workflows/e2e-manual.yml (vendored): 2 changes

@@ -15,7 +15,7 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/setup-go@v2
- uses: actions/setup-go@v3
with:
go-version: '1.17'
.github/workflows/e2e-nightly-34x.yml (vendored): 2 changes

@@ -20,7 +20,7 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/setup-go@v2
- uses: actions/setup-go@v3
with:
go-version: '1.17'
.github/workflows/e2e-nightly-35x.yml (vendored): 2 changes

@@ -20,7 +20,7 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/setup-go@v2
- uses: actions/setup-go@v3
with:
go-version: '1.17'
.github/workflows/e2e-nightly-master.yml (vendored): 2 changes

@@ -19,7 +19,7 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/setup-go@v2
- uses: actions/setup-go@v3
with:
go-version: '1.17'
.github/workflows/e2e.yml (vendored): 4 changes

@@ -14,11 +14,11 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 15
steps:
- uses: actions/setup-go@v2
- uses: actions/setup-go@v3
with:
go-version: '1.17'
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6.0.1
- uses: technote-space/get-diff-action@v6
with:
PATTERNS: |
**/**.go
.github/workflows/fuzz-nightly.yml (vendored): 8 changes

@@ -13,7 +13,7 @@ jobs:
fuzz-nightly-test:
runs-on: ubuntu-latest
steps:
- uses: actions/setup-go@v2
- uses: actions/setup-go@v3
with:
go-version: '1.17'

@@ -21,7 +21,7 @@ jobs:

- name: Install go-fuzz
working-directory: test/fuzz
run: go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-build
run: go install github.com/dvyukov/go-fuzz/go-fuzz@latest github.com/dvyukov/go-fuzz/go-fuzz-build@latest

- name: Fuzz mempool
working-directory: test/fuzz

@@ -39,14 +39,14 @@ jobs:
continue-on-error: true

- name: Archive crashers
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: crashers
path: test/fuzz/**/crashers
retention-days: 3

- name: Archive suppressions
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: suppressions
path: test/fuzz/**/suppressions
.github/workflows/jepsen.yml (vendored): 2 changes

@@ -58,7 +58,7 @@ jobs:
run: docker exec -i jepsen-control bash -c 'source /root/.bashrc; cd /jepsen/tendermint; lein run test --nemesis ${{ github.event.inputs.nemesis }} --workload ${{ github.event.inputs.workload }} --concurrency ${{ github.event.inputs.concurrency }} --tendermint-url ${{ github.event.inputs.tendermintUrl }} --merkleeyes-url ${{ github.event.inputs.merkleeyesUrl }} --time-limit ${{ github.event.inputs.timeLimit }} ${{ github.event.inputs.dupOrSuperByzValidators }}'

- name: Archive results
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: results
path: tendermint/store/latest
.github/workflows/linkchecker.yml (vendored): 2 changes

@@ -7,6 +7,6 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: gaurav-nelson/github-action-markdown-link-check@1.0.14
- uses: creachadair/github-action-markdown-link-check@master
with:
folder-path: "docs"
.github/workflows/lint.yml (vendored): 20 changes

@@ -1,7 +1,11 @@
name: Golang Linter
# Lint runs golangci-lint over the entire Tendermint repository
# This workflow is run on every pull request and push to master
# The `golangci` job will pass without running if no *.{go, mod, sum} files have been modified.
# Lint runs golangci-lint over the entire Tendermint repository.
#
# This workflow is run on every pull request and push to master.
#
# The `golangci` job will pass without running if no *.{go, mod, sum}
# files have been modified.

on:
pull_request:
push:

@@ -14,10 +18,10 @@ jobs:
timeout-minutes: 8
steps:
- uses: actions/checkout@v3
- uses: actions/setup-go@v2
- uses: actions/setup-go@v3
with:
go-version: '^1.17'
- uses: technote-space/get-diff-action@v6.0.1
- uses: technote-space/get-diff-action@v6
with:
PATTERNS: |
**/**.go

@@ -25,8 +29,10 @@ jobs:
go.sum
- uses: golangci/golangci-lint-action@v3.1.0
with:
# Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version.
version: v1.44
# Required: the version of golangci-lint is required and
# must be specified without patch version: we always use the
# latest patch version.
version: v1.45
args: --timeout 10m
github-token: ${{ secrets.github_token }}
if: env.GIT_DIFF
.github/workflows/markdown-links.yml (vendored): 40 changes

@@ -1,19 +1,23 @@
# TODO: Re-enable when https://github.com/gaurav-nelson/github-action-markdown-link-check/pull/126 lands.
name: Check Markdown links

#name: Check Markdown links
#
#on:
# push:
# branches:
# - master
# pull_request:
# branches: [master]
#
#jobs:
# markdown-link-check:
# runs-on: ubuntu-latest
# steps:
# - uses: actions/checkout@v3
# - uses: gaurav-nelson/github-action-markdown-link-check@v1.0.13
# with:
# check-modified-files-only: 'yes'
on:
push:
branches:
- master
pull_request:
branches: [master]

jobs:
markdown-link-check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
with:
PATTERNS: |
**/**.md
- uses: creachadair/github-action-markdown-link-check@master
with:
check-modified-files-only: 'yes'
config-file: '.md-link-check.json'
if: env.GIT_DIFF
.github/workflows/proto-lint.yml (vendored): 2 changes

@@ -15,7 +15,7 @@ jobs:
timeout-minutes: 5
steps:
- uses: actions/checkout@v3
- uses: bufbuild/buf-setup-action@v1.1.0
- uses: bufbuild/buf-setup-action@v1.4.0
- uses: bufbuild/buf-lint-action@v1
with:
input: 'proto'
.github/workflows/release.yml (vendored): 6 changes

@@ -16,7 +16,7 @@ jobs:
with:
fetch-depth: 0

- uses: actions/setup-go@v2
- uses: actions/setup-go@v3
with:
go-version: '1.17'

@@ -27,11 +27,13 @@ jobs:
version: latest
args: build --skip-validate # skip validate skips initial sanity checks in order to be able to fully run

- run: echo https://github.com/tendermint/tendermint/blob/${GITHUB_REF#refs/tags/}/CHANGELOG.md#${GITHUB_REF#refs/tags/} > ../release_notes.md

- name: Release
uses: goreleaser/goreleaser-action@v2
if: startsWith(github.ref, 'refs/tags/')
with:
version: latest
args: release --rm-dist
args: release --rm-dist --release-notes=../release_notes.md
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
.github/workflows/stale.yml (vendored): 2 changes

@@ -7,7 +7,7 @@ jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v4
- uses: actions/stale@v5
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
stale-pr-message: "This pull request has been automatically marked as stale because it has not had
.github/workflows/tests.yml (vendored): 18 changes

@@ -16,11 +16,11 @@ jobs:
matrix:
part: ["00", "01", "02", "03", "04", "05"]
steps:
- uses: actions/setup-go@v2
- uses: actions/setup-go@v3
with:
go-version: "1.17"
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6.0.1
- uses: technote-space/get-diff-action@v6
with:
PATTERNS: |
**/**.go

@@ -32,7 +32,7 @@ jobs:
run: |
make test-group-${{ matrix.part }} NUM_SPLIT=6
if: env.GIT_DIFF
- uses: actions/upload-artifact@v2
- uses: actions/upload-artifact@v3
with:
name: "${{ github.sha }}-${{ matrix.part }}-coverage"
path: ./build/${{ matrix.part }}.profile.out

@@ -42,7 +42,7 @@ jobs:
needs: tests
steps:
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6.0.1
- uses: technote-space/get-diff-action@v6
with:
PATTERNS: |
**/**.go

@@ -50,26 +50,26 @@ jobs:
go.mod
go.sum
Makefile
- uses: actions/download-artifact@v2
- uses: actions/download-artifact@v3
with:
name: "${{ github.sha }}-00-coverage"
if: env.GIT_DIFF
- uses: actions/download-artifact@v2
- uses: actions/download-artifact@v3
with:
name: "${{ github.sha }}-01-coverage"
if: env.GIT_DIFF
- uses: actions/download-artifact@v2
- uses: actions/download-artifact@v3
with:
name: "${{ github.sha }}-02-coverage"
if: env.GIT_DIFF
- uses: actions/download-artifact@v2
- uses: actions/download-artifact@v3
with:
name: "${{ github.sha }}-03-coverage"
if: env.GIT_DIFF
- run: |
cat ./*profile.out | grep -v "mode: set" >> coverage.txt
if: env.GIT_DIFF
- uses: codecov/codecov-action@v2.1.0
- uses: codecov/codecov-action@v3.1.0
with:
file: ./coverage.txt
if: env.GIT_DIFF
.md-link-check.json (new file): 6 changes

@@ -0,0 +1,6 @@
{
"retryOn429": true,
"retryCount": 5,
"fallbackRetryDelay": "30s",
"aliveStatusCodes": [200, 206, 503]
}
CHANGELOG.md: 83 changes

@@ -2,6 +2,39 @@

Friendly reminder: We have a [bug bounty program](https://hackerone.com/cosmos).

## v0.35.4

April 18, 2022

Special thanks to external contributors on this release: @firelizzard18

### FEATURES

- [cli] [\#8300](https://github.com/tendermint/tendermint/pull/8300) Add a tool to update old config files to the latest version [backport [\#8281](https://github.com/tendermint/tendermint/pull/8281)]. (@creachadair)

### IMPROVEMENTS

### BUG FIXES

- [cli] [\#8294](https://github.com/tendermint/tendermint/pull/8294) keymigrate: ensure block hash keys are correctly translated. (@creachadair)
- [cli] [\#8352](https://github.com/tendermint/tendermint/pull/8352) keymigrate: ensure transaction hash keys are correctly translated. (@creachadair)

## v0.35.3

April 8, 2022

### FEATURES

- [cli] [\#8081](https://github.com/tendermint/tendermint/pull/8081) add a safer-to-use `reset-state` command. (@marbar3778)

### IMPROVEMENTS

- [consensus] [\#8138](https://github.com/tendermint/tendermint/pull/8138) change lock handling in reactor and handleMsg for RoundState. (@williambanfield)

### BUG FIXES

- [cli] [\#8276](https://github.com/tendermint/tendermint/pull/8276) scmigrate: ensure target key is correctly renamed. (@creachadair)

## v0.35.2

February 28, 2022

@@ -94,7 +127,7 @@ Special thanks to external contributors on this release: @JayT106,

- [\#7106](https://github.com/tendermint/tendermint/pull/7106) Revert mutex change to ABCI Clients (@tychoish).
- [\#7142](https://github.com/tendermint/tendermint/pull/7142) mempool: remove panic when recheck-tx was not sent to ABCI application (@williambanfield).
- [consensus]: [\#7060](https://github.com/tendermint/tendermint/pull/7060)
- [consensus]: [\#7060](https://github.com/tendermint/tendermint/pull/7060) <!-- markdown-link-check-disable-line -->
  wait until peerUpdates channel is closed to close remaining peers (@williambanfield)
- [privval] [\#5638](https://github.com/tendermint/tendermint/pull/5638) Increase read/write timeout to 5s and calculate ping interval based on it (@JoeKash)
- [evidence] [\#6375](https://github.com/tendermint/tendermint/pull/6375) Fix bug with inconsistent LightClientAttackEvidence hashing (cmwaters)

@@ -230,6 +263,32 @@ Special thanks to external contributors on this release: @JayT106,

- [cmd/tendermint/commands] [\#6623](https://github.com/tendermint/tendermint/pull/6623) replace `$HOME/.some/test/dir` with `t.TempDir` (@tanyabouman)
- [statesync] \6807 Implement P2P state provider as an alternative to RPC (@cmwaters)

## v0.34.19

### BUG FIXES

- [cli] [\#8270](https://github.com/tendermint/tendermint/issues/8270) fix reset commands (@alexanderbez).

## v0.34.18

### BREAKING CHANGES

- CLI/RPC/Config
  - [cli] [\#8258](https://github.com/tendermint/tendermint/pull/8258) Fix a bug in the cli that caused `unsafe-reset-all` to panic

## v0.34.17

### BREAKING CHANGES

- CLI/RPC/Config

  - [cli] [\#8081](https://github.com/tendermint/tendermint/issues/8081) make the reset command safe to use (@marbar3778).

### BUG FIXES

- [consensus] [\#8079](https://github.com/tendermint/tendermint/issues/8079) start the timeout ticker before relay (backport #7844) (@creachadair).
- [consensus] [\#7992](https://github.com/tendermint/tendermint/issues/7992) [\#7994](https://github.com/tendermint/tendermint/issues/7994) change lock handling in handleMsg and reactor to alleviate issues gossiping during long ABCI calls (@williambanfield).

## v0.34.16

Special thanks to external contributors on this release: @yihuang

@@ -1374,7 +1433,7 @@ Special thanks to external contributors on this release: @jon-certik, @gracenoah

*August 28, 2019*

@climber73 wrote the [Writing a Tendermint Core application in Java
(gRPC)](https://github.com/tendermint/tendermint/blob/master/docs/guides/java.md)
(gRPC)](https://github.com/tendermint/tendermint/blob/master/docs/guides/java.md) <!-- markdown-link-check-disable-line -->
guide.

Special thanks to external contributors on this release:

@@ -1953,7 +2012,7 @@ more details.

- [rpc] [\#3269](https://github.com/tendermint/tendermint/issues/2826) Limit number of unique clientIDs with open subscriptions. Configurable via `rpc.max_subscription_clients`
- [rpc] [\#3269](https://github.com/tendermint/tendermint/issues/2826) Limit number of unique queries a given client can subscribe to at once. Configurable via `rpc.max_subscriptions_per_client`.
- [rpc] [\#3435](https://github.com/tendermint/tendermint/issues/3435) Default ReadTimeout and WriteTimeout changed to 10s. WriteTimeout can increased by setting `rpc.timeout_broadcast_tx_commit` in the config.
- [rpc/client] [\#3269](https://github.com/tendermint/tendermint/issues/3269) Update `EventsClient` interface to reflect new pubsub/eventBus API [ADR-33](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-033-pubsub.md). This includes `Subscribe`, `Unsubscribe`, and `UnsubscribeAll` methods.
- [rpc/client] [\#3269](https://github.com/tendermint/tendermint/issues/3269) Update `EventsClient` interface to reflect new pubsub/eventBus API [ADR-33](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-033-pubsub.md). This includes `Subscribe`, `Unsubscribe`, and `UnsubscribeAll` methods.

* Apps
  - [abci] [\#3403](https://github.com/tendermint/tendermint/issues/3403) Remove `time_iota_ms` from BlockParams. This is a

@@ -2006,7 +2065,7 @@ more details.

- [blockchain] [\#3358](https://github.com/tendermint/tendermint/pull/3358) Fix timer leak in `BlockPool` (@guagualvcha)
- [cmd] [\#3408](https://github.com/tendermint/tendermint/issues/3408) Fix `testnet` command's panic when creating non-validator configs (using `--n` flag) (@srmo)
- [libs/db/remotedb/grpcdb] [\#3402](https://github.com/tendermint/tendermint/issues/3402) Close Iterator/ReverseIterator after use
- [libs/pubsub] [\#951](https://github.com/tendermint/tendermint/issues/951), [\#1880](https://github.com/tendermint/tendermint/issues/1880) Use non-blocking send when dispatching messages [ADR-33](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-033-pubsub.md)
- [libs/pubsub] [\#951](https://github.com/tendermint/tendermint/issues/951), [\#1880](https://github.com/tendermint/tendermint/issues/1880) Use non-blocking send when dispatching messages [ADR-33](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-033-pubsub.md)
- [lite] [\#3364](https://github.com/tendermint/tendermint/issues/3364) Fix `/validators` and `/abci_query` proxy endpoints
  (@guagualvcha)
- [p2p/conn] [\#3347](https://github.com/tendermint/tendermint/issues/3347) Reject all-zero shared secrets in the Diffie-Hellman step of secret-connection

@@ -2710,7 +2769,7 @@ Special thanks to external contributors on this release:

This release is mostly about the ConsensusParams - removing fields and enforcing MaxGas.
It also addresses some issues found via security audit, removes various unused
functions from `libs/common`, and implements
[ADR-012](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-012-peer-transport.md).
[ADR-012](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-012-peer-transport.md).

BREAKING CHANGES:

@@ -2775,7 +2834,7 @@ are affected by a change.

A few more breaking changes are in the works - each will come with a clear
Architecture Decision Record (ADR) explaining the change. You can review ADRs
[here](https://github.com/tendermint/tendermint/tree/develop/docs/architecture)
[here](https://github.com/tendermint/tendermint/tree/master/docs/architecture)
or in the [open Pull Requests](https://github.com/tendermint/tendermint/pulls).
You can also check in on the [issues marked as
breaking](https://github.com/tendermint/tendermint/issues?q=is%3Aopen+is%3Aissue+label%3Abreaking).

@@ -2791,7 +2850,7 @@ BREAKING CHANGES:

- [abci] Added address of the original proposer of the block to Header
- [abci] Change ABCI Header to match Tendermint exactly
- [abci] [\#2159](https://github.com/tendermint/tendermint/issues/2159) Update use of `Validator` (see
  [ADR-018](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-018-ABCI-Validators.md)):
  [ADR-018](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-018-ABCI-Validators.md)):
  - Remove PubKey from `Validator` (so it's just Address and Power)
  - Introduce `ValidatorUpdate` (with just PubKey and Power)
  - InitChain and EndBlock use ValidatorUpdate

@@ -2813,7 +2872,7 @@ BREAKING CHANGES:

- [state] [\#1815](https://github.com/tendermint/tendermint/issues/1815) Validator set changes are now delayed by one block (!)
  - Add NextValidatorSet to State, changes on-disk representation of state
- [state] [\#2184](https://github.com/tendermint/tendermint/issues/2184) Enforce ConsensusParams.BlockSize.MaxBytes (See
  [ADR-020](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-020-block-size.md)).
  [ADR-020](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-020-block-size.md)).
  - Remove ConsensusParams.BlockSize.MaxTxs
  - Introduce maximum sizes for all components of a block, including ChainID
- [types] Updates to the block Header:

@@ -2824,7 +2883,7 @@ BREAKING CHANGES:

- [consensus] [\#2203](https://github.com/tendermint/tendermint/issues/2203) Implement BFT time
  - Timestamp in block must be monotonic and equal the median of timestamps in block's LastCommit
- [crypto] [\#2239](https://github.com/tendermint/tendermint/issues/2239) Secp256k1 signature changes (See
  [ADR-014](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-014-secp-malleability.md)):
  [ADR-014](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-014-secp-malleability.md)):
  - format changed from DER to `r || s`, both little endian encoded as 32 bytes.
  - malleability removed by requiring `s` to be in canonical form.

@@ -3054,7 +3113,7 @@ BREAKING CHANGES:

FEATURES
- [cmd] Added metrics (served under `/metrics` using a Prometheus client;
  disabled by default). See the new `instrumentation` section in the config and
  [metrics](https://tendermint.readthedocs.io/projects/tools/en/develop/metrics.html)
  [metrics](https://github.com/tendermint/tendermint/blob/master/docs/nodes/metrics.md)
  guide.
- [p2p] Add IPv6 support to peering.
- [p2p] Add `external_address` to config to allow specifying the address for

@@ -3168,7 +3227,7 @@ BREAKING:

FEATURES

- [rpc] the RPC documentation is now published to https://tendermint.github.io/slate
- [rpc] the RPC documentation is now published to `https://tendermint.github.io/slate`
- [p2p] AllowDuplicateIP config option to refuse connections from same IP.
  - true by default for now, false by default in next breaking release
- [docs] Add docs for query, tx indexing, events, pubsub

@@ -3646,7 +3705,7 @@ containing substructs: `BaseConfig`, `P2PConfig`, `MempoolConfig`, `ConsensusCon

- Change some [function and method signatures](https://gist.github.com/ebuchman/640d5fc6c2605f73497992fe107ebe0b) accomodate new config

- Logger
  - Replace static `log15` logger with a simple interface, and provide a new implementation using `go-kit`.
  - Replace static `log15` logger with a simple interface, and provide a new implementation using `go-kit`. <!-- markdown-link-check-disable-next-line -->
    See our new [logging library](https://github.com/tendermint/tmlibs/log) and [blog post](https://tendermint.com/blog/abstracting-the-logger-interface-in-go) for more details
  - Levels `warn` and `notice` are removed (you may need to change them in your `config.toml`!)
  - Change some [function and method signatures](https://gist.github.com/ebuchman/640d5fc6c2605f73497992fe107ebe0b) to accept a logger

@@ -19,7 +19,8 @@ Special thanks to external contributors on this release:

- [rpc] \#7713 Remove unused options for websocket clients. (@creachadair)
- [config] \#7930 Add new event subscription options and defaults. (@creachadair)
- [rpc] \#7982 Add new Events interface and deprecate Subscribe. (@creachadair)
- [cli] \#8081 make the reset command safe to use. (@marbar3778)
- [cli] \#8081 make the reset command safe to use by intoducing `reset-state` command. Fixed by \#8259. (@marbar3778, @cmwaters)
- [config] \#8222 default indexer configuration to null. (@creachadair)

- Apps

@@ -46,6 +47,7 @@ Special thanks to external contributors on this release:

- [libs/service] \#7288 Remove SetLogger method on `service.Service` interface. (@tychoish)
- [abci/client] \#7607 Simplify client interface (removes most "async" methods). (@creachadair)
- [libs/json] \#7673 Remove the libs/json (tmjson) library. (@creachadair)
- [crypto] \#8412 \#8432 Remove `crypto/tmhash` package in favor of small functions in `crypto` package and cleanup of unused functions. (@tychoish)

- Blockchain Protocol

@@ -61,6 +63,7 @@ Special thanks to external contributors on this release:

- [consensus] \#7415 Update proposal validation logic to Prevote nil if a proposal does not meet the conditions for Timelyness per the proposer-based timestamp specification. (@anca)
- [consensus] \#7382 Update block validation to no longer require the block timestamp to be the median of the timestamps of the previous commit. (@anca)
- [consensus] \#7711 Use the proposer timestamp for the first height instead of the genesis time. Chains will still start consensus at the genesis time. (@anca)
- [cli] \#8281 Add a tool to update old config files to the latest version. (@creachadair)

### IMPROVEMENTS

@@ -80,3 +83,6 @@ Special thanks to external contributors on this release:

- [light] \#7640 Light Client: fix absence proof verification (@ashcherbakov)
- [light] \#7641 Light Client: fix querying against the latest height (@ashcherbakov)
- [cli] [#7837](https://github.com/tendermint/tendermint/pull/7837) fix app hash in state rollback. (@yihuang)
- [cli] \#8276 scmigrate: ensure target key is correctly renamed. (@creachadair)
- [cli] \#8294 keymigrate: ensure block hash keys are correctly translated. (@creachadair)
- [cli] \#8352 keymigrate: ensure transaction hash keys are correctly translated. (@creachadair)

@@ -20,7 +20,7 @@ This code of conduct applies to all projects run by the Tendermint/COSMOS team a

* Please keep unstructured critique to a minimum. If you have solid ideas you want to experiment with, make a fork and see how it works.

* We will exclude you from interaction if you insult, demean or harass anyone. That is not welcome behaviour. We interpret the term “harassment” as including the definition in the [Citizen Code of Conduct](http://citizencodeofconduct.org/); if you have any lack of clarity about what might be included in that concept, please read their definition. In particular, we don’t tolerate behavior that excludes people in socially marginalized groups.
* We will exclude you from interaction if you insult, demean or harass anyone. That is not welcome behaviour. We interpret the term “harassment” as including the definition in the [Citizen Code of Conduct](https://github.com/stumpsyn/policies/blob/master/citizen_code_of_conduct.md); if you have any lack of clarity about what might be included in that concept, please read their definition. In particular, we don’t tolerate behavior that excludes people in socially marginalized groups.

* Private harassment is also unacceptable. No matter who you are, if you feel you have been or are being harassed or made uncomfortable by a community member, please contact one of the channel admins or the person mentioned above immediately. Whether you’re a regular contributor or a newcomer, we care about making this community a safe place for you and we’ve got your back.
DOCKER/.gitignore (vendored): 1 change

@@ -1 +0,0 @@
tendermint
@@ -2,7 +2,7 @@
FROM golang:1.17-alpine as builder
RUN apk update && \
apk upgrade && \
apk --no-cache add make
apk --no-cache add make git
COPY / /tendermint
WORKDIR /tendermint
RUN make build-linux

@@ -53,4 +53,3 @@ CMD ["start"]

# Expose the data directory as a volume since there's mutable state in there
VOLUME [ "$TMHOME" ]
@@ -1,28 +0,0 @@
FROM amazonlinux:2

RUN yum -y update && \
yum -y install wget

RUN wget http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm && \
rpm -ivh epel-release-latest-7.noarch.rpm

RUN yum -y groupinstall "Development Tools"
RUN yum -y install leveldb-devel which

ENV GOVERSION=1.16.5

RUN cd /tmp && \
wget https://dl.google.com/go/go${GOVERSION}.linux-amd64.tar.gz && \
tar -C /usr/local -xf go${GOVERSION}.linux-amd64.tar.gz && \
mkdir -p /go/src && \
mkdir -p /go/bin

ENV PATH=$PATH:/usr/local/go/bin:/go/bin
ENV GOBIN=/go/bin
ENV GOPATH=/go/src

RUN mkdir -p /tendermint
WORKDIR /tendermint

CMD ["/usr/bin/make", "build", "TENDERMINT_BUILD_OPTIONS=cleveldb"]
@@ -1,16 +0,0 @@
FROM golang:latest

# Grab deps (jq, hexdump, xxd, killall)
RUN apt-get update && \
apt-get install -y --no-install-recommends \
jq bsdmainutils vim-common psmisc netcat

# Add testing deps for curl
RUN echo 'deb http://httpredir.debian.org/debian testing main non-free contrib' >> /etc/apt/sources.list && \
apt-get update && \
apt-get install -y --no-install-recommends curl

VOLUME /go

EXPOSE 26656
EXPOSE 26657
@@ -1,13 +0,0 @@
build:
	@sh -c "'$(CURDIR)/build.sh'"

push:
	@sh -c "'$(CURDIR)/push.sh'"

build_testing:
	docker build --tag tendermint/testing -f ./Dockerfile.testing .

build_amazonlinux_buildimage:
	docker build -t "tendermint/tendermint:build_c-amazonlinux" -f Dockerfile.build_c-amazonlinux .

.PHONY: build push build_testing build_amazonlinux_buildimage
@@ -8,7 +8,7 @@ Official releases can be found [here](https://github.com/tendermint/tendermint/r

The Dockerfile for tendermint is not expected to change in the near future. The master file used for all builds can be found [here](https://raw.githubusercontent.com/tendermint/tendermint/master/DOCKER/Dockerfile).

Respective versioned files can be found <https://raw.githubusercontent.com/tendermint/tendermint/vX.XX.XX/DOCKER/Dockerfile> (replace the Xs with the version number).
Respective versioned files can be found at `https://raw.githubusercontent.com/tendermint/tendermint/vX.XX.XX/DOCKER/Dockerfile` (replace the Xs with the version number).

## Quick reference
@@ -1,20 +0,0 @@
#!/usr/bin/env bash
set -e

# Get the tag from the version, or try to figure it out.
if [ -z "$TAG" ]; then
	TAG=$(awk -F\" '/TMCoreSemVer =/ { print $2; exit }' < ../version/version.go)
fi
if [ -z "$TAG" ]; then
	echo "Please specify a tag."
	exit 1
fi

TAG_NO_PATCH=${TAG%.*}

read -p "==> Build 3 docker images with the following tags (latest, $TAG, $TAG_NO_PATCH)? y/n" -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]
then
	docker build -t "tendermint/tendermint" -t "tendermint/tendermint:$TAG" -t "tendermint/tendermint:$TAG_NO_PATCH" .
fi
@@ -1,22 +0,0 @@
#!/usr/bin/env bash
set -e

# Get the tag from the version, or try to figure it out.
if [ -z "$TAG" ]; then
	TAG=$(awk -F\" '/TMCoreSemVer =/ { print $2; exit }' < ../version/version.go)
fi
if [ -z "$TAG" ]; then
	echo "Please specify a tag."
	exit 1
fi

TAG_NO_PATCH=${TAG%.*}

read -p "==> Push 3 docker images with the following tags (latest, $TAG, $TAG_NO_PATCH)? y/n" -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]
then
	docker push "tendermint/tendermint:latest"
	docker push "tendermint/tendermint:$TAG"
	docker push "tendermint/tendermint:$TAG_NO_PATCH"
fi
Makefile: 13 changes

@@ -117,12 +117,6 @@ proto-check-breaking: check-proto-deps
	@buf breaking --against ".git"
.PHONY: proto-check-breaking

# TODO: Should be removed when work on ABCI++ is complete.
# For more information, see https://github.com/tendermint/tendermint/issues/8066
abci-proto-gen:
	./scripts/abci-gen.sh
.PHONY: abci-proto-gen

###############################################################################
###                              Build ABCI                                 ###
###############################################################################

@@ -178,7 +172,7 @@ go.sum: go.mod

draw_deps:
	@# requires brew install graphviz or apt-get install graphviz
	go get github.com/RobotsAndPencils/goviz
	go install github.com/RobotsAndPencils/goviz@latest
	@goviz -i github.com/tendermint/tendermint/cmd/tendermint -d 3 | dot -Tpng -o dependency-graph.png
.PHONY: draw_deps

@@ -243,10 +237,8 @@ build-docs:
###                             Docker image                                ###
###############################################################################

build-docker: build-linux
	cp $(BUILDDIR)/tendermint DOCKER/tendermint
build-docker:
	docker build --label=tendermint --tag="tendermint/tendermint" -f DOCKER/Dockerfile .
	rm -rf DOCKER/tendermint
.PHONY: build-docker

@@ -343,4 +335,3 @@ split-test-packages:$(BUILDDIR)/packages.txt
	split -d -n l/$(NUM_SPLIT) $< $<.
test-group-%:split-test-packages
	cat $(BUILDDIR)/packages.txt.$* | xargs go test -mod=readonly -timeout=5m -race -coverprofile=$(BUILDDIR)/$*.profile.out
UPGRADING.md: 134 changes

@@ -26,6 +26,31 @@ application concern so be very sure to test the application thoroughly
using realistic workloads and the race detector to ensure your
applications remains correct.

### Config Changes

- We have added a new, experimental tool to help operators migrate
  configuration files created by previous versions of Tendermint.
  To try this tool, run:

  ```shell
  # Install the tool.
  go install github.com/tendermint/tendermint/scripts/confix@latest

  # Run the tool with the old configuration file as input.
  # Replace the -config argument with your path.
  confix -config ~/.tendermint/config/config.toml -out updated.toml
  ```

  This tool should be able to update configurations from v0.34 and v0.35. We
  plan to extend it to handle older configuration files in the future. For now,
  it will report an error (without making any changes) if it does not recognize
  the version that created the file.

- The default configuration for a newly-created node now disables indexing for
  ABCI event metadata. Existing node configurations that already have indexing
  turned on are not affected. Operators who wish to enable indexing for a new
  node, however, must now edit the `config.toml` explicitly.

### RPC Changes

Tendermint v0.36 adds a new RPC event subscription API. The existing event

@@ -63,6 +88,84 @@ callback.
For more detailed information, see [ADR 075](https://tinyurl.com/adr075) which
defines and describes the new API in detail.

### Timeout Parameter Changes

Tendermint v0.36 updates how the Tendermint consensus timing parameters are
configured. These parameters, `timeout-propose`, `timeout-propose-delta`,
`timeout-prevote`, `timeout-prevote-delta`, `timeout-precommit`,
`timeout-precommit-delta`, `timeout-commit`, and `skip-timeout-commit`, were
previously configured in `config.toml`. These timing parameters have moved and
are no longer configured in the `config.toml` file. These parameters have been
migrated into the `ConsensusParameters`. Nodes with these parameters set in the
local configuration file will see a warning logged on startup indicating that
these parameters are no longer used.

These parameters have also been pared-down. There are no longer separate
parameters for both the `prevote` and `precommit` phases of Tendermint. The
separate `timeout-prevote` and `timeout-precommit` parameters have been merged
into a single `timeout-vote` parameter that configures both of these similar
phases of the consensus protocol.

A set of reasonable defaults have been put in place for these new parameters
that will take effect when the node starts up in version v0.36. New chains
created using v0.36 and beyond will be able to configure these parameters in the
chain's `genesis.json` file. Chains that upgrade to v0.36 from a previous
compatible version of Tendermint will begin running with the default values.
Upgrading applications that wish to use different values from the defaults for
these parameters may do so by setting the `ConsensusParams.Timeout` field of the
`FinalizeBlock` `ABCI` response.

As a safety measure in case of unusual timing issues during the upgrade to
v0.36, an operator may override the consensus timeout values for a single node.
Note, however, that these overrides will be removed in Tendermint v0.37. See
[configuration](https://github.com/tendermint/tendermint/blob/master/docs/nodes/configuration.md)
for more information about these overrides.

For more discussion of this, see [ADR 074](https://tinyurl.com/adr074), which
lays out the reasoning for the changes as well as [RFC
009](https://tinyurl.com/rfc009) for a discussion of the complexities of
upgrading consensus parameters.

### CLI Changes

The functionality around resetting a node has been extended to make it safer. The
`unsafe-reset-all` command has been replaced by a `reset` command with four
subcommands: `blockchain`, `peers`, `unsafe-signer` and `unsafe-all`.

- `tendermint reset blockchain`: Clears a node of all blocks, consensus state, evidence,
  and indexed transactions. NOTE: This command does not reset application state.
  If you need to rollback the last application state (to recover from application
  nondeterminism), see instead the `tendermint rollback` command.
- `tendermint reset peers`: Clears the peer store, which persists information on peers used
  by the networking layer. This can be used to get rid of stale addresses or to switch
  to a predefined set of static peers.
- `tendermint reset unsafe-signer`: Resets the watermark level of the PrivVal File signer
  allowing it to sign votes from the genesis height. This should only be used in testing as
  it can lead to the node double signing.
- `tendermint reset unsafe-all`: A summation of the other three commands. This will delete
  the entire `data` directory which may include application data as well.

### Go API Changes

#### `crypto` Package Cleanup

The `github.com/tendermint/tendermint/crypto/tmhash` package was removed
to improve clarity. Users are encouraged to use the standard library
`crypto/sha256` package directly. However, as a convenience, some constants
and one function have moved to the Tendermint `crypto` package:

- The `crypto.Checksum` function returns the sha256 checksum of a
  byteslice. This is a wrapper around `sha256.Sum265` from the
  standard libary, but provided as a function to ease type
  requirements (the library function returns a `[32]byte` rather than
  a `[]byte`).
- `tmhash.TruncatedSize` is now `crypto.AddressSize` which was
  previously an alias for the same value.
- `tmhash.Size` and `tmhash.BlockSize` are now `crypto.HashSize` and
  `crypto.HashSize`.
- `tmhash.SumTruncated` is now available via `crypto.AddressHash` or by
  `crypto.Checksum(<...>)[:crypto.AddressSize]`
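
A rough, minimal sketch of the relocated helpers listed above (illustrative only; the function and constant names come from this section, and the sample input is made up):

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/crypto"
)

func main() {
	payload := []byte("example bytes") // hypothetical input

	sum := crypto.Checksum(payload)     // full SHA-256 checksum, returned as []byte
	addr := crypto.AddressHash(payload) // truncated hash of crypto.AddressSize bytes

	fmt.Printf("checksum: %x\n", sum)
	fmt.Printf("address:  %x\n", addr)
	fmt.Println("address size:", crypto.AddressSize)
}
```
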
## v0.35

### ABCI Changes

@@ -109,22 +212,25 @@ defines and describes the new API in detail.

The format of all tendermint on-disk database keys changes in
0.35. Upgrading nodes must either re-sync all data or run a migration
script provided in this release. The script located in
`github.com/tendermint/tendermint/scripts/keymigrate/migrate.go`
provides the function `Migrate(context.Context, db.DB)` which you can
operationalize as makes sense for your deployment.
script provided in this release.

The script located in
`github.com/tendermint/tendermint/scripts/keymigrate/migrate.go` provides the
function `Migrate(context.Context, db.DB)` which you can operationalize as
makes sense for your deployment.

For ease of use the `tendermint` command includes a CLI version of the
migration script, which you can invoke, as in:

    tendermint key-migrate

This reads the configuration file as normal and allows the
`--db-backend` and `--db-dir` flags to change database operations as
needed.
This reads the configuration file as normal and allows the `--db-backend` and
`--db-dir` flags to override the database location as needed.

The migration operation is idempotent and can be run more than once,
if needed.
The migration operation is intended to be idempotent, and should be safe to
rerun on the same database multiple times. As a safety measure, however, we
recommend that operators test out the migration on a copy of the database
first, if it is practical to do so, before applying it to the production data.
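
A minimal sketch of driving the migration from Go rather than the CLI, assuming a goleveldb backend; the database name, data directory, and the `keymigrate` import path are assumptions inferred from the file location quoted above:

```go
package main

import (
	"context"
	"log"

	dbm "github.com/tendermint/tm-db"

	"github.com/tendermint/tendermint/scripts/keymigrate"
)

func main() {
	// Assumed backend and data directory; match these to your node's config.
	db, err := dbm.NewGoLevelDB("blockstore", "/home/operator/.tendermint/data")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Migrate rewrites the stored keys to the new on-disk format in place.
	if err := keymigrate.Migrate(context.Background(), db); err != nil {
		log.Fatal(err)
	}
}
```
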
### CLI Changes

@@ -177,11 +283,13 @@ the full RPC interface provided as direct function calls. Import the
the node service as in the following:

```go
node := node.NewDefault() //construct the node object
// start and set up the node service
logger := log.NewNopLogger()

client := local.New(node.(local.NodeService))
// use client object to interact with the node
// Construct and start up a node with default settings.
node := node.NewDefault(logger)

// Construct a local (in-memory) RPC client to the node.
client := local.New(logger, node.(local.NodeService))
```

### gRPC Support
@@ -17,35 +17,18 @@ const (

//go:generate ../../scripts/mockery_generate.sh Client

// Client defines an interface for an ABCI client.
//
// All methods return the appropriate protobuf ResponseXxx struct and
// an error.
// Client defines the interface for an ABCI client.
//
// NOTE these are client errors, eg. ABCI socket connectivity issues.
// Application-related errors are reflected in response via ABCI error codes
// and logs.
// and (potentially) error response.
type Client interface {
	service.Service
	types.Application

	Error() error

	Flush(context.Context) error
	Echo(ctx context.Context, msg string) (*types.ResponseEcho, error)
	Info(context.Context, types.RequestInfo) (*types.ResponseInfo, error)
	CheckTx(context.Context, types.RequestCheckTx) (*types.ResponseCheckTx, error)
	Query(context.Context, types.RequestQuery) (*types.ResponseQuery, error)
	Commit(context.Context) (*types.ResponseCommit, error)
	InitChain(context.Context, types.RequestInitChain) (*types.ResponseInitChain, error)
	PrepareProposal(context.Context, types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error)
	ProcessProposal(context.Context, types.RequestProcessProposal) (*types.ResponseProcessProposal, error)
	ExtendVote(context.Context, types.RequestExtendVote) (*types.ResponseExtendVote, error)
	VerifyVoteExtension(context.Context, types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error)
	FinalizeBlock(context.Context, types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error)
	ListSnapshots(context.Context, types.RequestListSnapshots) (*types.ResponseListSnapshots, error)
	OfferSnapshot(context.Context, types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error)
	LoadSnapshotChunk(context.Context, types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error)
	ApplySnapshotChunk(context.Context, types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error)
	Echo(context.Context, string) (*types.ResponseEcho, error)
}

//----------------------------------------
@@ -52,6 +52,9 @@ func dialerFunc(ctx context.Context, addr string) (net.Conn, error) {
}

func (cli *grpcClient) OnStart(ctx context.Context) error {
	timer := time.NewTimer(0)
	defer timer.Stop()

RETRY_LOOP:
	for {
		conn, err := grpc.Dial(cli.addr,

@@ -63,8 +66,13 @@ RETRY_LOOP:
			return err
		}
		cli.logger.Error(fmt.Sprintf("abci.grpcClient failed to connect to %v.  Retrying...\n", cli.addr), "err", err)
		time.Sleep(time.Second * dialRetryIntervalSeconds)
		continue RETRY_LOOP
		timer.Reset(time.Second * dialRetryIntervalSeconds)
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-timer.C:
			continue RETRY_LOOP
		}
	}

	cli.logger.Info("Dialed server. Waiting for echo.", "addr", cli.addr)

@@ -82,7 +90,13 @@ RETRY_LOOP:
		}

		cli.logger.Error("Echo failed", "err", err)
		time.Sleep(time.Second * echoRetryIntervalSeconds)
		timer.Reset(time.Second * echoRetryIntervalSeconds)
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-timer.C:
			continue ENSURE_CONNECTED
		}
	}

	cli.client = client

@@ -114,15 +128,15 @@ func (cli *grpcClient) Echo(ctx context.Context, msg string) (*types.ResponseEch
	return cli.client.Echo(ctx, types.ToRequestEcho(msg).GetEcho(), grpc.WaitForReady(true))
}

func (cli *grpcClient) Info(ctx context.Context, params types.RequestInfo) (*types.ResponseInfo, error) {
func (cli *grpcClient) Info(ctx context.Context, params *types.RequestInfo) (*types.ResponseInfo, error) {
	return cli.client.Info(ctx, types.ToRequestInfo(params).GetInfo(), grpc.WaitForReady(true))
}

func (cli *grpcClient) CheckTx(ctx context.Context, params types.RequestCheckTx) (*types.ResponseCheckTx, error) {
func (cli *grpcClient) CheckTx(ctx context.Context, params *types.RequestCheckTx) (*types.ResponseCheckTx, error) {
	return cli.client.CheckTx(ctx, types.ToRequestCheckTx(params).GetCheckTx(), grpc.WaitForReady(true))
}

func (cli *grpcClient) Query(ctx context.Context, params types.RequestQuery) (*types.ResponseQuery, error) {
func (cli *grpcClient) Query(ctx context.Context, params *types.RequestQuery) (*types.ResponseQuery, error) {
	return cli.client.Query(ctx, types.ToRequestQuery(params).GetQuery(), grpc.WaitForReady(true))
}

@@ -130,42 +144,42 @@ func (cli *grpcClient) Commit(ctx context.Context) (*types.ResponseCommit, error
	return cli.client.Commit(ctx, types.ToRequestCommit().GetCommit(), grpc.WaitForReady(true))
}

func (cli *grpcClient) InitChain(ctx context.Context, params types.RequestInitChain) (*types.ResponseInitChain, error) {
func (cli *grpcClient) InitChain(ctx context.Context, params *types.RequestInitChain) (*types.ResponseInitChain, error) {
	return cli.client.InitChain(ctx, types.ToRequestInitChain(params).GetInitChain(), grpc.WaitForReady(true))
}

func (cli *grpcClient) ListSnapshots(ctx context.Context, params types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
func (cli *grpcClient) ListSnapshots(ctx context.Context, params *types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
	return cli.client.ListSnapshots(ctx, types.ToRequestListSnapshots(params).GetListSnapshots(), grpc.WaitForReady(true))
}

func (cli *grpcClient) OfferSnapshot(ctx context.Context, params types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
func (cli *grpcClient) OfferSnapshot(ctx context.Context, params *types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
	return cli.client.OfferSnapshot(ctx, types.ToRequestOfferSnapshot(params).GetOfferSnapshot(), grpc.WaitForReady(true))
}

func (cli *grpcClient) LoadSnapshotChunk(ctx context.Context, params types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
func (cli *grpcClient) LoadSnapshotChunk(ctx context.Context, params *types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
	return cli.client.LoadSnapshotChunk(ctx, types.ToRequestLoadSnapshotChunk(params).GetLoadSnapshotChunk(), grpc.WaitForReady(true))
}

func (cli *grpcClient) ApplySnapshotChunk(ctx context.Context, params types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
func (cli *grpcClient) ApplySnapshotChunk(ctx context.Context, params *types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
	return cli.client.ApplySnapshotChunk(ctx, types.ToRequestApplySnapshotChunk(params).GetApplySnapshotChunk(), grpc.WaitForReady(true))
}

func (cli *grpcClient) PrepareProposal(ctx context.Context, params types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
func (cli *grpcClient) PrepareProposal(ctx context.Context, params *types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
	return cli.client.PrepareProposal(ctx, types.ToRequestPrepareProposal(params).GetPrepareProposal(), grpc.WaitForReady(true))
}

func (cli *grpcClient) ProcessProposal(ctx context.Context, params types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
func (cli *grpcClient) ProcessProposal(ctx context.Context, params *types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
	return cli.client.ProcessProposal(ctx, types.ToRequestProcessProposal(params).GetProcessProposal(), grpc.WaitForReady(true))
}

func (cli *grpcClient) ExtendVote(ctx context.Context, params types.RequestExtendVote) (*types.ResponseExtendVote, error) {
func (cli *grpcClient) ExtendVote(ctx context.Context, params *types.RequestExtendVote) (*types.ResponseExtendVote, error) {
	return cli.client.ExtendVote(ctx, types.ToRequestExtendVote(params).GetExtendVote(), grpc.WaitForReady(true))
}

func (cli *grpcClient) VerifyVoteExtension(ctx context.Context, params types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) {
func (cli *grpcClient) VerifyVoteExtension(ctx context.Context, params *types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) {
	return cli.client.VerifyVoteExtension(ctx, types.ToRequestVerifyVoteExtension(params).GetVerifyVoteExtension(), grpc.WaitForReady(true))
}

func (cli *grpcClient) FinalizeBlock(ctx context.Context, params types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
func (cli *grpcClient) FinalizeBlock(ctx context.Context, params *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
	return cli.client.FinalizeBlock(ctx, types.ToRequestFinalizeBlock(params).GetFinalizeBlock(), grpc.WaitForReady(true))
}
@@ -34,81 +34,7 @@ func NewLocalClient(logger log.Logger, app types.Application) Client {
|
||||
func (*localClient) OnStart(context.Context) error { return nil }
|
||||
func (*localClient) OnStop() {}
|
||||
func (*localClient) Error() error { return nil }
|
||||
|
||||
//-------------------------------------------------------
|
||||
|
||||
func (*localClient) Flush(context.Context) error { return nil }
|
||||
|
||||
func (app *localClient) Echo(_ context.Context, msg string) (*types.ResponseEcho, error) {
|
||||
func (*localClient) Flush(context.Context) error { return nil }
|
||||
func (*localClient) Echo(_ context.Context, msg string) (*types.ResponseEcho, error) {
|
||||
return &types.ResponseEcho{Message: msg}, nil
|
||||
}
|
||||
|
||||
func (app *localClient) Info(ctx context.Context, req types.RequestInfo) (*types.ResponseInfo, error) {
|
||||
res := app.Application.Info(req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *localClient) CheckTx(_ context.Context, req types.RequestCheckTx) (*types.ResponseCheckTx, error) {
|
||||
res := app.Application.CheckTx(req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *localClient) Query(_ context.Context, req types.RequestQuery) (*types.ResponseQuery, error) {
|
||||
res := app.Application.Query(req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *localClient) Commit(ctx context.Context) (*types.ResponseCommit, error) {
|
||||
res := app.Application.Commit()
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *localClient) InitChain(_ context.Context, req types.RequestInitChain) (*types.ResponseInitChain, error) {
|
||||
res := app.Application.InitChain(req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *localClient) ListSnapshots(_ context.Context, req types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
|
||||
res := app.Application.ListSnapshots(req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *localClient) OfferSnapshot(_ context.Context, req types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
|
||||
res := app.Application.OfferSnapshot(req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *localClient) LoadSnapshotChunk(_ context.Context, req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
|
||||
res := app.Application.LoadSnapshotChunk(req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *localClient) ApplySnapshotChunk(_ context.Context, req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
|
||||
res := app.Application.ApplySnapshotChunk(req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *localClient) PrepareProposal(_ context.Context, req types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
|
||||
res := app.Application.PrepareProposal(req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *localClient) ProcessProposal(_ context.Context, req types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
|
||||
res := app.Application.ProcessProposal(req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *localClient) ExtendVote(_ context.Context, req types.RequestExtendVote) (*types.ResponseExtendVote, error) {
|
||||
res := app.Application.ExtendVote(req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *localClient) VerifyVoteExtension(_ context.Context, req types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) {
|
||||
res := app.Application.VerifyVoteExtension(req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *localClient) FinalizeBlock(_ context.Context, req types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
|
||||
res := app.Application.FinalizeBlock(req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
@@ -4,8 +4,10 @@ package mocks
|
||||
|
||||
import (
|
||||
context "context"
|
||||
testing "testing"
|
||||
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
|
||||
types "github.com/tendermint/tendermint/abci/types"
|
||||
)
|
||||
|
||||
@@ -15,11 +17,11 @@ type Client struct {
|
||||
}
|
||||
|
||||
// ApplySnapshotChunk provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) ApplySnapshotChunk(_a0 context.Context, _a1 types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
|
||||
func (_m *Client) ApplySnapshotChunk(_a0 context.Context, _a1 *types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *types.ResponseApplySnapshotChunk
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestApplySnapshotChunk) *types.ResponseApplySnapshotChunk); ok {
|
||||
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestApplySnapshotChunk) *types.ResponseApplySnapshotChunk); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
@@ -28,7 +30,7 @@ func (_m *Client) ApplySnapshotChunk(_a0 context.Context, _a1 types.RequestApply
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestApplySnapshotChunk) error); ok {
|
||||
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestApplySnapshotChunk) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
@@ -38,11 +40,11 @@ func (_m *Client) ApplySnapshotChunk(_a0 context.Context, _a1 types.RequestApply
|
||||
}
|
||||
|
||||
// CheckTx provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) CheckTx(_a0 context.Context, _a1 types.RequestCheckTx) (*types.ResponseCheckTx, error) {
|
||||
func (_m *Client) CheckTx(_a0 context.Context, _a1 *types.RequestCheckTx) (*types.ResponseCheckTx, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *types.ResponseCheckTx
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestCheckTx) *types.ResponseCheckTx); ok {
|
||||
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestCheckTx) *types.ResponseCheckTx); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
@@ -51,7 +53,7 @@ func (_m *Client) CheckTx(_a0 context.Context, _a1 types.RequestCheckTx) (*types
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestCheckTx) error); ok {
|
||||
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestCheckTx) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
@@ -83,13 +85,13 @@ func (_m *Client) Commit(_a0 context.Context) (*types.ResponseCommit, error) {
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// Echo provides a mock function with given fields: ctx, msg
|
||||
func (_m *Client) Echo(ctx context.Context, msg string) (*types.ResponseEcho, error) {
|
||||
ret := _m.Called(ctx, msg)
|
||||
// Echo provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) Echo(_a0 context.Context, _a1 string) (*types.ResponseEcho, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *types.ResponseEcho
|
||||
if rf, ok := ret.Get(0).(func(context.Context, string) *types.ResponseEcho); ok {
|
||||
r0 = rf(ctx, msg)
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*types.ResponseEcho)
|
||||
@@ -98,7 +100,7 @@ func (_m *Client) Echo(ctx context.Context, msg string) (*types.ResponseEcho, er
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
|
||||
r1 = rf(ctx, msg)
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
@@ -121,11 +123,11 @@ func (_m *Client) Error() error {
|
||||
}
|
||||
|
||||
// ExtendVote provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) ExtendVote(_a0 context.Context, _a1 types.RequestExtendVote) (*types.ResponseExtendVote, error) {
|
||||
func (_m *Client) ExtendVote(_a0 context.Context, _a1 *types.RequestExtendVote) (*types.ResponseExtendVote, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *types.ResponseExtendVote
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestExtendVote) *types.ResponseExtendVote); ok {
|
||||
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestExtendVote) *types.ResponseExtendVote); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
@@ -134,7 +136,7 @@ func (_m *Client) ExtendVote(_a0 context.Context, _a1 types.RequestExtendVote) (
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestExtendVote) error); ok {
|
||||
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestExtendVote) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
@@ -144,11 +146,11 @@ func (_m *Client) ExtendVote(_a0 context.Context, _a1 types.RequestExtendVote) (
|
||||
}
|
||||
|
||||
// FinalizeBlock provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) FinalizeBlock(_a0 context.Context, _a1 types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
|
||||
func (_m *Client) FinalizeBlock(_a0 context.Context, _a1 *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *types.ResponseFinalizeBlock
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestFinalizeBlock) *types.ResponseFinalizeBlock); ok {
|
||||
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestFinalizeBlock) *types.ResponseFinalizeBlock); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
@@ -157,7 +159,7 @@ func (_m *Client) FinalizeBlock(_a0 context.Context, _a1 types.RequestFinalizeBl
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestFinalizeBlock) error); ok {
|
||||
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestFinalizeBlock) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
@@ -181,11 +183,11 @@ func (_m *Client) Flush(_a0 context.Context) error {
|
||||
}
|
||||
|
||||
// Info provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) Info(_a0 context.Context, _a1 types.RequestInfo) (*types.ResponseInfo, error) {
|
||||
func (_m *Client) Info(_a0 context.Context, _a1 *types.RequestInfo) (*types.ResponseInfo, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *types.ResponseInfo
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestInfo) *types.ResponseInfo); ok {
|
||||
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestInfo) *types.ResponseInfo); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
@@ -194,7 +196,7 @@ func (_m *Client) Info(_a0 context.Context, _a1 types.RequestInfo) (*types.Respo
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestInfo) error); ok {
|
||||
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestInfo) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
@@ -204,11 +206,11 @@ func (_m *Client) Info(_a0 context.Context, _a1 types.RequestInfo) (*types.Respo
|
||||
}
|
||||
|
||||
// InitChain provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) InitChain(_a0 context.Context, _a1 types.RequestInitChain) (*types.ResponseInitChain, error) {
|
||||
func (_m *Client) InitChain(_a0 context.Context, _a1 *types.RequestInitChain) (*types.ResponseInitChain, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *types.ResponseInitChain
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestInitChain) *types.ResponseInitChain); ok {
|
||||
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestInitChain) *types.ResponseInitChain); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
@@ -217,7 +219,7 @@ func (_m *Client) InitChain(_a0 context.Context, _a1 types.RequestInitChain) (*t
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestInitChain) error); ok {
|
||||
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestInitChain) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
@@ -241,11 +243,11 @@ func (_m *Client) IsRunning() bool {
|
||||
}
|
||||
|
||||
// ListSnapshots provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) ListSnapshots(_a0 context.Context, _a1 types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
|
||||
func (_m *Client) ListSnapshots(_a0 context.Context, _a1 *types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *types.ResponseListSnapshots
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestListSnapshots) *types.ResponseListSnapshots); ok {
|
||||
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestListSnapshots) *types.ResponseListSnapshots); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
@@ -254,7 +256,7 @@ func (_m *Client) ListSnapshots(_a0 context.Context, _a1 types.RequestListSnapsh
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestListSnapshots) error); ok {
|
||||
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestListSnapshots) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
@@ -264,11 +266,11 @@ func (_m *Client) ListSnapshots(_a0 context.Context, _a1 types.RequestListSnapsh
|
||||
}
|
||||
|
||||
// LoadSnapshotChunk provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) LoadSnapshotChunk(_a0 context.Context, _a1 types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
|
||||
func (_m *Client) LoadSnapshotChunk(_a0 context.Context, _a1 *types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *types.ResponseLoadSnapshotChunk
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestLoadSnapshotChunk) *types.ResponseLoadSnapshotChunk); ok {
|
||||
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestLoadSnapshotChunk) *types.ResponseLoadSnapshotChunk); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
@@ -277,7 +279,7 @@ func (_m *Client) LoadSnapshotChunk(_a0 context.Context, _a1 types.RequestLoadSn
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestLoadSnapshotChunk) error); ok {
|
||||
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestLoadSnapshotChunk) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
@@ -287,11 +289,11 @@ func (_m *Client) LoadSnapshotChunk(_a0 context.Context, _a1 types.RequestLoadSn
|
||||
}
|
||||
|
||||
// OfferSnapshot provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) OfferSnapshot(_a0 context.Context, _a1 types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
|
||||
func (_m *Client) OfferSnapshot(_a0 context.Context, _a1 *types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *types.ResponseOfferSnapshot
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestOfferSnapshot) *types.ResponseOfferSnapshot); ok {
|
||||
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestOfferSnapshot) *types.ResponseOfferSnapshot); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
@@ -300,7 +302,7 @@ func (_m *Client) OfferSnapshot(_a0 context.Context, _a1 types.RequestOfferSnaps
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestOfferSnapshot) error); ok {
|
||||
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestOfferSnapshot) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
@@ -310,11 +312,11 @@ func (_m *Client) OfferSnapshot(_a0 context.Context, _a1 types.RequestOfferSnaps
|
||||
}
|
||||
|
||||
// PrepareProposal provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) PrepareProposal(_a0 context.Context, _a1 types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
|
||||
func (_m *Client) PrepareProposal(_a0 context.Context, _a1 *types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *types.ResponsePrepareProposal
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestPrepareProposal) *types.ResponsePrepareProposal); ok {
|
||||
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestPrepareProposal) *types.ResponsePrepareProposal); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
@@ -323,7 +325,7 @@ func (_m *Client) PrepareProposal(_a0 context.Context, _a1 types.RequestPrepareP
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestPrepareProposal) error); ok {
|
||||
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestPrepareProposal) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
@@ -333,11 +335,11 @@ func (_m *Client) PrepareProposal(_a0 context.Context, _a1 types.RequestPrepareP
|
||||
}
|
||||
|
||||
// ProcessProposal provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) ProcessProposal(_a0 context.Context, _a1 types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
|
||||
func (_m *Client) ProcessProposal(_a0 context.Context, _a1 *types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *types.ResponseProcessProposal
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestProcessProposal) *types.ResponseProcessProposal); ok {
|
||||
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestProcessProposal) *types.ResponseProcessProposal); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
@@ -346,7 +348,7 @@ func (_m *Client) ProcessProposal(_a0 context.Context, _a1 types.RequestProcessP
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestProcessProposal) error); ok {
|
||||
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestProcessProposal) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
@@ -356,11 +358,11 @@ func (_m *Client) ProcessProposal(_a0 context.Context, _a1 types.RequestProcessP
|
||||
}
|
||||
|
||||
// Query provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) Query(_a0 context.Context, _a1 types.RequestQuery) (*types.ResponseQuery, error) {
|
||||
func (_m *Client) Query(_a0 context.Context, _a1 *types.RequestQuery) (*types.ResponseQuery, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *types.ResponseQuery
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestQuery) *types.ResponseQuery); ok {
|
||||
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestQuery) *types.ResponseQuery); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
@@ -369,7 +371,7 @@ func (_m *Client) Query(_a0 context.Context, _a1 types.RequestQuery) (*types.Res
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestQuery) error); ok {
|
||||
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestQuery) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
@@ -393,11 +395,11 @@ func (_m *Client) Start(_a0 context.Context) error {
|
||||
}
|
||||
|
||||
// VerifyVoteExtension provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) VerifyVoteExtension(_a0 context.Context, _a1 types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) {
|
||||
func (_m *Client) VerifyVoteExtension(_a0 context.Context, _a1 *types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *types.ResponseVerifyVoteExtension
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestVerifyVoteExtension) *types.ResponseVerifyVoteExtension); ok {
|
||||
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestVerifyVoteExtension) *types.ResponseVerifyVoteExtension); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
@@ -406,7 +408,7 @@ func (_m *Client) VerifyVoteExtension(_a0 context.Context, _a1 types.RequestVeri
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestVerifyVoteExtension) error); ok {
|
||||
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestVerifyVoteExtension) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
@@ -419,3 +421,13 @@ func (_m *Client) VerifyVoteExtension(_a0 context.Context, _a1 types.RequestVeri
|
||||
func (_m *Client) Wait() {
|
||||
_m.Called()
|
||||
}
|
||||
|
||||
// NewClient creates a new instance of Client. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations.
|
||||
func NewClient(t testing.TB) *Client {
|
||||
mock := &Client{}
|
||||
mock.Mock.Test(t)
|
||||
|
||||
t.Cleanup(func() { mock.AssertExpectations(t) })
|
||||
|
||||
return mock
|
||||
}
|
||||
|
||||
@@ -64,6 +64,8 @@ func (cli *socketClient) OnStart(ctx context.Context) error {
|
||||
err error
|
||||
conn net.Conn
|
||||
)
|
||||
timer := time.NewTimer(0)
|
||||
defer timer.Stop()
|
||||
|
||||
for {
|
||||
conn, err = tmnet.Connect(cli.addr)
|
||||
@@ -73,8 +75,15 @@ func (cli *socketClient) OnStart(ctx context.Context) error {
|
||||
}
|
||||
cli.logger.Error(fmt.Sprintf("abci.socketClient failed to connect to %v. Retrying after %vs...",
|
||||
cli.addr, dialRetryIntervalSeconds), "err", err)
|
||||
time.Sleep(time.Second * dialRetryIntervalSeconds)
|
||||
continue
|
||||
|
||||
timer.Reset(time.Second * dialRetryIntervalSeconds)
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
case <-timer.C:
|
||||
continue
|
||||
}
|
||||
|
||||
}
|
||||
cli.conn = conn
|
||||
|
||||
@@ -90,11 +99,7 @@ func (cli *socketClient) OnStop() {
|
||||
if cli.conn != nil {
|
||||
cli.conn.Close()
|
||||
}
|
||||
|
||||
// this timeout is arbitrary.
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
cli.drainQueue(ctx)
|
||||
cli.drainQueue()
|
||||
}
|
||||
|
||||
// Error returns an error if the client was stopped abruptly.
|
||||
@@ -113,12 +118,6 @@ func (cli *socketClient) sendRequestsRoutine(ctx context.Context, conn io.Writer
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case reqres := <-cli.reqQueue:
|
||||
if ctx.Err() != nil {
|
||||
return
|
||||
}
|
||||
|
||||
cli.willSendReq(reqres)
|
||||
|
||||
if err := types.WriteMessage(reqres.Request, bw); err != nil {
|
||||
cli.stopForError(fmt.Errorf("write to buffer: %w", err))
|
||||
return
|
||||
@@ -162,6 +161,11 @@ func (cli *socketClient) recvResponseRoutine(ctx context.Context, conn io.Reader
|
||||
func (cli *socketClient) willSendReq(reqres *requestAndResponse) {
|
||||
cli.mtx.Lock()
|
||||
defer cli.mtx.Unlock()
|
||||
|
||||
if !cli.IsRunning() {
|
||||
return
|
||||
}
|
||||
|
||||
cli.reqSent.PushBack(reqres)
|
||||
}
|
||||
|
||||
@@ -189,138 +193,13 @@ func (cli *socketClient) didRecvResponse(res *types.Response) error {
|
||||
|
||||
//----------------------------------------
|
||||
|
||||
func (cli *socketClient) Flush(ctx context.Context) error {
|
||||
_, err := cli.doRequest(ctx, types.ToRequestFlush())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) Echo(ctx context.Context, msg string) (*types.ResponseEcho, error) {
|
||||
res, err := cli.doRequest(ctx, types.ToRequestEcho(msg))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.GetEcho(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) Info(ctx context.Context, req types.RequestInfo) (*types.ResponseInfo, error) {
|
||||
res, err := cli.doRequest(ctx, types.ToRequestInfo(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.GetInfo(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) CheckTx(ctx context.Context, req types.RequestCheckTx) (*types.ResponseCheckTx, error) {
|
||||
res, err := cli.doRequest(ctx, types.ToRequestCheckTx(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.GetCheckTx(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) Query(ctx context.Context, req types.RequestQuery) (*types.ResponseQuery, error) {
|
||||
res, err := cli.doRequest(ctx, types.ToRequestQuery(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.GetQuery(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) Commit(ctx context.Context) (*types.ResponseCommit, error) {
|
||||
res, err := cli.doRequest(ctx, types.ToRequestCommit())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.GetCommit(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) InitChain(ctx context.Context, req types.RequestInitChain) (*types.ResponseInitChain, error) {
|
||||
res, err := cli.doRequest(ctx, types.ToRequestInitChain(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.GetInitChain(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) ListSnapshots(ctx context.Context, req types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
|
||||
res, err := cli.doRequest(ctx, types.ToRequestListSnapshots(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.GetListSnapshots(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) OfferSnapshot(ctx context.Context, req types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
|
||||
res, err := cli.doRequest(ctx, types.ToRequestOfferSnapshot(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.GetOfferSnapshot(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) LoadSnapshotChunk(ctx context.Context, req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
|
||||
res, err := cli.doRequest(ctx, types.ToRequestLoadSnapshotChunk(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.GetLoadSnapshotChunk(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) ApplySnapshotChunk(ctx context.Context, req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
|
||||
res, err := cli.doRequest(ctx, types.ToRequestApplySnapshotChunk(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.GetApplySnapshotChunk(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) PrepareProposal(ctx context.Context, req types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
|
||||
res, err := cli.doRequest(ctx, types.ToRequestPrepareProposal(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.GetPrepareProposal(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) ProcessProposal(ctx context.Context, req types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
|
||||
res, err := cli.doRequest(ctx, types.ToRequestProcessProposal(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.GetProcessProposal(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) ExtendVote(ctx context.Context, req types.RequestExtendVote) (*types.ResponseExtendVote, error) {
|
||||
res, err := cli.doRequest(ctx, types.ToRequestExtendVote(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.GetExtendVote(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) VerifyVoteExtension(ctx context.Context, req types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) {
|
||||
res, err := cli.doRequest(ctx, types.ToRequestVerifyVoteExtension(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.GetVerifyVoteExtension(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) FinalizeBlock(ctx context.Context, req types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
|
||||
res, err := cli.doRequest(ctx, types.ToRequestFinalizeBlock(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.GetFinalizeBlock(), nil
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
|
||||
func (cli *socketClient) doRequest(ctx context.Context, req *types.Request) (*types.Response, error) {
|
||||
if !cli.IsRunning() {
|
||||
return nil, errors.New("client has stopped")
|
||||
}
|
||||
|
||||
reqres := makeReqRes(req)
|
||||
cli.willSendReq(reqres)
|
||||
|
||||
select {
|
||||
case cli.reqQueue <- reqres:
|
||||
@@ -342,7 +221,7 @@ func (cli *socketClient) doRequest(ctx context.Context, req *types.Request) (*ty
|
||||
|
||||
// drainQueue marks as complete and discards all remaining pending requests
|
||||
// from the queue.
|
||||
func (cli *socketClient) drainQueue(ctx context.Context) {
|
||||
func (cli *socketClient) drainQueue() {
|
||||
cli.mtx.Lock()
|
||||
defer cli.mtx.Unlock()
|
||||
|
||||
@@ -351,22 +230,136 @@ func (cli *socketClient) drainQueue(ctx context.Context) {
|
||||
reqres := req.Value.(*requestAndResponse)
|
||||
reqres.markDone()
|
||||
}
|
||||
}
|
||||
|
||||
// Mark all queued messages as resolved.
|
||||
//
|
||||
// TODO(creachadair): We can't simply range the channel, because it is never
|
||||
// closed, and the writer continues to add work.
|
||||
// See https://github.com/tendermint/tendermint/issues/6996.
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case reqres := <-cli.reqQueue:
|
||||
reqres.markDone()
|
||||
default:
|
||||
return
|
||||
}
|
||||
//----------------------------------------
|
||||
|
||||
func (cli *socketClient) Flush(ctx context.Context) error {
|
||||
_, err := cli.doRequest(ctx, types.ToRequestFlush())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) Echo(ctx context.Context, msg string) (*types.ResponseEcho, error) {
|
||||
res, err := cli.doRequest(ctx, types.ToRequestEcho(msg))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.GetEcho(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) Info(ctx context.Context, req *types.RequestInfo) (*types.ResponseInfo, error) {
|
||||
res, err := cli.doRequest(ctx, types.ToRequestInfo(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.GetInfo(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) CheckTx(ctx context.Context, req *types.RequestCheckTx) (*types.ResponseCheckTx, error) {
|
||||
res, err := cli.doRequest(ctx, types.ToRequestCheckTx(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.GetCheckTx(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) Query(ctx context.Context, req *types.RequestQuery) (*types.ResponseQuery, error) {
|
||||
res, err := cli.doRequest(ctx, types.ToRequestQuery(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.GetQuery(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) Commit(ctx context.Context) (*types.ResponseCommit, error) {
|
||||
res, err := cli.doRequest(ctx, types.ToRequestCommit())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.GetCommit(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) InitChain(ctx context.Context, req *types.RequestInitChain) (*types.ResponseInitChain, error) {
|
||||
res, err := cli.doRequest(ctx, types.ToRequestInitChain(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.GetInitChain(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) ListSnapshots(ctx context.Context, req *types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
|
||||
res, err := cli.doRequest(ctx, types.ToRequestListSnapshots(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.GetListSnapshots(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) OfferSnapshot(ctx context.Context, req *types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
|
||||
res, err := cli.doRequest(ctx, types.ToRequestOfferSnapshot(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.GetOfferSnapshot(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) LoadSnapshotChunk(ctx context.Context, req *types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
|
||||
res, err := cli.doRequest(ctx, types.ToRequestLoadSnapshotChunk(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.GetLoadSnapshotChunk(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) ApplySnapshotChunk(ctx context.Context, req *types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
|
||||
res, err := cli.doRequest(ctx, types.ToRequestApplySnapshotChunk(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.GetApplySnapshotChunk(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) PrepareProposal(ctx context.Context, req *types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
|
||||
res, err := cli.doRequest(ctx, types.ToRequestPrepareProposal(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.GetPrepareProposal(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) ProcessProposal(ctx context.Context, req *types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
|
||||
res, err := cli.doRequest(ctx, types.ToRequestProcessProposal(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.GetProcessProposal(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) ExtendVote(ctx context.Context, req *types.RequestExtendVote) (*types.ResponseExtendVote, error) {
|
||||
res, err := cli.doRequest(ctx, types.ToRequestExtendVote(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.GetExtendVote(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) VerifyVoteExtension(ctx context.Context, req *types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) {
|
||||
res, err := cli.doRequest(ctx, types.ToRequestVerifyVoteExtension(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.GetVerifyVoteExtension(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) FinalizeBlock(ctx context.Context, req *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
|
||||
res, err := cli.doRequest(ctx, types.ToRequestFinalizeBlock(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.GetFinalizeBlock(), nil
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
|
||||
@@ -482,7 +482,7 @@ func cmdInfo(cmd *cobra.Command, args []string) error {
|
||||
if len(args) == 1 {
|
||||
version = args[0]
|
||||
}
|
||||
res, err := client.Info(cmd.Context(), types.RequestInfo{Version: version})
|
||||
res, err := client.Info(cmd.Context(), &types.RequestInfo{Version: version})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -511,7 +511,7 @@ func cmdFinalizeBlock(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
txs[i] = txBytes
|
||||
}
|
||||
res, err := client.FinalizeBlock(cmd.Context(), types.RequestFinalizeBlock{Txs: txs})
|
||||
res, err := client.FinalizeBlock(cmd.Context(), &types.RequestFinalizeBlock{Txs: txs})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -539,7 +539,7 @@ func cmdCheckTx(cmd *cobra.Command, args []string) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
res, err := client.CheckTx(cmd.Context(), types.RequestCheckTx{Tx: txBytes})
|
||||
res, err := client.CheckTx(cmd.Context(), &types.RequestCheckTx{Tx: txBytes})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -579,7 +579,7 @@ func cmdQuery(cmd *cobra.Command, args []string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
resQuery, err := client.Query(cmd.Context(), types.RequestQuery{
|
||||
resQuery, err := client.Query(cmd.Context(), &types.RequestQuery{
|
||||
Data: queryBytes,
|
||||
Path: flagPath,
|
||||
Height: int64(flagHeight),
|
||||
|
||||
@@ -53,7 +53,7 @@ func TestGRPC(t *testing.T) {
|
||||
logger := log.NewNopLogger()
|
||||
|
||||
t.Log("### Testing GRPC")
|
||||
testGRPCSync(ctx, t, logger, types.NewGRPCApplication(types.NewBaseApplication()))
|
||||
testGRPCSync(ctx, t, logger, types.NewBaseApplication())
|
||||
}
|
||||
|
||||
func testBulk(ctx context.Context, t *testing.T, logger log.Logger, app types.Application) {
|
||||
@@ -77,7 +77,7 @@ func testBulk(ctx context.Context, t *testing.T, logger log.Logger, app types.Ap
|
||||
require.NoError(t, err)
|
||||
|
||||
// Construct request
|
||||
rfb := types.RequestFinalizeBlock{Txs: make([][]byte, numDeliverTxs)}
|
||||
rfb := &types.RequestFinalizeBlock{Txs: make([][]byte, numDeliverTxs)}
|
||||
for counter := 0; counter < numDeliverTxs; counter++ {
|
||||
rfb.Txs[counter] = []byte("test")
|
||||
}
|
||||
@@ -101,7 +101,7 @@ func dialerFunc(ctx context.Context, addr string) (net.Conn, error) {
|
||||
return tmnet.Connect(addr)
|
||||
}
|
||||
|
||||
func testGRPCSync(ctx context.Context, t *testing.T, logger log.Logger, app types.ABCIApplicationServer) {
|
||||
func testGRPCSync(ctx context.Context, t *testing.T, logger log.Logger, app types.Application) {
|
||||
t.Helper()
|
||||
numDeliverTxs := 680000
|
||||
socketFile := fmt.Sprintf("/tmp/test-%08x.sock", rand.Int31n(1<<30))
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package kvstore
|
||||
|
||||
import (
|
||||
"context"
|
||||
mrand "math/rand"
|
||||
|
||||
"github.com/tendermint/tendermint/abci/types"
|
||||
@@ -32,8 +33,9 @@ func RandVals(cnt int) []types.ValidatorUpdate {
|
||||
// InitKVStore initializes the kvstore app with some data,
|
||||
// which allows tests to pass and is fine as long as you
|
||||
// don't make any tx that modify the validator state
|
||||
func InitKVStore(app *PersistentKVStoreApplication) {
|
||||
app.InitChain(types.RequestInitChain{
|
||||
func InitKVStore(ctx context.Context, app *PersistentKVStoreApplication) error {
|
||||
_, err := app.InitChain(ctx, &types.RequestInitChain{
|
||||
Validators: RandVals(1),
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -2,6 +2,7 @@ package kvstore
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
@@ -90,7 +91,7 @@ func NewApplication() *Application {
|
||||
}
|
||||
}
|
||||
|
||||
func (app *Application) InitChain(req types.RequestInitChain) types.ResponseInitChain {
|
||||
func (app *Application) InitChain(_ context.Context, req *types.RequestInitChain) (*types.ResponseInitChain, error) {
|
||||
app.mu.Lock()
|
||||
defer app.mu.Unlock()
|
||||
|
||||
@@ -101,19 +102,19 @@ func (app *Application) InitChain(req types.RequestInitChain) types.ResponseInit
|
||||
panic("problem updating validators")
|
||||
}
|
||||
}
|
||||
return types.ResponseInitChain{}
|
||||
return &types.ResponseInitChain{}, nil
|
||||
}
|
||||
|
||||
func (app *Application) Info(req types.RequestInfo) types.ResponseInfo {
|
||||
func (app *Application) Info(_ context.Context, req *types.RequestInfo) (*types.ResponseInfo, error) {
|
||||
app.mu.Lock()
|
||||
defer app.mu.Unlock()
|
||||
return types.ResponseInfo{
|
||||
return &types.ResponseInfo{
|
||||
Data: fmt.Sprintf("{\"size\":%v}", app.state.Size),
|
||||
Version: version.ABCIVersion,
|
||||
AppVersion: ProtocolVersion,
|
||||
LastBlockHeight: app.state.Height,
|
||||
LastBlockAppHash: app.state.AppHash,
|
||||
}
|
||||
}, nil
|
||||
}
|
||||
|
||||
// tx is either "val:pubkey!power" or "key=value" or just arbitrary bytes
|
||||
@@ -166,7 +167,7 @@ func (app *Application) Close() error {
|
||||
return app.state.db.Close()
|
||||
}
|
||||
|
||||
func (app *Application) FinalizeBlock(req types.RequestFinalizeBlock) types.ResponseFinalizeBlock {
|
||||
func (app *Application) FinalizeBlock(_ context.Context, req *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
|
||||
app.mu.Lock()
|
||||
defer app.mu.Unlock()
|
||||
|
||||
@@ -175,7 +176,7 @@ func (app *Application) FinalizeBlock(req types.RequestFinalizeBlock) types.Resp
|
||||
|
||||
// Punish validators who committed equivocation.
|
||||
for _, ev := range req.ByzantineValidators {
|
||||
if ev.Type == types.EvidenceType_DUPLICATE_VOTE {
|
||||
if ev.Type == types.MisbehaviorType_DUPLICATE_VOTE {
|
||||
addr := string(ev.Validator.Address)
|
||||
if pubKey, ok := app.valAddrToPubKeyMap[addr]; ok {
|
||||
app.updateValidator(types.ValidatorUpdate{
|
||||
@@ -195,14 +196,14 @@ func (app *Application) FinalizeBlock(req types.RequestFinalizeBlock) types.Resp
|
||||
respTxs[i] = app.handleTx(tx)
|
||||
}
|
||||
|
||||
return types.ResponseFinalizeBlock{TxResults: respTxs, ValidatorUpdates: app.ValUpdates}
|
||||
return &types.ResponseFinalizeBlock{TxResults: respTxs, ValidatorUpdates: app.ValUpdates}, nil
|
||||
}
|
||||
|
||||
func (*Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
|
||||
return types.ResponseCheckTx{Code: code.CodeTypeOK, GasWanted: 1}
|
||||
func (*Application) CheckTx(_ context.Context, req *types.RequestCheckTx) (*types.ResponseCheckTx, error) {
|
||||
return &types.ResponseCheckTx{Code: code.CodeTypeOK, GasWanted: 1}, nil
|
||||
}
|
||||
|
||||
func (app *Application) Commit() types.ResponseCommit {
|
||||
func (app *Application) Commit(_ context.Context) (*types.ResponseCommit, error) {
|
||||
app.mu.Lock()
|
||||
defer app.mu.Unlock()
|
||||
|
||||
@@ -213,15 +214,15 @@ func (app *Application) Commit() types.ResponseCommit {
|
||||
app.state.Height++
|
||||
saveState(app.state)
|
||||
|
||||
resp := types.ResponseCommit{Data: appHash}
|
||||
resp := &types.ResponseCommit{Data: appHash}
|
||||
if app.RetainBlocks > 0 && app.state.Height >= app.RetainBlocks {
|
||||
resp.RetainHeight = app.state.Height - app.RetainBlocks + 1
|
||||
}
|
||||
return resp
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// Returns an associated value or nil if missing.
|
||||
func (app *Application) Query(reqQuery types.RequestQuery) types.ResponseQuery {
|
||||
func (app *Application) Query(_ context.Context, reqQuery *types.RequestQuery) (*types.ResponseQuery, error) {
|
||||
app.mu.Lock()
|
||||
defer app.mu.Unlock()
|
||||
|
||||
@@ -232,10 +233,10 @@ func (app *Application) Query(reqQuery types.RequestQuery) types.ResponseQuery {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return types.ResponseQuery{
|
||||
return &types.ResponseQuery{
|
||||
Key: reqQuery.Data,
|
||||
Value: value,
|
||||
}
|
||||
}, nil
|
||||
}
|
||||
|
||||
if reqQuery.Prove {
|
||||
@@ -257,7 +258,7 @@ func (app *Application) Query(reqQuery types.RequestQuery) types.ResponseQuery {
|
||||
resQuery.Log = "exists"
|
||||
}
|
||||
|
||||
return resQuery
|
||||
return &resQuery, nil
|
||||
}
|
||||
|
||||
value, err := app.state.db.Get(prefixKey(reqQuery.Data))
|
||||
@@ -277,26 +278,25 @@ func (app *Application) Query(reqQuery types.RequestQuery) types.ResponseQuery {
|
||||
resQuery.Log = "exists"
|
||||
}
|
||||
|
||||
return resQuery
|
||||
return &resQuery, nil
|
||||
}
|
||||
|
||||
func (app *Application) PrepareProposal(req types.RequestPrepareProposal) types.ResponsePrepareProposal {
|
||||
func (app *Application) PrepareProposal(_ context.Context, req *types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
|
||||
app.mu.Lock()
|
||||
defer app.mu.Unlock()
|
||||
|
||||
return types.ResponsePrepareProposal{
|
||||
ModifiedTxStatus: types.ResponsePrepareProposal_MODIFIED,
|
||||
TxRecords: app.substPrepareTx(req.Txs),
|
||||
}
|
||||
return &types.ResponsePrepareProposal{
|
||||
TxRecords: app.substPrepareTx(req.Txs, req.MaxTxBytes),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (*Application) ProcessProposal(req types.RequestProcessProposal) types.ResponseProcessProposal {
|
||||
func (*Application) ProcessProposal(_ context.Context, req *types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
|
||||
for _, tx := range req.Txs {
|
||||
if len(tx) == 0 {
|
||||
return types.ResponseProcessProposal{Status: types.ResponseProcessProposal_REJECT}
|
||||
return &types.ResponseProcessProposal{Status: types.ResponseProcessProposal_REJECT}, nil
|
||||
}
|
||||
}
|
||||
return types.ResponseProcessProposal{Status: types.ResponseProcessProposal_ACCEPT}
|
||||
return &types.ResponseProcessProposal{Status: types.ResponseProcessProposal_ACCEPT}, nil
|
||||
}
|
||||
|
||||
//---------------------------------------------
|
||||
@@ -434,28 +434,32 @@ func (app *Application) execPrepareTx(tx []byte) *types.ExecTxResult {
|
||||
}
|
||||
|
||||
// substPrepareTx substitutes all the transactions prefixed with 'prepare' in the
|
||||
// proposal for transactions with the prefix strips.
|
||||
// proposal for transactions with the prefix stripped.
|
||||
// It marks all of the original transactions as 'REMOVED' so that
|
||||
// Tendermint will remove them from its mempool.
|
||||
func (app *Application) substPrepareTx(blockData [][]byte) []*types.TxRecord {
|
||||
trs := make([]*types.TxRecord, len(blockData))
|
||||
func (app *Application) substPrepareTx(blockData [][]byte, maxTxBytes int64) []*types.TxRecord {
|
||||
trs := make([]*types.TxRecord, 0, len(blockData))
|
||||
var removed []*types.TxRecord
|
||||
for i, tx := range blockData {
|
||||
var totalBytes int64
|
||||
for _, tx := range blockData {
|
||||
txMod := tx
|
||||
action := types.TxRecord_UNMODIFIED
|
||||
if isPrepareTx(tx) {
|
||||
removed = append(removed, &types.TxRecord{
|
||||
Tx: tx,
|
||||
Action: types.TxRecord_REMOVED,
|
||||
})
|
||||
trs[i] = &types.TxRecord{
|
||||
Tx: bytes.TrimPrefix(tx, []byte(PreparePrefix)),
|
||||
Action: types.TxRecord_ADDED,
|
||||
}
|
||||
continue
|
||||
txMod = bytes.TrimPrefix(tx, []byte(PreparePrefix))
|
||||
action = types.TxRecord_ADDED
|
||||
}
|
||||
trs[i] = &types.TxRecord{
|
||||
Tx: tx,
|
||||
Action: types.TxRecord_UNMODIFIED,
|
||||
totalBytes += int64(len(txMod))
|
||||
if totalBytes > maxTxBytes {
|
||||
break
|
||||
}
|
||||
trs = append(trs, &types.TxRecord{
|
||||
Tx: txMod,
|
||||
Action: action,
|
||||
})
|
||||
}
|
||||
|
||||
return append(trs, removed...)
|
||||
|
||||
@@ -16,7 +16,6 @@ import (
|
||||
"github.com/tendermint/tendermint/abci/example/code"
|
||||
abciserver "github.com/tendermint/tendermint/abci/server"
|
||||
"github.com/tendermint/tendermint/abci/types"
|
||||
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -24,37 +23,43 @@ const (
|
||||
testValue = "def"
|
||||
)
|
||||
|
||||
func testKVStore(t *testing.T, app types.Application, tx []byte, key, value string) {
|
||||
req := types.RequestFinalizeBlock{Txs: [][]byte{tx}}
|
||||
ar := app.FinalizeBlock(req)
|
||||
func testKVStore(ctx context.Context, t *testing.T, app types.Application, tx []byte, key, value string) {
|
||||
req := &types.RequestFinalizeBlock{Txs: [][]byte{tx}}
|
||||
ar, err := app.FinalizeBlock(ctx, req)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(ar.TxResults))
|
||||
require.False(t, ar.TxResults[0].IsErr())
|
||||
// repeating tx doesn't raise error
|
||||
ar = app.FinalizeBlock(req)
|
||||
ar, err = app.FinalizeBlock(ctx, req)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(ar.TxResults))
|
||||
require.False(t, ar.TxResults[0].IsErr())
|
||||
// commit
|
||||
app.Commit()
|
||||
_, err = app.Commit(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
info := app.Info(types.RequestInfo{})
|
||||
info, err := app.Info(ctx, &types.RequestInfo{})
|
||||
require.NoError(t, err)
|
||||
require.NotZero(t, info.LastBlockHeight)
|
||||
|
||||
// make sure query is fine
|
||||
resQuery := app.Query(types.RequestQuery{
|
||||
resQuery, err := app.Query(ctx, &types.RequestQuery{
|
||||
Path: "/store",
|
||||
Data: []byte(key),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, code.CodeTypeOK, resQuery.Code)
|
||||
require.Equal(t, key, string(resQuery.Key))
|
||||
require.Equal(t, value, string(resQuery.Value))
|
||||
require.EqualValues(t, info.LastBlockHeight, resQuery.Height)
|
||||
|
||||
// make sure proof is fine
|
||||
resQuery = app.Query(types.RequestQuery{
|
||||
resQuery, err = app.Query(ctx, &types.RequestQuery{
|
||||
Path: "/store",
|
||||
Data: []byte(key),
|
||||
Prove: true,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.EqualValues(t, code.CodeTypeOK, resQuery.Code)
|
||||
require.Equal(t, key, string(resQuery.Key))
|
||||
require.Equal(t, value, string(resQuery.Value))
|
||||
@@ -62,18 +67,24 @@ func testKVStore(t *testing.T, app types.Application, tx []byte, key, value stri
|
||||
}
|
||||
|
||||
func TestKVStoreKV(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
kvstore := NewApplication()
|
||||
key := testKey
|
||||
value := key
|
||||
tx := []byte(key)
|
||||
testKVStore(t, kvstore, tx, key, value)
|
||||
testKVStore(ctx, t, kvstore, tx, key, value)
|
||||
|
||||
value = testValue
|
||||
tx = []byte(key + "=" + value)
|
||||
testKVStore(t, kvstore, tx, key, value)
|
||||
testKVStore(ctx, t, kvstore, tx, key, value)
|
||||
}
|
||||
|
||||
func TestPersistentKVStoreKV(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
dir := t.TempDir()
|
||||
logger := log.NewNopLogger()
|
||||
|
||||
@@ -81,22 +92,30 @@ func TestPersistentKVStoreKV(t *testing.T) {
|
||||
key := testKey
|
||||
value := key
|
||||
tx := []byte(key)
|
||||
testKVStore(t, kvstore, tx, key, value)
|
||||
testKVStore(ctx, t, kvstore, tx, key, value)
|
||||
|
||||
value = testValue
|
||||
tx = []byte(key + "=" + value)
|
||||
testKVStore(t, kvstore, tx, key, value)
|
||||
testKVStore(ctx, t, kvstore, tx, key, value)
|
||||
}
|
||||
|
||||
func TestPersistentKVStoreInfo(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
dir := t.TempDir()
|
||||
logger := log.NewNopLogger()
|
||||
|
||||
kvstore := NewPersistentKVStoreApplication(logger, dir)
|
||||
InitKVStore(kvstore)
|
||||
if err := InitKVStore(ctx, kvstore); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
height := int64(0)
|
||||
|
||||
resInfo := kvstore.Info(types.RequestInfo{})
|
||||
resInfo, err := kvstore.Info(ctx, &types.RequestInfo{})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if resInfo.LastBlockHeight != height {
|
||||
t.Fatalf("expected height of %d, got %d", height, resInfo.LastBlockHeight)
|
||||
}
|
||||
@@ -104,13 +123,19 @@ func TestPersistentKVStoreInfo(t *testing.T) {
|
||||
// make and apply block
|
||||
height = int64(1)
|
||||
hash := []byte("foo")
|
||||
header := tmproto.Header{
|
||||
Height: height,
|
||||
if _, err := kvstore.FinalizeBlock(ctx, &types.RequestFinalizeBlock{Hash: hash, Height: height}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
kvstore.FinalizeBlock(types.RequestFinalizeBlock{Hash: hash, Header: header})
|
||||
kvstore.Commit()
|
||||
|
||||
resInfo = kvstore.Info(types.RequestInfo{})
|
||||
if _, err := kvstore.Commit(ctx); err != nil {
|
||||
t.Fatal(err)
|
||||
|
||||
}
|
||||
|
||||
resInfo, err = kvstore.Info(ctx, &types.RequestInfo{})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if resInfo.LastBlockHeight != height {
|
||||
t.Fatalf("expected height of %d, got %d", height, resInfo.LastBlockHeight)
|
||||
}
|
||||
@@ -119,6 +144,9 @@ func TestPersistentKVStoreInfo(t *testing.T) {
|
||||
|
||||
// add a validator, remove a validator, update a validator
|
||||
func TestValUpdates(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
kvstore := NewApplication()
|
||||
|
||||
// init with some validators
|
||||
@@ -126,9 +154,12 @@ func TestValUpdates(t *testing.T) {
|
||||
nInit := 5
|
||||
vals := RandVals(total)
|
||||
// initialize with the first nInit
|
||||
kvstore.InitChain(types.RequestInitChain{
|
||||
_, err := kvstore.InitChain(ctx, &types.RequestInitChain{
|
||||
Validators: vals[:nInit],
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
vals1, vals2 := vals[:nInit], kvstore.Validators()
|
||||
valsEqual(t, vals1, vals2)
|
||||
@@ -141,7 +172,7 @@ func TestValUpdates(t *testing.T) {
|
||||
tx1 := MakeValSetChangeTx(v1.PubKey, v1.Power)
|
||||
tx2 := MakeValSetChangeTx(v2.PubKey, v2.Power)
|
||||
|
||||
makeApplyBlock(t, kvstore, 1, diff, tx1, tx2)
|
||||
makeApplyBlock(ctx, t, kvstore, 1, diff, tx1, tx2)
|
||||
|
||||
vals1, vals2 = vals[:nInit+2], kvstore.Validators()
|
||||
valsEqual(t, vals1, vals2)
|
||||
@@ -156,7 +187,7 @@ func TestValUpdates(t *testing.T) {
|
||||
tx2 = MakeValSetChangeTx(v2.PubKey, v2.Power)
|
||||
tx3 := MakeValSetChangeTx(v3.PubKey, v3.Power)
|
||||
|
||||
makeApplyBlock(t, kvstore, 2, diff, tx1, tx2, tx3)
|
||||
makeApplyBlock(ctx, t, kvstore, 2, diff, tx1, tx2, tx3)
|
||||
|
||||
vals1 = append(vals[:nInit-2], vals[nInit+1]) // nolint: gocritic
|
||||
vals2 = kvstore.Validators()
|
||||
@@ -172,7 +203,7 @@ func TestValUpdates(t *testing.T) {
|
||||
diff = []types.ValidatorUpdate{v1}
|
||||
tx1 = MakeValSetChangeTx(v1.PubKey, v1.Power)
|
||||
|
||||
makeApplyBlock(t, kvstore, 3, diff, tx1)
|
||||
makeApplyBlock(ctx, t, kvstore, 3, diff, tx1)
|
||||
|
||||
vals1 = append([]types.ValidatorUpdate{v1}, vals1[1:]...)
|
||||
vals2 = kvstore.Validators()
|
||||
@@ -180,26 +211,23 @@ func TestValUpdates(t *testing.T) {
|
||||
|
||||
}
|
||||
|
||||
func makeApplyBlock(
|
||||
t *testing.T,
|
||||
kvstore types.Application,
|
||||
heightInt int,
|
||||
diff []types.ValidatorUpdate,
|
||||
txs ...[]byte) {
|
||||
func makeApplyBlock(ctx context.Context, t *testing.T, kvstore types.Application, heightInt int, diff []types.ValidatorUpdate, txs ...[]byte) {
|
||||
// make and apply block
|
||||
height := int64(heightInt)
|
||||
hash := []byte("foo")
|
||||
header := tmproto.Header{
|
||||
Height: height,
|
||||
}
|
||||
|
||||
resFinalizeBlock := kvstore.FinalizeBlock(types.RequestFinalizeBlock{
|
||||
resFinalizeBlock, err := kvstore.FinalizeBlock(ctx, &types.RequestFinalizeBlock{
|
||||
Hash: hash,
|
||||
Header: header,
|
||||
Height: height,
|
||||
Txs: txs,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
kvstore.Commit()
|
||||
_, err = kvstore.Commit(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
valsEqual(t, diff, resFinalizeBlock.ValidatorUpdates)
|
||||
|
||||
@@ -268,8 +296,7 @@ func makeGRPCClientServer(
|
||||
// Start the listener
|
||||
socket := fmt.Sprintf("unix://%s.sock", name)
|
||||
|
||||
gapp := types.NewGRPCApplication(app)
|
||||
server := abciserver.NewGRPCServer(logger.With("module", "abci-server"), socket, gapp)
|
||||
server := abciserver.NewGRPCServer(logger.With("module", "abci-server"), socket, app)
|
||||
|
||||
if err := server.Start(ctx); err != nil {
|
||||
cancel()
|
||||
@@ -323,12 +350,12 @@ func runClientTests(ctx context.Context, t *testing.T, client abciclient.Client)
|
||||
}
|
||||
|
||||
func testClient(ctx context.Context, t *testing.T, app abciclient.Client, tx []byte, key, value string) {
|
||||
ar, err := app.FinalizeBlock(ctx, types.RequestFinalizeBlock{Txs: [][]byte{tx}})
|
||||
ar, err := app.FinalizeBlock(ctx, &types.RequestFinalizeBlock{Txs: [][]byte{tx}})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(ar.TxResults))
|
||||
require.False(t, ar.TxResults[0].IsErr())
|
||||
// repeating FinalizeBlock doesn't raise error
|
||||
ar, err = app.FinalizeBlock(ctx, types.RequestFinalizeBlock{Txs: [][]byte{tx}})
|
||||
ar, err = app.FinalizeBlock(ctx, &types.RequestFinalizeBlock{Txs: [][]byte{tx}})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(ar.TxResults))
|
||||
require.False(t, ar.TxResults[0].IsErr())
|
||||
@@ -336,12 +363,12 @@ func testClient(ctx context.Context, t *testing.T, app abciclient.Client, tx []b
|
||||
_, err = app.Commit(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
info, err := app.Info(ctx, types.RequestInfo{})
|
||||
info, err := app.Info(ctx, &types.RequestInfo{})
|
||||
require.NoError(t, err)
|
||||
require.NotZero(t, info.LastBlockHeight)
|
||||
|
||||
// make sure query is fine
|
||||
resQuery, err := app.Query(ctx, types.RequestQuery{
|
||||
resQuery, err := app.Query(ctx, &types.RequestQuery{
|
||||
Path: "/store",
|
||||
Data: []byte(key),
|
||||
})
|
||||
@@ -352,7 +379,7 @@ func testClient(ctx context.Context, t *testing.T, app abciclient.Client, tx []b
|
||||
require.EqualValues(t, info.LastBlockHeight, resQuery.Height)
|
||||
|
||||
// make sure proof is fine
|
||||
resQuery, err = app.Query(ctx, types.RequestQuery{
|
||||
resQuery, err = app.Query(ctx, &types.RequestQuery{
|
||||
Path: "/store",
|
||||
Data: []byte(key),
|
||||
Prove: true,
|
||||
|
||||
@@ -1,14 +1,13 @@
|
||||
package kvstore
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
|
||||
dbm "github.com/tendermint/tm-db"
|
||||
|
||||
"github.com/tendermint/tendermint/abci/types"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
cryptoproto "github.com/tendermint/tendermint/proto/tendermint/crypto"
|
||||
ptypes "github.com/tendermint/tendermint/proto/tendermint/types"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -38,41 +37,10 @@ func NewPersistentKVStoreApplication(logger log.Logger, dbDir string) *Persisten
|
||||
}
|
||||
}
|
||||
|
||||
func (app *PersistentKVStoreApplication) OfferSnapshot(req types.RequestOfferSnapshot) types.ResponseOfferSnapshot {
|
||||
return types.ResponseOfferSnapshot{Result: types.ResponseOfferSnapshot_ABORT}
|
||||
func (app *PersistentKVStoreApplication) OfferSnapshot(_ context.Context, req *types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
|
||||
return &types.ResponseOfferSnapshot{Result: types.ResponseOfferSnapshot_ABORT}, nil
|
||||
}
|
||||
|
||||
func (app *PersistentKVStoreApplication) ApplySnapshotChunk(req types.RequestApplySnapshotChunk) types.ResponseApplySnapshotChunk {
|
||||
return types.ResponseApplySnapshotChunk{Result: types.ResponseApplySnapshotChunk_ABORT}
|
||||
}
|
||||
|
||||
func (app *PersistentKVStoreApplication) ExtendVote(req types.RequestExtendVote) types.ResponseExtendVote {
|
||||
return types.ResponseExtendVote{VoteExtension: ConstructVoteExtension(req.Vote.ValidatorAddress)}
|
||||
}
|
||||
|
||||
func (app *PersistentKVStoreApplication) VerifyVoteExtension(req types.RequestVerifyVoteExtension) types.ResponseVerifyVoteExtension {
|
||||
return types.RespondVerifyVoteExtension(app.verifyExtension(req.Vote.ValidatorAddress, req.Vote.VoteExtension))
|
||||
}
|
||||
|
||||
// -----------------------------
|
||||
|
||||
func ConstructVoteExtension(valAddr []byte) *ptypes.VoteExtension {
|
||||
return &ptypes.VoteExtension{
|
||||
AppDataToSign: valAddr,
|
||||
AppDataSelfAuthenticating: valAddr,
|
||||
}
|
||||
}
|
||||
|
||||
func (app *PersistentKVStoreApplication) verifyExtension(valAddr []byte, ext *ptypes.VoteExtension) bool {
|
||||
if ext == nil {
|
||||
return false
|
||||
}
|
||||
canonical := ConstructVoteExtension(valAddr)
|
||||
if !bytes.Equal(canonical.AppDataToSign, ext.AppDataToSign) {
|
||||
return false
|
||||
}
|
||||
if !bytes.Equal(canonical.AppDataSelfAuthenticating, ext.AppDataSelfAuthenticating) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
func (app *PersistentKVStoreApplication) ApplySnapshotChunk(_ context.Context, req *types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
|
||||
return &types.ResponseApplySnapshotChunk{Result: types.ResponseApplySnapshotChunk_ABORT}, nil
|
||||
}
|
||||
|
||||
@@ -20,11 +20,11 @@ type GRPCServer struct {
|
||||
addr string
|
||||
server *grpc.Server
|
||||
|
||||
app types.ABCIApplicationServer
|
||||
app types.Application
|
||||
}
|
||||
|
||||
// NewGRPCServer returns a new gRPC ABCI server
|
||||
func NewGRPCServer(logger log.Logger, protoAddr string, app types.ABCIApplicationServer) service.Service {
|
||||
func NewGRPCServer(logger log.Logger, protoAddr string, app types.Application) service.Service {
|
||||
proto, addr := tmnet.ProtocolAndAddress(protoAddr)
|
||||
s := &GRPCServer{
|
||||
logger: logger,
|
||||
@@ -44,7 +44,7 @@ func (s *GRPCServer) OnStart(ctx context.Context) error {
|
||||
}
|
||||
|
||||
s.server = grpc.NewServer()
|
||||
types.RegisterABCIApplicationServer(s.server, s.app)
|
||||
types.RegisterABCIApplicationServer(s.server, &gRPCApplication{Application: s.app})
|
||||
|
||||
s.logger.Info("Listening", "proto", s.proto, "addr", s.addr)
|
||||
go func() {
|
||||
@@ -62,3 +62,22 @@ func (s *GRPCServer) OnStart(ctx context.Context) error {
|
||||
|
||||
// OnStop stops the gRPC server.
|
||||
func (s *GRPCServer) OnStop() { s.server.Stop() }
|
||||
|
||||
//-------------------------------------------------------

// gRPCApplication is a gRPC shim for Application
type gRPCApplication struct {
	types.Application
}

func (app *gRPCApplication) Echo(_ context.Context, req *types.RequestEcho) (*types.ResponseEcho, error) {
	return &types.ResponseEcho{Message: req.Message}, nil
}

func (app *gRPCApplication) Flush(_ context.Context, req *types.RequestFlush) (*types.ResponseFlush, error) {
	return &types.ResponseFlush{}, nil
}

func (app *gRPCApplication) Commit(ctx context.Context, req *types.RequestCommit) (*types.ResponseCommit, error) {
	return app.Application.Commit(ctx)
}
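With the shim in place, callers hand a plain types.Application straight to NewGRPCServer and the server wraps it in gRPCApplication internally. A rough sketch of standing up the gRPC server this way; the import paths, no-op logger, and socket address are assumptions for illustration, not taken from the diff:

```go
package main

import (
	"context"

	"github.com/tendermint/tendermint/abci/example/kvstore"
	abciserver "github.com/tendermint/tendermint/abci/server"
	"github.com/tendermint/tendermint/libs/log"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// No types.NewGRPCApplication wrapper any more: pass the Application directly.
	app := kvstore.NewApplication()
	srv := abciserver.NewGRPCServer(log.NewNopLogger(), "unix:///tmp/abci.sock", app)

	if err := srv.Start(ctx); err != nil {
		panic(err)
	}
	<-ctx.Done() // run until cancelled
}
```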
|
||||
@@ -23,7 +23,7 @@ func NewServer(logger log.Logger, protoAddr, transport string, app types.Applica
|
||||
case "socket":
|
||||
s = NewSocketServer(logger, protoAddr, app)
|
||||
case "grpc":
|
||||
s = NewGRPCServer(logger, protoAddr, types.NewGRPCApplication(app))
|
||||
s = NewGRPCServer(logger, protoAddr, app)
|
||||
default:
|
||||
err = fmt.Errorf("unknown server type %s", transport)
|
||||
}
|
||||
|
||||
@@ -159,12 +159,7 @@ func (s *SocketServer) acceptConnectionsRoutine(ctx context.Context) {
|
||||
}
|
||||
|
||||
// Read requests from conn and deal with them
|
||||
func (s *SocketServer) handleRequests(
|
||||
ctx context.Context,
|
||||
closer func(error),
|
||||
conn io.Reader,
|
||||
responses chan<- *types.Response,
|
||||
) {
|
||||
func (s *SocketServer) handleRequests(ctx context.Context, closer func(error), conn io.Reader, responses chan<- *types.Response) {
|
||||
var bufReader = bufio.NewReader(conn)
|
||||
|
||||
defer func() {
|
||||
@@ -184,7 +179,12 @@ func (s *SocketServer) handleRequests(
|
||||
return
|
||||
}
|
||||
|
||||
resp := s.processRequest(req)
|
||||
resp, err := s.processRequest(ctx, req)
|
||||
if err != nil {
|
||||
closer(err)
|
||||
return
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
closer(ctx.Err())
|
||||
@@ -194,42 +194,99 @@ func (s *SocketServer) handleRequests(
|
||||
}
|
||||
}
|
||||
|
||||
func (s *SocketServer) processRequest(req *types.Request) *types.Response {
|
||||
func (s *SocketServer) processRequest(ctx context.Context, req *types.Request) (*types.Response, error) {
|
||||
switch r := req.Value.(type) {
|
||||
case *types.Request_Echo:
|
||||
return types.ToResponseEcho(r.Echo.Message)
|
||||
return types.ToResponseEcho(r.Echo.Message), nil
|
||||
case *types.Request_Flush:
|
||||
return types.ToResponseFlush()
|
||||
return types.ToResponseFlush(), nil
|
||||
case *types.Request_Info:
|
||||
return types.ToResponseInfo(s.app.Info(*r.Info))
|
||||
res, err := s.app.Info(ctx, r.Info)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return types.ToResponseInfo(res), nil
|
||||
case *types.Request_CheckTx:
|
||||
return types.ToResponseCheckTx(s.app.CheckTx(*r.CheckTx))
|
||||
res, err := s.app.CheckTx(ctx, r.CheckTx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return types.ToResponseCheckTx(res), nil
|
||||
case *types.Request_Commit:
|
||||
return types.ToResponseCommit(s.app.Commit())
|
||||
res, err := s.app.Commit(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return types.ToResponseCommit(res), nil
|
||||
case *types.Request_Query:
|
||||
return types.ToResponseQuery(s.app.Query(*r.Query))
|
||||
res, err := s.app.Query(ctx, r.Query)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return types.ToResponseQuery(res), nil
|
||||
case *types.Request_InitChain:
|
||||
return types.ToResponseInitChain(s.app.InitChain(*r.InitChain))
|
||||
res, err := s.app.InitChain(ctx, r.InitChain)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return types.ToResponseInitChain(res), nil
|
||||
case *types.Request_ListSnapshots:
|
||||
return types.ToResponseListSnapshots(s.app.ListSnapshots(*r.ListSnapshots))
|
||||
res, err := s.app.ListSnapshots(ctx, r.ListSnapshots)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return types.ToResponseListSnapshots(res), nil
|
||||
case *types.Request_OfferSnapshot:
|
||||
return types.ToResponseOfferSnapshot(s.app.OfferSnapshot(*r.OfferSnapshot))
|
||||
res, err := s.app.OfferSnapshot(ctx, r.OfferSnapshot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return types.ToResponseOfferSnapshot(res), nil
|
||||
case *types.Request_PrepareProposal:
|
||||
return types.ToResponsePrepareProposal(s.app.PrepareProposal(*r.PrepareProposal))
|
||||
res, err := s.app.PrepareProposal(ctx, r.PrepareProposal)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return types.ToResponsePrepareProposal(res), nil
|
||||
case *types.Request_ProcessProposal:
|
||||
return types.ToResponseProcessProposal(s.app.ProcessProposal(*r.ProcessProposal))
|
||||
res, err := s.app.ProcessProposal(ctx, r.ProcessProposal)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return types.ToResponseProcessProposal(res), nil
|
||||
case *types.Request_LoadSnapshotChunk:
|
||||
return types.ToResponseLoadSnapshotChunk(s.app.LoadSnapshotChunk(*r.LoadSnapshotChunk))
|
||||
res, err := s.app.LoadSnapshotChunk(ctx, r.LoadSnapshotChunk)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return types.ToResponseLoadSnapshotChunk(res), nil
|
||||
case *types.Request_ApplySnapshotChunk:
|
||||
return types.ToResponseApplySnapshotChunk(s.app.ApplySnapshotChunk(*r.ApplySnapshotChunk))
|
||||
res, err := s.app.ApplySnapshotChunk(ctx, r.ApplySnapshotChunk)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return types.ToResponseApplySnapshotChunk(res), nil
|
||||
case *types.Request_ExtendVote:
|
||||
return types.ToResponseExtendVote(s.app.ExtendVote(*r.ExtendVote))
|
||||
res, err := s.app.ExtendVote(ctx, r.ExtendVote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return types.ToResponseExtendVote(res), nil
|
||||
case *types.Request_VerifyVoteExtension:
|
||||
return types.ToResponseVerifyVoteExtension(s.app.VerifyVoteExtension(*r.VerifyVoteExtension))
|
||||
res, err := s.app.VerifyVoteExtension(ctx, r.VerifyVoteExtension)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return types.ToResponseVerifyVoteExtension(res), nil
|
||||
case *types.Request_FinalizeBlock:
|
||||
return types.ToResponseFinalizeBlock(s.app.FinalizeBlock(*r.FinalizeBlock))
|
||||
res, err := s.app.FinalizeBlock(ctx, r.FinalizeBlock)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return types.ToResponseFinalizeBlock(res), nil
|
||||
default:
|
||||
return types.ToResponseException("Unknown request")
|
||||
return types.ToResponseException("Unknown request"), errors.New("unknown request type")
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -21,7 +21,7 @@ func InitChain(ctx context.Context, client abciclient.Client) error {
|
||||
power := mrand.Int()
|
||||
vals[i] = types.UpdateValidator(pubkey, int64(power), "")
|
||||
}
|
||||
_, err := client.InitChain(ctx, types.RequestInitChain{
|
||||
_, err := client.InitChain(ctx, &types.RequestInitChain{
|
||||
Validators: vals,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -50,7 +50,7 @@ func Commit(ctx context.Context, client abciclient.Client, hashExp []byte) error
|
||||
}
|
||||
|
||||
func FinalizeBlock(ctx context.Context, client abciclient.Client, txBytes [][]byte, codeExp []uint32, dataExp []byte) error {
|
||||
res, _ := client.FinalizeBlock(ctx, types.RequestFinalizeBlock{Txs: txBytes})
|
||||
res, _ := client.FinalizeBlock(ctx, &types.RequestFinalizeBlock{Txs: txBytes})
|
||||
for i, tx := range res.TxResults {
|
||||
code, data, log := tx.Code, tx.Data, tx.Log
|
||||
if code != codeExp[i] {
|
||||
@@ -71,7 +71,7 @@ func FinalizeBlock(ctx context.Context, client abciclient.Client, txBytes [][]by
|
||||
}
|
||||
|
||||
func CheckTx(ctx context.Context, client abciclient.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
|
||||
res, _ := client.CheckTx(ctx, types.RequestCheckTx{Tx: txBytes})
|
||||
res, _ := client.CheckTx(ctx, &types.RequestCheckTx{Tx: txBytes})
|
||||
code, data, log := res.Code, res.Data, res.Log
|
||||
if code != codeExp {
|
||||
fmt.Println("Failed test: CheckTx")
|
||||
|
||||
@@ -1,40 +1,36 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
"context"
|
||||
)
|
||||
import "context"
|
||||
|
||||
//go:generate ../../scripts/mockery_generate.sh Application
// Application is an interface that enables any finite, deterministic state machine
// to be driven by a blockchain-based replication engine via the ABCI.
// All methods take a RequestXxx argument and return a ResponseXxx argument,
// except CheckTx/DeliverTx, which take `tx []byte`, and `Commit`, which takes nothing.
type Application interface {
	// Info/Query Connection
	Info(RequestInfo) ResponseInfo    // Return application info
	Query(RequestQuery) ResponseQuery // Query for state
	Info(context.Context, *RequestInfo) (*ResponseInfo, error)    // Return application info
	Query(context.Context, *RequestQuery) (*ResponseQuery, error) // Query for state

	// Mempool Connection
	CheckTx(RequestCheckTx) ResponseCheckTx // Validate a tx for the mempool
	CheckTx(context.Context, *RequestCheckTx) (*ResponseCheckTx, error) // Validate a tx for the mempool

	// Consensus Connection
	InitChain(RequestInitChain) ResponseInitChain // Initialize blockchain w validators/other info from TendermintCore
	PrepareProposal(RequestPrepareProposal) ResponsePrepareProposal
	ProcessProposal(RequestProcessProposal) ResponseProcessProposal
	InitChain(context.Context, *RequestInitChain) (*ResponseInitChain, error) // Initialize blockchain w validators/other info from TendermintCore
	PrepareProposal(context.Context, *RequestPrepareProposal) (*ResponsePrepareProposal, error)
	ProcessProposal(context.Context, *RequestProcessProposal) (*ResponseProcessProposal, error)
	// Commit the state and return the application Merkle root hash
	Commit() ResponseCommit
	Commit(context.Context) (*ResponseCommit, error)
	// Create application specific vote extension
	ExtendVote(RequestExtendVote) ResponseExtendVote
	ExtendVote(context.Context, *RequestExtendVote) (*ResponseExtendVote, error)
	// Verify application's vote extension data
	VerifyVoteExtension(RequestVerifyVoteExtension) ResponseVerifyVoteExtension
	VerifyVoteExtension(context.Context, *RequestVerifyVoteExtension) (*ResponseVerifyVoteExtension, error)
	// Deliver the decided block with its txs to the Application
	FinalizeBlock(RequestFinalizeBlock) ResponseFinalizeBlock
	FinalizeBlock(context.Context, *RequestFinalizeBlock) (*ResponseFinalizeBlock, error)

	// State Sync Connection
	ListSnapshots(RequestListSnapshots) ResponseListSnapshots                // List available snapshots
	OfferSnapshot(RequestOfferSnapshot) ResponseOfferSnapshot                // Offer a snapshot to the application
	LoadSnapshotChunk(RequestLoadSnapshotChunk) ResponseLoadSnapshotChunk    // Load a snapshot chunk
	ApplySnapshotChunk(RequestApplySnapshotChunk) ResponseApplySnapshotChunk // Apply a shapshot chunk
	ListSnapshots(context.Context, *RequestListSnapshots) (*ResponseListSnapshots, error)                // List available snapshots
	OfferSnapshot(context.Context, *RequestOfferSnapshot) (*ResponseOfferSnapshot, error)                // Offer a snapshot to the application
	LoadSnapshotChunk(context.Context, *RequestLoadSnapshotChunk) (*ResponseLoadSnapshotChunk, error)    // Load a snapshot chunk
	ApplySnapshotChunk(context.Context, *RequestApplySnapshotChunk) (*ResponseApplySnapshotChunk, error) // Apply a shapshot chunk
}
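For a sense of what the new surface looks like to an implementer, here is a minimal sketch (not taken from the diff): a custom application embeds types.BaseApplication to pick up default implementations of every method and overrides only what it needs. The CounterApp name and its field are invented for illustration.

```go
package example

import (
	"context"

	"github.com/tendermint/tendermint/abci/types"
)

// CounterApp is a hypothetical application: it embeds BaseApplication for
// default implementations and overrides only FinalizeBlock.
type CounterApp struct {
	*types.BaseApplication

	txCount int64
}

var _ types.Application = (*CounterApp)(nil)

func NewCounterApp() *CounterApp {
	return &CounterApp{BaseApplication: types.NewBaseApplication()}
}

// FinalizeBlock now receives a context and a pointer request, and returns a
// pointer response together with an error.
func (app *CounterApp) FinalizeBlock(_ context.Context, req *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
	results := make([]*types.ExecTxResult, len(req.Txs))
	for i := range req.Txs {
		app.txCount++
		results[i] = &types.ExecTxResult{Code: types.CodeTypeOK}
	}
	return &types.ResponseFinalizeBlock{TxResults: results}, nil
}
```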
|
||||
//-------------------------------------------------------
|
||||
@@ -48,164 +44,78 @@ func NewBaseApplication() *BaseApplication {
|
||||
return &BaseApplication{}
|
||||
}
|
||||
|
||||
func (BaseApplication) Info(req RequestInfo) ResponseInfo {
|
||||
return ResponseInfo{}
|
||||
func (BaseApplication) Info(_ context.Context, req *RequestInfo) (*ResponseInfo, error) {
|
||||
return &ResponseInfo{}, nil
|
||||
}
|
||||
|
||||
func (BaseApplication) CheckTx(req RequestCheckTx) ResponseCheckTx {
|
||||
return ResponseCheckTx{Code: CodeTypeOK}
|
||||
func (BaseApplication) CheckTx(_ context.Context, req *RequestCheckTx) (*ResponseCheckTx, error) {
|
||||
return &ResponseCheckTx{Code: CodeTypeOK}, nil
|
||||
}
|
||||
|
||||
func (BaseApplication) Commit() ResponseCommit {
|
||||
return ResponseCommit{}
|
||||
func (BaseApplication) Commit(_ context.Context) (*ResponseCommit, error) {
|
||||
return &ResponseCommit{}, nil
|
||||
}
|
||||
|
||||
func (BaseApplication) ExtendVote(req RequestExtendVote) ResponseExtendVote {
|
||||
return ResponseExtendVote{}
|
||||
func (BaseApplication) ExtendVote(_ context.Context, req *RequestExtendVote) (*ResponseExtendVote, error) {
|
||||
return &ResponseExtendVote{}, nil
|
||||
}
|
||||
|
||||
func (BaseApplication) VerifyVoteExtension(req RequestVerifyVoteExtension) ResponseVerifyVoteExtension {
|
||||
return ResponseVerifyVoteExtension{
|
||||
func (BaseApplication) VerifyVoteExtension(_ context.Context, req *RequestVerifyVoteExtension) (*ResponseVerifyVoteExtension, error) {
|
||||
return &ResponseVerifyVoteExtension{
|
||||
Status: ResponseVerifyVoteExtension_ACCEPT,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (BaseApplication) Query(_ context.Context, req *RequestQuery) (*ResponseQuery, error) {
|
||||
return &ResponseQuery{Code: CodeTypeOK}, nil
|
||||
}
|
||||
|
||||
func (BaseApplication) InitChain(_ context.Context, req *RequestInitChain) (*ResponseInitChain, error) {
|
||||
return &ResponseInitChain{}, nil
|
||||
}
|
||||
|
||||
func (BaseApplication) ListSnapshots(_ context.Context, req *RequestListSnapshots) (*ResponseListSnapshots, error) {
|
||||
return &ResponseListSnapshots{}, nil
|
||||
}
|
||||
|
||||
func (BaseApplication) OfferSnapshot(_ context.Context, req *RequestOfferSnapshot) (*ResponseOfferSnapshot, error) {
|
||||
return &ResponseOfferSnapshot{}, nil
|
||||
}
|
||||
|
||||
func (BaseApplication) LoadSnapshotChunk(_ context.Context, _ *RequestLoadSnapshotChunk) (*ResponseLoadSnapshotChunk, error) {
|
||||
return &ResponseLoadSnapshotChunk{}, nil
|
||||
}
|
||||
|
||||
func (BaseApplication) ApplySnapshotChunk(_ context.Context, req *RequestApplySnapshotChunk) (*ResponseApplySnapshotChunk, error) {
|
||||
return &ResponseApplySnapshotChunk{}, nil
|
||||
}
|
||||
|
||||
func (BaseApplication) PrepareProposal(_ context.Context, req *RequestPrepareProposal) (*ResponsePrepareProposal, error) {
	trs := make([]*TxRecord, 0, len(req.Txs))
	var totalBytes int64
	for _, tx := range req.Txs {
		totalBytes += int64(len(tx))
		if totalBytes > req.MaxTxBytes {
			break
		}
		trs = append(trs, &TxRecord{
			Action: TxRecord_UNMODIFIED,
			Tx:     tx,
		})
	}
	return &ResponsePrepareProposal{TxRecords: trs}, nil
}
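The default PrepareProposal keeps transactions unmodified and stops appending records once the running byte total would exceed req.MaxTxBytes. A small test sketch of that behavior; the values are invented and it assumes the testify require package already used elsewhere in these tests:

```go
package types_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/tendermint/tendermint/abci/types"
)

// Two 4-byte txs with a 6-byte budget yield a single UNMODIFIED record,
// because adding the second tx would push the total to 8 bytes.
func TestBasePrepareProposalBudget(t *testing.T) {
	res, err := types.NewBaseApplication().PrepareProposal(context.Background(),
		&types.RequestPrepareProposal{
			Txs:        [][]byte{[]byte("aaaa"), []byte("bbbb")},
			MaxTxBytes: 6,
		})
	require.NoError(t, err)
	require.Len(t, res.TxRecords, 1)
	require.Equal(t, types.TxRecord_UNMODIFIED, res.TxRecords[0].Action)
}
```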
|
||||
func (BaseApplication) Query(req RequestQuery) ResponseQuery {
|
||||
return ResponseQuery{Code: CodeTypeOK}
|
||||
func (BaseApplication) ProcessProposal(_ context.Context, req *RequestProcessProposal) (*ResponseProcessProposal, error) {
|
||||
return &ResponseProcessProposal{Status: ResponseProcessProposal_ACCEPT}, nil
|
||||
}
|
||||
|
||||
func (BaseApplication) InitChain(req RequestInitChain) ResponseInitChain {
|
||||
return ResponseInitChain{}
|
||||
}
|
||||
|
||||
func (BaseApplication) ListSnapshots(req RequestListSnapshots) ResponseListSnapshots {
|
||||
return ResponseListSnapshots{}
|
||||
}
|
||||
|
||||
func (BaseApplication) OfferSnapshot(req RequestOfferSnapshot) ResponseOfferSnapshot {
|
||||
return ResponseOfferSnapshot{}
|
||||
}
|
||||
|
||||
func (BaseApplication) LoadSnapshotChunk(req RequestLoadSnapshotChunk) ResponseLoadSnapshotChunk {
|
||||
return ResponseLoadSnapshotChunk{}
|
||||
}
|
||||
|
||||
func (BaseApplication) ApplySnapshotChunk(req RequestApplySnapshotChunk) ResponseApplySnapshotChunk {
|
||||
return ResponseApplySnapshotChunk{}
|
||||
}
|
||||
|
||||
func (BaseApplication) PrepareProposal(req RequestPrepareProposal) ResponsePrepareProposal {
|
||||
return ResponsePrepareProposal{ModifiedTxStatus: ResponsePrepareProposal_UNMODIFIED}
|
||||
}
|
||||
|
||||
func (BaseApplication) ProcessProposal(req RequestProcessProposal) ResponseProcessProposal {
|
||||
return ResponseProcessProposal{Status: ResponseProcessProposal_ACCEPT}
|
||||
}
|
||||
|
||||
func (BaseApplication) FinalizeBlock(req RequestFinalizeBlock) ResponseFinalizeBlock {
|
||||
func (BaseApplication) FinalizeBlock(_ context.Context, req *RequestFinalizeBlock) (*ResponseFinalizeBlock, error) {
|
||||
txs := make([]*ExecTxResult, len(req.Txs))
|
||||
for i := range req.Txs {
|
||||
txs[i] = &ExecTxResult{Code: CodeTypeOK}
|
||||
}
|
||||
return ResponseFinalizeBlock{
|
||||
return &ResponseFinalizeBlock{
|
||||
TxResults: txs,
|
||||
}
|
||||
}
|
||||
|
||||
//-------------------------------------------------------
|
||||
|
||||
// GRPCApplication is a GRPC wrapper for Application
|
||||
type GRPCApplication struct {
|
||||
app Application
|
||||
}
|
||||
|
||||
func NewGRPCApplication(app Application) *GRPCApplication {
|
||||
return &GRPCApplication{app}
|
||||
}
|
||||
|
||||
func (app *GRPCApplication) Echo(ctx context.Context, req *RequestEcho) (*ResponseEcho, error) {
|
||||
return &ResponseEcho{Message: req.Message}, nil
|
||||
}
|
||||
|
||||
func (app *GRPCApplication) Flush(ctx context.Context, req *RequestFlush) (*ResponseFlush, error) {
|
||||
return &ResponseFlush{}, nil
|
||||
}
|
||||
|
||||
func (app *GRPCApplication) Info(ctx context.Context, req *RequestInfo) (*ResponseInfo, error) {
|
||||
res := app.app.Info(*req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *GRPCApplication) CheckTx(ctx context.Context, req *RequestCheckTx) (*ResponseCheckTx, error) {
|
||||
res := app.app.CheckTx(*req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *GRPCApplication) Query(ctx context.Context, req *RequestQuery) (*ResponseQuery, error) {
|
||||
res := app.app.Query(*req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *GRPCApplication) Commit(ctx context.Context, req *RequestCommit) (*ResponseCommit, error) {
|
||||
res := app.app.Commit()
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *GRPCApplication) InitChain(ctx context.Context, req *RequestInitChain) (*ResponseInitChain, error) {
|
||||
res := app.app.InitChain(*req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *GRPCApplication) ListSnapshots(
|
||||
ctx context.Context, req *RequestListSnapshots) (*ResponseListSnapshots, error) {
|
||||
res := app.app.ListSnapshots(*req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *GRPCApplication) OfferSnapshot(
|
||||
ctx context.Context, req *RequestOfferSnapshot) (*ResponseOfferSnapshot, error) {
|
||||
res := app.app.OfferSnapshot(*req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *GRPCApplication) LoadSnapshotChunk(
|
||||
ctx context.Context, req *RequestLoadSnapshotChunk) (*ResponseLoadSnapshotChunk, error) {
|
||||
res := app.app.LoadSnapshotChunk(*req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *GRPCApplication) ApplySnapshotChunk(
|
||||
ctx context.Context, req *RequestApplySnapshotChunk) (*ResponseApplySnapshotChunk, error) {
|
||||
res := app.app.ApplySnapshotChunk(*req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *GRPCApplication) ExtendVote(
|
||||
ctx context.Context, req *RequestExtendVote) (*ResponseExtendVote, error) {
|
||||
res := app.app.ExtendVote(*req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *GRPCApplication) VerifyVoteExtension(
|
||||
ctx context.Context, req *RequestVerifyVoteExtension) (*ResponseVerifyVoteExtension, error) {
|
||||
res := app.app.VerifyVoteExtension(*req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *GRPCApplication) PrepareProposal(
|
||||
ctx context.Context, req *RequestPrepareProposal) (*ResponsePrepareProposal, error) {
|
||||
res := app.app.PrepareProposal(*req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *GRPCApplication) ProcessProposal(
|
||||
ctx context.Context, req *RequestProcessProposal) (*ResponseProcessProposal, error) {
|
||||
res := app.app.ProcessProposal(*req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *GRPCApplication) FinalizeBlock(
|
||||
ctx context.Context, req *RequestFinalizeBlock) (*ResponseFinalizeBlock, error) {
|
||||
res := app.app.FinalizeBlock(*req)
|
||||
return &res, nil
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -39,15 +39,15 @@ func ToRequestFlush() *Request {
	}
}

func ToRequestInfo(req RequestInfo) *Request {
func ToRequestInfo(req *RequestInfo) *Request {
	return &Request{
		Value: &Request_Info{&req},
		Value: &Request_Info{req},
	}
}

func ToRequestCheckTx(req RequestCheckTx) *Request {
func ToRequestCheckTx(req *RequestCheckTx) *Request {
	return &Request{
		Value: &Request_CheckTx{&req},
		Value: &Request_CheckTx{req},
	}
}
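A short sketch of the pointer-based envelope helpers from the caller's side (the package main wrapper and sample tx are illustrative): the request and response values are stored in the envelope directly, with no copy and no &req indirection.

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/abci/types"
)

func main() {
	// Wrap a CheckTx request and response in the socket-protocol envelopes.
	req := types.ToRequestCheckTx(&types.RequestCheckTx{Tx: []byte("key=value")})
	res := types.ToResponseCheckTx(&types.ResponseCheckTx{Code: types.CodeTypeOK})

	// Value holds the oneof wrapper around the same pointers passed in above.
	fmt.Printf("%T %T\n", req.Value, res.Value)
}
```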
|
||||
@@ -57,69 +57,69 @@ func ToRequestCommit() *Request {
|
||||
}
|
||||
}
|
||||
|
||||
func ToRequestQuery(req RequestQuery) *Request {
|
||||
func ToRequestQuery(req *RequestQuery) *Request {
|
||||
return &Request{
|
||||
Value: &Request_Query{&req},
|
||||
Value: &Request_Query{req},
|
||||
}
|
||||
}
|
||||
|
||||
func ToRequestInitChain(req RequestInitChain) *Request {
|
||||
func ToRequestInitChain(req *RequestInitChain) *Request {
|
||||
return &Request{
|
||||
Value: &Request_InitChain{&req},
|
||||
Value: &Request_InitChain{req},
|
||||
}
|
||||
}
|
||||
|
||||
func ToRequestListSnapshots(req RequestListSnapshots) *Request {
|
||||
func ToRequestListSnapshots(req *RequestListSnapshots) *Request {
|
||||
return &Request{
|
||||
Value: &Request_ListSnapshots{&req},
|
||||
Value: &Request_ListSnapshots{req},
|
||||
}
|
||||
}
|
||||
|
||||
func ToRequestOfferSnapshot(req RequestOfferSnapshot) *Request {
|
||||
func ToRequestOfferSnapshot(req *RequestOfferSnapshot) *Request {
|
||||
return &Request{
|
||||
Value: &Request_OfferSnapshot{&req},
|
||||
Value: &Request_OfferSnapshot{req},
|
||||
}
|
||||
}
|
||||
|
||||
func ToRequestLoadSnapshotChunk(req RequestLoadSnapshotChunk) *Request {
|
||||
func ToRequestLoadSnapshotChunk(req *RequestLoadSnapshotChunk) *Request {
|
||||
return &Request{
|
||||
Value: &Request_LoadSnapshotChunk{&req},
|
||||
Value: &Request_LoadSnapshotChunk{req},
|
||||
}
|
||||
}
|
||||
|
||||
func ToRequestApplySnapshotChunk(req RequestApplySnapshotChunk) *Request {
|
||||
func ToRequestApplySnapshotChunk(req *RequestApplySnapshotChunk) *Request {
|
||||
return &Request{
|
||||
Value: &Request_ApplySnapshotChunk{&req},
|
||||
Value: &Request_ApplySnapshotChunk{req},
|
||||
}
|
||||
}
|
||||
|
||||
func ToRequestExtendVote(req RequestExtendVote) *Request {
|
||||
func ToRequestExtendVote(req *RequestExtendVote) *Request {
|
||||
return &Request{
|
||||
Value: &Request_ExtendVote{&req},
|
||||
Value: &Request_ExtendVote{req},
|
||||
}
|
||||
}
|
||||
|
||||
func ToRequestVerifyVoteExtension(req RequestVerifyVoteExtension) *Request {
|
||||
func ToRequestVerifyVoteExtension(req *RequestVerifyVoteExtension) *Request {
|
||||
return &Request{
|
||||
Value: &Request_VerifyVoteExtension{&req},
|
||||
Value: &Request_VerifyVoteExtension{req},
|
||||
}
|
||||
}
|
||||
|
||||
func ToRequestPrepareProposal(req RequestPrepareProposal) *Request {
|
||||
func ToRequestPrepareProposal(req *RequestPrepareProposal) *Request {
|
||||
return &Request{
|
||||
Value: &Request_PrepareProposal{&req},
|
||||
Value: &Request_PrepareProposal{req},
|
||||
}
|
||||
}
|
||||
|
||||
func ToRequestProcessProposal(req RequestProcessProposal) *Request {
|
||||
func ToRequestProcessProposal(req *RequestProcessProposal) *Request {
|
||||
return &Request{
|
||||
Value: &Request_ProcessProposal{&req},
|
||||
Value: &Request_ProcessProposal{req},
|
||||
}
|
||||
}
|
||||
|
||||
func ToRequestFinalizeBlock(req RequestFinalizeBlock) *Request {
|
||||
func ToRequestFinalizeBlock(req *RequestFinalizeBlock) *Request {
|
||||
return &Request{
|
||||
Value: &Request_FinalizeBlock{&req},
|
||||
Value: &Request_FinalizeBlock{req},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -143,86 +143,86 @@ func ToResponseFlush() *Response {
|
||||
}
|
||||
}
|
||||
|
||||
func ToResponseInfo(res ResponseInfo) *Response {
|
||||
func ToResponseInfo(res *ResponseInfo) *Response {
|
||||
return &Response{
|
||||
Value: &Response_Info{&res},
|
||||
Value: &Response_Info{res},
|
||||
}
|
||||
}
|
||||
|
||||
func ToResponseCheckTx(res ResponseCheckTx) *Response {
|
||||
func ToResponseCheckTx(res *ResponseCheckTx) *Response {
|
||||
return &Response{
|
||||
Value: &Response_CheckTx{&res},
|
||||
Value: &Response_CheckTx{res},
|
||||
}
|
||||
}
|
||||
|
||||
func ToResponseCommit(res ResponseCommit) *Response {
|
||||
func ToResponseCommit(res *ResponseCommit) *Response {
|
||||
return &Response{
|
||||
Value: &Response_Commit{&res},
|
||||
Value: &Response_Commit{res},
|
||||
}
|
||||
}
|
||||
|
||||
func ToResponseQuery(res ResponseQuery) *Response {
|
||||
func ToResponseQuery(res *ResponseQuery) *Response {
|
||||
return &Response{
|
||||
Value: &Response_Query{&res},
|
||||
Value: &Response_Query{res},
|
||||
}
|
||||
}
|
||||
|
||||
func ToResponseInitChain(res ResponseInitChain) *Response {
|
||||
func ToResponseInitChain(res *ResponseInitChain) *Response {
|
||||
return &Response{
|
||||
Value: &Response_InitChain{&res},
|
||||
Value: &Response_InitChain{res},
|
||||
}
|
||||
}
|
||||
|
||||
func ToResponseListSnapshots(res ResponseListSnapshots) *Response {
|
||||
func ToResponseListSnapshots(res *ResponseListSnapshots) *Response {
|
||||
return &Response{
|
||||
Value: &Response_ListSnapshots{&res},
|
||||
Value: &Response_ListSnapshots{res},
|
||||
}
|
||||
}
|
||||
|
||||
func ToResponseOfferSnapshot(res ResponseOfferSnapshot) *Response {
|
||||
func ToResponseOfferSnapshot(res *ResponseOfferSnapshot) *Response {
|
||||
return &Response{
|
||||
Value: &Response_OfferSnapshot{&res},
|
||||
Value: &Response_OfferSnapshot{res},
|
||||
}
|
||||
}
|
||||
|
||||
func ToResponseLoadSnapshotChunk(res ResponseLoadSnapshotChunk) *Response {
|
||||
func ToResponseLoadSnapshotChunk(res *ResponseLoadSnapshotChunk) *Response {
|
||||
return &Response{
|
||||
Value: &Response_LoadSnapshotChunk{&res},
|
||||
Value: &Response_LoadSnapshotChunk{res},
|
||||
}
|
||||
}
|
||||
|
||||
func ToResponseApplySnapshotChunk(res ResponseApplySnapshotChunk) *Response {
|
||||
func ToResponseApplySnapshotChunk(res *ResponseApplySnapshotChunk) *Response {
|
||||
return &Response{
|
||||
Value: &Response_ApplySnapshotChunk{&res},
|
||||
Value: &Response_ApplySnapshotChunk{res},
|
||||
}
|
||||
}
|
||||
|
||||
func ToResponseExtendVote(res ResponseExtendVote) *Response {
|
||||
func ToResponseExtendVote(res *ResponseExtendVote) *Response {
|
||||
return &Response{
|
||||
Value: &Response_ExtendVote{&res},
|
||||
Value: &Response_ExtendVote{res},
|
||||
}
|
||||
}
|
||||
|
||||
func ToResponseVerifyVoteExtension(res ResponseVerifyVoteExtension) *Response {
|
||||
func ToResponseVerifyVoteExtension(res *ResponseVerifyVoteExtension) *Response {
|
||||
return &Response{
|
||||
Value: &Response_VerifyVoteExtension{&res},
|
||||
Value: &Response_VerifyVoteExtension{res},
|
||||
}
|
||||
}
|
||||
|
||||
func ToResponsePrepareProposal(res ResponsePrepareProposal) *Response {
|
||||
func ToResponsePrepareProposal(res *ResponsePrepareProposal) *Response {
|
||||
return &Response{
|
||||
Value: &Response_PrepareProposal{&res},
|
||||
Value: &Response_PrepareProposal{res},
|
||||
}
|
||||
}
|
||||
|
||||
func ToResponseProcessProposal(res ResponseProcessProposal) *Response {
|
||||
func ToResponseProcessProposal(res *ResponseProcessProposal) *Response {
|
||||
return &Response{
|
||||
Value: &Response_ProcessProposal{&res},
|
||||
Value: &Response_ProcessProposal{res},
|
||||
}
|
||||
}
|
||||
|
||||
func ToResponseFinalizeBlock(res ResponseFinalizeBlock) *Response {
|
||||
func ToResponseFinalizeBlock(res *ResponseFinalizeBlock) *Response {
|
||||
return &Response{
|
||||
Value: &Response_FinalizeBlock{&res},
|
||||
Value: &Response_FinalizeBlock{res},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,7 +3,11 @@
|
||||
package mocks
|
||||
|
||||
import (
|
||||
context "context"
|
||||
testing "testing"
|
||||
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
|
||||
types "github.com/tendermint/tendermint/abci/types"
|
||||
)
|
||||
|
||||
@@ -12,198 +16,334 @@ type Application struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
// ApplySnapshotChunk provides a mock function with given fields: _a0
|
||||
func (_m *Application) ApplySnapshotChunk(_a0 types.RequestApplySnapshotChunk) types.ResponseApplySnapshotChunk {
|
||||
// ApplySnapshotChunk provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Application) ApplySnapshotChunk(_a0 context.Context, _a1 *types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *types.ResponseApplySnapshotChunk
|
||||
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestApplySnapshotChunk) *types.ResponseApplySnapshotChunk); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*types.ResponseApplySnapshotChunk)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestApplySnapshotChunk) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// CheckTx provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Application) CheckTx(_a0 context.Context, _a1 *types.RequestCheckTx) (*types.ResponseCheckTx, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *types.ResponseCheckTx
|
||||
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestCheckTx) *types.ResponseCheckTx); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*types.ResponseCheckTx)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestCheckTx) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// Commit provides a mock function with given fields: _a0
|
||||
func (_m *Application) Commit(_a0 context.Context) (*types.ResponseCommit, error) {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 types.ResponseApplySnapshotChunk
|
||||
if rf, ok := ret.Get(0).(func(types.RequestApplySnapshotChunk) types.ResponseApplySnapshotChunk); ok {
|
||||
var r0 *types.ResponseCommit
|
||||
if rf, ok := ret.Get(0).(func(context.Context) *types.ResponseCommit); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
r0 = ret.Get(0).(types.ResponseApplySnapshotChunk)
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*types.ResponseCommit)
|
||||
}
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// CheckTx provides a mock function with given fields: _a0
|
||||
func (_m *Application) CheckTx(_a0 types.RequestCheckTx) types.ResponseCheckTx {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 types.ResponseCheckTx
|
||||
if rf, ok := ret.Get(0).(func(types.RequestCheckTx) types.ResponseCheckTx); ok {
|
||||
r0 = rf(_a0)
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context) error); ok {
|
||||
r1 = rf(_a0)
|
||||
} else {
|
||||
r0 = ret.Get(0).(types.ResponseCheckTx)
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// Commit provides a mock function with given fields:
|
||||
func (_m *Application) Commit() types.ResponseCommit {
|
||||
ret := _m.Called()
|
||||
// ExtendVote provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Application) ExtendVote(_a0 context.Context, _a1 *types.RequestExtendVote) (*types.ResponseExtendVote, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 types.ResponseCommit
|
||||
if rf, ok := ret.Get(0).(func() types.ResponseCommit); ok {
|
||||
r0 = rf()
|
||||
var r0 *types.ResponseExtendVote
|
||||
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestExtendVote) *types.ResponseExtendVote); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
r0 = ret.Get(0).(types.ResponseCommit)
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*types.ResponseExtendVote)
|
||||
}
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// ExtendVote provides a mock function with given fields: _a0
|
||||
func (_m *Application) ExtendVote(_a0 types.RequestExtendVote) types.ResponseExtendVote {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 types.ResponseExtendVote
|
||||
if rf, ok := ret.Get(0).(func(types.RequestExtendVote) types.ResponseExtendVote); ok {
|
||||
r0 = rf(_a0)
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestExtendVote) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r0 = ret.Get(0).(types.ResponseExtendVote)
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// FinalizeBlock provides a mock function with given fields: _a0
|
||||
func (_m *Application) FinalizeBlock(_a0 types.RequestFinalizeBlock) types.ResponseFinalizeBlock {
|
||||
ret := _m.Called(_a0)
|
||||
// FinalizeBlock provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Application) FinalizeBlock(_a0 context.Context, _a1 *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 types.ResponseFinalizeBlock
|
||||
if rf, ok := ret.Get(0).(func(types.RequestFinalizeBlock) types.ResponseFinalizeBlock); ok {
|
||||
r0 = rf(_a0)
|
||||
var r0 *types.ResponseFinalizeBlock
|
||||
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestFinalizeBlock) *types.ResponseFinalizeBlock); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
r0 = ret.Get(0).(types.ResponseFinalizeBlock)
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*types.ResponseFinalizeBlock)
|
||||
}
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// Info provides a mock function with given fields: _a0
|
||||
func (_m *Application) Info(_a0 types.RequestInfo) types.ResponseInfo {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 types.ResponseInfo
|
||||
if rf, ok := ret.Get(0).(func(types.RequestInfo) types.ResponseInfo); ok {
|
||||
r0 = rf(_a0)
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestFinalizeBlock) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r0 = ret.Get(0).(types.ResponseInfo)
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// InitChain provides a mock function with given fields: _a0
|
||||
func (_m *Application) InitChain(_a0 types.RequestInitChain) types.ResponseInitChain {
|
||||
ret := _m.Called(_a0)
|
||||
// Info provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Application) Info(_a0 context.Context, _a1 *types.RequestInfo) (*types.ResponseInfo, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 types.ResponseInitChain
|
||||
if rf, ok := ret.Get(0).(func(types.RequestInitChain) types.ResponseInitChain); ok {
|
||||
r0 = rf(_a0)
|
||||
var r0 *types.ResponseInfo
|
||||
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestInfo) *types.ResponseInfo); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
r0 = ret.Get(0).(types.ResponseInitChain)
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*types.ResponseInfo)
|
||||
}
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// ListSnapshots provides a mock function with given fields: _a0
|
||||
func (_m *Application) ListSnapshots(_a0 types.RequestListSnapshots) types.ResponseListSnapshots {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 types.ResponseListSnapshots
|
||||
if rf, ok := ret.Get(0).(func(types.RequestListSnapshots) types.ResponseListSnapshots); ok {
|
||||
r0 = rf(_a0)
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestInfo) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r0 = ret.Get(0).(types.ResponseListSnapshots)
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// LoadSnapshotChunk provides a mock function with given fields: _a0
|
||||
func (_m *Application) LoadSnapshotChunk(_a0 types.RequestLoadSnapshotChunk) types.ResponseLoadSnapshotChunk {
|
||||
ret := _m.Called(_a0)
|
||||
// InitChain provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Application) InitChain(_a0 context.Context, _a1 *types.RequestInitChain) (*types.ResponseInitChain, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 types.ResponseLoadSnapshotChunk
|
||||
if rf, ok := ret.Get(0).(func(types.RequestLoadSnapshotChunk) types.ResponseLoadSnapshotChunk); ok {
|
||||
r0 = rf(_a0)
|
||||
var r0 *types.ResponseInitChain
|
||||
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestInitChain) *types.ResponseInitChain); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
r0 = ret.Get(0).(types.ResponseLoadSnapshotChunk)
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*types.ResponseInitChain)
|
||||
}
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// OfferSnapshot provides a mock function with given fields: _a0
|
||||
func (_m *Application) OfferSnapshot(_a0 types.RequestOfferSnapshot) types.ResponseOfferSnapshot {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 types.ResponseOfferSnapshot
|
||||
if rf, ok := ret.Get(0).(func(types.RequestOfferSnapshot) types.ResponseOfferSnapshot); ok {
|
||||
r0 = rf(_a0)
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestInitChain) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r0 = ret.Get(0).(types.ResponseOfferSnapshot)
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// PrepareProposal provides a mock function with given fields: _a0
|
||||
func (_m *Application) PrepareProposal(_a0 types.RequestPrepareProposal) types.ResponsePrepareProposal {
|
||||
ret := _m.Called(_a0)
|
||||
// ListSnapshots provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Application) ListSnapshots(_a0 context.Context, _a1 *types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 types.ResponsePrepareProposal
|
||||
if rf, ok := ret.Get(0).(func(types.RequestPrepareProposal) types.ResponsePrepareProposal); ok {
|
||||
r0 = rf(_a0)
|
||||
var r0 *types.ResponseListSnapshots
|
||||
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestListSnapshots) *types.ResponseListSnapshots); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
r0 = ret.Get(0).(types.ResponsePrepareProposal)
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*types.ResponseListSnapshots)
|
||||
}
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// ProcessProposal provides a mock function with given fields: _a0
|
||||
func (_m *Application) ProcessProposal(_a0 types.RequestProcessProposal) types.ResponseProcessProposal {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 types.ResponseProcessProposal
|
||||
if rf, ok := ret.Get(0).(func(types.RequestProcessProposal) types.ResponseProcessProposal); ok {
|
||||
r0 = rf(_a0)
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestListSnapshots) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r0 = ret.Get(0).(types.ResponseProcessProposal)
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// Query provides a mock function with given fields: _a0
|
||||
func (_m *Application) Query(_a0 types.RequestQuery) types.ResponseQuery {
|
||||
ret := _m.Called(_a0)
|
||||
// LoadSnapshotChunk provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Application) LoadSnapshotChunk(_a0 context.Context, _a1 *types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 types.ResponseQuery
|
||||
if rf, ok := ret.Get(0).(func(types.RequestQuery) types.ResponseQuery); ok {
|
||||
r0 = rf(_a0)
|
||||
var r0 *types.ResponseLoadSnapshotChunk
|
||||
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestLoadSnapshotChunk) *types.ResponseLoadSnapshotChunk); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
r0 = ret.Get(0).(types.ResponseQuery)
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*types.ResponseLoadSnapshotChunk)
|
||||
}
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// VerifyVoteExtension provides a mock function with given fields: _a0
|
||||
func (_m *Application) VerifyVoteExtension(_a0 types.RequestVerifyVoteExtension) types.ResponseVerifyVoteExtension {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 types.ResponseVerifyVoteExtension
|
||||
if rf, ok := ret.Get(0).(func(types.RequestVerifyVoteExtension) types.ResponseVerifyVoteExtension); ok {
|
||||
r0 = rf(_a0)
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestLoadSnapshotChunk) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r0 = ret.Get(0).(types.ResponseVerifyVoteExtension)
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// OfferSnapshot provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Application) OfferSnapshot(_a0 context.Context, _a1 *types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *types.ResponseOfferSnapshot
|
||||
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestOfferSnapshot) *types.ResponseOfferSnapshot); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*types.ResponseOfferSnapshot)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestOfferSnapshot) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// PrepareProposal provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Application) PrepareProposal(_a0 context.Context, _a1 *types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *types.ResponsePrepareProposal
|
||||
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestPrepareProposal) *types.ResponsePrepareProposal); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*types.ResponsePrepareProposal)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestPrepareProposal) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// ProcessProposal provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Application) ProcessProposal(_a0 context.Context, _a1 *types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *types.ResponseProcessProposal
|
||||
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestProcessProposal) *types.ResponseProcessProposal); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*types.ResponseProcessProposal)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestProcessProposal) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// Query provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Application) Query(_a0 context.Context, _a1 *types.RequestQuery) (*types.ResponseQuery, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *types.ResponseQuery
|
||||
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestQuery) *types.ResponseQuery); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*types.ResponseQuery)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestQuery) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// VerifyVoteExtension provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Application) VerifyVoteExtension(_a0 context.Context, _a1 *types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *types.ResponseVerifyVoteExtension
|
||||
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestVerifyVoteExtension) *types.ResponseVerifyVoteExtension); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*types.ResponseVerifyVoteExtension)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestVerifyVoteExtension) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// NewApplication creates a new instance of Application. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations.
func NewApplication(t testing.TB) *Application {
	mock := &Application{}
	mock.Mock.Test(t)

	t.Cleanup(func() { mock.AssertExpectations(t) })

	return mock
}
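A sketch of driving the regenerated mock in a test (import paths and expectation values are assumptions): calls are now set up with the context argument and pointer request/response types, and the cleanup registered by NewApplication asserts the expectations.

```go
package mocks_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	"github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/abci/types/mocks"
)

func TestInfoWithMockedApplication(t *testing.T) {
	app := mocks.NewApplication(t)
	app.On("Info", mock.Anything, mock.Anything).
		Return(&types.ResponseInfo{LastBlockHeight: 5}, nil)

	res, err := app.Info(context.Background(), &types.RequestInfo{})
	require.NoError(t, err)
	require.EqualValues(t, 5, res.LastBlockHeight)
}
```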
|
||||
@@ -1,175 +0,0 @@
|
||||
package mocks
|
||||
|
||||
import (
|
||||
types "github.com/tendermint/tendermint/abci/types"
|
||||
)
|
||||
|
||||
// BaseMock provides a wrapper around the generated Application mock and a BaseApplication.
|
||||
// BaseMock first tries to use the mock's implementation of the method.
|
||||
// If no functionality was provided for the mock by the user, BaseMock dispatches
|
||||
// to the BaseApplication and uses its functionality.
|
||||
// BaseMock allows users to provide mocked functionality for only the methods that matter
|
||||
// for their test while avoiding a panic if the code calls Application methods that are
|
||||
// not relevant to the test.
|
||||
type BaseMock struct {
|
||||
base *types.BaseApplication
|
||||
*Application
|
||||
}
|
||||
|
||||
func NewBaseMock() BaseMock {
|
||||
return BaseMock{
|
||||
base: types.NewBaseApplication(),
|
||||
Application: new(Application),
|
||||
}
|
||||
}
|
||||
|
||||
// Info/Query Connection
|
||||
// Return application info
|
||||
func (m BaseMock) Info(input types.RequestInfo) (ret types.ResponseInfo) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ret = m.base.Info(input)
|
||||
}
|
||||
}()
|
||||
ret = m.Application.Info(input)
|
||||
return ret
|
||||
}
|
||||
|
||||
func (m BaseMock) Query(input types.RequestQuery) (ret types.ResponseQuery) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ret = m.base.Query(input)
|
||||
}
|
||||
}()
|
||||
ret = m.Application.Query(input)
|
||||
return ret
|
||||
}
|
||||
|
||||
// Mempool Connection
|
||||
// Validate a tx for the mempool
|
||||
func (m BaseMock) CheckTx(input types.RequestCheckTx) (ret types.ResponseCheckTx) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ret = m.base.CheckTx(input)
|
||||
}
|
||||
}()
|
||||
ret = m.Application.CheckTx(input)
|
||||
return ret
|
||||
}
|
||||
|
||||
// Consensus Connection
|
||||
// Initialize blockchain w validators/other info from TendermintCore
|
||||
func (m BaseMock) InitChain(input types.RequestInitChain) (ret types.ResponseInitChain) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ret = m.base.InitChain(input)
|
||||
}
|
||||
}()
|
||||
ret = m.Application.InitChain(input)
|
||||
return ret
|
||||
}
|
||||
|
||||
func (m BaseMock) PrepareProposal(input types.RequestPrepareProposal) (ret types.ResponsePrepareProposal) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ret = m.base.PrepareProposal(input)
|
||||
}
|
||||
}()
|
||||
ret = m.Application.PrepareProposal(input)
|
||||
return ret
|
||||
}
|
||||
|
||||
func (m BaseMock) ProcessProposal(input types.RequestProcessProposal) (ret types.ResponseProcessProposal) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ret = m.base.ProcessProposal(input)
|
||||
}
|
||||
}()
|
||||
ret = m.Application.ProcessProposal(input)
|
||||
return ret
|
||||
}
|
||||
|
||||
// Commit the state and return the application Merkle root hash
|
||||
func (m BaseMock) Commit() (ret types.ResponseCommit) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ret = m.base.Commit()
|
||||
}
|
||||
}()
|
||||
ret = m.Application.Commit()
|
||||
return ret
|
||||
}
|
||||
|
||||
// Create application specific vote extension
|
||||
func (m BaseMock) ExtendVote(input types.RequestExtendVote) (ret types.ResponseExtendVote) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ret = m.base.ExtendVote(input)
|
||||
}
|
||||
}()
|
||||
ret = m.Application.ExtendVote(input)
|
||||
return ret
|
||||
}
|
||||
|
||||
// Verify application's vote extension data
|
||||
func (m BaseMock) VerifyVoteExtension(input types.RequestVerifyVoteExtension) (ret types.ResponseVerifyVoteExtension) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ret = m.base.VerifyVoteExtension(input)
|
||||
}
|
||||
}()
|
||||
ret = m.Application.VerifyVoteExtension(input)
|
||||
return ret
|
||||
}
|
||||
|
||||
// State Sync Connection
|
||||
// List available snapshots
|
||||
func (m BaseMock) ListSnapshots(input types.RequestListSnapshots) (ret types.ResponseListSnapshots) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ret = m.base.ListSnapshots(input)
|
||||
}
|
||||
}()
|
||||
ret = m.Application.ListSnapshots(input)
|
||||
return ret
|
||||
}
|
||||
|
||||
func (m BaseMock) OfferSnapshot(input types.RequestOfferSnapshot) (ret types.ResponseOfferSnapshot) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ret = m.base.OfferSnapshot(input)
|
||||
}
|
||||
}()
|
||||
ret = m.Application.OfferSnapshot(input)
|
||||
return ret
|
||||
}
|
||||
|
||||
func (m BaseMock) LoadSnapshotChunk(input types.RequestLoadSnapshotChunk) (ret types.ResponseLoadSnapshotChunk) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ret = m.base.LoadSnapshotChunk(input)
|
||||
}
|
||||
}()
|
||||
ret = m.Application.LoadSnapshotChunk(input)
|
||||
return ret
|
||||
}
|
||||
|
||||
func (m BaseMock) ApplySnapshotChunk(input types.RequestApplySnapshotChunk) (ret types.ResponseApplySnapshotChunk) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ret = m.base.ApplySnapshotChunk(input)
|
||||
}
|
||||
}()
|
||||
ret = m.Application.ApplySnapshotChunk(input)
|
||||
return ret
|
||||
}
|
||||
|
||||
func (m BaseMock) FinalizeBlock(input types.RequestFinalizeBlock) (ret types.ResponseFinalizeBlock) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ret = m.base.FinalizeBlock(input)
|
||||
}
|
||||
}()
|
||||
ret = m.Application.FinalizeBlock(input)
|
||||
return ret
|
||||
}
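A minimal usage sketch of the dispatch behavior described in the comments above (a hypothetical test, assuming the mockery-generated Application mock under github.com/tendermint/tendermint/abci/types/mocks and the testify APIs): only CheckTx is stubbed; any other ABCI call panics inside the generated mock, is recovered, and falls through to BaseApplication.

package mocks_test

import (
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	"github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/abci/types/mocks"
)

func TestOnlyCheckTxStubbed(t *testing.T) {
	app := mocks.NewBaseMock()

	// Stub exactly one method on the generated mock.
	app.Application.On("CheckTx", mock.Anything).
		Return(types.ResponseCheckTx{Code: 1})

	// The stubbed call is served by the mock.
	require.EqualValues(t, 1, app.CheckTx(types.RequestCheckTx{Tx: []byte("tx")}).Code)

	// An unstubbed call panics inside the mock, is recovered by BaseMock,
	// and falls back to BaseApplication's no-op implementation.
	require.Equal(t, types.ResponseInfo{}, app.Info(types.RequestInfo{}))
}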
|
||||
@@ -5,8 +5,6 @@ import (
|
||||
"encoding/json"
|
||||
|
||||
"github.com/gogo/protobuf/jsonpb"
|
||||
|
||||
types "github.com/tendermint/tendermint/proto/tendermint/types"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -53,14 +51,6 @@ func (r ResponseQuery) IsErr() bool {
|
||||
return r.Code != CodeTypeOK
|
||||
}
|
||||
|
||||
func (r ResponsePrepareProposal) IsTxStatusUnknown() bool {
|
||||
return r.ModifiedTxStatus == ResponsePrepareProposal_UNKNOWN
|
||||
}
|
||||
|
||||
func (r ResponsePrepareProposal) IsTxStatusModified() bool {
|
||||
return r.ModifiedTxStatus == ResponsePrepareProposal_MODIFIED
|
||||
}
|
||||
|
||||
func (r ResponseProcessProposal) IsAccepted() bool {
|
||||
return r.Status == ResponseProcessProposal_ACCEPT
|
||||
}
|
||||
@@ -165,15 +155,6 @@ var _ jsonRoundTripper = (*EventAttribute)(nil)
|
||||
// -----------------------------------------------
|
||||
// construct Result data
|
||||
|
||||
func RespondExtendVote(appDataToSign, appDataSelfAuthenticating []byte) ResponseExtendVote {
|
||||
return ResponseExtendVote{
|
||||
VoteExtension: &types.VoteExtension{
|
||||
AppDataToSign: appDataToSign,
|
||||
AppDataSelfAuthenticating: appDataSelfAuthenticating,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func RespondVerifyVoteExtension(ok bool) ResponseVerifyVoteExtension {
|
||||
status := ResponseVerifyVoteExtension_REJECT
|
||||
if ok {
|
||||
|
||||
@@ -113,7 +113,7 @@ func main() {
|
||||
// add prometheus metrics for unary RPC calls
|
||||
opts = append(opts, grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor))
|
||||
|
||||
ss := grpcprivval.NewSignerServer(*chainID, pv, logger)
|
||||
ss := grpcprivval.NewSignerServer(logger, *chainID, pv)
|
||||
|
||||
protocol, address := tmnet.ProtocolAndAddress(*addr)
|
||||
|
||||
|
||||
@@ -2,6 +2,7 @@ package debug
|
||||
|
||||
import (
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
)
|
||||
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/scripts/keymigrate"
|
||||
"github.com/tendermint/tendermint/scripts/scmigrate"
|
||||
)
|
||||
|
||||
func MakeKeyMigrateCommand(conf *cfg.Config, logger log.Logger) *cobra.Command {
|
||||
@@ -51,6 +52,13 @@ func MakeKeyMigrateCommand(conf *cfg.Config, logger log.Logger) *cobra.Command {
|
||||
return fmt.Errorf("running migration for context %q: %w",
|
||||
dbctx, err)
|
||||
}
|
||||
|
||||
if dbctx == "blockstore" {
|
||||
if err := scmigrate.Migrate(ctx, db); err != nil {
|
||||
return fmt.Errorf("running seen commit migration: %w", err)
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
logger.Info("completed database migration successfully")
|
||||
|
||||
@@ -180,7 +180,7 @@ for applications built w/ Cosmos SDK).
|
||||
return err
|
||||
}
|
||||
|
||||
ctx, cancel := signal.NotifyContext(cmd.Context(), syscall.SIGTERM)
|
||||
ctx, cancel := signal.NotifyContext(cmd.Context(), os.Interrupt, syscall.SIGTERM)
|
||||
defer cancel()
|
||||
|
||||
go func() {
|
||||
|
||||
@@ -13,81 +13,92 @@ import (
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
// MakeResetAllCommand constructs a command that removes the database of
|
||||
// MakeResetCommand constructs a command that removes the database of
|
||||
// the specified Tendermint core instance.
|
||||
func MakeResetAllCommand(conf *config.Config, logger log.Logger) *cobra.Command {
|
||||
func MakeResetCommand(conf *config.Config, logger log.Logger) *cobra.Command {
|
||||
var keyType string
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "unsafe-reset-all",
|
||||
Short: "(unsafe) Remove all the data and WAL, reset this node's validator to genesis state",
|
||||
resetCmd := &cobra.Command{
|
||||
Use: "reset",
|
||||
Short: "Set of commands to conveniently reset tendermint related data",
|
||||
}
|
||||
|
||||
resetBlocksCmd := &cobra.Command{
|
||||
Use: "blockchain",
|
||||
Short: "Removes all blocks, state, transactions and evidence stored by the tendermint node",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return resetAll(conf.DBDir(), conf.PrivValidator.KeyFile(),
|
||||
return ResetState(conf.DBDir(), logger)
|
||||
},
|
||||
}
|
||||
|
||||
resetPeersCmd := &cobra.Command{
|
||||
Use: "peers",
|
||||
Short: "Removes all peer addresses",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return ResetPeerStore(conf.DBDir())
|
||||
},
|
||||
}
|
||||
|
||||
resetSignerCmd := &cobra.Command{
|
||||
Use: "unsafe-signer",
|
||||
Short: "esets private validator signer state",
|
||||
Long: `Resets private validator signer state.
|
||||
Only use in testing. This can cause the node to double sign`,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return ResetFilePV(conf.PrivValidator.KeyFile(), conf.PrivValidator.StateFile(), logger, keyType)
|
||||
},
|
||||
}
|
||||
|
||||
resetAllCmd := &cobra.Command{
|
||||
Use: "unsafe-all",
|
||||
Short: "Removes all tendermint data including signing state",
|
||||
Long: `Removes all tendermint data including signing state.
|
||||
Only use in testing. This can cause the node to double sign`,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return ResetAll(conf.DBDir(), conf.PrivValidator.KeyFile(),
|
||||
conf.PrivValidator.StateFile(), logger, keyType)
|
||||
},
|
||||
}
|
||||
cmd.Flags().StringVar(&keyType, "key", types.ABCIPubKeyTypeEd25519,
|
||||
"Key type to generate privval file with. Options: ed25519, secp256k1")
|
||||
|
||||
return cmd
|
||||
resetSignerCmd.Flags().StringVar(&keyType, "key", types.ABCIPubKeyTypeEd25519,
|
||||
"Signer key type. Options: ed25519, secp256k1")
|
||||
|
||||
resetAllCmd.Flags().StringVar(&keyType, "key", types.ABCIPubKeyTypeEd25519,
|
||||
"Signer key type. Options: ed25519, secp256k1")
|
||||
|
||||
resetCmd.AddCommand(resetBlocksCmd)
|
||||
resetCmd.AddCommand(resetPeersCmd)
|
||||
resetCmd.AddCommand(resetSignerCmd)
|
||||
resetCmd.AddCommand(resetAllCmd)
|
||||
|
||||
return resetCmd
|
||||
}
|
||||
|
||||
// MakeResetStateCommand constructs a command that removes the database of
|
||||
// the specified Tendermint core instance.
|
||||
func MakeResetStateCommand(conf *config.Config, logger log.Logger) *cobra.Command {
|
||||
var keyType string
|
||||
|
||||
return &cobra.Command{
|
||||
Use: "reset-state",
|
||||
Short: "Remove all the data and WAL",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return resetState(conf.DBDir(), logger, keyType)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func MakeResetPrivateValidatorCommand(conf *config.Config, logger log.Logger) *cobra.Command {
|
||||
var keyType string
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "unsafe-reset-priv-validator",
|
||||
Short: "(unsafe) Reset this node's validator to genesis state",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return resetFilePV(conf.PrivValidator.KeyFile(), conf.PrivValidator.StateFile(), logger, keyType)
|
||||
},
|
||||
}
|
||||
|
||||
cmd.Flags().StringVar(&keyType, "key", types.ABCIPubKeyTypeEd25519,
|
||||
"Key type to generate privval file with. Options: ed25519, secp256k1")
|
||||
return cmd
|
||||
|
||||
}
|
||||
|
||||
// XXX: this is totally unsafe.
|
||||
// it's only suitable for testnets.
|
||||
|
||||
// XXX: this is totally unsafe.
|
||||
// it's only suitable for testnets.
|
||||
|
||||
// resetAll removes address book files plus all data, and resets the privValidator data.
|
||||
func resetAll(dbDir, privValKeyFile, privValStateFile string, logger log.Logger, keyType string) error {
|
||||
// ResetAll removes address book files plus all data, and resets the privValidator data.
|
||||
// Exported for external CLI usage
|
||||
// XXX: this is unsafe and is only suitable for testnets.
|
||||
func ResetAll(dbDir, privValKeyFile, privValStateFile string, logger log.Logger, keyType string) error {
|
||||
if err := os.RemoveAll(dbDir); err == nil {
|
||||
logger.Info("Removed all blockchain history", "dir", dbDir)
|
||||
} else {
|
||||
logger.Error("error removing all blockchain history", "dir", dbDir, "err", err)
|
||||
}
|
||||
|
||||
return resetFilePV(privValKeyFile, privValStateFile, logger, keyType)
|
||||
if err := tmos.EnsureDir(dbDir, 0700); err != nil {
|
||||
logger.Error("unable to recreate dbDir", "err", err)
|
||||
}
|
||||
|
||||
// recreate the dbDir since the privVal state needs to live there
|
||||
return ResetFilePV(privValKeyFile, privValStateFile, logger, keyType)
|
||||
}
|
||||
|
||||
// resetState removes address book files plus all databases.
|
||||
func resetState(dbDir string, logger log.Logger, keyType string) error {
|
||||
// ResetState removes all blocks, tendermint state, indexed transactions and evidence.
|
||||
func ResetState(dbDir string, logger log.Logger) error {
|
||||
blockdb := filepath.Join(dbDir, "blockstore.db")
|
||||
state := filepath.Join(dbDir, "state.db")
|
||||
wal := filepath.Join(dbDir, "cs.wal")
|
||||
evidence := filepath.Join(dbDir, "evidence.db")
|
||||
txIndex := filepath.Join(dbDir, "tx_index.db")
|
||||
peerstore := filepath.Join(dbDir, "peerstore.db")
|
||||
|
||||
if tmos.FileExists(blockdb) {
|
||||
if err := os.RemoveAll(blockdb); err == nil {
|
||||
@@ -129,20 +140,13 @@ func resetState(dbDir string, logger log.Logger, keyType string) error {
|
||||
}
|
||||
}
|
||||
|
||||
if tmos.FileExists(peerstore) {
|
||||
if err := os.RemoveAll(peerstore); err == nil {
|
||||
logger.Info("Removed peerstore.db", "dir", peerstore)
|
||||
} else {
|
||||
logger.Error("error removing peerstore.db", "dir", peerstore, "err", err)
|
||||
}
|
||||
}
|
||||
if err := tmos.EnsureDir(dbDir, 0700); err != nil {
|
||||
logger.Error("unable to recreate dbDir", "err", err)
|
||||
}
|
||||
return nil
|
||||
return tmos.EnsureDir(dbDir, 0700)
|
||||
}
|
||||
|
||||
func resetFilePV(privValKeyFile, privValStateFile string, logger log.Logger, keyType string) error {
|
||||
// ResetFilePV loads the file private validator and resets the watermark to 0. If used on an existing network,
|
||||
// this can cause the node to double sign.
|
||||
// XXX: this is unsafe and is only suitable for testnets.
|
||||
func ResetFilePV(privValKeyFile, privValStateFile string, logger log.Logger, keyType string) error {
|
||||
if _, err := os.Stat(privValKeyFile); err == nil {
|
||||
pv, err := privval.LoadFilePVEmptyState(privValKeyFile, privValStateFile)
|
||||
if err != nil {
|
||||
@@ -166,3 +170,13 @@ func resetFilePV(privValKeyFile, privValStateFile string, logger log.Logger, key
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ResetPeerStore removes the peer store containing all information used by the tendermint networking layer
|
||||
// In the case of a reset, new peers will need to be set either via the config or through the discovery mechanism
|
||||
func ResetPeerStore(dbDir string) error {
|
||||
peerstore := filepath.Join(dbDir, "peerstore.db")
|
||||
if tmos.FileExists(peerstore) {
|
||||
return os.RemoveAll(peerstore)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
cmd/tendermint/commands/reset_test.go (new file, 62 lines)
@@ -0,0 +1,62 @@
|
||||
package commands
|
||||
|
||||
import (
|
||||
"context"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/privval"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
func Test_ResetAll(t *testing.T) {
|
||||
config := cfg.TestConfig()
|
||||
dir := t.TempDir()
|
||||
config.SetRoot(dir)
|
||||
logger := log.NewNopLogger()
|
||||
cfg.EnsureRoot(dir)
|
||||
require.NoError(t, initFilesWithConfig(context.Background(), config, logger, types.ABCIPubKeyTypeEd25519))
|
||||
pv, err := privval.LoadFilePV(config.PrivValidator.KeyFile(), config.PrivValidator.StateFile())
|
||||
require.NoError(t, err)
|
||||
pv.LastSignState.Height = 10
|
||||
require.NoError(t, pv.Save())
|
||||
require.NoError(t, ResetAll(config.DBDir(), config.PrivValidator.KeyFile(),
|
||||
config.PrivValidator.StateFile(), logger, types.ABCIPubKeyTypeEd25519))
|
||||
require.DirExists(t, config.DBDir())
|
||||
require.NoFileExists(t, filepath.Join(config.DBDir(), "block.db"))
|
||||
require.NoFileExists(t, filepath.Join(config.DBDir(), "state.db"))
|
||||
require.NoFileExists(t, filepath.Join(config.DBDir(), "evidence.db"))
|
||||
require.NoFileExists(t, filepath.Join(config.DBDir(), "tx_index.db"))
|
||||
require.FileExists(t, config.PrivValidator.StateFile())
|
||||
pv, err = privval.LoadFilePV(config.PrivValidator.KeyFile(), config.PrivValidator.StateFile())
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(0), pv.LastSignState.Height)
|
||||
}
|
||||
|
||||
func Test_ResetState(t *testing.T) {
|
||||
config := cfg.TestConfig()
|
||||
dir := t.TempDir()
|
||||
config.SetRoot(dir)
|
||||
logger := log.NewNopLogger()
|
||||
cfg.EnsureRoot(dir)
|
||||
require.NoError(t, initFilesWithConfig(context.Background(), config, logger, types.ABCIPubKeyTypeEd25519))
|
||||
pv, err := privval.LoadFilePV(config.PrivValidator.KeyFile(), config.PrivValidator.StateFile())
|
||||
require.NoError(t, err)
|
||||
pv.LastSignState.Height = 10
|
||||
require.NoError(t, pv.Save())
|
||||
require.NoError(t, ResetState(config.DBDir(), logger))
|
||||
require.DirExists(t, config.DBDir())
|
||||
require.NoFileExists(t, filepath.Join(config.DBDir(), "block.db"))
|
||||
require.NoFileExists(t, filepath.Join(config.DBDir(), "state.db"))
|
||||
require.NoFileExists(t, filepath.Join(config.DBDir(), "evidence.db"))
|
||||
require.NoFileExists(t, filepath.Join(config.DBDir(), "tx_index.db"))
|
||||
require.FileExists(t, config.PrivValidator.StateFile())
|
||||
pv, err = privval.LoadFilePV(config.PrivValidator.KeyFile(), config.PrivValidator.StateFile())
|
||||
require.NoError(t, err)
|
||||
// private validator state should still be intact.
|
||||
require.Equal(t, int64(10), pv.LastSignState.Height)
|
||||
}
|
||||
@@ -51,10 +51,12 @@ func RootCommand(conf *config.Config, logger log.Logger) *cobra.Command {
|
||||
}
|
||||
*conf = *pconf
|
||||
config.EnsureRoot(conf.RootDir)
|
||||
|
||||
if err := log.OverrideWithNewLogger(logger, conf.LogFormat, conf.LogLevel); err != nil {
|
||||
return err
|
||||
}
|
||||
if warning := pconf.DeprecatedFieldWarning(); warning != nil {
|
||||
logger.Info("WARNING", "deprecated field warning", warning)
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
|
||||
@@ -105,7 +105,7 @@ func NewRunNodeCmd(nodeProvider cfg.ServiceProvider, conf *cfg.Config, logger lo
|
||||
return err
|
||||
}
|
||||
|
||||
ctx, cancel := signal.NotifyContext(cmd.Context(), syscall.SIGTERM)
|
||||
ctx, cancel := signal.NotifyContext(cmd.Context(), os.Interrupt, syscall.SIGTERM)
|
||||
defer cancel()
|
||||
|
||||
n, err := nodeProvider(ctx, conf, logger)
|
||||
|
||||
@@ -33,9 +33,7 @@ func main() {
|
||||
commands.MakeLightCommand(conf, logger),
|
||||
commands.MakeReplayCommand(conf, logger),
|
||||
commands.MakeReplayConsoleCommand(conf, logger),
|
||||
commands.MakeResetAllCommand(conf, logger),
|
||||
commands.MakeResetStateCommand(conf, logger),
|
||||
commands.MakeResetPrivateValidatorCommand(conf, logger),
|
||||
commands.MakeResetCommand(conf, logger),
|
||||
commands.MakeShowValidatorCommand(conf, logger),
|
||||
commands.MakeTestnetFilesCommand(conf, logger),
|
||||
commands.MakeShowNodeIDCommand(conf),
|
||||
|
||||
config/config.go (191 lines changed)
@@ -8,6 +8,7 @@ import (
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
@@ -145,6 +146,10 @@ func (cfg *Config) ValidateBasic() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cfg *Config) DeprecatedFieldWarning() error {
|
||||
return cfg.Consensus.DeprecatedFieldWarning()
|
||||
}
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
// BaseConfig
|
||||
|
||||
@@ -956,27 +961,6 @@ type ConsensusConfig struct {
|
||||
WalPath string `mapstructure:"wal-file"`
|
||||
walFile string // overrides WalPath if set
|
||||
|
||||
// TODO: remove timeout configs, these should be global not local
|
||||
// How long we wait for a proposal block before prevoting nil
|
||||
TimeoutPropose time.Duration `mapstructure:"timeout-propose"`
|
||||
// How much timeout-propose increases with each round
|
||||
TimeoutProposeDelta time.Duration `mapstructure:"timeout-propose-delta"`
|
||||
// How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil)
|
||||
TimeoutPrevote time.Duration `mapstructure:"timeout-prevote"`
|
||||
// How much the timeout-prevote increases with each round
|
||||
TimeoutPrevoteDelta time.Duration `mapstructure:"timeout-prevote-delta"`
|
||||
// How long we wait after receiving +2/3 precommits for “anything” (ie. not a single block or nil)
|
||||
TimeoutPrecommit time.Duration `mapstructure:"timeout-precommit"`
|
||||
// How much the timeout-precommit increases with each round
|
||||
TimeoutPrecommitDelta time.Duration `mapstructure:"timeout-precommit-delta"`
|
||||
// How long we wait after committing a block, before starting on the new
|
||||
// height (this gives us a chance to receive some more precommits, even
|
||||
// though we already have +2/3).
|
||||
TimeoutCommit time.Duration `mapstructure:"timeout-commit"`
|
||||
|
||||
// Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
|
||||
SkipTimeoutCommit bool `mapstructure:"skip-timeout-commit"`
|
||||
|
||||
// EmptyBlocks mode and possible interval between empty blocks
|
||||
CreateEmptyBlocks bool `mapstructure:"create-empty-blocks"`
|
||||
CreateEmptyBlocksInterval time.Duration `mapstructure:"create-empty-blocks-interval"`
|
||||
@@ -986,20 +970,59 @@ type ConsensusConfig struct {
|
||||
PeerQueryMaj23SleepDuration time.Duration `mapstructure:"peer-query-maj23-sleep-duration"`
|
||||
|
||||
DoubleSignCheckHeight int64 `mapstructure:"double-sign-check-height"`
|
||||
|
||||
// TODO: The following fields are all temporary overrides that should exist only
|
||||
// for the duration of the v0.36 release. The below fields should be completely
|
||||
// removed in the v0.37 release of Tendermint.
|
||||
// See: https://github.com/tendermint/tendermint/issues/8188
|
||||
|
||||
// UnsafeProposeTimeoutOverride provides an unsafe override of the Propose
|
||||
// timeout consensus parameter. It configures how long the consensus engine
|
||||
// will wait to receive a proposal block before prevoting nil.
|
||||
UnsafeProposeTimeoutOverride time.Duration `mapstructure:"unsafe-propose-timeout-override"`
|
||||
// UnsafeProposeTimeoutDeltaOverride provides an unsafe override of the
|
||||
// ProposeDelta timeout consensus parameter. It configures how much the
|
||||
// propose timeout increases with each round.
|
||||
UnsafeProposeTimeoutDeltaOverride time.Duration `mapstructure:"unsafe-propose-timeout-delta-override"`
|
||||
// UnsafeVoteTimeoutOverride provides an unsafe override of the Vote timeout
|
||||
// consensus parameter. It configures how long the consensus engine will wait
|
||||
// to gather additional votes after receiving +2/3 votes in a round.
|
||||
UnsafeVoteTimeoutOverride time.Duration `mapstructure:"unsafe-vote-timeout-override"`
|
||||
// UnsafeVoteTimeoutDeltaOverride provides an unsafe override of the VoteDelta
|
||||
// timeout consensus parameter. It configures how much the vote timeout
|
||||
// increases with each round.
|
||||
UnsafeVoteTimeoutDeltaOverride time.Duration `mapstructure:"unsafe-vote-timeout-delta-override"`
|
||||
// UnsafeCommitTimeoutOverride provides an unsafe override of the Commit timeout
|
||||
// consensus parameter. It configures how long the consensus engine will wait
|
||||
// after receiving +2/3 precommits before beginning the next height.
|
||||
UnsafeCommitTimeoutOverride time.Duration `mapstructure:"unsafe-commit-timeout-override"`
|
||||
|
||||
// UnsafeBypassCommitTimeoutOverride provides an unsafe override of the
|
||||
// BypassCommitTimeout consensus parameter. It configures if the consensus
|
||||
// engine will wait for the full Commit timeout before proceeding to the next height.
|
||||
// If it is set to true, the consensus engine will proceed to the next height
|
||||
// as soon as the node has gathered votes from all of the validators on the network.
|
||||
UnsafeBypassCommitTimeoutOverride *bool `mapstructure:"unsafe-bypass-commit-timeout-override"`
|
||||
|
||||
// Deprecated timeout parameters. These parameters are present in this struct
|
||||
// so that they can be parsed so that validation can check if they have erroneously
|
||||
// been included and provide a helpful error message.
|
||||
// These fields should be completely removed in v0.37.
|
||||
// See: https://github.com/tendermint/tendermint/issues/8188
|
||||
DeprecatedTimeoutPropose *interface{} `mapstructure:"timeout-propose"`
|
||||
DeprecatedTimeoutProposeDelta *interface{} `mapstructure:"timeout-propose-delta"`
|
||||
DeprecatedTimeoutPrevote *interface{} `mapstructure:"timeout-prevote"`
|
||||
DeprecatedTimeoutPrevoteDelta *interface{} `mapstructure:"timeout-prevote-delta"`
|
||||
DeprecatedTimeoutPrecommit *interface{} `mapstructure:"timeout-precommit"`
|
||||
DeprecatedTimeoutPrecommitDelta *interface{} `mapstructure:"timeout-precommit-delta"`
|
||||
DeprecatedTimeoutCommit *interface{} `mapstructure:"timeout-commit"`
|
||||
DeprecatedSkipTimeoutCommit *interface{} `mapstructure:"skip-timeout-commit"`
|
||||
}
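The *interface{} trick above works because a pointer field is only populated when the key is actually present in the decoded configuration. A small illustrative sketch of that behavior (assuming github.com/mitchellh/mapstructure, which the config machinery uses for decoding):

package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type probe struct {
	// Mirrors the Deprecated* fields: nil means "key not present".
	TimeoutCommit *interface{} `mapstructure:"timeout-commit"`
}

func main() {
	var set, unset probe
	_ = mapstructure.Decode(map[string]interface{}{"timeout-commit": "1s"}, &set)
	_ = mapstructure.Decode(map[string]interface{}{}, &unset)

	fmt.Println(set.TimeoutCommit != nil)   // true  -> deprecated key was set, warn
	fmt.Println(unset.TimeoutCommit != nil) // false -> key absent, no warning
}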
|
||||
|
||||
// DefaultConsensusConfig returns a default configuration for the consensus service
|
||||
func DefaultConsensusConfig() *ConsensusConfig {
|
||||
return &ConsensusConfig{
|
||||
WalPath: filepath.Join(defaultDataDir, "cs.wal", "wal"),
|
||||
TimeoutPropose: 3000 * time.Millisecond,
|
||||
TimeoutProposeDelta: 500 * time.Millisecond,
|
||||
TimeoutPrevote: 1000 * time.Millisecond,
|
||||
TimeoutPrevoteDelta: 500 * time.Millisecond,
|
||||
TimeoutPrecommit: 1000 * time.Millisecond,
|
||||
TimeoutPrecommitDelta: 500 * time.Millisecond,
|
||||
TimeoutCommit: 1000 * time.Millisecond,
|
||||
SkipTimeoutCommit: false,
|
||||
CreateEmptyBlocks: true,
|
||||
CreateEmptyBlocksInterval: 0 * time.Second,
|
||||
PeerGossipSleepDuration: 100 * time.Millisecond,
|
||||
@@ -1011,14 +1034,6 @@ func DefaultConsensusConfig() *ConsensusConfig {
|
||||
// TestConsensusConfig returns a configuration for testing the consensus service
|
||||
func TestConsensusConfig() *ConsensusConfig {
|
||||
cfg := DefaultConsensusConfig()
|
||||
cfg.TimeoutPropose = 40 * time.Millisecond
|
||||
cfg.TimeoutProposeDelta = 1 * time.Millisecond
|
||||
cfg.TimeoutPrevote = 10 * time.Millisecond
|
||||
cfg.TimeoutPrevoteDelta = 1 * time.Millisecond
|
||||
cfg.TimeoutPrecommit = 10 * time.Millisecond
|
||||
cfg.TimeoutPrecommitDelta = 1 * time.Millisecond
|
||||
cfg.TimeoutCommit = 10 * time.Millisecond
|
||||
cfg.SkipTimeoutCommit = true
|
||||
cfg.PeerGossipSleepDuration = 5 * time.Millisecond
|
||||
cfg.PeerQueryMaj23SleepDuration = 250 * time.Millisecond
|
||||
cfg.DoubleSignCheckHeight = int64(0)
|
||||
@@ -1030,33 +1045,6 @@ func (cfg *ConsensusConfig) WaitForTxs() bool {
|
||||
return !cfg.CreateEmptyBlocks || cfg.CreateEmptyBlocksInterval > 0
|
||||
}
|
||||
|
||||
// Propose returns the amount of time to wait for a proposal
|
||||
func (cfg *ConsensusConfig) Propose(round int32) time.Duration {
|
||||
return time.Duration(
|
||||
cfg.TimeoutPropose.Nanoseconds()+cfg.TimeoutProposeDelta.Nanoseconds()*int64(round),
|
||||
) * time.Nanosecond
|
||||
}
|
||||
|
||||
// Prevote returns the amount of time to wait for straggler votes after receiving any +2/3 prevotes
|
||||
func (cfg *ConsensusConfig) Prevote(round int32) time.Duration {
|
||||
return time.Duration(
|
||||
cfg.TimeoutPrevote.Nanoseconds()+cfg.TimeoutPrevoteDelta.Nanoseconds()*int64(round),
|
||||
) * time.Nanosecond
|
||||
}
|
||||
|
||||
// Precommit returns the amount of time to wait for straggler votes after receiving any +2/3 precommits
|
||||
func (cfg *ConsensusConfig) Precommit(round int32) time.Duration {
|
||||
return time.Duration(
|
||||
cfg.TimeoutPrecommit.Nanoseconds()+cfg.TimeoutPrecommitDelta.Nanoseconds()*int64(round),
|
||||
) * time.Nanosecond
|
||||
}
|
||||
|
||||
// Commit returns the amount of time to wait for straggler votes after receiving +2/3 precommits
|
||||
// for a single block (ie. a commit).
|
||||
func (cfg *ConsensusConfig) Commit(t time.Time) time.Time {
|
||||
return t.Add(cfg.TimeoutCommit)
|
||||
}
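For reference, the escalation these removed helpers computed was linear in the round number; with the former defaults (TimeoutPropose = 3s, TimeoutProposeDelta = 500ms) the waits were 3s, 3.5s and 4s for rounds 0, 1 and 2. A quick illustrative sketch of the same arithmetic:

package main

import (
	"fmt"
	"time"
)

func main() {
	base := 3000 * time.Millisecond // former TimeoutPropose default
	delta := 500 * time.Millisecond // former TimeoutProposeDelta default

	for round := int32(0); round < 3; round++ {
		// Same formula as the removed Propose helper: base + delta*round.
		fmt.Println(base + time.Duration(round)*delta)
	}
	// Output: 3s, 3.5s, 4s
}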
|
||||
|
||||
// WalFile returns the full path to the write-ahead log file
|
||||
func (cfg *ConsensusConfig) WalFile() string {
|
||||
if cfg.walFile != "" {
|
||||
@@ -1073,26 +1061,20 @@ func (cfg *ConsensusConfig) SetWalFile(walFile string) {
|
||||
// ValidateBasic performs basic validation (checking param bounds, etc.) and
|
||||
// returns an error if any check fails.
|
||||
func (cfg *ConsensusConfig) ValidateBasic() error {
|
||||
if cfg.TimeoutPropose < 0 {
|
||||
return errors.New("timeout-propose can't be negative")
|
||||
if cfg.UnsafeProposeTimeoutOverride < 0 {
|
||||
return errors.New("unsafe-propose-timeout-override can't be negative")
|
||||
}
|
||||
if cfg.TimeoutProposeDelta < 0 {
|
||||
return errors.New("timeout-propose-delta can't be negative")
|
||||
if cfg.UnsafeProposeTimeoutDeltaOverride < 0 {
|
||||
return errors.New("unsafe-propose-timeout-delta-override can't be negative")
|
||||
}
|
||||
if cfg.TimeoutPrevote < 0 {
|
||||
return errors.New("timeout-prevote can't be negative")
|
||||
if cfg.UnsafeVoteTimeoutOverride < 0 {
|
||||
return errors.New("unsafe-vote-timeout-override can't be negative")
|
||||
}
|
||||
if cfg.TimeoutPrevoteDelta < 0 {
|
||||
return errors.New("timeout-prevote-delta can't be negative")
|
||||
if cfg.UnsafeVoteTimeoutDeltaOverride < 0 {
|
||||
return errors.New("unsafe-vote-timeout-delta-override can't be negative")
|
||||
}
|
||||
if cfg.TimeoutPrecommit < 0 {
|
||||
return errors.New("timeout-precommit can't be negative")
|
||||
}
|
||||
if cfg.TimeoutPrecommitDelta < 0 {
|
||||
return errors.New("timeout-precommit-delta can't be negative")
|
||||
}
|
||||
if cfg.TimeoutCommit < 0 {
|
||||
return errors.New("timeout-commit can't be negative")
|
||||
if cfg.UnsafeCommitTimeoutOverride < 0 {
|
||||
return errors.New("unsafe-commit-timeout-override can't be negative")
|
||||
}
|
||||
if cfg.CreateEmptyBlocksInterval < 0 {
|
||||
return errors.New("create-empty-blocks-interval can't be negative")
|
||||
@@ -1109,6 +1091,44 @@ func (cfg *ConsensusConfig) ValidateBasic() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cfg *ConsensusConfig) DeprecatedFieldWarning() error {
|
||||
var fields []string
|
||||
|
||||
if cfg.DeprecatedTimeoutPropose != nil {
|
||||
fields = append(fields, "timeout-propose")
|
||||
}
|
||||
if cfg.DeprecatedTimeoutProposeDelta != nil {
|
||||
fields = append(fields, "timeout-propose-delta")
|
||||
}
|
||||
if cfg.DeprecatedTimeoutPrevote != nil {
|
||||
fields = append(fields, "timeout-prevote")
|
||||
}
|
||||
if cfg.DeprecatedTimeoutPrevoteDelta != nil {
|
||||
fields = append(fields, "timeout-prevote-delta")
|
||||
}
|
||||
if cfg.DeprecatedTimeoutPrecommit != nil {
|
||||
fields = append(fields, "timeout-precommit")
|
||||
}
|
||||
if cfg.DeprecatedTimeoutPrecommitDelta != nil {
|
||||
fields = append(fields, "timeout-precommit-delta")
|
||||
}
|
||||
if cfg.DeprecatedTimeoutCommit != nil {
|
||||
fields = append(fields, "timeout-commit")
|
||||
}
|
||||
if cfg.DeprecatedSkipTimeoutCommit != nil {
|
||||
fields = append(fields, "skip-timeout-commit")
|
||||
}
|
||||
if len(fields) != 0 {
|
||||
return fmt.Errorf("the following deprecated fields were set in the "+
|
||||
"configuration file: %s. These fields were removed in v0.36. Timeout "+
|
||||
"configuration has been moved to the ConsensusParams. For more information see "+
|
||||
"https://tinyurl.com/adr074", strings.Join(fields, ", "))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
// TxIndexConfig
|
||||
// Remember that Event has the following structure:
|
||||
@@ -1125,9 +1145,8 @@ type TxIndexConfig struct {
|
||||
// If list contains `null`, meaning no indexer service will be used.
|
||||
//
|
||||
// Options:
|
||||
// 1) "null" - no indexer services.
|
||||
// 2) "kv" (default) - the simplest possible indexer,
|
||||
// backed by key-value storage (defaults to levelDB; see DBBackend).
|
||||
// 1) "null" (default) - no indexer services.
|
||||
// 2) "kv" - a simple indexer backed by key-value storage (see DBBackend)
|
||||
// 3) "psql" - the indexer services backed by PostgreSQL.
|
||||
Indexer []string `mapstructure:"indexer"`
|
||||
|
||||
@@ -1138,14 +1157,12 @@ type TxIndexConfig struct {
|
||||
|
||||
// DefaultTxIndexConfig returns a default configuration for the transaction indexer.
|
||||
func DefaultTxIndexConfig() *TxIndexConfig {
|
||||
return &TxIndexConfig{
|
||||
Indexer: []string{"kv"},
|
||||
}
|
||||
return &TxIndexConfig{Indexer: []string{"null"}}
|
||||
}
|
||||
|
||||
// TestTxIndexConfig returns a default configuration for the transaction indexer.
|
||||
func TestTxIndexConfig() *TxIndexConfig {
|
||||
return DefaultTxIndexConfig()
|
||||
return &TxIndexConfig{Indexer: []string{"kv"}}
|
||||
}
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
|
||||
@@ -29,8 +29,8 @@ func TestConfigValidateBasic(t *testing.T) {
|
||||
cfg := DefaultConfig()
|
||||
assert.NoError(t, cfg.ValidateBasic())
|
||||
|
||||
// tamper with timeout_propose
|
||||
cfg.Consensus.TimeoutPropose = -10 * time.Second
|
||||
// tamper with unsafe-propose-timeout-override
|
||||
cfg.Consensus.UnsafeProposeTimeoutOverride = -10 * time.Second
|
||||
assert.Error(t, cfg.ValidateBasic())
|
||||
}
|
||||
|
||||
@@ -106,25 +106,21 @@ func TestConsensusConfig_ValidateBasic(t *testing.T) {
|
||||
modify func(*ConsensusConfig)
|
||||
expectErr bool
|
||||
}{
|
||||
"TimeoutPropose": {func(c *ConsensusConfig) { c.TimeoutPropose = time.Second }, false},
|
||||
"TimeoutPropose negative": {func(c *ConsensusConfig) { c.TimeoutPropose = -1 }, true},
|
||||
"TimeoutProposeDelta": {func(c *ConsensusConfig) { c.TimeoutProposeDelta = time.Second }, false},
|
||||
"TimeoutProposeDelta negative": {func(c *ConsensusConfig) { c.TimeoutProposeDelta = -1 }, true},
|
||||
"TimeoutPrevote": {func(c *ConsensusConfig) { c.TimeoutPrevote = time.Second }, false},
|
||||
"TimeoutPrevote negative": {func(c *ConsensusConfig) { c.TimeoutPrevote = -1 }, true},
|
||||
"TimeoutPrevoteDelta": {func(c *ConsensusConfig) { c.TimeoutPrevoteDelta = time.Second }, false},
|
||||
"TimeoutPrevoteDelta negative": {func(c *ConsensusConfig) { c.TimeoutPrevoteDelta = -1 }, true},
|
||||
"TimeoutPrecommit": {func(c *ConsensusConfig) { c.TimeoutPrecommit = time.Second }, false},
|
||||
"TimeoutPrecommit negative": {func(c *ConsensusConfig) { c.TimeoutPrecommit = -1 }, true},
|
||||
"TimeoutPrecommitDelta": {func(c *ConsensusConfig) { c.TimeoutPrecommitDelta = time.Second }, false},
|
||||
"TimeoutPrecommitDelta negative": {func(c *ConsensusConfig) { c.TimeoutPrecommitDelta = -1 }, true},
|
||||
"TimeoutCommit": {func(c *ConsensusConfig) { c.TimeoutCommit = time.Second }, false},
|
||||
"TimeoutCommit negative": {func(c *ConsensusConfig) { c.TimeoutCommit = -1 }, true},
|
||||
"PeerGossipSleepDuration": {func(c *ConsensusConfig) { c.PeerGossipSleepDuration = time.Second }, false},
|
||||
"PeerGossipSleepDuration negative": {func(c *ConsensusConfig) { c.PeerGossipSleepDuration = -1 }, true},
|
||||
"PeerQueryMaj23SleepDuration": {func(c *ConsensusConfig) { c.PeerQueryMaj23SleepDuration = time.Second }, false},
|
||||
"PeerQueryMaj23SleepDuration negative": {func(c *ConsensusConfig) { c.PeerQueryMaj23SleepDuration = -1 }, true},
|
||||
"DoubleSignCheckHeight negative": {func(c *ConsensusConfig) { c.DoubleSignCheckHeight = -1 }, true},
|
||||
"UnsafeProposeTimeoutOverride": {func(c *ConsensusConfig) { c.UnsafeProposeTimeoutOverride = time.Second }, false},
|
||||
"UnsafeProposeTimeoutOverride negative": {func(c *ConsensusConfig) { c.UnsafeProposeTimeoutOverride = -1 }, true},
|
||||
"UnsafeProposeTimeoutDeltaOverride": {func(c *ConsensusConfig) { c.UnsafeProposeTimeoutDeltaOverride = time.Second }, false},
|
||||
"UnsafeProposeTimeoutDeltaOverride negative": {func(c *ConsensusConfig) { c.UnsafeProposeTimeoutDeltaOverride = -1 }, true},
|
||||
"UnsafePrevoteTimeoutOverride": {func(c *ConsensusConfig) { c.UnsafeVoteTimeoutOverride = time.Second }, false},
|
||||
"UnsafePrevoteTimeoutOverride negative": {func(c *ConsensusConfig) { c.UnsafeVoteTimeoutOverride = -1 }, true},
|
||||
"UnsafePrevoteTimeoutDeltaOverride": {func(c *ConsensusConfig) { c.UnsafeVoteTimeoutDeltaOverride = time.Second }, false},
|
||||
"UnsafePrevoteTimeoutDeltaOverride negative": {func(c *ConsensusConfig) { c.UnsafeVoteTimeoutDeltaOverride = -1 }, true},
|
||||
"UnsafeCommitTimeoutOverride": {func(c *ConsensusConfig) { c.UnsafeCommitTimeoutOverride = time.Second }, false},
|
||||
"UnsafeCommitTimeoutOverride negative": {func(c *ConsensusConfig) { c.UnsafeCommitTimeoutOverride = -1 }, true},
|
||||
"PeerGossipSleepDuration": {func(c *ConsensusConfig) { c.PeerGossipSleepDuration = time.Second }, false},
|
||||
"PeerGossipSleepDuration negative": {func(c *ConsensusConfig) { c.PeerGossipSleepDuration = -1 }, true},
|
||||
"PeerQueryMaj23SleepDuration": {func(c *ConsensusConfig) { c.PeerQueryMaj23SleepDuration = time.Second }, false},
|
||||
"PeerQueryMaj23SleepDuration negative": {func(c *ConsensusConfig) { c.PeerQueryMaj23SleepDuration = -1 }, true},
|
||||
"DoubleSignCheckHeight negative": {func(c *ConsensusConfig) { c.DoubleSignCheckHeight = -1 }, true},
|
||||
}
|
||||
for desc, tc := range testcases {
|
||||
tc := tc // appease linter
|
||||
|
||||
@@ -12,8 +12,8 @@ import (
|
||||
tmrand "github.com/tendermint/tendermint/libs/rand"
|
||||
)
|
||||
|
||||
// DefaultDirPerm is the default permissions used when creating directories.
|
||||
const DefaultDirPerm = 0700
|
||||
// defaultDirPerm is the default permissions used when creating directories.
|
||||
const defaultDirPerm = 0700
|
||||
|
||||
var configTemplate *template.Template
|
||||
|
||||
@@ -32,13 +32,13 @@ func init() {
|
||||
// EnsureRoot creates the root, config, and data directories if they don't exist,
|
||||
// and panics if it fails.
|
||||
func EnsureRoot(rootDir string) {
|
||||
if err := tmos.EnsureDir(rootDir, DefaultDirPerm); err != nil {
|
||||
if err := tmos.EnsureDir(rootDir, defaultDirPerm); err != nil {
|
||||
panic(err.Error())
|
||||
}
|
||||
if err := tmos.EnsureDir(filepath.Join(rootDir, defaultConfigDir), DefaultDirPerm); err != nil {
|
||||
if err := tmos.EnsureDir(filepath.Join(rootDir, defaultConfigDir), defaultDirPerm); err != nil {
|
||||
panic(err.Error())
|
||||
}
|
||||
if err := tmos.EnsureDir(filepath.Join(rootDir, defaultDataDir), DefaultDirPerm); err != nil {
|
||||
if err := tmos.EnsureDir(filepath.Join(rootDir, defaultDataDir), defaultDirPerm); err != nil {
|
||||
panic(err.Error())
|
||||
}
|
||||
}
|
||||
@@ -450,32 +450,12 @@ fetchers = "{{ .StateSync.Fetchers }}"
|
||||
|
||||
wal-file = "{{ js .Consensus.WalPath }}"
|
||||
|
||||
# How long we wait for a proposal block before prevoting nil
|
||||
timeout-propose = "{{ .Consensus.TimeoutPropose }}"
|
||||
# How much timeout-propose increases with each round
|
||||
timeout-propose-delta = "{{ .Consensus.TimeoutProposeDelta }}"
|
||||
# How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil)
|
||||
timeout-prevote = "{{ .Consensus.TimeoutPrevote }}"
|
||||
# How much the timeout-prevote increases with each round
|
||||
timeout-prevote-delta = "{{ .Consensus.TimeoutPrevoteDelta }}"
|
||||
# How long we wait after receiving +2/3 precommits for “anything” (ie. not a single block or nil)
|
||||
timeout-precommit = "{{ .Consensus.TimeoutPrecommit }}"
|
||||
# How much the timeout-precommit increases with each round
|
||||
timeout-precommit-delta = "{{ .Consensus.TimeoutPrecommitDelta }}"
|
||||
# How long we wait after committing a block, before starting on the new
|
||||
# height (this gives us a chance to receive some more precommits, even
|
||||
# though we already have +2/3).
|
||||
timeout-commit = "{{ .Consensus.TimeoutCommit }}"
|
||||
|
||||
# How many blocks to look back to check existence of the node's consensus votes before joining consensus
|
||||
# When non-zero, the node will panic upon restart
|
||||
# if the same consensus key was used to sign {double-sign-check-height} last blocks.
|
||||
# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic.
|
||||
double-sign-check-height = {{ .Consensus.DoubleSignCheckHeight }}
|
||||
|
||||
# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
|
||||
skip-timeout-commit = {{ .Consensus.SkipTimeoutCommit }}
|
||||
|
||||
# EmptyBlocks mode and possible interval between empty blocks
|
||||
create-empty-blocks = {{ .Consensus.CreateEmptyBlocks }}
|
||||
create-empty-blocks-interval = "{{ .Consensus.CreateEmptyBlocksInterval }}"
|
||||
@@ -484,6 +464,50 @@ create-empty-blocks-interval = "{{ .Consensus.CreateEmptyBlocksInterval }}"
|
||||
peer-gossip-sleep-duration = "{{ .Consensus.PeerGossipSleepDuration }}"
|
||||
peer-query-maj23-sleep-duration = "{{ .Consensus.PeerQueryMaj23SleepDuration }}"
|
||||
|
||||
### Unsafe Timeout Overrides ###
|
||||
|
||||
# These fields provide temporary overrides for the Timeout consensus parameters.
|
||||
# Use of these parameters is strongly discouraged. Using these parameters may have serious
|
||||
# liveness implications for the validator and for the chain.
|
||||
#
|
||||
# These fields will be removed from the configuration file in the v0.37 release of Tendermint.
|
||||
# For additional information, see ADR-74:
|
||||
# https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-074-timeout-params.md
|
||||
|
||||
# This field provides an unsafe override of the Propose timeout consensus parameter.
|
||||
# This field configures how long the consensus engine will wait for a proposal block before prevoting nil.
|
||||
# If this field is set to a value greater than 0, it will take effect.
|
||||
# unsafe-propose-timeout-override = {{ .Consensus.UnsafeProposeTimeoutOverride }}
|
||||
|
||||
# This field provides an unsafe override of the ProposeDelta timeout consensus parameter.
|
||||
# This field configures how much the propose timeout increases with each round.
|
||||
# If this field is set to a value greater than 0, it will take effect.
|
||||
# unsafe-propose-timeout-delta-override = {{ .Consensus.UnsafeProposeTimeoutDeltaOverride }}
|
||||
|
||||
# This field provides an unsafe override of the Vote timeout consensus parameter.
|
||||
# This field configures how long the consensus engine will wait after
|
||||
# receiving +2/3 votes in a round.
|
||||
# If this field is set to a value greater than 0, it will take effect.
|
||||
# unsafe-vote-timeout-override = {{ .Consensus.UnsafeVoteTimeoutOverride }}
|
||||
|
||||
# This field provides an unsafe override of the VoteDelta timeout consensus parameter.
|
||||
# This field configures how much the vote timeout increases with each round.
|
||||
# If this field is set to a value greater than 0, it will take effect.
|
||||
# unsafe-vote-timeout-delta-override = {{ .Consensus.UnsafeVoteTimeoutDeltaOverride }}
|
||||
|
||||
# This field provides an unsafe override of the Commit timeout consensus parameter.
|
||||
# This field configures how long the consensus engine will wait after receiving
|
||||
# +2/3 precommits before beginning the next height.
|
||||
# If this field is set to a value greater than 0, it will take effect.
|
||||
# unsafe-commit-timeout-override = {{ .Consensus.UnsafeCommitTimeoutOverride }}
|
||||
|
||||
# This field provides an unsafe override of the BypassCommitTimeout consensus parameter.
|
||||
# This field configures if the consensus engine will wait for the full Commit timeout
|
||||
# before proceeding to the next height.
|
||||
# If this field is set to true, the consensus engine will proceed to the next height
|
||||
# as soon as the node has gathered votes from all of the validators on the network.
|
||||
# unsafe-bypass-commit-timeout-override =
|
||||
|
||||
#######################################################
|
||||
### Transaction Indexer Configuration Options ###
|
||||
#######################################################
|
||||
@@ -496,8 +520,8 @@ peer-query-maj23-sleep-duration = "{{ .Consensus.PeerQueryMaj23SleepDuration }}"
|
||||
# to decide which txs to index based on configuration set in the application.
|
||||
#
|
||||
# Options:
|
||||
# 1) "null"
|
||||
# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
|
||||
# 1) "null" (default) - no indexer services.
|
||||
# 2) "kv" - a simple indexer backed by key-value storage (see DBBackend)
|
||||
# 3) "psql" - the indexer services backed by PostgreSQL.
|
||||
# When "kv" or "psql" is chosen "tx.height" and "tx.hash" will always be indexed.
|
||||
indexer = [{{ range $i, $e := .TxIndex.Indexer }}{{if $i}}, {{end}}{{ printf "%q" $e}}{{end}}]
|
||||
@@ -542,10 +566,10 @@ func ResetTestRootWithChainID(dir, testName string, chainID string) (*Config, er
|
||||
return nil, err
|
||||
}
|
||||
// ensure config and data subdirs are created
|
||||
if err := tmos.EnsureDir(filepath.Join(rootDir, defaultConfigDir), DefaultDirPerm); err != nil {
|
||||
if err := tmos.EnsureDir(filepath.Join(rootDir, defaultConfigDir), defaultDirPerm); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := tmos.EnsureDir(filepath.Join(rootDir, defaultDataDir), DefaultDirPerm); err != nil {
|
||||
if err := tmos.EnsureDir(filepath.Join(rootDir, defaultDataDir), defaultDirPerm); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -588,7 +612,7 @@ func writeFile(filePath string, contents []byte, mode os.FileMode) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
var testGenesisFmt = `{
|
||||
const testGenesisFmt = `{
|
||||
"genesis_time": "2018-10-10T08:20:13.695936996Z",
|
||||
"chain_id": "%s",
|
||||
"initial_height": "1",
|
||||
@@ -602,6 +626,14 @@ var testGenesisFmt = `{
|
||||
"message_delay": "500000000",
|
||||
"precision": "10000000"
|
||||
},
|
||||
"timeout": {
|
||||
"propose": "30000000",
|
||||
"propose_delta": "50000",
|
||||
"vote": "30000000",
|
||||
"vote_delta": "50000",
|
||||
"commit": "10000000",
|
||||
"bypass_timeout_commit": true
|
||||
},
|
||||
"evidence": {
|
||||
"max_age_num_blocks": "100000",
|
||||
"max_age_duration": "172800000000000",
|
||||
@@ -627,7 +659,7 @@ var testGenesisFmt = `{
|
||||
"app_hash": ""
|
||||
}`
|
||||
|
||||
var testPrivValidatorKey = `{
|
||||
const testPrivValidatorKey = `{
|
||||
"address": "A3258DCBF45DCA0DF052981870F2D1441A36D145",
|
||||
"pub_key": {
|
||||
"type": "tendermint/PubKeyEd25519",
|
||||
@@ -639,7 +671,7 @@ var testPrivValidatorKey = `{
|
||||
}
|
||||
}`
|
||||
|
||||
var testPrivValidatorState = `{
|
||||
const testPrivValidatorState = `{
|
||||
"height": "0",
|
||||
"round": 0,
|
||||
"step": 0
|
||||
|
||||
@@ -12,7 +12,7 @@ For any specific algorithm, use its specific module e.g.
|
||||
|
||||
## Binary encoding
|
||||
|
||||
For Binary encoding, please refer to the [Tendermint encoding specification](https://docs.tendermint.com/master/spec/blockchain/encoding.html).
|
||||
For Binary encoding, please refer to the [Tendermint encoding specification](https://docs.tendermint.com/master/spec/core/encoding.html).
|
||||
|
||||
## JSON Encoding
|
||||
|
||||
|
||||
@@ -1,14 +1,18 @@
|
||||
package crypto
|
||||
|
||||
import (
|
||||
"github.com/tendermint/tendermint/crypto/tmhash"
|
||||
"crypto/sha256"
|
||||
|
||||
"github.com/tendermint/tendermint/internal/jsontypes"
|
||||
"github.com/tendermint/tendermint/libs/bytes"
|
||||
)
|
||||
|
||||
const (
|
||||
// HashSize is the size in bytes of an AddressHash.
|
||||
HashSize = sha256.Size
|
||||
|
||||
// AddressSize is the size of a pubkey address.
|
||||
AddressSize = tmhash.TruncatedSize
|
||||
AddressSize = 20
|
||||
)
|
||||
|
||||
// An address is a []byte, but hex-encoded even in JSON.
|
||||
@@ -16,8 +20,19 @@ const (
|
||||
// Use an alias so Unmarshal methods (with ptr receivers) are available too.
|
||||
type Address = bytes.HexBytes
|
||||
|
||||
// AddressHash computes a truncated SHA-256 hash of bz for use as
|
||||
// a peer address.
|
||||
//
|
||||
// See: https://docs.tendermint.com/master/spec/core/data_structures.html#address
|
||||
func AddressHash(bz []byte) Address {
|
||||
return Address(tmhash.SumTruncated(bz))
|
||||
h := sha256.Sum256(bz)
|
||||
return Address(h[:AddressSize])
|
||||
}
|
||||
|
||||
// Checksum returns the SHA256 of the bz.
|
||||
func Checksum(bz []byte) []byte {
|
||||
h := sha256.Sum256(bz)
|
||||
return h[:]
|
||||
}
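A small illustrative sketch (not part of the diff) of how the two helpers relate: Checksum is the full 32-byte SHA-256 digest, and AddressHash keeps only its first AddressSize (20) bytes.

package main

import (
	"bytes"
	"fmt"

	"github.com/tendermint/tendermint/crypto"
)

func main() {
	pubKeyBytes := []byte("example-pubkey-bytes")

	sum := crypto.Checksum(pubKeyBytes)     // 32-byte SHA-256
	addr := crypto.AddressHash(pubKeyBytes) // first 20 bytes of the same digest

	fmt.Println(len(sum), len(addr))                          // 32 20
	fmt.Println(bytes.Equal(addr, sum[:crypto.AddressSize])) // true
}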
|
||||
|
||||
type PubKey interface {
|
||||
|
||||
@@ -2,6 +2,8 @@ package ed25519
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"crypto/subtle"
|
||||
"errors"
|
||||
"fmt"
|
||||
@@ -11,7 +13,6 @@ import (
|
||||
"github.com/oasisprotocol/curve25519-voi/primitives/ed25519/extra/cache"
|
||||
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
"github.com/tendermint/tendermint/crypto/tmhash"
|
||||
"github.com/tendermint/tendermint/internal/jsontypes"
|
||||
)
|
||||
|
||||
@@ -124,7 +125,7 @@ func (privKey PrivKey) Type() string {
|
||||
// It uses OS randomness in conjunction with the current global random seed
|
||||
// in tendermint/libs/common to generate the private key.
|
||||
func GenPrivKey() PrivKey {
|
||||
return genPrivKey(crypto.CReader())
|
||||
return genPrivKey(rand.Reader)
|
||||
}
|
||||
|
||||
// genPrivKey generates a new ed25519 private key using the provided reader.
|
||||
@@ -142,9 +143,8 @@ func genPrivKey(rand io.Reader) PrivKey {
|
||||
// NOTE: secret should be the output of a KDF like bcrypt,
|
||||
// if it's derived from user input.
|
||||
func GenPrivKeyFromSecret(secret []byte) PrivKey {
|
||||
seed := crypto.Sha256(secret) // Not Ripemd160 because we want 32 bytes.
|
||||
|
||||
return PrivKey(ed25519.NewKeyFromSeed(seed))
|
||||
seed := sha256.Sum256(secret)
|
||||
return PrivKey(ed25519.NewKeyFromSeed(seed[:]))
|
||||
}
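An illustrative sketch of the property this gives: the same secret always yields the same key, since the seed is simply SHA-256(secret). (As the note above says, the secret should be the output of a KDF if it is derived from user input.)

package main

import (
	"bytes"
	"fmt"

	"github.com/tendermint/tendermint/crypto/ed25519"
)

func main() {
	secret := []byte("output-of-a-kdf-not-a-raw-password")

	k1 := ed25519.GenPrivKeyFromSecret(secret)
	k2 := ed25519.GenPrivKeyFromSecret(secret)

	// Deterministic: identical secrets produce identical keys.
	fmt.Println(bytes.Equal(k1.Bytes(), k2.Bytes())) // true
	fmt.Println(k1.PubKey().Equals(k2.PubKey()))     // true
}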
|
||||
|
||||
//-------------------------------------
|
||||
@@ -162,7 +162,7 @@ func (pubKey PubKey) Address() crypto.Address {
|
||||
if len(pubKey) != PubKeySize {
|
||||
panic("pubkey is incorrect size")
|
||||
}
|
||||
return crypto.Address(tmhash.SumTruncated(pubKey))
|
||||
return crypto.AddressHash(pubKey)
|
||||
}
|
||||
|
||||
// Bytes returns the PubKey byte format.
|
||||
@@ -229,5 +229,5 @@ func (b *BatchVerifier) Add(key crypto.PubKey, msg, signature []byte) error {
|
||||
}
|
||||
|
||||
func (b *BatchVerifier) Verify() (bool, []bool) {
|
||||
return b.BatchVerifier.Verify(crypto.CReader())
|
||||
return b.BatchVerifier.Verify(rand.Reader)
|
||||
}
|
||||
|
||||
@@ -1,28 +0,0 @@
|
||||
// Copyright 2017 Tendermint. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package crypto_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
)
|
||||
|
||||
func ExampleSha256() {
|
||||
sum := crypto.Sha256([]byte("This is Tendermint"))
|
||||
fmt.Printf("%x\n", sum)
|
||||
// Output:
|
||||
// f91afb642f3d1c87c17eb01aae5cb65c242dfdbe7cf1066cc260f4ce5d33b94e
|
||||
}
|
||||
@@ -1,11 +0,0 @@
|
||||
package crypto
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
)
|
||||
|
||||
func Sha256(bytes []byte) []byte {
|
||||
hasher := sha256.New()
|
||||
hasher.Write(bytes)
|
||||
return hasher.Sum(nil)
|
||||
}
|
||||
@@ -3,7 +3,7 @@ package merkle
|
||||
import (
|
||||
"hash"
|
||||
|
||||
"github.com/tendermint/tendermint/crypto/tmhash"
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
)
|
||||
|
||||
// TODO: make these have a large predefined capacity
|
||||
@@ -14,12 +14,12 @@ var (
|
||||
|
||||
// returns tmhash(<empty>)
|
||||
func emptyHash() []byte {
|
||||
return tmhash.Sum([]byte{})
|
||||
return crypto.Checksum([]byte{})
|
||||
}
|
||||
|
||||
// returns tmhash(0x00 || leaf)
|
||||
func leafHash(leaf []byte) []byte {
|
||||
return tmhash.Sum(append(leafPrefix, leaf...))
|
||||
return crypto.Checksum(append(leafPrefix, leaf...))
|
||||
}
|
||||
|
||||
// returns tmhash(0x00 || leaf)
|
||||
@@ -36,7 +36,7 @@ func innerHash(left []byte, right []byte) []byte {
|
||||
n := copy(data, innerPrefix)
|
||||
n += copy(data[n:], left)
|
||||
copy(data[n:], right)
|
||||
return tmhash.Sum(data)
|
||||
return crypto.Checksum(data)[:]
|
||||
}
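An illustrative sketch of the RFC 6962 domain separation used above: leaves are hashed with a 0x00 prefix and inner nodes with a 0x01 prefix, so a leaf hash can never collide with an inner-node hash.

package main

import (
	"fmt"

	"github.com/tendermint/tendermint/crypto"
)

func main() {
	left, right := []byte("L123"), []byte("R456")

	// leafHash: SHA-256(0x00 || leaf)
	leaf := crypto.Checksum(append([]byte{0x00}, []byte("some leaf")...))

	// innerHash: SHA-256(0x01 || left || right)
	inner := crypto.Checksum(append(append([]byte{0x01}, left...), right...))

	fmt.Printf("leaf:  %X\n", leaf)
	fmt.Printf("inner: %X\n", inner)
}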
|
||||
|
||||
func innerHashOpt(s hash.Hash, left []byte, right []byte) []byte {
|
||||
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/tendermint/tendermint/crypto/tmhash"
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
tmcrypto "github.com/tendermint/tendermint/proto/tendermint/crypto"
|
||||
)
|
||||
|
||||
@@ -102,15 +102,15 @@ func (sp *Proof) ValidateBasic() error {
|
||||
if sp.Index < 0 {
|
||||
return errors.New("negative Index")
|
||||
}
|
||||
if len(sp.LeafHash) != tmhash.Size {
|
||||
return fmt.Errorf("expected LeafHash size to be %d, got %d", tmhash.Size, len(sp.LeafHash))
|
||||
if len(sp.LeafHash) != crypto.HashSize {
|
||||
return fmt.Errorf("expected LeafHash size to be %d, got %d", crypto.HashSize, len(sp.LeafHash))
|
||||
}
|
||||
if len(sp.Aunts) > MaxAunts {
|
||||
return fmt.Errorf("expected no more than %d aunts, got %d", MaxAunts, len(sp.Aunts))
|
||||
}
|
||||
for i, auntHash := range sp.Aunts {
|
||||
if len(auntHash) != tmhash.Size {
|
||||
return fmt.Errorf("expected Aunts#%d size to be %d, got %d", i, tmhash.Size, len(auntHash))
|
||||
if len(auntHash) != crypto.HashSize {
|
||||
return fmt.Errorf("expected Aunts#%d size to be %d, got %d", i, crypto.HashSize, len(auntHash))
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
||||
@@ -2,9 +2,9 @@ package merkle
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha256"
|
||||
"fmt"
|
||||
|
||||
"github.com/tendermint/tendermint/crypto/tmhash"
|
||||
tmcrypto "github.com/tendermint/tendermint/proto/tendermint/crypto"
|
||||
)
|
||||
|
||||
@@ -79,14 +79,13 @@ func (op ValueOp) Run(args [][]byte) ([][]byte, error) {
|
||||
return nil, fmt.Errorf("expected 1 arg, got %v", len(args))
|
||||
}
|
||||
value := args[0]
|
||||
hasher := tmhash.New()
|
||||
hasher.Write(value)
|
||||
vhash := hasher.Sum(nil)
|
||||
|
||||
vhash := sha256.Sum256(value)
|
||||
|
||||
bz := new(bytes.Buffer)
|
||||
// Wrap <op.Key, vhash> to hash the KVPair.
|
||||
encodeByteSlice(bz, op.key) //nolint: errcheck // does not error
|
||||
encodeByteSlice(bz, vhash) //nolint: errcheck // does not error
|
||||
encodeByteSlice(bz, op.key) //nolint: errcheck // does not error
|
||||
encodeByteSlice(bz, vhash[:]) //nolint: errcheck // does not error
|
||||
kvhash := leafHash(bz.Bytes())
|
||||
|
||||
if !bytes.Equal(kvhash, op.Proof.LeafHash) {
|
||||
|
||||
@@ -20,7 +20,7 @@ import (
|
||||
"encoding/hex"
|
||||
"testing"
|
||||
|
||||
"github.com/tendermint/tendermint/crypto/tmhash"
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
)
|
||||
|
||||
func TestRFC6962Hasher(t *testing.T) {
|
||||
@@ -39,7 +39,7 @@ func TestRFC6962Hasher(t *testing.T) {
|
||||
// echo -n '' | sha256sum
|
||||
{
|
||||
desc: "RFC6962 Empty Tree",
|
||||
want: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"[:tmhash.Size*2],
|
||||
want: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"[:crypto.HashSize*2],
|
||||
got: emptyTreeHash,
|
||||
},
|
||||
|
||||
@@ -47,19 +47,19 @@ func TestRFC6962Hasher(t *testing.T) {
|
||||
// echo -n 00 | xxd -r -p | sha256sum
|
||||
{
|
||||
desc: "RFC6962 Empty Leaf",
|
||||
want: "6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d"[:tmhash.Size*2],
|
||||
want: "6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d"[:crypto.HashSize*2],
|
||||
got: emptyLeafHash,
|
||||
},
|
||||
// echo -n 004C313233343536 | xxd -r -p | sha256sum
|
||||
{
|
||||
desc: "RFC6962 Leaf",
|
||||
want: "395aa064aa4c29f7010acfe3f25db9485bbd4b91897b6ad7ad547639252b4d56"[:tmhash.Size*2],
|
||||
want: "395aa064aa4c29f7010acfe3f25db9485bbd4b91897b6ad7ad547639252b4d56"[:crypto.HashSize*2],
|
||||
got: leafHash,
|
||||
},
|
||||
// echo -n 014E3132334E343536 | xxd -r -p | sha256sum
|
||||
{
|
||||
desc: "RFC6962 Node",
|
||||
want: "aa217fe888e47007fa15edab33c2b492a722cb106c64667fc2b044444de66bbb"[:tmhash.Size*2],
|
||||
want: "aa217fe888e47007fa15edab33c2b492a722cb106c64667fc2b044444de66bbb"[:crypto.HashSize*2],
|
||||
got: innerHash([]byte("N123"), []byte("N456")),
|
||||
},
|
||||
} {
|
||||
|
||||
@@ -7,7 +7,7 @@ import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/tendermint/tendermint/crypto/tmhash"
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
ctest "github.com/tendermint/tendermint/internal/libs/test"
|
||||
tmrand "github.com/tendermint/tendermint/libs/rand"
|
||||
)
|
||||
@@ -53,7 +53,7 @@ func TestProof(t *testing.T) {
|
||||
|
||||
items := make([][]byte, total)
|
||||
for i := 0; i < total; i++ {
|
||||
items[i] = testItem(tmrand.Bytes(tmhash.Size))
|
||||
items[i] = testItem(tmrand.Bytes(crypto.HashSize))
|
||||
}
|
||||
|
||||
rootHash = HashFromByteSlices(items)
|
||||
@@ -106,7 +106,7 @@ func TestHashAlternatives(t *testing.T) {
|
||||
|
||||
items := make([][]byte, total)
|
||||
for i := 0; i < total; i++ {
|
||||
items[i] = testItem(tmrand.Bytes(tmhash.Size))
|
||||
items[i] = testItem(tmrand.Bytes(crypto.HashSize))
|
||||
}
|
||||
|
||||
rootHash1 := HashFromByteSlicesIterative(items)
|
||||
@@ -119,7 +119,7 @@ func BenchmarkHashAlternatives(b *testing.B) {
|
||||
|
||||
items := make([][]byte, total)
|
||||
for i := 0; i < total; i++ {
|
||||
items[i] = testItem(tmrand.Bytes(tmhash.Size))
|
||||
items[i] = testItem(tmrand.Bytes(crypto.HashSize))
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
|
||||
@@ -1,35 +1,15 @@
package crypto

import (
	crand "crypto/rand"
	"encoding/hex"
	"io"
	"crypto/rand"
)

// This only uses the OS's randomness
func randBytes(numBytes int) []byte {
func CRandBytes(numBytes int) []byte {
	b := make([]byte, numBytes)
	_, err := crand.Read(b)
	_, err := rand.Read(b)
	if err != nil {
		panic(err)
	}
	return b
}

// This only uses the OS's randomness
func CRandBytes(numBytes int) []byte {
	return randBytes(numBytes)
}

// CRandHex returns a hex encoded string that's floor(numDigits/2) * 2 long.
//
// Note: CRandHex(24) gives 96 bits of randomness that
// are usually strong enough for most purposes.
func CRandHex(numDigits int) string {
	return hex.EncodeToString(CRandBytes(numDigits / 2))
}

// Returns a crand.Reader.
func CReader() io.Reader {
	return crand.Reader
}
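For reference, a sketch of the collapsed helpers using only the standard library: after this diff, drawing OS randomness is a direct `crypto/rand.Read` call with no intermediate wrapper. The lower-case names below are local stand-ins for illustration, not the package's exported API.

```go
package main

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"
)

// cRandBytes reads numBytes of OS randomness and panics on failure,
// mirroring the simplified CRandBytes above.
func cRandBytes(numBytes int) []byte {
	b := make([]byte, numBytes)
	if _, err := rand.Read(b); err != nil {
		panic(err)
	}
	return b
}

// cRandHex mirrors CRandHex: floor(numDigits/2)*2 hex digits.
func cRandHex(numDigits int) string {
	return hex.EncodeToString(cRandBytes(numDigits / 2))
}

func main() {
	fmt.Println(cRandHex(24)) // 24 hex digits ≈ 96 bits of randomness
}
```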
@@ -2,6 +2,7 @@ package secp256k1
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"crypto/subtle"
|
||||
"fmt"
|
||||
@@ -70,7 +71,7 @@ func (privKey PrivKey) Type() string {
|
||||
// GenPrivKey generates a new ECDSA private key on curve secp256k1 private key.
|
||||
// It uses OS randomness to generate the private key.
|
||||
func GenPrivKey() PrivKey {
|
||||
return genPrivKey(crypto.CReader())
|
||||
return genPrivKey(rand.Reader)
|
||||
}
|
||||
|
||||
// genPrivKey generates a new secp256k1 private key using the provided reader.
|
||||
@@ -190,8 +191,8 @@ var secp256k1halfN = new(big.Int).Rsh(secp256k1.S256().N, 1)
|
||||
// The returned signature will be of the form R || S (in lower-S form).
|
||||
func (privKey PrivKey) Sign(msg []byte) ([]byte, error) {
|
||||
priv, _ := secp256k1.PrivKeyFromBytes(secp256k1.S256(), privKey)
|
||||
|
||||
sig, err := priv.Sign(crypto.Sha256(msg))
|
||||
seed := sha256.Sum256(msg)
|
||||
sig, err := priv.Sign(seed[:])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -220,7 +221,8 @@ func (pubKey PubKey) VerifySignature(msg []byte, sigStr []byte) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
return signature.Verify(crypto.Sha256(msg), pub)
|
||||
seed := sha256.Sum256(msg)
|
||||
return signature.Verify(seed[:], pub)
|
||||
}
|
||||
|
||||
// Read Signature struct from R || S. Caller needs to ensure
|
||||
|
||||
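The secp256k1 changes above boil down to hash-then-sign: the message is pre-hashed with `sha256.Sum256` and the 32-byte digest is what gets signed and verified. The sketch below illustrates that flow with the standard library's `crypto/ecdsa` over P-256, purely for illustration; the repository itself signs on the secp256k1 curve via a third-party package, which is not reproduced here.

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/sha256"
	"fmt"
)

func main() {
	// GenPrivKey-style: draw the key straight from rand.Reader.
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}

	msg := []byte("transaction bytes")

	// As in the diff: seed := sha256.Sum256(msg); sign/verify over seed[:].
	digest := sha256.Sum256(msg)

	sig, err := ecdsa.SignASN1(rand.Reader, priv, digest[:])
	if err != nil {
		panic(err)
	}
	fmt.Println(ecdsa.VerifyASN1(&priv.PublicKey, digest[:], sig)) // true
}
```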
@@ -1,6 +1,7 @@
|
||||
package sr25519
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"fmt"
|
||||
|
||||
"github.com/oasisprotocol/curve25519-voi/primitives/sr25519"
|
||||
@@ -42,5 +43,5 @@ func (b *BatchVerifier) Add(key crypto.PubKey, msg, signature []byte) error {
|
||||
}
|
||||
|
||||
func (b *BatchVerifier) Verify() (bool, []bool) {
|
||||
return b.BatchVerifier.Verify(crypto.CReader())
|
||||
return b.BatchVerifier.Verify(rand.Reader)
|
||||
}
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
package sr25519
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
@@ -48,7 +50,7 @@ func (privKey PrivKey) Sign(msg []byte) ([]byte, error) {
|
||||
|
||||
st := signingCtx.NewTranscriptBytes(msg)
|
||||
|
||||
sig, err := privKey.kp.Sign(crypto.CReader(), st)
|
||||
sig, err := privKey.kp.Sign(rand.Reader, st)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("sr25519: failed to sign message: %w", err)
|
||||
}
|
||||
@@ -132,7 +134,7 @@ func (privKey *PrivKey) UnmarshalJSON(data []byte) error {
|
||||
// It uses OS randomness in conjunction with the current global random seed
|
||||
// in tendermint/libs/common to generate the private key.
|
||||
func GenPrivKey() PrivKey {
|
||||
return genPrivKey(crypto.CReader())
|
||||
return genPrivKey(rand.Reader)
|
||||
}
|
||||
|
||||
func genPrivKey(rng io.Reader) PrivKey {
|
||||
@@ -154,10 +156,9 @@ func genPrivKey(rng io.Reader) PrivKey {
|
||||
// NOTE: secret should be the output of a KDF like bcrypt,
|
||||
// if it's derived from user input.
|
||||
func GenPrivKeyFromSecret(secret []byte) PrivKey {
|
||||
seed := crypto.Sha256(secret) // Not Ripemd160 because we want 32 bytes.
|
||||
|
||||
seed := sha256.Sum256(secret)
|
||||
var privKey PrivKey
|
||||
if err := privKey.msk.UnmarshalBinary(seed); err != nil {
|
||||
if err := privKey.msk.UnmarshalBinary(seed[:]); err != nil {
|
||||
panic("sr25519: failed to deserialize MiniSecretKey: " + err.Error())
|
||||
}
|
||||
|
||||
|
||||
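The `GenPrivKeyFromSecret` change above follows a common seed-to-key pattern: hash the secret down to a fixed 32-byte seed, then deserialize that seed into a private key. Below is a hedged sketch of the same flow using `crypto/ed25519` from the standard library, chosen only because it accepts the same 32-byte seed; the diff itself targets sr25519 via curve25519-voi. As the original comment notes, secrets derived from user input should first pass through a KDF such as bcrypt.

```go
package main

import (
	"crypto/ed25519"
	"crypto/sha256"
	"fmt"
)

// genPrivKeyFromSecret sketches the pattern in the diff: hash the secret to a
// 32-byte seed, then build the key from that seed. sha256.Sum256 returns a
// [32]byte, hence the seed[:] slice at the call site.
func genPrivKeyFromSecret(secret []byte) ed25519.PrivateKey {
	seed := sha256.Sum256(secret)
	return ed25519.NewKeyFromSeed(seed[:])
}

func main() {
	priv := genPrivKeyFromSecret([]byte("output of a KDF such as bcrypt"))
	msg := []byte("hello")
	sig := ed25519.Sign(priv, msg)
	fmt.Println(ed25519.Verify(priv.Public().(ed25519.PublicKey), msg, sig)) // true
}
```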
@@ -7,7 +7,6 @@ import (
|
||||
"github.com/oasisprotocol/curve25519-voi/primitives/sr25519"
|
||||
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
"github.com/tendermint/tendermint/crypto/tmhash"
|
||||
)
|
||||
|
||||
var _ crypto.PubKey = PubKey{}
|
||||
@@ -31,7 +30,7 @@ func (pubKey PubKey) Address() crypto.Address {
|
||||
if len(pubKey) != PubKeySize {
|
||||
panic("pubkey is incorrect size")
|
||||
}
|
||||
return crypto.Address(tmhash.SumTruncated(pubKey))
|
||||
return crypto.AddressHash(pubKey)
|
||||
}
|
||||
|
||||
// Bytes returns the PubKey byte format.
|
||||
|
||||
@@ -1,65 +0,0 @@
|
||||
package tmhash
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"hash"
|
||||
)
|
||||
|
||||
const (
|
||||
Size = sha256.Size
|
||||
BlockSize = sha256.BlockSize
|
||||
)
|
||||
|
||||
// New returns a new hash.Hash.
|
||||
func New() hash.Hash {
|
||||
return sha256.New()
|
||||
}
|
||||
|
||||
// Sum returns the SHA256 of the bz.
|
||||
func Sum(bz []byte) []byte {
|
||||
h := sha256.Sum256(bz)
|
||||
return h[:]
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------
|
||||
|
||||
const (
|
||||
TruncatedSize = 20
|
||||
)
|
||||
|
||||
type sha256trunc struct {
|
||||
sha256 hash.Hash
|
||||
}
|
||||
|
||||
func (h sha256trunc) Write(p []byte) (n int, err error) {
|
||||
return h.sha256.Write(p)
|
||||
}
|
||||
func (h sha256trunc) Sum(b []byte) []byte {
|
||||
shasum := h.sha256.Sum(b)
|
||||
return shasum[:TruncatedSize]
|
||||
}
|
||||
|
||||
func (h sha256trunc) Reset() {
|
||||
h.sha256.Reset()
|
||||
}
|
||||
|
||||
func (h sha256trunc) Size() int {
|
||||
return TruncatedSize
|
||||
}
|
||||
|
||||
func (h sha256trunc) BlockSize() int {
|
||||
return h.sha256.BlockSize()
|
||||
}
|
||||
|
||||
// NewTruncated returns a new hash.Hash.
|
||||
func NewTruncated() hash.Hash {
|
||||
return sha256trunc{
|
||||
sha256: sha256.New(),
|
||||
}
|
||||
}
|
||||
|
||||
// SumTruncated returns the first 20 bytes of SHA256 of the bz.
|
||||
func SumTruncated(bz []byte) []byte {
|
||||
hash := sha256.Sum256(bz)
|
||||
return hash[:TruncatedSize]
|
||||
}
|
||||
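Since the `tmhash` package is deleted in this diff, a short reminder of what callers now do inline: both helpers were thin wrappers over the standard library, with truncated sums keeping only the first 20 bytes of the SHA-256 digest (the address length). The names below are local stand-ins, not the removed API.

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// truncatedSize matches the deleted tmhash.TruncatedSize constant.
const truncatedSize = 20

// sum reproduces tmhash.Sum: the full 32-byte SHA-256 digest as a []byte.
func sum(bz []byte) []byte {
	h := sha256.Sum256(bz)
	return h[:]
}

// sumTruncated reproduces tmhash.SumTruncated: the first 20 bytes only.
func sumTruncated(bz []byte) []byte {
	h := sha256.Sum256(bz)
	return h[:truncatedSize]
}

func main() {
	pubKey := []byte("example public key bytes")
	fmt.Println(hex.EncodeToString(sum(pubKey)))          // 64 hex chars
	fmt.Println(hex.EncodeToString(sumTruncated(pubKey))) // 40 hex chars
}
```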
@@ -1,48 +0,0 @@
|
||||
package tmhash_test
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/tendermint/tendermint/crypto/tmhash"
|
||||
)
|
||||
|
||||
func TestHash(t *testing.T) {
|
||||
testVector := []byte("abc")
|
||||
hasher := tmhash.New()
|
||||
_, err := hasher.Write(testVector)
|
||||
require.NoError(t, err)
|
||||
bz := hasher.Sum(nil)
|
||||
|
||||
bz2 := tmhash.Sum(testVector)
|
||||
|
||||
hasher = sha256.New()
|
||||
_, err = hasher.Write(testVector)
|
||||
require.NoError(t, err)
|
||||
bz3 := hasher.Sum(nil)
|
||||
|
||||
assert.Equal(t, bz, bz2)
|
||||
assert.Equal(t, bz, bz3)
|
||||
}
|
||||
|
||||
func TestHashTruncated(t *testing.T) {
|
||||
testVector := []byte("abc")
|
||||
hasher := tmhash.NewTruncated()
|
||||
_, err := hasher.Write(testVector)
|
||||
require.NoError(t, err)
|
||||
bz := hasher.Sum(nil)
|
||||
|
||||
bz2 := tmhash.SumTruncated(testVector)
|
||||
|
||||
hasher = sha256.New()
|
||||
_, err = hasher.Write(testVector)
|
||||
require.NoError(t, err)
|
||||
bz3 := hasher.Sum(nil)
|
||||
bz3 = bz3[:tmhash.TruncatedSize]
|
||||
|
||||
assert.Equal(t, bz, bz2)
|
||||
assert.Equal(t, bz, bz3)
|
||||
}
|
||||
@@ -1,3 +0,0 @@
package crypto

const Version = "0.9.0-dev"
@@ -182,7 +182,7 @@ node example/counter.js

In another window, reset and start `tendermint`:

```sh
tendermint unsafe-reset-all
tendermint reset unsafe-all
tendermint start
```
@@ -104,12 +104,17 @@ None
|
||||
- [ADR-013: Symmetric-Crypto](./adr-013-symmetric-crypto.md)
|
||||
- [ADR-022: ABCI-Errors](./adr-022-abci-errors.md)
|
||||
- [ADR-030: Consensus-Refactor](./adr-030-consensus-refactor.md)
|
||||
- [ADR-036: Empty Blocks via ABCI](./adr-036-empty-blocks-abci.md)
|
||||
- [ADR-037: Deliver-Block](./adr-037-deliver-block.md)
|
||||
- [ADR-038: Non-Zero-Start-Height](./adr-038-non-zero-start-height.md)
|
||||
- [ADR-040: Blockchain Reactor Refactor](./adr-040-blockchain-reactor-refactor.md)
|
||||
- [ADR-041: Proposer-Selection-via-ABCI](./adr-041-proposer-selection-via-abci.md)
|
||||
- [ADR-042: State Sync Design](./adr-042-state-sync.md)
|
||||
- [ADR-045: ABCI-Evidence](./adr-045-abci-evidence.md)
|
||||
- [ADR-050: Improved Trusted Peering](./adr-050-improved-trusted-peering.md)
|
||||
- [ADR-057: RPC](./adr-057-RPC.md)
|
||||
- [ADR-064: Batch Verification](./adr-064-batch-verification.md)
|
||||
- [ADR-069: Node Initialization](./adr-069-flexible-node-initialization.md)
|
||||
- [ADR-071: Proposer-Based Timestamps](adr-071-proposer-based-timestamps.md)
|
||||
- [ADR-071: Proposer-Based Timestamps](./adr-071-proposer-based-timestamps.md)
|
||||
- [ADR-073: Adopt LibP2P](./adr-073-libp2p.md)
|
||||
- [ADR-074: Migrate Timeout Parameters to Consensus Parameters](./adr-074-timeout-params.md)
|
||||
|
||||
|
||||
@@ -67,7 +67,7 @@ The 8 timeout parameters will be consolidated down to 6. These will be as follow
|
||||
parameters.
|
||||
* `TimeoutCommit`
|
||||
* Same as current `TimeoutCommit`.
|
||||
* `EnableTimeoutCommitBypass`
|
||||
* `BypassCommitTimeout`
|
||||
* Same as current `SkipTimeoutCommit`, renamed for clarity.
|
||||
|
||||
A safe default will be provided by Tendermint for each of these parameters and
|
||||
@@ -149,7 +149,7 @@ message TimeoutParams {
|
||||
google.protobuf.Duration vote = 3;
|
||||
google.protobuf.Duration vote_delta = 4;
|
||||
google.protobuf.Duration commit = 5;
|
||||
bool enable_commit_timeout_bypass = 6;
|
||||
bool bypass_commit_timeout = 6;
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
@@ -68,10 +68,10 @@ Tendermint is in essence similar software, but with two key differences:
|
||||
|
||||
- It is Byzantine Fault Tolerant, meaning it can only tolerate up to a
|
||||
1/3 of failures, but those failures can include arbitrary behaviour -
|
||||
including hacking and malicious attacks.
|
||||
- It does not specify a particular application, like a fancy key-value
|
||||
store. Instead, it focuses on arbitrary state machine replication,
|
||||
so developers can build the application logic that's right for them,
|
||||
including hacking and malicious attacks.
|
||||
- It does not specify a particular application, like a fancy key-value
|
||||
store. Instead, it focuses on arbitrary state machine replication,
|
||||
so developers can build the application logic that's right for them,
|
||||
from key-value store to cryptocurrency to e-voting platform and beyond.
|
||||
|
||||
### Bitcoin, Ethereum, etc
|
||||
@@ -104,12 +104,10 @@ to Tendermint, but is more opinionated about how the state is managed,
and requires that all application behaviour runs in potentially many
docker containers, modules it calls "chaincode". It uses an
implementation of [PBFT](http://pmg.csail.mit.edu/papers/osdi99.pdf)
from a team at IBM that is [augmented to handle potentially
non-deterministic
chaincode](https://www.zurich.ibm.com/~cca/papers/sieve.pdf). It is
possible to implement this docker-based behaviour as an ABCI app in
Tendermint, though extending Tendermint to handle non-determinism
remains for future work.
from a team at IBM that is augmented to handle potentially non-deterministic
chaincode. It is possible to implement this docker-based behaviour as an ABCI app
in Tendermint, though extending Tendermint to handle non-determinism remains
for future work.

[Burrow](https://github.com/hyperledger/burrow) is an implementation of
the Ethereum Virtual Machine and Ethereum transaction mechanics, with
@@ -16,7 +16,8 @@ the parameters set with their default values. It will look something
|
||||
like the file below, however, double check by inspecting the
|
||||
`config.toml` created with your version of `tendermint` installed:
|
||||
|
||||
```toml# This is a TOML config file.
|
||||
```toml
|
||||
# This is a TOML config file.
|
||||
# For more information, see https://github.com/toml-lang/toml
|
||||
|
||||
# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or
|
||||
@@ -33,11 +34,10 @@ like the file below, however, double check by inspecting the
|
||||
proxy-app = "tcp://127.0.0.1:26658"
|
||||
|
||||
# A custom human readable name for this node
|
||||
moniker = "ape"
|
||||
moniker = "sidewinder"
|
||||
|
||||
|
||||
# Mode of Node: full | validator | seed (default: "validator")
|
||||
# * validator node (default)
|
||||
# Mode of Node: full | validator | seed
|
||||
# * validator node
|
||||
# - all reactors
|
||||
# - with priv_validator_key.json, priv_validator_state.json
|
||||
# * full node
|
||||
@@ -48,11 +48,6 @@ moniker = "ape"
|
||||
# - No priv_validator_key.json, priv_validator_state.json
|
||||
mode = "validator"
|
||||
|
||||
# If this node is many blocks behind the tip of the chain, FastSync
|
||||
# allows them to catchup quickly by downloading blocks in parallel
|
||||
# and verifying their commits
|
||||
fast-sync = true
|
||||
|
||||
# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb
|
||||
# * goleveldb (github.com/syndtr/goleveldb - most popular implementation)
|
||||
# - pure go
|
||||
@@ -120,10 +115,10 @@ laddr = ""
|
||||
client-certificate-file = ""
|
||||
|
||||
# Client key generated while creating certificates for secure connection
|
||||
validator-client-key-file = ""
|
||||
client-key-file = ""
|
||||
|
||||
# Path to the Root Certificate Authority used to sign both client and server certificates
|
||||
certificate-authority = ""
|
||||
root-ca-file = ""
|
||||
|
||||
|
||||
#######################################################################
|
||||
@@ -149,26 +144,10 @@ cors-allowed-methods = ["HEAD", "GET", "POST", ]
|
||||
# A list of non simple headers the client is allowed to use with cross-domain requests
|
||||
cors-allowed-headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ]
|
||||
|
||||
# TCP or UNIX socket address for the gRPC server to listen on
|
||||
# NOTE: This server only supports /broadcast_tx_commit
|
||||
# Deprecated gRPC in the RPC layer of Tendermint will be deprecated in 0.36.
|
||||
grpc-laddr = ""
|
||||
|
||||
# Maximum number of simultaneous connections.
|
||||
# Does not include RPC (HTTP&WebSocket) connections. See max-open-connections
|
||||
# If you want to accept a larger number than the default, make sure
|
||||
# you increase your OS limits.
|
||||
# 0 - unlimited.
|
||||
# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
|
||||
# 1024 - 40 - 10 - 50 = 924 = ~900
|
||||
# Deprecated gRPC in the RPC layer of Tendermint will be deprecated in 0.36.
|
||||
grpc-max-open-connections = 900
|
||||
|
||||
# Activate unsafe RPC commands like /dial-seeds and /unsafe-flush-mempool
|
||||
unsafe = false
|
||||
|
||||
# Maximum number of simultaneous connections (including WebSocket).
|
||||
# Does not include gRPC connections. See grpc-max-open-connections
|
||||
# If you want to accept a larger number than the default, make sure
|
||||
# you increase your OS limits.
|
||||
# 0 - unlimited.
|
||||
@@ -182,10 +161,37 @@ max-open-connections = 900
|
||||
max-subscription-clients = 100
|
||||
|
||||
# Maximum number of unique queries a given client can /subscribe to
|
||||
# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to
|
||||
# the estimated # maximum number of broadcast_tx_commit calls per block.
|
||||
# If you're using a Local RPC client and /broadcast_tx_commit, set this
|
||||
# to the estimated maximum number of broadcast_tx_commit calls per block.
|
||||
max-subscriptions-per-client = 5
|
||||
|
||||
# If true, disable the websocket interface to the RPC service. This has
|
||||
# the effect of disabling the /subscribe, /unsubscribe, and /unsubscribe_all
|
||||
# methods for event subscription.
|
||||
#
|
||||
# EXPERIMENTAL: This setting will be removed in Tendermint v0.37.
|
||||
experimental-disable-websocket = false
|
||||
|
||||
# The time window size for the event log. All events up to this long before
|
||||
# the latest (up to EventLogMaxItems) will be available for subscribers to
|
||||
# fetch via the /events method. If 0 (the default) the event log and the
|
||||
# /events RPC method are disabled.
|
||||
event-log-window-size = "0s"
|
||||
|
||||
# The maximum number of events that may be retained by the event log. If
|
||||
# this value is 0, no upper limit is set. Otherwise, items in excess of
|
||||
# this number will be discarded from the event log.
|
||||
#
|
||||
# Warning: This setting is a safety valve. Setting it too low may cause
|
||||
# subscribers to miss events. Try to choose a value higher than the
|
||||
# maximum worst-case expected event load within the chosen window size in
|
||||
# ordinary operation.
|
||||
#
|
||||
# For example, if the window size is 10 minutes and the node typically
|
||||
# averages 1000 events per ten minutes, but with occasional known spikes of
|
||||
# up to 2000, choose a value > 2000.
|
||||
event-log-max-items = 0
|
||||
|
||||
# How long to wait for a tx to be committed during /broadcast_tx_commit.
|
||||
# WARNING: Using a value larger than 10s will result in increasing the
|
||||
# global HTTP write timeout, which applies to all connections and endpoints.
|
||||
@@ -252,63 +258,12 @@ persistent-peers = ""
|
||||
# UPNP port forwarding
|
||||
upnp = false
|
||||
|
||||
# Path to address book
|
||||
# TODO: Remove once p2p refactor is complete
|
||||
# ref: https:#github.com/tendermint/tendermint/issues/5670
|
||||
addr-book-file = "config/addrbook.json"
|
||||
|
||||
# Set true for strict address routability rules
|
||||
# Set false for private or local networks
|
||||
addr-book-strict = true
|
||||
|
||||
# Maximum number of inbound peers
|
||||
#
|
||||
# TODO: Remove once p2p refactor is complete in favor of MaxConnections.
|
||||
# ref: https://github.com/tendermint/tendermint/issues/5670
|
||||
max-num-inbound-peers = 40
|
||||
|
||||
# Maximum number of outbound peers to connect to, excluding persistent peers
|
||||
#
|
||||
# TODO: Remove once p2p refactor is complete in favor of MaxConnections.
|
||||
# ref: https://github.com/tendermint/tendermint/issues/5670
|
||||
max-num-outbound-peers = 10
|
||||
|
||||
# Maximum number of connections (inbound and outbound).
|
||||
max-connections = 64
|
||||
|
||||
# Rate limits the number of incoming connection attempts per IP address.
|
||||
max-incoming-connection-attempts = 100
|
||||
|
||||
# List of node IDs, to which a connection will be (re)established ignoring any existing limits
|
||||
# TODO: Remove once p2p refactor is complete
|
||||
# ref: https:#github.com/tendermint/tendermint/issues/5670
|
||||
unconditional-peer-ids = ""
|
||||
|
||||
# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used)
|
||||
# TODO: Remove once p2p refactor is complete
|
||||
# ref: https:#github.com/tendermint/tendermint/issues/5670
|
||||
persistent-peers-max-dial-period = "0s"
|
||||
|
||||
# Time to wait before flushing messages out on the connection
|
||||
# TODO: Remove once p2p refactor is complete
|
||||
# ref: https:#github.com/tendermint/tendermint/issues/5670
|
||||
flush-throttle-timeout = "100ms"
|
||||
|
||||
# Maximum size of a message packet payload, in bytes
|
||||
# TODO: Remove once p2p refactor is complete
|
||||
# ref: https:#github.com/tendermint/tendermint/issues/5670
|
||||
max-packet-msg-payload-size = 1400
|
||||
|
||||
# Rate at which packets can be sent, in bytes/second
|
||||
# TODO: Remove once p2p refactor is complete
|
||||
# ref: https:#github.com/tendermint/tendermint/issues/5670
|
||||
send-rate = 5120000
|
||||
|
||||
# Rate at which packets can be received, in bytes/second
|
||||
# TODO: Remove once p2p refactor is complete
|
||||
# ref: https:#github.com/tendermint/tendermint/issues/5670
|
||||
recv-rate = 5120000
|
||||
|
||||
# Set true to enable the peer-exchange reactor
|
||||
pex = true
|
||||
|
||||
@@ -323,16 +278,28 @@ allow-duplicate-ip = false
|
||||
handshake-timeout = "20s"
|
||||
dial-timeout = "3s"
|
||||
|
||||
# Time to wait before flushing messages out on the connection
|
||||
# TODO: Remove once MConnConnection is removed.
|
||||
flush-throttle-timeout = "100ms"
|
||||
|
||||
# Maximum size of a message packet payload, in bytes
|
||||
# TODO: Remove once MConnConnection is removed.
|
||||
max-packet-msg-payload-size = 1400
|
||||
|
||||
# Rate at which packets can be sent, in bytes/second
|
||||
# TODO: Remove once MConnConnection is removed.
|
||||
send-rate = 5120000
|
||||
|
||||
# Rate at which packets can be received, in bytes/second
|
||||
# TODO: Remove once MConnConnection is removed.
|
||||
recv-rate = 5120000
|
||||
|
||||
|
||||
#######################################################
|
||||
### Mempool Configuration Option ###
|
||||
#######################################################
|
||||
[mempool]
|
||||
|
||||
# Mempool version to use:
|
||||
# 1) "v0" - The legacy non-prioritized mempool reactor.
|
||||
# 2) "v1" (default) - The prioritized mempool reactor.
|
||||
version = "v1"
|
||||
|
||||
recheck = true
|
||||
broadcast = true
|
||||
|
||||
@@ -388,22 +355,30 @@ ttl-num-blocks = 0
|
||||
# starting from the height of the snapshot.
|
||||
enable = false
|
||||
|
||||
# RPC servers (comma-separated) for light client verification of the synced state machine and
|
||||
# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding
|
||||
# header hash obtained from a trusted source, and a period during which validators can be trusted.
|
||||
#
|
||||
# For Cosmos SDK-based chains, trust-period should usually be about 2/3 of the unbonding time (~2
|
||||
# weeks) during which they can be financially punished (slashed) for misbehavior.
|
||||
# State sync uses light client verification to verify state. This can be done either through the
|
||||
# P2P layer or RPC layer. Set this to true to use the P2P layer. If false (default), RPC layer
|
||||
# will be used.
|
||||
use-p2p = false
|
||||
|
||||
# If using RPC, at least two addresses need to be provided. They should be compatible with net.Dial,
|
||||
# for example: "host.example.com:2125"
|
||||
rpc-servers = ""
|
||||
|
||||
# The hash and height of a trusted block. Must be within the trust-period.
|
||||
trust-height = 0
|
||||
trust-hash = ""
|
||||
|
||||
# The trust period should be set so that Tendermint can detect and gossip misbehavior before
|
||||
# it is considered expired. For chains based on the Cosmos SDK, one day less than the unbonding
|
||||
# period should suffice.
|
||||
trust-period = "168h0m0s"
|
||||
|
||||
# Time to spend discovering snapshots before initiating a restore.
|
||||
discovery-time = "15s"
|
||||
|
||||
# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp).
|
||||
# Will create a new, randomly named directory within, and remove it when done.
|
||||
# Temporary directory for state sync snapshot chunks, defaults to os.TempDir().
|
||||
# The synchronizer will create a new, randomly named directory within this directory
|
||||
# and remove it when the sync is complete.
|
||||
temp-dir = ""
|
||||
|
||||
# The timeout duration before re-requesting a chunk, possibly from a different
|
||||
@@ -413,21 +388,6 @@ chunk-request-timeout = "15s"
|
||||
# The number of concurrent chunk and block fetchers to run (default: 4).
|
||||
fetchers = "4"
|
||||
|
||||
#######################################################
|
||||
### Block Sync Configuration Connections ###
|
||||
#######################################################
|
||||
[blocksync]
|
||||
|
||||
# If this node is many blocks behind the tip of the chain, BlockSync
|
||||
# allows them to catchup quickly by downloading blocks in parallel
|
||||
# and verifying their commits
|
||||
enable = true
|
||||
|
||||
# Block Sync version to use:
|
||||
# 1) "v0" (default) - the standard block sync implementation
|
||||
# 2) "v2" - DEPRECATED, please use v0
|
||||
version = "v0"
|
||||
|
||||
#######################################################
|
||||
### Consensus Configuration Options ###
|
||||
#######################################################
|
||||
@@ -435,32 +395,12 @@ version = "v0"
|
||||
|
||||
wal-file = "data/cs.wal/wal"
|
||||
|
||||
# How long we wait for a proposal block before prevoting nil
|
||||
timeout-propose = "3s"
|
||||
# How much timeout-propose increases with each round
|
||||
timeout-propose-delta = "500ms"
|
||||
# How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil)
|
||||
timeout-prevote = "1s"
|
||||
# How much the timeout-prevote increases with each round
|
||||
timeout-prevote-delta = "500ms"
|
||||
# How long we wait after receiving +2/3 precommits for “anything” (ie. not a single block or nil)
|
||||
timeout-precommit = "1s"
|
||||
# How much the timeout-precommit increases with each round
|
||||
timeout-precommit-delta = "500ms"
|
||||
# How long we wait after committing a block, before starting on the new
|
||||
# height (this gives us a chance to receive some more precommits, even
|
||||
# though we already have +2/3).
|
||||
timeout-commit = "1s"
|
||||
|
||||
# How many blocks to look back to check existence of the node's consensus votes before joining consensus
|
||||
# When non-zero, the node will panic upon restart
|
||||
# if the same consensus key was used to sign {double-sign-check-height} last blocks.
|
||||
# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic.
|
||||
double-sign-check-height = 0
|
||||
|
||||
# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
|
||||
skip-timeout-commit = false
|
||||
|
||||
# EmptyBlocks mode and possible interval between empty blocks
|
||||
create-empty-blocks = true
|
||||
create-empty-blocks-interval = "0s"
|
||||
@@ -469,6 +409,50 @@ create-empty-blocks-interval = "0s"
|
||||
peer-gossip-sleep-duration = "100ms"
|
||||
peer-query-maj23-sleep-duration = "2s"
|
||||
|
||||
### Unsafe Timeout Overrides ###
|
||||
|
||||
# These fields provide temporary overrides for the Timeout consensus parameters.
|
||||
# Use of these parameters is strongly discouraged. Using these parameters may have serious
|
||||
# liveness implications for the validator and for the chain.
|
||||
#
|
||||
# These fields will be removed from the configuration file in the v0.37 release of Tendermint.
|
||||
# For additional information, see ADR-74:
|
||||
# https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-074-timeout-params.md
|
||||
|
||||
# This field provides an unsafe override of the Propose timeout consensus parameter.
|
||||
# This field configures how long the consensus engine will wait for a proposal block before prevoting nil.
|
||||
# If this field is set to a value greater than 0, it will take effect.
|
||||
# unsafe-propose-timeout-override = 0s
|
||||
|
||||
# This field provides an unsafe override of the ProposeDelta timeout consensus parameter.
|
||||
# This field configures how much the propose timeout increases with each round.
|
||||
# If this field is set to a value greater than 0, it will take effect.
|
||||
# unsafe-propose-timeout-delta-override = 0s
|
||||
|
||||
# This field provides an unsafe override of the Vote timeout consensus parameter.
|
||||
# This field configures how long the consensus engine will wait after
|
||||
# receiving +2/3 votes in a round.
|
||||
# If this field is set to a value greater than 0, it will take effect.
|
||||
# unsafe-vote-timeout-override = 0s
|
||||
|
||||
# This field provides an unsafe override of the VoteDelta timeout consensus parameter.
|
||||
# This field configures how much the vote timeout increases with each round.
|
||||
# If this field is set to a value greater than 0, it will take effect.
|
||||
# unsafe-vote-timeout-delta-override = 0s
|
||||
|
||||
# This field provides an unsafe override of the Commit timeout consensus parameter.
|
||||
# This field configures how long the consensus engine will wait after receiving
|
||||
# +2/3 precommits before beginning the next height.
|
||||
# If this field is set to a value greater than 0, it will take effect.
|
||||
# unsafe-commit-timeout-override = 0s
|
||||
|
||||
# This field provides an unsafe override of the BypassCommitTimeout consensus parameter.
|
||||
# This field configures if the consensus engine will wait for the full Commit timeout
|
||||
# before proceeding to the next height.
|
||||
# If this field is set to true, the consensus engine will proceed to the next height
|
||||
# as soon as the node has gathered votes from all of the validators on the network.
|
||||
# unsafe-bypass-commit-timeout-override =
|
||||
|
||||
#######################################################
|
||||
### Transaction Indexer Configuration Options ###
|
||||
#######################################################
|
||||
@@ -543,46 +527,6 @@ transactions every `create-empty-blocks-interval`. For instance, with
|
||||
Tendermint will only create blocks if there are transactions, or after waiting
|
||||
30 seconds without receiving any transactions.
|
||||
|
||||
## Consensus timeouts explained
|
||||
|
||||
There's a variety of information about timeouts in [Running in
|
||||
production](../tendermint-core/running-in-production.md)
|
||||
|
||||
You can also find more detailed technical explanation in the spec: [The latest
|
||||
gossip on BFT consensus](https://arxiv.org/abs/1807.04938).
|
||||
|
||||
```toml
|
||||
[consensus]
|
||||
...
|
||||
|
||||
timeout-propose = "3s"
|
||||
timeout-propose-delta = "500ms"
|
||||
timeout-prevote = "1s"
|
||||
timeout-prevote-delta = "500ms"
|
||||
timeout-precommit = "1s"
|
||||
timeout-precommit-delta = "500ms"
|
||||
timeout-commit = "1s"
|
||||
```
|
||||
|
||||
Note that in a successful round, the only timeout that we absolutely wait no
|
||||
matter what is `timeout-commit`.
|
||||
|
||||
Here's a brief summary of the timeouts:
|
||||
|
||||
- `timeout-propose` = how long we wait for a proposal block before prevoting
|
||||
nil
|
||||
- `timeout-propose-delta` = how much timeout-propose increases with each round
|
||||
- `timeout-prevote` = how long we wait after receiving +2/3 prevotes for
|
||||
anything (ie. not a single block or nil)
|
||||
- `timeout-prevote-delta` = how much the timeout-prevote increases with each
|
||||
round
|
||||
- `timeout-precommit` = how long we wait after receiving +2/3 precommits for
|
||||
anything (ie. not a single block or nil)
|
||||
- `timeout-precommit-delta` = how much the timeout-precommit increases with
|
||||
each round
|
||||
- `timeout-commit` = how long we wait after committing a block, before starting
|
||||
on the new height (this gives us a chance to receive some more precommits,
|
||||
even though we already have +2/3)
|
||||
|
||||
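To make the per-round behaviour in the summary above concrete, here is a minimal Go sketch of how a base timeout and its delta are usually combined: the wait grows by a fixed delta for every round beyond the first. The constants mirror the defaults in the TOML snippet (`timeout-propose = "3s"`, `timeout-propose-delta = "500ms"`); the exact formula Tendermint uses internally is an assumption here, not quoted from the source.

```go
package main

import (
	"fmt"
	"time"
)

// proposeTimeout returns how long to wait for a proposal in a given round:
// a base timeout plus a fixed delta per additional round.
func proposeTimeout(round int32) time.Duration {
	const (
		base  = 3 * time.Second
		delta = 500 * time.Millisecond
	)
	return base + time.Duration(round)*delta
}

func main() {
	for round := int32(0); round < 4; round++ {
		fmt.Printf("round %d: wait %v for a proposal\n", round, proposeTimeout(round))
	}
}
```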
## P2P settings
|
||||
|
||||
@@ -648,3 +592,27 @@ Example:
|
||||
```shell
|
||||
$ psql ... -f state/indexer/sink/psql/schema.sql
|
||||
```
|
||||
|
||||
## Unsafe Consensus Timeout Overrides
|
||||
|
||||
Tendermint version v0.36 provides a set of unsafe overrides for the consensus
|
||||
timing parameters. These parameters are provided as a safety measure in case of
|
||||
unusual timing issues during the upgrade to v0.36 so that an operator may
|
||||
override the timings for a single node. These overrides will be completely
|
||||
removed in Tendermint v0.37.
|
||||
|
||||
- `unsafe-propose-override`: How long the Tendermint consensus engine will wait
|
||||
for a proposal block before prevoting nil.
|
||||
- `unsafe-propose-delta-override`: How much the propose timeout increases with
|
||||
each round.
|
||||
- `unsafe-vote-override`: How long the consensus engine will wait after
|
||||
receiving +2/3 votes in a round.
|
||||
- `unsafe-vote-delta-override`: How much the vote timeout increases with each
|
||||
round.
|
||||
- `unsafe-commit-override`: How long the consensus engine will wait after
|
||||
receiving +2/3 precommits before beginning the next height.
|
||||
- `unsafe-bypass-commit-timeout-override`: Configures if the consensus engine
|
||||
will wait for the full commit timeout before proceeding to the next height. If
|
||||
this field is set to true, the consensus engine will proceed to the next
|
||||
height as soon as the node has gathered votes from all of the validators on
|
||||
the network.
|
||||
|
||||
@@ -37,7 +37,7 @@ There are two ways to generate certificates, [openssl](https://www.openssl.org/)
|
||||
- Install `Certstrap`:
|
||||
|
||||
```sh
|
||||
go get github.com/square/certstrap@v1.2.0
|
||||
go install github.com/square/certstrap@v1.2.0
|
||||
```
|
||||
|
||||
- Create certificate authority for self signing.
|
||||
|
||||
24 docs/package-lock.json (generated)
@@ -3037,9 +3037,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/async": {
|
||||
"version": "2.6.3",
|
||||
"resolved": "https://registry.npmjs.org/async/-/async-2.6.3.tgz",
|
||||
"integrity": "sha512-zflvls11DCy+dQWzTW2dzuilv8Z5X/pjfmZOWba6TNIVDm+2UDaJmXSOXlasHKfNBs8oo3M0aT50fDEWfKZjXg==",
|
||||
"version": "2.6.4",
|
||||
"resolved": "https://registry.npmjs.org/async/-/async-2.6.4.tgz",
|
||||
"integrity": "sha512-mzo5dfJYwAn29PeiJ0zvwTo04zj8HDJj0Mn8TD7sno7q12prdbnasKJHhkm2c1LgrhlJ0teaea8860oxi51mGA==",
|
||||
"dependencies": {
|
||||
"lodash": "^4.17.14"
|
||||
}
|
||||
@@ -8876,9 +8876,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/minimist": {
|
||||
"version": "1.2.5",
|
||||
"resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.5.tgz",
|
||||
"integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw=="
|
||||
"version": "1.2.6",
|
||||
"resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.6.tgz",
|
||||
"integrity": "sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q=="
|
||||
},
|
||||
"node_modules/mississippi": {
|
||||
"version": "3.0.0",
|
||||
@@ -16588,9 +16588,9 @@
|
||||
"integrity": "sha1-WWZ/QfrdTyDMvCu5a41Pf3jsA2c="
|
||||
},
|
||||
"async": {
|
||||
"version": "2.6.3",
|
||||
"resolved": "https://registry.npmjs.org/async/-/async-2.6.3.tgz",
|
||||
"integrity": "sha512-zflvls11DCy+dQWzTW2dzuilv8Z5X/pjfmZOWba6TNIVDm+2UDaJmXSOXlasHKfNBs8oo3M0aT50fDEWfKZjXg==",
|
||||
"version": "2.6.4",
|
||||
"resolved": "https://registry.npmjs.org/async/-/async-2.6.4.tgz",
|
||||
"integrity": "sha512-mzo5dfJYwAn29PeiJ0zvwTo04zj8HDJj0Mn8TD7sno7q12prdbnasKJHhkm2c1LgrhlJ0teaea8860oxi51mGA==",
|
||||
"requires": {
|
||||
"lodash": "^4.17.14"
|
||||
}
|
||||
@@ -21113,9 +21113,9 @@
|
||||
}
|
||||
},
|
||||
"minimist": {
|
||||
"version": "1.2.5",
|
||||
"resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.5.tgz",
|
||||
"integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw=="
|
||||
"version": "1.2.6",
|
||||
"resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.6.tgz",
|
||||
"integrity": "sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q=="
|
||||
},
|
||||
"mississippi": {
|
||||
"version": "3.0.0",
|
||||
|
||||
39 docs/presubmit.sh (executable file)
@@ -0,0 +1,39 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# This script verifies that each document in the docs and architecture
|
||||
# directory has a corresponding table-of-contents entry in its README file.
|
||||
#
|
||||
# This can be run manually from the command line.
|
||||
# It is also run in CI via the docs-toc.yml workflow.
|
||||
#
|
||||
set -euo pipefail
|
||||
|
||||
readonly base="$(dirname $0)"
|
||||
cd "$base"
|
||||
|
||||
readonly workdir="$(mktemp -d)"
|
||||
trap "rm -fr -- '$workdir'" EXIT
|
||||
|
||||
checktoc() {
|
||||
local dir="$1"
|
||||
local tag="$2"'-*-*'
|
||||
local out="$workdir/${dir}.out.txt"
|
||||
(
|
||||
cd "$dir" >/dev/null
|
||||
find . -type f -maxdepth 1 -name "$tag" -not -exec grep -q "({})" README.md ';' -print
|
||||
) > "$out"
|
||||
if [[ -s "$out" ]] ; then
|
||||
echo "-- The following files in $dir lack a ToC entry:
|
||||
"
|
||||
cat "$out"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
err=0
|
||||
|
||||
# Verify that each RFC and ADR has a ToC entry in its README file.
|
||||
checktoc architecture adr || ((err++))
|
||||
checktoc rfc rfc || ((err++))
|
||||
|
||||
exit $err
|
||||
@@ -52,5 +52,7 @@ sections.
|
||||
- [RFC-012: Event Indexing Revisited](./rfc-012-custom-indexing.md)
|
||||
- [RFC-013: ABCI++](./rfc-013-abci++.md)
|
||||
- [RFC-014: Semantic Versioning](./rfc-014-semantic-versioning.md)
|
||||
- [RFC-015: ABCI++ Tx Mutation](./rfc-015-abci++-tx-mutation.md)
|
||||
- [RFC-019: Configuration File Versioning](./rfc-019-config-version.md)
|
||||
|
||||
<!-- - [RFC-NNN: Title](./rfc-NNN-title.md) -->
|
||||
|
||||
400 docs/rfc/rfc-019-config-version.md (normal file)
@@ -0,0 +1,400 @@
|
||||
# RFC 019: Configuration File Versioning
|
||||
|
||||
## Changelog
|
||||
|
||||
- 19-Apr-2022: Initial draft (@creachadair)
|
||||
- 20-Apr-2022: Updates from review feedback (@creachadair)
|
||||
|
||||
## Abstract
|
||||
|
||||
Updating configuration settings is an essential part of upgrading an existing
|
||||
node to a new version of the Tendermint software. Unfortunately, it is also
|
||||
currently a very manual process. This document discusses some of the history of
|
||||
changes to the config format, actions we've taken to improve the tooling for
|
||||
configuration upgrades, and additional steps we may want to consider.
|
||||
|
||||
## Background
|
||||
|
||||
A Tendermint node reads configuration settings at startup from a TOML formatted
|
||||
text file, typically named `config.toml`. The contents of this file are defined
|
||||
by the [`github.com/tendermint/tendermint/config`][config-pkg] package.
|
||||
|
||||
Although many settings in this file remain valid from one version of Tendermint
|
||||
to the next, new versions of Tendermint often add, update, and remove settings.
|
||||
These changes often require manual intervention by operators who are upgrading
|
||||
their nodes.
|
||||
|
||||
I propose we should provide better tools and documentation to help operators
|
||||
make configuration changes correctly during version upgrades. Ideally, as much
|
||||
as possible of any configuration file update should be automated, and where
|
||||
that is not possible or practical, we should provide clear, explicit directions
|
||||
for what steps need to be taken manually. Moreover, when the node discovers
|
||||
incorrect or invalid configuration, we should improve the diagnostics it emits
|
||||
so that the operator can quickly and easily find the relevant documentation,
|
||||
without having to grep through source code.
|
||||
|
||||
## Discussion
|
||||
|
||||
By convention, we are supposed to document required changes to the config file
|
||||
in the `UPGRADING.md` file for the release that introduces them. Although we
|
||||
have mostly done this, the level of detail in the upgrading instructions is
|
||||
often insufficient for an operator to correctly update their file.
|
||||
|
||||
The updates vary widely in complexity: Operators may need to add new required
|
||||
settings, update obsolete values for existing settings, move or rename existing
|
||||
settings within the file, or remove obsolete settings (which are thus invalid).
|
||||
Here are a few examples of each of these cases:
|
||||
|
||||
- **New required settings:** Tendermint v0.35 added a new top-level `mode`
|
||||
setting that determines whether a node runs as a validator, a full node, or a
|
||||
seed node. The default value is `"full"`, which means the operator of a
|
||||
validator must manually add `mode = "validator"` (or set the `--mode` flag on
|
||||
the command line) for their node to come up in the correct mode.
|
||||
|
||||
- **Updated obsolete values:** Tendermint v0.35 removed support for versions
|
||||
`"v1"` and `"v2"` of the blocksync (formerly "fastsync") protocol, requiring
|
||||
any node using either of those values to update to `"v0"`.
|
||||
|
||||
- **Moved/renamed settings:** Version v0.34 moved the top-level `pprof_laddr`
|
||||
setting under the `[rpc]` section.
|
||||
|
||||
Version v0.35 renamed every setting in the file from `snake_case` to
|
||||
`kebab-case`, moved the top-level `fast_sync` setting into the `[blocksync]`
section (itself renamed from `[fastsync]`), and moved all the top-level
|
||||
`priv-validator-*` settings under a new `[priv-validator]` section with their
|
||||
prefix trimmed off.
|
||||
|
||||
- **Removed obsolete settings:** Version v0.34 removed the `index_all_keys` and
|
||||
`index_keys` settings from the `[tx_index]` section; version v0.35 removed
|
||||
the `wal-dir` setting from the `[mempool]` section, and version v0.36 removed
|
||||
the `[blocksync]` section entirely.
|
||||
|
||||
While many of these changes are mentioned in the config section of the upgrade
|
||||
instructions, some are not mentioned at all, or are hidden in other parts of
|
||||
the doc. For instance, the v0.34 `pprof_laddr` change was documented only as an
|
||||
RPC flag change. (A savvy reader might realize that the flag `--rpc.pprof_laddr`
|
||||
implies a corresponding config section, but it omits the related detail that
|
||||
there was a top-level setting that's been renamed). The lesson here is not
|
||||
that the docs are bad, but to point out that prose is not the most efficient
|
||||
format to convey detailed changes like this. The upgrading instructions are
|
||||
still valuable for the human reader to understand what to expect.
|
||||
|
||||
### Concrete Steps
|
||||
|
||||
As part of the v0.36 development cycle, we spent some time reverse-engineering
|
||||
the configuration changes since the v0.34 release and built an experimental
|
||||
command-line tool called [`confix`][confix], whose job it is to automatically
|
||||
update the settings in a `config.toml` file to the latest version. We also
|
||||
backported a version of this tool into the v0.35.x branch at release v0.35.4.
|
||||
|
||||
This tool should work fine for configuration files created by Tendermint v0.34
|
||||
and later, but does not (yet) know how to handle changes from prior versions of
|
||||
Tendermint. Part of the difficulty for older versions is simply logistical: To
|
||||
figure out which changes to apply, we need to understand something about the
|
||||
version that made the file, as well as the version we're converting it to.
|
||||
|
||||
> **Discussion point:** In the future we might want to consider incorporating
|
||||
> this into the node CLI directly, but we're keeping it separate for now until
|
||||
> we can get some feedback from operators.
|
||||
|
||||
For the experiment, we handled this by carefully searching the history of
|
||||
config format changes for shibboleths to bound the version: For example, the
|
||||
`[fastsync]` section was added in Tendermint v0.32 and renamed `[blocksync]` in
|
||||
Tendermint v0.35. So if we see a `[fastsync]` section, we have some confidence
|
||||
that the file was created by v0.32, v0.33, or v0.34.
|
||||
|
||||
But such signals are delicate: The `[blocksync]` section was removed in v0.36,
|
||||
so if we do not find `[fastsync]`, we cannot conclude from that alone that the
|
||||
file is from v0.31 or earlier -- we have to look for corroborating details.
|
||||
While such "sniffing" tactics are fine for an experiment, they aren't as robust
|
||||
as we might like.
|
||||
|
||||
This is especially relevant for configuration files that may have already been
|
||||
manually upgraded across several versions by the time we are asked to update
|
||||
them again. Another related concern is that we'd like to make sure conversion
|
||||
is idempotent, so that it would be safe to rerun the tool over an
|
||||
already-converted file without breaking anything.
|
||||
|
||||
### Config Versioning
|
||||
|
||||
One obvious tactic we could use for future releases is add a version marker to
|
||||
the config file. This would give tools like `confix` (and the node itself) a
|
||||
way to calibrate their expectations. Rather than being a version for the file
|
||||
itself, however, this version marker would indicate which version of Tendermint
|
||||
is needed to read the file.
|
||||
|
||||
Provisionally, this might look something like:
|
||||
|
||||
```toml
|
||||
# The minimum version of Tendermint compatible with the contents of
|
||||
# this configuration file.
|
||||
config-version = 'v0.35'
|
||||
```
|
||||
|
||||
When initializing a new node, Tendermint would populate this field with its own
|
||||
version (e.g., `v0.36`). When conducting an upgrade, tools like `confix` can
|
||||
then use this to decide which conversions are valid, and then update the value
|
||||
accordingly. After converting a file marked `'v0.35'` to `'v0.37'`, the
|
||||
conversion tool sets the file's `config-version` to reflect its compatibility.
|
||||
|
||||
> **Discussion point:** This example presumes we would keep config files
|
||||
> compatible within a given release cycle, e.g., all of v0.36.x. We could also
|
||||
> use patch numbers here, if we think there's some reason to permit changes
|
||||
> that would require config file edits at that granularity. I don't think we
|
||||
> should, but that's a design question to consider.
|
||||
|
||||
Upon seeing an up-to-date version marker, the conversion tool can simply exit
|
||||
with a diagnostic like "this file is already up-to-date", rather than sniffing
|
||||
the keyspace and potentially introducing errors. In addition, this would let a
|
||||
tool detect config files that are _newer_ than the one it understands, and
|
||||
issue a safe diagnostic rather than doing something wrong. Plus, besides
|
||||
avoiding potentially unsafe conversions, this would also serve as
|
||||
human-readable documentation that the file is up-to-date for a given version.
|
||||
|
||||
Adding a config version would not address the problem of how to convert files
|
||||
created by older versions of Tendermint, but it would at least help us build
|
||||
more robust config tooling going forward.
|
||||
|
||||
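As a rough illustration of the version gate such tooling could apply, the sketch below checks a file's marker against the version a tool understands, assuming the proposed `config-version` field uses the `v0.NN` form shown above. The field itself is only a proposal in this RFC, and the helper names here are illustrative, not part of `confix`.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// minorOf parses the minor number out of a marker like "v0.35".
func minorOf(v string) (int, error) {
	parts := strings.SplitN(strings.TrimPrefix(v, "v"), ".", 3)
	if len(parts) < 2 {
		return 0, fmt.Errorf("malformed config-version %q", v)
	}
	return strconv.Atoi(parts[1])
}

// needsConversion reports whether a file at fileVer must be converted before
// a tool that understands toolVer can use it, and whether the file is newer
// than the tool (and should be left alone with a diagnostic).
func needsConversion(fileVer, toolVer string) (convert, tooNew bool, err error) {
	f, err := minorOf(fileVer)
	if err != nil {
		return false, false, err
	}
	t, err := minorOf(toolVer)
	if err != nil {
		return false, false, err
	}
	return f < t, f > t, nil
}

func main() {
	convert, tooNew, _ := needsConversion("v0.35", "v0.36")
	fmt.Println(convert, tooNew) // true false
}
```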
### Stability and Change
|
||||
|
||||
In light of the discussion so far, it is natural to examine why we make so many
|
||||
changes to the configuration file from one version to the next, and whether we
|
||||
could reduce friction by being more conservative about what we make
|
||||
configurable, what config changes we make over time, and how we roll them out.
|
||||
|
||||
Some changes, like renaming everything from snake case to kebab case, are
|
||||
entirely gratuitous. We could safely agree not to make those kinds of changes.
|
||||
Apart from that obvious case, however, many other configuration settings
|
||||
provide value to node operators in cases where there is no simple, universal
|
||||
setting that matches every application.
|
||||
|
||||
Taking a high-level view, there are several broad reasons why we might want to
|
||||
make changes to configuration settings:
|
||||
|
||||
- **Lessons learned:** Configuration settings are a good way to try things out
|
||||
in production, before making more invasive changes to the consensus protocol.
|
||||
|
||||
For example, up until Tendermint v0.35, consensus timeouts were specified as
|
||||
per-node configuration settings (e.g., `timeout-precommit` et al.). This
|
||||
allowed operators to tune these values for the needs of their network, but
|
||||
had the downside that individually-misconfigured nodes could stall consensus.
|
||||
|
||||
Based on that experience, these timeouts have been deprecated in Tendermint
|
||||
v0.36 and converted to consensus parameters, to be consistent across all
|
||||
nodes in the network.
|
||||
|
||||
- **Migration & experimentation:** Introducing new features and updating old
|
||||
features can complicate migration for existing users of the software.
|
||||
Temporary or "experimental" configuration settings can be a valuable way to
|
||||
mitigate that friction.
|
||||
|
||||
For example, Tendermint v0.36 introduces a new RPC event subscription
|
||||
endpoint (see [ADR 075][adr075]) that will eventually replace the existing
|
||||
websocket-based interface. To give users time to migrate, v0.36 adds an
|
||||
`experimental-disable-websocket` setting, defaulted to `false`, that allows
|
||||
operators to selectively disable the websocket API for testing purposes
|
||||
during the conversion. This setting is designed to be removed in v0.37, when
|
||||
the old interface is no longer supported.
|
||||
|
||||
- **Ongoing maintenance:** Sometimes configuration settings become obsolete,
|
||||
and the cost of removing them trades off against the potential risks of
|
||||
leaving a non-functional or deprecated knob hooked up indefinitely.
|
||||
|
||||
For example, Tendermint v0.35 deprecated two alternate implementations of the
|
||||
blocksync protocol, one of which was deleted entirely (`v1`) and one of which
|
||||
was scheduled for removal (`v2`). The `blocksync.version` setting, which had
|
||||
been added as a migration aid, became obsolete and needed to be updated.
|
||||
|
||||
Despite our best intentions, sometimes engineering designs do not work out.
|
||||
It's just as important to leave room to back out of changes we have since
|
||||
reconsidered, as it is to support migrations forward onto new and improved
|
||||
code.
|
||||
|
||||
- **Clarity and legibility:** Besides configuring the software, another
|
||||
important purpose of a config file is to document intent for the humans who
|
||||
operate and maintain the software. Operators need to adjust settings to keep the
|
||||
node running, and developers need to know what options were in use when
|
||||
something goes wrong so they can diagnose and fix bugs. The legibility of a
|
||||
config file as a _human_ artifact is also thus important.
|
||||
|
||||
For example, Tendermint v0.35 moved settings related to validator private
|
||||
keys from the top-level section of the configuration file to their own
|
||||
designated `[priv-validator]` section. Although this change did not make any
|
||||
difference to the meaning of those settings, it made the organization of the
|
||||
file easier to understand, and allowed the names of the individual settings
|
||||
to be simplified (e.g., `priv-validator-key-file` became simply `key-file` in
|
||||
the new section).
|
||||
|
||||
Although such changes are "gratuitous" with respect to the software, there is
|
||||
often value in making things more legible for the humans. While there is no
|
||||
simple rule to define the line, the Potter Stewart principle can be used with
|
||||
due care.
|
||||
|
||||
Keeping these examples in mind, we can and should take reasonable steps to
|
||||
avoid churn in the configuration file across versions where we can. However, we
|
||||
must also accept that part of the reason for _having_ a config file is to allow
|
||||
us flexibility elsewhere in the design. On that basis, we should not attempt
|
||||
to be too dogmatic about config changes either. Unlike changes in the block
|
||||
protocol, for example, which affect every user of every network that adopts
|
||||
them, config changes are relatively self-contained.
|
||||
|
||||
There are a few guiding principles I think we can use to strike a sensible
|
||||
balance:
|
||||
|
||||
1. **No gratuitous changes.** Aesthetic changes that do not enhance legibility,
|
||||
avert confusion, or clarify documentation should be entirely avoided.
|
||||
|
||||
2. **Prefer mechanical changes.** Whenever it is practical, change settings in
|
||||
a way that can be updated by a tool without operator judgement. This implies
|
||||
finding safe, universal defaults for new settings, and not changing the
|
||||
default values of existing settings.
|
||||
|
||||
Even if that means we have to make multiple changes (e.g., add a new setting
|
||||
in the current version, deprecate the old one, and remove the old one in the
|
||||
next version) it's preferable if we can mechanize each step.
|
||||
|
||||
3. **Clearly signal intent.** When adding temporary or experimental settings,
|
||||
they should be clearly named and documented as such. Use long names and
|
||||
suggestive prefixes (e.g., `experimental-*`) so that they stand out when
|
||||
read in the config file or printed in logs.
|
||||
|
||||
Relatedly, using temporary or experimental settings should cause the
|
||||
software to emit diagnostic logs at runtime. These log messages should be
|
||||
easy to grep for, and should contain pointers to more complete documentation
|
||||
(say, issue numbers or URLs) that the operator can read, as well as a hint
|
||||
about when the setting is expected to become invalid. For example:
|
||||
|
||||
```
|
||||
WARNING: Websocket RPC access is deprecated and will be removed in
|
||||
Tendermint v0.37. See https://tinyurl.com/adr075 for more information.
|
||||
```
|
||||
|
||||
4. **Consider both directions.** When adding a configuration setting, take some
|
||||
time during the implementation process to think about how the setting could
|
||||
be removed, as well as how it will be rolled out. This applies even for
|
||||
settings we imagine should be permanent. Experience may cause us to rethink
|
||||
our original design intent more broadly than we expected.
|
||||
|
||||
This does not mean we have to spend a long time picking nits over the design
|
||||
of every setting; merely that we should convince ourselves we _could_ undo
|
||||
it without making too big a mess later. Even a little extra effort up front
|
||||
can sometimes save a lot.
## References

- [Tendermint `config` package][config-pkg]
- [`confix` command-line tool][confix]
- [`condiff` command-line tool][condiff]
- [Configuration update plan][plan]
- [ADR 075: RPC Event Subscription Interface][adr075]

[config-pkg]: https://godoc.org/github.com/tendermint/tendermint/config
[confix]: https://github.com/tendermint/tendermint/blob/master/scripts/confix
[condiff]: https://github.com/tendermint/tendermint/blob/master/scripts/confix/condiff
[plan]: https://github.com/tendermint/tendermint/blob/master/scripts/confix/plan.go
[testdata]: https://github.com/tendermint/tendermint/blob/master/scripts/confix/testdata
[adr075]: https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-075-rpc-subscription.md

## Appendix: Research Notes

Discovering when various configuration settings were added, updated, and
removed turns out to be surprisingly tedious. To solve this puzzle, we had to
answer the following questions:

1. What changes were made between v0.x and v0.y? This is further complicated by
   cases where we have backported config changes into the middle of an earlier
   release cycle (e.g., `psql-conn` from v0.35.x into v0.34.13).

2. When during the development cycle were those changes made? This allows us to
   recognize features that were backported into a previous release.

3. What were the default values of the changed settings, and did they change at
   all during or across the release boundary?

Each step of the [configuration update plan][plan] is commented with a link to
one or more PRs where that change was made. The sections below discuss how we
found these references.

### Tracking Changes Across Releases

To figure out what changed between two releases, we built a tool called
[`condiff`][condiff], which performs a "keyspace" diff of two TOML documents.
This diff respects the structure of the TOML file, but ignores comments, blank
lines, and configuration values, so that we can see what was added and removed.

To use it, run:

```shell
go run ./scripts/confix/condiff old.toml new.toml
```
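
To make the idea of a "keyspace" diff concrete, the sketch below shows the
essence of the comparison (a rough illustration, not the real `condiff`
implementation): each document is flattened into its set of dotted key paths,
and only paths present in exactly one of the two files are reported, so values
and comments never enter into it.

```go
package main

import (
	"fmt"
	"sort"

	"github.com/BurntSushi/toml"
)

// keys flattens a decoded TOML document into dotted key paths, ignoring values.
func keys(prefix string, v interface{}, out map[string]bool) {
	table, ok := v.(map[string]interface{})
	if !ok {
		out[prefix] = true // a leaf setting
		return
	}
	for name, child := range table {
		path := name
		if prefix != "" {
			path = prefix + "." + name
		}
		keys(path, child, out)
	}
}

// load parses a TOML file and returns the set of its key paths.
func load(path string) map[string]bool {
	var doc map[string]interface{}
	if _, err := toml.DecodeFile(path, &doc); err != nil {
		panic(err)
	}
	out := make(map[string]bool)
	keys("", doc, out)
	return out
}

func main() {
	oldKeys, newKeys := load("old.toml"), load("new.toml")

	var added, removed []string
	for k := range newKeys {
		if !oldKeys[k] {
			added = append(added, k)
		}
	}
	for k := range oldKeys {
		if !newKeys[k] {
			removed = append(removed, k)
		}
	}
	sort.Strings(added)
	sort.Strings(removed)
	for _, k := range removed {
		fmt.Println("-", k)
	}
	for _, k := range added {
		fmt.Println("+", k)
	}
}
```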
This tool works on any TOML documents, but for our purposes we needed
Tendermint `config.toml` files. The easiest way to get these is to build the
node binary for your version of interest, run `tendermint init` on a clean home
directory, and copy the generated config file out. The [`testdata`][testdata]
directory for the `confix` tool has configs generated from the heads of each
release branch from v0.31 through v0.35.

If you want to reproduce this yourself, it looks something like this:

```shell
# Example for Tendermint v0.32.
git checkout --track origin/v0.32.x
go get golang.org/x/sys/unix
go mod tidy
make build
rm -fr -- tmhome
./build/tendermint --home=tmhome init
cp tmhome/config/config.toml config-v32.toml
```

Be advised that the further back you go, the more idiosyncrasies you will
encounter. For example, Tendermint v0.31 and earlier predate Go modules (v0.31
used dep), and lack backport branches. And you may need to do some editing of
Makefile rules once you get back into the 20s.

Note that when diffing config files across the v0.34/v0.35 gap, the swap from
`snake_case` to `kebab-case` makes it look like everything changed. The
`condiff` tool has a `-desnake` flag that normalizes all the keys to kebab case
in both inputs before comparison.
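
For example, to compare a v0.34 file against a v0.35 file without the case
change drowning out the real differences, an invocation along these lines
should work (the file names follow the `config-v32.toml` pattern used above):

```shell
go run ./scripts/confix/condiff -desnake config-v34.toml config-v35.toml
```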
### Locating Additions and Deletions

To figure out when a configuration setting was added or removed, your tool of
choice is `git bisect`. The only tricky part is finding the endpoints for the
search. If the transition happened within a release, you can use that
release's backport branch as the endpoint (if it has one, e.g., `v0.35.x`).

However, the start point can be more problematic. The backport branches are not
ancestors of `master` or of each other, which means you need to find some point
in history _prior_ to the change but still attached to the mainline. For recent
releases there is a dev root (e.g., `v0.35.0-dev`, `v0.34.0-dev1`, etc.). These
are not named consistently, but you can usually grep the output of `git tag` to
find them.
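
For instance, a quick way to list candidate dev roots is to filter the tag
list:

```shell
git tag | grep -- -dev
```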
In the worst case you could try starting from the root commit of the repo, but
that turns out not to work in all cases. We've done some branching shenanigans
over the years that mean the root is not a direct ancestor of all our release
branches. When you find this you will probably swear a lot. I did.

Once you have a start and end point (say, `v0.35.0-dev` and `master`), you can
bisect in the usual way. I use `git grep` on the `config` directory to check
whether the case I am looking for is present. For example, to find when the
`[fastsync]` section was removed:

```shell
# Setup:
git checkout master
git bisect start
git bisect bad                # it's not present on tip of master.
git bisect good v0.34.0-dev1  # it was present at the start of v0.34.
```

```shell
# Now repeat this until it gives you a specific commit:
if git grep -q '\[fastsync\]' config ; then git bisect good ; else git bisect bad ; fi
```

The above example finds where a setting was removed; to find where a setting
was added, do the same thing except reverse the sense of the test
(`if ! git grep -q ...`).
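
Concretely, the repeated test for an addition just negates the grep. Using
`psql-conn` (mentioned above) as the example setting:

```shell
# Repeat until bisect reports a specific commit:
if ! git grep -q 'psql-conn' config ; then git bisect good ; else git bisect bad ; fi
```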
@@ -13,14 +13,15 @@ order: 3
|
||||
The PBTS algorithm defines a way for a Tendermint blockchain to create block
|
||||
timestamps that are within a reasonable bound of the clocks of the validators on
|
||||
the network. This replaces the original BFTTime algorithm for timestamp
|
||||
assignment that relied on the timestamps included in precommit messages.
|
||||
assignment that computed a timestamp using the timestamps included in precommit
|
||||
messages.
|
||||
|
||||
## Algorithm Parameters
|
||||
|
||||
The functionality of the PBTS algorithm is governed by two parameters within
|
||||
Tendermint. These two parameters are [consensus
|
||||
parameters](https://github.com/tendermint/tendermint/blob/master/spec/abci/apps.md#L291),
|
||||
meaning they are configured by the ABCI application and are expected to be the
|
||||
meaning they are configured by the ABCI application and are therefore the same
|
||||
same across all nodes on the network.
|
||||
|
||||
### `Precision`
|
||||
@@ -51,7 +52,7 @@ useful for the protocols and applications built on top of Tendermint.
|
||||
The following protocols and application features require a reliable source of time:
|
||||
|
||||
* Tendermint Light Clients [rely on correspondence between their known time](https://github.com/tendermint/tendermint/blob/master/spec/light-client/verification/README.md#definitions-1) and the block time for block verification.
|
||||
* Tendermint Evidence validity is determined [either in terms of heights or in terms of time](https://github.com/tendermint/tendermint/blob/master/spec/consensus/evidence.md#verification).
|
||||
* Tendermint Evidence expiration is determined [either in terms of heights or in terms of time](https://github.com/tendermint/tendermint/blob/master/spec/consensus/evidence.md#verification).
|
||||
* Unbonding of staked assets in the Cosmos Hub [occurs after a period of 21
|
||||
days](https://github.com/cosmos/governance/blob/master/params-change/Staking.md#unbondingtime).
|
||||
* IBC packets can use either a [timestamp or a height to timeout packet
|
||||
|
||||
@@ -164,7 +164,7 @@ page](https://app.logz.io/#/dashboard/data-sources/Filebeat), then:
|
||||
yum install systemd-devel || echo "This will only work on RHEL-based systems."
|
||||
apt-get install libsystemd-dev || echo "This will only work on Debian-based systems."
|
||||
|
||||
go get github.com/mheese/journalbeat
|
||||
go install github.com/mheese/journalbeat@latest
|
||||
ansible-playbook -i inventory/digital_ocean.py -l sentrynet logzio.yml -e LOGZIO_TOKEN=ABCDEFGHIJKLMNOPQRSTUVWXYZ012345
|
||||
```
|
||||
|
||||
|
||||
46  go.mod
@@ -3,21 +3,20 @@ module github.com/tendermint/tendermint
|
||||
go 1.17
|
||||
|
||||
require (
|
||||
github.com/BurntSushi/toml v1.0.0
|
||||
github.com/adlio/schema v1.2.3
|
||||
github.com/btcsuite/btcd v0.22.0-beta
|
||||
github.com/BurntSushi/toml v1.1.0
|
||||
github.com/adlio/schema v1.3.0
|
||||
github.com/btcsuite/btcd v0.22.1
|
||||
github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce
|
||||
github.com/fortytw2/leaktest v1.3.0
|
||||
github.com/go-kit/kit v0.12.0
|
||||
github.com/gogo/protobuf v1.3.2
|
||||
github.com/golang/protobuf v1.5.2
|
||||
github.com/golangci/golangci-lint v1.45.0
|
||||
github.com/google/orderedcode v0.0.1
|
||||
github.com/google/uuid v1.3.0
|
||||
github.com/gorilla/websocket v1.5.0
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
|
||||
github.com/lib/pq v1.10.4
|
||||
github.com/lib/pq v1.10.5
|
||||
github.com/libp2p/go-buffer-pool v0.0.2
|
||||
github.com/mroth/weightedrand v0.4.1
|
||||
github.com/oasisprotocol/curve25519-voi v0.0.0-20210609091139-0a56a4bca00b
|
||||
@@ -27,24 +26,28 @@ require (
|
||||
github.com/rs/zerolog v1.26.1
|
||||
github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa
|
||||
github.com/spf13/cobra v1.4.0
|
||||
github.com/spf13/viper v1.10.1
|
||||
github.com/spf13/viper v1.11.0
|
||||
github.com/stretchr/testify v1.7.1
|
||||
github.com/tendermint/tm-db v0.6.6
|
||||
github.com/vektra/mockery/v2 v2.10.0
|
||||
golang.org/x/crypto v0.0.0-20220214200702-86341886e292
|
||||
golang.org/x/net v0.0.0-20211208012354-db4efeb81f4b
|
||||
golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4
|
||||
golang.org/x/net v0.0.0-20220412020605-290c469a71a5
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
|
||||
google.golang.org/grpc v1.45.0
|
||||
google.golang.org/grpc v1.46.0
|
||||
gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b // indirect
|
||||
pgregory.net/rapid v0.4.7
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/creachadair/atomicfile v0.2.4
|
||||
github.com/google/go-cmp v0.5.7
|
||||
github.com/creachadair/atomicfile v0.2.6
|
||||
github.com/creachadair/taskgroup v0.3.2
|
||||
github.com/golangci/golangci-lint v1.45.2
|
||||
github.com/google/go-cmp v0.5.8
|
||||
github.com/vektra/mockery/v2 v2.12.1
|
||||
gotest.tools v2.2.0+incompatible
|
||||
)
|
||||
|
||||
require github.com/pelletier/go-toml/v2 v2.0.0-beta.8 // indirect
|
||||
|
||||
require (
|
||||
4d63.com/gochecknoglobals v0.1.0 // indirect
|
||||
github.com/Antonboom/errname v0.1.5 // indirect
|
||||
@@ -61,7 +64,7 @@ require (
|
||||
github.com/ashanbrown/makezero v1.1.1 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/bkielbasa/cyclop v1.2.0 // indirect
|
||||
github.com/blizzy78/varnamelen v0.6.0 // indirect
|
||||
github.com/blizzy78/varnamelen v0.6.1 // indirect
|
||||
github.com/bombsimon/wsl/v3 v3.3.0 // indirect
|
||||
github.com/breml/bidichk v0.2.2 // indirect
|
||||
github.com/breml/errchkjson v0.2.3 // indirect
|
||||
@@ -72,6 +75,7 @@ require (
|
||||
github.com/charithe/durationcheck v0.0.9 // indirect
|
||||
github.com/chavacava/garif v0.0.0-20210405164556-e8a0a408d6af // indirect
|
||||
github.com/containerd/continuity v0.2.1 // indirect
|
||||
github.com/creachadair/tomledit v0.0.19
|
||||
github.com/daixiang0/gci v0.3.3 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/denis-tingaikin/go-header v0.4.3 // indirect
|
||||
@@ -119,7 +123,7 @@ require (
|
||||
github.com/gostaticanalysis/nilerr v0.1.1 // indirect
|
||||
github.com/hashicorp/errwrap v1.0.0 // indirect
|
||||
github.com/hashicorp/go-multierror v1.1.1 // indirect
|
||||
github.com/hashicorp/go-version v1.2.1 // indirect
|
||||
github.com/hashicorp/go-version v1.4.0 // indirect
|
||||
github.com/hashicorp/hcl v1.0.0 // indirect
|
||||
github.com/hexops/gotextdiff v1.0.3 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.0.0 // indirect
|
||||
@@ -136,7 +140,7 @@ require (
|
||||
github.com/ldez/gomoddirectives v0.2.2 // indirect
|
||||
github.com/ldez/tagliatelle v0.3.1 // indirect
|
||||
github.com/leonklingele/grouper v1.1.0 // indirect
|
||||
github.com/magiconair/properties v1.8.5 // indirect
|
||||
github.com/magiconair/properties v1.8.6 // indirect
|
||||
github.com/maratori/testpackage v1.0.1 // indirect
|
||||
github.com/matoous/godox v0.0.0-20210227103229-6504466cf951 // indirect
|
||||
github.com/mattn/go-colorable v0.1.12 // indirect
|
||||
@@ -177,7 +181,7 @@ require (
|
||||
github.com/sivchari/tenv v1.4.7 // indirect
|
||||
github.com/sonatard/noctx v0.0.1 // indirect
|
||||
github.com/sourcegraph/go-diff v0.6.1 // indirect
|
||||
github.com/spf13/afero v1.8.0 // indirect
|
||||
github.com/spf13/afero v1.8.2 // indirect
|
||||
github.com/spf13/cast v1.4.1 // indirect
|
||||
github.com/spf13/jwalterweatherman v1.1.0 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
@@ -200,14 +204,14 @@ require (
|
||||
gitlab.com/bosi/decorder v0.2.1 // indirect
|
||||
go.etcd.io/bbolt v1.3.6 // indirect
|
||||
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect
|
||||
golang.org/x/sys v0.0.0-20220209214540-3681064d5158 // indirect
|
||||
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad // indirect
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
|
||||
golang.org/x/text v0.3.7 // indirect
|
||||
golang.org/x/tools v0.1.10 // indirect
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
|
||||
google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect
|
||||
google.golang.org/protobuf v1.27.1 // indirect
|
||||
gopkg.in/ini.v1 v1.66.3 // indirect
|
||||
golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f // indirect
|
||||
google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac // indirect
|
||||
google.golang.org/protobuf v1.28.0 // indirect
|
||||
gopkg.in/ini.v1 v1.66.4 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
|
||||
honnef.co/go/tools v0.2.2 // indirect
|
||||
|
||||
126  go.sum
@@ -34,12 +34,16 @@ cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW
|
||||
cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
|
||||
cloud.google.com/go v0.98.0/go.mod h1:ua6Ush4NALrHk5QXDWnjvZHN93OuF0HfuEPq9I1X0cM=
|
||||
cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
|
||||
cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A=
|
||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
|
||||
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
|
||||
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
|
||||
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
|
||||
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
|
||||
cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow=
|
||||
cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM=
|
||||
cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M=
|
||||
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
||||
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
|
||||
cloud.google.com/go/firestore v1.6.0/go.mod h1:afJwI0vaXwAG54kI7A//lP/lSPDkQORQuMkv56TxEPU=
|
||||
@@ -62,11 +66,15 @@ github.com/Antonboom/errname v0.1.5 h1:IM+A/gz0pDhKmlt5KSNTVAvfLMb+65RxavBXpRtCU
|
||||
github.com/Antonboom/errname v0.1.5/go.mod h1:DugbBstvPFQbv/5uLcRRzfrNqKE9tVdVCqWCLp6Cifo=
|
||||
github.com/Antonboom/nilnil v0.1.0 h1:DLDavmg0a6G/F4Lt9t7Enrbgb3Oph6LnDE6YVsmTt74=
|
||||
github.com/Antonboom/nilnil v0.1.0/go.mod h1:PhHLvRPSghY5Y7mX4TW+BHZQYo1A8flE5H20D3IPZBo=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v0.19.0/go.mod h1:h6H6c8enJmmocHUbLiiGY6sx7f9i+X3m1CHdd5c6Rdw=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v0.11.0/go.mod h1:HcM1YX14R7CJcghJGOYCgdezslRSVzqwLf/q+4Y2r/0=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v0.7.0/go.mod h1:yqy467j36fJxcRV2TzfVZ1pCb5vxm4BtZPUdYWe/Xo8=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/toml v1.0.0 h1:dtDWrepsVPfW9H/4y7dDgFc2MBUSeJhlaDtK13CxFlU=
|
||||
github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
|
||||
github.com/BurntSushi/toml v1.1.0 h1:ksErzDEI1khOiGPgpwuI7x2ebx/uXQNw7xJpn9Eq1+I=
|
||||
github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60=
|
||||
github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
|
||||
@@ -96,8 +104,8 @@ github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWX
|
||||
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
|
||||
github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE=
|
||||
github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
|
||||
github.com/adlio/schema v1.2.3 h1:GfKThfEsjS9cCz7gaF8zdXv4cpTdUqdljkKGDTbJjys=
|
||||
github.com/adlio/schema v1.2.3/go.mod h1:nD7ZWmMMbwU12Pqwg+qL0rTvHBrBXfNz+5UQxTfy38M=
|
||||
github.com/adlio/schema v1.3.0 h1:eSVYLxYWbm/6ReZBCkLw4Fz7uqC+ZNoPvA39bOwi52A=
|
||||
github.com/adlio/schema v1.3.0/go.mod h1:51QzxkpeFs6lRY11kPye26IaFPOV+HqEj01t5aXXKfs=
|
||||
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
|
||||
github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
|
||||
github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
|
||||
@@ -140,8 +148,8 @@ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB
|
||||
github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
|
||||
github.com/bkielbasa/cyclop v1.2.0 h1:7Jmnh0yL2DjKfw28p86YTd/B4lRGcNuu12sKE35sM7A=
|
||||
github.com/bkielbasa/cyclop v1.2.0/go.mod h1:qOI0yy6A7dYC4Zgsa72Ppm9kONl0RoIlPbzot9mhmeI=
|
||||
github.com/blizzy78/varnamelen v0.6.0 h1:TOIDk9qRIMspALZKX8x+5hQfAjuvAFogppnxtvuNmBo=
|
||||
github.com/blizzy78/varnamelen v0.6.0/go.mod h1:zy2Eic4qWqjrxa60jG34cfL0VXcSwzUrIx68eJPb4Q8=
|
||||
github.com/blizzy78/varnamelen v0.6.1 h1:kttPCLzXFa+0nt++Cw9fb7GrSSM4KkyIAoX/vXsbuqA=
|
||||
github.com/blizzy78/varnamelen v0.6.1/go.mod h1:zy2Eic4qWqjrxa60jG34cfL0VXcSwzUrIx68eJPb4Q8=
|
||||
github.com/bombsimon/wsl/v3 v3.3.0 h1:Mka/+kRLoQJq7g2rggtgQsjuI/K5Efd87WX96EWFxjM=
|
||||
github.com/bombsimon/wsl/v3 v3.3.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2Pm1FNZIc=
|
||||
github.com/breml/bidichk v0.2.2 h1:w7QXnpH0eCBJm55zGCTJveZEkQBt6Fs5zThIdA6qQ9Y=
|
||||
@@ -149,8 +157,10 @@ github.com/breml/bidichk v0.2.2/go.mod h1:zbfeitpevDUGI7V91Uzzuwrn4Vls8MoBMrwtt7
|
||||
github.com/breml/errchkjson v0.2.3 h1:97eGTmR/w0paL2SwfRPI1jaAZHaH/fXnxWTw2eEIqE0=
|
||||
github.com/breml/errchkjson v0.2.3/go.mod h1:jZEATw/jF69cL1iy7//Yih8yp/mXp2CBoBr9GJwCAsY=
|
||||
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
|
||||
github.com/btcsuite/btcd v0.22.0-beta h1:LTDpDKUM5EeOFBPM8IXpinEcmZ6FWfNZbE3lfrfdnWo=
|
||||
github.com/btcsuite/btcd v0.22.0-beta/go.mod h1:9n5ntfhhHQBIhUvlhDvD3Qg6fRUj4jkN0VB8L8svzOA=
|
||||
github.com/btcsuite/btcd v0.22.1 h1:CnwP9LM/M9xuRrGSCGeMVs9iv09uMqwsVX7EeIpgV2c=
|
||||
github.com/btcsuite/btcd v0.22.1/go.mod h1:wqgTSL29+50LRkmOVknEdmt8ZojIzhuWvgu/iptuN7Y=
|
||||
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U=
|
||||
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
|
||||
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
|
||||
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
|
||||
github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce h1:YtWJF7RHm2pYCvA5t0RPmAaLUhREsKuKd+SLhxFbFeQ=
|
||||
@@ -217,8 +227,12 @@ github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwc
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/creachadair/atomicfile v0.2.4 h1:GRjpQLmz/78I4+nBQpGMFrRa9yrL157AUTrA6hnF0YU=
|
||||
github.com/creachadair/atomicfile v0.2.4/go.mod h1:BRq8Une6ckFneYXZQ+kO7p1ZZP3I2fzVzf28JxrIkBc=
|
||||
github.com/creachadair/atomicfile v0.2.6 h1:FgYxYvGcqREApTY8Nxg8msM6P/KVKK3ob5h9FaRUTNg=
|
||||
github.com/creachadair/atomicfile v0.2.6/go.mod h1:BRq8Une6ckFneYXZQ+kO7p1ZZP3I2fzVzf28JxrIkBc=
|
||||
github.com/creachadair/taskgroup v0.3.2 h1:zlfutDS+5XG40AOxcHDSThxKzns8Tnr9jnr6VqkYlkM=
|
||||
github.com/creachadair/taskgroup v0.3.2/go.mod h1:wieWwecHVzsidg2CsUnFinW1faVN4+kq+TDlRJQ0Wbk=
|
||||
github.com/creachadair/tomledit v0.0.19 h1:zbpfUtYFYFdpRjwJY9HJlto1iZ4M5YwYB6qqc37F6UM=
|
||||
github.com/creachadair/tomledit v0.0.19/go.mod h1:gvtfnSZLa+YNQD28vaPq0Nk12bRxEhmUdBzAWn+EGF4=
|
||||
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
|
||||
@@ -232,6 +246,8 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
|
||||
github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218=
|
||||
github.com/denis-tingaikin/go-header v0.4.3 h1:tEaZKAlqql6SKCY++utLmkPLd6K8IBM20Ha7UVm+mtU=
|
||||
github.com/denis-tingaikin/go-header v0.4.3/go.mod h1:0wOCWuN71D5qIgE2nz9KrKmuYBAC2Mra5RassOIQ2/c=
|
||||
github.com/denisenkom/go-mssqldb v0.12.0 h1:VtrkII767ttSPNRfFekePK3sctr+joXgO58stqQbtUA=
|
||||
github.com/denisenkom/go-mssqldb v0.12.0/go.mod h1:iiK0YP1ZeepvmBQk/QpLEhhTNJgfzrpArPY/aFvc9yU=
|
||||
github.com/dgraph-io/badger/v2 v2.2007.2 h1:EjjK0KqwaFMlPin1ajhP943VPENHJdEz1KLIegjaI3k=
|
||||
github.com/dgraph-io/badger/v2 v2.2007.2/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE=
|
||||
github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de h1:t0UHb5vdojIDUqktM6+xJAfScFBsVpXZmqC9dsgJmeA=
|
||||
@@ -240,6 +256,7 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm
|
||||
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA=
|
||||
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
|
||||
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
|
||||
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
|
||||
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
|
||||
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
|
||||
github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
|
||||
@@ -260,6 +277,7 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.m
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
|
||||
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
|
||||
github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ=
|
||||
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.0.14/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws=
|
||||
@@ -352,6 +370,10 @@ github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
|
||||
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY=
|
||||
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
|
||||
github.com/golang-sql/sqlexp v0.0.0-20170517235910-f1bb20e5a188 h1:+eHOFJl1BaXrQxKX+T06f78590z4qA2ZzBTqahsKSE4=
|
||||
github.com/golang-sql/sqlexp v0.0.0-20170517235910-f1bb20e5a188/go.mod h1:vXjM/+wXQnTPR4KqTKDgJukSZ6amVRtWMPEjE6sQoK8=
|
||||
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
@@ -400,8 +422,8 @@ github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613 h1:9kfjN3AdxcbsZB
|
||||
github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8=
|
||||
github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a h1:iR3fYXUjHCR97qWS8ch1y9zPNsgXThGwjKPrYfqMPks=
|
||||
github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU=
|
||||
github.com/golangci/golangci-lint v1.45.0 h1:T2oCVkYoeckBxcNS6DTYiSXN2QcTNuAWaHyLGfqzMlU=
|
||||
github.com/golangci/golangci-lint v1.45.0/go.mod h1:Y6grRO3drH/7kGP88i9jSl9fGWwCrbA5u7i++jOXll4=
|
||||
github.com/golangci/golangci-lint v1.45.2 h1:9I3PzkvscJkFAQpTQi5Ga0V4qWdJERajX1UZ7QqkW+I=
|
||||
github.com/golangci/golangci-lint v1.45.2/go.mod h1:f20dpzMmUTRp+oYnX0OGjV1Au3Jm2JeI9yLqHq1/xsI=
|
||||
github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 h1:MfyDlzVjl1hoaPzPD4Gpb/QgoRfSBR0jdhwGyAWwMSA=
|
||||
github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg=
|
||||
github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca h1:kNY3/svz5T29MYHubXix4aDDuE3RWHkPvopM/EDv/MA=
|
||||
@@ -429,8 +451,9 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
|
||||
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o=
|
||||
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
|
||||
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
|
||||
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||
@@ -467,6 +490,8 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
|
||||
github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
|
||||
github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM=
|
||||
github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM=
|
||||
github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
|
||||
github.com/gookit/color v1.5.0/go.mod h1:43aQb+Zerm/BWh2GnrgOQm7ffz7tvQXEKV6BFMl7wAo=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
@@ -525,6 +550,7 @@ github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/S
|
||||
github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
|
||||
github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
|
||||
github.com/hashicorp/go-hclog v1.0.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
|
||||
github.com/hashicorp/go-hclog v1.2.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
|
||||
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
|
||||
github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
|
||||
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
|
||||
@@ -538,8 +564,9 @@ github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerX
|
||||
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
|
||||
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/go-version v1.2.1 h1:zEfKbn2+PDgroKdiOzqiE8rsmLqU2uwi5PB5pBJ3TkI=
|
||||
github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
|
||||
github.com/hashicorp/go-version v1.4.0 h1:aAQzgqIrRKRa7w75CKpbBxYsmUoPjzVm1W59ca1L0J4=
|
||||
github.com/hashicorp/go-version v1.4.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
|
||||
@@ -552,6 +579,7 @@ github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOn
|
||||
github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
|
||||
github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk=
|
||||
github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4=
|
||||
github.com/hashicorp/serf v0.9.7/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4=
|
||||
github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
|
||||
github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
@@ -644,16 +672,18 @@ github.com/letsencrypt/pkcs11key/v4 v4.0.0/go.mod h1:EFUvBDay26dErnNb70Nd0/VW3tJ
|
||||
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
||||
github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
||||
github.com/lib/pq v1.10.4 h1:SO9z7FRPzA03QhHKJrH5BXA6HU1rS4V2nIVrrNC1iYk=
|
||||
github.com/lib/pq v1.10.4/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
||||
github.com/lib/pq v1.10.5 h1:J+gdV2cUmX7ZqL2B0lFcW0m+egaHC2V3lpO8nWxyYiQ=
|
||||
github.com/lib/pq v1.10.5/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
||||
github.com/libp2p/go-buffer-pool v0.0.2 h1:QNK2iAFa8gjAe1SPz6mHSMuCcjs+X1wlHzeOSqcmlfs=
|
||||
github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM=
|
||||
github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4=
|
||||
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
|
||||
github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w=
|
||||
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls=
|
||||
github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
|
||||
github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo=
|
||||
github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
|
||||
github.com/maratori/testpackage v1.0.1 h1:QtJ5ZjqapShm0w5DosRjg0PRlSdAdlx+W6cCKoALdbQ=
|
||||
github.com/maratori/testpackage v1.0.1/go.mod h1:ddKdw+XG0Phzhx8BFDTKgpWP4i7MpApTE5fXSKAqwDU=
|
||||
github.com/matoous/godox v0.0.0-20210227103229-6504466cf951 h1:pWxk9e//NbPwfxat7RXkts09K+dEBJWakUWwICVqYbA=
|
||||
@@ -718,6 +748,7 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8=
|
||||
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8=
|
||||
github.com/moricho/tparallel v0.2.1 h1:95FytivzT6rYzdJLdtfn6m1bfFJylOJK41+lgv/EHf4=
|
||||
github.com/moricho/tparallel v0.2.1/go.mod h1:fXEIZxG2vdfl0ZF8b42f5a78EhjjD5mX8qUplsoSU4k=
|
||||
@@ -803,11 +834,14 @@ github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144T
|
||||
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||
github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM=
|
||||
github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
|
||||
github.com/pelletier/go-toml/v2 v2.0.0-beta.8 h1:dy81yyLYJDwMTifq24Oi/IslOslRrDSb3jwDggjz3Z0=
|
||||
github.com/pelletier/go-toml/v2 v2.0.0-beta.8/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo=
|
||||
github.com/performancecopilot/speed/v4 v4.0.0/go.mod h1:qxrSyuDGrTOWfV+uKRFhfxw6h/4HXRGUiZiufxo49BM=
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
|
||||
github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d h1:CdDQnGF8Nq9ocOS/xlSptM1N3BbrA6/kmaep5ggwaIA=
|
||||
github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw=
|
||||
github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
|
||||
github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA=
|
||||
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
@@ -894,6 +928,7 @@ github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb
|
||||
github.com/sagikazarmark/crypt v0.1.0/go.mod h1:B/mN0msZuINBtQ1zZLEQcegFJJf9vnYIR88KRMEuODE=
|
||||
github.com/sagikazarmark/crypt v0.3.0/go.mod h1:uD/D+6UF4SrIR1uGEv7bBNkNqLGqUr43MRiaGWX1Nig=
|
||||
github.com/sagikazarmark/crypt v0.4.0/go.mod h1:ALv2SRj7GxYV4HO9elxH9nS6M9gW+xDNxqmyJ6RfDFM=
|
||||
github.com/sagikazarmark/crypt v0.5.0/go.mod h1:l+nzl7KWh51rpzp2h7t4MZWyiEWdhNpOAnclKvg+mdA=
|
||||
github.com/sanposhiho/wastedassign/v2 v2.0.6 h1:+6/hQIHKNJAUixEj6EmOngGIisyeI+T3335lYTyxRoA=
|
||||
github.com/sanposhiho/wastedassign/v2 v2.0.6/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI=
|
||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
|
||||
@@ -933,8 +968,9 @@ github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2
|
||||
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
|
||||
github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4=
|
||||
github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
|
||||
github.com/spf13/afero v1.8.0 h1:5MmtuhAgYeU6qpa7w7bP0dv6MBYuup0vekhSpSkoq60=
|
||||
github.com/spf13/afero v1.8.0/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo=
|
||||
github.com/spf13/afero v1.8.2 h1:xehSyVa0YnHWsJ49JFljMpg1HX19V6NDZ1fkm1Xznbo=
|
||||
github.com/spf13/afero v1.8.2/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo=
|
||||
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA=
|
||||
github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
@@ -955,8 +991,9 @@ github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DM
|
||||
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
|
||||
github.com/spf13/viper v1.9.0/go.mod h1:+i6ajR7OX2XaiBkrcZJFK21htRk7eDeLg7+O6bhUPP4=
|
||||
github.com/spf13/viper v1.10.0/go.mod h1:SoyBPwAtKDzypXNDFKN5kzH7ppppbGZtls1UpIy5AsM=
|
||||
github.com/spf13/viper v1.10.1 h1:nuJZuYpG7gTj/XqiUwg8bA0cp1+M2mC3J4g5luUYBKk=
|
||||
github.com/spf13/viper v1.10.1/go.mod h1:IGlFPqhNAPKRxohIzWpI5QEy4kuI7tcl5WvR+8qy1rU=
|
||||
github.com/spf13/viper v1.11.0 h1:7OX/1FS6n7jHD1zGrZTM7WtY13ZELRyosK4k93oPr44=
|
||||
github.com/spf13/viper v1.11.0/go.mod h1:djo0X/bA5+tYVoCn+C7cAYJGcVn/qYLFTG8gdUsX7Zk=
|
||||
github.com/ssgreg/nlreturn/v2 v2.2.1 h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YEwQ0=
|
||||
github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I=
|
||||
github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
|
||||
@@ -1022,8 +1059,8 @@ github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyC
|
||||
github.com/valyala/fasthttp v1.30.0/go.mod h1:2rsYD01CKFrjjsvFxx75KlEUNpWNBY9JWD3K/7o2Cus=
|
||||
github.com/valyala/quicktemplate v1.7.0/go.mod h1:sqKJnoaOF88V07vkO+9FL8fb9uZg/VPSJnLYn+LmLk8=
|
||||
github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc=
|
||||
github.com/vektra/mockery/v2 v2.10.0 h1:MiiQWxwdq7/ET6dCXLaJzSGEN17k758H7JHS9kOdiks=
|
||||
github.com/vektra/mockery/v2 v2.10.0/go.mod h1:m/WO2UzWzqgVX3nvqpRQq70I4Z7jbSCRhdmkgtp+Ab4=
|
||||
github.com/vektra/mockery/v2 v2.12.1 h1:BAJk2fGjVg/P9Fi+BxZD1/ZeKTOclpeAb/SKCc12zXc=
|
||||
github.com/vektra/mockery/v2 v2.12.1/go.mod h1:8vf4KDDUptfkyypzdHLuE7OE2xA7Gdt60WgIS8PgD+U=
|
||||
github.com/viki-org/dnscache v0.0.0-20130720023526-c70c1f23c5d8/go.mod h1:dniwbG03GafCjFohMDmz6Zc6oCuiqgH6tGNyXTkHzXE=
|
||||
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
|
||||
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
|
||||
@@ -1055,10 +1092,13 @@ go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
|
||||
go.etcd.io/etcd v0.0.0-20200513171258-e048e166ab9c/go.mod h1:xCI7ZzBfRuGgBXyXO6yfWfDmlWd35khcWpUa4L0xI/k=
|
||||
go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
|
||||
go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
|
||||
go.etcd.io/etcd/api/v3 v3.5.2/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A=
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.2/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
|
||||
go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
|
||||
go.etcd.io/etcd/client/v2 v2.305.1/go.mod h1:pMEacxZW7o8pg4CrFE7pquyCJJzZvkvdD2RibOCCCGs=
|
||||
go.etcd.io/etcd/client/v2 v2.305.2/go.mod h1:2D7ZejHVMIfog1221iLSYlQRzrtECw3kz4I4VAQm3qI=
|
||||
go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0=
|
||||
go.mozilla.org/mozlog v0.0.0-20170222151521-4bb13139d403/go.mod h1:jHoPAGnDrCy6kaI2tAze5Prf0Nr0w/oNkROt2lw3n3o=
|
||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
@@ -1100,6 +1140,7 @@ golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPh
|
||||
golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||
golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||
golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||
@@ -1111,8 +1152,9 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y
|
||||
golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20211215165025-cf75a172585e/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
|
||||
golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.0.0-20220214200702-86341886e292 h1:f+lwQ+GtmgoY+A2YaQxlSOnDjXcQ7ZRLWOHbC6HtRqE=
|
||||
golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 h1:kUhD7nTDoI3fVd9G4ORWrbV5NY0liEs/Jg2pv5f+bBA=
|
||||
golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
@@ -1210,14 +1252,19 @@ golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT
|
||||
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211208012354-db4efeb81f4b h1:MWaHNqZy3KTpuTMAGvv+Kw+ylsEpmyJZizz1dqxnu28=
|
||||
golang.org/x/net v0.0.0-20211208012354-db4efeb81f4b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220412020605-290c469a71a5 h1:bRb386wvrE+oBNdF1d/Xh9mQrfQ4ecYhW5qJ5GvTGT4=
|
||||
golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
@@ -1235,6 +1282,9 @@ golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ
|
||||
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
|
||||
golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
|
||||
golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
@@ -1352,8 +1402,12 @@ golang.org/x/sys v0.0.0-20211213223007-03aa0b5f6827/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220111092808-5a964db01320/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220209214540-3681064d5158 h1:rm+CHSpPEEW2IsXUib1ThaHIjuBVZjxNgSKmBLFfD4c=
|
||||
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad h1:ntjMns5wyP/fN65tdBD4g8J5w8n015+iIIs9rtjXkY0=
|
||||
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY=
|
||||
@@ -1479,8 +1533,9 @@ golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f h1:GGU+dLjvlC3qDwqYgL6UgRmHXhOOgns0bZu2Ty5mm6U=
|
||||
golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
|
||||
gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0=
|
||||
gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
|
||||
@@ -1519,6 +1574,10 @@ google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUb
|
||||
google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I=
|
||||
google.golang.org/api v0.62.0/go.mod h1:dKmwPCydfsad4qCH08MSdgWjfHOyfpd4VtDGgRFdavw=
|
||||
google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo=
|
||||
google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g=
|
||||
google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA=
|
||||
google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8=
|
||||
google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
@@ -1598,8 +1657,17 @@ google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ6
|
||||
google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20211203200212-54befc351ae9/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0=
|
||||
google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
|
||||
google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
|
||||
google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
|
||||
google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
|
||||
google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E=
|
||||
google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac h1:qSNTkEN+L2mvWcLgJOR+8bdHX9rN/IdU3A1Ghpfb1Rg=
google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
@@ -1633,8 +1701,10 @@ google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9K
google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M=
google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
google.golang.org/grpc v1.46.0 h1:oCjezcn6g6A75TGoKYBPgKmVBLexhYLM6MebdrPApP8=
google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
@@ -1648,8 +1718,9 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -1664,8 +1735,9 @@ gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMy
gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
gopkg.in/ini.v1 v1.63.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/ini.v1 v1.66.3 h1:jRskFVxYaMGAMUbN0UZ7niA9gzL9B49DOqE78vg0k3w=
gopkg.in/ini.v1 v1.66.3/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/ini.v1 v1.66.4 h1:SsAcf+mM7mRZo2nJNGt8mZCjG8ZRaNGMURJw7BsIST4=
gopkg.in/ini.v1 v1.66.4/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=

@@ -28,7 +28,7 @@ eg, L = latency = 0.1s
*/

const (
    requestIntervalMS  = 2
    requestInterval    = 2 * time.Millisecond
    maxTotalRequesters = 600
    maxPeerErrBuffer   = 1000
    maxPendingRequests = maxTotalRequesters
@@ -130,27 +130,23 @@ func (*BlockPool) OnStop() {}

// spawns requesters as needed
func (pool *BlockPool) makeRequestersRoutine(ctx context.Context) {
    for {
        if !pool.IsRunning() {
            break
    for pool.IsRunning() {
        if ctx.Err() != nil {
            return
        }

        _, numPending, lenRequesters := pool.GetStatus()
        switch {
        case numPending >= maxPendingRequests:
            // sleep for a bit.
            time.Sleep(requestIntervalMS * time.Millisecond)
            // check for timed out peers
        if numPending >= maxPendingRequests || lenRequesters >= maxTotalRequesters {
            // This is preferable to using a timer because the request interval
            // is so small. Larger request intervals may necessitate using a
            // timer/ticker.
            time.Sleep(requestInterval)
            pool.removeTimedoutPeers()
        case lenRequesters >= maxTotalRequesters:
            // sleep for a bit.
            time.Sleep(requestIntervalMS * time.Millisecond)
            // check for timed out peers
            pool.removeTimedoutPeers()
        default:
            // request for more blocks.
            pool.makeNextRequester(ctx)
            continue
        }

        // request for more blocks.
        pool.makeNextRequester(ctx)
    }
}

@@ -639,9 +635,16 @@ OUTER_LOOP:
        if !bpr.IsRunning() || !bpr.pool.IsRunning() {
            return
        }
        if ctx.Err() != nil {
            return
        }

        peer = bpr.pool.pickIncrAvailablePeer(bpr.height)
        if peer == nil {
            time.Sleep(requestIntervalMS * time.Millisecond)
            // This is preferable to using a timer because the request
            // interval is so small. Larger request intervals may
            // necessitate using a timer/ticker.
            time.Sleep(requestInterval)
            continue PICK_PEER_LOOP
        }
        break PICK_PEER_LOOP

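The two hunks above interleave removed and added lines, which makes the new control flow hard to read on its own. Below is a hedged, self-contained sketch of the requester loop as it looks after the change, with a stub pool type standing in for BlockPool; the stub's method names and the thresholds are illustrative, not taken from the repository.

package main

import (
    "context"
    "fmt"
    "time"
)

// pool is a stand-in for the real BlockPool; only the behaviour needed to
// illustrate the loop shape is stubbed out.
type pool struct {
    pending    int
    requesters int
}

func (p *pool) isRunning() bool      { return true }
func (p *pool) status() (int, int)   { return p.pending, p.requesters }
func (p *pool) removeTimedOutPeers() { fmt.Println("pruning timed-out peers") }
func (p *pool) makeNextRequester()   { p.requesters++ }

const (
    requestInterval    = 2 * time.Millisecond
    maxPendingRequests = 600
    maxTotalRequesters = 600
)

// requesterLoop mirrors the post-change structure: the two duplicated switch
// cases collapse into one throttle branch, and the goroutine exits as soon as
// the context is cancelled.
func requesterLoop(ctx context.Context, p *pool) {
    for p.isRunning() {
        if ctx.Err() != nil {
            return
        }

        numPending, lenRequesters := p.status()
        if numPending >= maxPendingRequests || lenRequesters >= maxTotalRequesters {
            // Sleeping directly is preferable to a ticker here because the
            // interval is tiny; a ticker would add overhead for no benefit.
            time.Sleep(requestInterval)
            p.removeTimedOutPeers()
            continue
        }

        p.makeNextRequester()
    }
}

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
    defer cancel()

    p := &pool{}
    requesterLoop(ctx, p)
    fmt.Println("requesters spawned:", p.requesters)
}

The same trade-off (plain sleep instead of a timer or ticker) is called out in the diff's own comments for both loops.
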
@@ -11,6 +11,7 @@ import (
    "github.com/tendermint/tendermint/internal/consensus"
    "github.com/tendermint/tendermint/internal/eventbus"
    "github.com/tendermint/tendermint/internal/p2p"
    "github.com/tendermint/tendermint/internal/p2p/conn"
    sm "github.com/tendermint/tendermint/internal/state"
    "github.com/tendermint/tendermint/internal/store"
    "github.com/tendermint/tendermint/libs/log"
@@ -45,6 +46,7 @@ func GetChannelDescriptor() *p2p.ChannelDescriptor {
        SendQueueCapacity:   1000,
        RecvBufferCapacity:  1024,
        RecvMessageCapacity: MaxMsgSize,
        Name:                "blockSync",
    }
}

@@ -79,8 +81,8 @@ type Reactor struct {
    consReactor consensusReactor
    blockSync   *atomicBool

    blockSyncCh *p2p.Channel
    peerUpdates *p2p.PeerUpdates
    chCreator  p2p.ChannelCreator
    peerEvents p2p.PeerEventSubscriber

    requestsCh <-chan BlockRequest
    errorsCh   <-chan peerError
@@ -93,23 +95,17 @@ type Reactor struct {

// NewReactor returns new reactor instance.
func NewReactor(
    ctx context.Context,
    logger log.Logger,
    stateStore sm.Store,
    blockExec *sm.BlockExecutor,
    store *store.BlockStore,
    consReactor consensusReactor,
    channelCreator p2p.ChannelCreator,
    peerUpdates *p2p.PeerUpdates,
    peerEvents p2p.PeerEventSubscriber,
    blockSync bool,
    metrics *consensus.Metrics,
    eventBus *eventbus.EventBus,
) (*Reactor, error) {
    blockSyncCh, err := channelCreator(ctx, GetChannelDescriptor())
    if err != nil {
        return nil, err
    }

) *Reactor {
    r := &Reactor{
        logger:      logger,
        stateStore:  stateStore,
@@ -117,14 +113,14 @@ func NewReactor(
        store:       store,
        consReactor: consReactor,
        blockSync:   newAtomicBool(blockSync),
        blockSyncCh: blockSyncCh,
        peerUpdates: peerUpdates,
        chCreator:   channelCreator,
        peerEvents:  peerEvents,
        metrics:     metrics,
        eventBus:    eventBus,
    }

    r.BaseService = *service.NewBaseService(logger, "BlockSync", r)
    return r, nil
    return r
}

// OnStart starts separate go routines for each p2p Channel and listens for
@@ -135,6 +131,12 @@ func NewReactor(
// If blockSync is enabled, we also start the pool and the pool processing
// goroutine. If the pool fails to start, an error is returned.
func (r *Reactor) OnStart(ctx context.Context) error {
    blockSyncCh, err := r.chCreator(ctx, GetChannelDescriptor())
    if err != nil {
        return err
    }
    r.chCreator = func(context.Context, *conn.ChannelDescriptor) (*p2p.Channel, error) { return blockSyncCh, nil }

    state, err := r.stateStore.Load()
    if err != nil {
        return err
@@ -160,13 +162,13 @@ func (r *Reactor) OnStart(ctx context.Context) error {
    if err := r.pool.Start(ctx); err != nil {
        return err
    }
    go r.requestRoutine(ctx)
    go r.requestRoutine(ctx, blockSyncCh)

    go r.poolRoutine(ctx, false)
    go r.poolRoutine(ctx, false, blockSyncCh)
    }

    go r.processBlockSyncCh(ctx)
    go r.processPeerUpdates(ctx)
    go r.processBlockSyncCh(ctx, blockSyncCh)
    go r.processPeerUpdates(ctx, r.peerEvents(ctx), blockSyncCh)

    return nil
}
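The constructor hunks above drop the context argument and the error return from NewReactor and move channel creation into OnStart. The sketch below shows that pattern in isolation, using made-up stand-ins (channel, channelCreator, reactor) rather than the real tendermint types; it is not the project's code, only the shape of the refactor.

package main

import (
    "context"
    "errors"
    "fmt"
)

// channel and channelCreator are illustrative stand-ins for p2p.Channel and
// p2p.ChannelCreator, not the real tendermint types.
type channel struct{ name string }

type channelCreator func(ctx context.Context) (*channel, error)

// reactor mirrors the shape of the refactored block-sync Reactor: the
// constructor only stores dependencies, so it cannot fail, and the work that
// can fail (opening the channel) happens in OnStart.
type reactor struct {
    chCreator channelCreator
    ch        *channel
}

func newReactor(cc channelCreator) *reactor { return &reactor{chCreator: cc} }

func (r *reactor) OnStart(ctx context.Context) error {
    ch, err := r.chCreator(ctx)
    if err != nil {
        return err
    }
    r.ch = ch
    fmt.Println("started with channel:", ch.name)
    return nil
}

func main() {
    creator := func(ctx context.Context) (*channel, error) {
        if ctx.Err() != nil {
            return nil, errors.New("context already cancelled")
        }
        return &channel{name: "blockSync"}, nil
    }

    // No ctx argument and no error to check at construction time.
    r := newReactor(creator)
    if err := r.OnStart(context.Background()); err != nil {
        fmt.Println("start failed:", err)
    }
}

Keeping construction infallible concentrates every failure path in Start/OnStart, so callers can wire the reactor into a node before any I/O happens.
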
@@ -181,7 +183,7 @@ func (r *Reactor) OnStop() {

// respondToPeer loads a block and sends it to the requesting peer, if we have it.
// Otherwise, we'll respond saying we do not have it.
func (r *Reactor) respondToPeer(ctx context.Context, msg *bcproto.BlockRequest, peerID types.NodeID) error {
func (r *Reactor) respondToPeer(ctx context.Context, msg *bcproto.BlockRequest, peerID types.NodeID, blockSyncCh *p2p.Channel) error {
    block := r.store.LoadBlock(msg.Height)
    if block != nil {
        blockProto, err := block.ToProto()
@@ -190,7 +192,7 @@ func (r *Reactor) respondToPeer(ctx context.Context, msg *bcproto.BlockRequest,
            return err
        }

        return r.blockSyncCh.Send(ctx, p2p.Envelope{
        return blockSyncCh.Send(ctx, p2p.Envelope{
            To:      peerID,
            Message: &bcproto.BlockResponse{Block: blockProto},
        })
@@ -198,55 +200,16 @@ func (r *Reactor) respondToPeer(ctx context.Context, msg *bcproto.BlockRequest,

    r.logger.Info("peer requesting a block we do not have", "peer", peerID, "height", msg.Height)

    return r.blockSyncCh.Send(ctx, p2p.Envelope{
    return blockSyncCh.Send(ctx, p2p.Envelope{
        To:      peerID,
        Message: &bcproto.NoBlockResponse{Height: msg.Height},
    })
}

// handleBlockSyncMessage handles envelopes sent from peers on the
// BlockSyncChannel. It returns an error only if the Envelope.Message is unknown
// for this channel. This should never be called outside of handleMessage.
func (r *Reactor) handleBlockSyncMessage(ctx context.Context, envelope *p2p.Envelope) error {
    logger := r.logger.With("peer", envelope.From)

    switch msg := envelope.Message.(type) {
    case *bcproto.BlockRequest:
        return r.respondToPeer(ctx, msg, envelope.From)
    case *bcproto.BlockResponse:
        block, err := types.BlockFromProto(msg.Block)
        if err != nil {
            logger.Error("failed to convert block from proto", "err", err)
            return err
        }

        r.pool.AddBlock(envelope.From, block, block.Size())

    case *bcproto.StatusRequest:
        return r.blockSyncCh.Send(ctx, p2p.Envelope{
            To: envelope.From,
            Message: &bcproto.StatusResponse{
                Height: r.store.Height(),
                Base:   r.store.Base(),
            },
        })
    case *bcproto.StatusResponse:
        r.pool.SetPeerRange(envelope.From, msg.Base, msg.Height)

    case *bcproto.NoBlockResponse:
        logger.Debug("peer does not have the requested block", "height", msg.Height)

    default:
        return fmt.Errorf("received unknown message: %T", msg)
    }

    return nil
}

// handleMessage handles an Envelope sent from a peer on a specific p2p Channel.
// It will handle errors and any possible panics gracefully. A caller can handle
// any error returned by sending a PeerError on the respective channel.
func (r *Reactor) handleMessage(ctx context.Context, chID p2p.ChannelID, envelope *p2p.Envelope) (err error) {
func (r *Reactor) handleMessage(ctx context.Context, envelope *p2p.Envelope, blockSyncCh *p2p.Channel) (err error) {
    defer func() {
        if e := recover(); e != nil {
            err = fmt.Errorf("panic in processing message: %v", e)
@@ -260,12 +223,44 @@ func (r *Reactor) handleMessage(ctx context.Context, chID p2p.ChannelID, envelop

    r.logger.Debug("received message", "message", envelope.Message, "peer", envelope.From)

    switch chID {
    switch envelope.ChannelID {
    case BlockSyncChannel:
        err = r.handleBlockSyncMessage(ctx, envelope)
        switch msg := envelope.Message.(type) {
        case *bcproto.BlockRequest:
            return r.respondToPeer(ctx, msg, envelope.From, blockSyncCh)
        case *bcproto.BlockResponse:
            block, err := types.BlockFromProto(msg.Block)
            if err != nil {
                r.logger.Error("failed to convert block from proto",
                    "peer", envelope.From,
                    "err", err)
                return err
            }

            r.pool.AddBlock(envelope.From, block, block.Size())

        case *bcproto.StatusRequest:
            return blockSyncCh.Send(ctx, p2p.Envelope{
                To: envelope.From,
                Message: &bcproto.StatusResponse{
                    Height: r.store.Height(),
                    Base:   r.store.Base(),
                },
            })
        case *bcproto.StatusResponse:
            r.pool.SetPeerRange(envelope.From, msg.Base, msg.Height)

        case *bcproto.NoBlockResponse:
            r.logger.Debug("peer does not have the requested block",
                "peer", envelope.From,
                "height", msg.Height)

        default:
            return fmt.Errorf("received unknown message: %T", msg)
        }

    default:
        err = fmt.Errorf("unknown channel ID (%d) for envelope (%v)", chID, envelope)
        err = fmt.Errorf("unknown channel ID (%d) for envelope (%v)", envelope.ChannelID, envelope)
    }

    return err
@@ -276,17 +271,17 @@ func (r *Reactor) handleMessage(ctx context.Context, chID p2p.ChannelID, envelop
// message execution will result in a PeerError being sent on the BlockSyncChannel.
// When the reactor is stopped, we will catch the signal and close the p2p Channel
// gracefully.
func (r *Reactor) processBlockSyncCh(ctx context.Context) {
    iter := r.blockSyncCh.Receive(ctx)
func (r *Reactor) processBlockSyncCh(ctx context.Context, blockSyncCh *p2p.Channel) {
    iter := blockSyncCh.Receive(ctx)
    for iter.Next(ctx) {
        envelope := iter.Envelope()
        if err := r.handleMessage(ctx, r.blockSyncCh.ID, envelope); err != nil {
        if err := r.handleMessage(ctx, envelope, blockSyncCh); err != nil {
            if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
                return
            }

            r.logger.Error("failed to process message", "ch_id", r.blockSyncCh.ID, "envelope", envelope, "err", err)
            if serr := r.blockSyncCh.SendError(ctx, p2p.PeerError{
            r.logger.Error("failed to process message", "ch_id", envelope.ChannelID, "envelope", envelope, "err", err)
            if serr := blockSyncCh.SendError(ctx, p2p.PeerError{
                NodeID: envelope.From,
                Err:    err,
            }); serr != nil {
@@ -297,7 +292,7 @@ func (r *Reactor) processBlockSyncCh(ctx context.Context) {
}

// processPeerUpdate processes a PeerUpdate.
func (r *Reactor) processPeerUpdate(ctx context.Context, peerUpdate p2p.PeerUpdate) {
func (r *Reactor) processPeerUpdate(ctx context.Context, peerUpdate p2p.PeerUpdate, blockSyncCh *p2p.Channel) {
    r.logger.Debug("received peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status)

    // XXX: Pool#RedoRequest can sometimes give us an empty peer.
@@ -308,7 +303,7 @@ func (r *Reactor) processPeerUpdate(ctx context.Context, peerUpdate p2p.PeerUpda
    switch peerUpdate.Status {
    case p2p.PeerStatusUp:
        // send a status update the newly added peer
        if err := r.blockSyncCh.Send(ctx, p2p.Envelope{
        if err := blockSyncCh.Send(ctx, p2p.Envelope{
            To: peerUpdate.NodeID,
            Message: &bcproto.StatusResponse{
                Base:   r.store.Base(),
@@ -316,7 +311,7 @@ func (r *Reactor) processPeerUpdate(ctx context.Context, peerUpdate p2p.PeerUpda
            },
        }); err != nil {
            r.pool.RemovePeer(peerUpdate.NodeID)
            if err := r.blockSyncCh.SendError(ctx, p2p.PeerError{
            if err := blockSyncCh.SendError(ctx, p2p.PeerError{
                NodeID: peerUpdate.NodeID,
                Err:    err,
            }); err != nil {
@@ -332,13 +327,13 @@ func (r *Reactor) processPeerUpdate(ctx context.Context, peerUpdate p2p.PeerUpda
// processPeerUpdates initiates a blocking process where we listen for and handle
// PeerUpdate messages. When the reactor is stopped, we will catch the signal and
// close the p2p PeerUpdatesCh gracefully.
func (r *Reactor) processPeerUpdates(ctx context.Context) {
func (r *Reactor) processPeerUpdates(ctx context.Context, peerUpdates *p2p.PeerUpdates, blockSyncCh *p2p.Channel) {
    for {
        select {
        case <-ctx.Done():
            return
        case peerUpdate := <-r.peerUpdates.Updates():
            r.processPeerUpdate(ctx, peerUpdate)
        case peerUpdate := <-peerUpdates.Updates():
            r.processPeerUpdate(ctx, peerUpdate, blockSyncCh)
        }
    }
}
@@ -356,13 +351,25 @@ func (r *Reactor) SwitchToBlockSync(ctx context.Context, state sm.State) error {

    r.syncStartTime = time.Now()

    go r.requestRoutine(ctx)
    go r.poolRoutine(ctx, true)
    bsCh, err := r.chCreator(ctx, GetChannelDescriptor())
    if err != nil {
        return err
    }

    go r.requestRoutine(ctx, bsCh)
    go r.poolRoutine(ctx, true, bsCh)

    if err := r.PublishStatus(types.EventDataBlockSyncStatus{
        Complete: false,
        Height:   state.LastBlockHeight,
    }); err != nil {
        return err
    }

    return nil
}

func (r *Reactor) requestRoutine(ctx context.Context) {
func (r *Reactor) requestRoutine(ctx context.Context, blockSyncCh *p2p.Channel) {
    statusUpdateTicker := time.NewTicker(statusUpdateIntervalSeconds * time.Second)
    defer statusUpdateTicker.Stop()

@@ -371,11 +378,11 @@ func (r *Reactor) requestRoutine(ctx context.Context) {
        case <-ctx.Done():
            return
        case request := <-r.requestsCh:
            if err := r.blockSyncCh.Send(ctx, p2p.Envelope{
            if err := blockSyncCh.Send(ctx, p2p.Envelope{
                To:      request.PeerID,
                Message: &bcproto.BlockRequest{Height: request.Height},
            }); err != nil {
                if err := r.blockSyncCh.SendError(ctx, p2p.PeerError{
                if err := blockSyncCh.SendError(ctx, p2p.PeerError{
                    NodeID: request.PeerID,
                    Err:    err,
                }); err != nil {
@@ -383,14 +390,14 @@ func (r *Reactor) requestRoutine(ctx context.Context) {
                }
            }
        case pErr := <-r.errorsCh:
            if err := r.blockSyncCh.SendError(ctx, p2p.PeerError{
            if err := blockSyncCh.SendError(ctx, p2p.PeerError{
                NodeID: pErr.peerID,
                Err:    pErr.err,
            }); err != nil {
                return
            }
        case <-statusUpdateTicker.C:
            if err := r.blockSyncCh.Send(ctx, p2p.Envelope{
            if err := blockSyncCh.Send(ctx, p2p.Envelope{
                Broadcast: true,
                Message:   &bcproto.StatusRequest{},
            }); err != nil {
@@ -404,7 +411,7 @@ func (r *Reactor) requestRoutine(ctx context.Context) {
// do.
//
// NOTE: Don't sleep in the FOR_LOOP or otherwise slow it down!
func (r *Reactor) poolRoutine(ctx context.Context, stateSynced bool) {
func (r *Reactor) poolRoutine(ctx context.Context, stateSynced bool, blockSyncCh *p2p.Channel) {
    var (
        trySyncTicker           = time.NewTicker(trySyncIntervalMS * time.Millisecond)
        switchToConsensusTicker = time.NewTicker(switchToConsensusIntervalSeconds * time.Second)
@@ -522,7 +529,7 @@ func (r *Reactor) poolRoutine(ctx context.Context, stateSynced bool) {
            // NOTE: We've already removed the peer's request, but we still need
            // to clean up the rest.
            peerID := r.pool.RedoRequest(first.Height)
            if serr := r.blockSyncCh.SendError(ctx, p2p.PeerError{
            if serr := blockSyncCh.SendError(ctx, p2p.PeerError{
                NodeID: peerID,
                Err:    err,
            }); serr != nil {
@@ -531,7 +538,7 @@ func (r *Reactor) poolRoutine(ctx context.Context, stateSynced bool) {

            peerID2 := r.pool.RedoRequest(second.Height)
            if peerID2 != peerID {
                if serr := r.blockSyncCh.SendError(ctx, p2p.PeerError{
                if serr := blockSyncCh.SendError(ctx, p2p.PeerError{
                    NodeID: peerID2,
                    Err:    err,
                }); serr != nil {
@@ -602,11 +609,11 @@ func (r *Reactor) GetRemainingSyncTime() time.Duration {
    return time.Duration(int64(remain * float64(time.Second)))
}

func (r *Reactor) PublishStatus(ctx context.Context, event types.EventDataBlockSyncStatus) error {
func (r *Reactor) PublishStatus(event types.EventDataBlockSyncStatus) error {
    if r.eventBus == nil {
        return errors.New("event bus is not configured")
    }
    return r.eventBus.PublishEventBlockSyncStatus(ctx, event)
    return r.eventBus.PublishEventBlockSyncStatus(event)
}

// atomicBool is an atomic Boolean, safe for concurrent use by multiple

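Taken together, the reactor hunks above replace channels held on the struct with channels passed into each routine, and handleMessage now dispatches on envelope.ChannelID before switching on the concrete message type. The following is a minimal, hedged illustration of that dispatch shape, using stub types in place of p2p.Envelope and p2p.Channel; none of these names come from the repository.

package main

import (
    "context"
    "fmt"
)

// Illustrative stand-ins for p2p.Envelope and p2p.Channel; the real types live
// in tendermint's internal p2p package and are only approximated here.
type envelope struct {
    ChannelID int
    From      string
    To        string
    Message   interface{}
}

type statusRequest struct{}
type statusResponse struct{ Base, Height int64 }

type channelStub struct{ out chan envelope }

func (c *channelStub) Send(ctx context.Context, e envelope) error {
    select {
    case c.out <- e:
        return nil
    case <-ctx.Done():
        return ctx.Err()
    }
}

const blockSyncChannel = 64

// handle mirrors the post-refactor shape of handleMessage: the channel used
// for replies is a parameter, dispatch happens first on the envelope's own
// ChannelID, and only then on the message type.
func handle(ctx context.Context, e envelope, ch *channelStub) error {
    switch e.ChannelID {
    case blockSyncChannel:
        switch msg := e.Message.(type) {
        case *statusRequest:
            reply := envelope{ChannelID: blockSyncChannel, To: e.From, Message: &statusResponse{Base: 1, Height: 100}}
            return ch.Send(ctx, reply)
        default:
            return fmt.Errorf("received unknown message: %T", msg)
        }
    default:
        return fmt.Errorf("unknown channel ID (%d)", e.ChannelID)
    }
}

func main() {
    ch := &channelStub{out: make(chan envelope, 1)}
    req := envelope{ChannelID: blockSyncChannel, From: "peer1", Message: &statusRequest{}}
    if err := handle(context.Background(), req, ch); err != nil {
        fmt.Println("handle error:", err)
        return
    }
    fmt.Printf("reply queued: %+v\n", <-ch.out)
}
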
@@ -50,7 +50,6 @@ func setup(
    genDoc *types.GenesisDoc,
    privVal types.PrivValidator,
    maxBlockHeights []int64,
    chBuf uint,
) *reactorTestSuite {
    t.Helper()

@@ -145,6 +144,7 @@ func (rts *reactorTestSuite) addNode(
        sm.EmptyEvidencePool{},
        blockStore,
        eventbus,
        sm.NopMetrics(),
    )

    for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ {
@@ -189,20 +189,18 @@ func (rts *reactorTestSuite) addNode(
    chCreator := func(ctx context.Context, chdesc *p2p.ChannelDescriptor) (*p2p.Channel, error) {
        return rts.blockSyncChannels[nodeID], nil
    }
    rts.reactors[nodeID], err = NewReactor(
        ctx,
    rts.reactors[nodeID] = NewReactor(
        rts.logger.With("nodeID", nodeID),
        stateStore,
        blockExec,
        blockStore,
        nil,
        chCreator,
        rts.peerUpdates[nodeID],
        func(ctx context.Context) *p2p.PeerUpdates { return rts.peerUpdates[nodeID] },
        rts.blockSync,
        consensus.NopMetrics(),
        nil, // eventbus, can be nil
    )
    require.NoError(t, err)

    require.NoError(t, rts.reactors[nodeID].Start(ctx))
    require.True(t, rts.reactors[nodeID].IsRunning())
@@ -226,10 +224,10 @@ func TestReactor_AbruptDisconnect(t *testing.T) {
    defer os.RemoveAll(cfg.RootDir)

    valSet, privVals := factory.ValidatorSet(ctx, t, 1, 30)
    genDoc := factory.GenesisDoc(cfg, time.Now(), valSet.Validators, nil)
    genDoc := factory.GenesisDoc(cfg, time.Now(), valSet.Validators, factory.ConsensusParams())
    maxBlockHeight := int64(64)

    rts := setup(ctx, t, genDoc, privVals[0], []int64{maxBlockHeight, 0}, 0)
    rts := setup(ctx, t, genDoc, privVals[0], []int64{maxBlockHeight, 0})

    require.Equal(t, maxBlockHeight, rts.reactors[rts.nodes[0]].store.Height())

@@ -266,10 +264,10 @@ func TestReactor_SyncTime(t *testing.T) {
    defer os.RemoveAll(cfg.RootDir)

    valSet, privVals := factory.ValidatorSet(ctx, t, 1, 30)
    genDoc := factory.GenesisDoc(cfg, time.Now(), valSet.Validators, nil)
    genDoc := factory.GenesisDoc(cfg, time.Now(), valSet.Validators, factory.ConsensusParams())
    maxBlockHeight := int64(101)

    rts := setup(ctx, t, genDoc, privVals[0], []int64{maxBlockHeight, 0}, 0)
    rts := setup(ctx, t, genDoc, privVals[0], []int64{maxBlockHeight, 0})
    require.Equal(t, maxBlockHeight, rts.reactors[rts.nodes[0]].store.Height())
    rts.start(ctx, t)

@@ -294,10 +292,10 @@ func TestReactor_NoBlockResponse(t *testing.T) {
    defer os.RemoveAll(cfg.RootDir)

    valSet, privVals := factory.ValidatorSet(ctx, t, 1, 30)
    genDoc := factory.GenesisDoc(cfg, time.Now(), valSet.Validators, nil)
    genDoc := factory.GenesisDoc(cfg, time.Now(), valSet.Validators, factory.ConsensusParams())
    maxBlockHeight := int64(65)

    rts := setup(ctx, t, genDoc, privVals[0], []int64{maxBlockHeight, 0}, 0)
    rts := setup(ctx, t, genDoc, privVals[0], []int64{maxBlockHeight, 0})

    require.Equal(t, maxBlockHeight, rts.reactors[rts.nodes[0]].store.Height())

@@ -347,9 +345,9 @@ func TestReactor_BadBlockStopsPeer(t *testing.T) {

    maxBlockHeight := int64(48)
    valSet, privVals := factory.ValidatorSet(ctx, t, 1, 30)
    genDoc := factory.GenesisDoc(cfg, time.Now(), valSet.Validators, nil)
    genDoc := factory.GenesisDoc(cfg, time.Now(), valSet.Validators, factory.ConsensusParams())

    rts := setup(ctx, t, genDoc, privVals[0], []int64{maxBlockHeight, 0, 0, 0, 0}, 1000)
    rts := setup(ctx, t, genDoc, privVals[0], []int64{maxBlockHeight, 0, 0, 0, 0})

    require.Equal(t, maxBlockHeight, rts.reactors[rts.nodes[0]].store.Height())

@@ -382,7 +380,7 @@ func TestReactor_BadBlockStopsPeer(t *testing.T) {
    // XXX: This causes a potential race condition.
    // See: https://github.com/tendermint/tendermint/issues/6005
    valSet, otherPrivVals := factory.ValidatorSet(ctx, t, 1, 30)
    otherGenDoc := factory.GenesisDoc(cfg, time.Now(), valSet.Validators, nil)
    otherGenDoc := factory.GenesisDoc(cfg, time.Now(), valSet.Validators, factory.ConsensusParams())
    newNode := rts.network.MakeNode(ctx, t, p2ptest.NodeOptions{
        MaxPeers:     uint16(len(rts.nodes) + 1),
        MaxConnected: uint16(len(rts.nodes) + 1),

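The test changes above feed the reactor pre-built fixtures through closures (chCreator and a peerEvents function) instead of concrete channel and peer-update values. A small, self-contained sketch of that injection pattern, with stub types that only approximate the real p2p ones; the names here are illustrative, not the test suite's.

package main

import (
    "context"
    "fmt"
)

// Stubs standing in for p2p.Channel and p2p.PeerUpdates; they exist only to
// show the closure-injection pattern the updated test setup relies on.
type channel struct{ name string }
type peerUpdates struct{ node string }

type reactor struct {
    chCreator  func(context.Context) (*channel, error)
    peerEvents func(context.Context) *peerUpdates
}

// start resolves the injected dependencies, the way OnStart does in the diff.
func (r *reactor) start(ctx context.Context) error {
    ch, err := r.chCreator(ctx)
    if err != nil {
        return err
    }
    updates := r.peerEvents(ctx)
    fmt.Printf("reactor wired to channel %q and peer updates for %s\n", ch.name, updates.node)
    return nil
}

func main() {
    // Pre-built per-node fixtures, as a test suite would hold them.
    fixedCh := &channel{name: "blockSync"}
    fixedUpdates := &peerUpdates{node: "node-1"}

    // The test hands the reactor closures that return exactly these fixtures,
    // mirroring the chCreator / peerEvents arguments in the updated setup.
    r := &reactor{
        chCreator:  func(context.Context) (*channel, error) { return fixedCh, nil },
        peerEvents: func(context.Context) *peerUpdates { return fixedUpdates },
    }
    if err := r.start(context.Background()); err != nil {
        fmt.Println("start failed:", err)
    }
}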