Compare commits

..

2 Commits

Author | SHA1 | Message | Date
William Banfield | c74df714d4 | update spec document to reflect ADDED behavior | 2022-03-21 16:37:37 -04:00
William Banfield | 1d2858f1b2 | abci++: remove CheckTx call from PrepareProposal flow | 2022-03-21 16:33:47 -04:00
552 changed files with 19619 additions and 32842 deletions

.github/CODEOWNERS vendored (4 changed lines)

@@ -7,7 +7,7 @@
# global owners are only requested if there isn't a more specific
# codeowner specified below. For this reason, the global codeowners
# are often repeated in package-level definitions.
* @ebuchman @cmwaters @tychoish @williambanfield @creachadair @sergio-mena @jmalicevic @thanethomson @ancazamfir @samricotta
* @ebuchman @cmwaters @tychoish @williambanfield @creachadair @sergio-mena @jmalicevic @thanethomson @ancazamfir
# Spec related changes can be approved by the protocol design team
/spec @josef-widder @milosevic @cason @sergio-mena @jmalicevic
/spec @josef-widder @milosevic @cason


@@ -4,37 +4,6 @@ updates:
directory: "/"
schedule:
interval: weekly
target-branch: "master"
open-pull-requests-limit: 10
labels:
- T:dependencies
- S:automerge
- package-ecosystem: github-actions
directory: "/"
schedule:
interval: weekly
target-branch: "v0.34.x"
open-pull-requests-limit: 10
labels:
- T:dependencies
- S:automerge
- package-ecosystem: github-actions
directory: "/"
schedule:
interval: weekly
target-branch: "v0.35.x"
open-pull-requests-limit: 10
labels:
- T:dependencies
- S:automerge
- package-ecosystem: github-actions
directory: "/"
schedule:
interval: weekly
target-branch: "v0.36.x"
open-pull-requests-limit: 10
labels:
- T:dependencies
@@ -53,7 +22,7 @@ updates:
- package-ecosystem: gomod
directory: "/"
schedule:
interval: weekly
interval: daily
target-branch: "master"
open-pull-requests-limit: 10
labels:
@@ -63,7 +32,7 @@ updates:
- package-ecosystem: gomod
directory: "/"
schedule:
interval: weekly
interval: daily
target-branch: "v0.34.x"
open-pull-requests-limit: 10
labels:
@@ -73,19 +42,9 @@ updates:
- package-ecosystem: gomod
directory: "/"
schedule:
interval: weekly
interval: daily
target-branch: "v0.35.x"
open-pull-requests-limit: 10
labels:
- T:dependencies
- S:automerge
- package-ecosystem: gomod
directory: "/"
schedule:
interval: weekly
target-branch: "v0.36.x"
open-pull-requests-limit: 10
labels:
- T:dependencies
- S:automerge

.github/mergify.yml vendored (8 changed lines)

@@ -33,11 +33,3 @@ pull_request_rules:
backport:
branches:
- v0.35.x
- name: backport patches to v0.36.x branch
conditions:
- base=master
- label=S:backport-to-v0.36.x
actions:
backport:
branches:
- v0.36.x


@@ -20,11 +20,11 @@ jobs:
goos: ["linux"]
timeout-minutes: 5
steps:
- uses: actions/setup-go@v3
- uses: actions/setup-go@v2
with:
go-version: "1.18"
go-version: "1.17"
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
- uses: technote-space/get-diff-action@v6.0.1
with:
PATTERNS: |
**/**.go
@@ -41,11 +41,11 @@ jobs:
needs: build
timeout-minutes: 5
steps:
- uses: actions/setup-go@v3
- uses: actions/setup-go@v2
with:
go-version: "1.18"
go-version: "1.17"
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
- uses: technote-space/get-diff-action@v6.0.1
with:
PATTERNS: |
**/**.go
@@ -63,11 +63,11 @@ jobs:
needs: build
timeout-minutes: 5
steps:
- uses: actions/setup-go@v3
- uses: actions/setup-go@v2
with:
go-version: "1.18"
go-version: "1.17"
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
- uses: technote-space/get-diff-action@v6.0.1
with:
PATTERNS: |
**/**.go


@@ -1,75 +0,0 @@
# Verify that generated code is up-to-date.
#
# Note that we run these checks regardless of whether the input files have
# changed, because generated code can change in response to toolchain updates
# even if no files in the repository are modified.
name: Check generated code
on:
pull_request:
branches:
- master
permissions:
contents: read
jobs:
check-mocks:
runs-on: ubuntu-latest
steps:
- uses: actions/setup-go@v3
with:
go-version: '1.18'
- uses: actions/checkout@v3
- name: "Check generated mocks"
run: |
set -euo pipefail
readonly MOCKERY=2.12.3 # N.B. no leading "v"
curl -sL "https://github.com/vektra/mockery/releases/download/v${MOCKERY}/mockery_${MOCKERY}_Linux_x86_64.tar.gz" | tar -C /usr/local/bin -xzf -
make mockery 2>/dev/null
if ! git diff --stat --exit-code ; then
echo ">> ERROR:"
echo ">>"
echo ">> Generated mocks require update (either Mockery or source files may have changed)."
echo ">> Ensure your tools are up-to-date, re-run 'make mockery' and update this PR."
echo ">>"
exit 1
fi
check-proto:
runs-on: ubuntu-latest
steps:
- uses: actions/setup-go@v3
with:
go-version: '1.18'
- uses: actions/checkout@v3
with:
fetch-depth: 1 # we need a .git directory to run git diff
- name: "Check protobuf generated code"
run: |
set -euo pipefail
# Install buf and gogo tools, so that differences that arise from
# toolchain differences are also caught.
readonly tools="$(mktemp -d)"
export PATH="${PATH}:${tools}/bin"
export GOBIN="${tools}/bin"
go install github.com/bufbuild/buf/cmd/buf
go install github.com/gogo/protobuf/protoc-gen-gogofaster@latest
make proto-gen
if ! git diff --stat --exit-code ; then
echo ">> ERROR:"
echo ">>"
echo ">> Protobuf generated code requires update (either tools or .proto files may have changed)."
echo ">> Ensure your tools are up-to-date, re-run 'make proto-gen' and update this PR."
echo ">>"
exit 1
fi
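
For reference, the workflow above boils down to two local checks. A minimal sketch of running them outside CI, assuming mockery, buf, and protoc-gen-gogofaster are already installed as in the workflow's own steps:

```sh
# Regenerate mocks and protobuf code, then fail if the working tree changed.
make mockery && git diff --stat --exit-code
make proto-gen && git diff --stat --exit-code
```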


@@ -39,17 +39,17 @@ jobs:
platforms: all
- name: Set up Docker Build
uses: docker/setup-buildx-action@v2.0.0
uses: docker/setup-buildx-action@v1.6.0
- name: Login to DockerHub
if: ${{ github.event_name != 'pull_request' }}
uses: docker/login-action@v2.0.0
uses: docker/login-action@v1.14.1
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Publish to Docker Hub
uses: docker/build-push-action@v3.1.0
uses: docker/build-push-action@v2.10.0
with:
context: .
file: ./DOCKER/Dockerfile


@@ -1,62 +0,0 @@
# Build and deploy the docs.tendermint.com website content.
# The static content is published to GitHub Pages.
#
# For documentation build info, see docs/DOCS_README.md.
name: Build static documentation site
on:
workflow_dispatch: # allow manual updates
push:
branches:
- master
paths:
- docs/**
- spec/**
jobs:
# This is split into two jobs so that the build, which runs npm, does not
# have write access to anything. The deploy requires write access to publish
# to the branch used by GitHub Pages, however, so we can't just make the
# whole workflow read-only.
build:
name: VuePress build
runs-on: ubuntu-latest
container:
image: alpine:latest
permissions:
contents: read
steps:
- name: Install generator dependencies
run: |
apk add --no-cache make bash git npm
- uses: actions/checkout@v4
with:
# We need to fetch full history so the backport branches for previous
# versions will be available for the build.
fetch-depth: 0
- name: Build documentation
run: |
git config --global --add safe.directory "$PWD"
make build-docs
- uses: actions/upload-artifact@v4
with:
name: build-output
path: ~/output/
deploy:
name: Deploy to GitHub Pages
runs-on: ubuntu-latest
needs: build
permissions:
contents: write
steps:
- uses: actions/checkout@v4
- uses: actions/download-artifact@v4
with:
name: build-output
path: ~/output
- name: Deploy to GitHub Pages
uses: JamesIves/github-pages-deploy-action@v4
with:
branch: 'docs-tendermint-com'
folder: ~/output
single-commit: true


@@ -1,20 +0,0 @@
# Verify that important design docs have ToC entries.
name: Check documentation ToC
on:
pull_request:
push:
branches:
- master
jobs:
check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
with:
PATTERNS: |
docs/architecture/**
docs/rfc/**
- run: ./docs/presubmit.sh
if: env.GIT_DIFF


@@ -11,13 +11,13 @@ jobs:
strategy:
fail-fast: false
matrix:
group: ['00', '01', '02', '03', '04']
group: ['00', '01', '02', '03']
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/setup-go@v3
- uses: actions/setup-go@v2
with:
go-version: '1.18'
go-version: '1.17'
- uses: actions/checkout@v3
@@ -29,7 +29,7 @@ jobs:
- name: Generate testnets
working-directory: test/e2e
# When changing -g, also change the matrix groups above
run: ./build/generator -g 5 -d networks/nightly/
run: ./build/generator -g 4 -d networks/nightly/
- name: Run ${{ matrix.p2p }} p2p testnets
working-directory: test/e2e


@@ -1,7 +1,8 @@
# Runs randomly generated E2E testnets nightly on the 0.34.x branch.
# Runs randomly generated E2E testnets nightly
# on the 0.34.x release branch
# !! This file should be kept in sync with the e2e-nightly-master.yml file,
# modulo changes to the version labels.
# !! If you change something in this file, you probably want
# to update the e2e-nightly-master workflow as well!
name: e2e-nightly-34x
on:
@@ -19,9 +20,9 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/setup-go@v3
- uses: actions/setup-go@v2
with:
go-version: '1.18'
go-version: '1.17'
- uses: actions/checkout@v3
with:


@@ -1,7 +1,7 @@
# Runs randomly generated E2E testnets nightly on the v0.35.x branch.
# Runs randomly generated E2E testnets nightly on v0.35.x.
# !! This file should be kept in sync with the e2e-nightly-master.yml file,
# modulo changes to the version labels.
# !! If you change something in this file, you probably want
# to update the e2e-nightly-master workflow as well!
name: e2e-nightly-35x
on:
@@ -20,9 +20,9 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/setup-go@v3
- uses: actions/setup-go@v2
with:
go-version: '1.18'
go-version: '1.17'
- uses: actions/checkout@v3
with:


@@ -1,74 +0,0 @@
# Runs randomly generated E2E testnets nightly on the v0.36.x branch.
# !! This file should be kept in sync with the e2e-nightly-master.yml file,
# modulo changes to the version labels.
name: e2e-nightly-36x
on:
schedule:
- cron: '0 2 * * *'
jobs:
e2e-nightly-test:
# Run parallel jobs for the listed testnet groups (must match the
# ./build/generator -g flag)
strategy:
fail-fast: false
matrix:
group: ['00', '01', '02', '03', '04']
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/setup-go@v3
with:
go-version: '1.18'
- uses: actions/checkout@v3
with:
ref: 'v0.36.x'
- name: Build
working-directory: test/e2e
# Run make jobs in parallel, since we can't run steps in parallel.
run: make -j2 docker generator runner tests
- name: Generate testnets
working-directory: test/e2e
# When changing -g, also change the matrix groups above
run: ./build/generator -g 5 -d networks/nightly
- name: Run testnets in group ${{ matrix.group }}
working-directory: test/e2e
run: ./run-multiple.sh networks/nightly/*-group${{ matrix.group }}-*.toml
e2e-nightly-fail-2:
needs: e2e-nightly-test
if: ${{ failure() }}
runs-on: ubuntu-latest
steps:
- name: Notify Slack on failure
uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
env:
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACK_CHANNEL: tendermint-internal
SLACK_USERNAME: Nightly E2E Tests
SLACK_ICON_EMOJI: ':skull:'
SLACK_COLOR: danger
SLACK_MESSAGE: Nightly E2E tests failed on v0.36.x
SLACK_FOOTER: ''
e2e-nightly-success: # may turn this off once they seem to pass consistently
needs: e2e-nightly-test
if: ${{ success() }}
runs-on: ubuntu-latest
steps:
- name: Notify Slack on success
uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
env:
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACK_CHANNEL: tendermint-internal
SLACK_USERNAME: Nightly E2E Tests
SLACK_ICON_EMOJI: ':white_check_mark:'
SLACK_COLOR: good
SLACK_MESSAGE: Nightly E2E tests passed on v0.36.x
SLACK_FOOTER: ''


@@ -1,8 +1,7 @@
# Runs randomly generated E2E testnets nightly on master
# !! Relevant changes to this file should be propagated to the e2e-nightly-<V>x
# files for the supported backport branches, when appropriate, modulo version
# markers.
# !! If you change something in this file, you probably want
# to update the e2e-nightly-34x workflow as well!
name: e2e-nightly-master
on:
@@ -16,13 +15,13 @@ jobs:
strategy:
fail-fast: false
matrix:
group: ['00', '01', '02', '03', "04"]
group: ['00', '01', '02', '03']
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/setup-go@v3
- uses: actions/setup-go@v2
with:
go-version: '1.18'
go-version: '1.17'
- uses: actions/checkout@v3
@@ -34,7 +33,7 @@ jobs:
- name: Generate testnets
working-directory: test/e2e
# When changing -g, also change the matrix groups above
run: ./build/generator -g 5 -d networks/nightly/
run: ./build/generator -g 4 -d networks/nightly/
- name: Run ${{ matrix.p2p }} p2p testnets
working-directory: test/e2e


@@ -14,11 +14,11 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 15
steps:
- uses: actions/setup-go@v3
- uses: actions/setup-go@v2
with:
go-version: '1.18'
go-version: '1.17'
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
- uses: technote-space/get-diff-action@v6.0.1
with:
PATTERNS: |
**/**.go


@@ -13,15 +13,15 @@ jobs:
fuzz-nightly-test:
runs-on: ubuntu-latest
steps:
- uses: actions/setup-go@v3
- uses: actions/setup-go@v2
with:
go-version: '1.18'
go-version: '1.17'
- uses: actions/checkout@v3
- name: Install go-fuzz
working-directory: test/fuzz
run: go install github.com/dvyukov/go-fuzz/go-fuzz@latest github.com/dvyukov/go-fuzz/go-fuzz-build@latest
run: go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-build
- name: Fuzz mempool
working-directory: test/fuzz
@@ -39,14 +39,14 @@ jobs:
continue-on-error: true
- name: Archive crashers
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v2
with:
name: crashers
path: test/fuzz/**/crashers
retention-days: 3
- name: Archive suppressions
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v2
with:
name: suppressions
path: test/fuzz/**/suppressions


@@ -10,7 +10,7 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 3
steps:
- uses: styfle/cancel-workflow-action@0.10.0
- uses: styfle/cancel-workflow-action@0.9.1
with:
workflow_id: 1041851,1401230,2837803
access_token: ${{ github.token }}


@@ -58,7 +58,7 @@ jobs:
run: docker exec -i jepsen-control bash -c 'source /root/.bashrc; cd /jepsen/tendermint; lein run test --nemesis ${{ github.event.inputs.nemesis }} --workload ${{ github.event.inputs.workload }} --concurrency ${{ github.event.inputs.concurrency }} --tendermint-url ${{ github.event.inputs.tendermintUrl }} --merkleeyes-url ${{ github.event.inputs.merkleeyesUrl }} --time-limit ${{ github.event.inputs.timeLimit }} ${{ github.event.inputs.dupOrSuperByzValidators }}'
- name: Archive results
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v2
with:
name: results
path: tendermint/store/latest

.github/workflows/linkchecker.yml vendored Normal file (12 changed lines)

@@ -0,0 +1,12 @@
name: Check Markdown links
on:
schedule:
- cron: '* */24 * * *'
jobs:
markdown-link-check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: gaurav-nelson/github-action-markdown-link-check@1.0.14
with:
folder-path: "docs"


@@ -1,11 +1,7 @@
name: Golang Linter
# Lint runs golangci-lint over the entire Tendermint repository.
#
# This workflow is run on every pull request and push to master.
#
# The `golangci` job will pass without running if no *.{go, mod, sum}
# files have been modified.
# Lint runs golangci-lint over the entire Tendermint repository
# This workflow is run on every pull request and push to master
# The `golangci` job will pass without running if no *.{go, mod, sum} files have been modified.
on:
pull_request:
push:
@@ -18,21 +14,19 @@ jobs:
timeout-minutes: 8
steps:
- uses: actions/checkout@v3
- uses: actions/setup-go@v3
- uses: actions/setup-go@v2
with:
go-version: '1.18'
- uses: technote-space/get-diff-action@v6
go-version: '^1.17'
- uses: technote-space/get-diff-action@v6.0.1
with:
PATTERNS: |
**/**.go
go.mod
go.sum
- uses: golangci/golangci-lint-action@v3
- uses: golangci/golangci-lint-action@v3.1.0
with:
# Required: the version of golangci-lint is required and
# must be specified without patch version: we always use the
# latest patch version.
version: v1.45
# Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version.
version: v1.44
args: --timeout 10m
github-token: ${{ secrets.github_token }}
if: env.GIT_DIFF


@@ -1,23 +1,19 @@
name: Check Markdown links
# TODO: Re-enable when https://github.com/gaurav-nelson/github-action-markdown-link-check/pull/126 lands.
on:
push:
branches:
- master
pull_request:
branches: [master]
jobs:
markdown-link-check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
with:
PATTERNS: |
**/**.md
- uses: creachadair/github-action-markdown-link-check@master
with:
check-modified-files-only: 'yes'
config-file: '.md-link-check.json'
if: env.GIT_DIFF
#name: Check Markdown links
#
#on:
# push:
# branches:
# - master
# pull_request:
# branches: [master]
#
#jobs:
# markdown-link-check:
# runs-on: ubuntu-latest
# steps:
# - uses: actions/checkout@v3
# - uses: gaurav-nelson/github-action-markdown-link-check@v1.0.13
# with:
# check-modified-files-only: 'yes'


@@ -15,7 +15,7 @@ jobs:
timeout-minutes: 5
steps:
- uses: actions/checkout@v3
- uses: bufbuild/buf-setup-action@v1.6.0
- uses: bufbuild/buf-setup-action@v1.1.0
- uses: bufbuild/buf-lint-action@v1
with:
input: 'proto'


@@ -16,24 +16,22 @@ jobs:
with:
fetch-depth: 0
- uses: actions/setup-go@v3
- uses: actions/setup-go@v2
with:
go-version: '1.18'
go-version: '1.17'
- name: Build
uses: goreleaser/goreleaser-action@v3
uses: goreleaser/goreleaser-action@v2
if: ${{ github.event_name == 'pull_request' }}
with:
version: latest
args: build --skip-validate # skip validate skips initial sanity checks in order to be able to fully run
- run: echo https://github.com/tendermint/tendermint/blob/${GITHUB_REF#refs/tags/}/CHANGELOG.md#${GITHUB_REF#refs/tags/} > ../release_notes.md
- name: Release
uses: goreleaser/goreleaser-action@v3
uses: goreleaser/goreleaser-action@v2
if: startsWith(github.ref, 'refs/tags/')
with:
version: latest
args: release --rm-dist --release-notes=../release_notes.md
args: release --rm-dist
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}


@@ -7,7 +7,7 @@ jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v5
- uses: actions/stale@v4
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
stale-pr-message: "This pull request has been automatically marked as stale because it has not had


@@ -16,11 +16,11 @@ jobs:
matrix:
part: ["00", "01", "02", "03", "04", "05"]
steps:
- uses: actions/setup-go@v3
- uses: actions/setup-go@v2
with:
go-version: "1.18"
go-version: "1.17"
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
- uses: technote-space/get-diff-action@v6.0.1
with:
PATTERNS: |
**/**.go
@@ -32,3 +32,44 @@ jobs:
run: |
make test-group-${{ matrix.part }} NUM_SPLIT=6
if: env.GIT_DIFF
- uses: actions/upload-artifact@v2
with:
name: "${{ github.sha }}-${{ matrix.part }}-coverage"
path: ./build/${{ matrix.part }}.profile.out
upload-coverage-report:
runs-on: ubuntu-latest
needs: tests
steps:
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6.0.1
with:
PATTERNS: |
**/**.go
"!test/"
go.mod
go.sum
Makefile
- uses: actions/download-artifact@v2
with:
name: "${{ github.sha }}-00-coverage"
if: env.GIT_DIFF
- uses: actions/download-artifact@v2
with:
name: "${{ github.sha }}-01-coverage"
if: env.GIT_DIFF
- uses: actions/download-artifact@v2
with:
name: "${{ github.sha }}-02-coverage"
if: env.GIT_DIFF
- uses: actions/download-artifact@v2
with:
name: "${{ github.sha }}-03-coverage"
if: env.GIT_DIFF
- run: |
cat ./*profile.out | grep -v "mode: set" >> coverage.txt
if: env.GIT_DIFF
- uses: codecov/codecov-action@v2.1.0
with:
file: ./coverage.txt
if: env.GIT_DIFF


@@ -1,6 +0,0 @@
{
"retryOn429": true,
"retryCount": 5,
"fallbackRetryDelay": "30s",
"aliveStatusCodes": [200, 206, 503]
}


@@ -2,133 +2,6 @@
Friendly reminder: We have a [bug bounty program](https://hackerone.com/cosmos).
## v0.35.9
July 20, 2022
This release fixes a deadlock that could occur in some cases when using the
priority mempool with the ABCI socket client.
### BUG FIXES
- [mempool] [\#9030](https://github.com/tendermint/tendermint/pull/9030) rework lock discipline to mitigate callback deadlocks (@creachadair)
## v0.35.8
July 12, 2022
Special thanks to external contributors on this release: @joeabbey
This release fixes an unbounded heap growth issue in the implementation of the
priority mempool, as well as some configuration, logging, and peer dialing
improvements in the non-legacy p2p stack. It also adds a new opt-in
"simple-priority" value for the `p2p.queue-type` setting, that should improve
gossip performance for non-legacy peer networks.
### BREAKING CHANGES
- CLI/RPC/Config
- [node] [\#8902](https://github.com/tendermint/tendermint/pull/8902) Always start blocksync and avoid misconfiguration (@tychoish)
### FEATURES
- [cli] [\#8675](https://github.com/tendermint/tendermint/pull/8675) Add command to force compact goleveldb databases (@cmwaters)
### IMPROVEMENTS
- [p2p] [\#8914](https://github.com/tendermint/tendermint/pull/8914) [\#8875](https://github.com/tendermint/tendermint/pull/8875) Improvements to peer dialing (backported). (@tychoish)
- [p2p] [\#8820](https://github.com/tendermint/tendermint/pull/8820) add eviction metrics and cleanup dialing error handling (backport #8819) (@tychoish)
- [logging] [\#8896](https://github.com/tendermint/tendermint/pull/8896) Do not pre-process log results (backport #8895). (@tychoish)
- [p2p] [\#8956](https://github.com/tendermint/tendermint/pull/8956) Simpler priority queue (backport #8929). (@tychoish)
### BUG FIXES
- [mempool] [\#8944](https://github.com/tendermint/tendermint/pull/8944) Fix unbounded heap growth in the priority mempool. (@creachadair)
- [p2p] [\#8869](https://github.com/tendermint/tendermint/pull/8869) Set empty timeouts to configed values. (backport #8847). (@williambanfield)
## v0.35.7
June 16, 2022
### BUG FIXES
- [p2p] [\#8692](https://github.com/tendermint/tendermint/pull/8692) scale the number of stored peers by the configured maximum connections (#8684)
- [rpc] [\#8715](https://github.com/tendermint/tendermint/pull/8715) always close http bodies (backport #8712)
- [p2p] [\#8760](https://github.com/tendermint/tendermint/pull/8760) accept should not abort on first error (backport #8759)
### BREAKING CHANGES
- P2P Protocol
- [p2p] [\#8737](https://github.com/tendermint/tendermint/pull/8737) Introduce "inactive" peer label to avoid re-dialing incompatible peers. (@tychoish)
- [p2p] [\#8737](https://github.com/tendermint/tendermint/pull/8737) Increase frequency of dialing attempts to reduce latency for peer acquisition. (@tychoish)
- [p2p] [\#8737](https://github.com/tendermint/tendermint/pull/8737) Improvements to peer scoring and sorting to gossip a greater variety of peers during PEX. (@tychoish)
- [p2p] [\#8737](https://github.com/tendermint/tendermint/pull/8737) Track incoming and outgoing peers separately to ensure more peer slots open for incoming connections. (@tychoish)
## v0.35.6
June 3, 2022
### FEATURES
- [migrate] [\#8672](https://github.com/tendermint/tendermint/pull/8672) provide function for database production (backport #8614) (@tychoish)
### BUG FIXES
- [consensus] [\#8651](https://github.com/tendermint/tendermint/pull/8651) restructure peer catchup sleep (@tychoish)
- [pex] [\#8657](https://github.com/tendermint/tendermint/pull/8657) align max address thresholds (@cmwaters)
- [cmd] [\#8668](https://github.com/tendermint/tendermint/pull/8668) don't use global config for reset commands (@cmwaters)
- [p2p] [\#8681](https://github.com/tendermint/tendermint/pull/8681) shed peers from store from other networks (backport #8678) (@tychoish)
## v0.35.5
May 26, 2022
### BUG FIXES
- [p2p] [\#8371](https://github.com/tendermint/tendermint/pull/8371) fix setting in con-tracker (backport #8370) (@tychoish)
- [blocksync] [\#8496](https://github.com/tendermint/tendermint/pull/8496) validate block against state before persisting it to disk (@cmwaters)
- [statesync] [\#8494](https://github.com/tendermint/tendermint/pull/8494) avoid potential race (@tychoish)
- [keymigrate] [\#8467](https://github.com/tendermint/tendermint/pull/8467) improve filtering for legacy transaction hashes (backport #8466) (@creachadair)
- [rpc] [\#8594](https://github.com/tendermint/tendermint/pull/8594) fix encoding of block_results responses (@creachadair)
## v0.35.4
April 18, 2022
Special thanks to external contributors on this release: @firelizzard18
### FEATURES
- [cli] [\#8300](https://github.com/tendermint/tendermint/pull/8300) Add a tool to update old config files to the latest version [backport [\#8281](https://github.com/tendermint/tendermint/pull/8281)]. (@creachadair)
### IMPROVEMENTS
### BUG FIXES
- [cli] [\#8294](https://github.com/tendermint/tendermint/pull/8294) keymigrate: ensure block hash keys are correctly translated. (@creachadair)
- [cli] [\#8352](https://github.com/tendermint/tendermint/pull/8352) keymigrate: ensure transaction hash keys are correctly translated. (@creachadair)
## v0.35.3
April 8, 2022
### FEATURES
- [cli] [\#8081](https://github.com/tendermint/tendermint/pull/8081) add a safer-to-use `reset-state` command. (@marbar3778)
### IMPROVEMENTS
- [consensus] [\#8138](https://github.com/tendermint/tendermint/pull/8138) change lock handling in reactor and handleMsg for RoundState. (@williambanfield)
### BUG FIXES
- [cli] [\#8276](https://github.com/tendermint/tendermint/pull/8276) scmigrate: ensure target key is correctly renamed. (@creachadair)
## v0.35.2
February 28, 2022
@@ -221,7 +94,7 @@ Special thanks to external contributors on this release: @JayT106,
- [\#7106](https://github.com/tendermint/tendermint/pull/7106) Revert mutex change to ABCI Clients (@tychoish).
- [\#7142](https://github.com/tendermint/tendermint/pull/7142) mempool: remove panic when recheck-tx was not sent to ABCI application (@williambanfield).
- [consensus]: [\#7060](https://github.com/tendermint/tendermint/pull/7060) <!-- markdown-link-check-disable-line -->
- [consensus]: [\#7060](https://github.com/tendermint/tendermint/pull/7060)
wait until peerUpdates channel is closed to close remaining peers (@williambanfield)
- [privval] [\#5638](https://github.com/tendermint/tendermint/pull/5638) Increase read/write timeout to 5s and calculate ping interval based on it (@JoeKash)
- [evidence] [\#6375](https://github.com/tendermint/tendermint/pull/6375) Fix bug with inconsistent LightClientAttackEvidence hashing (cmwaters)
@@ -357,32 +230,6 @@ Special thanks to external contributors on this release: @JayT106,
- [cmd/tendermint/commands] [\#6623](https://github.com/tendermint/tendermint/pull/6623) replace `$HOME/.some/test/dir` with `t.TempDir` (@tanyabouman)
- [statesync] \#6807 Implement P2P state provider as an alternative to RPC (@cmwaters)
## v0.34.19
### BUG FIXES
- [cli] [\#8270](https://github.com/tendermint/tendermint/issues/8270) fix reset commands (@alexanderbez).
## v0.34.18
### BREAKING CHANGES
- CLI/RPC/Config
- [cli] [\#8258](https://github.com/tendermint/tendermint/pull/8258) Fix a bug in the cli that caused `unsafe-reset-all` to panic
## v0.34.17
### BREAKING CHANGES
- CLI/RPC/Config
- [cli] [\#8081](https://github.com/tendermint/tendermint/issues/8081) make the reset command safe to use (@marbar3778).
### BUG FIXES
- [consensus] [\#8079](https://github.com/tendermint/tendermint/issues/8079) start the timeout ticker before relay (backport #7844) (@creachadair).
- [consensus] [\#7992](https://github.com/tendermint/tendermint/issues/7992) [\#7994](https://github.com/tendermint/tendermint/issues/7994) change lock handling in handleMsg and reactor to alleviate issues gossiping during long ABCI calls (@williambanfield).
## v0.34.16
Special thanks to external contributors on this release: @yihuang
@@ -747,7 +594,7 @@ Special thanks to external contributors on this release: @james-ray, @fedekunze,
- [light] [\#5347](https://github.com/tendermint/tendermint/pull/5347) `NewClient`, `NewHTTPClient`, `VerifyHeader` and `VerifyLightBlockAtHeight` now accept `context.Context` as 1st param (@melekes)
- [merkle] [\#5193](https://github.com/tendermint/tendermint/pull/5193) `HashFromByteSlices` and `ProofsFromByteSlices` now return a hash for empty inputs, following RFC6962 (@erikgrinaker)
- [proto] [\#5025](https://github.com/tendermint/tendermint/pull/5025) All proto files have been moved to `/proto` directory. (@marbar3778)
- Using the recommended file layout from buf, [see here for more info](https://docs.buf.build/lint/rules) <!-- markdown-link-check-disable-line -->
- Using the recommended file layout from buf, [see here for more info](https://buf.build/docs/lint-checkers#file_layout)
- [rpc/client] [\#4947](https://github.com/tendermint/tendermint/pull/4947) `Validators`, `TxSearch` `page`/`per_page` params become pointers (@melekes)
- `UnconfirmedTxs` `limit` param is a pointer
- [rpc/jsonrpc/server] [\#5141](https://github.com/tendermint/tendermint/pull/5141) Remove `WriteRPCResponseArrayHTTP` (use `WriteRPCResponseHTTP` instead) (@melekes)
@@ -1527,7 +1374,7 @@ Special thanks to external contributors on this release: @jon-certik, @gracenoah
*August 28, 2019*
@climber73 wrote the [Writing a Tendermint Core application in Java
(gRPC)](https://github.com/tendermint/tendermint/blob/master/docs/guides/java.md) <!-- markdown-link-check-disable-line -->
(gRPC)](https://github.com/tendermint/tendermint/blob/master/docs/guides/java.md)
guide.
Special thanks to external contributors on this release:
@@ -2106,7 +1953,7 @@ more details.
- [rpc] [\#3269](https://github.com/tendermint/tendermint/issues/2826) Limit number of unique clientIDs with open subscriptions. Configurable via `rpc.max_subscription_clients`
- [rpc] [\#3269](https://github.com/tendermint/tendermint/issues/2826) Limit number of unique queries a given client can subscribe to at once. Configurable via `rpc.max_subscriptions_per_client`.
- [rpc] [\#3435](https://github.com/tendermint/tendermint/issues/3435) Default ReadTimeout and WriteTimeout changed to 10s. WriteTimeout can increased by setting `rpc.timeout_broadcast_tx_commit` in the config.
- [rpc/client] [\#3269](https://github.com/tendermint/tendermint/issues/3269) Update `EventsClient` interface to reflect new pubsub/eventBus API [ADR-33](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-033-pubsub.md). This includes `Subscribe`, `Unsubscribe`, and `UnsubscribeAll` methods.
- [rpc/client] [\#3269](https://github.com/tendermint/tendermint/issues/3269) Update `EventsClient` interface to reflect new pubsub/eventBus API [ADR-33](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-033-pubsub.md). This includes `Subscribe`, `Unsubscribe`, and `UnsubscribeAll` methods.
* Apps
- [abci] [\#3403](https://github.com/tendermint/tendermint/issues/3403) Remove `time_iota_ms` from BlockParams. This is a
@@ -2159,7 +2006,7 @@ more details.
- [blockchain] [\#3358](https://github.com/tendermint/tendermint/pull/3358) Fix timer leak in `BlockPool` (@guagualvcha)
- [cmd] [\#3408](https://github.com/tendermint/tendermint/issues/3408) Fix `testnet` command's panic when creating non-validator configs (using `--n` flag) (@srmo)
- [libs/db/remotedb/grpcdb] [\#3402](https://github.com/tendermint/tendermint/issues/3402) Close Iterator/ReverseIterator after use
- [libs/pubsub] [\#951](https://github.com/tendermint/tendermint/issues/951), [\#1880](https://github.com/tendermint/tendermint/issues/1880) Use non-blocking send when dispatching messages [ADR-33](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-033-pubsub.md)
- [libs/pubsub] [\#951](https://github.com/tendermint/tendermint/issues/951), [\#1880](https://github.com/tendermint/tendermint/issues/1880) Use non-blocking send when dispatching messages [ADR-33](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-033-pubsub.md)
- [lite] [\#3364](https://github.com/tendermint/tendermint/issues/3364) Fix `/validators` and `/abci_query` proxy endpoints
(@guagualvcha)
- [p2p/conn] [\#3347](https://github.com/tendermint/tendermint/issues/3347) Reject all-zero shared secrets in the Diffie-Hellman step of secret-connection
@@ -2220,7 +2067,7 @@ handshake by authenticating the NetAddress.ID of the peer we're dialing.
This release fixes yet another issue with the proposer selection algorithm.
We hope it's the last one, but we won't be surprised if it's not.
We plan to one day expose the selection algorithm more directly to
the application ([\#3285](https://github.com/tendermint/tendermint/issues/3285)), and even to support randomness ([\#763](https://github.com/tendermint/tendermint/issues/763)). <!-- markdown-link-check-disable-line -->
the application ([\#3285](https://github.com/tendermint/tendermint/issues/3285)), and even to support randomness ([\#763](https://github.com/tendermint/tendermint/issues/763)).
For more, see issues marked
[proposer-selection](https://github.com/tendermint/tendermint/labels/proposer-selection).
@@ -2863,7 +2710,7 @@ Special thanks to external contributors on this release:
This release is mostly about the ConsensusParams - removing fields and enforcing MaxGas.
It also addresses some issues found via security audit, removes various unused
functions from `libs/common`, and implements
[ADR-012](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-012-peer-transport.md).
[ADR-012](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-012-peer-transport.md).
BREAKING CHANGES:
@@ -2928,7 +2775,7 @@ are affected by a change.
A few more breaking changes are in the works - each will come with a clear
Architecture Decision Record (ADR) explaining the change. You can review ADRs
[here](https://github.com/tendermint/tendermint/tree/master/docs/architecture)
[here](https://github.com/tendermint/tendermint/tree/develop/docs/architecture)
or in the [open Pull Requests](https://github.com/tendermint/tendermint/pulls).
You can also check in on the [issues marked as
breaking](https://github.com/tendermint/tendermint/issues?q=is%3Aopen+is%3Aissue+label%3Abreaking).
@@ -2944,7 +2791,7 @@ BREAKING CHANGES:
- [abci] Added address of the original proposer of the block to Header
- [abci] Change ABCI Header to match Tendermint exactly
- [abci] [\#2159](https://github.com/tendermint/tendermint/issues/2159) Update use of `Validator` (see
[ADR-018](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-018-ABCI-Validators.md)):
[ADR-018](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-018-ABCI-Validators.md)):
- Remove PubKey from `Validator` (so it's just Address and Power)
- Introduce `ValidatorUpdate` (with just PubKey and Power)
- InitChain and EndBlock use ValidatorUpdate
@@ -2966,7 +2813,7 @@ BREAKING CHANGES:
- [state] [\#1815](https://github.com/tendermint/tendermint/issues/1815) Validator set changes are now delayed by one block (!)
- Add NextValidatorSet to State, changes on-disk representation of state
- [state] [\#2184](https://github.com/tendermint/tendermint/issues/2184) Enforce ConsensusParams.BlockSize.MaxBytes (See
[ADR-020](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-020-block-size.md)).
[ADR-020](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-020-block-size.md)).
- Remove ConsensusParams.BlockSize.MaxTxs
- Introduce maximum sizes for all components of a block, including ChainID
- [types] Updates to the block Header:
@@ -2977,7 +2824,7 @@ BREAKING CHANGES:
- [consensus] [\#2203](https://github.com/tendermint/tendermint/issues/2203) Implement BFT time
- Timestamp in block must be monotonic and equal the median of timestamps in block's LastCommit
- [crypto] [\#2239](https://github.com/tendermint/tendermint/issues/2239) Secp256k1 signature changes (See
[ADR-014](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-014-secp-malleability.md)):
[ADR-014](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-014-secp-malleability.md)):
- format changed from DER to `r || s`, both little endian encoded as 32 bytes.
- malleability removed by requiring `s` to be in canonical form.
@@ -3207,7 +3054,7 @@ BREAKING CHANGES:
FEATURES
- [cmd] Added metrics (served under `/metrics` using a Prometheus client;
disabled by default). See the new `instrumentation` section in the config and
[metrics](https://github.com/tendermint/tendermint/blob/master/docs/nodes/metrics.md)
[metrics](https://tendermint.readthedocs.io/projects/tools/en/develop/metrics.html)
guide.
- [p2p] Add IPv6 support to peering.
- [p2p] Add `external_address` to config to allow specifying the address for
@@ -3321,7 +3168,7 @@ BREAKING:
FEATURES
- [rpc] the RPC documentation is now published to `https://tendermint.github.io/slate`
- [rpc] the RPC documentation is now published to https://tendermint.github.io/slate
- [p2p] AllowDuplicateIP config option to refuse connections from same IP.
- true by default for now, false by default in next breaking release
- [docs] Add docs for query, tx indexing, events, pubsub
@@ -3799,7 +3646,7 @@ containing substructs: `BaseConfig`, `P2PConfig`, `MempoolConfig`, `ConsensusCon
- Change some [function and method signatures](https://gist.github.com/ebuchman/640d5fc6c2605f73497992fe107ebe0b) to accommodate new config
- Logger
- Replace static `log15` logger with a simple interface, and provide a new implementation using `go-kit`. <!-- markdown-link-check-disable-next-line -->
- Replace static `log15` logger with a simple interface, and provide a new implementation using `go-kit`.
See our new [logging library](https://github.com/tendermint/tmlibs/log) and [blog post](https://tendermint.com/blog/abstracting-the-logger-interface-in-go) for more details
- Levels `warn` and `notice` are removed (you may need to change them in your `config.toml`!)
- Change some [function and method signatures](https://gist.github.com/ebuchman/640d5fc6c2605f73497992fe107ebe0b) to accept a logger


@@ -19,31 +19,18 @@ Special thanks to external contributors on this release:
- [rpc] \#7713 Remove unused options for websocket clients. (@creachadair)
- [config] \#7930 Add new event subscription options and defaults. (@creachadair)
- [rpc] \#7982 Add new Events interface and deprecate Subscribe. (@creachadair)
- [cli] \#8081 make the reset command safe to use by introducing `reset-state` command. Fixed by \#8259. (@marbar3778, @cmwaters)
- [config] \#8222 default indexer configuration to null. (@creachadair)
- [rpc] \#8570 rework timeouts to be per-method instead of global. (@creachadair)
- [rpc] \#8624 deprecate `broadcast_tx_commit`, `broadcast_tx_sync`, and `broadcast_tx_async` in favor of `broadcast_tx`. (@tychoish)
- [config] \#8654 remove deprecated `seeds` field from config. Users should switch to `bootstrap-peers` instead. (@cmwaters)
- [cli] \#8081 make the reset command safe to use. (@marbar3778)
- Apps
- [tendermint/spec] \#7804 Migrate spec from [spec repo](https://github.com/tendermint/spec).
- [abci] \#7984 Remove the locks preventing concurrent use of ABCI applications by Tendermint. (@tychoish)
- [abci] \#8605 Remove info, log, events, gasUsed and mempoolError fields from ResponseCheckTx as they are not used by Tendermint. (@jmalicevic)
- [abci] \#8664 Move `app_hash` parameter from `Commit` to `FinalizeBlock`. (@sergio-mena)
- [abci] \#8656 Added cli command for `PrepareProposal`. (@jmalicevic)
- [sink/psql] \#8637 tx_results emitted from psql sink are now json encoded, previously they were protobuf encoded
- [abci] \#8901 Added cli command for `ProcessProposal`. (@hvanz)
- P2P Protocol
- [p2p] \#7035 Remove legacy P2P routing implementation and associated configuration options. (@tychoish)
- [p2p] \#7265 Peer manager reduces peer score for each failed dial attempts for peers that have not successfully dialed. (@tychoish)
- [p2p] [\#7594](https://github.com/tendermint/tendermint/pull/7594) always advertise self, to enable mutual address discovery. (@altergui)
- [p2p] \#8737 Introduce "inactive" peer label to avoid re-dialing incompatible peers. (@tychoish)
- [p2p] \#8737 Increase frequency of dialing attempts to reduce latency for peer acquisition. (@tychoish)
- [p2p] \#8737 Improvements to peer scoring and sorting to gossip a greater variety of peers during PEX. (@tychoish)
- [p2p] \#8737 Track incoming and outgoing peers separately to ensure more peer slots open for incoming connections. (@tychoish)
- Go API
@@ -59,7 +46,6 @@ Special thanks to external contributors on this release:
- [libs/service] \#7288 Remove SetLogger method on `service.Service` interface. (@tychoish)
- [abci/client] \#7607 Simplify client interface (removes most "async" methods). (@creachadair)
- [libs/json] \#7673 Remove the libs/json (tmjson) library. (@creachadair)
- [crypto] \#8412 \#8432 Remove `crypto/tmhash` package in favor of small functions in `crypto` package and cleanup of unused functions. (@tychoish)
- Blockchain Protocol
@@ -69,16 +55,12 @@ Special thanks to external contributors on this release:
- [rpc] [\#7701] Add `ApplicationInfo` to `status` rpc call which contains the application version. (@jonasbostoen)
- [cli] [#7033](https://github.com/tendermint/tendermint/pull/7033) Add a `rollback` command to rollback to the previous tendermint state in the event of non-deterministic app hash or reverting an upgrade.
- [mempool, rpc] \#7041 Add removeTx operation to the RPC layer. (@tychoish)
- [consensus] \#7354 add a new `synchrony` field to the `ConsensusParams` struct for controlling the parameters of the proposer-based timestamp algorithm. (@williambanfield)
- [consensus] \#7354 add a new `synchrony` field to the `ConsensusParameter` struct for controlling the parameters of the proposer-based timestamp algorithm. (@williambanfield)
- [consensus] \#7376 Update the proposal logic per the proposer-based timestamps specification so that the proposer will wait for the previous block time to occur before proposing the next block. (@williambanfield)
- [consensus] \#7391 Use the proposed block timestamp as the proposal timestamp. Update the block validation logic to ensure that the proposed block's timestamp matches the timestamp in the proposal message. (@williambanfield)
- [consensus] \#7415 Update proposal validation logic to Prevote nil if a proposal does not meet the conditions for Timeliness per the proposer-based timestamp specification. (@anca)
- [consensus] \#7382 Update block validation to no longer require the block timestamp to be the median of the timestamps of the previous commit. (@anca)
- [consensus] \#7711 Use the proposer timestamp for the first height instead of the genesis time. Chains will still start consensus at the genesis time. (@anca)
- [cli] \#8281 Add a tool to update old config files to the latest version. (@creachadair)
- [consensus] \#8514 move `RecheckTx` from the local node mempool config to a global `ConsensusParams` field in `BlockParams` (@cmwaters)
- [abci] ABCI++ [specified](https://github.com/tendermint/tendermint/tree/master/spec/abci%2B%2B). (@sergio-mena, @cmwaters, @josef-widder)
- [abci] ABCI++ [implemented](https://github.com/orgs/tendermint/projects/9). (@williambanfield, @thanethomson, @sergio-mena)
### IMPROVEMENTS
@@ -98,7 +80,3 @@ Special thanks to external contributors on this release:
- [light] \#7640 Light Client: fix absence proof verification (@ashcherbakov)
- [light] \#7641 Light Client: fix querying against the latest height (@ashcherbakov)
- [cli] [#7837](https://github.com/tendermint/tendermint/pull/7837) fix app hash in state rollback. (@yihuang)
- [cli] \#8276 scmigrate: ensure target key is correctly renamed. (@creachadair)
- [cli] \#8294 keymigrate: ensure block hash keys are correctly translated. (@creachadair)
- [cli] \#8352 keymigrate: ensure transaction hash keys are correctly translated. (@creachadair)
- (indexer) \#8625 Fix overriding tx index of duplicated txs.


@@ -20,7 +20,7 @@ This code of conduct applies to all projects run by the Tendermint/COSMOS team a
* Please keep unstructured critique to a minimum. If you have solid ideas you want to experiment with, make a fork and see how it works.
* We will exclude you from interaction if you insult, demean or harass anyone. That is not welcome behaviour. We interpret the term “harassment” as including the definition in the [Citizen Code of Conduct](https://github.com/stumpsyn/policies/blob/master/citizen_code_of_conduct.md); if you have any lack of clarity about what might be included in that concept, please read their definition. In particular, we don't tolerate behavior that excludes people in socially marginalized groups.
* We will exclude you from interaction if you insult, demean or harass anyone. That is not welcome behaviour. We interpret the term “harassment” as including the definition in the [Citizen Code of Conduct](http://citizencodeofconduct.org/); if you have any lack of clarity about what might be included in that concept, please read their definition. In particular, we don't tolerate behavior that excludes people in socially marginalized groups.
* Private harassment is also unacceptable. No matter who you are, if you feel you have been or are being harassed or made uncomfortable by a community member, please contact one of the channel admins or the person mentioned above immediately. Whether you're a regular contributor or a newcomer, we care about making this community a safe place for you and we've got your back.

DOCKER/.gitignore vendored Normal file (1 changed line)

@@ -0,0 +1 @@
tendermint


@@ -2,7 +2,7 @@
FROM golang:1.17-alpine as builder
RUN apk update && \
apk upgrade && \
apk --no-cache add make git
apk --no-cache add make
COPY / /tendermint
WORKDIR /tendermint
RUN make build-linux
@@ -53,3 +53,4 @@ CMD ["start"]
# Expose the data directory as a volume since there's mutable state in there
VOLUME [ "$TMHOME" ]


@@ -0,0 +1,28 @@
FROM amazonlinux:2
RUN yum -y update && \
yum -y install wget
RUN wget http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm && \
rpm -ivh epel-release-latest-7.noarch.rpm
RUN yum -y groupinstall "Development Tools"
RUN yum -y install leveldb-devel which
ENV GOVERSION=1.16.5
RUN cd /tmp && \
wget https://dl.google.com/go/go${GOVERSION}.linux-amd64.tar.gz && \
tar -C /usr/local -xf go${GOVERSION}.linux-amd64.tar.gz && \
mkdir -p /go/src && \
mkdir -p /go/bin
ENV PATH=$PATH:/usr/local/go/bin:/go/bin
ENV GOBIN=/go/bin
ENV GOPATH=/go/src
RUN mkdir -p /tendermint
WORKDIR /tendermint
CMD ["/usr/bin/make", "build", "TENDERMINT_BUILD_OPTIONS=cleveldb"]

DOCKER/Dockerfile.testing Normal file (16 changed lines)

@@ -0,0 +1,16 @@
FROM golang:latest
# Grab deps (jq, hexdump, xxd, killall)
RUN apt-get update && \
apt-get install -y --no-install-recommends \
jq bsdmainutils vim-common psmisc netcat
# Add testing deps for curl
RUN echo 'deb http://httpredir.debian.org/debian testing main non-free contrib' >> /etc/apt/sources.list && \
apt-get update && \
apt-get install -y --no-install-recommends curl
VOLUME /go
EXPOSE 26656
EXPOSE 26657

DOCKER/Makefile Normal file (13 changed lines)

@@ -0,0 +1,13 @@
build:
@sh -c "'$(CURDIR)/build.sh'"
push:
@sh -c "'$(CURDIR)/push.sh'"
build_testing:
docker build --tag tendermint/testing -f ./Dockerfile.testing .
build_amazonlinux_buildimage:
docker build -t "tendermint/tendermint:build_c-amazonlinux" -f Dockerfile.build_c-amazonlinux .
.PHONY: build push build_testing build_amazonlinux_buildimage


@@ -8,7 +8,7 @@ Official releases can be found [here](https://github.com/tendermint/tendermint/r
The Dockerfile for tendermint is not expected to change in the near future. The master file used for all builds can be found [here](https://raw.githubusercontent.com/tendermint/tendermint/master/DOCKER/Dockerfile).
Respective versioned files can be found at `https://raw.githubusercontent.com/tendermint/tendermint/vX.XX.XX/DOCKER/Dockerfile` (replace the Xs with the version number).
Respective versioned files can be found <https://raw.githubusercontent.com/tendermint/tendermint/vX.XX.XX/DOCKER/Dockerfile> (replace the Xs with the version number).
## Quick reference

DOCKER/build.sh Executable file (20 changed lines)

@@ -0,0 +1,20 @@
#!/usr/bin/env bash
set -e
# Get the tag from the version, or try to figure it out.
if [ -z "$TAG" ]; then
TAG=$(awk -F\" '/TMCoreSemVer =/ { print $2; exit }' < ../version/version.go)
fi
if [ -z "$TAG" ]; then
echo "Please specify a tag."
exit 1
fi
TAG_NO_PATCH=${TAG%.*}
read -p "==> Build 3 docker images with the following tags (latest, $TAG, $TAG_NO_PATCH)? y/n" -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]
then
docker build -t "tendermint/tendermint" -t "tendermint/tendermint:$TAG" -t "tendermint/tendermint:$TAG_NO_PATCH" .
fi
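
A hypothetical invocation of the build.sh script above; the version shown is a placeholder, since by default the script reads the tag from ../version/version.go:

```sh
cd DOCKER
# 0.35.4 is a made-up example; TAG_NO_PATCH becomes 0.35 and the script prompts before building.
TAG=0.35.4 ./build.sh
```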

DOCKER/push.sh Executable file (22 changed lines)

@@ -0,0 +1,22 @@
#!/usr/bin/env bash
set -e
# Get the tag from the version, or try to figure it out.
if [ -z "$TAG" ]; then
TAG=$(awk -F\" '/TMCoreSemVer =/ { print $2; exit }' < ../version/version.go)
fi
if [ -z "$TAG" ]; then
echo "Please specify a tag."
exit 1
fi
TAG_NO_PATCH=${TAG%.*}
read -p "==> Push 3 docker images with the following tags (latest, $TAG, $TAG_NO_PATCH)? y/n" -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]
then
docker push "tendermint/tendermint:latest"
docker push "tendermint/tendermint:$TAG"
docker push "tendermint/tendermint:$TAG_NO_PATCH"
fi


@@ -77,6 +77,9 @@ $(BUILDDIR)/:
###############################################################################
check-proto-deps:
ifeq (,$(shell which buf))
$(error "buf is required for Protobuf building, linting and breakage checking. See https://docs.buf.build/installation for installation instructions.")
endif
ifeq (,$(shell which protoc-gen-gogofaster))
$(error "gogofaster plugin for protoc is required. Run 'go install github.com/gogo/protobuf/protoc-gen-gogofaster@latest' to install")
endif
@@ -90,7 +93,7 @@ endif
proto-gen: check-proto-deps
@echo "Generating Protobuf files"
@go run github.com/bufbuild/buf/cmd/buf generate
@buf generate
@mv ./proto/tendermint/abci/types.pb.go ./abci/types/
.PHONY: proto-gen
@@ -98,7 +101,7 @@ proto-gen: check-proto-deps
# execution only.
proto-lint: check-proto-deps
@echo "Linting Protobuf files"
@go run github.com/bufbuild/buf/cmd/buf lint
@buf lint
.PHONY: proto-lint
proto-format: check-proto-format-deps
@@ -111,9 +114,15 @@ proto-check-breaking: check-proto-deps
@echo "Note: This is only useful if your changes have not yet been committed."
@echo " Otherwise read up on buf's \"breaking\" command usage:"
@echo " https://docs.buf.build/breaking/usage"
@go run github.com/bufbuild/buf/cmd/buf breaking --against ".git"
@buf breaking --against ".git"
.PHONY: proto-check-breaking
# TODO: Should be removed when work on ABCI++ is complete.
# For more information, see https://github.com/tendermint/tendermint/issues/8066
abci-proto-gen:
./scripts/abci-gen.sh
.PHONY: abci-proto-gen
###############################################################################
### Build ABCI ###
###############################################################################
@@ -169,7 +178,7 @@ go.sum: go.mod
draw_deps:
@# requires brew install graphviz or apt-get install graphviz
go install github.com/RobotsAndPencils/goviz@latest
go get github.com/RobotsAndPencils/goviz
@goviz -i github.com/tendermint/tendermint/cmd/tendermint -d 3 | dot -Tpng -o dependency-graph.png
.PHONY: draw_deps
@@ -223,8 +232,7 @@ DESTINATION = ./index.html.md
build-docs:
@cd docs && \
while read -r branch path_prefix; do \
( git checkout $${branch} && npm ci --quiet && \
VUEPRESS_BASE="/$${path_prefix}/" NODE_OPTIONS="--openssl-legacy-provider" npm run build --quiet ) ; \
(git checkout $${branch} && npm ci && VUEPRESS_BASE="/$${path_prefix}/" npm run build) ; \
mkdir -p ~/output/$${path_prefix} ; \
cp -r .vuepress/dist/* ~/output/$${path_prefix}/ ; \
cp ~/output/$${path_prefix}/index.html ~/output ; \
@@ -235,8 +243,10 @@ build-docs:
### Docker image ###
###############################################################################
build-docker:
build-docker: build-linux
cp $(BUILDDIR)/tendermint DOCKER/tendermint
docker build --label=tendermint --tag="tendermint/tendermint" -f DOCKER/Dockerfile .
rm -rf DOCKER/tendermint
.PHONY: build-docker
@@ -248,21 +258,6 @@ mockery:
go generate -run="./scripts/mockery_generate.sh" ./...
.PHONY: mockery
###############################################################################
### Metrics ###
###############################################################################
metrics: testdata-metrics
go generate -run="scripts/metricsgen" ./...
.PHONY: metrics
# By convention, the go tool ignores subdirectories of directories named
# 'testdata'. This command invokes the generate command on the folder directly
# to avoid this.
testdata-metrics:
ls ./scripts/metricsgen/testdata | xargs -I{} go generate -run="scripts/metricsgen" ./scripts/metricsgen/testdata/{}
.PHONY: testdata-metrics
###############################################################################
### Local testnet using docker ###
###############################################################################
@@ -348,3 +343,4 @@ split-test-packages:$(BUILDDIR)/packages.txt
split -d -n l/$(NUM_SPLIT) $< $<.
test-group-%:split-test-packages
cat $(BUILDDIR)/packages.txt.$* | xargs go test -mod=readonly -timeout=5m -race -coverprofile=$(BUILDDIR)/$*.profile.out
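
The split-test-packages and test-group-% targets above are what the unit-test workflow earlier in this comparison invokes. A small usage sketch; the group number and NUM_SPLIT=6 mirror that workflow and are not fixed by the Makefile itself:

```sh
# Split the package list into 6 chunks and run the third chunk with race detection and coverage.
make test-group-02 NUM_SPLIT=6
# The profile lands in build/02.profile.out, matching the artifact paths uploaded in CI.
```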


@@ -36,17 +36,20 @@ Please do not depend on master as your production branch. Use [releases](https:/
Tendermint has been used in production in private and public environments, most notably the blockchains of the Cosmos Network. We haven't released v1.0 yet since we are making breaking changes to the protocol and the APIs.
See below for more details about [versioning](#versioning).
In any case, if you intend to run Tendermint in production, we're happy to help.
You can contact us [over email](mailto:hello@newtendermint.org) or [join the
chat](https://discord.gg/gnoland).
In any case, if you intend to run Tendermint in production, we're happy to help. You can
contact us [over email](mailto:hello@interchain.io) or [join the chat](https://discord.gg/cosmosnetwork).
More on how releases are conducted can be found [here](./RELEASES.md).
## Security
To report a security vulnerability, please [email us](mailto:security@newtendermint.org).
To report a security vulnerability, see our [bug bounty
program](https://hackerone.com/cosmos).
For examples of the kinds of bugs we're looking for, see [our security policy](SECURITY.md).
We also maintain a dedicated mailing list for security updates. We will only ever use this mailing list
to notify you of vulnerabilities and fixes in Tendermint Core. You can subscribe [here](http://eepurl.com/gZ5hQD).
## Minimum requirements
| Requirement | Notes |
@@ -136,9 +139,9 @@ We keep a public up-to-date version of our roadmap [here](./docs/roadmap/roadmap
## Join us!
The development of Tendermint Core was led primarily by All in Bits, Inc. The
Tendermint trademark is owned by New Tendermint, LLC. If you'd like to work
full-time on Tendermint2 or [gno.land](https://gno.land), [we're
hiring](mailto:hiring@newtendermint.org)!
Tendermint Core is maintained by [Interchain GmbH](https://interchain.berlin).
If you'd like to work full-time on Tendermint Core, [we're hiring](https://interchain-gmbh.breezy.hr/)!
Funding for Tendermint Core development comes primarily from the [Interchain Foundation](https://interchain.io),
a Swiss non-profit. The Tendermint trademark is owned by [Tendermint Inc.](https://tendermint.com), the for-profit entity
that also maintains [tendermint.com](https://tendermint.com).


@@ -1,9 +1,8 @@
# Releases
Tendermint uses modified [semantic versioning](https://semver.org/) with each
release following a `vX.Y.Z` format. Tendermint is currently on major version
0 and uses the minor version to signal breaking changes. The `master` branch is
used for active development and thus it is not advisable to build against it.
Tendermint uses [semantic versioning](https://semver.org/) with each release following
a `vX.Y.Z` format. The `master` branch is used for active development and thus it's
advisable not to build against it.
The latest changes are always initially merged into `master`.
Releases are specified using tags and are built from long-lived "backport" branches
@@ -30,8 +29,8 @@ merging the pull request.
### Creating a backport branch
If this is the first release candidate for a minor version release, e.g.
v0.25.0, you get to have the honor of creating the backport branch!
If this is the first release candidate for a major release, you get to have the
honor of creating the backport branch!
Note that, after creating the backport branch, you'll also need to update the
tags on `master` so that `go mod` is able to order the branches correctly. You
@@ -78,8 +77,7 @@ the 0.35.x line.
After doing these steps, go back to `master` and do the following:
1. Tag `master` as the dev branch for the _next_ minor version release and push
it up to GitHub.
1. Tag `master` as the dev branch for the _next_ major release and push it up to GitHub.
For example:
```sh
git tag -a v0.36.0-dev -m "Development base for Tendermint v0.36."
@@ -101,7 +99,7 @@ After doing these steps, go back to `master` and do the following:
## Release candidates
Before creating an official release, especially a minor release, we may want to create a
Before creating an official release, especially a major release, we may want to create a
release candidate (RC) for our friends and partners to test out. We use git tags to
create RCs, and we build them off of backport branches.
@@ -111,7 +109,7 @@ Tags for RCs should follow the "standard" release naming conventions, with `-rcX
(Note that branches and tags _cannot_ have the same names, so it's important that these branches
have distinct names from the tags/release names.)
If this is the first RC for a minor release, you'll have to make a new backport branch (see above).
If this is the first RC for a major release, you'll have to make a new backport branch (see above).
Otherwise:
1. Start from the backport branch (e.g. `v0.35.x`).
@@ -142,13 +140,11 @@ Note that this process should only be used for "true" RCs--
release candidates that, if successful, will be the next release.
For more experimental "RCs," create a new, short-lived branch and tag that instead.
## Minor release
## Major release
This minor release process assumes that this release was preceded by release candidates.
This major release process assumes that this release was preceded by release candidates.
If there were no release candidates, begin by creating a backport branch, as described above.
Before performing these steps, be sure the [Minor Release Checklist](#minor-release-checklist) has been completed.
1. Start on the backport branch (e.g. `v0.35.x`)
2. Run integration tests (`make test_integrations`) and the e2e nightlies.
3. Prepare the release:
@@ -180,16 +176,16 @@ Before performing these steps, be sure the [Minor Release Checklist](#minor-rele
- Commit these changes to `master` and backport them into the backport
branch for this release.
## Patch release
## Minor release (point releases)
Patch releases are done differently from minor releases: They are built off of
Minor releases are done differently from major releases: They are built off of
long-lived backport branches, rather than from master. As non-breaking changes
land on `master`, they should also be backported into these backport branches.
Patch releases don't have release candidates by default, although any tricky
Minor releases don't have release candidates by default, although any tricky
changes may merit a release candidate.
To create a patch release:
To create a minor release:
1. Checkout the long-lived backport branch: `git checkout v0.35.x`
2. Run integration tests (`make test_integrations`) and the nightlies.
@@ -201,143 +197,11 @@ To create a patch release:
- Bump the TMDefaultVersion in `version.go`
- Bump the ABCI version number, if necessary.
(Note that ABCI follows semver, and that ABCI versions are the only versions
which can change during patch releases, and only field additions are valid patch changes.)
which can change during minor releases, and only field additions are valid minor changes.)
4. Open a PR with these changes that will land them back on `v0.35.x`
5. Once this change has landed on the backport branch, make sure to pull it locally, then push a tag.
- `git tag -a v0.35.1 -m 'Release v0.35.1'`
- `git push origin v0.35.1`
6. Create a pull request back to master with the CHANGELOG & version changes from the latest release.
- Remove all `R:patch` labels from the pull requests that were included in the release.
- Remove all `R:minor` labels from the pull requests that were included in the release.
- Do not merge the backport branch into master.
## Minor Release Checklist
The following set of steps is performed on all releases that increment the
_minor_ version, e.g. v0.25 to v0.26. These steps ensure that Tendermint is
well tested, stable, and suitable for adoption by the various diverse projects
that rely on Tendermint.
### Feature Freeze
Ahead of any minor version release of Tendermint, the software enters 'Feature
Freeze' for at least two weeks. A feature freeze means that _no_ new features
are added to the code being prepared for release. No code changes should be made
to the code being released that do not directly improve pressing issues of code
quality. The following must not be merged during a feature freeze:
* Refactors that are not related to specific bug fixes.
* Dependency upgrades.
* New test code that does not test a discovered regression.
* New features of any kind.
* Documentation or spec improvements that are not related to the newly developed
code.
This period directly follows the creation of the [backport
branch](#creating-a-backport-branch). The Tendermint team instead directs all
attention to ensuring that the existing code is stable and reliable. Broken
tests are fixed, flaky tests are remedied, end-to-end test failures are
thoroughly diagnosed and all efforts of the team are aimed at improving the
quality of the code. During this period, the upgrade harness tests are run
repeatedly and a variety of in-house testnets are run to ensure Tendermint
functions at the scale it will be used by application developers and node
operators.
### Nightly End-To-End Tests
The Tendermint team maintains [a set of end-to-end
tests](https://github.com/tendermint/tendermint/blob/master/test/e2e/README.md#L1)
that run each night on the latest commit of the project and on the code in the
tip of each supported backport branch. These tests start a network of containerized
Tendermint processes and run automated checks that the network functions as
expected in both stable and unstable conditions. During the feature freeze,
these tests are run nightly and must pass consistently for a release of
Tendermint to be considered stable.
### Upgrade Harness
> TODO(williambanfield): Change to past tense and clarify this section once
> upgrade harness is complete.
The Tendermint team is creating an upgrade test harness to exercise the
workflow of stopping an instance of Tendermint running one version of the
software and starting up the same application running the next version. To
support upgrade testing, we will add the ability to terminate the Tendermint
process at specific pre-defined points in its execution so that we can verify
upgrades work in a representative sample of stop conditions.
### Large Scale Testnets
The Tendermint end-to-end tests run a small network (~10s of nodes) to exercise
basic consensus interactions. Real world deployments of Tendermint often have over
a hundred nodes just in the validator set, with many others acting as full
nodes and sentry nodes. To gain more assurance before a release, we will also run
larger-scale test networks to shake out emergent behaviors at scale.
Large-scale test networks are run on a set of virtual machines (VMs). Each VM
is equipped with 4 Gigabytes of RAM and 2 CPU cores. The network runs a very
simple key-value store application. The application adds artificial delays to
different ABCI calls to simulate a slow application. Each testnet is briefly
run with no load being generated to collect a performance baseline. Once the
baseline is captured, a consistent load is applied across the network. This
load takes the form of 10% of the running nodes each receiving a consistent
stream of two hundred transactions per minute.
During each test net, the following metrics are monitored and collected on each
node:
* Consensus rounds per height
* Maximum connected peers, Minimum connected peers, Rate of change of peer connections
* Memory resident set size
* CPU utilization
* Blocks produced per minute
* Seconds for each step of consensus (Propose, Prevote, Precommit, Commit)
* Latency to receive block proposals
For these tests we intentionally target low-powered host machines (with low core
counts and limited memory) to ensure we observe similar kinds of resource contention
and limitation that real-world deployments of Tendermint experience in production.
#### 200 Node Testnet
To test the stability and performance of Tendermint in a real world scenario,
a 200 node test network is run. The network comprises 5 seed nodes, 100
validators and 95 non-validating full nodes. All nodes begin by dialing
a subset of the seed nodes to discover peers. The network is run for several
days, with metrics being collected continuously. In cases of changes to performance
critical systems, testnets of larger sizes should be considered.
#### Rotating Node Testnet
Real-world deployments of Tendermint frequently see new nodes arrive and old
nodes exit the network. The rotating node testnet ensures that Tendermint is
able to handle this reliably. In this test, a network with 10 validators and
3 seed nodes is started. A rolling set of 25 full nodes is started, and each
connects to the network by dialing one of the seed nodes. Once a node is able
to blocksync to the head of the chain and begins producing blocks using
Tendermint consensus, it is stopped. Once stopped, a new node is started and
takes its place. This network is run for several days.
#### Network Partition Testnet
Tendermint is expected to recover from network partitions. During a partition where no
subset of the nodes is left with a super-majority of the stake, the network is expected to
stop making blocks. Upon alleviation of the partition, the network is expected
to once again become fully connected and capable of producing blocks. The
network partition testnet ensures that Tendermint is able to handle this
reliably at scale. In this test, a network with 100 validators and 95 full
nodes is started. All validators have equal stake. Once the network is
producing blocks, a set of firewall rules is deployed to create a partitioned
network with 50% of the stake on one side and 50% on the other. Once the
network stops producing blocks, the firewall rules are removed and the nodes
are monitored to ensure they reconnect and that the network again begins
producing blocks.
#### Absent Stake Testnet
Tendermint networks often run with _some_ portion of the voting power offline.
The absent stake testnet ensures that large networks are able to handle this
reliably. A set of 150 validator nodes and three seed nodes is started. The set
of 150 validators is configured to only possess a cumulative stake of 67% of
the total stake. The remaining 33% of the stake is configured to belong to
a validator that is never actually run in the test network. The network is run
for multiple days, ensuring that it is able to produce blocks without issue.

View File

@@ -6,28 +6,9 @@ This guide provides instructions for upgrading to specific versions of Tendermin
### ABCI Changes
### ResponseCheckTx Parameter Change
`ResponseCheckTx` had fields that are not used by Tendermint; they have now been removed.
In 0.36, we removed the following fields from `ResponseCheckTx`: `Log`, `Info`, `Events`,
`GasUsed`, and `MempoolError`.
`MempoolError` was used to signal to operators that a transaction was rejected from the mempool
by Tendermint itself. Right now, we return a regular error when this happens.
#### ABCI++
For information on how ABCI++ works, see the
[Specification](spec/abci%2B%2B/README.md).
In particular, the simplest way to upgrade your application is described
[here](spec/abci%2B%2B/abci++_tmint_expected_behavior.md#adapting-existing-applications-that-use-abci).
#### Moving the `app_hash` parameter
The Application's hash (or any data representing the Application's current
state) is known by the time `FinalizeBlock` finishes its execution.
Accordingly, the `app_hash` parameter has been moved from `ResponseCommit` to
`ResponseFinalizeBlock`, since it makes sense for the Application to return
this value as soon as it is known.
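To illustrate the parameter move, here is a minimal sketch of an application returning its
hash from `FinalizeBlock`. The `App` type and its `stateHash` helper are hypothetical; only
the placement of the `AppHash` field follows from the description above.

```go
package myapp

import (
	abci "github.com/tendermint/tendermint/abci/types"
)

// App is a hypothetical application embedding the ABCI base implementation.
type App struct {
	abci.BaseApplication

	state []byte // placeholder application state
}

func (a *App) FinalizeBlock(req abci.RequestFinalizeBlock) abci.ResponseFinalizeBlock {
	// ... execute the block's transactions against a.state ...
	return abci.ResponseFinalizeBlock{
		// The application hash is now returned here instead of in ResponseCommit.
		AppHash: a.stateHash(),
	}
}

// stateHash is a hypothetical helper that derives a hash of the application state.
func (a *App) stateHash() []byte { return a.state }
```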
Coming soon...
#### ABCI Mutex
@@ -45,36 +26,6 @@ application concern so be very sure to test the application thoroughly
using realistic workloads and the race detector to ensure your
application remains correct.
### Config Changes
- We have added a new, experimental tool to help operators migrate
configuration files created by previous versions of Tendermint.
To try this tool, run:
```shell
# Install the tool.
go install github.com/tendermint/tendermint/scripts/confix@latest
# Run the tool with the old configuration file as input.
# Replace the -config argument with your path.
confix -config ~/.tendermint/config/config.toml -out updated.toml
```
This tool should be able to update configurations from v0.34 and v0.35. We
plan to extend it to handle older configuration files in the future. For now,
it will report an error (without making any changes) if it does not recognize
the version that created the file.
- The default configuration for a newly-created node now disables indexing for
ABCI event metadata. Existing node configurations that already have indexing
turned on are not affected. Operators who wish to enable indexing for a new
node, however, must now edit the `config.toml` explicitly.
- The function of seed nodes was modified in the previous release. Seed nodes
are now treated identically to any other peer, except that they only run the PEX
reactor. Because of this, `seeds` has been removed from the config. Users
should add any seed nodes to the list of `bootstrap-peers`.
### RPC Changes
Tendermint v0.36 adds a new RPC event subscription API. The existing event
@@ -112,109 +63,6 @@ callback.
For more detailed information, see [ADR 075](https://tinyurl.com/adr075) which
defines and describes the new API in detail.
#### BroadcastTx Methods
All callers should use the new `broadcast_tx` method, which has the
same semantics as the legacy `broadcast_tx_sync` method. The
`broadcast_tx_sync` and `broadcast_tx_async` methods are now
deprecated and will be removed in 0.37.
Additionally, the `broadcast_tx_commit` method is *also* deprecated
and will be removed in 0.37. Client code that submits a transaction
and needs to wait for it to be committed to the chain should poll
the Tendermint RPC to observe the transaction in the committed state.
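As a rough illustration of that polling pattern, here is a hedged sketch using the Go RPC
client. It assumes `BroadcastTxSync` (which has the same semantics as the new `broadcast_tx`
method) and `Tx` are available on the `rpc/client/http` client as in previous releases; the
retry loop itself is only illustrative.

```go
package myapp

import (
	"context"
	"time"

	rpchttp "github.com/tendermint/tendermint/rpc/client/http"
	"github.com/tendermint/tendermint/types"
)

// submitAndWait broadcasts a transaction and then polls until it appears in
// the committed state, instead of relying on broadcast_tx_commit.
func submitAndWait(ctx context.Context, c *rpchttp.HTTP, tx types.Tx) error {
	res, err := c.BroadcastTxSync(ctx, tx)
	if err != nil {
		return err
	}

	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ticker.C:
			// Tx returns an error until the transaction is found in a committed block.
			if _, err := c.Tx(ctx, res.Hash, false); err == nil {
				return nil
			}
		}
	}
}
```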
### Timeout Parameter Changes
Tendermint v0.36 updates how the Tendermint consensus timing parameters are
configured. These parameters, `timeout-propose`, `timeout-propose-delta`,
`timeout-prevote`, `timeout-prevote-delta`, `timeout-precommit`,
`timeout-precommit-delta`, `timeout-commit`, and `skip-timeout-commit`, were
previously configured in `config.toml`. They have been migrated into the
`ConsensusParams` and are no longer read from the `config.toml` file. Nodes with
these parameters set in the local configuration file will see a warning logged on
startup indicating that these parameters are no longer used.
These parameters have also been pared-down. There are no longer separate
parameters for both the `prevote` and `precommit` phases of Tendermint. The
separate `timeout-prevote` and `timeout-precommit` parameters have been merged
into a single `timeout-vote` parameter that configures both of these similar
phases of the consensus protocol.
A set of reasonable defaults has been put in place for these new parameters
that will take effect when the node starts up in version v0.36. New chains
created using v0.36 and beyond will be able to configure these parameters in the
chain's `genesis.json` file. Chains that upgrade to v0.36 from a previous
compatible version of Tendermint will begin running with the default values.
Upgrading applications that wish to use different values from the defaults for
these parameters may do so by setting the `ConsensusParams.Timeout` field of the
`FinalizeBlock` `ABCI` response.
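As a sketch of what such an override might look like (the `TimeoutParams` message and its
`Propose`, `Vote`, and `Commit` fields are assumed names based on the description above;
consult the generated `tendermint/types` parameter definitions for your exact version):

```go
package myapp

import (
	"time"

	abci "github.com/tendermint/tendermint/abci/types"
	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
)

// timeoutOverrides builds a ResponseFinalizeBlock carrying updated consensus
// timeouts. The TimeoutParams message and its Propose/Vote/Commit fields are
// assumed names; check the generated parameter types for your version.
func timeoutOverrides() abci.ResponseFinalizeBlock {
	propose, vote, commit := 3*time.Second, time.Second, time.Second
	return abci.ResponseFinalizeBlock{
		ConsensusParamUpdates: &tmproto.ConsensusParams{
			Timeout: &tmproto.TimeoutParams{
				Propose: &propose,
				Vote:    &vote,
				Commit:  &commit,
			},
		},
	}
}
```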
As a safety measure in case of unusual timing issues during the upgrade to
v0.36, an operator may override the consensus timeout values for a single node.
Note, however, that these overrides will be removed in Tendermint v0.37. See
[configuration](https://github.com/tendermint/tendermint/blob/master/docs/nodes/configuration.md)
for more information about these overrides.
For more discussion of this, see [ADR 074](https://tinyurl.com/adr074), which
lays out the reasoning for the changes as well as [RFC
009](https://tinyurl.com/rfc009) for a discussion of the complexities of
upgrading consensus parameters.
### RecheckTx Parameter Change
`RecheckTx` was previously enabled by the `recheck` parameter in the mempool
section of the `config.toml`.
Setting it to true made Tendermint invoke another `CheckTx` ABCI call on
each transaction remaining in the mempool following the execution of a block.
Similar to the timeout parameter changes, this parameter makes more sense as a
network-wide coordinated variable so that applications can be written knowing
that all nodes agree on whether to run `RecheckTx`.
Applications can turn on `RecheckTx` by altering the `ConsensusParams` in the
`FinalizeBlock` ABCI response.
### CLI Changes
The functionality around resetting a node has been extended to make it safer. The
`unsafe-reset-all` command has been replaced by a `reset` command with four
subcommands: `blockchain`, `peers`, `unsafe-signer` and `unsafe-all`.
- `tendermint reset blockchain`: Clears a node of all blocks, consensus state, evidence,
and indexed transactions. NOTE: This command does not reset application state.
If you need to rollback the last application state (to recover from application
nondeterminism), see instead the `tendermint rollback` command.
- `tendermint reset peers`: Clears the peer store, which persists information on peers used
by the networking layer. This can be used to get rid of stale addresses or to switch
to a predefined set of static peers.
- `tendermint reset unsafe-signer`: Resets the watermark level of the PrivVal File signer
allowing it to sign votes from the genesis height. This should only be used in testing as
it can lead to the node double signing.
- `tendermint reset unsafe-all`: A combination of the other three commands. This will delete
the entire `data` directory which may include application data as well.
### Go API Changes
#### `crypto` Package Cleanup
The `github.com/tendermint/tendermint/crypto/tmhash` package was removed
to improve clarity. Users are encouraged to use the standard library
`crypto/sha256` package directly. However, as a convenience, some constants
and one function have moved to the Tendermint `crypto` package:
- The `crypto.Checksum` function returns the SHA-256 checksum of a
byte slice. This is a wrapper around `sha256.Sum256` from the
standard library, but provided as a function to ease type
requirements (the library function returns a `[32]byte` rather than
a `[]byte`). See the sketch after this list for example usage.
- `tmhash.TruncatedSize` is now `crypto.AddressSize` which was
previously an alias for the same value.
- `tmhash.Size` and `tmhash.BlockSize` are now `crypto.HashSize` and
`crypto.HashSize`.
- `tmhash.SumTruncated` is now available via `crypto.AddressHash` or by
`crypto.Checksum(<...>)[:crypto.AddressSize]`
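A minimal sketch exercising the renamed helpers, assuming only the names described in this
section:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/tendermint/tendermint/crypto"
)

func main() {
	data := []byte("some bytes")

	sum := crypto.Checksum(data)     // full SHA-256 checksum, crypto.HashSize bytes long
	addr := crypto.AddressHash(data) // truncated hash, crypto.AddressSize bytes long

	// The truncated form can also be derived from the full checksum.
	truncated := crypto.Checksum(data)[:crypto.AddressSize]

	fmt.Println(len(sum), len(addr), bytes.Equal(addr, truncated))
}
```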
## v0.35
### ABCI Changes
@@ -261,25 +109,22 @@ and one function have moved to the Tendermint `crypto` package:
The format of all tendermint on-disk database keys changes in
0.35. Upgrading nodes must either re-sync all data or run a migration
script provided in this release.
The script located in
`github.com/tendermint/tendermint/scripts/keymigrate/migrate.go` provides the
function `Migrate(context.Context, db.DB)` which you can operationalize as
makes sense for your deployment.
script provided in this release. The script located in
`github.com/tendermint/tendermint/scripts/keymigrate/migrate.go`
provides the function `Migrate(context.Context, db.DB)` which you can
operationalize as makes sense for your deployment.
For ease of use the `tendermint` command includes a CLI version of the
migration script, which you can invoke, as in:
tendermint key-migrate
This reads the configuration file as normal and allows the `--db-backend` and
`--db-dir` flags to override the database location as needed.
This reads the configuration file as normal and allows the
`--db-backend` and `--db-dir` flags to change database operations as
needed.
The migration operation is intended to be idempotent, and should be safe to
rerun on the same database multiple times. As a safety measure, however, we
recommend that operators test out the migration on a copy of the database
first, if it is practical to do so, before applying it to the production data.
The migration operation is idempotent and can be run more than once,
if needed.
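For operators who prefer to run the migration from their own tooling rather than the CLI, a
hedged sketch of calling the library function directly might look like the following. The
`GoLevelDB` backend and paths are placeholders; only the `Migrate(context.Context, db.DB)`
entry point comes from the description above.

```go
package main

import (
	"context"
	"log"

	dbm "github.com/tendermint/tm-db"

	"github.com/tendermint/tendermint/scripts/keymigrate"
)

func main() {
	// Open one of the node's databases; repeat for each database in the data directory.
	db, err := dbm.NewGoLevelDB("blockstore", "/path/to/.tendermint/data")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Intended to be idempotent, but test on a copy of the data first.
	if err := keymigrate.Migrate(context.Background(), db); err != nil {
		log.Fatal(err)
	}
}
```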
### CLI Changes
@@ -332,13 +177,11 @@ the full RPC interface provided as direct function calls. Import the
the node service as in the following:
```go
logger := log.NewNopLogger()
node := node.NewDefault() //construct the node object
// start and set up the node service
// Construct and start up a node with default settings.
node := node.NewDefault(logger)
// Construct a local (in-memory) RPC client to the node.
client := local.New(logger, node.(local.NodeService))
client := local.New(node.(local.NodeService))
// use client object to interact with the node
```
### gRPC Support

View File

@@ -17,18 +17,35 @@ const (
//go:generate ../../scripts/mockery_generate.sh Client
// Client defines the interface for an ABCI client.
// Client defines an interface for an ABCI client.
//
// All methods return the appropriate protobuf ResponseXxx struct and
// an error.
//
// NOTE these are client errors, eg. ABCI socket connectivity issues.
// Application-related errors are reflected in response via ABCI error codes
// and (potentially) error response.
// and logs.
type Client interface {
service.Service
types.Application
Error() error
Flush(context.Context) error
Echo(context.Context, string) (*types.ResponseEcho, error)
Echo(ctx context.Context, msg string) (*types.ResponseEcho, error)
Info(context.Context, types.RequestInfo) (*types.ResponseInfo, error)
CheckTx(context.Context, types.RequestCheckTx) (*types.ResponseCheckTx, error)
Query(context.Context, types.RequestQuery) (*types.ResponseQuery, error)
Commit(context.Context) (*types.ResponseCommit, error)
InitChain(context.Context, types.RequestInitChain) (*types.ResponseInitChain, error)
PrepareProposal(context.Context, types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error)
ProcessProposal(context.Context, types.RequestProcessProposal) (*types.ResponseProcessProposal, error)
ExtendVote(context.Context, types.RequestExtendVote) (*types.ResponseExtendVote, error)
VerifyVoteExtension(context.Context, types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error)
FinalizeBlock(context.Context, types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error)
ListSnapshots(context.Context, types.RequestListSnapshots) (*types.ResponseListSnapshots, error)
OfferSnapshot(context.Context, types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error)
LoadSnapshotChunk(context.Context, types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error)
ApplySnapshotChunk(context.Context, types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error)
}
//----------------------------------------

View File

@@ -3,6 +3,7 @@ package abciclient
import (
"context"
"errors"
"fmt"
"net"
"sync"
"time"
@@ -51,9 +52,6 @@ func dialerFunc(ctx context.Context, addr string) (net.Conn, error) {
}
func (cli *grpcClient) OnStart(ctx context.Context) error {
timer := time.NewTimer(0)
defer timer.Stop()
RETRY_LOOP:
for {
conn, err := grpc.Dial(cli.addr,
@@ -64,14 +62,9 @@ RETRY_LOOP:
if cli.mustConnect {
return err
}
cli.logger.Error("abci.grpcClient failed to connect, Retrying...", "addr", cli.addr, "err", err)
timer.Reset(time.Second * dialRetryIntervalSeconds)
select {
case <-ctx.Done():
return ctx.Err()
case <-timer.C:
continue RETRY_LOOP
}
cli.logger.Error(fmt.Sprintf("abci.grpcClient failed to connect to %v. Retrying...\n", cli.addr), "err", err)
time.Sleep(time.Second * dialRetryIntervalSeconds)
continue RETRY_LOOP
}
cli.logger.Info("Dialed server. Waiting for echo.", "addr", cli.addr)
@@ -89,13 +82,7 @@ RETRY_LOOP:
}
cli.logger.Error("Echo failed", "err", err)
timer.Reset(time.Second * echoRetryIntervalSeconds)
select {
case <-ctx.Done():
return ctx.Err()
case <-timer.C:
continue ENSURE_CONNECTED
}
time.Sleep(time.Second * echoRetryIntervalSeconds)
}
cli.client = client
@@ -127,15 +114,15 @@ func (cli *grpcClient) Echo(ctx context.Context, msg string) (*types.ResponseEch
return cli.client.Echo(ctx, types.ToRequestEcho(msg).GetEcho(), grpc.WaitForReady(true))
}
func (cli *grpcClient) Info(ctx context.Context, params *types.RequestInfo) (*types.ResponseInfo, error) {
func (cli *grpcClient) Info(ctx context.Context, params types.RequestInfo) (*types.ResponseInfo, error) {
return cli.client.Info(ctx, types.ToRequestInfo(params).GetInfo(), grpc.WaitForReady(true))
}
func (cli *grpcClient) CheckTx(ctx context.Context, params *types.RequestCheckTx) (*types.ResponseCheckTx, error) {
func (cli *grpcClient) CheckTx(ctx context.Context, params types.RequestCheckTx) (*types.ResponseCheckTx, error) {
return cli.client.CheckTx(ctx, types.ToRequestCheckTx(params).GetCheckTx(), grpc.WaitForReady(true))
}
func (cli *grpcClient) Query(ctx context.Context, params *types.RequestQuery) (*types.ResponseQuery, error) {
func (cli *grpcClient) Query(ctx context.Context, params types.RequestQuery) (*types.ResponseQuery, error) {
return cli.client.Query(ctx, types.ToRequestQuery(params).GetQuery(), grpc.WaitForReady(true))
}
@@ -143,42 +130,42 @@ func (cli *grpcClient) Commit(ctx context.Context) (*types.ResponseCommit, error
return cli.client.Commit(ctx, types.ToRequestCommit().GetCommit(), grpc.WaitForReady(true))
}
func (cli *grpcClient) InitChain(ctx context.Context, params *types.RequestInitChain) (*types.ResponseInitChain, error) {
func (cli *grpcClient) InitChain(ctx context.Context, params types.RequestInitChain) (*types.ResponseInitChain, error) {
return cli.client.InitChain(ctx, types.ToRequestInitChain(params).GetInitChain(), grpc.WaitForReady(true))
}
func (cli *grpcClient) ListSnapshots(ctx context.Context, params *types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
func (cli *grpcClient) ListSnapshots(ctx context.Context, params types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
return cli.client.ListSnapshots(ctx, types.ToRequestListSnapshots(params).GetListSnapshots(), grpc.WaitForReady(true))
}
func (cli *grpcClient) OfferSnapshot(ctx context.Context, params *types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
func (cli *grpcClient) OfferSnapshot(ctx context.Context, params types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
return cli.client.OfferSnapshot(ctx, types.ToRequestOfferSnapshot(params).GetOfferSnapshot(), grpc.WaitForReady(true))
}
func (cli *grpcClient) LoadSnapshotChunk(ctx context.Context, params *types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
func (cli *grpcClient) LoadSnapshotChunk(ctx context.Context, params types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
return cli.client.LoadSnapshotChunk(ctx, types.ToRequestLoadSnapshotChunk(params).GetLoadSnapshotChunk(), grpc.WaitForReady(true))
}
func (cli *grpcClient) ApplySnapshotChunk(ctx context.Context, params *types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
func (cli *grpcClient) ApplySnapshotChunk(ctx context.Context, params types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
return cli.client.ApplySnapshotChunk(ctx, types.ToRequestApplySnapshotChunk(params).GetApplySnapshotChunk(), grpc.WaitForReady(true))
}
func (cli *grpcClient) PrepareProposal(ctx context.Context, params *types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
func (cli *grpcClient) PrepareProposal(ctx context.Context, params types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
return cli.client.PrepareProposal(ctx, types.ToRequestPrepareProposal(params).GetPrepareProposal(), grpc.WaitForReady(true))
}
func (cli *grpcClient) ProcessProposal(ctx context.Context, params *types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
func (cli *grpcClient) ProcessProposal(ctx context.Context, params types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
return cli.client.ProcessProposal(ctx, types.ToRequestProcessProposal(params).GetProcessProposal(), grpc.WaitForReady(true))
}
func (cli *grpcClient) ExtendVote(ctx context.Context, params *types.RequestExtendVote) (*types.ResponseExtendVote, error) {
func (cli *grpcClient) ExtendVote(ctx context.Context, params types.RequestExtendVote) (*types.ResponseExtendVote, error) {
return cli.client.ExtendVote(ctx, types.ToRequestExtendVote(params).GetExtendVote(), grpc.WaitForReady(true))
}
func (cli *grpcClient) VerifyVoteExtension(ctx context.Context, params *types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) {
func (cli *grpcClient) VerifyVoteExtension(ctx context.Context, params types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) {
return cli.client.VerifyVoteExtension(ctx, types.ToRequestVerifyVoteExtension(params).GetVerifyVoteExtension(), grpc.WaitForReady(true))
}
func (cli *grpcClient) FinalizeBlock(ctx context.Context, params *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
func (cli *grpcClient) FinalizeBlock(ctx context.Context, params types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
return cli.client.FinalizeBlock(ctx, types.ToRequestFinalizeBlock(params).GetFinalizeBlock(), grpc.WaitForReady(true))
}

View File

@@ -34,7 +34,81 @@ func NewLocalClient(logger log.Logger, app types.Application) Client {
func (*localClient) OnStart(context.Context) error { return nil }
func (*localClient) OnStop() {}
func (*localClient) Error() error { return nil }
func (*localClient) Flush(context.Context) error { return nil }
func (*localClient) Echo(_ context.Context, msg string) (*types.ResponseEcho, error) {
//-------------------------------------------------------
func (*localClient) Flush(context.Context) error { return nil }
func (app *localClient) Echo(_ context.Context, msg string) (*types.ResponseEcho, error) {
return &types.ResponseEcho{Message: msg}, nil
}
func (app *localClient) Info(ctx context.Context, req types.RequestInfo) (*types.ResponseInfo, error) {
res := app.Application.Info(req)
return &res, nil
}
func (app *localClient) CheckTx(_ context.Context, req types.RequestCheckTx) (*types.ResponseCheckTx, error) {
res := app.Application.CheckTx(req)
return &res, nil
}
func (app *localClient) Query(_ context.Context, req types.RequestQuery) (*types.ResponseQuery, error) {
res := app.Application.Query(req)
return &res, nil
}
func (app *localClient) Commit(ctx context.Context) (*types.ResponseCommit, error) {
res := app.Application.Commit()
return &res, nil
}
func (app *localClient) InitChain(_ context.Context, req types.RequestInitChain) (*types.ResponseInitChain, error) {
res := app.Application.InitChain(req)
return &res, nil
}
func (app *localClient) ListSnapshots(_ context.Context, req types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
res := app.Application.ListSnapshots(req)
return &res, nil
}
func (app *localClient) OfferSnapshot(_ context.Context, req types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
res := app.Application.OfferSnapshot(req)
return &res, nil
}
func (app *localClient) LoadSnapshotChunk(_ context.Context, req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
res := app.Application.LoadSnapshotChunk(req)
return &res, nil
}
func (app *localClient) ApplySnapshotChunk(_ context.Context, req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
res := app.Application.ApplySnapshotChunk(req)
return &res, nil
}
func (app *localClient) PrepareProposal(_ context.Context, req types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
res := app.Application.PrepareProposal(req)
return &res, nil
}
func (app *localClient) ProcessProposal(_ context.Context, req types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
res := app.Application.ProcessProposal(req)
return &res, nil
}
func (app *localClient) ExtendVote(_ context.Context, req types.RequestExtendVote) (*types.ResponseExtendVote, error) {
res := app.Application.ExtendVote(req)
return &res, nil
}
func (app *localClient) VerifyVoteExtension(_ context.Context, req types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) {
res := app.Application.VerifyVoteExtension(req)
return &res, nil
}
func (app *localClient) FinalizeBlock(_ context.Context, req types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
res := app.Application.FinalizeBlock(req)
return &res, nil
}

View File

@@ -15,11 +15,11 @@ type Client struct {
}
// ApplySnapshotChunk provides a mock function with given fields: _a0, _a1
func (_m *Client) ApplySnapshotChunk(_a0 context.Context, _a1 *types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
func (_m *Client) ApplySnapshotChunk(_a0 context.Context, _a1 types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
ret := _m.Called(_a0, _a1)
var r0 *types.ResponseApplySnapshotChunk
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestApplySnapshotChunk) *types.ResponseApplySnapshotChunk); ok {
if rf, ok := ret.Get(0).(func(context.Context, types.RequestApplySnapshotChunk) *types.ResponseApplySnapshotChunk); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
@@ -28,7 +28,7 @@ func (_m *Client) ApplySnapshotChunk(_a0 context.Context, _a1 *types.RequestAppl
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestApplySnapshotChunk) error); ok {
if rf, ok := ret.Get(1).(func(context.Context, types.RequestApplySnapshotChunk) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
@@ -38,11 +38,11 @@ func (_m *Client) ApplySnapshotChunk(_a0 context.Context, _a1 *types.RequestAppl
}
// CheckTx provides a mock function with given fields: _a0, _a1
func (_m *Client) CheckTx(_a0 context.Context, _a1 *types.RequestCheckTx) (*types.ResponseCheckTx, error) {
func (_m *Client) CheckTx(_a0 context.Context, _a1 types.RequestCheckTx) (*types.ResponseCheckTx, error) {
ret := _m.Called(_a0, _a1)
var r0 *types.ResponseCheckTx
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestCheckTx) *types.ResponseCheckTx); ok {
if rf, ok := ret.Get(0).(func(context.Context, types.RequestCheckTx) *types.ResponseCheckTx); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
@@ -51,7 +51,7 @@ func (_m *Client) CheckTx(_a0 context.Context, _a1 *types.RequestCheckTx) (*type
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestCheckTx) error); ok {
if rf, ok := ret.Get(1).(func(context.Context, types.RequestCheckTx) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
@@ -83,13 +83,13 @@ func (_m *Client) Commit(_a0 context.Context) (*types.ResponseCommit, error) {
return r0, r1
}
// Echo provides a mock function with given fields: _a0, _a1
func (_m *Client) Echo(_a0 context.Context, _a1 string) (*types.ResponseEcho, error) {
ret := _m.Called(_a0, _a1)
// Echo provides a mock function with given fields: ctx, msg
func (_m *Client) Echo(ctx context.Context, msg string) (*types.ResponseEcho, error) {
ret := _m.Called(ctx, msg)
var r0 *types.ResponseEcho
if rf, ok := ret.Get(0).(func(context.Context, string) *types.ResponseEcho); ok {
r0 = rf(_a0, _a1)
r0 = rf(ctx, msg)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.ResponseEcho)
@@ -98,7 +98,7 @@ func (_m *Client) Echo(_a0 context.Context, _a1 string) (*types.ResponseEcho, er
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
r1 = rf(_a0, _a1)
r1 = rf(ctx, msg)
} else {
r1 = ret.Error(1)
}
@@ -121,11 +121,11 @@ func (_m *Client) Error() error {
}
// ExtendVote provides a mock function with given fields: _a0, _a1
func (_m *Client) ExtendVote(_a0 context.Context, _a1 *types.RequestExtendVote) (*types.ResponseExtendVote, error) {
func (_m *Client) ExtendVote(_a0 context.Context, _a1 types.RequestExtendVote) (*types.ResponseExtendVote, error) {
ret := _m.Called(_a0, _a1)
var r0 *types.ResponseExtendVote
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestExtendVote) *types.ResponseExtendVote); ok {
if rf, ok := ret.Get(0).(func(context.Context, types.RequestExtendVote) *types.ResponseExtendVote); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
@@ -134,7 +134,7 @@ func (_m *Client) ExtendVote(_a0 context.Context, _a1 *types.RequestExtendVote)
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestExtendVote) error); ok {
if rf, ok := ret.Get(1).(func(context.Context, types.RequestExtendVote) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
@@ -144,11 +144,11 @@ func (_m *Client) ExtendVote(_a0 context.Context, _a1 *types.RequestExtendVote)
}
// FinalizeBlock provides a mock function with given fields: _a0, _a1
func (_m *Client) FinalizeBlock(_a0 context.Context, _a1 *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
func (_m *Client) FinalizeBlock(_a0 context.Context, _a1 types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
ret := _m.Called(_a0, _a1)
var r0 *types.ResponseFinalizeBlock
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestFinalizeBlock) *types.ResponseFinalizeBlock); ok {
if rf, ok := ret.Get(0).(func(context.Context, types.RequestFinalizeBlock) *types.ResponseFinalizeBlock); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
@@ -157,7 +157,7 @@ func (_m *Client) FinalizeBlock(_a0 context.Context, _a1 *types.RequestFinalizeB
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestFinalizeBlock) error); ok {
if rf, ok := ret.Get(1).(func(context.Context, types.RequestFinalizeBlock) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
@@ -181,11 +181,11 @@ func (_m *Client) Flush(_a0 context.Context) error {
}
// Info provides a mock function with given fields: _a0, _a1
func (_m *Client) Info(_a0 context.Context, _a1 *types.RequestInfo) (*types.ResponseInfo, error) {
func (_m *Client) Info(_a0 context.Context, _a1 types.RequestInfo) (*types.ResponseInfo, error) {
ret := _m.Called(_a0, _a1)
var r0 *types.ResponseInfo
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestInfo) *types.ResponseInfo); ok {
if rf, ok := ret.Get(0).(func(context.Context, types.RequestInfo) *types.ResponseInfo); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
@@ -194,7 +194,7 @@ func (_m *Client) Info(_a0 context.Context, _a1 *types.RequestInfo) (*types.Resp
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestInfo) error); ok {
if rf, ok := ret.Get(1).(func(context.Context, types.RequestInfo) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
@@ -204,11 +204,11 @@ func (_m *Client) Info(_a0 context.Context, _a1 *types.RequestInfo) (*types.Resp
}
// InitChain provides a mock function with given fields: _a0, _a1
func (_m *Client) InitChain(_a0 context.Context, _a1 *types.RequestInitChain) (*types.ResponseInitChain, error) {
func (_m *Client) InitChain(_a0 context.Context, _a1 types.RequestInitChain) (*types.ResponseInitChain, error) {
ret := _m.Called(_a0, _a1)
var r0 *types.ResponseInitChain
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestInitChain) *types.ResponseInitChain); ok {
if rf, ok := ret.Get(0).(func(context.Context, types.RequestInitChain) *types.ResponseInitChain); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
@@ -217,7 +217,7 @@ func (_m *Client) InitChain(_a0 context.Context, _a1 *types.RequestInitChain) (*
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestInitChain) error); ok {
if rf, ok := ret.Get(1).(func(context.Context, types.RequestInitChain) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
@@ -241,11 +241,11 @@ func (_m *Client) IsRunning() bool {
}
// ListSnapshots provides a mock function with given fields: _a0, _a1
func (_m *Client) ListSnapshots(_a0 context.Context, _a1 *types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
func (_m *Client) ListSnapshots(_a0 context.Context, _a1 types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
ret := _m.Called(_a0, _a1)
var r0 *types.ResponseListSnapshots
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestListSnapshots) *types.ResponseListSnapshots); ok {
if rf, ok := ret.Get(0).(func(context.Context, types.RequestListSnapshots) *types.ResponseListSnapshots); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
@@ -254,7 +254,7 @@ func (_m *Client) ListSnapshots(_a0 context.Context, _a1 *types.RequestListSnaps
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestListSnapshots) error); ok {
if rf, ok := ret.Get(1).(func(context.Context, types.RequestListSnapshots) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
@@ -264,11 +264,11 @@ func (_m *Client) ListSnapshots(_a0 context.Context, _a1 *types.RequestListSnaps
}
// LoadSnapshotChunk provides a mock function with given fields: _a0, _a1
func (_m *Client) LoadSnapshotChunk(_a0 context.Context, _a1 *types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
func (_m *Client) LoadSnapshotChunk(_a0 context.Context, _a1 types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
ret := _m.Called(_a0, _a1)
var r0 *types.ResponseLoadSnapshotChunk
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestLoadSnapshotChunk) *types.ResponseLoadSnapshotChunk); ok {
if rf, ok := ret.Get(0).(func(context.Context, types.RequestLoadSnapshotChunk) *types.ResponseLoadSnapshotChunk); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
@@ -277,7 +277,7 @@ func (_m *Client) LoadSnapshotChunk(_a0 context.Context, _a1 *types.RequestLoadS
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestLoadSnapshotChunk) error); ok {
if rf, ok := ret.Get(1).(func(context.Context, types.RequestLoadSnapshotChunk) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
@@ -287,11 +287,11 @@ func (_m *Client) LoadSnapshotChunk(_a0 context.Context, _a1 *types.RequestLoadS
}
// OfferSnapshot provides a mock function with given fields: _a0, _a1
func (_m *Client) OfferSnapshot(_a0 context.Context, _a1 *types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
func (_m *Client) OfferSnapshot(_a0 context.Context, _a1 types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
ret := _m.Called(_a0, _a1)
var r0 *types.ResponseOfferSnapshot
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestOfferSnapshot) *types.ResponseOfferSnapshot); ok {
if rf, ok := ret.Get(0).(func(context.Context, types.RequestOfferSnapshot) *types.ResponseOfferSnapshot); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
@@ -300,7 +300,7 @@ func (_m *Client) OfferSnapshot(_a0 context.Context, _a1 *types.RequestOfferSnap
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestOfferSnapshot) error); ok {
if rf, ok := ret.Get(1).(func(context.Context, types.RequestOfferSnapshot) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
@@ -310,11 +310,11 @@ func (_m *Client) OfferSnapshot(_a0 context.Context, _a1 *types.RequestOfferSnap
}
// PrepareProposal provides a mock function with given fields: _a0, _a1
func (_m *Client) PrepareProposal(_a0 context.Context, _a1 *types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
func (_m *Client) PrepareProposal(_a0 context.Context, _a1 types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
ret := _m.Called(_a0, _a1)
var r0 *types.ResponsePrepareProposal
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestPrepareProposal) *types.ResponsePrepareProposal); ok {
if rf, ok := ret.Get(0).(func(context.Context, types.RequestPrepareProposal) *types.ResponsePrepareProposal); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
@@ -323,7 +323,7 @@ func (_m *Client) PrepareProposal(_a0 context.Context, _a1 *types.RequestPrepare
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestPrepareProposal) error); ok {
if rf, ok := ret.Get(1).(func(context.Context, types.RequestPrepareProposal) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
@@ -333,11 +333,11 @@ func (_m *Client) PrepareProposal(_a0 context.Context, _a1 *types.RequestPrepare
}
// ProcessProposal provides a mock function with given fields: _a0, _a1
func (_m *Client) ProcessProposal(_a0 context.Context, _a1 *types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
func (_m *Client) ProcessProposal(_a0 context.Context, _a1 types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
ret := _m.Called(_a0, _a1)
var r0 *types.ResponseProcessProposal
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestProcessProposal) *types.ResponseProcessProposal); ok {
if rf, ok := ret.Get(0).(func(context.Context, types.RequestProcessProposal) *types.ResponseProcessProposal); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
@@ -346,7 +346,7 @@ func (_m *Client) ProcessProposal(_a0 context.Context, _a1 *types.RequestProcess
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestProcessProposal) error); ok {
if rf, ok := ret.Get(1).(func(context.Context, types.RequestProcessProposal) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
@@ -356,11 +356,11 @@ func (_m *Client) ProcessProposal(_a0 context.Context, _a1 *types.RequestProcess
}
// Query provides a mock function with given fields: _a0, _a1
func (_m *Client) Query(_a0 context.Context, _a1 *types.RequestQuery) (*types.ResponseQuery, error) {
func (_m *Client) Query(_a0 context.Context, _a1 types.RequestQuery) (*types.ResponseQuery, error) {
ret := _m.Called(_a0, _a1)
var r0 *types.ResponseQuery
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestQuery) *types.ResponseQuery); ok {
if rf, ok := ret.Get(0).(func(context.Context, types.RequestQuery) *types.ResponseQuery); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
@@ -369,7 +369,7 @@ func (_m *Client) Query(_a0 context.Context, _a1 *types.RequestQuery) (*types.Re
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestQuery) error); ok {
if rf, ok := ret.Get(1).(func(context.Context, types.RequestQuery) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
@@ -393,11 +393,11 @@ func (_m *Client) Start(_a0 context.Context) error {
}
// VerifyVoteExtension provides a mock function with given fields: _a0, _a1
func (_m *Client) VerifyVoteExtension(_a0 context.Context, _a1 *types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) {
func (_m *Client) VerifyVoteExtension(_a0 context.Context, _a1 types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) {
ret := _m.Called(_a0, _a1)
var r0 *types.ResponseVerifyVoteExtension
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestVerifyVoteExtension) *types.ResponseVerifyVoteExtension); ok {
if rf, ok := ret.Get(0).(func(context.Context, types.RequestVerifyVoteExtension) *types.ResponseVerifyVoteExtension); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
@@ -406,7 +406,7 @@ func (_m *Client) VerifyVoteExtension(_a0 context.Context, _a1 *types.RequestVer
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestVerifyVoteExtension) error); ok {
if rf, ok := ret.Get(1).(func(context.Context, types.RequestVerifyVoteExtension) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
@@ -419,18 +419,3 @@ func (_m *Client) VerifyVoteExtension(_a0 context.Context, _a1 *types.RequestVer
func (_m *Client) Wait() {
_m.Called()
}
type NewClientT interface {
mock.TestingT
Cleanup(func())
}
// NewClient creates a new instance of Client. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
func NewClient(t NewClientT) *Client {
mock := &Client{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}

View File

@@ -17,6 +17,12 @@ import (
"github.com/tendermint/tendermint/libs/service"
)
const (
// reqQueueSize is the max number of queued async requests.
// (memory: 256MB max assuming 1MB transactions)
reqQueueSize = 256
)
// This is goroutine-safe, but users should beware that the application in
// general is not meant to be interfaced with concurrent callers.
type socketClient struct {
@@ -42,7 +48,7 @@ var _ Client = (*socketClient)(nil)
func NewSocketClient(logger log.Logger, addr string, mustConnect bool) Client {
cli := &socketClient{
logger: logger,
reqQueue: make(chan *requestAndResponse),
reqQueue: make(chan *requestAndResponse, reqQueueSize),
mustConnect: mustConnect,
addr: addr,
reqSent: list.New(),
@@ -58,8 +64,6 @@ func (cli *socketClient) OnStart(ctx context.Context) error {
err error
conn net.Conn
)
timer := time.NewTimer(0)
defer timer.Stop()
for {
conn, err = tmnet.Connect(cli.addr)
@@ -67,19 +71,10 @@ func (cli *socketClient) OnStart(ctx context.Context) error {
if cli.mustConnect {
return err
}
cli.logger.Error("abci.socketClient failed to connect, retrying after",
"retry_after", dialRetryIntervalSeconds,
"target", cli.addr,
"err", err)
timer.Reset(time.Second * dialRetryIntervalSeconds)
select {
case <-ctx.Done():
return ctx.Err()
case <-timer.C:
continue
}
cli.logger.Error(fmt.Sprintf("abci.socketClient failed to connect to %v. Retrying after %vs...",
cli.addr, dialRetryIntervalSeconds), "err", err)
time.Sleep(time.Second * dialRetryIntervalSeconds)
continue
}
cli.conn = conn
@@ -95,7 +90,11 @@ func (cli *socketClient) OnStop() {
if cli.conn != nil {
cli.conn.Close()
}
cli.drainQueue()
// this timeout is arbitrary.
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
cli.drainQueue(ctx)
}
// Error returns an error if the client was stopped abruptly.
@@ -114,10 +113,11 @@ func (cli *socketClient) sendRequestsRoutine(ctx context.Context, conn io.Writer
case <-ctx.Done():
return
case reqres := <-cli.reqQueue:
// N.B. We must enqueue before sending out the request, otherwise the
// server may reply before we do it, and the receiver will fail for an
// unsolicited reply.
cli.trackRequest(reqres)
if ctx.Err() != nil {
return
}
cli.willSendReq(reqres)
if err := types.WriteMessage(reqres.Request, bw); err != nil {
cli.stopForError(fmt.Errorf("write to buffer: %w", err))
@@ -159,13 +159,7 @@ func (cli *socketClient) recvResponseRoutine(ctx context.Context, conn io.Reader
}
}
func (cli *socketClient) trackRequest(reqres *requestAndResponse) {
// N.B. We must NOT hold the client state lock while checking this, or we
// may deadlock with shutdown.
if !cli.IsRunning() {
return
}
func (cli *socketClient) willSendReq(reqres *requestAndResponse) {
cli.mtx.Lock()
defer cli.mtx.Unlock()
cli.reqSent.PushBack(reqres)
@@ -195,11 +189,137 @@ func (cli *socketClient) didRecvResponse(res *types.Response) error {
//----------------------------------------
func (cli *socketClient) doRequest(ctx context.Context, req *types.Request) (*types.Response, error) {
if !cli.IsRunning() {
return nil, errors.New("client has stopped")
func (cli *socketClient) Flush(ctx context.Context) error {
_, err := cli.doRequest(ctx, types.ToRequestFlush())
if err != nil {
return err
}
return nil
}
func (cli *socketClient) Echo(ctx context.Context, msg string) (*types.ResponseEcho, error) {
res, err := cli.doRequest(ctx, types.ToRequestEcho(msg))
if err != nil {
return nil, err
}
return res.GetEcho(), nil
}
func (cli *socketClient) Info(ctx context.Context, req types.RequestInfo) (*types.ResponseInfo, error) {
res, err := cli.doRequest(ctx, types.ToRequestInfo(req))
if err != nil {
return nil, err
}
return res.GetInfo(), nil
}
func (cli *socketClient) CheckTx(ctx context.Context, req types.RequestCheckTx) (*types.ResponseCheckTx, error) {
res, err := cli.doRequest(ctx, types.ToRequestCheckTx(req))
if err != nil {
return nil, err
}
return res.GetCheckTx(), nil
}
func (cli *socketClient) Query(ctx context.Context, req types.RequestQuery) (*types.ResponseQuery, error) {
res, err := cli.doRequest(ctx, types.ToRequestQuery(req))
if err != nil {
return nil, err
}
return res.GetQuery(), nil
}
func (cli *socketClient) Commit(ctx context.Context) (*types.ResponseCommit, error) {
res, err := cli.doRequest(ctx, types.ToRequestCommit())
if err != nil {
return nil, err
}
return res.GetCommit(), nil
}
func (cli *socketClient) InitChain(ctx context.Context, req types.RequestInitChain) (*types.ResponseInitChain, error) {
res, err := cli.doRequest(ctx, types.ToRequestInitChain(req))
if err != nil {
return nil, err
}
return res.GetInitChain(), nil
}
func (cli *socketClient) ListSnapshots(ctx context.Context, req types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
res, err := cli.doRequest(ctx, types.ToRequestListSnapshots(req))
if err != nil {
return nil, err
}
return res.GetListSnapshots(), nil
}
func (cli *socketClient) OfferSnapshot(ctx context.Context, req types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
res, err := cli.doRequest(ctx, types.ToRequestOfferSnapshot(req))
if err != nil {
return nil, err
}
return res.GetOfferSnapshot(), nil
}
func (cli *socketClient) LoadSnapshotChunk(ctx context.Context, req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
res, err := cli.doRequest(ctx, types.ToRequestLoadSnapshotChunk(req))
if err != nil {
return nil, err
}
return res.GetLoadSnapshotChunk(), nil
}
func (cli *socketClient) ApplySnapshotChunk(ctx context.Context, req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
res, err := cli.doRequest(ctx, types.ToRequestApplySnapshotChunk(req))
if err != nil {
return nil, err
}
return res.GetApplySnapshotChunk(), nil
}
func (cli *socketClient) PrepareProposal(ctx context.Context, req types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
res, err := cli.doRequest(ctx, types.ToRequestPrepareProposal(req))
if err != nil {
return nil, err
}
return res.GetPrepareProposal(), nil
}
func (cli *socketClient) ProcessProposal(ctx context.Context, req types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
res, err := cli.doRequest(ctx, types.ToRequestProcessProposal(req))
if err != nil {
return nil, err
}
return res.GetProcessProposal(), nil
}
func (cli *socketClient) ExtendVote(ctx context.Context, req types.RequestExtendVote) (*types.ResponseExtendVote, error) {
res, err := cli.doRequest(ctx, types.ToRequestExtendVote(req))
if err != nil {
return nil, err
}
return res.GetExtendVote(), nil
}
func (cli *socketClient) VerifyVoteExtension(ctx context.Context, req types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) {
res, err := cli.doRequest(ctx, types.ToRequestVerifyVoteExtension(req))
if err != nil {
return nil, err
}
return res.GetVerifyVoteExtension(), nil
}
func (cli *socketClient) FinalizeBlock(ctx context.Context, req types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
res, err := cli.doRequest(ctx, types.ToRequestFinalizeBlock(req))
if err != nil {
return nil, err
}
return res.GetFinalizeBlock(), nil
}
//----------------------------------------
func (cli *socketClient) doRequest(ctx context.Context, req *types.Request) (*types.Response, error) {
reqres := makeReqRes(req)
select {
@@ -222,7 +342,7 @@ func (cli *socketClient) doRequest(ctx context.Context, req *types.Request) (*ty
// drainQueue marks as complete and discards all remaining pending requests
// from the queue.
func (cli *socketClient) drainQueue() {
func (cli *socketClient) drainQueue(ctx context.Context) {
cli.mtx.Lock()
defer cli.mtx.Unlock()
@@ -231,136 +351,22 @@ func (cli *socketClient) drainQueue() {
reqres := req.Value.(*requestAndResponse)
reqres.markDone()
}
}
//----------------------------------------
func (cli *socketClient) Flush(ctx context.Context) error {
_, err := cli.doRequest(ctx, types.ToRequestFlush())
if err != nil {
return err
// Mark all queued messages as resolved.
//
// TODO(creachadair): We can't simply range the channel, because it is never
// closed, and the writer continues to add work.
// See https://github.com/tendermint/tendermint/issues/6996.
for {
select {
case <-ctx.Done():
return
case reqres := <-cli.reqQueue:
reqres.markDone()
default:
return
}
}
return nil
}
func (cli *socketClient) Echo(ctx context.Context, msg string) (*types.ResponseEcho, error) {
res, err := cli.doRequest(ctx, types.ToRequestEcho(msg))
if err != nil {
return nil, err
}
return res.GetEcho(), nil
}
func (cli *socketClient) Info(ctx context.Context, req *types.RequestInfo) (*types.ResponseInfo, error) {
res, err := cli.doRequest(ctx, types.ToRequestInfo(req))
if err != nil {
return nil, err
}
return res.GetInfo(), nil
}
func (cli *socketClient) CheckTx(ctx context.Context, req *types.RequestCheckTx) (*types.ResponseCheckTx, error) {
res, err := cli.doRequest(ctx, types.ToRequestCheckTx(req))
if err != nil {
return nil, err
}
return res.GetCheckTx(), nil
}
func (cli *socketClient) Query(ctx context.Context, req *types.RequestQuery) (*types.ResponseQuery, error) {
res, err := cli.doRequest(ctx, types.ToRequestQuery(req))
if err != nil {
return nil, err
}
return res.GetQuery(), nil
}
func (cli *socketClient) Commit(ctx context.Context) (*types.ResponseCommit, error) {
res, err := cli.doRequest(ctx, types.ToRequestCommit())
if err != nil {
return nil, err
}
return res.GetCommit(), nil
}
func (cli *socketClient) InitChain(ctx context.Context, req *types.RequestInitChain) (*types.ResponseInitChain, error) {
res, err := cli.doRequest(ctx, types.ToRequestInitChain(req))
if err != nil {
return nil, err
}
return res.GetInitChain(), nil
}
func (cli *socketClient) ListSnapshots(ctx context.Context, req *types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
res, err := cli.doRequest(ctx, types.ToRequestListSnapshots(req))
if err != nil {
return nil, err
}
return res.GetListSnapshots(), nil
}
func (cli *socketClient) OfferSnapshot(ctx context.Context, req *types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
res, err := cli.doRequest(ctx, types.ToRequestOfferSnapshot(req))
if err != nil {
return nil, err
}
return res.GetOfferSnapshot(), nil
}
func (cli *socketClient) LoadSnapshotChunk(ctx context.Context, req *types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
res, err := cli.doRequest(ctx, types.ToRequestLoadSnapshotChunk(req))
if err != nil {
return nil, err
}
return res.GetLoadSnapshotChunk(), nil
}
func (cli *socketClient) ApplySnapshotChunk(ctx context.Context, req *types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
res, err := cli.doRequest(ctx, types.ToRequestApplySnapshotChunk(req))
if err != nil {
return nil, err
}
return res.GetApplySnapshotChunk(), nil
}
func (cli *socketClient) PrepareProposal(ctx context.Context, req *types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
res, err := cli.doRequest(ctx, types.ToRequestPrepareProposal(req))
if err != nil {
return nil, err
}
return res.GetPrepareProposal(), nil
}
func (cli *socketClient) ProcessProposal(ctx context.Context, req *types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
res, err := cli.doRequest(ctx, types.ToRequestProcessProposal(req))
if err != nil {
return nil, err
}
return res.GetProcessProposal(), nil
}
func (cli *socketClient) ExtendVote(ctx context.Context, req *types.RequestExtendVote) (*types.ResponseExtendVote, error) {
res, err := cli.doRequest(ctx, types.ToRequestExtendVote(req))
if err != nil {
return nil, err
}
return res.GetExtendVote(), nil
}
func (cli *socketClient) VerifyVoteExtension(ctx context.Context, req *types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) {
res, err := cli.doRequest(ctx, types.ToRequestVerifyVoteExtension(req))
if err != nil {
return nil, err
}
return res.GetVerifyVoteExtension(), nil
}
func (cli *socketClient) FinalizeBlock(ctx context.Context, req *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
res, err := cli.doRequest(ctx, types.ToRequestFinalizeBlock(req))
if err != nil {
return nil, err
}
return res.GetFinalizeBlock(), nil
}
//----------------------------------------
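
Every client method above follows the same wrap/dispatch/unwrap shape: build the typed request with types.ToRequestX, hand it to doRequest, and pull the typed payload back out with res.GetX. A minimal caller-side sketch (not part of the diff, assuming the usual abci/client and abci/types import paths and the value-based request signatures restored by this change):

package main

import (
    "context"
    "fmt"

    abciclient "github.com/tendermint/tendermint/abci/client"
    "github.com/tendermint/tendermint/abci/types"
)

// queryInfo drives one Info round trip through any abciclient.Client
// implementation, including the socket client above. The Version string is an
// arbitrary example value.
func queryInfo(ctx context.Context, client abciclient.Client) error {
    res, err := client.Info(ctx, types.RequestInfo{Version: "0.36.0"})
    if err != nil {
        return err
    }
    fmt.Printf("last height: %d, app hash: %X\n", res.LastBlockHeight, res.LastBlockAppHash)
    return nil
}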

View File

@@ -2,7 +2,6 @@ package main
import (
"bufio"
"bytes"
"encoding/hex"
"errors"
"fmt"
@@ -79,11 +78,10 @@ func RootCmmand(logger log.Logger) *cobra.Command {
// Structure for data passed to print response.
type response struct {
// generic abci response
Data []byte
Code uint32
Info string
Log string
Status int32
Data []byte
Code uint32
Info string
Log string
Query *queryResponse
}
@@ -132,8 +130,6 @@ func addCommands(cmd *cobra.Command, logger log.Logger) {
cmd.AddCommand(commitCmd)
cmd.AddCommand(versionCmd)
cmd.AddCommand(testCmd)
cmd.AddCommand(prepareProposalCmd)
cmd.AddCommand(processProposalCmd)
cmd.AddCommand(getQueryCmd())
// examples
@@ -155,10 +151,8 @@ where example.file looks something like:
check_tx 0x00
check_tx 0xff
finalize_block 0x00
commit
check_tx 0x00
finalize_block 0x01 0x04 0xff
commit
info
`,
Args: cobra.ExactArgs(0),
@@ -174,7 +168,7 @@ This command opens an interactive console for running any of the other commands
without opening a new connection each time
`,
Args: cobra.ExactArgs(0),
ValidArgs: []string{"echo", "info", "query", "check_tx", "prepare_proposal", "process_proposal", "finalize_block", "commit"},
ValidArgs: []string{"echo", "info", "finalize_block", "check_tx", "commit", "query"},
RunE: cmdConsole,
}
@@ -197,7 +191,7 @@ var finalizeBlockCmd = &cobra.Command{
Use: "finalize_block",
Short: "deliver a block of transactions to the application",
Long: "deliver a block of transactions to the application",
Args: cobra.MinimumNArgs(0),
Args: cobra.MinimumNArgs(1),
RunE: cmdFinalizeBlock,
}
@@ -228,22 +222,6 @@ var versionCmd = &cobra.Command{
},
}
var prepareProposalCmd = &cobra.Command{
Use: "prepare_proposal",
Short: "prepare proposal",
Long: "prepare proposal",
Args: cobra.MinimumNArgs(0),
RunE: cmdPrepareProposal,
}
var processProposalCmd = &cobra.Command{
Use: "process_proposal",
Short: "process proposal",
Long: "process proposal",
Args: cobra.MinimumNArgs(0),
RunE: cmdProcessProposal,
}
func getQueryCmd() *cobra.Command {
cmd := &cobra.Command{
Use: "query",
@@ -320,23 +298,23 @@ func cmdTest(cmd *cobra.Command, args []string) error {
return compose(
[]func() error{
func() error { return servertest.InitChain(ctx, client) },
func() error { return servertest.Commit(ctx, client) },
func() error { return servertest.Commit(ctx, client, nil) },
func() error {
return servertest.FinalizeBlock(ctx, client, [][]byte{
[]byte("abc"),
}, []uint32{
code.CodeTypeBadNonce,
}, nil, nil)
}, nil)
},
func() error { return servertest.Commit(ctx, client) },
func() error { return servertest.Commit(ctx, client, nil) },
func() error {
return servertest.FinalizeBlock(ctx, client, [][]byte{
{0x00},
}, []uint32{
code.CodeTypeOK,
}, nil, []byte{0, 0, 0, 0, 0, 0, 0, 1})
}, nil)
},
func() error { return servertest.Commit(ctx, client) },
func() error { return servertest.Commit(ctx, client, []byte{0, 0, 0, 0, 0, 0, 0, 1}) },
func() error {
return servertest.FinalizeBlock(ctx, client, [][]byte{
{0x00},
@@ -352,21 +330,9 @@ func cmdTest(cmd *cobra.Command, args []string) error {
code.CodeTypeOK,
code.CodeTypeOK,
code.CodeTypeBadNonce,
}, nil, []byte{0, 0, 0, 0, 0, 0, 0, 5})
},
func() error { return servertest.Commit(ctx, client) },
func() error {
return servertest.PrepareProposal(ctx, client, [][]byte{
{0x01},
}, []types.TxRecord_TxAction{
types.TxRecord_UNMODIFIED,
}, nil)
},
func() error {
return servertest.ProcessProposal(ctx, client, [][]byte{
{0x01},
}, types.ResponseProcessProposal_ACCEPT)
},
func() error { return servertest.Commit(ctx, client, []byte{0, 0, 0, 0, 0, 0, 0, 5}) },
})
}
@@ -467,10 +433,6 @@ func muxOnCommands(cmd *cobra.Command, pArgs []string) error {
return cmdInfo(cmd, actualArgs)
case "query":
return cmdQuery(cmd, actualArgs)
case "prepare_proposal":
return cmdPrepareProposal(cmd, actualArgs)
case "process_proposal":
return cmdProcessProposal(cmd, actualArgs)
default:
return cmdUnimplemented(cmd, pArgs)
}
@@ -520,7 +482,7 @@ func cmdInfo(cmd *cobra.Command, args []string) error {
if len(args) == 1 {
version = args[0]
}
res, err := client.Info(cmd.Context(), &types.RequestInfo{Version: version})
res, err := client.Info(cmd.Context(), types.RequestInfo{Version: version})
if err != nil {
return err
}
@@ -532,8 +494,15 @@ func cmdInfo(cmd *cobra.Command, args []string) error {
const codeBad uint32 = 10
// Append new txs to application
// Append a new tx to application
func cmdFinalizeBlock(cmd *cobra.Command, args []string) error {
if len(args) == 0 {
printResponse(cmd, args, response{
Code: codeBad,
Log: "Must provide at least one transaction",
})
return nil
}
txs := make([][]byte, len(args))
for i, arg := range args {
txBytes, err := stringOrHexToBytes(arg)
@@ -542,23 +511,18 @@ func cmdFinalizeBlock(cmd *cobra.Command, args []string) error {
}
txs[i] = txBytes
}
res, err := client.FinalizeBlock(cmd.Context(), &types.RequestFinalizeBlock{Txs: txs})
res, err := client.FinalizeBlock(cmd.Context(), types.RequestFinalizeBlock{Txs: txs})
if err != nil {
return err
}
resps := make([]response, 0, len(res.TxResults)+1)
for _, tx := range res.TxResults {
resps = append(resps, response{
printResponse(cmd, args, response{
Code: tx.Code,
Data: tx.Data,
Info: tx.Info,
Log: tx.Log,
})
}
resps = append(resps, response{
Data: res.AppHash,
})
printResponse(cmd, args, resps...)
return nil
}
@@ -575,24 +539,28 @@ func cmdCheckTx(cmd *cobra.Command, args []string) error {
if err != nil {
return err
}
res, err := client.CheckTx(cmd.Context(), &types.RequestCheckTx{Tx: txBytes})
res, err := client.CheckTx(cmd.Context(), types.RequestCheckTx{Tx: txBytes})
if err != nil {
return err
}
printResponse(cmd, args, response{
Code: res.Code,
Data: res.Data,
Info: res.Info,
Log: res.Log,
})
return nil
}
// Get application Merkle root hash
func cmdCommit(cmd *cobra.Command, args []string) error {
_, err := client.Commit(cmd.Context())
res, err := client.Commit(cmd.Context())
if err != nil {
return err
}
printResponse(cmd, args, response{})
printResponse(cmd, args, response{
Data: res.Data,
})
return nil
}
@@ -611,7 +579,7 @@ func cmdQuery(cmd *cobra.Command, args []string) error {
return err
}
resQuery, err := client.Query(cmd.Context(), &types.RequestQuery{
resQuery, err := client.Query(cmd.Context(), types.RequestQuery{
Data: queryBytes,
Path: flagPath,
Height: int64(flagHeight),
@@ -634,81 +602,6 @@ func cmdQuery(cmd *cobra.Command, args []string) error {
return nil
}
func inTxArray(txByteArray [][]byte, tx []byte) bool {
for _, txTmp := range txByteArray {
if bytes.Equal(txTmp, tx) {
return true
}
}
return false
}
func cmdPrepareProposal(cmd *cobra.Command, args []string) error {
txsBytesArray := make([][]byte, len(args))
for i, arg := range args {
txBytes, err := stringOrHexToBytes(arg)
if err != nil {
return err
}
txsBytesArray[i] = txBytes
}
res, err := client.PrepareProposal(cmd.Context(), &types.RequestPrepareProposal{
Txs: txsBytesArray,
// kvstore has to have this parameter in order not to reject a tx as the default value is 0
MaxTxBytes: 65536,
})
if err != nil {
return err
}
resps := make([]response, 0, len(res.TxResults)+1)
for _, tx := range res.TxRecords {
existingTx := inTxArray(txsBytesArray, tx.Tx)
if tx.Action == types.TxRecord_UNKNOWN ||
(existingTx && tx.Action == types.TxRecord_ADDED) ||
(!existingTx && (tx.Action == types.TxRecord_UNMODIFIED || tx.Action == types.TxRecord_REMOVED)) {
resps = append(resps, response{
Code: codeBad,
Log: "Failed. Tx: " + string(tx.GetTx()) + " action: " + tx.Action.String(),
})
} else {
resps = append(resps, response{
Code: code.CodeTypeOK,
Log: "Succeeded. Tx: " + string(tx.Tx) + " action: " + tx.Action.String(),
})
}
}
printResponse(cmd, args, resps...)
return nil
}
func cmdProcessProposal(cmd *cobra.Command, args []string) error {
txsBytesArray := make([][]byte, len(args))
for i, arg := range args {
txBytes, err := stringOrHexToBytes(arg)
if err != nil {
return err
}
txsBytesArray[i] = txBytes
}
res, err := client.ProcessProposal(cmd.Context(), &types.RequestProcessProposal{
Txs: txsBytesArray,
})
if err != nil {
return err
}
printResponse(cmd, args, response{
Status: int32(res.Status),
})
return nil
}
func makeKVStoreCmd(logger log.Logger) func(*cobra.Command, []string) error {
return func(cmd *cobra.Command, args []string) error {
// Create the application - in memory or persisted to disk
@@ -741,48 +634,44 @@ func makeKVStoreCmd(logger log.Logger) func(*cobra.Command, []string) error {
//--------------------------------------------------------------------------------
func printResponse(cmd *cobra.Command, args []string, rsps ...response) {
func printResponse(cmd *cobra.Command, args []string, rsp response) {
if flagVerbose {
fmt.Println(">", cmd.Use, strings.Join(args, " "))
}
for _, rsp := range rsps {
// Always print the status code.
if rsp.Code == types.CodeTypeOK {
fmt.Printf("-> code: OK\n")
} else {
fmt.Printf("-> code: %d\n", rsp.Code)
}
// Always print the status code.
if rsp.Code == types.CodeTypeOK {
fmt.Printf("-> code: OK\n")
} else {
fmt.Printf("-> code: %d\n", rsp.Code)
if len(rsp.Data) != 0 {
// Do not print this line when using the finalize_block command
// because the string comes out as gibberish
if cmd.Use != "finalize_block" {
fmt.Printf("-> data: %s\n", rsp.Data)
}
fmt.Printf("-> data.hex: 0x%X\n", rsp.Data)
}
if rsp.Log != "" {
fmt.Printf("-> log: %s\n", rsp.Log)
}
if cmd.Use == "process_proposal" {
fmt.Printf("-> status: %s\n", types.ResponseProcessProposal_ProposalStatus_name[rsp.Status])
}
}
if rsp.Query != nil {
fmt.Printf("-> height: %d\n", rsp.Query.Height)
if rsp.Query.Key != nil {
fmt.Printf("-> key: %s\n", rsp.Query.Key)
fmt.Printf("-> key.hex: %X\n", rsp.Query.Key)
}
if rsp.Query.Value != nil {
fmt.Printf("-> value: %s\n", rsp.Query.Value)
fmt.Printf("-> value.hex: %X\n", rsp.Query.Value)
}
if rsp.Query.ProofOps != nil {
fmt.Printf("-> proof: %#v\n", rsp.Query.ProofOps)
}
if len(rsp.Data) != 0 {
// Do not print this line when using the commit command
// because the string comes out as gibberish
if cmd.Use != "commit" {
fmt.Printf("-> data: %s\n", rsp.Data)
}
fmt.Printf("-> data.hex: 0x%X\n", rsp.Data)
}
if rsp.Log != "" {
fmt.Printf("-> log: %s\n", rsp.Log)
}
if rsp.Query != nil {
fmt.Printf("-> height: %d\n", rsp.Query.Height)
if rsp.Query.Key != nil {
fmt.Printf("-> key: %s\n", rsp.Query.Key)
fmt.Printf("-> key.hex: %X\n", rsp.Query.Key)
}
if rsp.Query.Value != nil {
fmt.Printf("-> value: %s\n", rsp.Query.Value)
fmt.Printf("-> value.hex: %X\n", rsp.Query.Value)
}
if rsp.Query.ProofOps != nil {
fmt.Printf("-> proof: %#v\n", rsp.Query.ProofOps)
}
}
}

View File

@@ -53,7 +53,7 @@ func TestGRPC(t *testing.T) {
logger := log.NewNopLogger()
t.Log("### Testing GRPC")
testGRPCSync(ctx, t, logger, types.NewBaseApplication())
testGRPCSync(ctx, t, logger, types.NewGRPCApplication(types.NewBaseApplication()))
}
func testBulk(ctx context.Context, t *testing.T, logger log.Logger, app types.Application) {
@@ -77,7 +77,7 @@ func testBulk(ctx context.Context, t *testing.T, logger log.Logger, app types.Ap
require.NoError(t, err)
// Construct request
rfb := &types.RequestFinalizeBlock{Txs: make([][]byte, numDeliverTxs)}
rfb := types.RequestFinalizeBlock{Txs: make([][]byte, numDeliverTxs)}
for counter := 0; counter < numDeliverTxs; counter++ {
rfb.Txs[counter] = []byte("test")
}
@@ -101,7 +101,7 @@ func dialerFunc(ctx context.Context, addr string) (net.Conn, error) {
return tmnet.Connect(addr)
}
func testGRPCSync(ctx context.Context, t *testing.T, logger log.Logger, app types.Application) {
func testGRPCSync(ctx context.Context, t *testing.T, logger log.Logger, app types.ABCIApplicationServer) {
t.Helper()
numDeliverTxs := 680000
socketFile := fmt.Sprintf("/tmp/test-%08x.sock", rand.Int31n(1<<30))

View File

@@ -1,7 +1,6 @@
package kvstore
import (
"context"
mrand "math/rand"
"github.com/tendermint/tendermint/abci/types"
@@ -33,9 +32,8 @@ func RandVals(cnt int) []types.ValidatorUpdate {
// InitKVStore initializes the kvstore app with some data,
// which allows tests to pass and is fine as long as you
// don't make any tx that modifies the validator state
func InitKVStore(ctx context.Context, app *PersistentKVStoreApplication) error {
_, err := app.InitChain(ctx, &types.RequestInitChain{
func InitKVStore(app *PersistentKVStoreApplication) {
app.InitChain(types.RequestInitChain{
Validators: RandVals(1),
})
return err
}

View File

@@ -2,7 +2,6 @@ package kvstore
import (
"bytes"
"context"
"encoding/base64"
"encoding/binary"
"encoding/json"
@@ -91,7 +90,7 @@ func NewApplication() *Application {
}
}
func (app *Application) InitChain(_ context.Context, req *types.RequestInitChain) (*types.ResponseInitChain, error) {
func (app *Application) InitChain(req types.RequestInitChain) types.ResponseInitChain {
app.mu.Lock()
defer app.mu.Unlock()
@@ -102,19 +101,19 @@ func (app *Application) InitChain(_ context.Context, req *types.RequestInitChain
panic("problem updating validators")
}
}
return &types.ResponseInitChain{}, nil
return types.ResponseInitChain{}
}
func (app *Application) Info(_ context.Context, req *types.RequestInfo) (*types.ResponseInfo, error) {
func (app *Application) Info(req types.RequestInfo) types.ResponseInfo {
app.mu.Lock()
defer app.mu.Unlock()
return &types.ResponseInfo{
return types.ResponseInfo{
Data: fmt.Sprintf("{\"size\":%v}", app.state.Size),
Version: version.ABCIVersion,
AppVersion: ProtocolVersion,
LastBlockHeight: app.state.Height,
LastBlockAppHash: app.state.AppHash,
}, nil
}
}
// tx is either "val:pubkey!power" or "key=value" or just arbitrary bytes
@@ -167,7 +166,7 @@ func (app *Application) Close() error {
return app.state.db.Close()
}
func (app *Application) FinalizeBlock(_ context.Context, req *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
func (app *Application) FinalizeBlock(req types.RequestFinalizeBlock) types.ResponseFinalizeBlock {
app.mu.Lock()
defer app.mu.Unlock()
@@ -175,8 +174,8 @@ func (app *Application) FinalizeBlock(_ context.Context, req *types.RequestFinal
app.ValUpdates = make([]types.ValidatorUpdate, 0)
// Punish validators who committed equivocation.
for _, ev := range req.Misbehavior {
if ev.Type == types.MisbehaviorType_DUPLICATE_VOTE {
for _, ev := range req.ByzantineValidators {
if ev.Type == types.EvidenceType_DUPLICATE_VOTE {
addr := string(ev.Validator.Address)
if pubKey, ok := app.valAddrToPubKeyMap[addr]; ok {
app.updateValidator(types.ValidatorUpdate{
@@ -196,34 +195,33 @@ func (app *Application) FinalizeBlock(_ context.Context, req *types.RequestFinal
respTxs[i] = app.handleTx(tx)
}
return types.ResponseFinalizeBlock{TxResults: respTxs, ValidatorUpdates: app.ValUpdates}
}
func (*Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
return types.ResponseCheckTx{Code: code.CodeTypeOK, GasWanted: 1}
}
func (app *Application) Commit() types.ResponseCommit {
app.mu.Lock()
defer app.mu.Unlock()
// Using a memdb - just return the big endian size of the db
appHash := make([]byte, 8)
binary.PutVarint(appHash, app.state.Size)
app.state.AppHash = appHash
app.state.Height++
return &types.ResponseFinalizeBlock{TxResults: respTxs, ValidatorUpdates: app.ValUpdates, AppHash: appHash}, nil
}
func (*Application) CheckTx(_ context.Context, req *types.RequestCheckTx) (*types.ResponseCheckTx, error) {
return &types.ResponseCheckTx{Code: code.CodeTypeOK, GasWanted: 1}, nil
}
func (app *Application) Commit(_ context.Context) (*types.ResponseCommit, error) {
app.mu.Lock()
defer app.mu.Unlock()
saveState(app.state)
resp := &types.ResponseCommit{}
resp := types.ResponseCommit{Data: appHash}
if app.RetainBlocks > 0 && app.state.Height >= app.RetainBlocks {
resp.RetainHeight = app.state.Height - app.RetainBlocks + 1
}
return resp, nil
return resp
}
// Returns an associated value or nil if missing.
func (app *Application) Query(_ context.Context, reqQuery *types.RequestQuery) (*types.ResponseQuery, error) {
func (app *Application) Query(reqQuery types.RequestQuery) types.ResponseQuery {
app.mu.Lock()
defer app.mu.Unlock()
@@ -234,10 +232,10 @@ func (app *Application) Query(_ context.Context, reqQuery *types.RequestQuery) (
panic(err)
}
return &types.ResponseQuery{
return types.ResponseQuery{
Key: reqQuery.Data,
Value: value,
}, nil
}
}
if reqQuery.Prove {
@@ -259,7 +257,7 @@ func (app *Application) Query(_ context.Context, reqQuery *types.RequestQuery) (
resQuery.Log = "exists"
}
return &resQuery, nil
return resQuery
}
value, err := app.state.db.Get(prefixKey(reqQuery.Data))
@@ -279,25 +277,26 @@ func (app *Application) Query(_ context.Context, reqQuery *types.RequestQuery) (
resQuery.Log = "exists"
}
return &resQuery, nil
return resQuery
}
func (app *Application) PrepareProposal(_ context.Context, req *types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
func (app *Application) PrepareProposal(req types.RequestPrepareProposal) types.ResponsePrepareProposal {
app.mu.Lock()
defer app.mu.Unlock()
return &types.ResponsePrepareProposal{
TxRecords: app.substPrepareTx(req.Txs, req.MaxTxBytes),
}, nil
return types.ResponsePrepareProposal{
ModifiedTxStatus: types.ResponsePrepareProposal_MODIFIED,
TxRecords: app.substPrepareTx(req.Txs),
}
}
func (*Application) ProcessProposal(_ context.Context, req *types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
func (*Application) ProcessProposal(req types.RequestProcessProposal) types.ResponseProcessProposal {
for _, tx := range req.Txs {
if len(tx) == 0 || isPrepareTx(tx) {
return &types.ResponseProcessProposal{Status: types.ResponseProcessProposal_REJECT}, nil
if len(tx) == 0 {
return types.ResponseProcessProposal{Status: types.ResponseProcessProposal_REJECT}
}
}
return &types.ResponseProcessProposal{Status: types.ResponseProcessProposal_ACCEPT}, nil
return types.ResponseProcessProposal{Status: types.ResponseProcessProposal_ACCEPT}
}
//---------------------------------------------
@@ -435,32 +434,28 @@ func (app *Application) execPrepareTx(tx []byte) *types.ExecTxResult {
}
// substPrepareTx substitutes all the transactions prefixed with 'prepare' in the
// proposal for transactions with the prefix stripped.
// proposal for transactions with the prefix stripped.
// It marks all of the original transactions as 'REMOVED' so that
// Tendermint will remove them from its mempool.
func (app *Application) substPrepareTx(blockData [][]byte, maxTxBytes int64) []*types.TxRecord {
trs := make([]*types.TxRecord, 0, len(blockData))
func (app *Application) substPrepareTx(blockData [][]byte) []*types.TxRecord {
trs := make([]*types.TxRecord, len(blockData))
var removed []*types.TxRecord
var totalBytes int64
for _, tx := range blockData {
txMod := tx
action := types.TxRecord_UNMODIFIED
for i, tx := range blockData {
if isPrepareTx(tx) {
removed = append(removed, &types.TxRecord{
Tx: tx,
Action: types.TxRecord_REMOVED,
})
txMod = bytes.TrimPrefix(tx, []byte(PreparePrefix))
action = types.TxRecord_ADDED
trs[i] = &types.TxRecord{
Tx: bytes.TrimPrefix(tx, []byte(PreparePrefix)),
Action: types.TxRecord_ADDED,
}
continue
}
totalBytes += int64(len(txMod))
if totalBytes > maxTxBytes {
break
trs[i] = &types.TxRecord{
Tx: tx,
Action: types.TxRecord_UNMODIFIED,
}
trs = append(trs, &types.TxRecord{
Tx: txMod,
Action: action,
})
}
return append(trs, removed...)
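
The app hash that Commit returns above is just the store size written into an eight-byte slice with binary.PutVarint. Varint zig-zag encoding doubles small non-negative values, which is why a size of 1 appears as 0x0200000000000000 and a size of 3 as 0x0600000000000000 in the expected CLI output further down. A standalone sketch of that encoding:

package main

import (
    "encoding/binary"
    "fmt"
)

// Reproduces the kvstore app-hash encoding: PutVarint zig-zag encodes the
// store size, so 0 -> 0x00..., 1 -> 0x02..., 3 -> 0x06...
func main() {
    for _, size := range []int64{0, 1, 3} {
        appHash := make([]byte, 8)
        binary.PutVarint(appHash, size)
        fmt.Printf("size=%d -> 0x%X\n", size, appHash)
    }
}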

View File

@@ -16,6 +16,7 @@ import (
"github.com/tendermint/tendermint/abci/example/code"
abciserver "github.com/tendermint/tendermint/abci/server"
"github.com/tendermint/tendermint/abci/types"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
)
const (
@@ -23,43 +24,37 @@ const (
testValue = "def"
)
func testKVStore(ctx context.Context, t *testing.T, app types.Application, tx []byte, key, value string) {
req := &types.RequestFinalizeBlock{Txs: [][]byte{tx}}
ar, err := app.FinalizeBlock(ctx, req)
require.NoError(t, err)
func testKVStore(t *testing.T, app types.Application, tx []byte, key, value string) {
req := types.RequestFinalizeBlock{Txs: [][]byte{tx}}
ar := app.FinalizeBlock(req)
require.Equal(t, 1, len(ar.TxResults))
require.False(t, ar.TxResults[0].IsErr())
// repeating tx doesn't raise error
ar, err = app.FinalizeBlock(ctx, req)
require.NoError(t, err)
ar = app.FinalizeBlock(req)
require.Equal(t, 1, len(ar.TxResults))
require.False(t, ar.TxResults[0].IsErr())
// commit
_, err = app.Commit(ctx)
require.NoError(t, err)
app.Commit()
info, err := app.Info(ctx, &types.RequestInfo{})
require.NoError(t, err)
info := app.Info(types.RequestInfo{})
require.NotZero(t, info.LastBlockHeight)
// make sure query is fine
resQuery, err := app.Query(ctx, &types.RequestQuery{
resQuery := app.Query(types.RequestQuery{
Path: "/store",
Data: []byte(key),
})
require.NoError(t, err)
require.Equal(t, code.CodeTypeOK, resQuery.Code)
require.Equal(t, key, string(resQuery.Key))
require.Equal(t, value, string(resQuery.Value))
require.EqualValues(t, info.LastBlockHeight, resQuery.Height)
// make sure proof is fine
resQuery, err = app.Query(ctx, &types.RequestQuery{
resQuery = app.Query(types.RequestQuery{
Path: "/store",
Data: []byte(key),
Prove: true,
})
require.NoError(t, err)
require.EqualValues(t, code.CodeTypeOK, resQuery.Code)
require.Equal(t, key, string(resQuery.Key))
require.Equal(t, value, string(resQuery.Value))
@@ -67,24 +62,18 @@ func testKVStore(ctx context.Context, t *testing.T, app types.Application, tx []
}
func TestKVStoreKV(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
kvstore := NewApplication()
key := testKey
value := key
tx := []byte(key)
testKVStore(ctx, t, kvstore, tx, key, value)
testKVStore(t, kvstore, tx, key, value)
value = testValue
tx = []byte(key + "=" + value)
testKVStore(ctx, t, kvstore, tx, key, value)
testKVStore(t, kvstore, tx, key, value)
}
func TestPersistentKVStoreKV(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
dir := t.TempDir()
logger := log.NewNopLogger()
@@ -92,30 +81,22 @@ func TestPersistentKVStoreKV(t *testing.T) {
key := testKey
value := key
tx := []byte(key)
testKVStore(ctx, t, kvstore, tx, key, value)
testKVStore(t, kvstore, tx, key, value)
value = testValue
tx = []byte(key + "=" + value)
testKVStore(ctx, t, kvstore, tx, key, value)
testKVStore(t, kvstore, tx, key, value)
}
func TestPersistentKVStoreInfo(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
dir := t.TempDir()
logger := log.NewNopLogger()
kvstore := NewPersistentKVStoreApplication(logger, dir)
if err := InitKVStore(ctx, kvstore); err != nil {
t.Fatal(err)
}
InitKVStore(kvstore)
height := int64(0)
resInfo, err := kvstore.Info(ctx, &types.RequestInfo{})
if err != nil {
t.Fatal(err)
}
resInfo := kvstore.Info(types.RequestInfo{})
if resInfo.LastBlockHeight != height {
t.Fatalf("expected height of %d, got %d", height, resInfo.LastBlockHeight)
}
@@ -123,19 +104,13 @@ func TestPersistentKVStoreInfo(t *testing.T) {
// make and apply block
height = int64(1)
hash := []byte("foo")
if _, err := kvstore.FinalizeBlock(ctx, &types.RequestFinalizeBlock{Hash: hash, Height: height}); err != nil {
t.Fatal(err)
header := tmproto.Header{
Height: height,
}
kvstore.FinalizeBlock(types.RequestFinalizeBlock{Hash: hash, Header: header})
kvstore.Commit()
if _, err := kvstore.Commit(ctx); err != nil {
t.Fatal(err)
}
resInfo, err = kvstore.Info(ctx, &types.RequestInfo{})
if err != nil {
t.Fatal(err)
}
resInfo = kvstore.Info(types.RequestInfo{})
if resInfo.LastBlockHeight != height {
t.Fatalf("expected height of %d, got %d", height, resInfo.LastBlockHeight)
}
@@ -144,9 +119,6 @@ func TestPersistentKVStoreInfo(t *testing.T) {
// add a validator, remove a validator, update a validator
func TestValUpdates(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
kvstore := NewApplication()
// init with some validators
@@ -154,12 +126,9 @@ func TestValUpdates(t *testing.T) {
nInit := 5
vals := RandVals(total)
// initialize with the first nInit
_, err := kvstore.InitChain(ctx, &types.RequestInitChain{
kvstore.InitChain(types.RequestInitChain{
Validators: vals[:nInit],
})
if err != nil {
t.Fatal(err)
}
vals1, vals2 := vals[:nInit], kvstore.Validators()
valsEqual(t, vals1, vals2)
@@ -172,7 +141,7 @@ func TestValUpdates(t *testing.T) {
tx1 := MakeValSetChangeTx(v1.PubKey, v1.Power)
tx2 := MakeValSetChangeTx(v2.PubKey, v2.Power)
makeApplyBlock(ctx, t, kvstore, 1, diff, tx1, tx2)
makeApplyBlock(t, kvstore, 1, diff, tx1, tx2)
vals1, vals2 = vals[:nInit+2], kvstore.Validators()
valsEqual(t, vals1, vals2)
@@ -187,7 +156,7 @@ func TestValUpdates(t *testing.T) {
tx2 = MakeValSetChangeTx(v2.PubKey, v2.Power)
tx3 := MakeValSetChangeTx(v3.PubKey, v3.Power)
makeApplyBlock(ctx, t, kvstore, 2, diff, tx1, tx2, tx3)
makeApplyBlock(t, kvstore, 2, diff, tx1, tx2, tx3)
vals1 = append(vals[:nInit-2], vals[nInit+1]) // nolint: gocritic
vals2 = kvstore.Validators()
@@ -203,7 +172,7 @@ func TestValUpdates(t *testing.T) {
diff = []types.ValidatorUpdate{v1}
tx1 = MakeValSetChangeTx(v1.PubKey, v1.Power)
makeApplyBlock(ctx, t, kvstore, 3, diff, tx1)
makeApplyBlock(t, kvstore, 3, diff, tx1)
vals1 = append([]types.ValidatorUpdate{v1}, vals1[1:]...)
vals2 = kvstore.Validators()
@@ -211,23 +180,26 @@ func TestValUpdates(t *testing.T) {
}
func makeApplyBlock(ctx context.Context, t *testing.T, kvstore types.Application, heightInt int, diff []types.ValidatorUpdate, txs ...[]byte) {
func makeApplyBlock(
t *testing.T,
kvstore types.Application,
heightInt int,
diff []types.ValidatorUpdate,
txs ...[]byte) {
// make and apply block
height := int64(heightInt)
hash := []byte("foo")
resFinalizeBlock, err := kvstore.FinalizeBlock(ctx, &types.RequestFinalizeBlock{
Hash: hash,
header := tmproto.Header{
Height: height,
Txs: txs,
})
if err != nil {
t.Fatal(err)
}
_, err = kvstore.Commit(ctx)
if err != nil {
t.Fatal(err)
}
resFinalizeBlock := kvstore.FinalizeBlock(types.RequestFinalizeBlock{
Hash: hash,
Header: header,
Txs: txs,
})
kvstore.Commit()
valsEqual(t, diff, resFinalizeBlock.ValidatorUpdates)
@@ -296,7 +268,8 @@ func makeGRPCClientServer(
// Start the listener
socket := fmt.Sprintf("unix://%s.sock", name)
server := abciserver.NewGRPCServer(logger.With("module", "abci-server"), socket, app)
gapp := types.NewGRPCApplication(app)
server := abciserver.NewGRPCServer(logger.With("module", "abci-server"), socket, gapp)
if err := server.Start(ctx); err != nil {
cancel()
@@ -350,12 +323,12 @@ func runClientTests(ctx context.Context, t *testing.T, client abciclient.Client)
}
func testClient(ctx context.Context, t *testing.T, app abciclient.Client, tx []byte, key, value string) {
ar, err := app.FinalizeBlock(ctx, &types.RequestFinalizeBlock{Txs: [][]byte{tx}})
ar, err := app.FinalizeBlock(ctx, types.RequestFinalizeBlock{Txs: [][]byte{tx}})
require.NoError(t, err)
require.Equal(t, 1, len(ar.TxResults))
require.False(t, ar.TxResults[0].IsErr())
// repeating FinalizeBlock doesn't raise error
ar, err = app.FinalizeBlock(ctx, &types.RequestFinalizeBlock{Txs: [][]byte{tx}})
ar, err = app.FinalizeBlock(ctx, types.RequestFinalizeBlock{Txs: [][]byte{tx}})
require.NoError(t, err)
require.Equal(t, 1, len(ar.TxResults))
require.False(t, ar.TxResults[0].IsErr())
@@ -363,12 +336,12 @@ func testClient(ctx context.Context, t *testing.T, app abciclient.Client, tx []b
_, err = app.Commit(ctx)
require.NoError(t, err)
info, err := app.Info(ctx, &types.RequestInfo{})
info, err := app.Info(ctx, types.RequestInfo{})
require.NoError(t, err)
require.NotZero(t, info.LastBlockHeight)
// make sure query is fine
resQuery, err := app.Query(ctx, &types.RequestQuery{
resQuery, err := app.Query(ctx, types.RequestQuery{
Path: "/store",
Data: []byte(key),
})
@@ -379,7 +352,7 @@ func testClient(ctx context.Context, t *testing.T, app abciclient.Client, tx []b
require.EqualValues(t, info.LastBlockHeight, resQuery.Height)
// make sure proof is fine
resQuery, err = app.Query(ctx, &types.RequestQuery{
resQuery, err = app.Query(ctx, types.RequestQuery{
Path: "/store",
Data: []byte(key),
Prove: true,

View File

@@ -1,13 +1,14 @@
package kvstore
import (
"context"
"bytes"
dbm "github.com/tendermint/tm-db"
"github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/libs/log"
cryptoproto "github.com/tendermint/tendermint/proto/tendermint/crypto"
ptypes "github.com/tendermint/tendermint/proto/tendermint/types"
)
const (
@@ -37,10 +38,41 @@ func NewPersistentKVStoreApplication(logger log.Logger, dbDir string) *Persisten
}
}
func (app *PersistentKVStoreApplication) OfferSnapshot(_ context.Context, req *types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
return &types.ResponseOfferSnapshot{Result: types.ResponseOfferSnapshot_ABORT}, nil
func (app *PersistentKVStoreApplication) OfferSnapshot(req types.RequestOfferSnapshot) types.ResponseOfferSnapshot {
return types.ResponseOfferSnapshot{Result: types.ResponseOfferSnapshot_ABORT}
}
func (app *PersistentKVStoreApplication) ApplySnapshotChunk(_ context.Context, req *types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
return &types.ResponseApplySnapshotChunk{Result: types.ResponseApplySnapshotChunk_ABORT}, nil
func (app *PersistentKVStoreApplication) ApplySnapshotChunk(req types.RequestApplySnapshotChunk) types.ResponseApplySnapshotChunk {
return types.ResponseApplySnapshotChunk{Result: types.ResponseApplySnapshotChunk_ABORT}
}
func (app *PersistentKVStoreApplication) ExtendVote(req types.RequestExtendVote) types.ResponseExtendVote {
return types.ResponseExtendVote{VoteExtension: ConstructVoteExtension(req.Vote.ValidatorAddress)}
}
func (app *PersistentKVStoreApplication) VerifyVoteExtension(req types.RequestVerifyVoteExtension) types.ResponseVerifyVoteExtension {
return types.RespondVerifyVoteExtension(app.verifyExtension(req.Vote.ValidatorAddress, req.Vote.VoteExtension))
}
// -----------------------------
func ConstructVoteExtension(valAddr []byte) *ptypes.VoteExtension {
return &ptypes.VoteExtension{
AppDataToSign: valAddr,
AppDataSelfAuthenticating: valAddr,
}
}
func (app *PersistentKVStoreApplication) verifyExtension(valAddr []byte, ext *ptypes.VoteExtension) bool {
if ext == nil {
return false
}
canonical := ConstructVoteExtension(valAddr)
if !bytes.Equal(canonical.AppDataToSign, ext.AppDataToSign) {
return false
}
if !bytes.Equal(canonical.AppDataSelfAuthenticating, ext.AppDataSelfAuthenticating) {
return false
}
return true
}
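
ExtendVote and VerifyVoteExtension above are symmetric: the extension is ConstructVoteExtension(validator address), and verification recomputes it and compares byte-for-byte. A hypothetical in-package test (not part of the diff) sketching that round trip:

package kvstore

import (
    "testing"

    "github.com/tendermint/tendermint/libs/log"
)

// TestVoteExtensionRoundTrip checks that the canonical extension verifies for
// its own validator address and fails for any other address.
func TestVoteExtensionRoundTrip(t *testing.T) {
    app := NewPersistentKVStoreApplication(log.NewNopLogger(), t.TempDir())
    addr := []byte("validator-address")

    ext := ConstructVoteExtension(addr)
    if !app.verifyExtension(addr, ext) {
        t.Fatal("canonical extension must verify for its own address")
    }
    if app.verifyExtension([]byte("another-address"), ext) {
        t.Fatal("extension must not verify for a different address")
    }
}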

View File

@@ -20,11 +20,11 @@ type GRPCServer struct {
addr string
server *grpc.Server
app types.Application
app types.ABCIApplicationServer
}
// NewGRPCServer returns a new gRPC ABCI server
func NewGRPCServer(logger log.Logger, protoAddr string, app types.Application) service.Service {
func NewGRPCServer(logger log.Logger, protoAddr string, app types.ABCIApplicationServer) service.Service {
proto, addr := tmnet.ProtocolAndAddress(protoAddr)
s := &GRPCServer{
logger: logger,
@@ -44,7 +44,7 @@ func (s *GRPCServer) OnStart(ctx context.Context) error {
}
s.server = grpc.NewServer()
types.RegisterABCIApplicationServer(s.server, &gRPCApplication{Application: s.app})
types.RegisterABCIApplicationServer(s.server, s.app)
s.logger.Info("Listening", "proto", s.proto, "addr", s.addr)
go func() {
@@ -62,22 +62,3 @@ func (s *GRPCServer) OnStart(ctx context.Context) error {
// OnStop stops the gRPC server.
func (s *GRPCServer) OnStop() { s.server.Stop() }
//-------------------------------------------------------
// gRPCApplication is a gRPC shim for Application
type gRPCApplication struct {
types.Application
}
func (app *gRPCApplication) Echo(_ context.Context, req *types.RequestEcho) (*types.ResponseEcho, error) {
return &types.ResponseEcho{Message: req.Message}, nil
}
func (app *gRPCApplication) Flush(_ context.Context, req *types.RequestFlush) (*types.ResponseFlush, error) {
return &types.ResponseFlush{}, nil
}
func (app *gRPCApplication) Commit(ctx context.Context, req *types.RequestCommit) (*types.ResponseCommit, error) {
return app.Application.Commit(ctx)
}

View File

@@ -23,7 +23,7 @@ func NewServer(logger log.Logger, protoAddr, transport string, app types.Applica
case "socket":
s = NewSocketServer(logger, protoAddr, app)
case "grpc":
s = NewGRPCServer(logger, protoAddr, app)
s = NewGRPCServer(logger, protoAddr, types.NewGRPCApplication(app))
default:
err = fmt.Errorf("unknown server type %s", transport)
}
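
With this change the gRPC transport serves a types.ABCIApplicationServer rather than a plain Application, so NewServer wraps the application with types.NewGRPCApplication for the "grpc" case. A sketch of doing the same wrapping by hand (not part of the diff; the kvstore import path and socket address are assumed example values):

package main

import (
    "context"

    "github.com/tendermint/tendermint/abci/example/kvstore"
    abciserver "github.com/tendermint/tendermint/abci/server"
    "github.com/tendermint/tendermint/abci/types"
    "github.com/tendermint/tendermint/libs/log"
)

// serveGRPC adapts a plain Application to the gRPC server interface and
// starts it on a unix socket.
func serveGRPC(ctx context.Context, logger log.Logger) error {
    app := kvstore.NewApplication()
    gapp := types.NewGRPCApplication(app)
    srv := abciserver.NewGRPCServer(logger, "unix:///tmp/abci-example.sock", gapp)
    return srv.Start(ctx)
}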

View File

@@ -159,7 +159,12 @@ func (s *SocketServer) acceptConnectionsRoutine(ctx context.Context) {
}
// Read requests from conn and deal with them
func (s *SocketServer) handleRequests(ctx context.Context, closer func(error), conn io.Reader, responses chan<- *types.Response) {
func (s *SocketServer) handleRequests(
ctx context.Context,
closer func(error),
conn io.Reader,
responses chan<- *types.Response,
) {
var bufReader = bufio.NewReader(conn)
defer func() {
@@ -179,12 +184,7 @@ func (s *SocketServer) handleRequests(ctx context.Context, closer func(error), c
return
}
resp, err := s.processRequest(ctx, req)
if err != nil {
closer(err)
return
}
resp := s.processRequest(req)
select {
case <-ctx.Done():
closer(ctx.Err())
@@ -194,99 +194,42 @@ func (s *SocketServer) handleRequests(ctx context.Context, closer func(error), c
}
}
func (s *SocketServer) processRequest(ctx context.Context, req *types.Request) (*types.Response, error) {
func (s *SocketServer) processRequest(req *types.Request) *types.Response {
switch r := req.Value.(type) {
case *types.Request_Echo:
return types.ToResponseEcho(r.Echo.Message), nil
return types.ToResponseEcho(r.Echo.Message)
case *types.Request_Flush:
return types.ToResponseFlush(), nil
return types.ToResponseFlush()
case *types.Request_Info:
res, err := s.app.Info(ctx, r.Info)
if err != nil {
return nil, err
}
return types.ToResponseInfo(res), nil
return types.ToResponseInfo(s.app.Info(*r.Info))
case *types.Request_CheckTx:
res, err := s.app.CheckTx(ctx, r.CheckTx)
if err != nil {
return nil, err
}
return types.ToResponseCheckTx(res), nil
return types.ToResponseCheckTx(s.app.CheckTx(*r.CheckTx))
case *types.Request_Commit:
res, err := s.app.Commit(ctx)
if err != nil {
return nil, err
}
return types.ToResponseCommit(res), nil
return types.ToResponseCommit(s.app.Commit())
case *types.Request_Query:
res, err := s.app.Query(ctx, r.Query)
if err != nil {
return nil, err
}
return types.ToResponseQuery(res), nil
return types.ToResponseQuery(s.app.Query(*r.Query))
case *types.Request_InitChain:
res, err := s.app.InitChain(ctx, r.InitChain)
if err != nil {
return nil, err
}
return types.ToResponseInitChain(res), nil
return types.ToResponseInitChain(s.app.InitChain(*r.InitChain))
case *types.Request_ListSnapshots:
res, err := s.app.ListSnapshots(ctx, r.ListSnapshots)
if err != nil {
return nil, err
}
return types.ToResponseListSnapshots(res), nil
return types.ToResponseListSnapshots(s.app.ListSnapshots(*r.ListSnapshots))
case *types.Request_OfferSnapshot:
res, err := s.app.OfferSnapshot(ctx, r.OfferSnapshot)
if err != nil {
return nil, err
}
return types.ToResponseOfferSnapshot(res), nil
return types.ToResponseOfferSnapshot(s.app.OfferSnapshot(*r.OfferSnapshot))
case *types.Request_PrepareProposal:
res, err := s.app.PrepareProposal(ctx, r.PrepareProposal)
if err != nil {
return nil, err
}
return types.ToResponsePrepareProposal(res), nil
return types.ToResponsePrepareProposal(s.app.PrepareProposal(*r.PrepareProposal))
case *types.Request_ProcessProposal:
res, err := s.app.ProcessProposal(ctx, r.ProcessProposal)
if err != nil {
return nil, err
}
return types.ToResponseProcessProposal(res), nil
return types.ToResponseProcessProposal(s.app.ProcessProposal(*r.ProcessProposal))
case *types.Request_LoadSnapshotChunk:
res, err := s.app.LoadSnapshotChunk(ctx, r.LoadSnapshotChunk)
if err != nil {
return nil, err
}
return types.ToResponseLoadSnapshotChunk(res), nil
return types.ToResponseLoadSnapshotChunk(s.app.LoadSnapshotChunk(*r.LoadSnapshotChunk))
case *types.Request_ApplySnapshotChunk:
res, err := s.app.ApplySnapshotChunk(ctx, r.ApplySnapshotChunk)
if err != nil {
return nil, err
}
return types.ToResponseApplySnapshotChunk(res), nil
return types.ToResponseApplySnapshotChunk(s.app.ApplySnapshotChunk(*r.ApplySnapshotChunk))
case *types.Request_ExtendVote:
res, err := s.app.ExtendVote(ctx, r.ExtendVote)
if err != nil {
return nil, err
}
return types.ToResponseExtendVote(res), nil
return types.ToResponseExtendVote(s.app.ExtendVote(*r.ExtendVote))
case *types.Request_VerifyVoteExtension:
res, err := s.app.VerifyVoteExtension(ctx, r.VerifyVoteExtension)
if err != nil {
return nil, err
}
return types.ToResponseVerifyVoteExtension(res), nil
return types.ToResponseVerifyVoteExtension(s.app.VerifyVoteExtension(*r.VerifyVoteExtension))
case *types.Request_FinalizeBlock:
res, err := s.app.FinalizeBlock(ctx, r.FinalizeBlock)
if err != nil {
return nil, err
}
return types.ToResponseFinalizeBlock(res), nil
return types.ToResponseFinalizeBlock(s.app.FinalizeBlock(*r.FinalizeBlock))
default:
return types.ToResponseException("Unknown request"), errors.New("unknown request type")
return types.ToResponseException("Unknown request")
}
}

View File

@@ -21,7 +21,7 @@ func InitChain(ctx context.Context, client abciclient.Client) error {
power := mrand.Int()
vals[i] = types.UpdateValidator(pubkey, int64(power), "")
}
_, err := client.InitChain(ctx, &types.RequestInitChain{
_, err := client.InitChain(ctx, types.RequestInitChain{
Validators: vals,
})
if err != nil {
@@ -32,20 +32,25 @@ func InitChain(ctx context.Context, client abciclient.Client) error {
return nil
}
func Commit(ctx context.Context, client abciclient.Client) error {
_, err := client.Commit(ctx)
func Commit(ctx context.Context, client abciclient.Client, hashExp []byte) error {
res, err := client.Commit(ctx)
data := res.Data
if err != nil {
fmt.Println("Failed test: Commit")
fmt.Printf("error while committing: %v\n", err)
return err
}
if !bytes.Equal(data, hashExp) {
fmt.Println("Failed test: Commit")
fmt.Printf("Commit hash was unexpected. Got %X expected %X\n", data, hashExp)
return errors.New("commitTx failed")
}
fmt.Println("Passed test: Commit")
return nil
}
func FinalizeBlock(ctx context.Context, client abciclient.Client, txBytes [][]byte, codeExp []uint32, dataExp []byte, hashExp []byte) error {
res, _ := client.FinalizeBlock(ctx, &types.RequestFinalizeBlock{Txs: txBytes})
appHash := res.AppHash
func FinalizeBlock(ctx context.Context, client abciclient.Client, txBytes [][]byte, codeExp []uint32, dataExp []byte) error {
res, _ := client.FinalizeBlock(ctx, types.RequestFinalizeBlock{Txs: txBytes})
for i, tx := range res.TxResults {
code, data, log := tx.Code, tx.Data, tx.Log
if code != codeExp[i] {
@@ -61,48 +66,17 @@ func FinalizeBlock(ctx context.Context, client abciclient.Client, txBytes [][]by
return errors.New("FinalizeBlock error")
}
}
if !bytes.Equal(appHash, hashExp) {
fmt.Println("Failed test: FinalizeBlock")
fmt.Printf("Application hash was unexpected. Got %X expected %X\n", appHash, hashExp)
return errors.New("FinalizeBlock error")
}
fmt.Println("Passed test: FinalizeBlock")
return nil
}
func PrepareProposal(ctx context.Context, client abciclient.Client, txBytes [][]byte, codeExp []types.TxRecord_TxAction, dataExp []byte) error {
res, _ := client.PrepareProposal(ctx, &types.RequestPrepareProposal{Txs: txBytes})
for i, tx := range res.TxRecords {
if tx.Action != codeExp[i] {
fmt.Println("Failed test: PrepareProposal")
fmt.Printf("PrepareProposal response code was unexpected. Got %v expected %v.",
tx.Action, codeExp)
return errors.New("PrepareProposal error")
}
}
fmt.Println("Passed test: PrepareProposal")
return nil
}
func ProcessProposal(ctx context.Context, client abciclient.Client, txBytes [][]byte, statusExp types.ResponseProcessProposal_ProposalStatus) error {
res, _ := client.ProcessProposal(ctx, &types.RequestProcessProposal{Txs: txBytes})
if res.Status != statusExp {
fmt.Println("Failed test: ProcessProposal")
fmt.Printf("ProcessProposal response status was unexpected. Got %v expected %v.",
res.Status, statusExp)
return errors.New("ProcessProposal error")
}
fmt.Println("Passed test: ProcessProposal")
return nil
}
func CheckTx(ctx context.Context, client abciclient.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
res, _ := client.CheckTx(ctx, &types.RequestCheckTx{Tx: txBytes})
code, data := res.Code, res.Data
res, _ := client.CheckTx(ctx, types.RequestCheckTx{Tx: txBytes})
code, data, log := res.Code, res.Data, res.Log
if code != codeExp {
fmt.Println("Failed test: CheckTx")
fmt.Printf("CheckTx response code was unexpected. Got %v expected %v.,",
code, codeExp)
fmt.Printf("CheckTx response code was unexpected. Got %v expected %v. Log: %v\n",
code, codeExp, log)
return errors.New("checkTx")
}
if !bytes.Equal(data, dataExp) {

View File

@@ -1,18 +1,10 @@
echo hello
info
prepare_proposal "abc"
process_proposal "abc"
finalize_block "abc"
commit
finalize_block "abc"
info
commit
query "abc"
finalize_block "def=xyz" "ghi=123"
commit
query "def"
prepare_proposal "preparedef"
process_proposal "def"
process_proposal "preparedef"
prepare_proposal
process_proposal
finalize_block
commit

View File

@@ -8,31 +8,26 @@
-> data: {"size":0}
-> data.hex: 0x7B2273697A65223A307D
> prepare_proposal "abc"
> commit
-> code: OK
-> log: Succeeded. Tx: abc action: UNMODIFIED
> process_proposal "abc"
-> code: OK
-> status: ACCEPT
-> data.hex: 0x0000000000000000
> finalize_block "abc"
-> code: OK
-> code: OK
-> data.hex: 0x0200000000000000
> commit
-> code: OK
> info
-> code: OK
-> data: {"size":1}
-> data.hex: 0x7B2273697A65223A317D
> commit
-> code: OK
-> data.hex: 0x0200000000000000
> query "abc"
-> code: OK
-> log: exists
-> height: 1
-> height: 2
-> key: abc
-> key.hex: 616263
-> value: abc
@@ -40,46 +35,19 @@
> finalize_block "def=xyz" "ghi=123"
-> code: OK
> finalize_block "def=xyz" "ghi=123"
-> code: OK
-> code: OK
-> data.hex: 0x0600000000000000
> commit
-> code: OK
-> data.hex: 0x0600000000000000
> query "def"
-> code: OK
-> log: exists
-> height: 2
-> height: 3
-> key: def
-> key.hex: 646566
-> value: xyz
-> value.hex: 78797A
> prepare_proposal "preparedef"
-> code: OK
-> log: Succeeded. Tx: def action: ADDED
-> code: OK
-> log: Succeeded. Tx: preparedef action: REMOVED
> process_proposal "def"
-> code: OK
-> status: ACCEPT
> process_proposal "preparedef"
-> code: OK
-> status: REJECT
> prepare_proposal
> process_proposal
-> code: OK
-> status: ACCEPT
> finalize_block
-> code: OK
-> data.hex: 0x0600000000000000
> commit
-> code: OK

View File

@@ -1,10 +1,7 @@
check_tx 0x00
check_tx 0xff
finalize_block 0x00
commit
check_tx 0x00
finalize_block 0x01
commit
finalize_block 0x04
commit
info

View File

@@ -6,30 +6,15 @@
> finalize_block 0x00
-> code: OK
-> code: OK
-> data.hex: 0x0200000000000000
> commit
-> code: OK
> check_tx 0x00
-> code: OK
> finalize_block 0x01
-> code: OK
-> code: OK
-> data.hex: 0x0400000000000000
> commit
-> code: OK
> finalize_block 0x04
-> code: OK
-> code: OK
-> data.hex: 0x0600000000000000
> commit
-> code: OK
> info
-> code: OK

View File

@@ -30,8 +30,6 @@ function testExample() {
cat "${INPUT}.out.new"
echo "Expected:"
cat "${INPUT}.out"
echo "Diff:"
diff "${INPUT}.out" "${INPUT}.out.new"
exit 1
fi

View File

@@ -1,36 +1,40 @@
package types
import "context"
import (
"context"
)
//go:generate ../../scripts/mockery_generate.sh Application
// Application is an interface that enables any finite, deterministic state machine
// to be driven by a blockchain-based replication engine via the ABCI.
// All methods take a RequestXxx argument and return a ResponseXxx argument,
// except Commit, which takes nothing.
type Application interface {
// Info/Query Connection
Info(context.Context, *RequestInfo) (*ResponseInfo, error) // Return application info
Query(context.Context, *RequestQuery) (*ResponseQuery, error) // Query for state
Info(RequestInfo) ResponseInfo // Return application info
Query(RequestQuery) ResponseQuery // Query for state
// Mempool Connection
CheckTx(context.Context, *RequestCheckTx) (*ResponseCheckTx, error) // Validate a tx for the mempool
CheckTx(RequestCheckTx) ResponseCheckTx // Validate a tx for the mempool
// Consensus Connection
InitChain(context.Context, *RequestInitChain) (*ResponseInitChain, error) // Initialize blockchain w validators/other info from TendermintCore
PrepareProposal(context.Context, *RequestPrepareProposal) (*ResponsePrepareProposal, error)
ProcessProposal(context.Context, *RequestProcessProposal) (*ResponseProcessProposal, error)
InitChain(RequestInitChain) ResponseInitChain // Initialize blockchain w validators/other info from TendermintCore
PrepareProposal(RequestPrepareProposal) ResponsePrepareProposal
ProcessProposal(RequestProcessProposal) ResponseProcessProposal
// Commit the state and return the application Merkle root hash
Commit(context.Context) (*ResponseCommit, error)
Commit() ResponseCommit
// Create application specific vote extension
ExtendVote(context.Context, *RequestExtendVote) (*ResponseExtendVote, error)
ExtendVote(RequestExtendVote) ResponseExtendVote
// Verify application's vote extension data
VerifyVoteExtension(context.Context, *RequestVerifyVoteExtension) (*ResponseVerifyVoteExtension, error)
VerifyVoteExtension(RequestVerifyVoteExtension) ResponseVerifyVoteExtension
// Deliver the decided block with its txs to the Application
FinalizeBlock(context.Context, *RequestFinalizeBlock) (*ResponseFinalizeBlock, error)
FinalizeBlock(RequestFinalizeBlock) ResponseFinalizeBlock
// State Sync Connection
ListSnapshots(context.Context, *RequestListSnapshots) (*ResponseListSnapshots, error) // List available snapshots
OfferSnapshot(context.Context, *RequestOfferSnapshot) (*ResponseOfferSnapshot, error) // Offer a snapshot to the application
LoadSnapshotChunk(context.Context, *RequestLoadSnapshotChunk) (*ResponseLoadSnapshotChunk, error) // Load a snapshot chunk
ApplySnapshotChunk(context.Context, *RequestApplySnapshotChunk) (*ResponseApplySnapshotChunk, error) // Apply a snapshot chunk
ListSnapshots(RequestListSnapshots) ResponseListSnapshots // List available snapshots
OfferSnapshot(RequestOfferSnapshot) ResponseOfferSnapshot // Offer a snapshot to the application
LoadSnapshotChunk(RequestLoadSnapshotChunk) ResponseLoadSnapshotChunk // Load a snapshot chunk
ApplySnapshotChunk(RequestApplySnapshotChunk) ResponseApplySnapshotChunk // Apply a snapshot chunk
}
//-------------------------------------------------------
@@ -44,78 +48,164 @@ func NewBaseApplication() *BaseApplication {
return &BaseApplication{}
}
func (BaseApplication) Info(_ context.Context, req *RequestInfo) (*ResponseInfo, error) {
return &ResponseInfo{}, nil
func (BaseApplication) Info(req RequestInfo) ResponseInfo {
return ResponseInfo{}
}
func (BaseApplication) CheckTx(_ context.Context, req *RequestCheckTx) (*ResponseCheckTx, error) {
return &ResponseCheckTx{Code: CodeTypeOK}, nil
func (BaseApplication) CheckTx(req RequestCheckTx) ResponseCheckTx {
return ResponseCheckTx{Code: CodeTypeOK}
}
func (BaseApplication) Commit(_ context.Context) (*ResponseCommit, error) {
return &ResponseCommit{}, nil
func (BaseApplication) Commit() ResponseCommit {
return ResponseCommit{}
}
func (BaseApplication) ExtendVote(_ context.Context, req *RequestExtendVote) (*ResponseExtendVote, error) {
return &ResponseExtendVote{}, nil
func (BaseApplication) ExtendVote(req RequestExtendVote) ResponseExtendVote {
return ResponseExtendVote{}
}
func (BaseApplication) VerifyVoteExtension(_ context.Context, req *RequestVerifyVoteExtension) (*ResponseVerifyVoteExtension, error) {
return &ResponseVerifyVoteExtension{
func (BaseApplication) VerifyVoteExtension(req RequestVerifyVoteExtension) ResponseVerifyVoteExtension {
return ResponseVerifyVoteExtension{
Status: ResponseVerifyVoteExtension_ACCEPT,
}, nil
}
func (BaseApplication) Query(_ context.Context, req *RequestQuery) (*ResponseQuery, error) {
return &ResponseQuery{Code: CodeTypeOK}, nil
}
func (BaseApplication) InitChain(_ context.Context, req *RequestInitChain) (*ResponseInitChain, error) {
return &ResponseInitChain{}, nil
}
func (BaseApplication) ListSnapshots(_ context.Context, req *RequestListSnapshots) (*ResponseListSnapshots, error) {
return &ResponseListSnapshots{}, nil
}
func (BaseApplication) OfferSnapshot(_ context.Context, req *RequestOfferSnapshot) (*ResponseOfferSnapshot, error) {
return &ResponseOfferSnapshot{}, nil
}
func (BaseApplication) LoadSnapshotChunk(_ context.Context, _ *RequestLoadSnapshotChunk) (*ResponseLoadSnapshotChunk, error) {
return &ResponseLoadSnapshotChunk{}, nil
}
func (BaseApplication) ApplySnapshotChunk(_ context.Context, req *RequestApplySnapshotChunk) (*ResponseApplySnapshotChunk, error) {
return &ResponseApplySnapshotChunk{}, nil
}
func (BaseApplication) PrepareProposal(_ context.Context, req *RequestPrepareProposal) (*ResponsePrepareProposal, error) {
trs := make([]*TxRecord, 0, len(req.Txs))
var totalBytes int64
for _, tx := range req.Txs {
totalBytes += int64(len(tx))
if totalBytes > req.MaxTxBytes {
break
}
trs = append(trs, &TxRecord{
Action: TxRecord_UNMODIFIED,
Tx: tx,
})
}
return &ResponsePrepareProposal{TxRecords: trs}, nil
}
func (BaseApplication) ProcessProposal(_ context.Context, req *RequestProcessProposal) (*ResponseProcessProposal, error) {
return &ResponseProcessProposal{Status: ResponseProcessProposal_ACCEPT}, nil
func (BaseApplication) Query(req RequestQuery) ResponseQuery {
return ResponseQuery{Code: CodeTypeOK}
}
func (BaseApplication) FinalizeBlock(_ context.Context, req *RequestFinalizeBlock) (*ResponseFinalizeBlock, error) {
func (BaseApplication) InitChain(req RequestInitChain) ResponseInitChain {
return ResponseInitChain{}
}
func (BaseApplication) ListSnapshots(req RequestListSnapshots) ResponseListSnapshots {
return ResponseListSnapshots{}
}
func (BaseApplication) OfferSnapshot(req RequestOfferSnapshot) ResponseOfferSnapshot {
return ResponseOfferSnapshot{}
}
func (BaseApplication) LoadSnapshotChunk(req RequestLoadSnapshotChunk) ResponseLoadSnapshotChunk {
return ResponseLoadSnapshotChunk{}
}
func (BaseApplication) ApplySnapshotChunk(req RequestApplySnapshotChunk) ResponseApplySnapshotChunk {
return ResponseApplySnapshotChunk{}
}
func (BaseApplication) PrepareProposal(req RequestPrepareProposal) ResponsePrepareProposal {
return ResponsePrepareProposal{ModifiedTxStatus: ResponsePrepareProposal_UNMODIFIED}
}
func (BaseApplication) ProcessProposal(req RequestProcessProposal) ResponseProcessProposal {
return ResponseProcessProposal{Status: ResponseProcessProposal_ACCEPT}
}
func (BaseApplication) FinalizeBlock(req RequestFinalizeBlock) ResponseFinalizeBlock {
txs := make([]*ExecTxResult, len(req.Txs))
for i := range req.Txs {
txs[i] = &ExecTxResult{Code: CodeTypeOK}
}
return &ResponseFinalizeBlock{
return ResponseFinalizeBlock{
TxResults: txs,
}, nil
}
}
//-------------------------------------------------------
// GRPCApplication is a GRPC wrapper for Application
type GRPCApplication struct {
app Application
}
func NewGRPCApplication(app Application) *GRPCApplication {
return &GRPCApplication{app}
}
func (app *GRPCApplication) Echo(ctx context.Context, req *RequestEcho) (*ResponseEcho, error) {
return &ResponseEcho{Message: req.Message}, nil
}
func (app *GRPCApplication) Flush(ctx context.Context, req *RequestFlush) (*ResponseFlush, error) {
return &ResponseFlush{}, nil
}
func (app *GRPCApplication) Info(ctx context.Context, req *RequestInfo) (*ResponseInfo, error) {
res := app.app.Info(*req)
return &res, nil
}
func (app *GRPCApplication) CheckTx(ctx context.Context, req *RequestCheckTx) (*ResponseCheckTx, error) {
res := app.app.CheckTx(*req)
return &res, nil
}
func (app *GRPCApplication) Query(ctx context.Context, req *RequestQuery) (*ResponseQuery, error) {
res := app.app.Query(*req)
return &res, nil
}
func (app *GRPCApplication) Commit(ctx context.Context, req *RequestCommit) (*ResponseCommit, error) {
res := app.app.Commit()
return &res, nil
}
func (app *GRPCApplication) InitChain(ctx context.Context, req *RequestInitChain) (*ResponseInitChain, error) {
res := app.app.InitChain(*req)
return &res, nil
}
func (app *GRPCApplication) ListSnapshots(
ctx context.Context, req *RequestListSnapshots) (*ResponseListSnapshots, error) {
res := app.app.ListSnapshots(*req)
return &res, nil
}
func (app *GRPCApplication) OfferSnapshot(
ctx context.Context, req *RequestOfferSnapshot) (*ResponseOfferSnapshot, error) {
res := app.app.OfferSnapshot(*req)
return &res, nil
}
func (app *GRPCApplication) LoadSnapshotChunk(
ctx context.Context, req *RequestLoadSnapshotChunk) (*ResponseLoadSnapshotChunk, error) {
res := app.app.LoadSnapshotChunk(*req)
return &res, nil
}
func (app *GRPCApplication) ApplySnapshotChunk(
ctx context.Context, req *RequestApplySnapshotChunk) (*ResponseApplySnapshotChunk, error) {
res := app.app.ApplySnapshotChunk(*req)
return &res, nil
}
func (app *GRPCApplication) ExtendVote(
ctx context.Context, req *RequestExtendVote) (*ResponseExtendVote, error) {
res := app.app.ExtendVote(*req)
return &res, nil
}
func (app *GRPCApplication) VerifyVoteExtension(
ctx context.Context, req *RequestVerifyVoteExtension) (*ResponseVerifyVoteExtension, error) {
res := app.app.VerifyVoteExtension(*req)
return &res, nil
}
func (app *GRPCApplication) PrepareProposal(
ctx context.Context, req *RequestPrepareProposal) (*ResponsePrepareProposal, error) {
res := app.app.PrepareProposal(*req)
return &res, nil
}
func (app *GRPCApplication) ProcessProposal(
ctx context.Context, req *RequestProcessProposal) (*ResponseProcessProposal, error) {
res := app.app.ProcessProposal(*req)
return &res, nil
}
func (app *GRPCApplication) FinalizeBlock(
ctx context.Context, req *RequestFinalizeBlock) (*ResponseFinalizeBlock, error) {
res := app.app.FinalizeBlock(*req)
return &res, nil
}

View File

@@ -39,15 +39,15 @@ func ToRequestFlush() *Request {
}
}
func ToRequestInfo(req *RequestInfo) *Request {
func ToRequestInfo(req RequestInfo) *Request {
return &Request{
Value: &Request_Info{req},
Value: &Request_Info{&req},
}
}
func ToRequestCheckTx(req *RequestCheckTx) *Request {
func ToRequestCheckTx(req RequestCheckTx) *Request {
return &Request{
Value: &Request_CheckTx{req},
Value: &Request_CheckTx{&req},
}
}
@@ -57,69 +57,69 @@ func ToRequestCommit() *Request {
}
}
func ToRequestQuery(req *RequestQuery) *Request {
func ToRequestQuery(req RequestQuery) *Request {
return &Request{
Value: &Request_Query{req},
Value: &Request_Query{&req},
}
}
func ToRequestInitChain(req *RequestInitChain) *Request {
func ToRequestInitChain(req RequestInitChain) *Request {
return &Request{
Value: &Request_InitChain{req},
Value: &Request_InitChain{&req},
}
}
func ToRequestListSnapshots(req *RequestListSnapshots) *Request {
func ToRequestListSnapshots(req RequestListSnapshots) *Request {
return &Request{
Value: &Request_ListSnapshots{req},
Value: &Request_ListSnapshots{&req},
}
}
func ToRequestOfferSnapshot(req *RequestOfferSnapshot) *Request {
func ToRequestOfferSnapshot(req RequestOfferSnapshot) *Request {
return &Request{
Value: &Request_OfferSnapshot{req},
Value: &Request_OfferSnapshot{&req},
}
}
func ToRequestLoadSnapshotChunk(req *RequestLoadSnapshotChunk) *Request {
func ToRequestLoadSnapshotChunk(req RequestLoadSnapshotChunk) *Request {
return &Request{
Value: &Request_LoadSnapshotChunk{req},
Value: &Request_LoadSnapshotChunk{&req},
}
}
func ToRequestApplySnapshotChunk(req *RequestApplySnapshotChunk) *Request {
func ToRequestApplySnapshotChunk(req RequestApplySnapshotChunk) *Request {
return &Request{
Value: &Request_ApplySnapshotChunk{req},
Value: &Request_ApplySnapshotChunk{&req},
}
}
func ToRequestExtendVote(req *RequestExtendVote) *Request {
func ToRequestExtendVote(req RequestExtendVote) *Request {
return &Request{
Value: &Request_ExtendVote{req},
Value: &Request_ExtendVote{&req},
}
}
func ToRequestVerifyVoteExtension(req *RequestVerifyVoteExtension) *Request {
func ToRequestVerifyVoteExtension(req RequestVerifyVoteExtension) *Request {
return &Request{
Value: &Request_VerifyVoteExtension{req},
Value: &Request_VerifyVoteExtension{&req},
}
}
func ToRequestPrepareProposal(req *RequestPrepareProposal) *Request {
func ToRequestPrepareProposal(req RequestPrepareProposal) *Request {
return &Request{
Value: &Request_PrepareProposal{req},
Value: &Request_PrepareProposal{&req},
}
}
func ToRequestProcessProposal(req *RequestProcessProposal) *Request {
func ToRequestProcessProposal(req RequestProcessProposal) *Request {
return &Request{
Value: &Request_ProcessProposal{req},
Value: &Request_ProcessProposal{&req},
}
}
func ToRequestFinalizeBlock(req *RequestFinalizeBlock) *Request {
func ToRequestFinalizeBlock(req RequestFinalizeBlock) *Request {
return &Request{
Value: &Request_FinalizeBlock{req},
Value: &Request_FinalizeBlock{&req},
}
}
@@ -143,86 +143,86 @@ func ToResponseFlush() *Response {
}
}
func ToResponseInfo(res *ResponseInfo) *Response {
func ToResponseInfo(res ResponseInfo) *Response {
return &Response{
Value: &Response_Info{res},
Value: &Response_Info{&res},
}
}
func ToResponseCheckTx(res *ResponseCheckTx) *Response {
func ToResponseCheckTx(res ResponseCheckTx) *Response {
return &Response{
Value: &Response_CheckTx{res},
Value: &Response_CheckTx{&res},
}
}
func ToResponseCommit(res *ResponseCommit) *Response {
func ToResponseCommit(res ResponseCommit) *Response {
return &Response{
Value: &Response_Commit{res},
Value: &Response_Commit{&res},
}
}
func ToResponseQuery(res *ResponseQuery) *Response {
func ToResponseQuery(res ResponseQuery) *Response {
return &Response{
Value: &Response_Query{res},
Value: &Response_Query{&res},
}
}
func ToResponseInitChain(res *ResponseInitChain) *Response {
func ToResponseInitChain(res ResponseInitChain) *Response {
return &Response{
Value: &Response_InitChain{res},
Value: &Response_InitChain{&res},
}
}
func ToResponseListSnapshots(res *ResponseListSnapshots) *Response {
func ToResponseListSnapshots(res ResponseListSnapshots) *Response {
return &Response{
Value: &Response_ListSnapshots{res},
Value: &Response_ListSnapshots{&res},
}
}
func ToResponseOfferSnapshot(res *ResponseOfferSnapshot) *Response {
func ToResponseOfferSnapshot(res ResponseOfferSnapshot) *Response {
return &Response{
Value: &Response_OfferSnapshot{res},
Value: &Response_OfferSnapshot{&res},
}
}
func ToResponseLoadSnapshotChunk(res *ResponseLoadSnapshotChunk) *Response {
func ToResponseLoadSnapshotChunk(res ResponseLoadSnapshotChunk) *Response {
return &Response{
Value: &Response_LoadSnapshotChunk{res},
Value: &Response_LoadSnapshotChunk{&res},
}
}
func ToResponseApplySnapshotChunk(res *ResponseApplySnapshotChunk) *Response {
func ToResponseApplySnapshotChunk(res ResponseApplySnapshotChunk) *Response {
return &Response{
Value: &Response_ApplySnapshotChunk{res},
Value: &Response_ApplySnapshotChunk{&res},
}
}
func ToResponseExtendVote(res *ResponseExtendVote) *Response {
func ToResponseExtendVote(res ResponseExtendVote) *Response {
return &Response{
Value: &Response_ExtendVote{res},
Value: &Response_ExtendVote{&res},
}
}
func ToResponseVerifyVoteExtension(res *ResponseVerifyVoteExtension) *Response {
func ToResponseVerifyVoteExtension(res ResponseVerifyVoteExtension) *Response {
return &Response{
Value: &Response_VerifyVoteExtension{res},
Value: &Response_VerifyVoteExtension{&res},
}
}
func ToResponsePrepareProposal(res *ResponsePrepareProposal) *Response {
func ToResponsePrepareProposal(res ResponsePrepareProposal) *Response {
return &Response{
Value: &Response_PrepareProposal{res},
Value: &Response_PrepareProposal{&res},
}
}
func ToResponseProcessProposal(res *ResponseProcessProposal) *Response {
func ToResponseProcessProposal(res ResponseProcessProposal) *Response {
return &Response{
Value: &Response_ProcessProposal{res},
Value: &Response_ProcessProposal{&res},
}
}
func ToResponseFinalizeBlock(res *ResponseFinalizeBlock) *Response {
func ToResponseFinalizeBlock(res ResponseFinalizeBlock) *Response {
return &Response{
Value: &Response_FinalizeBlock{res},
Value: &Response_FinalizeBlock{&res},
}
}

View File

@@ -21,6 +21,14 @@ func TestMarshalJSON(t *testing.T) {
Code: 1,
Data: []byte("hello"),
GasWanted: 43,
Events: []Event{
{
Type: "testEvent",
Attributes: []EventAttribute{
{Key: "pho", Value: "bo"},
},
},
},
}
b, err = json.Marshal(&r1)
assert.NoError(t, err)
@@ -78,7 +86,16 @@ func TestWriteReadMessage2(t *testing.T) {
cases := []proto.Message{
&ResponseCheckTx{
Data: []byte(phrase),
Log: phrase,
GasWanted: 10,
Events: []Event{
{
Type: "testEvent",
Attributes: []EventAttribute{
{Key: "abc", Value: "def"},
},
},
},
},
// TODO: add the rest
}

View File

@@ -3,8 +3,6 @@
package mocks
import (
context "context"
mock "github.com/stretchr/testify/mock"
types "github.com/tendermint/tendermint/abci/types"
)
@@ -14,339 +12,198 @@ type Application struct {
mock.Mock
}
// ApplySnapshotChunk provides a mock function with given fields: _a0, _a1
func (_m *Application) ApplySnapshotChunk(_a0 context.Context, _a1 *types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
ret := _m.Called(_a0, _a1)
var r0 *types.ResponseApplySnapshotChunk
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestApplySnapshotChunk) *types.ResponseApplySnapshotChunk); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.ResponseApplySnapshotChunk)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestApplySnapshotChunk) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// CheckTx provides a mock function with given fields: _a0, _a1
func (_m *Application) CheckTx(_a0 context.Context, _a1 *types.RequestCheckTx) (*types.ResponseCheckTx, error) {
ret := _m.Called(_a0, _a1)
var r0 *types.ResponseCheckTx
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestCheckTx) *types.ResponseCheckTx); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.ResponseCheckTx)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestCheckTx) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// Commit provides a mock function with given fields: _a0
func (_m *Application) Commit(_a0 context.Context) (*types.ResponseCommit, error) {
// ApplySnapshotChunk provides a mock function with given fields: _a0
func (_m *Application) ApplySnapshotChunk(_a0 types.RequestApplySnapshotChunk) types.ResponseApplySnapshotChunk {
ret := _m.Called(_a0)
var r0 *types.ResponseCommit
if rf, ok := ret.Get(0).(func(context.Context) *types.ResponseCommit); ok {
var r0 types.ResponseApplySnapshotChunk
if rf, ok := ret.Get(0).(func(types.RequestApplySnapshotChunk) types.ResponseApplySnapshotChunk); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.ResponseCommit)
}
r0 = ret.Get(0).(types.ResponseApplySnapshotChunk)
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context) error); ok {
r1 = rf(_a0)
} else {
r1 = ret.Error(1)
}
return r0, r1
return r0
}
// ExtendVote provides a mock function with given fields: _a0, _a1
func (_m *Application) ExtendVote(_a0 context.Context, _a1 *types.RequestExtendVote) (*types.ResponseExtendVote, error) {
ret := _m.Called(_a0, _a1)
// CheckTx provides a mock function with given fields: _a0
func (_m *Application) CheckTx(_a0 types.RequestCheckTx) types.ResponseCheckTx {
ret := _m.Called(_a0)
var r0 *types.ResponseExtendVote
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestExtendVote) *types.ResponseExtendVote); ok {
r0 = rf(_a0, _a1)
var r0 types.ResponseCheckTx
if rf, ok := ret.Get(0).(func(types.RequestCheckTx) types.ResponseCheckTx); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.ResponseExtendVote)
}
r0 = ret.Get(0).(types.ResponseCheckTx)
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestExtendVote) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
return r0
}
// FinalizeBlock provides a mock function with given fields: _a0, _a1
func (_m *Application) FinalizeBlock(_a0 context.Context, _a1 *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
ret := _m.Called(_a0, _a1)
// Commit provides a mock function with given fields:
func (_m *Application) Commit() types.ResponseCommit {
ret := _m.Called()
var r0 *types.ResponseFinalizeBlock
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestFinalizeBlock) *types.ResponseFinalizeBlock); ok {
r0 = rf(_a0, _a1)
var r0 types.ResponseCommit
if rf, ok := ret.Get(0).(func() types.ResponseCommit); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.ResponseFinalizeBlock)
}
r0 = ret.Get(0).(types.ResponseCommit)
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestFinalizeBlock) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
return r0
}
// Info provides a mock function with given fields: _a0, _a1
func (_m *Application) Info(_a0 context.Context, _a1 *types.RequestInfo) (*types.ResponseInfo, error) {
ret := _m.Called(_a0, _a1)
// ExtendVote provides a mock function with given fields: _a0
func (_m *Application) ExtendVote(_a0 types.RequestExtendVote) types.ResponseExtendVote {
ret := _m.Called(_a0)
var r0 *types.ResponseInfo
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestInfo) *types.ResponseInfo); ok {
r0 = rf(_a0, _a1)
var r0 types.ResponseExtendVote
if rf, ok := ret.Get(0).(func(types.RequestExtendVote) types.ResponseExtendVote); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.ResponseInfo)
}
r0 = ret.Get(0).(types.ResponseExtendVote)
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestInfo) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
return r0
}
// InitChain provides a mock function with given fields: _a0, _a1
func (_m *Application) InitChain(_a0 context.Context, _a1 *types.RequestInitChain) (*types.ResponseInitChain, error) {
ret := _m.Called(_a0, _a1)
// FinalizeBlock provides a mock function with given fields: _a0
func (_m *Application) FinalizeBlock(_a0 types.RequestFinalizeBlock) types.ResponseFinalizeBlock {
ret := _m.Called(_a0)
var r0 *types.ResponseInitChain
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestInitChain) *types.ResponseInitChain); ok {
r0 = rf(_a0, _a1)
var r0 types.ResponseFinalizeBlock
if rf, ok := ret.Get(0).(func(types.RequestFinalizeBlock) types.ResponseFinalizeBlock); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.ResponseInitChain)
}
r0 = ret.Get(0).(types.ResponseFinalizeBlock)
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestInitChain) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
return r0
}
// ListSnapshots provides a mock function with given fields: _a0, _a1
func (_m *Application) ListSnapshots(_a0 context.Context, _a1 *types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
ret := _m.Called(_a0, _a1)
// Info provides a mock function with given fields: _a0
func (_m *Application) Info(_a0 types.RequestInfo) types.ResponseInfo {
ret := _m.Called(_a0)
var r0 *types.ResponseListSnapshots
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestListSnapshots) *types.ResponseListSnapshots); ok {
r0 = rf(_a0, _a1)
var r0 types.ResponseInfo
if rf, ok := ret.Get(0).(func(types.RequestInfo) types.ResponseInfo); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.ResponseListSnapshots)
}
r0 = ret.Get(0).(types.ResponseInfo)
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestListSnapshots) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
return r0
}
// LoadSnapshotChunk provides a mock function with given fields: _a0, _a1
func (_m *Application) LoadSnapshotChunk(_a0 context.Context, _a1 *types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
ret := _m.Called(_a0, _a1)
// InitChain provides a mock function with given fields: _a0
func (_m *Application) InitChain(_a0 types.RequestInitChain) types.ResponseInitChain {
ret := _m.Called(_a0)
var r0 *types.ResponseLoadSnapshotChunk
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestLoadSnapshotChunk) *types.ResponseLoadSnapshotChunk); ok {
r0 = rf(_a0, _a1)
var r0 types.ResponseInitChain
if rf, ok := ret.Get(0).(func(types.RequestInitChain) types.ResponseInitChain); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.ResponseLoadSnapshotChunk)
}
r0 = ret.Get(0).(types.ResponseInitChain)
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestLoadSnapshotChunk) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
return r0
}
// OfferSnapshot provides a mock function with given fields: _a0, _a1
func (_m *Application) OfferSnapshot(_a0 context.Context, _a1 *types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
ret := _m.Called(_a0, _a1)
// ListSnapshots provides a mock function with given fields: _a0
func (_m *Application) ListSnapshots(_a0 types.RequestListSnapshots) types.ResponseListSnapshots {
ret := _m.Called(_a0)
var r0 *types.ResponseOfferSnapshot
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestOfferSnapshot) *types.ResponseOfferSnapshot); ok {
r0 = rf(_a0, _a1)
var r0 types.ResponseListSnapshots
if rf, ok := ret.Get(0).(func(types.RequestListSnapshots) types.ResponseListSnapshots); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.ResponseOfferSnapshot)
}
r0 = ret.Get(0).(types.ResponseListSnapshots)
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestOfferSnapshot) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
return r0
}
// PrepareProposal provides a mock function with given fields: _a0, _a1
func (_m *Application) PrepareProposal(_a0 context.Context, _a1 *types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
ret := _m.Called(_a0, _a1)
// LoadSnapshotChunk provides a mock function with given fields: _a0
func (_m *Application) LoadSnapshotChunk(_a0 types.RequestLoadSnapshotChunk) types.ResponseLoadSnapshotChunk {
ret := _m.Called(_a0)
var r0 *types.ResponsePrepareProposal
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestPrepareProposal) *types.ResponsePrepareProposal); ok {
r0 = rf(_a0, _a1)
var r0 types.ResponseLoadSnapshotChunk
if rf, ok := ret.Get(0).(func(types.RequestLoadSnapshotChunk) types.ResponseLoadSnapshotChunk); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.ResponsePrepareProposal)
}
r0 = ret.Get(0).(types.ResponseLoadSnapshotChunk)
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestPrepareProposal) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
return r0
}
// ProcessProposal provides a mock function with given fields: _a0, _a1
func (_m *Application) ProcessProposal(_a0 context.Context, _a1 *types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
ret := _m.Called(_a0, _a1)
// OfferSnapshot provides a mock function with given fields: _a0
func (_m *Application) OfferSnapshot(_a0 types.RequestOfferSnapshot) types.ResponseOfferSnapshot {
ret := _m.Called(_a0)
var r0 *types.ResponseProcessProposal
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestProcessProposal) *types.ResponseProcessProposal); ok {
r0 = rf(_a0, _a1)
var r0 types.ResponseOfferSnapshot
if rf, ok := ret.Get(0).(func(types.RequestOfferSnapshot) types.ResponseOfferSnapshot); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.ResponseProcessProposal)
}
r0 = ret.Get(0).(types.ResponseOfferSnapshot)
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestProcessProposal) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
return r0
}
// Query provides a mock function with given fields: _a0, _a1
func (_m *Application) Query(_a0 context.Context, _a1 *types.RequestQuery) (*types.ResponseQuery, error) {
ret := _m.Called(_a0, _a1)
// PrepareProposal provides a mock function with given fields: _a0
func (_m *Application) PrepareProposal(_a0 types.RequestPrepareProposal) types.ResponsePrepareProposal {
ret := _m.Called(_a0)
var r0 *types.ResponseQuery
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestQuery) *types.ResponseQuery); ok {
r0 = rf(_a0, _a1)
var r0 types.ResponsePrepareProposal
if rf, ok := ret.Get(0).(func(types.RequestPrepareProposal) types.ResponsePrepareProposal); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.ResponseQuery)
}
r0 = ret.Get(0).(types.ResponsePrepareProposal)
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestQuery) error); ok {
r1 = rf(_a0, _a1)
return r0
}
// ProcessProposal provides a mock function with given fields: _a0
func (_m *Application) ProcessProposal(_a0 types.RequestProcessProposal) types.ResponseProcessProposal {
ret := _m.Called(_a0)
var r0 types.ResponseProcessProposal
if rf, ok := ret.Get(0).(func(types.RequestProcessProposal) types.ResponseProcessProposal); ok {
r0 = rf(_a0)
} else {
r1 = ret.Error(1)
r0 = ret.Get(0).(types.ResponseProcessProposal)
}
return r0, r1
return r0
}
// VerifyVoteExtension provides a mock function with given fields: _a0, _a1
func (_m *Application) VerifyVoteExtension(_a0 context.Context, _a1 *types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) {
ret := _m.Called(_a0, _a1)
// Query provides a mock function with given fields: _a0
func (_m *Application) Query(_a0 types.RequestQuery) types.ResponseQuery {
ret := _m.Called(_a0)
var r0 *types.ResponseVerifyVoteExtension
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestVerifyVoteExtension) *types.ResponseVerifyVoteExtension); ok {
r0 = rf(_a0, _a1)
var r0 types.ResponseQuery
if rf, ok := ret.Get(0).(func(types.RequestQuery) types.ResponseQuery); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.ResponseVerifyVoteExtension)
}
r0 = ret.Get(0).(types.ResponseQuery)
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestVerifyVoteExtension) error); ok {
r1 = rf(_a0, _a1)
return r0
}
// VerifyVoteExtension provides a mock function with given fields: _a0
func (_m *Application) VerifyVoteExtension(_a0 types.RequestVerifyVoteExtension) types.ResponseVerifyVoteExtension {
ret := _m.Called(_a0)
var r0 types.ResponseVerifyVoteExtension
if rf, ok := ret.Get(0).(func(types.RequestVerifyVoteExtension) types.ResponseVerifyVoteExtension); ok {
r0 = rf(_a0)
} else {
r1 = ret.Error(1)
r0 = ret.Get(0).(types.ResponseVerifyVoteExtension)
}
return r0, r1
}
type NewApplicationT interface {
mock.TestingT
Cleanup(func())
}
// NewApplication creates a new instance of Application. It also registers a testing interface on the mock and a cleanup function to assert the mock's expectations.
func NewApplication(t NewApplicationT) *Application {
mock := &Application{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
return r0
}

175
abci/types/mocks/base.go Normal file
View File

@@ -0,0 +1,175 @@
package mocks
import (
types "github.com/tendermint/tendermint/abci/types"
)
// BaseMock provides a wrapper around the generated Application mock and a BaseApplication.
// BaseMock first tries to use the mock's implementation of the method.
// If no functionality was provided for the mock by the user, BaseMock dispatches
// to the BaseApplication and uses its functionality.
// BaseMock allows users to provide mocked functionality for only the methods that matter
// for their test while avoiding a panic if the code calls Application methods that are
// not relevant to the test.
type BaseMock struct {
base *types.BaseApplication
*Application
}
func NewBaseMock() BaseMock {
return BaseMock{
base: types.NewBaseApplication(),
Application: new(Application),
}
}
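// Illustrative usage sketch (hypothetical test code, not part of the generated mock):
// stub only the method the test cares about and let every other call fall back to the
// BaseApplication defaults; each wrapper method below recovers from the mock's panic
// when no expectation was set and dispatches to the base. "mock" here is testify's
// mock package.
//
//	app := NewBaseMock()
//	app.Application.On("ProcessProposal", mock.Anything).
//		Return(types.ResponseProcessProposal{Status: types.ResponseProcessProposal_ACCEPT})
//
//	resp := app.ProcessProposal(types.RequestProcessProposal{}) // served by the stub
//	info := app.Info(types.RequestInfo{})                       // falls back to BaseApplication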
// Info/Query Connection
// Return application info
func (m BaseMock) Info(input types.RequestInfo) (ret types.ResponseInfo) {
defer func() {
if r := recover(); r != nil {
ret = m.base.Info(input)
}
}()
ret = m.Application.Info(input)
return ret
}
func (m BaseMock) Query(input types.RequestQuery) (ret types.ResponseQuery) {
defer func() {
if r := recover(); r != nil {
ret = m.base.Query(input)
}
}()
ret = m.Application.Query(input)
return ret
}
// Mempool Connection
// Validate a tx for the mempool
func (m BaseMock) CheckTx(input types.RequestCheckTx) (ret types.ResponseCheckTx) {
defer func() {
if r := recover(); r != nil {
ret = m.base.CheckTx(input)
}
}()
ret = m.Application.CheckTx(input)
return ret
}
// Consensus Connection
// Initialize blockchain with validators/other info from Tendermint Core
func (m BaseMock) InitChain(input types.RequestInitChain) (ret types.ResponseInitChain) {
defer func() {
if r := recover(); r != nil {
ret = m.base.InitChain(input)
}
}()
ret = m.Application.InitChain(input)
return ret
}
func (m BaseMock) PrepareProposal(input types.RequestPrepareProposal) (ret types.ResponsePrepareProposal) {
defer func() {
if r := recover(); r != nil {
ret = m.base.PrepareProposal(input)
}
}()
ret = m.Application.PrepareProposal(input)
return ret
}
func (m BaseMock) ProcessProposal(input types.RequestProcessProposal) (ret types.ResponseProcessProposal) {
defer func() {
if r := recover(); r != nil {
ret = m.base.ProcessProposal(input)
}
}()
ret = m.Application.ProcessProposal(input)
return ret
}
// Commit the state and return the application Merkle root hash
func (m BaseMock) Commit() (ret types.ResponseCommit) {
defer func() {
if r := recover(); r != nil {
ret = m.base.Commit()
}
}()
ret = m.Application.Commit()
return ret
}
// Create application specific vote extension
func (m BaseMock) ExtendVote(input types.RequestExtendVote) (ret types.ResponseExtendVote) {
defer func() {
if r := recover(); r != nil {
ret = m.base.ExtendVote(input)
}
}()
ret = m.Application.ExtendVote(input)
return ret
}
// Verify application's vote extension data
func (m BaseMock) VerifyVoteExtension(input types.RequestVerifyVoteExtension) (ret types.ResponseVerifyVoteExtension) {
defer func() {
if r := recover(); r != nil {
ret = m.base.VerifyVoteExtension(input)
}
}()
ret = m.Application.VerifyVoteExtension(input)
return ret
}
// State Sync Connection
// List available snapshots
func (m BaseMock) ListSnapshots(input types.RequestListSnapshots) (ret types.ResponseListSnapshots) {
defer func() {
if r := recover(); r != nil {
ret = m.base.ListSnapshots(input)
}
}()
ret = m.Application.ListSnapshots(input)
return ret
}
func (m BaseMock) OfferSnapshot(input types.RequestOfferSnapshot) (ret types.ResponseOfferSnapshot) {
defer func() {
if r := recover(); r != nil {
ret = m.base.OfferSnapshot(input)
}
}()
ret = m.Application.OfferSnapshot(input)
return ret
}
func (m BaseMock) LoadSnapshotChunk(input types.RequestLoadSnapshotChunk) (ret types.ResponseLoadSnapshotChunk) {
defer func() {
if r := recover(); r != nil {
ret = m.base.LoadSnapshotChunk(input)
}
}()
ret = m.Application.LoadSnapshotChunk(input)
return ret
}
func (m BaseMock) ApplySnapshotChunk(input types.RequestApplySnapshotChunk) (ret types.ResponseApplySnapshotChunk) {
defer func() {
if r := recover(); r != nil {
ret = m.base.ApplySnapshotChunk(input)
}
}()
ret = m.Application.ApplySnapshotChunk(input)
return ret
}
func (m BaseMock) FinalizeBlock(input types.RequestFinalizeBlock) (ret types.ResponseFinalizeBlock) {
defer func() {
if r := recover(); r != nil {
ret = m.base.FinalizeBlock(input)
}
}()
ret = m.Application.FinalizeBlock(input)
return ret
}

View File

@@ -5,9 +5,8 @@ import (
"encoding/json"
"github.com/gogo/protobuf/jsonpb"
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/encoding"
"github.com/tendermint/tendermint/internal/jsontypes"
types "github.com/tendermint/tendermint/proto/tendermint/types"
)
const (
@@ -54,6 +53,14 @@ func (r ResponseQuery) IsErr() bool {
return r.Code != CodeTypeOK
}
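// IsTxStatusUnknown returns true if the application did not report whether it modified the transactions in the proposal.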
func (r ResponsePrepareProposal) IsTxStatusUnknown() bool {
return r.ModifiedTxStatus == ResponsePrepareProposal_UNKNOWN
}
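// IsTxStatusModified returns true if the application reported that it modified the transactions in the proposal.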
func (r ResponsePrepareProposal) IsTxStatusModified() bool {
return r.ModifiedTxStatus == ResponsePrepareProposal_MODIFIED
}
func (r ResponseProcessProposal) IsAccepted() bool {
return r.Status == ResponseProcessProposal_ACCEPT
}
@@ -138,48 +145,6 @@ func (r *EventAttribute) UnmarshalJSON(b []byte) error {
return jsonpbUnmarshaller.Unmarshal(reader, r)
}
// validatorUpdateJSON is the JSON encoding of a validator update.
//
// It handles translation of public keys from the protobuf representation to
// the legacy Amino-compatible format expected by RPC clients.
type validatorUpdateJSON struct {
PubKey json.RawMessage `json:"pub_key,omitempty"`
Power int64 `json:"power,string"`
}
func (v *ValidatorUpdate) MarshalJSON() ([]byte, error) {
key, err := encoding.PubKeyFromProto(v.PubKey)
if err != nil {
return nil, err
}
jkey, err := jsontypes.Marshal(key)
if err != nil {
return nil, err
}
return json.Marshal(validatorUpdateJSON{
PubKey: jkey,
Power: v.GetPower(),
})
}
func (v *ValidatorUpdate) UnmarshalJSON(data []byte) error {
var vu validatorUpdateJSON
if err := json.Unmarshal(data, &vu); err != nil {
return err
}
var key crypto.PubKey
if err := jsontypes.Unmarshal(vu.PubKey, &key); err != nil {
return err
}
pkey, err := encoding.PubKeyToProto(key)
if err != nil {
return err
}
v.PubKey = pkey
v.Power = vu.Power
return nil
}
// Some compile time assertions to ensure we don't
// have accidental runtime surprises later on.
@@ -200,6 +165,15 @@ var _ jsonRoundTripper = (*EventAttribute)(nil)
// -----------------------------------------------
// construct Result data
func RespondExtendVote(appDataToSign, appDataSelfAuthenticating []byte) ResponseExtendVote {
return ResponseExtendVote{
VoteExtension: &types.VoteExtension{
AppDataToSign: appDataToSign,
AppDataSelfAuthenticating: appDataSelfAuthenticating,
},
}
}
func RespondVerifyVoteExtension(ok bool) ResponseVerifyVoteExtension {
status := ResponseVerifyVoteExtension_REJECT
if ok {

File diff suppressed because it is too large

View File

@@ -113,7 +113,7 @@ func main() {
// add prometheus metrics for unary RPC calls
opts = append(opts, grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor))
ss := grpcprivval.NewSignerServer(logger, *chainID, pv)
ss := grpcprivval.NewSignerServer(*chainID, pv, logger)
protocol, address := tmnet.ProtocolAndAddress(*addr)

View File

@@ -1,71 +0,0 @@
package commands
import (
"errors"
"path/filepath"
"sync"
"github.com/spf13/cobra"
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/syndtr/goleveldb/leveldb/util"
"github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/libs/log"
)
func MakeCompactDBCommand(cfg *config.Config, logger log.Logger) *cobra.Command {
cmd := &cobra.Command{
Use: "experimental-compact-goleveldb",
Short: "force compacts the tendermint storage engine (only GoLevelDB supported)",
Long: `
This is a temporary utility command that performs a force compaction on the state
and blockstores to reduce disk space for a pruning node. This should only be run
once the node has stopped. This command will likely be omitted in the future after
the planned refactor to the storage engine.
Currently, only GoLevelDB is supported.
`,
RunE: func(cmd *cobra.Command, args []string) error {
if cfg.DBBackend != "goleveldb" {
return errors.New("compaction is currently only supported with goleveldb")
}
compactGoLevelDBs(cfg.RootDir, logger)
return nil
},
}
return cmd
}
func compactGoLevelDBs(rootDir string, logger log.Logger) {
dbNames := []string{"state", "blockstore"}
o := &opt.Options{
DisableSeeksCompaction: true,
}
wg := sync.WaitGroup{}
for _, dbName := range dbNames {
dbName := dbName
wg.Add(1)
go func() {
defer wg.Done()
dbPath := filepath.Join(rootDir, "data", dbName+".db")
store, err := leveldb.OpenFile(dbPath, o)
if err != nil {
logger.Error("failed to initialize tendermint db", "path", dbPath, "err", err)
return
}
defer store.Close()
logger.Info("starting compaction...", "db", dbPath)
err = store.CompactRange(util.Range{Start: nil, Limit: nil})
if err != nil {
logger.Error("failed to compact tendermint db", "path", dbPath, "err", err)
}
}()
}
wg.Wait()
}

View File

@@ -2,7 +2,6 @@ package debug
import (
"github.com/spf13/cobra"
"github.com/tendermint/tendermint/libs/log"
)

View File

@@ -33,14 +33,10 @@ $ tendermint debug kill 34255 /path/to/tm-debug.zip`,
Args: cobra.ExactArgs(2),
RunE: func(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
// Using Atoi so that the size of an integer can be automatically inferred.
pid, err := strconv.Atoi(args[0])
pid, err := strconv.ParseInt(args[0], 10, 64)
if err != nil {
return err
}
if pid <= 0 {
return fmt.Errorf("PID value must be > 0; given value %q, got %d", args[0], pid)
}
outFile := args[1]
if outFile == "" {
@@ -99,7 +95,7 @@ $ tendermint debug kill 34255 /path/to/tm-debug.zip`,
}
logger.Info("killing Tendermint process")
if err := killProc(pid, tmpDir); err != nil {
if err := killProc(int(pid), tmpDir); err != nil {
return err
}
@@ -117,9 +113,6 @@ $ tendermint debug kill 34255 /path/to/tm-debug.zip`,
// if the output file cannot be created or the tail command cannot be started.
// An error is not returned if any subsequent syscall fails.
func killProc(pid int, dir string) error {
if pid <= 0 {
return fmt.Errorf("PID must be > 0, got %d", pid)
}
// pipe STDERR output from tailing the Tendermint process to a file
//
// NOTE: This will only work on UNIX systems.

View File

@@ -6,18 +6,56 @@ import (
"github.com/spf13/cobra"
"github.com/tendermint/tendermint/config"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/scripts/keymigrate"
"github.com/tendermint/tendermint/scripts/scmigrate"
)
func MakeKeyMigrateCommand(conf *config.Config, logger log.Logger) *cobra.Command {
func MakeKeyMigrateCommand(conf *cfg.Config, logger log.Logger) *cobra.Command {
cmd := &cobra.Command{
Use: "key-migrate",
Short: "Run Database key migration",
RunE: func(cmd *cobra.Command, args []string) error {
return RunDatabaseMigration(cmd.Context(), logger, conf)
ctx, cancel := context.WithCancel(cmd.Context())
defer cancel()
contexts := []string{
// this is ordered to put the
// (presumably) biggest/most important
// subsets first.
"blockstore",
"state",
"peerstore",
"tx_index",
"evidence",
"light",
}
for idx, dbctx := range contexts {
logger.Info("beginning a key migration",
"dbctx", dbctx,
"num", idx+1,
"total", len(contexts),
)
db, err := cfg.DefaultDBProvider(&cfg.DBContext{
ID: dbctx,
Config: conf,
})
if err != nil {
return fmt.Errorf("constructing database handle: %w", err)
}
if err = keymigrate.Migrate(ctx, db); err != nil {
return fmt.Errorf("running migration for context %q: %w",
dbctx, err)
}
}
logger.Info("completed database migration successfully")
return nil
},
}
@@ -26,50 +64,3 @@ func MakeKeyMigrateCommand(conf *config.Config, logger log.Logger) *cobra.Comman
return cmd
}
func RunDatabaseMigration(ctx context.Context, logger log.Logger, conf *config.Config) error {
contexts := []string{
// this is ordered to put
// the more ephemeral tables first to
// reduce the possibility of the
// ephemeral data overwriting later data
"tx_index",
"light",
"blockstore",
"state",
"evidence",
}
for idx, dbctx := range contexts {
logger.Info("beginning a key migration",
"dbctx", dbctx,
"num", idx+1,
"total", len(contexts),
)
db, err := config.DefaultDBProvider(&config.DBContext{
ID: dbctx,
Config: conf,
})
if err != nil {
return fmt.Errorf("constructing database handle: %w", err)
}
if err = keymigrate.Migrate(ctx, dbctx, db); err != nil {
return fmt.Errorf("running migration for context %q: %w",
dbctx, err)
}
if dbctx == "blockstore" {
if err := scmigrate.Migrate(ctx, db); err != nil {
return fmt.Errorf("running seen commit migration: %w", err)
}
}
}
logger.Info("completed database migration successfully")
return nil
}

View File

@@ -171,8 +171,7 @@ for applications built w/ Cosmos SDK).
// If necessary adjust global WriteTimeout to ensure it's greater than
// TimeoutBroadcastTxCommit.
// See https://github.com/tendermint/tendermint/issues/3435
// Note we don't need to adjust anything if the timeout is already unlimited.
if cfg.WriteTimeout > 0 && cfg.WriteTimeout <= conf.RPC.TimeoutBroadcastTxCommit {
if cfg.WriteTimeout <= conf.RPC.TimeoutBroadcastTxCommit {
cfg.WriteTimeout = conf.RPC.TimeoutBroadcastTxCommit + 1*time.Second
}
@@ -181,7 +180,7 @@ for applications built w/ Cosmos SDK).
return err
}
ctx, cancel := signal.NotifyContext(cmd.Context(), os.Interrupt, syscall.SIGTERM)
ctx, cancel := signal.NotifyContext(cmd.Context(), syscall.SIGTERM)
defer cancel()
go func() {

View File

@@ -193,7 +193,7 @@ func eventReIndex(cmd *cobra.Command, args eventReIndexArgs) error {
return fmt.Errorf("not able to load block at height %d from the blockstore", i)
}
r, err := args.stateStore.LoadFinalizeBlockResponses(i)
r, err := args.stateStore.LoadABCIResponses(i)
if err != nil {
return fmt.Errorf("not able to load ABCI Response at height %d from the statestore", i)
}
@@ -201,7 +201,7 @@ func eventReIndex(cmd *cobra.Command, args eventReIndexArgs) error {
e := types.EventDataNewBlockHeader{
Header: b.Header,
NumTxs: int64(len(b.Txs)),
ResultFinalizeBlock: *r,
ResultFinalizeBlock: *r.FinalizeBlock,
}
var batch *indexer.Batch
@@ -213,7 +213,7 @@ func eventReIndex(cmd *cobra.Command, args eventReIndexArgs) error {
Height: b.Height,
Index: uint32(i),
Tx: b.Data.Txs[i],
Result: *(r.TxResults[i]),
Result: *(r.FinalizeBlock.TxResults[i]),
}
_ = batch.Add(&tr)

View File

@@ -16,6 +16,7 @@ import (
"github.com/tendermint/tendermint/internal/state/indexer"
"github.com/tendermint/tendermint/internal/state/mocks"
"github.com/tendermint/tendermint/libs/log"
prototmstate "github.com/tendermint/tendermint/proto/tendermint/state"
"github.com/tendermint/tendermint/types"
_ "github.com/lib/pq" // for the psql sink
@@ -153,14 +154,16 @@ func TestReIndexEvent(t *testing.T) {
On("IndexTxEvents", mock.AnythingOfType("[]*types.TxResult")).Return(nil)
dtx := abcitypes.ExecTxResult{}
abciResp := &abcitypes.ResponseFinalizeBlock{
TxResults: []*abcitypes.ExecTxResult{&dtx},
abciResp := &prototmstate.ABCIResponses{
FinalizeBlock: &abcitypes.ResponseFinalizeBlock{
TxResults: []*abcitypes.ExecTxResult{&dtx},
},
}
mockStateStore.
On("LoadFinalizeBlockResponses", base).Return(nil, errors.New("")).Once().
On("LoadFinalizeBlockResponses", base).Return(abciResp, nil).
On("LoadFinalizeBlockResponses", height).Return(abciResp, nil)
On("LoadABCIResponses", base).Return(nil, errors.New("")).Once().
On("LoadABCIResponses", base).Return(abciResp, nil).
On("LoadABCIResponses", height).Return(abciResp, nil)
testCases := []struct {
startHeight int64
@@ -168,7 +171,7 @@ func TestReIndexEvent(t *testing.T) {
reIndexErr bool
}{
{base, height, true}, // LoadBlock error
{base, height, true}, // LoadFinalizeBlockResponses error
{base, height, true}, // LoadABCIResponses error
{base, height, true}, // index block event error
{base, height, true}, // index tx event error
{base, base, false},

View File

@@ -13,92 +13,81 @@ import (
"github.com/tendermint/tendermint/types"
)
// MakeResetCommand constructs a command that removes the database of
// MakeResetAllCommand constructs a command that removes the database of
// the specified Tendermint core instance.
func MakeResetCommand(conf *config.Config, logger log.Logger) *cobra.Command {
func MakeResetAllCommand(conf *config.Config, logger log.Logger) *cobra.Command {
var keyType string
resetCmd := &cobra.Command{
Use: "reset",
Short: "Set of commands to conveniently reset tendermint related data",
}
resetBlocksCmd := &cobra.Command{
Use: "blockchain",
Short: "Removes all blocks, state, transactions and evidence stored by the tendermint node",
cmd := &cobra.Command{
Use: "unsafe-reset-all",
Short: "(unsafe) Remove all the data and WAL, reset this node's validator to genesis state",
RunE: func(cmd *cobra.Command, args []string) error {
return ResetState(conf.DBDir(), logger)
},
}
resetPeersCmd := &cobra.Command{
Use: "peers",
Short: "Removes all peer addresses",
RunE: func(cmd *cobra.Command, args []string) error {
return ResetPeerStore(conf.DBDir())
},
}
resetSignerCmd := &cobra.Command{
Use: "unsafe-signer",
Short: "esets private validator signer state",
Long: `Resets private validator signer state.
Only use in testing. This can cause the node to double sign`,
RunE: func(cmd *cobra.Command, args []string) error {
return ResetFilePV(conf.PrivValidator.KeyFile(), conf.PrivValidator.StateFile(), logger, keyType)
},
}
resetAllCmd := &cobra.Command{
Use: "unsafe-all",
Short: "Removes all tendermint data including signing state",
Long: `Removes all tendermint data including signing state.
Only use in testing. This can cause the node to double sign`,
RunE: func(cmd *cobra.Command, args []string) error {
return ResetAll(conf.DBDir(), conf.PrivValidator.KeyFile(),
return resetAll(conf.DBDir(), conf.PrivValidator.KeyFile(),
conf.PrivValidator.StateFile(), logger, keyType)
},
}
cmd.Flags().StringVar(&keyType, "key", types.ABCIPubKeyTypeEd25519,
"Key type to generate privval file with. Options: ed25519, secp256k1")
resetSignerCmd.Flags().StringVar(&keyType, "key", types.ABCIPubKeyTypeEd25519,
"Signer key type. Options: ed25519, secp256k1")
resetAllCmd.Flags().StringVar(&keyType, "key", types.ABCIPubKeyTypeEd25519,
"Signer key type. Options: ed25519, secp256k1")
resetCmd.AddCommand(resetBlocksCmd)
resetCmd.AddCommand(resetPeersCmd)
resetCmd.AddCommand(resetSignerCmd)
resetCmd.AddCommand(resetAllCmd)
return resetCmd
return cmd
}
// ResetAll removes address book files plus all data, and resets the privValidator data.
// Exported for external CLI usage.
// XXX: this is unsafe and is only suitable for testnets.
func ResetAll(dbDir, privValKeyFile, privValStateFile string, logger log.Logger, keyType string) error {
// MakeResetStateCommand constructs a command that removes the database of
// the specified Tendermint core instance.
func MakeResetStateCommand(conf *config.Config, logger log.Logger) *cobra.Command {
var keyType string
return &cobra.Command{
Use: "reset-state",
Short: "Remove all the data and WAL",
RunE: func(cmd *cobra.Command, args []string) error {
return resetState(conf.DBDir(), logger, keyType)
},
}
}
func MakeResetPrivateValidatorCommand(conf *config.Config, logger log.Logger) *cobra.Command {
var keyType string
cmd := &cobra.Command{
Use: "unsafe-reset-priv-validator",
Short: "(unsafe) Reset this node's validator to genesis state",
RunE: func(cmd *cobra.Command, args []string) error {
return resetFilePV(conf.PrivValidator.KeyFile(), conf.PrivValidator.StateFile(), logger, keyType)
},
}
cmd.Flags().StringVar(&keyType, "key", types.ABCIPubKeyTypeEd25519,
"Key type to generate privval file with. Options: ed25519, secp256k1")
return cmd
}
// XXX: this is totally unsafe.
// it's only suitable for testnets.
// XXX: this is totally unsafe.
// it's only suitable for testnets.
// resetAll removes address book files plus all data, and resets the privValidator data.
func resetAll(dbDir, privValKeyFile, privValStateFile string, logger log.Logger, keyType string) error {
if err := os.RemoveAll(dbDir); err == nil {
logger.Info("Removed all blockchain history", "dir", dbDir)
} else {
logger.Error("error removing all blockchain history", "dir", dbDir, "err", err)
}
if err := tmos.EnsureDir(dbDir, 0700); err != nil {
logger.Error("unable to recreate dbDir", "err", err)
}
// recreate the dbDir since the privVal state needs to live there
return ResetFilePV(privValKeyFile, privValStateFile, logger, keyType)
return resetFilePV(privValKeyFile, privValStateFile, logger, keyType)
}
// ResetState removes all blocks, tendermint state, indexed transactions and evidence.
func ResetState(dbDir string, logger log.Logger) error {
// resetState removes address book files plus all databases.
func resetState(dbDir string, logger log.Logger, keyType string) error {
blockdb := filepath.Join(dbDir, "blockstore.db")
state := filepath.Join(dbDir, "state.db")
wal := filepath.Join(dbDir, "cs.wal")
evidence := filepath.Join(dbDir, "evidence.db")
txIndex := filepath.Join(dbDir, "tx_index.db")
peerstore := filepath.Join(dbDir, "peerstore.db")
if tmos.FileExists(blockdb) {
if err := os.RemoveAll(blockdb); err == nil {
@@ -140,13 +129,20 @@ func ResetState(dbDir string, logger log.Logger) error {
}
}
return tmos.EnsureDir(dbDir, 0700)
if tmos.FileExists(peerstore) {
if err := os.RemoveAll(peerstore); err == nil {
logger.Info("Removed peerstore.db", "dir", peerstore)
} else {
logger.Error("error removing peerstore.db", "dir", peerstore, "err", err)
}
}
if err := tmos.EnsureDir(dbDir, 0700); err != nil {
logger.Error("unable to recreate dbDir", "err", err)
}
return nil
}
// ResetFilePV loads the file private validator and resets the watermark to 0. If used on an existing network,
// this can cause the node to double sign.
// XXX: this is unsafe and is only suitable for testnets.
func ResetFilePV(privValKeyFile, privValStateFile string, logger log.Logger, keyType string) error {
func resetFilePV(privValKeyFile, privValStateFile string, logger log.Logger, keyType string) error {
if _, err := os.Stat(privValKeyFile); err == nil {
pv, err := privval.LoadFilePVEmptyState(privValKeyFile, privValStateFile)
if err != nil {
@@ -170,13 +166,3 @@ func ResetFilePV(privValKeyFile, privValStateFile string, logger log.Logger, key
}
return nil
}
// ResetPeerStore removes the peer store containing all information used by the tendermint networking layer
// In the case of a reset, new peers will need to be set either via the config or through the discovery mechanism
func ResetPeerStore(dbDir string) error {
peerstore := filepath.Join(dbDir, "peerstore.db")
if tmos.FileExists(peerstore) {
return os.RemoveAll(peerstore)
}
return nil
}

View File

@@ -1,62 +0,0 @@
package commands
import (
"context"
"path/filepath"
"testing"
"github.com/stretchr/testify/require"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/privval"
"github.com/tendermint/tendermint/types"
)
func Test_ResetAll(t *testing.T) {
config := cfg.TestConfig()
dir := t.TempDir()
config.SetRoot(dir)
logger := log.NewNopLogger()
cfg.EnsureRoot(dir)
require.NoError(t, initFilesWithConfig(context.Background(), config, logger, types.ABCIPubKeyTypeEd25519))
pv, err := privval.LoadFilePV(config.PrivValidator.KeyFile(), config.PrivValidator.StateFile())
require.NoError(t, err)
pv.LastSignState.Height = 10
require.NoError(t, pv.Save())
require.NoError(t, ResetAll(config.DBDir(), config.PrivValidator.KeyFile(),
config.PrivValidator.StateFile(), logger, types.ABCIPubKeyTypeEd25519))
require.DirExists(t, config.DBDir())
require.NoFileExists(t, filepath.Join(config.DBDir(), "block.db"))
require.NoFileExists(t, filepath.Join(config.DBDir(), "state.db"))
require.NoFileExists(t, filepath.Join(config.DBDir(), "evidence.db"))
require.NoFileExists(t, filepath.Join(config.DBDir(), "tx_index.db"))
require.FileExists(t, config.PrivValidator.StateFile())
pv, err = privval.LoadFilePV(config.PrivValidator.KeyFile(), config.PrivValidator.StateFile())
require.NoError(t, err)
require.Equal(t, int64(0), pv.LastSignState.Height)
}
func Test_ResetState(t *testing.T) {
config := cfg.TestConfig()
dir := t.TempDir()
config.SetRoot(dir)
logger := log.NewNopLogger()
cfg.EnsureRoot(dir)
require.NoError(t, initFilesWithConfig(context.Background(), config, logger, types.ABCIPubKeyTypeEd25519))
pv, err := privval.LoadFilePV(config.PrivValidator.KeyFile(), config.PrivValidator.StateFile())
require.NoError(t, err)
pv.LastSignState.Height = 10
require.NoError(t, pv.Save())
require.NoError(t, ResetState(config.DBDir(), logger))
require.DirExists(t, config.DBDir())
require.NoFileExists(t, filepath.Join(config.DBDir(), "block.db"))
require.NoFileExists(t, filepath.Join(config.DBDir(), "state.db"))
require.NoFileExists(t, filepath.Join(config.DBDir(), "evidence.db"))
require.NoFileExists(t, filepath.Join(config.DBDir(), "tx_index.db"))
require.FileExists(t, config.PrivValidator.StateFile())
pv, err = privval.LoadFilePV(config.PrivValidator.KeyFile(), config.PrivValidator.StateFile())
require.NoError(t, err)
// private validator state should still be intact.
require.Equal(t, int64(10), pv.LastSignState.Height)
}

View File

@@ -15,10 +15,6 @@ import (
)
func TestRollbackIntegration(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode")
}
var height int64
dir := t.TempDir()
ctx, cancel := context.WithCancel(context.Background())

View File

@@ -51,12 +51,10 @@ func RootCommand(conf *config.Config, logger log.Logger) *cobra.Command {
}
*conf = *pconf
config.EnsureRoot(conf.RootDir)
if err := log.OverrideWithNewLogger(logger, conf.LogFormat, conf.LogLevel); err != nil {
return err
}
if warning := pconf.DeprecatedFieldWarning(); warning != nil {
logger.Info("WARNING", "deprecated field warning", warning)
}
return nil
},

View File

@@ -63,6 +63,7 @@ func AddNodeFlags(cmd *cobra.Command, conf *cfg.Config) {
"p2p.laddr",
conf.P2P.ListenAddress,
"node listen address. (0.0.0.0:0 means any interface, any port)")
cmd.Flags().String("p2p.seeds", conf.P2P.Seeds, "comma-delimited ID@host:port seed nodes") //nolint: staticcheck
cmd.Flags().String("p2p.persistent-peers", conf.P2P.PersistentPeers, "comma-delimited ID@host:port persistent peers")
cmd.Flags().Bool("p2p.upnp", conf.P2P.UPNP, "enable/disable UPNP port forwarding")
cmd.Flags().Bool("p2p.pex", conf.P2P.PexReactor, "enable/disable Peer-Exchange")
@@ -104,7 +105,7 @@ func NewRunNodeCmd(nodeProvider cfg.ServiceProvider, conf *cfg.Config, logger lo
return err
}
ctx, cancel := signal.NotifyContext(cmd.Context(), os.Interrupt, syscall.SIGTERM)
ctx, cancel := signal.NotifyContext(cmd.Context(), syscall.SIGTERM)
defer cancel()
n, err := nodeProvider(ctx, conf, logger)

View File

@@ -239,6 +239,7 @@ Example:
for i := 0; i < nValidators+nNonValidators; i++ {
nodeDir := filepath.Join(outputDir, fmt.Sprintf("%s%d", nodeDirPrefix, i))
config.SetRoot(nodeDir)
config.P2P.AllowDuplicateIP = true
if populatePersistentPeers {
persistentPeersWithoutSelf := make([]string, 0)
for j := 0; j < len(persistentPeers); j++ {

View File

@@ -33,7 +33,9 @@ func main() {
commands.MakeLightCommand(conf, logger),
commands.MakeReplayCommand(conf, logger),
commands.MakeReplayConsoleCommand(conf, logger),
commands.MakeResetCommand(conf, logger),
commands.MakeResetAllCommand(conf, logger),
commands.MakeResetStateCommand(conf, logger),
commands.MakeResetPrivateValidatorCommand(conf, logger),
commands.MakeShowValidatorCommand(conf, logger),
commands.MakeTestnetFilesCommand(conf, logger),
commands.MakeShowNodeIDCommand(conf),
@@ -44,7 +46,6 @@ func main() {
commands.MakeKeyMigrateCommand(conf, logger),
debug.GetDebugCommand(logger),
commands.NewCompletionCmd(rcmd, true),
commands.MakeCompactDBCommand(conf, logger),
)
// NOTE:

View File

@@ -8,7 +8,6 @@ import (
"net/http"
"os"
"path/filepath"
"strings"
"time"
"github.com/tendermint/tendermint/libs/log"
@@ -146,10 +145,6 @@ func (cfg *Config) ValidateBasic() error {
return nil
}
func (cfg *Config) DeprecatedFieldWarning() error {
return cfg.Consensus.DeprecatedFieldWarning()
}
//-----------------------------------------------------------------------------
// BaseConfig
@@ -523,7 +518,7 @@ func DefaultRPCConfig() *RPCConfig {
MaxSubscriptionClients: 100,
MaxSubscriptionsPerClient: 5,
ExperimentalDisableWebsocket: false, // compatible with TM v0.35 and earlier
EventLogWindowSize: 30 * time.Second,
EventLogWindowSize: 0, // disables /events RPC by default
EventLogMaxItems: 0,
TimeoutBroadcastTxCommit: 10 * time.Second,
@@ -612,6 +607,15 @@ type P2PConfig struct { //nolint: maligned
// Address to advertise to peers for them to dial
ExternalAddress string `mapstructure:"external-address"`
// Comma separated list of seed nodes to connect to
// We only use these if we can't connect to peers in the addrbook
//
// Deprecated: This value is not used by the new PEX reactor. Use
// BootstrapPeers instead.
//
// TODO(#5670): Remove once the p2p refactor is complete.
Seeds string `mapstructure:"seeds"`
// Comma separated list of peers to be added to the peer store
// on startup. Either BootstrapPeers or PersistentPeers are
// needed for peer discovery
@@ -627,10 +631,6 @@ type P2PConfig struct { //nolint: maligned
// outbound).
MaxConnections uint16 `mapstructure:"max-connections"`
// MaxOutgoingConnections defines the maximum number of connected peers (inbound and
// outbound).
MaxOutgoingConnections uint16 `mapstructure:"max-outgoing-connections"`
// MaxIncomingConnectionAttempts rate limits the number of incoming connection
// attempts per IP address.
MaxIncomingConnectionAttempts uint `mapstructure:"max-incoming-connection-attempts"`
@@ -642,6 +642,9 @@ type P2PConfig struct { //nolint: maligned
// other peers)
PrivatePeerIDs string `mapstructure:"private-peer-ids"`
// Toggle to disable guard against peers connecting from the same ip.
AllowDuplicateIP bool `mapstructure:"allow-duplicate-ip"`
// Time to wait before flushing messages out on the connection
FlushThrottleTimeout time.Duration `mapstructure:"flush-throttle-timeout"`
@@ -658,9 +661,13 @@ type P2PConfig struct { //nolint: maligned
HandshakeTimeout time.Duration `mapstructure:"handshake-timeout"`
DialTimeout time.Duration `mapstructure:"dial-timeout"`
// Testing params.
// Force dial to fail
TestDialFail bool `mapstructure:"test-dial-fail"`
// Makes it possible to configure which queue backend the p2p
// layer uses. Options are: "fifo" and "simple-priority", and "priority",
// with the default being "simple-priority".
// layer uses. Options are: "fifo" and "priority",
// with the default being "priority".
QueueType string `mapstructure:"queue-type"`
}
@@ -671,7 +678,6 @@ func DefaultP2PConfig() *P2PConfig {
ExternalAddress: "",
UPNP: false,
MaxConnections: 64,
MaxOutgoingConnections: 12,
MaxIncomingConnectionAttempts: 100,
FlushThrottleTimeout: 100 * time.Millisecond,
// The MTU (Maximum Transmission Unit) for Ethernet is 1500 bytes.
@@ -683,9 +689,11 @@ func DefaultP2PConfig() *P2PConfig {
SendRate: 5120000, // 5 mB/s
RecvRate: 5120000, // 5 mB/s
PexReactor: true,
AllowDuplicateIP: false,
HandshakeTimeout: 20 * time.Second,
DialTimeout: 3 * time.Second,
QueueType: "simple-priority",
TestDialFail: false,
QueueType: "priority",
}
}
@@ -704,9 +712,6 @@ func (cfg *P2PConfig) ValidateBasic() error {
if cfg.RecvRate < 0 {
return errors.New("recv-rate can't be negative")
}
if cfg.MaxOutgoingConnections > cfg.MaxConnections {
return errors.New("max-outgoing-connections cannot be larger than max-connections")
}
return nil
}
@@ -714,6 +719,7 @@ func (cfg *P2PConfig) ValidateBasic() error {
func TestP2PConfig() *P2PConfig {
cfg := DefaultP2PConfig()
cfg.ListenAddress = "tcp://127.0.0.1:36656"
cfg.AllowDuplicateIP = true
cfg.FlushThrottleTimeout = 10 * time.Millisecond
return cfg
}
@@ -723,10 +729,9 @@ func TestP2PConfig() *P2PConfig {
// MempoolConfig defines the configuration options for the Tendermint mempool.
type MempoolConfig struct {
RootDir string `mapstructure:"home"`
// Whether to broadcast transactions to other nodes
Broadcast bool `mapstructure:"broadcast"`
RootDir string `mapstructure:"home"`
Recheck bool `mapstructure:"recheck"`
Broadcast bool `mapstructure:"broadcast"`
// Maximum number of transactions in the mempool
Size int `mapstructure:"size"`
@@ -773,6 +778,7 @@ type MempoolConfig struct {
// DefaultMempoolConfig returns a default configuration for the Tendermint mempool.
func DefaultMempoolConfig() *MempoolConfig {
return &MempoolConfig{
Recheck: true,
Broadcast: true,
// Each signature verification takes .5ms, Size reduced until we implement
// ABCI Recheck
@@ -950,6 +956,27 @@ type ConsensusConfig struct {
WalPath string `mapstructure:"wal-file"`
walFile string // overrides WalPath if set
// TODO: remove timeout configs, these should be global not local
// How long we wait for a proposal block before prevoting nil
TimeoutPropose time.Duration `mapstructure:"timeout-propose"`
// How much timeout-propose increases with each round
TimeoutProposeDelta time.Duration `mapstructure:"timeout-propose-delta"`
// How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil)
TimeoutPrevote time.Duration `mapstructure:"timeout-prevote"`
// How much the timeout-prevote increases with each round
TimeoutPrevoteDelta time.Duration `mapstructure:"timeout-prevote-delta"`
// How long we wait after receiving +2/3 precommits for “anything” (ie. not a single block or nil)
TimeoutPrecommit time.Duration `mapstructure:"timeout-precommit"`
// How much the timeout-precommit increases with each round
TimeoutPrecommitDelta time.Duration `mapstructure:"timeout-precommit-delta"`
// How long we wait after committing a block, before starting on the new
// height (this gives us a chance to receive some more precommits, even
// though we already have +2/3).
TimeoutCommit time.Duration `mapstructure:"timeout-commit"`
// Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
SkipTimeoutCommit bool `mapstructure:"skip-timeout-commit"`
// EmptyBlocks mode and possible interval between empty blocks
CreateEmptyBlocks bool `mapstructure:"create-empty-blocks"`
CreateEmptyBlocksInterval time.Duration `mapstructure:"create-empty-blocks-interval"`
@@ -959,59 +986,20 @@ type ConsensusConfig struct {
PeerQueryMaj23SleepDuration time.Duration `mapstructure:"peer-query-maj23-sleep-duration"`
DoubleSignCheckHeight int64 `mapstructure:"double-sign-check-height"`
// TODO: The following fields are all temporary overrides that should exist only
// for the duration of the v0.36 release. The below fields should be completely
// removed in the v0.37 release of Tendermint.
// See: https://github.com/tendermint/tendermint/issues/8188
// UnsafeProposeTimeoutOverride provides an unsafe override of the Propose
// timeout consensus parameter. It configures how long the consensus engine
// will wait to receive a proposal block before prevoting nil.
UnsafeProposeTimeoutOverride time.Duration `mapstructure:"unsafe-propose-timeout-override"`
// UnsafeProposeTimeoutDeltaOverride provides an unsafe override of the
// ProposeDelta timeout consensus parameter. It configures how much the
// propose timeout increases with each round.
UnsafeProposeTimeoutDeltaOverride time.Duration `mapstructure:"unsafe-propose-timeout-delta-override"`
// UnsafeVoteTimeoutOverride provides an unsafe override of the Vote timeout
// consensus parameter. It configures how long the consensus engine will wait
// to gather additional votes after receiving +2/3 votes in a round.
UnsafeVoteTimeoutOverride time.Duration `mapstructure:"unsafe-vote-timeout-override"`
// UnsafeVoteTimeoutDeltaOverride provides an unsafe override of the VoteDelta
// timeout consensus parameter. It configures how much the vote timeout
// increases with each round.
UnsafeVoteTimeoutDeltaOverride time.Duration `mapstructure:"unsafe-vote-timeout-delta-override"`
// UnsafeCommitTimeoutOverride provides an unsafe override of the Commit timeout
// consensus parameter. It configures how long the consensus engine will wait
// after receiving +2/3 precommits before beginning the next height.
UnsafeCommitTimeoutOverride time.Duration `mapstructure:"unsafe-commit-timeout-override"`
// UnsafeBypassCommitTimeoutOverride provides an unsafe override of the
// BypassCommitTimeout consensus parameter. It configures if the consensus
// engine will wait for the full Commit timeout before proceeding to the next height.
// If it is set to true, the consensus engine will proceed to the next height
// as soon as the node has gathered votes from all of the validators on the network.
UnsafeBypassCommitTimeoutOverride *bool `mapstructure:"unsafe-bypass-commit-timeout-override"`
// Deprecated timeout parameters. These parameters are present in this struct
// so that they can be parsed so that validation can check if they have erroneously
// been included and provide a helpful error message.
// These fields should be completely removed in v0.37.
// See: https://github.com/tendermint/tendermint/issues/8188
DeprecatedTimeoutPropose *interface{} `mapstructure:"timeout-propose"`
DeprecatedTimeoutProposeDelta *interface{} `mapstructure:"timeout-propose-delta"`
DeprecatedTimeoutPrevote *interface{} `mapstructure:"timeout-prevote"`
DeprecatedTimeoutPrevoteDelta *interface{} `mapstructure:"timeout-prevote-delta"`
DeprecatedTimeoutPrecommit *interface{} `mapstructure:"timeout-precommit"`
DeprecatedTimeoutPrecommitDelta *interface{} `mapstructure:"timeout-precommit-delta"`
DeprecatedTimeoutCommit *interface{} `mapstructure:"timeout-commit"`
DeprecatedSkipTimeoutCommit *interface{} `mapstructure:"skip-timeout-commit"`
}
// DefaultConsensusConfig returns a default configuration for the consensus service
func DefaultConsensusConfig() *ConsensusConfig {
return &ConsensusConfig{
WalPath: filepath.Join(defaultDataDir, "cs.wal", "wal"),
TimeoutPropose: 3000 * time.Millisecond,
TimeoutProposeDelta: 500 * time.Millisecond,
TimeoutPrevote: 1000 * time.Millisecond,
TimeoutPrevoteDelta: 500 * time.Millisecond,
TimeoutPrecommit: 1000 * time.Millisecond,
TimeoutPrecommitDelta: 500 * time.Millisecond,
TimeoutCommit: 1000 * time.Millisecond,
SkipTimeoutCommit: false,
CreateEmptyBlocks: true,
CreateEmptyBlocksInterval: 0 * time.Second,
PeerGossipSleepDuration: 100 * time.Millisecond,
@@ -1023,6 +1011,14 @@ func DefaultConsensusConfig() *ConsensusConfig {
// TestConsensusConfig returns a configuration for testing the consensus service
func TestConsensusConfig() *ConsensusConfig {
cfg := DefaultConsensusConfig()
cfg.TimeoutPropose = 40 * time.Millisecond
cfg.TimeoutProposeDelta = 1 * time.Millisecond
cfg.TimeoutPrevote = 10 * time.Millisecond
cfg.TimeoutPrevoteDelta = 1 * time.Millisecond
cfg.TimeoutPrecommit = 10 * time.Millisecond
cfg.TimeoutPrecommitDelta = 1 * time.Millisecond
cfg.TimeoutCommit = 10 * time.Millisecond
cfg.SkipTimeoutCommit = true
cfg.PeerGossipSleepDuration = 5 * time.Millisecond
cfg.PeerQueryMaj23SleepDuration = 250 * time.Millisecond
cfg.DoubleSignCheckHeight = int64(0)
@@ -1034,6 +1030,33 @@ func (cfg *ConsensusConfig) WaitForTxs() bool {
return !cfg.CreateEmptyBlocks || cfg.CreateEmptyBlocksInterval > 0
}
// Propose returns the amount of time to wait for a proposal
func (cfg *ConsensusConfig) Propose(round int32) time.Duration {
return time.Duration(
cfg.TimeoutPropose.Nanoseconds()+cfg.TimeoutProposeDelta.Nanoseconds()*int64(round),
) * time.Nanosecond
}
// Prevote returns the amount of time to wait for straggler votes after receiving any +2/3 prevotes
func (cfg *ConsensusConfig) Prevote(round int32) time.Duration {
return time.Duration(
cfg.TimeoutPrevote.Nanoseconds()+cfg.TimeoutPrevoteDelta.Nanoseconds()*int64(round),
) * time.Nanosecond
}
// Precommit returns the amount of time to wait for straggler votes after receiving any +2/3 precommits
func (cfg *ConsensusConfig) Precommit(round int32) time.Duration {
return time.Duration(
cfg.TimeoutPrecommit.Nanoseconds()+cfg.TimeoutPrecommitDelta.Nanoseconds()*int64(round),
) * time.Nanosecond
}
// Commit returns the amount of time to wait for straggler votes after receiving +2/3 precommits
// for a single block (i.e. a commit).
func (cfg *ConsensusConfig) Commit(t time.Time) time.Time {
return t.Add(cfg.TimeoutCommit)
}
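
To make the round-based escalation concrete: each helper above returns the base timeout plus round * delta. With the defaults shown earlier (TimeoutPropose 3000ms, TimeoutProposeDelta 500ms) the proposal wait grows from 3s at round 0 to 4s at round 2. A standard-library sketch of the same arithmetic:

package main

import (
    "fmt"
    "time"
)

// proposeTimeout mirrors ConsensusConfig.Propose from the hunk above:
// base timeout plus a per-round delta.
func proposeTimeout(base, delta time.Duration, round int32) time.Duration {
    return time.Duration(base.Nanoseconds()+delta.Nanoseconds()*int64(round)) * time.Nanosecond
}

func main() {
    base := 3000 * time.Millisecond // DefaultConsensusConfig TimeoutPropose
    delta := 500 * time.Millisecond // DefaultConsensusConfig TimeoutProposeDelta

    for _, round := range []int32{0, 1, 2} {
        fmt.Printf("round %d: wait %s for a proposal\n", round, proposeTimeout(base, delta, round))
    }
    // Output:
    // round 0: wait 3s for a proposal
    // round 1: wait 3.5s for a proposal
    // round 2: wait 4s for a proposal
}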
// WalFile returns the full path to the write-ahead log file
func (cfg *ConsensusConfig) WalFile() string {
if cfg.walFile != "" {
@@ -1050,20 +1073,26 @@ func (cfg *ConsensusConfig) SetWalFile(walFile string) {
// ValidateBasic performs basic validation (checking param bounds, etc.) and
// returns an error if any check fails.
func (cfg *ConsensusConfig) ValidateBasic() error {
if cfg.UnsafeProposeTimeoutOverride < 0 {
return errors.New("unsafe-propose-timeout-override can't be negative")
if cfg.TimeoutPropose < 0 {
return errors.New("timeout-propose can't be negative")
}
if cfg.UnsafeProposeTimeoutDeltaOverride < 0 {
return errors.New("unsafe-propose-timeout-delta-override can't be negative")
if cfg.TimeoutProposeDelta < 0 {
return errors.New("timeout-propose-delta can't be negative")
}
if cfg.UnsafeVoteTimeoutOverride < 0 {
return errors.New("unsafe-vote-timeout-override can't be negative")
if cfg.TimeoutPrevote < 0 {
return errors.New("timeout-prevote can't be negative")
}
if cfg.UnsafeVoteTimeoutDeltaOverride < 0 {
return errors.New("unsafe-vote-timeout-delta-override can't be negative")
if cfg.TimeoutPrevoteDelta < 0 {
return errors.New("timeout-prevote-delta can't be negative")
}
if cfg.UnsafeCommitTimeoutOverride < 0 {
return errors.New("unsafe-commit-timeout-override can't be negative")
if cfg.TimeoutPrecommit < 0 {
return errors.New("timeout-precommit can't be negative")
}
if cfg.TimeoutPrecommitDelta < 0 {
return errors.New("timeout-precommit-delta can't be negative")
}
if cfg.TimeoutCommit < 0 {
return errors.New("timeout-commit can't be negative")
}
if cfg.CreateEmptyBlocksInterval < 0 {
return errors.New("create-empty-blocks-interval can't be negative")
@@ -1080,44 +1109,6 @@ func (cfg *ConsensusConfig) ValidateBasic() error {
return nil
}
func (cfg *ConsensusConfig) DeprecatedFieldWarning() error {
var fields []string
if cfg.DeprecatedSkipTimeoutCommit != nil {
fields = append(fields, "skip-timeout-commit")
}
if cfg.DeprecatedTimeoutPropose != nil {
fields = append(fields, "timeout-propose")
}
if cfg.DeprecatedTimeoutProposeDelta != nil {
fields = append(fields, "timeout-propose-delta")
}
if cfg.DeprecatedTimeoutPrevote != nil {
fields = append(fields, "timeout-prevote")
}
if cfg.DeprecatedTimeoutPrevoteDelta != nil {
fields = append(fields, "timeout-prevote-delta")
}
if cfg.DeprecatedTimeoutPrecommit != nil {
fields = append(fields, "timeout-precommit")
}
if cfg.DeprecatedTimeoutPrecommitDelta != nil {
fields = append(fields, "timeout-precommit-delta")
}
if cfg.DeprecatedTimeoutCommit != nil {
fields = append(fields, "timeout-commit")
}
if len(fields) != 0 {
return fmt.Errorf("the following deprecated fields were set in the "+
"configuration file: %s. These fields were removed in v0.36. Timeout "+
"configuration has been moved to the ConsensusParams. For more information see "+
"https://tinyurl.com/adr074", strings.Join(fields, ", "))
}
return nil
}
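
A plausible call site for this helper, sketched under assumptions (the real node wiring is not shown in this diff): decode the configuration, then surface any deprecated timeout fields as a startup warning rather than a hard failure.

package main

import (
    "fmt"
    "os"

    "github.com/tendermint/tendermint/config"
)

func main() {
    // In a real node cfg would be decoded from config.toml; the defaults set
    // none of the Deprecated* fields, so no warning is produced here.
    cfg := config.DefaultConsensusConfig()

    if err := cfg.DeprecatedFieldWarning(); err != nil {
        // The error text lists the offending fields and points operators at ADR-74.
        fmt.Fprintln(os.Stderr, "config warning:", err)
    }
}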
//-----------------------------------------------------------------------------
// TxIndexConfig
// Remember that Event has the following structure:
@@ -1134,8 +1125,9 @@ type TxIndexConfig struct {
// If list contains `null`, meaning no indexer service will be used.
//
// Options:
// 1) "null" (default) - no indexer services.
// 2) "kv" - a simple indexer backed by key-value storage (see DBBackend)
// 1) "null" - no indexer services.
// 2) "kv" (default) - the simplest possible indexer,
// backed by key-value storage (defaults to levelDB; see DBBackend).
// 3) "psql" - the indexer services backed by PostgreSQL.
Indexer []string `mapstructure:"indexer"`
@@ -1146,12 +1138,14 @@ type TxIndexConfig struct {
// DefaultTxIndexConfig returns a default configuration for the transaction indexer.
func DefaultTxIndexConfig() *TxIndexConfig {
return &TxIndexConfig{Indexer: []string{"null"}}
return &TxIndexConfig{
Indexer: []string{"kv"},
}
}
// TestTxIndexConfig returns a default configuration for the transaction indexer.
func TestTxIndexConfig() *TxIndexConfig {
return &TxIndexConfig{Indexer: []string{"kv"}}
return DefaultTxIndexConfig()
}
//-----------------------------------------------------------------------------
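
For reference, a small sketch of selecting the indexer from the Go side rather than via config.toml; DefaultTxIndexConfig and the Indexer field are taken from the hunk above, everything else is illustrative.

package main

import (
    "fmt"

    "github.com/tendermint/tendermint/config"
)

func main() {
    cfg := config.DefaultTxIndexConfig()

    // Opt in to the key-value indexer explicitly; "psql" is the other
    // non-null option listed above.
    cfg.Indexer = []string{"kv"}

    fmt.Println(cfg.Indexer) // [kv]
}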

View File

@@ -29,8 +29,8 @@ func TestConfigValidateBasic(t *testing.T) {
cfg := DefaultConfig()
assert.NoError(t, cfg.ValidateBasic())
// tamper with unsafe-propose-timeout-override
cfg.Consensus.UnsafeProposeTimeoutOverride = -10 * time.Second
// tamper with timeout_propose
cfg.Consensus.TimeoutPropose = -10 * time.Second
assert.Error(t, cfg.ValidateBasic())
}
@@ -106,21 +106,25 @@ func TestConsensusConfig_ValidateBasic(t *testing.T) {
modify func(*ConsensusConfig)
expectErr bool
}{
"UnsafeProposeTimeoutOverride": {func(c *ConsensusConfig) { c.UnsafeProposeTimeoutOverride = time.Second }, false},
"UnsafeProposeTimeoutOverride negative": {func(c *ConsensusConfig) { c.UnsafeProposeTimeoutOverride = -1 }, true},
"UnsafeProposeTimeoutDeltaOverride": {func(c *ConsensusConfig) { c.UnsafeProposeTimeoutDeltaOverride = time.Second }, false},
"UnsafeProposeTimeoutDeltaOverride negative": {func(c *ConsensusConfig) { c.UnsafeProposeTimeoutDeltaOverride = -1 }, true},
"UnsafePrevoteTimeoutOverride": {func(c *ConsensusConfig) { c.UnsafeVoteTimeoutOverride = time.Second }, false},
"UnsafePrevoteTimeoutOverride negative": {func(c *ConsensusConfig) { c.UnsafeVoteTimeoutOverride = -1 }, true},
"UnsafePrevoteTimeoutDeltaOverride": {func(c *ConsensusConfig) { c.UnsafeVoteTimeoutDeltaOverride = time.Second }, false},
"UnsafePrevoteTimeoutDeltaOverride negative": {func(c *ConsensusConfig) { c.UnsafeVoteTimeoutDeltaOverride = -1 }, true},
"UnsafeCommitTimeoutOverride": {func(c *ConsensusConfig) { c.UnsafeCommitTimeoutOverride = time.Second }, false},
"UnsafeCommitTimeoutOverride negative": {func(c *ConsensusConfig) { c.UnsafeCommitTimeoutOverride = -1 }, true},
"PeerGossipSleepDuration": {func(c *ConsensusConfig) { c.PeerGossipSleepDuration = time.Second }, false},
"PeerGossipSleepDuration negative": {func(c *ConsensusConfig) { c.PeerGossipSleepDuration = -1 }, true},
"PeerQueryMaj23SleepDuration": {func(c *ConsensusConfig) { c.PeerQueryMaj23SleepDuration = time.Second }, false},
"PeerQueryMaj23SleepDuration negative": {func(c *ConsensusConfig) { c.PeerQueryMaj23SleepDuration = -1 }, true},
"DoubleSignCheckHeight negative": {func(c *ConsensusConfig) { c.DoubleSignCheckHeight = -1 }, true},
"TimeoutPropose": {func(c *ConsensusConfig) { c.TimeoutPropose = time.Second }, false},
"TimeoutPropose negative": {func(c *ConsensusConfig) { c.TimeoutPropose = -1 }, true},
"TimeoutProposeDelta": {func(c *ConsensusConfig) { c.TimeoutProposeDelta = time.Second }, false},
"TimeoutProposeDelta negative": {func(c *ConsensusConfig) { c.TimeoutProposeDelta = -1 }, true},
"TimeoutPrevote": {func(c *ConsensusConfig) { c.TimeoutPrevote = time.Second }, false},
"TimeoutPrevote negative": {func(c *ConsensusConfig) { c.TimeoutPrevote = -1 }, true},
"TimeoutPrevoteDelta": {func(c *ConsensusConfig) { c.TimeoutPrevoteDelta = time.Second }, false},
"TimeoutPrevoteDelta negative": {func(c *ConsensusConfig) { c.TimeoutPrevoteDelta = -1 }, true},
"TimeoutPrecommit": {func(c *ConsensusConfig) { c.TimeoutPrecommit = time.Second }, false},
"TimeoutPrecommit negative": {func(c *ConsensusConfig) { c.TimeoutPrecommit = -1 }, true},
"TimeoutPrecommitDelta": {func(c *ConsensusConfig) { c.TimeoutPrecommitDelta = time.Second }, false},
"TimeoutPrecommitDelta negative": {func(c *ConsensusConfig) { c.TimeoutPrecommitDelta = -1 }, true},
"TimeoutCommit": {func(c *ConsensusConfig) { c.TimeoutCommit = time.Second }, false},
"TimeoutCommit negative": {func(c *ConsensusConfig) { c.TimeoutCommit = -1 }, true},
"PeerGossipSleepDuration": {func(c *ConsensusConfig) { c.PeerGossipSleepDuration = time.Second }, false},
"PeerGossipSleepDuration negative": {func(c *ConsensusConfig) { c.PeerGossipSleepDuration = -1 }, true},
"PeerQueryMaj23SleepDuration": {func(c *ConsensusConfig) { c.PeerQueryMaj23SleepDuration = time.Second }, false},
"PeerQueryMaj23SleepDuration negative": {func(c *ConsensusConfig) { c.PeerQueryMaj23SleepDuration = -1 }, true},
"DoubleSignCheckHeight negative": {func(c *ConsensusConfig) { c.DoubleSignCheckHeight = -1 }, true},
}
for desc, tc := range testcases {
tc := tc // appease linter

View File

@@ -25,6 +25,5 @@ type DBProvider func(*DBContext) (dbm.DB, error)
// specified in the Config.
func DefaultDBProvider(ctx *DBContext) (dbm.DB, error) {
dbType := dbm.BackendType(ctx.Config.DBBackend)
return dbm.NewDB(ctx.ID, dbType, ctx.Config.DBDir())
}
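
A usage sketch for this provider, assuming this file lives in the config package and that DBContext carries the ID and Config fields the function body references; the root directory and database ID here are illustrative.

package main

import (
    "fmt"

    "github.com/tendermint/tendermint/config"
)

func main() {
    cfg := config.DefaultConfig()
    cfg.RootDir = "/tmp/tm-example" // hypothetical node home

    // Open a database using the backend and data dir from the config.
    db, err := config.DefaultDBProvider(&config.DBContext{ID: "blockstore", Config: cfg})
    if err != nil {
        panic(err)
    }
    defer db.Close()

    fmt.Println("opened database under", cfg.DBDir())
}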

View File

@@ -12,8 +12,8 @@ import (
tmrand "github.com/tendermint/tendermint/libs/rand"
)
// defaultDirPerm is the default permissions used when creating directories.
const defaultDirPerm = 0700
// DefaultDirPerm is the default permissions used when creating directories.
const DefaultDirPerm = 0700
var configTemplate *template.Template
@@ -32,13 +32,13 @@ func init() {
// EnsureRoot creates the root, config, and data directories if they don't exist,
// and panics if it fails.
func EnsureRoot(rootDir string) {
if err := tmos.EnsureDir(rootDir, defaultDirPerm); err != nil {
if err := tmos.EnsureDir(rootDir, DefaultDirPerm); err != nil {
panic(err.Error())
}
if err := tmos.EnsureDir(filepath.Join(rootDir, defaultConfigDir), defaultDirPerm); err != nil {
if err := tmos.EnsureDir(filepath.Join(rootDir, defaultConfigDir), DefaultDirPerm); err != nil {
panic(err.Error())
}
if err := tmos.EnsureDir(filepath.Join(rootDir, defaultDataDir), defaultDirPerm); err != nil {
if err := tmos.EnsureDir(filepath.Join(rootDir, defaultDataDir), DefaultDirPerm); err != nil {
panic(err.Error())
}
}
@@ -282,9 +282,7 @@ pprof-laddr = "{{ .RPC.PprofListenAddress }}"
#######################################################
[p2p]
# Select the p2p internal queue.
# Options are: "fifo" and "simple-priority", and "priority",
# with the default being "simple-priority".
# Select the p2p internal queue
queue-type = "{{ .P2P.QueueType }}"
# Address to listen for incoming connections
@@ -297,6 +295,13 @@ laddr = "{{ .P2P.ListenAddress }}"
# example: 159.89.10.97:26656
external-address = "{{ .P2P.ExternalAddress }}"
# Comma separated list of seed nodes to connect to
# We only use these if we can't connect to peers in the addrbook
# NOTE: not used by the new PEX reactor. Please use BootstrapPeers instead.
# TODO: Remove once p2p refactor is complete
# ref: https://github.com/tendermint/tendermint/issues/5670
seeds = "{{ .P2P.Seeds }}"
# Comma separated list of peers to be added to the peer store
# on startup. Either BootstrapPeers or PersistentPeers are
# needed for peer discovery
@@ -311,10 +316,6 @@ upnp = {{ .P2P.UPNP }}
# Maximum number of connections (inbound and outbound).
max-connections = {{ .P2P.MaxConnections }}
# Maximum number of connections reserved for outgoing
# connections. Must be less than max-connections
max-outgoing-connections = {{ .P2P.MaxOutgoingConnections }}
# Rate limits the number of incoming connection attempts per IP address.
max-incoming-connection-attempts = {{ .P2P.MaxIncomingConnectionAttempts }}
@@ -325,6 +326,9 @@ pex = {{ .P2P.PexReactor }}
# Warning: IPs will be exposed at /net_info, for more information https://github.com/tendermint/tendermint/issues/3055
private-peer-ids = "{{ .P2P.PrivatePeerIDs }}"
# Toggle to disable guard against peers connecting from the same ip.
allow-duplicate-ip = {{ .P2P.AllowDuplicateIP }}
# Peer connection configuration.
handshake-timeout = "{{ .P2P.HandshakeTimeout }}"
dial-timeout = "{{ .P2P.DialTimeout }}"
@@ -351,11 +355,7 @@ recv-rate = {{ .P2P.RecvRate }}
#######################################################
[mempool]
# recheck has been moved from a config option to a global
# consensus param in v0.36
# See https://github.com/tendermint/tendermint/issues/8244 for more information.
# Set true to broadcast transactions in the mempool to other nodes
recheck = {{ .Mempool.Recheck }}
broadcast = {{ .Mempool.Broadcast }}
# Maximum number of transactions in the mempool
@@ -450,12 +450,32 @@ fetchers = "{{ .StateSync.Fetchers }}"
wal-file = "{{ js .Consensus.WalPath }}"
# How long we wait for a proposal block before prevoting nil
timeout-propose = "{{ .Consensus.TimeoutPropose }}"
# How much timeout-propose increases with each round
timeout-propose-delta = "{{ .Consensus.TimeoutProposeDelta }}"
# How long we wait after receiving +2/3 prevotes for “anything” (i.e. not a single block or nil)
timeout-prevote = "{{ .Consensus.TimeoutPrevote }}"
# How much the timeout-prevote increases with each round
timeout-prevote-delta = "{{ .Consensus.TimeoutPrevoteDelta }}"
# How long we wait after receiving +2/3 precommits for “anything” (i.e. not a single block or nil)
timeout-precommit = "{{ .Consensus.TimeoutPrecommit }}"
# How much the timeout-precommit increases with each round
timeout-precommit-delta = "{{ .Consensus.TimeoutPrecommitDelta }}"
# How long we wait after committing a block, before starting on the new
# height (this gives us a chance to receive some more precommits, even
# though we already have +2/3).
timeout-commit = "{{ .Consensus.TimeoutCommit }}"
# How many blocks to look back to check existence of the node's consensus votes before joining consensus
# When non-zero, the node will panic upon restart
# if the same consensus key was used to sign {double-sign-check-height} last blocks.
# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic.
double-sign-check-height = {{ .Consensus.DoubleSignCheckHeight }}
# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
skip-timeout-commit = {{ .Consensus.SkipTimeoutCommit }}
# EmptyBlocks mode and possible interval between empty blocks
create-empty-blocks = {{ .Consensus.CreateEmptyBlocks }}
create-empty-blocks-interval = "{{ .Consensus.CreateEmptyBlocksInterval }}"
@@ -464,50 +484,6 @@ create-empty-blocks-interval = "{{ .Consensus.CreateEmptyBlocksInterval }}"
peer-gossip-sleep-duration = "{{ .Consensus.PeerGossipSleepDuration }}"
peer-query-maj23-sleep-duration = "{{ .Consensus.PeerQueryMaj23SleepDuration }}"
### Unsafe Timeout Overrides ###
# These fields provide temporary overrides for the Timeout consensus parameters.
# Use of these parameters is strongly discouraged. Using these parameters may have serious
# liveness implications for the validator and for the chain.
#
# These fields will be removed from the configuration file in the v0.37 release of Tendermint.
# For additional information, see ADR-74:
# https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-074-timeout-params.md
# This field provides an unsafe override of the Propose timeout consensus parameter.
# This field configures how long the consensus engine will wait for a proposal block before prevoting nil.
# If this field is set to a value greater than 0, it will take effect.
# unsafe-propose-timeout-override = {{ .Consensus.UnsafeProposeTimeoutOverride }}
# This field provides an unsafe override of the ProposeDelta timeout consensus parameter.
# This field configures how much the propose timeout increases with each round.
# If this field is set to a value greater than 0, it will take effect.
# unsafe-propose-timeout-delta-override = {{ .Consensus.UnsafeProposeTimeoutDeltaOverride }}
# This field provides an unsafe override of the Vote timeout consensus parameter.
# This field configures how long the consensus engine will wait after
# receiving +2/3 votes in a round.
# If this field is set to a value greater than 0, it will take effect.
# unsafe-vote-timeout-override = {{ .Consensus.UnsafeVoteTimeoutOverride }}
# This field provides an unsafe override of the VoteDelta timeout consensus parameter.
# This field configures how much the vote timeout increases with each round.
# If this field is set to a value greater than 0, it will take effect.
# unsafe-vote-timeout-delta-override = {{ .Consensus.UnsafeVoteTimeoutDeltaOverride }}
# This field provides an unsafe override of the Commit timeout consensus parameter.
# This field configures how long the consensus engine will wait after receiving
# +2/3 precommits before beginning the next height.
# If this field is set to a value greater than 0, it will take effect.
# unsafe-commit-timeout-override = {{ .Consensus.UnsafeCommitTimeoutOverride }}
# This field provides an unsafe override of the BypassCommitTimeout consensus parameter.
# This field configures if the consensus engine will wait for the full Commit timeout
# before proceeding to the next height.
# If this field is set to true, the consensus engine will proceed to the next height
# as soon as the node has gathered votes from all of the validators on the network.
# unsafe-bypass-commit-timeout-override =
#######################################################
### Transaction Indexer Configuration Options ###
#######################################################
@@ -520,8 +496,8 @@ peer-query-maj23-sleep-duration = "{{ .Consensus.PeerQueryMaj23SleepDuration }}"
# to decide which txs to index based on configuration set in the application.
#
# Options:
# 1) "null" (default) - no indexer services.
# 2) "kv" - a simple indexer backed by key-value storage (see DBBackend)
# 1) "null"
# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
# 3) "psql" - the indexer services backed by PostgreSQL.
# When "kv" or "psql" is chosen "tx.height" and "tx.hash" will always be indexed.
indexer = [{{ range $i, $e := .TxIndex.Indexer }}{{if $i}}, {{end}}{{ printf "%q" $e}}{{end}}]
@@ -566,10 +542,10 @@ func ResetTestRootWithChainID(dir, testName string, chainID string) (*Config, er
return nil, err
}
// ensure config and data subdirs are created
if err := tmos.EnsureDir(filepath.Join(rootDir, defaultConfigDir), defaultDirPerm); err != nil {
if err := tmos.EnsureDir(filepath.Join(rootDir, defaultConfigDir), DefaultDirPerm); err != nil {
return nil, err
}
if err := tmos.EnsureDir(filepath.Join(rootDir, defaultDataDir), defaultDirPerm); err != nil {
if err := tmos.EnsureDir(filepath.Join(rootDir, defaultDataDir), DefaultDirPerm); err != nil {
return nil, err
}
@@ -612,7 +588,7 @@ func writeFile(filePath string, contents []byte, mode os.FileMode) error {
return nil
}
const testGenesisFmt = `{
var testGenesisFmt = `{
"genesis_time": "2018-10-10T08:20:13.695936996Z",
"chain_id": "%s",
"initial_height": "1",
@@ -626,14 +602,6 @@ const testGenesisFmt = `{
"message_delay": "500000000",
"precision": "10000000"
},
"timeout": {
"propose": "30000000",
"propose_delta": "50000",
"vote": "30000000",
"vote_delta": "50000",
"commit": "10000000",
"bypass_timeout_commit": true
},
"evidence": {
"max_age_num_blocks": "100000",
"max_age_duration": "172800000000000",
@@ -659,7 +627,7 @@ const testGenesisFmt = `{
"app_hash": ""
}`
const testPrivValidatorKey = `{
var testPrivValidatorKey = `{
"address": "A3258DCBF45DCA0DF052981870F2D1441A36D145",
"pub_key": {
"type": "tendermint/PubKeyEd25519",
@@ -671,7 +639,7 @@ const testPrivValidatorKey = `{
}
}`
const testPrivValidatorState = `{
var testPrivValidatorState = `{
"height": "0",
"round": 0,
"step": 0

View File

@@ -12,7 +12,7 @@ For any specific algorithm, use its specific module e.g.
## Binary encoding
For Binary encoding, please refer to the [Tendermint encoding specification](https://docs.tendermint.com/master/spec/core/encoding.html).
For Binary encoding, please refer to the [Tendermint encoding specification](https://docs.tendermint.com/master/spec/blockchain/encoding.html).
## JSON Encoding

View File

@@ -1,18 +1,14 @@
package crypto
import (
"crypto/sha256"
"github.com/tendermint/tendermint/crypto/tmhash"
"github.com/tendermint/tendermint/internal/jsontypes"
"github.com/tendermint/tendermint/libs/bytes"
)
const (
// HashSize is the size in bytes of an AddressHash.
HashSize = sha256.Size
// AddressSize is the size of a pubkey address.
AddressSize = 20
AddressSize = tmhash.TruncatedSize
)
// An address is a []byte, but hex-encoded even in JSON.
@@ -20,19 +16,8 @@ const (
// Use an alias so Unmarshal methods (with ptr receivers) are available too.
type Address = bytes.HexBytes
// AddressHash computes a truncated SHA-256 hash of bz for use as
// a peer address.
//
// See: https://docs.tendermint.com/master/spec/core/data_structures.html#address
func AddressHash(bz []byte) Address {
h := sha256.Sum256(bz)
return Address(h[:AddressSize])
}
// Checksum returns the SHA256 of the bz.
func Checksum(bz []byte) []byte {
h := sha256.Sum256(bz)
return h[:]
return Address(tmhash.SumTruncated(bz))
}
type PubKey interface {
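
Both sides of this hunk derive an address the same way: take SHA-256 of the public key bytes and keep the first 20 bytes (AddressSize, i.e. tmhash.TruncatedSize). A standard-library sketch with a made-up key:

package main

import (
    "crypto/sha256"
    "encoding/hex"
    "fmt"
)

func main() {
    // Stand-in for raw public key bytes; any byte slice works for the sketch.
    pubKey := []byte("example-public-key-bytes")

    sum := sha256.Sum256(pubKey)
    address := sum[:20] // AddressSize / tmhash.TruncatedSize bytes

    fmt.Println(hex.EncodeToString(address))
}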

View File

@@ -2,8 +2,6 @@ package ed25519
import (
"bytes"
"crypto/rand"
"crypto/sha256"
"crypto/subtle"
"errors"
"fmt"
@@ -13,6 +11,7 @@ import (
"github.com/oasisprotocol/curve25519-voi/primitives/ed25519/extra/cache"
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/tmhash"
"github.com/tendermint/tendermint/internal/jsontypes"
)
@@ -125,7 +124,7 @@ func (privKey PrivKey) Type() string {
// It uses OS randomness in conjunction with the current global random seed
// in tendermint/libs/common to generate the private key.
func GenPrivKey() PrivKey {
return genPrivKey(rand.Reader)
return genPrivKey(crypto.CReader())
}
// genPrivKey generates a new ed25519 private key using the provided reader.
@@ -143,8 +142,9 @@ func genPrivKey(rand io.Reader) PrivKey {
// NOTE: secret should be the output of a KDF like bcrypt,
// if it's derived from user input.
func GenPrivKeyFromSecret(secret []byte) PrivKey {
seed := sha256.Sum256(secret)
return PrivKey(ed25519.NewKeyFromSeed(seed[:]))
seed := crypto.Sha256(secret) // Not Ripemd160 because we want 32 bytes.
return PrivKey(ed25519.NewKeyFromSeed(seed))
}
//-------------------------------------
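
A minimal round-trip sketch for the deterministic key path touched here, assuming the crypto/ed25519 package API shown in this diff (GenPrivKeyFromSecret, Sign, PubKey, VerifySignature):

package main

import (
    "fmt"

    "github.com/tendermint/tendermint/crypto/ed25519"
)

func main() {
    // Derive a deterministic key from a secret; as the comment above notes,
    // the secret should really be the output of a KDF such as bcrypt.
    priv := ed25519.GenPrivKeyFromSecret([]byte("correct horse battery staple"))

    msg := []byte("hello, consensus")
    sig, err := priv.Sign(msg)
    if err != nil {
        panic(err)
    }

    fmt.Println("valid:", priv.PubKey().VerifySignature(msg, sig)) // valid: true
}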
@@ -162,7 +162,7 @@ func (pubKey PubKey) Address() crypto.Address {
if len(pubKey) != PubKeySize {
panic("pubkey is incorrect size")
}
return crypto.AddressHash(pubKey)
return crypto.Address(tmhash.SumTruncated(pubKey))
}
// Bytes returns the PubKey byte format.
@@ -229,5 +229,5 @@ func (b *BatchVerifier) Add(key crypto.PubKey, msg, signature []byte) error {
}
func (b *BatchVerifier) Verify() (bool, []bool) {
return b.BatchVerifier.Verify(rand.Reader)
return b.BatchVerifier.Verify(crypto.CReader())
}

28
crypto/example_test.go Normal file
View File

@@ -0,0 +1,28 @@
// Copyright 2017 Tendermint. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package crypto_test
import (
"fmt"
"github.com/tendermint/tendermint/crypto"
)
func ExampleSha256() {
sum := crypto.Sha256([]byte("This is Tendermint"))
fmt.Printf("%x\n", sum)
// Output:
// f91afb642f3d1c87c17eb01aae5cb65c242dfdbe7cf1066cc260f4ce5d33b94e
}

11
crypto/hash.go Normal file
View File

@@ -0,0 +1,11 @@
package crypto
import (
"crypto/sha256"
)
func Sha256(bytes []byte) []byte {
hasher := sha256.New()
hasher.Write(bytes)
return hasher.Sum(nil)
}

View File

@@ -3,7 +3,7 @@ package merkle
import (
"hash"
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/tmhash"
)
// TODO: make these have a large predefined capacity
@@ -14,12 +14,12 @@ var (
// returns tmhash(<empty>)
func emptyHash() []byte {
return crypto.Checksum([]byte{})
return tmhash.Sum([]byte{})
}
// returns tmhash(0x00 || leaf)
func leafHash(leaf []byte) []byte {
return crypto.Checksum(append(leafPrefix, leaf...))
return tmhash.Sum(append(leafPrefix, leaf...))
}
// returns tmhash(0x00 || leaf)
@@ -36,7 +36,7 @@ func innerHash(left []byte, right []byte) []byte {
n := copy(data, innerPrefix)
n += copy(data[n:], left)
copy(data[n:], right)
return crypto.Checksum(data)[:]
return tmhash.Sum(data)
}
func innerHashOpt(s hash.Hash, left []byte, right []byte) []byte {
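
To make the domain separation explicit: leaves are hashed under a 0x00 prefix and inner nodes under a 0x01 prefix (the RFC 6962 convention), which is exactly the split between leafHash and innerHash above. A standard-library sketch of a two-leaf root:

package main

import (
    "crypto/sha256"
    "encoding/hex"
    "fmt"
)

var (
    leafPrefix  = []byte{0x00}
    innerPrefix = []byte{0x01}
)

// leafHash and innerHash mirror the helpers in the hunk above.
func leafHash(leaf []byte) []byte {
    h := sha256.Sum256(append(leafPrefix, leaf...))
    return h[:]
}

func innerHash(left, right []byte) []byte {
    data := append(append(append([]byte{}, innerPrefix...), left...), right...)
    h := sha256.Sum256(data)
    return h[:]
}

func main() {
    // Root of a two-leaf Merkle tree.
    root := innerHash(leafHash([]byte("tx-1")), leafHash([]byte("tx-2")))
    fmt.Println(hex.EncodeToString(root))
}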

View File

@@ -5,7 +5,7 @@ import (
"errors"
"fmt"
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/tmhash"
tmcrypto "github.com/tendermint/tendermint/proto/tendermint/crypto"
)
@@ -102,15 +102,15 @@ func (sp *Proof) ValidateBasic() error {
if sp.Index < 0 {
return errors.New("negative Index")
}
if len(sp.LeafHash) != crypto.HashSize {
return fmt.Errorf("expected LeafHash size to be %d, got %d", crypto.HashSize, len(sp.LeafHash))
if len(sp.LeafHash) != tmhash.Size {
return fmt.Errorf("expected LeafHash size to be %d, got %d", tmhash.Size, len(sp.LeafHash))
}
if len(sp.Aunts) > MaxAunts {
return fmt.Errorf("expected no more than %d aunts, got %d", MaxAunts, len(sp.Aunts))
}
for i, auntHash := range sp.Aunts {
if len(auntHash) != crypto.HashSize {
return fmt.Errorf("expected Aunts#%d size to be %d, got %d", i, crypto.HashSize, len(auntHash))
if len(auntHash) != tmhash.Size {
return fmt.Errorf("expected Aunts#%d size to be %d, got %d", i, tmhash.Size, len(auntHash))
}
}
return nil

View File

@@ -2,9 +2,9 @@ package merkle
import (
"bytes"
"crypto/sha256"
"fmt"
"github.com/tendermint/tendermint/crypto/tmhash"
tmcrypto "github.com/tendermint/tendermint/proto/tendermint/crypto"
)
@@ -79,13 +79,14 @@ func (op ValueOp) Run(args [][]byte) ([][]byte, error) {
return nil, fmt.Errorf("expected 1 arg, got %v", len(args))
}
value := args[0]
vhash := sha256.Sum256(value)
hasher := tmhash.New()
hasher.Write(value)
vhash := hasher.Sum(nil)
bz := new(bytes.Buffer)
// Wrap <op.Key, vhash> to hash the KVPair.
encodeByteSlice(bz, op.key) //nolint: errcheck // does not error
encodeByteSlice(bz, vhash[:]) //nolint: errcheck // does not error
encodeByteSlice(bz, op.key) //nolint: errcheck // does not error
encodeByteSlice(bz, vhash) //nolint: errcheck // does not error
kvhash := leafHash(bz.Bytes())
if !bytes.Equal(kvhash, op.Proof.LeafHash) {

View File

@@ -20,7 +20,7 @@ import (
"encoding/hex"
"testing"
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/tmhash"
)
func TestRFC6962Hasher(t *testing.T) {
@@ -39,7 +39,7 @@ func TestRFC6962Hasher(t *testing.T) {
// echo -n '' | sha256sum
{
desc: "RFC6962 Empty Tree",
want: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"[:crypto.HashSize*2],
want: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"[:tmhash.Size*2],
got: emptyTreeHash,
},
@@ -47,19 +47,19 @@ func TestRFC6962Hasher(t *testing.T) {
// echo -n 00 | xxd -r -p | sha256sum
{
desc: "RFC6962 Empty Leaf",
want: "6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d"[:crypto.HashSize*2],
want: "6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d"[:tmhash.Size*2],
got: emptyLeafHash,
},
// echo -n 004C313233343536 | xxd -r -p | sha256sum
{
desc: "RFC6962 Leaf",
want: "395aa064aa4c29f7010acfe3f25db9485bbd4b91897b6ad7ad547639252b4d56"[:crypto.HashSize*2],
want: "395aa064aa4c29f7010acfe3f25db9485bbd4b91897b6ad7ad547639252b4d56"[:tmhash.Size*2],
got: leafHash,
},
// echo -n 014E3132334E343536 | xxd -r -p | sha256sum
{
desc: "RFC6962 Node",
want: "aa217fe888e47007fa15edab33c2b492a722cb106c64667fc2b044444de66bbb"[:crypto.HashSize*2],
want: "aa217fe888e47007fa15edab33c2b492a722cb106c64667fc2b044444de66bbb"[:tmhash.Size*2],
got: innerHash([]byte("N123"), []byte("N456")),
},
} {

View File

@@ -7,7 +7,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/tmhash"
ctest "github.com/tendermint/tendermint/internal/libs/test"
tmrand "github.com/tendermint/tendermint/libs/rand"
)
@@ -53,7 +53,7 @@ func TestProof(t *testing.T) {
items := make([][]byte, total)
for i := 0; i < total; i++ {
items[i] = testItem(tmrand.Bytes(crypto.HashSize))
items[i] = testItem(tmrand.Bytes(tmhash.Size))
}
rootHash = HashFromByteSlices(items)
@@ -106,7 +106,7 @@ func TestHashAlternatives(t *testing.T) {
items := make([][]byte, total)
for i := 0; i < total; i++ {
items[i] = testItem(tmrand.Bytes(crypto.HashSize))
items[i] = testItem(tmrand.Bytes(tmhash.Size))
}
rootHash1 := HashFromByteSlicesIterative(items)
@@ -119,7 +119,7 @@ func BenchmarkHashAlternatives(b *testing.B) {
items := make([][]byte, total)
for i := 0; i < total; i++ {
items[i] = testItem(tmrand.Bytes(crypto.HashSize))
items[i] = testItem(tmrand.Bytes(tmhash.Size))
}
b.ResetTimer()

View File

@@ -1,15 +1,35 @@
package crypto
import (
"crypto/rand"
crand "crypto/rand"
"encoding/hex"
"io"
)
// This only uses the OS's randomness
func CRandBytes(numBytes int) []byte {
func randBytes(numBytes int) []byte {
b := make([]byte, numBytes)
_, err := rand.Read(b)
_, err := crand.Read(b)
if err != nil {
panic(err)
}
return b
}
// This only uses the OS's randomness
func CRandBytes(numBytes int) []byte {
return randBytes(numBytes)
}
// CRandHex returns a hex encoded string that's floor(numDigits/2) * 2 long.
//
// Note: CRandHex(24) gives 96 bits of randomness that
// are usually strong enough for most purposes.
func CRandHex(numDigits int) string {
return hex.EncodeToString(CRandBytes(numDigits / 2))
}
// Returns a crand.Reader.
func CReader() io.Reader {
return crand.Reader
}
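
A short usage sketch for these helpers, assuming the crypto package import path used elsewhere in this diff:

package main

import (
    "fmt"

    "github.com/tendermint/tendermint/crypto"
)

func main() {
    key := crypto.CRandBytes(32) // 32 bytes of OS randomness
    token := crypto.CRandHex(24) // 12 random bytes, hex encoded (96 bits)

    fmt.Println(len(key), len(token)) // 32 24
}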

View File

@@ -2,7 +2,6 @@ package secp256k1
import (
"bytes"
"crypto/rand"
"crypto/sha256"
"crypto/subtle"
"fmt"
@@ -71,7 +70,7 @@ func (privKey PrivKey) Type() string {
// GenPrivKey generates a new ECDSA private key on curve secp256k1 private key.
// It uses OS randomness to generate the private key.
func GenPrivKey() PrivKey {
return genPrivKey(rand.Reader)
return genPrivKey(crypto.CReader())
}
// genPrivKey generates a new secp256k1 private key using the provided reader.
@@ -191,8 +190,8 @@ var secp256k1halfN = new(big.Int).Rsh(secp256k1.S256().N, 1)
// The returned signature will be of the form R || S (in lower-S form).
func (privKey PrivKey) Sign(msg []byte) ([]byte, error) {
priv, _ := secp256k1.PrivKeyFromBytes(secp256k1.S256(), privKey)
seed := sha256.Sum256(msg)
sig, err := priv.Sign(seed[:])
sig, err := priv.Sign(crypto.Sha256(msg))
if err != nil {
return nil, err
}
@@ -221,8 +220,7 @@ func (pubKey PubKey) VerifySignature(msg []byte, sigStr []byte) bool {
return false
}
seed := sha256.Sum256(msg)
return signature.Verify(seed[:], pub)
return signature.Verify(crypto.Sha256(msg), pub)
}
// Read Signature struct from R || S. Caller needs to ensure
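
Either side of this hunk hashes the message with SHA-256 before the ECDSA sign/verify step. A minimal round-trip sketch, assuming the crypto/secp256k1 package API referenced here (GenPrivKey, Sign, PubKey, VerifySignature):

package main

import (
    "fmt"

    "github.com/tendermint/tendermint/crypto/secp256k1"
)

func main() {
    priv := secp256k1.GenPrivKey()
    msg := []byte("transfer 10 tokens")

    // Sign hashes msg with SHA-256 internally and returns an R || S signature
    // in lower-S form, as described in the hunk above.
    sig, err := priv.Sign(msg)
    if err != nil {
        panic(err)
    }

    fmt.Println("valid:", priv.PubKey().VerifySignature(msg, sig)) // valid: true
}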

View File

@@ -1,7 +1,6 @@
package sr25519
import (
"crypto/rand"
"fmt"
"github.com/oasisprotocol/curve25519-voi/primitives/sr25519"
@@ -43,5 +42,5 @@ func (b *BatchVerifier) Add(key crypto.PubKey, msg, signature []byte) error {
}
func (b *BatchVerifier) Verify() (bool, []bool) {
return b.BatchVerifier.Verify(rand.Reader)
return b.BatchVerifier.Verify(crypto.CReader())
}

View File

@@ -1,8 +1,6 @@
package sr25519
import (
"crypto/rand"
"crypto/sha256"
"encoding/json"
"fmt"
"io"
@@ -50,7 +48,7 @@ func (privKey PrivKey) Sign(msg []byte) ([]byte, error) {
st := signingCtx.NewTranscriptBytes(msg)
sig, err := privKey.kp.Sign(rand.Reader, st)
sig, err := privKey.kp.Sign(crypto.CReader(), st)
if err != nil {
return nil, fmt.Errorf("sr25519: failed to sign message: %w", err)
}
@@ -134,7 +132,7 @@ func (privKey *PrivKey) UnmarshalJSON(data []byte) error {
// It uses OS randomness in conjunction with the current global random seed
// in tendermint/libs/common to generate the private key.
func GenPrivKey() PrivKey {
return genPrivKey(rand.Reader)
return genPrivKey(crypto.CReader())
}
func genPrivKey(rng io.Reader) PrivKey {
@@ -156,9 +154,10 @@ func genPrivKey(rng io.Reader) PrivKey {
// NOTE: secret should be the output of a KDF like bcrypt,
// if it's derived from user input.
func GenPrivKeyFromSecret(secret []byte) PrivKey {
seed := sha256.Sum256(secret)
seed := crypto.Sha256(secret) // Not Ripemd160 because we want 32 bytes.
var privKey PrivKey
if err := privKey.msk.UnmarshalBinary(seed[:]); err != nil {
if err := privKey.msk.UnmarshalBinary(seed); err != nil {
panic("sr25519: failed to deserialize MiniSecretKey: " + err.Error())
}

Some files were not shown because too many files have changed in this diff.