Compare commits

..

3 Commits

Author            SHA1        Message                                                Date
William Banfield  8dfd011a7f  americanize canceled                                   2022-02-18 16:22:01 -05:00
William Banfield  d9c9f3277d  Merge branch 'master' into wb/undo-queue-buffer-limit  2022-02-18 16:17:45 -05:00
William Banfield  da767e732c  abci: undo socket buffer limit                         2022-02-18 15:47:31 -05:00
433 changed files with 11694 additions and 41019 deletions

.github/CODEOWNERS
View File

@@ -7,7 +7,4 @@
# global owners are only requested if there isn't a more specific
# codeowner specified below. For this reason, the global codeowners
# are often repeated in package-level definitions.
* @ebuchman @cmwaters @tychoish @williambanfield @creachadair @sergio-mena @jmalicevic @thanethomson @ancazamfir
# Spec related changes can be approved by the protocol design team
/spec @josef-widder @milosevic @cason
* @ebuchman @cmwaters @tychoish @williambanfield @creachadair

View File

@@ -20,11 +20,11 @@ jobs:
goos: ["linux"]
timeout-minutes: 5
steps:
- uses: actions/setup-go@v3
- uses: actions/setup-go@v2
with:
go-version: "1.17"
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
- uses: actions/checkout@v2.4.0
- uses: technote-space/get-diff-action@v6.0.1
with:
PATTERNS: |
**/**.go
@@ -41,11 +41,11 @@ jobs:
needs: build
timeout-minutes: 5
steps:
- uses: actions/setup-go@v3
- uses: actions/setup-go@v2
with:
go-version: "1.17"
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
- uses: actions/checkout@v2.4.0
- uses: technote-space/get-diff-action@v6.0.1
with:
PATTERNS: |
**/**.go
@@ -63,11 +63,11 @@ jobs:
needs: build
timeout-minutes: 5
steps:
- uses: actions/setup-go@v3
- uses: actions/setup-go@v2
with:
go-version: "1.17"
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
- uses: actions/checkout@v2.4.0
- uses: technote-space/get-diff-action@v6.0.1
with:
PATTERNS: |
**/**.go

View File

@@ -13,7 +13,7 @@ jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v2.4.0
- name: Prepare
id: prep
run: |
@@ -43,13 +43,13 @@ jobs:
- name: Login to DockerHub
if: ${{ github.event_name != 'pull_request' }}
uses: docker/login-action@v1.14.1
uses: docker/login-action@v1.12.0
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Publish to Docker Hub
uses: docker/build-push-action@v2.10.0
uses: docker/build-push-action@v2.9.0
with:
context: .
file: ./DOCKER/Dockerfile

View File

@@ -1,20 +0,0 @@
# Verify that important design docs have ToC entries.
name: Check documentation ToC
on:
pull_request:
push:
branches:
- master
jobs:
check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
with:
PATTERNS: |
docs/architecture/**
docs/rfc/**
- run: ./docs/presubmit.sh
if: env.GIT_DIFF

View File

@@ -15,11 +15,11 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/setup-go@v3
- uses: actions/setup-go@v2
with:
go-version: '1.17'
- uses: actions/checkout@v3
- uses: actions/checkout@v2.4.0
- name: Build
working-directory: test/e2e

View File

@@ -20,11 +20,11 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/setup-go@v3
- uses: actions/setup-go@v2
with:
go-version: '1.17'
- uses: actions/checkout@v3
- uses: actions/checkout@v2.4.0
with:
ref: 'v0.34.x'

View File

@@ -20,11 +20,11 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/setup-go@v3
- uses: actions/setup-go@v2
with:
go-version: '1.17'
- uses: actions/checkout@v3
- uses: actions/checkout@v2.4.0
with:
ref: 'v0.35.x'

View File

@@ -19,11 +19,11 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/setup-go@v3
- uses: actions/setup-go@v2
with:
go-version: '1.17'
- uses: actions/checkout@v3
- uses: actions/checkout@v2.4.0
- name: Build
working-directory: test/e2e

View File

@@ -14,11 +14,11 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 15
steps:
- uses: actions/setup-go@v3
- uses: actions/setup-go@v2
with:
go-version: '1.17'
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
- uses: actions/checkout@v2.4.0
- uses: technote-space/get-diff-action@v6.0.1
with:
PATTERNS: |
**/**.go

View File

@@ -13,15 +13,15 @@ jobs:
fuzz-nightly-test:
runs-on: ubuntu-latest
steps:
- uses: actions/setup-go@v3
- uses: actions/setup-go@v2
with:
go-version: '1.17'
- uses: actions/checkout@v3
- uses: actions/checkout@v2.4.0
- name: Install go-fuzz
working-directory: test/fuzz
run: go install github.com/dvyukov/go-fuzz/go-fuzz@latest github.com/dvyukov/go-fuzz/go-fuzz-build@latest
run: go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-build
- name: Fuzz mempool
working-directory: test/fuzz
@@ -39,14 +39,14 @@ jobs:
continue-on-error: true
- name: Archive crashers
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v2
with:
name: crashers
path: test/fuzz/**/crashers
retention-days: 3
- name: Archive suppressions
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v2
with:
name: suppressions
path: test/fuzz/**/suppressions

View File

@@ -46,7 +46,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout the Jepsen repository
uses: actions/checkout@v3
uses: actions/checkout@v2.4.0
with:
repository: 'tendermint/jepsen'
@@ -58,7 +58,7 @@ jobs:
run: docker exec -i jepsen-control bash -c 'source /root/.bashrc; cd /jepsen/tendermint; lein run test --nemesis ${{ github.event.inputs.nemesis }} --workload ${{ github.event.inputs.workload }} --concurrency ${{ github.event.inputs.concurrency }} --tendermint-url ${{ github.event.inputs.tendermintUrl }} --merkleeyes-url ${{ github.event.inputs.merkleeyesUrl }} --time-limit ${{ github.event.inputs.timeLimit }} ${{ github.event.inputs.dupOrSuperByzValidators }}'
- name: Archive results
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v2
with:
name: results
path: tendermint/store/latest

View File

@@ -6,7 +6,7 @@ jobs:
markdown-link-check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: gaurav-nelson/github-action-markdown-link-check@1.0.14
- uses: actions/checkout@v2.4.0
- uses: gaurav-nelson/github-action-markdown-link-check@1.0.13
with:
folder-path: "docs"

View File

@@ -1,11 +1,7 @@
name: Golang Linter
# Lint runs golangci-lint over the entire Tendermint repository.
#
# This workflow is run on every pull request and push to master.
#
# The `golangci` job will pass without running if no *.{go, mod, sum}
# files have been modified.
# Lint runs golangci-lint over the entire Tendermint repository
# This workflow is run on every pull request and push to master
# The `golangci` job will pass without running if no *.{go, mod, sum} files have been modified.
on:
pull_request:
push:
@@ -17,22 +13,17 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 8
steps:
- uses: actions/checkout@v3
- uses: actions/setup-go@v3
with:
go-version: '^1.17'
- uses: technote-space/get-diff-action@v6
- uses: actions/checkout@v2.4.0
- uses: technote-space/get-diff-action@v6.0.1
with:
PATTERNS: |
**/**.go
go.mod
go.sum
- uses: golangci/golangci-lint-action@v3.1.0
- uses: golangci/golangci-lint-action@v2.5.2
with:
# Required: the version of golangci-lint is required and
# must be specified without patch version: we always use the
# latest patch version.
version: v1.45
# Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version.
version: v1.42.1
args: --timeout 10m
github-token: ${{ secrets.github_token }}
if: env.GIT_DIFF

View File

@@ -19,7 +19,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout Code
uses: actions/checkout@v3
uses: actions/checkout@v2.4.0
- name: Lint Code Base
uses: docker://github/super-linter:v4
env:

View File

@@ -1,23 +1,18 @@
name: Check Markdown links
# Currently disabled until all links have been fixed
# name: Check Markdown links
on:
push:
branches:
- master
pull_request:
branches: [master]
# on:
# push:
# branches:
# - master
# pull_request:
# branches: [master]
jobs:
markdown-link-check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
with:
PATTERNS: |
**/**.md
- uses: creachadair/github-action-markdown-link-check@master
with:
check-modified-files-only: 'yes'
config-file: '.md-link-check.json'
if: env.GIT_DIFF
# jobs:
# markdown-link-check:
# runs-on: ubuntu-latest
# steps:
# - uses: actions/checkout@master
# - uses: gaurav-nelson/github-action-markdown-link-check@1.0.13
# with:
# check-modified-files-only: 'yes'

.github/workflows/proto-check.yml
View File

@@ -0,0 +1,24 @@
name: Proto Check
# Protobuf runs buf (https://buf.build/) lint and check-breakage
# This workflow is only run when a file in the proto directory
# has been modified.
on:
workflow_dispatch: # allow running workflow manually
pull_request:
paths:
- "proto/*"
jobs:
proto-lint:
runs-on: ubuntu-latest
timeout-minutes: 4
steps:
- uses: actions/checkout@v2.4.0
- name: lint
run: make proto-lint
proto-breakage:
runs-on: ubuntu-latest
timeout-minutes: 4
steps:
- uses: actions/checkout@v2.4.0
- name: check-breakage
run: make proto-check-breaking-ci

.github/workflows/proto-dockerfile.yml
View File

@@ -0,0 +1,64 @@
# This workflow (re)builds and pushes a Docker image containing the
# protobuf build tools used by the other workflows.
#
# When making changes that require updates to the builder image, you
# should merge the updates first and wait for this workflow to complete,
# so that the changes will be available for the dependent workflows.
#
name: Build & Push Proto Builder Image
on:
pull_request:
paths:
- "proto/*"
push:
branches:
- master
paths:
- "proto/*"
schedule:
# run this job once a month to receive any go or buf updates
- cron: "0 9 1 * *"
env:
REGISTRY: ghcr.io
IMAGE_NAME: tendermint/docker-build-proto
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2.4.0
- name: Check out and assign tags
id: prep
run: |
DOCKER_IMAGE="${REGISTRY}/${IMAGE_NAME}"
VERSION=noop
if [[ "$GITHUB_REF" == "refs/tags/*" ]]; then
VERSION="${GITHUB_REF#refs/tags/}"
elif [[ "$GITHUB_REF" == "refs/heads/*" ]]; then
VERSION="$(echo "${GITHUB_REF#refs/heads/}" | sed -r 's#/+#-#g')"
if [[ "${{ github.event.repository.default_branch }}" = "$VERSION" ]]; then
VERSION=latest
fi
fi
TAGS="${DOCKER_IMAGE}:${VERSION}"
echo ::set-output name=tags::"${TAGS}"
- name: Set up docker buildx
uses: docker/setup-buildx-action@v1.6.0
- name: Log in to the container registry
uses: docker/login-action@v1.12.0
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build and publish image
uses: docker/build-push-action@v2.9.0
with:
context: ./proto
file: ./proto/Dockerfile
push: ${{ github.event_name != 'pull_request' }}
tags: ${{ steps.prep.outputs.tags }}

View File

@@ -1,21 +0,0 @@
name: Protobuf Lint
on:
pull_request:
paths:
- 'proto/**'
push:
branches:
- master
paths:
- 'proto/**'
jobs:
lint:
runs-on: ubuntu-latest
timeout-minutes: 5
steps:
- uses: actions/checkout@v3
- uses: bufbuild/buf-setup-action@v1.3.1
- uses: bufbuild/buf-lint-action@v1
with:
input: 'proto'

View File

@@ -12,11 +12,11 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v2.4.0
with:
fetch-depth: 0
- uses: actions/setup-go@v3
- uses: actions/setup-go@v2
with:
go-version: '1.17'
@@ -27,13 +27,11 @@ jobs:
version: latest
args: build --skip-validate # skip validate skips initial sanity checks in order to be able to fully run
- run: echo https://github.com/tendermint/tendermint/blob/${GITHUB_REF#refs/tags/}/CHANGELOG.md#${GITHUB_REF#refs/tags/} > ../release_notes.md
- name: Release
uses: goreleaser/goreleaser-action@v2
if: startsWith(github.ref, 'refs/tags/')
with:
version: latest
args: release --rm-dist --release-notes=../release_notes.md
args: release --rm-dist
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

View File

@@ -7,7 +7,7 @@ jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v5
- uses: actions/stale@v4
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
stale-pr-message: "This pull request has been automatically marked as stale because it has not had

View File

@@ -16,11 +16,11 @@ jobs:
matrix:
part: ["00", "01", "02", "03", "04", "05"]
steps:
- uses: actions/setup-go@v3
- uses: actions/setup-go@v2
with:
go-version: "1.17"
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
- uses: actions/checkout@v2.4.0
- uses: technote-space/get-diff-action@v6.0.1
with:
PATTERNS: |
**/**.go
@@ -32,7 +32,7 @@ jobs:
run: |
make test-group-${{ matrix.part }} NUM_SPLIT=6
if: env.GIT_DIFF
- uses: actions/upload-artifact@v3
- uses: actions/upload-artifact@v2
with:
name: "${{ github.sha }}-${{ matrix.part }}-coverage"
path: ./build/${{ matrix.part }}.profile.out
@@ -41,8 +41,8 @@ jobs:
runs-on: ubuntu-latest
needs: tests
steps:
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
- uses: actions/checkout@v2.4.0
- uses: technote-space/get-diff-action@v6.0.1
with:
PATTERNS: |
**/**.go
@@ -50,26 +50,26 @@ jobs:
go.mod
go.sum
Makefile
- uses: actions/download-artifact@v3
- uses: actions/download-artifact@v2
with:
name: "${{ github.sha }}-00-coverage"
if: env.GIT_DIFF
- uses: actions/download-artifact@v3
- uses: actions/download-artifact@v2
with:
name: "${{ github.sha }}-01-coverage"
if: env.GIT_DIFF
- uses: actions/download-artifact@v3
- uses: actions/download-artifact@v2
with:
name: "${{ github.sha }}-02-coverage"
if: env.GIT_DIFF
- uses: actions/download-artifact@v3
- uses: actions/download-artifact@v2
with:
name: "${{ github.sha }}-03-coverage"
if: env.GIT_DIFF
- run: |
cat ./*profile.out | grep -v "mode: set" >> coverage.txt
if: env.GIT_DIFF
- uses: codecov/codecov-action@v3.0.0
- uses: codecov/codecov-action@v2.1.0
with:
file: ./coverage.txt
if: env.GIT_DIFF

View File

@@ -1,6 +0,0 @@
{
"retryOn429": true,
"retryCount": 5,
"fallbackRetryDelay": "30s",
"aliveStatusCodes": [200, 206, 503]
}

View File

@@ -2,60 +2,6 @@
Friendly reminder: We have a [bug bounty program](https://hackerone.com/cosmos).
## v0.35.4
April 18, 2022
Special thanks to external contributors on this release: @firelizzard18
### FEATURES
- [cli] [\#8300](https://github.com/tendermint/tendermint/pull/8300) Add a tool to update old config files to the latest version [backport [\#8281](https://github.com/tendermint/tendermint/pull/8281)]. (@creachadair)
### IMPROVEMENTS
### BUG FIXES
- [cli] [\#8294](https://github.com/tendermint/tendermint/pull/8294) keymigrate: ensure block hash keys are correctly translated. (@creachadair)
- [cli] [\#8352](https://github.com/tendermint/tendermint/pull/8352) keymigrate: ensure transaction hash keys are correctly translated. (@creachadair)
## v0.35.3
April 8, 2022
### FEATURES
- [cli] [\#8081](https://github.com/tendermint/tendermint/pull/8081) add a safer-to-use `reset-state` command. (@marbar3778)
### IMPROVEMENTS
- [consensus] [\#8138](https://github.com/tendermint/tendermint/pull/8138) change lock handling in reactor and handleMsg for RoundState. (@williambanfield)
### BUG FIXES
- [cli] [\#8276](https://github.com/tendermint/tendermint/pull/8276) scmigrate: ensure target key is correctly renamed. (@creachadair)
## v0.35.2
February 28, 2022
Special thanks to external contributors on this release: @ashcherbakov, @yihuang, @waelsy123
### IMPROVEMENTS
- [consensus] [\#7875](https://github.com/tendermint/tendermint/pull/7875) additional timing metrics. (@williambanfield)
### BUG FIXES
- [abci] [\#7990](https://github.com/tendermint/tendermint/pull/7990) revert buffer limit change. (@williambanfield)
- [cli] [#7837](https://github.com/tendermint/tendermint/pull/7837) fix app hash in state rollback. (@yihuang)
- [cli] [\#7869](https://github.com/tendermint/tendermint/pull/7869) Update unsafe-reset-all command to match release v35. (waelsy123)
- [light] [\#7640](https://github.com/tendermint/tendermint/pull/7640) Light Client: fix absence proof verification (@ashcherbakov)
- [light] [\#7641](https://github.com/tendermint/tendermint/pull/7641) Light Client: fix querying against the latest height (@ashcherbakov)
- [mempool] [\#7718](https://github.com/tendermint/tendermint/pull/7718) return duplicate tx errors more consistently. (@tychoish)
- [rpc] [\#7744](https://github.com/tendermint/tendermint/pull/7744) fix layout of endpoint list. (@creachadair)
- [statesync] [\#7886](https://github.com/tendermint/tendermint/pull/7886) assert app version matches. (@cmwaters)
## v0.35.1
January 26, 2022
@@ -127,7 +73,7 @@ Special thanks to external contributors on this release: @JayT106,
- [\#7106](https://github.com/tendermint/tendermint/pull/7106) Revert mutex change to ABCI Clients (@tychoish).
- [\#7142](https://github.com/tendermint/tendermint/pull/7142) mempool: remove panic when recheck-tx was not sent to ABCI application (@williambanfield).
- [consensus]: [\#7060](https://github.com/tendermint/tendermint/pull/7060) <!-- markdown-link-check-disable-line -->
- [consensus]: [\#7060](https://github.com/tendermint/tendermint/pull/7060)
wait until peerUpdates channel is closed to close remaining peers (@williambanfield)
- [privval] [\#5638](https://github.com/tendermint/tendermint/pull/5638) Increase read/write timeout to 5s and calculate ping interval based on it (@JoeKash)
- [evidence] [\#6375](https://github.com/tendermint/tendermint/pull/6375) Fix bug with inconsistent LightClientAttackEvidence hashing (cmwaters)
@@ -263,44 +209,6 @@ Special thanks to external contributors on this release: @JayT106,
- [cmd/tendermint/commands] [\#6623](https://github.com/tendermint/tendermint/pull/6623) replace `$HOME/.some/test/dir` with `t.TempDir` (@tanyabouman)
- [statesync] \6807 Implement P2P state provider as an alternative to RPC (@cmwaters)
## v0.34.19
### BUG FIXES
- [cli] [\#8270](https://github.com/tendermint/tendermint/issues/8270) fix reset commands (@alexanderbez).
## v0.34.18
### BREAKING CHANGES
- CLI/RPC/Config
- [cli] [\#8258](https://github.com/tendermint/tendermint/pull/8258) Fix a bug in the cli that caused `unsafe-reset-all` to panic
## v0.34.17
### BREAKING CHANGES
- CLI/RPC/Config
- [cli] [\#8081](https://github.com/tendermint/tendermint/issues/8081) make the reset command safe to use (@marbar3778).
### BUG FIXES
- [consensus] [\#8079](https://github.com/tendermint/tendermint/issues/8079) start the timeout ticker before relay (backport #7844) (@creachadair).
- [consensus] [\#7992](https://github.com/tendermint/tendermint/issues/7992) [\#7994](https://github.com/tendermint/tendermint/issues/7994) change lock handling in handleMsg and reactor to alleviate issues gossiping during long ABCI calls (@williambanfield).
## v0.34.16
Special thanks to external contributors on this release: @yihuang
### BUG FIXES
- [consensus] [\#7617](https://github.com/tendermint/tendermint/issues/7617) calculate prevote message delay metric (backport #7551) (@williambanfield).
- [consensus] [\#7631](https://github.com/tendermint/tendermint/issues/7631) check proposal non-nil in prevote message delay metric (backport #7625) (@williambanfield).
- [statesync] [\#7885](https://github.com/tendermint/tendermint/issues/7885) statesync: assert app version matches (backport #7856) (@cmwaters).
- [statesync] [\#7881](https://github.com/tendermint/tendermint/issues/7881) fix app hash in state rollback (backport #7837) (@cmwaters).
- [cli] [#7837](https://github.com/tendermint/tendermint/pull/7837) fix app hash in state rollback. (@yihuang).
## v0.34.15
Special thanks to external contributors on this release: @thanethomson
@@ -1072,7 +980,7 @@ and a validator address plus a timestamp. Note we may remove the validator
address & timestamp fields in the future (see ADR-25).
`lite2` package has been added to solve `lite` issues and introduce weak
subjectivity interface. Refer to the [spec](./spec/consensus/light-client/) for complete details.
subjectivity interface. Refer to the [spec](https://github.com/tendermint/spec/blob/master/spec/consensus/light-client.md) for complete details.
`lite` package is now deprecated and will be removed in v0.34 release.
### BREAKING CHANGES:
@@ -1433,7 +1341,7 @@ Special thanks to external contributors on this release: @jon-certik, @gracenoah
*August 28, 2019*
@climber73 wrote the [Writing a Tendermint Core application in Java
(gRPC)](https://github.com/tendermint/tendermint/blob/master/docs/guides/java.md) <!-- markdown-link-check-disable-line -->
(gRPC)](https://github.com/tendermint/tendermint/blob/master/docs/guides/java.md)
guide.
Special thanks to external contributors on this release:
@@ -2012,7 +1920,7 @@ more details.
- [rpc] [\#3269](https://github.com/tendermint/tendermint/issues/2826) Limit number of unique clientIDs with open subscriptions. Configurable via `rpc.max_subscription_clients`
- [rpc] [\#3269](https://github.com/tendermint/tendermint/issues/2826) Limit number of unique queries a given client can subscribe to at once. Configurable via `rpc.max_subscriptions_per_client`.
- [rpc] [\#3435](https://github.com/tendermint/tendermint/issues/3435) Default ReadTimeout and WriteTimeout changed to 10s. WriteTimeout can increased by setting `rpc.timeout_broadcast_tx_commit` in the config.
- [rpc/client] [\#3269](https://github.com/tendermint/tendermint/issues/3269) Update `EventsClient` interface to reflect new pubsub/eventBus API [ADR-33](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-033-pubsub.md). This includes `Subscribe`, `Unsubscribe`, and `UnsubscribeAll` methods.
- [rpc/client] [\#3269](https://github.com/tendermint/tendermint/issues/3269) Update `EventsClient` interface to reflect new pubsub/eventBus API [ADR-33](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-033-pubsub.md). This includes `Subscribe`, `Unsubscribe`, and `UnsubscribeAll` methods.
* Apps
- [abci] [\#3403](https://github.com/tendermint/tendermint/issues/3403) Remove `time_iota_ms` from BlockParams. This is a
@@ -2065,7 +1973,7 @@ more details.
- [blockchain] [\#3358](https://github.com/tendermint/tendermint/pull/3358) Fix timer leak in `BlockPool` (@guagualvcha)
- [cmd] [\#3408](https://github.com/tendermint/tendermint/issues/3408) Fix `testnet` command's panic when creating non-validator configs (using `--n` flag) (@srmo)
- [libs/db/remotedb/grpcdb] [\#3402](https://github.com/tendermint/tendermint/issues/3402) Close Iterator/ReverseIterator after use
- [libs/pubsub] [\#951](https://github.com/tendermint/tendermint/issues/951), [\#1880](https://github.com/tendermint/tendermint/issues/1880) Use non-blocking send when dispatching messages [ADR-33](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-033-pubsub.md)
- [libs/pubsub] [\#951](https://github.com/tendermint/tendermint/issues/951), [\#1880](https://github.com/tendermint/tendermint/issues/1880) Use non-blocking send when dispatching messages [ADR-33](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-033-pubsub.md)
- [lite] [\#3364](https://github.com/tendermint/tendermint/issues/3364) Fix `/validators` and `/abci_query` proxy endpoints
(@guagualvcha)
- [p2p/conn] [\#3347](https://github.com/tendermint/tendermint/issues/3347) Reject all-zero shared secrets in the Diffie-Hellman step of secret-connection
@@ -2769,7 +2677,7 @@ Special thanks to external contributors on this release:
This release is mostly about the ConsensusParams - removing fields and enforcing MaxGas.
It also addresses some issues found via security audit, removes various unused
functions from `libs/common`, and implements
[ADR-012](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-012-peer-transport.md).
[ADR-012](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-012-peer-transport.md).
BREAKING CHANGES:
@@ -2834,7 +2742,7 @@ are affected by a change.
A few more breaking changes are in the works - each will come with a clear
Architecture Decision Record (ADR) explaining the change. You can review ADRs
[here](https://github.com/tendermint/tendermint/tree/master/docs/architecture)
[here](https://github.com/tendermint/tendermint/tree/develop/docs/architecture)
or in the [open Pull Requests](https://github.com/tendermint/tendermint/pulls).
You can also check in on the [issues marked as
breaking](https://github.com/tendermint/tendermint/issues?q=is%3Aopen+is%3Aissue+label%3Abreaking).
@@ -2850,7 +2758,7 @@ BREAKING CHANGES:
- [abci] Added address of the original proposer of the block to Header
- [abci] Change ABCI Header to match Tendermint exactly
- [abci] [\#2159](https://github.com/tendermint/tendermint/issues/2159) Update use of `Validator` (see
[ADR-018](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-018-ABCI-Validators.md)):
[ADR-018](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-018-ABCI-Validators.md)):
- Remove PubKey from `Validator` (so it's just Address and Power)
- Introduce `ValidatorUpdate` (with just PubKey and Power)
- InitChain and EndBlock use ValidatorUpdate
@@ -2872,7 +2780,7 @@ BREAKING CHANGES:
- [state] [\#1815](https://github.com/tendermint/tendermint/issues/1815) Validator set changes are now delayed by one block (!)
- Add NextValidatorSet to State, changes on-disk representation of state
- [state] [\#2184](https://github.com/tendermint/tendermint/issues/2184) Enforce ConsensusParams.BlockSize.MaxBytes (See
[ADR-020](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-020-block-size.md)).
[ADR-020](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-020-block-size.md)).
- Remove ConsensusParams.BlockSize.MaxTxs
- Introduce maximum sizes for all components of a block, including ChainID
- [types] Updates to the block Header:
@@ -2883,7 +2791,7 @@ BREAKING CHANGES:
- [consensus] [\#2203](https://github.com/tendermint/tendermint/issues/2203) Implement BFT time
- Timestamp in block must be monotonic and equal the median of timestamps in block's LastCommit
- [crypto] [\#2239](https://github.com/tendermint/tendermint/issues/2239) Secp256k1 signature changes (See
[ADR-014](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-014-secp-malleability.md)):
[ADR-014](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-014-secp-malleability.md)):
- format changed from DER to `r || s`, both little endian encoded as 32 bytes.
- malleability removed by requiring `s` to be in canonical form.
@@ -3113,7 +3021,7 @@ BREAKING CHANGES:
FEATURES
- [cmd] Added metrics (served under `/metrics` using a Prometheus client;
disabled by default). See the new `instrumentation` section in the config and
[metrics](https://github.com/tendermint/tendermint/blob/master/docs/nodes/metrics.md)
[metrics](https://tendermint.readthedocs.io/projects/tools/en/develop/metrics.html)
guide.
- [p2p] Add IPv6 support to peering.
- [p2p] Add `external_address` to config to allow specifying the address for
@@ -3227,7 +3135,7 @@ BREAKING:
FEATURES
- [rpc] the RPC documentation is now published to `https://tendermint.github.io/slate`
- [rpc] the RPC documentation is now published to https://tendermint.github.io/slate
- [p2p] AllowDuplicateIP config option to refuse connections from same IP.
- true by default for now, false by default in next breaking release
- [docs] Add docs for query, tx indexing, events, pubsub
@@ -3705,7 +3613,7 @@ containing substructs: `BaseConfig`, `P2PConfig`, `MempoolConfig`, `ConsensusCon
- Change some [function and method signatures](https://gist.github.com/ebuchman/640d5fc6c2605f73497992fe107ebe0b) to accommodate new config
- Logger
- Replace static `log15` logger with a simple interface, and provide a new implementation using `go-kit`. <!-- markdown-link-check-disable-next-line -->
- Replace static `log15` logger with a simple interface, and provide a new implementation using `go-kit`.
See our new [logging library](https://github.com/tendermint/tmlibs/log) and [blog post](https://tendermint.com/blog/abstracting-the-logger-interface-in-go) for more details
- Levels `warn` and `notice` are removed (you may need to change them in your `config.toml`!)
- Change some [function and method signatures](https://gist.github.com/ebuchman/640d5fc6c2605f73497992fe107ebe0b) to accept a logger

View File

@@ -17,15 +17,10 @@ Special thanks to external contributors on this release:
- [mempool] \#7171 Remove legacy mempool implementation. (@tychoish)
- [rpc] \#7575 Rework how RPC responses are written back via HTTP. (@creachadair)
- [rpc] \#7713 Remove unused options for websocket clients. (@creachadair)
- [config] \#7930 Add new event subscription options and defaults. (@creachadair)
- [rpc] \#7982 Add new Events interface and deprecate Subscribe. (@creachadair)
- [cli] \#8081 make the reset command safe to use by introducing `reset-state` command. Fixed by \#8259. (@marbar3778, @cmwaters)
- [config] \#8222 default indexer configuration to null. (@creachadair)
- Apps
- [tendermint/spec] \#7804 Migrate spec from [spec repo](https://github.com/tendermint/spec).
- [abci] \#7984 Remove the locks preventing concurrent use of ABCI applications by Tendermint. (@tychoish)
- P2P Protocol
@@ -62,14 +57,12 @@ Special thanks to external contributors on this release:
- [consensus] \#7415 Update proposal validation logic to Prevote nil if a proposal does not meet the conditions for Timeliness per the proposer-based timestamp specification. (@anca)
- [consensus] \#7382 Update block validation to no longer require the block timestamp to be the median of the timestamps of the previous commit. (@anca)
- [consensus] \#7711 Use the proposer timestamp for the first height instead of the genesis time. Chains will still start consensus at the genesis time. (@anca)
- [cli] \#8281 Add a tool to update old config files to the latest version. (@creachadair)
### IMPROVEMENTS
- [internal/protoio] \#7325 Optimized `MarshalDelimited` by inlining the common case and using a `sync.Pool` in the worst case. (@odeke-em)
- [consensus] \#6969 remove logic to 'unlock' a locked block.
- [evidence] \#7700 Evidence messages contain single Evidence instead of EvidenceList (@jmalicevic)
- [evidence] \#7802 Evidence pool emits events when evidence is validated and updates a metric when the number of evidence in the evidence pool changes. (@jmalicevic)
- [pubsub] \#7319 Performance improvements for the event query API (@creachadair)
- [node] \#7521 Define concrete type for seed node implementation (@spacech1mp)
- [rpc] \#7612 paginate mempool /unconfirmed_txs rpc endpoint (@spacech1mp)
@@ -81,7 +74,3 @@ Special thanks to external contributors on this release:
- fix: assignment copies lock value in `BitArray.UnmarshalJSON()` (@lklimek)
- [light] \#7640 Light Client: fix absence proof verification (@ashcherbakov)
- [light] \#7641 Light Client: fix querying against the latest height (@ashcherbakov)
- [cli] [#7837](https://github.com/tendermint/tendermint/pull/7837) fix app hash in state rollback. (@yihuang)
- [cli] \#8276 scmigrate: ensure target key is correctly renamed. (@creachadair)
- [cli] \#8294 keymigrate: ensure block hash keys are correctly translated. (@creachadair)
- [cli] \#8352 keymigrate: ensure transaction hash keys are correctly translated. (@creachadair)

View File

@@ -20,7 +20,7 @@ This code of conduct applies to all projects run by the Tendermint/COSMOS team a
* Please keep unstructured critique to a minimum. If you have solid ideas you want to experiment with, make a fork and see how it works.
* We will exclude you from interaction if you insult, demean or harass anyone. That is not welcome behaviour. We interpret the term “harassment” as including the definition in the [Citizen Code of Conduct](https://github.com/stumpsyn/policies/blob/master/citizen_code_of_conduct.md); if you have any lack of clarity about what might be included in that concept, please read their definition. In particular, we don't tolerate behavior that excludes people in socially marginalized groups.
* We will exclude you from interaction if you insult, demean or harass anyone. That is not welcome behaviour. We interpret the term “harassment” as including the definition in the [Citizen Code of Conduct](http://citizencodeofconduct.org/); if you have any lack of clarity about what might be included in that concept, please read their definition. In particular, we don't tolerate behavior that excludes people in socially marginalized groups.
* Private harassment is also unacceptable. No matter who you are, if you feel you have been or are being harassed or made uncomfortable by a community member, please contact one of the channel admins or the person mentioned above immediately. Whether you're a regular contributor or a newcomer, we care about making this community a safe place for you and we've got your back.

View File

@@ -105,33 +105,11 @@ specify exactly the dependency you want to update, eg.
## Protobuf
We use [Protocol Buffers](https://developers.google.com/protocol-buffers) along
with [`gogoproto`](https://github.com/gogo/protobuf) to generate code for use
across Tendermint Core.
We use [Protocol Buffers](https://developers.google.com/protocol-buffers) along with [gogoproto](https://github.com/gogo/protobuf) to generate code for use across Tendermint Core.
To generate proto stubs, lint, and check protos for breaking changes, you will
need to install [buf](https://buf.build/) and `gogoproto`. Then, from the root
of the repository, run:
For linting, checking breaking changes and generating proto stubs, we use [buf](https://buf.build/). If you would like to run linting and check if the changes you have made are breaking then you will need to have docker running locally. Then the linting cmd will be `make proto-lint` and the breaking changes check will be `make proto-check-breaking`.
```bash
# Lint all of the .proto files in proto/tendermint
make proto-lint
# Check if any of your local changes (prior to committing to the Git repository)
# are breaking
make proto-check-breaking
# Generate Go code from the .proto files in proto/tendermint
make proto-gen
```
To automatically format `.proto` files, you will need
[`clang-format`](https://clang.llvm.org/docs/ClangFormat.html) installed. Once
installed, you can run:
```bash
make proto-format
```
We use [Docker](https://www.docker.com/) to generate the protobuf stubs. To generate the stubs yourself, make sure docker is running then run `make proto-gen`. This command uses the spec repo to get the necessary protobuf files for generating the go code. If you are modifying the proto files manually for changes in the core data structures, you will need to clone them into the go repo and comment out lines 22-37 of the file `./scripts/protocgen.sh`.
### Visual Studio Code

DOCKER/.gitignore
View File

@@ -0,0 +1 @@
tendermint

View File

@@ -2,7 +2,7 @@
FROM golang:1.17-alpine as builder
RUN apk update && \
apk upgrade && \
apk --no-cache add make git
apk --no-cache add make
COPY / /tendermint
WORKDIR /tendermint
RUN make build-linux
@@ -53,3 +53,4 @@ CMD ["start"]
# Expose the data directory as a volume since there's mutable state in there
VOLUME [ "$TMHOME" ]

View File

@@ -0,0 +1,28 @@
FROM amazonlinux:2
RUN yum -y update && \
yum -y install wget
RUN wget http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm && \
rpm -ivh epel-release-latest-7.noarch.rpm
RUN yum -y groupinstall "Development Tools"
RUN yum -y install leveldb-devel which
ENV GOVERSION=1.16.5
RUN cd /tmp && \
wget https://dl.google.com/go/go${GOVERSION}.linux-amd64.tar.gz && \
tar -C /usr/local -xf go${GOVERSION}.linux-amd64.tar.gz && \
mkdir -p /go/src && \
mkdir -p /go/bin
ENV PATH=$PATH:/usr/local/go/bin:/go/bin
ENV GOBIN=/go/bin
ENV GOPATH=/go/src
RUN mkdir -p /tendermint
WORKDIR /tendermint
CMD ["/usr/bin/make", "build", "TENDERMINT_BUILD_OPTIONS=cleveldb"]

DOCKER/Dockerfile.testing
View File

@@ -0,0 +1,16 @@
FROM golang:latest
# Grab deps (jq, hexdump, xxd, killall)
RUN apt-get update && \
apt-get install -y --no-install-recommends \
jq bsdmainutils vim-common psmisc netcat
# Add testing deps for curl
RUN echo 'deb http://httpredir.debian.org/debian testing main non-free contrib' >> /etc/apt/sources.list && \
apt-get update && \
apt-get install -y --no-install-recommends curl
VOLUME /go
EXPOSE 26656
EXPOSE 26657

DOCKER/Makefile
View File

@@ -0,0 +1,13 @@
build:
@sh -c "'$(CURDIR)/build.sh'"
push:
@sh -c "'$(CURDIR)/push.sh'"
build_testing:
docker build --tag tendermint/testing -f ./Dockerfile.testing .
build_amazonlinux_buildimage:
docker build -t "tendermint/tendermint:build_c-amazonlinux" -f Dockerfile.build_c-amazonlinux .
.PHONY: build push build_testing build_amazonlinux_buildimage

View File

@@ -8,7 +8,7 @@ Official releases can be found [here](https://github.com/tendermint/tendermint/r
The Dockerfile for tendermint is not expected to change in the near future. The master file used for all builds can be found [here](https://raw.githubusercontent.com/tendermint/tendermint/master/DOCKER/Dockerfile).
Respective versioned files can be found at `https://raw.githubusercontent.com/tendermint/tendermint/vX.XX.XX/DOCKER/Dockerfile` (replace the Xs with the version number).
Respective versioned files can be found <https://raw.githubusercontent.com/tendermint/tendermint/vX.XX.XX/DOCKER/Dockerfile> (replace the Xs with the version number).
## Quick reference

DOCKER/build.sh
View File

@@ -0,0 +1,20 @@
#!/usr/bin/env bash
set -e
# Get the tag from the version, or try to figure it out.
if [ -z "$TAG" ]; then
TAG=$(awk -F\" '/TMCoreSemVer =/ { print $2; exit }' < ../version/version.go)
fi
if [ -z "$TAG" ]; then
echo "Please specify a tag."
exit 1
fi
TAG_NO_PATCH=${TAG%.*}
read -p "==> Build 3 docker images with the following tags (latest, $TAG, $TAG_NO_PATCH)? y/n" -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]
then
docker build -t "tendermint/tendermint" -t "tendermint/tendermint:$TAG" -t "tendermint/tendermint:$TAG_NO_PATCH" .
fi

DOCKER/push.sh
View File

@@ -0,0 +1,22 @@
#!/usr/bin/env bash
set -e
# Get the tag from the version, or try to figure it out.
if [ -z "$TAG" ]; then
TAG=$(awk -F\" '/TMCoreSemVer =/ { print $2; exit }' < ../version/version.go)
fi
if [ -z "$TAG" ]; then
echo "Please specify a tag."
exit 1
fi
TAG_NO_PATCH=${TAG%.*}
read -p "==> Push 3 docker images with the following tags (latest, $TAG, $TAG_NO_PATCH)? y/n" -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]
then
docker push "tendermint/tendermint:latest"
docker push "tendermint/tendermint:$TAG"
docker push "tendermint/tendermint:$TAG_NO_PATCH"
fi

View File

@@ -13,6 +13,8 @@ endif
LD_FLAGS = -X github.com/tendermint/tendermint/version.TMVersion=$(VERSION)
BUILD_FLAGS = -mod=readonly -ldflags "$(LD_FLAGS)"
BUILD_IMAGE := ghcr.io/tendermint/docker-build-proto
DOCKER_PROTO_BUILDER := docker run -v $(shell pwd):/workspace --workdir /workspace $(BUILD_IMAGE)
CGO_ENABLED ?= 0
# handle nostrip
@@ -71,57 +73,41 @@ install:
$(BUILDDIR)/:
mkdir -p $@
# The Docker image containing the generator, formatter, and linter.
# This is generated by proto/Dockerfile. To update tools, make changes
# there and run the Build & Push Proto Builder Image workflow.
IMAGE := ghcr.io/tendermint/docker-build-proto:latest
DOCKER_PROTO_BUILDER := docker run -v $(shell pwd):/workspace --workdir /workspace $(IMAGE)
HTTPS_GIT := https://github.com/tendermint/tendermint.git
###############################################################################
### Protobuf ###
###############################################################################
check-proto-deps:
ifeq (,$(shell which buf))
$(error "buf is required for Protobuf building, linting and breakage checking. See https://docs.buf.build/installation for installation instructions.")
endif
ifeq (,$(shell which protoc-gen-gogofaster))
$(error "gogofaster plugin for protoc is required. Run 'go install github.com/gogo/protobuf/protoc-gen-gogofaster@latest' to install")
endif
.PHONY: check-proto-deps
proto-all: proto-lint proto-check-breaking
.PHONY: proto-all
check-proto-format-deps:
ifeq (,$(shell which clang-format))
$(error "clang-format is required for Protobuf formatting. See instructions for your platform on how to install it.")
endif
.PHONY: check-proto-format-deps
proto-gen: check-proto-deps
proto-gen:
@echo "Generating Protobuf files"
@buf generate
@mv ./proto/tendermint/abci/types.pb.go ./abci/types/
@$(DOCKER_PROTO_BUILDER) buf generate --template=./buf.gen.yaml --config ./buf.yaml
.PHONY: proto-gen
# These targets are provided for convenience and are intended for local
# execution only.
proto-lint: check-proto-deps
@echo "Linting Protobuf files"
@buf lint
proto-lint:
@$(DOCKER_PROTO_BUILDER) buf lint --error-format=json --config ./buf.yaml
.PHONY: proto-lint
proto-format: check-proto-format-deps
proto-format:
@echo "Formatting Protobuf files"
@find . -name '*.proto' -path "./proto/*" -exec clang-format -i {} \;
@$(DOCKER_PROTO_BUILDER) find . -name '*.proto' -path "./proto/*" -exec clang-format -i {} \;
.PHONY: proto-format
proto-check-breaking: check-proto-deps
@echo "Checking for breaking changes in Protobuf files against local branch"
@echo "Note: This is only useful if your changes have not yet been committed."
@echo " Otherwise read up on buf's \"breaking\" command usage:"
@echo " https://docs.buf.build/breaking/usage"
@buf breaking --against ".git"
proto-check-breaking:
@$(DOCKER_PROTO_BUILDER) buf breaking --against .git --config ./buf.yaml
.PHONY: proto-check-breaking
# TODO: Should be removed when work on ABCI++ is complete.
# For more information, see https://github.com/tendermint/tendermint/issues/8066
abci-proto-gen:
./scripts/abci-gen.sh
.PHONY: abci-proto-gen
proto-check-breaking-ci:
@$(DOCKER_PROTO_BUILDER) buf breaking --against $(HTTPS_GIT) --config ./buf.yaml
.PHONY: proto-check-breaking-ci
###############################################################################
### Build ABCI ###
@@ -178,7 +164,7 @@ go.sum: go.mod
draw_deps:
@# requires brew install graphviz or apt-get install graphviz
go install github.com/RobotsAndPencils/goviz@latest
go get github.com/RobotsAndPencils/goviz
@goviz -i github.com/tendermint/tendermint/cmd/tendermint -d 3 | dot -Tpng -o dependency-graph.png
.PHONY: draw_deps
@@ -243,8 +229,10 @@ build-docs:
### Docker image ###
###############################################################################
build-docker:
build-docker: build-linux
cp $(BUILDDIR)/tendermint DOCKER/tendermint
docker build --label=tendermint --tag="tendermint/tendermint" -f DOCKER/Dockerfile .
rm -rf DOCKER/tendermint
.PHONY: build-docker
@@ -341,3 +329,4 @@ split-test-packages:$(BUILDDIR)/packages.txt
split -d -n l/$(NUM_SPLIT) $< $<.
test-group-%:split-test-packages
cat $(BUILDDIR)/packages.txt.$* | xargs go test -mod=readonly -timeout=5m -race -coverprofile=$(BUILDDIR)/$*.profile.out

View File

@@ -3,7 +3,7 @@
![banner](docs/tendermint-core-image.jpg)
[Byzantine-Fault Tolerant](https://en.wikipedia.org/wiki/Byzantine_fault_tolerance)
[State Machine Replication](https://en.wikipedia.org/wiki/State_machine_replication).
[State Machines](https://en.wikipedia.org/wiki/State_machine_replication).
Or [Blockchain](<https://en.wikipedia.org/wiki/Blockchain_(database)>), for short.
[![version](https://img.shields.io/github/tag/tendermint/tendermint.svg)](https://github.com/tendermint/tendermint/releases/latest)
@@ -20,14 +20,10 @@ Or [Blockchain](<https://en.wikipedia.org/wiki/Blockchain_(database)>), for shor
Tendermint Core is a Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine - written in any programming language - and securely replicates it on many machines.
For protocol details, refer to the [Tendermint Specification](./spec/README.md).
For protocol details, see [the specification](https://github.com/tendermint/spec).
For detailed analysis of the consensus protocol, including safety and liveness proofs,
read our paper, "[The latest gossip on BFT consensus](https://arxiv.org/abs/1807.04938)".
## Documentation
Complete documentation can be found on the [website](https://docs.tendermint.com/).
see our recent paper, "[The latest gossip on BFT consensus](https://arxiv.org/abs/1807.04938)".
## Releases
@@ -37,7 +33,7 @@ Tendermint has been in the production of private and public environments, most n
See below for more details about [versioning](#versioning).
In any case, if you intend to run Tendermint in production, we're happy to help. You can
contact us [over email](mailto:hello@interchain.io) or [join the chat](https://discord.gg/cosmosnetwork).
contact us [over email](mailto:hello@interchain.berlin) or [join the chat](https://discord.gg/cosmosnetwork).
More on how releases are conducted can be found [here](./RELEASES.md).
@@ -56,15 +52,20 @@ to notify you of vulnerabilities and fixes in Tendermint Core. You can subscribe
|-------------|------------------|
| Go version | Go1.17 or higher |
## Documentation
Complete documentation can be found on the [website](https://docs.tendermint.com/master/).
### Install
See the [install instructions](./docs/introduction/install.md).
See the [install instructions](/docs/introduction/install.md).
### Quick Start
- [Single node](./docs/introduction/quick-start.md)
- [Local cluster using docker-compose](./docs/tools/docker-compose.md)
- [Remote cluster using Terraform and Ansible](./docs/tools/terraform-and-ansible.md)
- [Single node](/docs/introduction/quick-start.md)
- [Local cluster using docker-compose](/docs/tools/docker-compose.md)
- [Remote cluster using Terraform and Ansible](/docs/tools/terraform-and-ansible.md)
- [Join the Cosmos testnet](https://cosmos.network/testnet)
## Contributing
@@ -72,9 +73,9 @@ Please abide by the [Code of Conduct](CODE_OF_CONDUCT.md) in all interactions.
Before contributing to the project, please take a look at the [contributing guidelines](CONTRIBUTING.md)
and the [style guide](STYLE_GUIDE.md). You may also find it helpful to read the
[specifications](./spec/README.md),
[specifications](https://github.com/tendermint/spec), watch the [Developer Sessions](/docs/DEV_SESSIONS.md),
and familiarize yourself with our
[Architectural Decision Records (ADRs)](./docs/architecture/README.md) and [Request For Comments (RFCs)](./docs/rfc/README.md).
[Architectural Decision Records](https://github.com/tendermint/tendermint/tree/master/docs/architecture).
## Versioning
@@ -111,23 +112,26 @@ in [UPGRADING.md](./UPGRADING.md).
## Resources
### Roadmap
### Tendermint Core
We keep a public up-to-date version of our roadmap [here](./docs/roadmap/roadmap.md)
### Libraries
For details about the blockchain data structures and the p2p protocols, see the
[Tendermint specification](https://docs.tendermint.com/master/spec/).
- [Cosmos SDK](http://github.com/cosmos/cosmos-sdk); A framework for building applications in Golang
- [Tendermint in Rust](https://github.com/informalsystems/tendermint-rs)
- [ABCI Tower](https://github.com/penumbra-zone/tower-abci)
For details on using the software, see the [documentation](/docs/) which is also
hosted at: <https://docs.tendermint.com/master/>
### Tools
Benchmarking is provided by [`tm-load-test`](https://github.com/informalsystems/tm-load-test).
Additional tooling can be found in [/docs/tools](/docs/tools).
### Applications
- [Cosmos Hub](https://hub.cosmos.network/)
- [Terra](https://www.terra.money/)
- [Celestia](https://celestia.org/)
- [Anoma](https://anoma.network/)
- [Vocdoni](https://docs.vocdoni.io/)
- [Cosmos SDK](http://github.com/cosmos/cosmos-sdk); a cryptocurrency application framework
- [Ethermint](http://github.com/cosmos/ethermint); Ethereum on Tendermint
- [Many more](https://tendermint.com/ecosystem)
### Research
@@ -140,7 +144,7 @@ We keep a public up-to-date version of our roadmap [here](./docs/roadmap/roadmap
## Join us!
Tendermint Core is maintained by [Interchain GmbH](https://interchain.berlin).
If you'd like to work full-time on Tendermint Core, [we're hiring](https://interchain-gmbh.breezy.hr/)!
If you'd like to work full-time on Tendermint Core, [we're hiring](https://interchain-gmbh.breezy.hr/p/682fb7e8a6f601-software-engineer-tendermint-core)!
Funding for Tendermint Core development comes primarily from the [Interchain Foundation](https://interchain.io),
a Swiss non-profit. The Tendermint trademark is owned by [Tendermint Inc.](https://tendermint.com), the for-profit entity

View File

@@ -42,42 +42,15 @@ In the following example, we'll assume that we're making a backport branch for
the 0.35.x line.
1. Start on `master`
2. Create and push the backport branch:
```sh
git checkout -b v0.35.x
git push origin v0.35.x
```
3. Create a PR to update the documentation directory for the backport branch.
We only maintain RFC and ADR documents on master, to avoid confusion.
In addition, we rewrite Markdown URLs pointing to master to point to the
backport branch, so that generated documentation will link to the correct
versions of files elsewhere in the repository. For context on the latter,
see https://github.com/tendermint/tendermint/issues/7675.
To prepare the PR:
```sh
# Remove the RFC and ADR documents from the backport.
# We only maintain these on master to avoid confusion.
git rm -r docs/rfc docs/architecture
# Update absolute links to point to the backport.
go run ./scripts/linkpatch -recur -target v0.35.x -skip-path docs/DOCS_README.md,docs/README.md docs
# Create and push the PR.
git checkout -b update-docs-v035x
git commit -m "Update docs for v0.35.x backport branch." docs
git push -u origin update-docs-v035x
```
Be sure to merge this PR before making other changes on the newly-created
backport branch.
After doing these steps, go back to `master` and do the following:
1. Tag `master` as the dev branch for the _next_ major release and push it up to GitHub.
1. Tag `master` as the dev branch for the _next_ major release and push it back up.
For example:
```sh
git tag -a v0.36.0-dev -m "Development base for Tendermint v0.36."

View File

@@ -2,149 +2,6 @@
This guide provides instructions for upgrading to specific versions of Tendermint Core.
## v0.36
### ABCI Changes
#### ABCI++
Coming soon...
#### ABCI Mutex
In previous versions of ABCI, Tendermint was prevented from making
concurrent calls to ABCI implementations by virtue of mutexes in the
implementation of Tendermint's ABCI infrastructure. These mutexes have
been removed from the current implementation and applications will now
be responsible for managing their own concurrency control.
To replicate the prior semantics, ensure that ABCI applications have a
single mutex that protects all ABCI method calls from concurrent
access. You can relax these requirements if your application can
provide safe concurrent access via other means. This safety is an
application concern so be very sure to test the application thoroughly
using realistic workloads and the race detector to ensure your
application remains correct.
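A minimal sketch of such a wrapper, assuming a simplified `Application` interface rather than the real `abci/types.Application`, might look like this:

```go
// A hypothetical wrapper that serializes all calls into an ABCI application
// behind a single mutex. The Application interface is simplified for
// illustration and is not the real abci/types.Application.
package abciwrap

import "sync"

type Application interface {
	DeliverTx(tx []byte) uint32
	Commit() []byte
}

// LockedApp allows only one ABCI call at a time, restoring the serialization
// Tendermint itself enforced before these mutexes were removed.
type LockedApp struct {
	mu  sync.Mutex
	app Application
}

func (l *LockedApp) DeliverTx(tx []byte) uint32 {
	l.mu.Lock()
	defer l.mu.Unlock()
	return l.app.DeliverTx(tx)
}

func (l *LockedApp) Commit() []byte {
	l.mu.Lock()
	defer l.mu.Unlock()
	return l.app.Commit()
}
```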
### Config Changes
- We have added a new, experimental tool to help operators migrate
configuration files created by previous versions of Tendermint.
To try this tool, run:
```shell
# Install the tool.
go install github.com/tendermint/tendermint/scripts/confix@latest
# Run the tool with the old configuration file as input.
# Replace the -config argument with your path.
confix -config ~/.tendermint/config/config.toml -out updated.toml
```
This tool should be able to update configurations from v0.34 and v0.35. We
plan to extend it to handle older configuration files in the future. For now,
it will report an error (without making any changes) if it does not recognize
the version that created the file.
- The default configuration for a newly-created node now disables indexing for
ABCI event metadata. Existing node configurations that already have indexing
turned on are not affected. Operators who wish to enable indexing for a new
node, however, must now edit the `config.toml` explicitly.
### RPC Changes
Tendermint v0.36 adds a new RPC event subscription API. The existing event
subscription API based on websockets is now deprecated. It will continue to
work throughout the v0.36 release, but the `subscribe`, `unsubscribe`, and
`unsubscribe_all` methods, along with websocket support, will be removed in
Tendermint v0.37. Callers currently using these features should migrate as
soon as is practical to the new API.
To enable the new API, node operators set a new `event-log-window-size`
parameter in the `[rpc]` section of the `config.toml` file. This defines a
duration of time during which the node will log all events published to the
event bus for use by RPC consumers.
Consumers use the new `events` JSON-RPC method to poll for events matching
their query in the log. Unlike the streaming API, events are not discarded if
the caller is slow, loses its connection, or crashes. As long as the client
recovers before its events expire from the log window, it will be able to
replay and catch up after recovering. Also unlike the streaming API, the client
can tell if it has truly missed events because they have expired from the log.
The `events` method is a normal JSON-RPC method, and does not require any
non-standard response processing (in contrast with the old `subscribe`).
Clients can modify their query at any time, and no longer need to coordinate
subscribe and unsubscribe calls to handle multiple queries.
The Go client implementations in the Tendermint Core repository have all been
updated to add a new `Events` method, including the light client proxy.
A new `rpc/client/eventstream` package has also been added to make it easier
for users to update existing use of the streaming API to use the polling API
The `eventstream` package handles polling and delivers matching events to a
callback.
For more detailed information, see [ADR 075](https://tinyurl.com/adr075) which
defines and describes the new API in detail.
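As a rough sketch, a client could poll the new `events` method with a plain JSON-RPC request once `event-log-window-size` is set on the node; the parameter names below (`filter`, `maxItems`, `after`) follow ADR 075 from memory and should be treated as assumptions rather than authoritative:

```go
// Hypothetical polling sketch for the new `events` JSON-RPC method.
// Parameter names are assumptions based on ADR 075, not verified here.
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	req, _ := json.Marshal(map[string]interface{}{
		"jsonrpc": "2.0",
		"id":      1,
		"method":  "events",
		"params": map[string]interface{}{
			"filter":   map[string]string{"query": "tm.event = 'NewBlock'"},
			"maxItems": 10,
			"after":    "", // cursor of the newest item already seen; empty on the first poll
		},
	})
	resp, err := http.Post("http://localhost:26657", "application/json", bytes.NewReader(req))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var out map[string]interface{}
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out["result"])
}
```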
### Timeout Parameter Changes
Tendermint v0.36 updates how the Tendermint consensus timing parameters are
configured. These parameters, `timeout-propose`, `timeout-propose-delta`,
`timeout-prevote`, `timeout-prevote-delta`, `timeout-precommit`,
`timeout-precommit-delta`, `timeout-commit`, and `skip-timeout-commit`, were
previously configured in `config.toml`. These timing parameters have moved and
are no longer configured in the `config.toml` file. These parameters have been
migrated into the `ConsensusParameters`. Nodes with these parameters set in the
local configuration file will see a warning logged on startup indicating that
these parameters are no longer used.
These parameters have also been pared-down. There are no longer separate
parameters for both the `prevote` and `precommit` phases of Tendermint. The
separate `timeout-prevote` and `timeout-precommit` parameters have been merged
into a single `timeout-vote` parameter that configures both of these similar
phases of the consensus protocol.
A set of reasonable defaults have been put in place for these new parameters
that will take effect when the node starts up in version v0.36. New chains
created using v0.36 and beyond will be able to configure these parameters in the
chain's `genesis.json` file. Chains that upgrade to v0.36 from a previous
compatible version of Tendermint will begin running with the default values.
Upgrading applications that wish to use different values from the defaults for
these parameters may do so by setting the `ConsensusParams.Timeout` field of the
`FinalizeBlock` `ABCI` response.
As a safety measure in case of unusual timing issues during the upgrade to
v0.36, an operator may override the consensus timeout values for a single node.
Note, however, that these overrides will be removed in Tendermint v0.37. See
[configuration](https://github.com/tendermint/tendermint/blob/master/docs/nodes/configuration.md)
for more information about these overrides.
For more discussion of this, see [ADR 074](https://tinyurl.com/adr074), which
lays out the reasoning for the changes as well as [RFC
009](https://tinyurl.com/rfc009) for a discussion of the complexities of
upgrading consensus parameters.
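A sketch of where such an override would be attached, using simplified stand-in types rather than the real v0.36 `abci/types` definitions:

```go
// Hypothetical sketch of overriding timeout parameters from FinalizeBlock.
// The type and field names below are stand-ins for illustration; consult the
// v0.36 abci/types package for the real definitions.
package app

import "time"

type TimeoutParams struct {
	Propose time.Duration
	Vote    time.Duration
	Commit  time.Duration
}

type ConsensusParams struct {
	Timeout *TimeoutParams
}

type ResponseFinalizeBlock struct {
	ConsensusParamUpdates *ConsensusParams
}

// finalizeBlock shows where an application would attach non-default timeouts
// in its FinalizeBlock response.
func finalizeBlock() ResponseFinalizeBlock {
	return ResponseFinalizeBlock{
		ConsensusParamUpdates: &ConsensusParams{
			Timeout: &TimeoutParams{
				Propose: 3 * time.Second,
				Vote:    2 * time.Second,
				Commit:  time.Second,
			},
		},
	}
}
```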
### CLI Changes
The functionality around resetting a node has been extended to make it safer. The
`unsafe-reset-all` command has been replaced by a `reset` command with four
subcommands: `blockchain`, `peers`, `unsafe-signer` and `unsafe-all`.
- `tendermint reset blockchain`: Clears a node of all blocks, consensus state, evidence,
and indexed transactions. NOTE: This command does not reset application state.
If you need to rollback the last application state (to recover from application
nondeterminism), see instead the `tendermint rollback` command.
- `tendermint reset peers`: Clears the peer store, which persists information on peers used
by the networking layer. This can be used to get rid of stale addresses or to switch
to a predefined set of static peers.
- `tendermint reset unsafe-signer`: Resets the watermark level of the PrivVal File signer,
allowing it to sign votes from the genesis height. This should only be used in testing, as
it can lead to the node double signing.
- `tendermint reset unsafe-all`: A combination of the other three commands. This deletes
the entire `data` directory, which may include application data as well.
## v0.35
### ABCI Changes
@@ -256,11 +113,11 @@ To access any of the functionality previously available via the
`node.Node` type, use the `*local.Local` "RPC" client, that exposes
the full RPC interface provided as direct function calls. Import the
`github.com/tendermint/tendermint/rpc/client/local` package and pass
the node service as in the following:
```go
node := node.NewDefault() //construct the node object
// start and set up the node service
client := local.New(node.(local.NodeService))
// use client object to interact with the node
@@ -287,10 +144,10 @@ both stacks.
The P2P library was reimplemented in this release. The new implementation is
enabled by default in this version of Tendermint. The legacy implementation is still
included in this version of Tendermint as a backstop to work around unforeseen
production issues. The new and legacy versions are interoperable. If necessary,
you can enable the legacy implementation in the server configuration file.
To make use of the legacy P2P implementation, add or update the following field of
your server's configuration file under the `[p2p]` section:
```toml
@@ -315,8 +172,8 @@ in the order in which they were received.
* `priority`: A priority queue of messages.
* `wdrr`: A queue implementing the Weighted Deficit Round Robin algorithm. A
weighted deficit round robin queue is created per peer. Each queue contains a
separate 'flow' for each of the channels of communication that exist between any two
peers. Tendermint maintains a channel per message type between peers. Each WDRR
queue maintains a shared buffer with a fixed capacity through which messages on different
@@ -360,7 +217,7 @@ Note also that Tendermint 0.34 also requires Go 1.16 or higher.
were added to support the new State Sync feature.
Previously, syncing a new node to a preexisting network could take days; but with State Sync,
new nodes are able to join a network in a matter of seconds.
Read [the spec](https://github.com/tendermint/tendermint/blob/master/spec/abci/apps.md)
Read [the spec](https://docs.tendermint.com/master/spec/abci/apps.html#state-sync)
if you want to learn more about State Sync, or if you'd like your application to use it.
(If you don't want to support State Sync in your application, you can just implement these new
ABCI methods as no-ops, leaving them empty.)
@@ -485,6 +342,7 @@ The `bech32` package has moved to the Cosmos SDK:
### CLI
The `tendermint lite` command has been renamed to `tendermint light` and has a slightly different API.
See [the docs](https://docs.tendermint.com/master/tendermint-core/light-client-protocol.html#http-proxy) for details.
### Light Client
@@ -759,7 +617,7 @@ the compilation tag:
Use `cleveldb` tag instead of `gcc` to compile Tendermint with CLevelDB or
use `make build_c` / `make install_c` (full instructions can be found at
<https://docs.tendermint.com/v0.35/introduction/install.html>)
<https://tendermint.com/docs/introduction/install.html#compile-with-cleveldb-support>)
## v0.31.0

View File

@@ -19,8 +19,8 @@ To get up and running quickly, see the [getting started guide](../docs/app-dev/g
A detailed description of the ABCI methods and message types is contained in:
- [The main spec](../spec/abci/abci.md)
- [A protobuf file](../proto/tendermint/abci/types.proto)
- [The main spec](https://github.com/tendermint/spec/blob/master/spec/abci/abci.md)
- [A protobuf file](https://github.com/tendermint/spec/blob/master/proto/tendermint/abci/types.proto)
- [A Go interface](./types/application.go)
## Protocol Buffers

View File

@@ -19,8 +19,8 @@ const (
// Client defines an interface for an ABCI client.
//
// All methods return the appropriate protobuf ResponseXxx struct and
// an error.
// All `Async` methods return a `ReqRes` object and an error.
// All `Sync` methods return the appropriate protobuf ResponseXxx struct and an error.
//
// NOTE these are client errors, eg. ABCI socket connectivity issues.
// Application-related errors are reflected in response via ABCI error codes
@@ -52,35 +52,65 @@ type Client interface {
// NewClient returns a new ABCI client of the specified transport type.
// It returns an error if the transport is not "socket" or "grpc"
func NewClient(logger log.Logger, addr, transport string, mustConnect bool) (Client, error) {
func NewClient(logger log.Logger, addr, transport string, mustConnect bool) (client Client, err error) {
switch transport {
case "socket":
return NewSocketClient(logger, addr, mustConnect), nil
client = NewSocketClient(logger, addr, mustConnect)
case "grpc":
return NewGRPCClient(logger, addr, mustConnect), nil
client = NewGRPCClient(logger, addr, mustConnect)
default:
return nil, fmt.Errorf("unknown abci transport %s", transport)
err = fmt.Errorf("unknown abci transport %s", transport)
}
return
}
type requestAndResponse struct {
type ReqRes struct {
*types.Request
*types.Response
*types.Response // Not set atomically, so be sure to use WaitGroup.
mtx sync.Mutex
signal chan struct{}
cb func(*types.Response) // A single callback that may be set.
}
func makeReqRes(req *types.Request) *requestAndResponse {
return &requestAndResponse{
func NewReqRes(req *types.Request) *ReqRes {
return &ReqRes{
Request: req,
Response: nil,
signal: make(chan struct{}),
cb: nil,
}
}
// markDone marks the ReqRes object as done.
func (r *requestAndResponse) markDone() {
// SetCallback sets the callback. If the reqRes is already done, it calls the cb
// immediately. Note: reqRes.cb should not change once reqRes is done, and only
// one callback is supported.
func (r *ReqRes) SetCallback(cb func(res *types.Response)) {
r.mtx.Lock()
select {
case <-r.signal:
r.mtx.Unlock()
cb(r.Response)
default:
r.cb = cb
r.mtx.Unlock()
}
}
// InvokeCallback invokes a thread-safe execution of the configured callback
// if non-nil.
func (r *ReqRes) InvokeCallback() {
r.mtx.Lock()
defer r.mtx.Unlock()
if r.cb != nil {
r.cb(r.Response)
}
}
// SetDone marks the ReqRes object as done.
func (r *ReqRes) SetDone() {
r.mtx.Lock()
defer r.mtx.Unlock()

33
abci/client/creators.go Normal file
View File

@@ -0,0 +1,33 @@
package abciclient
import (
"fmt"
"github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/libs/log"
)
// Creator creates new ABCI clients.
type Creator func(log.Logger) (Client, error)
// NewLocalCreator returns a Creator for the given app,
// which will be running locally.
func NewLocalCreator(app types.Application) Creator {
return func(logger log.Logger) (Client, error) {
return NewLocalClient(logger, app), nil
}
}
// NewRemoteCreator returns a Creator for the given address (e.g.
// "192.168.0.1") and transport (e.g. "tcp"). Set mustConnect to true if you
// want the client to connect before reporting success.
func NewRemoteCreator(logger log.Logger, addr, transport string, mustConnect bool) Creator {
return func(log.Logger) (Client, error) {
remoteApp, err := NewClient(logger, addr, transport, mustConnect)
if err != nil {
return nil, fmt.Errorf("failed to connect to proxy: %w", err)
}
return remoteApp, nil
}
}

View File

@@ -7,14 +7,23 @@
//
// ## Socket client
//
// The client blocks for enqueuing the request, for enqueuing the
// Flush to send the request, and for the Flush response to return.
// async: the client maintains an internal buffer of a fixed size. when the
// buffer becomes full, all Async calls will return an error immediately.
//
// sync: the client blocks on 1) enqueuing the Sync request 2) enqueuing the
// Flush requests 3) waiting for the Flush response
//
// ## Local client
//
// The global mutex is locked during each call
// async: global mutex is locked during each call (meaning it's not really async!)
// sync: global mutex is locked during each call
//
// ## gRPC client
//
// The client waits for all calls to complete.
// async: gRPC is synchronous, but an internal buffer of a fixed size is used
// to store responses and later call callbacks (separate goroutine per
// response).
//
// sync: waits for all Async calls to complete (essentially what Flush does in
// the socket client) and calls Sync method.
package abciclient

View File

@@ -24,8 +24,9 @@ type grpcClient struct {
mustConnect bool
client types.ABCIApplicationClient
conn *grpc.ClientConn
client types.ABCIApplicationClient
conn *grpc.ClientConn
chReqRes chan *ReqRes // dispatches "async" responses to callbacks *in order*, needed by mempool
mtx sync.Mutex
addr string
@@ -37,11 +38,25 @@ var _ Client = (*grpcClient)(nil)
// NewGRPCClient creates a gRPC client, which will connect to addr upon the
// start. Note Client#Start returns an error if connection is unsuccessful and
// mustConnect is true.
//
// GRPC calls are synchronous, but some callbacks expect to be called
// asynchronously (eg. the mempool expects to be able to lock to remove bad txs
// from cache). To accommodate, we finish each call in its own go-routine,
// which is expensive, but easy - if you want something better, use the socket
// protocol! maybe one day, if people really want it, we use grpc streams, but
// hopefully not :D
func NewGRPCClient(logger log.Logger, addr string, mustConnect bool) Client {
cli := &grpcClient{
logger: logger,
addr: addr,
mustConnect: mustConnect,
// Buffering the channel is needed to make calls appear asynchronous,
// which is required when the caller makes multiple async calls before
// processing callbacks (e.g. due to holding locks). 64 means that a
// caller can make up to 64 async calls before a callback must be
// processed (otherwise it deadlocks). It also means that we can make 64
// gRPC calls while processing a slow callback at the channel head.
chReqRes: make(chan *ReqRes, 64),
}
cli.BaseService = *service.NewBaseService(logger, "grpcClient", cli)
return cli
@@ -52,6 +67,35 @@ func dialerFunc(ctx context.Context, addr string) (net.Conn, error) {
}
func (cli *grpcClient) OnStart(ctx context.Context) error {
// This processes asynchronous request/response messages and dispatches
// them to callbacks.
go func() {
// Use a separate function to use defer for mutex unlocks (this handles panics)
callCb := func(reqres *ReqRes) {
cli.mtx.Lock()
defer cli.mtx.Unlock()
reqres.SetDone()
// Notify reqRes listener if set
reqres.InvokeCallback()
}
for {
select {
case reqres := <-cli.chReqRes:
if reqres != nil {
callCb(reqres)
} else {
cli.logger.Error("Received nil reqres")
}
case <-ctx.Done():
return
}
}
}()
RETRY_LOOP:
for {
conn, err := grpc.Dial(cli.addr,
@@ -91,18 +135,30 @@ RETRY_LOOP:
}
func (cli *grpcClient) OnStop() {
cli.mtx.Lock()
defer cli.mtx.Unlock()
if cli.conn != nil {
cli.err = cli.conn.Close()
cli.conn.Close()
}
close(cli.chReqRes)
}
func (cli *grpcClient) StopForError(err error) {
if !cli.IsRunning() {
return
}
cli.mtx.Lock()
if cli.err == nil {
cli.err = err
}
cli.mtx.Unlock()
cli.logger.Error("Stopping abci.grpcClient for error", "err", err)
cli.Stop()
}
func (cli *grpcClient) Error() error {
cli.mtx.Lock()
defer cli.mtx.Unlock()
return cli.err
}

View File

@@ -2,6 +2,7 @@ package abciclient
import (
"context"
"sync"
types "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/libs/log"
@@ -14,6 +15,8 @@ import (
// RPC endpoint), but defers are used everywhere for the sake of consistency.
type localClient struct {
service.BaseService
mtx sync.Mutex
types.Application
}
@@ -22,7 +25,7 @@ var _ Client = (*localClient)(nil)
// NewLocalClient creates a local client, which will be directly calling the
// methods of the given app.
//
// The client methods ignore their context argument.
// Both Async and Sync methods ignore the given context.Context parameter.
func NewLocalClient(logger log.Logger, app types.Application) Client {
cli := &localClient{
Application: app,
@@ -33,82 +36,169 @@ func NewLocalClient(logger log.Logger, app types.Application) Client {
func (*localClient) OnStart(context.Context) error { return nil }
func (*localClient) OnStop() {}
func (*localClient) Error() error { return nil }
// TODO: change types.Application to include Error()?
func (app *localClient) Error() error {
return nil
}
//-------------------------------------------------------
func (*localClient) Flush(context.Context) error { return nil }
func (app *localClient) Flush(ctx context.Context) error {
return nil
}
func (app *localClient) Echo(_ context.Context, msg string) (*types.ResponseEcho, error) {
func (app *localClient) Echo(ctx context.Context, msg string) (*types.ResponseEcho, error) {
return &types.ResponseEcho{Message: msg}, nil
}
func (app *localClient) Info(ctx context.Context, req types.RequestInfo) (*types.ResponseInfo, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.Info(req)
return &res, nil
}
func (app *localClient) CheckTx(_ context.Context, req types.RequestCheckTx) (*types.ResponseCheckTx, error) {
func (app *localClient) CheckTx(
ctx context.Context,
req types.RequestCheckTx,
) (*types.ResponseCheckTx, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.CheckTx(req)
return &res, nil
}
func (app *localClient) Query(_ context.Context, req types.RequestQuery) (*types.ResponseQuery, error) {
func (app *localClient) Query(
ctx context.Context,
req types.RequestQuery,
) (*types.ResponseQuery, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.Query(req)
return &res, nil
}
func (app *localClient) Commit(ctx context.Context) (*types.ResponseCommit, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.Commit()
return &res, nil
}
func (app *localClient) InitChain(_ context.Context, req types.RequestInitChain) (*types.ResponseInitChain, error) {
func (app *localClient) InitChain(
ctx context.Context,
req types.RequestInitChain,
) (*types.ResponseInitChain, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.InitChain(req)
return &res, nil
}
func (app *localClient) ListSnapshots(_ context.Context, req types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
func (app *localClient) ListSnapshots(
ctx context.Context,
req types.RequestListSnapshots,
) (*types.ResponseListSnapshots, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.ListSnapshots(req)
return &res, nil
}
func (app *localClient) OfferSnapshot(_ context.Context, req types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
func (app *localClient) OfferSnapshot(
ctx context.Context,
req types.RequestOfferSnapshot,
) (*types.ResponseOfferSnapshot, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.OfferSnapshot(req)
return &res, nil
}
func (app *localClient) LoadSnapshotChunk(_ context.Context, req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
func (app *localClient) LoadSnapshotChunk(
ctx context.Context,
req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.LoadSnapshotChunk(req)
return &res, nil
}
func (app *localClient) ApplySnapshotChunk(_ context.Context, req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
func (app *localClient) ApplySnapshotChunk(
ctx context.Context,
req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.ApplySnapshotChunk(req)
return &res, nil
}
func (app *localClient) PrepareProposal(_ context.Context, req types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
func (app *localClient) PrepareProposal(
ctx context.Context,
req types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.PrepareProposal(req)
return &res, nil
}
func (app *localClient) ProcessProposal(_ context.Context, req types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
func (app *localClient) ProcessProposal(
ctx context.Context,
req types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.ProcessProposal(req)
return &res, nil
}
func (app *localClient) ExtendVote(_ context.Context, req types.RequestExtendVote) (*types.ResponseExtendVote, error) {
func (app *localClient) ExtendVote(
ctx context.Context,
req types.RequestExtendVote) (*types.ResponseExtendVote, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.ExtendVote(req)
return &res, nil
}
func (app *localClient) VerifyVoteExtension(_ context.Context, req types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) {
func (app *localClient) VerifyVoteExtension(
ctx context.Context,
req types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.VerifyVoteExtension(req)
return &res, nil
}
func (app *localClient) FinalizeBlock(_ context.Context, req types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
func (app *localClient) FinalizeBlock(
ctx context.Context,
req types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.FinalizeBlock(req)
return &res, nil
}

View File

@@ -5,7 +5,10 @@ package mocks
import (
context "context"
abciclient "github.com/tendermint/tendermint/abci/client"
mock "github.com/stretchr/testify/mock"
types "github.com/tendermint/tendermint/abci/types"
)
@@ -60,6 +63,29 @@ func (_m *Client) CheckTx(_a0 context.Context, _a1 types.RequestCheckTx) (*types
return r0, r1
}
// CheckTxAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) CheckTxAsync(_a0 context.Context, _a1 types.RequestCheckTx) (*abciclient.ReqRes, error) {
ret := _m.Called(_a0, _a1)
var r0 *abciclient.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestCheckTx) *abciclient.ReqRes); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*abciclient.ReqRes)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, types.RequestCheckTx) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// Commit provides a mock function with given fields: _a0
func (_m *Client) Commit(_a0 context.Context) (*types.ResponseCommit, error) {
ret := _m.Called(_a0)

View File

@@ -8,6 +8,7 @@ import (
"fmt"
"io"
"net"
"reflect"
"sync"
"time"
@@ -33,11 +34,12 @@ type socketClient struct {
mustConnect bool
conn net.Conn
reqQueue chan *requestAndResponse
reqQueue chan *ReqRes
mtx sync.Mutex
err error
reqSent *list.List // list of requests sent, waiting for response
reqSent *list.List // list of requests sent, waiting for response
resCb func(*types.Request, *types.Response) // called on all requests, if set.
}
var _ Client = (*socketClient)(nil)
@@ -48,10 +50,11 @@ var _ Client = (*socketClient)(nil)
func NewSocketClient(logger log.Logger, addr string, mustConnect bool) Client {
cli := &socketClient{
logger: logger,
reqQueue: make(chan *requestAndResponse, reqQueueSize),
reqQueue: make(chan *ReqRes, reqQueueSize),
mustConnect: mustConnect,
addr: addr,
reqSent: list.New(),
resCb: nil,
}
cli.BaseService = *service.NewBaseService(logger, "socketClient", cli)
return cli
@@ -123,7 +126,6 @@ func (cli *socketClient) sendRequestsRoutine(ctx context.Context, conn io.Writer
cli.stopForError(fmt.Errorf("write to buffer: %w", err))
return
}
if err := bw.Flush(); err != nil {
cli.stopForError(fmt.Errorf("flush buffer: %w", err))
return
@@ -138,20 +140,23 @@ func (cli *socketClient) recvResponseRoutine(ctx context.Context, conn io.Reader
if ctx.Err() != nil {
return
}
res := &types.Response{}
if err := types.ReadMessage(r, res); err != nil {
var res = &types.Response{}
err := types.ReadMessage(r, res)
if err != nil {
cli.stopForError(fmt.Errorf("read message: %w", err))
return
}
// cli.logger.Debug("Received response", "responseType", reflect.TypeOf(res), "response", res)
switch r := res.Value.(type) {
case *types.Response_Exception: // app responded with error
// XXX After setting cli.err, release waiters (e.g. reqres.Done())
cli.stopForError(errors.New(r.Exception.Error))
return
default:
if err := cli.didRecvResponse(res); err != nil {
err := cli.didRecvResponse(res)
if err != nil {
cli.stopForError(err)
return
}
@@ -159,7 +164,7 @@ func (cli *socketClient) recvResponseRoutine(ctx context.Context, conn io.Reader
}
}
func (cli *socketClient) willSendReq(reqres *requestAndResponse) {
func (cli *socketClient) willSendReq(reqres *ReqRes) {
cli.mtx.Lock()
defer cli.mtx.Unlock()
cli.reqSent.PushBack(reqres)
@@ -172,172 +177,258 @@ func (cli *socketClient) didRecvResponse(res *types.Response) error {
// Get the first ReqRes.
next := cli.reqSent.Front()
if next == nil {
return fmt.Errorf("unexpected %T when nothing expected", res.Value)
return fmt.Errorf("unexpected %v when nothing expected", reflect.TypeOf(res.Value))
}
reqres := next.Value.(*requestAndResponse)
reqres := next.Value.(*ReqRes)
if !resMatchesReq(reqres.Request, res) {
return fmt.Errorf("unexpected %T when response to %T expected", res.Value, reqres.Request.Value)
return fmt.Errorf("unexpected %v when response to %v expected",
reflect.TypeOf(res.Value), reflect.TypeOf(reqres.Request.Value))
}
reqres.Response = res
reqres.markDone() // release waiters
reqres.SetDone() // release waiters
cli.reqSent.Remove(next) // pop first item from linked list
// Notify client listener if set (global callback).
if cli.resCb != nil {
cli.resCb(reqres.Request, res)
}
// Notify reqRes listener if set (request specific callback).
//
// NOTE: It is possible this callback isn't set on the reqres object at this
// point, in which case it will be called later, when it is set.
reqres.InvokeCallback()
return nil
}
//----------------------------------------
func (cli *socketClient) Flush(ctx context.Context) error {
_, err := cli.doRequest(ctx, types.ToRequestFlush())
reqRes, err := cli.queueRequest(ctx, types.ToRequestFlush())
if err != nil {
return queueErr(err)
}
if err := cli.Error(); err != nil {
return err
}
return nil
select {
case <-reqRes.signal:
return cli.Error()
case <-ctx.Done():
return ctx.Err()
}
}
func (cli *socketClient) Echo(ctx context.Context, msg string) (*types.ResponseEcho, error) {
res, err := cli.doRequest(ctx, types.ToRequestEcho(msg))
reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestEcho(msg))
if err != nil {
return nil, err
}
return res.GetEcho(), nil
return reqres.Response.GetEcho(), nil
}
func (cli *socketClient) Info(ctx context.Context, req types.RequestInfo) (*types.ResponseInfo, error) {
res, err := cli.doRequest(ctx, types.ToRequestInfo(req))
func (cli *socketClient) Info(
ctx context.Context,
req types.RequestInfo,
) (*types.ResponseInfo, error) {
reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestInfo(req))
if err != nil {
return nil, err
}
return res.GetInfo(), nil
return reqres.Response.GetInfo(), nil
}
func (cli *socketClient) CheckTx(ctx context.Context, req types.RequestCheckTx) (*types.ResponseCheckTx, error) {
res, err := cli.doRequest(ctx, types.ToRequestCheckTx(req))
func (cli *socketClient) CheckTx(
ctx context.Context,
req types.RequestCheckTx,
) (*types.ResponseCheckTx, error) {
reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestCheckTx(req))
if err != nil {
return nil, err
}
return res.GetCheckTx(), nil
return reqres.Response.GetCheckTx(), nil
}
func (cli *socketClient) Query(ctx context.Context, req types.RequestQuery) (*types.ResponseQuery, error) {
res, err := cli.doRequest(ctx, types.ToRequestQuery(req))
func (cli *socketClient) Query(
ctx context.Context,
req types.RequestQuery,
) (*types.ResponseQuery, error) {
reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestQuery(req))
if err != nil {
return nil, err
}
return res.GetQuery(), nil
return reqres.Response.GetQuery(), nil
}
func (cli *socketClient) Commit(ctx context.Context) (*types.ResponseCommit, error) {
res, err := cli.doRequest(ctx, types.ToRequestCommit())
reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestCommit())
if err != nil {
return nil, err
}
return res.GetCommit(), nil
return reqres.Response.GetCommit(), nil
}
func (cli *socketClient) InitChain(ctx context.Context, req types.RequestInitChain) (*types.ResponseInitChain, error) {
res, err := cli.doRequest(ctx, types.ToRequestInitChain(req))
func (cli *socketClient) InitChain(
ctx context.Context,
req types.RequestInitChain,
) (*types.ResponseInitChain, error) {
reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestInitChain(req))
if err != nil {
return nil, err
}
return res.GetInitChain(), nil
return reqres.Response.GetInitChain(), nil
}
func (cli *socketClient) ListSnapshots(ctx context.Context, req types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
res, err := cli.doRequest(ctx, types.ToRequestListSnapshots(req))
func (cli *socketClient) ListSnapshots(
ctx context.Context,
req types.RequestListSnapshots,
) (*types.ResponseListSnapshots, error) {
reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestListSnapshots(req))
if err != nil {
return nil, err
}
return res.GetListSnapshots(), nil
return reqres.Response.GetListSnapshots(), nil
}
func (cli *socketClient) OfferSnapshot(ctx context.Context, req types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
res, err := cli.doRequest(ctx, types.ToRequestOfferSnapshot(req))
func (cli *socketClient) OfferSnapshot(
ctx context.Context,
req types.RequestOfferSnapshot,
) (*types.ResponseOfferSnapshot, error) {
reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestOfferSnapshot(req))
if err != nil {
return nil, err
}
return res.GetOfferSnapshot(), nil
return reqres.Response.GetOfferSnapshot(), nil
}
func (cli *socketClient) LoadSnapshotChunk(ctx context.Context, req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
res, err := cli.doRequest(ctx, types.ToRequestLoadSnapshotChunk(req))
func (cli *socketClient) LoadSnapshotChunk(
ctx context.Context,
req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestLoadSnapshotChunk(req))
if err != nil {
return nil, err
}
return res.GetLoadSnapshotChunk(), nil
return reqres.Response.GetLoadSnapshotChunk(), nil
}
func (cli *socketClient) ApplySnapshotChunk(ctx context.Context, req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
res, err := cli.doRequest(ctx, types.ToRequestApplySnapshotChunk(req))
func (cli *socketClient) ApplySnapshotChunk(
ctx context.Context,
req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestApplySnapshotChunk(req))
if err != nil {
return nil, err
}
return res.GetApplySnapshotChunk(), nil
return reqres.Response.GetApplySnapshotChunk(), nil
}
func (cli *socketClient) PrepareProposal(ctx context.Context, req types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
res, err := cli.doRequest(ctx, types.ToRequestPrepareProposal(req))
func (cli *socketClient) PrepareProposal(
ctx context.Context,
req types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestPrepareProposal(req))
if err != nil {
return nil, err
}
return res.GetPrepareProposal(), nil
return reqres.Response.GetPrepareProposal(), nil
}
func (cli *socketClient) ProcessProposal(ctx context.Context, req types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
res, err := cli.doRequest(ctx, types.ToRequestProcessProposal(req))
func (cli *socketClient) ProcessProposal(
ctx context.Context,
req types.RequestProcessProposal,
) (*types.ResponseProcessProposal, error) {
reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestProcessProposal(req))
if err != nil {
return nil, err
}
return res.GetProcessProposal(), nil
return reqres.Response.GetProcessProposal(), nil
}
func (cli *socketClient) ExtendVote(ctx context.Context, req types.RequestExtendVote) (*types.ResponseExtendVote, error) {
res, err := cli.doRequest(ctx, types.ToRequestExtendVote(req))
func (cli *socketClient) ExtendVote(
ctx context.Context,
req types.RequestExtendVote) (*types.ResponseExtendVote, error) {
reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestExtendVote(req))
if err != nil {
return nil, err
}
return res.GetExtendVote(), nil
return reqres.Response.GetExtendVote(), nil
}
func (cli *socketClient) VerifyVoteExtension(ctx context.Context, req types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) {
res, err := cli.doRequest(ctx, types.ToRequestVerifyVoteExtension(req))
func (cli *socketClient) VerifyVoteExtension(
ctx context.Context,
req types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) {
reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestVerifyVoteExtension(req))
if err != nil {
return nil, err
}
return res.GetVerifyVoteExtension(), nil
return reqres.Response.GetVerifyVoteExtension(), nil
}
func (cli *socketClient) FinalizeBlock(ctx context.Context, req types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
res, err := cli.doRequest(ctx, types.ToRequestFinalizeBlock(req))
func (cli *socketClient) FinalizeBlock(
ctx context.Context,
req types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestFinalizeBlock(req))
if err != nil {
return nil, err
}
return res.GetFinalizeBlock(), nil
return reqres.Response.GetFinalizeBlock(), nil
}
//----------------------------------------
func (cli *socketClient) doRequest(ctx context.Context, req *types.Request) (*types.Response, error) {
reqres := makeReqRes(req)
// queueRequest enqueues req onto the queue. The request can fail early if the
// context is canceled. If the queue is full, this method blocks to allow
// the request to be placed onto the queue. This has the effect of creating an
// unbounded queue of goroutines waiting to write to this queue, which is a bit
// antithetical to the purpose of a queue; however, undoing this behavior has
// dangerous implications because callers upstream rely on it.
// Remove at your peril.
//
// The caller is responsible for checking cli.Error.
func (cli *socketClient) queueRequest(ctx context.Context, req *types.Request) (*ReqRes, error) {
reqres := NewReqRes(req)
select {
case cli.reqQueue <- reqres:
case <-ctx.Done():
return nil, fmt.Errorf("can't queue req: %w", ctx.Err())
}
select {
case <-reqres.signal:
if err := cli.Error(); err != nil {
return nil, err
}
return reqres.Response, nil
case <-ctx.Done():
return nil, ctx.Err()
}
return reqres, nil
}
func (cli *socketClient) queueRequestAndFlush(
ctx context.Context,
req *types.Request,
) (*ReqRes, error) {
reqres, err := cli.queueRequest(ctx, req)
if err != nil {
return nil, queueErr(err)
}
if err := cli.Flush(ctx); err != nil {
return nil, err
}
return reqres, cli.Error()
}
func queueErr(e error) error {
return fmt.Errorf("can't queue req: %w", e)
}
// drainQueue marks as complete and discards all remaining pending requests
@@ -348,8 +439,8 @@ func (cli *socketClient) drainQueue(ctx context.Context) {
// mark all in-flight messages as resolved (they will get cli.Error())
for req := cli.reqSent.Front(); req != nil; req = req.Next() {
reqres := req.Value.(*requestAndResponse)
reqres.markDone()
reqres := req.Value.(*ReqRes)
reqres.SetDone()
}
// Mark all queued messages as resolved.
@@ -362,7 +453,7 @@ func (cli *socketClient) drainQueue(ctx context.Context) {
case <-ctx.Done():
return
case reqres := <-cli.reqQueue:
reqres.markDone()
reqres.SetDone()
default:
return
}
@@ -387,8 +478,6 @@ func resMatchesReq(req *types.Request, res *types.Response) (ok bool) {
_, ok = res.Value.(*types.Response_Query)
case *types.Request_InitChain:
_, ok = res.Value.(*types.Response_InitChain)
case *types.Request_ProcessProposal:
_, ok = res.Value.(*types.Response_ProcessProposal)
case *types.Request_PrepareProposal:
_, ok = res.Value.(*types.Response_PrepareProposal)
case *types.Request_ExtendVote:

View File

@@ -0,0 +1,85 @@
package abciclient_test
import (
"context"
"fmt"
"testing"
"time"
"math/rand"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
abciclient "github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/abci/server"
"github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/service"
)
func TestProperSyncCalls(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
app := slowApp{}
logger := log.NewNopLogger()
_, c := setupClientServer(ctx, t, logger, app)
resp := make(chan error, 1)
go func() {
rsp, err := c.FinalizeBlock(ctx, types.RequestFinalizeBlock{})
assert.NoError(t, err)
assert.NoError(t, c.Flush(ctx))
assert.NotNil(t, rsp)
select {
case <-ctx.Done():
case resp <- c.Error():
}
}()
select {
case <-time.After(time.Second):
require.Fail(t, "No response arrived")
case err, ok := <-resp:
require.True(t, ok, "Must not close channel")
assert.NoError(t, err, "This should return success")
}
}
func setupClientServer(
ctx context.Context,
t *testing.T,
logger log.Logger,
app types.Application,
) (service.Service, abciclient.Client) {
t.Helper()
// some port between 20k and 30k
port := 20000 + rand.Int31()%10000
addr := fmt.Sprintf("localhost:%d", port)
s, err := server.NewServer(logger, addr, "socket", app)
require.NoError(t, err)
require.NoError(t, s.Start(ctx))
t.Cleanup(s.Wait)
c := abciclient.NewSocketClient(logger, addr, true)
require.NoError(t, c.Start(ctx))
t.Cleanup(c.Wait)
require.True(t, s.IsRunning())
require.True(t, c.IsRunning())
return s, c
}
type slowApp struct {
types.BaseApplication
}
func (slowApp) FinalizeBlock(req types.RequestFinalizeBlock) types.ResponseFinalizeBlock {
time.Sleep(200 * time.Millisecond)
return types.ResponseFinalizeBlock{}
}

View File

@@ -125,7 +125,7 @@ func addCommands(cmd *cobra.Command, logger log.Logger) {
cmd.AddCommand(consoleCmd)
cmd.AddCommand(echoCmd)
cmd.AddCommand(infoCmd)
cmd.AddCommand(finalizeBlockCmd)
cmd.AddCommand(deliverTxCmd)
cmd.AddCommand(checkTxCmd)
cmd.AddCommand(commitCmd)
cmd.AddCommand(versionCmd)
@@ -150,9 +150,10 @@ where example.file looks something like:
check_tx 0x00
check_tx 0xff
finalize_block 0x00
deliver_tx 0x00
check_tx 0x00
finalize_block 0x01 0x04 0xff
deliver_tx 0x01
deliver_tx 0x04
info
`,
Args: cobra.ExactArgs(0),
@@ -168,7 +169,7 @@ This command opens an interactive console for running any of the other commands
without opening a new connection each time
`,
Args: cobra.ExactArgs(0),
ValidArgs: []string{"echo", "info", "finalize_block", "check_tx", "commit", "query"},
ValidArgs: []string{"echo", "info", "deliver_tx", "check_tx", "commit", "query"},
RunE: cmdConsole,
}
@@ -187,11 +188,11 @@ var infoCmd = &cobra.Command{
RunE: cmdInfo,
}
var finalizeBlockCmd = &cobra.Command{
Use: "finalize_block",
Short: "deliver a block of transactions to the application",
Long: "deliver a block of transactions to the application",
Args: cobra.MinimumNArgs(1),
var deliverTxCmd = &cobra.Command{
Use: "deliver_tx",
Short: "deliver a new transaction to the application",
Long: "deliver a new transaction to the application",
Args: cobra.ExactArgs(1),
RunE: cmdFinalizeBlock,
}
@@ -425,7 +426,7 @@ func muxOnCommands(cmd *cobra.Command, pArgs []string) error {
return cmdCheckTx(cmd, actualArgs)
case "commit":
return cmdCommit(cmd, actualArgs)
case "finalize_block":
case "deliver_tx":
return cmdFinalizeBlock(cmd, actualArgs)
case "echo":
return cmdEcho(cmd, actualArgs)
@@ -499,23 +500,19 @@ func cmdFinalizeBlock(cmd *cobra.Command, args []string) error {
if len(args) == 0 {
printResponse(cmd, args, response{
Code: codeBad,
Log: "Must provide at least one transaction",
Log: "want the tx",
})
return nil
}
txs := make([][]byte, len(args))
for i, arg := range args {
txBytes, err := stringOrHexToBytes(arg)
if err != nil {
return err
}
txs[i] = txBytes
}
res, err := client.FinalizeBlock(cmd.Context(), types.RequestFinalizeBlock{Txs: txs})
txBytes, err := stringOrHexToBytes(args[0])
if err != nil {
return err
}
for _, tx := range res.TxResults {
res, err := client.FinalizeBlock(cmd.Context(), types.RequestFinalizeBlock{Txs: [][]byte{txBytes}})
if err != nil {
return err
}
for _, tx := range res.Txs {
printResponse(cmd, args, response{
Code: tx.Code,
Data: tx.Data,

View File

@@ -31,18 +31,18 @@ func init() {
func TestKVStore(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.NewNopLogger()
logger := log.NewTestingLogger(t)
t.Log("### Testing KVStore")
logger.Info("### Testing KVStore")
testBulk(ctx, t, logger, kvstore.NewApplication())
}
func TestBaseApp(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.NewNopLogger()
logger := log.NewTestingLogger(t)
t.Log("### Testing BaseApp")
logger.Info("### Testing BaseApp")
testBulk(ctx, t, logger, types.NewBaseApplication())
}
@@ -50,9 +50,9 @@ func TestGRPC(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.NewNopLogger()
logger := log.NewTestingLogger(t)
t.Log("### Testing GRPC")
logger.Info("### Testing GRPC")
testGRPCSync(ctx, t, logger, types.NewGRPCApplication(types.NewBaseApplication()))
}
@@ -84,8 +84,8 @@ func testBulk(ctx context.Context, t *testing.T, logger log.Logger, app types.Ap
// Send bulk request
res, err := client.FinalizeBlock(ctx, rfb)
require.NoError(t, err)
require.Equal(t, numDeliverTxs, len(res.TxResults), "Number of txs doesn't match")
for _, tx := range res.TxResults {
require.Equal(t, numDeliverTxs, len(res.Txs), "Number of txs doesn't match")
for _, tx := range res.Txs {
require.Equal(t, tx.Code, code.CodeTypeOK, "Tx failed")
}
@@ -138,8 +138,8 @@ func testGRPCSync(ctx context.Context, t *testing.T, logger log.Logger, app type
// Send request
response, err := client.FinalizeBlock(ctx, &rfb)
require.NoError(t, err, "Error in GRPC FinalizeBlock")
require.Equal(t, numDeliverTxs, len(response.TxResults), "Number of txs returned via GRPC doesn't match")
for _, tx := range response.TxResults {
require.Equal(t, numDeliverTxs, len(response.Txs), "Number of txs returned via GRPC doesn't match")
for _, tx := range response.Txs {
require.Equal(t, tx.Code, code.CodeTypeOK, "Tx failed")
}
}

View File

@@ -2,21 +2,14 @@ package kvstore
import (
"bytes"
"encoding/base64"
"encoding/binary"
"encoding/json"
"fmt"
"strconv"
"strings"
"sync"
dbm "github.com/tendermint/tm-db"
"github.com/tendermint/tendermint/abci/example/code"
"github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/crypto/encoding"
"github.com/tendermint/tendermint/libs/log"
cryptoproto "github.com/tendermint/tendermint/proto/tendermint/crypto"
"github.com/tendermint/tendermint/version"
)
@@ -72,41 +65,17 @@ var _ types.Application = (*Application)(nil)
type Application struct {
types.BaseApplication
mu sync.Mutex
state State
RetainBlocks int64 // blocks to retain after commit (via ResponseCommit.RetainHeight)
logger log.Logger
// validator set
ValUpdates []types.ValidatorUpdate
valAddrToPubKeyMap map[string]cryptoproto.PublicKey
}
func NewApplication() *Application {
return &Application{
logger: log.NewNopLogger(),
state: loadState(dbm.NewMemDB()),
valAddrToPubKeyMap: make(map[string]cryptoproto.PublicKey),
}
state := loadState(dbm.NewMemDB())
return &Application{state: state}
}
func (app *Application) InitChain(req types.RequestInitChain) types.ResponseInitChain {
app.mu.Lock()
defer app.mu.Unlock()
for _, v := range req.Validators {
r := app.updateValidator(v)
if r.IsErr() {
app.logger.Error("error updating validators", "r", r)
panic("problem updating validators")
}
}
return types.ResponseInitChain{}
}
func (app *Application) Info(req types.RequestInfo) types.ResponseInfo {
app.mu.Lock()
defer app.mu.Unlock()
func (app *Application) Info(req types.RequestInfo) (resInfo types.ResponseInfo) {
return types.ResponseInfo{
Data: fmt.Sprintf("{\"size\":%v}", app.state.Size),
Version: version.ABCIVersion,
@@ -116,20 +85,8 @@ func (app *Application) Info(req types.RequestInfo) types.ResponseInfo {
}
}
// tx is either "val:pubkey!power" or "key=value" or just arbitrary bytes
func (app *Application) handleTx(tx []byte) *types.ExecTxResult {
// if it starts with "val:", update the validator set
// format is "val:pubkey!power"
if isValidatorTx(tx) {
// update validators in the merkle tree
// and in app.ValUpdates
return app.execValidatorTx(tx)
}
if isPrepareTx(tx) {
return app.execPrepareTx(tx)
}
// tx is either "key=value" or just arbitrary bytes
func (app *Application) HandleTx(tx []byte) *types.ResponseDeliverTx {
var key, value string
parts := bytes.Split(tx, []byte("="))
if len(parts) == 2 {
@@ -156,56 +113,22 @@ func (app *Application) handleTx(tx []byte) *types.ExecTxResult {
},
}
return &types.ExecTxResult{Code: code.CodeTypeOK, Events: events}
}
func (app *Application) Close() error {
app.mu.Lock()
defer app.mu.Unlock()
return app.state.db.Close()
return &types.ResponseDeliverTx{Code: code.CodeTypeOK, Events: events}
}
func (app *Application) FinalizeBlock(req types.RequestFinalizeBlock) types.ResponseFinalizeBlock {
app.mu.Lock()
defer app.mu.Unlock()
// reset valset changes
app.ValUpdates = make([]types.ValidatorUpdate, 0)
// Punish validators who committed equivocation.
for _, ev := range req.ByzantineValidators {
if ev.Type == types.MisbehaviorType_DUPLICATE_VOTE {
addr := string(ev.Validator.Address)
if pubKey, ok := app.valAddrToPubKeyMap[addr]; ok {
app.updateValidator(types.ValidatorUpdate{
PubKey: pubKey,
Power: ev.Validator.Power - 1,
})
app.logger.Info("Decreased val power by 1 because of the equivocation",
"val", addr)
} else {
panic(fmt.Errorf("wanted to punish val %q but can't find it", addr))
}
}
}
respTxs := make([]*types.ExecTxResult, len(req.Txs))
txs := make([]*types.ResponseDeliverTx, len(req.Txs))
for i, tx := range req.Txs {
respTxs[i] = app.handleTx(tx)
txs[i] = app.HandleTx(tx)
}
return types.ResponseFinalizeBlock{TxResults: respTxs, ValidatorUpdates: app.ValUpdates}
return types.ResponseFinalizeBlock{Txs: txs}
}
func (*Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
func (app *Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
return types.ResponseCheckTx{Code: code.CodeTypeOK, GasWanted: 1}
}
func (app *Application) Commit() types.ResponseCommit {
app.mu.Lock()
defer app.mu.Unlock()
// Using a memdb - just return the big endian size of the db
appHash := make([]byte, 8)
binary.PutVarint(appHash, app.state.Size)
@@ -221,245 +144,43 @@ func (app *Application) Commit() types.ResponseCommit {
}
// Returns an associated value or nil if missing.
func (app *Application) Query(reqQuery types.RequestQuery) types.ResponseQuery {
app.mu.Lock()
defer app.mu.Unlock()
if reqQuery.Path == "/val" {
key := []byte("val:" + string(reqQuery.Data))
value, err := app.state.db.Get(key)
if err != nil {
panic(err)
}
return types.ResponseQuery{
Key: reqQuery.Data,
Value: value,
}
}
func (app *Application) Query(reqQuery types.RequestQuery) (resQuery types.ResponseQuery) {
if reqQuery.Prove {
value, err := app.state.db.Get(prefixKey(reqQuery.Data))
if err != nil {
panic(err)
}
resQuery := types.ResponseQuery{
Index: -1,
Key: reqQuery.Data,
Value: value,
Height: app.state.Height,
}
if value == nil {
resQuery.Log = "does not exist"
} else {
resQuery.Log = "exists"
}
resQuery.Index = -1 // TODO make Proof return index
resQuery.Key = reqQuery.Data
resQuery.Value = value
resQuery.Height = app.state.Height
return resQuery
return
}
resQuery.Key = reqQuery.Data
value, err := app.state.db.Get(prefixKey(reqQuery.Data))
if err != nil {
panic(err)
}
resQuery := types.ResponseQuery{
Key: reqQuery.Data,
Value: value,
Height: app.state.Height,
}
if value == nil {
resQuery.Log = "does not exist"
} else {
resQuery.Log = "exists"
}
resQuery.Value = value
resQuery.Height = app.state.Height
return resQuery
}
func (app *Application) PrepareProposal(req types.RequestPrepareProposal) types.ResponsePrepareProposal {
app.mu.Lock()
defer app.mu.Unlock()
func (app *Application) PrepareProposal(
req types.RequestPrepareProposal) types.ResponsePrepareProposal {
return types.ResponsePrepareProposal{
TxRecords: app.substPrepareTx(req.Txs, req.MaxTxBytes),
}
}
func (*Application) ProcessProposal(req types.RequestProcessProposal) types.ResponseProcessProposal {
for _, tx := range req.Txs {
if len(tx) == 0 {
return types.ResponseProcessProposal{Status: types.ResponseProcessProposal_REJECT}
}
}
return types.ResponseProcessProposal{Status: types.ResponseProcessProposal_ACCEPT}
}
//---------------------------------------------
// update validators
func (app *Application) Validators() (validators []types.ValidatorUpdate) {
app.mu.Lock()
defer app.mu.Unlock()
itr, err := app.state.db.Iterator(nil, nil)
if err != nil {
panic(err)
}
for ; itr.Valid(); itr.Next() {
if isValidatorTx(itr.Key()) {
validator := new(types.ValidatorUpdate)
err := types.ReadMessage(bytes.NewBuffer(itr.Value()), validator)
if err != nil {
panic(err)
}
validators = append(validators, *validator)
}
}
if err = itr.Error(); err != nil {
panic(err)
}
return
}
func MakeValSetChangeTx(pubkey cryptoproto.PublicKey, power int64) []byte {
pk, err := encoding.PubKeyFromProto(pubkey)
if err != nil {
panic(err)
}
pubStr := base64.StdEncoding.EncodeToString(pk.Bytes())
return []byte(fmt.Sprintf("val:%s!%d", pubStr, power))
}
func isValidatorTx(tx []byte) bool {
return strings.HasPrefix(string(tx), ValidatorSetChangePrefix)
}
// format is "val:pubkey!power"
// pubkey is a base64-encoded 32-byte ed25519 key
func (app *Application) execValidatorTx(tx []byte) *types.ExecTxResult {
tx = tx[len(ValidatorSetChangePrefix):]
// get the pubkey and power
pubKeyAndPower := strings.Split(string(tx), "!")
if len(pubKeyAndPower) != 2 {
return &types.ExecTxResult{
Code: code.CodeTypeEncodingError,
Log: fmt.Sprintf("Expected 'pubkey!power'. Got %v", pubKeyAndPower)}
}
pubkeyS, powerS := pubKeyAndPower[0], pubKeyAndPower[1]
// decode the pubkey
pubkey, err := base64.StdEncoding.DecodeString(pubkeyS)
if err != nil {
return &types.ExecTxResult{
Code: code.CodeTypeEncodingError,
Log: fmt.Sprintf("Pubkey (%s) is invalid base64", pubkeyS)}
}
// decode the power
power, err := strconv.ParseInt(powerS, 10, 64)
if err != nil {
return &types.ExecTxResult{
Code: code.CodeTypeEncodingError,
Log: fmt.Sprintf("Power (%s) is not an int", powerS)}
}
// update
return app.updateValidator(types.UpdateValidator(pubkey, power, ""))
}
// add, update, or remove a validator
func (app *Application) updateValidator(v types.ValidatorUpdate) *types.ExecTxResult {
pubkey, err := encoding.PubKeyFromProto(v.PubKey)
if err != nil {
panic(fmt.Errorf("can't decode public key: %w", err))
}
key := []byte("val:" + string(pubkey.Bytes()))
if v.Power == 0 {
// remove validator
hasKey, err := app.state.db.Has(key)
if err != nil {
panic(err)
}
if !hasKey {
pubStr := base64.StdEncoding.EncodeToString(pubkey.Bytes())
return &types.ExecTxResult{
Code: code.CodeTypeUnauthorized,
Log: fmt.Sprintf("Cannot remove non-existent validator %s", pubStr)}
}
if err = app.state.db.Delete(key); err != nil {
panic(err)
}
delete(app.valAddrToPubKeyMap, string(pubkey.Address()))
} else {
// add or update validator
value := bytes.NewBuffer(make([]byte, 0))
if err := types.WriteMessage(&v, value); err != nil {
return &types.ExecTxResult{
Code: code.CodeTypeEncodingError,
Log: fmt.Sprintf("error encoding validator: %v", err)}
}
if err = app.state.db.Set(key, value.Bytes()); err != nil {
panic(err)
}
app.valAddrToPubKeyMap[string(pubkey.Address())] = v.PubKey
}
// we only update the changes array if we successfully updated the tree
app.ValUpdates = append(app.ValUpdates, v)
return &types.ExecTxResult{Code: code.CodeTypeOK}
}
// -----------------------------
// prepare proposal machinery
const PreparePrefix = "prepare"
func isPrepareTx(tx []byte) bool {
return bytes.HasPrefix(tx, []byte(PreparePrefix))
}
// execPrepareTx is noop. tx data is considered as placeholder
// and is substitute at the PrepareProposal.
func (app *Application) execPrepareTx(tx []byte) *types.ExecTxResult {
// noop
return &types.ExecTxResult{}
}
// substPrepareTx substitutes all the transactions prefixed with 'prepare' in the
// proposal for transactions with the prefix stripped.
// It marks all of the original transactions as 'REMOVED' so that
// Tendermint will remove them from its mempool.
func (app *Application) substPrepareTx(blockData [][]byte, maxTxBytes int64) []*types.TxRecord {
trs := make([]*types.TxRecord, 0, len(blockData))
var removed []*types.TxRecord
var totalBytes int64
for _, tx := range blockData {
txMod := tx
action := types.TxRecord_UNMODIFIED
if isPrepareTx(tx) {
removed = append(removed, &types.TxRecord{
Tx: tx,
Action: types.TxRecord_REMOVED,
})
txMod = bytes.TrimPrefix(tx, []byte(PreparePrefix))
action = types.TxRecord_ADDED
}
totalBytes += int64(len(txMod))
if totalBytes > maxTxBytes {
break
}
trs = append(trs, &types.TxRecord{
Tx: txMod,
Action: action,
})
}
return append(trs, removed...)
BlockData: req.BlockData}
}

View File

@@ -6,7 +6,6 @@ import (
"sort"
"testing"
"github.com/fortytw2/leaktest"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/libs/log"
@@ -16,6 +15,7 @@ import (
"github.com/tendermint/tendermint/abci/example/code"
abciserver "github.com/tendermint/tendermint/abci/server"
"github.com/tendermint/tendermint/abci/types"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
)
const (
@@ -26,12 +26,12 @@ const (
func testKVStore(t *testing.T, app types.Application, tx []byte, key, value string) {
req := types.RequestFinalizeBlock{Txs: [][]byte{tx}}
ar := app.FinalizeBlock(req)
require.Equal(t, 1, len(ar.TxResults))
require.False(t, ar.TxResults[0].IsErr())
require.Equal(t, 1, len(ar.Txs))
require.False(t, ar.Txs[0].IsErr())
// repeating tx doesn't raise error
ar = app.FinalizeBlock(req)
require.Equal(t, 1, len(ar.TxResults))
require.False(t, ar.TxResults[0].IsErr())
require.Equal(t, 1, len(ar.Txs))
require.False(t, ar.Txs[0].IsErr())
// commit
app.Commit()
@@ -74,7 +74,7 @@ func TestKVStoreKV(t *testing.T) {
func TestPersistentKVStoreKV(t *testing.T) {
dir := t.TempDir()
logger := log.NewNopLogger()
logger := log.NewTestingLogger(t)
kvstore := NewPersistentKVStoreApplication(logger, dir)
key := testKey
@@ -89,7 +89,7 @@ func TestPersistentKVStoreKV(t *testing.T) {
func TestPersistentKVStoreInfo(t *testing.T) {
dir := t.TempDir()
logger := log.NewNopLogger()
logger := log.NewTestingLogger(t)
kvstore := NewPersistentKVStoreApplication(logger, dir)
InitKVStore(kvstore)
@@ -103,7 +103,10 @@ func TestPersistentKVStoreInfo(t *testing.T) {
// make and apply block
height = int64(1)
hash := []byte("foo")
kvstore.FinalizeBlock(types.RequestFinalizeBlock{Hash: hash, Height: height})
header := tmproto.Header{
Height: height,
}
kvstore.FinalizeBlock(types.RequestFinalizeBlock{Hash: hash, Header: header, Height: height})
kvstore.Commit()
resInfo = kvstore.Info(types.RequestInfo{})
@@ -115,7 +118,10 @@ func TestPersistentKVStoreInfo(t *testing.T) {
// add a validator, remove a validator, update a validator
func TestValUpdates(t *testing.T) {
kvstore := NewApplication()
dir := t.TempDir()
logger := log.NewTestingLogger(t)
kvstore := NewPersistentKVStoreApplication(logger, dir)
// init with some validators
total := 10
@@ -185,8 +191,13 @@ func makeApplyBlock(
// make and apply block
height := int64(heightInt)
hash := []byte("foo")
header := tmproto.Header{
Height: height,
}
resFinalizeBlock := kvstore.FinalizeBlock(types.RequestFinalizeBlock{
Hash: hash,
Header: header,
Height: height,
Txs: txs,
})
@@ -199,7 +210,6 @@ func makeApplyBlock(
// order doesn't matter
func valsEqual(t *testing.T, vals1, vals2 []types.ValidatorUpdate) {
t.Helper()
if len(vals1) != len(vals2) {
t.Fatalf("vals dont match in len. got %d, expected %d", len(vals2), len(vals1))
}
@@ -221,11 +231,9 @@ func makeSocketClientServer(
app types.Application,
name string,
) (abciclient.Client, service.Service, error) {
t.Helper()
ctx, cancel := context.WithCancel(ctx)
t.Cleanup(cancel)
t.Cleanup(leaktest.Check(t))
// Start the listener
socket := fmt.Sprintf("unix://%s.sock", name)
@@ -255,8 +263,6 @@ func makeGRPCClientServer(
) (abciclient.Client, service.Service, error) {
ctx, cancel := context.WithCancel(ctx)
t.Cleanup(cancel)
t.Cleanup(leaktest.Check(t))
// Start the listener
socket := fmt.Sprintf("unix://%s.sock", name)
@@ -280,7 +286,7 @@ func makeGRPCClientServer(
func TestClientServer(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.NewNopLogger()
logger := log.NewTestingLogger(t)
// set up socket app
kvstore := NewApplication()
@@ -317,13 +323,13 @@ func runClientTests(ctx context.Context, t *testing.T, client abciclient.Client)
func testClient(ctx context.Context, t *testing.T, app abciclient.Client, tx []byte, key, value string) {
ar, err := app.FinalizeBlock(ctx, types.RequestFinalizeBlock{Txs: [][]byte{tx}})
require.NoError(t, err)
require.Equal(t, 1, len(ar.TxResults))
require.False(t, ar.TxResults[0].IsErr())
require.Equal(t, 1, len(ar.Txs))
require.False(t, ar.Txs[0].IsErr())
// repeating FinalizeBlock doesn't raise error
ar, err = app.FinalizeBlock(ctx, types.RequestFinalizeBlock{Txs: [][]byte{tx}})
require.NoError(t, err)
require.Equal(t, 1, len(ar.TxResults))
require.False(t, ar.TxResults[0].IsErr())
require.Equal(t, 1, len(ar.Txs))
require.False(t, ar.Txs[0].IsErr())
// commit
_, err = app.Commit(ctx)
require.NoError(t, err)

View File

@@ -1,11 +1,20 @@
package kvstore
import (
"bytes"
"encoding/base64"
"fmt"
"strconv"
"strings"
dbm "github.com/tendermint/tm-db"
"github.com/tendermint/tendermint/abci/example/code"
"github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/crypto/encoding"
"github.com/tendermint/tendermint/libs/log"
cryptoproto "github.com/tendermint/tendermint/proto/tendermint/crypto"
ptypes "github.com/tendermint/tendermint/proto/tendermint/types"
)
const (
@@ -17,28 +26,342 @@ const (
var _ types.Application = (*PersistentKVStoreApplication)(nil)
type PersistentKVStoreApplication struct {
*Application
app *Application
// validator set
ValUpdates []types.ValidatorUpdate
valAddrToPubKeyMap map[string]cryptoproto.PublicKey
logger log.Logger
}
func NewPersistentKVStoreApplication(logger log.Logger, dbDir string) *PersistentKVStoreApplication {
db, err := dbm.NewGoLevelDB("kvstore", dbDir)
name := "kvstore"
db, err := dbm.NewGoLevelDB(name, dbDir)
if err != nil {
panic(err)
}
state := loadState(db)
return &PersistentKVStoreApplication{
Application: &Application{
valAddrToPubKeyMap: make(map[string]cryptoproto.PublicKey),
state: loadState(db),
logger: logger,
},
app: &Application{state: state},
valAddrToPubKeyMap: make(map[string]cryptoproto.PublicKey),
logger: logger,
}
}
func (app *PersistentKVStoreApplication) OfferSnapshot(req types.RequestOfferSnapshot) types.ResponseOfferSnapshot {
func (app *PersistentKVStoreApplication) Close() error {
return app.app.state.db.Close()
}
func (app *PersistentKVStoreApplication) Info(req types.RequestInfo) types.ResponseInfo {
res := app.app.Info(req)
res.LastBlockHeight = app.app.state.Height
res.LastBlockAppHash = app.app.state.AppHash
return res
}
// tx is either "val:pubkey!power" or "key=value" or just arbitrary bytes
func (app *PersistentKVStoreApplication) HandleTx(tx []byte) *types.ResponseDeliverTx {
// if it starts with "val:", update the validator set
// format is "val:pubkey!power"
if isValidatorTx(tx) {
// update validators in the merkle tree
// and in app.ValUpdates
return app.execValidatorTx(tx)
}
if isPrepareTx(tx) {
return app.execPrepareTx(tx)
}
// otherwise, update the key-value store
return app.app.HandleTx(tx)
}
func (app *PersistentKVStoreApplication) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
return app.app.CheckTx(req)
}
// Commit will panic if InitChain was not called
func (app *PersistentKVStoreApplication) Commit() types.ResponseCommit {
return app.app.Commit()
}
// When path=/val and data={validator address}, returns the validator update (types.ValidatorUpdate) varint encoded.
// For any other path, returns an associated value or nil if missing.
func (app *PersistentKVStoreApplication) Query(reqQuery types.RequestQuery) (resQuery types.ResponseQuery) {
switch reqQuery.Path {
case "/val":
key := []byte("val:" + string(reqQuery.Data))
value, err := app.app.state.db.Get(key)
if err != nil {
panic(err)
}
resQuery.Key = reqQuery.Data
resQuery.Value = value
return
default:
return app.app.Query(reqQuery)
}
}
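A sketch (reusing this file's imports, not part of this diff) of reading a stored validator back through the "/val" path; the pubKeyBytes argument must match the raw public key bytes that updateValidator below uses as the "val:" key suffix.

// queryValidator is a hypothetical helper: it looks up the ValidatorUpdate
// stored under "val:"+pubKeyBytes and decodes it the same way Validators() does.
func queryValidator(app *PersistentKVStoreApplication, pubKeyBytes []byte) (*types.ValidatorUpdate, error) {
	res := app.Query(types.RequestQuery{Path: "/val", Data: pubKeyBytes})
	if len(res.Value) == 0 {
		return nil, fmt.Errorf("validator %X not found", pubKeyBytes)
	}
	vu := new(types.ValidatorUpdate)
	if err := types.ReadMessage(bytes.NewBuffer(res.Value), vu); err != nil {
		return nil, err
	}
	return vu, nil
}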
// Save the validators in the merkle tree
func (app *PersistentKVStoreApplication) InitChain(req types.RequestInitChain) types.ResponseInitChain {
for _, v := range req.Validators {
r := app.updateValidator(v)
if r.IsErr() {
app.logger.Error("error updating validators", "r", r)
}
}
return types.ResponseInitChain{}
}
// Track the block hash and header information
// Execute transactions
// Update the validator set
func (app *PersistentKVStoreApplication) FinalizeBlock(req types.RequestFinalizeBlock) types.ResponseFinalizeBlock {
// reset valset changes
app.ValUpdates = make([]types.ValidatorUpdate, 0)
// Punish validators who committed equivocation.
for _, ev := range req.ByzantineValidators {
if ev.Type == types.EvidenceType_DUPLICATE_VOTE {
addr := string(ev.Validator.Address)
if pubKey, ok := app.valAddrToPubKeyMap[addr]; ok {
app.updateValidator(types.ValidatorUpdate{
PubKey: pubKey,
Power: ev.Validator.Power - 1,
})
app.logger.Info("Decreased val power by 1 because of the equivocation",
"val", addr)
} else {
app.logger.Error("Wanted to punish val, but can't find it",
"val", addr)
}
}
}
respTxs := make([]*types.ResponseDeliverTx, len(req.Txs))
for i, tx := range req.Txs {
respTxs[i] = app.HandleTx(tx)
}
return types.ResponseFinalizeBlock{Txs: respTxs, ValidatorUpdates: app.ValUpdates}
}
func (app *PersistentKVStoreApplication) ListSnapshots(
req types.RequestListSnapshots) types.ResponseListSnapshots {
return types.ResponseListSnapshots{}
}
func (app *PersistentKVStoreApplication) LoadSnapshotChunk(
req types.RequestLoadSnapshotChunk) types.ResponseLoadSnapshotChunk {
return types.ResponseLoadSnapshotChunk{}
}
func (app *PersistentKVStoreApplication) OfferSnapshot(
req types.RequestOfferSnapshot) types.ResponseOfferSnapshot {
return types.ResponseOfferSnapshot{Result: types.ResponseOfferSnapshot_ABORT}
}
func (app *PersistentKVStoreApplication) ApplySnapshotChunk(req types.RequestApplySnapshotChunk) types.ResponseApplySnapshotChunk {
func (app *PersistentKVStoreApplication) ApplySnapshotChunk(
req types.RequestApplySnapshotChunk) types.ResponseApplySnapshotChunk {
return types.ResponseApplySnapshotChunk{Result: types.ResponseApplySnapshotChunk_ABORT}
}
func (app *PersistentKVStoreApplication) ExtendVote(
req types.RequestExtendVote) types.ResponseExtendVote {
return types.ResponseExtendVote{
VoteExtension: ConstructVoteExtension(req.Vote.ValidatorAddress),
}
}
func (app *PersistentKVStoreApplication) VerifyVoteExtension(
req types.RequestVerifyVoteExtension) types.ResponseVerifyVoteExtension {
return types.RespondVerifyVoteExtension(
app.verifyExtension(req.Vote.ValidatorAddress, req.Vote.VoteExtension))
}
func (app *PersistentKVStoreApplication) PrepareProposal(
req types.RequestPrepareProposal) types.ResponsePrepareProposal {
return types.ResponsePrepareProposal{BlockData: app.substPrepareTx(req.BlockData)}
}
func (app *PersistentKVStoreApplication) ProcessProposal(
req types.RequestProcessProposal) types.ResponseProcessProposal {
for _, tx := range req.Txs {
if len(tx) == 0 {
return types.ResponseProcessProposal{Result: types.ResponseProcessProposal_REJECT}
}
}
return types.ResponseProcessProposal{Result: types.ResponseProcessProposal_ACCEPT}
}
//---------------------------------------------
// update validators
func (app *PersistentKVStoreApplication) Validators() (validators []types.ValidatorUpdate) {
itr, err := app.app.state.db.Iterator(nil, nil)
if err != nil {
panic(err)
}
for ; itr.Valid(); itr.Next() {
if isValidatorTx(itr.Key()) {
validator := new(types.ValidatorUpdate)
err := types.ReadMessage(bytes.NewBuffer(itr.Value()), validator)
if err != nil {
panic(err)
}
validators = append(validators, *validator)
}
}
if err = itr.Error(); err != nil {
panic(err)
}
return
}
func MakeValSetChangeTx(pubkey cryptoproto.PublicKey, power int64) []byte {
pk, err := encoding.PubKeyFromProto(pubkey)
if err != nil {
panic(err)
}
pubStr := base64.StdEncoding.EncodeToString(pk.Bytes())
return []byte(fmt.Sprintf("val:%s!%d", pubStr, power))
}
func isValidatorTx(tx []byte) bool {
return strings.HasPrefix(string(tx), ValidatorSetChangePrefix)
}
// format is "val:pubkey!power"
// pubkey is a base64-encoded 32-byte ed25519 key
func (app *PersistentKVStoreApplication) execValidatorTx(tx []byte) *types.ResponseDeliverTx {
tx = tx[len(ValidatorSetChangePrefix):]
// get the pubkey and power
pubKeyAndPower := strings.Split(string(tx), "!")
if len(pubKeyAndPower) != 2 {
return &types.ResponseDeliverTx{
Code: code.CodeTypeEncodingError,
Log: fmt.Sprintf("Expected 'pubkey!power'. Got %v", pubKeyAndPower)}
}
pubkeyS, powerS := pubKeyAndPower[0], pubKeyAndPower[1]
// decode the pubkey
pubkey, err := base64.StdEncoding.DecodeString(pubkeyS)
if err != nil {
return &types.ResponseDeliverTx{
Code: code.CodeTypeEncodingError,
Log: fmt.Sprintf("Pubkey (%s) is invalid base64", pubkeyS)}
}
// decode the power
power, err := strconv.ParseInt(powerS, 10, 64)
if err != nil {
return &types.ResponseDeliverTx{
Code: code.CodeTypeEncodingError,
Log: fmt.Sprintf("Power (%s) is not an int", powerS)}
}
// update
return app.updateValidator(types.UpdateValidator(pubkey, power, ""))
}
// add, update, or remove a validator
func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate) *types.ResponseDeliverTx {
pubkey, err := encoding.PubKeyFromProto(v.PubKey)
if err != nil {
panic(fmt.Errorf("can't decode public key: %w", err))
}
key := []byte("val:" + string(pubkey.Bytes()))
if v.Power == 0 {
// remove validator
hasKey, err := app.app.state.db.Has(key)
if err != nil {
panic(err)
}
if !hasKey {
pubStr := base64.StdEncoding.EncodeToString(pubkey.Bytes())
return &types.ResponseDeliverTx{
Code: code.CodeTypeUnauthorized,
Log: fmt.Sprintf("Cannot remove non-existent validator %s", pubStr)}
}
if err = app.app.state.db.Delete(key); err != nil {
panic(err)
}
delete(app.valAddrToPubKeyMap, string(pubkey.Address()))
} else {
// add or update validator
value := bytes.NewBuffer(make([]byte, 0))
if err := types.WriteMessage(&v, value); err != nil {
return &types.ResponseDeliverTx{
Code: code.CodeTypeEncodingError,
Log: fmt.Sprintf("error encoding validator: %v", err)}
}
if err = app.app.state.db.Set(key, value.Bytes()); err != nil {
panic(err)
}
app.valAddrToPubKeyMap[string(pubkey.Address())] = v.PubKey
}
// we only update the changes array if we successfully updated the tree
app.ValUpdates = append(app.ValUpdates, v)
return &types.ResponseDeliverTx{Code: code.CodeTypeOK}
}
// -----------------------------
const PreparePrefix = "prepare"
func isPrepareTx(tx []byte) bool {
return strings.HasPrefix(string(tx), PreparePrefix)
}
// execPrepareTx is a no-op. The tx data is treated as a placeholder
// and is substituted during PrepareProposal.
func (app *PersistentKVStoreApplication) execPrepareTx(tx []byte) *types.ResponseDeliverTx {
// noop
return &types.ResponseDeliverTx{}
}
// substPrepareTx substitutes every prepare tx in the block data
// with a zeroed byte slice of equal length (any arbitrary value would do).
func (app *PersistentKVStoreApplication) substPrepareTx(blockData [][]byte) [][]byte {
// TODO: this mechanism will change with the current spec of PrepareProposal
// We now have a special type for marking a tx as changed
for i, tx := range blockData {
if isPrepareTx(tx) {
blockData[i] = make([]byte, len(tx))
}
}
return blockData
}
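For illustration (not part of this diff), a small sketch of what the substitution does to a hypothetical block:

// substPrepareTxSketch is a hypothetical helper showing the effect on made-up txs:
// the prepare tx is replaced by a zeroed slice of equal length, everything else is untouched.
func (app *PersistentKVStoreApplication) substPrepareTxSketch() [][]byte {
	blockData := [][]byte{
		[]byte("prepare-me"),   // has the "prepare" prefix -> becomes 10 zero bytes
		[]byte("name=satoshi"), // left unchanged
	}
	return app.substPrepareTx(blockData)
}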
func ConstructVoteExtension(valAddr []byte) *ptypes.VoteExtension {
return &ptypes.VoteExtension{
AppDataToSign: valAddr,
AppDataSelfAuthenticating: valAddr,
}
}
func (app *PersistentKVStoreApplication) verifyExtension(valAddr []byte, ext *ptypes.VoteExtension) bool {
if ext == nil {
return false
}
canonical := ConstructVoteExtension(valAddr)
if !bytes.Equal(canonical.AppDataToSign, ext.AppDataToSign) {
return false
}
if !bytes.Equal(canonical.AppDataSelfAuthenticating, ext.AppDataSelfAuthenticating) {
return false
}
return true
}
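A minimal sketch (not part of this diff) of the round trip these helpers implement; the validator address bytes are a made-up example.

// verifyExtensionSketch builds the canonical extension for a hypothetical validator
// address and checks that verifyExtension accepts it.
func (app *PersistentKVStoreApplication) verifyExtensionSketch() bool {
	valAddr := []byte("example-validator-addr")
	ext := ConstructVoteExtension(valAddr)
	return app.verifyExtension(valAddr, ext) // true: both fields match the canonical extension
}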

View File

@@ -16,9 +16,10 @@ type GRPCServer struct {
service.BaseService
logger log.Logger
proto string
addr string
server *grpc.Server
proto string
addr string
listener net.Listener
server *grpc.Server
app types.ABCIApplicationServer
}
@@ -27,10 +28,11 @@ type GRPCServer struct {
func NewGRPCServer(logger log.Logger, protoAddr string, app types.ABCIApplicationServer) service.Service {
proto, addr := tmnet.ProtocolAndAddress(protoAddr)
s := &GRPCServer{
logger: logger,
proto: proto,
addr: addr,
app: app,
logger: logger,
proto: proto,
addr: addr,
listener: nil,
app: app,
}
s.BaseService = *service.NewBaseService(logger, "ABCIServer", s)
return s
@@ -38,11 +40,13 @@ func NewGRPCServer(logger log.Logger, protoAddr string, app types.ABCIApplicatio
// OnStart starts the gRPC service.
func (s *GRPCServer) OnStart(ctx context.Context) error {
ln, err := net.Listen(s.proto, s.addr)
if err != nil {
return err
}
s.listener = ln
s.server = grpc.NewServer()
types.RegisterABCIApplicationServer(s.server, s.app)
@@ -53,7 +57,7 @@ func (s *GRPCServer) OnStart(ctx context.Context) error {
s.server.GracefulStop()
}()
if err := s.server.Serve(ln); err != nil {
if err := s.server.Serve(s.listener); err != nil {
s.logger.Error("error serving gRPC server", "err", err)
}
}()
@@ -61,4 +65,6 @@ func (s *GRPCServer) OnStart(ctx context.Context) error {
}
// OnStop stops the gRPC server.
func (s *GRPCServer) OnStop() { s.server.Stop() }
func (s *GRPCServer) OnStop() {
s.server.Stop()
}

View File

@@ -3,7 +3,6 @@ package server
import (
"bufio"
"context"
"errors"
"fmt"
"io"
"net"
@@ -27,21 +26,22 @@ type SocketServer struct {
listener net.Listener
connsMtx sync.Mutex
connsClose map[int]func()
conns map[int]net.Conn
nextConnID int
app types.Application
appMtx sync.Mutex
app types.Application
}
func NewSocketServer(logger log.Logger, protoAddr string, app types.Application) service.Service {
proto, addr := tmnet.ProtocolAndAddress(protoAddr)
s := &SocketServer{
logger: logger,
proto: proto,
addr: addr,
listener: nil,
app: app,
connsClose: make(map[int]func()),
logger: logger,
proto: proto,
addr: addr,
listener: nil,
app: app,
conns: make(map[int]net.Conn),
}
s.BaseService = *service.NewBaseService(logger, "ABCIServer", s)
return s
@@ -67,35 +67,44 @@ func (s *SocketServer) OnStop() {
s.connsMtx.Lock()
defer s.connsMtx.Unlock()
for _, closer := range s.connsClose {
closer()
for id, conn := range s.conns {
delete(s.conns, id)
if err := conn.Close(); err != nil {
s.logger.Error("error closing connection", "id", id, "conn", conn, "err", err)
}
}
}
func (s *SocketServer) addConn(closer func()) int {
func (s *SocketServer) addConn(conn net.Conn) int {
s.connsMtx.Lock()
defer s.connsMtx.Unlock()
connID := s.nextConnID
s.nextConnID++
s.connsClose[connID] = closer
s.conns[connID] = conn
return connID
}
// deletes conn even if close errs
func (s *SocketServer) rmConn(connID int) {
func (s *SocketServer) rmConn(connID int) error {
s.connsMtx.Lock()
defer s.connsMtx.Unlock()
if closer, ok := s.connsClose[connID]; ok {
closer()
delete(s.connsClose, connID)
conn, ok := s.conns[connID]
if !ok {
return fmt.Errorf("connection %d does not exist", connID)
}
delete(s.conns, connID)
return conn.Close()
}
func (s *SocketServer) acceptConnectionsRoutine(ctx context.Context) {
for {
if ctx.Err() != nil {
return
}
// Accept a connection
@@ -109,134 +118,149 @@ func (s *SocketServer) acceptConnectionsRoutine(ctx context.Context) {
continue
}
cctx, ccancel := context.WithCancel(ctx)
connID := s.addConn(ccancel)
s.logger.Info("Accepted a new connection")
s.logger.Info("Accepted a new connection", "id", connID)
connID := s.addConn(conn)
closeConn := make(chan error, 2) // Push to signal connection closed
responses := make(chan *types.Response, 1000) // A channel to buffer responses
once := &sync.Once{}
closer := func(err error) {
ccancel()
once.Do(func() {
if cerr := conn.Close(); cerr != nil {
s.logger.Error("error closing connection",
"id", connID,
"close_err", cerr,
"err", err)
}
s.rmConn(connID)
switch {
case errors.Is(err, context.Canceled):
s.logger.Error("Connection terminated",
"id", connID,
"err", err)
case errors.Is(err, context.DeadlineExceeded):
s.logger.Error("Connection encountered timeout",
"id", connID,
"err", err)
case errors.Is(err, io.EOF):
s.logger.Error("Connection was closed by client",
"id", connID)
case err != nil:
s.logger.Error("Connection error",
"id", connID,
"err", err)
default:
s.logger.Error("Connection was closed",
"id", connID)
}
})
}
// Read requests from conn and deal with them
go s.handleRequests(cctx, closer, conn, responses)
go s.handleRequests(ctx, closeConn, conn, responses)
// Pull responses from 'responses' and write them to conn.
go s.handleResponses(cctx, closer, conn, responses)
go s.handleResponses(ctx, closeConn, conn, responses)
// Wait until signal to close connection
go s.waitForClose(ctx, closeConn, connID)
}
}
func (s *SocketServer) waitForClose(ctx context.Context, closeConn chan error, connID int) {
defer func() {
// Close the connection
if err := s.rmConn(connID); err != nil {
s.logger.Error("error closing connection", "err", err)
}
}()
select {
case <-ctx.Done():
return
case err := <-closeConn:
switch {
case err == io.EOF:
s.logger.Error("Connection was closed by client")
case err != nil:
s.logger.Error("Connection error", "err", err)
default:
// never happens
s.logger.Error("Connection was closed")
}
}
}
// Read requests from conn and deal with them
func (s *SocketServer) handleRequests(
ctx context.Context,
closer func(error),
closeConn chan error,
conn io.Reader,
responses chan<- *types.Response,
) {
var count int
var bufReader = bufio.NewReader(conn)
defer func() {
// make sure to recover from any app-related panics to allow proper socket cleanup
if r := recover(); r != nil {
r := recover()
if r != nil {
const size = 64 << 10
buf := make([]byte, size)
buf = buf[:runtime.Stack(buf, false)]
closer(fmt.Errorf("recovered from panic: %v\n%s", r, buf))
err := fmt.Errorf("recovered from panic: %v\n%s", r, buf)
closeConn <- err
s.appMtx.Unlock()
}
}()
for {
req := &types.Request{}
if err := types.ReadMessage(bufReader, req); err != nil {
closer(fmt.Errorf("error reading message: %w", err))
if ctx.Err() != nil {
return
}
resp := s.processRequest(req)
select {
case <-ctx.Done():
closer(ctx.Err())
var req = &types.Request{}
err := types.ReadMessage(bufReader, req)
if err != nil {
if err == io.EOF {
closeConn <- err
} else {
closeConn <- fmt.Errorf("error reading message: %w", err)
}
return
case responses <- resp:
}
s.appMtx.Lock()
count++
s.handleRequest(req, responses)
s.appMtx.Unlock()
}
}
func (s *SocketServer) processRequest(req *types.Request) *types.Response {
func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types.Response) {
switch r := req.Value.(type) {
case *types.Request_Echo:
return types.ToResponseEcho(r.Echo.Message)
responses <- types.ToResponseEcho(r.Echo.Message)
case *types.Request_Flush:
return types.ToResponseFlush()
responses <- types.ToResponseFlush()
case *types.Request_Info:
return types.ToResponseInfo(s.app.Info(*r.Info))
res := s.app.Info(*r.Info)
responses <- types.ToResponseInfo(res)
case *types.Request_CheckTx:
return types.ToResponseCheckTx(s.app.CheckTx(*r.CheckTx))
res := s.app.CheckTx(*r.CheckTx)
responses <- types.ToResponseCheckTx(res)
case *types.Request_Commit:
return types.ToResponseCommit(s.app.Commit())
res := s.app.Commit()
responses <- types.ToResponseCommit(res)
case *types.Request_Query:
return types.ToResponseQuery(s.app.Query(*r.Query))
res := s.app.Query(*r.Query)
responses <- types.ToResponseQuery(res)
case *types.Request_InitChain:
return types.ToResponseInitChain(s.app.InitChain(*r.InitChain))
res := s.app.InitChain(*r.InitChain)
responses <- types.ToResponseInitChain(res)
case *types.Request_ListSnapshots:
return types.ToResponseListSnapshots(s.app.ListSnapshots(*r.ListSnapshots))
res := s.app.ListSnapshots(*r.ListSnapshots)
responses <- types.ToResponseListSnapshots(res)
case *types.Request_OfferSnapshot:
return types.ToResponseOfferSnapshot(s.app.OfferSnapshot(*r.OfferSnapshot))
res := s.app.OfferSnapshot(*r.OfferSnapshot)
responses <- types.ToResponseOfferSnapshot(res)
case *types.Request_PrepareProposal:
return types.ToResponsePrepareProposal(s.app.PrepareProposal(*r.PrepareProposal))
res := s.app.PrepareProposal(*r.PrepareProposal)
responses <- types.ToResponsePrepareProposal(res)
case *types.Request_ProcessProposal:
return types.ToResponseProcessProposal(s.app.ProcessProposal(*r.ProcessProposal))
res := s.app.ProcessProposal(*r.ProcessProposal)
responses <- types.ToResponseProcessProposal(res)
case *types.Request_LoadSnapshotChunk:
return types.ToResponseLoadSnapshotChunk(s.app.LoadSnapshotChunk(*r.LoadSnapshotChunk))
res := s.app.LoadSnapshotChunk(*r.LoadSnapshotChunk)
responses <- types.ToResponseLoadSnapshotChunk(res)
case *types.Request_ApplySnapshotChunk:
return types.ToResponseApplySnapshotChunk(s.app.ApplySnapshotChunk(*r.ApplySnapshotChunk))
res := s.app.ApplySnapshotChunk(*r.ApplySnapshotChunk)
responses <- types.ToResponseApplySnapshotChunk(res)
case *types.Request_ExtendVote:
return types.ToResponseExtendVote(s.app.ExtendVote(*r.ExtendVote))
res := s.app.ExtendVote(*r.ExtendVote)
responses <- types.ToResponseExtendVote(res)
case *types.Request_VerifyVoteExtension:
return types.ToResponseVerifyVoteExtension(s.app.VerifyVoteExtension(*r.VerifyVoteExtension))
res := s.app.VerifyVoteExtension(*r.VerifyVoteExtension)
responses <- types.ToResponseVerifyVoteExtension(res)
case *types.Request_FinalizeBlock:
return types.ToResponseFinalizeBlock(s.app.FinalizeBlock(*r.FinalizeBlock))
res := s.app.FinalizeBlock(*r.FinalizeBlock)
responses <- types.ToResponseFinalizeBlock(res)
default:
return types.ToResponseException("Unknown request")
responses <- types.ToResponseException("Unknown request")
}
}
// Pull responses from 'responses' and write them to conn.
func (s *SocketServer) handleResponses(
ctx context.Context,
closer func(error),
closeConn chan error,
conn io.Writer,
responses <-chan *types.Response,
) {
@@ -244,15 +268,21 @@ func (s *SocketServer) handleResponses(
for {
select {
case <-ctx.Done():
closer(ctx.Err())
return
case res := <-responses:
if err := types.WriteMessage(res, bw); err != nil {
closer(fmt.Errorf("error writing message: %w", err))
select {
case <-ctx.Done():
case closeConn <- fmt.Errorf("error writing message: %w", err):
}
return
}
if err := bw.Flush(); err != nil {
closer(fmt.Errorf("error writing message: %w", err))
select {
case <-ctx.Done():
case closeConn <- fmt.Errorf("error flushing write buffer: %w", err):
}
return
}
}
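As a sketch of wiring the socket server above to an application (not part of this diff, and assuming the refactored service.Service exposes Start(ctx), as the OnStart signature suggests); the socket address is an arbitrary example.

package main

import (
	"context"

	"github.com/tendermint/tendermint/abci/example/kvstore"
	"github.com/tendermint/tendermint/abci/server"
	"github.com/tendermint/tendermint/libs/log"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// "unix:///tmp/abci-demo.sock" is an arbitrary example address.
	srv := server.NewSocketServer(log.NewNopLogger(), "unix:///tmp/abci-demo.sock", kvstore.NewApplication())
	if err := srv.Start(ctx); err != nil {
		panic(err)
	}
	<-ctx.Done() // serve until the context is canceled
}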

View File

@@ -51,7 +51,7 @@ func Commit(ctx context.Context, client abciclient.Client, hashExp []byte) error
func FinalizeBlock(ctx context.Context, client abciclient.Client, txBytes [][]byte, codeExp []uint32, dataExp []byte) error {
res, _ := client.FinalizeBlock(ctx, types.RequestFinalizeBlock{Txs: txBytes})
for i, tx := range res.TxResults {
for i, tx := range res.Txs {
code, data, log := tx.Code, tx.Data, tx.Log
if code != codeExp[i] {
fmt.Println("Failed test: FinalizeBlock")

View File

@@ -1,10 +1,10 @@
echo hello
info
commit
finalize_block "abc"
deliver_tx "abc"
info
commit
query "abc"
finalize_block "def=xyz" "ghi=123"
deliver_tx "def=xyz"
commit
query "def"

View File

@@ -12,7 +12,7 @@
-> code: OK
-> data.hex: 0x0000000000000000
> finalize_block "abc"
> deliver_tx "abc"
-> code: OK
> info
@@ -33,14 +33,12 @@
-> value: abc
-> value.hex: 616263
> finalize_block "def=xyz" "ghi=123"
-> code: OK
> finalize_block "def=xyz" "ghi=123"
> deliver_tx "def=xyz"
-> code: OK
> commit
-> code: OK
-> data.hex: 0x0600000000000000
-> data.hex: 0x0400000000000000
> query "def"
-> code: OK

View File

@@ -1,7 +1,7 @@
check_tx 0x00
check_tx 0xff
finalize_block 0x00
deliver_tx 0x00
check_tx 0x00
finalize_block 0x01
finalize_block 0x04
deliver_tx 0x01
deliver_tx 0x04
info

View File

@@ -4,20 +4,20 @@
> check_tx 0xff
-> code: OK
> finalize_block 0x00
> deliver_tx 0x00
-> code: OK
> check_tx 0x00
-> code: OK
> finalize_block 0x01
> deliver_tx 0x01
-> code: OK
> finalize_block 0x04
> deliver_tx 0x04
-> code: OK
> info
-> code: OK
-> data: {"size":3}
-> data.hex: 0x7B2273697A65223A337D
-> data: {"hashes":0,"txs":3}
-> data.hex: 0x7B22686173686573223A302C22747873223A337D

View File

@@ -4,7 +4,6 @@ import (
"context"
)
//go:generate ../../scripts/mockery_generate.sh Application
// Application is an interface that enables any finite, deterministic state machine
// to be driven by a blockchain-based replication engine via the ABCI.
// All methods take a RequestXxx argument and return a ResponseXxx argument,
@@ -42,7 +41,8 @@ type Application interface {
var _ Application = (*BaseApplication)(nil)
type BaseApplication struct{}
type BaseApplication struct {
}
func NewBaseApplication() *BaseApplication {
return &BaseApplication{}
@@ -66,7 +66,7 @@ func (BaseApplication) ExtendVote(req RequestExtendVote) ResponseExtendVote {
func (BaseApplication) VerifyVoteExtension(req RequestVerifyVoteExtension) ResponseVerifyVoteExtension {
return ResponseVerifyVoteExtension{
Status: ResponseVerifyVoteExtension_ACCEPT,
Result: ResponseVerifyVoteExtension_ACCEPT,
}
}
@@ -95,32 +95,20 @@ func (BaseApplication) ApplySnapshotChunk(req RequestApplySnapshotChunk) Respons
}
func (BaseApplication) PrepareProposal(req RequestPrepareProposal) ResponsePrepareProposal {
trs := make([]*TxRecord, 0, len(req.Txs))
var totalBytes int64
for _, tx := range req.Txs {
totalBytes += int64(len(tx))
if totalBytes > req.MaxTxBytes {
break
}
trs = append(trs, &TxRecord{
Action: TxRecord_UNMODIFIED,
Tx: tx,
})
}
return ResponsePrepareProposal{TxRecords: trs}
return ResponsePrepareProposal{}
}
func (BaseApplication) ProcessProposal(req RequestProcessProposal) ResponseProcessProposal {
return ResponseProcessProposal{Status: ResponseProcessProposal_ACCEPT}
return ResponseProcessProposal{}
}
func (BaseApplication) FinalizeBlock(req RequestFinalizeBlock) ResponseFinalizeBlock {
txs := make([]*ExecTxResult, len(req.Txs))
txs := make([]*ResponseDeliverTx, len(req.Txs))
for i := range req.Txs {
txs[i] = &ExecTxResult{Code: CodeTypeOK}
txs[i] = &ResponseDeliverTx{Code: CodeTypeOK}
}
return ResponseFinalizeBlock{
TxResults: txs,
Txs: txs,
}
}
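A short sketch (not part of this diff) of how these defaults are typically consumed: embed BaseApplication and override only the methods the application cares about; demoApp is a made-up type.

package demo

import "github.com/tendermint/tendermint/abci/types"

// demoApp inherits every default above from the embedded BaseApplication and
// overrides only Info.
type demoApp struct {
	types.BaseApplication
}

func (demoApp) Info(req types.RequestInfo) types.ResponseInfo {
	return types.ResponseInfo{Data: "demo"}
}

// demoApp still satisfies the full Application interface via the defaults.
var _ types.Application = demoApp{}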

View File

@@ -13,7 +13,7 @@ import (
)
func TestMarshalJSON(t *testing.T) {
b, err := json.Marshal(&ExecTxResult{Code: 1})
b, err := json.Marshal(&ResponseDeliverTx{})
assert.NoError(t, err)
// include empty fields.
assert.True(t, strings.Contains(string(b), "code"))

View File

@@ -1,209 +0,0 @@
// Code generated by mockery. DO NOT EDIT.
package mocks
import (
mock "github.com/stretchr/testify/mock"
types "github.com/tendermint/tendermint/abci/types"
)
// Application is an autogenerated mock type for the Application type
type Application struct {
mock.Mock
}
// ApplySnapshotChunk provides a mock function with given fields: _a0
func (_m *Application) ApplySnapshotChunk(_a0 types.RequestApplySnapshotChunk) types.ResponseApplySnapshotChunk {
ret := _m.Called(_a0)
var r0 types.ResponseApplySnapshotChunk
if rf, ok := ret.Get(0).(func(types.RequestApplySnapshotChunk) types.ResponseApplySnapshotChunk); ok {
r0 = rf(_a0)
} else {
r0 = ret.Get(0).(types.ResponseApplySnapshotChunk)
}
return r0
}
// CheckTx provides a mock function with given fields: _a0
func (_m *Application) CheckTx(_a0 types.RequestCheckTx) types.ResponseCheckTx {
ret := _m.Called(_a0)
var r0 types.ResponseCheckTx
if rf, ok := ret.Get(0).(func(types.RequestCheckTx) types.ResponseCheckTx); ok {
r0 = rf(_a0)
} else {
r0 = ret.Get(0).(types.ResponseCheckTx)
}
return r0
}
// Commit provides a mock function with given fields:
func (_m *Application) Commit() types.ResponseCommit {
ret := _m.Called()
var r0 types.ResponseCommit
if rf, ok := ret.Get(0).(func() types.ResponseCommit); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(types.ResponseCommit)
}
return r0
}
// ExtendVote provides a mock function with given fields: _a0
func (_m *Application) ExtendVote(_a0 types.RequestExtendVote) types.ResponseExtendVote {
ret := _m.Called(_a0)
var r0 types.ResponseExtendVote
if rf, ok := ret.Get(0).(func(types.RequestExtendVote) types.ResponseExtendVote); ok {
r0 = rf(_a0)
} else {
r0 = ret.Get(0).(types.ResponseExtendVote)
}
return r0
}
// FinalizeBlock provides a mock function with given fields: _a0
func (_m *Application) FinalizeBlock(_a0 types.RequestFinalizeBlock) types.ResponseFinalizeBlock {
ret := _m.Called(_a0)
var r0 types.ResponseFinalizeBlock
if rf, ok := ret.Get(0).(func(types.RequestFinalizeBlock) types.ResponseFinalizeBlock); ok {
r0 = rf(_a0)
} else {
r0 = ret.Get(0).(types.ResponseFinalizeBlock)
}
return r0
}
// Info provides a mock function with given fields: _a0
func (_m *Application) Info(_a0 types.RequestInfo) types.ResponseInfo {
ret := _m.Called(_a0)
var r0 types.ResponseInfo
if rf, ok := ret.Get(0).(func(types.RequestInfo) types.ResponseInfo); ok {
r0 = rf(_a0)
} else {
r0 = ret.Get(0).(types.ResponseInfo)
}
return r0
}
// InitChain provides a mock function with given fields: _a0
func (_m *Application) InitChain(_a0 types.RequestInitChain) types.ResponseInitChain {
ret := _m.Called(_a0)
var r0 types.ResponseInitChain
if rf, ok := ret.Get(0).(func(types.RequestInitChain) types.ResponseInitChain); ok {
r0 = rf(_a0)
} else {
r0 = ret.Get(0).(types.ResponseInitChain)
}
return r0
}
// ListSnapshots provides a mock function with given fields: _a0
func (_m *Application) ListSnapshots(_a0 types.RequestListSnapshots) types.ResponseListSnapshots {
ret := _m.Called(_a0)
var r0 types.ResponseListSnapshots
if rf, ok := ret.Get(0).(func(types.RequestListSnapshots) types.ResponseListSnapshots); ok {
r0 = rf(_a0)
} else {
r0 = ret.Get(0).(types.ResponseListSnapshots)
}
return r0
}
// LoadSnapshotChunk provides a mock function with given fields: _a0
func (_m *Application) LoadSnapshotChunk(_a0 types.RequestLoadSnapshotChunk) types.ResponseLoadSnapshotChunk {
ret := _m.Called(_a0)
var r0 types.ResponseLoadSnapshotChunk
if rf, ok := ret.Get(0).(func(types.RequestLoadSnapshotChunk) types.ResponseLoadSnapshotChunk); ok {
r0 = rf(_a0)
} else {
r0 = ret.Get(0).(types.ResponseLoadSnapshotChunk)
}
return r0
}
// OfferSnapshot provides a mock function with given fields: _a0
func (_m *Application) OfferSnapshot(_a0 types.RequestOfferSnapshot) types.ResponseOfferSnapshot {
ret := _m.Called(_a0)
var r0 types.ResponseOfferSnapshot
if rf, ok := ret.Get(0).(func(types.RequestOfferSnapshot) types.ResponseOfferSnapshot); ok {
r0 = rf(_a0)
} else {
r0 = ret.Get(0).(types.ResponseOfferSnapshot)
}
return r0
}
// PrepareProposal provides a mock function with given fields: _a0
func (_m *Application) PrepareProposal(_a0 types.RequestPrepareProposal) types.ResponsePrepareProposal {
ret := _m.Called(_a0)
var r0 types.ResponsePrepareProposal
if rf, ok := ret.Get(0).(func(types.RequestPrepareProposal) types.ResponsePrepareProposal); ok {
r0 = rf(_a0)
} else {
r0 = ret.Get(0).(types.ResponsePrepareProposal)
}
return r0
}
// ProcessProposal provides a mock function with given fields: _a0
func (_m *Application) ProcessProposal(_a0 types.RequestProcessProposal) types.ResponseProcessProposal {
ret := _m.Called(_a0)
var r0 types.ResponseProcessProposal
if rf, ok := ret.Get(0).(func(types.RequestProcessProposal) types.ResponseProcessProposal); ok {
r0 = rf(_a0)
} else {
r0 = ret.Get(0).(types.ResponseProcessProposal)
}
return r0
}
// Query provides a mock function with given fields: _a0
func (_m *Application) Query(_a0 types.RequestQuery) types.ResponseQuery {
ret := _m.Called(_a0)
var r0 types.ResponseQuery
if rf, ok := ret.Get(0).(func(types.RequestQuery) types.ResponseQuery); ok {
r0 = rf(_a0)
} else {
r0 = ret.Get(0).(types.ResponseQuery)
}
return r0
}
// VerifyVoteExtension provides a mock function with given fields: _a0
func (_m *Application) VerifyVoteExtension(_a0 types.RequestVerifyVoteExtension) types.ResponseVerifyVoteExtension {
ret := _m.Called(_a0)
var r0 types.ResponseVerifyVoteExtension
if rf, ok := ret.Get(0).(func(types.RequestVerifyVoteExtension) types.ResponseVerifyVoteExtension); ok {
r0 = rf(_a0)
} else {
r0 = ret.Get(0).(types.ResponseVerifyVoteExtension)
}
return r0
}

View File

@@ -1,175 +0,0 @@
package mocks
import (
types "github.com/tendermint/tendermint/abci/types"
)
// BaseMock provides a wrapper around the generated Application mock and a BaseApplication.
// BaseMock first tries to use the mock's implementation of the method.
// If no functionality was provided for the mock by the user, BaseMock dispatches
// to the BaseApplication and uses its functionality.
// BaseMock allows users to provide mocked functionality for only the methods that matter
// for their test while avoiding a panic if the code calls Application methods that are
// not relevant to the test.
type BaseMock struct {
base *types.BaseApplication
*Application
}
func NewBaseMock() BaseMock {
return BaseMock{
base: types.NewBaseApplication(),
Application: new(Application),
}
}
// Info/Query Connection
// Return application info
func (m BaseMock) Info(input types.RequestInfo) (ret types.ResponseInfo) {
defer func() {
if r := recover(); r != nil {
ret = m.base.Info(input)
}
}()
ret = m.Application.Info(input)
return ret
}
func (m BaseMock) Query(input types.RequestQuery) (ret types.ResponseQuery) {
defer func() {
if r := recover(); r != nil {
ret = m.base.Query(input)
}
}()
ret = m.Application.Query(input)
return ret
}
// Mempool Connection
// Validate a tx for the mempool
func (m BaseMock) CheckTx(input types.RequestCheckTx) (ret types.ResponseCheckTx) {
defer func() {
if r := recover(); r != nil {
ret = m.base.CheckTx(input)
}
}()
ret = m.Application.CheckTx(input)
return ret
}
// Consensus Connection
// Initialize blockchain w validators/other info from TendermintCore
func (m BaseMock) InitChain(input types.RequestInitChain) (ret types.ResponseInitChain) {
defer func() {
if r := recover(); r != nil {
ret = m.base.InitChain(input)
}
}()
ret = m.Application.InitChain(input)
return ret
}
func (m BaseMock) PrepareProposal(input types.RequestPrepareProposal) (ret types.ResponsePrepareProposal) {
defer func() {
if r := recover(); r != nil {
ret = m.base.PrepareProposal(input)
}
}()
ret = m.Application.PrepareProposal(input)
return ret
}
func (m BaseMock) ProcessProposal(input types.RequestProcessProposal) (ret types.ResponseProcessProposal) {
defer func() {
if r := recover(); r != nil {
ret = m.base.ProcessProposal(input)
}
}()
ret = m.Application.ProcessProposal(input)
return ret
}
// Commit the state and return the application Merkle root hash
func (m BaseMock) Commit() (ret types.ResponseCommit) {
defer func() {
if r := recover(); r != nil {
ret = m.base.Commit()
}
}()
ret = m.Application.Commit()
return ret
}
// Create application specific vote extension
func (m BaseMock) ExtendVote(input types.RequestExtendVote) (ret types.ResponseExtendVote) {
defer func() {
if r := recover(); r != nil {
ret = m.base.ExtendVote(input)
}
}()
ret = m.Application.ExtendVote(input)
return ret
}
// Verify application's vote extension data
func (m BaseMock) VerifyVoteExtension(input types.RequestVerifyVoteExtension) (ret types.ResponseVerifyVoteExtension) {
defer func() {
if r := recover(); r != nil {
ret = m.base.VerifyVoteExtension(input)
}
}()
ret = m.Application.VerifyVoteExtension(input)
return ret
}
// State Sync Connection
// List available snapshots
func (m BaseMock) ListSnapshots(input types.RequestListSnapshots) (ret types.ResponseListSnapshots) {
defer func() {
if r := recover(); r != nil {
ret = m.base.ListSnapshots(input)
}
}()
ret = m.Application.ListSnapshots(input)
return ret
}
func (m BaseMock) OfferSnapshot(input types.RequestOfferSnapshot) (ret types.ResponseOfferSnapshot) {
defer func() {
if r := recover(); r != nil {
ret = m.base.OfferSnapshot(input)
}
}()
ret = m.Application.OfferSnapshot(input)
return ret
}
func (m BaseMock) LoadSnapshotChunk(input types.RequestLoadSnapshotChunk) (ret types.ResponseLoadSnapshotChunk) {
defer func() {
if r := recover(); r != nil {
ret = m.base.LoadSnapshotChunk(input)
}
}()
ret = m.Application.LoadSnapshotChunk(input)
return ret
}
func (m BaseMock) ApplySnapshotChunk(input types.RequestApplySnapshotChunk) (ret types.ResponseApplySnapshotChunk) {
defer func() {
if r := recover(); r != nil {
ret = m.base.ApplySnapshotChunk(input)
}
}()
ret = m.Application.ApplySnapshotChunk(input)
return ret
}
func (m BaseMock) FinalizeBlock(input types.RequestFinalizeBlock) (ret types.ResponseFinalizeBlock) {
defer func() {
if r := recover(); r != nil {
ret = m.base.FinalizeBlock(input)
}
}()
ret = m.Application.FinalizeBlock(input)
return ret
}

View File

@@ -5,6 +5,8 @@ import (
"encoding/json"
"github.com/gogo/protobuf/jsonpb"
types "github.com/tendermint/tendermint/proto/tendermint/types"
)
const (
@@ -31,16 +33,6 @@ func (r ResponseDeliverTx) IsErr() bool {
return r.Code != CodeTypeOK
}
// IsOK returns true if Code is OK.
func (r ExecTxResult) IsOK() bool {
return r.Code == CodeTypeOK
}
// IsErr returns true if Code is something other than OK.
func (r ExecTxResult) IsErr() bool {
return r.Code != CodeTypeOK
}
// IsOK returns true if Code is OK.
func (r ResponseQuery) IsOK() bool {
return r.Code == CodeTypeOK
@@ -51,27 +43,24 @@ func (r ResponseQuery) IsErr() bool {
return r.Code != CodeTypeOK
}
func (r ResponseProcessProposal) IsAccepted() bool {
return r.Status == ResponseProcessProposal_ACCEPT
}
func (r ResponseProcessProposal) IsStatusUnknown() bool {
return r.Status == ResponseProcessProposal_UNKNOWN
}
// IsStatusUnknown returns true if Code is Unknown
func (r ResponseVerifyVoteExtension) IsStatusUnknown() bool {
return r.Status == ResponseVerifyVoteExtension_UNKNOWN
// IsUnknown returns true if Code is Unknown
func (r ResponseVerifyVoteExtension) IsUnknown() bool {
return r.Result == ResponseVerifyVoteExtension_UNKNOWN
}
// IsOK returns true if Code is OK
func (r ResponseVerifyVoteExtension) IsOK() bool {
return r.Status == ResponseVerifyVoteExtension_ACCEPT
return r.Result == ResponseVerifyVoteExtension_ACCEPT
}
// IsErr returns true if Code is something other than OK.
func (r ResponseVerifyVoteExtension) IsErr() bool {
return r.Status != ResponseVerifyVoteExtension_ACCEPT
return r.Result != ResponseVerifyVoteExtension_ACCEPT
}
// IsOK returns true if Code is OK
func (r ResponseProcessProposal) IsOK() bool {
return r.Result == ResponseProcessProposal_ACCEPT
}
//---------------------------------------------------------------------------
@@ -155,40 +144,21 @@ var _ jsonRoundTripper = (*EventAttribute)(nil)
// -----------------------------------------------
// construct Result data
func RespondExtendVote(appDataToSign, appDataSelfAuthenticating []byte) ResponseExtendVote {
return ResponseExtendVote{
VoteExtension: &types.VoteExtension{
AppDataToSign: appDataToSign,
AppDataSelfAuthenticating: appDataSelfAuthenticating,
},
}
}
func RespondVerifyVoteExtension(ok bool) ResponseVerifyVoteExtension {
status := ResponseVerifyVoteExtension_REJECT
result := ResponseVerifyVoteExtension_REJECT
if ok {
status = ResponseVerifyVoteExtension_ACCEPT
result = ResponseVerifyVoteExtension_ACCEPT
}
return ResponseVerifyVoteExtension{
Status: status,
Result: result,
}
}
// deterministicExecTxResult constructs a copy of response that omits
// non-deterministic fields. The input response is not modified.
func deterministicExecTxResult(response *ExecTxResult) *ExecTxResult {
return &ExecTxResult{
Code: response.Code,
Data: response.Data,
GasWanted: response.GasWanted,
GasUsed: response.GasUsed,
}
}
// MarshalTxResults encodes the TxResults as a list of byte
// slices. It strips off the non-deterministic pieces of the TxResults
// so that the resulting data can be used for hash comparisons and used
// in Merkle proofs.
func MarshalTxResults(r []*ExecTxResult) ([][]byte, error) {
s := make([][]byte, len(r))
for i, e := range r {
d := deterministicExecTxResult(e)
b, err := d.Marshal()
if err != nil {
return nil, err
}
s[i] = b
}
return s, nil
}

File diff suppressed because it is too large

View File

@@ -1,74 +0,0 @@
package types_test
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/crypto/merkle"
)
func TestHashAndProveResults(t *testing.T) {
trs := []*abci.ExecTxResult{
// Note, these tests rely on the first two entries being in this order.
{Code: 0, Data: nil},
{Code: 0, Data: []byte{}},
{Code: 0, Data: []byte("one")},
{Code: 14, Data: nil},
{Code: 14, Data: []byte("foo")},
{Code: 14, Data: []byte("bar")},
}
// Nil and []byte{} should produce the same bytes
bz0, err := trs[0].Marshal()
require.NoError(t, err)
bz1, err := trs[1].Marshal()
require.NoError(t, err)
require.Equal(t, bz0, bz1)
// Make sure that we can get a root hash from results and verify proofs.
rs, err := abci.MarshalTxResults(trs)
require.NoError(t, err)
root := merkle.HashFromByteSlices(rs)
assert.NotEmpty(t, root)
_, proofs := merkle.ProofsFromByteSlices(rs)
for i, tr := range trs {
bz, err := tr.Marshal()
require.NoError(t, err)
valid := proofs[i].Verify(root, bz)
assert.NoError(t, valid, "%d", i)
}
}
func TestHashDeterministicFieldsOnly(t *testing.T) {
tr1 := abci.ExecTxResult{
Code: 1,
Data: []byte("transaction"),
Log: "nondeterministic data: abc",
Info: "nondeterministic data: abc",
GasWanted: 1000,
GasUsed: 1000,
Events: []abci.Event{},
Codespace: "nondeterministic.data.abc",
}
tr2 := abci.ExecTxResult{
Code: 1,
Data: []byte("transaction"),
Log: "nondeterministic data: def",
Info: "nondeterministic data: def",
GasWanted: 1000,
GasUsed: 1000,
Events: []abci.Event{},
Codespace: "nondeterministic.data.def",
}
r1, err := abci.MarshalTxResults([]*abci.ExecTxResult{&tr1})
require.NoError(t, err)
r2, err := abci.MarshalTxResults([]*abci.ExecTxResult{&tr2})
require.NoError(t, err)
require.Equal(t, merkle.HashFromByteSlices(r1), merkle.HashFromByteSlices(r2))
}

View File

@@ -1,9 +1,14 @@
version: v1
# The version of the generation template (required).
# The only currently-valid value is v1beta1.
version: v1beta1
# The plugins to run.
plugins:
# The name of the plugin.
- name: gogofaster
out: ./proto/
opt:
- Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types
- Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration
- plugins=grpc
- paths=source_relative
# The directory where the generated proto output will be written.
# The directory is relative to where the generation tool was run.
out: proto
# Set options to assign import paths to the well-known types
# and to enable service generation.
opt: Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types,Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration,plugins=grpc,paths=source_relative

View File

@@ -1,3 +0,0 @@
version: v1
directories:
- proto

View File

@@ -1,11 +1,16 @@
version: v1
deps:
- buf.build/gogo/protobuf
breaking:
use:
- FILE
version: v1beta1
build:
roots:
- proto
- third_party/proto
lint:
use:
- BASIC
- FILE_LOWER_SNAKE_CASE
- UNARY_RPC
ignore:
- gogoproto
breaking:
use:
- FILE

View File

@@ -6,26 +6,34 @@ import (
"github.com/tendermint/tendermint/libs/log"
)
const (
var (
nodeRPCAddr string
profAddr string
frequency uint
flagNodeRPCAddr = "rpc-laddr"
flagProfAddr = "pprof-laddr"
flagFrequency = "frequency"
logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo)
)
func GetDebugCommand(logger log.Logger) *cobra.Command {
cmd := &cobra.Command{
Use: "debug",
Short: "A utility to kill or watch a Tendermint process while aggregating debugging data",
}
cmd.PersistentFlags().SortFlags = true
cmd.PersistentFlags().String(
// DebugCmd defines the root command containing subcommands that assist in
// debugging running Tendermint processes.
var DebugCmd = &cobra.Command{
Use: "debug",
Short: "A utility to kill or watch a Tendermint process while aggregating debugging data",
}
func init() {
DebugCmd.PersistentFlags().SortFlags = true
DebugCmd.PersistentFlags().StringVar(
&nodeRPCAddr,
flagNodeRPCAddr,
"tcp://localhost:26657",
"the Tendermint node's RPC address <host>:<port>)",
"the Tendermint node's RPC address (<host>:<port>)",
)
cmd.AddCommand(getKillCmd(logger))
cmd.AddCommand(getDumpCmd(logger))
return cmd
DebugCmd.AddCommand(killCmd)
DebugCmd.AddCommand(dumpCmd)
}

View File

@@ -13,102 +13,78 @@ import (
"github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/libs/cli"
"github.com/tendermint/tendermint/libs/log"
rpchttp "github.com/tendermint/tendermint/rpc/client/http"
)
func getDumpCmd(logger log.Logger) *cobra.Command {
cmd := &cobra.Command{
Use: "dump [output-directory]",
Short: "Continuously poll a Tendermint process and dump debugging data into a single location",
Long: `Continuously poll a Tendermint process and dump debugging data into a single
var dumpCmd = &cobra.Command{
Use: "dump [output-directory]",
Short: "Continuously poll a Tendermint process and dump debugging data into a single location",
Long: `Continuously poll a Tendermint process and dump debugging data into a single
location at a specified frequency. At each frequency interval, an archived and compressed
file will contain node debugging information including the goroutine and heap profiles
if enabled.`,
Args: cobra.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
outDir := args[0]
if outDir == "" {
return errors.New("invalid output directory")
}
frequency, err := cmd.Flags().GetUint(flagFrequency)
if err != nil {
return fmt.Errorf("flag %q not defined: %w", flagFrequency, err)
}
Args: cobra.ExactArgs(1),
RunE: dumpCmdHandler,
}
if frequency == 0 {
return errors.New("frequency must be positive")
}
nodeRPCAddr, err := cmd.Flags().GetString(flagNodeRPCAddr)
if err != nil {
return fmt.Errorf("flag %q not defined: %w", flagNodeRPCAddr, err)
}
profAddr, err := cmd.Flags().GetString(flagProfAddr)
if err != nil {
return fmt.Errorf("flag %q not defined: %w", flagProfAddr, err)
}
if _, err := os.Stat(outDir); os.IsNotExist(err) {
if err := os.Mkdir(outDir, os.ModePerm); err != nil {
return fmt.Errorf("failed to create output directory: %w", err)
}
}
rpc, err := rpchttp.New(nodeRPCAddr)
if err != nil {
return fmt.Errorf("failed to create new http client: %w", err)
}
ctx := cmd.Context()
home := viper.GetString(cli.HomeFlag)
conf := config.DefaultConfig()
conf = conf.SetRoot(home)
config.EnsureRoot(conf.RootDir)
dumpArgs := dumpDebugDataArgs{
conf: conf,
outDir: outDir,
profAddr: profAddr,
}
dumpDebugData(ctx, logger, rpc, dumpArgs)
ticker := time.NewTicker(time.Duration(frequency) * time.Second)
for range ticker.C {
dumpDebugData(ctx, logger, rpc, dumpArgs)
}
return nil
},
}
cmd.Flags().Uint(
func init() {
dumpCmd.Flags().UintVar(
&frequency,
flagFrequency,
30,
"the frequency (seconds) in which to poll, aggregate and dump Tendermint debug data",
)
cmd.Flags().String(
dumpCmd.Flags().StringVar(
&profAddr,
flagProfAddr,
"",
"the profiling server address (<host>:<port>)",
)
return cmd
}
type dumpDebugDataArgs struct {
conf *config.Config
outDir string
profAddr string
func dumpCmdHandler(cmd *cobra.Command, args []string) error {
outDir := args[0]
if outDir == "" {
return errors.New("invalid output directory")
}
if frequency == 0 {
return errors.New("frequency must be positive")
}
if _, err := os.Stat(outDir); os.IsNotExist(err) {
if err := os.Mkdir(outDir, os.ModePerm); err != nil {
return fmt.Errorf("failed to create output directory: %w", err)
}
}
rpc, err := rpchttp.New(nodeRPCAddr)
if err != nil {
return fmt.Errorf("failed to create new http client: %w", err)
}
ctx := cmd.Context()
home := viper.GetString(cli.HomeFlag)
conf := config.DefaultConfig()
conf = conf.SetRoot(home)
config.EnsureRoot(conf.RootDir)
dumpDebugData(ctx, outDir, conf, rpc)
ticker := time.NewTicker(time.Duration(frequency) * time.Second)
for range ticker.C {
dumpDebugData(ctx, outDir, conf, rpc)
}
return nil
}
func dumpDebugData(ctx context.Context, logger log.Logger, rpc *rpchttp.HTTP, args dumpDebugDataArgs) {
func dumpDebugData(ctx context.Context, outDir string, conf *config.Config, rpc *rpchttp.HTTP) {
start := time.Now().UTC()
tmpDir, err := os.MkdirTemp(args.outDir, "tendermint_debug_tmp")
tmpDir, err := os.MkdirTemp(outDir, "tendermint_debug_tmp")
if err != nil {
logger.Error("failed to create temporary directory", "dir", tmpDir, "error", err)
return
@@ -134,26 +110,26 @@ func dumpDebugData(ctx context.Context, logger log.Logger, rpc *rpchttp.HTTP, ar
}
logger.Info("copying node WAL...")
if err := copyWAL(args.conf, tmpDir); err != nil {
if err := copyWAL(conf, tmpDir); err != nil {
logger.Error("failed to copy node WAL", "error", err)
return
}
if args.profAddr != "" {
if profAddr != "" {
logger.Info("getting node goroutine profile...")
if err := dumpProfile(tmpDir, args.profAddr, "goroutine", 2); err != nil {
if err := dumpProfile(tmpDir, profAddr, "goroutine", 2); err != nil {
logger.Error("failed to dump goroutine profile", "error", err)
return
}
logger.Info("getting node heap profile...")
if err := dumpProfile(tmpDir, args.profAddr, "heap", 2); err != nil {
if err := dumpProfile(tmpDir, profAddr, "heap", 2); err != nil {
logger.Error("failed to dump heap profile", "error", err)
return
}
}
outFile := filepath.Join(args.outDir, fmt.Sprintf("%s.zip", start.Format(time.RFC3339)))
outFile := filepath.Join(outDir, fmt.Sprintf("%s.zip", start.Format(time.RFC3339)))
if err := zipDir(tmpDir, outFile); err != nil {
logger.Error("failed to create and compress archive", "file", outFile, "error", err)
}

View File

@@ -15,96 +15,89 @@ import (
"github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/libs/cli"
"github.com/tendermint/tendermint/libs/log"
rpchttp "github.com/tendermint/tendermint/rpc/client/http"
)
func getKillCmd(logger log.Logger) *cobra.Command {
cmd := &cobra.Command{
Use: "kill [pid] [compressed-output-file]",
Short: "Kill a Tendermint process while aggregating and packaging debugging data",
Long: `Kill a Tendermint process while also aggregating Tendermint process data
var killCmd = &cobra.Command{
Use: "kill [pid] [compressed-output-file]",
Short: "Kill a Tendermint process while aggregating and packaging debugging data",
Long: `Kill a Tendermint process while also aggregating Tendermint process data
such as the latest node state, including consensus and networking state,
go-routine state, and the node's WAL and config information. This aggregated data
is packaged into a compressed archive.
Example:
$ tendermint debug kill 34255 /path/to/tm-debug.zip`,
Args: cobra.ExactArgs(2),
RunE: func(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
pid, err := strconv.ParseInt(args[0], 10, 64)
if err != nil {
return err
}
Args: cobra.ExactArgs(2),
RunE: killCmdHandler,
}
outFile := args[1]
if outFile == "" {
return errors.New("invalid output file")
}
nodeRPCAddr, err := cmd.Flags().GetString(flagNodeRPCAddr)
if err != nil {
return fmt.Errorf("flag %q not defined: %w", flagNodeRPCAddr, err)
}
rpc, err := rpchttp.New(nodeRPCAddr)
if err != nil {
return fmt.Errorf("failed to create new http client: %w", err)
}
home := viper.GetString(cli.HomeFlag)
conf := config.DefaultConfig()
conf = conf.SetRoot(home)
config.EnsureRoot(conf.RootDir)
// Create a temporary directory which will contain all the state dumps and
// relevant files and directories that will be compressed into a file.
tmpDir, err := os.MkdirTemp(os.TempDir(), "tendermint_debug_tmp")
if err != nil {
return fmt.Errorf("failed to create temporary directory: %w", err)
}
defer os.RemoveAll(tmpDir)
logger.Info("getting node status...")
if err := dumpStatus(ctx, rpc, tmpDir, "status.json"); err != nil {
return err
}
logger.Info("getting node network info...")
if err := dumpNetInfo(ctx, rpc, tmpDir, "net_info.json"); err != nil {
return err
}
logger.Info("getting node consensus state...")
if err := dumpConsensusState(ctx, rpc, tmpDir, "consensus_state.json"); err != nil {
return err
}
logger.Info("copying node WAL...")
if err := copyWAL(conf, tmpDir); err != nil {
if !os.IsNotExist(err) {
return err
}
logger.Info("node WAL does not exist; continuing...")
}
logger.Info("copying node configuration...")
if err := copyConfig(home, tmpDir); err != nil {
return err
}
logger.Info("killing Tendermint process")
if err := killProc(int(pid), tmpDir); err != nil {
return err
}
logger.Info("archiving and compressing debug directory...")
return zipDir(tmpDir, outFile)
},
func killCmdHandler(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
pid, err := strconv.ParseInt(args[0], 10, 64)
if err != nil {
return err
}
return cmd
outFile := args[1]
if outFile == "" {
return errors.New("invalid output file")
}
rpc, err := rpchttp.New(nodeRPCAddr)
if err != nil {
return fmt.Errorf("failed to create new http client: %w", err)
}
home := viper.GetString(cli.HomeFlag)
conf := config.DefaultConfig()
conf = conf.SetRoot(home)
config.EnsureRoot(conf.RootDir)
// Create a temporary directory which will contain all the state dumps and
// relevant files and directories that will be compressed into a file.
tmpDir, err := os.MkdirTemp(os.TempDir(), "tendermint_debug_tmp")
if err != nil {
return fmt.Errorf("failed to create temporary directory: %w", err)
}
defer os.RemoveAll(tmpDir)
logger.Info("getting node status...")
if err := dumpStatus(ctx, rpc, tmpDir, "status.json"); err != nil {
return err
}
logger.Info("getting node network info...")
if err := dumpNetInfo(ctx, rpc, tmpDir, "net_info.json"); err != nil {
return err
}
logger.Info("getting node consensus state...")
if err := dumpConsensusState(ctx, rpc, tmpDir, "consensus_state.json"); err != nil {
return err
}
logger.Info("copying node WAL...")
if err := copyWAL(conf, tmpDir); err != nil {
if !os.IsNotExist(err) {
return err
}
logger.Info("node WAL does not exist; continuing...")
}
logger.Info("copying node configuration...")
if err := copyConfig(home, tmpDir); err != nil {
return err
}
logger.Info("killing Tendermint process")
if err := killProc(int(pid), tmpDir); err != nil {
return err
}
logger.Info("archiving and compressing debug directory...")
return zipDir(tmpDir, outFile)
}
// killProc attempts to kill the Tendermint process with a given PID with an

View File

@@ -9,7 +9,6 @@ import (
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/scripts/keymigrate"
"github.com/tendermint/tendermint/scripts/scmigrate"
)
func MakeKeyMigrateCommand(conf *cfg.Config, logger log.Logger) *cobra.Command {
@@ -52,13 +51,6 @@ func MakeKeyMigrateCommand(conf *cfg.Config, logger log.Logger) *cobra.Command {
return fmt.Errorf("running migration for context %q: %w",
dbctx, err)
}
if dbctx == "blockstore" {
if err := scmigrate.Migrate(ctx, db); err != nil {
return fmt.Errorf("running seen commit migration: %w", err)
}
}
}
logger.Info("completed database migration successfully")

View File

@@ -1,6 +1,7 @@
package commands
import (
"context"
"errors"
"fmt"
"net/http"
@@ -148,7 +149,7 @@ for applications built w/ Cosmos SDK).
// Initiate the light client. If the trusted store already has blocks in it, this
// will be used else we use the trusted options.
c, err := light.NewHTTPClient(
cmd.Context(),
context.Background(),
chainID,
light.TrustOptions{
Period: trustingPeriod,
@@ -180,7 +181,7 @@ for applications built w/ Cosmos SDK).
return err
}
ctx, cancel := signal.NotifyContext(cmd.Context(), os.Interrupt, syscall.SIGTERM)
ctx, cancel := signal.NotifyContext(cmd.Context(), syscall.SIGTERM)
defer cancel()
go func() {

View File

@@ -213,7 +213,7 @@ func eventReIndex(cmd *cobra.Command, args eventReIndexArgs) error {
Height: b.Height,
Index: uint32(i),
Tx: b.Data.Txs[i],
Result: *(r.FinalizeBlock.TxResults[i]),
Result: *(r.FinalizeBlock.Txs[i]),
}
_ = batch.Add(&tr)

View File

@@ -153,10 +153,10 @@ func TestReIndexEvent(t *testing.T) {
On("IndexTxEvents", mock.AnythingOfType("[]*types.TxResult")).Return(errors.New("")).Once().
On("IndexTxEvents", mock.AnythingOfType("[]*types.TxResult")).Return(nil)
dtx := abcitypes.ExecTxResult{}
dtx := abcitypes.ResponseDeliverTx{}
abciResp := &prototmstate.ABCIResponses{
FinalizeBlock: &abcitypes.ResponseFinalizeBlock{
TxResults: []*abcitypes.ExecTxResult{&dtx},
Txs: []*abcitypes.ResponseDeliverTx{&dtx},
},
}

View File

@@ -1,182 +0,0 @@
package commands
import (
"os"
"path/filepath"
"github.com/spf13/cobra"
"github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/libs/log"
tmos "github.com/tendermint/tendermint/libs/os"
"github.com/tendermint/tendermint/privval"
"github.com/tendermint/tendermint/types"
)
// MakeResetCommand constructs a command that removes the database of
// the specified Tendermint core instance.
func MakeResetCommand(conf *config.Config, logger log.Logger) *cobra.Command {
var keyType string
resetCmd := &cobra.Command{
Use: "reset",
Short: "Set of commands to conveniently reset tendermint related data",
}
resetBlocksCmd := &cobra.Command{
Use: "blockchain",
Short: "Removes all blocks, state, transactions and evidence stored by the tendermint node",
RunE: func(cmd *cobra.Command, args []string) error {
return ResetState(conf.DBDir(), logger)
},
}
resetPeersCmd := &cobra.Command{
Use: "peers",
Short: "Removes all peer addresses",
RunE: func(cmd *cobra.Command, args []string) error {
return ResetPeerStore(conf.DBDir())
},
}
resetSignerCmd := &cobra.Command{
Use: "unsafe-signer",
Short: "Resets private validator signer state",
Long: `Resets private validator signer state.
Only use in testing. This can cause the node to double sign`,
RunE: func(cmd *cobra.Command, args []string) error {
return ResetFilePV(conf.PrivValidator.KeyFile(), conf.PrivValidator.StateFile(), logger, keyType)
},
}
resetAllCmd := &cobra.Command{
Use: "unsafe-all",
Short: "Removes all tendermint data including signing state",
Long: `Removes all tendermint data including signing state.
Only use in testing. This can cause the node to double sign`,
RunE: func(cmd *cobra.Command, args []string) error {
return ResetAll(conf.DBDir(), conf.PrivValidator.KeyFile(),
conf.PrivValidator.StateFile(), logger, keyType)
},
}
resetSignerCmd.Flags().StringVar(&keyType, "key", types.ABCIPubKeyTypeEd25519,
"Signer key type. Options: ed25519, secp256k1")
resetAllCmd.Flags().StringVar(&keyType, "key", types.ABCIPubKeyTypeEd25519,
"Signer key type. Options: ed25519, secp256k1")
resetCmd.AddCommand(resetBlocksCmd)
resetCmd.AddCommand(resetPeersCmd)
resetCmd.AddCommand(resetSignerCmd)
resetCmd.AddCommand(resetAllCmd)
return resetCmd
}
// ResetAll removes address book files plus all data, and resets the privValidator data.
// Exported for external CLI usage.
// XXX: this is unsafe and only suitable for testnets.
func ResetAll(dbDir, privValKeyFile, privValStateFile string, logger log.Logger, keyType string) error {
if err := os.RemoveAll(dbDir); err == nil {
logger.Info("Removed all blockchain history", "dir", dbDir)
} else {
logger.Error("error removing all blockchain history", "dir", dbDir, "err", err)
}
if err := tmos.EnsureDir(dbDir, 0700); err != nil {
logger.Error("unable to recreate dbDir", "err", err)
}
// recreate the dbDir since the privVal state needs to live there
return ResetFilePV(privValKeyFile, privValStateFile, logger, keyType)
}
// ResetState removes all blocks, tendermint state, indexed transactions and evidence.
func ResetState(dbDir string, logger log.Logger) error {
blockdb := filepath.Join(dbDir, "blockstore.db")
state := filepath.Join(dbDir, "state.db")
wal := filepath.Join(dbDir, "cs.wal")
evidence := filepath.Join(dbDir, "evidence.db")
txIndex := filepath.Join(dbDir, "tx_index.db")
if tmos.FileExists(blockdb) {
if err := os.RemoveAll(blockdb); err == nil {
logger.Info("Removed all blockstore.db", "dir", blockdb)
} else {
logger.Error("error removing all blockstore.db", "dir", blockdb, "err", err)
}
}
if tmos.FileExists(state) {
if err := os.RemoveAll(state); err == nil {
logger.Info("Removed all state.db", "dir", state)
} else {
logger.Error("error removing all state.db", "dir", state, "err", err)
}
}
if tmos.FileExists(wal) {
if err := os.RemoveAll(wal); err == nil {
logger.Info("Removed all cs.wal", "dir", wal)
} else {
logger.Error("error removing all cs.wal", "dir", wal, "err", err)
}
}
if tmos.FileExists(evidence) {
if err := os.RemoveAll(evidence); err == nil {
logger.Info("Removed all evidence.db", "dir", evidence)
} else {
logger.Error("error removing all evidence.db", "dir", evidence, "err", err)
}
}
if tmos.FileExists(txIndex) {
if err := os.RemoveAll(txIndex); err == nil {
logger.Info("Removed tx_index.db", "dir", txIndex)
} else {
logger.Error("error removing tx_index.db", "dir", txIndex, "err", err)
}
}
return tmos.EnsureDir(dbDir, 0700)
}
// ResetFilePV loads the file private validator and resets the watermark to 0. If used on an existing network,
// this can cause the node to double sign.
// XXX: this is unsafe and is only suitable for testnets.
func ResetFilePV(privValKeyFile, privValStateFile string, logger log.Logger, keyType string) error {
if _, err := os.Stat(privValKeyFile); err == nil {
pv, err := privval.LoadFilePVEmptyState(privValKeyFile, privValStateFile)
if err != nil {
return err
}
if err := pv.Reset(); err != nil {
return err
}
logger.Info("Reset private validator file to genesis state", "keyFile", privValKeyFile,
"stateFile", privValStateFile)
} else {
pv, err := privval.GenFilePV(privValKeyFile, privValStateFile, keyType)
if err != nil {
return err
}
if err := pv.Save(); err != nil {
return err
}
logger.Info("Generated private validator file", "keyFile", privValKeyFile,
"stateFile", privValStateFile)
}
return nil
}
// ResetPeerStore removes the peer store containing all information used by the tendermint networking layer.
// After a reset, new peers will need to be supplied either via the config or through the discovery mechanism.
func ResetPeerStore(dbDir string) error {
peerstore := filepath.Join(dbDir, "peerstore.db")
if tmos.FileExists(peerstore) {
return os.RemoveAll(peerstore)
}
return nil
}
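The exported helpers above (ResetState, ResetPeerStore, ResetAll, ResetFilePV) are meant to be callable from external tooling as well as from the CLI. The sketch below shows one way an external tool might drive them; the main wrapper, the node home directory, and the commands import path are illustrative assumptions, not part of this change.

package main

import (
	"github.com/tendermint/tendermint/cmd/tendermint/commands"
	"github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/types"
)

func main() {
	conf := config.DefaultConfig()
	conf.SetRoot("/tmp/tendermint-home") // hypothetical node home
	logger := log.NewNopLogger()

	// Drop blocks, state, indexed transactions and evidence, keeping the
	// signer state and the peer store.
	if err := commands.ResetState(conf.DBDir(), logger); err != nil {
		panic(err)
	}

	// Drop the peer store; peers are re-learned from config or discovery.
	if err := commands.ResetPeerStore(conf.DBDir()); err != nil {
		panic(err)
	}

	// Unsafe: wipe everything, including signer state (testnets only).
	err := commands.ResetAll(conf.DBDir(), conf.PrivValidator.KeyFile(),
		conf.PrivValidator.StateFile(), logger, types.ABCIPubKeyTypeEd25519)
	if err != nil {
		panic(err)
	}
}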

View File

@@ -0,0 +1,95 @@
package commands
import (
"os"
"github.com/spf13/cobra"
"github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/libs/log"
tmos "github.com/tendermint/tendermint/libs/os"
"github.com/tendermint/tendermint/privval"
"github.com/tendermint/tendermint/types"
)
// MakeResetAllCommand constructs a command that removes the database of
// the specified Tendermint core instance.
func MakeResetAllCommand(conf *config.Config, logger log.Logger) *cobra.Command {
var keyType string
cmd := &cobra.Command{
Use: "unsafe-reset-all",
Short: "(unsafe) Remove all the data and WAL, reset this node's validator to genesis state",
RunE: func(cmd *cobra.Command, args []string) error {
return resetAll(conf.DBDir(), conf.PrivValidator.KeyFile(),
conf.PrivValidator.StateFile(), logger, keyType)
},
}
cmd.Flags().StringVar(&keyType, "key", types.ABCIPubKeyTypeEd25519,
"Key type to generate privval file with. Options: ed25519, secp256k1")
return cmd
}
func MakeResetPrivateValidatorCommand(conf *config.Config, logger log.Logger) *cobra.Command {
var keyType string
cmd := &cobra.Command{
Use: "unsafe-reset-priv-validator",
Short: "(unsafe) Reset this node's validator to genesis state",
RunE: func(cmd *cobra.Command, args []string) error {
return resetFilePV(conf.PrivValidator.KeyFile(), conf.PrivValidator.StateFile(), logger, keyType)
},
}
cmd.Flags().StringVar(&keyType, "key", types.ABCIPubKeyTypeEd25519,
"Key type to generate privval file with. Options: ed25519, secp256k1")
return cmd
}
// XXX: this is totally unsafe.
// it's only suitable for testnets.
// resetAll removes address book files plus all data, and resets the privValidator data.
// Exported so other CLI tools can use it.
func resetAll(dbDir, privValKeyFile, privValStateFile string, logger log.Logger, keyType string) error {
if err := os.RemoveAll(dbDir); err == nil {
logger.Info("Removed all blockchain history", "dir", dbDir)
} else {
logger.Error("error removing all blockchain history", "dir", dbDir, "err", err)
}
// recreate the dbDir since the privVal state needs to live there
if err := tmos.EnsureDir(dbDir, 0700); err != nil {
logger.Error("unable to recreate dbDir", "err", err)
}
return resetFilePV(privValKeyFile, privValStateFile, logger, keyType)
}
func resetFilePV(privValKeyFile, privValStateFile string, logger log.Logger, keyType string) error {
if _, err := os.Stat(privValKeyFile); err == nil {
pv, err := privval.LoadFilePVEmptyState(privValKeyFile, privValStateFile)
if err != nil {
return err
}
if err := pv.Reset(); err != nil {
return err
}
logger.Info("Reset private validator file to genesis state", "keyFile", privValKeyFile,
"stateFile", privValStateFile)
} else {
pv, err := privval.GenFilePV(privValKeyFile, privValStateFile, keyType)
if err != nil {
return err
}
if err := pv.Save(); err != nil {
return err
}
logger.Info("Generated private validator file", "keyFile", privValKeyFile,
"stateFile", privValStateFile)
}
return nil
}

View File

@@ -1,62 +0,0 @@
package commands
import (
"context"
"path/filepath"
"testing"
"github.com/stretchr/testify/require"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/privval"
"github.com/tendermint/tendermint/types"
)
func Test_ResetAll(t *testing.T) {
config := cfg.TestConfig()
dir := t.TempDir()
config.SetRoot(dir)
logger := log.NewNopLogger()
cfg.EnsureRoot(dir)
require.NoError(t, initFilesWithConfig(context.Background(), config, logger, types.ABCIPubKeyTypeEd25519))
pv, err := privval.LoadFilePV(config.PrivValidator.KeyFile(), config.PrivValidator.StateFile())
require.NoError(t, err)
pv.LastSignState.Height = 10
require.NoError(t, pv.Save())
require.NoError(t, ResetAll(config.DBDir(), config.PrivValidator.KeyFile(),
config.PrivValidator.StateFile(), logger, types.ABCIPubKeyTypeEd25519))
require.DirExists(t, config.DBDir())
require.NoFileExists(t, filepath.Join(config.DBDir(), "block.db"))
require.NoFileExists(t, filepath.Join(config.DBDir(), "state.db"))
require.NoFileExists(t, filepath.Join(config.DBDir(), "evidence.db"))
require.NoFileExists(t, filepath.Join(config.DBDir(), "tx_index.db"))
require.FileExists(t, config.PrivValidator.StateFile())
pv, err = privval.LoadFilePV(config.PrivValidator.KeyFile(), config.PrivValidator.StateFile())
require.NoError(t, err)
require.Equal(t, int64(0), pv.LastSignState.Height)
}
func Test_ResetState(t *testing.T) {
config := cfg.TestConfig()
dir := t.TempDir()
config.SetRoot(dir)
logger := log.NewNopLogger()
cfg.EnsureRoot(dir)
require.NoError(t, initFilesWithConfig(context.Background(), config, logger, types.ABCIPubKeyTypeEd25519))
pv, err := privval.LoadFilePV(config.PrivValidator.KeyFile(), config.PrivValidator.StateFile())
require.NoError(t, err)
pv.LastSignState.Height = 10
require.NoError(t, pv.Save())
require.NoError(t, ResetState(config.DBDir(), logger))
require.DirExists(t, config.DBDir())
require.NoFileExists(t, filepath.Join(config.DBDir(), "block.db"))
require.NoFileExists(t, filepath.Join(config.DBDir(), "state.db"))
require.NoFileExists(t, filepath.Join(config.DBDir(), "evidence.db"))
require.NoFileExists(t, filepath.Join(config.DBDir(), "tx_index.db"))
require.FileExists(t, config.PrivValidator.StateFile())
pv, err = privval.LoadFilePV(config.PrivValidator.KeyFile(), config.PrivValidator.StateFile())
require.NoError(t, err)
// private validator state should still be intact.
require.Equal(t, int64(10), pv.LastSignState.Height)
}

View File

@@ -53,7 +53,6 @@ func TestRollbackIntegration(t *testing.T) {
defer cancel()
node2, _, err2 := rpctest.StartTendermint(ctx, cfg, app, rpctest.SuppressStdout)
require.NoError(t, err2)
t.Cleanup(node2.Wait)
logger := log.NewNopLogger()

View File

@@ -51,12 +51,6 @@ func RootCommand(conf *config.Config, logger log.Logger) *cobra.Command {
}
*conf = *pconf
config.EnsureRoot(conf.RootDir)
if err := log.OverrideWithNewLogger(logger, conf.LogFormat, conf.LogLevel); err != nil {
return err
}
if warning := pconf.DeprecatedFieldWarning(); warning != nil {
logger.Info("WARNING", "deprecated field warning", warning)
}
return nil
},

View File

@@ -105,7 +105,7 @@ func NewRunNodeCmd(nodeProvider cfg.ServiceProvider, conf *cfg.Config, logger lo
return err
}
ctx, cancel := signal.NotifyContext(cmd.Context(), os.Interrupt, syscall.SIGTERM)
ctx, cancel := signal.NotifyContext(cmd.Context(), syscall.SIGTERM)
defer cancel()
n, err := nodeProvider(ctx, conf, logger)

View File

@@ -33,7 +33,8 @@ func main() {
commands.MakeLightCommand(conf, logger),
commands.MakeReplayCommand(conf, logger),
commands.MakeReplayConsoleCommand(conf, logger),
commands.MakeResetCommand(conf, logger),
commands.MakeResetAllCommand(conf, logger),
commands.MakeResetPrivateValidatorCommand(conf, logger),
commands.MakeShowValidatorCommand(conf, logger),
commands.MakeTestnetFilesCommand(conf, logger),
commands.MakeShowNodeIDCommand(conf),
@@ -42,7 +43,7 @@ func main() {
commands.MakeInspectCommand(conf, logger),
commands.MakeRollbackStateCommand(conf),
commands.MakeKeyMigrateCommand(conf, logger),
debug.GetDebugCommand(logger),
debug.DebugCmd,
commands.NewCompletionCmd(rcmd, true),
)

View File

@@ -8,7 +8,6 @@ import (
"net/http"
"os"
"path/filepath"
"strings"
"time"
"github.com/tendermint/tendermint/libs/log"
@@ -146,10 +145,6 @@ func (cfg *Config) ValidateBasic() error {
return nil
}
func (cfg *Config) DeprecatedFieldWarning() error {
return cfg.Consensus.DeprecatedFieldWarning()
}
//-----------------------------------------------------------------------------
// BaseConfig
@@ -447,33 +442,6 @@ type RPCConfig struct {
// to the estimated maximum number of broadcast_tx_commit calls per block.
MaxSubscriptionsPerClient int `mapstructure:"max-subscriptions-per-client"`
// If true, disable the websocket interface to the RPC service. This has
// the effect of disabling the /subscribe, /unsubscribe, and /unsubscribe_all
// methods for event subscription.
//
// EXPERIMENTAL: This setting will be removed in Tendermint v0.37.
ExperimentalDisableWebsocket bool `mapstructure:"experimental-disable-websocket"`
// The time window size for the event log. All events up to this long before
// the latest (up to EventLogMaxItems) will be available for subscribers to
// fetch via the /events method. If 0 (the default) the event log and the
// /events RPC method are disabled.
EventLogWindowSize time.Duration `mapstructure:"event-log-window-size"`
// The maximum number of events that may be retained by the event log. If
// this value is 0, no upper limit is set. Otherwise, items in excess of
// this number will be discarded from the event log.
//
// Warning: This setting is a safety valve. Setting it too low may cause
// subscribers to miss events. Try to choose a value higher than the
// maximum worst-case expected event load within the chosen window size in
// ordinary operation.
//
// For example, if the window size is 10 minutes and the node typically
// averages 1000 events per ten minutes, but with occasional known spikes of
// up to 2000, choose a value > 2000.
EventLogMaxItems int `mapstructure:"event-log-max-items"`
// How long to wait for a tx to be committed during /broadcast_tx_commit
// WARNING: Using a value larger than 10s will result in increasing the
// global HTTP write timeout, which applies to all connections and endpoints.
@@ -519,14 +487,9 @@ func DefaultRPCConfig() *RPCConfig {
Unsafe: false,
MaxOpenConnections: 900,
// Settings for event subscription.
MaxSubscriptionClients: 100,
MaxSubscriptionsPerClient: 5,
ExperimentalDisableWebsocket: false, // compatible with TM v0.35 and earlier
EventLogWindowSize: 0, // disables /events RPC by default
EventLogMaxItems: 0,
TimeoutBroadcastTxCommit: 10 * time.Second,
MaxSubscriptionClients: 100,
MaxSubscriptionsPerClient: 5,
TimeoutBroadcastTxCommit: 10 * time.Second,
MaxBodyBytes: int64(1000000), // 1MB
MaxHeaderBytes: 1 << 20, // same as the net/http default
@@ -556,12 +519,6 @@ func (cfg *RPCConfig) ValidateBasic() error {
if cfg.MaxSubscriptionsPerClient < 0 {
return errors.New("max-subscriptions-per-client can't be negative")
}
if cfg.EventLogWindowSize < 0 {
return errors.New("event-log-window-size must not be negative")
}
if cfg.EventLogMaxItems < 0 {
return errors.New("event-log-max-items must not be negative")
}
if cfg.TimeoutBroadcastTxCommit < 0 {
return errors.New("timeout-broadcast-tx-commit can't be negative")
}
@@ -961,6 +918,27 @@ type ConsensusConfig struct {
WalPath string `mapstructure:"wal-file"`
walFile string // overrides WalPath if set
// TODO: remove timeout configs, these should be global not local
// How long we wait for a proposal block before prevoting nil
TimeoutPropose time.Duration `mapstructure:"timeout-propose"`
// How much timeout-propose increases with each round
TimeoutProposeDelta time.Duration `mapstructure:"timeout-propose-delta"`
// How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil)
TimeoutPrevote time.Duration `mapstructure:"timeout-prevote"`
// How much the timeout-prevote increases with each round
TimeoutPrevoteDelta time.Duration `mapstructure:"timeout-prevote-delta"`
// How long we wait after receiving +2/3 precommits for “anything” (ie. not a single block or nil)
TimeoutPrecommit time.Duration `mapstructure:"timeout-precommit"`
// How much the timeout-precommit increases with each round
TimeoutPrecommitDelta time.Duration `mapstructure:"timeout-precommit-delta"`
// How long we wait after committing a block, before starting on the new
// height (this gives us a chance to receive some more precommits, even
// though we already have +2/3).
TimeoutCommit time.Duration `mapstructure:"timeout-commit"`
// Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
SkipTimeoutCommit bool `mapstructure:"skip-timeout-commit"`
// EmptyBlocks mode and possible interval between empty blocks
CreateEmptyBlocks bool `mapstructure:"create-empty-blocks"`
CreateEmptyBlocksInterval time.Duration `mapstructure:"create-empty-blocks-interval"`
@@ -970,59 +948,20 @@ type ConsensusConfig struct {
PeerQueryMaj23SleepDuration time.Duration `mapstructure:"peer-query-maj23-sleep-duration"`
DoubleSignCheckHeight int64 `mapstructure:"double-sign-check-height"`
// TODO: The following fields are all temporary overrides that should exist only
// for the duration of the v0.36 release. The below fields should be completely
// removed in the v0.37 release of Tendermint.
// See: https://github.com/tendermint/tendermint/issues/8188
// UnsafeProposeTimeoutOverride provides an unsafe override of the Propose
// timeout consensus parameter. It configures how long the consensus engine
// will wait to receive a proposal block before prevoting nil.
UnsafeProposeTimeoutOverride time.Duration `mapstructure:"unsafe-propose-timeout-override"`
// UnsafeProposeTimeoutDeltaOverride provides an unsafe override of the
// ProposeDelta timeout consensus parameter. It configures how much the
// propose timeout increases with each round.
UnsafeProposeTimeoutDeltaOverride time.Duration `mapstructure:"unsafe-propose-timeout-delta-override"`
// UnsafeVoteTimeoutOverride provides an unsafe override of the Vote timeout
// consensus parameter. It configures how long the consensus engine will wait
// to gather additional votes after receiving +2/3 votes in a round.
UnsafeVoteTimeoutOverride time.Duration `mapstructure:"unsafe-vote-timeout-override"`
// UnsafeVoteTimeoutDeltaOverride provides an unsafe override of the VoteDelta
// timeout consensus parameter. It configures how much the vote timeout
// increases with each round.
UnsafeVoteTimeoutDeltaOverride time.Duration `mapstructure:"unsafe-vote-timeout-delta-override"`
// UnsafeCommitTimeoutOverride provides an unsafe override of the Commit timeout
// consensus parameter. It configures how long the consensus engine will wait
// after receiving +2/3 precommits before beginning the next height.
UnsafeCommitTimeoutOverride time.Duration `mapstructure:"unsafe-commit-timeout-override"`
// UnsafeBypassCommitTimeoutOverride provides an unsafe override of the
// BypassCommitTimeout consensus parameter. It configures if the consensus
// engine will wait for the full Commit timeout before proceeding to the next height.
// If it is set to true, the consensus engine will proceed to the next height
// as soon as the node has gathered votes from all of the validators on the network.
UnsafeBypassCommitTimeoutOverride *bool `mapstructure:"unsafe-bypass-commit-timeout-override"`
// Deprecated timeout parameters. These parameters are present in this struct
// so that they can be parsed so that validation can check if they have erroneously
// been included and provide a helpful error message.
// These fields should be completely removed in v0.37.
// See: https://github.com/tendermint/tendermint/issues/8188
DeprecatedTimeoutPropose *interface{} `mapstructure:"timeout-propose"`
DeprecatedTimeoutProposeDelta *interface{} `mapstructure:"timeout-propose-delta"`
DeprecatedTimeoutPrevote *interface{} `mapstructure:"timeout-prevote"`
DeprecatedTimeoutPrevoteDelta *interface{} `mapstructure:"timeout-prevote-delta"`
DeprecatedTimeoutPrecommit *interface{} `mapstructure:"timeout-precommit"`
DeprecatedTimeoutPrecommitDelta *interface{} `mapstructure:"timeout-precommit-delta"`
DeprecatedTimeoutCommit *interface{} `mapstructure:"timeout-commit"`
DeprecatedSkipTimeoutCommit *interface{} `mapstructure:"skip-timeout-commit"`
}
// DefaultConsensusConfig returns a default configuration for the consensus service
func DefaultConsensusConfig() *ConsensusConfig {
return &ConsensusConfig{
WalPath: filepath.Join(defaultDataDir, "cs.wal", "wal"),
TimeoutPropose: 3000 * time.Millisecond,
TimeoutProposeDelta: 500 * time.Millisecond,
TimeoutPrevote: 1000 * time.Millisecond,
TimeoutPrevoteDelta: 500 * time.Millisecond,
TimeoutPrecommit: 1000 * time.Millisecond,
TimeoutPrecommitDelta: 500 * time.Millisecond,
TimeoutCommit: 1000 * time.Millisecond,
SkipTimeoutCommit: false,
CreateEmptyBlocks: true,
CreateEmptyBlocksInterval: 0 * time.Second,
PeerGossipSleepDuration: 100 * time.Millisecond,
@@ -1034,6 +973,14 @@ func DefaultConsensusConfig() *ConsensusConfig {
// TestConsensusConfig returns a configuration for testing the consensus service
func TestConsensusConfig() *ConsensusConfig {
cfg := DefaultConsensusConfig()
cfg.TimeoutPropose = 40 * time.Millisecond
cfg.TimeoutProposeDelta = 1 * time.Millisecond
cfg.TimeoutPrevote = 10 * time.Millisecond
cfg.TimeoutPrevoteDelta = 1 * time.Millisecond
cfg.TimeoutPrecommit = 10 * time.Millisecond
cfg.TimeoutPrecommitDelta = 1 * time.Millisecond
cfg.TimeoutCommit = 10 * time.Millisecond
cfg.SkipTimeoutCommit = true
cfg.PeerGossipSleepDuration = 5 * time.Millisecond
cfg.PeerQueryMaj23SleepDuration = 250 * time.Millisecond
cfg.DoubleSignCheckHeight = int64(0)
@@ -1045,6 +992,33 @@ func (cfg *ConsensusConfig) WaitForTxs() bool {
return !cfg.CreateEmptyBlocks || cfg.CreateEmptyBlocksInterval > 0
}
// Propose returns the amount of time to wait for a proposal
func (cfg *ConsensusConfig) Propose(round int32) time.Duration {
return time.Duration(
cfg.TimeoutPropose.Nanoseconds()+cfg.TimeoutProposeDelta.Nanoseconds()*int64(round),
) * time.Nanosecond
}
// Prevote returns the amount of time to wait for straggler votes after receiving any +2/3 prevotes
func (cfg *ConsensusConfig) Prevote(round int32) time.Duration {
return time.Duration(
cfg.TimeoutPrevote.Nanoseconds()+cfg.TimeoutPrevoteDelta.Nanoseconds()*int64(round),
) * time.Nanosecond
}
// Precommit returns the amount of time to wait for straggler votes after receiving any +2/3 precommits
func (cfg *ConsensusConfig) Precommit(round int32) time.Duration {
return time.Duration(
cfg.TimeoutPrecommit.Nanoseconds()+cfg.TimeoutPrecommitDelta.Nanoseconds()*int64(round),
) * time.Nanosecond
}
// Commit returns the amount of time to wait for straggler votes after receiving +2/3 precommits
// for a single block (ie. a commit).
func (cfg *ConsensusConfig) Commit(t time.Time) time.Time {
return t.Add(cfg.TimeoutCommit)
}
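With the defaults restored above (timeout-propose = 3s, timeout-propose-delta = 500ms, timeout-commit = 1s), these helpers give a simple linear escalation per round. A small standalone sketch, assuming the restored ConsensusConfig API shown in this hunk; the printed values are illustrative only:

package main

import (
	"fmt"
	"time"

	"github.com/tendermint/tendermint/config"
)

func main() {
	cfg := config.DefaultConsensusConfig()

	// Propose(round) = TimeoutPropose + round*TimeoutProposeDelta
	fmt.Println(cfg.Propose(0)) // 3s
	fmt.Println(cfg.Propose(2)) // 3s + 2*500ms = 4s

	// Commit returns an absolute time: the earliest moment to start the next height.
	fmt.Println(cfg.Commit(time.Now())) // now + 1s (TimeoutCommit)
}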
// WalFile returns the full path to the write-ahead log file
func (cfg *ConsensusConfig) WalFile() string {
if cfg.walFile != "" {
@@ -1061,20 +1035,26 @@ func (cfg *ConsensusConfig) SetWalFile(walFile string) {
// ValidateBasic performs basic validation (checking param bounds, etc.) and
// returns an error if any check fails.
func (cfg *ConsensusConfig) ValidateBasic() error {
if cfg.UnsafeProposeTimeoutOverride < 0 {
return errors.New("unsafe-propose-timeout-override can't be negative")
if cfg.TimeoutPropose < 0 {
return errors.New("timeout-propose can't be negative")
}
if cfg.UnsafeProposeTimeoutDeltaOverride < 0 {
return errors.New("unsafe-propose-timeout-delta-override can't be negative")
if cfg.TimeoutProposeDelta < 0 {
return errors.New("timeout-propose-delta can't be negative")
}
if cfg.UnsafeVoteTimeoutOverride < 0 {
return errors.New("unsafe-vote-timeout-override can't be negative")
if cfg.TimeoutPrevote < 0 {
return errors.New("timeout-prevote can't be negative")
}
if cfg.UnsafeVoteTimeoutDeltaOverride < 0 {
return errors.New("unsafe-vote-timeout-delta-override can't be negative")
if cfg.TimeoutPrevoteDelta < 0 {
return errors.New("timeout-prevote-delta can't be negative")
}
if cfg.UnsafeCommitTimeoutOverride < 0 {
return errors.New("unsafe-commit-timeout-override can't be negative")
if cfg.TimeoutPrecommit < 0 {
return errors.New("timeout-precommit can't be negative")
}
if cfg.TimeoutPrecommitDelta < 0 {
return errors.New("timeout-precommit-delta can't be negative")
}
if cfg.TimeoutCommit < 0 {
return errors.New("timeout-commit can't be negative")
}
if cfg.CreateEmptyBlocksInterval < 0 {
return errors.New("create-empty-blocks-interval can't be negative")
@@ -1091,44 +1071,6 @@ func (cfg *ConsensusConfig) ValidateBasic() error {
return nil
}
func (cfg *ConsensusConfig) DeprecatedFieldWarning() error {
var fields []string
if cfg.DeprecatedSkipTimeoutCommit != nil {
fields = append(fields, "skip-timeout-commit")
}
if cfg.DeprecatedTimeoutPropose != nil {
fields = append(fields, "timeout-propose")
}
if cfg.DeprecatedTimeoutProposeDelta != nil {
fields = append(fields, "timeout-propose-delta")
}
if cfg.DeprecatedTimeoutPrevote != nil {
fields = append(fields, "timeout-prevote")
}
if cfg.DeprecatedTimeoutPrevoteDelta != nil {
fields = append(fields, "timeout-prevote-delta")
}
if cfg.DeprecatedTimeoutPrecommit != nil {
fields = append(fields, "timeout-precommit")
}
if cfg.DeprecatedTimeoutPrecommitDelta != nil {
fields = append(fields, "timeout-precommit-delta")
}
if cfg.DeprecatedTimeoutCommit != nil {
fields = append(fields, "timeout-commit")
}
if cfg.DeprecatedSkipTimeoutCommit != nil {
fields = append(fields, "skip-timeout-commit")
}
if len(fields) != 0 {
return fmt.Errorf("the following deprecated fields were set in the "+
"configuration file: %s. These fields were removed in v0.36. Timeout "+
"configuration has been moved to the ConsensusParams. For more information see "+
"https://tinyurl.com/adr074", strings.Join(fields, ", "))
}
return nil
}
//-----------------------------------------------------------------------------
// TxIndexConfig
// Remember that Event has the following structure:
@@ -1145,8 +1087,9 @@ type TxIndexConfig struct {
// If list contains `null`, meaning no indexer service will be used.
//
// Options:
// 1) "null" (default) - no indexer services.
// 2) "kv" - a simple indexer backed by key-value storage (see DBBackend)
// 1) "null" - no indexer services.
// 2) "kv" (default) - the simplest possible indexer,
// backed by key-value storage (defaults to levelDB; see DBBackend).
// 3) "psql" - the indexer services backed by PostgreSQL.
Indexer []string `mapstructure:"indexer"`
@@ -1157,12 +1100,14 @@ type TxIndexConfig struct {
// DefaultTxIndexConfig returns a default configuration for the transaction indexer.
func DefaultTxIndexConfig() *TxIndexConfig {
return &TxIndexConfig{Indexer: []string{"null"}}
return &TxIndexConfig{
Indexer: []string{"kv"},
}
}
// TestTxIndexConfig returns a default configuration for the transaction indexer.
func TestTxIndexConfig() *TxIndexConfig {
return &TxIndexConfig{Indexer: []string{"kv"}}
return DefaultTxIndexConfig()
}
//-----------------------------------------------------------------------------

View File

@@ -29,8 +29,8 @@ func TestConfigValidateBasic(t *testing.T) {
cfg := DefaultConfig()
assert.NoError(t, cfg.ValidateBasic())
// tamper with unsafe-propose-timeout-override
cfg.Consensus.UnsafeProposeTimeoutOverride = -10 * time.Second
// tamper with timeout_propose
cfg.Consensus.TimeoutPropose = -10 * time.Second
assert.Error(t, cfg.ValidateBasic())
}
@@ -106,21 +106,25 @@ func TestConsensusConfig_ValidateBasic(t *testing.T) {
modify func(*ConsensusConfig)
expectErr bool
}{
"UnsafeProposeTimeoutOverride": {func(c *ConsensusConfig) { c.UnsafeProposeTimeoutOverride = time.Second }, false},
"UnsafeProposeTimeoutOverride negative": {func(c *ConsensusConfig) { c.UnsafeProposeTimeoutOverride = -1 }, true},
"UnsafeProposeTimeoutDeltaOverride": {func(c *ConsensusConfig) { c.UnsafeProposeTimeoutDeltaOverride = time.Second }, false},
"UnsafeProposeTimeoutDeltaOverride negative": {func(c *ConsensusConfig) { c.UnsafeProposeTimeoutDeltaOverride = -1 }, true},
"UnsafePrevoteTimeoutOverride": {func(c *ConsensusConfig) { c.UnsafeVoteTimeoutOverride = time.Second }, false},
"UnsafePrevoteTimeoutOverride negative": {func(c *ConsensusConfig) { c.UnsafeVoteTimeoutOverride = -1 }, true},
"UnsafePrevoteTimeoutDeltaOverride": {func(c *ConsensusConfig) { c.UnsafeVoteTimeoutDeltaOverride = time.Second }, false},
"UnsafePrevoteTimeoutDeltaOverride negative": {func(c *ConsensusConfig) { c.UnsafeVoteTimeoutDeltaOverride = -1 }, true},
"UnsafeCommitTimeoutOverride": {func(c *ConsensusConfig) { c.UnsafeCommitTimeoutOverride = time.Second }, false},
"UnsafeCommitTimeoutOverride negative": {func(c *ConsensusConfig) { c.UnsafeCommitTimeoutOverride = -1 }, true},
"PeerGossipSleepDuration": {func(c *ConsensusConfig) { c.PeerGossipSleepDuration = time.Second }, false},
"PeerGossipSleepDuration negative": {func(c *ConsensusConfig) { c.PeerGossipSleepDuration = -1 }, true},
"PeerQueryMaj23SleepDuration": {func(c *ConsensusConfig) { c.PeerQueryMaj23SleepDuration = time.Second }, false},
"PeerQueryMaj23SleepDuration negative": {func(c *ConsensusConfig) { c.PeerQueryMaj23SleepDuration = -1 }, true},
"DoubleSignCheckHeight negative": {func(c *ConsensusConfig) { c.DoubleSignCheckHeight = -1 }, true},
"TimeoutPropose": {func(c *ConsensusConfig) { c.TimeoutPropose = time.Second }, false},
"TimeoutPropose negative": {func(c *ConsensusConfig) { c.TimeoutPropose = -1 }, true},
"TimeoutProposeDelta": {func(c *ConsensusConfig) { c.TimeoutProposeDelta = time.Second }, false},
"TimeoutProposeDelta negative": {func(c *ConsensusConfig) { c.TimeoutProposeDelta = -1 }, true},
"TimeoutPrevote": {func(c *ConsensusConfig) { c.TimeoutPrevote = time.Second }, false},
"TimeoutPrevote negative": {func(c *ConsensusConfig) { c.TimeoutPrevote = -1 }, true},
"TimeoutPrevoteDelta": {func(c *ConsensusConfig) { c.TimeoutPrevoteDelta = time.Second }, false},
"TimeoutPrevoteDelta negative": {func(c *ConsensusConfig) { c.TimeoutPrevoteDelta = -1 }, true},
"TimeoutPrecommit": {func(c *ConsensusConfig) { c.TimeoutPrecommit = time.Second }, false},
"TimeoutPrecommit negative": {func(c *ConsensusConfig) { c.TimeoutPrecommit = -1 }, true},
"TimeoutPrecommitDelta": {func(c *ConsensusConfig) { c.TimeoutPrecommitDelta = time.Second }, false},
"TimeoutPrecommitDelta negative": {func(c *ConsensusConfig) { c.TimeoutPrecommitDelta = -1 }, true},
"TimeoutCommit": {func(c *ConsensusConfig) { c.TimeoutCommit = time.Second }, false},
"TimeoutCommit negative": {func(c *ConsensusConfig) { c.TimeoutCommit = -1 }, true},
"PeerGossipSleepDuration": {func(c *ConsensusConfig) { c.PeerGossipSleepDuration = time.Second }, false},
"PeerGossipSleepDuration negative": {func(c *ConsensusConfig) { c.PeerGossipSleepDuration = -1 }, true},
"PeerQueryMaj23SleepDuration": {func(c *ConsensusConfig) { c.PeerQueryMaj23SleepDuration = time.Second }, false},
"PeerQueryMaj23SleepDuration negative": {func(c *ConsensusConfig) { c.PeerQueryMaj23SleepDuration = -1 }, true},
"DoubleSignCheckHeight negative": {func(c *ConsensusConfig) { c.DoubleSignCheckHeight = -1 }, true},
}
for desc, tc := range testcases {
tc := tc // appease linter

View File

@@ -220,33 +220,6 @@ max-subscription-clients = {{ .RPC.MaxSubscriptionClients }}
# to the estimated maximum number of broadcast_tx_commit calls per block.
max-subscriptions-per-client = {{ .RPC.MaxSubscriptionsPerClient }}
# If true, disable the websocket interface to the RPC service. This has
# the effect of disabling the /subscribe, /unsubscribe, and /unsubscribe_all
# methods for event subscription.
#
# EXPERIMENTAL: This setting will be removed in Tendermint v0.37.
experimental-disable-websocket = {{ .RPC.ExperimentalDisableWebsocket }}
# The time window size for the event log. All events up to this long before
# the latest (up to EventLogMaxItems) will be available for subscribers to
# fetch via the /events method. If 0 (the default) the event log and the
# /events RPC method are disabled.
event-log-window-size = "{{ .RPC.EventLogWindowSize }}"
# The maximum number of events that may be retained by the event log. If
# this value is 0, no upper limit is set. Otherwise, items in excess of
# this number will be discarded from the event log.
#
# Warning: This setting is a safety valve. Setting it too low may cause
# subscribers to miss events. Try to choose a value higher than the
# maximum worst-case expected event load within the chosen window size in
# ordinary operation.
#
# For example, if the window size is 10 minutes and the node typically
# averages 1000 events per ten minutes, but with occasional known spikes of
# up to 2000, choose a value > 2000.
event-log-max-items = {{ .RPC.EventLogMaxItems }}
# How long to wait for a tx to be committed during /broadcast_tx_commit.
# WARNING: Using a value larger than 10s will result in increasing the
# global HTTP write timeout, which applies to all connections and endpoints.
@@ -450,12 +423,32 @@ fetchers = "{{ .StateSync.Fetchers }}"
wal-file = "{{ js .Consensus.WalPath }}"
# How long we wait for a proposal block before prevoting nil
timeout-propose = "{{ .Consensus.TimeoutPropose }}"
# How much timeout-propose increases with each round
timeout-propose-delta = "{{ .Consensus.TimeoutProposeDelta }}"
# How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil)
timeout-prevote = "{{ .Consensus.TimeoutPrevote }}"
# How much the timeout-prevote increases with each round
timeout-prevote-delta = "{{ .Consensus.TimeoutPrevoteDelta }}"
# How long we wait after receiving +2/3 precommits for “anything” (ie. not a single block or nil)
timeout-precommit = "{{ .Consensus.TimeoutPrecommit }}"
# How much the timeout-precommit increases with each round
timeout-precommit-delta = "{{ .Consensus.TimeoutPrecommitDelta }}"
# How long we wait after committing a block, before starting on the new
# height (this gives us a chance to receive some more precommits, even
# though we already have +2/3).
timeout-commit = "{{ .Consensus.TimeoutCommit }}"
# How many blocks to look back to check existence of the node's consensus votes before joining consensus
# When non-zero, the node will panic upon restart
# if the same consensus key was used to sign {double-sign-check-height} last blocks.
# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic.
double-sign-check-height = {{ .Consensus.DoubleSignCheckHeight }}
# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
skip-timeout-commit = {{ .Consensus.SkipTimeoutCommit }}
# EmptyBlocks mode and possible interval between empty blocks
create-empty-blocks = {{ .Consensus.CreateEmptyBlocks }}
create-empty-blocks-interval = "{{ .Consensus.CreateEmptyBlocksInterval }}"
@@ -464,50 +457,6 @@ create-empty-blocks-interval = "{{ .Consensus.CreateEmptyBlocksInterval }}"
peer-gossip-sleep-duration = "{{ .Consensus.PeerGossipSleepDuration }}"
peer-query-maj23-sleep-duration = "{{ .Consensus.PeerQueryMaj23SleepDuration }}"
### Unsafe Timeout Overrides ###
# These fields provide temporary overrides for the Timeout consensus parameters.
# Use of these parameters is strongly discouraged. Using these parameters may have serious
# liveness implications for the validator and for the chain.
#
# These fields will be removed from the configuration file in the v0.37 release of Tendermint.
# For additional information, see ADR-74:
# https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-074-timeout-params.md
# This field provides an unsafe override of the Propose timeout consensus parameter.
# This field configures how long the consensus engine will wait for a proposal block before prevoting nil.
# If this field is set to a value greater than 0, it will take effect.
# unsafe-propose-timeout-override = {{ .Consensus.UnsafeProposeTimeoutOverride }}
# This field provides an unsafe override of the ProposeDelta timeout consensus parameter.
# This field configures how much the propose timeout increases with each round.
# If this field is set to a value greater than 0, it will take effect.
# unsafe-propose-timeout-delta-override = {{ .Consensus.UnsafeProposeTimeoutDeltaOverride }}
# This field provides an unsafe override of the Vote timeout consensus parameter.
# This field configures how long the consensus engine will wait after
# receiving +2/3 votes in a round.
# If this field is set to a value greater than 0, it will take effect.
# unsafe-vote-timeout-override = {{ .Consensus.UnsafeVoteTimeoutOverride }}
# This field provides an unsafe override of the VoteDelta timeout consensus parameter.
# This field configures how much the vote timeout increases with each round.
# If this field is set to a value greater than 0, it will take effect.
# unsafe-vote-timeout-delta-override = {{ .Consensus.UnsafeVoteTimeoutDeltaOverride }}
# This field provides an unsafe override of the Commit timeout consensus parameter.
# This field configures how long the consensus engine will wait after receiving
# +2/3 precommits before beginning the next height.
# If this field is set to a value greater than 0, it will take effect.
# unsafe-commit-timeout-override = {{ .Consensus.UnsafeCommitTimeoutOverride }}
# This field provides an unsafe override of the BypassCommitTimeout consensus parameter.
# This field configures if the consensus engine will wait for the full Commit timeout
# before proceeding to the next height.
# If this field is set to true, the consensus engine will proceed to the next height
# as soon as the node has gathered votes from all of the validators on the network.
# unsafe-bypass-commit-timeout-override =
#######################################################
### Transaction Indexer Configuration Options ###
#######################################################
@@ -520,8 +469,8 @@ peer-query-maj23-sleep-duration = "{{ .Consensus.PeerQueryMaj23SleepDuration }}"
# to decide which txs to index based on configuration set in the application.
#
# Options:
# 1) "null" (default) - no indexer services.
# 2) "kv" - a simple indexer backed by key-value storage (see DBBackend)
# 1) "null"
# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
# 3) "psql" - the indexer services backed by PostgreSQL.
# When "kv" or "psql" is chosen "tx.height" and "tx.hash" will always be indexed.
indexer = [{{ range $i, $e := .TxIndex.Indexer }}{{if $i}}, {{end}}{{ printf "%q" $e}}{{end}}]
@@ -626,14 +575,6 @@ var testGenesisFmt = `{
"message_delay": "500000000",
"precision": "10000000"
},
"timeout": {
"propose": "30000000",
"propose_delta": "50000",
"vote": "30000000",
"vote_delta": "50000",
"commit": "10000000",
"bypass_timeout_commit": true
},
"evidence": {
"max_age_num_blocks": "100000",
"max_age_duration": "172800000000000",

View File

@@ -12,7 +12,7 @@ For any specific algorithm, use its specific module e.g.
## Binary encoding
For Binary encoding, please refer to the [Tendermint encoding specification](https://docs.tendermint.com/master/spec/core/encoding.html).
For Binary encoding, please refer to the [Tendermint encoding specification](https://docs.tendermint.com/master/spec/blockchain/encoding.html).
## JSON Encoding

View File

@@ -1,87 +0,0 @@
package blst
import (
"crypto/rand"
bls "github.com/supranational/blst/bindings/go"
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/tmhash"
"github.com/tendermint/tendermint/libs/bytes"
)
const (
PrivKeyName = "blst/PrivKey"
PubKeyName = "blst/PubKey"
PrivKeySize = 32
PubKeySize = 192
SignatureSize = 96
)
var dst = []byte("BLS_SIG_BLS12381G2_XMD:SHA-256_SSWU_RO_NUL_")
// PrivKey implements tendermint.crypto.PrivKey.
type PrivKey struct {
sk *bls.SecretKey
}
func GenPrivKey() *PrivKey {
var ikm [32]byte
_, _ = rand.Read(ikm[:])
return &PrivKey{bls.KeyGen(ikm[:])}
}
func (k *PrivKey) Bytes() []byte {
return k.sk.Serialize()
}
func (k *PrivKey) Equals(other crypto.PrivKey) bool {
if otherK, ok := other.(*PrivKey); ok {
return k.sk.Equals(otherK.sk)
}
return false
}
func (k *PrivKey) PubKey() crypto.PubKey {
return &PubKey{new(bls.P2Affine).From(k.sk)}
}
func (k *PrivKey) Sign(msg []byte) ([]byte, error) {
sig := new(bls.P1Affine).Sign(k.sk, msg, dst)
return sig.Serialize(), nil
}
func (k *PrivKey) Type() string {
return PrivKeyName
}
// PubKey implements tendermint.crypto.PubKey.
type PubKey struct {
pk *bls.P2Affine
}
func (k *PubKey) Address() bytes.HexBytes {
return tmhash.SumTruncated(k.Bytes())
}
func (k *PubKey) Bytes() []byte {
return k.pk.Serialize()
}
func (k *PubKey) Equals(other crypto.PubKey) bool {
if otherK, ok := other.(*PubKey); ok {
return k.pk.Equals(otherK.pk)
}
return false
}
func (k *PubKey) Type() string {
return PubKeyName
}
func (k *PubKey) VerifySignature(msg []byte, bsig []byte) bool {
sig := new(bls.P1Affine).Deserialize(bsig)
if sig == nil {
return false
}
return sig.Verify(true, k.pk, false, msg, dst)
}

View File

@@ -1,97 +0,0 @@
package blst
import "testing"
func TestKeyGen(t *testing.T) {
key1 := GenPrivKey()
if key1 == nil {
t.Fatal("GenPrivKey() return nil")
}
b := key1.Bytes()
if len(b) != PrivKeySize {
t.Error("Serilalized private key", len(b), b)
}
key2 := GenPrivKey()
if key2 == nil {
t.Fatal("GenPrivKey() return nil")
}
b = key2.Bytes()
if len(b) != PrivKeySize {
t.Error("Serilalized private key", len(b), b)
}
if !key1.Equals(key1) {
t.Error("Private key not equal to itself", key1)
}
if !key2.Equals(key2) {
t.Error("Private key not equal to itself", key1)
}
if key1.Equals(key2) || key2.Equals(key1) {
t.Error("Different private keys equal", key1, key2)
}
pub1 := key1.PubKey()
if pub1 == nil {
t.Fatal("PubKey() return nil")
}
b = pub1.Bytes()
if len(b) != PubKeySize {
t.Error("Serilalized public key", len(b), b)
}
pub2 := key2.PubKey()
if pub2 == nil {
t.Fatal("PubKey() return nil")
}
b = pub2.Bytes()
if len(b) != PubKeySize {
t.Error("Serilalized public key", len(b), b)
}
if !pub1.Equals(pub1) {
t.Error("Public key not equal to itself", pub1)
}
if !pub2.Equals(pub2) {
t.Error("Public key not equal to itself", pub1)
}
if pub1.Equals(pub2) || pub2.Equals(pub1) {
t.Error("Different public keys equal", pub1, pub2)
}
}
func TestKeySignVerify(t *testing.T) {
msg := []byte("a test message")
key1 := GenPrivKey()
sig1, err := key1.Sign(msg)
if sig1 == nil || err != nil {
t.Fatal("Failed to sign message", sig1, err)
}
if len(sig1) != SignatureSize {
t.Error("Signature size", len(sig1), sig1)
}
key2 := GenPrivKey()
sig2, err := key2.Sign(msg)
if sig2 == nil || err != nil {
t.Fatal("Failed to sign message", sig2, err)
}
if len(sig2) != SignatureSize {
t.Error("Signature size", len(sig2), sig2)
}
pub1 := key1.PubKey()
pub2 := key2.PubKey()
if !pub1.VerifySignature(msg, sig1) {
t.Error("Failed to verify own signature", msg, sig1)
}
if !pub2.VerifySignature(msg, sig2) {
t.Error("Failed to verify own signature", msg, sig2)
}
if pub1.VerifySignature(msg, sig2) {
t.Error("Signature verified by wrong key", msg, sig2)
}
if pub2.VerifySignature(msg, sig1) {
t.Error("Signature verified by wrong key", msg, sig1)
}
}

View File

@@ -1,21 +0,0 @@
module github.com/tendermint/tendermint/crypto/bls
go 1.17
require (
github.com/herumi/bls-eth-go-binary v0.0.0-20220216073600-600054663ec1
github.com/line/ostracon v1.0.4
github.com/stretchr/testify v1.7.1
github.com/supranational/blst v0.3.7
github.com/tendermint/tendermint v0.35.4
)
require (
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/oasisprotocol/curve25519-voi v0.0.0-20210609091139-0a56a4bca00b // indirect
github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa // indirect
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad // indirect
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
)

View File

@@ -1,235 +0,0 @@
package ostracon
import (
"bytes"
"crypto/sha512"
"crypto/subtle"
"fmt"
tmjson "github.com/line/ostracon/libs/json"
"github.com/herumi/bls-eth-go-binary/bls"
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/tmhash"
)
var _ crypto.PrivKey = PrivKey{}
const (
PrivKeyName = "ostracon/PrivKeyBLS12"
PubKeyName = "ostracon/PubKeyBLS12"
PrivKeySize = 32
PubKeySize = 48
SignatureSize = 96
KeyType = "bls12-381"
)
func init() {
tmjson.RegisterType(PubKey{}, PubKeyName)
tmjson.RegisterType(PrivKey{}, PrivKeyName)
err := bls.Init(bls.BLS12_381)
if err != nil {
panic(fmt.Sprintf("ERROR: %s", err))
}
err = bls.SetETHmode(bls.EthModeLatest)
if err != nil {
panic(fmt.Sprintf("ERROR: %s", err))
}
}
// PrivKey implements crypto.PrivKey.
type PrivKey [PrivKeySize]byte
// AddSignature adds a BLS signature to the given aggregate signature (init). When init is nil, a new
// aggregate signature is built from the specified signature.
func AddSignature(init []byte, signature []byte) (aggrSign []byte, err error) {
if init == nil {
blsSign := bls.Sign{}
init = blsSign.Serialize()
} else if len(init) != SignatureSize {
err = fmt.Errorf("invalid BLS signature: aggregated signature size %d is not valid size %d",
len(init), SignatureSize)
return
}
if len(signature) != SignatureSize {
err = fmt.Errorf("invalid BLS signature: signature size %d is not valid size %d",
len(signature), SignatureSize)
return
}
blsSign := bls.Sign{}
err = blsSign.Deserialize(signature)
if err != nil {
return
}
aggrBLSSign := bls.Sign{}
err = aggrBLSSign.Deserialize(init)
if err != nil {
return
}
aggrBLSSign.Add(&blsSign)
aggrSign = aggrBLSSign.Serialize()
return
}
func VerifyAggregatedSignature(aggregatedSignature []byte, pubKeys []PubKey, msgs [][]byte) error {
if len(pubKeys) != len(msgs) {
return fmt.Errorf("the number of public keys %d doesn't match the one of messages %d",
len(pubKeys), len(msgs))
}
if aggregatedSignature == nil {
if len(pubKeys) == 0 {
return nil
}
return fmt.Errorf(
"the aggregate signature was omitted, even though %d public keys were specified", len(pubKeys))
}
aggrSign := bls.Sign{}
err := aggrSign.Deserialize(aggregatedSignature)
if err != nil {
return err
}
blsPubKeys := make([]bls.PublicKey, len(pubKeys))
hashes := make([][]byte, len(msgs))
for i := 0; i < len(pubKeys); i++ {
blsPubKeys[i] = bls.PublicKey{}
err = blsPubKeys[i].Deserialize(pubKeys[i][:])
if err != nil {
return err
}
hash := sha512.Sum512_256(msgs[i])
hashes[i] = hash[:]
}
if !aggrSign.VerifyAggregateHashes(blsPubKeys, hashes) {
return fmt.Errorf("failed to verify the aggregated hashes by %d public keys", len(blsPubKeys))
}
return nil
}
// GenPrivKey generates a new BLS12-381 private key.
func GenPrivKey() PrivKey {
sigKey := bls.SecretKey{}
sigKey.SetByCSPRNG()
sigKeyBinary := PrivKey{}
binary := sigKey.Serialize()
if len(binary) != PrivKeySize {
panic(fmt.Sprintf("unexpected BLS private key size: %d != %d", len(binary), PrivKeySize))
}
copy(sigKeyBinary[:], binary)
return sigKeyBinary
}
// Bytes marshals the privkey using amino encoding.
func (privKey PrivKey) Bytes() []byte {
return privKey[:]
}
// Sign produces a signature on the provided message.
func (privKey PrivKey) Sign(msg []byte) ([]byte, error) {
if msg == nil {
panic("Nil specified as the message")
}
blsKey := bls.SecretKey{}
err := blsKey.Deserialize(privKey[:])
if err != nil {
return nil, err
}
hash := sha512.Sum512_256(msg)
sign := blsKey.SignHash(hash[:])
return sign.Serialize(), nil
}
// FIXME: Type crypto.Proof not defined in Tendermint.
//// VRFProve is not supported in BLS12.
//func (privKey PrivKey) VRFProve(seed []byte) (crypto.Proof, error) {
// return nil, fmt.Errorf("VRF prove is not supported by the BLS12")
//}
// PubKey gets the corresponding public key from the private key.
func (privKey PrivKey) PubKey() crypto.PubKey {
blsKey := bls.SecretKey{}
err := blsKey.Deserialize(privKey[:])
if err != nil {
panic(fmt.Sprintf("Not a BLS12-381 private key: %X", privKey[:]))
}
pubKey := blsKey.GetPublicKey()
pubKeyBinary := PubKey{}
binary := pubKey.Serialize()
if len(binary) != PubKeySize {
panic(fmt.Sprintf("unexpected BLS public key size: %d != %d", len(binary), PubKeySize))
}
copy(pubKeyBinary[:], binary)
return pubKeyBinary
}
// Equals - you probably don't need to use this.
// Runs in constant time based on length of the keys.
func (privKey PrivKey) Equals(other crypto.PrivKey) bool {
if otherEd, ok := other.(PrivKey); ok {
return subtle.ConstantTimeCompare(privKey[:], otherEd[:]) == 1
}
return false
}
// Type returns information to identify the type of this key.
func (privKey PrivKey) Type() string {
return KeyType
}
var _ crypto.PubKey = PubKey{}
// PubKey implements crypto.PubKey for the BLS12-381 signature scheme.
type PubKey [PubKeySize]byte
// Address is the SHA256-20 of the raw pubkey bytes.
func (pubKey PubKey) Address() crypto.Address {
return tmhash.SumTruncated(pubKey[:])
}
// Bytes marshals the PubKey using amino encoding.
func (pubKey PubKey) Bytes() []byte {
return pubKey[:]
}
func (pubKey PubKey) VerifySignature(msg []byte, sig []byte) bool {
// make sure we use the same algorithm to sign
if len(sig) != SignatureSize {
return false
}
blsPubKey := bls.PublicKey{}
err := blsPubKey.Deserialize(pubKey[:])
if err != nil {
return false
}
blsSign := bls.Sign{}
err = blsSign.Deserialize(sig)
if err != nil {
fmt.Printf("Signature Deserialize failed: %s", err)
return false
}
hash := sha512.Sum512_256(msg)
return blsSign.VerifyHash(&blsPubKey, hash[:])
}
// FIXME: Types crypto.Proof and crypto.Output not defined in Tendermint.
//// VRFVerify is not supported in BLS12.
//func (pubKey PubKey) VRFVerify(proof crypto.Proof, seed []byte) (crypto.Output, error) {
// return nil, fmt.Errorf("VRF verify is not supported by the BLS12")
//}
func (pubKey PubKey) String() string {
return fmt.Sprintf("PubKeyBLS12{%X}", pubKey[:])
}
func (pubKey PubKey) Equals(other crypto.PubKey) bool {
if otherEd, ok := other.(PubKey); ok {
return bytes.Equal(pubKey[:], otherEd[:])
}
return false
}
// Type returns information to identify the type of this key.
func (pubKey PubKey) Type() string {
return KeyType
}
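For reference, a compact usage sketch for the aggregation helpers in this (removed) ostracon package, mirroring what the tests below exercise; the three-signer setup and the message contents are illustrative assumptions:

package main

import (
	"fmt"

	"github.com/tendermint/tendermint/crypto/bls/ostracon"
)

func main() {
	var aggSig []byte
	pubKeys := make([]ostracon.PubKey, 3)
	msgs := make([][]byte, 3)

	for i := range pubKeys {
		priv := ostracon.GenPrivKey()
		pubKeys[i] = priv.PubKey().(ostracon.PubKey) // PubKey() returns crypto.PubKey
		msgs[i] = []byte(fmt.Sprintf("message #%d", i))

		sig, err := priv.Sign(msgs[i])
		if err != nil {
			panic(err)
		}
		// A nil aggSig on the first iteration starts a fresh aggregate signature.
		if aggSig, err = ostracon.AddSignature(aggSig, sig); err != nil {
			panic(err)
		}
	}

	// Each public key is checked against its own message, in matching order.
	if err := ostracon.VerifyAggregatedSignature(aggSig, pubKeys, msgs); err != nil {
		panic(err)
	}
	fmt.Println("aggregated signature verified")
}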

View File

@@ -1,372 +0,0 @@
package ostracon
import (
"bytes"
"crypto/sha256"
"fmt"
"math/rand"
"reflect"
"testing"
"time"
b "github.com/herumi/bls-eth-go-binary/bls"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/crypto"
)
func TestBasicSignatureFunctions(t *testing.T) {
privateKey := b.SecretKey{}
privateKey.SetByCSPRNG()
publicKey := privateKey.GetPublicKey()
duplicatedPrivateKey := b.SecretKey{}
err := duplicatedPrivateKey.Deserialize(privateKey.Serialize())
if err != nil {
t.Fatalf("Private key deserialization failed.")
}
if len(privateKey.Serialize()) != PrivKeySize {
t.Fatalf("The constant size %d of the private key is different from the actual size %d.",
PrivKeySize, len(privateKey.Serialize()))
}
duplicatedPublicKey := b.PublicKey{}
err = duplicatedPublicKey.Deserialize(publicKey.Serialize())
if err != nil {
t.Fatalf("Public key deserialization failed.")
}
if len(publicKey.Serialize()) != PubKeySize {
t.Fatalf("The constant size %d of the public key is different from the actual size %d.",
PubKeySize, len(publicKey.Serialize()))
}
duplicatedSignature := func(sig *b.Sign) *b.Sign {
duplicatedSign := b.Sign{}
err := duplicatedSign.Deserialize(sig.Serialize())
if err != nil {
t.Fatalf("Signature deserialization failed.")
}
if len(sig.Serialize()) != SignatureSize {
t.Fatalf("The constant size %d of the signature is different from the actual size %d.",
SignatureSize, len(sig.Serialize()))
}
return &duplicatedSign
}
msg := []byte("hello, world")
for _, privKey := range []b.SecretKey{privateKey, duplicatedPrivateKey} {
for _, pubKey := range []*b.PublicKey{publicKey, &duplicatedPublicKey} {
signature := privKey.SignByte(msg)
if !signature.VerifyByte(pubKey, msg) {
t.Errorf("Signature verification failed.")
}
if !duplicatedSignature(signature).VerifyByte(pubKey, msg) {
t.Errorf("Signature verification failed.")
}
for i := 0; i < len(msg); i++ {
for j := 0; j < 8; j++ {
garbled := make([]byte, len(msg))
copy(garbled, msg)
garbled[i] ^= 1 << (8 - j - 1)
if bytes.Equal(msg, garbled) {
t.Fatalf("Not a barbled message")
}
if signature.VerifyByte(pubKey, garbled) {
t.Errorf("Signature verification was successful against a garbled byte sequence.")
}
if duplicatedSignature(signature).VerifyByte(pubKey, garbled) {
t.Errorf("Signature verification was successful against a garbled byte sequence.")
}
}
}
}
}
}
func TestSignatureAggregationAndVerify(t *testing.T) {
privKeys := make([]b.SecretKey, 25)
pubKeys := make([]b.PublicKey, len(privKeys))
msgs := make([][]byte, len(privKeys))
hash32s := make([][32]byte, len(privKeys))
signatures := make([]b.Sign, len(privKeys))
for i, privKey := range privKeys {
privKey.SetByCSPRNG()
pubKeys[i] = *privKey.GetPublicKey()
msgs[i] = []byte(fmt.Sprintf("hello, world #%d", i))
hash32s[i] = sha256.Sum256(msgs[i])
signatures[i] = *privKey.SignHash(hash32s[i][:])
// normal single-hash case
if !signatures[i].VerifyHash(&pubKeys[i], hash32s[i][:]) {
t.Fail()
}
// in case where 1-bit of hash was garbled
garbledHash := make([]byte, len(msgs[i]))
copy(garbledHash, msgs[i])
garbledHash[0] ^= 1 << 0
if garbledHash[0] == msgs[i][0] || signatures[i].VerifyByte(&pubKeys[i], garbledHash) {
t.Fail()
}
// Verification using an invalid public key
}
// aggregation
multiSig := b.Sign{}
multiSig.Aggregate(signatures)
// normal case
hashes := make([][]byte, len(privKeys))
for i := 0; i < len(hashes); i++ {
hashes[i] = hash32s[i][:]
}
if !multiSig.VerifyAggregateHashes(pubKeys, hashes) {
t.Fatalf("failed to validate the aggregate signature of the hashed message")
}
// in case where 1-bit of signature was garbled
multiSigBytes := multiSig.Serialize()
for i := range multiSigBytes {
for j := 0; j < 8; j++ {
garbledMultiSigBytes := make([]byte, len(multiSigBytes))
copy(garbledMultiSigBytes, multiSigBytes)
garbledMultiSigBytes[i] ^= 1 << j
if garbledMultiSigBytes[i] == multiSigBytes[i] {
t.Fail()
}
garbledMultiSig := b.Sign{}
err := garbledMultiSig.Deserialize(garbledMultiSigBytes)
if err == nil {
// Note that in some cases Deserialize() fails
if garbledMultiSig.VerifyAggregateHashes(pubKeys, hashes) {
t.Errorf("successfully verified the redacted signature")
}
}
}
}
// in case a public key used for verification is replaced
invalidPrivKey := b.SecretKey{}
invalidPrivKey.SetByCSPRNG()
invalidPubKeys := make([]b.PublicKey, len(pubKeys))
copy(invalidPubKeys, pubKeys)
invalidPubKeys[len(invalidPubKeys)-1] = *invalidPrivKey.GetPublicKey()
if multiSig.VerifyAggregateHashes(invalidPubKeys, hashes) {
t.Fatalf("successfully verified that it contains a public key that was not involved in the signing")
}
// in case a hash used for verification is replaced
invalidHashes := make([][]byte, len(hashes))
copy(invalidHashes, hashes)
invalidHash := sha256.Sum256([]byte("hello, world #99"))
invalidHashes[len(invalidHashes)-1] = invalidHash[:]
if multiSig.VerifyAggregateHashes(pubKeys, invalidHashes) {
t.Fatalf("successfully verified that it contains a hash that was not involved in the signing")
}
}
func generatePubKeysAndSigns(t *testing.T, size int) ([]PubKey, [][]byte, [][]byte) {
pubKeys := make([]PubKey, size)
msgs := make([][]byte, len(pubKeys))
sigs := make([][]byte, len(pubKeys))
for i := 0; i < len(pubKeys); i++ {
var err error
privKey := GenPrivKey()
pubKeys[i] = blsPublicKey(t, privKey.PubKey())
msgs[i] = []byte(fmt.Sprintf("hello, workd #%d", i))
sigs[i], err = privKey.Sign(msgs[i])
if err != nil {
t.Fatal(fmt.Sprintf("fail to sign: %s", err))
}
if !pubKeys[i].VerifySignature(msgs[i], sigs[i]) {
t.Fatal("fail to verify signature")
}
}
return pubKeys, msgs, sigs
}
func blsPublicKey(t *testing.T, pubKey crypto.PubKey) PubKey {
blsPubKey, ok := pubKey.(PubKey)
if !ok {
var keyType string
if t := reflect.TypeOf(pubKey); t.Kind() == reflect.Ptr {
keyType = "*" + t.Elem().Name()
} else {
keyType = t.Name()
}
t.Fatal(fmt.Sprintf("specified public key is not for BLS: %s", keyType))
}
return blsPubKey
}
func aggregateSignatures(init []byte, signatures [][]byte) (aggrSig []byte, err error) {
aggrSig = init
for _, sign := range signatures {
aggrSig, err = AddSignature(aggrSig, sign)
if err != nil {
return
}
}
return
}
func TestAggregatedSignature(t *testing.T) {
// generate BLS signatures and public keys
pubKeys, msgs, sigs := generatePubKeysAndSigns(t, 25)
// aggregate signatures
aggrSig, err := aggregateSignatures(nil, sigs)
if err != nil {
t.Errorf("fail to aggregate BLS signatures: %s", err)
}
if len(aggrSig) != SignatureSize {
t.Errorf("inconpatible signature size: %d != %d", len(aggrSig), SignatureSize)
}
// validate the aggregated signature
if err := VerifyAggregatedSignature(aggrSig, pubKeys, msgs); err != nil {
t.Errorf("fail to verify aggregated signature: %s", err)
}
// validate with the public keys and messages pair in random order
t.Run("Doesn't Depend on the Order of PublicKey-Message Pairs", func(t *testing.T) {
shuffledPubKeys := make([]PubKey, len(pubKeys))
shuffledMsgs := make([][]byte, len(msgs))
copy(shuffledPubKeys, pubKeys)
copy(shuffledMsgs, msgs)
rand.Seed(time.Now().UnixNano())
rand.Shuffle(len(shuffledPubKeys), func(i, j int) {
shuffledPubKeys[i], shuffledPubKeys[j] = shuffledPubKeys[j], shuffledPubKeys[i]
shuffledMsgs[i], shuffledMsgs[j] = shuffledMsgs[j], shuffledMsgs[i]
})
if err := VerifyAggregatedSignature(aggrSig, shuffledPubKeys, shuffledMsgs); err != nil {
t.Errorf("fail to verify the aggregated signature with random order: %s", err)
}
})
// validate with the public keys in random order
t.Run("Incorrect Public Key Order", func(t *testing.T) {
shuffledPubKeys := make([]PubKey, len(pubKeys))
copy(shuffledPubKeys, pubKeys)
rand.Seed(time.Now().UnixNano())
rand.Shuffle(len(shuffledPubKeys), func(i, j int) {
shuffledPubKeys[i], shuffledPubKeys[j] = shuffledPubKeys[j], shuffledPubKeys[i]
})
if err := VerifyAggregatedSignature(aggrSig, shuffledPubKeys, msgs); err == nil {
t.Error("successfully validated with public keys of different order")
}
})
// validate with the messages in random order
t.Run("Incorrect Message Order", func(t *testing.T) {
shuffledMsgs := make([][]byte, len(msgs))
copy(shuffledMsgs, msgs)
rand.Seed(time.Now().UnixNano())
rand.Shuffle(len(shuffledMsgs), func(i, j int) {
shuffledMsgs[i], shuffledMsgs[j] = shuffledMsgs[j], shuffledMsgs[i]
})
if err := VerifyAggregatedSignature(aggrSig, pubKeys, shuffledMsgs); err == nil {
t.Error("successfully validated with messages of different order")
}
})
// replace one public key with another and detect
t.Run("Replace One Public Key", func(t *testing.T) {
pubKey, _ := GenPrivKey().PubKey().(PubKey)
replacedPubKeys := make([]PubKey, len(pubKeys))
copy(replacedPubKeys, pubKeys)
replacedPubKeys[0] = pubKey
if err := VerifyAggregatedSignature(aggrSig, replacedPubKeys, msgs); err == nil {
t.Error("verification with an invalid key was successful")
}
})
// replace one message with another and detect
t.Run("Replace One Message", func(t *testing.T) {
msg := []byte(fmt.Sprintf("hello, world #%d replaced", len(msgs)))
replacedMsgs := make([][]byte, len(msgs))
copy(replacedMsgs, msgs)
replacedMsgs[0] = msg
if err := VerifyAggregatedSignature(aggrSig, pubKeys, replacedMsgs); err == nil {
t.Error("verification with an invalid message was successful")
}
})
// add new signature to existing aggregated signature and verify
t.Run("Incremental Update", func(t *testing.T) {
msg := []byte(fmt.Sprintf("hello, world #%d", len(msgs)))
privKey := GenPrivKey()
pubKey := privKey.PubKey()
sig, err := privKey.Sign(msg)
assert.Nilf(t, err, "%s", err)
newAggrSig, _ := aggregateSignatures(aggrSig, [][]byte{sig})
newPubKeys := make([]PubKey, len(pubKeys))
copy(newPubKeys, pubKeys)
newPubKeys = append(newPubKeys, blsPublicKey(t, pubKey))
newMsgs := make([][]byte, len(msgs))
copy(newMsgs, msgs)
newMsgs = append(newMsgs, msg)
if err := VerifyAggregatedSignature(newAggrSig, newPubKeys, newMsgs); err != nil {
t.Errorf("fail to verify the aggregate signature with the new signature: %s", err)
}
})
// aggregating a nil or empty signature list returns nil
nilSig, _ := aggregateSignatures(nil, [][]byte{})
assert.Nil(t, nilSig)
// an invalid (non-BLS) signature is rejected
_, err = aggregateSignatures(nil, [][]byte{make([]byte, 0)})
assert.NotNil(t, err)
}
func TestSignatureAggregation(t *testing.T) {
publicKeys := make([]b.PublicKey, 25)
aggregatedSignature := b.Sign{}
aggregatedPublicKey := b.PublicKey{}
msg := []byte("hello, world")
for i := 0; i < len(publicKeys); i++ {
privateKey := b.SecretKey{}
privateKey.SetByCSPRNG()
publicKeys[i] = *privateKey.GetPublicKey()
aggregatedSignature.Add(privateKey.SignByte(msg))
aggregatedPublicKey.Add(&publicKeys[i])
}
if !aggregatedSignature.FastAggregateVerify(publicKeys, msg) {
t.Error("FastAggregateVerify of the aggregated signature failed")
}
if !aggregatedSignature.VerifyByte(&aggregatedPublicKey, msg) {
t.Error("verification against the aggregated public key failed")
}
}
func TestSignAndValidateBLS12(t *testing.T) {
privKey := GenPrivKey()
pubKey := privKey.PubKey()
msg := crypto.CRandBytes(128)
sig, err := privKey.Sign(msg)
require.Nil(t, err)
//fmt.Printf("restoring signature: %x\n", sig)
// Test the signature
assert.True(t, pubKey.VerifySignature(msg, sig))
// Mutate the signature, just one bit.
// TODO: Replace this with a much better fuzzer, tendermint/ed25519/issues/10
sig[7] ^= byte(0x01)
assert.False(t, pubKey.VerifySignature(msg, sig))
}

View File

@@ -1,189 +0,0 @@
package bls
import (
"crypto/rand"
"testing"
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/bls/blst"
"github.com/tendermint/tendermint/crypto/bls/ostracon"
"github.com/tendermint/tendermint/crypto/ed25519"
)
func TestEd25519SignVerify(t *testing.T) {
m := []byte("a test message to be signed")
privKey := ed25519.GenPrivKey()
pubKey := privKey.PubKey()
sig, err := privKey.Sign(m)
if err != nil {
t.Error("Unexpected error while signing", m, sig, err)
}
if !pubKey.VerifySignature(m, sig) {
t.Error("Failed to verify signature produced by the key", m, sig)
}
if pubKey.VerifySignature(scrambleBytes(m), sig) {
t.Error("Unexpectedly verified a scrambled message", scrambleBytes(m), sig)
}
if pubKey.VerifySignature(m, scrambleBytes(sig)) {
t.Error("Unexpectedly verified a scrambled signature", m, scrambleBytes(sig))
}
}
func TestBlstSignVerify(t *testing.T) {
m := []byte("a test message to be signed")
privKey := blst.GenPrivKey()
pubKey := privKey.PubKey()
sig, err := privKey.Sign(m)
if err != nil {
t.Error("Unexpected error while signing", m, sig, err)
}
if !pubKey.VerifySignature(m, sig) {
t.Error("Failed to verify signature produced by the key", m, sig)
}
if pubKey.VerifySignature(scrambleBytes(m), sig) {
t.Error("Unexpectedly verified a scrambled message", scrambleBytes(m), sig)
}
if pubKey.VerifySignature(m, scrambleBytes(sig)) {
t.Error("Unexpectedly verified a scrambled signature", m, scrambleBytes(sig))
}
}
func TestOstraconSignVerify(t *testing.T) {
m := []byte("a test message to be signed")
privKey := ostracon.GenPrivKey()
pubKey := privKey.PubKey()
sig, err := privKey.Sign(m)
if err != nil {
t.Error("Unexpected error while signing", m, sig, err)
}
if !pubKey.VerifySignature(m, sig) {
t.Error("Failed to verify signature produced by the key", m, sig)
}
if pubKey.VerifySignature(scrambleBytes(m), sig) {
t.Error("Unexpectedly verified a scrambled message", scrambleBytes(m), sig)
}
if pubKey.VerifySignature(m, scrambleBytes(sig)) {
t.Error("Unexpectedly verified a scrambled signature", m, scrambleBytes(sig))
}
}
// scrambleBytes returns a copy of b with a single bit flipped.
// TODO: implement a more complex scrambling method.
func scrambleBytes(b []byte) []byte {
bb := make([]byte, len(b))
copy(bb, b)
index := len(b) / 2
bb[index] ^= 1
return bb
}
// pubKeysFromPrivKeys retrieves the public keys corresponding to a set of private keys.
func pubKeysFromPrivKeys(privKeys []crypto.PrivKey) []crypto.PubKey {
pubKeys := make([]crypto.PubKey, len(privKeys))
for i := 0; i < len(privKeys); i++ {
pubKeys[i] = privKeys[i].PubKey()
}
return pubKeys
}
// randomMessages generates n random messages of the given size.
func randomMessages(n, size int) [][]byte {
messages := make([][]byte, n)
for i := 0; i < n; i++ {
messages[i] = make([]byte, size)
rand.Read(messages[i])
}
return messages
}
// signMessages signs each message, cycling through the private keys in order.
func signMessages(messages [][]byte, privKeys []crypto.PrivKey) [][]byte {
sigs := make([][]byte, len(messages))
for i := 0; i < len(messages); i++ {
sigs[i], _ = privKeys[i%len(privKeys)].Sign(messages[i])
}
return sigs
}
var _sig []byte
func BenchmarkEd25519Sign1k(t *testing.B) {
messages := randomMessages(1000, 1024)
privKeys := make([]crypto.PrivKey, 128)
for i := 0; i < len(privKeys); i++ {
privKeys[i] = ed25519.GenPrivKey()
}
t.ResetTimer()
for i := 0; i < t.N; i++ {
_sig, _ = privKeys[i%len(privKeys)].Sign(messages[i%len(messages)])
}
}
func BenchmarkEd25519Verify1k(t *testing.B) {
messages := randomMessages(1000, 1024)
privKeys := make([]crypto.PrivKey, 128)
for i := 0; i < len(privKeys); i++ {
privKeys[i] = ed25519.GenPrivKey()
}
pubKeys := pubKeysFromPrivKeys(privKeys)
sigs := signMessages(messages, privKeys)
t.ResetTimer()
for i := 0; i < t.N; i++ {
pubKeys[i%len(pubKeys)].VerifySignature(messages[i%len(messages)], sigs[i%len(sigs)])
}
}
func BenchmarkBlstSign1k(t *testing.B) {
messages := randomMessages(1000, 1024)
privKeys := make([]crypto.PrivKey, 128)
for i := 0; i < len(privKeys); i++ {
privKeys[i] = blst.GenPrivKey()
}
t.ResetTimer()
for i := 0; i < t.N; i++ {
_sig, _ = privKeys[i%len(privKeys)].Sign(messages[i%len(messages)])
}
}
func BenchmarkBlstVerify1k(t *testing.B) {
messages := randomMessages(1000, 1024)
privKeys := make([]crypto.PrivKey, 128)
for i := 0; i < len(privKeys); i++ {
privKeys[i] = blst.GenPrivKey()
}
pubKeys := pubKeysFromPrivKeys(privKeys)
sigs := signMessages(messages, privKeys)
t.ResetTimer()
for i := 0; i < t.N; i++ {
pubKeys[i%len(pubKeys)].VerifySignature(messages[i%len(messages)], sigs[i%len(sigs)])
}
}
func BenchmarkOstraconSign1k(t *testing.B) {
messages := randomMessages(1000, 1024)
privKeys := make([]crypto.PrivKey, 128)
for i := 0; i < len(privKeys); i++ {
privKeys[i] = ostracon.GenPrivKey()
}
t.ResetTimer()
for i := 0; i < t.N; i++ {
_sig, _ = privKeys[i%len(privKeys)].Sign(messages[i%len(messages)])
}
}
func BenchmarkOstraconVerify1k(t *testing.B) {
messages := randomMessages(1000, 1024)
privKeys := make([]crypto.PrivKey, 128)
for i := 0; i < len(privKeys); i++ {
privKeys[i] = ostracon.GenPrivKey()
}
pubKeys := pubKeysFromPrivKeys(privKeys)
sigs := signMessages(messages, privKeys)
t.ResetTimer()
for i := 0; i < t.N; i++ {
pubKeys[i%len(pubKeys)].VerifySignature(messages[i%len(messages)], sigs[i%len(sigs)])
}
}

View File

@@ -33,6 +33,10 @@ module.exports = {
{
"label": "v0.35",
"key": "v0.35"
},
{
"label": "master",
"key": "master"
}
],
topbar: {
@@ -45,10 +49,8 @@ module.exports = {
title: 'Resources',
children: [
{
// TODO(creachadair): Figure out how to make this per-branch.
// See: https://github.com/tendermint/tendermint/issues/7908
title: 'RPC',
path: 'https://docs.tendermint.com/v0.35/rpc/',
path: 'https://docs.tendermint.com/master/rpc/',
static: true
},
]
@@ -160,12 +162,6 @@ module.exports = {
{
ga: 'UA-51029217-11'
}
],
[
'@vuepress/plugin-html-redirect',
{
countdown: 0
}
]
]
};

View File

@@ -1 +0,0 @@
/master/ /v0.35/

View File

@@ -21,7 +21,7 @@ Tendermint?](introduction/what-is-tendermint.md).
To get started quickly with an example application, see the [quick start guide](introduction/quick-start.md).
To learn about application development on Tendermint, see the [Application Blockchain Interface](../spec/abci).
To learn about application development on Tendermint, see the [Application Blockchain Interface](https://github.com/tendermint/tendermint/tree/master/spec/abci).
For more details on using Tendermint, see the respective documentation for
[Tendermint Core](tendermint-core/), [benchmarking and monitoring](tools/), and [network deployments](nodes/).

View File

@@ -27,17 +27,17 @@ Usage:
abci-cli [command]
Available Commands:
batch Run a batch of abci commands against an application
check_tx Validate a tx
commit Commit the application state and return the Merkle root hash
console Start an interactive abci console for multiple commands
finalize_block Send a set of transactions to the application
kvstore ABCI demo example
echo Have the application echo a message
help Help about any command
info Get some info about the application
query Query the application state
set_option Set an options on the application
batch Run a batch of abci commands against an application
check_tx Validate a tx
commit Commit the application state and return the Merkle root hash
console Start an interactive abci console for multiple commands
deliver_tx Deliver a new tx to the application
kvstore ABCI demo example
echo Have the application echo a message
help Help about any command
info Get some info about the application
query Query the application state
set_option Set an options on the application
Flags:
--abci string socket or grpc (default "socket")
@@ -53,7 +53,7 @@ Use "abci-cli [command] --help" for more information about a command.
The `abci-cli` tool lets us send ABCI messages to our application, to
help build and debug them.
The most important messages are `finalize_block`, `check_tx`, and `commit`,
The most important messages are `deliver_tx`, `check_tx`, and `commit`,
but there are others for convenience, configuration, and information
purposes.
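
For orientation only, the following sketch (a hypothetical, simplified interface, not the actual `abci/types` API) illustrates the roles those messages play on the application side:

```go
package main

import "fmt"

// app is a hypothetical, minimal stand-in for an ABCI application used only
// to illustrate the roles of check_tx, deliver_tx/finalize_block, and commit.
// The real application interface lives in abci/types and differs in detail.
type app struct {
	pending []string // transactions applied but not yet committed
	height  int64
}

// checkTx plays the role of check_tx: decide whether a tx may enter the mempool.
func (a *app) checkTx(tx string) bool { return len(tx) > 0 }

// deliverTx plays the role of deliver_tx/finalize_block: apply a tx to state.
func (a *app) deliverTx(tx string) { a.pending = append(a.pending, tx) }

// commit plays the role of commit: persist the applied txs and bump the height.
func (a *app) commit() { a.pending = nil; a.height++ }

func main() {
	a := &app{}
	if a.checkTx("abc") {
		a.deliverTx("abc")
	}
	a.commit()
	fmt.Println("height after commit:", a.height)
}
```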
@@ -173,7 +173,7 @@ Try running these commands:
-> code: OK
-> data.hex: 0x0000000000000000
> finalize_block "abc"
> deliver_tx "abc"
-> code: OK
> info
@@ -192,7 +192,7 @@ Try running these commands:
-> value: abc
-> value.hex: 616263
> finalize_block "def=xyz"
> deliver_tx "def=xyz"
-> code: OK
> commit
@@ -207,8 +207,8 @@ Try running these commands:
-> value.hex: 78797A
```
Note that if we do `finalize_block "abc"` it will store `(abc, abc)`, but if
we do `finalize_block "abc=efg"` it will store `(abc, efg)`.
Note that if we do `deliver_tx "abc"` it will store `(abc, abc)`, but if
we do `deliver_tx "abc=efg"` it will store `(abc, efg)`.
Similarly, you could put the commands in a file and run
`abci-cli --verbose batch < myfile`.
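
As a rough sketch (not the actual kvstore source), the key/value split described above could look like this:

```go
package main

import (
	"bytes"
	"fmt"
)

// parseTx mirrors the behaviour described above: a bare value is stored under
// itself, while "key=value" is split on the first '='. This is an illustrative
// sketch, not the kvstore example's real implementation.
func parseTx(tx []byte) (key, value []byte) {
	if i := bytes.IndexByte(tx, '='); i >= 0 {
		return tx[:i], tx[i+1:]
	}
	return tx, tx
}

func main() {
	k, v := parseTx([]byte("abc"))
	fmt.Printf("(%s, %s)\n", k, v) // (abc, abc)

	k, v = parseTx([]byte("abc=efg"))
	fmt.Printf("(%s, %s)\n", k, v) // (abc, efg)
}
```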

View File

@@ -182,7 +182,7 @@ node example/counter.js
In another window, reset and start `tendermint`:
```sh
tendermint reset unsafe-all
tendermint unsafe-reset-all
tendermint start
```

View File

@@ -67,10 +67,6 @@ Note the context/background should be written in the present tense.
- [ADR-063: Privval-gRPC](./adr-063-privval-grpc.md)
- [ADR-066: E2E-Testing](./adr-066-e2e-testing.md)
- [ADR-072: Restore Requests for Comments](./adr-072-request-for-comments.md)
- [ADR-077: Block Retention](./adr-077-block-retention.md)
- [ADR-078: Non-zero Genesis](./adr-078-nonzero-genesis.md)
- [ADR-079: ED25519 Verification](./adr-079-ed25519-verification.md)
- [ADR-080: Reverse Sync](./adr-080-reverse-sync.md)
### Accepted
@@ -85,11 +81,6 @@ Note the context/background should be written in the present tense.
- [ADR-067: Mempool Refactor](./adr-067-mempool-refactor.md)
- [ADR-075: RPC Event Subscription Interface](./adr-075-rpc-subscription.md)
- [ADR-076: Combine Spec and Tendermint Repositories](./adr-076-combine-spec-repo.md)
- [ADR-081: Protocol Buffers Management](./adr-081-protobuf-mgmt.md)
### Deprecated
None
### Rejected
@@ -97,6 +88,7 @@ None
- [ADR-029: Check-Tx-Consensus](./adr-029-check-tx-consensus.md)
- [ADR-058: Event-Hashing](./adr-058-event-hashing.md)
### Proposed
- [ADR-007: Trust-Metric-Usage](./adr-007-trust-metric-usage.md)
@@ -104,17 +96,12 @@ None
- [ADR-013: Symmetric-Crypto](./adr-013-symmetric-crypto.md)
- [ADR-022: ABCI-Errors](./adr-022-abci-errors.md)
- [ADR-030: Consensus-Refactor](./adr-030-consensus-refactor.md)
- [ADR-036: Empty Blocks via ABCI](./adr-036-empty-blocks-abci.md)
- [ADR-037: Deliver-Block](./adr-037-deliver-block.md)
- [ADR-038: Non-Zero-Start-Height](./adr-038-non-zero-start-height.md)
- [ADR-040: Blockchain Reactor Refactor](./adr-040-blockchain-reactor-refactor.md)
- [ADR-041: Proposer-Selection-via-ABCI](./adr-041-proposer-selection-via-abci.md)
- [ADR-042: State Sync Design](./adr-042-state-sync.md)
- [ADR-045: ABCI-Evidence](./adr-045-abci-evidence.md)
- [ADR-050: Improved Trusted Peering](./adr-050-improved-trusted-peering.md)
- [ADR-057: RPC](./adr-057-RPC.md)
- [ADR-064: Batch Verification](./adr-064-batch-verification.md)
- [ADR-069: Node Initialization](./adr-069-flexible-node-initialization.md)
- [ADR-071: Proposer-Based Timestamps](./adr-071-proposer-based-timestamps.md)
- [ADR-073: Adopt LibP2P](./adr-073-libp2p.md)
- [ADR-071: Proposer-Based Timestamps](adr-071-proposer-based-timestamps.md)
- [ADR-074: Migrate Timeout Parameters to Consensus Parameters](./adr-074-timeout-params.md)

View File

@@ -84,7 +84,7 @@ The linear verification algorithm requires downloading all headers
between the `TrustHeight` and the `LatestHeight`. The lite client downloads the
full header for the provided `TrustHeight` and then proceeds to download `N+1`
headers and applies the [Tendermint validation
rules](https://github.com/tendermint/tendermint/tree/master/spec/light-client/verification/README.md)
rules](https://docs.tendermint.com/master/spec/light-client/verification/)
to each block.
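
For illustration only, a rough Go sketch of that sequential loop might look as follows; `Header`, `verifyAdjacent`, and `fetch` are hypothetical stand-ins rather than the real light-client types:

```go
package main

import "fmt"

// Header is a hypothetical stand-in for a block header; real headers also
// carry commits, validator-set hashes, timestamps, and more.
type Header struct {
	Height int64
}

// verifyAdjacent stands in for the Tendermint validation rules applied to a
// header against its immediate predecessor.
func verifyAdjacent(trusted, next *Header) error {
	if next.Height != trusted.Height+1 {
		return fmt.Errorf("non-adjacent headers: %d -> %d", trusted.Height, next.Height)
	}
	// real rules: check commit signatures, validator-set hashes, timestamps, ...
	return nil
}

// verifyLinear walks every height from trusted.Height+1 up to latestHeight,
// fetching and validating each header in turn, and returns the newly trusted header.
func verifyLinear(trusted *Header, latestHeight int64, fetch func(int64) (*Header, error)) (*Header, error) {
	for h := trusted.Height + 1; h <= latestHeight; h++ {
		next, err := fetch(h)
		if err != nil {
			return nil, err
		}
		if err := verifyAdjacent(trusted, next); err != nil {
			return nil, err
		}
		trusted = next
	}
	return trusted, nil
}

func main() {
	fetch := func(h int64) (*Header, error) { return &Header{Height: h}, nil }
	trusted, err := verifyLinear(&Header{Height: 10}, 15, fetch)
	fmt.Println(trusted.Height, err) // 15 <nil>
}
```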
### Bisecting Verification

Some files were not shown because too many files have changed in this diff.