Compare commits


1 Commit

Author: William Banfield
SHA1: 3276f86587
Message: statesync and blocksync: remove tests for serialized bytes
Date: 2021-12-06 11:41:44 -05:00
489 changed files with 15007 additions and 21125 deletions


@@ -1,16 +1,16 @@
pullRequestOpened: |
:wave: Thanks for creating a PR!
Before we can merge this PR, please make sure that all the following items have been
checked off. If any of the checklist items are not applicable, please leave them but
write a little note why.
- [ ] Wrote tests
- [ ] Updated CHANGELOG_PENDING.md
- [ ] Linked to Github issue with discussion and accepted design OR link to spec that describes this work.
- [ ] Updated relevant documentation (`docs/`) and code comments
- [ ] Re-reviewed `Files changed` in the Github PR explorer
- [ ] Applied Appropriate Labels
Thank you for your contribution to Tendermint! :rocket:

.github/linter/markdownlint.yml

@@ -0,0 +1,8 @@
default: true,
MD007: { "indent": 4 }
MD013: false
MD024: { siblings_only: true }
MD025: false
MD033: { no-inline-html: false }
no-hard-tabs: false
whitespace: false


@@ -1,8 +0,0 @@
default: true,
MD007: {"indent": 4}
MD013: false
MD024: {siblings_only: true}
MD025: false
MD033: {no-inline-html: false}
no-hard-tabs: false
whitespace: false


@@ -1,9 +0,0 @@
---
# Default rules for YAML linting from super-linter.
# See: https://yamllint.readthedocs.io/en/stable/rules.html
extends: default
rules:
document-end: disable
document-start: disable
line-length: disable
truthy: disable

.github/mergify.yml

@@ -1,22 +1,13 @@
queue_rules:
- name: default
conditions:
- base=master
- label=S:automerge
pull_request_rules:
- name: Automerge to master
conditions:
- base=master
- label=S:automerge
actions:
queue:
merge:
method: squash
name: default
commit_message_template: |
{{ title }} (#{{ number }})
{{ body }}
strict: smart+fasttrack
commit_message: title+body
- name: backport patches to v0.34.x branch
conditions:
- base=master
@@ -33,3 +24,4 @@ pull_request_rules:
backport:
branches:
- v0.35.x


@@ -1,82 +0,0 @@
name: Build
# Tests runs different tests (test_abci_apps, test_abci_cli, test_apps)
# This workflow runs on every push to master or release branch and every pull request
# All jobs will pass without running if no *{.go, .mod, .sum} files have been modified
on:
pull_request:
push:
branches:
- master
- release/**
jobs:
build:
name: Build
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
goarch: ["arm", "amd64"]
goos: ["linux"]
timeout-minutes: 5
steps:
- uses: actions/setup-go@v2
with:
go-version: "1.17"
- uses: actions/checkout@v2.4.0
- uses: technote-space/get-diff-action@v6.0.1
with:
PATTERNS: |
**/**.go
"!test/"
go.mod
go.sum
Makefile
- name: install
run: GOOS=${{ matrix.goos }} GOARCH=${{ matrix.goarch }} make build
if: "env.GIT_DIFF != ''"
test_abci_cli:
runs-on: ubuntu-latest
needs: build
timeout-minutes: 5
steps:
- uses: actions/setup-go@v2
with:
go-version: "1.17"
- uses: actions/checkout@v2.4.0
- uses: technote-space/get-diff-action@v6.0.1
with:
PATTERNS: |
**/**.go
go.mod
go.sum
- name: install
run: make install_abci
if: "env.GIT_DIFF != ''"
- run: abci/tests/test_cli/test.sh
shell: bash
if: "env.GIT_DIFF != ''"
test_apps:
runs-on: ubuntu-latest
needs: build
timeout-minutes: 5
steps:
- uses: actions/setup-go@v2
with:
go-version: "1.17"
- uses: actions/checkout@v2.4.0
- uses: technote-space/get-diff-action@v6.0.1
with:
PATTERNS: |
**/**.go
go.mod
go.sum
- name: install
run: make install install_abci
if: "env.GIT_DIFF != ''"
- name: test_apps
run: test/app/test.sh
shell: bash
if: "env.GIT_DIFF != ''"

.github/workflows/coverage.yml

@@ -0,0 +1,100 @@
name: Test Coverage
on:
pull_request:
push:
paths:
- "**.go"
branches:
- master
- release/**
jobs:
build-linux:
name: Build
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
goarch: ["arm", "amd64"]
timeout-minutes: 5
steps:
- uses: actions/setup-go@v2
with:
go-version: "1.17"
- uses: actions/checkout@v2.4.0
- uses: technote-space/get-diff-action@v5
with:
PATTERNS: |
**/**.go
"!test/"
go.mod
go.sum
Makefile
- name: install
run: GOOS=linux GOARCH=${{ matrix.goarch }} make build
if: "env.GIT_DIFF != ''"
tests:
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
part: ["00", "01", "02", "03"]
steps:
- uses: actions/setup-go@v2
with:
go-version: "1.17"
- uses: actions/checkout@v2.4.0
- uses: technote-space/get-diff-action@v5
with:
PATTERNS: |
**/**.go
"!test/"
go.mod
go.sum
Makefile
- name: test & coverage report creation
run: |
make test-group-${{ matrix.part }} NUM_SPLIT=4
if: env.GIT_DIFF
- uses: actions/upload-artifact@v2
with:
name: "${{ github.sha }}-${{ matrix.part }}-coverage"
path: ./build/${{ matrix.part }}.profile.out
upload-coverage-report:
runs-on: ubuntu-latest
needs: tests
steps:
- uses: actions/checkout@v2.4.0
- uses: technote-space/get-diff-action@v5
with:
PATTERNS: |
**/**.go
"!test/"
go.mod
go.sum
Makefile
- uses: actions/download-artifact@v2
with:
name: "${{ github.sha }}-00-coverage"
if: env.GIT_DIFF
- uses: actions/download-artifact@v2
with:
name: "${{ github.sha }}-01-coverage"
if: env.GIT_DIFF
- uses: actions/download-artifact@v2
with:
name: "${{ github.sha }}-02-coverage"
if: env.GIT_DIFF
- uses: actions/download-artifact@v2
with:
name: "${{ github.sha }}-03-coverage"
if: env.GIT_DIFF
- run: |
cat ./*profile.out | grep -v "mode: set" >> coverage.txt
if: env.GIT_DIFF
- uses: codecov/codecov-action@v2.1.0
with:
file: ./coverage.txt
if: env.GIT_DIFF
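
The upload step stitches the four per-group profiles into one file. Each Go coverage profile opens with a `mode:` line, so naive concatenation would repeat that header; the `grep -v "mode: set"` filter strips it. Below is a minimal Go sketch of the same merge that keeps a single header; the input file names are hypothetical stand-ins for the downloaded artifacts.

```go
// mergeprofiles.go - a minimal sketch of merging Go coverage profiles,
// equivalent in spirit to `cat ./*profile.out | grep -v "mode: set"`.
// File names are hypothetical; real profiles come from `go test -coverprofile`.
package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

func main() {
	out, err := os.Create("coverage.txt")
	if err != nil {
		panic(err)
	}
	defer out.Close()

	// Keep exactly one "mode:" header for the merged file.
	fmt.Fprintln(out, "mode: set")

	for _, name := range []string{"00.profile.out", "01.profile.out"} {
		f, err := os.Open(name)
		if err != nil {
			panic(err)
		}
		sc := bufio.NewScanner(f)
		for sc.Scan() {
			line := sc.Text()
			if strings.HasPrefix(line, "mode:") {
				continue // drop each per-file header
			}
			fmt.Fprintln(out, line)
		}
		f.Close()
	}
}
```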


@@ -1,13 +1,14 @@
name: Docker
name: Build & Push
# Build & Push rebuilds the tendermint docker image on every push to master and creation of tags
# and pushes the image to https://hub.docker.com/r/interchainio/simapp/tags
on:
pull_request:
push:
branches:
- master
tags:
- "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10
- "v[0-9]+.[0-9]+.[0-9]+-rc*" # Push events to matching v*, i.e. v1.0-rc1, v20.15.10-rc5
- "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10
- "v[0-9]+.[0-9]+.[0-9]+-rc*" # Push events to matching v*, i.e. v1.0-rc1, v20.15.10-rc5
jobs:
build:
@@ -38,18 +39,18 @@ jobs:
with:
platforms: all
- name: Set up Docker Build
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1.6.0
- name: Login to DockerHub
if: ${{ github.event_name != 'pull_request' }}
uses: docker/login-action@v1.12.0
uses: docker/login-action@v1.10.0
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Publish to Docker Hub
uses: docker/build-push-action@v2.8.0
uses: docker/build-push-action@v2.7.0
with:
context: .
file: ./DOCKER/Dockerfile


@@ -6,7 +6,7 @@
name: e2e-nightly-34x
on:
workflow_dispatch: # allow running workflow manually, in theory
schedule:
- cron: '0 2 * * *'
@@ -59,7 +59,7 @@ jobs:
SLACK_MESSAGE: Nightly E2E tests failed on v0.34.x
SLACK_FOOTER: ''
e2e-nightly-success: # may turn this off once they seem to pass consistently
needs: e2e-nightly-test
if: ${{ success() }}
runs-on: ubuntu-latest


@@ -5,7 +5,7 @@
name: e2e-nightly-35x
on:
workflow_dispatch: # allow running workflow manually
schedule:
- cron: '0 2 * * *'
@@ -59,7 +59,7 @@ jobs:
SLACK_MESSAGE: Nightly E2E tests failed on v0.35.x
SLACK_FOOTER: ''
e2e-nightly-success: # may turn this off once they seem to pass consistently
needs: e2e-nightly-test
if: ${{ success() }}
runs-on: ubuntu-latest


@@ -5,7 +5,7 @@
name: e2e-nightly-master
on:
workflow_dispatch: # allow running workflow manually
schedule:
- cron: '0 2 * * *'
@@ -56,7 +56,7 @@ jobs:
SLACK_MESSAGE: Nightly E2E tests failed on master
SLACK_FOOTER: ''
e2e-nightly-success: # may turn this off once they seem to pass consistently
needs: e2e-nightly-test
if: ${{ success() }}
runs-on: ubuntu-latest


@@ -2,7 +2,7 @@ name: e2e
# Runs the CI end-to-end test network on all pushes to master or release branches
# and every pull request, but only if any Go files have been changed.
on:
workflow_dispatch: # allow running workflow manually
pull_request:
push:
branches:
@@ -18,7 +18,7 @@ jobs:
with:
go-version: '1.17'
- uses: actions/checkout@v2.4.0
- uses: technote-space/get-diff-action@v6.0.1
- uses: technote-space/get-diff-action@v5
with:
PATTERNS: |
**/**.go
@@ -35,3 +35,4 @@ jobs:
working-directory: test/e2e
run: ./run-multiple.sh networks/ci.toml
if: "env.GIT_DIFF != ''"


@@ -1,7 +1,7 @@
# Runs fuzzing nightly.
name: Fuzz Tests
name: fuzz-nightly
on:
workflow_dispatch: # allow running workflow manually
schedule:
- cron: '0 3 * * *'
pull_request:


@@ -1,5 +1,5 @@
name: Check Markdown links
on:
schedule:
- cron: '* */24 * * *'
jobs:


@@ -14,7 +14,7 @@ jobs:
timeout-minutes: 8
steps:
- uses: actions/checkout@v2.4.0
- uses: technote-space/get-diff-action@v6.0.1
- uses: technote-space/get-diff-action@v5
with:
PATTERNS: |
**/**.go


@@ -21,12 +21,12 @@ jobs:
- name: Checkout Code
uses: actions/checkout@v2.4.0
- name: Lint Code Base
uses: docker://github/super-linter:v4
uses: docker://github/super-linter:v3
env:
LINTER_RULES_PATH: .
VALIDATE_ALL_CODEBASE: true
DEFAULT_BRANCH: master
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
VALIDATE_MD: true
VALIDATE_OPENAPI: true
VALIDATE_YAML: true
YAML_CONFIG_FILE: yaml-lint.yml


@@ -5,7 +5,7 @@ on:
branches:
- "RC[0-9]/**"
tags:
- "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10
- "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10
jobs:
goreleaser:


@@ -1,75 +1,106 @@
name: Test
name: Tests
# Tests runs different tests (test_abci_apps, test_abci_cli, test_apps)
# This workflow runs on every push to master or release branch and every pull request
# All jobs will pass without running if no *{.go, .mod, .sum} files have been modified
on:
pull_request:
push:
paths:
- "**.go"
branches:
- master
- release/**
jobs:
tests:
build:
name: Build
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
part: ["00", "01", "02", "03", "04", "05"]
timeout-minutes: 5
steps:
- uses: actions/setup-go@v2
with:
go-version: "1.17"
- uses: actions/checkout@v2.4.0
- uses: technote-space/get-diff-action@v6.0.1
- uses: technote-space/get-diff-action@v5
with:
PATTERNS: |
**/**.go
"!test/"
go.mod
go.sum
Makefile
- name: Run Go Tests
run: |
make test-group-${{ matrix.part }} NUM_SPLIT=6
if: env.GIT_DIFF
- uses: actions/upload-artifact@v2
- name: install
run: make install install_abci
if: "env.GIT_DIFF != ''"
- uses: actions/cache@v2.1.7
with:
name: "${{ github.sha }}-${{ matrix.part }}-coverage"
path: ./build/${{ matrix.part }}.profile.out
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
if: env.GIT_DIFF
# Cache binaries for use by other jobs
- uses: actions/cache@v2.1.7
with:
path: ~/go/bin
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
if: env.GIT_DIFF
upload-coverage-report:
test_abci_cli:
runs-on: ubuntu-latest
needs: tests
needs: build
timeout-minutes: 5
steps:
- uses: actions/setup-go@v2
with:
go-version: "1.17"
- uses: actions/checkout@v2.4.0
- uses: technote-space/get-diff-action@v6.0.1
- uses: technote-space/get-diff-action@v5
with:
PATTERNS: |
**/**.go
"!test/"
go.mod
go.sum
Makefile
- uses: actions/download-artifact@v2
- uses: actions/cache@v2.1.7
with:
name: "${{ github.sha }}-00-coverage"
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
if: env.GIT_DIFF
- uses: actions/download-artifact@v2
- uses: actions/cache@v2.1.7
with:
name: "${{ github.sha }}-01-coverage"
path: ~/go/bin
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
if: env.GIT_DIFF
- uses: actions/download-artifact@v2
- run: abci/tests/test_cli/test.sh
shell: bash
if: env.GIT_DIFF
test_apps:
runs-on: ubuntu-latest
needs: build
timeout-minutes: 5
steps:
- uses: actions/setup-go@v2
with:
name: "${{ github.sha }}-02-coverage"
if: env.GIT_DIFF
- uses: actions/download-artifact@v2
go-version: "1.17"
- uses: actions/checkout@v2.4.0
- uses: technote-space/get-diff-action@v5
with:
name: "${{ github.sha }}-03-coverage"
if: env.GIT_DIFF
- run: |
cat ./*profile.out | grep -v "mode: set" >> coverage.txt
if: env.GIT_DIFF
- uses: codecov/codecov-action@v2.1.0
PATTERNS: |
**/**.go
go.mod
go.sum
- uses: actions/cache@v2.1.7
with:
file: ./coverage.txt
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
if: env.GIT_DIFF
- uses: actions/cache@v2.1.7
with:
path: ~/go/bin
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
if: env.GIT_DIFF
- name: test_apps
run: test/app/test.sh
shell: bash
if: env.GIT_DIFF


@@ -29,8 +29,8 @@ release:
archives:
- files:
- LICENSE
- README.md
- UPGRADING.md
- SECURITY.md
- CHANGELOG.md


@@ -2,41 +2,6 @@
Friendly reminder: We have a [bug bounty program](https://hackerone.com/cosmos).
## v0.35.1
January 26, 2022
Special thanks to external contributors on this release: @altergui, @odeke-em,
@thanethomson
### BREAKING CHANGES
- CLI/RPC/Config
- [config] [\#7276](https://github.com/tendermint/tendermint/pull/7276) rpc: Add experimental config params to allow for subscription buffer size control (@thanethomson).
- P2P Protocol
- [p2p] [\#7265](https://github.com/tendermint/tendermint/pull/7265) Peer manager reduces peer score for each failed dial attempt for peers that have not successfully dialed. (@tychoish)
- [p2p] [\#7594](https://github.com/tendermint/tendermint/pull/7594) always advertise self, to enable mutual address discovery. (@altergui)
### FEATURES
- [rpc] [\#7270](https://github.com/tendermint/tendermint/pull/7270) Add `header` and `header_by_hash` RPC Client queries. (@fedekunze) (@cmwaters)
### IMPROVEMENTS
- [internal/protoio] [\#7325](https://github.com/tendermint/tendermint/pull/7325) Optimized `MarshalDelimited` by inlining the common case and using a `sync.Pool` in the worst case. (@odeke-em)
- [\#7338](https://github.com/tendermint/tendermint/pull/7338) pubsub: Performance improvements for the event query API (backport of #7319) (@creachadair)
- [\#7252](https://github.com/tendermint/tendermint/pull/7252) Add basic metrics to the indexer package. (@creachadair)
- [\#7338](https://github.com/tendermint/tendermint/pull/7338) Performance improvements for the event query API. (@creachadair)
### BUG FIXES
- [\#7310](https://github.com/tendermint/tendermint/issues/7310) pubsub: Report a non-nil error when shutting down (fixes #7306).
- [\#7355](https://github.com/tendermint/tendermint/pull/7355) Fix incorrect tests using the PSQL sink. (@creachadair)
- [\#7683](https://github.com/tendermint/tendermint/pull/7683) rpc: check error code for broadcast_tx_commit. (@tychoish)
## v0.35.0
November 4, 2021
@@ -209,21 +174,6 @@ Special thanks to external contributors on this release: @JayT106,
- [cmd/tendermint/commands] [\#6623](https://github.com/tendermint/tendermint/pull/6623) replace `$HOME/.some/test/dir` with `t.TempDir` (@tanyabouman)
- [statesync] \#6807 Implement P2P state provider as an alternative to RPC (@cmwaters)
## v0.34.15
Special thanks to external contributors on this release: @thanethomson
### BUG FIXES
- [\#7368](https://github.com/tendermint/tendermint/issues/7368) cmd: add integration test for rollback functionality (@cmwaters).
- [\#7309](https://github.com/tendermint/tendermint/issues/7309) pubsub: Report a non-nil error when shutting down (fixes #7306).
- [\#7057](https://github.com/tendermint/tendermint/pull/7057) Import Postgres driver support for the psql indexer (@creachadair).
- [\#7106](https://github.com/tendermint/tendermint/pull/7106) Revert mutex change to ABCI Clients (@tychoish).
### IMPROVEMENTS
- [config] [\#7230](https://github.com/tendermint/tendermint/issues/7230) rpc: Add experimental config params to allow for subscription buffer size control (@thanethomson).
## v0.34.14
This release backports the `rollback` feature to allow recovery in the event of an incorrect app hash.


@@ -12,11 +12,9 @@ Special thanks to external contributors on this release:
- CLI/RPC/Config
- [rpc] \#7121 Remove the deprecated gRPC interface to the RPC service. (@creachadair)
- [rpc] Remove the deprecated gRPC interface to the RPC service. (@creachadair)
- [blocksync] \#7159 Remove support for disabling blocksync in any circumstance. (@tychoish)
- [mempool] \#7171 Remove legacy mempool implementation. (@tychoish)
- [rpc] \#7575 Rework how RPC responses are written back via HTTP. (@creachadair)
- [rpc] \#7713 Remove unused options for websocket clients. (@creachadair)
- Apps
@@ -26,49 +24,29 @@ Special thanks to external contributors on this release:
- [p2p] \#7035 Remove legacy P2P routing implementation and associated configuration options. (@tychoish)
- [p2p] \#7265 Peer manager reduces peer score for each failed dial attempt for peers that have not successfully dialed. (@tychoish)
- [p2p] [\#7594](https://github.com/tendermint/tendermint/pull/7594) always advertise self, to enable mutual address discovery. (@altergui)
- Go API
- [rpc] \#7474 Remove the "URI" RPC client. (@creachadair)
- [libs/pubsub] \#7451 Internalize the pubsub packages. (@creachadair)
- [libs/sync] \#7450 Internalize and remove the library. (@creachadair)
- [libs/async] \#7449 Move library to internal. (@creachadair)
- [pubsub] \#7231 Remove unbuffered subscriptions and rework the Subscription interface. (@creachadair)
- [eventbus] \#7231 Move the EventBus type to the internal/eventbus package. (@creachadair)
- [blocksync] \#7046 Remove v2 implementation of the blocksync service and reactor, which was disabled in the previous release. (@tychoish)
- [p2p] \#7064 Remove WDRR queue implementation. (@tychoish)
- [config] \#7169 `WriteConfigFile` now returns an error. (@tychoish)
- [libs/service] \#7288 Remove SetLogger method on `service.Service` interface. (@tychoish)
- [abci/client] \#7607 Simplify client interface (removes most "async" methods). (@creachadair)
- [libs/json] \#7673 Remove the libs/json (tmjson) library. (@creachadair)
- [libs/service] \#7288 Remove SetLogger method on `service.Service` interface. (@tychosih)
- Blockchain Protocol
### FEATURES
- [rpc] [\#7270](https://github.com/tendermint/tendermint/pull/7270) Add `header` and `header_by_hash` RPC Client queries. (@fedekunze)
- [rpc] [\#7701] Add `ApplicationInfo` to `status` rpc call which contains the application version. (@jonasbostoen)
- [cli] [#7033](https://github.com/tendermint/tendermint/pull/7033) Add a `rollback` command to rollback to the previous tendermint state in the event of a non-deterministic app hash or reverting an upgrade.
- [mempool, rpc] \#7041 Add removeTx operation to the RPC layer. (@tychoish)
- [consensus] \#7354 add a new `synchrony` field to the `ConsensusParameter` struct for controlling the parameters of the proposer-based timestamp algorithm. (@williambanfield)
- [consensus] \#7376 Update the proposal logic per the proposer-based timestamps specification so that the proposer will wait for the previous block time to occur before proposing the next block. (@williambanfield)
- [consensus] \#7391 Use the proposed block timestamp as the proposal timestamp. Update the block validation logic to ensure that the proposed block's timestamp matches the timestamp in the proposal message. (@williambanfield)
- [consensus] \#7415 Update proposal validation logic to Prevote nil if a proposal does not meet the conditions for timeliness per the proposer-based timestamp specification. (@anca)
- [consensus] \#7382 Update block validation to no longer require the block timestamp to be the median of the timestamps of the previous commit. (@anca)
- [consensus] \#7711 Use the proposer timestamp for the first height instead of the genesis time. Chains will still start consensus at the genesis time. (@anca)
### IMPROVEMENTS
- [internal/protoio] \#7325 Optimized `MarshalDelimited` by inlining the common case and using a `sync.Pool` in the worst case. (@odeke-em)
- [consensus] \#6969 remove logic to 'unlock' a locked block.
- [pubsub] \#7319 Performance improvements for the event query API (@creachadair)
- [node] \#7521 Define concrete type for seed node implementation (@spacech1mp)
- [rpc] \#7612 paginate mempool /unconfirmed_txs rpc endpoint (@spacech1mp)
- [light] [\#7536](https://github.com/tendermint/tendermint/pull/7536) rpc /status call returns info about the light client (@jmalicevic)
### BUG FIXES
- fix: assignment copies lock value in `BitArray.UnmarshalJSON()` (@lklimek)
- [light] \#7640 Light Client: fix absence proof verification (@ashcherbakov)
- [light] \#7641 Light Client: fix querying against the latest height (@ashcherbakov)


@@ -1,5 +1,5 @@
# stage 1 Generate Tendermint Binary
FROM golang:1.17-alpine as builder
FROM golang:1.16-alpine as builder
RUN apk update && \
apk upgrade && \
apk --no-cache add make
@@ -8,7 +8,7 @@ WORKDIR /tendermint
RUN make build-linux
# stage 2
FROM golang:1.17-alpine
FROM golang:1.15-alpine
LABEL maintainer="hello@tendermint.com"
# Tendermint will be looking for the genesis file in /tendermint/config/genesis.json


@@ -299,13 +299,11 @@ NUM_SPLIT ?= 4
$(BUILDDIR):
mkdir -p $@
# The format statement filters out all packages that don't have tests.
# Note we need to check for both in-package tests (.TestGoFiles) and
# out-of-package tests (.XTestGoFiles).
# the format statement filters out all packages that don't have tests.
$(BUILDDIR)/packages.txt:$(GO_TEST_FILES) $(BUILDDIR)
go list -f "{{ if (or .TestGoFiles .XTestGoFiles) }}{{ .ImportPath }}{{ end }}" ./... | sort > $@
go list -f "{{ if .TestGoFiles }}{{ .ImportPath }}{{ end }}" ./... | sort > $@
split-test-packages:$(BUILDDIR)/packages.txt
split -d -n l/$(NUM_SPLIT) $< $<.
test-group-%:split-test-packages
cat $(BUILDDIR)/packages.txt.$* | xargs go test -mod=readonly -timeout=5m -race -coverprofile=$(BUILDDIR)/$*.profile.out
cat $(BUILDDIR)/packages.txt.$* | xargs go test -mod=readonly -timeout=8m -race -coverprofile=$(BUILDDIR)/$*.profile.out
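
The two format strings differ in what they count as a testable package: `{{ if .TestGoFiles }}` sees only in-package tests, while `{{ if (or .TestGoFiles .XTestGoFiles) }}` also catches packages whose tests live in a separate `_test` package. A small sketch of how that template logic behaves, using made-up package data rather than real `go list` output:

```go
// A sketch of how the `go list -f` format string in the Makefile filters
// packages: only packages with in-package (.TestGoFiles) or out-of-package
// (.XTestGoFiles) tests print their import path. Package data is invented.
package main

import (
	"os"
	"text/template"
)

type pkg struct {
	ImportPath   string
	TestGoFiles  []string // in-package tests (package foo)
	XTestGoFiles []string // out-of-package tests (package foo_test)
}

func main() {
	const format = "{{ if (or .TestGoFiles .XTestGoFiles) }}{{ .ImportPath }}{{ end }}\n"
	tmpl := template.Must(template.New("list").Parse(format))

	pkgs := []pkg{
		{ImportPath: "example.com/withtests", TestGoFiles: []string{"a_test.go"}},
		{ImportPath: "example.com/xtestsonly", XTestGoFiles: []string{"b_test.go"}},
		{ImportPath: "example.com/notests"}, // filtered: prints an empty line
	}
	for _, p := range pkgs {
		if err := tmpl.Execute(os.Stdout, p); err != nil {
			panic(err)
		}
	}
}
```

Packages without tests still emit a blank line, which the downstream `xargs go test` invocation simply ignores.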


@@ -29,7 +29,7 @@ see our recent paper, "[The latest gossip on BFT consensus](https://arxiv.org/ab
Please do not depend on master as your production branch. Use [releases](https://github.com/tendermint/tendermint/releases) instead.
Tendermint has been used in production in private and public environments, most notably the blockchains of the Cosmos Network. We haven't released v1.0 yet since we are making breaking changes to the protocol and the APIs.
See below for more details about [versioning](#versioning).
In any case, if you intend to run Tendermint in production, we're happy to help. You can
@@ -40,7 +40,7 @@ More on how releases are conducted can be found [here](./RELEASES.md).
## Security
To report a security vulnerability, see our [bug bounty
program](https://hackerone.com/cosmos).
For examples of the kinds of bugs we're looking for, see [our security policy](SECURITY.md).
We also maintain a dedicated mailing list for security updates. We will only ever use this mailing list
@@ -50,7 +50,7 @@ to notify you of vulnerabilities and fixes in Tendermint Core. You can subscribe
| Requirement | Notes |
|-------------|------------------|
| Go version | Go1.17 or higher |
| Go version | Go1.16 or higher |
## Documentation
@@ -63,8 +63,8 @@ See the [install instructions](/docs/introduction/install.md).
### Quick Start
- [Single node](/docs/introduction/quick-start.md)
- [Local cluster using docker-compose](/docs/tools/docker-compose.md)
- [Remote cluster using Terraform and Ansible](/docs/tools/terraform-and-ansible.md)
- [Local cluster using docker-compose](/docs/networks/docker-compose.md)
- [Remote cluster using Terraform and Ansible](/docs/networks/terraform-and-ansible.md)
- [Join the Cosmos testnet](https://cosmos.network/testnet)
## Contributing
@@ -73,7 +73,7 @@ Please abide by the [Code of Conduct](CODE_OF_CONDUCT.md) in all interactions.
Before contributing to the project, please take a look at the [contributing guidelines](CONTRIBUTING.md)
and the [style guide](STYLE_GUIDE.md). You may also find it helpful to read the
[specifications](https://github.com/tendermint/spec), watch the [Developer Sessions](/docs/DEV_SESSIONS.md),
and familiarize yourself with our
[Architectural Decision Records](https://github.com/tendermint/tendermint/tree/master/docs/architecture).
@@ -97,7 +97,7 @@ In an effort to avoid accumulating technical debt prior to 1.0.0,
we do not guarantee that breaking changes (i.e. bumps in the MINOR version)
will work with existing Tendermint blockchains. In these cases you will
have to start a new blockchain, or write something custom to get the old
data into the new chain. However, any bump in the PATCH version should be
compatible with existing blockchain histories.


@@ -6,6 +6,7 @@ import (
"sync"
"github.com/tendermint/tendermint/abci/types"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/service"
)
@@ -33,27 +34,35 @@ type Client interface {
// Asynchronous requests
FlushAsync(context.Context) (*ReqRes, error)
EchoAsync(ctx context.Context, msg string) (*ReqRes, error)
InfoAsync(context.Context, types.RequestInfo) (*ReqRes, error)
DeliverTxAsync(context.Context, types.RequestDeliverTx) (*ReqRes, error)
CheckTxAsync(context.Context, types.RequestCheckTx) (*ReqRes, error)
QueryAsync(context.Context, types.RequestQuery) (*ReqRes, error)
CommitAsync(context.Context) (*ReqRes, error)
InitChainAsync(context.Context, types.RequestInitChain) (*ReqRes, error)
BeginBlockAsync(context.Context, types.RequestBeginBlock) (*ReqRes, error)
EndBlockAsync(context.Context, types.RequestEndBlock) (*ReqRes, error)
ListSnapshotsAsync(context.Context, types.RequestListSnapshots) (*ReqRes, error)
OfferSnapshotAsync(context.Context, types.RequestOfferSnapshot) (*ReqRes, error)
LoadSnapshotChunkAsync(context.Context, types.RequestLoadSnapshotChunk) (*ReqRes, error)
ApplySnapshotChunkAsync(context.Context, types.RequestApplySnapshotChunk) (*ReqRes, error)
// Synchronous requests
Flush(context.Context) error
Echo(ctx context.Context, msg string) (*types.ResponseEcho, error)
Info(context.Context, types.RequestInfo) (*types.ResponseInfo, error)
DeliverTx(context.Context, types.RequestDeliverTx) (*types.ResponseDeliverTx, error)
CheckTx(context.Context, types.RequestCheckTx) (*types.ResponseCheckTx, error)
Query(context.Context, types.RequestQuery) (*types.ResponseQuery, error)
Commit(context.Context) (*types.ResponseCommit, error)
InitChain(context.Context, types.RequestInitChain) (*types.ResponseInitChain, error)
PrepareProposal(context.Context, types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error)
ExtendVote(context.Context, types.RequestExtendVote) (*types.ResponseExtendVote, error)
VerifyVoteExtension(context.Context, types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error)
BeginBlock(context.Context, types.RequestBeginBlock) (*types.ResponseBeginBlock, error)
EndBlock(context.Context, types.RequestEndBlock) (*types.ResponseEndBlock, error)
ListSnapshots(context.Context, types.RequestListSnapshots) (*types.ResponseListSnapshots, error)
OfferSnapshot(context.Context, types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error)
LoadSnapshotChunk(context.Context, types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error)
ApplySnapshotChunk(context.Context, types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error)
FlushSync(context.Context) error
EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error)
InfoSync(context.Context, types.RequestInfo) (*types.ResponseInfo, error)
DeliverTxSync(context.Context, types.RequestDeliverTx) (*types.ResponseDeliverTx, error)
CheckTxSync(context.Context, types.RequestCheckTx) (*types.ResponseCheckTx, error)
QuerySync(context.Context, types.RequestQuery) (*types.ResponseQuery, error)
CommitSync(context.Context) (*types.ResponseCommit, error)
InitChainSync(context.Context, types.RequestInitChain) (*types.ResponseInitChain, error)
BeginBlockSync(context.Context, types.RequestBeginBlock) (*types.ResponseBeginBlock, error)
EndBlockSync(context.Context, types.RequestEndBlock) (*types.ResponseEndBlock, error)
ListSnapshotsSync(context.Context, types.RequestListSnapshots) (*types.ResponseListSnapshots, error)
OfferSnapshotSync(context.Context, types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error)
LoadSnapshotChunkSync(context.Context, types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error)
ApplySnapshotChunkSync(context.Context, types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error)
}
//----------------------------------------
@@ -79,7 +88,7 @@ type ReqRes struct {
*sync.WaitGroup
*types.Response // Not set atomically, so be sure to use WaitGroup.
mtx sync.Mutex
mtx tmsync.Mutex
done bool // Gets set to true once *after* WaitGroup.Done().
cb func(*types.Response) // A single callback that may be set.
}
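
The `ReqRes` comment above is load-bearing: the embedded `*types.Response` is not written atomically, so readers must synchronize on the embedded `WaitGroup` before touching it. A self-contained sketch of that discipline, with simplified stand-in types rather than the real `abciclient` API:

```go
// A simplified stand-in for the ReqRes pattern: the response field is
// written by one goroutine and must only be read after Wait() returns,
// exactly as the "Not set atomically" comment warns.
package main

import (
	"fmt"
	"sync"
)

type reqRes struct {
	sync.WaitGroup
	response string // not set atomically; guard reads with Wait()
}

func newReqRes() *reqRes {
	rr := &reqRes{}
	rr.Add(1) // exactly one Done() when the response arrives
	return rr
}

func main() {
	rr := newReqRes()

	go func() {
		rr.response = "pong" // fill in the response...
		rr.Done()            // ...then release waiters
	}()

	rr.Wait() // safe to read response only after Wait() returns
	fmt.Println(rr.response)
}
```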


@@ -2,9 +2,9 @@ package abciclient
import (
"fmt"
"sync"
"github.com/tendermint/tendermint/abci/types"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
"github.com/tendermint/tendermint/libs/log"
)
@@ -14,10 +14,10 @@ type Creator func(log.Logger) (Client, error)
// NewLocalCreator returns a Creator for the given app,
// which will be running locally.
func NewLocalCreator(app types.Application) Creator {
mtx := new(sync.Mutex)
mtx := new(tmsync.Mutex)
return func(logger log.Logger) (Client, error) {
return NewLocalClient(logger, mtx, app), nil
return func(_ log.Logger) (Client, error) {
return NewLocalClient(mtx, app), nil
}
}
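
`NewLocalCreator` allocates one mutex up front and captures it in the returned closure, so every client it produces serializes its calls against the same application. A sketch of that shared-mutex closure pattern, using simplified stand-in types rather than the real `abciclient` types:

```go
// A sketch of the shared-mutex Creator pattern: one mutex is allocated up
// front and captured by the closure, so all clients made by this creator
// contend on the same lock. Types are simplified stand-ins.
package main

import (
	"fmt"
	"sync"
)

type app struct{ counter int }

type client struct {
	mtx *sync.Mutex
	app *app
}

func (c *client) deliver() {
	c.mtx.Lock()
	defer c.mtx.Unlock()
	c.app.counter++ // application calls are serialized across clients
}

func newLocalCreator(a *app) func() *client {
	mtx := new(sync.Mutex) // shared by every client the creator returns
	return func() *client { return &client{mtx: mtx, app: a} }
}

func main() {
	creator := newLocalCreator(&app{})
	c1, c2 := creator(), creator()
	c1.deliver()
	c2.deliver()
	fmt.Println(c1.app.counter) // 2: both clients hit the same app under one lock
}
```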


@@ -2,16 +2,15 @@ package abciclient
import (
"context"
"errors"
"fmt"
"net"
"sync"
"time"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
"github.com/tendermint/tendermint/abci/types"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
"github.com/tendermint/tendermint/libs/log"
tmnet "github.com/tendermint/tendermint/libs/net"
"github.com/tendermint/tendermint/libs/service"
@@ -28,7 +27,7 @@ type grpcClient struct {
conn *grpc.ClientConn
chReqRes chan *ReqRes // dispatches "async" responses to callbacks *in order*, needed by mempool
mtx sync.Mutex
mtx tmsync.Mutex
addr string
err error
resCb func(*types.Request, *types.Response) // listens to all callbacks
@@ -107,10 +106,7 @@ func (cli *grpcClient) OnStart(ctx context.Context) error {
RETRY_LOOP:
for {
conn, err := grpc.Dial(cli.addr,
grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithContextDialer(dialerFunc),
)
conn, err := grpc.Dial(cli.addr, grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc))
if err != nil {
if cli.mustConnect {
return err
@@ -126,14 +122,10 @@ RETRY_LOOP:
ENSURE_CONNECTED:
for {
_, err := client.Echo(ctx, &types.RequestEcho{Message: "hello"}, grpc.WaitForReady(true))
_, err := client.Echo(context.Background(), &types.RequestEcho{Message: "hello"}, grpc.WaitForReady(true))
if err == nil {
break ENSURE_CONNECTED
}
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
return err
}
cli.logger.Error("Echo failed", "err", err)
time.Sleep(time.Second * echoRetryIntervalSeconds)
}
@@ -161,9 +153,9 @@ func (cli *grpcClient) StopForError(err error) {
}
cli.mtx.Unlock()
cli.logger.Error("Stopping abci.grpcClient for error", "err", err)
cli.logger.Error(fmt.Sprintf("Stopping abci.grpcClient for error: %v", err.Error()))
if err := cli.Stop(); err != nil {
cli.logger.Error("error stopping abci.grpcClient", "err", err)
cli.logger.Error("Error stopping abci.grpcClient", "err", err)
}
}
@@ -183,6 +175,16 @@ func (cli *grpcClient) SetResponseCallback(resCb Callback) {
//----------------------------------------
// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) EchoAsync(ctx context.Context, msg string) (*ReqRes, error) {
req := types.ToRequestEcho(msg)
res, err := cli.client.Echo(ctx, req.GetEcho(), grpc.WaitForReady(true))
if err != nil {
return nil, err
}
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Echo{Echo: res}})
}
// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) FlushAsync(ctx context.Context) (*ReqRes, error) {
req := types.ToRequestFlush()
@@ -193,6 +195,16 @@ func (cli *grpcClient) FlushAsync(ctx context.Context) (*ReqRes, error) {
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Flush{Flush: res}})
}
// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) InfoAsync(ctx context.Context, params types.RequestInfo) (*ReqRes, error) {
req := types.ToRequestInfo(params)
res, err := cli.client.Info(ctx, req.GetInfo(), grpc.WaitForReady(true))
if err != nil {
return nil, err
}
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Info{Info: res}})
}
// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) DeliverTxAsync(ctx context.Context, params types.RequestDeliverTx) (*ReqRes, error) {
req := types.ToRequestDeliverTx(params)
@@ -213,6 +225,106 @@ func (cli *grpcClient) CheckTxAsync(ctx context.Context, params types.RequestChe
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_CheckTx{CheckTx: res}})
}
// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) QueryAsync(ctx context.Context, params types.RequestQuery) (*ReqRes, error) {
req := types.ToRequestQuery(params)
res, err := cli.client.Query(ctx, req.GetQuery(), grpc.WaitForReady(true))
if err != nil {
return nil, err
}
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Query{Query: res}})
}
// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) CommitAsync(ctx context.Context) (*ReqRes, error) {
req := types.ToRequestCommit()
res, err := cli.client.Commit(ctx, req.GetCommit(), grpc.WaitForReady(true))
if err != nil {
return nil, err
}
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Commit{Commit: res}})
}
// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) InitChainAsync(ctx context.Context, params types.RequestInitChain) (*ReqRes, error) {
req := types.ToRequestInitChain(params)
res, err := cli.client.InitChain(ctx, req.GetInitChain(), grpc.WaitForReady(true))
if err != nil {
return nil, err
}
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_InitChain{InitChain: res}})
}
// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) BeginBlockAsync(ctx context.Context, params types.RequestBeginBlock) (*ReqRes, error) {
req := types.ToRequestBeginBlock(params)
res, err := cli.client.BeginBlock(ctx, req.GetBeginBlock(), grpc.WaitForReady(true))
if err != nil {
return nil, err
}
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_BeginBlock{BeginBlock: res}})
}
// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) EndBlockAsync(ctx context.Context, params types.RequestEndBlock) (*ReqRes, error) {
req := types.ToRequestEndBlock(params)
res, err := cli.client.EndBlock(ctx, req.GetEndBlock(), grpc.WaitForReady(true))
if err != nil {
return nil, err
}
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_EndBlock{EndBlock: res}})
}
// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) ListSnapshotsAsync(ctx context.Context, params types.RequestListSnapshots) (*ReqRes, error) {
req := types.ToRequestListSnapshots(params)
res, err := cli.client.ListSnapshots(ctx, req.GetListSnapshots(), grpc.WaitForReady(true))
if err != nil {
return nil, err
}
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_ListSnapshots{ListSnapshots: res}})
}
// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) OfferSnapshotAsync(ctx context.Context, params types.RequestOfferSnapshot) (*ReqRes, error) {
req := types.ToRequestOfferSnapshot(params)
res, err := cli.client.OfferSnapshot(ctx, req.GetOfferSnapshot(), grpc.WaitForReady(true))
if err != nil {
return nil, err
}
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_OfferSnapshot{OfferSnapshot: res}})
}
// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) LoadSnapshotChunkAsync(
ctx context.Context,
params types.RequestLoadSnapshotChunk,
) (*ReqRes, error) {
req := types.ToRequestLoadSnapshotChunk(params)
res, err := cli.client.LoadSnapshotChunk(ctx, req.GetLoadSnapshotChunk(), grpc.WaitForReady(true))
if err != nil {
return nil, err
}
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_LoadSnapshotChunk{LoadSnapshotChunk: res}})
}
// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) ApplySnapshotChunkAsync(
ctx context.Context,
params types.RequestApplySnapshotChunk,
) (*ReqRes, error) {
req := types.ToRequestApplySnapshotChunk(params)
res, err := cli.client.ApplySnapshotChunk(ctx, req.GetApplySnapshotChunk(), grpc.WaitForReady(true))
if err != nil {
return nil, err
}
return cli.finishAsyncCall(
ctx,
req,
&types.Response{Value: &types.Response_ApplySnapshotChunk{ApplySnapshotChunk: res}},
)
}
// finishAsyncCall creates a ReqRes for an async call, and immediately populates it
// with the response. We don't complete it until it's been ordered via the channel.
func (cli *grpcClient) finishAsyncCall(ctx context.Context, req *types.Request, res *types.Response) (*ReqRes, error) {
@@ -256,22 +368,30 @@ func (cli *grpcClient) finishSyncCall(reqres *ReqRes) *types.Response {
//----------------------------------------
func (cli *grpcClient) Flush(ctx context.Context) error { return nil }
func (cli *grpcClient) Echo(ctx context.Context, msg string) (*types.ResponseEcho, error) {
req := types.ToRequestEcho(msg)
return cli.client.Echo(ctx, req.GetEcho(), grpc.WaitForReady(true))
func (cli *grpcClient) FlushSync(ctx context.Context) error {
return nil
}
func (cli *grpcClient) Info(
func (cli *grpcClient) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error) {
reqres, err := cli.EchoAsync(ctx, msg)
if err != nil {
return nil, err
}
return cli.finishSyncCall(reqres).GetEcho(), cli.Error()
}
func (cli *grpcClient) InfoSync(
ctx context.Context,
params types.RequestInfo,
req types.RequestInfo,
) (*types.ResponseInfo, error) {
req := types.ToRequestInfo(params)
return cli.client.Info(ctx, req.GetInfo(), grpc.WaitForReady(true))
reqres, err := cli.InfoAsync(ctx, req)
if err != nil {
return nil, err
}
return cli.finishSyncCall(reqres).GetInfo(), cli.Error()
}
func (cli *grpcClient) DeliverTx(
func (cli *grpcClient) DeliverTxSync(
ctx context.Context,
params types.RequestDeliverTx,
) (*types.ResponseDeliverTx, error) {
@@ -283,7 +403,7 @@ func (cli *grpcClient) DeliverTx(
return cli.finishSyncCall(reqres).GetDeliverTx(), cli.Error()
}
func (cli *grpcClient) CheckTx(
func (cli *grpcClient) CheckTxSync(
ctx context.Context,
params types.RequestCheckTx,
) (*types.ResponseCheckTx, error) {
@@ -295,100 +415,103 @@ func (cli *grpcClient) CheckTx(
return cli.finishSyncCall(reqres).GetCheckTx(), cli.Error()
}
func (cli *grpcClient) Query(
func (cli *grpcClient) QuerySync(
ctx context.Context,
params types.RequestQuery,
req types.RequestQuery,
) (*types.ResponseQuery, error) {
req := types.ToRequestQuery(params)
return cli.client.Query(ctx, req.GetQuery(), grpc.WaitForReady(true))
reqres, err := cli.QueryAsync(ctx, req)
if err != nil {
return nil, err
}
return cli.finishSyncCall(reqres).GetQuery(), cli.Error()
}
func (cli *grpcClient) Commit(ctx context.Context) (*types.ResponseCommit, error) {
req := types.ToRequestCommit()
return cli.client.Commit(ctx, req.GetCommit(), grpc.WaitForReady(true))
func (cli *grpcClient) CommitSync(ctx context.Context) (*types.ResponseCommit, error) {
reqres, err := cli.CommitAsync(ctx)
if err != nil {
return nil, err
}
return cli.finishSyncCall(reqres).GetCommit(), cli.Error()
}
func (cli *grpcClient) InitChain(
func (cli *grpcClient) InitChainSync(
ctx context.Context,
params types.RequestInitChain,
) (*types.ResponseInitChain, error) {
req := types.ToRequestInitChain(params)
return cli.client.InitChain(ctx, req.GetInitChain(), grpc.WaitForReady(true))
reqres, err := cli.InitChainAsync(ctx, params)
if err != nil {
return nil, err
}
return cli.finishSyncCall(reqres).GetInitChain(), cli.Error()
}
func (cli *grpcClient) BeginBlock(
func (cli *grpcClient) BeginBlockSync(
ctx context.Context,
params types.RequestBeginBlock,
) (*types.ResponseBeginBlock, error) {
req := types.ToRequestBeginBlock(params)
return cli.client.BeginBlock(ctx, req.GetBeginBlock(), grpc.WaitForReady(true))
reqres, err := cli.BeginBlockAsync(ctx, params)
if err != nil {
return nil, err
}
return cli.finishSyncCall(reqres).GetBeginBlock(), cli.Error()
}
func (cli *grpcClient) EndBlock(
func (cli *grpcClient) EndBlockSync(
ctx context.Context,
params types.RequestEndBlock,
) (*types.ResponseEndBlock, error) {
req := types.ToRequestEndBlock(params)
return cli.client.EndBlock(ctx, req.GetEndBlock(), grpc.WaitForReady(true))
reqres, err := cli.EndBlockAsync(ctx, params)
if err != nil {
return nil, err
}
return cli.finishSyncCall(reqres).GetEndBlock(), cli.Error()
}
func (cli *grpcClient) ListSnapshots(
func (cli *grpcClient) ListSnapshotsSync(
ctx context.Context,
params types.RequestListSnapshots,
) (*types.ResponseListSnapshots, error) {
req := types.ToRequestListSnapshots(params)
return cli.client.ListSnapshots(ctx, req.GetListSnapshots(), grpc.WaitForReady(true))
reqres, err := cli.ListSnapshotsAsync(ctx, params)
if err != nil {
return nil, err
}
return cli.finishSyncCall(reqres).GetListSnapshots(), cli.Error()
}
func (cli *grpcClient) OfferSnapshot(
func (cli *grpcClient) OfferSnapshotSync(
ctx context.Context,
params types.RequestOfferSnapshot,
) (*types.ResponseOfferSnapshot, error) {
req := types.ToRequestOfferSnapshot(params)
return cli.client.OfferSnapshot(ctx, req.GetOfferSnapshot(), grpc.WaitForReady(true))
reqres, err := cli.OfferSnapshotAsync(ctx, params)
if err != nil {
return nil, err
}
return cli.finishSyncCall(reqres).GetOfferSnapshot(), cli.Error()
}
func (cli *grpcClient) LoadSnapshotChunk(
func (cli *grpcClient) LoadSnapshotChunkSync(
ctx context.Context,
params types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
req := types.ToRequestLoadSnapshotChunk(params)
return cli.client.LoadSnapshotChunk(ctx, req.GetLoadSnapshotChunk(), grpc.WaitForReady(true))
reqres, err := cli.LoadSnapshotChunkAsync(ctx, params)
if err != nil {
return nil, err
}
return cli.finishSyncCall(reqres).GetLoadSnapshotChunk(), cli.Error()
}
func (cli *grpcClient) ApplySnapshotChunk(
func (cli *grpcClient) ApplySnapshotChunkSync(
ctx context.Context,
params types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
req := types.ToRequestApplySnapshotChunk(params)
return cli.client.ApplySnapshotChunk(ctx, req.GetApplySnapshotChunk(), grpc.WaitForReady(true))
}
func (cli *grpcClient) PrepareProposal(
ctx context.Context,
params types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
req := types.ToRequestPrepareProposal(params)
return cli.client.PrepareProposal(ctx, req.GetPrepareProposal(), grpc.WaitForReady(true))
}
func (cli *grpcClient) ExtendVote(
ctx context.Context,
params types.RequestExtendVote) (*types.ResponseExtendVote, error) {
req := types.ToRequestExtendVote(params)
return cli.client.ExtendVote(ctx, req.GetExtendVote(), grpc.WaitForReady(true))
}
func (cli *grpcClient) VerifyVoteExtension(
ctx context.Context,
params types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) {
req := types.ToRequestVerifyVoteExtension(params)
return cli.client.VerifyVoteExtension(ctx, req.GetVerifyVoteExtension(), grpc.WaitForReady(true))
reqres, err := cli.ApplySnapshotChunkAsync(ctx, params)
if err != nil {
return nil, err
}
return cli.finishSyncCall(reqres).GetApplySnapshotChunk(), cli.Error()
}
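
One side of this diff has each `*Sync` method delegate to its `*Async` counterpart and then block in `finishSyncCall` until the response has been delivered in order (the `finishAsyncCall` comment above notes the ReqRes is populated immediately but only completed once ordered via the channel); the other side calls the gRPC stub directly. A stripped-down sketch of the sync-wraps-async shape, where every type is an illustrative stand-in rather than the real client:

```go
// A sketch of the Sync-wraps-Async shape used by the grpcClient methods
// above: the sync call issues the async request, then blocks until the
// response has been delivered. The pending channel stands in for chReqRes.
package main

import "fmt"

type reqRes struct{ done chan string }

type client struct{ pending chan *reqRes }

// echoAsync fires the request and returns a handle immediately.
func (c *client) echoAsync(msg string) *reqRes {
	rr := &reqRes{done: make(chan string, 1)}
	c.pending <- rr                // enqueue for in-order completion
	go func() { rr.done <- msg }() // stand-in for the network round trip
	return rr
}

// echoSync is echoAsync plus a blocking wait, mirroring EchoSync above.
func (c *client) echoSync(msg string) string {
	rr := c.echoAsync(msg)
	return <-rr.done
}

func main() {
	c := &client{pending: make(chan *reqRes, 16)}
	fmt.Println(c.echoSync("hello"))
}
```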


@@ -2,10 +2,9 @@ package abciclient
import (
"context"
"sync"
types "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/libs/log"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
"github.com/tendermint/tendermint/libs/service"
)
@@ -16,7 +15,7 @@ import (
type localClient struct {
service.BaseService
mtx *sync.Mutex
mtx *tmsync.Mutex
types.Application
Callback
}
@@ -27,15 +26,15 @@ var _ Client = (*localClient)(nil)
// methods of the given app.
//
// Both Async and Sync methods ignore the given context.Context parameter.
func NewLocalClient(logger log.Logger, mtx *sync.Mutex, app types.Application) Client {
func NewLocalClient(mtx *tmsync.Mutex, app types.Application) Client {
if mtx == nil {
mtx = new(sync.Mutex)
mtx = new(tmsync.Mutex)
}
cli := &localClient{
mtx: mtx,
Application: app,
}
cli.BaseService = *service.NewBaseService(logger, "localClient", cli)
cli.BaseService = *service.NewBaseService(nil, "localClient", cli)
return cli
}
@@ -58,6 +57,27 @@ func (app *localClient) FlushAsync(ctx context.Context) (*ReqRes, error) {
return newLocalReqRes(types.ToRequestFlush(), nil), nil
}
func (app *localClient) EchoAsync(ctx context.Context, msg string) (*ReqRes, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
return app.callback(
types.ToRequestEcho(msg),
types.ToResponseEcho(msg),
), nil
}
func (app *localClient) InfoAsync(ctx context.Context, req types.RequestInfo) (*ReqRes, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.Info(req)
return app.callback(
types.ToRequestInfo(req),
types.ToResponseInfo(res),
), nil
}
func (app *localClient) DeliverTxAsync(ctx context.Context, params types.RequestDeliverTx) (*ReqRes, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
@@ -80,17 +100,122 @@ func (app *localClient) CheckTxAsync(ctx context.Context, req types.RequestCheck
), nil
}
func (app *localClient) QueryAsync(ctx context.Context, req types.RequestQuery) (*ReqRes, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.Query(req)
return app.callback(
types.ToRequestQuery(req),
types.ToResponseQuery(res),
), nil
}
func (app *localClient) CommitAsync(ctx context.Context) (*ReqRes, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.Commit()
return app.callback(
types.ToRequestCommit(),
types.ToResponseCommit(res),
), nil
}
func (app *localClient) InitChainAsync(ctx context.Context, req types.RequestInitChain) (*ReqRes, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.InitChain(req)
return app.callback(
types.ToRequestInitChain(req),
types.ToResponseInitChain(res),
), nil
}
func (app *localClient) BeginBlockAsync(ctx context.Context, req types.RequestBeginBlock) (*ReqRes, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.BeginBlock(req)
return app.callback(
types.ToRequestBeginBlock(req),
types.ToResponseBeginBlock(res),
), nil
}
func (app *localClient) EndBlockAsync(ctx context.Context, req types.RequestEndBlock) (*ReqRes, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.EndBlock(req)
return app.callback(
types.ToRequestEndBlock(req),
types.ToResponseEndBlock(res),
), nil
}
func (app *localClient) ListSnapshotsAsync(ctx context.Context, req types.RequestListSnapshots) (*ReqRes, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.ListSnapshots(req)
return app.callback(
types.ToRequestListSnapshots(req),
types.ToResponseListSnapshots(res),
), nil
}
func (app *localClient) OfferSnapshotAsync(ctx context.Context, req types.RequestOfferSnapshot) (*ReqRes, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.OfferSnapshot(req)
return app.callback(
types.ToRequestOfferSnapshot(req),
types.ToResponseOfferSnapshot(res),
), nil
}
func (app *localClient) LoadSnapshotChunkAsync(
ctx context.Context,
req types.RequestLoadSnapshotChunk,
) (*ReqRes, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.LoadSnapshotChunk(req)
return app.callback(
types.ToRequestLoadSnapshotChunk(req),
types.ToResponseLoadSnapshotChunk(res),
), nil
}
func (app *localClient) ApplySnapshotChunkAsync(
ctx context.Context,
req types.RequestApplySnapshotChunk,
) (*ReqRes, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.ApplySnapshotChunk(req)
return app.callback(
types.ToRequestApplySnapshotChunk(req),
types.ToResponseApplySnapshotChunk(res),
), nil
}
//-------------------------------------------------------
func (app *localClient) Flush(ctx context.Context) error {
func (app *localClient) FlushSync(ctx context.Context) error {
return nil
}
func (app *localClient) Echo(ctx context.Context, msg string) (*types.ResponseEcho, error) {
func (app *localClient) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error) {
return &types.ResponseEcho{Message: msg}, nil
}
func (app *localClient) Info(ctx context.Context, req types.RequestInfo) (*types.ResponseInfo, error) {
func (app *localClient) InfoSync(ctx context.Context, req types.RequestInfo) (*types.ResponseInfo, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
@@ -98,7 +223,7 @@ func (app *localClient) Info(ctx context.Context, req types.RequestInfo) (*types
return &res, nil
}
func (app *localClient) DeliverTx(
func (app *localClient) DeliverTxSync(
ctx context.Context,
req types.RequestDeliverTx,
) (*types.ResponseDeliverTx, error) {
@@ -110,7 +235,7 @@ func (app *localClient) DeliverTx(
return &res, nil
}
func (app *localClient) CheckTx(
func (app *localClient) CheckTxSync(
ctx context.Context,
req types.RequestCheckTx,
) (*types.ResponseCheckTx, error) {
@@ -121,7 +246,7 @@ func (app *localClient) CheckTx(
return &res, nil
}
func (app *localClient) Query(
func (app *localClient) QuerySync(
ctx context.Context,
req types.RequestQuery,
) (*types.ResponseQuery, error) {
@@ -132,7 +257,7 @@ func (app *localClient) Query(
return &res, nil
}
func (app *localClient) Commit(ctx context.Context) (*types.ResponseCommit, error) {
func (app *localClient) CommitSync(ctx context.Context) (*types.ResponseCommit, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
@@ -140,7 +265,7 @@ func (app *localClient) Commit(ctx context.Context) (*types.ResponseCommit, erro
return &res, nil
}
func (app *localClient) InitChain(
func (app *localClient) InitChainSync(
ctx context.Context,
req types.RequestInitChain,
) (*types.ResponseInitChain, error) {
@@ -152,7 +277,7 @@ func (app *localClient) InitChain(
return &res, nil
}
func (app *localClient) BeginBlock(
func (app *localClient) BeginBlockSync(
ctx context.Context,
req types.RequestBeginBlock,
) (*types.ResponseBeginBlock, error) {
@@ -164,7 +289,7 @@ func (app *localClient) BeginBlock(
return &res, nil
}
func (app *localClient) EndBlock(
func (app *localClient) EndBlockSync(
ctx context.Context,
req types.RequestEndBlock,
) (*types.ResponseEndBlock, error) {
@@ -176,7 +301,7 @@ func (app *localClient) EndBlock(
return &res, nil
}
func (app *localClient) ListSnapshots(
func (app *localClient) ListSnapshotsSync(
ctx context.Context,
req types.RequestListSnapshots,
) (*types.ResponseListSnapshots, error) {
@@ -188,7 +313,7 @@ func (app *localClient) ListSnapshots(
return &res, nil
}
func (app *localClient) OfferSnapshot(
func (app *localClient) OfferSnapshotSync(
ctx context.Context,
req types.RequestOfferSnapshot,
) (*types.ResponseOfferSnapshot, error) {
@@ -200,7 +325,7 @@ func (app *localClient) OfferSnapshot(
return &res, nil
}
func (app *localClient) LoadSnapshotChunk(
func (app *localClient) LoadSnapshotChunkSync(
ctx context.Context,
req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
@@ -211,7 +336,7 @@ func (app *localClient) LoadSnapshotChunk(
return &res, nil
}
func (app *localClient) ApplySnapshotChunk(
func (app *localClient) ApplySnapshotChunkSync(
ctx context.Context,
req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
@@ -222,39 +347,6 @@ func (app *localClient) ApplySnapshotChunk(
return &res, nil
}
func (app *localClient) PrepareProposal(
ctx context.Context,
req types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.PrepareProposal(req)
return &res, nil
}
func (app *localClient) ExtendVote(
ctx context.Context,
req types.RequestExtendVote) (*types.ResponseExtendVote, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.ExtendVote(req)
return &res, nil
}
func (app *localClient) VerifyVoteExtension(
ctx context.Context,
req types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.VerifyVoteExtension(req)
return &res, nil
}
//-------------------------------------------------------
func (app *localClient) callback(req *types.Request, res *types.Response) *ReqRes {


@@ -17,8 +17,31 @@ type Client struct {
mock.Mock
}
// ApplySnapshotChunk provides a mock function with given fields: _a0, _a1
func (_m *Client) ApplySnapshotChunk(_a0 context.Context, _a1 types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
// ApplySnapshotChunkAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) ApplySnapshotChunkAsync(_a0 context.Context, _a1 types.RequestApplySnapshotChunk) (*abciclient.ReqRes, error) {
ret := _m.Called(_a0, _a1)
var r0 *abciclient.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestApplySnapshotChunk) *abciclient.ReqRes); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*abciclient.ReqRes)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, types.RequestApplySnapshotChunk) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
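
These mocks follow the standard mockery/testify pattern: the method body records its arguments with `Called`, then unpacks either a canned value or a return function from the registered expectation. A hedged sketch of driving such a mock from a test; the `Greet` method and the test are invented for illustration, and only the `testify` calls are the real library API:

```go
// greeter_test.go - a sketch of using a mockery-style mock in a test:
// expectations are registered with On(...).Return(...), and Called()
// inside the mock method looks them up, mirroring the generated code above.
package greeter

import (
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
)

type mockGreeter struct{ mock.Mock }

// Greet mirrors the generated style: record args, unpack typed returns.
func (m *mockGreeter) Greet(name string) (string, error) {
	ret := m.Called(name)
	return ret.String(0), ret.Error(1)
}

func TestGreet(t *testing.T) {
	m := new(mockGreeter)
	m.On("Greet", "abci").Return("hello abci", nil)

	out, err := m.Greet("abci")
	require.NoError(t, err)
	require.Equal(t, "hello abci", out)
	m.AssertExpectations(t) // fails the test if an expectation went unmet
}
```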
// ApplySnapshotChunkSync provides a mock function with given fields: _a0, _a1
func (_m *Client) ApplySnapshotChunkSync(_a0 context.Context, _a1 types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
ret := _m.Called(_a0, _a1)
var r0 *types.ResponseApplySnapshotChunk
@@ -40,16 +63,16 @@ func (_m *Client) ApplySnapshotChunk(_a0 context.Context, _a1 types.RequestApply
return r0, r1
}
// BeginBlock provides a mock function with given fields: _a0, _a1
func (_m *Client) BeginBlock(_a0 context.Context, _a1 types.RequestBeginBlock) (*types.ResponseBeginBlock, error) {
// BeginBlockAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) BeginBlockAsync(_a0 context.Context, _a1 types.RequestBeginBlock) (*abciclient.ReqRes, error) {
ret := _m.Called(_a0, _a1)
var r0 *types.ResponseBeginBlock
if rf, ok := ret.Get(0).(func(context.Context, types.RequestBeginBlock) *types.ResponseBeginBlock); ok {
var r0 *abciclient.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestBeginBlock) *abciclient.ReqRes); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.ResponseBeginBlock)
r0 = ret.Get(0).(*abciclient.ReqRes)
}
}
@@ -63,21 +86,21 @@ func (_m *Client) BeginBlock(_a0 context.Context, _a1 types.RequestBeginBlock) (
return r0, r1
}
// CheckTx provides a mock function with given fields: _a0, _a1
func (_m *Client) CheckTx(_a0 context.Context, _a1 types.RequestCheckTx) (*types.ResponseCheckTx, error) {
// BeginBlockSync provides a mock function with given fields: _a0, _a1
func (_m *Client) BeginBlockSync(_a0 context.Context, _a1 types.RequestBeginBlock) (*types.ResponseBeginBlock, error) {
ret := _m.Called(_a0, _a1)
var r0 *types.ResponseCheckTx
if rf, ok := ret.Get(0).(func(context.Context, types.RequestCheckTx) *types.ResponseCheckTx); ok {
var r0 *types.ResponseBeginBlock
if rf, ok := ret.Get(0).(func(context.Context, types.RequestBeginBlock) *types.ResponseBeginBlock); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.ResponseCheckTx)
r0 = ret.Get(0).(*types.ResponseBeginBlock)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, types.RequestCheckTx) error); ok {
if rf, ok := ret.Get(1).(func(context.Context, types.RequestBeginBlock) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
@@ -109,8 +132,54 @@ func (_m *Client) CheckTxAsync(_a0 context.Context, _a1 types.RequestCheckTx) (*
return r0, r1
}
// Commit provides a mock function with given fields: _a0
func (_m *Client) Commit(_a0 context.Context) (*types.ResponseCommit, error) {
// CheckTxSync provides a mock function with given fields: _a0, _a1
func (_m *Client) CheckTxSync(_a0 context.Context, _a1 types.RequestCheckTx) (*types.ResponseCheckTx, error) {
ret := _m.Called(_a0, _a1)
var r0 *types.ResponseCheckTx
if rf, ok := ret.Get(0).(func(context.Context, types.RequestCheckTx) *types.ResponseCheckTx); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.ResponseCheckTx)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, types.RequestCheckTx) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// CommitAsync provides a mock function with given fields: _a0
func (_m *Client) CommitAsync(_a0 context.Context) (*abciclient.ReqRes, error) {
ret := _m.Called(_a0)
var r0 *abciclient.ReqRes
if rf, ok := ret.Get(0).(func(context.Context) *abciclient.ReqRes); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*abciclient.ReqRes)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context) error); ok {
r1 = rf(_a0)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// CommitSync provides a mock function with given fields: _a0
func (_m *Client) CommitSync(_a0 context.Context) (*types.ResponseCommit, error) {
ret := _m.Called(_a0)
var r0 *types.ResponseCommit
@@ -132,29 +201,6 @@ func (_m *Client) Commit(_a0 context.Context) (*types.ResponseCommit, error) {
return r0, r1
}
// DeliverTx provides a mock function with given fields: _a0, _a1
func (_m *Client) DeliverTx(_a0 context.Context, _a1 types.RequestDeliverTx) (*types.ResponseDeliverTx, error) {
ret := _m.Called(_a0, _a1)
var r0 *types.ResponseDeliverTx
if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *types.ResponseDeliverTx); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.ResponseDeliverTx)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, types.RequestDeliverTx) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// DeliverTxAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) DeliverTxAsync(_a0 context.Context, _a1 types.RequestDeliverTx) (*abciclient.ReqRes, error) {
ret := _m.Called(_a0, _a1)
@@ -178,8 +224,54 @@ func (_m *Client) DeliverTxAsync(_a0 context.Context, _a1 types.RequestDeliverTx
return r0, r1
}
// Echo provides a mock function with given fields: ctx, msg
func (_m *Client) Echo(ctx context.Context, msg string) (*types.ResponseEcho, error) {
// DeliverTxSync provides a mock function with given fields: _a0, _a1
func (_m *Client) DeliverTxSync(_a0 context.Context, _a1 types.RequestDeliverTx) (*types.ResponseDeliverTx, error) {
ret := _m.Called(_a0, _a1)
var r0 *types.ResponseDeliverTx
if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *types.ResponseDeliverTx); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.ResponseDeliverTx)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, types.RequestDeliverTx) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// EchoAsync provides a mock function with given fields: ctx, msg
func (_m *Client) EchoAsync(ctx context.Context, msg string) (*abciclient.ReqRes, error) {
ret := _m.Called(ctx, msg)
var r0 *abciclient.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, string) *abciclient.ReqRes); ok {
r0 = rf(ctx, msg)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*abciclient.ReqRes)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
r1 = rf(ctx, msg)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// EchoSync provides a mock function with given fields: ctx, msg
func (_m *Client) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error) {
ret := _m.Called(ctx, msg)
var r0 *types.ResponseEcho
@@ -201,8 +293,31 @@ func (_m *Client) Echo(ctx context.Context, msg string) (*types.ResponseEcho, er
return r0, r1
}
// EndBlock provides a mock function with given fields: _a0, _a1
func (_m *Client) EndBlock(_a0 context.Context, _a1 types.RequestEndBlock) (*types.ResponseEndBlock, error) {
// EndBlockAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) EndBlockAsync(_a0 context.Context, _a1 types.RequestEndBlock) (*abciclient.ReqRes, error) {
ret := _m.Called(_a0, _a1)
var r0 *abciclient.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestEndBlock) *abciclient.ReqRes); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*abciclient.ReqRes)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, types.RequestEndBlock) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// EndBlockSync provides a mock function with given fields: _a0, _a1
func (_m *Client) EndBlockSync(_a0 context.Context, _a1 types.RequestEndBlock) (*types.ResponseEndBlock, error) {
ret := _m.Called(_a0, _a1)
var r0 *types.ResponseEndBlock
@@ -238,43 +353,6 @@ func (_m *Client) Error() error {
return r0
}
// ExtendVote provides a mock function with given fields: _a0, _a1
func (_m *Client) ExtendVote(_a0 context.Context, _a1 types.RequestExtendVote) (*types.ResponseExtendVote, error) {
ret := _m.Called(_a0, _a1)
var r0 *types.ResponseExtendVote
if rf, ok := ret.Get(0).(func(context.Context, types.RequestExtendVote) *types.ResponseExtendVote); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.ResponseExtendVote)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, types.RequestExtendVote) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// Flush provides a mock function with given fields: _a0
func (_m *Client) Flush(_a0 context.Context) error {
ret := _m.Called(_a0)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context) error); ok {
r0 = rf(_a0)
} else {
r0 = ret.Error(0)
}
return r0
}
// FlushAsync provides a mock function with given fields: _a0
func (_m *Client) FlushAsync(_a0 context.Context) (*abciclient.ReqRes, error) {
ret := _m.Called(_a0)
@@ -298,8 +376,45 @@ func (_m *Client) FlushAsync(_a0 context.Context) (*abciclient.ReqRes, error) {
return r0, r1
}
// Info provides a mock function with given fields: _a0, _a1
func (_m *Client) Info(_a0 context.Context, _a1 types.RequestInfo) (*types.ResponseInfo, error) {
// FlushSync provides a mock function with given fields: _a0
func (_m *Client) FlushSync(_a0 context.Context) error {
ret := _m.Called(_a0)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context) error); ok {
r0 = rf(_a0)
} else {
r0 = ret.Error(0)
}
return r0
}
// InfoAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) InfoAsync(_a0 context.Context, _a1 types.RequestInfo) (*abciclient.ReqRes, error) {
ret := _m.Called(_a0, _a1)
var r0 *abciclient.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestInfo) *abciclient.ReqRes); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*abciclient.ReqRes)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, types.RequestInfo) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// InfoSync provides a mock function with given fields: _a0, _a1
func (_m *Client) InfoSync(_a0 context.Context, _a1 types.RequestInfo) (*types.ResponseInfo, error) {
ret := _m.Called(_a0, _a1)
var r0 *types.ResponseInfo
@@ -321,8 +436,31 @@ func (_m *Client) Info(_a0 context.Context, _a1 types.RequestInfo) (*types.Respo
return r0, r1
}
// InitChain provides a mock function with given fields: _a0, _a1
func (_m *Client) InitChain(_a0 context.Context, _a1 types.RequestInitChain) (*types.ResponseInitChain, error) {
// InitChainAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) InitChainAsync(_a0 context.Context, _a1 types.RequestInitChain) (*abciclient.ReqRes, error) {
ret := _m.Called(_a0, _a1)
var r0 *abciclient.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestInitChain) *abciclient.ReqRes); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*abciclient.ReqRes)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, types.RequestInitChain) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// InitChainSync provides a mock function with given fields: _a0, _a1
func (_m *Client) InitChainSync(_a0 context.Context, _a1 types.RequestInitChain) (*types.ResponseInitChain, error) {
ret := _m.Called(_a0, _a1)
var r0 *types.ResponseInitChain
@@ -358,8 +496,31 @@ func (_m *Client) IsRunning() bool {
return r0
}
// ListSnapshots provides a mock function with given fields: _a0, _a1
func (_m *Client) ListSnapshots(_a0 context.Context, _a1 types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
// ListSnapshotsAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) ListSnapshotsAsync(_a0 context.Context, _a1 types.RequestListSnapshots) (*abciclient.ReqRes, error) {
ret := _m.Called(_a0, _a1)
var r0 *abciclient.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestListSnapshots) *abciclient.ReqRes); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*abciclient.ReqRes)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, types.RequestListSnapshots) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// ListSnapshotsSync provides a mock function with given fields: _a0, _a1
func (_m *Client) ListSnapshotsSync(_a0 context.Context, _a1 types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
ret := _m.Called(_a0, _a1)
var r0 *types.ResponseListSnapshots
@@ -381,8 +542,31 @@ func (_m *Client) ListSnapshots(_a0 context.Context, _a1 types.RequestListSnapsh
return r0, r1
}
// LoadSnapshotChunk provides a mock function with given fields: _a0, _a1
func (_m *Client) LoadSnapshotChunk(_a0 context.Context, _a1 types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
// LoadSnapshotChunkAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) LoadSnapshotChunkAsync(_a0 context.Context, _a1 types.RequestLoadSnapshotChunk) (*abciclient.ReqRes, error) {
ret := _m.Called(_a0, _a1)
var r0 *abciclient.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestLoadSnapshotChunk) *abciclient.ReqRes); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*abciclient.ReqRes)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, types.RequestLoadSnapshotChunk) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// LoadSnapshotChunkSync provides a mock function with given fields: _a0, _a1
func (_m *Client) LoadSnapshotChunkSync(_a0 context.Context, _a1 types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
ret := _m.Called(_a0, _a1)
var r0 *types.ResponseLoadSnapshotChunk
@@ -404,8 +588,31 @@ func (_m *Client) LoadSnapshotChunk(_a0 context.Context, _a1 types.RequestLoadSn
return r0, r1
}
// OfferSnapshot provides a mock function with given fields: _a0, _a1
func (_m *Client) OfferSnapshot(_a0 context.Context, _a1 types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
// OfferSnapshotAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) OfferSnapshotAsync(_a0 context.Context, _a1 types.RequestOfferSnapshot) (*abciclient.ReqRes, error) {
ret := _m.Called(_a0, _a1)
var r0 *abciclient.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestOfferSnapshot) *abciclient.ReqRes); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*abciclient.ReqRes)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, types.RequestOfferSnapshot) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// OfferSnapshotSync provides a mock function with given fields: _a0, _a1
func (_m *Client) OfferSnapshotSync(_a0 context.Context, _a1 types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
ret := _m.Called(_a0, _a1)
var r0 *types.ResponseOfferSnapshot
@@ -427,21 +634,21 @@ func (_m *Client) OfferSnapshot(_a0 context.Context, _a1 types.RequestOfferSnaps
return r0, r1
}
// PrepareProposal provides a mock function with given fields: _a0, _a1
func (_m *Client) PrepareProposal(_a0 context.Context, _a1 types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
// QueryAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) QueryAsync(_a0 context.Context, _a1 types.RequestQuery) (*abciclient.ReqRes, error) {
ret := _m.Called(_a0, _a1)
var r0 *types.ResponsePrepareProposal
if rf, ok := ret.Get(0).(func(context.Context, types.RequestPrepareProposal) *types.ResponsePrepareProposal); ok {
var r0 *abciclient.ReqRes
if rf, ok := ret.Get(0).(func(context.Context, types.RequestQuery) *abciclient.ReqRes); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.ResponsePrepareProposal)
r0 = ret.Get(0).(*abciclient.ReqRes)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, types.RequestPrepareProposal) error); ok {
if rf, ok := ret.Get(1).(func(context.Context, types.RequestQuery) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
@@ -450,8 +657,8 @@ func (_m *Client) PrepareProposal(_a0 context.Context, _a1 types.RequestPrepareP
return r0, r1
}
// Query provides a mock function with given fields: _a0, _a1
func (_m *Client) Query(_a0 context.Context, _a1 types.RequestQuery) (*types.ResponseQuery, error) {
// QuerySync provides a mock function with given fields: _a0, _a1
func (_m *Client) QuerySync(_a0 context.Context, _a1 types.RequestQuery) (*types.ResponseQuery, error) {
ret := _m.Called(_a0, _a1)
var r0 *types.ResponseQuery
@@ -506,29 +713,6 @@ func (_m *Client) String() string {
return r0
}
// VerifyVoteExtension provides a mock function with given fields: _a0, _a1
func (_m *Client) VerifyVoteExtension(_a0 context.Context, _a1 types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) {
ret := _m.Called(_a0, _a1)
var r0 *types.ResponseVerifyVoteExtension
if rf, ok := ret.Get(0).(func(context.Context, types.RequestVerifyVoteExtension) *types.ResponseVerifyVoteExtension); ok {
r0 = rf(_a0, _a1)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.ResponseVerifyVoteExtension)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, types.RequestVerifyVoteExtension) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// Wait provides a mock function with given fields:
func (_m *Client) Wait() {
_m.Called()

View File

@@ -9,10 +9,10 @@ import (
"io"
"net"
"reflect"
"sync"
"time"
"github.com/tendermint/tendermint/abci/types"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
"github.com/tendermint/tendermint/libs/log"
tmnet "github.com/tendermint/tendermint/libs/net"
"github.com/tendermint/tendermint/libs/service"
@@ -41,7 +41,7 @@ type socketClient struct {
reqQueue chan *reqResWithContext
mtx sync.Mutex
mtx tmsync.Mutex
err error
reqSent *list.List // list of requests sent, waiting for response
resCb func(*types.Request, *types.Response) // called on all requests, if set.
@@ -222,10 +222,18 @@ func (cli *socketClient) didRecvResponse(res *types.Response) error {
//----------------------------------------
func (cli *socketClient) EchoAsync(ctx context.Context, msg string) (*ReqRes, error) {
return cli.queueRequestAsync(ctx, types.ToRequestEcho(msg))
}
func (cli *socketClient) FlushAsync(ctx context.Context) (*ReqRes, error) {
return cli.queueRequestAsync(ctx, types.ToRequestFlush())
}
func (cli *socketClient) InfoAsync(ctx context.Context, req types.RequestInfo) (*ReqRes, error) {
return cli.queueRequestAsync(ctx, types.ToRequestInfo(req))
}
func (cli *socketClient) DeliverTxAsync(ctx context.Context, req types.RequestDeliverTx) (*ReqRes, error) {
return cli.queueRequestAsync(ctx, types.ToRequestDeliverTx(req))
}
@@ -234,9 +242,51 @@ func (cli *socketClient) CheckTxAsync(ctx context.Context, req types.RequestChec
return cli.queueRequestAsync(ctx, types.ToRequestCheckTx(req))
}
func (cli *socketClient) QueryAsync(ctx context.Context, req types.RequestQuery) (*ReqRes, error) {
return cli.queueRequestAsync(ctx, types.ToRequestQuery(req))
}
func (cli *socketClient) CommitAsync(ctx context.Context) (*ReqRes, error) {
return cli.queueRequestAsync(ctx, types.ToRequestCommit())
}
func (cli *socketClient) InitChainAsync(ctx context.Context, req types.RequestInitChain) (*ReqRes, error) {
return cli.queueRequestAsync(ctx, types.ToRequestInitChain(req))
}
func (cli *socketClient) BeginBlockAsync(ctx context.Context, req types.RequestBeginBlock) (*ReqRes, error) {
return cli.queueRequestAsync(ctx, types.ToRequestBeginBlock(req))
}
func (cli *socketClient) EndBlockAsync(ctx context.Context, req types.RequestEndBlock) (*ReqRes, error) {
return cli.queueRequestAsync(ctx, types.ToRequestEndBlock(req))
}
func (cli *socketClient) ListSnapshotsAsync(ctx context.Context, req types.RequestListSnapshots) (*ReqRes, error) {
return cli.queueRequestAsync(ctx, types.ToRequestListSnapshots(req))
}
func (cli *socketClient) OfferSnapshotAsync(ctx context.Context, req types.RequestOfferSnapshot) (*ReqRes, error) {
return cli.queueRequestAsync(ctx, types.ToRequestOfferSnapshot(req))
}
func (cli *socketClient) LoadSnapshotChunkAsync(
ctx context.Context,
req types.RequestLoadSnapshotChunk,
) (*ReqRes, error) {
return cli.queueRequestAsync(ctx, types.ToRequestLoadSnapshotChunk(req))
}
func (cli *socketClient) ApplySnapshotChunkAsync(
ctx context.Context,
req types.RequestApplySnapshotChunk,
) (*ReqRes, error) {
return cli.queueRequestAsync(ctx, types.ToRequestApplySnapshotChunk(req))
}
//----------------------------------------
func (cli *socketClient) Flush(ctx context.Context) error {
func (cli *socketClient) FlushSync(ctx context.Context) error {
reqRes, err := cli.queueRequest(ctx, types.ToRequestFlush(), true)
if err != nil {
return queueErr(err)
@@ -261,182 +311,149 @@ func (cli *socketClient) Flush(ctx context.Context) error {
}
}
func (cli *socketClient) Echo(ctx context.Context, msg string) (*types.ResponseEcho, error) {
reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestEcho(msg))
func (cli *socketClient) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error) {
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestEcho(msg))
if err != nil {
return nil, err
}
return reqres.Response.GetEcho(), nil
}
func (cli *socketClient) Info(
func (cli *socketClient) InfoSync(
ctx context.Context,
req types.RequestInfo,
) (*types.ResponseInfo, error) {
reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestInfo(req))
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestInfo(req))
if err != nil {
return nil, err
}
return reqres.Response.GetInfo(), nil
}
func (cli *socketClient) DeliverTx(
func (cli *socketClient) DeliverTxSync(
ctx context.Context,
req types.RequestDeliverTx,
) (*types.ResponseDeliverTx, error) {
reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestDeliverTx(req))
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestDeliverTx(req))
if err != nil {
return nil, err
}
return reqres.Response.GetDeliverTx(), nil
}
func (cli *socketClient) CheckTx(
func (cli *socketClient) CheckTxSync(
ctx context.Context,
req types.RequestCheckTx,
) (*types.ResponseCheckTx, error) {
reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestCheckTx(req))
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestCheckTx(req))
if err != nil {
return nil, err
}
return reqres.Response.GetCheckTx(), nil
}
func (cli *socketClient) Query(
func (cli *socketClient) QuerySync(
ctx context.Context,
req types.RequestQuery,
) (*types.ResponseQuery, error) {
reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestQuery(req))
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestQuery(req))
if err != nil {
return nil, err
}
return reqres.Response.GetQuery(), nil
}
func (cli *socketClient) Commit(ctx context.Context) (*types.ResponseCommit, error) {
reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestCommit())
func (cli *socketClient) CommitSync(ctx context.Context) (*types.ResponseCommit, error) {
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestCommit())
if err != nil {
return nil, err
}
return reqres.Response.GetCommit(), nil
}
func (cli *socketClient) InitChain(
func (cli *socketClient) InitChainSync(
ctx context.Context,
req types.RequestInitChain,
) (*types.ResponseInitChain, error) {
reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestInitChain(req))
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestInitChain(req))
if err != nil {
return nil, err
}
return reqres.Response.GetInitChain(), nil
}
func (cli *socketClient) BeginBlock(
func (cli *socketClient) BeginBlockSync(
ctx context.Context,
req types.RequestBeginBlock,
) (*types.ResponseBeginBlock, error) {
reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestBeginBlock(req))
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestBeginBlock(req))
if err != nil {
return nil, err
}
return reqres.Response.GetBeginBlock(), nil
}
func (cli *socketClient) EndBlock(
func (cli *socketClient) EndBlockSync(
ctx context.Context,
req types.RequestEndBlock,
) (*types.ResponseEndBlock, error) {
reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestEndBlock(req))
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestEndBlock(req))
if err != nil {
return nil, err
}
return reqres.Response.GetEndBlock(), nil
}
func (cli *socketClient) ListSnapshots(
func (cli *socketClient) ListSnapshotsSync(
ctx context.Context,
req types.RequestListSnapshots,
) (*types.ResponseListSnapshots, error) {
reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestListSnapshots(req))
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestListSnapshots(req))
if err != nil {
return nil, err
}
return reqres.Response.GetListSnapshots(), nil
}
func (cli *socketClient) OfferSnapshot(
func (cli *socketClient) OfferSnapshotSync(
ctx context.Context,
req types.RequestOfferSnapshot,
) (*types.ResponseOfferSnapshot, error) {
reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestOfferSnapshot(req))
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestOfferSnapshot(req))
if err != nil {
return nil, err
}
return reqres.Response.GetOfferSnapshot(), nil
}
func (cli *socketClient) LoadSnapshotChunk(
func (cli *socketClient) LoadSnapshotChunkSync(
ctx context.Context,
req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestLoadSnapshotChunk(req))
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestLoadSnapshotChunk(req))
if err != nil {
return nil, err
}
return reqres.Response.GetLoadSnapshotChunk(), nil
}
func (cli *socketClient) ApplySnapshotChunk(
func (cli *socketClient) ApplySnapshotChunkSync(
ctx context.Context,
req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestApplySnapshotChunk(req))
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestApplySnapshotChunk(req))
if err != nil {
return nil, err
}
return reqres.Response.GetApplySnapshotChunk(), nil
}
func (cli *socketClient) PrepareProposal(
ctx context.Context,
req types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestPrepareProposal(req))
if err != nil {
return nil, err
}
return reqres.Response.GetPrepareProposal(), nil
}
func (cli *socketClient) ExtendVote(
ctx context.Context,
req types.RequestExtendVote) (*types.ResponseExtendVote, error) {
reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestExtendVote(req))
if err != nil {
return nil, err
}
return reqres.Response.GetExtendVote(), nil
}
func (cli *socketClient) VerifyVoteExtension(
ctx context.Context,
req types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) {
reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestVerifyVoteExtension(req))
if err != nil {
return nil, err
}
return reqres.Response.GetVerifyVoteExtension(), nil
}
//----------------------------------------
// queueRequest enqueues req onto the queue. If the queue is full, it either
@@ -480,7 +497,7 @@ func (cli *socketClient) queueRequestAsync(
return reqres, cli.Error()
}
func (cli *socketClient) queueRequestAndFlush(
func (cli *socketClient) queueRequestAndFlushSync(
ctx context.Context,
req *types.Request,
) (*ReqRes, error) {
@@ -490,7 +507,7 @@ func (cli *socketClient) queueRequestAndFlush(
return nil, queueErr(err)
}
if err := cli.Flush(ctx); err != nil {
if err := cli.FlushSync(ctx); err != nil {
return nil, err
}
@@ -548,12 +565,6 @@ func resMatchesReq(req *types.Request, res *types.Response) (ok bool) {
_, ok = res.Value.(*types.Response_Query)
case *types.Request_InitChain:
_, ok = res.Value.(*types.Response_InitChain)
case *types.Request_PrepareProposal:
_, ok = res.Value.(*types.Response_PrepareProposal)
case *types.Request_ExtendVote:
_, ok = res.Value.(*types.Response_ExtendVote)
case *types.Request_VerifyVoteExtension:
_, ok = res.Value.(*types.Response_VerifyVoteExtension)
case *types.Request_BeginBlock:
_, ok = res.Value.(*types.Response_BeginBlock)
case *types.Request_EndBlock:
@@ -581,6 +592,6 @@ func (cli *socketClient) stopForError(err error) {
cli.logger.Info("Stopping abci.socketClient", "reason", err)
if err := cli.Stop(); err != nil {
cli.logger.Error("error stopping abci.socketClient", "err", err)
cli.logger.Error("Error stopping abci.socketClient", "err", err)
}
}

View File

@@ -23,20 +23,20 @@ func TestProperSyncCalls(t *testing.T) {
defer cancel()
app := slowApp{}
logger := log.NewNopLogger()
logger := log.TestingLogger()
_, c := setupClientServer(ctx, t, logger, app)
resp := make(chan error, 1)
go func() {
rsp, err := c.BeginBlock(ctx, types.RequestBeginBlock{})
// This is BeginBlockSync unrolled....
reqres, err := c.BeginBlockAsync(ctx, types.RequestBeginBlock{})
assert.NoError(t, err)
assert.NoError(t, c.Flush(ctx))
assert.NotNil(t, rsp)
select {
case <-ctx.Done():
case resp <- c.Error():
}
err = c.FlushSync(ctx)
assert.NoError(t, err)
res := reqres.Response.GetBeginBlock()
assert.NotNil(t, res)
resp <- c.Error()
}()
select {

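The calling convention the test above unrolls generalizes to any request: an `*Async` call only enqueues, and `FlushSync` blocks until the queue has been written and all pending responses have arrived. A condensed sketch of that convention (helper name and wiring are illustrative):

```go
package example

import (
	"context"

	abciclient "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/types"
)

// beginBlock shows the Async + FlushSync convention: BeginBlockAsync only
// queues the request, and FlushSync returns once every queued request has
// its response, so reqres.Response is safe to read afterwards.
func beginBlock(ctx context.Context, client abciclient.Client) (*types.ResponseBeginBlock, error) {
	reqres, err := client.BeginBlockAsync(ctx, types.RequestBeginBlock{})
	if err != nil {
		return nil, err
	}
	if err := client.FlushSync(ctx); err != nil {
		return nil, err
	}
	return reqres.Response.GetBeginBlock(), nil
}
```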
View File

@@ -14,7 +14,6 @@ import (
"github.com/spf13/cobra"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/version"
abciclient "github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/abci/example/code"
@@ -22,6 +21,7 @@ import (
"github.com/tendermint/tendermint/abci/server"
servertest "github.com/tendermint/tendermint/abci/tests/server"
"github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/abci/version"
"github.com/tendermint/tendermint/proto/tendermint/crypto"
)
@@ -60,7 +60,7 @@ var RootCmd = &cobra.Command{
}
if logger == nil {
logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo)
logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
}
if client == nil {
@@ -231,7 +231,7 @@ var versionCmd = &cobra.Command{
Long: "print ABCI console version",
Args: cobra.ExactArgs(0),
RunE: func(cmd *cobra.Command, args []string) error {
fmt.Println(version.ABCIVersion)
fmt.Println(version.Version)
return nil
},
}
@@ -442,7 +442,7 @@ func cmdEcho(cmd *cobra.Command, args []string) error {
if len(args) > 0 {
msg = args[0]
}
res, err := client.Echo(cmd.Context(), msg)
res, err := client.EchoSync(cmd.Context(), msg)
if err != nil {
return err
}
@@ -460,7 +460,7 @@ func cmdInfo(cmd *cobra.Command, args []string) error {
if len(args) == 1 {
version = args[0]
}
res, err := client.Info(cmd.Context(), types.RequestInfo{Version: version})
res, err := client.InfoSync(cmd.Context(), types.RequestInfo{Version: version})
if err != nil {
return err
}
@@ -485,7 +485,7 @@ func cmdDeliverTx(cmd *cobra.Command, args []string) error {
if err != nil {
return err
}
res, err := client.DeliverTx(cmd.Context(), types.RequestDeliverTx{Tx: txBytes})
res, err := client.DeliverTxSync(cmd.Context(), types.RequestDeliverTx{Tx: txBytes})
if err != nil {
return err
}
@@ -511,7 +511,7 @@ func cmdCheckTx(cmd *cobra.Command, args []string) error {
if err != nil {
return err
}
res, err := client.CheckTx(cmd.Context(), types.RequestCheckTx{Tx: txBytes})
res, err := client.CheckTxSync(cmd.Context(), types.RequestCheckTx{Tx: txBytes})
if err != nil {
return err
}
@@ -526,7 +526,7 @@ func cmdCheckTx(cmd *cobra.Command, args []string) error {
// Get application Merkle root hash
func cmdCommit(cmd *cobra.Command, args []string) error {
res, err := client.Commit(cmd.Context())
res, err := client.CommitSync(cmd.Context())
if err != nil {
return err
}
@@ -551,7 +551,7 @@ func cmdQuery(cmd *cobra.Command, args []string) error {
return err
}
resQuery, err := client.Query(cmd.Context(), types.RequestQuery{
resQuery, err := client.QuerySync(cmd.Context(), types.RequestQuery{
Data: queryBytes,
Path: flagPath,
Height: int64(flagHeight),
@@ -575,14 +575,15 @@ func cmdQuery(cmd *cobra.Command, args []string) error {
}
func cmdKVStore(cmd *cobra.Command, args []string) error {
logger := log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo)
logger := log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
// Create the application - in memory or persisted to disk
var app types.Application
if flagPersist == "" {
app = kvstore.NewApplication()
} else {
app = kvstore.NewPersistentKVStoreApplication(logger, flagPersist)
app = kvstore.NewPersistentKVStoreApplication(flagPersist)
app.(*kvstore.PersistentKVStoreApplication).SetLogger(logger.With("module", "kvstore"))
}
// Start the listener

View File

@@ -13,7 +13,6 @@ import (
"github.com/stretchr/testify/require"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
"github.com/tendermint/tendermint/libs/log"
tmnet "github.com/tendermint/tendermint/libs/net"
@@ -32,38 +31,34 @@ func init() {
func TestKVStore(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.NewTestingLogger(t)
logger.Info("### Testing KVStore")
testStream(ctx, t, logger, kvstore.NewApplication())
fmt.Println("### Testing KVStore")
testStream(ctx, t, kvstore.NewApplication())
}
func TestBaseApp(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.NewTestingLogger(t)
logger.Info("### Testing BaseApp")
testStream(ctx, t, logger, types.NewBaseApplication())
fmt.Println("### Testing BaseApp")
testStream(ctx, t, types.NewBaseApplication())
}
func TestGRPC(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.NewTestingLogger(t)
logger.Info("### Testing GRPC")
testGRPCSync(ctx, t, logger, types.NewGRPCApplication(types.NewBaseApplication()))
fmt.Println("### Testing GRPC")
testGRPCSync(ctx, t, types.NewGRPCApplication(types.NewBaseApplication()))
}
func testStream(ctx context.Context, t *testing.T, logger log.Logger, app types.Application) {
func testStream(ctx context.Context, t *testing.T, app types.Application) {
t.Helper()
const numDeliverTxs = 20000
socketFile := fmt.Sprintf("test-%08x.sock", rand.Int31n(1<<30))
defer os.Remove(socketFile)
socket := fmt.Sprintf("unix://%v", socketFile)
logger := log.TestingLogger()
// Start the listener
server := abciserver.NewSocketServer(logger.With("module", "abci-server"), socket, app)
t.Cleanup(server.Wait)
@@ -71,7 +66,7 @@ func testStream(ctx context.Context, t *testing.T, logger log.Logger, app types.
require.NoError(t, err)
// Connect to the socket
client := abciclient.NewSocketClient(logger.With("module", "abci-client"), socket, false)
client := abciclient.NewSocketClient(log.TestingLogger().With("module", "abci-client"), socket, false)
t.Cleanup(client.Wait)
err = client.Start(ctx)
@@ -112,7 +107,7 @@ func testStream(ctx context.Context, t *testing.T, logger log.Logger, app types.
// Sometimes send flush messages
if counter%128 == 0 {
err = client.Flush(ctx)
err = client.FlushSync(context.Background())
require.NoError(t, err)
}
}
@@ -131,25 +126,26 @@ func dialerFunc(ctx context.Context, addr string) (net.Conn, error) {
return tmnet.Connect(addr)
}
func testGRPCSync(ctx context.Context, t *testing.T, logger log.Logger, app types.ABCIApplicationServer) {
t.Helper()
func testGRPCSync(ctx context.Context, t *testing.T, app types.ABCIApplicationServer) {
numDeliverTxs := 2000
socketFile := fmt.Sprintf("/tmp/test-%08x.sock", rand.Int31n(1<<30))
defer os.Remove(socketFile)
socket := fmt.Sprintf("unix://%v", socketFile)
logger := log.TestingLogger()
// Start the listener
server := abciserver.NewGRPCServer(logger.With("module", "abci-server"), socket, app)
require.NoError(t, server.Start(ctx))
if err := server.Start(ctx); err != nil {
t.Fatalf("Error starting GRPC server: %v", err.Error())
}
t.Cleanup(func() { server.Wait() })
// Connect to the socket
conn, err := grpc.Dial(socket,
grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithContextDialer(dialerFunc),
)
require.NoError(t, err, "Error dialing GRPC server")
conn, err := grpc.Dial(socket, grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc))
if err != nil {
t.Fatalf("Error dialing GRPC server: %v", err.Error())
}
t.Cleanup(func() {
if err := conn.Close(); err != nil {
@@ -162,9 +158,10 @@ func testGRPCSync(ctx context.Context, t *testing.T, logger log.Logger, app type
// Write requests
for counter := 0; counter < numDeliverTxs; counter++ {
// Send request
response, err := client.DeliverTx(ctx, &types.RequestDeliverTx{Tx: []byte("test")})
require.NoError(t, err, "Error in GRPC DeliverTx")
response, err := client.DeliverTx(context.Background(), &types.RequestDeliverTx{Tx: []byte("test")})
if err != nil {
t.Fatalf("Error in GRPC DeliverTx: %v", err.Error())
}
counter++
if response.Code != code.CodeTypeOK {
t.Error("DeliverTx failed with ret_code", response.Code)

View File

@@ -4,7 +4,7 @@ There are two apps here: the KVStoreApplication and the PersistentKVStoreApplication
## KVStoreApplication
The KVStoreApplication is a simple merkle key-value store.
Transactions of the form `key=value` are stored as key-value pairs in the tree.
Transactions without an `=` sign set the value to the key.
The app has no replay protection (other than what the mempool provides).
@@ -12,7 +12,7 @@ The app has no replay protection (other than what the mempool provides).
## PersistentKVStoreApplication
The PersistentKVStoreApplication wraps the KVStoreApplication
and provides three additional features:
and provides two additional features:
1) persistence of state across app restarts (using Tendermint's ABCI-Handshake mechanism)
2) validator set changes
@@ -27,4 +27,4 @@ Validator set changes are effected using the following transaction format:
where `pubkeyN` is a base64-encoded 32-byte ed25519 key and `powerN` is a new voting power for the validator with `pubkeyN` (possibly a new one).
To remove a validator from the validator set, set power to `0`.
There is no sybil protection against new validators joining.
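For illustration (not from this diff): assuming the `val:<base64-pubkey>!<power>` transaction layout the example app's README describes, a validator change is submitted like any other tx. The key below is a made-up value:

```go
package example

import (
	"context"
	"fmt"

	abciclient "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/types"
)

// addValidator submits a validator-change tx. The "val:<pubkey>!<power>"
// layout and the base64 key here are illustrative assumptions.
func addValidator(ctx context.Context, client abciclient.Client) error {
	tx := []byte("val:nKUTY0EJ9m5XDsYwM4nF7momB27SWBkDQzTIDFIWhFg=!10")
	res, err := client.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: tx})
	if err != nil {
		return err
	}
	if res.IsErr() {
		return fmt.Errorf("validator update rejected: %s", res.Log)
	}
	return nil
}
```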

View File

@@ -171,9 +171,3 @@ func (app *Application) Query(reqQuery types.RequestQuery) (resQuery types.Respo
return resQuery
}
func (app *Application) PrepareProposal(
req types.RequestPrepareProposal) types.ResponsePrepareProposal {
return types.ResponsePrepareProposal{
BlockData: req.BlockData}
}

View File

@@ -76,9 +76,7 @@ func TestPersistentKVStoreKV(t *testing.T) {
if err != nil {
t.Fatal(err)
}
logger := log.NewTestingLogger(t)
kvstore := NewPersistentKVStoreApplication(logger, dir)
kvstore := NewPersistentKVStoreApplication(dir)
key := testKey
value := key
tx := []byte(key)
@@ -94,9 +92,7 @@ func TestPersistentKVStoreInfo(t *testing.T) {
if err != nil {
t.Fatal(err)
}
logger := log.NewTestingLogger(t)
kvstore := NewPersistentKVStoreApplication(logger, dir)
kvstore := NewPersistentKVStoreApplication(dir)
InitKVStore(kvstore)
height := int64(0)
@@ -128,9 +124,7 @@ func TestValUpdates(t *testing.T) {
if err != nil {
t.Fatal(err)
}
logger := log.NewTestingLogger(t)
kvstore := NewPersistentKVStoreApplication(logger, dir)
kvstore := NewPersistentKVStoreApplication(dir)
// init with some validators
total := 10
@@ -295,7 +289,7 @@ func makeGRPCClientServer(
func TestClientServer(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.NewTestingLogger(t)
logger := log.TestingLogger()
// set up socket app
kvstore := NewApplication()
@@ -330,39 +324,39 @@ func runClientTests(ctx context.Context, t *testing.T, client abciclient.Client)
}
func testClient(ctx context.Context, t *testing.T, app abciclient.Client, tx []byte, key, value string) {
ar, err := app.DeliverTx(ctx, types.RequestDeliverTx{Tx: tx})
ar, err := app.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: tx})
require.NoError(t, err)
require.False(t, ar.IsErr(), ar)
// repeating tx doesn't raise error
ar, err = app.DeliverTx(ctx, types.RequestDeliverTx{Tx: tx})
ar, err = app.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: tx})
require.NoError(t, err)
require.False(t, ar.IsErr(), ar)
// commit
_, err = app.Commit(ctx)
_, err = app.CommitSync(ctx)
require.NoError(t, err)
info, err := app.Info(ctx, types.RequestInfo{})
info, err := app.InfoSync(ctx, types.RequestInfo{})
require.NoError(t, err)
require.NotZero(t, info.LastBlockHeight)
// make sure query is fine
resQuery, err := app.Query(ctx, types.RequestQuery{
resQuery, err := app.QuerySync(ctx, types.RequestQuery{
Path: "/store",
Data: []byte(key),
})
require.NoError(t, err)
require.Nil(t, err)
require.Equal(t, code.CodeTypeOK, resQuery.Code)
require.Equal(t, key, string(resQuery.Key))
require.Equal(t, value, string(resQuery.Value))
require.EqualValues(t, info.LastBlockHeight, resQuery.Height)
// make sure proof is fine
resQuery, err = app.Query(ctx, types.RequestQuery{
resQuery, err = app.QuerySync(ctx, types.RequestQuery{
Path: "/store",
Data: []byte(key),
Prove: true,
})
require.NoError(t, err)
require.Nil(t, err)
require.Equal(t, code.CodeTypeOK, resQuery.Code)
require.Equal(t, key, string(resQuery.Key))
require.Equal(t, value, string(resQuery.Value))

View File

@@ -14,7 +14,6 @@ import (
"github.com/tendermint/tendermint/crypto/encoding"
"github.com/tendermint/tendermint/libs/log"
cryptoproto "github.com/tendermint/tendermint/proto/tendermint/crypto"
ptypes "github.com/tendermint/tendermint/proto/tendermint/types"
)
const (
@@ -36,7 +35,7 @@ type PersistentKVStoreApplication struct {
logger log.Logger
}
func NewPersistentKVStoreApplication(logger log.Logger, dbDir string) *PersistentKVStoreApplication {
func NewPersistentKVStoreApplication(dbDir string) *PersistentKVStoreApplication {
name := "kvstore"
db, err := dbm.NewGoLevelDB(name, dbDir)
if err != nil {
@@ -48,7 +47,7 @@ func NewPersistentKVStoreApplication(logger log.Logger, dbDir string) *Persisten
return &PersistentKVStoreApplication{
app: &Application{state: state},
valAddrToPubKeyMap: make(map[string]cryptoproto.PublicKey),
logger: logger,
logger: log.NewNopLogger(),
}
}
@@ -56,6 +55,10 @@ func (app *PersistentKVStoreApplication) Close() error {
return app.app.state.db.Close()
}
func (app *PersistentKVStoreApplication) SetLogger(l log.Logger) {
app.logger = l
}
func (app *PersistentKVStoreApplication) Info(req types.RequestInfo) types.ResponseInfo {
res := app.app.Info(req)
res.LastBlockHeight = app.app.state.Height
@@ -73,10 +76,6 @@ func (app *PersistentKVStoreApplication) DeliverTx(req types.RequestDeliverTx) t
return app.execValidatorTx(req.Tx)
}
if isPrepareTx(req.Tx) {
return app.execPrepareTx(req.Tx)
}
// otherwise, update the key-value store
return app.app.DeliverTx(req)
}
@@ -114,7 +113,7 @@ func (app *PersistentKVStoreApplication) InitChain(req types.RequestInitChain) t
for _, v := range req.Validators {
r := app.updateValidator(v)
if r.IsErr() {
app.logger.Error("error updating validators", "r", r)
app.logger.Error("Error updating validators", "r", r)
}
}
return types.ResponseInitChain{}
@@ -171,24 +170,6 @@ func (app *PersistentKVStoreApplication) ApplySnapshotChunk(
return types.ResponseApplySnapshotChunk{Result: types.ResponseApplySnapshotChunk_ABORT}
}
func (app *PersistentKVStoreApplication) ExtendVote(
req types.RequestExtendVote) types.ResponseExtendVote {
return types.ResponseExtendVote{
VoteExtension: ConstructVoteExtension(req.Vote.ValidatorAddress),
}
}
func (app *PersistentKVStoreApplication) VerifyVoteExtension(
req types.RequestVerifyVoteExtension) types.ResponseVerifyVoteExtension {
return types.RespondVerifyVoteExtension(
app.verifyExtension(req.Vote.ValidatorAddress, req.Vote.VoteExtension))
}
func (app *PersistentKVStoreApplication) PrepareProposal(
req types.RequestPrepareProposal) types.ResponsePrepareProposal {
return types.ResponsePrepareProposal{BlockData: app.substPrepareTx(req.BlockData)}
}
//---------------------------------------------
// update validators
@@ -290,7 +271,7 @@ func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate
if err := types.WriteMessage(&v, value); err != nil {
return types.ResponseDeliverTx{
Code: code.CodeTypeEncodingError,
Log: fmt.Sprintf("error encoding validator: %v", err)}
Log: fmt.Sprintf("Error encoding validator: %v", err)}
}
if err = app.app.state.db.Set(key, value.Bytes()); err != nil {
panic(err)
@@ -303,51 +284,3 @@ func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate
return types.ResponseDeliverTx{Code: code.CodeTypeOK}
}
// -----------------------------
const PreparePrefix = "prepare"
func isPrepareTx(tx []byte) bool {
return strings.HasPrefix(string(tx), PreparePrefix)
}
// execPrepareTx is a no-op. The tx data is treated as a placeholder
// and is substituted at PrepareProposal time.
func (app *PersistentKVStoreApplication) execPrepareTx(tx []byte) types.ResponseDeliverTx {
// noop
return types.ResponseDeliverTx{}
}
// substPrepareTx substitutes all the prepare txs in the block data
// with the null string (could be any arbitrary string).
func (app *PersistentKVStoreApplication) substPrepareTx(blockData [][]byte) [][]byte {
for i, tx := range blockData {
if isPrepareTx(tx) {
blockData[i] = make([]byte, len(tx))
}
}
return blockData
}
func ConstructVoteExtension(valAddr []byte) *ptypes.VoteExtension {
return &ptypes.VoteExtension{
AppDataToSign: valAddr,
AppDataSelfAuthenticating: valAddr,
}
}
func (app *PersistentKVStoreApplication) verifyExtension(valAddr []byte, ext *ptypes.VoteExtension) bool {
if ext == nil {
return false
}
canonical := ConstructVoteExtension(valAddr)
if !bytes.Equal(canonical.AppDataToSign, ext.AppDataToSign) {
return false
}
if !bytes.Equal(canonical.AppDataSelfAuthenticating, ext.AppDataSelfAuthenticating) {
return false
}
return true
}

View File

@@ -58,7 +58,7 @@ func (s *GRPCServer) OnStart(ctx context.Context) error {
}()
if err := s.server.Serve(s.listener); err != nil {
s.logger.Error("error serving gRPC server", "err", err)
s.logger.Error("Error serving gRPC server", "err", err)
}
}()
return nil

View File

@@ -7,9 +7,9 @@ import (
"io"
"net"
"runtime"
"sync"
"github.com/tendermint/tendermint/abci/types"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
"github.com/tendermint/tendermint/libs/log"
tmnet "github.com/tendermint/tendermint/libs/net"
"github.com/tendermint/tendermint/libs/service"
@@ -25,11 +25,11 @@ type SocketServer struct {
addr string
listener net.Listener
connsMtx sync.Mutex
connsMtx tmsync.Mutex
conns map[int]net.Conn
nextConnID int
appMtx sync.Mutex
appMtx tmsync.Mutex
app types.Application
}
@@ -61,7 +61,7 @@ func (s *SocketServer) OnStart(ctx context.Context) error {
func (s *SocketServer) OnStop() {
if err := s.listener.Close(); err != nil {
s.logger.Error("error closing listener", "err", err)
s.logger.Error("Error closing listener", "err", err)
}
s.connsMtx.Lock()
@@ -70,7 +70,7 @@ func (s *SocketServer) OnStop() {
for id, conn := range s.conns {
delete(s.conns, id)
if err := conn.Close(); err != nil {
s.logger.Error("error closing connection", "id", id, "conn", conn, "err", err)
s.logger.Error("Error closing connection", "id", id, "conn", conn, "err", err)
}
}
}
@@ -139,7 +139,7 @@ func (s *SocketServer) waitForClose(ctx context.Context, closeConn chan error, c
defer func() {
// Close the connection
if err := s.rmConn(connID); err != nil {
s.logger.Error("error closing connection", "err", err)
s.logger.Error("Error closing connection", "err", err)
}
}()
@@ -240,21 +240,12 @@ func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types
case *types.Request_OfferSnapshot:
res := s.app.OfferSnapshot(*r.OfferSnapshot)
responses <- types.ToResponseOfferSnapshot(res)
case *types.Request_PrepareProposal:
res := s.app.PrepareProposal(*r.PrepareProposal)
responses <- types.ToResponsePrepareProposal(res)
case *types.Request_LoadSnapshotChunk:
res := s.app.LoadSnapshotChunk(*r.LoadSnapshotChunk)
responses <- types.ToResponseLoadSnapshotChunk(res)
case *types.Request_ApplySnapshotChunk:
res := s.app.ApplySnapshotChunk(*r.ApplySnapshotChunk)
responses <- types.ToResponseApplySnapshotChunk(res)
case *types.Request_ExtendVote:
res := s.app.ExtendVote(*r.ExtendVote)
responses <- types.ToResponseExtendVote(res)
case *types.Request_VerifyVoteExtension:
res := s.app.VerifyVoteExtension(*r.VerifyVoteExtension)
responses <- types.ToResponseVerifyVoteExtension(res)
default:
responses <- types.ToResponseException("Unknown request")
}
@@ -268,26 +259,14 @@ func (s *SocketServer) handleResponses(
responses <-chan *types.Response,
) {
bw := bufio.NewWriter(conn)
for {
select {
case <-ctx.Done():
for res := range responses {
if err := types.WriteMessage(res, bw); err != nil {
closeConn <- fmt.Errorf("error writing message: %w", err)
return
}
if err := bw.Flush(); err != nil {
closeConn <- fmt.Errorf("error flushing write buffer: %w", err)
return
case res := <-responses:
if err := types.WriteMessage(res, bw); err != nil {
select {
case <-ctx.Done():
case closeConn <- fmt.Errorf("error writing message: %w", err):
}
return
}
if err := bw.Flush(); err != nil {
select {
case <-ctx.Done():
case closeConn <- fmt.Errorf("error flushing write buffer: %w", err):
}
return
}
}
}
}

View File

@@ -4,7 +4,6 @@ import (
"context"
"testing"
"github.com/fortytw2/leaktest"
"github.com/stretchr/testify/assert"
abciclientent "github.com/tendermint/tendermint/abci/client"
@@ -14,8 +13,6 @@ import (
)
func TestClientServerNoAddrPrefix(t *testing.T) {
t.Cleanup(leaktest.Check(t))
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@@ -24,17 +21,15 @@ func TestClientServerNoAddrPrefix(t *testing.T) {
transport = "socket"
)
app := kvstore.NewApplication()
logger := log.NewTestingLogger(t)
logger := log.TestingLogger()
server, err := abciserver.NewServer(logger, addr, transport, app)
assert.NoError(t, err, "expected no error on NewServer")
err = server.Start(ctx)
assert.NoError(t, err, "expected no error on server.Start")
t.Cleanup(server.Wait)
client, err := abciclientent.NewClient(logger, addr, transport, true)
assert.NoError(t, err, "expected no error on NewClient")
err = client.Start(ctx)
assert.NoError(t, err, "expected no error on client.Start")
t.Cleanup(client.Wait)
}

View File

@@ -21,7 +21,7 @@ func InitChain(ctx context.Context, client abciclient.Client) error {
power := mrand.Int()
vals[i] = types.UpdateValidator(pubkey, int64(power), "")
}
_, err := client.InitChain(ctx, types.RequestInitChain{
_, err := client.InitChainSync(ctx, types.RequestInitChain{
Validators: vals,
})
if err != nil {
@@ -33,7 +33,7 @@ func InitChain(ctx context.Context, client abciclient.Client) error {
}
func Commit(ctx context.Context, client abciclient.Client, hashExp []byte) error {
res, err := client.Commit(ctx)
res, err := client.CommitSync(ctx)
data := res.Data
if err != nil {
fmt.Println("Failed test: Commit")
@@ -50,7 +50,7 @@ func Commit(ctx context.Context, client abciclient.Client, hashExp []byte) error
}
func DeliverTx(ctx context.Context, client abciclient.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
res, _ := client.DeliverTx(ctx, types.RequestDeliverTx{Tx: txBytes})
res, _ := client.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: txBytes})
code, data, log := res.Code, res.Data, res.Log
if code != codeExp {
fmt.Println("Failed test: DeliverTx")
@@ -69,7 +69,7 @@ func DeliverTx(ctx context.Context, client abciclient.Client, txBytes []byte, co
}
func CheckTx(ctx context.Context, client abciclient.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
res, _ := client.CheckTx(ctx, types.RequestCheckTx{Tx: txBytes})
res, _ := client.CheckTxSync(ctx, types.RequestCheckTx{Tx: txBytes})
code, data, log := res.Code, res.Data, res.Log
if code != codeExp {
fmt.Println("Failed test: CheckTx")

View File

@@ -17,20 +17,11 @@ type Application interface {
CheckTx(RequestCheckTx) ResponseCheckTx // Validate a tx for the mempool
// Consensus Connection
InitChain(RequestInitChain) ResponseInitChain // Initialize blockchain w validators/other info from TendermintCore
PrepareProposal(RequestPrepareProposal) ResponsePrepareProposal
// Signals the beginning of a block
BeginBlock(RequestBeginBlock) ResponseBeginBlock
// Deliver a tx for full processing
DeliverTx(RequestDeliverTx) ResponseDeliverTx
// Signals the end of a block, returns changes to the validator set
EndBlock(RequestEndBlock) ResponseEndBlock
// Commit the state and return the application Merkle root hash
Commit() ResponseCommit
// Create application specific vote extension
ExtendVote(RequestExtendVote) ResponseExtendVote
// Verify application's vote extension data
VerifyVoteExtension(RequestVerifyVoteExtension) ResponseVerifyVoteExtension
InitChain(RequestInitChain) ResponseInitChain // Initialize blockchain w validators/other info from TendermintCore
BeginBlock(RequestBeginBlock) ResponseBeginBlock // Signals the beginning of a block
DeliverTx(RequestDeliverTx) ResponseDeliverTx // Deliver a tx for full processing
EndBlock(RequestEndBlock) ResponseEndBlock // Signals the end of a block, returns changes to the validator set
Commit() ResponseCommit // Commit the state and return the application Merkle root hash
// State Sync Connection
ListSnapshots(RequestListSnapshots) ResponseListSnapshots // List available snapshots
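For orientation (not part of the diff): a concrete application usually embeds BaseApplication, whose no-op defaults appear in the next hunk, and overrides only the methods it needs. A minimal sketch:

```go
package kvexample

import (
	"github.com/tendermint/tendermint/abci/types"
)

// CounterApp is a minimal sketch of an Application: embedding
// BaseApplication supplies defaults for the whole interface, so only
// DeliverTx and Commit are overridden here.
type CounterApp struct {
	types.BaseApplication
	txCount int64
}

var _ types.Application = (*CounterApp)(nil)

func (a *CounterApp) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
	a.txCount++ // toy state: count every delivered tx
	return types.ResponseDeliverTx{Code: types.CodeTypeOK}
}

func (a *CounterApp) Commit() types.ResponseCommit {
	// A real app would return a hash of its state; a toy value suffices here.
	return types.ResponseCommit{Data: []byte{byte(a.txCount)}}
}
```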
@@ -67,14 +58,6 @@ func (BaseApplication) Commit() ResponseCommit {
return ResponseCommit{}
}
func (BaseApplication) ExtendVote(req RequestExtendVote) ResponseExtendVote {
return ResponseExtendVote{}
}
func (BaseApplication) VerifyVoteExtension(req RequestVerifyVoteExtension) ResponseVerifyVoteExtension {
return ResponseVerifyVoteExtension{}
}
func (BaseApplication) Query(req RequestQuery) ResponseQuery {
return ResponseQuery{Code: CodeTypeOK}
}
@@ -107,10 +90,6 @@ func (BaseApplication) ApplySnapshotChunk(req RequestApplySnapshotChunk) Respons
return ResponseApplySnapshotChunk{}
}
func (BaseApplication) PrepareProposal(req RequestPrepareProposal) ResponsePrepareProposal {
return ResponsePrepareProposal{}
}
//-------------------------------------------------------
// GRPCApplication is a GRPC wrapper for Application
@@ -193,21 +172,3 @@ func (app *GRPCApplication) ApplySnapshotChunk(
res := app.app.ApplySnapshotChunk(*req)
return &res, nil
}
func (app *GRPCApplication) ExtendVote(
ctx context.Context, req *RequestExtendVote) (*ResponseExtendVote, error) {
res := app.app.ExtendVote(*req)
return &res, nil
}
func (app *GRPCApplication) VerifyVoteExtension(
ctx context.Context, req *RequestVerifyVoteExtension) (*ResponseVerifyVoteExtension, error) {
res := app.app.VerifyVoteExtension(*req)
return &res, nil
}
func (app *GRPCApplication) PrepareProposal(
ctx context.Context, req *RequestPrepareProposal) (*ResponsePrepareProposal, error) {
res := app.app.PrepareProposal(*req)
return &res, nil
}

View File

@@ -110,24 +110,6 @@ func ToRequestApplySnapshotChunk(req RequestApplySnapshotChunk) *Request {
}
}
func ToRequestExtendVote(req RequestExtendVote) *Request {
return &Request{
Value: &Request_ExtendVote{&req},
}
}
func ToRequestVerifyVoteExtension(req RequestVerifyVoteExtension) *Request {
return &Request{
Value: &Request_VerifyVoteExtension{&req},
}
}
func ToRequestPrepareProposal(req RequestPrepareProposal) *Request {
return &Request{
Value: &Request_PrepareProposal{&req},
}
}
//----------------------------------------
func ToResponseException(errStr string) *Response {
@@ -218,21 +200,3 @@ func ToResponseApplySnapshotChunk(res ResponseApplySnapshotChunk) *Response {
Value: &Response_ApplySnapshotChunk{&res},
}
}
func ToResponseExtendVote(res ResponseExtendVote) *Response {
return &Response{
Value: &Response_ExtendVote{&res},
}
}
func ToResponseVerifyVoteExtension(res ResponseVerifyVoteExtension) *Response {
return &Response{
Value: &Response_VerifyVoteExtension{&res},
}
}
func ToResponsePrepareProposal(res ResponsePrepareProposal) *Response {
return &Response{
Value: &Response_PrepareProposal{&res},
}
}
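These helpers exist because Request and Response are protobuf oneof envelopes: the socket layer always sends the envelope and switches on its concrete value, just as the socket server's handleRequest does. A small round trip for illustration:

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/abci/types"
)

func main() {
	// Wrap a concrete message in the Request oneof envelope, then unwrap it
	// the same way handleRequest switches on req.Value.
	req := types.ToRequestCheckTx(types.RequestCheckTx{Tx: []byte("key=value")})

	switch r := req.Value.(type) {
	case *types.Request_CheckTx:
		fmt.Printf("check_tx carrying %d tx bytes\n", len(r.CheckTx.Tx))
	default:
		fmt.Println("unexpected request type")
	}
}
```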

View File

@@ -14,7 +14,7 @@ import (
func TestMarshalJSON(t *testing.T) {
b, err := json.Marshal(&ResponseDeliverTx{})
assert.NoError(t, err)
assert.Nil(t, err)
// include empty fields.
assert.True(t, strings.Contains(string(b), "code"))
r1 := ResponseCheckTx{
@@ -31,11 +31,11 @@ func TestMarshalJSON(t *testing.T) {
},
}
b, err = json.Marshal(&r1)
assert.NoError(t, err)
assert.Nil(t, err)
var r2 ResponseCheckTx
err = json.Unmarshal(b, &r2)
assert.NoError(t, err)
assert.Nil(t, err)
assert.Equal(t, r1, r2)
}
@@ -49,11 +49,11 @@ func TestWriteReadMessageSimple(t *testing.T) {
for _, c := range cases {
buf := new(bytes.Buffer)
err := WriteMessage(c, buf)
assert.NoError(t, err)
assert.Nil(t, err)
msg := new(RequestEcho)
err = ReadMessage(buf, msg)
assert.NoError(t, err)
assert.Nil(t, err)
assert.True(t, proto.Equal(c, msg))
}
@@ -71,11 +71,11 @@ func TestWriteReadMessage(t *testing.T) {
for _, c := range cases {
buf := new(bytes.Buffer)
err := WriteMessage(c, buf)
assert.NoError(t, err)
assert.Nil(t, err)
msg := new(tmproto.Header)
err = ReadMessage(buf, msg)
assert.NoError(t, err)
assert.Nil(t, err)
assert.True(t, proto.Equal(c, msg))
}
@@ -103,11 +103,11 @@ func TestWriteReadMessage2(t *testing.T) {
for _, c := range cases {
buf := new(bytes.Buffer)
err := WriteMessage(c, buf)
assert.NoError(t, err)
assert.Nil(t, err)
msg := new(ResponseCheckTx)
err = ReadMessage(buf, msg)
assert.NoError(t, err)
assert.Nil(t, err)
assert.True(t, proto.Equal(c, msg))
}
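What these cases verify is a simple framing contract: WriteMessage emits one length-prefixed protobuf frame and ReadMessage consumes exactly one frame back (length-prefixed framing is how the ABCI socket protocol is generally described; treat that detail as background, not something this diff changes). A standalone round trip:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/tendermint/tendermint/abci/types"
)

func main() {
	buf := new(bytes.Buffer)

	// WriteMessage emits one length-prefixed protobuf frame.
	if err := types.WriteMessage(types.ToRequestEcho("hello"), buf); err != nil {
		panic(err)
	}

	// ReadMessage consumes exactly one frame back into a Request envelope.
	out := new(types.Request)
	if err := types.ReadMessage(buf, out); err != nil {
		panic(err)
	}
	fmt.Println(out.GetEcho().Message) // -> hello
}
```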

View File

@@ -5,8 +5,6 @@ import (
"encoding/json"
"github.com/gogo/protobuf/jsonpb"
types "github.com/tendermint/tendermint/proto/tendermint/types"
)
const (
@@ -43,16 +41,6 @@ func (r ResponseQuery) IsErr() bool {
return r.Code != CodeTypeOK
}
// IsOK returns true if Code is OK
func (r ResponseVerifyVoteExtension) IsOK() bool {
return r.Result <= ResponseVerifyVoteExtension_ACCEPT
}
// IsErr returns true if Code is something other than OK.
func (r ResponseVerifyVoteExtension) IsErr() bool {
return r.Result > ResponseVerifyVoteExtension_ACCEPT
}
//---------------------------------------------------------------------------
// override JSON marshaling so we emit defaults (ie. disable omitempty)
@@ -130,25 +118,3 @@ var _ jsonRoundTripper = (*ResponseDeliverTx)(nil)
var _ jsonRoundTripper = (*ResponseCheckTx)(nil)
var _ jsonRoundTripper = (*EventAttribute)(nil)
// -----------------------------------------------
// construct Result data
func RespondExtendVote(appDataToSign, appDataSelfAuthenticating []byte) ResponseExtendVote {
return ResponseExtendVote{
VoteExtension: &types.VoteExtension{
AppDataToSign: appDataToSign,
AppDataSelfAuthenticating: appDataSelfAuthenticating,
},
}
}
func RespondVerifyVoteExtension(ok bool) ResponseVerifyVoteExtension {
result := ResponseVerifyVoteExtension_REJECT
if ok {
result = ResponseVerifyVoteExtension_ACCEPT
}
return ResponseVerifyVoteExtension{
Result: result,
}
}

File diff suppressed because it is too large

abci/version/version.go Normal file
View File

@@ -0,0 +1,9 @@
package version
import (
"github.com/tendermint/tendermint/version"
)
// TODO: eliminate this after some version refactor
const Version = version.ABCIVersion

View File

@@ -9,8 +9,6 @@ import (
"net"
"net/http"
"os"
"os/signal"
"syscall"
"time"
grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
@@ -21,6 +19,7 @@ import (
"github.com/tendermint/tendermint/libs/log"
tmnet "github.com/tendermint/tendermint/libs/net"
tmos "github.com/tendermint/tendermint/libs/os"
"github.com/tendermint/tendermint/privval"
grpcprivval "github.com/tendermint/tendermint/privval/grpc"
privvalproto "github.com/tendermint/tendermint/proto/tendermint/privval"
@@ -46,7 +45,7 @@ func main() {
rootCA = flag.String("rootcafile", "", "absolute path to root CA")
prometheusAddr = flag.String("prometheus-addr", "", "address for prometheus endpoint (host:port)")
logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo).
logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false).
With("module", "priv_val")
)
flag.Parse()
@@ -134,10 +133,9 @@ func main() {
os.Exit(1)
}
opctx, opcancel := signal.NotifyContext(ctx, os.Interrupt, syscall.SIGTERM)
defer opcancel()
go func() {
<-opctx.Done()
// Stop upon receiving SIGTERM or CTRL-C.
tmos.TrapSignal(ctx, logger, func() {
logger.Debug("SignerServer: calling Close")
if *prometheusAddr != "" {
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
defer cancel()
@@ -147,7 +145,7 @@ func main() {
}
}
s.GracefulStop()
}()
})
// Run forever.
select {}

View File

@@ -15,7 +15,7 @@ var (
flagProfAddr = "pprof-laddr"
flagFrequency = "frequency"
logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo)
logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
)
// DebugCmd defines the root command containing subcommands that assist in

View File

@@ -1,7 +1,6 @@
package debug
import (
"context"
"errors"
"fmt"
"os"
@@ -43,7 +42,7 @@ func init() {
)
}
func dumpCmdHandler(cmd *cobra.Command, args []string) error {
func dumpCmdHandler(_ *cobra.Command, args []string) error {
outDir := args[0]
if outDir == "" {
return errors.New("invalid output directory")
@@ -64,24 +63,22 @@ func dumpCmdHandler(cmd *cobra.Command, args []string) error {
return fmt.Errorf("failed to create new http client: %w", err)
}
ctx := cmd.Context()
home := viper.GetString(cli.HomeFlag)
conf := config.DefaultConfig()
conf = conf.SetRoot(home)
config.EnsureRoot(conf.RootDir)
dumpDebugData(ctx, outDir, conf, rpc)
dumpDebugData(outDir, conf, rpc)
ticker := time.NewTicker(time.Duration(frequency) * time.Second)
for range ticker.C {
dumpDebugData(ctx, outDir, conf, rpc)
dumpDebugData(outDir, conf, rpc)
}
return nil
}
func dumpDebugData(ctx context.Context, outDir string, conf *config.Config, rpc *rpchttp.HTTP) {
func dumpDebugData(outDir string, conf *config.Config, rpc *rpchttp.HTTP) {
start := time.Now().UTC()
tmpDir, err := os.MkdirTemp(outDir, "tendermint_debug_tmp")
@@ -92,19 +89,19 @@ func dumpDebugData(ctx context.Context, outDir string, conf *config.Config, rpc
defer os.RemoveAll(tmpDir)
logger.Info("getting node status...")
if err := dumpStatus(ctx, rpc, tmpDir, "status.json"); err != nil {
if err := dumpStatus(rpc, tmpDir, "status.json"); err != nil {
logger.Error("failed to dump node status", "error", err)
return
}
logger.Info("getting node network info...")
if err := dumpNetInfo(ctx, rpc, tmpDir, "net_info.json"); err != nil {
if err := dumpNetInfo(rpc, tmpDir, "net_info.json"); err != nil {
logger.Error("failed to dump node network info", "error", err)
return
}
logger.Info("getting node consensus state...")
if err := dumpConsensusState(ctx, rpc, tmpDir, "consensus_state.json"); err != nil {
if err := dumpConsensusState(rpc, tmpDir, "consensus_state.json"); err != nil {
logger.Error("failed to dump node consensus state", "error", err)
return
}

View File

@@ -33,7 +33,6 @@ $ tendermint debug kill 34255 /path/to/tm-debug.zip`,
}
func killCmdHandler(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
pid, err := strconv.ParseInt(args[0], 10, 64)
if err != nil {
return err
@@ -63,17 +62,17 @@ func killCmdHandler(cmd *cobra.Command, args []string) error {
defer os.RemoveAll(tmpDir)
logger.Info("getting node status...")
if err := dumpStatus(ctx, rpc, tmpDir, "status.json"); err != nil {
if err := dumpStatus(rpc, tmpDir, "status.json"); err != nil {
return err
}
logger.Info("getting node network info...")
if err := dumpNetInfo(ctx, rpc, tmpDir, "net_info.json"); err != nil {
if err := dumpNetInfo(rpc, tmpDir, "net_info.json"); err != nil {
return err
}
logger.Info("getting node consensus state...")
if err := dumpConsensusState(ctx, rpc, tmpDir, "consensus_state.json"); err != nil {
if err := dumpConsensusState(rpc, tmpDir, "consensus_state.json"); err != nil {
return err
}

View File

@@ -15,8 +15,8 @@ import (
// dumpStatus gets node status state dump from the Tendermint RPC and writes it
// to file. It returns an error upon failure.
func dumpStatus(ctx context.Context, rpc *rpchttp.HTTP, dir, filename string) error {
status, err := rpc.Status(ctx)
func dumpStatus(rpc *rpchttp.HTTP, dir, filename string) error {
status, err := rpc.Status(context.Background())
if err != nil {
return fmt.Errorf("failed to get node status: %w", err)
}
@@ -26,8 +26,8 @@ func dumpStatus(ctx context.Context, rpc *rpchttp.HTTP, dir, filename string) er
// dumpNetInfo gets network information state dump from the Tendermint RPC and
// writes it to file. It returns an error upon failure.
func dumpNetInfo(ctx context.Context, rpc *rpchttp.HTTP, dir, filename string) error {
netInfo, err := rpc.NetInfo(ctx)
func dumpNetInfo(rpc *rpchttp.HTTP, dir, filename string) error {
netInfo, err := rpc.NetInfo(context.Background())
if err != nil {
return fmt.Errorf("failed to get node network information: %w", err)
}
@@ -37,8 +37,8 @@ func dumpNetInfo(ctx context.Context, rpc *rpchttp.HTTP, dir, filename string) e
// dumpConsensusState gets consensus state dump from the Tendermint RPC and
// writes it to file. It returns an error upon failure.
func dumpConsensusState(ctx context.Context, rpc *rpchttp.HTTP, dir, filename string) error {
consDump, err := rpc.DumpConsensusState(ctx)
func dumpConsensusState(rpc *rpchttp.HTTP, dir, filename string) error {
consDump, err := rpc.DumpConsensusState(context.Background())
if err != nil {
return fmt.Errorf("failed to get node consensus dump: %w", err)
}

View File

@@ -1,11 +1,11 @@
package commands
import (
"encoding/json"
"fmt"
"github.com/spf13/cobra"
tmjson "github.com/tendermint/tendermint/libs/json"
"github.com/tendermint/tendermint/types"
)
@@ -20,7 +20,7 @@ var GenNodeKeyCmd = &cobra.Command{
func genNodeKey(cmd *cobra.Command, args []string) error {
nodeKey := types.GenNodeKey()
bz, err := json.Marshal(nodeKey)
bz, err := tmjson.Marshal(nodeKey)
if err != nil {
return fmt.Errorf("nodeKey -> json: %w", err)
}

View File

@@ -1,11 +1,11 @@
package commands
import (
"encoding/json"
"fmt"
"github.com/spf13/cobra"
tmjson "github.com/tendermint/tendermint/libs/json"
"github.com/tendermint/tendermint/privval"
"github.com/tendermint/tendermint/types"
)
@@ -29,7 +29,7 @@ func genValidator(cmd *cobra.Command, args []string) error {
return err
}
jsbz, err := json.Marshal(pv)
jsbz, err := tmjson.Marshal(pv)
if err != nil {
return fmt.Errorf("validator -> json: %w", err)
}

View File

@@ -39,10 +39,10 @@ func initFiles(cmd *cobra.Command, args []string) error {
return errors.New("must specify a node type: tendermint init [validator|full|seed]")
}
config.Mode = args[0]
return initFilesWithConfig(cmd.Context(), config)
return initFilesWithConfig(config)
}
func initFilesWithConfig(ctx context.Context, config *cfg.Config) error {
func initFilesWithConfig(config *cfg.Config) error {
var (
pv *privval.FilePV
err error
@@ -65,9 +65,7 @@ func initFilesWithConfig(ctx context.Context, config *cfg.Config) error {
if err != nil {
return err
}
if err := pv.Save(); err != nil {
return err
}
pv.Save()
logger.Info("Generated private validator", "keyFile", privValKeyFile,
"stateFile", privValStateFile)
}
@@ -100,7 +98,7 @@ func initFilesWithConfig(ctx context.Context, config *cfg.Config) error {
}
}
ctx, cancel := context.WithTimeout(ctx, ctxTimeout)
ctx, cancel := context.WithTimeout(context.TODO(), ctxTimeout)
defer cancel()
// if this is a validator we add it to genesis

View File

@@ -17,6 +17,7 @@ import (
"github.com/tendermint/tendermint/libs/log"
tmmath "github.com/tendermint/tendermint/libs/math"
tmos "github.com/tendermint/tendermint/libs/os"
"github.com/tendermint/tendermint/light"
lproxy "github.com/tendermint/tendermint/light/proxy"
lrpc "github.com/tendermint/tendermint/light/rpc"
@@ -102,7 +103,7 @@ func init() {
}
func runProxy(cmd *cobra.Command, args []string) error {
logger, err := log.NewDefaultLogger(logFormat, logLevel)
logger, err := log.NewDefaultLogger(logFormat, logLevel, false)
if err != nil {
return err
}
@@ -187,14 +188,15 @@ func runProxy(cmd *cobra.Command, args []string) error {
return err
}
// Stop upon receiving SIGTERM or CTRL-C.
tmos.TrapSignal(cmd.Context(), logger, func() {
p.Listener.Close()
})
// this might be redundant to the above, eventually.
ctx, cancel := signal.NotifyContext(cmd.Context(), syscall.SIGTERM)
defer cancel()
go func() {
<-ctx.Done()
p.Listener.Close()
}()
logger.Info("Starting proxy...", "laddr", listenAddr)
if err := p.ListenAndServe(ctx); err != http.ErrServerClosed {
// Error starting or closing listener:

View File

@@ -3,7 +3,6 @@ package commands
import (
"errors"
"fmt"
"path/filepath"
"strings"
"github.com/spf13/cobra"
@@ -17,7 +16,6 @@ import (
"github.com/tendermint/tendermint/internal/state/indexer/sink/kv"
"github.com/tendermint/tendermint/internal/state/indexer/sink/psql"
"github.com/tendermint/tendermint/internal/store"
"github.com/tendermint/tendermint/libs/os"
"github.com/tendermint/tendermint/rpc/coretypes"
"github.com/tendermint/tendermint/types"
)
@@ -134,10 +132,6 @@ func loadEventSinks(cfg *tmcfg.Config) ([]indexer.EventSink, error) {
func loadStateAndBlockStore(cfg *tmcfg.Config) (*store.BlockStore, state.Store, error) {
dbType := dbm.BackendType(cfg.DBBackend)
if !os.FileExists(filepath.Join(cfg.DBDir(), "blockstore.db")) {
return nil, nil, fmt.Errorf("no blockstore found in %v", cfg.DBDir())
}
// Get BlockStore
blockStoreDB, err := dbm.NewDB("blockstore", dbType, cfg.DBDir())
if err != nil {
@@ -145,10 +139,6 @@ func loadStateAndBlockStore(cfg *tmcfg.Config) (*store.BlockStore, state.Store,
}
blockStore := store.NewBlockStore(blockStoreDB)
if !os.FileExists(filepath.Join(cfg.DBDir(), "state.db")) {
return nil, nil, fmt.Errorf("no blockstore found in %v", cfg.DBDir())
}
// Get StateStore
stateDB, err := dbm.NewDB("state", dbType, cfg.DBDir())
if err != nil {

View File

@@ -15,7 +15,6 @@ import (
"github.com/tendermint/tendermint/internal/state/mocks"
prototmstate "github.com/tendermint/tendermint/proto/tendermint/state"
"github.com/tendermint/tendermint/types"
dbm "github.com/tendermint/tm-db"
_ "github.com/lib/pq" // for the psql sink
)
@@ -25,13 +24,13 @@ const (
base int64 = 2
)
func setupReIndexEventCmd(ctx context.Context) *cobra.Command {
func setupReIndexEventCmd() *cobra.Command {
reIndexEventCmd := &cobra.Command{
Use: ReIndexEventCmd.Use,
Run: func(cmd *cobra.Command, args []string) {},
}
_ = reIndexEventCmd.ExecuteContext(ctx)
_ = reIndexEventCmd.ExecuteContext(context.Background())
return reIndexEventCmd
}
@@ -110,29 +109,12 @@ func TestLoadEventSink(t *testing.T) {
}
func TestLoadBlockStore(t *testing.T) {
testCfg, err := tmcfg.ResetTestRoot(t.Name())
require.NoError(t, err)
testCfg.DBBackend = "goleveldb"
_, _, err = loadStateAndBlockStore(testCfg)
// we should return an error because the state store and block store
// don't yet exist
require.Error(t, err)
dbType := dbm.BackendType(testCfg.DBBackend)
bsdb, err := dbm.NewDB("blockstore", dbType, testCfg.DBDir())
require.NoError(t, err)
bsdb.Close()
ssdb, err := dbm.NewDB("state", dbType, testCfg.DBDir())
require.NoError(t, err)
ssdb.Close()
bs, ss, err := loadStateAndBlockStore(testCfg)
bs, ss, err := loadStateAndBlockStore(tmcfg.TestConfig())
require.NoError(t, err)
require.NotNil(t, bs)
require.NotNil(t, ss)
}
}
func TestReIndexEvent(t *testing.T) {
mockBlockStore := &mocks.BlockStore{}
mockStateStore := &mocks.Store{}
@@ -177,14 +159,11 @@ func TestReIndexEvent(t *testing.T) {
{height, height, false},
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
for _, tc := range testCases {
startHeight = tc.startHeight
endHeight = tc.endHeight
err := eventReIndex(setupReIndexEventCmd(ctx), []indexer.EventSink{mockEventSink}, mockBlockStore, mockStateStore)
err := eventReIndex(setupReIndexEventCmd(), []indexer.EventSink{mockEventSink}, mockBlockStore, mockStateStore)
if tc.reIndexErr {
require.Error(t, err)
} else {

View File

@@ -53,7 +53,7 @@ func ResetAll(dbDir, privValKeyFile, privValStateFile string, logger log.Logger)
if err := os.RemoveAll(dbDir); err == nil {
logger.Info("Removed all blockchain history", "dir", dbDir)
} else {
logger.Error("error removing all blockchain history", "dir", dbDir, "err", err)
logger.Error("Error removing all blockchain history", "dir", dbDir, "err", err)
}
// recreate the dbDir since the privVal state needs to live there
if err := tmos.EnsureDir(dbDir, 0700); err != nil {
@@ -68,9 +68,7 @@ func resetFilePV(privValKeyFile, privValStateFile string, logger log.Logger) err
if err != nil {
return err
}
if err := pv.Reset(); err != nil {
return err
}
pv.Reset()
logger.Info("Reset private validator file to genesis state", "keyFile", privValKeyFile,
"stateFile", privValStateFile)
} else {
@@ -78,9 +76,7 @@ func resetFilePV(privValKeyFile, privValStateFile string, logger log.Logger) err
if err != nil {
return err
}
if err := pv.Save(); err != nil {
return err
}
pv.Save()
logger.Info("Generated private validator file", "keyFile", privValKeyFile,
"stateFile", privValStateFile)
}

View File

@@ -26,7 +26,7 @@ application.
return fmt.Errorf("failed to rollback state: %w", err)
}
fmt.Printf("Rolled back state to height %d and hash %X", height, hash)
fmt.Printf("Rolled back state to height %d and hash %v", height, hash)
return nil
},
}

View File

@@ -8,7 +8,6 @@ import (
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/cmd/tendermint/commands"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/rpc/client/local"
rpctest "github.com/tendermint/tendermint/rpc/test"
e2e "github.com/tendermint/tendermint/test/e2e/app"
@@ -50,9 +49,7 @@ func TestRollbackIntegration(t *testing.T) {
node2, _, err2 := rpctest.StartTendermint(ctx, cfg, app, rpctest.SuppressStdout)
require.NoError(t, err2)
logger := log.NewTestingLogger(t)
client, err := local.New(logger, node2.(local.NodeService))
client, err := local.New(node2.(local.NodeService))
require.NoError(t, err)
ticker := time.NewTicker(200 * time.Millisecond)

View File

@@ -13,7 +13,7 @@ import (
var (
config = cfg.DefaultConfig()
logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo)
logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
ctxTimeout = 4 * time.Second
)
@@ -36,7 +36,7 @@ func ParseConfig() (*cfg.Config, error) {
conf.SetRoot(conf.RootDir)
cfg.EnsureRoot(conf.RootDir)
if err := conf.ValidateBasic(); err != nil {
return nil, fmt.Errorf("error in config file: %w", err)
return nil, fmt.Errorf("error in config file: %v", err)
}
return conf, nil
}
@@ -55,7 +55,7 @@ var RootCmd = &cobra.Command{
return err
}
logger, err = log.NewDefaultLogger(config.LogFormat, config.LogLevel)
logger, err = log.NewDefaultLogger(config.LogFormat, config.LogLevel, false)
if err != nil {
return err
}

View File

@@ -18,12 +18,17 @@ import (
)
// clearConfig clears env vars, the given root dir, and resets viper.
func clearConfig(t *testing.T, dir string) {
t.Helper()
require.NoError(t, os.Unsetenv("TMHOME"))
require.NoError(t, os.Unsetenv("TM_HOME"))
require.NoError(t, os.RemoveAll(dir))
func clearConfig(dir string) {
if err := os.Unsetenv("TMHOME"); err != nil {
panic(err)
}
if err := os.Unsetenv("TM_HOME"); err != nil {
panic(err)
}
if err := os.RemoveAll(dir); err != nil {
panic(err)
}
viper.Reset()
config = cfg.DefaultConfig()
}
@@ -41,9 +46,8 @@ func testRootCmd() *cobra.Command {
return rootCmd
}
func testSetup(t *testing.T, rootDir string, args []string, env map[string]string) error {
t.Helper()
clearConfig(t, rootDir)
func testSetup(rootDir string, args []string, env map[string]string) error {
clearConfig(rootDir)
rootCmd := testRootCmd()
cmd := cli.PrepareBaseCmd(rootCmd, "TM", rootDir)
@@ -69,8 +73,8 @@ func TestRootHome(t *testing.T) {
for i, tc := range cases {
idxString := strconv.Itoa(i)
err := testSetup(t, defaultRoot, tc.args, tc.env)
require.NoError(t, err, idxString)
err := testSetup(defaultRoot, tc.args, tc.env)
require.Nil(t, err, idxString)
assert.Equal(t, tc.root, config.RootDir, idxString)
assert.Equal(t, tc.root, config.P2P.RootDir, idxString)
@@ -101,8 +105,8 @@ func TestRootFlagsEnv(t *testing.T) {
for i, tc := range cases {
idxString := strconv.Itoa(i)
err := testSetup(t, defaultRoot, tc.args, tc.env)
require.NoError(t, err, idxString)
err := testSetup(defaultRoot, tc.args, tc.env)
require.Nil(t, err, idxString)
assert.Equal(t, tc.logLevel, config.LogLevel, idxString)
}
@@ -130,17 +134,17 @@ func TestRootConfig(t *testing.T) {
for i, tc := range cases {
defaultRoot := t.TempDir()
idxString := strconv.Itoa(i)
clearConfig(t, defaultRoot)
clearConfig(defaultRoot)
// XXX: path must match cfg.defaultConfigPath
configFilePath := filepath.Join(defaultRoot, "config")
err := tmos.EnsureDir(configFilePath, 0700)
require.NoError(t, err)
require.Nil(t, err)
// write the non-defaults to a different path
// TODO: support writing sub configs so we can test that too
err = WriteConfigVals(configFilePath, cvals)
require.NoError(t, err)
require.Nil(t, err)
rootCmd := testRootCmd()
cmd := cli.PrepareBaseCmd(rootCmd, "TM", defaultRoot)
@@ -148,7 +152,7 @@ func TestRootConfig(t *testing.T) {
// run with the args and env
tc.args = append([]string{rootCmd.Use}, tc.args...)
err = cli.RunWithArgs(cmd, tc.args, tc.env)
require.NoError(t, err, idxString)
require.Nil(t, err, idxString)
assert.Equal(t, tc.logLvl, config.LogLevel, idxString)
}

View File

@@ -62,7 +62,7 @@ func AddNodeFlags(cmd *cobra.Command) {
"p2p.laddr",
config.P2P.ListenAddress,
"node listen address. (0.0.0.0:0 means any interface, any port)")
cmd.Flags().String("p2p.seeds", config.P2P.Seeds, "comma-delimited ID@host:port seed nodes") //nolint: staticcheck
cmd.Flags().String("p2p.seeds", config.P2P.Seeds, "comma-delimited ID@host:port seed nodes")
cmd.Flags().String("p2p.persistent-peers", config.P2P.PersistentPeers, "comma-delimited ID@host:port persistent peers")
cmd.Flags().Bool("p2p.upnp", config.P2P.UPNP, "enable/disable UPNP port forwarding")
cmd.Flags().Bool("p2p.pex", config.P2P.PexReactor, "enable/disable Peer-Exchange")

View File

@@ -7,7 +7,7 @@ import (
"github.com/spf13/cobra"
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/internal/jsontypes"
tmjson "github.com/tendermint/tendermint/libs/json"
tmnet "github.com/tendermint/tendermint/libs/net"
tmos "github.com/tendermint/tendermint/libs/os"
"github.com/tendermint/tendermint/privval"
@@ -25,14 +25,13 @@ func showValidator(cmd *cobra.Command, args []string) error {
var (
pubKey crypto.PubKey
err error
bctx = cmd.Context()
)
//TODO: remove once gRPC is the only supported protocol
protocol, _ := tmnet.ProtocolAndAddress(config.PrivValidator.ListenAddr)
switch protocol {
case "grpc":
pvsc, err := tmgrpc.DialRemoteSigner(
bctx,
config.PrivValidator,
config.ChainID(),
logger,
@@ -42,7 +41,7 @@ func showValidator(cmd *cobra.Command, args []string) error {
return fmt.Errorf("can't connect to remote validator %w", err)
}
ctx, cancel := context.WithTimeout(bctx, ctxTimeout)
ctx, cancel := context.WithTimeout(context.TODO(), ctxTimeout)
defer cancel()
pubKey, err = pvsc.GetPubKey(ctx)
@@ -61,7 +60,7 @@ func showValidator(cmd *cobra.Command, args []string) error {
return err
}
ctx, cancel := context.WithTimeout(bctx, ctxTimeout)
ctx, cancel := context.WithTimeout(context.TODO(), ctxTimeout)
defer cancel()
pubKey, err = pv.GetPubKey(ctx)
@@ -70,7 +69,7 @@ func showValidator(cmd *cobra.Command, args []string) error {
}
}
bz, err := jsontypes.Marshal(pubKey)
bz, err := tmjson.Marshal(pubKey)
if err != nil {
return fmt.Errorf("failed to marshal private validator pubkey: %w", err)
}

View File

@@ -122,7 +122,7 @@ func testnetFiles(cmd *cobra.Command, args []string) error {
}
genVals := make([]types.GenesisValidator, nValidators)
ctx := cmd.Context()
for i := 0; i < nValidators; i++ {
nodeDirName := fmt.Sprintf("%s%d", nodeDirPrefix, i)
nodeDir := filepath.Join(outputDir, nodeDirName)
@@ -139,7 +139,7 @@ func testnetFiles(cmd *cobra.Command, args []string) error {
return err
}
if err := initFilesWithConfig(ctx, config); err != nil {
if err := initFilesWithConfig(config); err != nil {
return err
}
@@ -150,7 +150,7 @@ func testnetFiles(cmd *cobra.Command, args []string) error {
return err
}
ctx, cancel := context.WithTimeout(ctx, ctxTimeout)
ctx, cancel := context.WithTimeout(context.TODO(), ctxTimeout)
defer cancel()
pubKey, err := pv.GetPubKey(ctx)
@@ -181,7 +181,7 @@ func testnetFiles(cmd *cobra.Command, args []string) error {
return err
}
if err := initFilesWithConfig(ctx, config); err != nil {
if err := initFilesWithConfig(config); err != nil {
return err
}
}

View File

@@ -2,7 +2,6 @@ package config
import (
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"net/http"
@@ -10,6 +9,7 @@ import (
"path/filepath"
"time"
tmjson "github.com/tendermint/tendermint/libs/json"
"github.com/tendermint/tendermint/libs/log"
tmos "github.com/tendermint/tendermint/libs/os"
"github.com/tendermint/tendermint/types"
@@ -270,7 +270,7 @@ func (cfg BaseConfig) LoadNodeKeyID() (types.NodeID, error) {
return "", err
}
nodeKey := types.NodeKey{}
err = json.Unmarshal(jsonBytes, &nodeKey)
err = tmjson.Unmarshal(jsonBytes, &nodeKey)
if err != nil {
return "", err
}
@@ -571,11 +571,9 @@ type P2PConfig struct { //nolint: maligned
// Comma separated list of seed nodes to connect to
// We only use these if we cant connect to peers in the addrbook
//
// Deprecated: This value is not used by the new PEX reactor. Use
// BootstrapPeers instead.
//
// TODO(#5670): Remove once the p2p refactor is complete.
// NOTE: not used by the new PEX reactor. Please use BootstrapPeers instead.
// TODO: Remove once p2p refactor is complete
// ref: https://github.com/tendermint/tendermint/issues/5670
Seeds string `mapstructure:"seeds"`
// Comma separated list of peers to be added to the peer store
@@ -683,6 +681,7 @@ func TestP2PConfig() *P2PConfig {
cfg.ListenAddress = "tcp://127.0.0.1:36656"
cfg.AllowDuplicateIP = true
cfg.FlushThrottleTimeout = 10 * time.Millisecond
return cfg
}

View File

@@ -10,19 +10,21 @@ import (
)
func TestDefaultConfig(t *testing.T) {
assert := assert.New(t)
// set up some defaults
cfg := DefaultConfig()
assert.NotNil(t, cfg.P2P)
assert.NotNil(t, cfg.Mempool)
assert.NotNil(t, cfg.Consensus)
assert.NotNil(cfg.P2P)
assert.NotNil(cfg.Mempool)
assert.NotNil(cfg.Consensus)
// check the root dir stuff...
cfg.SetRoot("/foo")
cfg.Genesis = "bar"
cfg.DBPath = "/opt/data"
assert.Equal(t, "/foo/bar", cfg.GenesisFile())
assert.Equal(t, "/opt/data", cfg.DBDir())
assert.Equal("/foo/bar", cfg.GenesisFile())
assert.Equal("/opt/data", cfg.DBDir())
}
func TestConfigValidateBasic(t *testing.T) {
@@ -35,18 +37,19 @@ func TestConfigValidateBasic(t *testing.T) {
}
func TestTLSConfiguration(t *testing.T) {
assert := assert.New(t)
cfg := DefaultConfig()
cfg.SetRoot("/home/user")
cfg.RPC.TLSCertFile = "file.crt"
assert.Equal(t, "/home/user/config/file.crt", cfg.RPC.CertFile())
assert.Equal("/home/user/config/file.crt", cfg.RPC.CertFile())
cfg.RPC.TLSKeyFile = "file.key"
assert.Equal(t, "/home/user/config/file.key", cfg.RPC.KeyFile())
assert.Equal("/home/user/config/file.key", cfg.RPC.KeyFile())
cfg.RPC.TLSCertFile = "/abs/path/to/file.crt"
assert.Equal(t, "/abs/path/to/file.crt", cfg.RPC.CertFile())
assert.Equal("/abs/path/to/file.crt", cfg.RPC.CertFile())
cfg.RPC.TLSKeyFile = "/abs/path/to/file.key"
assert.Equal(t, "/abs/path/to/file.key", cfg.RPC.KeyFile())
assert.Equal("/abs/path/to/file.key", cfg.RPC.KeyFile())
}
func TestBaseConfigValidateBasic(t *testing.T) {

View File

@@ -9,7 +9,6 @@ import (
"text/template"
tmos "github.com/tendermint/tendermint/libs/os"
tmrand "github.com/tendermint/tendermint/libs/rand"
)
// DefaultDirPerm is the default permissions used when creating directories.
@@ -550,7 +549,6 @@ func ResetTestRootWithChainID(testName string, chainID string) (*Config, error)
}
config := TestConfig().SetRoot(rootDir)
config.Instrumentation.Namespace = fmt.Sprintf("%s_%s_%s", testName, chainID, tmrand.Str(16))
return config, nil
}
@@ -571,10 +569,6 @@ var testGenesisFmt = `{
"max_gas": "-1",
"time_iota_ms": "10"
},
"synchrony": {
"message_delay": "500000000",
"precision": "10000000"
},
"evidence": {
"max_age_num_blocks": "100000",
"max_age_duration": "172800000000000",

View File

@@ -14,24 +14,26 @@ func ensureFiles(t *testing.T, rootDir string, files ...string) {
for _, f := range files {
p := rootify(rootDir, f)
_, err := os.Stat(p)
assert.NoError(t, err, p)
assert.Nil(t, err, p)
}
}
func TestEnsureRoot(t *testing.T) {
require := require.New(t)
// setup temp dir for test
tmpDir, err := os.MkdirTemp("", "config-test")
require.NoError(t, err)
require.NoError(err)
defer os.RemoveAll(tmpDir)
// create root dir
EnsureRoot(tmpDir)
require.NoError(t, WriteConfigFile(tmpDir, DefaultConfig()))
require.NoError(WriteConfigFile(tmpDir, DefaultConfig()))
// make sure config is set properly
data, err := os.ReadFile(filepath.Join(tmpDir, defaultConfigFilePath))
require.NoError(t, err)
require.NoError(err)
checkConfig(t, string(data))
@@ -39,17 +41,19 @@ func TestEnsureRoot(t *testing.T) {
}
func TestEnsureTestRoot(t *testing.T) {
require := require.New(t)
testName := "ensureTestRoot"
// create root dir
cfg, err := ResetTestRoot(testName)
require.NoError(t, err)
require.NoError(err)
defer os.RemoveAll(cfg.RootDir)
rootDir := cfg.RootDir
// make sure config is set properly
data, err := os.ReadFile(filepath.Join(rootDir, defaultConfigFilePath))
require.NoError(t, err)
require.Nil(err)
checkConfig(t, string(data))

View File

@@ -2,7 +2,6 @@ package crypto
import (
"github.com/tendermint/tendermint/crypto/tmhash"
"github.com/tendermint/tendermint/internal/jsontypes"
"github.com/tendermint/tendermint/libs/bytes"
)
@@ -26,9 +25,6 @@ type PubKey interface {
VerifySignature(msg []byte, sig []byte) bool
Equals(PubKey) bool
Type() string
// Implementations must support tagged encoding in JSON.
jsontypes.Tagged
}
type PrivKey interface {
@@ -37,9 +33,6 @@ type PrivKey interface {
PubKey() PubKey
Equals(PrivKey) bool
Type() string
// Implementations must support tagged encoding in JSON.
jsontypes.Tagged
}
type Symmetric interface {

View File

@@ -12,7 +12,7 @@ import (
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/tmhash"
"github.com/tendermint/tendermint/internal/jsontypes"
tmjson "github.com/tendermint/tendermint/libs/json"
)
//-------------------------------------
@@ -56,16 +56,13 @@ const (
)
func init() {
jsontypes.MustRegister(PubKey{})
jsontypes.MustRegister(PrivKey{})
tmjson.RegisterType(PubKey{}, PubKeyName)
tmjson.RegisterType(PrivKey{}, PrivKeyName)
}
// PrivKey implements crypto.PrivKey.
type PrivKey []byte
// TypeTag satisfies the jsontypes.Tagged interface.
func (PrivKey) TypeTag() string { return PrivKeyName }
// Bytes returns the privkey byte format.
func (privKey PrivKey) Bytes() []byte {
return []byte(privKey)
@@ -154,9 +151,6 @@ var _ crypto.PubKey = PubKey{}
// PubKeyEd25519 implements crypto.PubKey for the Ed25519 signature scheme.
type PubKey []byte
// TypeTag satisfies the jsontypes.Tagged interface.
func (PubKey) TypeTag() string { return PubKeyName }
// Address is the SHA256-20 of the raw pubkey bytes.
func (pubKey PubKey) Address() crypto.Address {
if len(pubKey) != PubKeySize {

View File

@@ -17,7 +17,7 @@ func TestSignAndValidateEd25519(t *testing.T) {
msg := crypto.CRandBytes(128)
sig, err := privKey.Sign(msg)
require.NoError(t, err)
require.Nil(t, err)
// Test the signature
assert.True(t, pubKey.VerifySignature(msg, sig))

View File

@@ -7,14 +7,14 @@ import (
"github.com/tendermint/tendermint/crypto/ed25519"
"github.com/tendermint/tendermint/crypto/secp256k1"
"github.com/tendermint/tendermint/crypto/sr25519"
"github.com/tendermint/tendermint/internal/jsontypes"
"github.com/tendermint/tendermint/libs/json"
cryptoproto "github.com/tendermint/tendermint/proto/tendermint/crypto"
)
func init() {
jsontypes.MustRegister((*cryptoproto.PublicKey)(nil))
jsontypes.MustRegister((*cryptoproto.PublicKey_Ed25519)(nil))
jsontypes.MustRegister((*cryptoproto.PublicKey_Secp256K1)(nil))
json.RegisterType((*cryptoproto.PublicKey)(nil), "tendermint.crypto.PublicKey")
json.RegisterType((*cryptoproto.PublicKey_Ed25519)(nil), "tendermint.crypto.PublicKey_Ed25519")
json.RegisterType((*cryptoproto.PublicKey_Secp256K1)(nil), "tendermint.crypto.PublicKey_Secp256K1")
}
// PubKeyToProto takes crypto.PubKey and transforms it to a protobuf Pubkey

View File

@@ -24,10 +24,10 @@ const (
// everything. This also affects the generalized proof system as
// well.
type Proof struct {
Total int64 `json:"total,string"` // Total number of items.
Index int64 `json:"index,string"` // Index of item to prove.
LeafHash []byte `json:"leaf_hash"` // Hash of item value.
Aunts [][]byte `json:"aunts"` // Hashes from leaf's sibling to a root's child.
Total int64 `json:"total"` // Total number of items.
Index int64 `json:"index"` // Index of item to prove.
LeafHash []byte `json:"leaf_hash"` // Hash of item value.
Aunts [][]byte `json:"aunts"` // Hashes from leaf's sibling to a root's child.
}
// ProofsFromByteSlices computes inclusion proof for given items.
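The only difference between the two versions of `Proof` above is the `,string` option on the int64 json tags. A self-contained sketch of what that option changes; the type and values here are illustrative, not from the repository:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// proof mirrors the tag difference above: Total uses `,string`, Index does not.
type proof struct {
	Total int64 `json:"total,string"`
	Index int64 `json:"index"`
}

func main() {
	b, _ := json.Marshal(proof{Total: 8, Index: 3})
	// int64 fields tagged with `,string` marshal as quoted strings, which
	// protects consumers that parse JSON numbers as float64 from precision
	// loss on large values.
	fmt.Println(string(b)) // {"total":"8","index":3}
}
```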

View File

@@ -28,13 +28,13 @@ func TestKeyPath(t *testing.T) {
case KeyEncodingHex:
rand.Read(keys[i])
default:
require.Fail(t, "Unexpected encoding")
panic("Unexpected encoding")
}
path = path.AppendKey(keys[i], enc)
}
res, err := KeyPathToKeys(path.String())
require.NoError(t, err)
require.Nil(t, err)
require.Equal(t, len(keys), len(res))
for i, key := range keys {

View File

@@ -79,58 +79,58 @@ func TestProofOperators(t *testing.T) {
// Good
popz := ProofOperators([]ProofOperator{op1, op2, op3, op4})
err = popz.Verify(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")})
assert.NoError(t, err)
assert.Nil(t, err)
err = popz.VerifyValue(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", bz("INPUT1"))
assert.NoError(t, err)
assert.Nil(t, err)
// BAD INPUT
err = popz.Verify(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1_WRONG")})
assert.Error(t, err)
assert.NotNil(t, err)
err = popz.VerifyValue(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", bz("INPUT1_WRONG"))
assert.Error(t, err)
assert.NotNil(t, err)
// BAD KEY 1
err = popz.Verify(bz("OUTPUT4"), "/KEY3/KEY2/KEY1", [][]byte{bz("INPUT1")})
assert.Error(t, err)
assert.NotNil(t, err)
// BAD KEY 2
err = popz.Verify(bz("OUTPUT4"), "KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")})
assert.Error(t, err)
assert.NotNil(t, err)
// BAD KEY 3
err = popz.Verify(bz("OUTPUT4"), "/KEY4/KEY2/KEY1/", [][]byte{bz("INPUT1")})
assert.Error(t, err)
assert.NotNil(t, err)
// BAD KEY 4
err = popz.Verify(bz("OUTPUT4"), "//KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")})
assert.Error(t, err)
assert.NotNil(t, err)
// BAD KEY 5
err = popz.Verify(bz("OUTPUT4"), "/KEY2/KEY1", [][]byte{bz("INPUT1")})
assert.Error(t, err)
assert.NotNil(t, err)
// BAD OUTPUT 1
err = popz.Verify(bz("OUTPUT4_WRONG"), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")})
assert.Error(t, err)
assert.NotNil(t, err)
// BAD OUTPUT 2
err = popz.Verify(bz(""), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")})
assert.Error(t, err)
assert.NotNil(t, err)
// BAD POPZ 1
popz = []ProofOperator{op1, op2, op4}
err = popz.Verify(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")})
assert.Error(t, err)
assert.NotNil(t, err)
// BAD POPZ 2
popz = []ProofOperator{op4, op3, op2, op1}
err = popz.Verify(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")})
assert.Error(t, err)
assert.NotNil(t, err)
// BAD POPZ 3
popz = []ProofOperator{}
err = popz.Verify(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")})
assert.Error(t, err)
assert.NotNil(t, err)
}
func bz(s string) []byte {

View File

@@ -10,7 +10,7 @@ import (
secp256k1 "github.com/btcsuite/btcd/btcec"
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/internal/jsontypes"
tmjson "github.com/tendermint/tendermint/libs/json"
// necessary for Bitcoin address format
"golang.org/x/crypto/ripemd160" // nolint
@@ -26,8 +26,8 @@ const (
)
func init() {
jsontypes.MustRegister(PubKey{})
jsontypes.MustRegister(PrivKey{})
tmjson.RegisterType(PubKey{}, PubKeyName)
tmjson.RegisterType(PrivKey{}, PrivKeyName)
}
var _ crypto.PrivKey = PrivKey{}
@@ -35,9 +35,6 @@ var _ crypto.PrivKey = PrivKey{}
// PrivKey implements PrivKey.
type PrivKey []byte
// TypeTag satisfies the jsontypes.Tagged interface.
func (PrivKey) TypeTag() string { return PrivKeyName }
// Bytes marshalls the private key using amino encoding.
func (privKey PrivKey) Bytes() []byte {
return []byte(privKey)
@@ -141,9 +138,6 @@ const PubKeySize = 33
// This prefix is followed with the x-coordinate.
type PubKey []byte
// TypeTag satisfies the jsontypes.Tagged interface.
func (PubKey) TypeTag() string { return PubKeyName }
// Address returns a Bitcoin style addresses: RIPEMD160(SHA256(pubkey))
func (pubKey PubKey) Address() crypto.Address {
if len(pubKey) != PubKeySize {

View File

@@ -52,7 +52,7 @@ func TestSignAndValidateSecp256k1(t *testing.T) {
msg := crypto.CRandBytes(128)
sig, err := privKey.Sign(msg)
require.NoError(t, err)
require.Nil(t, err)
assert.True(t, pubKey.VerifySignature(msg, sig))

View File

@@ -1,8 +1,6 @@
package sr25519
import (
"github.com/tendermint/tendermint/internal/jsontypes"
)
import tmjson "github.com/tendermint/tendermint/libs/json"
const (
PrivKeyName = "tendermint/PrivKeySr25519"
@@ -10,6 +8,6 @@ const (
)
func init() {
jsontypes.MustRegister(PubKey{})
jsontypes.MustRegister(PrivKey{})
tmjson.RegisterType(PubKey{}, PubKeyName)
tmjson.RegisterType(PrivKey{}, PrivKeyName)
}

View File

@@ -29,9 +29,6 @@ type PrivKey struct {
kp *sr25519.KeyPair
}
// TypeTag satisfies the jsontypes.Tagged interface.
func (PrivKey) TypeTag() string { return PrivKeyName }
// Bytes returns the byte-encoded PrivKey.
func (privKey PrivKey) Bytes() []byte {
if privKey.kp == nil {

View File

@@ -23,9 +23,6 @@ const (
// PubKey implements crypto.PubKey.
type PubKey []byte
// TypeTag satisfies the jsontypes.Tagged interface.
func (PubKey) TypeTag() string { return PubKeyName }
// Address is the SHA256-20 of the raw pubkey bytes.
func (pubKey PubKey) Address() crypto.Address {
if len(pubKey) != PubKeySize {

View File

@@ -18,7 +18,7 @@ func TestSignAndValidateSr25519(t *testing.T) {
msg := crypto.CRandBytes(128)
sig, err := privKey.Sign(msg)
require.NoError(t, err)
require.Nil(t, err)
// Test the signature
assert.True(t, pubKey.VerifySignature(msg, sig))

View File

@@ -0,0 +1,122 @@
package xchacha20poly1305
import (
"bytes"
"encoding/hex"
"testing"
)
func toHex(bits []byte) string {
return hex.EncodeToString(bits)
}
func fromHex(bits string) []byte {
b, err := hex.DecodeString(bits)
if err != nil {
panic(err)
}
return b
}
func TestHChaCha20(t *testing.T) {
for i, v := range hChaCha20Vectors {
var key [32]byte
var nonce [16]byte
copy(key[:], v.key)
copy(nonce[:], v.nonce)
HChaCha20(&key, &nonce, &key)
if !bytes.Equal(key[:], v.keystream) {
t.Errorf("test %d: keystream mismatch:\n \t got: %s\n \t want: %s", i, toHex(key[:]), toHex(v.keystream))
}
}
}
var hChaCha20Vectors = []struct {
key, nonce, keystream []byte
}{
{
fromHex("0000000000000000000000000000000000000000000000000000000000000000"),
fromHex("000000000000000000000000000000000000000000000000"),
fromHex("1140704c328d1d5d0e30086cdf209dbd6a43b8f41518a11cc387b669b2ee6586"),
},
{
fromHex("8000000000000000000000000000000000000000000000000000000000000000"),
fromHex("000000000000000000000000000000000000000000000000"),
fromHex("7d266a7fd808cae4c02a0a70dcbfbcc250dae65ce3eae7fc210f54cc8f77df86"),
},
{
fromHex("0000000000000000000000000000000000000000000000000000000000000001"),
fromHex("000000000000000000000000000000000000000000000002"),
fromHex("e0c77ff931bb9163a5460c02ac281c2b53d792b1c43fea817e9ad275ae546963"),
},
{
fromHex("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f"),
fromHex("000102030405060708090a0b0c0d0e0f1011121314151617"),
fromHex("51e3ff45a895675c4b33b46c64f4a9ace110d34df6a2ceab486372bacbd3eff6"),
},
{
fromHex("24f11cce8a1b3d61e441561a696c1c1b7e173d084fd4812425435a8896a013dc"),
fromHex("d9660c5900ae19ddad28d6e06e45fe5e"),
fromHex("5966b3eec3bff1189f831f06afe4d4e3be97fa9235ec8c20d08acfbbb4e851e3"),
},
}
func TestVectors(t *testing.T) {
for i, v := range vectors {
if len(v.plaintext) == 0 {
v.plaintext = make([]byte, len(v.ciphertext))
}
var nonce [24]byte
copy(nonce[:], v.nonce)
aead, err := New(v.key)
if err != nil {
t.Error(err)
}
dst := aead.Seal(nil, nonce[:], v.plaintext, v.ad)
if !bytes.Equal(dst, v.ciphertext) {
t.Errorf("test %d: ciphertext mismatch:\n \t got: %s\n \t want: %s", i, toHex(dst), toHex(v.ciphertext))
}
open, err := aead.Open(nil, nonce[:], dst, v.ad)
if err != nil {
t.Error(err)
}
if !bytes.Equal(open, v.plaintext) {
t.Errorf("test %d: plaintext mismatch:\n \t got: %s\n \t want: %s", i, string(open), string(v.plaintext))
}
}
}
var vectors = []struct {
key, nonce, ad, plaintext, ciphertext []byte
}{
{
[]byte{
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a,
0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95,
0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
},
[]byte{0x07, 0x00, 0x00, 0x00, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b},
[]byte{0x50, 0x51, 0x52, 0x53, 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7},
[]byte(
"Ladies and Gentlemen of the class of '99: If I could offer you only one tip for the future, sunscreen would be it.",
),
[]byte{
0x45, 0x3c, 0x06, 0x93, 0xa7, 0x40, 0x7f, 0x04, 0xff, 0x4c, 0x56,
0xae, 0xdb, 0x17, 0xa3, 0xc0, 0xa1, 0xaf, 0xff, 0x01, 0x17, 0x49,
0x30, 0xfc, 0x22, 0x28, 0x7c, 0x33, 0xdb, 0xcf, 0x0a, 0xc8, 0xb8,
0x9a, 0xd9, 0x29, 0x53, 0x0a, 0x1b, 0xb3, 0xab, 0x5e, 0x69, 0xf2,
0x4c, 0x7f, 0x60, 0x70, 0xc8, 0xf8, 0x40, 0xc9, 0xab, 0xb4, 0xf6,
0x9f, 0xbf, 0xc8, 0xa7, 0xff, 0x51, 0x26, 0xfa, 0xee, 0xbb, 0xb5,
0x58, 0x05, 0xee, 0x9c, 0x1c, 0xf2, 0xce, 0x5a, 0x57, 0x26, 0x32,
0x87, 0xae, 0xc5, 0x78, 0x0f, 0x04, 0xec, 0x32, 0x4c, 0x35, 0x14,
0x12, 0x2c, 0xfc, 0x32, 0x31, 0xfc, 0x1a, 0x8b, 0x71, 0x8a, 0x62,
0x86, 0x37, 0x30, 0xa2, 0x70, 0x2b, 0xb7, 0x63, 0x66, 0x11, 0x6b,
0xed, 0x09, 0xe0, 0xfd, 0x5c, 0x6d, 0x84, 0xb6, 0xb0, 0xc1, 0xab,
0xaf, 0x24, 0x9d, 0x5d, 0xd0, 0xf7, 0xf5, 0xa7, 0xea,
},
},
}

View File

@@ -0,0 +1,259 @@
// Package xchacha20poly1305 creates an AEAD using hchacha, chacha, and poly1305
// This allows for randomized nonces to be used in conjunction with chacha.
package xchacha20poly1305
import (
"crypto/cipher"
"encoding/binary"
"errors"
"fmt"
"golang.org/x/crypto/chacha20poly1305"
)
// xchacha20poly1305 implements cipher.AEAD
type xchacha20poly1305 struct {
key [KeySize]byte
}
const (
// KeySize is the size of the key used by this AEAD, in bytes.
KeySize = 32
// NonceSize is the size of the nonce used with this AEAD, in bytes.
NonceSize = 24
// TagSize is the size, in bytes, of the poly1305 authentication tag
TagSize = 16
// MaxPlaintextSize is the max size that can be passed into a single call of Seal
MaxPlaintextSize = (1 << 38) - 64
// MaxCiphertextSize is the max size that can be passed into a single call of Open;
// it differs from MaxPlaintextSize by the size of the poly1305 tag
MaxCiphertextSize = (1 << 38) - 48
// sigma are constants used in xchacha.
// Unrolled from a slice so that they can be inlined, as slices can't be constants.
sigma0 = uint32(0x61707865)
sigma1 = uint32(0x3320646e)
sigma2 = uint32(0x79622d32)
sigma3 = uint32(0x6b206574)
)
// New returns a new xchachapoly1305 AEAD
func New(key []byte) (cipher.AEAD, error) {
if len(key) != KeySize {
return nil, errors.New("xchacha20poly1305: bad key length")
}
ret := new(xchacha20poly1305)
copy(ret.key[:], key)
return ret, nil
}
func (c *xchacha20poly1305) NonceSize() int {
return NonceSize
}
func (c *xchacha20poly1305) Overhead() int {
return TagSize
}
func (c *xchacha20poly1305) Seal(dst, nonce, plaintext, additionalData []byte) []byte {
if len(nonce) != NonceSize {
panic("xchacha20poly1305: bad nonce length passed to Seal")
}
if uint64(len(plaintext)) > MaxPlaintextSize {
panic("xchacha20poly1305: plaintext too large")
}
var subKey [KeySize]byte
var hNonce [16]byte
var subNonce [chacha20poly1305.NonceSize]byte
copy(hNonce[:], nonce[:16])
HChaCha20(&subKey, &hNonce, &c.key)
// This can't error because we always provide a correctly sized key
chacha20poly1305, _ := chacha20poly1305.New(subKey[:])
copy(subNonce[4:], nonce[16:])
return chacha20poly1305.Seal(dst, subNonce[:], plaintext, additionalData)
}
func (c *xchacha20poly1305) Open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) {
if len(nonce) != NonceSize {
return nil, fmt.Errorf("xchacha20poly1305: bad nonce length passed to Open")
}
if uint64(len(ciphertext)) > MaxCiphertextSize {
return nil, fmt.Errorf("xchacha20poly1305: ciphertext too large")
}
var subKey [KeySize]byte
var hNonce [16]byte
var subNonce [chacha20poly1305.NonceSize]byte
copy(hNonce[:], nonce[:16])
HChaCha20(&subKey, &hNonce, &c.key)
// This can't error because we always provide a correctly sized key
chacha20poly1305, _ := chacha20poly1305.New(subKey[:])
copy(subNonce[4:], nonce[16:])
return chacha20poly1305.Open(dst, subNonce[:], ciphertext, additionalData)
}
// HChaCha exported from
// https://github.com/aead/chacha20/blob/8b13a72661dae6e9e5dea04f344f0dc95ea29547/chacha/chacha_generic.go#L194
// TODO: Add support for the different assembly instructions used there.
// The MIT License (MIT)
// Copyright (c) 2016 Andreas Auernhammer
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
// HChaCha20 generates 32 pseudo-random bytes from a 128 bit nonce and a 256 bit secret key.
// It can be used as a key-derivation-function (KDF).
func HChaCha20(out *[32]byte, nonce *[16]byte, key *[32]byte) { hChaCha20Generic(out, nonce, key) }
func hChaCha20Generic(out *[32]byte, nonce *[16]byte, key *[32]byte) {
v00 := sigma0
v01 := sigma1
v02 := sigma2
v03 := sigma3
v04 := binary.LittleEndian.Uint32(key[0:])
v05 := binary.LittleEndian.Uint32(key[4:])
v06 := binary.LittleEndian.Uint32(key[8:])
v07 := binary.LittleEndian.Uint32(key[12:])
v08 := binary.LittleEndian.Uint32(key[16:])
v09 := binary.LittleEndian.Uint32(key[20:])
v10 := binary.LittleEndian.Uint32(key[24:])
v11 := binary.LittleEndian.Uint32(key[28:])
v12 := binary.LittleEndian.Uint32(nonce[0:])
v13 := binary.LittleEndian.Uint32(nonce[4:])
v14 := binary.LittleEndian.Uint32(nonce[8:])
v15 := binary.LittleEndian.Uint32(nonce[12:])
for i := 0; i < 20; i += 2 {
v00 += v04
v12 ^= v00
v12 = (v12 << 16) | (v12 >> 16)
v08 += v12
v04 ^= v08
v04 = (v04 << 12) | (v04 >> 20)
v00 += v04
v12 ^= v00
v12 = (v12 << 8) | (v12 >> 24)
v08 += v12
v04 ^= v08
v04 = (v04 << 7) | (v04 >> 25)
v01 += v05
v13 ^= v01
v13 = (v13 << 16) | (v13 >> 16)
v09 += v13
v05 ^= v09
v05 = (v05 << 12) | (v05 >> 20)
v01 += v05
v13 ^= v01
v13 = (v13 << 8) | (v13 >> 24)
v09 += v13
v05 ^= v09
v05 = (v05 << 7) | (v05 >> 25)
v02 += v06
v14 ^= v02
v14 = (v14 << 16) | (v14 >> 16)
v10 += v14
v06 ^= v10
v06 = (v06 << 12) | (v06 >> 20)
v02 += v06
v14 ^= v02
v14 = (v14 << 8) | (v14 >> 24)
v10 += v14
v06 ^= v10
v06 = (v06 << 7) | (v06 >> 25)
v03 += v07
v15 ^= v03
v15 = (v15 << 16) | (v15 >> 16)
v11 += v15
v07 ^= v11
v07 = (v07 << 12) | (v07 >> 20)
v03 += v07
v15 ^= v03
v15 = (v15 << 8) | (v15 >> 24)
v11 += v15
v07 ^= v11
v07 = (v07 << 7) | (v07 >> 25)
v00 += v05
v15 ^= v00
v15 = (v15 << 16) | (v15 >> 16)
v10 += v15
v05 ^= v10
v05 = (v05 << 12) | (v05 >> 20)
v00 += v05
v15 ^= v00
v15 = (v15 << 8) | (v15 >> 24)
v10 += v15
v05 ^= v10
v05 = (v05 << 7) | (v05 >> 25)
v01 += v06
v12 ^= v01
v12 = (v12 << 16) | (v12 >> 16)
v11 += v12
v06 ^= v11
v06 = (v06 << 12) | (v06 >> 20)
v01 += v06
v12 ^= v01
v12 = (v12 << 8) | (v12 >> 24)
v11 += v12
v06 ^= v11
v06 = (v06 << 7) | (v06 >> 25)
v02 += v07
v13 ^= v02
v13 = (v13 << 16) | (v13 >> 16)
v08 += v13
v07 ^= v08
v07 = (v07 << 12) | (v07 >> 20)
v02 += v07
v13 ^= v02
v13 = (v13 << 8) | (v13 >> 24)
v08 += v13
v07 ^= v08
v07 = (v07 << 7) | (v07 >> 25)
v03 += v04
v14 ^= v03
v14 = (v14 << 16) | (v14 >> 16)
v09 += v14
v04 ^= v09
v04 = (v04 << 12) | (v04 >> 20)
v03 += v04
v14 ^= v03
v14 = (v14 << 8) | (v14 >> 24)
v09 += v14
v04 ^= v09
v04 = (v04 << 7) | (v04 >> 25)
}
binary.LittleEndian.PutUint32(out[0:], v00)
binary.LittleEndian.PutUint32(out[4:], v01)
binary.LittleEndian.PutUint32(out[8:], v02)
binary.LittleEndian.PutUint32(out[12:], v03)
binary.LittleEndian.PutUint32(out[16:], v12)
binary.LittleEndian.PutUint32(out[20:], v13)
binary.LittleEndian.PutUint32(out[24:], v14)
binary.LittleEndian.PutUint32(out[28:], v15)
}
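A hypothetical round trip exercising `New`, `Seal`, and `Open` from the file above; the import path is assumed from the repository layout, and error handling is abbreviated:

```go
package main

import (
	"crypto/rand"
	"fmt"

	"github.com/tendermint/tendermint/crypto/xchacha20poly1305"
)

func main() {
	key := make([]byte, xchacha20poly1305.KeySize)     // 32-byte secret key
	nonce := make([]byte, xchacha20poly1305.NonceSize) // 24-byte nonce; long enough to draw at random
	if _, err := rand.Read(key); err != nil {
		panic(err)
	}
	if _, err := rand.Read(nonce); err != nil {
		panic(err)
	}

	aead, err := xchacha20poly1305.New(key)
	if err != nil {
		panic(err)
	}
	ct := aead.Seal(nil, nonce, []byte("attack at dawn"), nil)
	pt, err := aead.Open(nil, nonce, ct, nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", pt) // attack at dawn
}
```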

View File

@@ -0,0 +1,113 @@
package xchacha20poly1305
import (
"bytes"
crand "crypto/rand"
mrand "math/rand"
"testing"
)
// The following test is taken from
// https://github.com/golang/crypto/blob/master/chacha20poly1305/chacha20poly1305_test.go#L69
// It requires the below copyright notice, where "this source code" refers to the following function.
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found at the bottom of this file.
func TestRandom(t *testing.T) {
// Some random tests to verify Open(Seal) == Plaintext
for i := 0; i < 256; i++ {
var nonce [24]byte
var key [32]byte
al := mrand.Intn(128)
pl := mrand.Intn(16384)
ad := make([]byte, al)
plaintext := make([]byte, pl)
_, err := crand.Read(key[:])
if err != nil {
t.Errorf("error on read: %w", err)
}
_, err = crand.Read(nonce[:])
if err != nil {
t.Errorf("error on read: %w", err)
}
_, err = crand.Read(ad)
if err != nil {
t.Errorf("error on read: %w", err)
}
_, err = crand.Read(plaintext)
if err != nil {
t.Errorf("error on read: %w", err)
}
aead, err := New(key[:])
if err != nil {
t.Fatal(err)
}
ct := aead.Seal(nil, nonce[:], plaintext, ad)
plaintext2, err := aead.Open(nil, nonce[:], ct, ad)
if err != nil {
t.Errorf("random #%d: Open failed", i)
continue
}
if !bytes.Equal(plaintext, plaintext2) {
t.Errorf("random #%d: plaintext's don't match: got %x vs %x", i, plaintext2, plaintext)
continue
}
if len(ad) > 0 {
alterAdIdx := mrand.Intn(len(ad))
ad[alterAdIdx] ^= 0x80
if _, err := aead.Open(nil, nonce[:], ct, ad); err == nil {
t.Errorf("random #%d: Open was successful after altering additional data", i)
}
ad[alterAdIdx] ^= 0x80
}
alterNonceIdx := mrand.Intn(aead.NonceSize())
nonce[alterNonceIdx] ^= 0x80
if _, err := aead.Open(nil, nonce[:], ct, ad); err == nil {
t.Errorf("random #%d: Open was successful after altering nonce", i)
}
nonce[alterNonceIdx] ^= 0x80
alterCtIdx := mrand.Intn(len(ct))
ct[alterCtIdx] ^= 0x80
if _, err := aead.Open(nil, nonce[:], ct, ad); err == nil {
t.Errorf("random #%d: Open was successful after altering ciphertext", i)
}
ct[alterCtIdx] ^= 0x80
}
}
// AFOREMENTIONED LICENSE
// Copyright (c) 2009 The Go Authors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@@ -0,0 +1,54 @@
package xsalsa20symmetric
import (
"errors"
"fmt"
"golang.org/x/crypto/nacl/secretbox"
"github.com/tendermint/tendermint/crypto"
)
// TODO, make this into a struct that implements crypto.Symmetric.
const nonceLen = 24
const secretLen = 32
// secret must be 32 bytes long. Use something like Sha256(Bcrypt(passphrase))
// The ciphertext is (secretbox.Overhead + 24) bytes longer than the plaintext.
func EncryptSymmetric(plaintext []byte, secret []byte) (ciphertext []byte) {
if len(secret) != secretLen {
panic(fmt.Sprintf("Secret must be 32 bytes long, got len %v", len(secret)))
}
nonce := crypto.CRandBytes(nonceLen)
nonceArr := [nonceLen]byte{}
copy(nonceArr[:], nonce)
secretArr := [secretLen]byte{}
copy(secretArr[:], secret)
ciphertext = make([]byte, nonceLen+secretbox.Overhead+len(plaintext))
copy(ciphertext, nonce)
secretbox.Seal(ciphertext[nonceLen:nonceLen], plaintext, &nonceArr, &secretArr)
return ciphertext
}
// secret must be 32 bytes long. Use something like Sha256(Bcrypt(passphrase))
// The ciphertext is (secretbox.Overhead + 24) bytes longer than the plaintext.
func DecryptSymmetric(ciphertext []byte, secret []byte) (plaintext []byte, err error) {
if len(secret) != secretLen {
panic(fmt.Sprintf("Secret must be 32 bytes long, got len %v", len(secret)))
}
if len(ciphertext) <= secretbox.Overhead+nonceLen {
return nil, errors.New("ciphertext is too short")
}
nonce := ciphertext[:nonceLen]
nonceArr := [nonceLen]byte{}
copy(nonceArr[:], nonce)
secretArr := [secretLen]byte{}
copy(secretArr[:], secret)
plaintext = make([]byte, len(ciphertext)-nonceLen-secretbox.Overhead)
_, ok := secretbox.Open(plaintext[:0], ciphertext[nonceLen:], &nonceArr, &secretArr)
if !ok {
return nil, errors.New("ciphertext decryption failed")
}
return plaintext, nil
}

View File

@@ -0,0 +1,40 @@
package xsalsa20symmetric
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/crypto/bcrypt"
"github.com/tendermint/tendermint/crypto"
)
func TestSimple(t *testing.T) {
plaintext := []byte("sometext")
secret := []byte("somesecretoflengththirtytwo===32")
ciphertext := EncryptSymmetric(plaintext, secret)
plaintext2, err := DecryptSymmetric(ciphertext, secret)
require.Nil(t, err, "%+v", err)
assert.Equal(t, plaintext, plaintext2)
}
func TestSimpleWithKDF(t *testing.T) {
plaintext := []byte("sometext")
secretPass := []byte("somesecret")
secret, err := bcrypt.GenerateFromPassword(secretPass, 12)
if err != nil {
t.Error(err)
}
secret = crypto.Sha256(secret)
ciphertext := EncryptSymmetric(plaintext, secret)
plaintext2, err := DecryptSymmetric(ciphertext, secret)
require.Nil(t, err, "%+v", err)
assert.Equal(t, plaintext, plaintext2)
}

View File

@@ -65,5 +65,5 @@ networks:
ipam:
driver: default
config:
-
subnet: 192.167.10.0/16
-
subnet: 192.167.10.0/16

View File

@@ -83,19 +83,19 @@ func cmdKVStore(cmd *cobra.Command, args []string) error {
if err != nil {
return err
}
// Stop upon receiving SIGTERM or CTRL-C.
ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
defer cancel()
srv.SetLogger(logger.With("module", "abci-server"))
if err := srv.Start(ctx); err != nil {
if err := srv.Start(); err != nil {
return err
}
// Run until shutdown.
<-ctx.Done()
srv.Wait()
// Stop upon receiving SIGTERM or CTRL-C.
tmos.TrapSignal(logger, func() {
// Cleanup
srv.Stop()
})
// Run forever.
select {}
}
```

View File

@@ -79,7 +79,6 @@ Note the context/background should be written in the present tense.
- [ADR-065: Custom Event Indexing](./adr-065-custom-event-indexing.md)
- [ADR-068: Reverse-Sync](./adr-068-reverse-sync.md)
- [ADR-067: Mempool Refactor](./adr-067-mempool-refactor.md)
- [ADR-075: RPC Event Subscription Interface](./adr-075-rpc-subscription.md)
### Rejected
@@ -102,4 +101,3 @@ Note the context/background should be written in the present tense.
- [ADR-057: RPC](./adr-057-RPC.md)
- [ADR-069: Node Initialization](./adr-069-flexible-node-initialization.md)
- [ADR-071: Proposer-Based Timestamps](adr-071-proposer-based-timestamps.md)
- [ADR-074: Migrate Timeout Parameters to Consensus Parameters](./adr-074-timeout-params.md)

View File

@@ -1,203 +0,0 @@
# ADR 74: Migrate Timeout Parameters to Consensus Parameters
## Changelog
- 03-Jan-2022: Initial draft (@williambanfield)
- 13-Jan-2022: Updated to indicate work on upgrade path needed (@williambanfield)
## Status
Proposed
## Context
### Background
Tendermint's consensus timeout parameters are currently configured locally by each validator
in the validator's [config.toml][config-toml].
This means that the validators on a Tendermint network may have different timeouts
from each other. There is no reason for validators on the same network to configure
different timeout values. Proper functioning of the Tendermint consensus algorithm
relies on these parameters being uniform across validators.
The configurable values are as follows (a sketch of how they are grouped in the node-local configuration appears after the list):
* `TimeoutPropose`
* How long the consensus algorithm waits for a proposal block before issuing a prevote.
* If no proposal block arrives by `TimeoutPropose`, then the consensus algorithm will issue a nil prevote.
* `TimeoutProposeDelta`
* How much the `TimeoutPropose` grows each round.
* `TimeoutPrevote`
* How long the consensus algorithm waits after receiving +2/3 prevotes with
no quorum for a value before issuing a precommit for nil.
(See the [arXiv paper][arxiv-paper], Algorithm 1, Line 34)
* `TimeoutPrevoteDelta`
* How much the `TimeoutPrevote` increases with each round.
* `TimeoutPrecommit`
* How long the consensus algorithm waits after receiving +2/3 precommits that
do not have a quorum for a value before entering the next round.
(See the [arXiv paper][arxiv-paper], Algorithm 1, Line 47)
* `TimeoutPrecommitDelta`
* How much the `TimeoutPrecommit` increases with each round.
* `TimeoutCommit`
* How long the consensus algorithm waits after committing a block but before starting the new height.
* This gives a validator a chance to receive slow precommits.
* `SkipTimeoutCommit`
* Make progress as soon as the node has 100% of the precommits.
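A minimal Go sketch of how the eight node-local fields above might be grouped, modeled on Tendermint's consensus configuration; the struct name, mapstructure tags, and example defaults are illustrative assumptions rather than a verbatim copy of the source:

```go
package config

import "time"

// ConsensusTimeouts is a hypothetical grouping of the node-local timeout
// fields discussed above; tag names and defaults are assumptions.
type ConsensusTimeouts struct {
	TimeoutPropose        time.Duration `mapstructure:"timeout-propose"`         // e.g. 3s
	TimeoutProposeDelta   time.Duration `mapstructure:"timeout-propose-delta"`   // e.g. 500ms
	TimeoutPrevote        time.Duration `mapstructure:"timeout-prevote"`         // e.g. 1s
	TimeoutPrevoteDelta   time.Duration `mapstructure:"timeout-prevote-delta"`   // e.g. 500ms
	TimeoutPrecommit      time.Duration `mapstructure:"timeout-precommit"`       // e.g. 1s
	TimeoutPrecommitDelta time.Duration `mapstructure:"timeout-precommit-delta"` // e.g. 500ms
	TimeoutCommit         time.Duration `mapstructure:"timeout-commit"`          // e.g. 1s
	SkipTimeoutCommit     bool          `mapstructure:"skip-timeout-commit"`
}
```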
### Overview of Change
We will consolidate the timeout parameters and migrate them from the node-local
`config.toml` file into the network-global consensus parameters.
The 8 timeout parameters will be consolidated down to 6. These will be as follows (a sketch of the consolidated grouping appears after the list):
* `TimeoutPropose`
* Same as current `TimeoutPropose`.
* `TimeoutProposeDelta`
* Same as current `TimeoutProposeDelta`.
* `TimeoutVote`
* How long validators wait for votes in both the prevote
and precommit phase of the consensus algorithm. This parameter subsumes
the current `TimeoutPrevote` and `TimeoutPrecommit` parameters.
* `TimeoutVoteDelta`
* How much the `TimeoutVote` will grow each successive round.
This parameter subsumes the current `TimeoutPrevoteDelta` and `TimeoutPrecommitDelta`
parameters.
* `TimeoutCommit`
* Same as current `TimeoutCommit`.
* `EnableTimeoutCommitBypass`
* Same as current `SkipTimeoutCommit`, renamed for clarity.
A safe default will be provided by Tendermint for each of these parameters and
networks will be able to update the parameters as they see fit. Local updates
to these parameters will no longer be possible; instead, the application will control
updating the parameters. Applications using the Cosmos SDK will automatically be
able to change the values of these consensus parameters [via a governance proposal][cosmos-sdk-consensus-params].
This change is low-risk. While parameters are locally configurable, many running chains
do not change them from their default values. For example, initializing
a node on Osmosis, Terra, or the Cosmos Hub using their `init` commands produces
a `config.toml` with Tendermint's default values for these parameters.
### Why this parameter consolidation?
Reducing the number of parameters is good for UX. Fewer superfluous parameters makes
running and operating a Tendermint network less confusing.
The Prevote and Precommit messages are similar in size and require similar amounts
of processing, so there is no strong need for them to be configured separately.
The `TimeoutPropose` parameter governs how long Tendermint will wait for the proposed
block to be gossiped. Blocks are much larger than votes and therefore tend to be
gossiped much more slowly. It therefore makes sense to keep `TimeoutPropose` and
the `TimeoutProposeDelta` as parameters separate from the vote timeouts.
`TimeoutCommit` is used by chains to ensure that the network waits for the votes from
slower validators before proceeding to the next height. Without this timeout, the votes
from slower validators would consistently not be included in blocks and those validators
would not be counted as 'up' from the chain's perspective. Being down damages a validator's
reputation and causes potential stakers to think twice before delegating to that validator.
`TimeoutCommit` also prevents the network from producing the next height as soon as validators
on the fastest hardware with a summed voting power of +2/3 of the network's total have
completed execution of the block. Allowing the network to proceed as soon as the fastest
+2/3 completed execution would have a cumulative effect over heights, eventually
leaving slower validators unable to participate in consensus at all. `TimeoutCommit`
therefore allows networks to have greater variability in hardware. Additional
discussion of this can be found in [tendermint issue 5911][tendermint-issue-5911-comment]
and [spec issue 359][spec-issue-359].
## Alternative Approaches
### Hardcode the parameters
Many Tendermint networks run on similar cloud-hosted infrastructure. Therefore,
they have similar bandwidth and machine resources. The timings for propagating votes
and blocks are likely to be reasonably similar across networks. As a result, the
timeout parameters are good candidates for being hardcoded. Hardcoding the timeouts
in Tendermint would mean entirely removing these parameters from any configuration
that could be altered by either an application or a node operator. Instead,
Tendermint would ship with a set of timeouts and all applications using Tendermint
would use this exact same set of values.
While Tendermint nodes often run with similar bandwidth and on similar cloud-hosted
machines, there are enough points of variability to make configuring
consensus timeouts meaningful. Namely, Tendermint network topologies are likely to be
very different from chain to chain. Additionally, applications may vary greatly in
how long the `Commit` phase may take. Applications that perform more work during `Commit`
require a longer `TimeoutCommit` to allow the application to complete its work
and be prepared for the next height.
## Decision
The decision has been made to implement this work, with the caveat that the
specific mechanism for introducing the new parameters to chains is still ongoing.
## Detailed Design
### New Consensus Parameters
A new `TimeoutParams` `message` will be added to the [params.proto file][consensus-params-proto].
This message will have the following form:
```proto
message TimeoutParams {
  google.protobuf.Duration propose = 1;
  google.protobuf.Duration propose_delta = 2;
  google.protobuf.Duration vote = 3;
  google.protobuf.Duration vote_delta = 4;
  google.protobuf.Duration commit = 5;
  bool enable_commit_timeout_bypass = 6;
}
```
This new message will be added as a field into the [`ConsensusParams`
message][consensus-params-proto]. The same default values that are [currently
set for these parameters][current-timeout-defaults] in the local configuration
file will be used as the defaults for these new consensus parameters in the
[consensus parameter defaults][default-consensus-params].
The new consensus parameters will be subject to the same
[validity rules][time-param-validation] as the current configuration values,
namely, each value must be non-negative.
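To make that rule concrete, here is a minimal Go sketch of the validation; the struct mirrors the proto message above, but the type and method names are our assumptions rather than the eventual Tendermint code:
```go
package types

import (
	"fmt"
	"time"
)

// TimeoutParams mirrors the proto message above; names are illustrative only.
type TimeoutParams struct {
	Propose                   time.Duration
	ProposeDelta              time.Duration
	Vote                      time.Duration
	VoteDelta                 time.Duration
	Commit                    time.Duration
	EnableCommitTimeoutBypass bool
}

// ValidateBasic enforces the same validity rule as the current config
// checks: every timeout duration must be non-negative.
func (p TimeoutParams) ValidateBasic() error {
	for _, f := range []struct {
		name string
		dur  time.Duration
	}{
		{"propose", p.Propose},
		{"propose_delta", p.ProposeDelta},
		{"vote", p.Vote},
		{"vote_delta", p.VoteDelta},
		{"commit", p.Commit},
	} {
		if f.dur < 0 {
			return fmt.Errorf("timeout %s cannot be negative: %v", f.name, f.dur)
		}
	}
	return nil
}
```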
### Migration
The new `ConsensusParameters` will be added during an upcoming release. In this
release, the old `config.toml` parameters will cease to control the timeouts and
an error will be logged on nodes that continue to specify these values. The specific
mechanism by which these parameters will be added to a chain is being discussed in
[RFC-009][rfc-009] and will be decided ahead of the next release.
The specific mechanism for adding these parameters depends on work related to
[soft upgrades][soft-upgrades], which is still ongoing.
## Consequences
### Positive
* Timeout parameters will be equal across all of the validators in a Tendermint network.
* Remove superfluous timeout parameters.
### Negative
### Neutral
* Timeout parameters require consensus to change.
## References
[consensus-params-proto]: https://github.com/tendermint/spec/blob/a00de7199f5558cdd6245bbbcd1d8405ccfb8129/proto/tendermint/types/params.proto#L11
[hashed-params]: https://github.com/tendermint/tendermint/blob/7cdf560173dee6773b80d1c574a06489d4c394fe/types/params.go#L49
[default-consensus-params]: https://github.com/tendermint/tendermint/blob/7cdf560173dee6773b80d1c574a06489d4c394fe/types/params.go#L79
[current-timeout-defaults]: https://github.com/tendermint/tendermint/blob/7cdf560173dee6773b80d1c574a06489d4c394fe/config/config.go#L955
[config-toml]: https://github.com/tendermint/tendermint/blob/5cc980698a3402afce76b26693ab54b8f67f038b/config/toml.go#L425-L440
[cosmos-sdk-consensus-params]: https://github.com/cosmos/cosmos-sdk/issues/6197
[time-param-validation]: https://github.com/tendermint/tendermint/blob/7cdf560173dee6773b80d1c574a06489d4c394fe/config/config.go#L1038
[tendermint-issue-5911-comment]: https://github.com/tendermint/tendermint/issues/5911#issuecomment-973560381
[spec-issue-359]: https://github.com/tendermint/spec/issues/359
[arxiv-paper]: https://arxiv.org/pdf/1807.04938.pdf
[soft-upgrades]: https://github.com/tendermint/spec/pull/222
[rfc-009]: https://github.com/tendermint/tendermint/pull/7524

View File

@@ -1,684 +0,0 @@
# ADR 075: RPC Event Subscription Interface
## Changelog
- 26-Jan-2022: Marked accepted.
- 22-Jan-2022: Updated and expanded (@creachadair).
- 20-Nov-2021: Initial draft (@creachadair).
---
## Status
Accepted
---
## Background & Context
For context, see [RFC 006: Event Subscription][rfc006].
The [Tendermint RPC service][rpc-service] permits clients to subscribe to the
event stream generated by a consensus node. This allows clients to observe the
state of the consensus network, including details of the consensus algorithm
state machine, proposals, transaction delivery, and block completion. The
application may also attach custom key-value attributes to events to expose
application-specific details to clients.
The event subscription API in the RPC service currently comprises three methods:
1. `subscribe`: A request to subscribe to the events matching a specific
[query expression][query-grammar]. Events can be filtered by their key-value
attributes, including custom attributes provided by the application.
2. `unsubscribe`: A request to cancel an existing subscription based on its
query expression.
3. `unsubscribe_all`: A request to cancel all existing subscriptions belonging
to the client.
There are some important technical and UX issues with the current RPC event
subscription API. The rest of this ADR outlines these problems in detail, and
proposes a new API scheme intended to address them.
### Issue 1: Persistent connections
To subscribe to a node's event stream, a client needs a persistent connection
to the node. Unlike the other methods of the service, for which each call is
serviced by a short-lived HTTP round trip, subscription delivers a continuous
stream of events to the client by hijacking the HTTP channel for a websocket.
The stream (and hence the HTTP request) persists until either the subscription
is explicitly cancelled, or the connection is closed.
There are several problems with this API:
1. **Expensive per-connection state**: The server must maintain a substantial
amount of state per subscriber client:
- The current implementation uses a [WebSocket][ws] for each active
subscriber. The connection must be maintained even if there are no
matching events for a given client.
The server can drop idle connections to save resources, but doing so
terminates all subscriptions on those connections and forces those clients
to re-connect, adding additional resource churn for the server.
- In addition, the server maintains a separate buffer of undelivered events
for each client. This is to reduce the dual risks that a client will miss
events, and that a slow client could "push back" on the publisher,
impeding the progress of consensus.
Because event traffic is quite bursty, queues can potentially take up a
lot of memory. Moreover, each subscriber may have a different filter
query, so the server winds up having to duplicate the same events among
multiple subscriber queues. Not only does this add memory pressure, but it
does so most at the worst possible time, i.e., when the server is already
under load from high event traffic.
2. **Operational access control is difficult**: The server's websocket
interface exposes _all_ the RPC service endpoints, not only the subscription
methods. This includes methods that allow callers to inject arbitrary
transactions (`broadcast_tx_*`) and evidence (`broadcast_evidence`) into the
network, remove transactions (`remove_tx`), and request arbitrary amounts of
chain state.
Filtering requests to the GET endpoint is straightforward: A reverse proxy
like [nginx][nginx] can easily filter methods by URL path. Filtering POST
requests takes a bit more work, but can be managed with a filter program
that speaks [FastCGI][fcgi] and parses JSON-RPC request bodies.
Filtering the websocket interface requires a dedicated proxy implementation.
Although nginx can [reverse-proxy websockets][rp-ws], it does not support
filtering websocket traffic via FastCGI. The operator would need to either
implement a custom [nginx extension module][ng-xm] or build and run a
standalone proxy that implements the websocket protocol and filters each session. Apart
from the work, this also makes the system even more resource intensive, as
well as introducing yet another connection that could potentially time out
or stall on full buffers.
Even for the simple case of restricting access to only event subscription,
there is no easy solution currently: Once a caller has access to the
websocket endpoint, it has complete access to the RPC service.
### Issue 2: Inconvenient client API
The subscription interface has some inconvenient features for the client as
well as the server. These include:
1. **Non-standard protocol:** The RPC service is mostly [JSON-RPC 2.0][jsonrpc2],
but the subscription interface diverges from the standard.
In a standard JSON-RPC 2.0 call, the client initiates a request to the
server with a unique ID, and the server concludes the call by sending a
reply for that ID. The `subscribe` implementation, however, sends multiple
responses to the client's request:
- The client sends `subscribe` with some ID `x` and the desired query
- The server responds with ID `x` and an empty confirmation response.
- The server then (repeatedly) sends event result responses with ID `x`, one
for each item with a matching event.
Standard JSON-RPC clients will reject the subsequent replies, as they
announce a request ID (`x`) that is already complete. This means a caller
has to implement Tendermint-specific handling for these responses.
Moreover, the result format is different between the initial confirmation
and the subsequent responses. This means a caller has to implement special
logic for decoding the first response versus the subsequent ones.
2. **No way to detect data loss:** The subscriber connection can be terminated
for many reasons. Even ignoring ordinary network issues (e.g., packet loss):
- The server will drop messages and/or close the websocket if its write
buffer fills, or if the queue of undelivered matching events is not
drained fast enough. The client has no way to discover that messages were
dropped even if the connection remains open.
- Either the client or the server may close the websocket if the websocket
PING and PONG exchanges are not handled correctly, or frequently enough.
Even if correctly implemented, this may fail if the system is under high
load and cannot service those control messages in a timely manner.
When the connection is terminated, the server drops all the subscriptions
for that client (as if it had called `unsubscribe_all`). Even if the client
reconnects, any events published between the disconnect and the subsequent
re-connect and re-subscription will be silently lost, and the
client has no way to discover that it missed some relevant messages.
3. **No way to replay old events:** Even if a client knew it had missed some
events (due to a disconnection, for example), the API provides no way for
the client to "play back" events it may have missed.
4. **Large response sizes:** Some event data can be quite large, and there can
be substantial duplication across items. The API allows the client to select
_which_ events are reported, but has no way to control which parts of a
matching event it wishes to receive.
This can be costly on the server (which has to marshal those data into
JSON), the network, and the client (which has to unmarshal the result and
then pick through for the components that are relevant to it).
Besides being inefficient, this also contributes to some of the persistent
connection issues mentioned above, e.g., filling up the websocket write
buffer and forcing the server to queue potentially several copies of a large
value in memory.
5. **Client identity is tied to network address:** The Tendermint event API
identifies each subscriber by a (Client ID, Query) pair. In the RPC service,
the query is provided by the client, but the client ID is set to the TCP
address of the client (typically "host:port" or "ip:port").
This means that even if the server did _not_ drop subscriptions immediately
when the websocket connection is closed, a client may not be able to
reattach to its existing subscription. Dialing a new connection is likely
to result in a different port (and, depending on their own proxy setup,
possibly a different public IP).
In isolation, this problem would be easy to work around with a new
subscription parameter, but it would require several other changes to the
handling of event subscriptions for that workaround to become useful.
---
## Decision
To address the described problems, we will:
1. Introduce a new API for event subscription to the Tendermint RPC service.
The proposed API is described in [Detailed Design](#detailed-design) below.
2. This new API will target the Tendermint v0.36 release, during which the
current ("streaming") API will remain available as-is, but deprecated.
3. The streaming API will be entirely removed in release v0.37, which will
require all users of event subscription to switch to the new API.
> **Point for discussion:** Given that ABCI++ and PBTS are the main priorities
> for v0.36, it would be fine to slip the first phase of this work to v0.37.
> Unless there is a time problem, however, the proposed design does not disrupt
> the work on ABCI++ or PBTS, and will not increase the scope of breaking
> changes. Therefore the plan is to begin in v0.36 and slip only if necessary.
---
## Detailed Design
### Design Goals
Specific goals of this design include:
1. Remove the need for a persistent connection to each subscription client.
Subscribers should use the same HTTP request flow for event subscription
requests as for other RPC calls.
2. The server retains minimal state (possibly none) per-subscriber. In
particular:
- The server does not buffer unconsumed writes nor queue undelivered events
on a per-client basis.
- A client that stalls or goes idle does not cost the server any resources.
- Any event data that is buffered or stored is shared among _all_
subscribers, and is not duplicated per client.
3. Slow clients have no impact (or minimal impact) on the rate of progress of
the consensus algorithm, beyond the ambient overhead of servicing individual
RPC requests.
4. Clients can tell when they have missed events matching their subscription,
within some reasonable (configurable) window of time, and can "replay"
events within that window to catch up.
5. Nice to have: It should be easy to use the event subscription API from
existing standard tools and libraries, including command-line use for
testing and experimentation.
### Definitions
- The **event stream** of a node is a single, time-ordered, heterogeneous
stream of event items.
- Each **event item** comprises an **event datum** (for example, block header
metadata for a new-block event), and zero or more optional **events**.
- An **event** means the [ABCI `Event` data type][abci-event], which comprises
a string type and zero or more string key-value **event attributes**.
The use of the new terms "event item" and "event datum" is to avert confusion
between the values that are published to the event bus (what we call here
"event items") and the ABCI `Event` data type.
- The node assigns each event item a unique identifier string called a
**cursor**. A cursor must be unique among all events published by a single
node, but it is not required to be unique globally across nodes.
Cursors are time-ordered so that given event items A and B, if A was
published before B, then cursor(A) < cursor(B) in lexicographic order.
A minimum viable cursor implementation is a tuple consisting of a timestamp
and a sequence number (e.g., `16CCC798FB5F4670-0123`). However, it may also
be useful to append basic type information to a cursor, to allow efficient
filtering (e.g., `16CCC87E91869050-0091:BeginBlock`).
The initial implementation will use the minimum viable format.
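As an illustration only (the type, its fields, and the field widths here are assumptions, not a committed format), such cursors could be generated like this in Go:
```go
package eventlog

import (
	"fmt"
	"sync"
	"time"
)

// cursorSource hands out cursors whose lexicographic order matches
// publication order: a fixed-width hex timestamp followed by a
// fixed-width sequence number that breaks ties within one timestamp.
type cursorSource struct {
	mu  sync.Mutex
	seq int
}

func (s *cursorSource) next() string {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.seq++
	// Fixed-width fields make string comparison agree with numeric
	// ordering, e.g. "16CCC798FB5F4670-0123". A real implementation
	// would also need to handle sequence wraparound.
	return fmt.Sprintf("%016X-%04d", time.Now().UnixMicro(), s.seq%10000)
}
```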
### Discussion
The node maintains an **event log**, a shared ordered record of the events
published to its event bus within an operator-configurable time window. The
initial implementation will store the event log in-memory, and the operator
will be given two per-node configuration settings. Note, these names are
provisional:
- `[event-subscription] time-window`: A duration before present during which the
node will retain event items published. Setting this value to zero disables
event subscription.
- `[event-subscription] max-items`: A maximum number of event items that the
node will retain within the time window. If the number of items exceeds this
value, the node discards the oldest items in the window. Setting this value
to zero means that no limit is imposed on the number of items.
The node will retain all events within the time window, provided they do not
exceed the maximum number. These config parameters allow the operator to
loosely regulate how much memory and storage the node allocates to the event
log. The client can use the server reply to tell whether the events it wants
are still available from the event log.
The event log is shared among all subscribers to the node.
> **Discussion point:** Should events persist across node restarts?
>
> The current event API does not persist events across restarts, so this new
> design does not either. Note, however, that we may "spill" older event data
> to disk as a way of controlling memory use. Such usage is ephemeral, however,
> and does not need to be tracked as node data (e.g., it could be temp files).
### Query API
To retrieve event data, the client will call the (new) RPC method `events`.
The parameters of this method will correspond to the following Go types:
```go
type EventParams struct {
	// Optional filter spec. If nil or empty, all items are eligible.
	Filter *Filter `json:"filter"`

	// The maximum number of eligible results to return.
	// If zero or negative, the server will report a default number.
	MaxResults int `json:"max_results"`

	// Return only items after this cursor. If empty, the limit is just
	// before the beginning of the event log.
	After string `json:"after_item"`

	// Return only items before this cursor. If empty, the limit is just
	// after the head of the event log.
	Before string `json:"before_item"`

	// Wait for up to this long for events to be available.
	WaitTime time.Duration `json:"wait_time"`
}

type Filter struct {
	Query string `json:"query"`
}
```
> **Discussion point:** The initial implementation will not cache filter
> queries for the client. If this turns out to be a performance issue in
> production, the service can keep a small shared cache of compiled queries.
> Given the improvements from #7319 et seq., this should not be necessary.
> **Discussion point:** For the initial implementation, the new API will use
> the existing query language as-is. Future work may extend the Filter message
> with a more structured and/or expressive query surface, but that is beyond
> the scope of this design.
The semantics of the request are as follows: An item in the event log is
**eligible** for a query if:
- It is newer than the `after_item` cursor (if set).
- It is older than the `before_item` cursor (if set).
- It matches the filter (if set).
Among the eligible items in the log, the server returns up to `max_results` of
the newest items, in reverse order of cursor. If `max_results` is unset the
server chooses a number to return, and will cap `max_results` at a sensible
limit.
The `wait_time` parameter is used to effect polling. If `before_item` is empty,
the server will wait for up to `wait_time` for additional items, if there are
fewer than `max_results` eligible results in the log. If `wait_time` is zero,
the server will return whatever eligible items are available immediately.
If `before_item` is non-empty, `wait_time` is ignored: new results are only added
to the head of the log, so there is no need to wait. This allows the client to
poll for new data, and "page" backward through matching event items. This is
discussed in more detail below.
The server will set a sensible cap on the maximum `wait_time`, overriding
client-requested intervals longer than that.
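Taken together, these request semantics reduce to a small eligibility check. The following sketch assumes the `EventParams` type above, the `EventItem` type defined below, string-compared cursors, and a `matches` function compiled from the filter query (all our assumptions, not a committed implementation):
```go
// eligible reports whether item falls inside the cursor window requested
// by p and satisfies the filter query, if one was given.
func eligible(item *EventItem, p *EventParams, matches func(*EventItem) bool) bool {
	if p.After != "" && item.Cursor <= p.After {
		return false // not newer than after_item
	}
	if p.Before != "" && item.Cursor >= p.Before {
		return false // not older than before_item
	}
	return p.Filter == nil || matches(item)
}
```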
A successful reply from the `events` request corresponds to the following Go
types:
```go
type EventReply struct {
	// The items matching the request parameters, from newest
	// to oldest, if any were available within the timeout.
	Items []*EventItem `json:"items"`

	// This is true if there is at least one older matching item
	// available in the log that was not returned.
	More bool `json:"more"`

	// The cursor of the oldest item in the log at the time of this reply,
	// or "" if the log is empty.
	Oldest string `json:"oldest_item"`

	// The cursor of the newest item in the log at the time of this reply,
	// or "" if the log is empty.
	Newest string `json:"newest_item"`
}

type EventItem struct {
	// The cursor of this item.
	Cursor string `json:"cursor"`

	// The encoded event data for this item.
	// The type identifies the structure of the value.
	Data struct {
		Type  string          `json:"type"`
		Value json.RawMessage `json:"value"`
	} `json:"data"`
}
```
The `oldest_item` and `newest_item` fields of the reply report the cursors of
the oldest and newest items (of any kind) recorded in the event log at the time
of the reply, or are `""` if the log is empty.
The `data` field contains the type-specific event datum; any ABCI events that
were defined are carried within the datum rather than in a separate field (see the discussion point below).
> **Discussion point**: Based on [issue #7273][i7273], I did not include a
> separate field in the response for the ABCI events, since it duplicates data
> already stored elsewhere in the event data.
The semantics of the reply are as follows:
- If `items` is non-empty:
- Items are ordered from newest to oldest.
- If `more` is true, there is at least one additional, older item in the
event log that was not returned (in excess of `max_results`).
In this case the client can fetch the next page by setting `before_item`
in a new request, to the cursor of the oldest item fetched (i.e., the
last one in `items`).
- Otherwise (if `more` is false), all the matching results have been
reported (pagination is complete).
- The first element of `items` identifies the newest item considered.
Subsequent poll requests can set `after_item` to this cursor to skip
items that were already retrieved.
- If `items` is empty:
- If the `before_item` was set in the request, there are no further
eligible items for this query in the log (pagination is complete).
This is just a safety case; the client can detect this without issuing
another call by consulting the `more` field of the previous reply.
- If the `before_item` was empty in the request, no eligible items were
available before the `wait_time` expired. The client may poll again to
wait for more event items.
A client can store cursor values to detect data loss and to recover from
crashes and connectivity issues:
- After a crash, the client requests events after the newest cursor it has
seen. If the reply indicates that cursor is no longer in range, the client
may (conservatively) conclude some event data may have been lost.
- On the other hand, if it _is_ in range, the client can then page back through
the results that it missed, and then resume polling. As long as its recovery
cursor does not age out before it finishes, the client can be sure it has all
the relevant results.
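As a sketch of how a client might combine polling with this loss detection (the `eventsClient` interface and `handle` callback are stand-ins of our own, not an existing API; paging back via `before_item` when `more` is set is omitted for brevity):
```go
package client

import (
	"context"
	"fmt"
	"time"
)

type eventsClient interface {
	Events(ctx context.Context, req *EventParams) (*EventReply, error)
}

// follow long-polls the events endpoint, remembering the newest cursor
// seen so that a gap can be detected after a crash or disconnect.
func follow(ctx context.Context, c eventsClient, query string, handle func(*EventItem)) error {
	var newest string
	for ctx.Err() == nil {
		rsp, err := c.Events(ctx, &EventParams{
			Filter:   &Filter{Query: query},
			After:    newest,
			WaitTime: 30 * time.Second,
		})
		if err != nil {
			return err
		}
		// If the resume cursor has aged out of the log, matching events
		// may have been lost; the caller can page back via before_item
		// to recover whatever the log still retains.
		if newest != "" && rsp.Oldest != "" && newest < rsp.Oldest {
			return fmt.Errorf("possible gap: events after cursor %q are gone", newest)
		}
		for i := len(rsp.Items) - 1; i >= 0; i-- { // items arrive newest-first
			handle(rsp.Items[i])
		}
		if len(rsp.Items) > 0 {
			newest = rsp.Items[0].Cursor
		}
	}
	return ctx.Err()
}
```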
### Other Notes
- The new API supports two general "modes" of operation:
1. In ordinary operation, clients will **long-poll** the head of the event
log for new events matching their criteria (by setting a `wait_time` and
no `before_item`).
2. If there are more events than the client requested, or if the client needs
to read older events to recover from a stall or crash, clients will
**page** backward through the event log (by setting `before_item` and
possibly `after_item`).
- While the new API requires explicit polling by the client, it makes better
use of the node's existing HTTP infrastructure (e.g., connection pools).
Moreover, the direct implementation is easier to use from standard tools and
client libraries for HTTP and JSON-RPC.
Explicit polling does shift the burden of timeliness to the client. That is
arguably preferable, however, given that the RPC service is ancillary to the
node's primary goal, viz., consensus. The details of polling can be easily
hidden from client applications with simple libraries.
- The format of a cursor is considered opaque to the client. Clients must not
parse cursor values, but they may rely on their ordering properties.
- To maintain the event log, the server must prune items outside the time
window and in excess of the item limit.
The initial implementation will do this by checking the tail of the event log
after each new item is published. If the number of items in the log exceeds
the item limit, it will delete oldest items until the log is under the limit;
then discard any older than the time window before present.
To minimize coordination interference between the publisher (the event bus)
and the subscribers (the `events` service handlers), the event log will be
stored as a persistent linear queue with shared structure (a cons list). A
single reader-writer mutex will guard the "head" of the queue where new
items are published (a code sketch follows the diagrams below):
- **To publish a new item**, the publisher acquires the write lock, conses a
new item to the front of the existing queue, and replaces the head pointer
with the new item.
- **To scan the queue**, a reader acquires the read lock, captures the head
pointer, and then releases the lock. The rest of its request can be served
without holding a lock, since the queue structure will not change.
When a reader wants to wait, it will yield the lock and wait on a condition
that is signaled when the publisher swings the pointer.
- **To prune the queue**, the publisher (who is the sole writer) will track
the queue length and the age of the oldest item separately. When the
length and/or age exceeds the configured bounds, it will construct a new
queue spine on the same items, discarding out-of-band values.
Pruning can be done while the publisher already holds the write lock, or
could be done outside the lock entirely: Once the new queue is constructed,
the lock can be re-acquired to swing the pointer. This costs some extra
allocations for the cons cells, but avoids duplicating any event items.
The pruning step is a simple linear scan down the first (up to) max-items
elements of the queue, to find the breakpoint of age and length.
Moreover, the publisher can amortize the cost of pruning by item count, if
necessary, by pruning length "more aggressively" than the configuration
requires (e.g., reducing to 3/4 of the maximum rather than 1/1).
The state of the event log before the publisher acquires the lock:
![Before publish and pruning](./img/adr-075-log-before.png)
After the publisher has added a new item and pruned old ones:
![After publish and pruning](./img/adr-075-log-after.png)
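To make the shared-structure queue concrete, here is a minimal sketch; all names are ours, the `EventItem` type from the query API is assumed, and this is not the eventual Tendermint implementation:
```go
package eventlog

import (
	"sync"
	"time"
)

type entry struct {
	item *EventItem
	at   time.Time
	next *entry // next-older entry; nil at the tail
}

type eventLog struct {
	mu       sync.RWMutex
	wakeup   *sync.Cond // signaled when the head pointer moves
	head     *entry
	length   int
	maxItems int
	window   time.Duration
}

func newEventLog(maxItems int, window time.Duration) *eventLog {
	lg := &eventLog{maxItems: maxItems, window: window}
	lg.wakeup = sync.NewCond(lg.mu.RLocker())
	return lg
}

// publish conses a new entry onto the head, prunes the tail if needed,
// and wakes any readers waiting for new items.
func (lg *eventLog) publish(item *EventItem) {
	lg.mu.Lock()
	defer lg.mu.Unlock()
	lg.head = &entry{item: item, at: time.Now(), next: lg.head}
	lg.length++
	lg.pruneLocked()
	lg.wakeup.Broadcast()
}

// scan captures the head under the read lock. The snapshot can then be
// walked without holding any lock, because entries are never mutated
// once published.
func (lg *eventLog) scan() *entry {
	lg.mu.RLock()
	defer lg.mu.RUnlock()
	return lg.head
}

// pruneLocked rebuilds the spine on the surviving items rather than
// mutating shared cells, so readers holding an older head are unaffected.
// The caller must hold the write lock.
func (lg *eventLog) pruneLocked() {
	cutoff := time.Now().Add(-lg.window)
	var keep []*entry
	for cur := lg.head; cur != nil; cur = cur.next {
		if cur.at.Before(cutoff) || (lg.maxItems > 0 && len(keep) >= lg.maxItems) {
			break
		}
		keep = append(keep, cur)
	}
	if len(keep) == lg.length {
		return // nothing aged out or over the limit
	}
	var head *entry
	for i := len(keep) - 1; i >= 0; i-- {
		head = &entry{item: keep[i].item, at: keep[i].at, next: head}
	}
	lg.head, lg.length = head, len(keep)
}
```
A waiting reader would hold the read lock and block on the condition variable until `publish` broadcasts, matching the yield-and-wait behavior described above.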
### Migration Plan
This design requires that clients eventually migrate to the new event
subscription API, but provides a full release cycle with both APIs in place to
make this burden more tractable. The migration strategy is broadly:
**Phase 1**: Release v0.36.
- Implement the new `events` endpoint, keeping the existing methods as they are.
- Update the Go clients to support the new `events` endpoint, and handle polling.
- Update the old endpoints to log annoyingly about their own deprecation.
- Write tutorials about how to migrate client usage.
At or shortly after release, we should proactively update the Cosmos SDK to use
the new API, to remove a disincentive to upgrading.
**Phase 2**: Release v0.37
- During development, we should actively seek out any existing users of the
streaming event subscription API and help them migrate.
- Possibly also: Spend some time writing clients for JS, Rust, et al.
- Release: Delete the old implementation and all the websocket support code.
> **Discussion point**: Even though the plan is to keep the existing service,
> we might take the opportunity to restrict the websocket endpoint to _only_
> the event streaming service, removing the other endpoints. To minimize the
> disruption for users in the v0.36 cycle, I have decided not to do this for
> the first phase.
>
> If we wind up pushing this design into v0.37, however, we should re-evaluate
> this partial turn-down of the websocket.
### Future Work
- This design does not immediately address the problem of allowing the client
to control which data are reported back for event items. That concern is
deferred to future work. However, it would be straightforward to extend the
filter and/or the request parameters to allow more control.
- The node currently stores a subset of event data (specifically the block and
transaction events) for use in reindexing. While these data are redundant
with the event log described in this document, they are not sufficient to
cover event subscription, as they omit other event types.
In the future we should investigate consolidating or removing event data from
the state store entirely. For now this issue is out of scope for purposes of
updating the RPC API. We may be able to piggyback on the database unification
plans (see [RFC 001][rfc001]) to store the event log separately, so its
pruning policy does not need to be tied to the block and state stores.
- This design reuses the existing filter query language from the old API. In
the future we may want to use a more structured and/or expressive query. The
Filter object can be extended with more fields as needed to support this.
- Some users have trouble communicating with the RPC service because of
configuration problems like improperly-set CORS policies. While this design
does not address those issues directly, we might want to revisit how we set
policies in the RPC service to make it less susceptible to confusing errors
caused by misconfiguration.
---
## Consequences
- Reduces the number of transport options for RPC. Supports [RFC 002][rfc002].
- Removes the primary non-standard use of JSON-RPC.
- Forces clients to migrate to a different API (eventually).
- API requires clients to poll, but this reduces client state on the server.
- We have to maintain both implementations for a whole release, but this
gives clients time to migrate.
---
## Alternative Approaches
The following alternative approaches were considered:
1. **Leave it alone.** Since existing tools mostly already work with the API as
it stands today, we could leave it alone and do our best to improve its
performance and reliability.
Based on many issues reported by users and node operators (e.g.,
[#3380][i3380], [#6439][i6439], [#6729][i6729], [#7247][i7247]), the
problems described here affect even the existing use that works. Investing
further incremental effort in the existing API is unlikely to address these
issues.
2. **Design a better streaming API.** Instead of polling, we might try to
design a better "streaming" API for event subscription.
A significant advantage of switching away from streaming is to remove the
need for persistent connections between the node and subscribers. A new
streaming protocol design would lose that advantage, and would still need a
way to let clients recover and replay.
This approach might look better if we decided to use a different protocol
for event subscription, say gRPC instead of JSON-RPC. That choice, however,
would be just as breaking for existing clients, for marginal benefit.
Moreover, this option increases both the complexity and the resource cost on
the node implementation.
Given that resource consumption and complexity are important considerations,
this option was not chosen.
3. **Defer to an external event broker.** We might remove the entire event
subscription infrastructure from the node, and define an optional interface
to allow the node to publish all its events to an external event broker,
such as Apache Kafka.
This has the advantage of greatly simplifying the node, but at a great cost
to the node operator: To enable event subscription in this design, the
operator has to stand up and maintain a separate process alongside
the node, and configuration changes would have to be coordinated across
both.
Moreover, this approach would be highly disruptive to existing client use,
and migration would probably require switching to third-party libraries.
Despite the potential benefits for the node itself, the costs to operators
and clients seems too large for this to be the best option.
Publishing to an external event broker might be a worthwhile future project,
if there is any demand for it. That decision is out of scope for this design,
as it interacts with the design of the indexer as well.
---
## References
- [RFC 006: Event Subscription][rfc006]
- [Tendermint RPC service][rpc-service]
- [Event query grammar][query-grammar]
- [RFC 6455: The WebSocket protocol][ws]
- [JSON-RPC 2.0 Specification][jsonrpc2]
- [Nginx proxy server][nginx]
- [Proxying websockets][rp-ws]
- [Extension modules][ng-xm]
- [FastCGI][fcgi]
- [RFC 001: Storage Engines & Database Layer][rfc001]
- [RFC 002: Interprocess Communication in Tendermint][rfc002]
- Issues:
- [rpc/client: test that client resubscribes upon disconnect][i3380] (#3380)
- [Too high memory usage when creating many events subscriptions][i6439] (#6439)
- [Tendermint emits events faster than clients can pull them][i6729] (#6729)
- [indexer: unbuffered event subscription slow down the consensus][i7247] (#7247)
- [rpc: remove duplication of events when querying][i7273] (#7273)
[rfc006]: https://github.com/tendermint/tendermint/blob/master/docs/rfc/rfc-006-event-subscription.md
[rpc-service]: https://docs.tendermint.com/master/rpc
[query-grammar]: https://pkg.go.dev/github.com/tendermint/tendermint@master/internal/pubsub/query/syntax
[ws]: https://datatracker.ietf.org/doc/html/rfc6455
[jsonrpc2]: https://www.jsonrpc.org/specification
[nginx]: https://nginx.org/en/docs/
[fcgi]: http://www.mit.edu/~yandros/doc/specs/fcgi-spec.html
[rp-ws]: https://nginx.org/en/docs/http/websocket.html
[ng-xm]: https://www.nginx.com/resources/wiki/extending/
[abci-event]: https://pkg.go.dev/github.com/tendermint/tendermint/abci/types#Event
[rfc001]: https://github.com/tendermint/tendermint/blob/master/docs/rfc/rfc-001-storage-engine.rst
[rfc002]: https://github.com/tendermint/tendermint/blob/master/docs/rfc/rfc-002-ipc-ecosystem.md
[i3380]: https://github.com/tendermint/tendermint/issues/3380
[i6439]: https://github.com/tendermint/tendermint/issues/6439
[i6729]: https://github.com/tendermint/tendermint/issues/6729
[i7247]: https://github.com/tendermint/tendermint/issues/7247
[i7273]: https://github.com/tendermint/tendermint/issues/7273

Binary files not shown: the removed ADR-075 diagrams docs/architecture/img/adr-075-log-before.png (22 KiB) and adr-075-log-after.png (15 KiB).

View File

@@ -61,7 +61,7 @@ Here are some relevant facts about TCP:
![tcp](../imgs/tcp-window.png)
In order to have performant TCP connections under the conditions created in Tendermint, we've created the `mconnection`, or the multiplexing connection. It is our own protocol built on top of TCP. It lets us reuse TCP connections to minimize overhead, and it keeps the window size high by sending auxiliary messages when necessary.
The `mconnection` is represented by a struct, which contains a batch of messages, read and write buffers, and a map of channel IDs to reactors. It communicates with TCP via file descriptors, which it can write to. There is one `mconnection` per peer connection.

docs/package-lock.json (generated): 3,302-line diff suppressed because it is too large.

View File

@@ -4,10 +4,10 @@
"description": "Tendermint Core Documentation",
"main": "index.js",
"dependencies": {
"vuepress-theme-cosmos": "^1.0.183"
"vuepress-theme-cosmos": "^1.0.182"
},
"devDependencies": {
"watchpack": "^2.3.1"
"watchpack": "^2.3.0"
},
"scripts": {
"preserve": "./pre.sh",

View File

@@ -44,7 +44,5 @@ sections.
 - [RFC-004: E2E Test Framework Enhancements](./rfc-004-e2e-framework.md)
 - [RFC-005: Event System](./rfc-005-event-system.rst)
 - [RFC-006: Event Subscription](./rfc-006-event-subscription.md)
-- [RFC-007: Deterministic Proto Byte Serialization](./rfc-007-deterministic-proto-bytes.md)
-- [RFC-008: Don't Panic](./rfc-008-don't-panic.md)
 <!-- - [RFC-NNN: Title](./rfc-NNN-title.md) -->

View File

@@ -1,140 +0,0 @@
# RFC 007: Deterministic Proto Byte Serialization
## Changelog
- 09-Dec-2021: Initial draft (@williambanfield).
## Abstract
This document discusses the issue of stable byte-representation of serialized messages
within Tendermint and describes a few possible routes that could be taken to address it.
## Background
We use the byte representations of wire-format proto messages to produce
and verify hashes of data within the Tendermint codebase as well as for
producing and verifying cryptographic signatures over these signed bytes.
The protocol buffer [encoding spec][proto-spec-encoding] does not guarantee that the byte representation
of a protocol buffer message will be the same between two calls to an encoder.
While there is a mode to force the encoder to produce the same byte representation
of messages within a single binary, these guarantees are not good enough for our
use case in Tendermint. We require multiple different versions of a binary running
Tendermint to be able to inter-operate. Additionally, we require that multiple different
systems written in _different languages_ be able to participate in different aspects
of the protocols of Tendermint and be able to verify the integrity of the messages
they each produce.
While this has not yet created a problem that we know of in a running network, we should
make sure to provide stronger guarantees around the serialized representation of the messages
used within the Tendermint consensus algorithm to prevent any issue from occurring.
## Discussion
Proto has the following points of variability that can produce non-deterministic byte representation:
1. Encoding order of fields within a message.
Proto allows fields to be encoded in any order and even be repeated.
2. Encoding order of elements of a repeated field.
`repeated` fields in a proto message can be serialized in any order.
3. Presence or absence of default values.
Types in proto have defined default values similar to Go's zero values.
Writing or omitting a default value are both legal ways of encoding a wire message.
4. Serialization of 'unknown' fields.
Unknown fields can be present when a message is created by a binary with a newer
version of the proto that contains fields that the deserializer in a different
binary does not yet know about. Deserializers in binaries that do not know about the field
will maintain the bytes of the unknown field but not place them into the deserialized structure.
We have a few options to consider when producing this stable representation.
### Options for deterministic byte representation
#### Use only compliant serializers and constrain field usage
According to [Cosmos-SDK ADR-27][cosmos-sdk-adr-27], when message types obey a simple
set of rules, gogoproto produces a consistent byte representation of serialized messages.
This seems promising, although more research is needed to guarantee gogoproto always
produces a consistent set of bytes on serialized messages. This would solve the problem
within Tendermint as written in Go, but would require ensuring that there are similar
serializers written in other languages that produce the same output as gogoproto.
#### Reorder serialized bytes to ensure determinism.
The serialized form of a proto message can be transformed into a canonical representation
by applying simple rules to the serialized bytes. Re-ordering the serialized bytes
would allow Tendermint to produce a canonical byte representation without having to
simultaneously maintain a custom proto marshaller.
This could be implemented in many languages as a function that performs the
following steps when producing bytes to sign or hash:
1. Exclude all data from unknown fields from the bytes being hashed.
Tendermint should not run into a case where it needs to verify the integrity of
data with unknown fields for the following reasons:
The purpose of checking hash equality within Tendermint is to ensure that
its local copy of data matches the data that the network agreed on. There should
therefore not be a case where a process is checking hash equality using data that it did not expect
to receive. What the data represent may be opaque to the process, such as when checking the
transactions in a block, _but the process will still have expected to receive this data_,
despite not understanding what their internal structure is. It's not clear what it would
mean to verify that a block contains data that a process does not know about.
The same reasoning applies to signature verification within Tendermint. Processes
verify a digital signature over a set of bytes by locally reconstructing the
data structure that the signature was produced over, using the process's local data.
2. Reorder all message fields into tag-sorted order.
Tag-sorting the top-level fields places all fields with the same tag
adjacent to each other within the serialized representation.
3. Reorder the contents of all `repeated` fields into lexicographically sorted order.
`repeated` fields appear in a message with the same tag but different
contents, so lexicographic sorting produces a stable ordering of
fields with the same tag.
4. Delete all default values from the byte representation.
Encoders may include default values or omit them. Most encoders appear to omit them,
but we may wish to delete them just to be safe.
5. Recursively perform these operations on any length-delimited subfields.
Length-delimited fields may contain messages, strings, or raw bytes, and
it is not possible to know which kind of data such a field represents.
A 'string' may happen to have the same structure as an embedded message, and we cannot
disambiguate the two. For this reason, we must apply these same rules to all subfields that
may contain messages. Because we cannot know whether we have mangled an interior 'string',
this data should never be deserialized or used for anything beyond hashing.
A **prototype** implementation by @creachadair of this can be found in [the wirepb repo][wire-pb].
This could be implemented in multiple languages more simply than ensuring that there are
canonical proto serializers that match in each language.
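For a sense of what such a transformation involves, here is a sketch of steps 2 and 3 alone, written by us on top of Go's `protowire` package; it is not the `wirepb` prototype, and it neither recurses into subfields nor strips default values:
```go
package canonical

import (
	"bytes"
	"sort"

	"google.golang.org/protobuf/encoding/protowire"
)

// reorder returns a copy of a wire-format message whose top-level fields
// are sorted by tag number, with fields sharing a tag (repeated fields)
// ordered lexicographically by their encoded bytes.
func reorder(msg []byte) ([]byte, error) {
	type field struct {
		num protowire.Number
		raw []byte // complete tag+value bytes for one field
	}
	var fields []field
	for len(msg) > 0 {
		num, _, n := protowire.ConsumeField(msg)
		if n < 0 {
			return nil, protowire.ParseError(n)
		}
		fields = append(fields, field{num: num, raw: msg[:n]})
		msg = msg[n:]
	}
	// Stable sort: by tag first, then by encoded contents for repeated fields.
	sort.SliceStable(fields, func(i, j int) bool {
		if fields[i].num != fields[j].num {
			return fields[i].num < fields[j].num
		}
		return bytes.Compare(fields[i].raw, fields[j].raw) < 0
	})
	var out []byte
	for _, f := range fields {
		out = append(out, f.raw...)
	}
	return out, nil
}
```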
### Future work
We should add clear documentation to the Tendermint codebase at every place where we
compare hashes of proto messages or use proto-serialized bytes to produce
digital signatures, noting that care has been taken to ensure the hashes
are computed properly.
### References
[proto-spec-encoding]: https://developers.google.com/protocol-buffers/docs/encoding
[spec-issue]: https://github.com/tendermint/tendermint/issues/5005
[cosmos-sdk-adr-27]: https://github.com/cosmos/cosmos-sdk/blob/master/docs/architecture/adr-027-deterministic-protobuf-serialization.md
[cer-proto-3]: https://github.com/regen-network/canonical-proto3
[wire-pb]: https://github.com/creachadair/wirepb

Some files were not shown because too many files have changed in this diff.