Mirror of https://github.com/tendermint/tendermint.git, synced 2026-01-22 04:32:51 +00:00

Compare commits: wb/remove-...wb/undo-qu (3 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 8dfd011a7f | |
| | d9c9f3277d | |
| | da767e732c | |
.github/CODEOWNERS (vendored, 5 changes)

@@ -7,7 +7,4 @@
 # global owners are only requested if there isn't a more specific
 # codeowner specified below. For this reason, the global codeowners
 # are often repeated in package-level definitions.
-* @ebuchman @cmwaters @tychoish @williambanfield @creachadair @sergio-mena @jmalicevic @thanethomson @ancazamfir
-
-# Spec related changes can be approved by the protocol design team
-/spec @josef-widder @milosevic @cason
+* @ebuchman @cmwaters @tychoish @williambanfield @creachadair
.github/workflows/build.yml (vendored, 6 changes)

@@ -23,7 +23,7 @@ jobs:
       - uses: actions/setup-go@v2
         with:
           go-version: "1.17"
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v2.4.0
       - uses: technote-space/get-diff-action@v6.0.1
         with:
           PATTERNS: |
@@ -44,7 +44,7 @@ jobs:
       - uses: actions/setup-go@v2
         with:
           go-version: "1.17"
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v2.4.0
       - uses: technote-space/get-diff-action@v6.0.1
         with:
           PATTERNS: |
@@ -66,7 +66,7 @@ jobs:
       - uses: actions/setup-go@v2
         with:
           go-version: "1.17"
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v2.4.0
      - uses: technote-space/get-diff-action@v6.0.1
         with:
           PATTERNS: |
.github/workflows/docker.yml (vendored, 4 changes)

@@ -13,7 +13,7 @@ jobs:
   build:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v2.4.0
       - name: Prepare
         id: prep
         run: |
@@ -43,7 +43,7 @@ jobs:
 
       - name: Login to DockerHub
         if: ${{ github.event_name != 'pull_request' }}
-        uses: docker/login-action@v1.14.1
+        uses: docker/login-action@v1.12.0
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}
.github/workflows/e2e-manual.yml (vendored, 2 changes)

@@ -19,7 +19,7 @@ jobs:
         with:
           go-version: '1.17'
 
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v2.4.0
 
       - name: Build
         working-directory: test/e2e
.github/workflows/e2e-nightly-34x.yml (vendored, 2 changes)

@@ -24,7 +24,7 @@ jobs:
         with:
           go-version: '1.17'
 
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v2.4.0
         with:
           ref: 'v0.34.x'
 
.github/workflows/e2e-nightly-35x.yml (vendored, 2 changes)

@@ -24,7 +24,7 @@ jobs:
         with:
           go-version: '1.17'
 
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v2.4.0
         with:
           ref: 'v0.35.x'
 
.github/workflows/e2e-nightly-master.yml (vendored, 2 changes)

@@ -23,7 +23,7 @@ jobs:
         with:
           go-version: '1.17'
 
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v2.4.0
 
       - name: Build
         working-directory: test/e2e
.github/workflows/e2e.yml (vendored, 2 changes)

@@ -17,7 +17,7 @@ jobs:
       - uses: actions/setup-go@v2
         with:
           go-version: '1.17'
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v2.4.0
       - uses: technote-space/get-diff-action@v6.0.1
         with:
           PATTERNS: |
.github/workflows/fuzz-nightly.yml (vendored, 2 changes)

@@ -17,7 +17,7 @@ jobs:
         with:
           go-version: '1.17'
 
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v2.4.0
 
       - name: Install go-fuzz
         working-directory: test/fuzz
.github/workflows/jepsen.yml (vendored, 2 changes)

@@ -46,7 +46,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout the Jepsen repository
-        uses: actions/checkout@v3
+        uses: actions/checkout@v2.4.0
         with:
           repository: 'tendermint/jepsen'
 
.github/workflows/linkchecker.yml (vendored, 2 changes)

@@ -6,7 +6,7 @@ jobs:
   markdown-link-check:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v2.4.0
       - uses: gaurav-nelson/github-action-markdown-link-check@1.0.13
         with:
           folder-path: "docs"
.github/workflows/lint.yml (vendored, 9 changes)

@@ -13,20 +13,17 @@ jobs:
     runs-on: ubuntu-latest
     timeout-minutes: 8
     steps:
-      - uses: actions/checkout@v3
       - uses: actions/setup-go@v2
         with:
           go-version: '^1.17'
+      - uses: actions/checkout@v2.4.0
       - uses: technote-space/get-diff-action@v6.0.1
         with:
           PATTERNS: |
             **/**.go
             go.mod
             go.sum
-      - uses: golangci/golangci-lint-action@v3.1.0
+      - uses: golangci/golangci-lint-action@v2.5.2
         with:
           # Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version.
-          version: v1.44
+          version: v1.42.1
           args: --timeout 10m
+          github-token: ${{ secrets.github_token }}
         if: env.GIT_DIFF
.github/workflows/linter.yml (vendored, 2 changes)

@@ -19,7 +19,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout Code
-        uses: actions/checkout@v3
+        uses: actions/checkout@v2.4.0
       - name: Lint Code Base
         uses: docker://github/super-linter:v4
         env:
.github/workflows/markdown-links.yml (vendored, 35 changes)

@@ -1,19 +1,18 @@
-# TODO: Re-enable when https://github.com/gaurav-nelson/github-action-markdown-link-check/pull/126 lands.
-# Currently disabled until all links have been fixed
-# name: Check Markdown links
-
-#name: Check Markdown links
-#
-#on:
-#  push:
-#    branches:
-#      - master
-#  pull_request:
-#    branches: [master]
-#
-#jobs:
-#  markdown-link-check:
-#    runs-on: ubuntu-latest
-#    steps:
-#      - uses: actions/checkout@v3
-#      - uses: gaurav-nelson/github-action-markdown-link-check@v1.0.13
-#        with:
-#          check-modified-files-only: 'yes'
+# on:
+#   push:
+#     branches:
+#       - master
+#   pull_request:
+#     branches: [master]
+
+# jobs:
+#   markdown-link-check:
+#     runs-on: ubuntu-latest
+#     steps:
+#       - uses: actions/checkout@master
+#       - uses: gaurav-nelson/github-action-markdown-link-check@1.0.13
+#         with:
+#           check-modified-files-only: 'yes'
.github/workflows/proto-check.yml (vendored, new file, 24 lines)

@@ -0,0 +1,24 @@
+name: Proto Check
+# Protobuf runs buf (https://buf.build/) lint and check-breakage
+# This workflow is only run when a file in the proto directory
+# has been modified.
+on:
+  workflow_dispatch:  # allow running workflow manually
+  pull_request:
+    paths:
+      - "proto/*"
+jobs:
+  proto-lint:
+    runs-on: ubuntu-latest
+    timeout-minutes: 4
+    steps:
+      - uses: actions/checkout@v2.4.0
+      - name: lint
+        run: make proto-lint
+  proto-breakage:
+    runs-on: ubuntu-latest
+    timeout-minutes: 4
+    steps:
+      - uses: actions/checkout@v2.4.0
+      - name: check-breakage
+        run: make proto-check-breaking-ci
.github/workflows/proto-dockerfile.yml (vendored, new file, 64 lines)

@@ -0,0 +1,64 @@
+# This workflow (re)builds and pushes a Docker image containing the
+# protobuf build tools used by the other workflows.
+#
+# When making changes that require updates to the builder image, you
+# should merge the updates first and wait for this workflow to complete,
+# so that the changes will be available for the dependent workflows.
+#
+
+name: Build & Push Proto Builder Image
+on:
+  pull_request:
+    paths:
+      - "proto/*"
+  push:
+    branches:
+      - master
+    paths:
+      - "proto/*"
+  schedule:
+    # run this job once a month to receive any go or buf updates
+    - cron: "0 9 1 * *"
+
+env:
+  REGISTRY: ghcr.io
+  IMAGE_NAME: tendermint/docker-build-proto
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2.4.0
+      - name: Check out and assign tags
+        id: prep
+        run: |
+          DOCKER_IMAGE="${REGISTRY}/${IMAGE_NAME}"
+          VERSION=noop
+          if [[ "$GITHUB_REF" == "refs/tags/*" ]]; then
+            VERSION="${GITHUB_REF#refs/tags/}"
+          elif [[ "$GITHUB_REF" == "refs/heads/*" ]]; then
+            VERSION="$(echo "${GITHUB_REF#refs/heads/}" | sed -r 's#/+#-#g')"
+            if [[ "${{ github.event.repository.default_branch }}" = "$VERSION" ]]; then
+              VERSION=latest
+            fi
+          fi
+          TAGS="${DOCKER_IMAGE}:${VERSION}"
+          echo ::set-output name=tags::"${TAGS}"
+
+      - name: Set up docker buildx
+        uses: docker/setup-buildx-action@v1.6.0
+
+      - name: Log in to the container registry
+        uses: docker/login-action@v1.12.0
+        with:
+          registry: ${{ env.REGISTRY }}
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Build and publish image
+        uses: docker/build-push-action@v2.9.0
+        with:
+          context: ./proto
+          file: ./proto/Dockerfile
+          push: ${{ github.event_name != 'pull_request' }}
+          tags: ${{ steps.prep.outputs.tags }}
.github/workflows/proto-lint.yml (vendored, 21 changes, file deleted)

@@ -1,21 +0,0 @@
-name: Protobuf Lint
-on:
-  pull_request:
-    paths:
-      - 'proto/**'
-  push:
-    branches:
-      - master
-    paths:
-      - 'proto/**'
-
-jobs:
-  lint:
-    runs-on: ubuntu-latest
-    timeout-minutes: 5
-    steps:
-      - uses: actions/checkout@v3
-      - uses: bufbuild/buf-setup-action@v1.1.0
-      - uses: bufbuild/buf-lint-action@v1
-        with:
-          input: 'proto'
.github/workflows/release.yml (vendored, 2 changes)

@@ -12,7 +12,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v2.4.0
         with:
           fetch-depth: 0
 
.github/workflows/tests.yml (vendored, 4 changes)

@@ -19,7 +19,7 @@ jobs:
       - uses: actions/setup-go@v2
         with:
           go-version: "1.17"
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v2.4.0
       - uses: technote-space/get-diff-action@v6.0.1
         with:
           PATTERNS: |
@@ -41,7 +41,7 @@ jobs:
     runs-on: ubuntu-latest
     needs: tests
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v2.4.0
       - uses: technote-space/get-diff-action@v6.0.1
         with:
           PATTERNS: |
CHANGELOG.md (35 changes)

@@ -2,27 +2,6 @@
 
 Friendly reminder: We have a [bug bounty program](https://hackerone.com/cosmos).
 
-## v0.35.2
-
-February 28, 2022
-
-Special thanks to external contributors on this release: @ashcherbakov, @yihuang, @waelsy123
-
-### IMPROVEMENTS
-
-- [consensus] [\#7875](https://github.com/tendermint/tendermint/pull/7875) additional timing metrics. (@williambanfield)
-
-### BUG FIXES
-
-- [abci] [\#7990](https://github.com/tendermint/tendermint/pull/7990) revert buffer limit change. (@williambanfield)
-- [cli] [#7837](https://github.com/tendermint/tendermint/pull/7837) fix app hash in state rollback. (@yihuang)
-- [cli] [\#7869](https://github.com/tendermint/tendermint/pull/7869) Update unsafe-reset-all command to match release v35. (waelsy123)
-- [light] [\#7640](https://github.com/tendermint/tendermint/pull/7640) Light Client: fix absence proof verification (@ashcherbakov)
-- [light] [\#7641](https://github.com/tendermint/tendermint/pull/7641) Light Client: fix querying against the latest height (@ashcherbakov)
-- [mempool] [\#7718](https://github.com/tendermint/tendermint/pull/7718) return duplicate tx errors more consistently. (@tychoish)
-- [rpc] [\#7744](https://github.com/tendermint/tendermint/pull/7744) fix layout of endpoint list. (@creachadair)
-- [statesync] [\#7886](https://github.com/tendermint/tendermint/pull/7886) assert app version matches. (@cmwaters)
-
 ## v0.35.1
 
 January 26, 2022
@@ -230,18 +209,6 @@ Special thanks to external contributors on this release: @JayT106,
 - [cmd/tendermint/commands] [\#6623](https://github.com/tendermint/tendermint/pull/6623) replace `$HOME/.some/test/dir` with `t.TempDir` (@tanyabouman)
 - [statesync] \#6807 Implement P2P state provider as an alternative to RPC (@cmwaters)
 
-## v0.34.16
-
-Special thanks to external contributors on this release: @yihuang
-
-### BUG FIXES
-
-- [consensus] [\#7617](https://github.com/tendermint/tendermint/issues/7617) calculate prevote message delay metric (backport #7551) (@williambanfield).
-- [consensus] [\#7631](https://github.com/tendermint/tendermint/issues/7631) check proposal non-nil in prevote message delay metric (backport #7625) (@williambanfield).
-- [statesync] [\#7885](https://github.com/tendermint/tendermint/issues/7885) statesync: assert app version matches (backport #7856) (@cmwaters).
-- [statesync] [\#7881](https://github.com/tendermint/tendermint/issues/7881) fix app hash in state rollback (backport #7837) (@cmwaters).
-- [cli] [#7837](https://github.com/tendermint/tendermint/pull/7837) fix app hash in state rollback. (@yihuang).
-
 ## v0.34.15
 
 Special thanks to external contributors on this release: @thanethomson
@@ -1013,7 +980,7 @@ and a validator address plus a timestamp. Note we may remove the validator
 address & timestamp fields in the future (see ADR-25).
 
 `lite2` package has been added to solve `lite` issues and introduce weak
-subjectivity interface. Refer to the [spec](./spec/consensus/light-client/) for complete details.
+subjectivity interface. Refer to the [spec](https://github.com/tendermint/spec/blob/master/spec/consensus/light-client.md) for complete details.
 `lite` package is now deprecated and will be removed in v0.34 release.
 
 ### BREAKING CHANGES:
@@ -17,14 +17,10 @@ Special thanks to external contributors on this release:
 - [mempool] \#7171 Remove legacy mempool implementation. (@tychoish)
 - [rpc] \#7575 Rework how RPC responses are written back via HTTP. (@creachadair)
 - [rpc] \#7713 Remove unused options for websocket clients. (@creachadair)
-- [config] \#7930 Add new event subscription options and defaults. (@creachadair)
-- [rpc] \#7982 Add new Events interface and deprecate Subscribe. (@creachadair)
-- [cli] \#8081 make the reset command safe to use. (@marbar3778)
 
 - Apps
 
-  - [tendermint/spec] \#7804 Migrate spec from [spec repo](https://github.com/tendermint/spec).
   - [abci] \#7984 Remove the locks preventing concurrent use of ABCI applications by Tendermint. (@tychoish)
 
 - P2P Protocol
 
@@ -67,7 +63,6 @@ Special thanks to external contributors on this release:
 - [internal/protoio] \#7325 Optimized `MarshalDelimited` by inlining the common case and using a `sync.Pool` in the worst case. (@odeke-em)
 - [consensus] \#6969 remove logic to 'unlock' a locked block.
 - [evidence] \#7700 Evidence messages contain single Evidence instead of EvidenceList (@jmalicevic)
-- [evidence] \#7802 Evidence pool emits events when evidence is validated and updates a metric when the number of evidence in the evidence pool changes. (@jmalicevic)
 - [pubsub] \#7319 Performance improvements for the event query API (@creachadair)
 - [node] \#7521 Define concrete type for seed node implementation (@spacech1mp)
 - [rpc] \#7612 paginate mempool /unconfirmed_txs rpc endpoint (@spacech1mp)
 
@@ -79,4 +74,3 @@ Special thanks to external contributors on this release:
 - fix: assignment copies lock value in `BitArray.UnmarshalJSON()` (@lklimek)
 - [light] \#7640 Light Client: fix absence proof verification (@ashcherbakov)
 - [light] \#7641 Light Client: fix querying against the latest height (@ashcherbakov)
-- [cli] [#7837](https://github.com/tendermint/tendermint/pull/7837) fix app hash in state rollback. (@yihuang)
@@ -105,33 +105,11 @@ specify exactly the dependency you want to update, eg.
 
 ## Protobuf
 
-We use [Protocol Buffers](https://developers.google.com/protocol-buffers) along
-with [`gogoproto`](https://github.com/gogo/protobuf) to generate code for use
-across Tendermint Core.
+We use [Protocol Buffers](https://developers.google.com/protocol-buffers) along with [gogoproto](https://github.com/gogo/protobuf) to generate code for use across Tendermint Core.
 
-To generate proto stubs, lint, and check protos for breaking changes, you will
-need to install [buf](https://buf.build/) and `gogoproto`. Then, from the root
-of the repository, run:
+For linting, checking breaking changes, and generating proto stubs, we use [buf](https://buf.build/). To run linting and check whether your changes are breaking, you will need Docker running locally. The linting command is `make proto-lint` and the breaking-changes check is `make proto-check-breaking`.
 
-```bash
-# Lint all of the .proto files in proto/tendermint
-make proto-lint
-
-# Check if any of your local changes (prior to committing to the Git repository)
-# are breaking
-make proto-check-breaking
-
-# Generate Go code from the .proto files in proto/tendermint
-make proto-gen
-```
-
-To automatically format `.proto` files, you will need
-[`clang-format`](https://clang.llvm.org/docs/ClangFormat.html) installed. Once
-installed, you can run:
-
-```bash
-make proto-format
-```
+We use [Docker](https://www.docker.com/) to generate the protobuf stubs. To generate the stubs yourself, make sure Docker is running, then run `make proto-gen`. This command uses the spec repo to get the necessary protobuf files for generating the Go code. If you are modifying the proto files manually for changes in the core data structures, you will need to clone them into the Go repo and comment out lines 22-37 of the file `./scripts/protocgen.sh`.
 
 ### Visual Studio Code
Makefile (56 changes)

@@ -13,6 +13,8 @@ endif
 
 LD_FLAGS = -X github.com/tendermint/tendermint/version.TMVersion=$(VERSION)
 BUILD_FLAGS = -mod=readonly -ldflags "$(LD_FLAGS)"
+BUILD_IMAGE := ghcr.io/tendermint/docker-build-proto
+DOCKER_PROTO_BUILDER := docker run -v $(shell pwd):/workspace --workdir /workspace $(BUILD_IMAGE)
 CGO_ENABLED ?= 0
 
 # handle nostrip
@@ -71,57 +73,41 @@ install:
 
 $(BUILDDIR)/:
 	mkdir -p $@
-# The Docker image containing the generator, formatter, and linter.
-# This is generated by proto/Dockerfile. To update tools, make changes
-# there and run the Build & Push Proto Builder Image workflow.
-IMAGE := ghcr.io/tendermint/docker-build-proto:latest
-DOCKER_PROTO_BUILDER := docker run -v $(shell pwd):/workspace --workdir /workspace $(IMAGE)
+HTTPS_GIT := https://github.com/tendermint/tendermint.git
 
 ###############################################################################
 ###                                Protobuf                                 ###
 ###############################################################################
 
-check-proto-deps:
-ifeq (,$(shell which buf))
-	$(error "buf is required for Protobuf building, linting and breakage checking. See https://docs.buf.build/installation for installation instructions.")
-endif
-ifeq (,$(shell which protoc-gen-gogofaster))
-	$(error "gogofaster plugin for protoc is required. Run 'go install github.com/gogo/protobuf/protoc-gen-gogofaster@latest' to install")
-endif
-.PHONY: check-proto-deps
+proto-all: proto-lint proto-check-breaking
+.PHONY: proto-all
 
-check-proto-format-deps:
-ifeq (,$(shell which clang-format))
-	$(error "clang-format is required for Protobuf formatting. See instructions for your platform on how to install it.")
-endif
-.PHONY: check-proto-format-deps
-
-proto-gen: check-proto-deps
+proto-gen:
 	@echo "Generating Protobuf files"
-	@buf generate
-	@mv ./proto/tendermint/abci/types.pb.go ./abci/types/
+	@$(DOCKER_PROTO_BUILDER) buf generate --template=./buf.gen.yaml --config ./buf.yaml
 .PHONY: proto-gen
 
-# These targets are provided for convenience and are intended for local
-# execution only.
-proto-lint: check-proto-deps
-	@echo "Linting Protobuf files"
-	@buf lint
+proto-lint:
+	@$(DOCKER_PROTO_BUILDER) buf lint --error-format=json --config ./buf.yaml
 .PHONY: proto-lint
 
-proto-format: check-proto-format-deps
+proto-format:
 	@echo "Formatting Protobuf files"
-	@find . -name '*.proto' -path "./proto/*" -exec clang-format -i {} \;
+	@$(DOCKER_PROTO_BUILDER) find . -name '*.proto' -path "./proto/*" -exec clang-format -i {} \;
 .PHONY: proto-format
 
-proto-check-breaking: check-proto-deps
-	@echo "Checking for breaking changes in Protobuf files against local branch"
-	@echo "Note: This is only useful if your changes have not yet been committed."
-	@echo "      Otherwise read up on buf's \"breaking\" command usage:"
-	@echo "      https://docs.buf.build/breaking/usage"
-	@buf breaking --against ".git"
+proto-check-breaking:
+	@$(DOCKER_PROTO_BUILDER) buf breaking --against .git --config ./buf.yaml
 .PHONY: proto-check-breaking
 
-# TODO: Should be removed when work on ABCI++ is complete.
-# For more information, see https://github.com/tendermint/tendermint/issues/8066
-abci-proto-gen:
-	./scripts/abci-gen.sh
-.PHONY: abci-proto-gen
+proto-check-breaking-ci:
+	@$(DOCKER_PROTO_BUILDER) buf breaking --against $(HTTPS_GIT) --config ./buf.yaml
+.PHONY: proto-check-breaking-ci
 
 ###############################################################################
 ###                              Build ABCI                                 ###
README.md (54 changes)

@@ -3,7 +3,7 @@
 ![banner](docs/tendermint-core-image.jpg)
 
 [Byzantine-Fault Tolerant](https://en.wikipedia.org/wiki/Byzantine_fault_tolerance)
-[State Machine Replication](https://en.wikipedia.org/wiki/State_machine_replication).
+[State Machines](https://en.wikipedia.org/wiki/State_machine_replication).
 Or [Blockchain](<https://en.wikipedia.org/wiki/Blockchain_(database)>), for short.
 
 [![version](https://img.shields.io/github/tag/tendermint/tendermint.svg)](https://github.com/tendermint/tendermint/releases/latest)
@@ -20,14 +20,10 @@ Or [Blockchain](<https://en.wikipedia.org/wiki/Blockchain_(database)>), for shor
 
 Tendermint Core is a Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine - written in any programming language - and securely replicates it on many machines.
 
-For protocol details, refer to the [Tendermint Specification](./spec/README.md).
+For protocol details, see [the specification](https://github.com/tendermint/spec).
 
 For detailed analysis of the consensus protocol, including safety and liveness proofs,
-read our paper, "[The latest gossip on BFT consensus](https://arxiv.org/abs/1807.04938)".
-
-## Documentation
-
-Complete documentation can be found on the [website](https://docs.tendermint.com/).
+see our recent paper, "[The latest gossip on BFT consensus](https://arxiv.org/abs/1807.04938)".
 
 ## Releases
 
@@ -37,7 +33,7 @@ Tendermint has been in the production of private and public environments, most n
 See below for more details about [versioning](#versioning).
 
 In any case, if you intend to run Tendermint in production, we're happy to help. You can
-contact us [over email](mailto:hello@interchain.io) or [join the chat](https://discord.gg/cosmosnetwork).
+contact us [over email](mailto:hello@interchain.berlin) or [join the chat](https://discord.gg/cosmosnetwork).
 
 More on how releases are conducted can be found [here](./RELEASES.md).
 
@@ -56,15 +52,20 @@ to notify you of vulnerabilities and fixes in Tendermint Core. You can subscribe
 |-------------|------------------|
 | Go version  | Go1.17 or higher |
 
+## Documentation
+
+Complete documentation can be found on the [website](https://docs.tendermint.com/master/).
+
 ### Install
 
-See the [install instructions](./docs/introduction/install.md).
+See the [install instructions](/docs/introduction/install.md).
 
 ### Quick Start
 
-- [Single node](./docs/introduction/quick-start.md)
-- [Local cluster using docker-compose](./docs/tools/docker-compose.md)
-- [Remote cluster using Terraform and Ansible](./docs/tools/terraform-and-ansible.md)
+- [Single node](/docs/introduction/quick-start.md)
+- [Local cluster using docker-compose](/docs/tools/docker-compose.md)
+- [Remote cluster using Terraform and Ansible](/docs/tools/terraform-and-ansible.md)
 - [Join the Cosmos testnet](https://cosmos.network/testnet)
 
 ## Contributing
 
@@ -72,9 +73,9 @@ Please abide by the [Code of Conduct](CODE_OF_CONDUCT.md) in all interactions.
 
 Before contributing to the project, please take a look at the [contributing guidelines](CONTRIBUTING.md)
 and the [style guide](STYLE_GUIDE.md). You may also find it helpful to read the
-[specifications](./spec/README.md),
+[specifications](https://github.com/tendermint/spec), watch the [Developer Sessions](/docs/DEV_SESSIONS.md),
 and familiarize yourself with our
-[Architectural Decision Records (ADRs)](./docs/architecture/README.md) and [Request For Comments (RFCs)](./docs/rfc/README.md).
+[Architectural Decision Records](https://github.com/tendermint/tendermint/tree/master/docs/architecture).
 
 ## Versioning
 
@@ -111,23 +112,26 @@ in [UPGRADING.md](./UPGRADING.md).
 
 ## Resources
 
-### Roadmap
+### Tendermint Core
 
-We keep a public up-to-date version of our roadmap [here](./docs/roadmap/roadmap.md)
-
-### Libraries
+For details about the blockchain data structures and the p2p protocols, see the
+[Tendermint specification](https://docs.tendermint.com/master/spec/).
 
-- [Cosmos SDK](http://github.com/cosmos/cosmos-sdk); A framework for building applications in Golang
-- [Tendermint in Rust](https://github.com/informalsystems/tendermint-rs)
-- [ABCI Tower](https://github.com/penumbra-zone/tower-abci)
+For details on using the software, see the [documentation](/docs/) which is also
+hosted at: <https://docs.tendermint.com/master/>
 
 ### Tools
 
 Benchmarking is provided by [`tm-load-test`](https://github.com/informalsystems/tm-load-test).
+Additional tooling can be found in [/docs/tools](/docs/tools).
 
 ### Applications
 
-- [Cosmos Hub](https://hub.cosmos.network/)
-- [Terra](https://www.terra.money/)
-- [Celestia](https://celestia.org/)
-- [Anoma](https://anoma.network/)
-- [Vocdoni](https://docs.vocdoni.io/)
+- [Cosmos SDK](http://github.com/cosmos/cosmos-sdk); a cryptocurrency application framework
+- [Ethermint](http://github.com/cosmos/ethermint); Ethereum on Tendermint
+- [Many more](https://tendermint.com/ecosystem)
 
 ### Research
 
@@ -140,7 +144,7 @@ We keep a public up-to-date version of our roadmap [here](./docs/roadmap/roadmap
 ## Join us!
 
 Tendermint Core is maintained by [Interchain GmbH](https://interchain.berlin).
-If you'd like to work full-time on Tendermint Core, [we're hiring](https://interchain-gmbh.breezy.hr/)!
+If you'd like to work full-time on Tendermint Core, [we're hiring](https://interchain-gmbh.breezy.hr/p/682fb7e8a6f601-software-engineer-tendermint-core)!
 
 Funding for Tendermint Core development comes primarily from the [Interchain Foundation](https://interchain.io),
 a Swiss non-profit. The Tendermint trademark is owned by [Tendermint Inc.](https://tendermint.com), the for-profit entity
RELEASES.md (29 changes)

@@ -42,42 +42,15 @@ In the following example, we'll assume that we're making a backport branch for
 the 0.35.x line.
 
 1. Start on `master`
 
 2. Create and push the backport branch:
    ```sh
    git checkout -b v0.35.x
    git push origin v0.35.x
    ```
 
-3. Create a PR to update the documentation directory for the backport branch.
-
-   We only maintain RFC and ADR documents on master, to avoid confusion.
-   In addition, we rewrite Markdown URLs pointing to master to point to the
-   backport branch, so that generated documentation will link to the correct
-   versions of files elsewhere in the repository. For context on the latter,
-   see https://github.com/tendermint/tendermint/issues/7675.
-
-   To prepare the PR:
-   ```sh
-   # Remove the RFC and ADR documents from the backport.
-   # We only maintain these on master to avoid confusion.
-   git rm -r docs/rfc docs/architecture
-
-   # Update absolute links to point to the backport.
-   go run ./scripts/linkpatch -recur -target v0.35.x -skip-path docs/DOCS_README.md,docs/README.md docs
-
-   # Create and push the PR.
-   git checkout -b update-docs-v035x
-   git commit -m "Update docs for v0.35.x backport branch." docs
-   git push -u origin update-docs-v035x
-   ```
-
-   Be sure to merge this PR before making other changes on the newly-created
-   backport branch.
-
 After doing these steps, go back to `master` and do the following:
 
-1. Tag `master` as the dev branch for the _next_ major release and push it up to GitHub.
+1. Tag `master` as the dev branch for the _next_ major release and push it back up.
    For example:
    ```sh
    git tag -a v0.36.0-dev -m "Development base for Tendermint v0.36."
UPGRADING.md (78 changes)

@@ -2,67 +2,6 @@
 
 This guide provides instructions for upgrading to specific versions of Tendermint Core.
 
-## v0.36
-
-### ABCI Changes
-
-#### ABCI++
-
-Coming soon...
-
-#### ABCI Mutex
-
-In previous versions of ABCI, Tendermint was prevented from making
-concurrent calls to ABCI implementations by virtue of mutexes in the
-implementation of Tendermint's ABCI infrastructure. These mutexes have
-been removed from the current implementation and applications will now
-be responsible for managing their own concurrency control.
-
-To replicate the prior semantics, ensure that ABCI applications have a
-single mutex that protects all ABCI method calls from concurrent
-access. You can relax these requirements if your application can
-provide safe concurrent access via other means. This safety is an
-application concern, so be very sure to test the application thoroughly
-using realistic workloads and the race detector to ensure your
-application remains correct.
-
-### RPC Changes
-
-Tendermint v0.36 adds a new RPC event subscription API. The existing event
-subscription API based on websockets is now deprecated. It will continue to
-work throughout the v0.36 release, but the `subscribe`, `unsubscribe`, and
-`unsubscribe_all` methods, along with websocket support, will be removed in
-Tendermint v0.37. Callers currently using these features should migrate as
-soon as is practical to the new API.
-
-To enable the new API, node operators set a new `event-log-window-size`
-parameter in the `[rpc]` section of the `config.toml` file. This defines a
-duration of time during which the node will log all events published to the
-event bus for use by RPC consumers.
-
-Consumers use the new `events` JSON-RPC method to poll for events matching
-their query in the log. Unlike the streaming API, events are not discarded if
-the caller is slow, loses its connection, or crashes. As long as the client
-recovers before its events expire from the log window, it will be able to
-replay and catch up after recovering. Also unlike the streaming API, the client
-can tell if it has truly missed events because they have expired from the log.
-
-The `events` method is a normal JSON-RPC method, and does not require any
-non-standard response processing (in contrast with the old `subscribe`).
-Clients can modify their query at any time, and no longer need to coordinate
-subscribe and unsubscribe calls to handle multiple queries.
-
-The Go client implementations in the Tendermint Core repository have all been
-updated to add a new `Events` method, including the light client proxy.
-
-A new `rpc/client/eventstream` package has also been added to make it easier
-for users to update existing use of the streaming API to use the polling API.
-The `eventstream` package handles polling and delivers matching events to a
-callback.
-
-For more detailed information, see [ADR 075](https://tinyurl.com/adr075) which
-defines and describes the new API in detail.
-
 ## v0.35
 
 ### ABCI Changes
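The removed ABCI mutex note maps directly onto the locking pattern this same branch restores in the local ABCI client further down this diff. As an illustration only (not part of the diff), a single-mutex wrapper over the value-style `types.Application` signatures visible in that file might look like:

```go
package myapp

import (
	"sync"

	"github.com/tendermint/tendermint/abci/types"
)

// lockedApp serializes the guarded ABCI calls with one mutex, replicating
// the pre-v0.36 semantics described above. Only two methods are shown; a
// real wrapper would guard every method of types.Application the same way.
type lockedApp struct {
	types.Application // the underlying application; unwrapped methods pass through
	mtx sync.Mutex
}

func (l *lockedApp) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
	l.mtx.Lock()
	defer l.mtx.Unlock()
	return l.Application.CheckTx(req)
}

func (l *lockedApp) Query(req types.RequestQuery) types.ResponseQuery {
	l.mtx.Lock()
	defer l.mtx.Unlock()
	return l.Application.Query(req)
}
```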
@@ -174,11 +113,11 @@ To access any of the functionality previously available via the
 `node.Node` type, use the `*local.Local` "RPC" client, that exposes
 the full RPC interface provided as direct function calls. Import the
 `github.com/tendermint/tendermint/rpc/client/local` package and pass
-the node service as in the following:
+the node service as in the following:
 
 ```go
     node := node.NewDefault() //construct the node object
-    // start and set up the node service
+    // start and set up the node service
 
     client := local.New(node.(local.NodeService))
     // use client object to interact with the node
@@ -205,10 +144,10 @@ both stacks.
 The P2P library was reimplemented in this release. The new implementation is
 enabled by default in this version of Tendermint. The legacy implementation is still
 included in this version of Tendermint as a backstop to work around unforeseen
-production issues. The new and legacy version are interoperable. If necessary,
+production issues. The new and legacy version are interoperable. If necessary,
 you can enable the legacy implementation in the server configuration file.
 
-To make use of the legacy P2P implementation add or update the following field of
+To make use of the legacy P2P implementation add or update the following field of
 your server's configuration file under the `[p2p]` section:
 
 ```toml
@@ -233,8 +172,8 @@ in the order in which they were received.
 
 * `priority`: A priority queue of messages.
 
-* `wdrr`: A queue implementing the Weighted Deficit Round Robin algorithm. A
-  weighted deficit round robin queue is created per peer. Each queue contains a
+* `wdrr`: A queue implementing the Weighted Deficit Round Robin algorithm. A
+  weighted deficit round robin queue is created per peer. Each queue contains a
   separate 'flow' for each of the channels of communication that exist between any two
   peers. Tendermint maintains a channel per message type between peers. Each WDRR
   queue maintains a shared buffer with a fixed capacity through which messages on different
@@ -278,7 +217,7 @@ Note also that Tendermint 0.34 also requires Go 1.16 or higher.
 were added to support the new State Sync feature.
 Previously, syncing a new node to a preexisting network could take days; but with State Sync,
 new nodes are able to join a network in a matter of seconds.
-Read [the spec](https://github.com/tendermint/tendermint/blob/master/spec/abci/apps.md)
+Read [the spec](https://docs.tendermint.com/master/spec/abci/apps.html#state-sync)
 if you want to learn more about State Sync, or if you'd like your application to use it.
 (If you don't want to support State Sync in your application, you can just implement these new
 ABCI methods as no-ops, leaving them empty.)
@@ -403,6 +342,7 @@ The `bech32` package has moved to the Cosmos SDK:
 ### CLI
 
 The `tendermint lite` command has been renamed to `tendermint light` and has a slightly different API.
+See [the docs](https://docs.tendermint.com/master/tendermint-core/light-client-protocol.html#http-proxy) for details.
 
 ### Light Client
 
@@ -677,7 +617,7 @@ the compilation tag:
 
 Use `cleveldb` tag instead of `gcc` to compile Tendermint with CLevelDB or
 use `make build_c` / `make install_c` (full instructions can be found at
-<https://docs.tendermint.com/v0.35/introduction/install.html>)
+<https://tendermint.com/docs/introduction/install.html#compile-with-cleveldb-support>)
 
 ## v0.31.0
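For the cursor-based polling flow that the removed v0.36 RPC notes describe, a rough self-contained sketch follows. The types here are stand-ins, not the real `rpc` or `eventstream` API; the actual parameters (filter, cursor, wait time) are defined in ADR 075:

```go
package eventpoll

import "context"

// Item is a single event plus the cursor needed to resume after it.
type Item struct {
	Cursor string
	Data   []byte
}

// Fetch models one `events` JSON-RPC call: return items newer than `after`,
// blocking up to the server's wait time. Hypothetical shape.
type Fetch func(ctx context.Context, query, after string) ([]Item, error)

// Run polls until ctx is done, handing each matching event to cb in order.
// Because the server keeps an event log, a slow consumer only falls behind;
// it does not lose events until they expire from the log window.
func Run(ctx context.Context, fetch Fetch, query string, cb func(Item) error) error {
	var cursor string
	for {
		items, err := fetch(ctx, query, cursor)
		if err != nil {
			return err
		}
		for _, it := range items {
			if err := cb(it); err != nil {
				return err
			}
			cursor = it.Cursor // the resume point survives reconnects
		}
		if ctx.Err() != nil {
			return ctx.Err()
		}
	}
}
```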
@@ -19,8 +19,8 @@ To get up and running quickly, see the [getting started guide](../docs/app-dev/g
 
 A detailed description of the ABCI methods and message types is contained in:
 
-- [The main spec](../spec/abci/abci.md)
-- [A protobuf file](../proto/tendermint/abci/types.proto)
+- [The main spec](https://github.com/tendermint/spec/blob/master/spec/abci/abci.md)
+- [A protobuf file](https://github.com/tendermint/spec/blob/master/proto/tendermint/abci/types.proto)
 - [A Go interface](./types/application.go)
 
 ## Protocol Buffers
@@ -19,8 +19,8 @@ const (
 
 // Client defines an interface for an ABCI client.
 //
-// All methods return the appropriate protobuf ResponseXxx struct and
-// an error.
+// All `Async` methods return a `ReqRes` object and an error.
+// All `Sync` methods return the appropriate protobuf ResponseXxx struct and an error.
 //
 // NOTE these are client errors, eg. ABCI socket connectivity issues.
 // Application-related errors are reflected in response via ABCI error codes
@@ -52,35 +52,65 @@ type Client interface {
 
 // NewClient returns a new ABCI client of the specified transport type.
 // It returns an error if the transport is not "socket" or "grpc"
-func NewClient(logger log.Logger, addr, transport string, mustConnect bool) (Client, error) {
+func NewClient(logger log.Logger, addr, transport string, mustConnect bool) (client Client, err error) {
 	switch transport {
 	case "socket":
-		return NewSocketClient(logger, addr, mustConnect), nil
+		client = NewSocketClient(logger, addr, mustConnect)
 	case "grpc":
-		return NewGRPCClient(logger, addr, mustConnect), nil
+		client = NewGRPCClient(logger, addr, mustConnect)
 	default:
-		return nil, fmt.Errorf("unknown abci transport %s", transport)
+		err = fmt.Errorf("unknown abci transport %s", transport)
 	}
+	return
 }
 
-type requestAndResponse struct {
+type ReqRes struct {
 	*types.Request
-	*types.Response
+	*types.Response // Not set atomically, so be sure to use WaitGroup.
 
 	mtx    sync.Mutex
 	signal chan struct{}
+	cb     func(*types.Response) // A single callback that may be set.
 }
 
-func makeReqRes(req *types.Request) *requestAndResponse {
-	return &requestAndResponse{
+func NewReqRes(req *types.Request) *ReqRes {
+	return &ReqRes{
 		Request:  req,
 		Response: nil,
 		signal:   make(chan struct{}),
+		cb:       nil,
 	}
 }
 
-// markDone marks the ReqRes object as done.
-func (r *requestAndResponse) markDone() {
+// SetCallback sets the callback. If reqRes is already done, it will call the cb
+// immediately. Note, reqRes.cb should not change if reqRes.done and only one
+// callback is supported.
+func (r *ReqRes) SetCallback(cb func(res *types.Response)) {
+	r.mtx.Lock()
+
+	select {
+	case <-r.signal:
+		r.mtx.Unlock()
+		cb(r.Response)
+	default:
+		r.cb = cb
+		r.mtx.Unlock()
+	}
+}
+
+// InvokeCallback invokes a thread-safe execution of the configured callback
+// if non-nil.
+func (r *ReqRes) InvokeCallback() {
+	r.mtx.Lock()
+	defer r.mtx.Unlock()
+
+	if r.cb != nil {
+		r.cb(r.Response)
+	}
+}
+
+// SetDone marks the ReqRes object as done.
+func (r *ReqRes) SetDone() {
+	r.mtx.Lock()
+	defer r.mtx.Unlock()
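A small usage sketch of the restored `ReqRes` callback flow (illustrative, not part of the diff; it mirrors what the gRPC client's dispatch loop below does with `SetDone` and `InvokeCallback`):

```go
package main

import (
	"fmt"

	abciclient "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/types"
)

func main() {
	rr := abciclient.NewReqRes(types.ToRequestEcho("hello"))

	// Registered before completion: the callback is stored on the ReqRes
	// and fired later by InvokeCallback.
	rr.SetCallback(func(res *types.Response) {
		fmt.Println("echo response:", res)
	})

	// What a transport client does once the response arrives:
	rr.Response = types.ToResponseEcho("hello") // assumes the ToResponseEcho helper
	rr.SetDone()
	rr.InvokeCallback()
}
```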
abci/client/creators.go (new file, 33 lines)

@@ -0,0 +1,33 @@
+package abciclient
+
+import (
+	"fmt"
+
+	"github.com/tendermint/tendermint/abci/types"
+	"github.com/tendermint/tendermint/libs/log"
+)
+
+// Creator creates new ABCI clients.
+type Creator func(log.Logger) (Client, error)
+
+// NewLocalCreator returns a Creator for the given app,
+// which will be running locally.
+func NewLocalCreator(app types.Application) Creator {
+	return func(logger log.Logger) (Client, error) {
+		return NewLocalClient(logger, app), nil
+	}
+}
+
+// NewRemoteCreator returns a Creator for the given address (e.g.
+// "192.168.0.1") and transport (e.g. "tcp"). Set mustConnect to true if you
+// want the client to connect before reporting success.
+func NewRemoteCreator(logger log.Logger, addr, transport string, mustConnect bool) Creator {
+	return func(log.Logger) (Client, error) {
+		remoteApp, err := NewClient(logger, addr, transport, mustConnect)
+		if err != nil {
+			return nil, fmt.Errorf("failed to connect to proxy: %w", err)
+		}
+
+		return remoteApp, nil
+	}
+}
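As a usage sketch for the restored `Creator` type (illustrative; assumes the example kvstore app and a no-op logger constructor from `libs/log`):

```go
package main

import (
	abciclient "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/example/kvstore"
	"github.com/tendermint/tendermint/libs/log"
)

func main() {
	logger := log.NewNopLogger()

	// In-process application: the creator closes over the app instance.
	local := abciclient.NewLocalCreator(kvstore.NewApplication())
	client, err := local(logger)
	if err != nil {
		panic(err)
	}
	_ = client // handed to the proxy layer, which starts it

	// Out-of-process application: the creator dials an ABCI server.
	remote := abciclient.NewRemoteCreator(logger, "tcp://127.0.0.1:26658", "socket", true)
	if _, err := remote(logger); err != nil {
		panic(err)
	}
}
```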
@@ -7,14 +7,23 @@
 //
 // ## Socket client
 //
-// The client blocks for enqueuing the request, for enqueuing the
-// Flush to send the request, and for the Flush response to return.
+// async: the client maintains an internal buffer of a fixed size. when the
+// buffer becomes full, all Async calls will return an error immediately.
+//
+// sync: the client blocks on 1) enqueuing the Sync request 2) enqueuing the
+// Flush requests 3) waiting for the Flush response
 //
 // ## Local client
 //
-// The global mutex is locked during each call
+// async: global mutex is locked during each call (meaning it's not really async!)
+// sync: global mutex is locked during each call
 //
 // ## gRPC client
 //
-// The client waits for all calls to complete.
+// async: gRPC is synchronous, but an internal buffer of a fixed size is used
+// to store responses and later call callbacks (separate goroutine per
+// response).
+//
+// sync: waits for all Async calls to complete (essentially what Flush does in
+// the socket client) and calls Sync method.
 package abciclient
@@ -24,8 +24,9 @@ type grpcClient struct {
 
 	mustConnect bool
 
-	client types.ABCIApplicationClient
-	conn   *grpc.ClientConn
+	client   types.ABCIApplicationClient
+	conn     *grpc.ClientConn
+	chReqRes chan *ReqRes // dispatches "async" responses to callbacks *in order*, needed by mempool
 
 	mtx  sync.Mutex
 	addr string
@@ -37,11 +38,25 @@ var _ Client = (*grpcClient)(nil)
 // NewGRPCClient creates a gRPC client, which will connect to addr upon the
 // start. Note Client#Start returns an error if connection is unsuccessful and
 // mustConnect is true.
+//
+// GRPC calls are synchronous, but some callbacks expect to be called
+// asynchronously (eg. the mempool expects to be able to lock to remove bad txs
+// from cache). To accommodate, we finish each call in its own go-routine,
+// which is expensive, but easy - if you want something better, use the socket
+// protocol! maybe one day, if people really want it, we use grpc streams, but
+// hopefully not :D
 func NewGRPCClient(logger log.Logger, addr string, mustConnect bool) Client {
 	cli := &grpcClient{
 		logger:      logger,
 		addr:        addr,
 		mustConnect: mustConnect,
+		// Buffering the channel is needed to make calls appear asynchronous,
+		// which is required when the caller makes multiple async calls before
+		// processing callbacks (e.g. due to holding locks). 64 means that a
+		// caller can make up to 64 async calls before a callback must be
+		// processed (otherwise it deadlocks). It also means that we can make 64
+		// gRPC calls while processing a slow callback at the channel head.
+		chReqRes: make(chan *ReqRes, 64),
 	}
 	cli.BaseService = *service.NewBaseService(logger, "grpcClient", cli)
 	return cli
@@ -52,6 +67,35 @@ func dialerFunc(ctx context.Context, addr string) (net.Conn, error) {
 }
 
 func (cli *grpcClient) OnStart(ctx context.Context) error {
+	// This processes asynchronous request/response messages and dispatches
+	// them to callbacks.
+	go func() {
+		// Use a separate function to use defer for mutex unlocks (this handles panics)
+		callCb := func(reqres *ReqRes) {
+			cli.mtx.Lock()
+			defer cli.mtx.Unlock()
+
+			reqres.SetDone()
+
+			// Notify reqRes listener if set
+			reqres.InvokeCallback()
+		}
+
+		for {
+			select {
+			case reqres := <-cli.chReqRes:
+				if reqres != nil {
+					callCb(reqres)
+				} else {
+					cli.logger.Error("Received nil reqres")
+				}
+			case <-ctx.Done():
+				return
+			}
+		}
+	}()
+
 RETRY_LOOP:
 	for {
 		conn, err := grpc.Dial(cli.addr,
@@ -91,18 +135,30 @@ RETRY_LOOP:
 }
 
 func (cli *grpcClient) OnStop() {
 	cli.mtx.Lock()
 	defer cli.mtx.Unlock()
 
 	if cli.conn != nil {
-		cli.err = cli.conn.Close()
+		cli.conn.Close()
 	}
+	close(cli.chReqRes)
 }
 
+func (cli *grpcClient) StopForError(err error) {
+	if !cli.IsRunning() {
+		return
+	}
+
+	cli.mtx.Lock()
+	if cli.err == nil {
+		cli.err = err
+	}
+	cli.mtx.Unlock()
+
+	cli.logger.Error("Stopping abci.grpcClient for error", "err", err)
+	cli.Stop()
+}
+
 func (cli *grpcClient) Error() error {
 	cli.mtx.Lock()
 	defer cli.mtx.Unlock()
+
 	return cli.err
 }
@@ -2,6 +2,7 @@ package abciclient
 
 import (
 	"context"
+	"sync"
 
 	types "github.com/tendermint/tendermint/abci/types"
 	"github.com/tendermint/tendermint/libs/log"
@@ -14,6 +15,8 @@ import (
 // RPC endpoint), but defers are used everywhere for the sake of consistency.
 type localClient struct {
 	service.BaseService
+
+	mtx sync.Mutex
 	types.Application
 }
 
@@ -22,7 +25,7 @@ var _ Client = (*localClient)(nil)
 // NewLocalClient creates a local client, which will be directly calling the
 // methods of the given app.
 //
-// The client methods ignore their context argument.
+// Both Async and Sync methods ignore the given context.Context parameter.
 func NewLocalClient(logger log.Logger, app types.Application) Client {
 	cli := &localClient{
 		Application: app,
@@ -33,82 +36,169 @@ func NewLocalClient(logger log.Logger, app types.Application) Client {
 
 func (*localClient) OnStart(context.Context) error { return nil }
 func (*localClient) OnStop()                       {}
-func (*localClient) Error() error                  { return nil }
+
+// TODO: change types.Application to include Error()?
+func (app *localClient) Error() error {
+	return nil
+}
 
 //-------------------------------------------------------
 
-func (*localClient) Flush(context.Context) error { return nil }
+func (app *localClient) Flush(ctx context.Context) error {
+	return nil
+}
 
-func (app *localClient) Echo(_ context.Context, msg string) (*types.ResponseEcho, error) {
+func (app *localClient) Echo(ctx context.Context, msg string) (*types.ResponseEcho, error) {
 	return &types.ResponseEcho{Message: msg}, nil
 }
 
 func (app *localClient) Info(ctx context.Context, req types.RequestInfo) (*types.ResponseInfo, error) {
+	app.mtx.Lock()
+	defer app.mtx.Unlock()
+
 	res := app.Application.Info(req)
 	return &res, nil
 }
 
-func (app *localClient) CheckTx(_ context.Context, req types.RequestCheckTx) (*types.ResponseCheckTx, error) {
+func (app *localClient) CheckTx(
+	ctx context.Context,
+	req types.RequestCheckTx,
+) (*types.ResponseCheckTx, error) {
+	app.mtx.Lock()
+	defer app.mtx.Unlock()
+
 	res := app.Application.CheckTx(req)
 	return &res, nil
 }
 
-func (app *localClient) Query(_ context.Context, req types.RequestQuery) (*types.ResponseQuery, error) {
+func (app *localClient) Query(
+	ctx context.Context,
+	req types.RequestQuery,
+) (*types.ResponseQuery, error) {
+	app.mtx.Lock()
+	defer app.mtx.Unlock()
+
 	res := app.Application.Query(req)
 	return &res, nil
 }
 
 func (app *localClient) Commit(ctx context.Context) (*types.ResponseCommit, error) {
+	app.mtx.Lock()
+	defer app.mtx.Unlock()
+
 	res := app.Application.Commit()
 	return &res, nil
 }
 
-func (app *localClient) InitChain(_ context.Context, req types.RequestInitChain) (*types.ResponseInitChain, error) {
+func (app *localClient) InitChain(
+	ctx context.Context,
+	req types.RequestInitChain,
+) (*types.ResponseInitChain, error) {
+
+	app.mtx.Lock()
+	defer app.mtx.Unlock()
+
 	res := app.Application.InitChain(req)
 	return &res, nil
 }
 
-func (app *localClient) ListSnapshots(_ context.Context, req types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
+func (app *localClient) ListSnapshots(
+	ctx context.Context,
+	req types.RequestListSnapshots,
+) (*types.ResponseListSnapshots, error) {
+
+	app.mtx.Lock()
+	defer app.mtx.Unlock()
+
 	res := app.Application.ListSnapshots(req)
 	return &res, nil
 }
 
-func (app *localClient) OfferSnapshot(_ context.Context, req types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
+func (app *localClient) OfferSnapshot(
+	ctx context.Context,
+	req types.RequestOfferSnapshot,
+) (*types.ResponseOfferSnapshot, error) {
+
+	app.mtx.Lock()
+	defer app.mtx.Unlock()
+
 	res := app.Application.OfferSnapshot(req)
 	return &res, nil
 }
 
-func (app *localClient) LoadSnapshotChunk(_ context.Context, req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
+func (app *localClient) LoadSnapshotChunk(
+	ctx context.Context,
+	req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
+
+	app.mtx.Lock()
+	defer app.mtx.Unlock()
+
 	res := app.Application.LoadSnapshotChunk(req)
 	return &res, nil
 }
 
-func (app *localClient) ApplySnapshotChunk(_ context.Context, req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
+func (app *localClient) ApplySnapshotChunk(
+	ctx context.Context,
+	req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
+
+	app.mtx.Lock()
+	defer app.mtx.Unlock()
+
 	res := app.Application.ApplySnapshotChunk(req)
 	return &res, nil
 }
 
-func (app *localClient) PrepareProposal(_ context.Context, req types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
+func (app *localClient) PrepareProposal(
	ctx context.Context,
+	req types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
+
+	app.mtx.Lock()
+	defer app.mtx.Unlock()
+
 	res := app.Application.PrepareProposal(req)
 	return &res, nil
 }
 
-func (app *localClient) ProcessProposal(_ context.Context, req types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
+func (app *localClient) ProcessProposal(
+	ctx context.Context,
+	req types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
+
+	app.mtx.Lock()
+	defer app.mtx.Unlock()
+
 	res := app.Application.ProcessProposal(req)
 	return &res, nil
 }
 
-func (app *localClient) ExtendVote(_ context.Context, req types.RequestExtendVote) (*types.ResponseExtendVote, error) {
+func (app *localClient) ExtendVote(
+	ctx context.Context,
+	req types.RequestExtendVote) (*types.ResponseExtendVote, error) {
+
+	app.mtx.Lock()
+	defer app.mtx.Unlock()
+
 	res := app.Application.ExtendVote(req)
 	return &res, nil
 }
 
-func (app *localClient) VerifyVoteExtension(_ context.Context, req types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) {
+func (app *localClient) VerifyVoteExtension(
+	ctx context.Context,
+	req types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) {
+
+	app.mtx.Lock()
+	defer app.mtx.Unlock()
+
 	res := app.Application.VerifyVoteExtension(req)
 	return &res, nil
 }
 
-func (app *localClient) FinalizeBlock(_ context.Context, req types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
+func (app *localClient) FinalizeBlock(
+	ctx context.Context,
+	req types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
+
+	app.mtx.Lock()
+	defer app.mtx.Unlock()
+
 	res := app.Application.FinalizeBlock(req)
 	return &res, nil
 }
@@ -5,7 +5,10 @@ package mocks

import (
    context "context"

    abciclient "github.com/tendermint/tendermint/abci/client"

    mock "github.com/stretchr/testify/mock"

    types "github.com/tendermint/tendermint/abci/types"
)

@@ -60,6 +63,29 @@ func (_m *Client) CheckTx(_a0 context.Context, _a1 types.RequestCheckTx) (*types
    return r0, r1
}

// CheckTxAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) CheckTxAsync(_a0 context.Context, _a1 types.RequestCheckTx) (*abciclient.ReqRes, error) {
    ret := _m.Called(_a0, _a1)

    var r0 *abciclient.ReqRes
    if rf, ok := ret.Get(0).(func(context.Context, types.RequestCheckTx) *abciclient.ReqRes); ok {
        r0 = rf(_a0, _a1)
    } else {
        if ret.Get(0) != nil {
            r0 = ret.Get(0).(*abciclient.ReqRes)
        }
    }

    var r1 error
    if rf, ok := ret.Get(1).(func(context.Context, types.RequestCheckTx) error); ok {
        r1 = rf(_a0, _a1)
    } else {
        r1 = ret.Error(1)
    }

    return r0, r1
}

// Commit provides a mock function with given fields: _a0
func (_m *Client) Commit(_a0 context.Context) (*types.ResponseCommit, error) {
    ret := _m.Called(_a0)

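The CheckTxAsync stub above follows the usual mockery pattern: `_m.Called` records the invocation and hands back whatever expectation was registered with `On(...)`. A minimal sketch of how such a mock might be driven from a test — the test name and expected values here are illustrative, not part of this change:

// Illustrative only: exercises the generated CheckTxAsync stub, assuming the
// mocks, abciclient, and types packages from the hunk above are imported.
func TestCheckTxAsyncMock(t *testing.T) {
    m := &mocks.Client{}
    m.On("CheckTxAsync", mock.Anything, mock.Anything).
        Return(&abciclient.ReqRes{}, nil)

    reqres, err := m.CheckTxAsync(context.Background(), types.RequestCheckTx{})
    require.NoError(t, err)  // falls through to ret.Error(1), which is nil here
    require.NotNil(t, reqres) // ret.Get(0) carries the *abciclient.ReqRes
    m.AssertExpectations(t)
}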
@@ -8,6 +8,7 @@ import (
    "fmt"
    "io"
    "net"
    "reflect"
    "sync"
    "time"

@@ -33,11 +34,12 @@ type socketClient struct {
    mustConnect bool
    conn        net.Conn

    reqQueue chan *requestAndResponse
    reqQueue chan *ReqRes

    mtx     sync.Mutex
    err     error
    reqSent *list.List // list of requests sent, waiting for response
    reqSent *list.List                            // list of requests sent, waiting for response
    resCb   func(*types.Request, *types.Response) // called on all requests, if set.
}

var _ Client = (*socketClient)(nil)

@@ -48,10 +50,11 @@ var _ Client = (*socketClient)(nil)
func NewSocketClient(logger log.Logger, addr string, mustConnect bool) Client {
    cli := &socketClient{
        logger:      logger,
        reqQueue:    make(chan *requestAndResponse, reqQueueSize),
        reqQueue:    make(chan *ReqRes, reqQueueSize),
        mustConnect: mustConnect,
        addr:        addr,
        reqSent:     list.New(),
        resCb:       nil,
    }
    cli.BaseService = *service.NewBaseService(logger, "socketClient", cli)
    return cli

@@ -123,7 +126,6 @@ func (cli *socketClient) sendRequestsRoutine(ctx context.Context, conn io.Writer
    cli.stopForError(fmt.Errorf("write to buffer: %w", err))
    return
}

    if err := bw.Flush(); err != nil {
        cli.stopForError(fmt.Errorf("flush buffer: %w", err))
        return

@@ -138,20 +140,23 @@ func (cli *socketClient) recvResponseRoutine(ctx context.Context, conn io.Reader
    if ctx.Err() != nil {
        return
    }
    res := &types.Response{}

    if err := types.ReadMessage(r, res); err != nil {
    var res = &types.Response{}
    err := types.ReadMessage(r, res)
    if err != nil {
        cli.stopForError(fmt.Errorf("read message: %w", err))
        return
    }

    // cli.logger.Debug("Received response", "responseType", reflect.TypeOf(res), "response", res)

    switch r := res.Value.(type) {
    case *types.Response_Exception: // app responded with error
        // XXX After setting cli.err, release waiters (e.g. reqres.Done())
        cli.stopForError(errors.New(r.Exception.Error))
        return
    default:
        if err := cli.didRecvResponse(res); err != nil {
        err := cli.didRecvResponse(res)
        if err != nil {
            cli.stopForError(err)
            return
        }

@@ -159,7 +164,7 @@ func (cli *socketClient) recvResponseRoutine(ctx context.Context, conn io.Reader
    }
}

func (cli *socketClient) willSendReq(reqres *requestAndResponse) {
func (cli *socketClient) willSendReq(reqres *ReqRes) {
    cli.mtx.Lock()
    defer cli.mtx.Unlock()
    cli.reqSent.PushBack(reqres)

@@ -172,172 +177,258 @@ func (cli *socketClient) didRecvResponse(res *types.Response) error {
    // Get the first ReqRes.
    next := cli.reqSent.Front()
    if next == nil {
        return fmt.Errorf("unexpected %T when nothing expected", res.Value)
        return fmt.Errorf("unexpected %v when nothing expected", reflect.TypeOf(res.Value))
    }

    reqres := next.Value.(*requestAndResponse)
    reqres := next.Value.(*ReqRes)
    if !resMatchesReq(reqres.Request, res) {
        return fmt.Errorf("unexpected %T when response to %T expected", res.Value, reqres.Request.Value)
        return fmt.Errorf("unexpected %v when response to %v expected",
            reflect.TypeOf(res.Value), reflect.TypeOf(reqres.Request.Value))
    }

    reqres.Response = res
    reqres.markDone() // release waiters
    reqres.SetDone()  // release waiters
    cli.reqSent.Remove(next) // pop first item from linked list

    // Notify client listener if set (global callback).
    if cli.resCb != nil {
        cli.resCb(reqres.Request, res)
    }

    // Notify reqRes listener if set (request specific callback).
    //
    // NOTE: It is possible this callback isn't set on the reqres object at
    // this point, in which case it will be invoked later, once it is set.
    reqres.InvokeCallback()

    return nil
}

//----------------------------------------

func (cli *socketClient) Flush(ctx context.Context) error {
    _, err := cli.doRequest(ctx, types.ToRequestFlush())
    reqRes, err := cli.queueRequest(ctx, types.ToRequestFlush())
    if err != nil {
        return queueErr(err)
    }

    if err := cli.Error(); err != nil {
        return err
    }
    return nil

    select {
    case <-reqRes.signal:
        return cli.Error()
    case <-ctx.Done():
        return ctx.Err()
    }
}

func (cli *socketClient) Echo(ctx context.Context, msg string) (*types.ResponseEcho, error) {
    res, err := cli.doRequest(ctx, types.ToRequestEcho(msg))
    reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestEcho(msg))
    if err != nil {
        return nil, err
    }
    return res.GetEcho(), nil
    return reqres.Response.GetEcho(), nil
}

func (cli *socketClient) Info(ctx context.Context, req types.RequestInfo) (*types.ResponseInfo, error) {
    res, err := cli.doRequest(ctx, types.ToRequestInfo(req))
func (cli *socketClient) Info(
    ctx context.Context,
    req types.RequestInfo,
) (*types.ResponseInfo, error) {
    reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestInfo(req))
    if err != nil {
        return nil, err
    }
    return res.GetInfo(), nil
    return reqres.Response.GetInfo(), nil
}

func (cli *socketClient) CheckTx(ctx context.Context, req types.RequestCheckTx) (*types.ResponseCheckTx, error) {
    res, err := cli.doRequest(ctx, types.ToRequestCheckTx(req))
func (cli *socketClient) CheckTx(
    ctx context.Context,
    req types.RequestCheckTx,
) (*types.ResponseCheckTx, error) {
    reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestCheckTx(req))
    if err != nil {
        return nil, err
    }
    return res.GetCheckTx(), nil
    return reqres.Response.GetCheckTx(), nil
}

func (cli *socketClient) Query(ctx context.Context, req types.RequestQuery) (*types.ResponseQuery, error) {
    res, err := cli.doRequest(ctx, types.ToRequestQuery(req))
func (cli *socketClient) Query(
    ctx context.Context,
    req types.RequestQuery,
) (*types.ResponseQuery, error) {
    reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestQuery(req))
    if err != nil {
        return nil, err
    }
    return res.GetQuery(), nil
    return reqres.Response.GetQuery(), nil
}

func (cli *socketClient) Commit(ctx context.Context) (*types.ResponseCommit, error) {
    res, err := cli.doRequest(ctx, types.ToRequestCommit())
    reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestCommit())
    if err != nil {
        return nil, err
    }
    return res.GetCommit(), nil
    return reqres.Response.GetCommit(), nil
}

func (cli *socketClient) InitChain(ctx context.Context, req types.RequestInitChain) (*types.ResponseInitChain, error) {
    res, err := cli.doRequest(ctx, types.ToRequestInitChain(req))
func (cli *socketClient) InitChain(
    ctx context.Context,
    req types.RequestInitChain,
) (*types.ResponseInitChain, error) {

    reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestInitChain(req))
    if err != nil {
        return nil, err
    }
    return res.GetInitChain(), nil
    return reqres.Response.GetInitChain(), nil
}

func (cli *socketClient) ListSnapshots(ctx context.Context, req types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
    res, err := cli.doRequest(ctx, types.ToRequestListSnapshots(req))
func (cli *socketClient) ListSnapshots(
    ctx context.Context,
    req types.RequestListSnapshots,
) (*types.ResponseListSnapshots, error) {

    reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestListSnapshots(req))
    if err != nil {
        return nil, err
    }
    return res.GetListSnapshots(), nil
    return reqres.Response.GetListSnapshots(), nil
}

func (cli *socketClient) OfferSnapshot(ctx context.Context, req types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
    res, err := cli.doRequest(ctx, types.ToRequestOfferSnapshot(req))
func (cli *socketClient) OfferSnapshot(
    ctx context.Context,
    req types.RequestOfferSnapshot,
) (*types.ResponseOfferSnapshot, error) {

    reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestOfferSnapshot(req))
    if err != nil {
        return nil, err
    }
    return res.GetOfferSnapshot(), nil
    return reqres.Response.GetOfferSnapshot(), nil
}

func (cli *socketClient) LoadSnapshotChunk(ctx context.Context, req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
    res, err := cli.doRequest(ctx, types.ToRequestLoadSnapshotChunk(req))
func (cli *socketClient) LoadSnapshotChunk(
    ctx context.Context,
    req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {

    reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestLoadSnapshotChunk(req))
    if err != nil {
        return nil, err
    }
    return res.GetLoadSnapshotChunk(), nil
    return reqres.Response.GetLoadSnapshotChunk(), nil
}

func (cli *socketClient) ApplySnapshotChunk(ctx context.Context, req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
    res, err := cli.doRequest(ctx, types.ToRequestApplySnapshotChunk(req))
func (cli *socketClient) ApplySnapshotChunk(
    ctx context.Context,
    req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {

    reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestApplySnapshotChunk(req))
    if err != nil {
        return nil, err
    }
    return res.GetApplySnapshotChunk(), nil
    return reqres.Response.GetApplySnapshotChunk(), nil
}

func (cli *socketClient) PrepareProposal(ctx context.Context, req types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
    res, err := cli.doRequest(ctx, types.ToRequestPrepareProposal(req))
func (cli *socketClient) PrepareProposal(
    ctx context.Context,
    req types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {

    reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestPrepareProposal(req))
    if err != nil {
        return nil, err
    }
    return res.GetPrepareProposal(), nil
    return reqres.Response.GetPrepareProposal(), nil
}

func (cli *socketClient) ProcessProposal(ctx context.Context, req types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
    res, err := cli.doRequest(ctx, types.ToRequestProcessProposal(req))
func (cli *socketClient) ProcessProposal(
    ctx context.Context,
    req types.RequestProcessProposal,
) (*types.ResponseProcessProposal, error) {

    reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestProcessProposal(req))
    if err != nil {
        return nil, err
    }
    return res.GetProcessProposal(), nil
    return reqres.Response.GetProcessProposal(), nil
}

func (cli *socketClient) ExtendVote(ctx context.Context, req types.RequestExtendVote) (*types.ResponseExtendVote, error) {
    res, err := cli.doRequest(ctx, types.ToRequestExtendVote(req))
func (cli *socketClient) ExtendVote(
    ctx context.Context,
    req types.RequestExtendVote) (*types.ResponseExtendVote, error) {

    reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestExtendVote(req))
    if err != nil {
        return nil, err
    }
    return res.GetExtendVote(), nil
    return reqres.Response.GetExtendVote(), nil
}

func (cli *socketClient) VerifyVoteExtension(ctx context.Context, req types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) {
    res, err := cli.doRequest(ctx, types.ToRequestVerifyVoteExtension(req))
func (cli *socketClient) VerifyVoteExtension(
    ctx context.Context,
    req types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) {

    reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestVerifyVoteExtension(req))
    if err != nil {
        return nil, err
    }
    return res.GetVerifyVoteExtension(), nil
    return reqres.Response.GetVerifyVoteExtension(), nil
}

func (cli *socketClient) FinalizeBlock(ctx context.Context, req types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
    res, err := cli.doRequest(ctx, types.ToRequestFinalizeBlock(req))
func (cli *socketClient) FinalizeBlock(
    ctx context.Context,
    req types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {

    reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestFinalizeBlock(req))
    if err != nil {
        return nil, err
    }
    return res.GetFinalizeBlock(), nil
    return reqres.Response.GetFinalizeBlock(), nil
}

//----------------------------------------

func (cli *socketClient) doRequest(ctx context.Context, req *types.Request) (*types.Response, error) {
    reqres := makeReqRes(req)
// queueRequest enqueues req onto the queue. The request can break early if
// the context is canceled. If the queue is full, this method blocks to allow
// the request to be placed onto the queue. This has the effect of creating an
// unbounded queue of goroutines waiting to write to this queue, which is a bit
// antithetical to the purposes of a queue; however, undoing this behavior has
// dangerous implications for the upstream code that relies on it.
// Remove at your peril.
//
// The caller is responsible for checking cli.Error.
func (cli *socketClient) queueRequest(ctx context.Context, req *types.Request) (*ReqRes, error) {
    reqres := NewReqRes(req)

    select {
    case cli.reqQueue <- reqres:
    case <-ctx.Done():
        return nil, fmt.Errorf("can't queue req: %w", ctx.Err())
    }

    select {
    case <-reqres.signal:
        if err := cli.Error(); err != nil {
            return nil, err
        }

        return reqres.Response, nil
    case <-ctx.Done():
        return nil, ctx.Err()
    }

    return reqres, nil
}

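The enqueue-or-cancel select at the top of queueRequest is a standard Go idiom: a send on a bounded channel races against the caller's context, so a full queue can never block past cancellation. A self-contained sketch of the same pattern, with invented names, runnable on its own:

package main

import (
    "context"
    "fmt"
)

// enqueue blocks until the bounded queue accepts the item or the caller's
// context is canceled, mirroring the first select in queueRequest above.
func enqueue(ctx context.Context, queue chan<- int, item int) error {
    select {
    case queue <- item:
        return nil
    case <-ctx.Done():
        return fmt.Errorf("can't queue req: %w", ctx.Err())
    }
}

func main() {
    queue := make(chan int, 1) // bounded, like reqQueue
    ctx, cancel := context.WithCancel(context.Background())

    fmt.Println(enqueue(ctx, queue, 1)) // <nil>: the buffer had room
    cancel()
    fmt.Println(enqueue(ctx, queue, 2)) // buffer full, ctx canceled: error
}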
func (cli *socketClient) queueRequestAndFlush(
    ctx context.Context,
    req *types.Request,
) (*ReqRes, error) {

    reqres, err := cli.queueRequest(ctx, req)
    if err != nil {
        return nil, queueErr(err)
    }

    if err := cli.Flush(ctx); err != nil {
        return nil, err
    }

    return reqres, cli.Error()
}

func queueErr(e error) error {
    return fmt.Errorf("can't queue req: %w", e)
}

// drainQueue marks as complete and discards all remaining pending requests
@@ -348,8 +439,8 @@ func (cli *socketClient) drainQueue(ctx context.Context) {

    // mark all in-flight messages as resolved (they will get cli.Error())
    for req := cli.reqSent.Front(); req != nil; req = req.Next() {
        reqres := req.Value.(*requestAndResponse)
        reqres.markDone()
        reqres := req.Value.(*ReqRes)
        reqres.SetDone()
    }

    // Mark all queued messages as resolved.
@@ -362,7 +453,7 @@ func (cli *socketClient) drainQueue(ctx context.Context) {
    case <-ctx.Done():
        return
    case reqres := <-cli.reqQueue:
        reqres.markDone()
        reqres.SetDone()
    default:
        return
    }

@@ -387,8 +478,6 @@ func resMatchesReq(req *types.Request, res *types.Response) (ok bool) {
        _, ok = res.Value.(*types.Response_Query)
    case *types.Request_InitChain:
        _, ok = res.Value.(*types.Response_InitChain)
    case *types.Request_ProcessProposal:
        _, ok = res.Value.(*types.Response_ProcessProposal)
    case *types.Request_PrepareProposal:
        _, ok = res.Value.(*types.Response_PrepareProposal)
    case *types.Request_ExtendVote:

85
abci/client/socket_client_test.go
Normal file
85
abci/client/socket_client_test.go
Normal file
@@ -0,0 +1,85 @@
package abciclient_test

import (
    "context"
    "fmt"
    "testing"
    "time"

    "math/rand"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"

    abciclient "github.com/tendermint/tendermint/abci/client"
    "github.com/tendermint/tendermint/abci/server"
    "github.com/tendermint/tendermint/abci/types"
    "github.com/tendermint/tendermint/libs/log"
    "github.com/tendermint/tendermint/libs/service"
)

func TestProperSyncCalls(t *testing.T) {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    app := slowApp{}
    logger := log.NewNopLogger()

    _, c := setupClientServer(ctx, t, logger, app)

    resp := make(chan error, 1)
    go func() {
        rsp, err := c.FinalizeBlock(ctx, types.RequestFinalizeBlock{})
        assert.NoError(t, err)
        assert.NoError(t, c.Flush(ctx))
        assert.NotNil(t, rsp)
        select {
        case <-ctx.Done():
        case resp <- c.Error():
        }
    }()

    select {
    case <-time.After(time.Second):
        require.Fail(t, "No response arrived")
    case err, ok := <-resp:
        require.True(t, ok, "Must not close channel")
        assert.NoError(t, err, "This should return success")
    }
}

func setupClientServer(
    ctx context.Context,
    t *testing.T,
    logger log.Logger,
    app types.Application,
) (service.Service, abciclient.Client) {
    t.Helper()

    // some port between 20k and 30k
    port := 20000 + rand.Int31()%10000
    addr := fmt.Sprintf("localhost:%d", port)

    s, err := server.NewServer(logger, addr, "socket", app)
    require.NoError(t, err)
    require.NoError(t, s.Start(ctx))
    t.Cleanup(s.Wait)

    c := abciclient.NewSocketClient(logger, addr, true)
    require.NoError(t, c.Start(ctx))
    t.Cleanup(c.Wait)

    require.True(t, s.IsRunning())
    require.True(t, c.IsRunning())

    return s, c
}

type slowApp struct {
    types.BaseApplication
}

func (slowApp) FinalizeBlock(req types.RequestFinalizeBlock) types.ResponseFinalizeBlock {
    time.Sleep(200 * time.Millisecond)
    return types.ResponseFinalizeBlock{}
}

@@ -125,7 +125,7 @@ func addCommands(cmd *cobra.Command, logger log.Logger) {
    cmd.AddCommand(consoleCmd)
    cmd.AddCommand(echoCmd)
    cmd.AddCommand(infoCmd)
    cmd.AddCommand(finalizeBlockCmd)
    cmd.AddCommand(deliverTxCmd)
    cmd.AddCommand(checkTxCmd)
    cmd.AddCommand(commitCmd)
    cmd.AddCommand(versionCmd)

@@ -150,9 +150,10 @@ where example.file looks something like:

    check_tx 0x00
    check_tx 0xff
    finalize_block 0x00
    deliver_tx 0x00
    check_tx 0x00
    finalize_block 0x01 0x04 0xff
    deliver_tx 0x01
    deliver_tx 0x04
    info
`,
    Args: cobra.ExactArgs(0),

@@ -168,7 +169,7 @@ This command opens an interactive console for running any of the other commands
without opening a new connection each time
`,
    Args:      cobra.ExactArgs(0),
    ValidArgs: []string{"echo", "info", "finalize_block", "check_tx", "commit", "query"},
    ValidArgs: []string{"echo", "info", "deliver_tx", "check_tx", "commit", "query"},
    RunE:      cmdConsole,
}

@@ -187,11 +188,11 @@ var infoCmd = &cobra.Command{
    RunE: cmdInfo,
}

var finalizeBlockCmd = &cobra.Command{
    Use:   "finalize_block",
    Short: "deliver a block of transactions to the application",
    Long:  "deliver a block of transactions to the application",
    Args:  cobra.MinimumNArgs(1),
var deliverTxCmd = &cobra.Command{
    Use:   "deliver_tx",
    Short: "deliver a new transaction to the application",
    Long:  "deliver a new transaction to the application",
    Args:  cobra.ExactArgs(1),
    RunE:  cmdFinalizeBlock,
}

@@ -425,7 +426,7 @@ func muxOnCommands(cmd *cobra.Command, pArgs []string) error {
        return cmdCheckTx(cmd, actualArgs)
    case "commit":
        return cmdCommit(cmd, actualArgs)
    case "finalize_block":
    case "deliver_tx":
        return cmdFinalizeBlock(cmd, actualArgs)
    case "echo":
        return cmdEcho(cmd, actualArgs)
@@ -499,23 +500,19 @@ func cmdFinalizeBlock(cmd *cobra.Command, args []string) error {
    if len(args) == 0 {
        printResponse(cmd, args, response{
            Code: codeBad,
            Log:  "Must provide at least one transaction",
            Log:  "want the tx",
        })
        return nil
    }
    txs := make([][]byte, len(args))
    for i, arg := range args {
        txBytes, err := stringOrHexToBytes(arg)
        if err != nil {
            return err
        }
        txs[i] = txBytes
    }
    res, err := client.FinalizeBlock(cmd.Context(), types.RequestFinalizeBlock{Txs: txs})
    txBytes, err := stringOrHexToBytes(args[0])
    if err != nil {
        return err
    }
    for _, tx := range res.TxResults {
    res, err := client.FinalizeBlock(cmd.Context(), types.RequestFinalizeBlock{Txs: [][]byte{txBytes}})
    if err != nil {
        return err
    }
    for _, tx := range res.Txs {
        printResponse(cmd, args, response{
            Code: tx.Code,
            Data: tx.Data,

@@ -31,18 +31,18 @@ func init() {
func TestKVStore(t *testing.T) {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()
    logger := log.NewNopLogger()
    logger := log.NewTestingLogger(t)

    t.Log("### Testing KVStore")
    logger.Info("### Testing KVStore")
    testBulk(ctx, t, logger, kvstore.NewApplication())
}

func TestBaseApp(t *testing.T) {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()
    logger := log.NewNopLogger()
    logger := log.NewTestingLogger(t)

    t.Log("### Testing BaseApp")
    logger.Info("### Testing BaseApp")
    testBulk(ctx, t, logger, types.NewBaseApplication())
}

@@ -50,9 +50,9 @@ func TestGRPC(t *testing.T) {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    logger := log.NewNopLogger()
    logger := log.NewTestingLogger(t)

    t.Log("### Testing GRPC")
    logger.Info("### Testing GRPC")
    testGRPCSync(ctx, t, logger, types.NewGRPCApplication(types.NewBaseApplication()))
}

@@ -84,8 +84,8 @@ func testBulk(ctx context.Context, t *testing.T, logger log.Logger, app types.Ap
    // Send bulk request
    res, err := client.FinalizeBlock(ctx, rfb)
    require.NoError(t, err)
    require.Equal(t, numDeliverTxs, len(res.TxResults), "Number of txs doesn't match")
    for _, tx := range res.TxResults {
    require.Equal(t, numDeliverTxs, len(res.Txs), "Number of txs doesn't match")
    for _, tx := range res.Txs {
        require.Equal(t, tx.Code, code.CodeTypeOK, "Tx failed")
    }

@@ -138,8 +138,8 @@ func testGRPCSync(ctx context.Context, t *testing.T, logger log.Logger, app type
    // Send request
    response, err := client.FinalizeBlock(ctx, &rfb)
    require.NoError(t, err, "Error in GRPC FinalizeBlock")
    require.Equal(t, numDeliverTxs, len(response.TxResults), "Number of txs returned via GRPC doesn't match")
    for _, tx := range response.TxResults {
    require.Equal(t, numDeliverTxs, len(response.Txs), "Number of txs returned via GRPC doesn't match")
    for _, tx := range response.Txs {
        require.Equal(t, tx.Code, code.CodeTypeOK, "Tx failed")
    }
}

@@ -2,21 +2,14 @@ package kvstore

import (
    "bytes"
    "encoding/base64"
    "encoding/binary"
    "encoding/json"
    "fmt"
    "strconv"
    "strings"
    "sync"

    dbm "github.com/tendermint/tm-db"

    "github.com/tendermint/tendermint/abci/example/code"
    "github.com/tendermint/tendermint/abci/types"
    "github.com/tendermint/tendermint/crypto/encoding"
    "github.com/tendermint/tendermint/libs/log"
    cryptoproto "github.com/tendermint/tendermint/proto/tendermint/crypto"
    "github.com/tendermint/tendermint/version"
)

@@ -72,41 +65,17 @@ var _ types.Application = (*Application)(nil)

type Application struct {
    types.BaseApplication
    mu sync.Mutex

    state        State
    RetainBlocks int64 // blocks to retain after commit (via ResponseCommit.RetainHeight)
    logger       log.Logger

    // validator set
    ValUpdates         []types.ValidatorUpdate
    valAddrToPubKeyMap map[string]cryptoproto.PublicKey
}

func NewApplication() *Application {
    return &Application{
        logger:             log.NewNopLogger(),
        state:              loadState(dbm.NewMemDB()),
        valAddrToPubKeyMap: make(map[string]cryptoproto.PublicKey),
    }
    state := loadState(dbm.NewMemDB())
    return &Application{state: state}
}

func (app *Application) InitChain(req types.RequestInitChain) types.ResponseInitChain {
    app.mu.Lock()
    defer app.mu.Unlock()

    for _, v := range req.Validators {
        r := app.updateValidator(v)
        if r.IsErr() {
            app.logger.Error("error updating validators", "r", r)
            panic("problem updating validators")
        }
    }
    return types.ResponseInitChain{}
}

func (app *Application) Info(req types.RequestInfo) types.ResponseInfo {
    app.mu.Lock()
    defer app.mu.Unlock()
func (app *Application) Info(req types.RequestInfo) (resInfo types.ResponseInfo) {
    return types.ResponseInfo{
        Data:    fmt.Sprintf("{\"size\":%v}", app.state.Size),
        Version: version.ABCIVersion,
@@ -116,20 +85,8 @@ func (app *Application) Info(req types.RequestInfo) types.ResponseInfo {
    }
}

// tx is either "val:pubkey!power" or "key=value" or just arbitrary bytes
func (app *Application) handleTx(tx []byte) *types.ExecTxResult {
    // if it starts with "val:", update the validator set
    // format is "val:pubkey!power"
    if isValidatorTx(tx) {
        // update validators in the merkle tree
        // and in app.ValUpdates
        return app.execValidatorTx(tx)
    }

    if isPrepareTx(tx) {
        return app.execPrepareTx(tx)
    }

// tx is either "key=value" or just arbitrary bytes
func (app *Application) HandleTx(tx []byte) *types.ResponseDeliverTx {
    var key, value string
    parts := bytes.Split(tx, []byte("="))
    if len(parts) == 2 {
@@ -156,56 +113,22 @@ func (app *Application) handleTx(tx []byte) *types.ExecTxResult {
        },
    }

    return &types.ExecTxResult{Code: code.CodeTypeOK, Events: events}
}

func (app *Application) Close() error {
    app.mu.Lock()
    defer app.mu.Unlock()

    return app.state.db.Close()
    return &types.ResponseDeliverTx{Code: code.CodeTypeOK, Events: events}
}

func (app *Application) FinalizeBlock(req types.RequestFinalizeBlock) types.ResponseFinalizeBlock {
    app.mu.Lock()
    defer app.mu.Unlock()

    // reset valset changes
    app.ValUpdates = make([]types.ValidatorUpdate, 0)

    // Punish validators who committed equivocation.
    for _, ev := range req.ByzantineValidators {
        if ev.Type == types.EvidenceType_DUPLICATE_VOTE {
            addr := string(ev.Validator.Address)
            if pubKey, ok := app.valAddrToPubKeyMap[addr]; ok {
                app.updateValidator(types.ValidatorUpdate{
                    PubKey: pubKey,
                    Power:  ev.Validator.Power - 1,
                })
                app.logger.Info("Decreased val power by 1 because of the equivocation",
                    "val", addr)
            } else {
                panic(fmt.Errorf("wanted to punish val %q but can't find it", addr))
            }
        }
    }

    respTxs := make([]*types.ExecTxResult, len(req.Txs))
    txs := make([]*types.ResponseDeliverTx, len(req.Txs))
    for i, tx := range req.Txs {
        respTxs[i] = app.handleTx(tx)
        txs[i] = app.HandleTx(tx)
    }

    return types.ResponseFinalizeBlock{TxResults: respTxs, ValidatorUpdates: app.ValUpdates}
    return types.ResponseFinalizeBlock{Txs: txs}
}

func (*Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
func (app *Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
    return types.ResponseCheckTx{Code: code.CodeTypeOK, GasWanted: 1}
}

func (app *Application) Commit() types.ResponseCommit {
    app.mu.Lock()
    defer app.mu.Unlock()

    // Using a memdb - just return the big endian size of the db
    appHash := make([]byte, 8)
    binary.PutVarint(appHash, app.state.Size)
@@ -221,239 +144,43 @@ func (app *Application) Commit() types.ResponseCommit {
}

// Returns an associated value or nil if missing.
func (app *Application) Query(reqQuery types.RequestQuery) types.ResponseQuery {
    app.mu.Lock()
    defer app.mu.Unlock()

    if reqQuery.Path == "/val" {
        key := []byte("val:" + string(reqQuery.Data))
        value, err := app.state.db.Get(key)
        if err != nil {
            panic(err)
        }

        return types.ResponseQuery{
            Key:   reqQuery.Data,
            Value: value,
        }
    }

func (app *Application) Query(reqQuery types.RequestQuery) (resQuery types.ResponseQuery) {
    if reqQuery.Prove {
        value, err := app.state.db.Get(prefixKey(reqQuery.Data))
        if err != nil {
            panic(err)
        }

        resQuery := types.ResponseQuery{
            Index:  -1,
            Key:    reqQuery.Data,
            Value:  value,
            Height: app.state.Height,
        }

        if value == nil {
            resQuery.Log = "does not exist"
        } else {
            resQuery.Log = "exists"
        }
        resQuery.Index = -1 // TODO make Proof return index
        resQuery.Key = reqQuery.Data
        resQuery.Value = value
        resQuery.Height = app.state.Height

        return resQuery
        return
    }

    resQuery.Key = reqQuery.Data
    value, err := app.state.db.Get(prefixKey(reqQuery.Data))
    if err != nil {
        panic(err)
    }

    resQuery := types.ResponseQuery{
        Key:    reqQuery.Data,
        Value:  value,
        Height: app.state.Height,
    }

    if value == nil {
        resQuery.Log = "does not exist"
    } else {
        resQuery.Log = "exists"
    }
    resQuery.Value = value
    resQuery.Height = app.state.Height

    return resQuery
}

func (app *Application) PrepareProposal(req types.RequestPrepareProposal) types.ResponsePrepareProposal {
    app.mu.Lock()
    defer app.mu.Unlock()

    return types.ResponsePrepareProposal{TxRecords: app.substPrepareTx(req.Txs)}
}

func (*Application) ProcessProposal(req types.RequestProcessProposal) types.ResponseProcessProposal {
    for _, tx := range req.Txs {
        if len(tx) == 0 {
            return types.ResponseProcessProposal{Accept: false}
        }
    }
    return types.ResponseProcessProposal{Accept: true}
}

//---------------------------------------------
// update validators

func (app *Application) Validators() (validators []types.ValidatorUpdate) {
    app.mu.Lock()
    defer app.mu.Unlock()

    itr, err := app.state.db.Iterator(nil, nil)
    if err != nil {
        panic(err)
    }
    for ; itr.Valid(); itr.Next() {
        if isValidatorTx(itr.Key()) {
            validator := new(types.ValidatorUpdate)
            err := types.ReadMessage(bytes.NewBuffer(itr.Value()), validator)
            if err != nil {
                panic(err)
            }
            validators = append(validators, *validator)
        }
    }
    if err = itr.Error(); err != nil {
        panic(err)
    }
    return
}

func MakeValSetChangeTx(pubkey cryptoproto.PublicKey, power int64) []byte {
    pk, err := encoding.PubKeyFromProto(pubkey)
    if err != nil {
        panic(err)
    }
    pubStr := base64.StdEncoding.EncodeToString(pk.Bytes())
    return []byte(fmt.Sprintf("val:%s!%d", pubStr, power))
}

func isValidatorTx(tx []byte) bool {
    return strings.HasPrefix(string(tx), ValidatorSetChangePrefix)
}

// format is "val:pubkey!power"
// pubkey is a base64-encoded 32-byte ed25519 key
func (app *Application) execValidatorTx(tx []byte) *types.ExecTxResult {
    tx = tx[len(ValidatorSetChangePrefix):]

    // get the pubkey and power
    pubKeyAndPower := strings.Split(string(tx), "!")
    if len(pubKeyAndPower) != 2 {
        return &types.ExecTxResult{
            Code: code.CodeTypeEncodingError,
            Log:  fmt.Sprintf("Expected 'pubkey!power'. Got %v", pubKeyAndPower)}
    }
    pubkeyS, powerS := pubKeyAndPower[0], pubKeyAndPower[1]

    // decode the pubkey
    pubkey, err := base64.StdEncoding.DecodeString(pubkeyS)
    if err != nil {
        return &types.ExecTxResult{
            Code: code.CodeTypeEncodingError,
            Log:  fmt.Sprintf("Pubkey (%s) is invalid base64", pubkeyS)}
    }

    // decode the power
    power, err := strconv.ParseInt(powerS, 10, 64)
    if err != nil {
        return &types.ExecTxResult{
            Code: code.CodeTypeEncodingError,
            Log:  fmt.Sprintf("Power (%s) is not an int", powerS)}
    }

    // update
    return app.updateValidator(types.UpdateValidator(pubkey, power, ""))
}

// add, update, or remove a validator
func (app *Application) updateValidator(v types.ValidatorUpdate) *types.ExecTxResult {
    pubkey, err := encoding.PubKeyFromProto(v.PubKey)
    if err != nil {
        panic(fmt.Errorf("can't decode public key: %w", err))
    }
    key := []byte("val:" + string(pubkey.Bytes()))

    if v.Power == 0 {
        // remove validator
        hasKey, err := app.state.db.Has(key)
        if err != nil {
            panic(err)
        }
        if !hasKey {
            pubStr := base64.StdEncoding.EncodeToString(pubkey.Bytes())
            return &types.ExecTxResult{
                Code: code.CodeTypeUnauthorized,
                Log:  fmt.Sprintf("Cannot remove non-existent validator %s", pubStr)}
        }
        if err = app.state.db.Delete(key); err != nil {
            panic(err)
        }
        delete(app.valAddrToPubKeyMap, string(pubkey.Address()))
    } else {
        // add or update validator
        value := bytes.NewBuffer(make([]byte, 0))
        if err := types.WriteMessage(&v, value); err != nil {
            return &types.ExecTxResult{
                Code: code.CodeTypeEncodingError,
                Log:  fmt.Sprintf("error encoding validator: %v", err)}
        }
        if err = app.state.db.Set(key, value.Bytes()); err != nil {
            panic(err)
        }
        app.valAddrToPubKeyMap[string(pubkey.Address())] = v.PubKey
    }

    // we only update the changes array if we successfully updated the tree
    app.ValUpdates = append(app.ValUpdates, v)

    return &types.ExecTxResult{Code: code.CodeTypeOK}
}

// -----------------------------
// prepare proposal machinery

const PreparePrefix = "prepare"

func isPrepareTx(tx []byte) bool {
    return bytes.HasPrefix(tx, []byte(PreparePrefix))
}

// execPrepareTx is a noop. The tx data is treated as a placeholder
// and is substituted at PrepareProposal time.
func (app *Application) execPrepareTx(tx []byte) *types.ExecTxResult {
    // noop
    return &types.ExecTxResult{}
}

// substPrepareTx substitutes all the transactions prefixed with 'prepare' in the
// proposal for transactions with the prefix stripped.
// It marks all of the original transactions as 'REMOVED' so that
// Tendermint will remove them from its mempool.
func (app *Application) substPrepareTx(blockData [][]byte) []*types.TxRecord {
    trs := make([]*types.TxRecord, len(blockData))
    var removed []*types.TxRecord
    for i, tx := range blockData {
        if isPrepareTx(tx) {
            removed = append(removed, &types.TxRecord{
                Tx:     tx,
                Action: types.TxRecord_REMOVED,
            })
            trs[i] = &types.TxRecord{
                Tx:     bytes.TrimPrefix(tx, []byte(PreparePrefix)),
                Action: types.TxRecord_ADDED,
            }
            continue
        }
        trs[i] = &types.TxRecord{
            Tx:     tx,
            Action: types.TxRecord_UNMODIFIED,
        }
    }

    return append(trs, removed...)
func (app *Application) PrepareProposal(
    req types.RequestPrepareProposal) types.ResponsePrepareProposal {
    return types.ResponsePrepareProposal{
        BlockData: req.BlockData}
}

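To make the removed TxRecord-based substitution above concrete, here is a standalone sketch of the same transformation with simplified types (the real code uses types.TxRecord and the TxRecord_* actions shown in the diff): a proposal of ["preparefoo", "bar"] yields ADDED "foo", UNMODIFIED "bar", and REMOVED "preparefoo".

package main

import (
    "bytes"
    "fmt"
)

const preparePrefix = "prepare"

type txRecord struct {
    action string
    tx     []byte
}

// substPrepareTxSketch mirrors the removed kvstore logic: 'prepare'-prefixed
// txs are re-added with the prefix stripped, and the originals are appended
// as REMOVED so the mempool can drop them.
func substPrepareTxSketch(blockData [][]byte) []txRecord {
    records := make([]txRecord, 0, len(blockData))
    var removed []txRecord
    for _, tx := range blockData {
        if bytes.HasPrefix(tx, []byte(preparePrefix)) {
            removed = append(removed, txRecord{action: "REMOVED", tx: tx})
            records = append(records, txRecord{
                action: "ADDED",
                tx:     bytes.TrimPrefix(tx, []byte(preparePrefix)),
            })
            continue
        }
        records = append(records, txRecord{action: "UNMODIFIED", tx: tx})
    }
    return append(records, removed...)
}

func main() {
    for _, r := range substPrepareTxSketch([][]byte{[]byte("preparefoo"), []byte("bar")}) {
        fmt.Printf("%-10s %s\n", r.action, r.tx)
    }
}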
@@ -6,7 +6,6 @@ import (
    "sort"
    "testing"

    "github.com/fortytw2/leaktest"
    "github.com/stretchr/testify/require"

    "github.com/tendermint/tendermint/libs/log"
@@ -27,12 +26,12 @@ const (
func testKVStore(t *testing.T, app types.Application, tx []byte, key, value string) {
    req := types.RequestFinalizeBlock{Txs: [][]byte{tx}}
    ar := app.FinalizeBlock(req)
    require.Equal(t, 1, len(ar.TxResults))
    require.False(t, ar.TxResults[0].IsErr())
    require.Equal(t, 1, len(ar.Txs))
    require.False(t, ar.Txs[0].IsErr())
    // repeating tx doesn't raise error
    ar = app.FinalizeBlock(req)
    require.Equal(t, 1, len(ar.TxResults))
    require.False(t, ar.TxResults[0].IsErr())
    require.Equal(t, 1, len(ar.Txs))
    require.False(t, ar.Txs[0].IsErr())
    // commit
    app.Commit()

@@ -75,7 +74,7 @@ func TestKVStoreKV(t *testing.T) {

func TestPersistentKVStoreKV(t *testing.T) {
    dir := t.TempDir()
    logger := log.NewNopLogger()
    logger := log.NewTestingLogger(t)

    kvstore := NewPersistentKVStoreApplication(logger, dir)
    key := testKey
@@ -90,7 +89,7 @@ func TestPersistentKVStoreKV(t *testing.T) {

func TestPersistentKVStoreInfo(t *testing.T) {
    dir := t.TempDir()
    logger := log.NewNopLogger()
    logger := log.NewTestingLogger(t)

    kvstore := NewPersistentKVStoreApplication(logger, dir)
    InitKVStore(kvstore)
@@ -107,7 +106,7 @@ func TestPersistentKVStoreInfo(t *testing.T) {
    header := tmproto.Header{
        Height: height,
    }
    kvstore.FinalizeBlock(types.RequestFinalizeBlock{Hash: hash, Header: header})
    kvstore.FinalizeBlock(types.RequestFinalizeBlock{Hash: hash, Header: header, Height: height})
    kvstore.Commit()

    resInfo = kvstore.Info(types.RequestInfo{})
@@ -119,7 +118,10 @@ func TestPersistentKVStoreInfo(t *testing.T) {

// add a validator, remove a validator, update a validator
func TestValUpdates(t *testing.T) {
    kvstore := NewApplication()
    dir := t.TempDir()
    logger := log.NewTestingLogger(t)

    kvstore := NewPersistentKVStoreApplication(logger, dir)

    // init with some validators
    total := 10
@@ -196,6 +198,7 @@ func makeApplyBlock(
    resFinalizeBlock := kvstore.FinalizeBlock(types.RequestFinalizeBlock{
        Hash:   hash,
        Header: header,
        Height: height,
        Txs:    txs,
    })

@@ -207,7 +210,6 @@ func makeApplyBlock(

// order doesn't matter
func valsEqual(t *testing.T, vals1, vals2 []types.ValidatorUpdate) {
    t.Helper()
    if len(vals1) != len(vals2) {
        t.Fatalf("vals dont match in len. got %d, expected %d", len(vals2), len(vals1))
    }
@@ -229,11 +231,9 @@ func makeSocketClientServer(
    app types.Application,
    name string,
) (abciclient.Client, service.Service, error) {
    t.Helper()

    ctx, cancel := context.WithCancel(ctx)
    t.Cleanup(cancel)
    t.Cleanup(leaktest.Check(t))

    // Start the listener
    socket := fmt.Sprintf("unix://%s.sock", name)
@@ -263,8 +263,6 @@ func makeGRPCClientServer(
) (abciclient.Client, service.Service, error) {
    ctx, cancel := context.WithCancel(ctx)
    t.Cleanup(cancel)
    t.Cleanup(leaktest.Check(t))

    // Start the listener
    socket := fmt.Sprintf("unix://%s.sock", name)

@@ -288,7 +286,7 @@ func makeGRPCClientServer(
func TestClientServer(t *testing.T) {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()
    logger := log.NewNopLogger()
    logger := log.NewTestingLogger(t)

    // set up socket app
    kvstore := NewApplication()
@@ -325,13 +323,13 @@ func runClientTests(ctx context.Context, t *testing.T, client abciclient.Client)
func testClient(ctx context.Context, t *testing.T, app abciclient.Client, tx []byte, key, value string) {
    ar, err := app.FinalizeBlock(ctx, types.RequestFinalizeBlock{Txs: [][]byte{tx}})
    require.NoError(t, err)
    require.Equal(t, 1, len(ar.TxResults))
    require.False(t, ar.TxResults[0].IsErr())
    require.Equal(t, 1, len(ar.Txs))
    require.False(t, ar.Txs[0].IsErr())
    // repeating FinalizeBlock doesn't raise error
    ar, err = app.FinalizeBlock(ctx, types.RequestFinalizeBlock{Txs: [][]byte{tx}})
    require.NoError(t, err)
    require.Equal(t, 1, len(ar.TxResults))
    require.False(t, ar.TxResults[0].IsErr())
    require.Equal(t, 1, len(ar.Txs))
    require.False(t, ar.Txs[0].IsErr())
    // commit
    _, err = app.Commit(ctx)
    require.NoError(t, err)

@@ -2,10 +2,16 @@ package kvstore

import (
    "bytes"
    "encoding/base64"
    "fmt"
    "strconv"
    "strings"

    dbm "github.com/tendermint/tm-db"

    "github.com/tendermint/tendermint/abci/example/code"
    "github.com/tendermint/tendermint/abci/types"
    "github.com/tendermint/tendermint/crypto/encoding"
    "github.com/tendermint/tendermint/libs/log"
    cryptoproto "github.com/tendermint/tendermint/proto/tendermint/crypto"
    ptypes "github.com/tendermint/tendermint/proto/tendermint/types"
@@ -20,42 +26,325 @@ const (
var _ types.Application = (*PersistentKVStoreApplication)(nil)

type PersistentKVStoreApplication struct {
    *Application
    app *Application

    // validator set
    ValUpdates []types.ValidatorUpdate

    valAddrToPubKeyMap map[string]cryptoproto.PublicKey

    logger log.Logger
}

func NewPersistentKVStoreApplication(logger log.Logger, dbDir string) *PersistentKVStoreApplication {
    db, err := dbm.NewGoLevelDB("kvstore", dbDir)
    name := "kvstore"
    db, err := dbm.NewGoLevelDB(name, dbDir)
    if err != nil {
        panic(err)
    }

    state := loadState(db)

    return &PersistentKVStoreApplication{
        Application: &Application{
            valAddrToPubKeyMap: make(map[string]cryptoproto.PublicKey),
            state:              loadState(db),
            logger:             logger,
        },
        app:                &Application{state: state},
        valAddrToPubKeyMap: make(map[string]cryptoproto.PublicKey),
        logger:             logger,
    }
}

func (app *PersistentKVStoreApplication) OfferSnapshot(req types.RequestOfferSnapshot) types.ResponseOfferSnapshot {
func (app *PersistentKVStoreApplication) Close() error {
    return app.app.state.db.Close()
}

func (app *PersistentKVStoreApplication) Info(req types.RequestInfo) types.ResponseInfo {
    res := app.app.Info(req)
    res.LastBlockHeight = app.app.state.Height
    res.LastBlockAppHash = app.app.state.AppHash
    return res
}

// tx is either "val:pubkey!power" or "key=value" or just arbitrary bytes
func (app *PersistentKVStoreApplication) HandleTx(tx []byte) *types.ResponseDeliverTx {
    // if it starts with "val:", update the validator set
    // format is "val:pubkey!power"
    if isValidatorTx(tx) {
        // update validators in the merkle tree
        // and in app.ValUpdates
        return app.execValidatorTx(tx)
    }

    if isPrepareTx(tx) {
        return app.execPrepareTx(tx)
    }

    // otherwise, update the key-value store
    return app.app.HandleTx(tx)
}

func (app *PersistentKVStoreApplication) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
    return app.app.CheckTx(req)
}

// Commit will panic if InitChain was not called
func (app *PersistentKVStoreApplication) Commit() types.ResponseCommit {
    return app.app.Commit()
}

// When path=/val and data={validator address}, returns the validator update (types.ValidatorUpdate) varint encoded.
// For any other path, returns an associated value or nil if missing.
func (app *PersistentKVStoreApplication) Query(reqQuery types.RequestQuery) (resQuery types.ResponseQuery) {
    switch reqQuery.Path {
    case "/val":
        key := []byte("val:" + string(reqQuery.Data))
        value, err := app.app.state.db.Get(key)
        if err != nil {
            panic(err)
        }

        resQuery.Key = reqQuery.Data
        resQuery.Value = value
        return
    default:
        return app.app.Query(reqQuery)
    }
}

// Save the validators in the merkle tree
func (app *PersistentKVStoreApplication) InitChain(req types.RequestInitChain) types.ResponseInitChain {
    for _, v := range req.Validators {
        r := app.updateValidator(v)
        if r.IsErr() {
            app.logger.Error("error updating validators", "r", r)
        }
    }
    return types.ResponseInitChain{}
}

// Track the block hash and header information
// Execute transactions
// Update the validator set
func (app *PersistentKVStoreApplication) FinalizeBlock(req types.RequestFinalizeBlock) types.ResponseFinalizeBlock {
    // reset valset changes
    app.ValUpdates = make([]types.ValidatorUpdate, 0)

    // Punish validators who committed equivocation.
    for _, ev := range req.ByzantineValidators {
        if ev.Type == types.EvidenceType_DUPLICATE_VOTE {
            addr := string(ev.Validator.Address)
            if pubKey, ok := app.valAddrToPubKeyMap[addr]; ok {
                app.updateValidator(types.ValidatorUpdate{
                    PubKey: pubKey,
                    Power:  ev.Validator.Power - 1,
                })
                app.logger.Info("Decreased val power by 1 because of the equivocation",
                    "val", addr)
            } else {
                app.logger.Error("Wanted to punish val, but can't find it",
                    "val", addr)
            }
        }
    }

    respTxs := make([]*types.ResponseDeliverTx, len(req.Txs))
    for i, tx := range req.Txs {
        respTxs[i] = app.HandleTx(tx)
    }

    return types.ResponseFinalizeBlock{Txs: respTxs, ValidatorUpdates: app.ValUpdates}
}

func (app *PersistentKVStoreApplication) ListSnapshots(
    req types.RequestListSnapshots) types.ResponseListSnapshots {
    return types.ResponseListSnapshots{}
}

func (app *PersistentKVStoreApplication) LoadSnapshotChunk(
    req types.RequestLoadSnapshotChunk) types.ResponseLoadSnapshotChunk {
    return types.ResponseLoadSnapshotChunk{}
}

func (app *PersistentKVStoreApplication) OfferSnapshot(
    req types.RequestOfferSnapshot) types.ResponseOfferSnapshot {
    return types.ResponseOfferSnapshot{Result: types.ResponseOfferSnapshot_ABORT}
}

func (app *PersistentKVStoreApplication) ApplySnapshotChunk(req types.RequestApplySnapshotChunk) types.ResponseApplySnapshotChunk {
func (app *PersistentKVStoreApplication) ApplySnapshotChunk(
    req types.RequestApplySnapshotChunk) types.ResponseApplySnapshotChunk {
    return types.ResponseApplySnapshotChunk{Result: types.ResponseApplySnapshotChunk_ABORT}
}

func (app *PersistentKVStoreApplication) ExtendVote(req types.RequestExtendVote) types.ResponseExtendVote {
    return types.ResponseExtendVote{VoteExtension: ConstructVoteExtension(req.Vote.ValidatorAddress)}
func (app *PersistentKVStoreApplication) ExtendVote(
    req types.RequestExtendVote) types.ResponseExtendVote {
    return types.ResponseExtendVote{
        VoteExtension: ConstructVoteExtension(req.Vote.ValidatorAddress),
    }
}

func (app *PersistentKVStoreApplication) VerifyVoteExtension(req types.RequestVerifyVoteExtension) types.ResponseVerifyVoteExtension {
    return types.RespondVerifyVoteExtension(app.verifyExtension(req.Vote.ValidatorAddress, req.Vote.VoteExtension))
func (app *PersistentKVStoreApplication) VerifyVoteExtension(
    req types.RequestVerifyVoteExtension) types.ResponseVerifyVoteExtension {
    return types.RespondVerifyVoteExtension(
        app.verifyExtension(req.Vote.ValidatorAddress, req.Vote.VoteExtension))
}

func (app *PersistentKVStoreApplication) PrepareProposal(
    req types.RequestPrepareProposal) types.ResponsePrepareProposal {
    return types.ResponsePrepareProposal{BlockData: app.substPrepareTx(req.BlockData)}
}

func (app *PersistentKVStoreApplication) ProcessProposal(
    req types.RequestProcessProposal) types.ResponseProcessProposal {
    for _, tx := range req.Txs {
        if len(tx) == 0 {
            return types.ResponseProcessProposal{Result: types.ResponseProcessProposal_REJECT}
        }
    }
    return types.ResponseProcessProposal{Result: types.ResponseProcessProposal_ACCEPT}
}

//---------------------------------------------
// update validators

func (app *PersistentKVStoreApplication) Validators() (validators []types.ValidatorUpdate) {
    itr, err := app.app.state.db.Iterator(nil, nil)
    if err != nil {
        panic(err)
    }
    for ; itr.Valid(); itr.Next() {
        if isValidatorTx(itr.Key()) {
            validator := new(types.ValidatorUpdate)
            err := types.ReadMessage(bytes.NewBuffer(itr.Value()), validator)
            if err != nil {
                panic(err)
            }
            validators = append(validators, *validator)
        }
    }
    if err = itr.Error(); err != nil {
        panic(err)
    }
    return
}

func MakeValSetChangeTx(pubkey cryptoproto.PublicKey, power int64) []byte {
    pk, err := encoding.PubKeyFromProto(pubkey)
    if err != nil {
        panic(err)
    }
    pubStr := base64.StdEncoding.EncodeToString(pk.Bytes())
    return []byte(fmt.Sprintf("val:%s!%d", pubStr, power))
}

func isValidatorTx(tx []byte) bool {
    return strings.HasPrefix(string(tx), ValidatorSetChangePrefix)
}

// format is "val:pubkey!power"
// pubkey is a base64-encoded 32-byte ed25519 key
func (app *PersistentKVStoreApplication) execValidatorTx(tx []byte) *types.ResponseDeliverTx {
    tx = tx[len(ValidatorSetChangePrefix):]

    // get the pubkey and power
    pubKeyAndPower := strings.Split(string(tx), "!")
    if len(pubKeyAndPower) != 2 {
        return &types.ResponseDeliverTx{
            Code: code.CodeTypeEncodingError,
            Log:  fmt.Sprintf("Expected 'pubkey!power'. Got %v", pubKeyAndPower)}
    }
    pubkeyS, powerS := pubKeyAndPower[0], pubKeyAndPower[1]

    // decode the pubkey
    pubkey, err := base64.StdEncoding.DecodeString(pubkeyS)
    if err != nil {
        return &types.ResponseDeliverTx{
            Code: code.CodeTypeEncodingError,
            Log:  fmt.Sprintf("Pubkey (%s) is invalid base64", pubkeyS)}
    }

    // decode the power
    power, err := strconv.ParseInt(powerS, 10, 64)
    if err != nil {
        return &types.ResponseDeliverTx{
            Code: code.CodeTypeEncodingError,
            Log:  fmt.Sprintf("Power (%s) is not an int", powerS)}
    }

    // update
    return app.updateValidator(types.UpdateValidator(pubkey, power, ""))
}

// add, update, or remove a validator
func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate) *types.ResponseDeliverTx {
    pubkey, err := encoding.PubKeyFromProto(v.PubKey)
    if err != nil {
        panic(fmt.Errorf("can't decode public key: %w", err))
    }
    key := []byte("val:" + string(pubkey.Bytes()))

    if v.Power == 0 {
        // remove validator
        hasKey, err := app.app.state.db.Has(key)
        if err != nil {
            panic(err)
        }
        if !hasKey {
            pubStr := base64.StdEncoding.EncodeToString(pubkey.Bytes())
            return &types.ResponseDeliverTx{
                Code: code.CodeTypeUnauthorized,
                Log:  fmt.Sprintf("Cannot remove non-existent validator %s", pubStr)}
        }
        if err = app.app.state.db.Delete(key); err != nil {
            panic(err)
        }
        delete(app.valAddrToPubKeyMap, string(pubkey.Address()))
    } else {
        // add or update validator
        value := bytes.NewBuffer(make([]byte, 0))
        if err := types.WriteMessage(&v, value); err != nil {
            return &types.ResponseDeliverTx{
                Code: code.CodeTypeEncodingError,
                Log:  fmt.Sprintf("error encoding validator: %v", err)}
        }
        if err = app.app.state.db.Set(key, value.Bytes()); err != nil {
            panic(err)
        }
        app.valAddrToPubKeyMap[string(pubkey.Address())] = v.PubKey
    }

    // we only update the changes array if we successfully updated the tree
    app.ValUpdates = append(app.ValUpdates, v)

    return &types.ResponseDeliverTx{Code: code.CodeTypeOK}
}

// -----------------------------

const PreparePrefix = "prepare"

func isPrepareTx(tx []byte) bool {
    return strings.HasPrefix(string(tx), PreparePrefix)
}

// execPrepareTx is noop. tx data is considered as placeholder
|
||||
// and is substitute at the PrepareProposal.
|
||||
func (app *PersistentKVStoreApplication) execPrepareTx(tx []byte) *types.ResponseDeliverTx {
|
||||
// noop
|
||||
return &types.ResponseDeliverTx{}
|
||||
}
|
||||
|
||||
// substPrepareTx subst all the preparetx in the blockdata
|
||||
// to null string(could be any arbitrary string).
|
||||
func (app *PersistentKVStoreApplication) substPrepareTx(blockData [][]byte) [][]byte {
|
||||
// TODO: this mechanism will change with the current spec of PrepareProposal
|
||||
// We now have a special type for marking a tx as changed
|
||||
for i, tx := range blockData {
|
||||
if isPrepareTx(tx) {
|
||||
blockData[i] = make([]byte, len(tx))
|
||||
}
|
||||
}
|
||||
|
||||
return blockData
|
||||
}
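As a hedged illustration of that substitution (the inputs are made up; app is a PersistentKVStoreApplication as above):

	// hypothetical inputs: one prepare tx and one ordinary tx
	blockData := [][]byte{[]byte("prepare-me"), []byte("normal-tx")}
	blockData = app.substPrepareTx(blockData)
	// blockData[0] is now ten zero bytes; blockData[1] is unchanged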

func ConstructVoteExtension(valAddr []byte) *ptypes.VoteExtension {
	return &ptypes.VoteExtension{
		AppDataToSign: valAddr,

@@ -16,9 +16,10 @@ type GRPCServer struct {
	service.BaseService
	logger log.Logger

	proto  string
	addr   string
	server *grpc.Server
	proto    string
	addr     string
	listener net.Listener
	server   *grpc.Server

	app types.ABCIApplicationServer
}
@@ -27,10 +28,11 @@ type GRPCServer struct {
func NewGRPCServer(logger log.Logger, protoAddr string, app types.ABCIApplicationServer) service.Service {
	proto, addr := tmnet.ProtocolAndAddress(protoAddr)
	s := &GRPCServer{
		logger: logger,
		proto:  proto,
		addr:   addr,
		app:    app,
		logger:   logger,
		proto:    proto,
		addr:     addr,
		listener: nil,
		app:      app,
	}
	s.BaseService = *service.NewBaseService(logger, "ABCIServer", s)
	return s
@@ -38,11 +40,13 @@ func NewGRPCServer(logger log.Logger, protoAddr string, app types.ABCIApplicatio

// OnStart starts the gRPC service.
func (s *GRPCServer) OnStart(ctx context.Context) error {

	ln, err := net.Listen(s.proto, s.addr)
	if err != nil {
		return err
	}

	s.listener = ln
	s.server = grpc.NewServer()
	types.RegisterABCIApplicationServer(s.server, s.app)

@@ -53,7 +57,7 @@ func (s *GRPCServer) OnStart(ctx context.Context) error {
		s.server.GracefulStop()
	}()

	if err := s.server.Serve(ln); err != nil {
	if err := s.server.Serve(s.listener); err != nil {
		s.logger.Error("error serving gRPC server", "err", err)
	}
}()
@@ -61,4 +65,6 @@ func (s *GRPCServer) OnStart(ctx context.Context) error {
}

// OnStop stops the gRPC server.
func (s *GRPCServer) OnStop() { s.server.Stop() }
func (s *GRPCServer) OnStop() {
	s.server.Stop()
}
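A rough usage sketch for the server above (the address is illustrative, and Start/Stop are assumed from the embedded service.BaseService, so treat this as a sketch rather than the canonical API):

	// illustrative only: serve an ABCI application over gRPC
	srv := NewGRPCServer(logger, "tcp://127.0.0.1:26658", app)
	if err := srv.Start(ctx); err != nil { // ctx assumed in scope
		logger.Error("failed to start ABCI gRPC server", "err", err)
	}
	defer srv.Stop()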

@@ -3,7 +3,6 @@ package server
import (
	"bufio"
	"context"
	"errors"
	"fmt"
	"io"
	"net"
@@ -27,21 +26,22 @@ type SocketServer struct {
	listener net.Listener

	connsMtx   sync.Mutex
	connsClose map[int]func()
	conns      map[int]net.Conn
	nextConnID int

	app types.Application
	appMtx sync.Mutex
	app    types.Application
}

func NewSocketServer(logger log.Logger, protoAddr string, app types.Application) service.Service {
	proto, addr := tmnet.ProtocolAndAddress(protoAddr)
	s := &SocketServer{
		logger:     logger,
		proto:      proto,
		addr:       addr,
		listener:   nil,
		app:        app,
		connsClose: make(map[int]func()),
		logger:   logger,
		proto:    proto,
		addr:     addr,
		listener: nil,
		app:      app,
		conns:    make(map[int]net.Conn),
	}
	s.BaseService = *service.NewBaseService(logger, "ABCIServer", s)
	return s
@@ -67,35 +67,44 @@ func (s *SocketServer) OnStop() {
	s.connsMtx.Lock()
	defer s.connsMtx.Unlock()

	for _, closer := range s.connsClose {
		closer()
	for id, conn := range s.conns {
		delete(s.conns, id)
		if err := conn.Close(); err != nil {
			s.logger.Error("error closing connection", "id", id, "conn", conn, "err", err)
		}
	}
}

func (s *SocketServer) addConn(closer func()) int {
func (s *SocketServer) addConn(conn net.Conn) int {
	s.connsMtx.Lock()
	defer s.connsMtx.Unlock()

	connID := s.nextConnID
	s.nextConnID++
	s.connsClose[connID] = closer
	s.conns[connID] = conn

	return connID
}

// deletes conn even if close errs
func (s *SocketServer) rmConn(connID int) {
func (s *SocketServer) rmConn(connID int) error {
	s.connsMtx.Lock()
	defer s.connsMtx.Unlock()
	if closer, ok := s.connsClose[connID]; ok {
		closer()
		delete(s.connsClose, connID)

	conn, ok := s.conns[connID]
	if !ok {
		return fmt.Errorf("connection %d does not exist", connID)
	}

	delete(s.conns, connID)
	return conn.Close()
}

func (s *SocketServer) acceptConnectionsRoutine(ctx context.Context) {
	for {
		if ctx.Err() != nil {
			return
		}

		// Accept a connection
@@ -109,134 +118,149 @@ func (s *SocketServer) acceptConnectionsRoutine(ctx context.Context) {
			continue
		}

		cctx, ccancel := context.WithCancel(ctx)
		connID := s.addConn(ccancel)
		s.logger.Info("Accepted a new connection")

		s.logger.Info("Accepted a new connection", "id", connID)
		connID := s.addConn(conn)

		closeConn := make(chan error, 2)              // Push to signal connection closed
		responses := make(chan *types.Response, 1000) // A channel to buffer responses

		once := &sync.Once{}
		closer := func(err error) {
			ccancel()
			once.Do(func() {
				if cerr := conn.Close(); err != nil {
					s.logger.Error("error closing connection",
						"id", connID,
						"close_err", cerr,
						"err", err)
				}
				s.rmConn(connID)

				switch {
				case errors.Is(err, context.Canceled):
					s.logger.Error("Connection terminated",
						"id", connID,
						"err", err)
				case errors.Is(err, context.DeadlineExceeded):
					s.logger.Error("Connection encountered timeout",
						"id", connID,
						"err", err)
				case errors.Is(err, io.EOF):
					s.logger.Error("Connection was closed by client",
						"id", connID)
				case err != nil:
					s.logger.Error("Connection error",
						"id", connID,
						"err", err)
				default:
					s.logger.Error("Connection was closed",
						"id", connID)
				}
			})
		}

		// Read requests from conn and deal with them
		go s.handleRequests(cctx, closer, conn, responses)
		go s.handleRequests(ctx, closeConn, conn, responses)
		// Pull responses from 'responses' and write them to conn.
		go s.handleResponses(cctx, closer, conn, responses)
		go s.handleResponses(ctx, closeConn, conn, responses)

		// Wait until signal to close connection
		go s.waitForClose(ctx, closeConn, connID)
	}
}

func (s *SocketServer) waitForClose(ctx context.Context, closeConn chan error, connID int) {
	defer func() {
		// Close the connection
		if err := s.rmConn(connID); err != nil {
			s.logger.Error("error closing connection", "err", err)
		}
	}()

	select {
	case <-ctx.Done():
		return
	case err := <-closeConn:
		switch {
		case err == io.EOF:
			s.logger.Error("Connection was closed by client")
		case err != nil:
			s.logger.Error("Connection error", "err", err)
		default:
			// never happens
			s.logger.Error("Connection was closed")
		}
	}
}

// Read requests from conn and deal with them
func (s *SocketServer) handleRequests(
	ctx context.Context,
	closer func(error),
	closeConn chan error,
	conn io.Reader,
	responses chan<- *types.Response,
) {
	var count int
	var bufReader = bufio.NewReader(conn)

	defer func() {
		// make sure to recover from any app-related panics to allow proper socket cleanup
		if r := recover(); r != nil {
		r := recover()
		if r != nil {
			const size = 64 << 10
			buf := make([]byte, size)
			buf = buf[:runtime.Stack(buf, false)]
			closer(fmt.Errorf("recovered from panic: %v\n%s", r, buf))
			err := fmt.Errorf("recovered from panic: %v\n%s", r, buf)
			closeConn <- err
			s.appMtx.Unlock()
		}
	}()

	for {
		req := &types.Request{}
		if err := types.ReadMessage(bufReader, req); err != nil {
			closer(fmt.Errorf("error reading message: %w", err))
		if ctx.Err() != nil {
			return
		}

		resp := s.processRequest(req)
		select {
		case <-ctx.Done():
			closer(ctx.Err())
		var req = &types.Request{}
		err := types.ReadMessage(bufReader, req)
		if err != nil {
			if err == io.EOF {
				closeConn <- err
			} else {
				closeConn <- fmt.Errorf("error reading message: %w", err)
			}
			return
		case responses <- resp:
		}
		s.appMtx.Lock()
		count++
		s.handleRequest(req, responses)
		s.appMtx.Unlock()
	}
}

func (s *SocketServer) processRequest(req *types.Request) *types.Response {
func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types.Response) {
	switch r := req.Value.(type) {
	case *types.Request_Echo:
		return types.ToResponseEcho(r.Echo.Message)
		responses <- types.ToResponseEcho(r.Echo.Message)
	case *types.Request_Flush:
		return types.ToResponseFlush()
		responses <- types.ToResponseFlush()
	case *types.Request_Info:
		return types.ToResponseInfo(s.app.Info(*r.Info))
		res := s.app.Info(*r.Info)
		responses <- types.ToResponseInfo(res)
	case *types.Request_CheckTx:
		return types.ToResponseCheckTx(s.app.CheckTx(*r.CheckTx))
		res := s.app.CheckTx(*r.CheckTx)
		responses <- types.ToResponseCheckTx(res)
	case *types.Request_Commit:
		return types.ToResponseCommit(s.app.Commit())
		res := s.app.Commit()
		responses <- types.ToResponseCommit(res)
	case *types.Request_Query:
		return types.ToResponseQuery(s.app.Query(*r.Query))
		res := s.app.Query(*r.Query)
		responses <- types.ToResponseQuery(res)
	case *types.Request_InitChain:
		return types.ToResponseInitChain(s.app.InitChain(*r.InitChain))
		res := s.app.InitChain(*r.InitChain)
		responses <- types.ToResponseInitChain(res)
	case *types.Request_ListSnapshots:
		return types.ToResponseListSnapshots(s.app.ListSnapshots(*r.ListSnapshots))
		res := s.app.ListSnapshots(*r.ListSnapshots)
		responses <- types.ToResponseListSnapshots(res)
	case *types.Request_OfferSnapshot:
		return types.ToResponseOfferSnapshot(s.app.OfferSnapshot(*r.OfferSnapshot))
		res := s.app.OfferSnapshot(*r.OfferSnapshot)
		responses <- types.ToResponseOfferSnapshot(res)
	case *types.Request_PrepareProposal:
		return types.ToResponsePrepareProposal(s.app.PrepareProposal(*r.PrepareProposal))
		res := s.app.PrepareProposal(*r.PrepareProposal)
		responses <- types.ToResponsePrepareProposal(res)
	case *types.Request_ProcessProposal:
		return types.ToResponseProcessProposal(s.app.ProcessProposal(*r.ProcessProposal))
		res := s.app.ProcessProposal(*r.ProcessProposal)
		responses <- types.ToResponseProcessProposal(res)
	case *types.Request_LoadSnapshotChunk:
		return types.ToResponseLoadSnapshotChunk(s.app.LoadSnapshotChunk(*r.LoadSnapshotChunk))
		res := s.app.LoadSnapshotChunk(*r.LoadSnapshotChunk)
		responses <- types.ToResponseLoadSnapshotChunk(res)
	case *types.Request_ApplySnapshotChunk:
		return types.ToResponseApplySnapshotChunk(s.app.ApplySnapshotChunk(*r.ApplySnapshotChunk))
		res := s.app.ApplySnapshotChunk(*r.ApplySnapshotChunk)
		responses <- types.ToResponseApplySnapshotChunk(res)
	case *types.Request_ExtendVote:
		return types.ToResponseExtendVote(s.app.ExtendVote(*r.ExtendVote))
		res := s.app.ExtendVote(*r.ExtendVote)
		responses <- types.ToResponseExtendVote(res)
	case *types.Request_VerifyVoteExtension:
		return types.ToResponseVerifyVoteExtension(s.app.VerifyVoteExtension(*r.VerifyVoteExtension))
		res := s.app.VerifyVoteExtension(*r.VerifyVoteExtension)
		responses <- types.ToResponseVerifyVoteExtension(res)
	case *types.Request_FinalizeBlock:
		return types.ToResponseFinalizeBlock(s.app.FinalizeBlock(*r.FinalizeBlock))
		res := s.app.FinalizeBlock(*r.FinalizeBlock)
		responses <- types.ToResponseFinalizeBlock(res)
	default:
		return types.ToResponseException("Unknown request")
		responses <- types.ToResponseException("Unknown request")
	}
}

// Pull responses from 'responses' and write them to conn.
func (s *SocketServer) handleResponses(
	ctx context.Context,
	closer func(error),
	closeConn chan error,
	conn io.Writer,
	responses <-chan *types.Response,
) {
@@ -244,15 +268,21 @@ func (s *SocketServer) handleResponses(
	for {
		select {
		case <-ctx.Done():
			closer(ctx.Err())
			return
		case res := <-responses:
			if err := types.WriteMessage(res, bw); err != nil {
				closer(fmt.Errorf("error writing message: %w", err))
				select {
				case <-ctx.Done():
				case closeConn <- fmt.Errorf("error writing message: %w", err):
				}
				return
			}
			if err := bw.Flush(); err != nil {
				closer(fmt.Errorf("error writing message: %w", err))
				select {
				case <-ctx.Done():
				case closeConn <- fmt.Errorf("error flushing write buffer: %w", err):
				}

				return
			}
		}

@@ -51,7 +51,7 @@ func Commit(ctx context.Context, client abciclient.Client, hashExp []byte) error

func FinalizeBlock(ctx context.Context, client abciclient.Client, txBytes [][]byte, codeExp []uint32, dataExp []byte) error {
	res, _ := client.FinalizeBlock(ctx, types.RequestFinalizeBlock{Txs: txBytes})
	for i, tx := range res.TxResults {
	for i, tx := range res.Txs {
		code, data, log := tx.Code, tx.Data, tx.Log
		if code != codeExp[i] {
			fmt.Println("Failed test: FinalizeBlock")

@@ -1,10 +1,10 @@
echo hello
info
commit
finalize_block "abc"
deliver_tx "abc"
info
commit
query "abc"
finalize_block "def=xyz" "ghi=123"
deliver_tx "def=xyz"
commit
query "def"

@@ -12,7 +12,7 @@
-> code: OK
-> data.hex: 0x0000000000000000

> finalize_block "abc"
> deliver_tx "abc"
-> code: OK

> info
@@ -33,14 +33,12 @@
-> value: abc
-> value.hex: 616263

> finalize_block "def=xyz" "ghi=123"
-> code: OK
> finalize_block "def=xyz" "ghi=123"
> deliver_tx "def=xyz"
-> code: OK

> commit
-> code: OK
-> data.hex: 0x0600000000000000
-> data.hex: 0x0400000000000000

> query "def"
-> code: OK

@@ -1,7 +1,7 @@
check_tx 0x00
check_tx 0xff
finalize_block 0x00
deliver_tx 0x00
check_tx 0x00
finalize_block 0x01
finalize_block 0x04
deliver_tx 0x01
deliver_tx 0x04
info

@@ -4,20 +4,20 @@
> check_tx 0xff
-> code: OK

> finalize_block 0x00
> deliver_tx 0x00
-> code: OK

> check_tx 0x00
-> code: OK

> finalize_block 0x01
> deliver_tx 0x01
-> code: OK

> finalize_block 0x04
> deliver_tx 0x04
-> code: OK

> info
-> code: OK
-> data: {"size":3}
-> data.hex: 0x7B2273697A65223A337D
-> data: {"hashes":0,"txs":3}
-> data.hex: 0x7B22686173686573223A302C22747873223A337D

@@ -4,7 +4,6 @@ import (
	"context"
)

//go:generate ../../scripts/mockery_generate.sh Application
// Application is an interface that enables any finite, deterministic state machine
// to be driven by a blockchain-based replication engine via the ABCI.
// All methods take a RequestXxx argument and return a ResponseXxx argument,
@@ -42,7 +41,8 @@ type Application interface {

var _ Application = (*BaseApplication)(nil)

type BaseApplication struct{}
type BaseApplication struct {
}

func NewBaseApplication() *BaseApplication {
	return &BaseApplication{}
@@ -103,12 +103,12 @@ func (BaseApplication) ProcessProposal(req RequestProcessProposal) ResponseProce
}

func (BaseApplication) FinalizeBlock(req RequestFinalizeBlock) ResponseFinalizeBlock {
	txs := make([]*ExecTxResult, len(req.Txs))
	txs := make([]*ResponseDeliverTx, len(req.Txs))
	for i := range req.Txs {
		txs[i] = &ExecTxResult{Code: CodeTypeOK}
		txs[i] = &ResponseDeliverTx{Code: CodeTypeOK}
	}
	return ResponseFinalizeBlock{
		TxResults: txs,
		Txs: txs,
	}
}
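On the ExecTxResult side of the hunk above, the base application simply reports success for every transaction; a minimal sketch of that behavior (illustrative, using the names from this file):

	// illustrative only: every tx gets CodeTypeOK from the base application
	app := NewBaseApplication()
	resp := app.FinalizeBlock(RequestFinalizeBlock{Txs: [][]byte{[]byte("a"), []byte("b")}})
	// resp.TxResults contains two results, each with Code == CodeTypeOK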

@@ -13,7 +13,7 @@ import (
)

func TestMarshalJSON(t *testing.T) {
	b, err := json.Marshal(&ExecTxResult{Code: 1})
	b, err := json.Marshal(&ResponseDeliverTx{})
	assert.NoError(t, err)
	// include empty fields.
	assert.True(t, strings.Contains(string(b), "code"))

@@ -1,209 +0,0 @@
// Code generated by mockery. DO NOT EDIT.

package mocks

import (
	mock "github.com/stretchr/testify/mock"
	types "github.com/tendermint/tendermint/abci/types"
)

// Application is an autogenerated mock type for the Application type
type Application struct {
	mock.Mock
}

// ApplySnapshotChunk provides a mock function with given fields: _a0
func (_m *Application) ApplySnapshotChunk(_a0 types.RequestApplySnapshotChunk) types.ResponseApplySnapshotChunk {
	ret := _m.Called(_a0)

	var r0 types.ResponseApplySnapshotChunk
	if rf, ok := ret.Get(0).(func(types.RequestApplySnapshotChunk) types.ResponseApplySnapshotChunk); ok {
		r0 = rf(_a0)
	} else {
		r0 = ret.Get(0).(types.ResponseApplySnapshotChunk)
	}

	return r0
}

// CheckTx provides a mock function with given fields: _a0
func (_m *Application) CheckTx(_a0 types.RequestCheckTx) types.ResponseCheckTx {
	ret := _m.Called(_a0)

	var r0 types.ResponseCheckTx
	if rf, ok := ret.Get(0).(func(types.RequestCheckTx) types.ResponseCheckTx); ok {
		r0 = rf(_a0)
	} else {
		r0 = ret.Get(0).(types.ResponseCheckTx)
	}

	return r0
}

// Commit provides a mock function with given fields:
func (_m *Application) Commit() types.ResponseCommit {
	ret := _m.Called()

	var r0 types.ResponseCommit
	if rf, ok := ret.Get(0).(func() types.ResponseCommit); ok {
		r0 = rf()
	} else {
		r0 = ret.Get(0).(types.ResponseCommit)
	}

	return r0
}

// ExtendVote provides a mock function with given fields: _a0
func (_m *Application) ExtendVote(_a0 types.RequestExtendVote) types.ResponseExtendVote {
	ret := _m.Called(_a0)

	var r0 types.ResponseExtendVote
	if rf, ok := ret.Get(0).(func(types.RequestExtendVote) types.ResponseExtendVote); ok {
		r0 = rf(_a0)
	} else {
		r0 = ret.Get(0).(types.ResponseExtendVote)
	}

	return r0
}

// FinalizeBlock provides a mock function with given fields: _a0
func (_m *Application) FinalizeBlock(_a0 types.RequestFinalizeBlock) types.ResponseFinalizeBlock {
	ret := _m.Called(_a0)

	var r0 types.ResponseFinalizeBlock
	if rf, ok := ret.Get(0).(func(types.RequestFinalizeBlock) types.ResponseFinalizeBlock); ok {
		r0 = rf(_a0)
	} else {
		r0 = ret.Get(0).(types.ResponseFinalizeBlock)
	}

	return r0
}

// Info provides a mock function with given fields: _a0
func (_m *Application) Info(_a0 types.RequestInfo) types.ResponseInfo {
	ret := _m.Called(_a0)

	var r0 types.ResponseInfo
	if rf, ok := ret.Get(0).(func(types.RequestInfo) types.ResponseInfo); ok {
		r0 = rf(_a0)
	} else {
		r0 = ret.Get(0).(types.ResponseInfo)
	}

	return r0
}

// InitChain provides a mock function with given fields: _a0
func (_m *Application) InitChain(_a0 types.RequestInitChain) types.ResponseInitChain {
	ret := _m.Called(_a0)

	var r0 types.ResponseInitChain
	if rf, ok := ret.Get(0).(func(types.RequestInitChain) types.ResponseInitChain); ok {
		r0 = rf(_a0)
	} else {
		r0 = ret.Get(0).(types.ResponseInitChain)
	}

	return r0
}

// ListSnapshots provides a mock function with given fields: _a0
func (_m *Application) ListSnapshots(_a0 types.RequestListSnapshots) types.ResponseListSnapshots {
	ret := _m.Called(_a0)

	var r0 types.ResponseListSnapshots
	if rf, ok := ret.Get(0).(func(types.RequestListSnapshots) types.ResponseListSnapshots); ok {
		r0 = rf(_a0)
	} else {
		r0 = ret.Get(0).(types.ResponseListSnapshots)
	}

	return r0
}

// LoadSnapshotChunk provides a mock function with given fields: _a0
func (_m *Application) LoadSnapshotChunk(_a0 types.RequestLoadSnapshotChunk) types.ResponseLoadSnapshotChunk {
	ret := _m.Called(_a0)

	var r0 types.ResponseLoadSnapshotChunk
	if rf, ok := ret.Get(0).(func(types.RequestLoadSnapshotChunk) types.ResponseLoadSnapshotChunk); ok {
		r0 = rf(_a0)
	} else {
		r0 = ret.Get(0).(types.ResponseLoadSnapshotChunk)
	}

	return r0
}

// OfferSnapshot provides a mock function with given fields: _a0
func (_m *Application) OfferSnapshot(_a0 types.RequestOfferSnapshot) types.ResponseOfferSnapshot {
	ret := _m.Called(_a0)

	var r0 types.ResponseOfferSnapshot
	if rf, ok := ret.Get(0).(func(types.RequestOfferSnapshot) types.ResponseOfferSnapshot); ok {
		r0 = rf(_a0)
	} else {
		r0 = ret.Get(0).(types.ResponseOfferSnapshot)
	}

	return r0
}

// PrepareProposal provides a mock function with given fields: _a0
func (_m *Application) PrepareProposal(_a0 types.RequestPrepareProposal) types.ResponsePrepareProposal {
	ret := _m.Called(_a0)

	var r0 types.ResponsePrepareProposal
	if rf, ok := ret.Get(0).(func(types.RequestPrepareProposal) types.ResponsePrepareProposal); ok {
		r0 = rf(_a0)
	} else {
		r0 = ret.Get(0).(types.ResponsePrepareProposal)
	}

	return r0
}

// ProcessProposal provides a mock function with given fields: _a0
func (_m *Application) ProcessProposal(_a0 types.RequestProcessProposal) types.ResponseProcessProposal {
	ret := _m.Called(_a0)

	var r0 types.ResponseProcessProposal
	if rf, ok := ret.Get(0).(func(types.RequestProcessProposal) types.ResponseProcessProposal); ok {
		r0 = rf(_a0)
	} else {
		r0 = ret.Get(0).(types.ResponseProcessProposal)
	}

	return r0
}

// Query provides a mock function with given fields: _a0
func (_m *Application) Query(_a0 types.RequestQuery) types.ResponseQuery {
	ret := _m.Called(_a0)

	var r0 types.ResponseQuery
	if rf, ok := ret.Get(0).(func(types.RequestQuery) types.ResponseQuery); ok {
		r0 = rf(_a0)
	} else {
		r0 = ret.Get(0).(types.ResponseQuery)
	}

	return r0
}

// VerifyVoteExtension provides a mock function with given fields: _a0
func (_m *Application) VerifyVoteExtension(_a0 types.RequestVerifyVoteExtension) types.ResponseVerifyVoteExtension {
	ret := _m.Called(_a0)

	var r0 types.ResponseVerifyVoteExtension
	if rf, ok := ret.Get(0).(func(types.RequestVerifyVoteExtension) types.ResponseVerifyVoteExtension); ok {
		r0 = rf(_a0)
	} else {
		r0 = ret.Get(0).(types.ResponseVerifyVoteExtension)
	}

	return r0
}
@@ -1,189 +0,0 @@
package mocks

import (
	types "github.com/tendermint/tendermint/abci/types"
)

// BaseMock provides a wrapper around the generated Application mock and a BaseApplication.
// BaseMock first tries to use the mock's implementation of the method.
// If no functionality was provided for the mock by the user, BaseMock dispatches
// to the BaseApplication and uses its functionality.
// BaseMock allows users to provide mocked functionality for only the methods that matter
// for their test while avoiding a panic if the code calls Application methods that are
// not relevant to the test.
type BaseMock struct {
	base *types.BaseApplication
	*Application
}

func NewBaseMock() BaseMock {
	return BaseMock{
		base:        types.NewBaseApplication(),
		Application: new(Application),
	}
}

// Info/Query Connection
// Return application info
func (m BaseMock) Info(input types.RequestInfo) types.ResponseInfo {
	var ret types.ResponseInfo
	defer func() {
		if r := recover(); r != nil {
			ret = m.base.Info(input)
		}
	}()
	ret = m.Application.Info(input)
	return ret
}

func (m BaseMock) Query(input types.RequestQuery) types.ResponseQuery {
	var ret types.ResponseQuery
	defer func() {
		if r := recover(); r != nil {
			ret = m.base.Query(input)
		}
	}()
	ret = m.Application.Query(input)
	return ret
}

// Mempool Connection
// Validate a tx for the mempool
func (m BaseMock) CheckTx(input types.RequestCheckTx) types.ResponseCheckTx {
	var ret types.ResponseCheckTx
	defer func() {
		if r := recover(); r != nil {
			ret = m.base.CheckTx(input)
		}
	}()
	ret = m.Application.CheckTx(input)
	return ret
}

// Consensus Connection
// Initialize blockchain w validators/other info from TendermintCore
func (m BaseMock) InitChain(input types.RequestInitChain) types.ResponseInitChain {
	var ret types.ResponseInitChain
	defer func() {
		if r := recover(); r != nil {
			ret = m.base.InitChain(input)
		}
	}()
	ret = m.Application.InitChain(input)
	return ret
}

func (m BaseMock) PrepareProposal(input types.RequestPrepareProposal) types.ResponsePrepareProposal {
	var ret types.ResponsePrepareProposal
	defer func() {
		if r := recover(); r != nil {
			ret = m.base.PrepareProposal(input)
		}
	}()
	ret = m.Application.PrepareProposal(input)
	return ret
}

func (m BaseMock) ProcessProposal(input types.RequestProcessProposal) types.ResponseProcessProposal {
	var ret types.ResponseProcessProposal
	defer func() {
		if r := recover(); r != nil {
			ret = m.base.ProcessProposal(input)
		}
	}()
	ret = m.Application.ProcessProposal(input)
	return ret
}

// Commit the state and return the application Merkle root hash
func (m BaseMock) Commit() types.ResponseCommit {
	var ret types.ResponseCommit
	defer func() {
		if r := recover(); r != nil {
			ret = m.base.Commit()
		}
	}()
	ret = m.Application.Commit()
	return ret
}

// Create application specific vote extension
func (m BaseMock) ExtendVote(input types.RequestExtendVote) types.ResponseExtendVote {
	var ret types.ResponseExtendVote
	defer func() {
		if r := recover(); r != nil {
			ret = m.base.ExtendVote(input)
		}
	}()
	ret = m.Application.ExtendVote(input)
	return ret
}

// Verify application's vote extension data
func (m BaseMock) VerifyVoteExtension(input types.RequestVerifyVoteExtension) types.ResponseVerifyVoteExtension {
	var ret types.ResponseVerifyVoteExtension
	defer func() {
		if r := recover(); r != nil {
			ret = m.base.VerifyVoteExtension(input)
		}
	}()
	ret = m.Application.VerifyVoteExtension(input)
	return ret
}

// State Sync Connection
// List available snapshots
func (m BaseMock) ListSnapshots(input types.RequestListSnapshots) types.ResponseListSnapshots {
	var ret types.ResponseListSnapshots
	defer func() {
		if r := recover(); r != nil {
			ret = m.base.ListSnapshots(input)
		}
	}()
	ret = m.Application.ListSnapshots(input)
	return ret
}

func (m BaseMock) OfferSnapshot(input types.RequestOfferSnapshot) types.ResponseOfferSnapshot {
	var ret types.ResponseOfferSnapshot
	defer func() {
		if r := recover(); r != nil {
			ret = m.base.OfferSnapshot(input)
		}
	}()
	ret = m.Application.OfferSnapshot(input)
	return ret
}

func (m BaseMock) LoadSnapshotChunk(input types.RequestLoadSnapshotChunk) types.ResponseLoadSnapshotChunk {
	var ret types.ResponseLoadSnapshotChunk
	defer func() {
		if r := recover(); r != nil {
			ret = m.base.LoadSnapshotChunk(input)
		}
	}()
	ret = m.Application.LoadSnapshotChunk(input)
	return ret
}

func (m BaseMock) ApplySnapshotChunk(input types.RequestApplySnapshotChunk) types.ResponseApplySnapshotChunk {
	var ret types.ResponseApplySnapshotChunk
	defer func() {
		if r := recover(); r != nil {
			ret = m.base.ApplySnapshotChunk(input)
		}
	}()
	ret = m.Application.ApplySnapshotChunk(input)
	return ret
}

func (m BaseMock) FinalizeBlock(input types.RequestFinalizeBlock) types.ResponseFinalizeBlock {
	var ret types.ResponseFinalizeBlock
	defer func() {
		if r := recover(); r != nil {
			ret = m.base.FinalizeBlock(input)
		}
	}()
	ret = m.Application.FinalizeBlock(input)
	return ret
}
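A minimal sketch of how BaseMock might be exercised in a test, per the doc comment above (assumes testify; the expectation and values are made up). Only Info is mocked, while CheckTx panics inside the generated mock and therefore falls through to the BaseApplication:

	// illustrative only
	m := NewBaseMock()
	m.Application.On("Info", mock.Anything).Return(types.ResponseInfo{Data: "mocked"})

	_ = m.Info(types.RequestInfo{})       // served by the testify mock
	_ = m.CheckTx(types.RequestCheckTx{}) // recovers and dispatches to BaseApplication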

@@ -33,16 +33,6 @@ func (r ResponseDeliverTx) IsErr() bool {
	return r.Code != CodeTypeOK
}

// IsOK returns true if Code is OK.
func (r ExecTxResult) IsOK() bool {
	return r.Code == CodeTypeOK
}

// IsErr returns true if Code is something other than OK.
func (r ExecTxResult) IsErr() bool {
	return r.Code != CodeTypeOK
}

// IsOK returns true if Code is OK.
func (r ResponseQuery) IsOK() bool {
	return r.Code == CodeTypeOK
@@ -68,6 +58,11 @@ func (r ResponseVerifyVoteExtension) IsErr() bool {
	return r.Result != ResponseVerifyVoteExtension_ACCEPT
}

// IsOK returns true if Code is OK
func (r ResponseProcessProposal) IsOK() bool {
	return r.Result == ResponseProcessProposal_ACCEPT
}

//---------------------------------------------------------------------------
// override JSON marshaling so we emit defaults (ie. disable omitempty)

@@ -167,31 +162,3 @@ func RespondVerifyVoteExtension(ok bool) ResponseVerifyVoteExtension {
		Result: result,
	}
}

// deterministicExecTxResult constructs a copy of response that omits
// non-deterministic fields. The input response is not modified.
func deterministicExecTxResult(response *ExecTxResult) *ExecTxResult {
	return &ExecTxResult{
		Code:      response.Code,
		Data:      response.Data,
		GasWanted: response.GasWanted,
		GasUsed:   response.GasUsed,
	}
}

// MarshalTxResults encodes the TxResults as a list of byte
// slices. It strips off the non-deterministic pieces of the TxResults
// so that the resulting data can be used for hash comparisons and
// in Merkle proofs.
func MarshalTxResults(r []*ExecTxResult) ([][]byte, error) {
	s := make([][]byte, len(r))
	for i, e := range r {
		d := deterministicExecTxResult(e)
		b, err := d.Marshal()
		if err != nil {
			return nil, err
		}
		s[i] = b
	}
	return s, nil
}
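A short usage sketch tying the two helpers above together (it mirrors the deleted test further below; merkle is github.com/tendermint/tendermint/crypto/merkle, and the inputs are made up):

	// illustrative only: hash only the deterministic parts of tx results
	results := []*ExecTxResult{
		{Code: CodeTypeOK, Data: []byte("one"), Log: "nondeterministic, ignored"},
	}
	bzs, err := MarshalTxResults(results)
	if err != nil {
		panic(err)
	}
	root := merkle.HashFromByteSlices(bzs) // Log does not affect the root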

File diff suppressed because it is too large
@@ -1,74 +0,0 @@
package types_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	abci "github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/crypto/merkle"
)

func TestHashAndProveResults(t *testing.T) {
	trs := []*abci.ExecTxResult{
		// Note, these tests rely on the first two entries being in this order.
		{Code: 0, Data: nil},
		{Code: 0, Data: []byte{}},

		{Code: 0, Data: []byte("one")},
		{Code: 14, Data: nil},
		{Code: 14, Data: []byte("foo")},
		{Code: 14, Data: []byte("bar")},
	}

	// Nil and []byte{} should produce the same bytes
	bz0, err := trs[0].Marshal()
	require.NoError(t, err)
	bz1, err := trs[1].Marshal()
	require.NoError(t, err)
	require.Equal(t, bz0, bz1)

	// Make sure that we can get a root hash from results and verify proofs.
	rs, err := abci.MarshalTxResults(trs)
	require.NoError(t, err)
	root := merkle.HashFromByteSlices(rs)
	assert.NotEmpty(t, root)

	_, proofs := merkle.ProofsFromByteSlices(rs)
	for i, tr := range trs {
		bz, err := tr.Marshal()
		require.NoError(t, err)

		valid := proofs[i].Verify(root, bz)
		assert.NoError(t, valid, "%d", i)
	}
}

func TestHashDeterministicFieldsOnly(t *testing.T) {
	tr1 := abci.ExecTxResult{
		Code:      1,
		Data:      []byte("transaction"),
		Log:       "nondeterministic data: abc",
		Info:      "nondeterministic data: abc",
		GasWanted: 1000,
		GasUsed:   1000,
		Events:    []abci.Event{},
		Codespace: "nondeterministic.data.abc",
	}
	tr2 := abci.ExecTxResult{
		Code:      1,
		Data:      []byte("transaction"),
		Log:       "nondeterministic data: def",
		Info:      "nondeterministic data: def",
		GasWanted: 1000,
		GasUsed:   1000,
		Events:    []abci.Event{},
		Codespace: "nondeterministic.data.def",
	}
	r1, err := abci.MarshalTxResults([]*abci.ExecTxResult{&tr1})
	require.NoError(t, err)
	r2, err := abci.MarshalTxResults([]*abci.ExecTxResult{&tr2})
	require.NoError(t, err)
	require.Equal(t, merkle.HashFromByteSlices(r1), merkle.HashFromByteSlices(r2))
}
19
buf.gen.yaml
@@ -1,9 +1,14 @@
version: v1
# The version of the generation template (required).
# The only currently-valid value is v1beta1.
version: v1beta1

# The plugins to run.
plugins:
  # The name of the plugin.
  - name: gogofaster
    out: ./proto/
    opt:
      - Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types
      - Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration
      - plugins=grpc
      - paths=source_relative
    # The directory where the generated proto output will be written.
    # The directory is relative to where the generation tool was run.
    out: proto
    # Set options to assign import paths to the well-known types
    # and to enable service generation.
    opt: Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types,Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration,plugins=grpc,paths=source_relative

@@ -1,3 +0,0 @@
version: v1
directories:
  - proto
@@ -1,11 +1,16 @@
version: v1
deps:
  - buf.build/gogo/protobuf
breaking:
  use:
    - FILE
version: v1beta1

build:
  roots:
    - proto
    - third_party/proto
lint:
  use:
    - BASIC
    - FILE_LOWER_SNAKE_CASE
    - UNARY_RPC
  ignore:
    - gogoproto
breaking:
  use:
    - FILE
@@ -2,29 +2,38 @@ package debug

import (
	"github.com/spf13/cobra"

	"github.com/tendermint/tendermint/libs/log"
)

const (
var (
	nodeRPCAddr string
	profAddr    string
	frequency   uint

	flagNodeRPCAddr = "rpc-laddr"
	flagProfAddr    = "pprof-laddr"
	flagFrequency   = "frequency"

	logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo)
)

func GetDebugCommand(logger log.Logger) *cobra.Command {
	cmd := &cobra.Command{
		Use:   "debug",
		Short: "A utility to kill or watch a Tendermint process while aggregating debugging data",
	}
	cmd.PersistentFlags().SortFlags = true
	cmd.PersistentFlags().String(
// DebugCmd defines the root command containing subcommands that assist in
// debugging running Tendermint processes.
var DebugCmd = &cobra.Command{
	Use:   "debug",
	Short: "A utility to kill or watch a Tendermint process while aggregating debugging data",
}

func init() {
	DebugCmd.PersistentFlags().SortFlags = true
	DebugCmd.PersistentFlags().StringVar(
		&nodeRPCAddr,
		flagNodeRPCAddr,
		"tcp://localhost:26657",
		"the Tendermint node's RPC address <host>:<port>)",
		"the Tendermint node's RPC address (<host>:<port>)",
	)

	cmd.AddCommand(getKillCmd(logger))
	cmd.AddCommand(getDumpCmd(logger))
	return cmd

	DebugCmd.AddCommand(killCmd)
	DebugCmd.AddCommand(dumpCmd)
}
@@ -13,102 +13,78 @@ import (

	"github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/libs/cli"
	"github.com/tendermint/tendermint/libs/log"
	rpchttp "github.com/tendermint/tendermint/rpc/client/http"
)

func getDumpCmd(logger log.Logger) *cobra.Command {
	cmd := &cobra.Command{
		Use:   "dump [output-directory]",
		Short: "Continuously poll a Tendermint process and dump debugging data into a single location",
		Long: `Continuously poll a Tendermint process and dump debugging data into a single
var dumpCmd = &cobra.Command{
	Use:   "dump [output-directory]",
	Short: "Continuously poll a Tendermint process and dump debugging data into a single location",
	Long: `Continuously poll a Tendermint process and dump debugging data into a single
location at a specified frequency. At each frequency interval, an archived and compressed
file will contain node debugging information including the goroutine and heap profiles
if enabled.`,
		Args: cobra.ExactArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			outDir := args[0]
			if outDir == "" {
				return errors.New("invalid output directory")
			}
			frequency, err := cmd.Flags().GetUint(flagFrequency)
			if err != nil {
				return fmt.Errorf("flag %q not defined: %w", flagFrequency, err)
			}
	Args: cobra.ExactArgs(1),
	RunE: dumpCmdHandler,
}

			if frequency == 0 {
				return errors.New("frequency must be positive")
			}

			nodeRPCAddr, err := cmd.Flags().GetString(flagNodeRPCAddr)
			if err != nil {
				return fmt.Errorf("flag %q not defined: %w", flagNodeRPCAddr, err)
			}

			profAddr, err := cmd.Flags().GetString(flagProfAddr)
			if err != nil {
				return fmt.Errorf("flag %q not defined: %w", flagProfAddr, err)
			}

			if _, err := os.Stat(outDir); os.IsNotExist(err) {
				if err := os.Mkdir(outDir, os.ModePerm); err != nil {
					return fmt.Errorf("failed to create output directory: %w", err)
				}
			}

			rpc, err := rpchttp.New(nodeRPCAddr)
			if err != nil {
				return fmt.Errorf("failed to create new http client: %w", err)
			}

			ctx := cmd.Context()

			home := viper.GetString(cli.HomeFlag)
			conf := config.DefaultConfig()
			conf = conf.SetRoot(home)
			config.EnsureRoot(conf.RootDir)

			dumpArgs := dumpDebugDataArgs{
				conf:     conf,
				outDir:   outDir,
				profAddr: profAddr,
			}
			dumpDebugData(ctx, logger, rpc, dumpArgs)

			ticker := time.NewTicker(time.Duration(frequency) * time.Second)
			for range ticker.C {
				dumpDebugData(ctx, logger, rpc, dumpArgs)
			}

			return nil
		},
	}
	cmd.Flags().Uint(
func init() {
	dumpCmd.Flags().UintVar(
		&frequency,
		flagFrequency,
		30,
		"the frequency (seconds) in which to poll, aggregate and dump Tendermint debug data",
	)

	cmd.Flags().String(
	dumpCmd.Flags().StringVar(
		&profAddr,
		flagProfAddr,
		"",
		"the profiling server address (<host>:<port>)",
	)

	return cmd

}

type dumpDebugDataArgs struct {
	conf     *config.Config
	outDir   string
	profAddr string
func dumpCmdHandler(cmd *cobra.Command, args []string) error {
	outDir := args[0]
	if outDir == "" {
		return errors.New("invalid output directory")
	}

	if frequency == 0 {
		return errors.New("frequency must be positive")
	}

	if _, err := os.Stat(outDir); os.IsNotExist(err) {
		if err := os.Mkdir(outDir, os.ModePerm); err != nil {
			return fmt.Errorf("failed to create output directory: %w", err)
		}
	}

	rpc, err := rpchttp.New(nodeRPCAddr)
	if err != nil {
		return fmt.Errorf("failed to create new http client: %w", err)
	}

	ctx := cmd.Context()

	home := viper.GetString(cli.HomeFlag)
	conf := config.DefaultConfig()
	conf = conf.SetRoot(home)
	config.EnsureRoot(conf.RootDir)

	dumpDebugData(ctx, outDir, conf, rpc)

	ticker := time.NewTicker(time.Duration(frequency) * time.Second)
	for range ticker.C {
		dumpDebugData(ctx, outDir, conf, rpc)
	}

	return nil
}

func dumpDebugData(ctx context.Context, logger log.Logger, rpc *rpchttp.HTTP, args dumpDebugDataArgs) {
func dumpDebugData(ctx context.Context, outDir string, conf *config.Config, rpc *rpchttp.HTTP) {
	start := time.Now().UTC()

	tmpDir, err := os.MkdirTemp(args.outDir, "tendermint_debug_tmp")
	tmpDir, err := os.MkdirTemp(outDir, "tendermint_debug_tmp")
	if err != nil {
		logger.Error("failed to create temporary directory", "dir", tmpDir, "error", err)
		return
@@ -134,26 +110,26 @@ func dumpDebugData(ctx context.Context, logger log.Logger, rpc *rpchttp.HTTP, ar
	}

	logger.Info("copying node WAL...")
	if err := copyWAL(args.conf, tmpDir); err != nil {
	if err := copyWAL(conf, tmpDir); err != nil {
		logger.Error("failed to copy node WAL", "error", err)
		return
	}

	if args.profAddr != "" {
	if profAddr != "" {
		logger.Info("getting node goroutine profile...")
		if err := dumpProfile(tmpDir, args.profAddr, "goroutine", 2); err != nil {
		if err := dumpProfile(tmpDir, profAddr, "goroutine", 2); err != nil {
			logger.Error("failed to dump goroutine profile", "error", err)
			return
		}

		logger.Info("getting node heap profile...")
		if err := dumpProfile(tmpDir, args.profAddr, "heap", 2); err != nil {
		if err := dumpProfile(tmpDir, profAddr, "heap", 2); err != nil {
			logger.Error("failed to dump heap profile", "error", err)
			return
		}
	}

	outFile := filepath.Join(args.outDir, fmt.Sprintf("%s.zip", start.Format(time.RFC3339)))
	outFile := filepath.Join(outDir, fmt.Sprintf("%s.zip", start.Format(time.RFC3339)))
	if err := zipDir(tmpDir, outFile); err != nil {
		logger.Error("failed to create and compress archive", "file", outFile, "error", err)
	}
@@ -15,96 +15,89 @@ import (

	"github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/libs/cli"
	"github.com/tendermint/tendermint/libs/log"
	rpchttp "github.com/tendermint/tendermint/rpc/client/http"
)

func getKillCmd(logger log.Logger) *cobra.Command {
	cmd := &cobra.Command{
		Use:   "kill [pid] [compressed-output-file]",
		Short: "Kill a Tendermint process while aggregating and packaging debugging data",
		Long: `Kill a Tendermint process while also aggregating Tendermint process data
var killCmd = &cobra.Command{
	Use:   "kill [pid] [compressed-output-file]",
	Short: "Kill a Tendermint process while aggregating and packaging debugging data",
	Long: `Kill a Tendermint process while also aggregating Tendermint process data
such as the latest node state, including consensus and networking state,
go-routine state, and the node's WAL and config information. This aggregated data
is packaged into a compressed archive.

Example:
$ tendermint debug kill 34255 /path/to/tm-debug.zip`,
		Args: cobra.ExactArgs(2),
		RunE: func(cmd *cobra.Command, args []string) error {
			ctx := cmd.Context()
			pid, err := strconv.ParseInt(args[0], 10, 64)
			if err != nil {
				return err
			}
	Args: cobra.ExactArgs(2),
	RunE: killCmdHandler,
}

			outFile := args[1]
			if outFile == "" {
				return errors.New("invalid output file")
			}
			nodeRPCAddr, err := cmd.Flags().GetString(flagNodeRPCAddr)
			if err != nil {
				return fmt.Errorf("flag %q not defined: %w", flagNodeRPCAddr, err)
			}

			rpc, err := rpchttp.New(nodeRPCAddr)
			if err != nil {
				return fmt.Errorf("failed to create new http client: %w", err)
			}

			home := viper.GetString(cli.HomeFlag)
			conf := config.DefaultConfig()
			conf = conf.SetRoot(home)
			config.EnsureRoot(conf.RootDir)

			// Create a temporary directory which will contain all the state dumps and
			// relevant files and directories that will be compressed into a file.
			tmpDir, err := os.MkdirTemp(os.TempDir(), "tendermint_debug_tmp")
			if err != nil {
				return fmt.Errorf("failed to create temporary directory: %w", err)
			}
			defer os.RemoveAll(tmpDir)

			logger.Info("getting node status...")
			if err := dumpStatus(ctx, rpc, tmpDir, "status.json"); err != nil {
				return err
			}

			logger.Info("getting node network info...")
			if err := dumpNetInfo(ctx, rpc, tmpDir, "net_info.json"); err != nil {
				return err
			}

			logger.Info("getting node consensus state...")
			if err := dumpConsensusState(ctx, rpc, tmpDir, "consensus_state.json"); err != nil {
				return err
			}

			logger.Info("copying node WAL...")
			if err := copyWAL(conf, tmpDir); err != nil {
				if !os.IsNotExist(err) {
					return err
				}

				logger.Info("node WAL does not exist; continuing...")
			}

			logger.Info("copying node configuration...")
			if err := copyConfig(home, tmpDir); err != nil {
				return err
			}

			logger.Info("killing Tendermint process")
			if err := killProc(int(pid), tmpDir); err != nil {
				return err
			}

			logger.Info("archiving and compressing debug directory...")
			return zipDir(tmpDir, outFile)
		},
func killCmdHandler(cmd *cobra.Command, args []string) error {
	ctx := cmd.Context()
	pid, err := strconv.ParseInt(args[0], 10, 64)
	if err != nil {
		return err
	}

	return cmd
	outFile := args[1]
	if outFile == "" {
		return errors.New("invalid output file")
	}

	rpc, err := rpchttp.New(nodeRPCAddr)
	if err != nil {
		return fmt.Errorf("failed to create new http client: %w", err)
	}

	home := viper.GetString(cli.HomeFlag)
	conf := config.DefaultConfig()
	conf = conf.SetRoot(home)
	config.EnsureRoot(conf.RootDir)

	// Create a temporary directory which will contain all the state dumps and
	// relevant files and directories that will be compressed into a file.
	tmpDir, err := os.MkdirTemp(os.TempDir(), "tendermint_debug_tmp")
	if err != nil {
		return fmt.Errorf("failed to create temporary directory: %w", err)
	}
	defer os.RemoveAll(tmpDir)

	logger.Info("getting node status...")
	if err := dumpStatus(ctx, rpc, tmpDir, "status.json"); err != nil {
		return err
	}

	logger.Info("getting node network info...")
	if err := dumpNetInfo(ctx, rpc, tmpDir, "net_info.json"); err != nil {
		return err
	}

	logger.Info("getting node consensus state...")
	if err := dumpConsensusState(ctx, rpc, tmpDir, "consensus_state.json"); err != nil {
		return err
	}

	logger.Info("copying node WAL...")
	if err := copyWAL(conf, tmpDir); err != nil {
		if !os.IsNotExist(err) {
			return err
		}

		logger.Info("node WAL does not exist; continuing...")
	}

	logger.Info("copying node configuration...")
	if err := copyConfig(home, tmpDir); err != nil {
		return err
	}

	logger.Info("killing Tendermint process")
	if err := killProc(int(pid), tmpDir); err != nil {
		return err
	}

	logger.Info("archiving and compressing debug directory...")
	return zipDir(tmpDir, outFile)
}

// killProc attempts to kill the Tendermint process with a given PID with an
@@ -1,6 +1,7 @@
package commands

import (
	"context"
	"errors"
	"fmt"
	"net/http"
@@ -148,7 +149,7 @@ for applications built w/ Cosmos SDK).
	// Initiate the light client. If the trusted store already has blocks in it,
	// it will be used; otherwise we fall back to the trusted options.
	c, err := light.NewHTTPClient(
		cmd.Context(),
		context.Background(),
		chainID,
		light.TrustOptions{
			Period: trustingPeriod,
@@ -213,7 +213,7 @@ func eventReIndex(cmd *cobra.Command, args eventReIndexArgs) error {
			Height: b.Height,
			Index:  uint32(i),
			Tx:     b.Data.Txs[i],
			Result: *(r.FinalizeBlock.TxResults[i]),
			Result: *(r.FinalizeBlock.Txs[i]),
		}

		_ = batch.Add(&tr)
@@ -153,10 +153,10 @@ func TestReIndexEvent(t *testing.T) {
		On("IndexTxEvents", mock.AnythingOfType("[]*types.TxResult")).Return(errors.New("")).Once().
		On("IndexTxEvents", mock.AnythingOfType("[]*types.TxResult")).Return(nil)

	dtx := abcitypes.ExecTxResult{}
	dtx := abcitypes.ResponseDeliverTx{}
	abciResp := &prototmstate.ABCIResponses{
		FinalizeBlock: &abcitypes.ResponseFinalizeBlock{
			TxResults: []*abcitypes.ExecTxResult{&dtx},
			Txs: []*abcitypes.ResponseDeliverTx{&dtx},
		},
	}

@@ -2,7 +2,6 @@ package commands
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
@@ -32,20 +31,6 @@ func MakeResetAllCommand(conf *config.Config, logger log.Logger) *cobra.Command
|
||||
return cmd
|
||||
}
|
||||
|
||||
// MakeResetStateCommand constructs a command that removes the database of
|
||||
// the specified Tendermint core instance.
func MakeResetStateCommand(conf *config.Config, logger log.Logger) *cobra.Command {
	var keyType string

	return &cobra.Command{
		Use:   "reset-state",
		Short: "Remove all the data and WAL",
		RunE: func(cmd *cobra.Command, args []string) error {
			return resetState(conf.DBDir(), logger, keyType)
		},
	}
}

func MakeResetPrivateValidatorCommand(conf *config.Config, logger log.Logger) *cobra.Command {
	var keyType string

@@ -70,76 +55,18 @@ func MakeResetPrivateValidatorCommand(conf *config.Config, logger log.Logger) *c
// it's only suitable for testnets.

// resetAll removes address book files plus all data, and resets the privValidator data.
// Exported so other CLI tools can use it.
func resetAll(dbDir, privValKeyFile, privValStateFile string, logger log.Logger, keyType string) error {
	if err := os.RemoveAll(dbDir); err == nil {
		logger.Info("Removed all blockchain history", "dir", dbDir)
	} else {
		logger.Error("error removing all blockchain history", "dir", dbDir, "err", err)
	}

	return resetFilePV(privValKeyFile, privValStateFile, logger, keyType)
}

// resetState removes address book files plus all databases.
func resetState(dbDir string, logger log.Logger, keyType string) error {
	blockdb := filepath.Join(dbDir, "blockstore.db")
	state := filepath.Join(dbDir, "state.db")
	wal := filepath.Join(dbDir, "cs.wal")
	evidence := filepath.Join(dbDir, "evidence.db")
	txIndex := filepath.Join(dbDir, "tx_index.db")
	peerstore := filepath.Join(dbDir, "peerstore.db")

	if tmos.FileExists(blockdb) {
		if err := os.RemoveAll(blockdb); err == nil {
			logger.Info("Removed all blockstore.db", "dir", blockdb)
		} else {
			logger.Error("error removing all blockstore.db", "dir", blockdb, "err", err)
		}
	}

	if tmos.FileExists(state) {
		if err := os.RemoveAll(state); err == nil {
			logger.Info("Removed all state.db", "dir", state)
		} else {
			logger.Error("error removing all state.db", "dir", state, "err", err)
		}
	}

	if tmos.FileExists(wal) {
		if err := os.RemoveAll(wal); err == nil {
			logger.Info("Removed all cs.wal", "dir", wal)
		} else {
			logger.Error("error removing all cs.wal", "dir", wal, "err", err)
		}
	}

	if tmos.FileExists(evidence) {
		if err := os.RemoveAll(evidence); err == nil {
			logger.Info("Removed all evidence.db", "dir", evidence)
		} else {
			logger.Error("error removing all evidence.db", "dir", evidence, "err", err)
		}
	}

	if tmos.FileExists(txIndex) {
		if err := os.RemoveAll(txIndex); err == nil {
			logger.Info("Removed tx_index.db", "dir", txIndex)
		} else {
			logger.Error("error removing tx_index.db", "dir", txIndex, "err", err)
		}
	}

	if tmos.FileExists(peerstore) {
		if err := os.RemoveAll(peerstore); err == nil {
			logger.Info("Removed peerstore.db", "dir", peerstore)
		} else {
			logger.Error("error removing peerstore.db", "dir", peerstore, "err", err)
		}
	}
	// recreate the dbDir since the privVal state needs to live there
	if err := tmos.EnsureDir(dbDir, 0700); err != nil {
		logger.Error("unable to recreate dbDir", "err", err)
	}
	return nil
	return resetFilePV(privValKeyFile, privValStateFile, logger, keyType)
}

func resetFilePV(privValKeyFile, privValStateFile string, logger log.Logger, keyType string) error {
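The six `FileExists`/`RemoveAll`/log blocks in `resetState` differ only in the path they touch. A minimal sketch of a table-driven alternative (illustrative only; `removeIfPresent` is a hypothetical helper, not part of this change, and it assumes the same imports as the surrounding file):

```go
// removeIfPresent collapses the repeated exists/remove/log pattern above
// into one loop over database names inside dbDir.
func removeIfPresent(dbDir string, logger log.Logger, names ...string) {
	for _, name := range names {
		path := filepath.Join(dbDir, name)
		if !tmos.FileExists(path) {
			continue
		}
		if err := os.RemoveAll(path); err != nil {
			logger.Error("error removing "+name, "dir", path, "err", err)
			continue
		}
		logger.Info("Removed "+name, "dir", path)
	}
}
```

`resetState` could then call `removeIfPresent(dbDir, logger, "blockstore.db", "state.db", "cs.wal", "evidence.db", "tx_index.db", "peerstore.db")` before recreating the directory.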
@@ -52,10 +52,6 @@ func RootCommand(conf *config.Config, logger log.Logger) *cobra.Command {
			*conf = *pconf
			config.EnsureRoot(conf.RootDir)

			if err := log.OverrideWithNewLogger(logger, conf.LogFormat, conf.LogLevel); err != nil {
				return err
			}

			return nil
		},
	}

@@ -34,7 +34,6 @@ func main() {
		commands.MakeReplayCommand(conf, logger),
		commands.MakeReplayConsoleCommand(conf, logger),
		commands.MakeResetAllCommand(conf, logger),
		commands.MakeResetStateCommand(conf, logger),
		commands.MakeResetPrivateValidatorCommand(conf, logger),
		commands.MakeShowValidatorCommand(conf, logger),
		commands.MakeTestnetFilesCommand(conf, logger),
@@ -44,7 +43,7 @@ func main() {
		commands.MakeInspectCommand(conf, logger),
		commands.MakeRollbackStateCommand(conf),
		commands.MakeKeyMigrateCommand(conf, logger),
		debug.GetDebugCommand(logger),
		debug.DebugCmd,
		commands.NewCompletionCmd(rcmd, true),
	)

@@ -442,33 +442,6 @@ type RPCConfig struct {
	// to the estimated maximum number of broadcast_tx_commit calls per block.
	MaxSubscriptionsPerClient int `mapstructure:"max-subscriptions-per-client"`

	// If true, disable the websocket interface to the RPC service. This has
	// the effect of disabling the /subscribe, /unsubscribe, and /unsubscribe_all
	// methods for event subscription.
	//
	// EXPERIMENTAL: This setting will be removed in Tendermint v0.37.
	ExperimentalDisableWebsocket bool `mapstructure:"experimental-disable-websocket"`

	// The time window size for the event log. All events up to this long before
	// the latest (up to EventLogMaxItems) will be available for subscribers to
	// fetch via the /events method. If 0 (the default) the event log and the
	// /events RPC method are disabled.
	EventLogWindowSize time.Duration `mapstructure:"event-log-window-size"`

	// The maximum number of events that may be retained by the event log. If
	// this value is 0, no upper limit is set. Otherwise, items in excess of
	// this number will be discarded from the event log.
	//
	// Warning: This setting is a safety valve. Setting it too low may cause
	// subscribers to miss events. Try to choose a value higher than the
	// maximum worst-case expected event load within the chosen window size in
	// ordinary operation.
	//
	// For example, if the window size is 10 minutes and the node typically
	// averages 1000 events per ten minutes, but with occasional known spikes of
	// up to 2000, choose a value > 2000.
	EventLogMaxItems int `mapstructure:"event-log-max-items"`

	// How long to wait for a tx to be committed during /broadcast_tx_commit
	// WARNING: Using a value larger than 10s will result in increasing the
	// global HTTP write timeout, which applies to all connections and endpoints.
@@ -514,14 +487,9 @@ func DefaultRPCConfig() *RPCConfig {
		Unsafe:             false,
		MaxOpenConnections: 900,

		// Settings for event subscription.
		MaxSubscriptionClients:       100,
		MaxSubscriptionsPerClient:    5,
		ExperimentalDisableWebsocket: false, // compatible with TM v0.35 and earlier
		EventLogWindowSize:           0,     // disables /events RPC by default
		EventLogMaxItems:             0,

		TimeoutBroadcastTxCommit: 10 * time.Second,
		MaxSubscriptionClients:    100,
		MaxSubscriptionsPerClient: 5,
		TimeoutBroadcastTxCommit:  10 * time.Second,

		MaxBodyBytes:   int64(1000000), // 1MB
		MaxHeaderBytes: 1 << 20,        // same as the net/http default
@@ -551,12 +519,6 @@ func (cfg *RPCConfig) ValidateBasic() error {
	if cfg.MaxSubscriptionsPerClient < 0 {
		return errors.New("max-subscriptions-per-client can't be negative")
	}
	if cfg.EventLogWindowSize < 0 {
		return errors.New("event-log-window-size must not be negative")
	}
	if cfg.EventLogMaxItems < 0 {
		return errors.New("event-log-max-items must not be negative")
	}
	if cfg.TimeoutBroadcastTxCommit < 0 {
		return errors.New("timeout-broadcast-tx-commit can't be negative")
	}

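Since `EventLogWindowSize` defaults to zero, the `/events` method stays disabled until an operator opts in. A minimal sketch of enabling it programmatically, using only the fields and functions shown in these hunks (the chosen values are illustrative):

```go
// Sketch: start from the defaults, then opt in to the event log.
cfg := config.DefaultRPCConfig()
cfg.EventLogWindowSize = 30 * time.Second // non-zero enables the /events method
cfg.EventLogMaxItems = 5000               // 0 would mean no upper limit
if err := cfg.ValidateBasic(); err != nil {
	// both event-log settings must be non-negative
	panic(err)
}
```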
@@ -220,33 +220,6 @@ max-subscription-clients = {{ .RPC.MaxSubscriptionClients }}
# to the estimated maximum number of broadcast_tx_commit calls per block.
max-subscriptions-per-client = {{ .RPC.MaxSubscriptionsPerClient }}

# If true, disable the websocket interface to the RPC service. This has
# the effect of disabling the /subscribe, /unsubscribe, and /unsubscribe_all
# methods for event subscription.
#
# EXPERIMENTAL: This setting will be removed in Tendermint v0.37.
experimental-disable-websocket = {{ .RPC.ExperimentalDisableWebsocket }}

# The time window size for the event log. All events up to this long before
# the latest (up to EventLogMaxItems) will be available for subscribers to
# fetch via the /events method. If 0 (the default) the event log and the
# /events RPC method are disabled.
event-log-window-size = "{{ .RPC.EventLogWindowSize }}"

# The maximum number of events that may be retained by the event log. If
# this value is 0, no upper limit is set. Otherwise, items in excess of
# this number will be discarded from the event log.
#
# Warning: This setting is a safety valve. Setting it too low may cause
# subscribers to miss events. Try to choose a value higher than the
# maximum worst-case expected event load within the chosen window size in
# ordinary operation.
#
# For example, if the window size is 10 minutes and the node typically
# averages 1000 events per ten minutes, but with occasional known spikes of
# up to 2000, choose a value > 2000.
event-log-max-items = {{ .RPC.EventLogMaxItems }}

# How long to wait for a tx to be committed during /broadcast_tx_commit.
# WARNING: Using a value larger than 10s will result in increasing the
# global HTTP write timeout, which applies to all connections and endpoints.

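Concretely, following the sizing example in the comments above (a ten-minute window that normally sees about 1000 events but can spike to 2000), an operator might write something like the following; the values are illustrative, not defaults:

```toml
[rpc]
event-log-window-size = "10m0s"
event-log-max-items = 2500  # comfortably above the 2000-event worst case
```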
@@ -33,6 +33,10 @@ module.exports = {
      {
        "label": "v0.35",
        "key": "v0.35"
      },
      {
        "label": "master",
        "key": "master"
      }
    ],
    topbar: {
@@ -45,10 +49,8 @@ module.exports = {
          title: 'Resources',
          children: [
            {
              // TODO(creachadair): Figure out how to make this per-branch.
              // See: https://github.com/tendermint/tendermint/issues/7908
              title: 'RPC',
              path: 'https://docs.tendermint.com/v0.35/rpc/',
              path: 'https://docs.tendermint.com/master/rpc/',
              static: true
            },
          ]
@@ -160,12 +162,6 @@ module.exports = {
      {
        ga: 'UA-51029217-11'
      }
    ],
    [
      '@vuepress/plugin-html-redirect',
      {
        countdown: 0
      }
    ]
  ]
};

@@ -1 +0,0 @@
/master/ /v0.35/
@@ -21,7 +21,7 @@ Tendermint?](introduction/what-is-tendermint.md).

To get started quickly with an example application, see the [quick start guide](introduction/quick-start.md).

To learn about application development on Tendermint, see the [Application Blockchain Interface](../spec/abci).
To learn about application development on Tendermint, see the [Application Blockchain Interface](https://github.com/tendermint/tendermint/tree/master/spec/abci).

For more details on using Tendermint, see the respective documentation for
[Tendermint Core](tendermint-core/), [benchmarking and monitoring](tools/), and [network deployments](nodes/).

@@ -27,17 +27,17 @@ Usage:
  abci-cli [command]

Available Commands:
  batch           Run a batch of abci commands against an application
  check_tx        Validate a tx
  commit          Commit the application state and return the Merkle root hash
  console         Start an interactive abci console for multiple commands
  finalize_block  Send a set of transactions to the application
  kvstore         ABCI demo example
  echo            Have the application echo a message
  help            Help about any command
  info            Get some info about the application
  query           Query the application state
  set_option      Set an options on the application
  batch           Run a batch of abci commands against an application
  check_tx        Validate a tx
  commit          Commit the application state and return the Merkle root hash
  console         Start an interactive abci console for multiple commands
  deliver_tx      Deliver a new tx to the application
  kvstore         ABCI demo example
  echo            Have the application echo a message
  help            Help about any command
  info            Get some info about the application
  query           Query the application state
  set_option      Set an options on the application

Flags:
      --abci string   socket or grpc (default "socket")
@@ -53,7 +53,7 @@ Use "abci-cli [command] --help" for more information about a command.
The `abci-cli` tool lets us send ABCI messages to our application, to
help build and debug them.

The most important messages are `finalize_block`, `check_tx`, and `commit`,
The most important messages are `deliver_tx`, `check_tx`, and `commit`,
but there are others for convenience, configuration, and information
purposes.

@@ -173,7 +173,7 @@ Try running these commands:
-> code: OK
-> data.hex: 0x0000000000000000

> finalize_block "abc"
> deliver_tx "abc"
-> code: OK

> info
@@ -192,7 +192,7 @@ Try running these commands:
-> value: abc
-> value.hex: 616263

> finalize_block "def=xyz"
> deliver_tx "def=xyz"
-> code: OK

> commit
@@ -207,8 +207,8 @@ Try running these commands:
-> value.hex: 78797A
```

Note that if we do `finalize_block "abc"` it will store `(abc, abc)`, but if
we do `finalize_block "abc=efg"` it will store `(abc, efg)`.
Note that if we do `deliver_tx "abc"` it will store `(abc, abc)`, but if
we do `deliver_tx "abc=efg"` it will store `(abc, efg)`.

Similarly, you could put the commands in a file and run
`abci-cli --verbose batch < myfile`.

@@ -67,10 +67,6 @@ Note the context/background should be written in the present tense.
- [ADR-063: Privval-gRPC](./adr-063-privval-grpc.md)
- [ADR-066: E2E-Testing](./adr-066-e2e-testing.md)
- [ADR-072: Restore Requests for Comments](./adr-072-request-for-comments.md)
- [ADR-077: Block Retention](./adr-077-block-retention.md)
- [ADR-078: Non-zero Genesis](./adr-078-nonzero-genesis.md)
- [ADR-079: ED25519 Verification](./adr-079-ed25519-verification.md)
- [ADR-080: Reverse Sync](./adr-080-reverse-sync.md)

### Accepted

@@ -85,11 +81,6 @@ Note the context/background should be written in the present tense.
- [ADR-067: Mempool Refactor](./adr-067-mempool-refactor.md)
- [ADR-075: RPC Event Subscription Interface](./adr-075-rpc-subscription.md)
- [ADR-076: Combine Spec and Tendermint Repositories](./adr-076-combine-spec-repo.md)
- [ADR-081: Protocol Buffers Management](./adr-081-protobuf-mgmt.md)

### Deprecated

None

### Rejected

@@ -97,6 +88,7 @@ None
- [ADR-029: Check-Tx-Consensus](./adr-029-check-tx-consensus.md)
- [ADR-058: Event-Hashing](./adr-058-event-hashing.md)


### Proposed

- [ADR-007: Trust-Metric-Usage](./adr-007-trust-metric-usage.md)

@@ -84,7 +84,7 @@ The linear verification algorithm requires downloading all headers
between the `TrustHeight` and the `LatestHeight`. The lite client downloads the
full header for the provided `TrustHeight` and then proceeds to download `N+1`
headers and applies the [Tendermint validation
rules](https://github.com/tendermint/tendermint/tree/master/spec/light-client/verification/README.md)
rules](https://docs.tendermint.com/master/spec/light-client/verification/)
to each block.

### Bisecting Verification

@@ -18,7 +18,7 @@ graceful here, but that's for another day.

It's possible to fool lite clients without there being a fork on the
main chain - so called Fork-Lite. See the
[fork accountability](https://github.com/tendermint/tendermint/blob/master/spec/light-client/accountability/README.md)
[fork accountability](https://docs.tendermint.com/master/spec/light-client/accountability/)
document for more details. For a sequential lite client, this can happen via
equivocation or amnesia attacks. For a skipping lite client this can also happen
via lunatic validator attacks. There must be some way for applications to punish

@@ -2,8 +2,6 @@

## Changelog

- 01-Mar-2022: Update long-polling interface (@creachadair).
- 10-Feb-2022: Updates to reflect implementation.
- 26-Jan-2022: Marked accepted.
- 22-Jan-2022: Updated and expanded (@creachadair).
- 20-Nov-2021: Initial draft (@creachadair).
@@ -269,12 +267,12 @@ initial implementation will store the event log in-memory, and the operator
will be given two per-node configuration settings. Note, these names are
provisional:

- `[rpc] event-log-window-size`: A duration before the latest published event,
  during which the node will retain event items published. Setting this value
  to zero disables event subscription.
- `[event-subscription] time-window`: A duration before present during which the
  node will retain event items published. Setting this value to zero disables
  event subscription.

- `[rpc] event-log-max-items`: A maximum number of event items that the node
  will retain within the time window. If the number of items exceeds this
- `[event-subscription] max-items`: A maximum number of event items that the
  node will retain within the time window. If the number of items exceeds this
  value, the node discards the oldest items in the window. Setting this value
  to zero means that no limit is imposed on the number of items.

@@ -309,11 +307,11 @@ type EventParams struct {

	// Return only items after this cursor. If empty, the limit is just
	// before the beginning of the event log.
	After string `json:"after"`
	After string `json:"after_item"`

	// Return only items before this cursor. If empty, the limit is just
	// after the head of the event log.
	Before string `json:"before"`
	Before string `json:"before_item"`

	// Wait for up to this long for events to be available.
	WaitTime time.Duration `json:"wait_time"`
@@ -337,8 +335,8 @@ type Filter struct {
The semantics of the request are as follows: An item in the event log is
**eligible** for a query if:

- It is newer than the `after` cursor (if set).
- It is older than the `before` cursor (if set).
- It is newer than the `after_item` cursor (if set).
- It is older than the `before_item` cursor (if set).
- It matches the filter (if set).

Among the eligible items in the log, the server returns up to `max_results` of
@@ -346,13 +344,13 @@ the newest items, in reverse order of cursor. If `max_results` is unset the
server chooses a number to return, and will cap `max_results` at a sensible
limit.

The `wait_time` parameter is used to effect polling. If `before` is empty and
no items are available, the server will wait for up to `wait_time` for matching
items to arrive at the head of the log. If `wait_time` is zero or negative, the
server will wait for a default (positive) interval.
The `wait_time` parameter is used to effect polling. If `before_item` is empty,
the server will wait for up to `wait_time` for additional items, if there are
fewer than `max_results` eligible results in the log. If `wait_time` is zero,
the server will return whatever eligible items are available immediately.

If `before` is non-empty, `wait_time` is ignored: new results are only added to
the head of the log, so there is no need to wait. This allows the client to
If `before_item` is non-empty, `wait_time` is ignored: new results are only added
to the head of the log, so there is no need to wait. This allows the client to
poll for new data, and "page" backward through matching event items. This is
discussed in more detail below.

@@ -374,11 +372,11 @@ type EventReply struct {

	// The cursor of the oldest item in the log at the time of this reply,
	// or "" if the log is empty.
	Oldest string `json:"oldest"`
	Oldest string `json:"oldest_item"`

	// The cursor of the newest item in the log at the time of this reply,
	// or "" if the log is empty.
	Newest string `json:"newest"`
	Newest string `json:"newest_item"`
}

type EventItem struct {
@@ -394,9 +392,9 @@ type EventItem struct {
}
```

The `oldest` and `newest` fields of the reply report the cursors of the oldest
and newest items (of any kind) recorded in the event log at the time of the
reply, or are `""` if the log is empty.
The `oldest_item` and `newest_item` fields of the reply report the cursors of
the oldest and newest items (of any kind) recorded in the event log at the time
of the reply, or are `""` if the log is empty.

The `data` field contains the type-specific event datum. The datum carries any
ABCI events that may have been defined.
@@ -414,26 +412,26 @@ The semantics of the reply are as follows:
- If `more` is true, there is at least one additional, older item in the
  event log that was not returned (in excess of `max_results`).

  In this case the client can fetch the next page by setting `before` in a
  new request, to the cursor of the oldest item fetched (i.e., the last one
  in `items`).
  In this case the client can fetch the next page by setting `before_item`
  in a new request, to the cursor of the oldest item fetched (i.e., the
  last one in `items`).

- Otherwise (if `more` is false), all the matching results have been
  reported (pagination is complete).

- The first element of `items` identifies the newest item considered.
  Subsequent poll requests can set `after` to this cursor to skip items
  that were already retrieved.
  Subsequent poll requests can set `after_item` to this cursor to skip
  items that were already retrieved.

- If `items` is empty:

  - If the `before` was set in the request, there are no further eligible
    items for this query in the log (pagination is complete).
  - If the `before_item` was set in the request, there are no further
    eligible items for this query in the log (pagination is complete).

    This is just a safety case; the client can detect this without issuing
    another call by consulting the `more` field of the previous reply.

  - If the `before` was empty in the request, no eligible items were
  - If the `before_item` was empty in the request, no eligible items were
    available before the `wait_time` expired. The client may poll again to
    wait for more event items.

@@ -455,11 +453,12 @@ crashes and connectivity issues:

1. In ordinary operation, clients will **long-poll** the head of the event
   log for new events matching their criteria (by setting a `wait_time` and
   no `before`).
   no `before_item`).

2. If there are more events than the client requested, or if the client needs
   to read older events to recover from a stall or crash, clients will
   **page** backward through the event log (by setting `before` and `after`).
   **page** backward through the event log (by setting `before_item` and
   possibly `after_item`).

- While the new API requires explicit polling by the client, it makes better
  use of the node's existing HTTP infrastructure (e.g., connection pools).
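A minimal client-side sketch of steps 1 and 2 above, written against the `EventParams`/`EventReply` shapes in this ADR. Here `callEvents` stands in for a JSON-RPC call to the `/events` method, and the `Items`, `More`, and `Cursor` field names follow the reply semantics described above; treat all of these as assumptions rather than a fixed API:

```go
// pollEvents long-polls the head of the event log, then pages backward
// with before_item whenever a reply reports that more items exist.
func pollEvents(callEvents func(EventParams) (EventReply, error),
	handle func(EventItem)) error {
	var newest string // cursor of the newest item seen so far
	for {
		rsp, err := callEvents(EventParams{
			After:    newest,           // skip items already retrieved
			WaitTime: 30 * time.Second, // long-poll at the head of the log
		})
		if err != nil {
			return err
		}
		if len(rsp.Items) == 0 {
			continue // wait_time expired with nothing new; poll again
		}
		newest = rsp.Items[0].Cursor // first element is the newest item
		for {
			for _, it := range rsp.Items {
				handle(it)
			}
			if !rsp.More || len(rsp.Items) == 0 {
				break // pagination complete
			}
			// Page backward from the oldest item fetched (the last in Items).
			before := rsp.Items[len(rsp.Items)-1].Cursor
			if rsp, err = callEvents(EventParams{Before: before}); err != nil {
				return err
			}
		}
	}
}
```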
@@ -480,7 +479,7 @@ crashes and connectivity issues:
The initial implementation will do this by checking the tail of the event log
after each new item is published. If the number of items in the log exceeds
the item limit, it will delete oldest items until the log is under the limit;
then discard any older than the time window before the latest.
then discard any older than the time window before present.

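A sketch of that pruning pass, assuming the log is kept as a slice ordered oldest-to-newest and that each item records its publish time; the `logItem` type and field names are illustrative, and the window is measured from the latest item (one of the two variants shown in the diff above):

```go
type logItem struct{ at time.Time } // illustrative: one published event item

// prune first drops the oldest items beyond the count limit, then drops
// anything older than the time window before the latest item.
func prune(items []logItem, maxItems int, window time.Duration) []logItem {
	if maxItems > 0 && len(items) > maxItems {
		items = items[len(items)-maxItems:] // delete oldest over the limit
	}
	if len(items) == 0 {
		return items
	}
	cutoff := items[len(items)-1].at.Add(-window)
	i := 0
	for i < len(items) && items[i].at.Before(cutoff) {
		i++
	}
	return items[i:]
}
```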
To minimize coordination interference between the publisher (the event bus)
and the subscribers (the `events` service handlers), the event log will be
@@ -665,14 +664,13 @@ The following alternative approaches were considered:
- [rpc: remove duplication of events when querying][i7273] (#7273)

[rfc006]: https://github.com/tendermint/tendermint/blob/master/docs/rfc/rfc-006-event-subscription.md
[rpc-service]: https://github.com/tendermint/tendermint/blob/master/rpc/openapi/openapi.yaml
[rpc-service]: https://docs.tendermint.com/master/rpc
[query-grammar]: https://pkg.go.dev/github.com/tendermint/tendermint@master/internal/pubsub/query/syntax
[ws]: https://datatracker.ietf.org/doc/html/rfc6455
[jsonrpc2]: https://www.jsonrpc.org/specification
[nginx]: https://nginx.org/en/docs/
[fcgi]: http://www.mit.edu/~yandros/doc/specs/fcgi-spec.html
[rp-ws]: https://nginx.org/en/docs/http/websocket.html
<!-- markdown-link-check-disable-next-line -->
[ng-xm]: https://www.nginx.com/resources/wiki/extending/
[abci-event]: https://pkg.go.dev/github.com/tendermint/tendermint/abci/types#Event
[rfc001]: https://github.com/tendermint/tendermint/blob/master/docs/rfc/rfc-001-storage-engine.rst

@@ -1,201 +0,0 @@
# ADR 081: Protocol Buffers Management

## Changelog

- 2022-02-28: First draft

## Status

Accepted

[Tracking issue](https://github.com/tendermint/tendermint/issues/8121)

## Context

At present, we manage the [Protocol Buffers] schema files ("protos") that define
our wire-level data formats within the Tendermint repository itself (see the
[`proto`](../../proto/) directory). Recently, we have been making use of [Buf],
both locally and in CI, in order to generate Go stubs, and lint and check
`.proto` files for breaking changes.

The version of Buf used at the time of this decision was `v1beta1`, and it was
discussed in [\#7975] and in weekly calls as to whether we should upgrade to
`v1` and harmonize our approach with that used by the Cosmos SDK. The team
managing the Cosmos SDK was primarily interested in having our protos versioned
and easily accessible from the [Buf] registry.

The three main sets of stakeholders for the `.proto` files and their needs, as
currently understood, are as follows.

1. Tendermint needs Go code generated from `.proto` files.
2. Consumers of Tendermint's `.proto` files, specifically projects that want to
   interoperate with Tendermint and need to generate code for their own
   programming language, want to be able to access these files in a reliable and
   efficient way.
3. The Tendermint Core team wants to provide stable interfaces that are as easy
   as possible to maintain, on which consumers can depend, and to be able to
   notify those consumers promptly when those interfaces change. To this end, we
   want to:
   1. Prevent any breaking changes from being introduced in minor/patch releases
      of Tendermint. Only major version updates should be able to contain
      breaking interface changes.
   2. Prevent generated code from diverging from the Protobuf schema files.

There was also discussion surrounding the notion of automated documentation
generation and hosting, but it is not clear at this time whether this would be
that valuable to any of our stakeholders. What will, of course, be valuable at
minimum would be better documentation (in comments) of the `.proto` files
themselves.

## Alternative Approaches

### Meeting stakeholders' needs

1. Go stub generation from protos. We could use:
   1. [Buf]. This approach has been rather cumbersome up to this point, and it
      is not clear what Buf really provides beyond that which `protoc` provides
      to justify the additional complexity in configuring Buf for stub
      generation.
   2. [protoc] - the Protocol Buffers compiler.
2. Notification of breaking changes:
   1. Buf in CI for all pull requests to *release* branches only (and not on
      `master`).
   2. Buf in CI on every pull request to every branch (this was the case at the
      time of this decision, and the team decided that the signal-to-noise ratio
      for this approach was too low to be of value).
3. `.proto` linting:
   1. Buf in CI on every pull request
4. `.proto` formatting:
   1. [clang-format] locally and a [clang-format GitHub Action] in CI to check
      that files are formatted properly on every pull request.
5. Sharing of `.proto` files in a versioned, reliable manner:
   1. Consumers could simply clone the Tendermint repository, check out a
      specific commit, tag or branch and manually copy out all of the `.proto`
      files they need. This requires no effort from the Tendermint Core team and
      will continue to be an option for consumers. The drawback of this approach
      is that it requires manual coding/scripting to implement and is brittle in
      the face of bigger changes.
   2. Uploading our `.proto` files to Buf's registry on every release. This is
      by far the most seamless for consumers of our `.proto` files, but requires
      the dependency on Buf. This has the additional benefit that the Buf
      registry will automatically [generate and host
      documentation][buf-docs-gen] for these protos.
   3. We could create a process that, upon release, creates a `.zip` file
      containing our `.proto` files.

### Popular alternatives to Buf

[Prototool] was not considered as it appears deprecated, and the ecosystem seems
to be converging on Buf at this time.

### Tooling complexity

The more tools we have in our build/CI processes, the more complex and fragile
repository/CI management becomes, and the longer it takes to onboard new team
members. Maintainability is a core concern here.

### Buf sustainability and costs

One of the primary considerations regarding the usage of Buf is whether, for
example, access to its registry will eventually become a
paid-for/subscription-based service and whether this is valuable enough for us
and the ecosystem to pay for such a service. At this time, it appears as though
Buf will never charge for hosting open source projects' protos.

Another consideration was Buf's sustainability as a project - what happens when
their resources run out? Will there be a strong and broad enough open source
community to continue maintaining it?

### Local Buf usage options

Local usage of Buf (i.e. not in CI) can be accomplished in two ways:

1. Installing the relevant tools individually.
2. By way of its [Docker image][buf-docker].

Local installation of Buf requires developers to manually keep their toolchains
up-to-date. The Docker option comes with a number of complexities, including
how the file system permissions of code generated by a Docker container differ
between platforms (e.g. on Linux, Buf-generated code ends up being owned by
`root`).

The trouble with the Docker-based approach is that we make use of the
[gogoprotobuf] plugin for `protoc`. Continuing to use the Docker-based approach
to using Buf will mean that we will have to continue building our own custom
Docker image with embedded gogoprotobuf.

Along these lines, we could eventually consider coming up with a [Nix]- or
[redo]-based approach to developer tooling to ensure tooling consistency across
the team and for anyone who wants to be able to contribute to Tendermint.

## Decision

1. We will adopt Buf for now for proto generation, linting, breakage checking
   and its registry (mainly in CI, with optional usage locally).
2. Failing CI when checking for breaking changes in `.proto` files will only
   happen when performing minor/patch releases.
3. Local tooling will be favored over Docker-based tooling.

## Detailed Design

We currently aim to:

1. Update to Buf `v1` to facilitate linting, breakage checking and uploading to
   the Buf registry.
2. Configure CI appropriately for proto management:
   1. Uploading protos to the Buf registry on every release (e.g. the
      [approach][cosmos-sdk-buf-registry-ci] used by the Cosmos SDK).
   2. Linting on every pull request (e.g. the
      [approach][cosmos-sdk-buf-linting-ci] used by the Cosmos SDK). The linter
      passing should be considered a requirement for accepting PRs.
   3. Checking for breaking changes in minor/patch version releases and failing
      CI accordingly - see [\#8003].
   4. Add [clang-format GitHub Action] to check `.proto` file formatting. Format
      checking should be considered a requirement for accepting PRs.
3. Update the Tendermint [`Makefile`](../../Makefile) to primarily facilitate
   local Protobuf stub generation, linting, formatting and breaking change
   checking. More specifically:
   1. This includes removing the dependency on Docker and introducing the
      dependency on local toolchain installation. CI-based equivalents, where
      relevant, will rely on specific GitHub Actions instead of the Makefile.
   2. Go code generation will rely on `protoc` directly.

## Consequences

### Positive

- We will still offer Go stub generation, proto linting and breakage checking.
- Breakage checking will only happen on minor/patch releases to increase the
  signal-to-noise ratio in CI.
- Versioned protos will be made available via Buf's registry upon every release.

### Negative

- Developers/contributors will need to install the relevant Protocol
  Buffers-related tooling (Buf, gogoprotobuf, clang-format) locally in order to
  build, lint, format and check `.proto` files for breaking changes.

### Neutral

## References

- [Protocol Buffers]
- [Buf]
- [\#7975]
- [protoc] - The Protocol Buffers compiler

[Protocol Buffers]: https://developers.google.com/protocol-buffers
[Buf]: https://buf.build/
[\#7975]: https://github.com/tendermint/tendermint/pull/7975
[protoc]: https://github.com/protocolbuffers/protobuf
[clang-format]: https://clang.llvm.org/docs/ClangFormat.html
[clang-format GitHub Action]: https://github.com/marketplace/actions/clang-format-github-action
[buf-docker]: https://hub.docker.com/r/bufbuild/buf
[cosmos-sdk-buf-registry-ci]: https://github.com/cosmos/cosmos-sdk/blob/e6571906043b6751951a42b6546431b1c38b05bd/.github/workflows/proto-registry.yml
[cosmos-sdk-buf-linting-ci]: https://github.com/cosmos/cosmos-sdk/blob/e6571906043b6751951a42b6546431b1c38b05bd/.github/workflows/proto.yml#L15
[\#8003]: https://github.com/tendermint/tendermint/issues/8003
[Nix]: https://nixos.org/
[gogoprotobuf]: https://github.com/gogo/protobuf
[Prototool]: https://github.com/uber/prototool
[buf-docs-gen]: https://docs.buf.build/bsr/documentation
[redo]: https://redo.readthedocs.io/en/latest/
@@ -6,30 +6,12 @@

## Status

> An architecture decision is considered "proposed" when a PR containing the ADR
> is submitted. When merged, an ADR must have a status associated with it, which
> must be one of: "Accepted", "Rejected", "Deprecated" or "Superseded".
>
> An accepted ADR's implementation status must be tracked via a tracking issue,
> milestone or project board (only one of these is necessary). For example:
>
> Accepted
>
> [Tracking issue](https://github.com/tendermint/tendermint/issues/123)
> [Milestone](https://github.com/tendermint/tendermint/milestones/123)
> [Project board](https://github.com/orgs/tendermint/projects/123)
>
> Rejected ADRs are captured as a record of recommendations that we specifically
> do not (and possibly never) want to implement. The ADR itself must, for
> posterity, include reasoning as to why it was rejected.
>
> If an ADR is deprecated, simply write "Deprecated" in this section. If an ADR
> is superseded by one or more other ADRs, provide a local reference to those
> ADRs, e.g.:
>
> Superseded by [ADR 123](./adr-123.md)
> A decision may be "proposed" if it hasn't been agreed upon yet, or "accepted"
> once it is agreed upon. Once the ADR has been implemented mark the ADR as
> "implemented". If a later ADR changes or reverses a decision, it may be marked
> as "deprecated" or "superseded" with a reference to its replacement.

Accepted | Rejected | Deprecated | Superseded by
{Deprecated|Declined|Accepted|Implemented}

## Context

@@ -594,7 +594,7 @@ This section will cover settings within the p2p section of the `config.toml`.
- `pex` = turns the peer exchange reactor on or off. A validator node will want the `pex` turned off so it would not begin gossiping to unknown peers on the network. PeX can also be turned off for statically configured networks with fixed network connectivity. For full nodes on open, dynamic networks, it should be turned on.
- `private-peer-ids` = is a comma-separated list of node ids that will _not_ be exposed to other peers (i.e., you will not tell other peers about the ids in this list). This can be filled with a validator's node id.

Recently the Tendermint Team conducted a refactor of the p2p layer. This led to multiple config parameters being deprecated and/or replaced.
Recently the Tendermint Team conducted a refactor of the p2p layer. This led to multiple config paramters being deprecated and/or replaced.

We will cover the new and deprecated parameters below.
### New Parameters
@@ -606,7 +606,7 @@ There are three new parameters, which are enabled if use-legacy is set to false.
- `max-connections` = is the max amount of allowed inbound and outbound connections.
### Deprecated Parameters

> Note: For Tendermint 0.35, there are two p2p implementations. The old version is used by default with the deprecated fields. The new implementation uses different config parameters, explained above.
> Note: For Tendermint 0.35, there are two p2p implementations. The old version is used by deafult with the deprecated fields. The new implementation uses different config parameters, explained above.

- `max-num-inbound-peers` = is the maximum number of peers you will accept inbound connections from at one time (where they dial your address and initiate the connection). *This was replaced by `max-connections`*
- `max-num-outbound-peers` = is the maximum number of peers you will initiate outbound connections to at one time (where you dial their address and initiate the connection). *This was replaced by `max-connections`*

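Taken together, a hypothetical validator configuration on the new p2p stack might look like the following; the values are illustrative, not recommendations, and the exact set of available keys should be checked against your Tendermint version:

```toml
[p2p]
use-legacy = false    # opt in to the new p2p implementation
pex = false           # validators typically run with peer exchange off
private-peer-ids = "" # node ids to keep out of peer gossip (e.g. a validator's id)
max-connections = 64  # replaces max-num-inbound-peers / max-num-outbound-peers
```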
@@ -40,7 +40,6 @@ The following metrics are available:
| consensus_fast_syncing | gauge | | either 0 (not fast syncing) or 1 (syncing) |
| consensus_state_syncing | gauge | | either 0 (not state syncing) or 1 (syncing) |
| consensus_block_size_bytes | Gauge | | Block size in bytes |
| evidence_pool_num_evidence | Gauge | | Number of evidence in the evidence pool |
| p2p_peers | Gauge | | Number of peers node's connected to |
| p2p_peer_receive_bytes_total | counter | peer_id, chID | number of bytes per channel received from a given peer |
| p2p_peer_send_bytes_total | counter | peer_id, chID | number of bytes per channel sent to a given peer |

docs/package-lock.json (generated, 14305 lines): file diff suppressed because it is too large.
@@ -7,7 +7,6 @@
    "vuepress-theme-cosmos": "^1.0.183"
  },
  "devDependencies": {
    "@vuepress/plugin-html-redirect": "^0.1.4",
    "watchpack": "^2.3.1"
  },
  "scripts": {

@@ -1,261 +0,0 @@
# RFC 015: ABCI++ TX Mutation

## Changelog

- 23-Feb-2022: Initial draft (@williambanfield).
- 28-Feb-2022: Revised draft (@williambanfield).

## Abstract

A previous version of the ABCI++ specification detailed a mechanism for proposers to replace transactions
in the proposed block. This scheme required the proposer to construct new transactions
and mark these new transactions as replacing other removed transactions. The specification
was ambiguous as to how the replacement may be communicated to peer nodes.
This RFC discusses issues with this mechanism and possible solutions.

## Background

### What is the proposed change?

A previous version of the ABCI++ specification proposed mechanisms for adding, removing, and replacing
transactions in a proposed block. To replace a transaction, the application running
`ProcessProposal` could mark a transaction as replaced by other application-supplied
transactions by returning a new transaction marked with the `ADDED` flag, setting
the `new_hashes` field of the removed transaction to contain the list of transaction hashes
that replace it. In that previous specification for ABCI++, the full use of the
`new_hashes` field is left somewhat ambiguous. At present, these hashes are not
gossiped and are not eventually included in the block to signal replacement to
other nodes. The specification did indicate that the transactions specified in
the `new_hashes` field will be removed from the mempool but it's not clear how
peer nodes will learn about them.

### What systems would be affected by adding transaction replacement?

The 'transaction' is a central building block of a Tendermint blockchain, so adding
a mechanism for transaction replacement would require changes to many aspects of Tendermint.

The following is a rough list of the functionality that this mechanism would affect:

#### Transaction indexing

Tendermint's indexer stores transactions and transaction results using the hash of the executed
transaction [as the key][tx-result-index] and the ABCI results and transaction bytes as the value.

To allow transaction replacement, the replaced transactions would need to be stored as well in the
indexer, likely as a mapping of original transaction to list of transaction hashes that replaced
the original transaction.

#### Transaction inclusion proofs

The result of a transaction query includes a Merkle proof of the existence of the
transaction in the blockchain. This [proof is built][inclusion-proof] as a Merkle tree
of the hashes of all of the transactions in the block where the queried transaction was executed.

To allow transaction replacement, these proofs would need to be updated to prove
that a replaced transaction was included by replacement in the block.

#### RPC-based transaction query parameters and results

Tendermint's RPC allows clients to retrieve information about transactions via the
`/tx_search` and `/tx` RPC endpoints.

RPC query results containing replaced transactions would need to be updated to include
information on replaced transactions, either by returning results for all of the replaced
transactions, or by including a response with just the hashes of the replaced transactions
which clients could proceed to query individually.

#### Mempool transaction removal

Additional logic would need to be added to the Tendermint mempool to clear out replaced
transactions after each block is executed. Tendermint currently removes executed transactions
from the mempool, so this would be a pretty straightforward change.

## Discussion

### What value may be added to Tendermint by introducing transaction replacement?

Transaction replacement would enable applications to aggregate or disaggregate transactions.

For aggregation, a set of transactions that all perform related work, such as transferring
tokens between the same two accounts, could be replaced with a single transaction,
i.e. one that transfers a single sum from one account to the other.
Applications that make frequent use of aggregation may be able to achieve a higher throughput.
Aggregation would decrease the space occupied by a single client-submitted transaction in the block, allowing
more client-submitted transactions to be executed per block.

For disaggregation, a very complex transaction could be split into multiple smaller transactions.
This may be useful if an application wishes to perform more fine-grained indexing on intermediate parts
of a multi-part transaction.

### Drawbacks to transaction replacement

Transaction replacement would require updating and shimming many of the places that
Tendermint records and exposes information about executed transactions. While
systems within Tendermint could be updated to account for transaction replacement,
such a system would introduce new issues and rough edges.

#### No way of guaranteeing correct replacement

If a user issues a transaction to the network and the transaction is replaced, the
user has no guarantee that the replacement was correct. For example, suppose a set of users issue
transactions A, B, and C and they are all aggregated into a new transaction, D.
There is nothing guaranteeing that D was constructed correctly from the inputs.
The only way for users to ensure D is correct would be if D contained all of the
information of its constituent transactions, in which case, nothing is really gained by the replacement.

#### Replacement transactions not signed by submitter

Abstractly, Tendermint simply views transactions as a ball of bytes and therefore
should be fine with replacing one for another. However, many applications require
that transactions submitted to the chain be signed by some private key to authenticate
and authorize the transaction. Replaced transactions could not be signed by the
submitter, only by the application node. Therefore, any use of transaction replacement
could not carry authorization from the submitter, and would need to grant
application-submitted transactions the power to perform application logic on behalf
of a user without their consent.

Granting this power to application-submitted transactions would be very dangerous
and therefore might not be of much value to application developers.
Transaction replacement might only be really safe in the case of application-submitted
transactions or for transactions that require no authorization. For such transactions,
it's not quite clear what the utility of replacement is: the application can already
generate any transactions that it wants. The fact that such a transaction was a replacement
is not particularly relevant to participants in the chain since the application is
merely replacing its own transactions.

#### New vector for censorship

Depending on the implementation, transaction replacement may allow a node to signal
to the rest of the chain that some transaction should no longer be considered for execution.
Honest nodes will use the replacement mechanism to signal that a transaction has been aggregated.
Malicious nodes will be granted a new vector for censoring transactions.
There is no guarantee that a replaced transaction is actually executed at all.
A malicious node could censor a transaction by simply listing it as replaced.
Honest nodes seeing the replacement would flush the transaction from their mempool
and not execute or propose it in later blocks.

### Transaction tracking implementations

This section discusses possible ways to flesh out the implementation of transaction replacement.
Specifically, this section proposes a few alternative ways that Tendermint blockchains could
track and store transaction replacements.

#### Include transaction replacements in the block

One option to track transaction replacement is to include information on the
transaction replacement within the block. An additional structure may be added
to the block of the following form:

```proto
message Block {
  ...
  repeated Replacement replacements = 5;
}

message Replacement {
  bytes included_tx_key            = 1;
  repeated bytes replaced_txs_keys = 2;
}
```

Applications executing `PrepareProposal` would return the list of replacements and
Tendermint would include an encoding of these replacements in the block that is gossiped
and committed.

Tendermint's transaction indexing would include a new mapping for each replaced transaction
key to the committed transaction.
Transaction inclusion proofs would be updated to include these additional new transaction
keys in the Merkle tree and queries for transaction hashes that were replaced would return
information indicating that the transaction was replaced along with the hash of the
transaction that replaced it.

Block validation of gossiped blocks would be updated to check that each of the
`included_tx_key` fields matches the hash of some transaction in the proposed block.

Implementing the changes described in this section would allow Tendermint to gossip
and index transaction replacements as part of block propagation. These changes would
still require the application to certify that the replacements were valid. This
validation may be performed in one of two ways:

1. **Applications optimistically trust that the proposer performed a legitimate replacement.**

   In this validation scheme, applications would not verify that the substitution
   is valid during consensus and instead simply trust that the proposer is correct.
   This would have the drawback of allowing a malicious proposer to remove transactions
   it did not want executed.

2. **Applications completely validate transaction replacement.**

   In this validation scheme, applications that allow replacement would check that
   each listed replaced transaction was correctly reflected in the replacement transaction.
   In order to perform such validation, the node would need to have the replaced transactions
   locally. This could be accomplished one of a few ways: by querying the mempool,
   by adding an additional p2p gossip channel for transaction replacements, or by including the replaced transactions
   in the block. Replacement validation via mempool querying would require the node
   to have received all of the replaced transactions in the mempool, which is far from
   guaranteed. Adding an additional gossip channel would make gossiping replaced transactions
   a requirement for consensus to proceed, since all nodes would need to receive all replacement
   messages before considering a block valid. Finally, including replaced transactions in
   the block seems to obviate any benefit gained from performing a transaction replacement,
   since the replaced transaction and the original transactions would now both appear in the block.

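A minimal sketch of the block-validation rule above and of scheme 2, assuming a generated `Replacement` type with an `IncludedTxKey` field and a hypothetical application-side `aggregate` function (both are assumptions for illustration, not part of the RFC):

```go
import (
	"bytes"
	"crypto/sha256"
)

// checkReplacements verifies that every Replacement references a transaction
// actually present in the proposed block. Tendermint tx keys are SHA256 hashes.
func checkReplacements(blockTxs [][]byte, reps []Replacement) bool {
	inBlock := make(map[[sha256.Size]byte]bool, len(blockTxs))
	for _, tx := range blockTxs {
		inBlock[sha256.Sum256(tx)] = true
	}
	for _, r := range reps {
		var key [sha256.Size]byte
		copy(key[:], r.IncludedTxKey)
		if !inBlock[key] {
			return false
		}
	}
	return true
}

// validateReplacement is scheme 2 in miniature: recompute the aggregate
// from the replaced transactions and require an exact byte-for-byte match.
func validateReplacement(included []byte, replaced [][]byte,
	aggregate func([][]byte) []byte) bool {
	return bytes.Equal(aggregate(replaced), included)
}
```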
#### Application defined transaction replacement

An additional option for allowing transaction replacement is to leave it entirely as a responsibility
of the application. The `PrepareProposal` ABCI++ call allows for applications to add
new transactions to a proposed block. Applications that wished to implement a transaction
replacement mechanism would be free to do so without the newly defined `new_hashes` field.
Applications wishing to implement transaction replacement would add the aggregated
transactions in the `PrepareProposal` response, and include one additional bookkeeping
transaction that listed all of the replacements, with a similar scheme to the `new_hashes`
field described in ABCI++. This new bookkeeping transaction could be used by the
application to determine which transactions to clear from the mempool in future calls
to `CheckTx`.

The meaning of any transaction in the block is completely opaque to Tendermint,
so applications performing this style of replacement would not be able to have the replacement
reflected in most of Tendermint's transaction tracking mechanisms, such as transaction indexing
and the `/tx` endpoint.

#### Application defined Tx Keys

Tendermint currently uses cryptographic hashes, SHA256, as a key for each transaction.
As noted in the section on systems that would require changing, this key is used
to identify the transaction in the mempool, in the indexer, and within the RPC system.

An alternative approach to allowing `ProcessProposal` to specify a set of transaction
replacements would be instead to allow the application to specify an additional key or set
of keys for each transaction during `ProcessProposal`. This new `secondary_keys` set
would be included in the block and therefore gossiped during block propagation.
Additional RPC endpoints could be exposed to query by the application-defined keys.

Applications wishing to implement replacement would leverage this new field by providing the
replaced transaction hashes as the `secondary_keys` and checking their validity during
`ProcessProposal`. During `RecheckTx` the application would then be responsible for
clearing out transactions that matched the `secondary_keys`.

It is worth noting that something like this would be possible without `secondary_keys`.
An application wishing to implement a system like this one could define a replacement
transaction, as discussed in the section on application-defined transaction replacement,
and use a custom [ABCI event type][abci-event-type] to communicate that the replacement should
be indexed within Tendermint's ABCI event indexing.

### Complexity to value-add tradeoff

It is worth remarking that adding a system like this may introduce a decent amount
of new complexity into Tendermint. An approach that leaves much of the replacement
logic to Tendermint would require altering the core transaction indexing and querying
data. In many of the cases listed, a system for transaction replacement is possible
without explicitly defining it as part of `PrepareProposal`. Since applications
can now add transactions during `PrepareProposal` they can and should leverage this
functionality to include additional bookkeeping transactions in the block. It may
be worth encouraging applications to discover new and interesting ways to leverage this
power instead of immediately solving the problem for them.

### References

[inclusion-proof]: https://github.com/tendermint/tendermint/blob/0fcfaa4568cb700e27c954389c1fcd0b9e786332/types/tx.go#L67
[tx-serach-result]: https://github.com/tendermint/tendermint/blob/0fcfaa4568cb700e27c954389c1fcd0b9e786332/rpc/coretypes/responses.go#L267
[tx-rpc-func]: https://github.com/tendermint/tendermint/blob/0fcfaa4568cb700e27c954389c1fcd0b9e786332/internal/rpc/core/tx.go#L21
[tx-result-index]: https://github.com/tendermint/tendermint/blob/0fcfaa4568cb700e27c954389c1fcd0b9e786332/internal/state/indexer/tx/kv/kv.go#L90
[abci-event-type]: https://github.com/tendermint/tendermint/blob/0fcfaa4568cb700e27c954389c1fcd0b9e786332/abci/types/types.pb.go#L3168
@@ -47,7 +47,7 @@ An overhaul of the existing interface between the application and consensus, to

### Proposer-Based Timestamps

Proposer-based timestamps are a replacement of [BFT time](https://github.com/tendermint/tendermint/blob/master/spec/consensus/bft-time.md), whereby the proposer chooses a timestamp and validators vote on the block only if the timestamp is considered *timely*. This increases reliance on an accurate local clock, but in exchange makes block time more reliable and resistant to faults. This has important use cases in light clients, IBC relayers, CosmosHub inflation and enabling signature aggregation. [More](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-071-proposer-based-timestamps.md)
Proposer-based timestamps are a replacement of [BFT time](https://docs.tendermint.com/master/spec/consensus/bft-time.html), whereby the proposer chooses a timestamp and validators vote on the block only if the timestamp is considered *timely*. This increases reliance on an accurate local clock, but in exchange makes block time more reliable and resistant to faults. This has important use cases in light clients, IBC relayers, CosmosHub inflation and enabling signature aggregation. [More](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-071-proposer-based-timestamps.md)

### RPC Event Subscription

@@ -1,95 +0,0 @@

---
order: 3
---

# PBTS

This document provides an overview of the Proposer-Based Timestamp (PBTS)
algorithm added to Tendermint in the v0.36 release. It outlines the core
functionality as well as the parameters and constraints of this algorithm.

## Algorithm Overview

The PBTS algorithm defines a way for a Tendermint blockchain to create block
timestamps that are within a reasonable bound of the clocks of the validators on
the network. This replaces the original BFTTime algorithm for timestamp
assignment that relied on the timestamps included in precommit messages.

## Algorithm Parameters

The functionality of the PBTS algorithm is governed by two parameters within
Tendermint. These two parameters are [consensus
parameters](https://github.com/tendermint/tendermint/blob/master/spec/abci/apps.md#L291),
meaning they are configured by the ABCI application and are expected to be the
same across all nodes on the network.
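To make that concrete, an application developer could set both values when constructing the consensus parameters for a network's genesis. This is a minimal sketch assuming the `SynchronyParams` field names introduced with PBTS (ADR-071); check your version's `types` package before relying on them.

```go
package main

import (
	"fmt"
	"time"

	"github.com/tendermint/tendermint/types"
)

func main() {
	// Start from the defaults and tighten the synchrony bounds.
	params := types.DefaultConsensusParams()

	// Assumed field names from the PBTS consensus parameters (ADR-071).
	params.Synchrony.Precision = 500 * time.Millisecond
	params.Synchrony.MessageDelay = 2 * time.Second

	// These would typically be placed in the genesis document so that
	// every node on the network starts with identical values.
	fmt.Printf("precision=%s message_delay=%s\n",
		params.Synchrony.Precision, params.Synchrony.MessageDelay)
}
```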
### `Precision`

The `Precision` parameter configures the acceptable upper-bound of clock drift
among all of the nodes on a Tendermint network. Any two nodes on a Tendermint
network are expected to have clocks that differ by at most `Precision`
milliseconds at any given instant.

### `MessageDelay`

The `MessageDelay` parameter configures the acceptable upper-bound for
transmitting a `Proposal` message from the proposer to _all_ of the validators
on the network.

Networks should choose as small a value for `MessageDelay` as is practical,
provided it is large enough that messages can reach all participants with high
probability given the number of participants and latency of their connections.

## Algorithm Concepts

### Block timestamps

Each block produced by the Tendermint consensus engine contains a timestamp.
The timestamp produced in each block is a meaningful representation of time that is
useful for the protocols and applications built on top of Tendermint.

The following protocols and application features require a reliable source of time:

* Tendermint Light Clients [rely on correspondence between their known time](https://github.com/tendermint/tendermint/blob/master/spec/light-client/verification/README.md#definitions-1) and the block time for block verification.
* Tendermint Evidence validity is determined [either in terms of heights or in terms of time](https://github.com/tendermint/tendermint/blob/master/spec/consensus/evidence.md#verification).
* Unbonding of staked assets in the Cosmos Hub [occurs after a period of 21
  days](https://github.com/cosmos/governance/blob/master/params-change/Staking.md#unbondingtime).
* IBC packets can use either a [timestamp or a height to timeout packet
  delivery](https://docs.cosmos.network/v0.44/ibc/overview.html#acknowledgements).

### Proposer Selects a Block Timestamp

When the proposer node creates a new block proposal, the node reads the time
from its local clock and uses this reading as the timestamp for the proposed
block.

### Timeliness

When each validator on a Tendermint network receives a proposed block, it
performs a series of checks to ensure that the block can be considered valid as
a candidate to be the next block in the chain.

The PBTS algorithm performs a validity check on the timestamp of proposed
blocks. When a validator receives a proposal, it ensures that the timestamp in
the proposal is within a bound of the validator's local clock. Specifically, the
algorithm checks that the timestamp is no more than `Precision` greater than the
node's local clock and no less than `Precision` + `MessageDelay` behind the
node's local clock. This creates a range of acceptable timestamps around the
node's local time. If the timestamp is within this range, the PBTS algorithm
considers the block **timely**. If a block is not **timely**, the node will
issue a `nil` `prevote` for this block, signaling to the rest of the network
that the node does not consider the block to be valid.
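The timeliness rule above reduces to an interval check around the validator's local clock. The sketch below restates it in Go; the function name and signature are illustrative, not taken from the Tendermint codebase.

```go
package main

import "time"

// isTimely reports whether a proposal timestamp falls inside the window
// described above: at most `precision` ahead of the local clock, and at
// most `precision + messageDelay` behind it.
func isTimely(proposalTime, localTime time.Time, precision, messageDelay time.Duration) bool {
	earliest := localTime.Add(-(precision + messageDelay)) // lower bound
	latest := localTime.Add(precision)                     // upper bound
	return !proposalTime.Before(earliest) && !proposalTime.After(latest)
}
```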
### Clock Synchronization

The PBTS algorithm requires that the clocks of the validators on a Tendermint network
be within `Precision` of each other. In practice, this means that validators
should periodically synchronize to a reliable NTP server. Validators that drift
too far away from the rest of the network will no longer propose blocks with
valid timestamps. Additionally, they will not consider the timestamps of blocks
proposed by their peers to be valid.

## See Also

* [The PBTS specification](https://github.com/tendermint/tendermint/blob/master/spec/consensus/proposer-based-timestamp/README.md)
  contains all of the details of the algorithm.
@@ -212,7 +212,7 @@ etc.) by Tendermint Core.

Valid transactions will eventually be committed given they are not too big and
have enough gas. To learn more about gas, check out ["the
specification"](https://github.com/tendermint/tendermint/blob/master/spec/abci/apps.md#gas).
specification"](https://docs.tendermint.com/master/spec/abci/apps.html#gas).

For the underlying key-value store we'll use
[badger](https://github.com/dgraph-io/badger), which is an embeddable,
@@ -331,7 +331,7 @@ func (app *KVStoreApplication) Query(reqQuery abcitypes.RequestQuery) (resQuery
```

The complete specification can be found
[here](https://github.com/tendermint/tendermint/tree/master/spec/abci/).
[here](https://docs.tendermint.com/master/spec/abci/).

## 1.4 Starting an application and a Tendermint Core instance in the same process

@@ -210,7 +210,7 @@ etc.) by Tendermint Core.

Valid transactions will eventually be committed given they are not too big and
have enough gas. To learn more about gas, check out ["the
specification"](https://github.com/tendermint/tendermint/blob/master/spec/abci/apps.md#gas).
specification"](https://docs.tendermint.com/master/spec/abci/apps.html#gas).

For the underlying key-value store we'll use
[badger](https://github.com/dgraph-io/badger), which is an embeddable,
@@ -328,7 +328,7 @@ func (app *KVStoreApplication) Query(reqQuery abcitypes.RequestQuery) (resQuery
```

The complete specification can be found
[here](https://github.com/tendermint/tendermint/tree/master/spec/abci/).
[here](https://docs.tendermint.com/master/spec/abci/).

## 1.4 Starting an application and a Tendermint Core instance

11
go.mod
@@ -26,7 +26,7 @@ require (
	github.com/rs/cors v1.8.2
	github.com/rs/zerolog v1.26.1
	github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa
	github.com/spf13/cobra v1.4.0
	github.com/spf13/cobra v1.3.0
	github.com/spf13/viper v1.10.1
	github.com/stretchr/testify v1.7.0
	github.com/tendermint/tm-db v0.6.6
@@ -34,17 +34,11 @@ require (
	golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce
	golang.org/x/net v0.0.0-20211208012354-db4efeb81f4b
	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
	google.golang.org/grpc v1.45.0
	google.golang.org/grpc v1.44.0
	gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b // indirect
	pgregory.net/rapid v0.4.7
)

require (
	github.com/creachadair/atomicfile v0.2.4
	github.com/google/go-cmp v0.5.7
	gotest.tools v2.2.0+incompatible
)

require (
	4d63.com/gochecknoglobals v0.1.0 // indirect
	github.com/Antonboom/errname v0.1.5 // indirect
@@ -112,6 +106,7 @@ require (
	github.com/golangci/revgrep v0.0.0-20210930125155-c22e5001d4f2 // indirect
	github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 // indirect
	github.com/google/btree v1.0.0 // indirect
	github.com/google/go-cmp v0.5.7 // indirect
	github.com/gordonklaus/ineffassign v0.0.0-20210914165742-4cc7213b9bc8 // indirect
	github.com/gostaticanalysis/analysisutil v0.7.1 // indirect
	github.com/gostaticanalysis/comment v1.4.2 // indirect

9
go.sum
@@ -217,8 +217,6 @@ github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwc
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creachadair/atomicfile v0.2.4 h1:GRjpQLmz/78I4+nBQpGMFrRa9yrL157AUTrA6hnF0YU=
github.com/creachadair/atomicfile v0.2.4/go.mod h1:BRq8Une6ckFneYXZQ+kO7p1ZZP3I2fzVzf28JxrIkBc=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
@@ -940,9 +938,8 @@ github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkU
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
github.com/spf13/cobra v1.3.0 h1:R7cSvGu+Vv+qX0gW5R/85dx2kmmJT5z5NM8ifdYjdn0=
github.com/spf13/cobra v1.3.0/go.mod h1:BrRVncBjOJa/eUcVVm9CE+oC6as8k+VYr4NY7WCi9V4=
github.com/spf13/cobra v1.4.0 h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q=
github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
@@ -1626,8 +1623,8 @@ google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9K
google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M=
google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
google.golang.org/grpc v1.44.0 h1:weqSxi/TMs1SqFRMHCtBgXRs8k3X39QIDEZ0pRcttUg=
google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=

@@ -168,7 +168,7 @@ func (pool *BlockPool) removeTimedoutPeers() {
	for _, peer := range pool.peers {
		// check if peer timed out
		if !peer.didTimeout && peer.numPending > 0 {
			curRate := peer.recvMonitor.CurrentTransferRate()
			curRate := peer.recvMonitor.Status().CurRate
			// curRate can be 0 on start
			if curRate != 0 && curRate < minRecvRate {
				err := errors.New("peer is not sending us data fast enough")

@@ -70,8 +70,6 @@ type Reactor struct {

	// immutable
	initialState sm.State
	// store
	stateStore sm.Store

	blockExec *sm.BlockExecutor
	store     *store.BlockStore
@@ -103,7 +101,7 @@ type Reactor struct {
func NewReactor(
	ctx context.Context,
	logger log.Logger,
	stateStore sm.Store,
	state sm.State,
	blockExec *sm.BlockExecutor,
	store *store.BlockStore,
	consReactor consensusReactor,
@@ -113,6 +111,19 @@ func NewReactor(
	metrics *consensus.Metrics,
	eventBus *eventbus.EventBus,
) (*Reactor, error) {

	if state.LastBlockHeight != store.Height() {
		return nil, fmt.Errorf("state (%v) and store (%v) height mismatch", state.LastBlockHeight, store.Height())
	}

	startHeight := store.Height() + 1
	if startHeight == 1 {
		startHeight = state.InitialHeight
	}

	requestsCh := make(chan BlockRequest, maxTotalRequesters)
	errorsCh := make(chan peerError, maxPeerErrBuffer) // NOTE: The capacity should be larger than the peer count.

	blockSyncCh, err := channelCreator(ctx, GetChannelDescriptor())
	if err != nil {
		return nil, err
@@ -120,16 +131,20 @@ func NewReactor(

	r := &Reactor{
		logger:               logger,
		stateStore:           stateStore,
		initialState:         state,
		blockExec:            blockExec,
		store:                store,
		pool:                 NewBlockPool(logger, startHeight, requestsCh, errorsCh),
		consReactor:          consReactor,
		blockSync:            newAtomicBool(blockSync),
		requestsCh:           requestsCh,
		errorsCh:             errorsCh,
		blockSyncCh:          blockSyncCh,
		blockSyncOutBridgeCh: make(chan p2p.Envelope),
		peerUpdates:          peerUpdates,
		metrics:              metrics,
		eventBus:             eventBus,
		syncStartTime:        time.Time{},
	}

	r.BaseService = *service.NewBaseService(logger, "BlockSync", r)
@@ -144,27 +159,6 @@ func NewReactor(
// If blockSync is enabled, we also start the pool and the pool processing
// goroutine. If the pool fails to start, an error is returned.
func (r *Reactor) OnStart(ctx context.Context) error {
	state, err := r.stateStore.Load()
	if err != nil {
		return err
	}
	r.initialState = state

	if state.LastBlockHeight != r.store.Height() {
		return fmt.Errorf("state (%v) and store (%v) height mismatch", state.LastBlockHeight, r.store.Height())
	}

	startHeight := r.store.Height() + 1
	if startHeight == 1 {
		startHeight = state.InitialHeight
	}

	requestsCh := make(chan BlockRequest, maxTotalRequesters)
	errorsCh := make(chan peerError, maxPeerErrBuffer) // NOTE: The capacity should be larger than the peer count.
	r.pool = NewBlockPool(r.logger, startHeight, requestsCh, errorsCh)
	r.requestsCh = requestsCh
	r.errorsCh = errorsCh

	if r.blockSync.IsSet() {
		if err := r.pool.Start(ctx); err != nil {
			return err

@@ -7,7 +7,6 @@ import (
	"time"

	"github.com/fortytw2/leaktest"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
	dbm "github.com/tendermint/tm-db"

@@ -15,8 +14,7 @@ import (
	abci "github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/internal/consensus"
	"github.com/tendermint/tendermint/internal/eventbus"
	mpmocks "github.com/tendermint/tendermint/internal/mempool/mocks"
	"github.com/tendermint/tendermint/internal/mempool/mock"
	"github.com/tendermint/tendermint/internal/p2p"
	"github.com/tendermint/tendermint/internal/p2p/p2ptest"
	"github.com/tendermint/tendermint/internal/proxy"
@@ -35,7 +33,7 @@ type reactorTestSuite struct {
	nodes []types.NodeID

	reactors map[types.NodeID]*Reactor
	app      map[types.NodeID]abciclient.Client
	app      map[types.NodeID]proxy.AppConns

	blockSyncChannels map[types.NodeID]*p2p.Channel
	peerChans         map[types.NodeID]chan p2p.PeerUpdate
@@ -66,7 +64,7 @@ func setup(
		network:           p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: numNodes}),
		nodes:             make([]types.NodeID, 0, numNodes),
		reactors:          make(map[types.NodeID]*Reactor, numNodes),
		app:               make(map[types.NodeID]abciclient.Client, numNodes),
		app:               make(map[types.NodeID]proxy.AppConns, numNodes),
		blockSyncChannels: make(map[types.NodeID]*p2p.Channel, numNodes),
		peerChans:         make(map[types.NodeID]chan p2p.PeerUpdate, numNodes),
		peerUpdates:       make(map[types.NodeID]*p2p.PeerUpdates, numNodes),
@@ -111,7 +109,7 @@ func (rts *reactorTestSuite) addNode(
	logger := log.TestingLogger()

	rts.nodes = append(rts.nodes, nodeID)
	rts.app[nodeID] = proxy.New(abciclient.NewLocalClient(logger, &abci.BaseApplication{}), logger, proxy.NopMetrics())
	rts.app[nodeID] = proxy.NewAppConns(abciclient.NewLocalCreator(&abci.BaseApplication{}), logger, proxy.NopMetrics())
	require.NoError(t, rts.app[nodeID].Start(ctx))

	blockDB := dbm.NewMemDB()
@@ -122,29 +120,14 @@ func (rts *reactorTestSuite) addNode(
	state, err := sm.MakeGenesisState(genDoc)
	require.NoError(t, err)
	require.NoError(t, stateStore.Save(state))
	mp := &mpmocks.Mempool{}
	mp.On("Lock").Return()
	mp.On("Unlock").Return()
	mp.On("FlushAppConn", mock.Anything).Return(nil)
	mp.On("Update",
		mock.Anything,
		mock.Anything,
		mock.Anything,
		mock.Anything,
		mock.Anything,
		mock.Anything).Return(nil)

	eventbus := eventbus.NewDefault(logger)
	require.NoError(t, eventbus.Start(ctx))

	blockExec := sm.NewBlockExecutor(
		stateStore,
		log.TestingLogger(),
		rts.app[nodeID],
		mp,
		rts.app[nodeID].Consensus(),
		mock.Mempool{},
		sm.EmptyEvidencePool{},
		blockStore,
		eventbus,
	)

	for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ {
@@ -171,7 +154,8 @@ func (rts *reactorTestSuite) addNode(
		)
	}

	thisBlock := sf.MakeBlock(state, blockHeight, lastCommit)
	thisBlock, err := sf.MakeBlock(state, blockHeight, lastCommit)
	require.NoError(t, err)
	thisParts, err := thisBlock.MakePartSet(types.BlockPartSizeBytes)
	require.NoError(t, err)
	blockID := types.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()}
@@ -192,7 +176,7 @@ func (rts *reactorTestSuite) addNode(
	rts.reactors[nodeID], err = NewReactor(
		ctx,
		rts.logger.With("nodeID", nodeID),
		stateStore,
		state.Copy(),
		blockExec,
		blockStore,
		nil,

@@ -14,7 +14,6 @@ import (
	dbm "github.com/tendermint/tm-db"

	abciclient "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/example/kvstore"
	abci "github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/internal/eventbus"
	"github.com/tendermint/tendermint/internal/evidence"
@@ -37,7 +36,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
	// kind of deadlock and hit the larger timeout. This timeout
	// can be extended a bunch if needed, but it's good to avoid
	// falling back to a much coarser timeout
	ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	config := configSetup(t)
@@ -46,6 +45,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
	prevoteHeight := int64(2)
	testName := "consensus_byzantine_test"
	tickerFunc := newMockTickerFunc(true)
	appFunc := newKVStore

	valSet, privVals := factory.ValidatorSet(ctx, t, nValidators, 30)
	genDoc := factory.GenesisDoc(config, time.Now(), valSet.Validators, nil)
@@ -66,7 +66,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
		defer os.RemoveAll(thisConfig.RootDir)

		ensureDir(t, path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
		app := kvstore.NewApplication()
		app := appFunc(t, logger)
		vals := types.TM2PB.ValidatorUpdates(state.Validators)
		app.InitChain(abci.RequestInitChain{Validators: vals})

@@ -82,33 +82,36 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
			log.TestingLogger().With("module", "mempool"),
			thisConfig.Mempool,
			proxyAppConnMem,
			0,
		)
		if thisConfig.Consensus.WaitForTxs() {
			mempool.EnableTxsAvailable()
		}

		eventBus := eventbus.NewDefault(log.TestingLogger().With("module", "events"))
		require.NoError(t, eventBus.Start(ctx))

		// Make a full instance of the evidence pool
		evidenceDB := dbm.NewMemDB()
		evpool := evidence.NewPool(logger.With("module", "evidence"), evidenceDB, stateStore, blockStore, evidence.NopMetrics(), eventBus)
		evpool, err := evidence.NewPool(logger.With("module", "evidence"), evidenceDB, stateStore, blockStore)
		require.NoError(t, err)

		// Make State
		blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool, blockStore, eventBus)
		cs, err := NewState(ctx, logger, thisConfig.Consensus, stateStore, blockExec, blockStore, mempool, evpool, eventBus)
		require.NoError(t, err)
		blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool, blockStore)
		cs := NewState(ctx, logger, thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool)
		// set private validator
		pv := privVals[i]
		cs.SetPrivValidator(ctx, pv)

		eventBus := eventbus.NewDefault(log.TestingLogger().With("module", "events"))
		err = eventBus.Start(ctx)
		require.NoError(t, err)
		cs.SetEventBus(eventBus)

		cs.SetTimeoutTicker(tickerFunc())

		states[i] = cs
	}()
	}

	rts := setup(ctx, t, nValidators, states, 512) // buffer must be large enough to not deadlock
	rts := setup(ctx, t, nValidators, states, 100) // buffer must be large enough to not deadlock

	var bzNodeID types.NodeID

@@ -176,6 +179,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
		require.NotNil(t, lazyNodeState.privValidator)

		var commit *types.Commit
		var votes []*types.Vote
		switch {
		case lazyNodeState.Height == lazyNodeState.state.InitialHeight:
			// We're creating a proposal for the first block.
@@ -184,6 +188,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
		case lazyNodeState.LastCommit.HasTwoThirdsMajority():
			// Make the commit from LastCommit
			commit = lazyNodeState.LastCommit.MakeCommit()
			votes = lazyNodeState.LastCommit.GetVotes()
		default: // This shouldn't happen.
			lazyNodeState.logger.Error("enterPropose: Cannot propose anything: No commit for the previous block")
			return
@@ -200,10 +205,9 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
		}
		proposerAddr := lazyNodeState.privValidatorPubKey.Address()

		block, err := lazyNodeState.blockExec.CreateProposalBlock(
			ctx, lazyNodeState.Height, lazyNodeState.state, commit, proposerAddr, nil)
		require.NoError(t, err)
		blockParts, err := block.MakePartSet(types.BlockPartSizeBytes)
		block, blockParts, err := lazyNodeState.blockExec.CreateProposalBlock(
			ctx, lazyNodeState.Height, lazyNodeState.state, commit, proposerAddr, votes,
		)
		require.NoError(t, err)

		// Flush the WAL. Otherwise, we may not recompute the same proposal to sign,
@@ -233,7 +237,8 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
	}

	for _, reactor := range rts.reactors {
		reactor.SwitchToConsensus(ctx, reactor.state.GetState(), false)
		state := reactor.state.GetState()
		reactor.SwitchToConsensus(ctx, state, false)
	}

	// Evidence should be submitted and committed at the third height but
@@ -242,26 +247,20 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {

	var wg sync.WaitGroup
	i := 0
	subctx, subcancel := context.WithCancel(ctx)
	defer subcancel()
	for _, sub := range rts.subs {
		wg.Add(1)

		go func(j int, s eventbus.Subscription) {
			defer wg.Done()
			for {
				if subctx.Err() != nil {
					return
				}

				msg, err := s.Next(subctx)
				if subctx.Err() != nil {
				if ctx.Err() != nil {
					return
				}

				msg, err := s.Next(ctx)
				assert.NoError(t, err)
				if err != nil {
					t.Errorf("waiting for subscription: %v", err)
					subcancel()
					cancel()
					return
				}

@@ -273,18 +272,12 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
			}
		}(i, sub)

		i++
	}

	wg.Wait()

	// don't run more assertions if we've encountered a timeout
	select {
	case <-subctx.Done():
		t.Fatal("encountered timeout")
	default:
	}

	pubkey, err := bzNodeState.privValidator.GetPubKey(ctx)
	require.NoError(t, err)

@@ -296,3 +289,267 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
		assert.Equal(t, prevoteHeight, ev.Height())
	}
}

// 4 validators. 1 is byzantine. The other three are partitioned into A (1 val) and B (2 vals).
// byzantine validator sends conflicting proposals into A and B,
// and prevotes/precommits on both of them.
// B sees a commit, A doesn't.
// Heal partition and ensure A sees the commit
func TestByzantineConflictingProposalsWithPartition(t *testing.T) {
	// TODO: https://github.com/tendermint/tendermint/issues/6092
	t.SkipNow()

	// n := 4
	// logger := consensusLogger().With("test", "byzantine")
	// app := newCounter

	// states, cleanup := randConsensusState(n, "consensus_byzantine_test", newMockTickerFunc(false), app)
	// t.Cleanup(cleanup)

	// // give the byzantine validator a normal ticker
	// ticker := NewTimeoutTicker()
	// ticker.SetLogger(states[0].logger)
	// states[0].SetTimeoutTicker(ticker)

	// p2pLogger := logger.With("module", "p2p")

	// blocksSubs := make([]types.Subscription, n)
	// reactors := make([]p2p.Reactor, n)
	// for i := 0; i < n; i++ {
	// 	// enable txs so we can create different proposals
	// 	assertMempool(states[i].txNotifier).EnableTxsAvailable()

	// 	eventBus := states[i].eventBus
	// 	eventBus.SetLogger(logger.With("module", "events", "validator", i))

	// 	var err error
	// 	blocksSubs[i], err = eventBus.Subscribe(ctx, testSubscriber, types.EventQueryNewBlock)
	// 	require.NoError(t, err)

	// 	conR := NewReactor(states[i], true) // so we don't start the consensus states
	// 	conR.SetLogger(logger.With("validator", i))
	// 	conR.SetEventBus(eventBus)

	// 	var conRI p2p.Reactor = conR

	// 	// make first val byzantine
	// 	if i == 0 {
	// 		conRI = NewByzantineReactor(conR)
	// 	}

	// 	reactors[i] = conRI
	// 	err = states[i].blockExec.Store().Save(states[i].state) // for save height 1's validators info
	// 	require.NoError(t, err)
	// }

	// switches := p2p.MakeConnectedSwitches(config.P2P, N, func(i int, sw *p2p.Switch) *p2p.Switch {
	// 	sw.SetLogger(p2pLogger.With("validator", i))
	// 	sw.AddReactor("CONSENSUS", reactors[i])
	// 	return sw
	// }, func(sws []*p2p.Switch, i, j int) {
	// 	// the network starts partitioned with globally active adversary
	// 	if i != 0 {
	// 		return
	// 	}
	// 	p2p.Connect2Switches(sws, i, j)
	// })

	// // make first val byzantine
	// // NOTE: Now, test validators are MockPV, which by default doesn't
	// // do any safety checks.
	// states[0].privValidator.(types.MockPV).DisableChecks()
	// states[0].decideProposal = func(j int32) func(int64, int32) {
	// 	return func(height int64, round int32) {
	// 		byzantineDecideProposalFunc(t, height, round, states[j], switches[j])
	// 	}
	// }(int32(0))
	// // We are setting the prevote function to do nothing because the prevoting
	// // and precommitting are done alongside the proposal.
	// states[0].doPrevote = func(height int64, round int32) {}

	// defer func() {
	// 	for _, sw := range switches {
	// 		err := sw.Stop()
	// 		require.NoError(t, err)
	// 	}
	// }()

	// // start the non-byz state machines.
	// // note these must be started before the byz
	// for i := 1; i < n; i++ {
	// 	cr := reactors[i].(*Reactor)
	// 	cr.SwitchToConsensus(cr.conS.GetState(), false)
	// }

	// // start the byzantine state machine
	// byzR := reactors[0].(*ByzantineReactor)
	// s := byzR.reactor.conS.GetState()
	// byzR.reactor.SwitchToConsensus(s, false)

	// // byz proposer sends one block to peers[0]
	// // and the other block to peers[1] and peers[2].
	// // note peers and switches order don't match.
	// peers := switches[0].Peers().List()

	// // partition A
	// ind0 := getSwitchIndex(switches, peers[0])

	// // partition B
	// ind1 := getSwitchIndex(switches, peers[1])
	// ind2 := getSwitchIndex(switches, peers[2])
	// p2p.Connect2Switches(switches, ind1, ind2)

	// // wait for someone in the big partition (B) to make a block
	// <-blocksSubs[ind2].Out()

	// t.Log("A block has been committed. Healing partition")
	// p2p.Connect2Switches(switches, ind0, ind1)
	// p2p.Connect2Switches(switches, ind0, ind2)

	// // wait till everyone makes the first new block
	// // (one of them already has)
	// wg := new(sync.WaitGroup)
	// for i := 1; i < N-1; i++ {
	// 	wg.Add(1)
	// 	go func(j int) {
	// 		<-blocksSubs[j].Out()
	// 		wg.Done()
	// 	}(i)
	// }

	// done := make(chan struct{})
	// go func() {
	// 	wg.Wait()
	// 	close(done)
	// }()

	// tick := time.NewTicker(time.Second * 10)
	// select {
	// case <-done:
	// case <-tick.C:
	// 	for i, reactor := range reactors {
	// 		t.Log(fmt.Sprintf("Consensus Reactor %v", i))
	// 		t.Log(fmt.Sprintf("%v", reactor))
	// 	}
	// 	t.Fatalf("Timed out waiting for all validators to commit first block")
	// }
}

// func byzantineDecideProposalFunc(t *testing.T, height int64, round int32, cs *State, sw *p2p.Switch) {
// 	// byzantine user should create two proposals and try to split the vote.
// 	// Avoid sending on internalMsgQueue and running consensus state.

// 	// Create a new proposal block from state/txs from the mempool.
// 	block1, blockParts1 := cs.createProposalBlock()
// 	polRound, propBlockID := cs.ValidRound, types.BlockID{Hash: block1.Hash(), PartSetHeader: blockParts1.Header()}
// 	proposal1 := types.NewProposal(height, round, polRound, propBlockID)
// 	p1 := proposal1.ToProto()
// 	if err := cs.privValidator.SignProposal(cs.state.ChainID, p1); err != nil {
// 		t.Error(err)
// 	}

// 	proposal1.Signature = p1.Signature

// 	// some new transactions come in (this ensures that the proposals are different)
// 	deliverTxsRange(cs, 0, 1)

// 	// Create a new proposal block from state/txs from the mempool.
// 	block2, blockParts2 := cs.createProposalBlock()
// 	polRound, propBlockID = cs.ValidRound, types.BlockID{Hash: block2.Hash(), PartSetHeader: blockParts2.Header()}
// 	proposal2 := types.NewProposal(height, round, polRound, propBlockID)
// 	p2 := proposal2.ToProto()
// 	if err := cs.privValidator.SignProposal(cs.state.ChainID, p2); err != nil {
// 		t.Error(err)
// 	}

// 	proposal2.Signature = p2.Signature

// 	block1Hash := block1.Hash()
// 	block2Hash := block2.Hash()

// 	// broadcast conflicting proposals/block parts to peers
// 	peers := sw.Peers().List()
// 	t.Logf("Byzantine: broadcasting conflicting proposals to %d peers", len(peers))
// 	for i, peer := range peers {
// 		if i < len(peers)/2 {
// 			go sendProposalAndParts(height, round, cs, peer, proposal1, block1Hash, blockParts1)
// 		} else {
// 			go sendProposalAndParts(height, round, cs, peer, proposal2, block2Hash, blockParts2)
// 		}
// 	}
// }

// func sendProposalAndParts(
// 	height int64,
// 	round int32,
// 	cs *State,
// 	peer p2p.Peer,
// 	proposal *types.Proposal,
// 	blockHash []byte,
// 	parts *types.PartSet,
// ) {
// 	// proposal
// 	msg := &ProposalMessage{Proposal: proposal}
// 	peer.Send(DataChannel, MustEncode(msg))

// 	// parts
// 	for i := 0; i < int(parts.Total()); i++ {
// 		part := parts.GetPart(i)
// 		msg := &BlockPartMessage{
// 			Height: height, // This tells peer that this part applies to us.
// 			Round:  round,  // This tells peer that this part applies to us.
// 			Part:   part,
// 		}
// 		peer.Send(DataChannel, MustEncode(msg))
// 	}

// 	// votes
// 	cs.mtx.Lock()
// 	prevote, _ := cs.signVote(tmproto.PrevoteType, blockHash, parts.Header())
// 	precommit, _ := cs.signVote(tmproto.PrecommitType, blockHash, parts.Header())
// 	cs.mtx.Unlock()

// 	peer.Send(VoteChannel, MustEncode(&VoteMessage{prevote}))
// 	peer.Send(VoteChannel, MustEncode(&VoteMessage{precommit}))
// }

// type ByzantineReactor struct {
// 	service.Service
// 	reactor *Reactor
// }

// func NewByzantineReactor(conR *Reactor) *ByzantineReactor {
// 	return &ByzantineReactor{
// 		Service: conR,
// 		reactor: conR,
// 	}
// }

// func (br *ByzantineReactor) SetSwitch(s *p2p.Switch) { br.reactor.SetSwitch(s) }
// func (br *ByzantineReactor) GetChannels() []*p2p.ChannelDescriptor { return br.reactor.GetChannels() }

// func (br *ByzantineReactor) AddPeer(peer p2p.Peer) {
// 	if !br.reactor.IsRunning() {
// 		return
// 	}

// 	// Create peerState for peer
// 	peerState := NewPeerState(peer).SetLogger(br.reactor.logger)
// 	peer.Set(types.PeerStateKey, peerState)

// 	// Send our state to peer.
// 	// If we're syncing, broadcast a RoundStepMessage later upon SwitchToConsensus().
// 	if !br.reactor.waitSync {
// 		br.reactor.sendNewRoundStepMessage(peer)
// 	}
// }

// func (br *ByzantineReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
// 	br.reactor.RemovePeer(peer, reason)
// }

// func (br *ByzantineReactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {
// 	br.reactor.Receive(chID, peer, msgBytes)
// }

// func (br *ByzantineReactor) InitPeer(peer p2p.Peer) p2p.Peer { return peer }

@@ -5,6 +5,7 @@ import (
	"context"
	"errors"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"sort"
@@ -69,9 +70,6 @@ func configSetup(t *testing.T) *config.Config {
	require.NoError(t, err)
	t.Cleanup(func() { os.RemoveAll(configByzantineTest.RootDir) })

	walDir := filepath.Dir(cfg.Consensus.WalFile())
	ensureDir(t, walDir, 0700)

	return cfg
}

@@ -242,9 +240,7 @@ func decideProposal(
	t.Helper()

	cs1.mtx.Lock()
	block, err := cs1.createProposalBlock(ctx)
	require.NoError(t, err)
	blockParts, err := block.MakePartSet(types.BlockPartSizeBytes)
	block, blockParts, err := cs1.createProposalBlock(ctx)
	require.NoError(t, err)
	validRound := cs1.ValidRound
	chainID := cs1.state.ChainID
@@ -375,11 +371,7 @@ func subscribeToVoter(ctx context.Context, t *testing.T, cs *State, addr []byte)
		vote := msg.Data().(types.EventDataVote)
		// we only fire for our own votes
		if bytes.Equal(addr, vote.Vote.ValidatorAddress) {
			select {
			case <-ctx.Done():
				return ctx.Err()
			case ch <- msg:
			}
			ch <- msg
		}
		return nil
	}, types.EventQueryVote); err != nil {
@@ -410,10 +402,7 @@ func subscribeToVoterBuffered(ctx context.Context, t *testing.T, cs *State, addr
			vote := msg.Data().(types.EventDataVote)
			// we only fire for our own votes
			if bytes.Equal(addr, vote.Vote.ValidatorAddress) {
				select {
				case <-ctx.Done():
				case ch <- msg:
				}
				ch <- msg
			}
		}
	}()
@@ -474,6 +463,7 @@ func newStateWithConfigAndBlockStore(
		logger.With("module", "mempool"),
		thisConfig.Mempool,
		proxyAppConnMem,
		0,
	)

	if thisConfig.Consensus.WaitForTxs() {
@@ -487,26 +477,22 @@ func newStateWithConfigAndBlockStore(
	stateStore := sm.NewStore(stateDB)
	require.NoError(t, stateStore.Save(state))

	eventBus := eventbus.NewDefault(logger.With("module", "events"))
	require.NoError(t, eventBus.Start(ctx))

	blockExec := sm.NewBlockExecutor(stateStore, logger, proxyAppConnCon, mempool, evpool, blockStore, eventBus)
	cs, err := NewState(ctx,
	blockExec := sm.NewBlockExecutor(stateStore, logger, proxyAppConnCon, mempool, evpool, blockStore)
	cs := NewState(ctx,
		logger.With("module", "consensus"),
		thisConfig.Consensus,
		stateStore,
		state,
		blockExec,
		blockStore,
		mempool,
		evpool,
		eventBus,
	)
	if err != nil {
		t.Fatal(err)
	}

	cs.SetPrivValidator(ctx, pv)

	eventBus := eventbus.NewDefault(logger.With("module", "events"))
	require.NoError(t, eventBus.Start(ctx))

	cs.SetEventBus(eventBus)
	return cs
}

@@ -521,41 +507,18 @@ func loadPrivValidator(t *testing.T, cfg *config.Config) *privval.FilePV {
	return privValidator
}

type makeStateArgs struct {
	config      *config.Config
	logger      log.Logger
	validators  int
	application abci.Application
}

func makeState(ctx context.Context, t *testing.T, args makeStateArgs) (*State, []*validatorStub) {
func makeState(ctx context.Context, t *testing.T, cfg *config.Config, logger log.Logger, nValidators int) (*State, []*validatorStub) {
	t.Helper()
	// Get State
	validators := 4
	if args.validators != 0 {
		validators = args.validators
	}
	var app abci.Application
	app = kvstore.NewApplication()
	if args.application != nil {
		app = args.application
	}
	if args.config == nil {
		args.config = configSetup(t)
	}
	if args.logger == nil {
		args.logger = log.NewNopLogger()
	}

	state, privVals := makeGenesisState(ctx, t, args.config, genesisStateArgs{
		Validators: validators,
	state, privVals := makeGenesisState(ctx, t, cfg, genesisStateArgs{
		Validators: nValidators,
	})

	vss := make([]*validatorStub, validators)
	vss := make([]*validatorStub, nValidators)

	cs := newState(ctx, t, args.logger, state, privVals[0], app)
	cs := newState(ctx, t, logger, state, privVals[0], kvstore.NewApplication())

	for i := 0; i < validators; i++ {
	for i := 0; i < nValidators; i++ {
		vss[i] = newValidatorStub(privVals[i], int32(i))
	}
	// since cs1 starts at 1
@@ -787,10 +750,10 @@ func makeConsensusState(
	nValidators int,
	testName string,
	tickerFunc func() TimeoutTicker,
	appFunc func(t *testing.T, logger log.Logger) abci.Application,
	configOpts ...func(*config.Config),
) ([]*State, cleanupFunc) {
	t.Helper()
	tempDir := t.TempDir()

	valSet, privVals := factory.ValidatorSet(ctx, t, nValidators, 30)
	genDoc := factory.GenesisDoc(cfg, time.Now(), valSet.Validators, nil)
@@ -805,7 +768,7 @@ func makeConsensusState(
		blockStore := store.NewBlockStore(dbm.NewMemDB()) // each state needs its own db
		state, err := sm.MakeGenesisState(genDoc)
		require.NoError(t, err)
		thisConfig, err := ResetConfig(tempDir, fmt.Sprintf("%s_%d", testName, i))
		thisConfig, err := ResetConfig(t.TempDir(), fmt.Sprintf("%s_%d", testName, i))
		require.NoError(t, err)

		configRootDirs = append(configRootDirs, thisConfig.RootDir)
@@ -814,11 +777,13 @@ func makeConsensusState(
			opt(thisConfig)
		}

		walDir := filepath.Dir(thisConfig.Consensus.WalFile())
		ensureDir(t, walDir, 0700)
		ensureDir(t, filepath.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal

		app := kvstore.NewApplication()
		closeFuncs = append(closeFuncs, app.Close)
		app := appFunc(t, logger)

		if appCloser, ok := app.(io.Closer); ok {
			closeFuncs = append(closeFuncs, appCloser.Close)
		}

		vals := types.TM2PB.ValidatorUpdates(state.Validators)
		app.InitChain(abci.RequestInitChain{Validators: vals})
@@ -969,11 +934,19 @@ func (m *mockTicker) Chan() <-chan timeoutInfo {
	return m.c
}

func newEpehemeralKVStore(_ log.Logger, _ string) abci.Application {
func newPersistentKVStore(t *testing.T, logger log.Logger) abci.Application {
	t.Helper()

	dir := t.TempDir()

	return kvstore.NewPersistentKVStoreApplication(logger, dir)
}

func newKVStore(_ *testing.T, _ log.Logger) abci.Application {
	return kvstore.NewApplication()
}

func newPersistentKVStore(logger log.Logger, dbDir string) abci.Application {
func newPersistentKVStoreWithPath(logger log.Logger, dbDir string) abci.Application {
	return kvstore.NewPersistentKVStoreApplication(logger, dbDir)
}

@@ -2,10 +2,8 @@ package consensus

import (
	"context"
	"errors"
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
@@ -21,7 +19,7 @@ import (
)

func TestReactorInvalidPrecommit(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	config := configSetup(t)
@@ -29,7 +27,7 @@ func TestReactorInvalidPrecommit(t *testing.T) {
	n := 4
	states, cleanup := makeConsensusState(ctx, t,
		config, n, "consensus_reactor_test",
		newMockTickerFunc(true))
		newMockTickerFunc(true), newKVStore)
	t.Cleanup(cleanup)

	for i := 0; i < 4; i++ {
@@ -50,13 +48,11 @@ func TestReactorInvalidPrecommit(t *testing.T) {
	byzState := rts.states[node.NodeID]
	byzReactor := rts.reactors[node.NodeID]

	signal := make(chan struct{})
	// Update the doPrevote function to just send a valid precommit for a random
	// block and otherwise disable the priv validator.
	byzState.mtx.Lock()
	privVal := byzState.privValidator
	byzState.doPrevote = func(ctx context.Context, height int64, round int32) {
		defer close(signal)
		invalidDoPrevoteFunc(ctx, t, height, round, byzState, byzReactor, privVal)
	}
	byzState.mtx.Unlock()
@@ -73,31 +69,14 @@ func TestReactorInvalidPrecommit(t *testing.T) {
		go func(s eventbus.Subscription) {
			defer wg.Done()
			_, err := s.Next(ctx)
			if ctx.Err() != nil {
				return
			}
			if !assert.NoError(t, err) {
				cancel() // cancel other subscribers on failure
			}
		}(sub)
	}
	}

	wait := make(chan struct{})
	go func() { defer close(wait); wg.Wait() }()

	select {
	case <-wait:
		if _, ok := <-signal; !ok {
			t.Fatal("test condition did not fire")
		}
	case <-ctx.Done():
		if _, ok := <-signal; !ok {
			t.Fatal("test condition did not fire after timeout")
			return
		}
	case <-signal:
		// test passed
	}
	wg.Wait()
}

func invalidDoPrevoteFunc(
@@ -145,30 +124,13 @@ func invalidDoPrevoteFunc(
	cs.privValidator = nil // disable priv val so we don't do normal votes
	cs.mtx.Unlock()

	r.mtx.Lock()
	ids := make([]types.NodeID, 0, len(r.peers))
	for _, ps := range r.peers {
		ids = append(ids, ps.peerID)
	}
	r.mtx.Unlock()

	count := 0
	for _, peerID := range ids {
		count++
		err := r.voteCh.Send(ctx, p2p.Envelope{
			To: peerID,
		require.NoError(t, r.voteCh.Send(ctx, p2p.Envelope{
			To: ps.peerID,
			Message: &tmcons.Vote{
				Vote: precommit.ToProto(),
			},
		})
		// we want to have sent some of these votes,
		// but if the test completes without erroring
		// or not sending any messages, then we should
		// error.
		if errors.Is(err, context.Canceled) && count > 0 {
			break
		}
		require.NoError(t, err)
		}))
	}
	}()
}

@@ -51,7 +51,7 @@ func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) {

	ensureNewEventOnChannel(t, newBlockCh) // first block gets committed
	ensureNoNewEventOnChannel(t, newBlockCh)
	checkTxsRange(ctx, t, cs, 0, 1)
	deliverTxsRange(ctx, t, cs, 0, 1)
	ensureNewEventOnChannel(t, newBlockCh) // commit txs
	ensureNewEventOnChannel(t, newBlockCh) // commit updated app hash
	ensureNoNewEventOnChannel(t, newBlockCh)
@@ -118,7 +118,7 @@ func TestMempoolProgressInHigherRound(t *testing.T) {
	round = 0

	ensureNewRound(t, newRoundCh, height, round) // first round at next height
	checkTxsRange(ctx, t, cs, 0, 1) // we deliver txs, but don't set a proposal so we get the next round
	deliverTxsRange(ctx, t, cs, 0, 1) // we deliver txs, but dont set a proposal so we get the next round
	ensureNewTimeout(t, timeoutCh, height, round, cs.config.TimeoutPropose.Nanoseconds())

	round++ // moving to the next round
@@ -126,7 +126,7 @@ func TestMempoolProgressInHigherRound(t *testing.T) {
	ensureNewEventOnChannel(t, newBlockCh) // now we can commit the block
}

func checkTxsRange(ctx context.Context, t *testing.T, cs *State, start, end int) {
func deliverTxsRange(ctx context.Context, t *testing.T, cs *State, start, end int) {
	t.Helper()
	// Deliver some txs.
	for i := start; i < end; i++ {
@@ -159,7 +159,7 @@ func TestMempoolTxConcurrentWithCommit(t *testing.T) {
	newBlockHeaderCh := subscribe(ctx, t, cs.eventBus, types.EventQueryNewBlockHeader)

	const numTxs int64 = 3000
	go checkTxsRange(ctx, t, cs, 0, int(numTxs))
	go deliverTxsRange(ctx, t, cs, 0, int(numTxs))

	startTestRound(ctx, cs, cs.Height, cs.Round)
	for n := int64(0); n < numTxs; {
@@ -192,8 +192,8 @@ func TestMempoolRmBadTx(t *testing.T) {
	txBytes := make([]byte, 8)
	binary.BigEndian.PutUint64(txBytes, uint64(0))

	resFinalize := app.FinalizeBlock(abci.RequestFinalizeBlock{Txs: [][]byte{txBytes}})
	assert.False(t, resFinalize.TxResults[0].IsErr(), fmt.Sprintf("expected no error. got %v", resFinalize))
	resDeliver := app.FinalizeBlock(abci.RequestFinalizeBlock{Txs: [][]byte{txBytes}})
	assert.False(t, resDeliver.Txs[0].IsErr(), fmt.Sprintf("expected no error. got %v", resDeliver))

	resCommit := app.Commit()
	assert.True(t, len(resCommit.Data) > 0)
@@ -212,7 +212,7 @@ func TestMempoolRmBadTx(t *testing.T) {
		checkTxRespCh <- struct{}{}
	}, mempool.TxInfo{})
	if err != nil {
		t.Errorf("error after CheckTx: %v", err)
		t.Errorf("error after CheckTx: %w", err)
		return
	}

@@ -265,20 +265,20 @@ func (app *CounterApplication) Info(req abci.RequestInfo) abci.ResponseInfo {
}

func (app *CounterApplication) FinalizeBlock(req abci.RequestFinalizeBlock) abci.ResponseFinalizeBlock {
	respTxs := make([]*abci.ExecTxResult, len(req.Txs))
	respTxs := make([]*abci.ResponseDeliverTx, len(req.Txs))
	for i, tx := range req.Txs {
		txValue := txAsUint64(tx)
		if txValue != uint64(app.txCount) {
			respTxs[i] = &abci.ExecTxResult{
			respTxs[i] = &abci.ResponseDeliverTx{
				Code: code.CodeTypeBadNonce,
				Log:  fmt.Sprintf("Invalid nonce. Expected %d, got %d", app.txCount, txValue),
			}
			continue
		}
		app.txCount++
		respTxs[i] = &abci.ExecTxResult{Code: code.CodeTypeOK}
		respTxs[i] = &abci.ResponseDeliverTx{Code: code.CodeTypeOK}
	}
	return abci.ResponseFinalizeBlock{TxResults: respTxs}
	return abci.ResponseFinalizeBlock{Txs: respTxs}
}

func (app *CounterApplication) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx {
@@ -310,10 +310,5 @@ func (app *CounterApplication) Commit() abci.ResponseCommit {

func (app *CounterApplication) PrepareProposal(
	req abci.RequestPrepareProposal) abci.ResponsePrepareProposal {
	return abci.ResponsePrepareProposal{}
}

func (app *CounterApplication) ProcessProposal(
	req abci.RequestProcessProposal) abci.ResponseProcessProposal {
	return abci.ResponseProcessProposal{Accept: true}
	return abci.ResponsePrepareProposal{BlockData: req.BlockData}
}

@@ -203,7 +203,7 @@ func (p *pbtsTestHarness) nextHeight(ctx context.Context, t *testing.T, proposer

	ensureNewRound(t, p.roundCh, p.currentHeight, p.currentRound)

	b, err := p.observedState.createProposalBlock(ctx)
	b, _, err := p.observedState.createProposalBlock(ctx)
	require.NoError(t, err)
	b.Height = p.currentHeight
	b.Header.Height = p.currentHeight

@@ -19,8 +19,6 @@ import (
var (
	ErrPeerStateHeightRegression = errors.New("peer state height regression")
	ErrPeerStateInvalidStartTime = errors.New("peer state invalid startTime")
	ErrPeerStateSetNilVote       = errors.New("peer state set a nil vote")
	ErrPeerStateInvalidVoteIndex = errors.New("peer sent a vote with an invalid vote index")
)

// peerStateStats holds internal statistics for a peer.
@@ -358,19 +356,17 @@ func (ps *PeerState) BlockPartsSent() int {
}

// SetHasVote sets the given vote as known by the peer
func (ps *PeerState) SetHasVote(vote *types.Vote) error {
	// sanity check
func (ps *PeerState) SetHasVote(vote *types.Vote) {
	if vote == nil {
		return ErrPeerStateSetNilVote
		return
	}
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	return ps.setHasVote(vote.Height, vote.Round, vote.Type, vote.ValidatorIndex)
	ps.setHasVote(vote.Height, vote.Round, vote.Type, vote.ValidatorIndex)
}

// setHasVote will return an error when the index exceeds the bitArray length
func (ps *PeerState) setHasVote(height int64, round int32, voteType tmproto.SignedMsgType, index int32) error {
func (ps *PeerState) setHasVote(height int64, round int32, voteType tmproto.SignedMsgType, index int32) {
	logger := ps.logger.With(
		"peerH/R", fmt.Sprintf("%d/%d", ps.PRS.Height, ps.PRS.Round),
		"H/R", fmt.Sprintf("%d/%d", height, round),
@@ -381,12 +377,8 @@ func (ps *PeerState) setHasVote(height int64, round int32, voteType tmproto.Sign
	// NOTE: some may be nil BitArrays -> no side effects
	psVotes := ps.getVoteBitArray(height, round, voteType)
	if psVotes != nil {
		if ok := psVotes.SetIndex(int(index), true); !ok {
			// https://github.com/tendermint/tendermint/issues/2871
			return ErrPeerStateInvalidVoteIndex
		}
		psVotes.SetIndex(int(index), true)
	}
	return nil
}

// ApplyNewRoundStepMessage updates the peer state for the new round.
@@ -483,15 +475,15 @@ func (ps *PeerState) ApplyProposalPOLMessage(msg *ProposalPOLMessage) {
}

// ApplyHasVoteMessage updates the peer state for the new vote.
func (ps *PeerState) ApplyHasVoteMessage(msg *HasVoteMessage) error {
func (ps *PeerState) ApplyHasVoteMessage(msg *HasVoteMessage) {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	if ps.PRS.Height != msg.Height {
		return nil
		return
	}

	return ps.setHasVote(msg.Height, msg.Round, msg.Type, msg.Index)
	ps.setHasVote(msg.Height, msg.Round, msg.Type, msg.Index)
}

// ApplyVoteSetBitsMessage updates the peer state for the bit-array of votes

@@ -1,100 +0,0 @@
package consensus

import (
	"testing"

	"github.com/stretchr/testify/require"
	"github.com/tendermint/tendermint/libs/log"
	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
	"github.com/tendermint/tendermint/types"
)

func peerStateSetup(h, r, v int) *PeerState {
	ps := NewPeerState(log.TestingLogger(), "testPeerState")
	ps.PRS.Height = int64(h)
	ps.PRS.Round = int32(r)
	ps.ensureVoteBitArrays(int64(h), v)
	return ps
}

func TestSetHasVote(t *testing.T) {
	ps := peerStateSetup(1, 1, 1)
	pva := ps.PRS.Prevotes.Copy()

	// a nil vote should return ErrPeerStateSetNilVote
	err := ps.SetHasVote(nil)
	require.Equal(t, ErrPeerStateSetNilVote, err)

	// a peer giving a negative index should return ErrPeerStateInvalidVoteIndex
	v0 := &types.Vote{
		Height:         1,
		ValidatorIndex: -1,
		Round:          1,
		Type:           tmproto.PrevoteType,
	}

	err = ps.SetHasVote(v0)
	require.Equal(t, ErrPeerStateInvalidVoteIndex, err)

	// a peer giving an index beyond the bit-array length should also return ErrPeerStateInvalidVoteIndex
	v1 := &types.Vote{
		Height:         1,
		ValidatorIndex: 1,
		Round:          1,
		Type:           tmproto.PrevoteType,
	}

	err = ps.SetHasVote(v1)
	require.Equal(t, ErrPeerStateInvalidVoteIndex, err)

	// a peer giving a correct index should return nil (the vote has been set)
	v2 := &types.Vote{
		Height:         1,
		ValidatorIndex: 0,
		Round:          1,
		Type:           tmproto.PrevoteType,
	}
	require.Nil(t, ps.SetHasVote(v2))

	// verify vote
	pva.SetIndex(0, true)
	require.Equal(t, pva, ps.getVoteBitArray(1, 1, tmproto.PrevoteType))

	// a vote that is not for the tracked height/round/voteType should return nil (the vote is ignored)
	v3 := &types.Vote{
		Height:         2,
		ValidatorIndex: 0,
		Round:          1,
		Type:           tmproto.PrevoteType,
	}
	require.Nil(t, ps.SetHasVote(v3))
	// prevote bitarray has no update
	require.Equal(t, pva, ps.getVoteBitArray(1, 1, tmproto.PrevoteType))
}

func TestApplyHasVoteMessage(t *testing.T) {
	ps := peerStateSetup(1, 1, 1)
	pva := ps.PRS.Prevotes.Copy()

	// ignore the message with an invalid height
	msg := &HasVoteMessage{
		Height: 2,
	}
	require.Nil(t, ps.ApplyHasVoteMessage(msg))

	// apply a message like v2 in TestSetHasVote
	msg2 := &HasVoteMessage{
		Height: 1,
		Index:  0,
		Round:  1,
		Type:   tmproto.PrevoteType,
	}

	require.Nil(t, ps.ApplyHasVoteMessage(msg2))

	// verify vote
	pva.SetIndex(0, true)
	require.Equal(t, pva, ps.getVoteBitArray(1, 1, tmproto.PrevoteType))

	// skip test cases like v0 and v3 in TestSetHasVote, since they exercise the same path
}
Some files were not shown because too many files have changed in this diff.