Mirror of https://github.com/tendermint/tendermint.git (synced 2026-01-12 15:52:50 +00:00)

Compare commits: 10 commits (wb/config-...finalizeBl)
| Author | SHA1 | Date |
|---|---|---|
| | ed0c89eb76 | |
| | 87c2ee69fd | |
| | dc3327083e | |
| | 6a29aa2b7a | |
| | 8a39848d06 | |
| | f9a22483c0 | |
| | 5a4a56a781 | |
| | 3e006e6bc1 | |
| | ffb1fc32ea | |
| | a8f91f696a | |
.github/CODEOWNERS (vendored, 4 lines changed)
@@ -7,7 +7,5 @@
# global owners are only requested if there isn't a more specific
# codeowner specified below. For this reason, the global codeowners
# are often repeated in package-level definitions.
* @ebuchman @cmwaters @tychoish @williambanfield @creachadair @sergio-mena @jmalicevic @thanethomson @ancazamfir @samricotta
* @alexanderbez @ebuchman @cmwaters @tessr @tychoish

# Spec related changes can be approved by the protocol design team
/spec @josef-widder @milosevic @cason @sergio-mena @jmalicevic

The two * lines are the two sides of the global-owner change.
.github/ISSUE_TEMPLATE/proposal.md (vendored, 37 lines)
@@ -1,37 +0,0 @@
Removes the "Protocol Change Proposal" issue template (name: Protocol Change Proposal; about: Create a proposal to request a change to the protocol). The template carried a reviewer notice ("Under-specified proposals may be rejected summarily"), the sections Summary, Problem Definition ("Why do we need this change? What problems may be addressed? What benefits does Tendermint stand to gain? Are there any disadvantages?"), and Proposal, plus a "For Admin Use" checklist: not duplicate issue, appropriate labels applied, appropriate contributors tagged, contributor assigned/self-assigned.
.github/auto-comment.yml (vendored, 20 lines changed)
@@ -1,16 +1,16 @@
Each changed line appears on both sides with identical visible text; the differences are whitespace only. The comment posted on new pull requests reads:
pullRequestOpened: |
:wave: Thanks for creating a PR!
Before we can merge this PR, please make sure that all the following items have been
checked off. If any of the checklist items are not applicable, please leave them but
write a little note why.
- [ ] Wrote tests
- [ ] Updated CHANGELOG_PENDING.md
- [ ] Linked to Github issue with discussion and accepted design OR link to spec that describes this work.
- [ ] Updated relevant documentation (`docs/`) and code comments
- [ ] Re-reviewed `Files changed` in the Github PR explorer
- [ ] Applied Appropriate Labels
Thank you for your contribution to Tendermint! :rocket:
.github/codecov.yml (vendored, 14 lines changed)
@@ -5,14 +5,19 @@ coverage:
status:
project:
default:
threshold: 20%
patch: off
threshold: 1%
patch: on
changes: off

github_checks:
annotations: false

comment: false
comment:
layout: "diff, files"
behavior: default
require_changes: no
require_base: no
require_head: yes

ignore:
- "docs"
@@ -20,6 +25,3 @@ ignore:
- "scripts"
- "**/*.pb.go"
- "libs/pubsub/query/query.peg.go"
- "*.md"
- "*.rst"
- "*.yml"
.github/dependabot.yml (vendored, 89 lines changed)
@@ -3,94 +3,25 @@
The hunk reworks the Dependabot schedules. On one side, github-actions updates run weekly on Monday with a separate block per target branch (master, v0.34.x, v0.35.x, v0.36.x); on the other, they run daily at 11:00. The npm block for /docs switches between the same weekly and daily schedules (with fadeev as reviewer on the daily side), and gomod blocks run daily for master (reviewers melekes and tessr) and for the v0.34.x, v0.35.x, and v0.36.x branches, under an "Update All Go Dependencies" comment banner. Every block keeps open-pull-requests-limit: 10, and the Go and actions blocks carry the labels T:dependencies and S:automerge.
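As an illustration of the per-branch layout described above, here is a minimal sketch of a single update block; the branch, labels, and limit mirror values from the diff, while the surrounding version/updates scaffolding and the comments are ordinary Dependabot boilerplate rather than a quotation of this file.

```yaml
# Minimal sketch of one Dependabot update block, modeled on the
# per-branch entries in the diff above (not the complete file).
version: 2
updates:
  - package-ecosystem: gomod   # watch go.mod / go.sum
    directory: "/"             # manifest location, relative to the repo root
    schedule:
      interval: weekly
      day: monday
    target-branch: "v0.34.x"   # open PRs against the backport branch
    open-pull-requests-limit: 10
    labels:
      - T:dependencies
      - S:automerge            # picked up by the Mergify rules below
```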
.github/linter/markdownlint.yml (vendored, new file, 8 lines)
@@ -0,0 +1,8 @@
default: true,
MD007: { "indent": 4 }
MD013: false
MD024: { siblings_only: true }
MD025: false
MD033: { no-inline-html: false }
no-hard-tabs: false
whitespace: false
.github/linters/markdownlint.yml (vendored, 8 lines)
@@ -1,8 +0,0 @@
Removes the same markdownlint rules listed above (default: true, MD007 indent 4, MD013 and MD025 off, MD024 siblings_only, MD033 no-inline-html false, no-hard-tabs and whitespace disabled) from the linters/ path.
.github/linters/yaml-lint.yml (vendored, 9 lines)
@@ -1,9 +0,0 @@
---
# Default rules for YAML linting from super-linter.
# See: https://yamllint.readthedocs.io/en/stable/rules.html
extends: default
rules:
document-end: disable
document-start: disable
line-length: disable
truthy: disable
.github/mergify.yml (vendored, 31 lines changed)
@@ -1,22 +1,13 @@
One side defines queue_rules (name: default; conditions base=master and label=S:automerge) and has the "Automerge to master" rule enqueue PRs with a queue action (name: default, method: squash, commit_message_template of "{{ title }} (#{{ number }})" followed by the PR body); the other side uses a plain merge action with method: squash, strict: true, and commit_message: title+body. The "backport patches to v0.34.x branch" rule (condition base=master) is shared context.
@@ -25,19 +16,3 @@ pull_request_rules:
The second hunk covers the backport action for v0.34.x and the equivalent rules for the v0.35.x and v0.36.x branches (conditions base=master plus labels S:backport-to-v0.35.x and S:backport-to-v0.36.x, each backporting to the matching branch).
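To make the rule shapes easier to see than the interleaved hunks allow, here is a minimal sketch that combines the automerge queue with one backport rule; the commit-message template is omitted, and the S:backport-to-v0.34.x label is inferred from the pattern of the v0.35.x/v0.36.x rules rather than visible in this hunk.

```yaml
# Minimal Mergify sketch: an automerge queue keyed on the S:automerge
# label, plus one label-driven backport rule. Not the complete file.
queue_rules:
  - name: default
    conditions:
      - base=master
      - label=S:automerge

pull_request_rules:
  - name: Automerge to master
    conditions:
      - base=master
      - label=S:automerge
    actions:
      queue:
        name: default
        method: squash
  - name: backport patches to v0.34.x branch
    conditions:
      - base=master
      - label=S:backport-to-v0.34.x   # label name inferred, see note above
    actions:
      backport:
        branches:
          - v0.34.x
```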
.github/workflows/build.yml (vendored, 82 lines)
@@ -1,82 +0,0 @@
Removes the Build workflow, which ran on every pull request and on pushes to master and release/** branches. It built the binary on a goos/goarch matrix (linux on arm and amd64) with Go 1.18 in five-minute jobs, used technote-space/get-diff-action@v6 so every job passed without running when no **.go, go.mod, go.sum, or Makefile files changed (test/ excluded), and ran two dependent jobs: test_abci_cli (make install_abci, then abci/tests/test_cli/test.sh) and test_apps (make install install_abci, then test/app/test.sh).
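The removed workflow relies on the diff-gating pattern that recurs throughout the CI files in this comparison; a minimal sketch of that pattern follows. Job and step names are illustrative, and the action versions match the ones quoted above.

```yaml
# Minimal sketch of the diff-gating pattern: get-diff-action exposes
# the matched files via GIT_DIFF, and later steps are skipped when
# nothing relevant changed.
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/setup-go@v3
        with:
          go-version: "1.18"
      - uses: actions/checkout@v3
      - uses: technote-space/get-diff-action@v6
        with:
          PATTERNS: |
            **/**.go
            go.mod
            go.sum
      - name: build
        run: GOOS=linux GOARCH=amd64 make build
        if: "env.GIT_DIFF != ''"   # skipped when no Go files changed
```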
.github/workflows/check-generated.yml (vendored, 75 lines)
@@ -1,75 +0,0 @@
Removes the "Check generated code" workflow, which ran on pull requests against master with read-only permissions. Its check-mocks job installed mockery 2.12.3 from the GitHub release tarball, ran make mockery, and failed if git diff reported changes; its check-proto job installed buf and protoc-gen-gogofaster into a temporary GOBIN, ran make proto-gen, and likewise failed when the generated protobuf code was out of date. Both checks ran regardless of which files changed, because generated code can change in response to toolchain updates alone.
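Although the workflow is removed in this comparison, the check it implemented is a common "regenerate and diff" pattern; a minimal sketch follows, using the repository's own make mockery target. The mockery download step is omitted and the job name is illustrative.

```yaml
# Minimal sketch: regenerate the artifacts, then fail the job if the
# working tree differs from what is committed.
jobs:
  check-mocks:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/setup-go@v3
        with:
          go-version: '1.18'
      - uses: actions/checkout@v3
      - name: Check generated mocks
        run: |
          set -euo pipefail
          make mockery
          # Any diff means the committed mocks are stale.
          if ! git diff --stat --exit-code; then
            echo ">> Generated mocks require update; re-run 'make mockery'."
            exit 1
          fi
```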
.github/workflows/coverage.yml (vendored, new file, 127 lines)
@@ -0,0 +1,127 @@
Adds a Test Coverage workflow for pull requests and pushes to master and release/**. A split-test-files job lists all packages with go list ./... and splits them into four pkgs.txt.part.* files uploaded as artifacts; a build-linux job builds on Go 1.16 for arm and amd64 when Go files changed; a tests job (matrix parts 00-03) downloads its package list and pipes it into go test -mod=readonly -timeout 8m -race with a per-part -coverprofile (covermode atomic), uploading each profile as an artifact; and an upload-coverage-report job downloads the per-part coverage artifacts, concatenates them into coverage.txt (dropping "mode: atomic" lines), and uploads the result with codecov/codecov-action@v1.5.2.
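The fan-in at the end of this workflow is easy to miss in the interleaved listing, so here is a minimal sketch of it. It is not a verbatim copy: only one download step is shown, and a single "mode: atomic" header is written explicitly (Go tooling expects one per merged profile), whereas the workflow above simply concatenates the parts for Codecov to ingest.

```yaml
# Minimal sketch of the coverage fan-in job.
jobs:
  upload-coverage-report:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/download-artifact@v2
        with:
          name: "${{ github.sha }}-00-coverage"
      # ...repeat the download for parts 01-03...
      - name: Merge coverage profiles
        run: |
          # One mode header, then every per-part profile without its header.
          echo "mode: atomic" > coverage.txt
          cat ./*profile.out | grep -v "mode: atomic" >> coverage.txt
      - uses: codecov/codecov-action@v1.5.2
        with:
          file: ./coverage.txt
```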
.github/workflows/docker.yml (vendored, 19 lines changed)
@@ -1,19 +1,20 @@
The workflow name toggles between "Docker" and "Build & Push"; either way it rebuilds the tendermint Docker image on pushes to master and on v*.*.* and v*.*.*-rc* tags and pushes it to https://hub.docker.com/r/interchainio/simapp/tags. The checkout step moves between actions/checkout@v3 and @v2.3.4, ahead of a Prepare step that derives the image tag.
@@ -38,18 +39,18 @@ jobs:
The Buildx setup moves between docker/setup-buildx-action@v2.0.0 and @v1.5.0, the DockerHub login between docker/login-action@v2.0.0 and @v1.10.0 (run only outside pull requests, using the DOCKERHUB_USERNAME and DOCKERHUB_TOKEN secrets), and the publish step between docker/build-push-action@v3.0.0 and @v2.6.1, building ./DOCKER/Dockerfile from the repository root.
.github/workflows/docs-deployment.yml (vendored, 62 lines)
@@ -1,62 +0,0 @@
Removes the workflow that built and deployed the docs.tendermint.com site to GitHub Pages on pushes to master touching docs/** or spec/** (and on manual dispatch). It was split into a read-only VuePress build job (alpine container, full-history checkout so backport branches are available, make build-docs, upload of ~/output as an artifact) and a deploy job with write permission that published the artifact to the docs-tendermint-com branch via JamesIves/github-pages-deploy-action@v4 with single-commit: true.
.github/workflows/docs-toc.yml (vendored, 20 lines)
@@ -1,20 +0,0 @@
Removes the "Check documentation ToC" workflow, which verified that important design docs have table-of-contents entries by running ./docs/presubmit.sh on pull requests and pushes to master whenever files under docs/architecture/** or docs/rfc/** changed (detected with technote-space/get-diff-action@v6).
.github/workflows/e2e-manual.yml (vendored, 36 lines)
@@ -1,36 +0,0 @@
Removes the manually dispatched e2e-manual workflow, which ran randomly generated E2E testnets across five matrix groups ('00'-'04') on Go 1.18: it built the e2e tooling with make -j2 docker generator runner tests, generated testnets with ./build/generator -g 5 -d networks/nightly/ (the -g flag must match the matrix groups), and ran ./run-multiple.sh against the group's generated TOML files with a 60-minute timeout.
.github/workflows/e2e-nightly-34x.yml (vendored, 34 lines changed)
@@ -1,10 +1,12 @@
Reworks the header comments (the file must be kept in sync with e2e-nightly-master.yml) and toggles a workflow_dispatch trigger alongside the nightly cron ('0 2 * * *').
@@ -15,15 +17,15 @@ jobs:
The test matrix moves between two groups ('00', '01') and four ('00'-'03'), and the toolchain between actions/setup-go@v3 with Go 1.18 plus actions/checkout@v3 and setup-go@v2 with Go 1.16 plus checkout@v2.3.4, always checking out the v0.34.x ref.
@@ -35,7 +37,7 @@ jobs:
The generator call switches between ./build/generator -g 2 -d networks/nightly and -g 4 accordingly.
@@ -56,3 +58,19 @@ jobs:
The new side adds an e2e-nightly-success job that notifies Slack (channel tendermint-internal) when the nightly run passes, mirroring the existing failure notification.
.github/workflows/e2e-nightly-35x.yml (vendored, 75 lines)
@@ -1,75 +0,0 @@
Removes the nightly E2E workflow for the v0.35.x branch (cron '0 2 * * *', Go 1.18). It ran a matrix over p2p modes (legacy, new, hybrid) and four groups, built the e2e tooling with make -j2 docker generator runner tests, generated testnets with ./build/generator -g 4 -d networks/nightly/<p2p> -p <p2p>, ran ./run-multiple.sh on the matching TOML files, and notified Slack (tendermint-internal) on both failure and success.
.github/workflows/e2e-nightly-36x.yml (vendored, 74 lines)
@@ -1,74 +0,0 @@
Removes the equivalent nightly E2E workflow for the v0.36.x branch: five matrix groups ('00'-'04'), Go 1.18, make -j2 docker generator runner tests, ./build/generator -g 5 -d networks/nightly, ./run-multiple.sh per group, and Slack notifications on both failure and success.
.github/workflows/e2e-nightly-master.yml (vendored, 31 lines changed)
@@ -1,47 +1,48 @@
The test job is renamed between e2e-nightly-test and e2e-nightly-test-2, and the header comment switches between "propagate relevant changes to the e2e-nightly-<V>x files" and "update the e2e-nightly-34x workflow as well". The matrix moves between five plain groups ('00'-'04') and four groups crossed with p2p modes (legacy, new, hybrid); the toolchain between setup-go@v3 / Go 1.18 / checkout@v3 and setup-go@v2 / Go 1.16 / checkout@v2.3.4; the build between make -j2 docker generator runner tests and make -j2 docker generator runner; and the generate/run steps between networks/nightly/ with -g 5 and networks/nightly/<p2p> with -g 4 -p <p2p>.
@@ -56,8 +57,8 @@ jobs:
The failure and success notification jobs track the renamed test job via needs.
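The nightly E2E workflows in this comparison all share one shape; a minimal sketch of that shape follows. The workflow name is illustrative, the Slack notification jobs and any backport-branch checkout ref are omitted, and the -g value must match the number of matrix groups.

```yaml
# Minimal sketch of the shared nightly E2E shape: a cron trigger, a
# group matrix, testnet generation, and a run over the generated files.
name: e2e-nightly-sketch   # illustrative name
on:
  schedule:
    - cron: '0 2 * * *'
jobs:
  e2e-nightly-test:
    strategy:
      fail-fast: false
      matrix:
        group: ['00', '01', '02', '03']
    runs-on: ubuntu-latest
    timeout-minutes: 60
    steps:
      - uses: actions/setup-go@v3
        with:
          go-version: '1.18'
      - uses: actions/checkout@v3
      - name: Build e2e tooling
        working-directory: test/e2e
        # Run make jobs in parallel, since steps cannot run in parallel.
        run: make -j2 docker generator runner
      - name: Generate testnets
        working-directory: test/e2e
        run: ./build/generator -g 4 -d networks/nightly/
      - name: Run testnets in group ${{ matrix.group }}
        working-directory: test/e2e
        run: ./run-multiple.sh networks/nightly/*-group${{ matrix.group }}-*.toml
```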
.github/workflows/e2e.yml (vendored, 19 lines changed)
@@ -2,7 +2,7 @@ name: e2e
Runs the CI end-to-end test network on pushes to master and release branches and on every pull request, but only when Go files changed; this hunk only adjusts comment spacing around the workflow_dispatch trigger.
@@ -14,11 +14,11 @@ jobs:
The toolchain toggles between setup-go@v3 / Go 1.18 / checkout@v3 / get-diff-action@v6 and setup-go@v2 / Go 1.16 / checkout@v2.3.4 / get-diff-action@v4.
@@ -28,10 +28,15 @@ jobs:
The build step toggles between make -j2 docker runner tests and make -j2 docker runner, the CI testnet run between ./run-multiple.sh networks/ci.toml and ./build/runner -f networks/ci.toml, and one side adds an "Emit logs on failure" step running ./build/runner -f networks/ci.toml logs.
.github/workflows/fuzz-nightly.yml (vendored, 26 lines changed)
@@ -1,7 +1,7 @@
The workflow name toggles between "Fuzz Tests" and "fuzz-nightly"; it runs nightly at 03:00, on manual dispatch, and on pull requests.
@@ -13,21 +13,31 @@ jobs:
The toolchain toggles between setup-go@v3 / Go 1.18 / checkout@v3 and setup-go@v2 / Go 1.16 / checkout@v2.3.4, and go-fuzz installation between go install of go-fuzz@latest and go-fuzz-build@latest and go get -u of the same tools. The fuzz steps run 10-minute, SIGINT-terminated runs (timeout -s SIGINT --preserve-status 10m) of the mempool, p2p-addrbook, p2p-pex, and p2p-sc targets, each with continue-on-error: true.
@@ -39,14 +49,14 @@ jobs:
The crasher and suppression archives toggle between actions/upload-artifact@v3 (with retention-days: 3 for crashers) and @v2.
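A minimal sketch of one fuzz target plus the crasher archive follows; the go-fuzz installation step is omitted, and the action versions match one side of the hunk above.

```yaml
# Minimal sketch: a bounded fuzz run that may fail without failing the
# workflow, followed by archiving any crashers it produced.
jobs:
  fuzz-nightly-test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/setup-go@v3
        with:
          go-version: '1.18'
      - uses: actions/checkout@v3
      - name: Fuzz mempool
        working-directory: test/fuzz
        # SIGINT lets go-fuzz flush its corpus before the 10-minute cutoff.
        run: timeout -s SIGINT --preserve-status 10m make fuzz-mempool
        continue-on-error: true
      - name: Archive crashers
        uses: actions/upload-artifact@v3
        with:
          name: crashers
          path: test/fuzz/**/crashers
          retention-days: 3
```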
.github/workflows/janitor.yml (vendored, 2 lines changed)
@@ -10,7 +10,7 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 3
steps:
- uses: styfle/cancel-workflow-action@0.10.0
- uses: styfle/cancel-workflow-action@0.9.0
with:
workflow_id: 1041851,1401230,2837803
access_token: ${{ github.token }}
The two cancel-workflow-action lines are the two sides of the version change.
.github/workflows/jepsen.yml (vendored, 4 lines changed)
@@ -46,7 +46,7 @@ jobs:
The checkout of the tendermint/jepsen repository toggles between actions/checkout@v3 and @v2.3.4.
@@ -58,7 +58,7 @@ jobs:
The test itself still runs lein inside the jepsen-control container with the nemesis, workload, concurrency, tendermint-url, merkleeyes-url, time-limit, and dupOrSuperByzValidators inputs; the results archive toggles between actions/upload-artifact@v3 and @v2 (name: results, path: tendermint/store/latest).
.github/workflows/linkchecker.yml (vendored, new file, 12 lines)
@@ -0,0 +1,12 @@
name: Check Markdown links
on:
schedule:
- cron: '* */24 * * *'
jobs:
markdown-link-check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2.3.4
- uses: gaurav-nelson/github-action-markdown-link-check@1.0.12
with:
folder-path: "docs"
.github/workflows/lint.yml (vendored, 27 lines changed)
@@ -1,11 +1,7 @@
The workflow name toggles between "Golang Linter" and "Lint"; either way it runs golangci-lint over the entire Tendermint repository on every pull request and push to master, and the golangci job passes without running if no *.go, go.mod, or go.sum files have been modified.
@@ -17,22 +13,17 @@ jobs:
One side uses checkout@v3, setup-go@v3 with Go 1.18, get-diff-action@v6, and golangci/golangci-lint-action@v3 pinned to golangci-lint v1.45; the other uses checkout@v2.3.4, get-diff-action@v4, and golangci-lint-action@v2.5.2 with v1.38. Both pass args: --timeout 10m and the github token and gate on env.GIT_DIFF; the golangci-lint version is specified without a patch number so the latest patch release is always used.
.github/workflows/linter.yml (vendored, 8 lines changed)
@@ -1,4 +1,4 @@
The workflow name toggles between "Markdown Linter" and "Lint".
@@ -19,14 +19,14 @@ jobs:
Checkout toggles between actions/checkout@v3 and @v2.3.4, and the super-linter image between github/super-linter:v4 and :v3. The linter validates Markdown, OpenAPI, and YAML across the whole codebase against the master default branch, using LINTER_RULES_PATH: . and YAML_CONFIG_FILE: yaml-lint.yml.
.github/workflows/markdown-links.yml (vendored, 23 lines)
@@ -1,23 +0,0 @@
name: Check Markdown links

on:
push:
branches:
- master
pull_request:
branches: [master]

jobs:
markdown-link-check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
with:
PATTERNS: |
**/**.md
- uses: creachadair/github-action-markdown-link-check@master
with:
check-modified-files-only: 'yes'
config-file: '.md-link-check.json'
if: env.GIT_DIFF
.github/workflows/proto-docker.yml (vendored, new file, 51 lines)
@@ -0,0 +1,51 @@
Adds a "Build & Push TM Proto Builder" workflow that rebuilds the tendermintdev/docker-build-proto image when tools/proto/* changes (on pull requests and pushes to master) and once a month via cron to receive go and buf updates. A prep step derives the image tag from the git ref (latest for the default branch); the job then sets up Buildx (docker/setup-buildx-action@v1.5.0), logs in to DockerHub with the DOCKERHUB_USERNAME and DOCKERHUB_TOKEN secrets (docker/login-action@v1.10.0), and builds ./tools/proto/Dockerfile with docker/build-push-action@v2.6.1, pushing only when the event is not a pull request.
.github/workflows/proto-lint.yml (vendored, 21 lines)
@@ -1,21 +0,0 @@
name: Protobuf Lint
on:
pull_request:
paths:
- 'proto/**'
push:
branches:
- master
paths:
- 'proto/**'

jobs:
lint:
runs-on: ubuntu-latest
timeout-minutes: 5
steps:
- uses: actions/checkout@v3
- uses: bufbuild/buf-setup-action@v1.6.0
- uses: bufbuild/buf-lint-action@v1
with:
input: 'proto'
.github/workflows/proto.yml (vendored, new file, 23 lines)
@@ -0,0 +1,23 @@
name: Protobuf
# Protobuf runs buf (https://buf.build/) lint and check-breakage
# This workflow is only run when a .proto file has been modified
on:
workflow_dispatch: # allow running workflow manually
pull_request:
paths:
- "**.proto"
jobs:
proto-lint:
runs-on: ubuntu-latest
timeout-minutes: 4
steps:
- uses: actions/checkout@v2.3.4
- name: lint
run: make proto-lint
proto-breakage:
runs-on: ubuntu-latest
timeout-minutes: 4
steps:
- uses: actions/checkout@v2.3.4
- name: check-breakage
run: make proto-check-breaking-ci
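The two protobuf workflows in this comparison reach the same checks either through make targets or through the buf actions; here is a minimal sketch using the actions, where the breaking-change step and its against ref are assumptions based on buf's documented usage rather than lines taken from this diff.

```yaml
# Minimal sketch of buf lint plus breaking-change checks in one job.
jobs:
  proto-check:
    runs-on: ubuntu-latest
    timeout-minutes: 5
    steps:
      - uses: actions/checkout@v3
      - uses: bufbuild/buf-setup-action@v1.6.0
      - uses: bufbuild/buf-lint-action@v1
        with:
          input: 'proto'
      # Compare against the proto definitions on master (assumed ref).
      - uses: bufbuild/buf-breaking-action@v1
        with:
          input: 'proto'
          against: 'https://github.com/tendermint/tendermint.git#branch=master,subdir=proto'
```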
.github/workflows/release.yml (vendored, 19 lines changed)
@@ -2,35 +2,36 @@ name: "Release"
Triggered by pushes to RC[0-9]/** branches and to v*.*.* tags. Checkout (with fetch-depth: 0) toggles between actions/checkout@v3 and @v2.3.4, and Go setup between setup-go@v3 with Go 1.18 and setup-go@v2 with Go 1.16. A step writes a link to the tag's CHANGELOG section into ../release_notes.md (gated on tag pushes on one side, ungated on the other); the Build step runs goreleaser/goreleaser-action (v3 on one side, v2 on the other) with args: build --skip-validate on pull requests, and the Release step runs the same action with version: latest when the ref is a tag.
.github/workflows/stale.yml (vendored, 8 lines changed)
@@ -7,14 +7,12 @@ jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v5
- uses: actions/stale@v3.0.19
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
stale-pr-message: "This pull request has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions."
One side sets days-before-stale: -1, days-before-close: -1, days-before-pr-stale: 10, and days-before-pr-close: 4; the other sets days-before-stale: 10 and days-before-close: 4. Both exempt PRs labeled S:wip.
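Read together, the side of this hunk that uses actions/stale@v5 expresses a PR-only staleness policy; the following is a minimal sketch of that reading. Pairing the @v5 action with the PR-specific settings is an inference from the interleaved hunk, not something the diff states directly.

```yaml
# Minimal sketch of a PR-only staleness policy with actions/stale.
jobs:
  stale:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/stale@v5
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
          stale-pr-message: >-
            This pull request has been automatically marked as stale because
            it has not had recent activity. It will be closed if no further
            activity occurs. Thank you for your contributions.
          days-before-stale: -1      # never mark issues stale
          days-before-close: -1      # never close issues
          days-before-pr-stale: 10   # PR-specific override
          days-before-pr-close: 4
          exempt-pr-labels: "S:wip"  # work-in-progress PRs are ignored
```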
.github/workflows/tests.yml (vendored, 147 lines changed)
@@ -1,75 +1,138 @@
The workflow name toggles between "Test" and "Tests"; it runs on pull requests and on pushes to master and release/** (one side restricts pushes to **.go paths), and all jobs pass without running if no *.go, go.mod, or go.sum files have been modified. One side has a single tests job that splits the suite into six parts (matrix "00"-"05", make test-group-<part> NUM_SPLIT=6 on Go 1.18 with checkout@v3 and get-diff-action@v6), uploads each part's coverage profile from ./build/, and an upload-coverage-report job that downloads the per-part profiles, concatenates them into coverage.txt (dropping "mode: set" lines), and uploads it with codecov/codecov-action@v3.1.0. The other side has a build job on Go 1.16 (checkout@v2.3.4, get-diff-action@v4) that runs make install install_abci and caches ~/go/pkg/mod and ~/go/bin with actions/cache@v2.1.6, plus dependent test_abci_apps (abci/tests/test_app/test.sh), test_abci_cli (abci/tests/test_cli/test.sh), and test_apps (test/app/test.sh) jobs that restore those caches and gate on env.GIT_DIFF.
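The cache handshake on the build side of this diff is spread across several jobs; a minimal sketch of just that handshake follows. The diff-gating steps and the other test_* jobs are omitted for brevity.

```yaml
# Minimal sketch: the build job warms the module and binary caches, and
# a dependent test job restores them by the same keys.
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/setup-go@v2
        with:
          go-version: "1.16"
      - uses: actions/checkout@v2.3.4
      - name: install
        run: make install install_abci
      - uses: actions/cache@v2.1.6
        with:
          path: ~/go/pkg/mod
          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-go-
      # Cache built binaries for use by the dependent jobs.
      - uses: actions/cache@v2.1.6
        with:
          path: ~/go/bin
          key: ${{ runner.os }}-${{ github.sha }}-tm-binary
  test_apps:
    runs-on: ubuntu-latest
    needs: build
    steps:
      - uses: actions/setup-go@v2
        with:
          go-version: "1.16"
      - uses: actions/checkout@v2.3.4
      - uses: actions/cache@v2.1.6
        with:
          path: ~/go/bin
          key: ${{ runner.os }}-${{ github.sha }}-tm-binary
      - name: test_apps
        run: test/app/test.sh
        shell: bash
```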
.gitignore (vendored, 11 lines changed)
@@ -15,7 +15,7 @@
.vagrant
.vendor-new/
.vscode/
abci/abci-cli
abci-cli
addrbook.json
artifacts/*
build/*
@@ -25,7 +25,6 @@ docs/_build
docs/dist
docs/node_modules/
docs/spec
docs/.vuepress/public/rpc
index.html.md
libs/pubsub/query/fuzz_test/output
profile\.out
@@ -47,11 +46,3 @@ test/fuzz/**/corpus
test/fuzz/**/crashers
test/fuzz/**/suppressions
test/fuzz/**/*.zip
proto/spec/**/*.pb.go
*.aux
*.bbl
*.blg
*.log
*.pdf
*.gz
*.dvi
.golangci.yml
@@ -1,35 +1,32 @@
One side enables exportloopref, gocritic, and revive; the other enables golint, lll, and scopelint and keeps gocritic commented out. The shared enabled list includes asciicheck, bodyclose, deadcode, depguard, dogsled, dupl, errcheck, goconst, gofmt, goimports, gosec, gosimple, govet, ineffassign, misspell, nakedret, nolintlint, prealloc, staticcheck, structcheck, and stylecheck, with funlen, gochecknoglobals, gochecknoinits, gocognit, gocyclo, godox, interfacer, and maligned left commented out.
@@ -40,12 +37,18 @@ linters:
Both sides keep varcheck enabled (whitespace, wsl, and gocognit stay commented out) and exclude gosec findings in _test.go files; one side additionally excludes lll findings on lines containing "https://" and sets max-same-issues: 50 ahead of linters-settings.
.goreleaser.yml
@@ -29,8 +29,8 @@ release:
archives:
- files:
- LICENSE
- README.md
- UPGRADING.md
- SECURITY.md
- CHANGELOG.md
The hunk adjusts only the indentation of the archive file list; the entries themselves are unchanged.
.markdownlint.yml (root markdownlint configuration)
@@ -1,11 +0,0 @@
default: true
MD001: false
MD007: {indent: 4}
MD013: false
MD024: {siblings_only: true}
MD025: false
MD033: false
MD036: false
MD010: false
MD012: false
MD028: false
.md-link-check.json (referenced by the markdown-links workflow above)
@@ -1,6 +0,0 @@
{
"retryOn429": true,
"retryCount": 5,
"fallbackRetryDelay": "30s",
"aliveStatusCodes": [200, 206, 503]
}
452
CHANGELOG.md
452
CHANGELOG.md
@@ -1,384 +1,6 @@
|
||||
# Changelog
|
||||
|
||||
Friendly reminder: We have a [bug bounty program](https://hackerone.com/cosmos).
|
||||
|
||||
## v0.35.5
|
||||
|
||||
May 26, 2022
|
||||
|
||||
### BUG FIXES
|
||||
|
||||
- [p2p] [\#8371](https://github.com/tendermint/tendermint/pull/8371) fix setting in con-tracker (backport #8370) (@tychoish)
|
||||
- [blocksync] [\#8496](https://github.com/tendermint/tendermint/pull/8496) validate block against state before persisting it to disk (@cmwaters)
|
||||
- [statesync] [\#8494](https://github.com/tendermint/tendermint/pull/8494) avoid potential race (@tychoish)
|
||||
- [keymigrate] [\#8467](https://github.com/tendermint/tendermint/pull/8467) improve filtering for legacy transaction hashes (backport #8466) (@creachadair)
|
||||
- [rpc] [\#8594](https://github.com/tendermint/tendermint/pull/8594) fix encoding of block_results responses (@creachadair)
|
||||
|
||||
## v0.35.4
|
||||
|
||||
April 18, 2022
|
||||
|
||||
Special thanks to external contributors on this release: @firelizzard18
|
||||
|
||||
### FEATURES
|
||||
|
||||
- [cli] [\#8300](https://github.com/tendermint/tendermint/pull/8300) Add a tool to update old config files to the latest version [backport [\#8281](https://github.com/tendermint/tendermint/pull/8281)]. (@creachadair)
|
||||
|
||||
### IMPROVEMENTS
|
||||
|
||||
### BUG FIXES
|
||||
|
||||
- [cli] [\#8294](https://github.com/tendermint/tendermint/pull/8294) keymigrate: ensure block hash keys are correctly translated. (@creachadair)
|
||||
- [cli] [\#8352](https://github.com/tendermint/tendermint/pull/8352) keymigrate: ensure transaction hash keys are correctly translated. (@creachadair)
|
||||
|
||||
## v0.35.3
|
||||
|
||||
April 8, 2022
|
||||
|
||||
### FEATURES
|
||||
|
||||
- [cli] [\#8081](https://github.com/tendermint/tendermint/pull/8081) add a safer-to-use `reset-state` command. (@marbar3778)
|
||||
|
||||
### IMPROVEMENTS
|
||||
|
||||
- [consensus] [\#8138](https://github.com/tendermint/tendermint/pull/8138) change lock handling in reactor and handleMsg for RoundState. (@williambanfield)
|
||||
|
||||
### BUG FIXES
|
||||
|
||||
- [cli] [\#8276](https://github.com/tendermint/tendermint/pull/8276) scmigrate: ensure target key is correctly renamed. (@creachadair)
|
||||
|
||||
## v0.35.2
|
||||
|
||||
February 28, 2022
|
||||
|
||||
Special thanks to external contributors on this release: @ashcherbakov, @yihuang, @waelsy123
|
||||
|
||||
### IMPROVEMENTS
|
||||
|
||||
- [consensus] [\#7875](https://github.com/tendermint/tendermint/pull/7875) additional timing metrics. (@williambanfield)
|
||||
|
||||
### BUG FIXES
|
||||
|
||||
- [abci] [\#7990](https://github.com/tendermint/tendermint/pull/7990) revert buffer limit change. (@williambanfield)
|
||||
- [cli] [#7837](https://github.com/tendermint/tendermint/pull/7837) fix app hash in state rollback. (@yihuang)
|
||||
- [cli] [\#7869](https://github.com/tendermint/tendermint/pull/7869) Update unsafe-reset-all command to match release v35. (waelsy123)
|
||||
- [light] [\#7640](https://github.com/tendermint/tendermint/pull/7640) Light Client: fix absence proof verification (@ashcherbakov)
|
||||
- [light] [\#7641](https://github.com/tendermint/tendermint/pull/7641) Light Client: fix querying against the latest height (@ashcherbakov)
|
||||
- [mempool] [\#7718](https://github.com/tendermint/tendermint/pull/7718) return duplicate tx errors more consistently. (@tychoish)
|
||||
- [rpc] [\#7744](https://github.com/tendermint/tendermint/pull/7744) fix layout of endpoint list. (@creachadair)
|
||||
- [statesync] [\#7886](https://github.com/tendermint/tendermint/pull/7886) assert app version matches. (@cmwaters)
|
||||
|
||||
## v0.35.1
|
||||
|
||||
January 26, 2022
|
||||
|
||||
Special thanks to external contributors on this release: @altergui, @odeke-em,
|
||||
@thanethomson
|
||||
|
||||
### BREAKING CHANGES
|
||||
|
||||
- CLI/RPC/Config
|
||||
|
||||
- [config] [\#7276](https://github.com/tendermint/tendermint/pull/7276) rpc: Add experimental config params to allow for subscription buffer size control (@thanethomson).
|
||||
|
||||
- P2P Protocol
|
||||
|
||||
- [p2p] [\#7265](https://github.com/tendermint/tendermint/pull/7265) Peer manager reduces peer score for each failed dial attempts for peers that have not successfully dialed. (@tychoish)
|
||||
- [p2p] [\#7594](https://github.com/tendermint/tendermint/pull/7594) always advertise self, to enable mutual address discovery. (@altergui)
|
||||
|
||||
### FEATURES
|
||||
|
||||
- [rpc] [\#7270](https://github.com/tendermint/tendermint/pull/7270) Add `header` and `header_by_hash` RPC Client queries. (@fedekunze) (@cmwaters)
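As a rough illustration of the new queries from a Go client (a sketch only: the constructor signature and result fields are assumptions, not taken from this changelog):

```go
package main

import (
	"context"
	"fmt"

	rpchttp "github.com/tendermint/tendermint/rpc/client/http"
)

func main() {
	// Assumed constructor: takes only the remote address in this release line.
	c, err := rpchttp.New("http://localhost:26657")
	if err != nil {
		panic(err)
	}
	height := int64(100)
	// Assumed method added by this entry; passing nil asks for the latest header.
	res, err := c.Header(context.Background(), &height)
	if err != nil {
		panic(err)
	}
	fmt.Println("chain:", res.Header.ChainID, "height:", res.Header.Height)
}
```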
|
||||
|
||||
### IMPROVEMENTS
|
||||
|
||||
- [internal/protoio] [\#7325](https://github.com/tendermint/tendermint/pull/7325) Optimized `MarshalDelimited` by inlining the common case and using a `sync.Pool` in the worst case (see the sketch after this list). (@odeke-em)
|
||||
- [\#7338](https://github.com/tendermint/tendermint/pull/7338) pubsub: Performance improvements for the event query API (backport of #7319) (@creachadair)
|
||||
- [\#7252](https://github.com/tendermint/tendermint/pull/7252) Add basic metrics to the indexer package. (@creachadair)
|
||||
- [\#7338](https://github.com/tendermint/tendermint/pull/7338) Performance improvements for the event query API. (@creachadair)
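The `sync.Pool` technique mentioned in the `protoio` entry above can be sketched with the standard library alone. This is not Tendermint's actual implementation, just the general buffer-reuse pattern for length-delimited framing:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"sync"
)

// bufPool reuses scratch buffers across calls, the pattern the protoio
// optimization above uses on its slow path (details there differ).
var bufPool = sync.Pool{
	New: func() interface{} { return new(bytes.Buffer) },
}

// marshalDelimited writes a uvarint length prefix followed by the message.
func marshalDelimited(msg []byte) []byte {
	buf := bufPool.Get().(*bytes.Buffer)
	defer bufPool.Put(buf)
	buf.Reset()

	var lenBuf [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(lenBuf[:], uint64(len(msg)))
	buf.Write(lenBuf[:n])
	buf.Write(msg)

	out := make([]byte, buf.Len())
	copy(out, buf.Bytes())
	return out
}

func main() {
	fmt.Printf("% x\n", marshalDelimited([]byte("hello")))
}
```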
|
||||
|
||||
### BUG FIXES
|
||||
|
||||
- [\#7310](https://github.com/tendermint/tendermint/issues/7310) pubsub: Report a non-nil error when shutting down (fixes #7306).
|
||||
- [\#7355](https://github.com/tendermint/tendermint/pull/7355) Fix incorrect tests using the PSQL sink. (@creachadair)
|
||||
- [\#7683](https://github.com/tendermint/tendermint/pull/7683) rpc: check error code for broadcast_tx_commit. (@tychoish)
|
||||
|
||||
## v0.35.0
|
||||
|
||||
November 4, 2021
|
||||
|
||||
Special thanks to external contributors on this release: @JayT106,
|
||||
@bipulprasad, @alessio, @Yawning, @silasdavis, @cuonglm, @tanyabouman,
|
||||
@JoeKash, @githubsands, @jeebster, @crypto-facs, @liamsi, and @gotjoshua
|
||||
|
||||
### FEATURES
|
||||
|
||||
- [cli] [#7033](https://github.com/tendermint/tendermint/pull/7033) Add a `rollback` command to rollback to the previous tendermint state in the event of an incorrect app hash. (@cmwaters)
|
||||
- [config] [\#7174](https://github.com/tendermint/tendermint/pull/7174) expose ability to write config to arbitrary paths. (@tychoish)
|
||||
- [mempool, rpc] [\#7065](https://github.com/tendermint/tendermint/pull/7065) add removetx rpc method (backport of #7047) (@tychoish).
|
||||
- [\#6982](https://github.com/tendermint/tendermint/pull/6982) tendermint binary has built-in support for running the e2e application (with state sync support) (@cmwaters).
|
||||
- [config] Add `--mode` flag and config variable. See [ADR-52](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-052-tendermint-mode.md) @dongsam
|
||||
- [rpc] [\#6329](https://github.com/tendermint/tendermint/pull/6329) Don't cap page size in unsafe mode (@gotjoshua, @cmwaters)
|
||||
- [pex] [\#6305](https://github.com/tendermint/tendermint/pull/6305) v2 pex reactor with backwards compatibility. Introduces two new pex messages to
accommodate the new p2p stack. Removes the notion of seeds and crawling. All peer
exchange reactors behave the same. (@cmwaters)
|
||||
- [crypto] [\#6376](https://github.com/tendermint/tendermint/pull/6376) Enable sr25519 as a validator key type
|
||||
- [mempool] [\#6466](https://github.com/tendermint/tendermint/pull/6466) Introduction of a prioritized mempool (see the sketch after this list). (@alexanderbez)
|
||||
- `Priority` and `Sender` have been introduced into the `ResponseCheckTx` type, where the `priority` will determine the prioritization of
|
||||
the transaction when a proposer reaps transactions for a block proposal. The `sender` field acts as an index.
|
||||
- Operators may toggle between the legacy mempool reactor, `v0`, and the new prioritized reactor, `v1`, by setting the
|
||||
`mempool.version` configuration, where `v1` is the default configuration.
|
||||
- Applications that do not specify a priority, i.e. zero, will have transactions reaped by the order in which they are received by the node.
|
||||
- Transactions are gossiped in FIFO order as they are in `v0`.
|
||||
- [config/indexer] [\#6411](https://github.com/tendermint/tendermint/pull/6411) Introduce support for custom event indexing data sources, specifically PostgreSQL. (@JayT106)
|
||||
- [blocksync/event] [\#6619](https://github.com/tendermint/tendermint/pull/6619) Emit blocksync status event when switching consensus/blocksync (@JayT106)
|
||||
- [statesync/event] [\#6700](https://github.com/tendermint/tendermint/pull/6700) Emit statesync status start/end event (@JayT106)
|
||||
- [inspect] [\#6785](https://github.com/tendermint/tendermint/pull/6785) Add a new `inspect` command for introspecting the state and block store of a crashed tendermint node. (@williambanfield)
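Picking up the prioritized-mempool item above: a minimal sketch of an ABCI application filling in the new `Priority` and `Sender` fields. The import path, the embedded `BaseApplication`, and the exact method signature are assumptions rather than verbatim API:

```go
package main

import (
	"encoding/binary"
	"fmt"

	abci "github.com/tendermint/tendermint/abci/types"
)

// PrioApp is a toy application: it derives a priority from a fee encoded in
// the first 8 bytes of the transaction. Transactions left at priority zero
// are reaped in arrival order, as described above.
type PrioApp struct {
	abci.BaseApplication
}

func (app *PrioApp) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx {
	var fee uint64
	if len(req.Tx) >= 8 {
		fee = binary.BigEndian.Uint64(req.Tx[:8])
	}
	return abci.ResponseCheckTx{
		Code:     0,              // accept the transaction
		Priority: int64(fee),     // higher fee -> reaped earlier by the proposer
		Sender:   "example-acct", // acts as an index in the mempool
	}
}

func main() {
	app := &PrioApp{}
	res := app.CheckTx(abci.RequestCheckTx{Tx: []byte("\x00\x00\x00\x00\x00\x00\x00\x2apayload")})
	fmt.Println("priority assigned:", res.Priority)
}
```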
|
||||
|
||||
### BUG FIXES
|
||||
|
||||
- [\#7106](https://github.com/tendermint/tendermint/pull/7106) Revert mutex change to ABCI Clients (@tychoish).
|
||||
- [\#7142](https://github.com/tendermint/tendermint/pull/7142) mempool: remove panic when recheck-tx was not sent to ABCI application (@williambanfield).
|
||||
- [consensus]: [\#7060](https://github.com/tendermint/tendermint/pull/7060) <!-- markdown-link-check-disable-line -->
|
||||
wait until peerUpdates channel is closed to close remaining peers (@williambanfield)
|
||||
- [privval] [\#5638](https://github.com/tendermint/tendermint/pull/5638) Increase read/write timeout to 5s and calculate ping interval based on it (@JoeKash)
|
||||
- [evidence] [\#6375](https://github.com/tendermint/tendermint/pull/6375) Fix bug with inconsistent LightClientAttackEvidence hashing (cmwaters)
|
||||
- [rpc] [\#6507](https://github.com/tendermint/tendermint/pull/6507) Ensure RPC client can handle URLs without ports (@JayT106)
|
||||
- [statesync] [\#6463](https://github.com/tendermint/tendermint/pull/6463) Adds Reverse Sync feature to fetch historical light blocks after state sync in order to verify any evidence (@cmwaters)
|
||||
- [blocksync] [\#6590](https://github.com/tendermint/tendermint/pull/6590) Update the metrics during blocksync (@JayT106)
|
||||
|
||||
### BREAKING CHANGES
|
||||
|
||||
- Go API
|
||||
|
||||
- [crypto/armor]: [\#6963](https://github.com/tendermint/tendermint/pull/6963) remove package which is unused, and based on
|
||||
deprecated fundamentals. Downstream users should maintain this
|
||||
library. (@tychoish)
|
||||
- [state] [store] [proxy] [rpc/core]: [\#6937](https://github.com/tendermint/tendermint/pull/6937) move packages to
|
||||
`internal` to prevent consumption of these internal APIs by
|
||||
external users. (@tychoish)
|
||||
- [pubsub] [\#6634](https://github.com/tendermint/tendermint/pull/6634) The `Query#Matches` method along with other pubsub methods, now accepts a `[]abci.Event` instead of `map[string][]string`. (@alexanderbez)
|
||||
- [p2p] [\#6618](https://github.com/tendermint/tendermint/pull/6618) [\#6583](https://github.com/tendermint/tendermint/pull/6583) Move `p2p.NodeInfo`, `p2p.NodeID` and `p2p.NetAddress` into `types` to support use in external packages. (@tychoish)
|
||||
- [node] [\#6540](https://github.com/tendermint/tendermint/pull/6540) Reduce surface area of the `node` package by making most of the implementation details private. (@tychoish)
|
||||
- [p2p] [\#6547](https://github.com/tendermint/tendermint/pull/6547) Move the entire `p2p` package and all reactor implementations into `internal`. (@tychoish)
|
||||
- [libs/log] [\#6534](https://github.com/tendermint/tendermint/pull/6534) Remove the existing custom Tendermint logger backed by go-kit. The logging interface, `Logger`, remains. Tendermint still provides a default logger backed by the performant zerolog logger. (@alexanderbez)
|
||||
- [libs/time] [\#6495](https://github.com/tendermint/tendermint/pull/6495) Move types/time to libs/time to improve consistency. (@tychoish)
|
||||
- [mempool] [\#6529](https://github.com/tendermint/tendermint/pull/6529) The `Context` field has been removed from the `TxInfo` type. `CheckTx` now requires a `Context` argument. (@alexanderbez)
|
||||
- [abci/client, proxy] [\#5673](https://github.com/tendermint/tendermint/pull/5673) `Async` funcs return an error, `Sync` and `Async` funcs accept `context.Context` (@melekes)
|
||||
- [p2p] Remove unused function `MakePoWTarget`. (@erikgrinaker)
|
||||
- [libs/bits] [\#5720](https://github.com/tendermint/tendermint/pull/5720) Validate `BitArray` in `FromProto`, which now returns an error (@melekes)
|
||||
- [proto/p2p] Rename `DefaultNodeInfo` and `DefaultNodeInfoOther` to `NodeInfo` and `NodeInfoOther` (@erikgrinaker)
|
||||
- [proto/p2p] Rename `NodeInfo.default_node_id` to `node_id` (@erikgrinaker)
|
||||
- [libs/os] Kill() and {Must,}{Read,Write}File() functions have been removed. (@alessio)
|
||||
- [store] [\#5848](https://github.com/tendermint/tendermint/pull/5848) Remove block store state in favor of using the db iterators directly (@cmwaters)
|
||||
- [state] [\#5864](https://github.com/tendermint/tendermint/pull/5864) Use an iterator when pruning state (@cmwaters)
|
||||
- [types] [\#6023](https://github.com/tendermint/tendermint/pull/6023) Remove `tm2pb.Header`, `tm2pb.BlockID`, `tm2pb.PartSetHeader` and `tm2pb.NewValidatorUpdate`.
|
||||
- Each of the above types has a `ToProto` and `FromProto` method or function which replaced this logic.
|
||||
- [light] [\#6054](https://github.com/tendermint/tendermint/pull/6054) Move `MaxRetryAttempt` option from client to provider.
|
||||
- `NewWithOptions` now sets the max retry attempts and timeouts (@cmwaters)
|
||||
- [all] [\#6077](https://github.com/tendermint/tendermint/pull/6077) Change spelling from British English to American (@cmwaters)
|
||||
- Rename "Subscription.Cancelled()" to "Subscription.Canceled()" in libs/pubsub
|
||||
- Rename "behaviour" pkg to "behavior" and internalized it in blocksync v2
|
||||
- [rpc/client/http] [\#6176](https://github.com/tendermint/tendermint/pull/6176) Remove `endpoint` arg from `New`, `NewWithTimeout` and `NewWithClient` (@melekes)
|
||||
- [rpc/client/http] [\#6176](https://github.com/tendermint/tendermint/pull/6176) Unexpose `WSEvents` (@melekes)
|
||||
- [rpc/jsonrpc/client/ws_client] [\#6176](https://github.com/tendermint/tendermint/pull/6176) `NewWS` no longer accepts options (use `NewWSWithOptions` and `OnReconnect` funcs to configure the client) (@melekes)
|
||||
- [internal/libs] [\#6366](https://github.com/tendermint/tendermint/pull/6366) Move `autofile`, `clist`,`fail`,`flowrate`, `protoio`, `sync`, `tempfile`, `test` and `timer` lib packages to an internal folder
|
||||
- [libs/rand] [\#6364](https://github.com/tendermint/tendermint/pull/6364) Remove most of libs/rand in favour of standard lib's `math/rand` (@liamsi)
|
||||
- [mempool] [\#6466](https://github.com/tendermint/tendermint/pull/6466) The original mempool reactor has been versioned as `v0` and moved to a sub-package under the root `mempool` package.
|
||||
Some core types have been kept in the `mempool` package such as `TxCache` and its implementations, the `Mempool` interface itself
|
||||
and `TxInfo`. (@alexanderbez)
|
||||
- [crypto/sr25519] [\#6526](https://github.com/tendermint/tendermint/pull/6526) Do not re-execute the Ed25519-style key derivation step when doing signing and verification. The derivation is now done once and only once. This breaks `sr25519.GenPrivKeyFromSecret` output compatibility. (@Yawning)
|
||||
- [types] [\#6627](https://github.com/tendermint/tendermint/pull/6627) Move `NodeKey` to types to make the type public.
|
||||
- [config] [\#6627](https://github.com/tendermint/tendermint/pull/6627) Extend `config` to contain methods `LoadNodeKeyID` and `LoadorGenNodeKeyID`
|
||||
- [blocksync] [\#6755](https://github.com/tendermint/tendermint/pull/6755) Rename `FastSync` and `Blockchain` package to `BlockSync` (@cmwaters)
|
||||
|
||||
- CLI/RPC/Config
|
||||
|
||||
- [pubsub/events] [\#6634](https://github.com/tendermint/tendermint/pull/6634) The `ResultEvent.Events` field is now of type `[]abci.Event` preserving event order instead of `map[string][]string`. (@alexanderbez)
|
||||
- [config] [\#5598](https://github.com/tendermint/tendermint/pull/5598) The `test_fuzz` and `test_fuzz_config` P2P settings have been removed. (@erikgrinaker)
|
||||
- [config] [\#5728](https://github.com/tendermint/tendermint/pull/5728) `fastsync.version = "v1"` is no longer supported (@melekes)
|
||||
- [cli] [\#5772](https://github.com/tendermint/tendermint/pull/5772) `gen_node_key` prints JSON-encoded `NodeKey` rather than ID and does not save it to `node_key.json` (@melekes)
|
||||
- [cli] [\#5777](https://github.com/tendermint/tendermint/pull/5777) use hyphen-case instead of snake_case for all cli commands and config parameters (@cmwaters)
|
||||
- [rpc] [\#6019](https://github.com/tendermint/tendermint/pull/6019) standardise RPC errors and return the correct status code (@bipulprasad & @cmwaters)
|
||||
- [rpc] [\#6168](https://github.com/tendermint/tendermint/pull/6168) Change default sorting to desc for `/tx_search` results (@melekes)
|
||||
- [cli] [\#6282](https://github.com/tendermint/tendermint/pull/6282) User must specify the node mode when using `tendermint init` (@cmwaters)
|
||||
- [state/indexer] [\#6382](https://github.com/tendermint/tendermint/pull/6382) reconstruct indexer, move txindex into the indexer package (@JayT106)
|
||||
- [cli] [\#6372](https://github.com/tendermint/tendermint/pull/6372) Introduce `BootstrapPeers` as part of the new p2p stack. Peers to be connected on startup (@cmwaters)
|
||||
- [config] [\#6462](https://github.com/tendermint/tendermint/pull/6462) Move `PrivValidator` configuration out of `BaseConfig` into its own section. (@tychoish)
|
||||
- [rpc] [\#6610](https://github.com/tendermint/tendermint/pull/6610) Add MaxPeerBlockHeight into /status rpc call (@JayT106)
|
||||
- [blocksync/rpc] [\#6620](https://github.com/tendermint/tendermint/pull/6620) Add TotalSyncedTime & RemainingTime to SyncInfo in /status RPC (@JayT106)
|
||||
- [rpc/grpc] [\#6725](https://github.com/tendermint/tendermint/pull/6725) Mark gRPC in the RPC layer as deprecated.
|
||||
- [blocksync/v2] [\#6730](https://github.com/tendermint/tendermint/pull/6730) Fast Sync v2 is deprecated, please use v0
|
||||
- [rpc] Add genesis_chunked method to support paginated and parallel fetching of large genesis documents.
|
||||
- [rpc/jsonrpc/server] [\#6785](https://github.com/tendermint/tendermint/pull/6785) `Listen` function updated to take an `int` argument, `maxOpenConnections`, instead of an entire config object. (@williambanfield)
|
||||
- [rpc] [\#6820](https://github.com/tendermint/tendermint/pull/6820) Update RPC methods to reflect changes in the p2p layer, disabling support for `UnsafeDialSeeds` and `UnsafeDialPeers` when used with the new p2p layer, and changing the response format of the peer list in `NetInfo` for all users.
|
||||
- [cli] [\#6854](https://github.com/tendermint/tendermint/pull/6854) Remove deprecated snake case commands. (@tychoish)
|
||||
- [tools] [\#6498](https://github.com/tendermint/tendermint/pull/6498) Use the OS home dir instead of the hardcoded PATH. (@JayT106)
|
||||
- [cli/indexer] [\#6676](https://github.com/tendermint/tendermint/pull/6676) Reindex events command line tooling. (@JayT106)
|
||||
|
||||
- Apps
|
||||
|
||||
- [ABCI] [\#6408](https://github.com/tendermint/tendermint/pull/6408) Change the `key` and `value` fields from `[]byte` to `string` in the `EventAttribute` type. (@alexanderbez)
|
||||
- [ABCI] [\#5447](https://github.com/tendermint/tendermint/pull/5447) Remove `SetOption` method from `ABCI.Client` interface
|
||||
- [ABCI] [\#5447](https://github.com/tendermint/tendermint/pull/5447) Reset `Oneof` indexes for `Request` and `Response`.
|
||||
- [ABCI] [\#5818](https://github.com/tendermint/tendermint/pull/5818) Use protoio for msg length delimitation. Migrates from int64 to uint64 length delimiters.
|
||||
- [ABCI] [\#3546](https://github.com/tendermint/tendermint/pull/3546) Add `mempool_error` field to `ResponseCheckTx`. This field will contain an error string if Tendermint encountered an error while adding a transaction to the mempool. (@williambanfield)
|
||||
- [Version] [\#6494](https://github.com/tendermint/tendermint/pull/6494) `TMCoreSemVer` has been renamed to `TMVersion`.
|
||||
- It is not required any longer to set ldflags to set version strings
|
||||
- [abci/counter] [\#6684](https://github.com/tendermint/tendermint/pull/6684) Delete counter example app
|
||||
|
||||
- Data Storage
|
||||
- [store/state/evidence/light] [\#5771](https://github.com/tendermint/tendermint/pull/5771) Use an order-preserving varint key encoding (see the sketch after this list) (@cmwaters)
|
||||
- [mempool] [\#6396](https://github.com/tendermint/tendermint/pull/6396) Remove mempool's write ahead log (WAL), (previously unused by the tendermint code). (@tychoish)
|
||||
- [state] [\#6541](https://github.com/tendermint/tendermint/pull/6541) Move pruneBlocks from consensus/state to state/execution. (@JayT106)
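The property behind the order-preserving key encoding mentioned in the Data Storage item above is that byte-wise key order in the database matches numeric order, so iterators visit heights in sequence. A standard-library sketch of that property (Tendermint's actual encoding is varint-based and differs in detail):

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// heightKey encodes a block height so that lexicographic byte order matches
// numeric order; a fixed-width big-endian integer is the simplest such scheme.
func heightKey(h uint64) []byte {
	k := make([]byte, 8)
	binary.BigEndian.PutUint64(k, h)
	return k
}

func main() {
	// Naive decimal-string keys sort "10" before "9"; the fixed-width encoding
	// keeps numeric order, so key(10) correctly sorts after key(9).
	fmt.Println(`"10" < "9" as strings:`, "10" < "9")
	fmt.Println("key(10) < key(9) as bytes:", bytes.Compare(heightKey(10), heightKey(9)) < 0)
}
```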
|
||||
|
||||
### IMPROVEMENTS
|
||||
|
||||
- [libs/log] Console log formatting changes as a result of [\#6534](https://github.com/tendermint/tendermint/pull/6534) and [\#6589](https://github.com/tendermint/tendermint/pull/6589). (@tychoish)
|
||||
- [statesync] [\#6566](https://github.com/tendermint/tendermint/pull/6566) Allow state sync fetchers and request timeout to be configurable. (@alexanderbez)
|
||||
- [types] [\#6478](https://github.com/tendermint/tendermint/pull/6478) Add `block_id` to `newblock` event (@jeebster)
|
||||
- [crypto/ed25519] [\#5632](https://github.com/tendermint/tendermint/pull/5632) Adopt zip215 `ed25519` verification. (@marbar3778)
|
||||
- [crypto/ed25519] [\#6526](https://github.com/tendermint/tendermint/pull/6526) Use [curve25519-voi](https://github.com/oasisprotocol/curve25519-voi) for `ed25519` signing and verification. (@Yawning)
|
||||
- [crypto/sr25519] [\#6526](https://github.com/tendermint/tendermint/pull/6526) Use [curve25519-voi](https://github.com/oasisprotocol/curve25519-voi) for `sr25519` signing and verification. (@Yawning)
|
||||
- [privval] [\#5603](https://github.com/tendermint/tendermint/pull/5603) Add `--key` to `init`, `gen_validator`, `testnet` & `unsafe_reset_priv_validator` for use in generating `secp256k1` keys.
|
||||
- [privval] [\#5725](https://github.com/tendermint/tendermint/pull/5725) Add gRPC support to private validator.
|
||||
- [privval] [\#5876](https://github.com/tendermint/tendermint/pull/5876) `tendermint show-validator` will query the remote signer if gRPC is being used (@marbar3778)
|
||||
- [abci/client] [\#5673](https://github.com/tendermint/tendermint/pull/5673) `Async` requests return an error if queue is full (@melekes)
|
||||
- [mempool] [\#5673](https://github.com/tendermint/tendermint/pull/5673) Cancel `CheckTx` requests if RPC client disconnects or times out (@melekes)
|
||||
- [abci] [\#5706](https://github.com/tendermint/tendermint/pull/5706) Added `AbciVersion` to `RequestInfo` allowing applications to check ABCI version when connecting to Tendermint. (@marbar3778)
|
||||
- [blocksync/v1] [\#5728](https://github.com/tendermint/tendermint/pull/5728) Remove blocksync v1 (@melekes)
|
||||
- [blocksync/v0] [\#5741](https://github.com/tendermint/tendermint/pull/5741) Relax termination conditions and increase sync timeout (@melekes)
|
||||
- [cli] [\#5772](https://github.com/tendermint/tendermint/pull/5772) `gen_node_key` output now contains node ID (`id` field) (@melekes)
|
||||
- [blocksync/v2] [\#5774](https://github.com/tendermint/tendermint/pull/5774) Send status request when new peer joins (@melekes)
|
||||
- [store] [\#5888](https://github.com/tendermint/tendermint/pull/5888) store.SaveBlock saves using batches instead of transactions for now to improve ACID properties. This is a quick fix for underlying issues around tm-db and ACID guarantees. (@githubsands)
|
||||
- [consensus] [\#5987](https://github.com/tendermint/tendermint/pull/5987) and [\#5792](https://github.com/tendermint/tendermint/pull/5792) Remove the `time_iota_ms` consensus parameter. Merge `tmproto.ConsensusParams` and `abci.ConsensusParams`. (@marbar3778, @valardragon)
|
||||
- [types] [\#5994](https://github.com/tendermint/tendermint/pull/5994) Reduce the use of protobuf types in core logic. (@marbar3778)
|
||||
- `ConsensusParams`, `BlockParams`, `ValidatorParams`, `EvidenceParams`, `VersionParams`, `sm.Version` and `version.Consensus` have become native types. They still utilize protobuf when being sent over the wire or written to disk.
|
||||
- [rpc/client/http] [\#6163](https://github.com/tendermint/tendermint/pull/6163) Do not drop events even if the `out` channel is full (@melekes)
|
||||
- [node] [\#6059](https://github.com/tendermint/tendermint/pull/6059) Validate and complete genesis doc before saving to state store (@silasdavis)
|
||||
- [state] [\#6067](https://github.com/tendermint/tendermint/pull/6067) Batch save state data (@githubsands & @cmwaters)
|
||||
- [crypto] [\#6120](https://github.com/tendermint/tendermint/pull/6120) Implement batch verification interface for ed25519 and sr25519. (@marbar3778)
|
||||
- [types] [\#6120](https://github.com/tendermint/tendermint/pull/6120) use batch verification for verifying commits signatures.
|
||||
- If the key type supports the batch verification API it will try to batch verify. If the verification fails we will single verify each signature.
|
||||
- [privval/file] [\#6185](https://github.com/tendermint/tendermint/pull/6185) Return error on `LoadFilePV`, `LoadFilePVEmptyState`. Allows for better programmatic control of Tendermint.
|
||||
- [privval] [\#6240](https://github.com/tendermint/tendermint/pull/6240) Add `context.Context` to privval interface.
|
||||
- [rpc] [\#6265](https://github.com/tendermint/tendermint/pull/6265) set cache control in http-rpc response header (@JayT106)
|
||||
- [statesync] [\#6378](https://github.com/tendermint/tendermint/pull/6378) Retry requests for snapshots and add a minimum discovery time (5s) for new snapshots.
|
||||
- [node/state] [\#6370](https://github.com/tendermint/tendermint/pull/6370) graceful shutdown in the consensus reactor (@JayT106)
|
||||
- [crypto/merkle] [\#6443](https://github.com/tendermint/tendermint/pull/6443) Improve HashAlternatives performance (@cuonglm)
|
||||
- [crypto/merkle] [\#6513](https://github.com/tendermint/tendermint/pull/6513) Optimize HashAlternatives (@marbar3778)
|
||||
- [p2p/pex] [\#6509](https://github.com/tendermint/tendermint/pull/6509) Improve addrBook.hash performance (@cuonglm)
|
||||
- [consensus/metrics] [\#6549](https://github.com/tendermint/tendermint/pull/6549) Change block_size gauge to a histogram for better observability over time (@marbar3778)
|
||||
- [statesync] [\#6587](https://github.com/tendermint/tendermint/pull/6587) Increase chunk priority and re-request chunks that don't arrive (@cmwaters)
|
||||
- [state/privval] [\#6578](https://github.com/tendermint/tendermint/pull/6578) No GetPubKey retry beyond the proposal/voting window (@JayT106)
|
||||
- [rpc] [\#6615](https://github.com/tendermint/tendermint/pull/6615) Add TotalGasUsed to block_results response (@crypto-facs)
|
||||
- [cmd/tendermint/commands] [\#6623](https://github.com/tendermint/tendermint/pull/6623) replace `$HOME/.some/test/dir` with `t.TempDir` (@tanyabouman)
|
||||
- [statesync] \#6807 Implement P2P state provider as an alternative to RPC (@cmwaters)
|
||||
|
||||
## v0.34.19
|
||||
|
||||
### BUG FIXES
|
||||
|
||||
- [cli] [\#8270](https://github.com/tendermint/tendermint/issues/8270) fix reset commands (@alexanderbez).
|
||||
|
||||
## v0.34.18
|
||||
|
||||
### BREAKING CHANGES
|
||||
|
||||
- CLI/RPC/Config
|
||||
- [cli] [\#8258](https://github.com/tendermint/tendermint/pull/8258) Fix a bug in the cli that caused `unsafe-reset-all` to panic
|
||||
|
||||
## v0.34.17
|
||||
|
||||
### BREAKING CHANGES
|
||||
|
||||
- CLI/RPC/Config
|
||||
|
||||
- [cli] [\#8081](https://github.com/tendermint/tendermint/issues/8081) make the reset command safe to use (@marbar3778).
|
||||
|
||||
### BUG FIXES
|
||||
|
||||
- [consensus] [\#8079](https://github.com/tendermint/tendermint/issues/8079) start the timeout ticker before relay (backport #7844) (@creachadair).
|
||||
- [consensus] [\#7992](https://github.com/tendermint/tendermint/issues/7992) [\#7994](https://github.com/tendermint/tendermint/issues/7994) change lock handling in handleMsg and reactor to alleviate issues gossiping during long ABCI calls (@williambanfield).
|
||||
|
||||
## v0.34.16
|
||||
|
||||
Special thanks to external contributors on this release: @yihuang
|
||||
|
||||
### BUG FIXES
|
||||
|
||||
- [consensus] [\#7617](https://github.com/tendermint/tendermint/issues/7617) calculate prevote message delay metric (backport #7551) (@williambanfield).
|
||||
- [consensus] [\#7631](https://github.com/tendermint/tendermint/issues/7631) check proposal non-nil in prevote message delay metric (backport #7625) (@williambanfield).
|
||||
- [statesync] [\#7885](https://github.com/tendermint/tendermint/issues/7885) statesync: assert app version matches (backport #7856) (@cmwaters).
|
||||
- [statesync] [\#7881](https://github.com/tendermint/tendermint/issues/7881) fix app hash in state rollback (backport #7837) (@cmwaters).
|
||||
- [cli] [#7837](https://github.com/tendermint/tendermint/pull/7837) fix app hash in state rollback. (@yihuang).
|
||||
|
||||
## v0.34.15
|
||||
|
||||
Special thanks to external contributors on this release: @thanethomson
|
||||
|
||||
### BUG FIXES
|
||||
|
||||
- [\#7368](https://github.com/tendermint/tendermint/issues/7368) cmd: add integration test for rollback functionality (@cmwaters).
|
||||
- [\#7309](https://github.com/tendermint/tendermint/issues/7309) pubsub: Report a non-nil error when shutting down (fixes #7306).
|
||||
- [\#7057](https://github.com/tendermint/tendermint/pull/7057) Import Postgres driver support for the psql indexer (@creachadair).
|
||||
- [\#7106](https://github.com/tendermint/tendermint/pull/7106) Revert mutex change to ABCI Clients (@tychoish).
|
||||
|
||||
### IMPROVEMENTS
|
||||
|
||||
- [config] [\#7230](https://github.com/tendermint/tendermint/issues/7230) rpc: Add experimental config params to allow for subscription buffer size control (@thanethomson).
|
||||
|
||||
## v0.34.14
|
||||
|
||||
This release backports the `rollback` feature to allow recovery in the event of an incorrect app hash.
|
||||
|
||||
### FEATURES
|
||||
|
||||
- [\#6982](https://github.com/tendermint/tendermint/pull/6982) The tendermint binary now has built-in support for running the end-to-end test application (with state sync support) (@cmwaters).
|
||||
- [cli] [#7033](https://github.com/tendermint/tendermint/pull/7033) Add a `rollback` command to rollback to the previous tendermint state. This may be useful in the event of a non-deterministic app hash or when reverting an upgrade. (@cmwaters)
|
||||
|
||||
### IMPROVEMENTS
|
||||
|
||||
- [\#7103](https://github.com/tendermint/tendermint/pull/7104) Remove IAVL dependency (backport of #6550) (@cmwaters)
|
||||
|
||||
### BUG FIXES
|
||||
|
||||
- [\#7057](https://github.com/tendermint/tendermint/pull/7057) Import Postgres driver support for the psql indexer (@creachadair).
|
||||
- [ABCI] [\#7110](https://github.com/tendermint/tendermint/issues/7110) Revert "change client to use multi-reader mutexes (#6873)" (@tychoish).
|
||||
|
||||
## v0.34.13
|
||||
|
||||
*September 6, 2021*
|
||||
|
||||
This release backports improvements to state synchronization and ABCI
|
||||
performance under concurrent load, and the PostgreSQL event indexer.
|
||||
|
||||
### IMPROVEMENTS
|
||||
|
||||
- [statesync] [\#6881](https://github.com/tendermint/tendermint/issues/6881) improvements to stateprovider logic (@cmwaters)
|
||||
- [ABCI] [\#6873](https://github.com/tendermint/tendermint/issues/6873) change client to use multi-reader mutexes (@tychoish)
|
||||
- [indexing] [\#6906](https://github.com/tendermint/tendermint/issues/6906) enable the PostgreSQL indexer sink (@creachadair)
|
||||
|
||||
## v0.34.12
|
||||
|
||||
*August 17, 2021*
|
||||
|
||||
Special thanks to external contributors on this release: @JayT106.
|
||||
|
||||
### FEATURES
|
||||
|
||||
- [rpc] [\#6717](https://github.com/tendermint/tendermint/pull/6717) introduce
|
||||
`/genesis_chunked` rpc endpoint for handling large genesis files by chunking them (@tychoish)
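A rough sketch of how a client could reassemble a genesis document from this endpoint; the URI form and the response field names (`chunk`, `total`, base64 `data`) are assumptions for illustration:

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"net/http"
	"strconv"
)

// chunkResult mirrors the assumed shape of the /genesis_chunked result:
// chunk and total are string-encoded integers, data holds base64 bytes.
type chunkResult struct {
	Chunk string `json:"chunk"`
	Total string `json:"total"`
	Data  string `json:"data"`
}

type rpcEnvelope struct {
	Result chunkResult `json:"result"`
}

func fetchGenesis(base string) ([]byte, error) {
	var genesis []byte
	for i := 0; ; i++ {
		resp, err := http.Get(fmt.Sprintf("%s/genesis_chunked?chunk=%d", base, i))
		if err != nil {
			return nil, err
		}
		var env rpcEnvelope
		err = json.NewDecoder(resp.Body).Decode(&env)
		resp.Body.Close()
		if err != nil {
			return nil, err
		}

		part, err := base64.StdEncoding.DecodeString(env.Result.Data)
		if err != nil {
			return nil, err
		}
		genesis = append(genesis, part...)

		total, err := strconv.Atoi(env.Result.Total)
		if err != nil {
			return nil, err
		}
		if i+1 >= total {
			return genesis, nil
		}
	}
}

func main() {
	g, err := fetchGenesis("http://localhost:26657")
	if err != nil {
		panic(err)
	}
	fmt.Printf("reassembled genesis: %d bytes\n", len(g))
}
```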
|
||||
|
||||
### IMPROVEMENTS
|
||||
|
||||
- [rpc] [\#6825](https://github.com/tendermint/tendermint/issues/6825) Remove egregious INFO log from `ABCI#Query` RPC. (@alexanderbez)
|
||||
|
||||
### BUG FIXES
|
||||
|
||||
- [light] [\#6685](https://github.com/tendermint/tendermint/pull/6685) fix bug
|
||||
with incorrectly handling contexts that would occasionally freeze state sync. (@cmwaters)
|
||||
- [privval] [\#6748](https://github.com/tendermint/tendermint/issues/6748) Fix vote timestamp to prevent chain halt (@JayT106)
|
||||
Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).
|
||||
|
||||
## v0.34.11
|
||||
|
||||
@@ -390,25 +12,25 @@ adding two new parameters to the state sync config.
|
||||
### BREAKING CHANGES
|
||||
|
||||
- Apps
|
||||
- [Version] [\#6494](https://github.com/tendermint/tendermint/pull/6494) `TMCoreSemVer` is not required to be set as a ldflag any longer.
|
||||
- [Version] \#6494 `TMCoreSemVer` is not required to be set as a ldflag any longer.
|
||||
|
||||
### IMPROVEMENTS
|
||||
|
||||
- [statesync] [\#6566](https://github.com/tendermint/tendermint/pull/6566) Allow state sync fetchers and request timeout to be configurable. (@alexanderbez)
|
||||
- [statesync] [\#6378](https://github.com/tendermint/tendermint/pull/6378) Retry requests for snapshots and add a minimum discovery time (5s) for new snapshots. (@tychoish)
|
||||
- [statesync] [\#6582](https://github.com/tendermint/tendermint/pull/6582) Increase chunk priority and add multiple retry chunk requests (@cmwaters)
|
||||
- [statesync] \#6566 Allow state sync fetchers and request timeout to be configurable. (@alexanderbez)
|
||||
- [statesync] \#6378 Retry requests for snapshots and add a minimum discovery time (5s) for new snapshots. (@tychoish)
|
||||
- [statesync] \#6582 Increase chunk priority and add multiple retry chunk requests (@cmwaters)
|
||||
|
||||
### BUG FIXES
|
||||
|
||||
- [evidence] [\#6375](https://github.com/tendermint/tendermint/pull/6375) Fix bug with inconsistent LightClientAttackEvidence hashing (@cmwaters)
|
||||
- [evidence] \#6375 Fix bug with inconsistent LightClientAttackEvidence hashing (@cmwaters)
|
||||
|
||||
## v0.34.10
|
||||
|
||||
*April 14, 2021*
|
||||
|
||||
This release fixes a bug where peers would sometimes try to send messages
|
||||
This release fixes a bug where peers would sometimes try to send messages
|
||||
on incorrect channels. Special thanks to our friends at Oasis Labs for surfacing
|
||||
this issue!
|
||||
this issue!
|
||||
|
||||
- [p2p/node] [\#6339](https://github.com/tendermint/tendermint/issues/6339) Fix bug with using custom channels (@cmwaters)
|
||||
- [light] [\#6346](https://github.com/tendermint/tendermint/issues/6346) Correctly handle too high errors to improve client robustness (@cmwaters)
|
||||
@@ -417,7 +39,7 @@ this issue!
|
||||
|
||||
*April 8, 2021*
|
||||
|
||||
This release fixes a moderate severity security issue, Security Advisory Alderfly,
|
||||
This release fixes a moderate severity security issue, Security Advisory Alderfly,
|
||||
which impacts all networks that rely on Tendermint light clients.
|
||||
Further details will be released once networks have upgraded.
|
||||
|
||||
@@ -490,7 +112,7 @@ shout-out to @marbar3778 for diagnosing it quickly.
|
||||
|
||||
## v0.34.6
|
||||
|
||||
*February 18, 2021*
|
||||
*February 18, 2021*
|
||||
|
||||
_Tendermint Core v0.34.5 and v0.34.6 have been recalled due to release tooling problems._
|
||||
|
||||
@@ -498,9 +120,9 @@ _Tendermint Core v0.34.5 and v0.34.6 have been recalled due to release tooling p
|
||||
|
||||
*February 11, 2021*
|
||||
|
||||
This release includes a fix for a memory leak in the evidence reactor (see #6068, below).
|
||||
All Tendermint clients are recommended to upgrade.
|
||||
Thank you to our friends at Crypto.com for the initial report of this memory leak!
|
||||
This release includes a fix for a memory leak in the evidence reactor (see #6068, below).
|
||||
All Tendermint clients are recommended to upgrade.
|
||||
Thank you to our friends at Crypto.com for the initial report of this memory leak!
|
||||
|
||||
Special thanks to other external contributors on this release: @yayajacky, @odidev, @laniehei, and @c29r3!
|
||||
|
||||
@@ -510,17 +132,17 @@ Special thanks to other external contributors on this release: @yayajacky, @odid
|
||||
- [light] [\#6026](https://github.com/tendermint/tendermint/pull/6026) Fix a bug when height isn't provided for the rpc calls: `/commit` and `/validators` (@cmwaters)
|
||||
- [evidence] [\#6068](https://github.com/tendermint/tendermint/pull/6068) Terminate broadcastEvidenceRoutine when peer is stopped (@melekes)
|
||||
|
||||
## v0.34.3
|
||||
## v0.34.3
|
||||
|
||||
*January 19, 2021*
|
||||
|
||||
This release includes a fix for a high-severity security vulnerability,
|
||||
This release includes a fix for a high-severity security vulnerability,
|
||||
a DoS-vector that impacted Tendermint Core v0.34.0-v0.34.2. For more details, see
|
||||
[Security Advisory Mulberry](https://github.com/tendermint/tendermint/security/advisories/GHSA-p658-8693-mhvg)
|
||||
or https://nvd.nist.gov/vuln/detail/CVE-2021-21271.
|
||||
[Security Advisory Mulberry](https://github.com/tendermint/tendermint/security/advisories/GHSA-p658-8693-mhvg)
|
||||
or https://nvd.nist.gov/vuln/detail/CVE-2021-21271.
|
||||
|
||||
Tendermint Core v0.34.3 also updates GoGo Protobuf to 1.3.2 in order to pick up the fix for
|
||||
https://nvd.nist.gov/vuln/detail/CVE-2021-3121.
|
||||
https://nvd.nist.gov/vuln/detail/CVE-2021-3121.
|
||||
|
||||
### BUG FIXES
|
||||
|
||||
@@ -612,14 +234,14 @@ Special thanks to external contributors on this release: @james-ray, @fedekunze,
|
||||
- [blockchain] [\#4637](https://github.com/tendermint/tendermint/pull/4637) Migrate blockchain reactor(s) to Protobuf encoding (@marbar3778)
|
||||
- [evidence] [\#4949](https://github.com/tendermint/tendermint/pull/4949) Migrate evidence reactor to Protobuf encoding (@marbar3778)
|
||||
- [mempool] [\#4940](https://github.com/tendermint/tendermint/pull/4940) Migrate mempool from to Protobuf encoding (@marbar3778)
|
||||
- [mempool] [\#5321](https://github.com/tendermint/tendermint/pull/5321) Batch transactions when broadcasting them to peers (@melekes)
|
||||
- [mempool] [\#5321](https://github.com/tendermint/tendermint/pull/5321) Batch transactions when broadcasting them to peers (@melekes)
|
||||
- `MaxBatchBytes` new config setting defines the max size of one batch.
|
||||
- [p2p/pex] [\#4973](https://github.com/tendermint/tendermint/pull/4973) Migrate `p2p/pex` reactor to Protobuf encoding (@marbar3778)
|
||||
- [statesync] [\#4943](https://github.com/tendermint/tendermint/pull/4943) Migrate state sync reactor to Protobuf encoding (@marbar3778)
|
||||
|
||||
- Blockchain Protocol
|
||||
|
||||
- [evidence] [\#4725](https://github.com/tendermint/tendermint/pull/4725) Remove `Pubkey` from `DuplicateVoteEvidence` (@marbar3778)
|
||||
- [evidence] [\#4725](https://github.com/tendermint/tendermint/pull/4725) Remove `Pubkey` from `DuplicateVoteEvidence` (@marbar3778)
|
||||
- [evidence] [\#5499](https://github.com/tendermint/tendermint/pull/5449) Cap evidence to a maximum number of bytes (supersedes [\#4780](https://github.com/tendermint/tendermint/pull/4780)) (@cmwaters)
|
||||
- [merkle] [\#5193](https://github.com/tendermint/tendermint/pull/5193) Header hashes are no longer empty for empty inputs, notably `DataHash`, `EvidenceHash`, and `LastResultsHash` (@erikgrinaker)
|
||||
- [state] [\#4845](https://github.com/tendermint/tendermint/pull/4845) Include `GasWanted` and `GasUsed` into `LastResultsHash` (@melekes)
|
||||
@@ -678,7 +300,7 @@ Special thanks to external contributors on this release: @james-ray, @fedekunze,
|
||||
- [types] [\#4852](https://github.com/tendermint/tendermint/pull/4852) Vote & Proposal `SignBytes` is now func `VoteSignBytes` & `ProposalSignBytes` (@marbar3778)
|
||||
- [types] [\#4798](https://github.com/tendermint/tendermint/pull/4798) Simplify `VerifyCommitTrusting` func + remove extra validation (@melekes)
|
||||
- [types] [\#4845](https://github.com/tendermint/tendermint/pull/4845) Remove `ABCIResult` (@melekes)
|
||||
- [types] [\#5029](https://github.com/tendermint/tendermint/pull/5029) Rename all values from `PartsHeader` to `PartSetHeader` to have consistency (@marbar3778)
|
||||
- [types] [\#5029](https://github.com/tendermint/tendermint/pull/5029) Rename all values from `PartsHeader` to `PartSetHeader` to have consistency (@marbar3778)
|
||||
- [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) `Total` in `Parts` & `PartSetHeader` has been changed from a `int` to a `uint32` (@marbar3778)
|
||||
- [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) Vote: `ValidatorIndex` & `Round` are now `int32` (@marbar3778)
|
||||
- [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) Proposal: `POLRound` & `Round` are now `int32` (@marbar3778)
|
||||
@@ -716,7 +338,7 @@ Special thanks to external contributors on this release: @james-ray, @fedekunze,
|
||||
- [evidence] [\#4722](https://github.com/tendermint/tendermint/pull/4722) Consolidate evidence store and pool types to improve evidence DB (@cmwaters)
|
||||
- [evidence] [\#4839](https://github.com/tendermint/tendermint/pull/4839) Reject duplicate evidence from being proposed (@cmwaters)
|
||||
- [evidence] [\#5219](https://github.com/tendermint/tendermint/pull/5219) Change the source of evidence time to block time (@cmwaters)
|
||||
- [libs] [\#5126](https://github.com/tendermint/tendermint/pull/5126) Add a sync package which wraps sync.(RW)Mutex & deadlock.(RW)Mutex and use a build flag (deadlock) in order to enable deadlock checking (@marbar3778)
|
||||
- [libs] [\#5126](https://github.com/tendermint/tendermint/pull/5126) Add a sync package which wraps sync.(RW)Mutex & deadlock.(RW)Mutex and use a build flag (deadlock) in order to enable deadlock checking (@marbar3778)
|
||||
- [light] [\#4935](https://github.com/tendermint/tendermint/pull/4935) Fetch and compare a new header with witnesses in parallel (@melekes)
|
||||
- [light] [\#4929](https://github.com/tendermint/tendermint/pull/4929) Compare header with witnesses only when doing bisection (@melekes)
|
||||
- [light] [\#4916](https://github.com/tendermint/tendermint/pull/4916) Validate basic for inbound validator sets and headers before further processing them (@cmwaters)
|
||||
@@ -1084,7 +706,7 @@ and a validator address plus a timestamp. Note we may remove the validator
|
||||
address & timestamp fields in the future (see ADR-25).
|
||||
|
||||
`lite2` package has been added to solve `lite` issues and introduce weak
|
||||
subjectivity interface. Refer to the [spec](./spec/consensus/light-client/) for complete details.
|
||||
subjectivity interface. Refer to the [spec](https://github.com/tendermint/spec/blob/master/spec/consensus/light-client.md) for complete details.
|
||||
`lite` package is now deprecated and will be removed in v0.34 release.
|
||||
|
||||
### BREAKING CHANGES:
|
||||
@@ -1445,7 +1067,7 @@ Special thanks to external contributors on this release: @jon-certik, @gracenoah
|
||||
*August 28, 2019*
|
||||
|
||||
@climber73 wrote the [Writing a Tendermint Core application in Java
|
||||
(gRPC)](https://github.com/tendermint/tendermint/blob/master/docs/guides/java.md) <!-- markdown-link-check-disable-line -->
|
||||
(gRPC)](https://github.com/tendermint/tendermint/blob/master/docs/guides/java.md)
|
||||
guide.
|
||||
|
||||
Special thanks to external contributors on this release:
|
||||
@@ -2024,7 +1646,7 @@ more details.
|
||||
- [rpc] [\#3269](https://github.com/tendermint/tendermint/issues/2826) Limit number of unique clientIDs with open subscriptions. Configurable via `rpc.max_subscription_clients`
|
||||
- [rpc] [\#3269](https://github.com/tendermint/tendermint/issues/2826) Limit number of unique queries a given client can subscribe to at once. Configurable via `rpc.max_subscriptions_per_client`.
|
||||
- [rpc] [\#3435](https://github.com/tendermint/tendermint/issues/3435) Default ReadTimeout and WriteTimeout changed to 10s. WriteTimeout can increased by setting `rpc.timeout_broadcast_tx_commit` in the config.
|
||||
- [rpc/client] [\#3269](https://github.com/tendermint/tendermint/issues/3269) Update `EventsClient` interface to reflect new pubsub/eventBus API [ADR-33](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-033-pubsub.md). This includes `Subscribe`, `Unsubscribe`, and `UnsubscribeAll` methods.
|
||||
- [rpc/client] [\#3269](https://github.com/tendermint/tendermint/issues/3269) Update `EventsClient` interface to reflect new pubsub/eventBus API [ADR-33](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-033-pubsub.md). This includes `Subscribe`, `Unsubscribe`, and `UnsubscribeAll` methods.
|
||||
|
||||
* Apps
|
||||
- [abci] [\#3403](https://github.com/tendermint/tendermint/issues/3403) Remove `time_iota_ms` from BlockParams. This is a
|
||||
@@ -2077,7 +1699,7 @@ more details.
|
||||
- [blockchain] [\#3358](https://github.com/tendermint/tendermint/pull/3358) Fix timer leak in `BlockPool` (@guagualvcha)
|
||||
- [cmd] [\#3408](https://github.com/tendermint/tendermint/issues/3408) Fix `testnet` command's panic when creating non-validator configs (using `--n` flag) (@srmo)
|
||||
- [libs/db/remotedb/grpcdb] [\#3402](https://github.com/tendermint/tendermint/issues/3402) Close Iterator/ReverseIterator after use
|
||||
- [libs/pubsub] [\#951](https://github.com/tendermint/tendermint/issues/951), [\#1880](https://github.com/tendermint/tendermint/issues/1880) Use non-blocking send when dispatching messages [ADR-33](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-033-pubsub.md)
|
||||
- [libs/pubsub] [\#951](https://github.com/tendermint/tendermint/issues/951), [\#1880](https://github.com/tendermint/tendermint/issues/1880) Use non-blocking send when dispatching messages [ADR-33](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-033-pubsub.md)
|
||||
- [lite] [\#3364](https://github.com/tendermint/tendermint/issues/3364) Fix `/validators` and `/abci_query` proxy endpoints
|
||||
(@guagualvcha)
|
||||
- [p2p/conn] [\#3347](https://github.com/tendermint/tendermint/issues/3347) Reject all-zero shared secrets in the Diffie-Hellman step of secret-connection
|
||||
@@ -2138,13 +1760,13 @@ handshake by authenticating the NetAddress.ID of the peer we're dialing.
|
||||
This release fixes yet another issue with the proposer selection algorithm.
|
||||
We hope it's the last one, but we won't be surprised if it's not.
|
||||
We plan to one day expose the selection algorithm more directly to
|
||||
the application ([\#3285](https://github.com/tendermint/tendermint/issues/3285)), and even to support randomness ([\#763](https://github.com/tendermint/tendermint/issues/763)). <!-- markdown-link-check-disable-line -->
|
||||
the application ([\#3285](https://github.com/tendermint/tendermint/issues/3285)), and even to support randomness ([\#763](https://github.com/tendermint/tendermint/issues/763)).
|
||||
For more, see issues marked
|
||||
[proposer-selection](https://github.com/tendermint/tendermint/labels/proposer-selection).
|
||||
|
||||
This release also includes a fix to prevent Tendermint from including the same
|
||||
piece of evidence in more than one block. This issue was reported by @chengwenxi in our
|
||||
[bug bounty program](https://hackerone.com/cosmos).
|
||||
[bug bounty program](https://hackerone.com/tendermint).
|
||||
|
||||
### BREAKING CHANGES:
|
||||
|
||||
@@ -2637,7 +2259,7 @@ Special thanks to external contributors on this release:
|
||||
@james-ray, @overbool, @phymbert, @Slamper, @Uzair1995, @yutianwu.
|
||||
|
||||
Special thanks to @Slamper for a series of bug reports in our [bug bounty
|
||||
program](https://hackerone.com/cosmos) which are fixed in this release.
|
||||
program](https://hackerone.com/tendermint) which are fixed in this release.
|
||||
|
||||
This release is primarily about adding Version fields to various data structures,
|
||||
optimizing consensus messages for signing and verification in
|
||||
@@ -2781,7 +2403,7 @@ Special thanks to external contributors on this release:
|
||||
This release is mostly about the ConsensusParams - removing fields and enforcing MaxGas.
|
||||
It also addresses some issues found via security audit, removes various unused
|
||||
functions from `libs/common`, and implements
|
||||
[ADR-012](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-012-peer-transport.md).
|
||||
[ADR-012](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-012-peer-transport.md).
|
||||
|
||||
BREAKING CHANGES:
|
||||
|
||||
@@ -2846,7 +2468,7 @@ are affected by a change.
|
||||
|
||||
A few more breaking changes are in the works - each will come with a clear
|
||||
Architecture Decision Record (ADR) explaining the change. You can review ADRs
|
||||
[here](https://github.com/tendermint/tendermint/tree/master/docs/architecture)
|
||||
[here](https://github.com/tendermint/tendermint/tree/develop/docs/architecture)
|
||||
or in the [open Pull Requests](https://github.com/tendermint/tendermint/pulls).
|
||||
You can also check in on the [issues marked as
|
||||
breaking](https://github.com/tendermint/tendermint/issues?q=is%3Aopen+is%3Aissue+label%3Abreaking).
|
||||
@@ -2862,7 +2484,7 @@ BREAKING CHANGES:
|
||||
- [abci] Added address of the original proposer of the block to Header
|
||||
- [abci] Change ABCI Header to match Tendermint exactly
|
||||
- [abci] [\#2159](https://github.com/tendermint/tendermint/issues/2159) Update use of `Validator` (see
|
||||
[ADR-018](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-018-ABCI-Validators.md)):
|
||||
[ADR-018](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-018-ABCI-Validators.md)):
|
||||
- Remove PubKey from `Validator` (so it's just Address and Power)
|
||||
- Introduce `ValidatorUpdate` (with just PubKey and Power)
|
||||
- InitChain and EndBlock use ValidatorUpdate
|
||||
@@ -2884,7 +2506,7 @@ BREAKING CHANGES:
|
||||
- [state] [\#1815](https://github.com/tendermint/tendermint/issues/1815) Validator set changes are now delayed by one block (!)
|
||||
- Add NextValidatorSet to State, changes on-disk representation of state
|
||||
- [state] [\#2184](https://github.com/tendermint/tendermint/issues/2184) Enforce ConsensusParams.BlockSize.MaxBytes (See
|
||||
[ADR-020](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-020-block-size.md)).
|
||||
[ADR-020](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-020-block-size.md)).
|
||||
- Remove ConsensusParams.BlockSize.MaxTxs
|
||||
- Introduce maximum sizes for all components of a block, including ChainID
|
||||
- [types] Updates to the block Header:
|
||||
@@ -2895,7 +2517,7 @@ BREAKING CHANGES:
|
||||
- [consensus] [\#2203](https://github.com/tendermint/tendermint/issues/2203) Implement BFT time
|
||||
- Timestamp in block must be monotonic and equal the median of timestamps in block's LastCommit
|
||||
- [crypto] [\#2239](https://github.com/tendermint/tendermint/issues/2239) Secp256k1 signature changes (See
|
||||
[ADR-014](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-014-secp-malleability.md)):
|
||||
[ADR-014](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-014-secp-malleability.md)):
|
||||
- format changed from DER to `r || s`, both little endian encoded as 32 bytes.
|
||||
- malleability removed by requiring `s` to be in canonical form.
|
||||
|
||||
@@ -3125,7 +2747,7 @@ BREAKING CHANGES:
|
||||
FEATURES
|
||||
- [cmd] Added metrics (served under `/metrics` using a Prometheus client;
|
||||
disabled by default). See the new `instrumentation` section in the config and
|
||||
[metrics](https://github.com/tendermint/tendermint/blob/master/docs/nodes/metrics.md)
|
||||
[metrics](https://tendermint.readthedocs.io/projects/tools/en/develop/metrics.html)
|
||||
guide.
|
||||
- [p2p] Add IPv6 support to peering.
|
||||
- [p2p] Add `external_address` to config to allow specifying the address for
|
||||
@@ -3239,7 +2861,7 @@ BREAKING:
|
||||
|
||||
FEATURES
|
||||
|
||||
- [rpc] the RPC documentation is now published to `https://tendermint.github.io/slate`
|
||||
- [rpc] the RPC documentation is now published to https://tendermint.github.io/slate
|
||||
- [p2p] AllowDuplicateIP config option to refuse connections from same IP.
|
||||
- true by default for now, false by default in next breaking release
|
||||
- [docs] Add docs for query, tx indexing, events, pubsub
|
||||
@@ -3717,7 +3339,7 @@ containing substructs: `BaseConfig`, `P2PConfig`, `MempoolConfig`, `ConsensusCon
|
||||
- Change some [function and method signatures](https://gist.github.com/ebuchman/640d5fc6c2605f73497992fe107ebe0b) to accommodate the new config
|
||||
|
||||
- Logger
|
||||
- Replace static `log15` logger with a simple interface, and provide a new implementation using `go-kit`. <!-- markdown-link-check-disable-next-line -->
|
||||
- Replace static `log15` logger with a simple interface, and provide a new implementation using `go-kit`.
|
||||
See our new [logging library](https://github.com/tendermint/tmlibs/log) and [blog post](https://tendermint.com/blog/abstracting-the-logger-interface-in-go) for more details
|
||||
- Levels `warn` and `notice` are removed (you may need to change them in your `config.toml`!)
|
||||
- Change some [function and method signatures](https://gist.github.com/ebuchman/640d5fc6c2605f73497992fe107ebe0b) to accept a logger
|
||||
|
||||
@@ -1,104 +1,149 @@
|
||||
# Unreleased Changes
|
||||
|
||||
Friendly reminder: We have a [bug bounty program](https://hackerone.com/cosmos).
|
||||
|
||||
## vX.X
|
||||
|
||||
Month, DD, YYYY
|
||||
|
||||
Special thanks to external contributors on this release:
|
||||
|
||||
Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermint).
|
||||
|
||||
### BREAKING CHANGES
|
||||
|
||||
- CLI/RPC/Config
|
||||
|
||||
- [rpc] \#7121 Remove the deprecated gRPC interface to the RPC service. (@creachadair)
|
||||
- [blocksync] \#7159 Remove support for disabling blocksync in any circumstance. (@tychoish)
|
||||
- [mempool] \#7171 Remove legacy mempool implementation. (@tychoish)
|
||||
- [rpc] \#7575 Rework how RPC responses are written back via HTTP. (@creachadair)
|
||||
- [rpc] \#7713 Remove unused options for websocket clients. (@creachadair)
|
||||
- [config] \#7930 Add new event subscription options and defaults. (@creachadair)
|
||||
- [rpc] \#7982 Add new Events interface and deprecate Subscribe. (@creachadair)
|
||||
- [cli] \#8081 make the reset command safe to use by introducing the `reset-state` command. Fixed by \#8259. (@marbar3778, @cmwaters)
|
||||
- [config] \#8222 default indexer configuration to null. (@creachadair)
|
||||
- [rpc] \#8570 rework timeouts to be per-method instead of global. (@creachadair)
|
||||
- [rpc] \#8624 deprecate `broadcast_tx_commit`, `broadcast_tx_sync`, and `broadcast_tx_async` in favor of `broadcast_tx`. (@tychoish)
|
||||
- [config] \#8654 remove deprecated `seeds` field from config. Users should switch to `bootstrap-peers` instead. (@cmwaters)
|
||||
- [pubsub/events] \#6634 The `ResultEvent.Events` field is now of type `[]abci.Event` preserving event order instead of `map[string][]string`. (@alexanderbez)
|
||||
- [config] \#5598 The `test_fuzz` and `test_fuzz_config` P2P settings have been removed. (@erikgrinaker)
|
||||
- [config] \#5728 `fast_sync = "v1"` is no longer supported (@melekes)
|
||||
- [cli] \#5772 `gen_node_key` prints JSON-encoded `NodeKey` rather than ID and does not save it to `node_key.json` (@melekes)
|
||||
- [cli] \#5777 use hyphen-case instead of snake_case for all cli commands and config parameters (@cmwaters)
|
||||
- [rpc] \#6019 standardise RPC errors and return the correct status code (@bipulprasad & @cmwaters)
|
||||
- [rpc] \#6168 Change default sorting to desc for `/tx_search` results (@melekes)
|
||||
- [cli] \#6282 User must specify the node mode when using `tendermint init` (@cmwaters)
|
||||
- [state/indexer] \#6382 reconstruct indexer, move txindex into the indexer package (@JayT106)
|
||||
- [cli] \#6372 Introduce `BootstrapPeers` as part of the new p2p stack. Peers to be connected on startup (@cmwaters)
|
||||
- [config] \#6462 Move `PrivValidator` configuration out of `BaseConfig` into its own section. (@tychoish)
|
||||
- [rpc] \#6610 Add MaxPeerBlockHeight into /status rpc call (@JayT106)
|
||||
- [libs/CList] \#6626 Automatically detach the prev/next elements in Remove function (@JayT106)
|
||||
|
||||
- Apps
|
||||
|
||||
- [tendermint/spec] \#7804 Migrate spec from [spec repo](https://github.com/tendermint/spec).
|
||||
- [abci] \#7984 Remove the locks preventing concurrent use of ABCI applications by Tendermint. (@tychoish)
|
||||
- [abci] \#8605 Remove info, log, events, gasUsed and mempoolError fields from ResponseCheckTx as they are not used by Tendermint. (@jmalicevic)
|
||||
- [abci] \#8664 Move `app_hash` parameter from `Commit` to `FinalizeBlock`. (@sergio-mena)
|
||||
- [abci] \#8656 Added cli command for `PrepareProposal`. (@jmalicevic)
|
||||
- [sink/psql] \#8637 tx_results emitted from the psql sink are now JSON encoded; previously they were protobuf encoded
|
||||
- [abci] \#8901 Added cli command for `ProcessProposal`. (@hvanz)
|
||||
- [ABCI] \#6408 Change the `key` and `value` fields from `[]byte` to `string` in the `EventAttribute` type. (@alexanderbez)
|
||||
- [ABCI] \#5447 Remove `SetOption` method from `ABCI.Client` interface
|
||||
- [ABCI] \#5447 Reset `Oneof` indexes for `Request` and `Response`.
|
||||
- [ABCI] \#5818 Use protoio for msg length delimitation. Migrates from int64 to uint64 length delimiters.
|
||||
- [Version] \#6494 `TMCoreSemVer` has been renamed to `TMVersion`.
|
||||
- It is not required any longer to set ldflags to set version strings
|
||||
|
||||
- P2P Protocol
|
||||
|
||||
- [p2p] \#7035 Remove legacy P2P routing implementation and associated configuration options. (@tychoish)
|
||||
- [p2p] \#7265 Peer manager reduces peer score for each failed dial attempts for peers that have not successfully dialed. (@tychoish)
|
||||
- [p2p] [\#7594](https://github.com/tendermint/tendermint/pull/7594) always advertise self, to enable mutual address discovery. (@altergui)
|
||||
- [p2p] \#8737 Introduce "inactive" peer label to avoid re-dialing incompatible peers. (@tychoish)
|
||||
- [p2p] \#8737 Increase frequency of dialing attempts to reduce latency for peer acquisition. (@tychoish)
|
||||
- [p2p] \#8737 Improvements to peer scoring and sorting to gossip a greater variety of peers during PEX. (@tychoish)
|
||||
- [p2p] \#8737 Track incoming and outgoing peers separately to ensure more peer slots open for incoming connections. (@tychoish)
|
||||
|
||||
- Go API
|
||||
|
||||
- [rpc] \#7474 Remove the "URI" RPC client. (@creachadair)
|
||||
- [libs/pubsub] \#7451 Internalize the pubsub packages. (@creachadair)
|
||||
- [libs/sync] \#7450 Internalize and remove the library. (@creachadair)
|
||||
- [libs/async] \#7449 Move library to internal. (@creachadair)
|
||||
- [pubsub] \#7231 Remove unbuffered subscriptions and rework the Subscription interface. (@creachadair)
|
||||
- [eventbus] \#7231 Move the EventBus type to the internal/eventbus package. (@creachadair)
|
||||
- [blocksync] \#7046 Remove v2 implementation of the blocksync service and recactor, which was disabled in the previous release. (@tychoish)
|
||||
- [p2p] \#7064 Remove WDRR queue implementation. (@tychoish)
|
||||
- [config] \#7169 `WriteConfigFile` now returns an error. (@tychoish)
|
||||
- [libs/service] \#7288 Remove SetLogger method on `service.Service` interface. (@tychoish)
|
||||
- [abci/client] \#7607 Simplify client interface (removes most "async" methods). (@creachadair)
|
||||
- [libs/json] \#7673 Remove the libs/json (tmjson) library. (@creachadair)
|
||||
- [crypto] \#8412 \#8432 Remove `crypto/tmhash` package in favor of small functions in `crypto` package and cleanup of unused functions. (@tychoish)
|
||||
- [pubsub] \#6634 The `Query#Matches` method along with other pubsub methods, now accepts a `[]abci.Event` instead of `map[string][]string`. (@alexanderbez)
|
||||
- [p2p] \#6618 Move `p2p.NodeInfo` into `types` to support use of the SDK. (@tychoish)
|
||||
- [p2p] \#6583 Make `p2p.NodeID` and `p2p.NetAddress` exported types to support their use in the RPC layer. (@tychoish)
|
||||
- [node] \#6540 Reduce surface area of the `node` package by making most of the implementation details private. (@tychoish)
|
||||
- [p2p] \#6547 Move the entire `p2p` package and all reactor implementations into `internal`. (@tychoish)
|
||||
- [libs/log] \#6534 Remove the existing custom Tendermint logger backed by go-kit. The logging interface, `Logger`, remains. Tendermint still provides a default logger backed by the performant zerolog logger. (@alexanderbez)
|
||||
- [libs/time] \#6495 Move types/time to libs/time to improve consistency. (@tychoish)
|
||||
- [mempool] \#6529 The `Context` field has been removed from the `TxInfo` type. `CheckTx` now requires a `Context` argument. (@alexanderbez)
|
||||
- [abci/client, proxy] \#5673 `Async` funcs return an error, `Sync` and `Async` funcs accept `context.Context` (@melekes)
|
||||
- [p2p] Remove unused function `MakePoWTarget`. (@erikgrinaker)
|
||||
- [libs/bits] \#5720 Validate `BitArray` in `FromProto`, which now returns an error (@melekes)
|
||||
- [proto/p2p] Rename `DefaultNodeInfo` and `DefaultNodeInfoOther` to `NodeInfo` and `NodeInfoOther` (@erikgrinaker)
|
||||
- [proto/p2p] Rename `NodeInfo.default_node_id` to `node_id` (@erikgrinaker)
|
||||
- [libs/os] Kill() and {Must,}{Read,Write}File() functions have been removed. (@alessio)
|
||||
- [store] \#5848 Remove block store state in favor of using the db iterators directly (@cmwaters)
|
||||
- [state] \#5864 Use an iterator when pruning state (@cmwaters)
|
||||
- [types] \#6023 Remove `tm2pb.Header`, `tm2pb.BlockID`, `tm2pb.PartSetHeader` and `tm2pb.NewValidatorUpdate`.
|
||||
- Each of the above types has a `ToProto` and `FromProto` method or function which replaced this logic.
|
||||
- [light] \#6054 Move `MaxRetryAttempt` option from client to provider.
|
||||
- `NewWithOptions` now sets the max retry attempts and timeouts (@cmwaters)
|
||||
- [all] \#6077 Change spelling from British English to American (@cmwaters)
|
||||
- Rename "Subscription.Cancelled()" to "Subscription.Canceled()" in libs/pubsub
|
||||
- Rename "behaviour" pkg to "behavior" and internalized it in blockchain v2
|
||||
- [rpc/client/http] \#6176 Remove `endpoint` arg from `New`, `NewWithTimeout` and `NewWithClient` (@melekes)
|
||||
- [rpc/client/http] \#6176 Unexpose `WSEvents` (@melekes)
|
||||
- [rpc/jsonrpc/client/ws_client] \#6176 `NewWS` no longer accepts options (use `NewWSWithOptions` and `OnReconnect` funcs to configure the client) (@melekes)
|
||||
- [internal/libs] \#6366 Move `autofile`, `clist`,`fail`,`flowrate`, `protoio`, `sync`, `tempfile`, `test` and `timer` lib packages to an internal folder
|
||||
- [libs/rand] \#6364 Remove most of libs/rand in favour of standard lib's `math/rand` (@liamsi)
|
||||
- [mempool] \#6466 The original mempool reactor has been versioned as `v0` and moved to a sub-package under the root `mempool` package.
|
||||
Some core types have been kept in the `mempool` package such as `TxCache` and it's implementations, the `Mempool` interface itself
|
||||
and `TxInfo`. (@alexanderbez)
|
||||
- [crypto/sr25519] \#6526 Do not re-execute the Ed25519-style key derivation step when doing signing and verification. The derivation is now done once and only once. This breaks `sr25519.GenPrivKeyFromSecret` output compatibility. (@Yawning)
|
||||
- [types] \#6627 Move `NodeKey` to types to make the type public.
|
||||
- [config] \#6627 Extend `config` to contain methods `LoadNodeKeyID` and `LoadorGenNodeKeyID`
|
||||
|
||||
- Blockchain Protocol
|
||||
|
||||
- Data Storage
|
||||
- [store/state/evidence/light] \#5771 Use an order-preserving varint key encoding (@cmwaters)
|
||||
- [mempool] \#6396 Remove mempool's write ahead log (WAL), (previously unused by the tendermint code). (@tychoish)
|
||||
- [state] \#6541 Move pruneBlocks from consensus/state to state/execution. (@JayT106)
|
||||
|
||||
- Tooling
|
||||
- [tools] \#6498 Set OS home dir to instead of the hardcoded PATH. (@JayT106)
|
||||
|
||||
### FEATURES
|
||||
|
||||
- [rpc] [\#7270](https://github.com/tendermint/tendermint/pull/7270) Add `header` and `header_by_hash` RPC Client queries. (@fedekunze)
|
||||
- [rpc] [\#7701] Add `ApplicationInfo` to `status` rpc call which contains the application version. (@jonasbostoen)
|
||||
- [cli] [#7033](https://github.com/tendermint/tendermint/pull/7033) Add a `rollback` command to rollback to the previous tendermint state in the event of non-determinstic app hash or reverting an upgrade.
|
||||
- [mempool, rpc] \#7041 Add removeTx operation to the RPC layer. (@tychoish)
|
||||
- [consensus] \#7354 add a new `synchrony` field to the `ConsensusParams` struct for controlling the parameters of the proposer-based timestamp algorithm. (@williambanfield)
|
||||
- [consensus] \#7376 Update the proposal logic per the Propose-based timestamps specification so that the proposer will wait for the previous block time to occur before proposing the next block. (@williambanfield)
|
||||
- [consensus] \#7391 Use the proposed block timestamp as the proposal timestamp. Update the block validation logic to ensure that the proposed block's timestamp matches the timestamp in the proposal message. (@williambanfield)
|
||||
- [consensus] \#7415 Update proposal validation logic to Prevote nil if a proposal does not meet the conditions for Timelyness per the proposer-based timestamp specification. (@anca)
|
||||
- [consensus] \#7382 Update block validation to no longer require the block timestamp to be the median of the timestamps of the previous commit. (@anca)
|
||||
- [consensus] \#7711 Use the proposer timestamp for the first height instead of the genesis time. Chains will still start consensus at the genesis time. (@anca)
|
||||
- [cli] \#8281 Add a tool to update old config files to the latest version. (@creachadair)
|
||||
- [consenus] \#8514 move `RecheckTx` from the local node mempool config to a global `ConsensusParams` field in `BlockParams` (@cmwaters)
|
||||
- [abci] ABCI++ [specified](https://github.com/tendermint/tendermint/tree/master/spec/abci%2B%2B). (@sergio-mena, @cmwaters, @josef-widder)
|
||||
- [abci] ABCI++ [implemented](https://github.com/orgs/tendermint/projects/9). (@williambanfield, @thanethomson, @sergio-mena)
|
||||
- [config] Add `--mode` flag and config variable. See [ADR-52](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-052-tendermint-mode.md) @dongsam
|
||||
- [rpc] \#6329 Don't cap page size in unsafe mode (@gotjoshua, @cmwaters)
|
||||
- [pex] \#6305 v2 pex reactor with backwards compatability. Introduces two new pex messages to
|
||||
accomodate for the new p2p stack. Removes the notion of seeds and crawling. All peer
|
||||
exchange reactors behave the same. (@cmwaters)
|
||||
- [crypto] \#6376 Enable sr25519 as a validator key
|
||||
- [mempool] \#6466 Introduction of a prioritized mempool. (@alexanderbez)
|
||||
- `Priority` and `Sender` have been introduced into the `ResponseCheckTx` type, where the `priority` will determine the prioritization of
|
||||
the transaction when a proposer reaps transactions for a block proposal. The `sender` field acts as an index.
|
||||
- Operators may toggle between the legacy mempool reactor, `v0`, and the new prioritized reactor, `v1`, by setting the
|
||||
`mempool.version` configuration, where `v1` is the default configuration.
|
||||
- Applications that do not specify a priority, i.e. zero, will have transactions reaped by the order in which they are received by the node.
|
||||
- Transactions are gossiped in FIFO order as they are in `v0`.
|
||||
- [config/indexer] \#6411 Introduce support for custom event indexing data sources, specifically PostgreSQL. (@JayT106)
|
||||
|
||||
### IMPROVEMENTS
|
||||
|
||||
- [internal/protoio] \#7325 Optimized `MarshalDelimited` by inlining the common case and using a `sync.Pool` in the worst case. (@odeke-em)
|
||||
- [consensus] \#6969 remove logic to 'unlock' a locked block.
|
||||
- [evidence] \#7700 Evidence messages contain single Evidence instead of EvidenceList (@jmalicevic)
|
||||
- [evidence] \#7802 Evidence pool emits events when evidence is validated and updates a metric when the number of evidence in the evidence pool changes. (@jmalicevic)
|
||||
- [pubsub] \#7319 Performance improvements for the event query API (@creachadair)
|
||||
- [node] \#7521 Define concrete type for seed node implementation (@spacech1mp)
|
||||
- [rpc] \#7612 paginate mempool /unconfirmed_txs rpc endpoint (@spacech1mp)
|
||||
- [light] [\#7536](https://github.com/tendermint/tendermint/pull/7536) rpc /status call returns info about the light client (@jmalicevic)
|
||||
- [types] \#7765 Replace EvidenceData with EvidenceList to avoid unnecessary nesting of evidence fields within a block. (@jmalicevic)
|
||||
- [libs/log] Console log formatting changes as a result of \#6534 and \#6589. (@tychoish)
|
||||
- [statesync] \#6566 Allow state sync fetchers and request timeout to be configurable. (@alexanderbez)
|
||||
- [types] \#6478 Add `block_id` to `newblock` event (@jeebster)
|
||||
- [crypto/ed25519] \#5632 Adopt zip215 `ed25519` verification. (@marbar3778)
|
||||
- [crypto/ed25519] \#6526 Use [curve25519-voi](https://github.com/oasisprotocol/curve25519-voi) for `ed25519` signing and verification. (@Yawning)
|
||||
- [crypto/sr25519] \#6526 Use [curve25519-voi](https://github.com/oasisprotocol/curve25519-voi) for `sr25519` signing and verification. (@Yawning)
|
||||
- [privval] \#5603 Add `--key` to `init`, `gen_validator`, `testnet` & `unsafe_reset_priv_validator` for use in generating `secp256k1` keys.
|
||||
- [privval] \#5725 Add gRPC support to private validator.
|
||||
- [privval] \#5876 `tendermint show-validator` will query the remote signer if gRPC is being used (@marbar3778)
|
||||
- [abci/client] \#5673 `Async` requests return an error if queue is full (@melekes)
|
||||
- [mempool] \#5673 Cancel `CheckTx` requests if RPC client disconnects or times out (@melekes)
|
||||
- [abci] \#5706 Added `AbciVersion` to `RequestInfo` allowing applications to check ABCI version when connecting to Tendermint. (@marbar3778)
|
||||
- [blockchain/v1] \#5728 Remove in favor of v2 (@melekes)
|
||||
- [blockchain/v0] \#5741 Relax termination conditions and increase sync timeout (@melekes)
|
||||
- [cli] \#5772 `gen_node_key` output now contains node ID (`id` field) (@melekes)
|
||||
- [blockchain/v2] \#5774 Send status request when new peer joins (@melekes)
|
||||
- [consensus] \#5792 Deprecates the `time_iota_ms` consensus parameter, to reduce the bug surface. The parameter is no longer used. (@valardragon)
|
||||
- [store] \#5888 store.SaveBlock saves using batches instead of transactions for now to improve ACID properties. This is a quick fix for underlying issues around tm-db and ACID guarantees. (@githubsands)
|
||||
- [consensus] \#5987 Remove `time_iota_ms` from consensus params. Merge `tmproto.ConsensusParams` and `abci.ConsensusParams`. (@marbar3778)
|
||||
- [types] \#5994 Reduce the use of protobuf types in core logic. (@marbar3778)
|
||||
- `ConsensusParams`, `BlockParams`, `ValidatorParams`, `EvidenceParams`, `VersionParams`, `sm.Version` and `version.Consensus` have become native types. They still utilize protobuf when being sent over the wire or written to disk.
|
||||
- [rpc/client/http] \#6163 Do not drop events even if the `out` channel is full (@melekes)
|
||||
- [node] \#6059 Validate and complete genesis doc before saving to state store (@silasdavis)
|
||||
- [state] \#6067 Batch save state data (@githubsands & @cmwaters)
|
||||
- [crypto] \#6120 Implement batch verification interface for ed25519 and sr25519. (@marbar3778)
|
||||
- [types] \#6120 use batch verification for verifying commits signatures.
|
||||
- If the key type supports the batch verification API it will try to batch verify. If the verification fails we will single verify each signature.
|
||||
- [privval/file] \#6185 Return error on `LoadFilePV`, `LoadFilePVEmptyState`. Allows for better programmatic control of Tendermint.
|
||||
- [privval] \#6240 Add `context.Context` to privval interface.
|
||||
- [rpc] \#6265 set cache control in http-rpc response header (@JayT106)
|
||||
- [statesync] \#6378 Retry requests for snapshots and add a minimum discovery time (5s) for new snapshots.
|
||||
- [node/state] \#6370 graceful shutdown in the consensus reactor (@JayT106)
|
||||
- [crypto/merkle] \#6443 Improve HashAlternatives performance (@cuonglm)
|
||||
- [crypto/merkle] \#6513 Optimize HashAlternatives (@marbar3778)
|
||||
- [p2p/pex] \#6509 Improve addrBook.hash performance (@cuonglm)
|
||||
- [consensus/metrics] \#6549 Change block_size gauge to a histogram for better observability over time (@marbar3778)
|
||||
- [statesync] \#6587 Increase chunk priority and re-request chunks that don't arrive (@cmwaters)
|
||||
- [state/privval] \#6578 No GetPubKey retry beyond the proposal/voting window (@JayT106)
|
||||
- [rpc] \#6615 Add TotalGasUsed to block_results response (@crypto-facs)
|
||||
- [cmd/tendermint/commands] \#6623 replace `$HOME/.some/test/dir` with `t.TempDir` (@tanyabouman)
|
||||
|
||||
### BUG FIXES
|
||||
|
||||
- fix: assignment copies lock value in `BitArray.UnmarshalJSON()` (@lklimek)
|
||||
- [light] \#7640 Light Client: fix absence proof verification (@ashcherbakov)
|
||||
- [light] \#7641 Light Client: fix querying against the latest height (@ashcherbakov)
|
||||
- [cli] [#7837](https://github.com/tendermint/tendermint/pull/7837) fix app hash in state rollback. (@yihuang)
|
||||
- [cli] \#8276 scmigrate: ensure target key is correctly renamed. (@creachadair)
|
||||
- [cli] \#8294 keymigrate: ensure block hash keys are correctly translated. (@creachadair)
|
||||
- [cli] \#8352 keymigrate: ensure transaction hash keys are correctly translated. (@creachadair)
|
||||
- (indexer) \#8625 Fix overriding tx index of duplicated txs.
|
||||
- [privval] \#5638 Increase read/write timeout to 5s and calculate ping interval based on it (@JoeKash)
|
||||
- [blockchain/v1] [\#5701](https://github.com/tendermint/tendermint/pull/5701) Handle peers without blocks (@melekes)
|
||||
- [blockchain/v1] \#5711 Fix deadlock (@melekes)
|
||||
- [evidence] \#6375 Fix bug with inconsistent LightClientAttackEvidence hashing (cmwaters)
|
||||
- [rpc] \#6507 fix RPC client doesn't handle url's without ports (@JayT106)
|
||||
- [statesync] \#6463 Adds Reverse Sync feature to fetch historical light blocks after state sync in order to verify any evidence (@cmwaters)
|
||||
- [fastsync] \#6590 Update the metrics during fast-sync (@JayT106)
|
||||
|
||||
@@ -20,7 +20,7 @@ This code of conduct applies to all projects run by the Tendermint/COSMOS team a
|
||||
|
||||
* Please keep unstructured critique to a minimum. If you have solid ideas you want to experiment with, make a fork and see how it works.
|
||||
|
||||
* We will exclude you from interaction if you insult, demean or harass anyone. That is not welcome behaviour. We interpret the term “harassment” as including the definition in the [Citizen Code of Conduct](https://github.com/stumpsyn/policies/blob/master/citizen_code_of_conduct.md); if you have any lack of clarity about what might be included in that concept, please read their definition. In particular, we don’t tolerate behavior that excludes people in socially marginalized groups.
|
||||
* We will exclude you from interaction if you insult, demean or harass anyone. That is not welcome behaviour. We interpret the term “harassment” as including the definition in the [Citizen Code of Conduct](http://citizencodeofconduct.org/); if you have any lack of clarity about what might be included in that concept, please read their definition. In particular, we don’t tolerate behavior that excludes people in socially marginalized groups.
|
||||
|
||||
* Private harassment is also unacceptable. No matter who you are, if you feel you have been or are being harassed or made uncomfortable by a community member, please contact one of the channel admins or the person mentioned above immediately. Whether you’re a regular contributor or a newcomer, we care about making this community a safe place for you and we’ve got your back.
|
||||
|
||||
|
||||
138
CONTRIBUTING.md
138
CONTRIBUTING.md
@@ -105,33 +105,11 @@ specify exactly the dependency you want to update, eg.
|
||||
|
||||
## Protobuf
|
||||
|
||||
We use [Protocol Buffers](https://developers.google.com/protocol-buffers) along
|
||||
with [`gogoproto`](https://github.com/gogo/protobuf) to generate code for use
|
||||
across Tendermint Core.
|
||||
We use [Protocol Buffers](https://developers.google.com/protocol-buffers) along with [gogoproto](https://github.com/gogo/protobuf) to generate code for use across Tendermint Core.
|
||||
|
||||
To generate proto stubs, lint, and check protos for breaking changes, you will
|
||||
need to install [buf](https://buf.build/) and `gogoproto`. Then, from the root
|
||||
of the repository, run:
|
||||
For linting, checking breaking changes and generating proto stubs, we use [buf](https://buf.build/). If you would like to run linting and check if the changes you have made are breaking then you will need to have docker running locally. Then the linting cmd will be `make proto-lint` and the breaking changes check will be `make proto-check-breaking`.
|
||||
|
||||
```bash
|
||||
# Lint all of the .proto files in proto/tendermint
|
||||
make proto-lint
|
||||
|
||||
# Check if any of your local changes (prior to committing to the Git repository)
|
||||
# are breaking
|
||||
make proto-check-breaking
|
||||
|
||||
# Generate Go code from the .proto files in proto/tendermint
|
||||
make proto-gen
|
||||
```
|
||||
|
||||
To automatically format `.proto` files, you will need
|
||||
[`clang-format`](https://clang.llvm.org/docs/ClangFormat.html) installed. Once
|
||||
installed, you can run:
|
||||
|
||||
```bash
|
||||
make proto-format
|
||||
```
|
||||
We use [Docker](https://www.docker.com/) to generate the protobuf stubs. To generate the stubs yourself, make sure docker is running then run `make proto-gen`.
|
||||
|
||||
### Visual Studio Code
|
||||
|
||||
@@ -249,6 +227,116 @@ Fixes #nnnn
|
||||
|
||||
Each PR should have one commit once it lands on `master`; this can be accomplished by using the "squash and merge" button on Github. Be sure to edit your commit message, though!
|
||||
|
||||
### Release Procedure
|
||||
|
||||
#### Major Release
|
||||
|
||||
This major release process assumes that this release was preceded by release candidates.
|
||||
If there were no release candidates, and you'd like to cut a major release directly from master, see below.
|
||||
|
||||
1. Start on the latest RC branch (`RCx/vX.X.0`).
|
||||
2. Run integration tests.
|
||||
3. Branch off of the RC branch (`git checkout -b release-prep`) and prepare the release:
|
||||
- "Squash" changes from the changelog entries for the RCs into a single entry,
|
||||
and add all changes included in `CHANGELOG_PENDING.md`.
|
||||
(Squashing includes both combining all entries, as well as removing or simplifying
|
||||
any intra-RC changes. It may also help to alphabetize the entries by package name.)
|
||||
- Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
|
||||
all PRs
|
||||
- Ensure that UPGRADING.md is up-to-date and includes notes on any breaking changes
|
||||
or other upgrading flows.
|
||||
- Bump TMVersionDefault version in `version.go`
|
||||
- Bump P2P and block protocol versions in `version.go`, if necessary
|
||||
- Bump ABCI protocol version in `version.go`, if necessary
|
||||
- Add any release notes you would like to be added to the body of the release to `release_notes.md`.
|
||||
4. Open a PR with these changes against the RC branch (`RCx/vX.X.0`).
|
||||
5. Once these changes are on the RC branch, branch off of the RC branch again to create a release branch:
|
||||
- `git checkout RCx/vX.X.0`
|
||||
- `git checkout -b release/vX.X.0`
|
||||
6. Push a tag with prepared release details. This will trigger the actual release `vX.X.0`.
|
||||
- `git tag -a vX.X.0 -m 'Release vX.X.0'`
|
||||
- `git push origin vX.X.0`
|
||||
7. Make sure that `master` is updated with the latest `CHANGELOG.md`, `CHANGELOG_PENDING.md`, and `UPGRADING.md`.
|
||||
8. Create the long-lived minor release branch `RC0/vX.X.1` for the next point release on this
|
||||
new major release series.
|
||||
|
||||
##### Major Release (from `master`)
|
||||
|
||||
1. Start on `master`
|
||||
2. Run integration tests (see `test_integrations` in Makefile)
|
||||
3. Prepare release in a pull request against `master` (to be squash merged):
|
||||
- Copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md`; if this release
|
||||
had release candidates, squash all the RC updates into one
|
||||
- Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
|
||||
all issues
|
||||
- Run `bash ./scripts/authors.sh` to get a list of authors since the latest
|
||||
release, and add the github aliases of external contributors to the top of
|
||||
the changelog. To lookup an alias from an email, try `bash ./scripts/authors.sh <email>`
|
||||
- Reset the `CHANGELOG_PENDING.md`
|
||||
- Bump TMVersionDefault version in `version.go`
|
||||
- Bump P2P and block protocol versions in `version.go`, if necessary
|
||||
- Bump ABCI protocol version in `version.go`, if necessary
|
||||
- Make sure all significant breaking changes are covered in `UPGRADING.md`
|
||||
- Add any release notes you would like to be added to the body of the release to `release_notes.md`.
|
||||
4. Push a tag with prepared release details (this will trigger the release `vX.X.0`)
|
||||
- `git tag -a vX.X.x -m 'Release vX.X.x'`
|
||||
- `git push origin vX.X.x`
|
||||
5. Update the `CHANGELOG.md` file on master with the releases changelog.
|
||||
6. Delete any RC branches and tags for this release (if applicable)
|
||||
|
||||
#### Minor Release (Point Releases)
|
||||
|
||||
Minor releases are done differently from major releases: They are built off of long-lived backport branches, rather than from master.
|
||||
Each release "line" (e.g. 0.34 or 0.33) has its own long-lived backport branch, and
|
||||
the backport branches have names like `v0.34.x` or `v0.33.x` (literally, `x`; it is not a placeholder in this case).
|
||||
|
||||
As non-breaking changes land on `master`, they should also be backported (cherry-picked) to these backport branches.
|
||||
|
||||
We use Mergify's [backport feature](https://mergify.io/features/backports) to automatically backport to the needed branch. Depending on which backport branch you need to backport to there will be labels for them. To notify the bot to backport a pull request, mark the pull request with the label `backport-to-<backport_branch>`. Once the original pull request is merged, the bot will try to cherry-pick the pull request to the backport branch. If the bot fails to backport, it will open a pull request. The author of the original pull request is responsible for solving the conflicts and merging the pull request.
|
||||
|
||||
Minor releases don't have release candidates by default, although any tricky changes may merit a release candidate.
|
||||
|
||||
To create a minor release:
|
||||
|
||||
1. Checkout the long-lived backport branch: `git checkout vX.X.x`
|
||||
2. Run integration tests: `make test_integrations`
|
||||
3. Check out a new branch and prepare the release:
|
||||
- Copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md`
|
||||
- Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for all issues
|
||||
- Run `bash ./scripts/authors.sh` to get a list of authors since the latest release, and add the GitHub aliases of external contributors to the top of the CHANGELOG. To lookup an alias from an email, try `bash ./scripts/authors.sh <email>`
|
||||
- Reset the `CHANGELOG_PENDING.md`
|
||||
- Bump the ABCI version number, if necessary.
|
||||
(Note that ABCI follows semver, and that ABCI versions are the only versions
|
||||
which can change during minor releases, and only field additions are valid minor changes.)
|
||||
- Add any release notes you would like to be added to the body of the release to `release_notes.md`.
|
||||
4. Open a PR with these changes that will land them back on `vX.X.x`
|
||||
5. Once this change has landed on the backport branch, make sure to pull it locally, then push a tag.
|
||||
- `git tag -a vX.X.x -m 'Release vX.X.x'`
|
||||
- `git push origin vX.X.x`
|
||||
6. Create a pull request back to master with the CHANGELOG & version changes from the latest release.
|
||||
- Remove all `R:minor` labels from the pull requests that were included in the release.
|
||||
- Do not merge the backport branch into master.
|
||||
|
||||
#### Release Candidates
|
||||
|
||||
Before creating an official release, especially a major release, we may want to create a
|
||||
release candidate (RC) for our friends and partners to test out. We use git tags to
|
||||
create RCs, and we build them off of RC branches. RC branches typically have names formatted
|
||||
like `RCX/vX.X.X` (or, concretely, `RC0/v0.34.0`), while the tags themselves follow
|
||||
the "standard" release naming conventions, with `-rcX` at the end (`vX.X.X-rcX`).
|
||||
|
||||
(Note that branches and tags _cannot_ have the same names, so it's important that these branches
|
||||
have distinct names from the tags/release names.)
|
||||
|
||||
1. Start from the RC branch (e.g. `RC0/v0.34.0`).
|
||||
2. Create the new tag, specifying a name and a tag "message":
|
||||
`git tag -a v0.34.0-rc0 -m "Release Candidate v0.34.0-rc0`
|
||||
3. Push the tag back up to origin:
|
||||
`git push origin v0.34.0-rc4`
|
||||
Now the tag should be available on the repo's releases page.
|
||||
4. Create a new release candidate branch for any possible updates to the RC:
|
||||
`git checkout -b RC1/v0.34.0; git push origin RC1/v0.34.0`
|
||||
|
||||
## Testing
|
||||
|
||||
### Unit tests
|
||||
|
||||
1
DOCKER/.gitignore
vendored
Normal file
1
DOCKER/.gitignore
vendored
Normal file
@@ -0,0 +1 @@
|
||||
tendermint
|
||||
@@ -1,14 +1,14 @@
|
||||
# stage 1 Generate Tendermint Binary
|
||||
FROM golang:1.17-alpine as builder
|
||||
FROM golang:1.16-alpine as builder
|
||||
RUN apk update && \
|
||||
apk upgrade && \
|
||||
apk --no-cache add make git
|
||||
apk --no-cache add make
|
||||
COPY / /tendermint
|
||||
WORKDIR /tendermint
|
||||
RUN make build-linux
|
||||
|
||||
# stage 2
|
||||
FROM golang:1.17-alpine
|
||||
FROM golang:1.15-alpine
|
||||
LABEL maintainer="hello@tendermint.com"
|
||||
|
||||
# Tendermint will be looking for the genesis file in /tendermint/config/genesis.json
|
||||
@@ -53,3 +53,4 @@ CMD ["start"]
|
||||
|
||||
# Expose the data directory as a volume since there's mutable state in there
|
||||
VOLUME [ "$TMHOME" ]
|
||||
|
||||
|
||||
28
DOCKER/Dockerfile.build_c-amazonlinux
Normal file
28
DOCKER/Dockerfile.build_c-amazonlinux
Normal file
@@ -0,0 +1,28 @@
|
||||
FROM amazonlinux:2
|
||||
|
||||
RUN yum -y update && \
|
||||
yum -y install wget
|
||||
|
||||
RUN wget http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm && \
|
||||
rpm -ivh epel-release-latest-7.noarch.rpm
|
||||
|
||||
RUN yum -y groupinstall "Development Tools"
|
||||
RUN yum -y install leveldb-devel which
|
||||
|
||||
ENV GOVERSION=1.16.5
|
||||
|
||||
RUN cd /tmp && \
|
||||
wget https://dl.google.com/go/go${GOVERSION}.linux-amd64.tar.gz && \
|
||||
tar -C /usr/local -xf go${GOVERSION}.linux-amd64.tar.gz && \
|
||||
mkdir -p /go/src && \
|
||||
mkdir -p /go/bin
|
||||
|
||||
ENV PATH=$PATH:/usr/local/go/bin:/go/bin
|
||||
ENV GOBIN=/go/bin
|
||||
ENV GOPATH=/go/src
|
||||
|
||||
RUN mkdir -p /tendermint
|
||||
WORKDIR /tendermint
|
||||
|
||||
CMD ["/usr/bin/make", "build", "TENDERMINT_BUILD_OPTIONS=cleveldb"]
|
||||
|
||||
16
DOCKER/Dockerfile.testing
Normal file
16
DOCKER/Dockerfile.testing
Normal file
@@ -0,0 +1,16 @@
|
||||
FROM golang:latest
|
||||
|
||||
# Grab deps (jq, hexdump, xxd, killall)
|
||||
RUN apt-get update && \
|
||||
apt-get install -y --no-install-recommends \
|
||||
jq bsdmainutils vim-common psmisc netcat
|
||||
|
||||
# Add testing deps for curl
|
||||
RUN echo 'deb http://httpredir.debian.org/debian testing main non-free contrib' >> /etc/apt/sources.list && \
|
||||
apt-get update && \
|
||||
apt-get install -y --no-install-recommends curl
|
||||
|
||||
VOLUME /go
|
||||
|
||||
EXPOSE 26656
|
||||
EXPOSE 26657
|
||||
13
DOCKER/Makefile
Normal file
13
DOCKER/Makefile
Normal file
@@ -0,0 +1,13 @@
|
||||
build:
|
||||
@sh -c "'$(CURDIR)/build.sh'"
|
||||
|
||||
push:
|
||||
@sh -c "'$(CURDIR)/push.sh'"
|
||||
|
||||
build_testing:
|
||||
docker build --tag tendermint/testing -f ./Dockerfile.testing .
|
||||
|
||||
build_amazonlinux_buildimage:
|
||||
docker build -t "tendermint/tendermint:build_c-amazonlinux" -f Dockerfile.build_c-amazonlinux .
|
||||
|
||||
.PHONY: build push build_testing build_amazonlinux_buildimage
|
||||
@@ -8,7 +8,7 @@ Official releases can be found [here](https://github.com/tendermint/tendermint/r
|
||||
|
||||
The Dockerfile for tendermint is not expected to change in the near future. The master file used for all builds can be found [here](https://raw.githubusercontent.com/tendermint/tendermint/master/DOCKER/Dockerfile).
|
||||
|
||||
Respective versioned files can be found at `https://raw.githubusercontent.com/tendermint/tendermint/vX.XX.XX/DOCKER/Dockerfile` (replace the Xs with the version number).
|
||||
Respective versioned files can be found <https://raw.githubusercontent.com/tendermint/tendermint/vX.XX.XX/DOCKER/Dockerfile> (replace the Xs with the version number).
|
||||
|
||||
## Quick reference
|
||||
|
||||
|
||||
20
DOCKER/build.sh
Executable file
20
DOCKER/build.sh
Executable file
@@ -0,0 +1,20 @@
|
||||
#!/usr/bin/env bash
|
||||
set -e
|
||||
|
||||
# Get the tag from the version, or try to figure it out.
|
||||
if [ -z "$TAG" ]; then
|
||||
TAG=$(awk -F\" '/TMCoreSemVer =/ { print $2; exit }' < ../version/version.go)
|
||||
fi
|
||||
if [ -z "$TAG" ]; then
|
||||
echo "Please specify a tag."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
TAG_NO_PATCH=${TAG%.*}
|
||||
|
||||
read -p "==> Build 3 docker images with the following tags (latest, $TAG, $TAG_NO_PATCH)? y/n" -n 1 -r
|
||||
echo
|
||||
if [[ $REPLY =~ ^[Yy]$ ]]
|
||||
then
|
||||
docker build -t "tendermint/tendermint" -t "tendermint/tendermint:$TAG" -t "tendermint/tendermint:$TAG_NO_PATCH" .
|
||||
fi
|
||||
22
DOCKER/push.sh
Executable file
22
DOCKER/push.sh
Executable file
@@ -0,0 +1,22 @@
|
||||
#!/usr/bin/env bash
|
||||
set -e
|
||||
|
||||
# Get the tag from the version, or try to figure it out.
|
||||
if [ -z "$TAG" ]; then
|
||||
TAG=$(awk -F\" '/TMCoreSemVer =/ { print $2; exit }' < ../version/version.go)
|
||||
fi
|
||||
if [ -z "$TAG" ]; then
|
||||
echo "Please specify a tag."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
TAG_NO_PATCH=${TAG%.*}
|
||||
|
||||
read -p "==> Push 3 docker images with the following tags (latest, $TAG, $TAG_NO_PATCH)? y/n" -n 1 -r
|
||||
echo
|
||||
if [[ $REPLY =~ ^[Yy]$ ]]
|
||||
then
|
||||
docker push "tendermint/tendermint:latest"
|
||||
docker push "tendermint/tendermint:$TAG"
|
||||
docker push "tendermint/tendermint:$TAG_NO_PATCH"
|
||||
fi
|
||||
110
Makefile
110
Makefile
@@ -1,5 +1,6 @@
|
||||
#!/usr/bin/make -f
|
||||
|
||||
PACKAGES=$(shell go list ./...)
|
||||
BUILDDIR ?= $(CURDIR)/build
|
||||
|
||||
BUILD_TAGS?=tendermint
|
||||
@@ -13,6 +14,8 @@ endif
|
||||
|
||||
LD_FLAGS = -X github.com/tendermint/tendermint/version.TMVersion=$(VERSION)
|
||||
BUILD_FLAGS = -mod=readonly -ldflags "$(LD_FLAGS)"
|
||||
HTTPS_GIT := https://github.com/tendermint/tendermint.git
|
||||
DOCKER_BUF := docker run -v $(shell pwd):/workspace --workdir /workspace bufbuild/buf
|
||||
CGO_ENABLED ?= 0
|
||||
|
||||
# handle nostrip
|
||||
@@ -76,44 +79,32 @@ $(BUILDDIR)/:
|
||||
### Protobuf ###
|
||||
###############################################################################
|
||||
|
||||
check-proto-deps:
|
||||
ifeq (,$(shell which protoc-gen-gogofaster))
|
||||
$(error "gogofaster plugin for protoc is required. Run 'go install github.com/gogo/protobuf/protoc-gen-gogofaster@latest' to install")
|
||||
endif
|
||||
.PHONY: check-proto-deps
|
||||
proto-all: proto-gen proto-lint proto-check-breaking
|
||||
.PHONY: proto-all
|
||||
|
||||
check-proto-format-deps:
|
||||
ifeq (,$(shell which clang-format))
|
||||
$(error "clang-format is required for Protobuf formatting. See instructions for your platform on how to install it.")
|
||||
endif
|
||||
.PHONY: check-proto-format-deps
|
||||
|
||||
proto-gen: check-proto-deps
|
||||
proto-gen:
|
||||
@docker pull -q tendermintdev/docker-build-proto
|
||||
@echo "Generating Protobuf files"
|
||||
@go run github.com/bufbuild/buf/cmd/buf generate
|
||||
@mv ./proto/tendermint/abci/types.pb.go ./abci/types/
|
||||
@docker run -v $(shell pwd):/workspace --workdir /workspace tendermintdev/docker-build-proto sh ./scripts/protocgen.sh
|
||||
.PHONY: proto-gen
|
||||
|
||||
# These targets are provided for convenience and are intended for local
|
||||
# execution only.
|
||||
proto-lint: check-proto-deps
|
||||
@echo "Linting Protobuf files"
|
||||
@go run github.com/bufbuild/buf/cmd/buf lint
|
||||
proto-lint:
|
||||
@$(DOCKER_BUF) check lint --error-format=json
|
||||
.PHONY: proto-lint
|
||||
|
||||
proto-format: check-proto-format-deps
|
||||
proto-format:
|
||||
@echo "Formatting Protobuf files"
|
||||
@find . -name '*.proto' -path "./proto/*" -exec clang-format -i {} \;
|
||||
docker run -v $(shell pwd):/workspace --workdir /workspace tendermintdev/docker-build-proto find ./ -not -path "./third_party/*" -name *.proto -exec clang-format -i {} \;
|
||||
.PHONY: proto-format
|
||||
|
||||
proto-check-breaking: check-proto-deps
|
||||
@echo "Checking for breaking changes in Protobuf files against local branch"
|
||||
@echo "Note: This is only useful if your changes have not yet been committed."
|
||||
@echo " Otherwise read up on buf's \"breaking\" command usage:"
|
||||
@echo " https://docs.buf.build/breaking/usage"
|
||||
@go run github.com/bufbuild/buf/cmd/buf breaking --against ".git"
|
||||
proto-check-breaking:
|
||||
@$(DOCKER_BUF) check breaking --against-input .git#branch=master
|
||||
.PHONY: proto-check-breaking
|
||||
|
||||
proto-check-breaking-ci:
|
||||
@$(DOCKER_BUF) check breaking --against-input $(HTTPS_GIT)#branch=master
|
||||
.PHONY: proto-check-breaking-ci
|
||||
|
||||
###############################################################################
|
||||
### Build ABCI ###
|
||||
###############################################################################
|
||||
@@ -127,7 +118,7 @@ install_abci:
|
||||
.PHONY: install_abci
|
||||
|
||||
###############################################################################
|
||||
### Privval Server ###
|
||||
### Privval Server ###
|
||||
###############################################################################
|
||||
|
||||
build_privval_server:
|
||||
@@ -140,11 +131,11 @@ generate_test_cert:
|
||||
# generate server cerificate
|
||||
@certstrap request-cert -cn server -ip 127.0.0.1
|
||||
# self-sign server cerificate with rootCA
|
||||
@certstrap sign server --CA "root CA"
|
||||
@certstrap sign server --CA "root CA"
|
||||
# generate client cerificate
|
||||
@certstrap request-cert -cn client -ip 127.0.0.1
|
||||
# self-sign client cerificate with rootCA
|
||||
@certstrap sign client --CA "root CA"
|
||||
@certstrap sign client --CA "root CA"
|
||||
.PHONY: generate_test_cert
|
||||
|
||||
###############################################################################
|
||||
@@ -169,7 +160,7 @@ go.sum: go.mod
|
||||
|
||||
draw_deps:
|
||||
@# requires brew install graphviz or apt-get install graphviz
|
||||
go install github.com/RobotsAndPencils/goviz@latest
|
||||
go get github.com/RobotsAndPencils/goviz
|
||||
@goviz -i github.com/tendermint/tendermint/cmd/tendermint -d 3 | dot -Tpng -o dependency-graph.png
|
||||
.PHONY: draw_deps
|
||||
|
||||
@@ -211,7 +202,7 @@ format:
|
||||
|
||||
lint:
|
||||
@echo "--> Running linter"
|
||||
go run github.com/golangci/golangci-lint/cmd/golangci-lint run
|
||||
@golangci-lint run
|
||||
.PHONY: lint
|
||||
|
||||
DESTINATION = ./index.html.md
|
||||
@@ -223,8 +214,7 @@ DESTINATION = ./index.html.md
|
||||
build-docs:
|
||||
@cd docs && \
|
||||
while read -r branch path_prefix; do \
|
||||
( git checkout $${branch} && npm ci --quiet && \
|
||||
VUEPRESS_BASE="/$${path_prefix}/" npm run build --quiet ) ; \
|
||||
(git checkout $${branch} && npm install && VUEPRESS_BASE="/$${path_prefix}/" npm run build) ; \
|
||||
mkdir -p ~/output/$${path_prefix} ; \
|
||||
cp -r .vuepress/dist/* ~/output/$${path_prefix}/ ; \
|
||||
cp ~/output/$${path_prefix}/index.html ~/output ; \
|
||||
@@ -235,34 +225,12 @@ build-docs:
|
||||
### Docker image ###
|
||||
###############################################################################
|
||||
|
||||
build-docker:
|
||||
docker build --label=tendermint --tag="tendermint/tendermint" -f DOCKER/Dockerfile .
|
||||
build-docker: build-linux
|
||||
cp $(BUILDDIR)/tendermint DOCKER/tendermint
|
||||
docker build --label=tendermint --tag="tendermint/tendermint" DOCKER
|
||||
rm -rf DOCKER/tendermint
|
||||
.PHONY: build-docker
|
||||
|
||||
|
||||
###############################################################################
|
||||
### Mocks ###
|
||||
###############################################################################
|
||||
|
||||
mockery:
|
||||
go generate -run="./scripts/mockery_generate.sh" ./...
|
||||
.PHONY: mockery
|
||||
|
||||
###############################################################################
|
||||
### Metrics ###
|
||||
###############################################################################
|
||||
|
||||
metrics: testdata-metrics
|
||||
go generate -run="scripts/metricsgen" ./...
|
||||
.PHONY: metrics
|
||||
|
||||
# By convention, the go tool ignores subdirectories of directories named
|
||||
# 'testdata'. This command invokes the generate command on the folder directly
|
||||
# to avoid this.
|
||||
testdata-metrics:
|
||||
ls ./scripts/metricsgen/testdata | xargs -I{} go generate -run="scripts/metricsgen" ./scripts/metricsgen/testdata/{}
|
||||
.PHONY: testdata-metrics
|
||||
|
||||
###############################################################################
|
||||
### Local testnet using docker ###
|
||||
###############################################################################
|
||||
@@ -326,25 +294,3 @@ build-reproducible:
|
||||
--name latest-build cosmossdk/rbuilder:latest
|
||||
docker cp -a latest-build:/home/builder/artifacts/ $(CURDIR)/
|
||||
.PHONY: build-reproducible
|
||||
|
||||
# Implements test splitting and running. This is pulled directly from
|
||||
# the github action workflows for better local reproducibility.
|
||||
|
||||
GO_TEST_FILES != find $(CURDIR) -name "*_test.go"
|
||||
|
||||
# default to four splits by default
|
||||
NUM_SPLIT ?= 4
|
||||
|
||||
$(BUILDDIR):
|
||||
mkdir -p $@
|
||||
|
||||
# The format statement filters out all packages that don't have tests.
|
||||
# Note we need to check for both in-package tests (.TestGoFiles) and
|
||||
# out-of-package tests (.XTestGoFiles).
|
||||
$(BUILDDIR)/packages.txt:$(GO_TEST_FILES) $(BUILDDIR)
|
||||
go list -f "{{ if (or .TestGoFiles .XTestGoFiles) }}{{ .ImportPath }}{{ end }}" ./... | sort > $@
|
||||
|
||||
split-test-packages:$(BUILDDIR)/packages.txt
|
||||
split -d -n l/$(NUM_SPLIT) $< $<.
|
||||
test-group-%:split-test-packages
|
||||
cat $(BUILDDIR)/packages.txt.$* | xargs go test -mod=readonly -timeout=5m -race -coverprofile=$(BUILDDIR)/$*.profile.out
|
||||
|
||||
96
README.md
96
README.md
@@ -3,13 +3,13 @@
|
||||

|
||||
|
||||
[Byzantine-Fault Tolerant](https://en.wikipedia.org/wiki/Byzantine_fault_tolerance)
|
||||
[State Machine Replication](https://en.wikipedia.org/wiki/State_machine_replication).
|
||||
[State Machines](https://en.wikipedia.org/wiki/State_machine_replication).
|
||||
Or [Blockchain](<https://en.wikipedia.org/wiki/Blockchain_(database)>), for short.
|
||||
|
||||
[](https://github.com/tendermint/tendermint/releases/latest)
|
||||
[](https://pkg.go.dev/github.com/tendermint/tendermint)
|
||||
[](https://github.com/moovweb/gvm)
|
||||
[](https://discord.gg/cosmosnetwork)
|
||||
[](https://discord.gg/vcExX9T)
|
||||
[](https://github.com/tendermint/tendermint/blob/master/LICENSE)
|
||||
[](https://github.com/tendermint/tendermint)
|
||||
[](https://sourcegraph.com/github.com/tendermint/tendermint?badge)
|
||||
@@ -20,31 +20,25 @@ Or [Blockchain](<https://en.wikipedia.org/wiki/Blockchain_(database)>), for shor
|
||||
|
||||
Tendermint Core is a Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine - written in any programming language - and securely replicates it on many machines.
|
||||
|
||||
For protocol details, refer to the [Tendermint Specification](./spec/README.md).
|
||||
For protocol details, see [the specification](https://github.com/tendermint/spec).
|
||||
|
||||
For detailed analysis of the consensus protocol, including safety and liveness proofs,
|
||||
read our paper, "[The latest gossip on BFT consensus](https://arxiv.org/abs/1807.04938)".
|
||||
|
||||
## Documentation
|
||||
|
||||
Complete documentation can be found on the [website](https://docs.tendermint.com/).
|
||||
see our recent paper, "[The latest gossip on BFT consensus](https://arxiv.org/abs/1807.04938)".
|
||||
|
||||
## Releases
|
||||
|
||||
Please do not depend on master as your production branch. Use [releases](https://github.com/tendermint/tendermint/releases) instead.
|
||||
|
||||
Tendermint has been in the production of private and public environments, most notably the blockchains of the Cosmos Network. we haven't released v1.0 yet since we are making breaking changes to the protocol and the APIs.
|
||||
Tendermint has been in the production of private and public environments, most notably the blockchains of the Cosmos Network. we haven't released v1.0 yet since we are making breaking changes to the protocol and the APIs.
|
||||
See below for more details about [versioning](#versioning).
|
||||
|
||||
In any case, if you intend to run Tendermint in production, we're happy to help. You can
|
||||
contact us [over email](mailto:hello@interchain.io) or [join the chat](https://discord.gg/cosmosnetwork).
|
||||
|
||||
More on how releases are conducted can be found [here](./RELEASES.md).
|
||||
contact us [over email](mailto:hello@interchain.berlin) or [join the chat](https://discord.gg/vcExX9T).
|
||||
|
||||
## Security
|
||||
|
||||
To report a security vulnerability, see our [bug bounty
|
||||
program](https://hackerone.com/cosmos).
|
||||
program](https://hackerone.com/tendermint).
|
||||
For examples of the kinds of bugs we're looking for, see [our security policy](SECURITY.md).
|
||||
|
||||
We also maintain a dedicated mailing list for security updates. We will only ever use this mailing list
|
||||
@@ -54,17 +48,22 @@ to notify you of vulnerabilities and fixes in Tendermint Core. You can subscribe
|
||||
|
||||
| Requirement | Notes |
|
||||
|-------------|------------------|
|
||||
| Go version | Go1.17 or higher |
|
||||
| Go version | Go1.16 or higher |
|
||||
|
||||
## Documentation
|
||||
|
||||
Complete documentation can be found on the [website](https://docs.tendermint.com/master/).
|
||||
|
||||
### Install
|
||||
|
||||
See the [install instructions](./docs/introduction/install.md).
|
||||
See the [install instructions](/docs/introduction/install.md).
|
||||
|
||||
### Quick Start
|
||||
|
||||
- [Single node](./docs/introduction/quick-start.md)
|
||||
- [Local cluster using docker-compose](./docs/tools/docker-compose.md)
|
||||
- [Remote cluster using Terraform and Ansible](./docs/tools/terraform-and-ansible.md)
|
||||
- [Single node](/docs/introduction/quick-start.md)
|
||||
- [Local cluster using docker-compose](/docs/networks/docker-compose.md)
|
||||
- [Remote cluster using Terraform and Ansible](/docs/networks/terraform-and-ansible.md)
|
||||
- [Join the Cosmos testnet](https://cosmos.network/testnet)
|
||||
|
||||
## Contributing
|
||||
|
||||
@@ -72,9 +71,9 @@ Please abide by the [Code of Conduct](CODE_OF_CONDUCT.md) in all interactions.
|
||||
|
||||
Before contributing to the project, please take a look at the [contributing guidelines](CONTRIBUTING.md)
|
||||
and the [style guide](STYLE_GUIDE.md). You may also find it helpful to read the
|
||||
[specifications](./spec/README.md),
|
||||
[specifications](https://github.com/tendermint/spec), watch the [Developer Sessions](/docs/DEV_SESSIONS.md),
|
||||
and familiarize yourself with our
|
||||
[Architectural Decision Records (ADRs)](./docs/architecture/README.md) and [Request For Comments (RFCs)](./docs/rfc/README.md).
|
||||
[Architectural Decision Records](https://github.com/tendermint/tendermint/tree/master/docs/architecture).
|
||||
|
||||
## Versioning
|
||||
|
||||
@@ -83,12 +82,32 @@ and familiarize yourself with our
|
||||
Tendermint uses [Semantic Versioning](http://semver.org/) to determine when and how the version changes.
|
||||
According to SemVer, anything in the public API can change at any time before version 1.0.0
|
||||
|
||||
To provide some stability to users of 0.X.X versions of Tendermint, the MINOR version is used
|
||||
to signal breaking changes across Tendermint's API. This API includes all
|
||||
publicly exposed types, functions, and methods in non-internal Go packages as well as
|
||||
the types and methods accessible via the Tendermint RPC interface.
|
||||
To provide some stability to Tendermint users in these 0.X.X days, the MINOR version is used
|
||||
to signal breaking changes across a subset of the total public API. This subset includes all
|
||||
interfaces exposed to other processes (cli, rpc, p2p, etc.), but does not
|
||||
include the Go APIs.
|
||||
|
||||
Breaking changes to these public APIs will be documented in the CHANGELOG.
|
||||
That said, breaking changes in the following packages will be documented in the
|
||||
CHANGELOG even if they don't lead to MINOR version bumps:
|
||||
|
||||
- crypto
|
||||
- config
|
||||
- libs
|
||||
- bits
|
||||
- bytes
|
||||
- json
|
||||
- log
|
||||
- math
|
||||
- net
|
||||
- os
|
||||
- protoio
|
||||
- rand
|
||||
- sync
|
||||
- strings
|
||||
- service
|
||||
- node
|
||||
- rpc/client
|
||||
- types
|
||||
|
||||
### Upgrades
|
||||
|
||||
@@ -96,7 +115,7 @@ In an effort to avoid accumulating technical debt prior to 1.0.0,
|
||||
we do not guarantee that breaking changes (ie. bumps in the MINOR version)
|
||||
will work with existing Tendermint blockchains. In these cases you will
|
||||
have to start a new blockchain, or write something custom to get the old
|
||||
data into the new chain. However, any bump in the PATCH version should be
|
||||
data into the new chain. However, any bump in the PATCH version should be
|
||||
compatible with existing blockchain histories.
|
||||
|
||||
|
||||
@@ -111,23 +130,24 @@ in [UPGRADING.md](./UPGRADING.md).
|
||||
|
||||
## Resources
|
||||
|
||||
### Roadmap
|
||||
### Tendermint Core
|
||||
|
||||
We keep a public up-to-date version of our roadmap [here](./docs/roadmap/roadmap.md)
|
||||
For details about the blockchain data structures and the p2p protocols, see the
|
||||
[Tendermint specification](https://docs.tendermint.com/master/spec/).
|
||||
|
||||
### Libraries
|
||||
For details on using the software, see the [documentation](/docs/) which is also
|
||||
hosted at: <https://docs.tendermint.com/master/>
|
||||
|
||||
- [Cosmos SDK](http://github.com/cosmos/cosmos-sdk); A framework for building applications in Golang
|
||||
- [Tendermint in Rust](https://github.com/informalsystems/tendermint-rs)
|
||||
- [ABCI Tower](https://github.com/penumbra-zone/tower-abci)
|
||||
### Tools
|
||||
|
||||
Benchmarking is provided by [`tm-load-test`](https://github.com/informalsystems/tm-load-test).
|
||||
Additional tooling can be found in [/docs/tools](/docs/tools).
|
||||
|
||||
### Applications
|
||||
|
||||
- [Cosmos Hub](https://hub.cosmos.network/)
|
||||
- [Terra](https://www.terra.money/)
|
||||
- [Celestia](https://celestia.org/)
|
||||
- [Anoma](https://anoma.network/)
|
||||
- [Vocdoni](https://docs.vocdoni.io/)
|
||||
- [Cosmos SDK](http://github.com/cosmos/cosmos-sdk); a cryptocurrency application framework
|
||||
- [Ethermint](http://github.com/cosmos/ethermint); Ethereum on Tendermint
|
||||
- [Many more](https://tendermint.com/ecosystem)
|
||||
|
||||
### Research
|
||||
|
||||
@@ -140,7 +160,7 @@ We keep a public up-to-date version of our roadmap [here](./docs/roadmap/roadmap
|
||||
## Join us!
|
||||
|
||||
Tendermint Core is maintained by [Interchain GmbH](https://interchain.berlin).
|
||||
If you'd like to work full-time on Tendermint Core, [we're hiring](https://interchain-gmbh.breezy.hr/)!
|
||||
If you'd like to work full-time on Tendermint Core, [we're hiring](https://interchain-gmbh.breezy.hr/p/682fb7e8a6f601-software-engineer-tendermint-core)!
|
||||
|
||||
Funding for Tendermint Core development comes primarily from the [Interchain Foundation](https://interchain.io),
|
||||
a Swiss non-profit. The Tendermint trademark is owned by [Tendermint Inc.](https://tendermint.com), the for-profit entity
|
||||
|
||||
207
RELEASES.md
207
RELEASES.md
@@ -1,207 +0,0 @@
|
||||
# Releases
|
||||
|
||||
Tendermint uses [semantic versioning](https://semver.org/) with each release following
|
||||
a `vX.Y.Z` format. The `master` branch is used for active development and thus it's
|
||||
advisable not to build against it.
|
||||
|
||||
The latest changes are always initially merged into `master`.
|
||||
Releases are specified using tags and are built from long-lived "backport" branches
|
||||
that are cut from `master` when the release process begins.
|
||||
Each release "line" (e.g. 0.34 or 0.33) has its own long-lived backport branch,
|
||||
and the backport branches have names like `v0.34.x` or `v0.33.x`
|
||||
(literally, `x`; it is not a placeholder in this case). Tendermint only
|
||||
maintains the last two releases at a time (the oldest release is predominantly
|
||||
just security patches).
|
||||
|
||||
## Backporting
|
||||
|
||||
As non-breaking changes land on `master`, they should also be backported
|
||||
to these backport branches.
|
||||
|
||||
We use Mergify's [backport feature](https://mergify.io/features/backports) to automatically backport
|
||||
to the needed branch. There should be a label for any backport branch that you'll be targeting.
|
||||
To notify the bot to backport a pull request, mark the pull request with the label corresponding
|
||||
to the correct backport branch. For example, to backport to v0.35.x, add the label `S:backport-to-v0.35.x`.
|
||||
Once the original pull request is merged, the bot will try to cherry-pick the pull request
|
||||
to the backport branch. If the bot fails to backport, it will open a pull request.
|
||||
The author of the original pull request is responsible for solving the conflicts and
|
||||
merging the pull request.
|
||||
|
||||
### Creating a backport branch
|
||||
|
||||
If this is the first release candidate for a major release, you get to have the
|
||||
honor of creating the backport branch!
|
||||
|
||||
Note that, after creating the backport branch, you'll also need to update the
|
||||
tags on `master` so that `go mod` is able to order the branches correctly. You
|
||||
should tag `master` with a "dev" tag that is "greater than" the backport
|
||||
branches tags. See [#6072](https://github.com/tendermint/tendermint/pull/6072)
|
||||
for more context.
|
||||
|
||||
In the following example, we'll assume that we're making a backport branch for
|
||||
the 0.35.x line.
|
||||
|
||||
1. Start on `master`
|
||||
|
||||
2. Create and push the backport branch:
|
||||
```sh
|
||||
git checkout -b v0.35.x
|
||||
git push origin v0.35.x
|
||||
```
|
||||
|
||||
3. Create a PR to update the documentation directory for the backport branch.
|
||||
|
||||
We only maintain RFC and ADR documents on master, to avoid confusion.
|
||||
In addition, we rewrite Markdown URLs pointing to master to point to the
|
||||
backport branch, so that generated documentation will link to the correct
|
||||
versions of files elsewhere in the repository. For context on the latter,
|
||||
see https://github.com/tendermint/tendermint/issues/7675.
|
||||
|
||||
To prepare the PR:
|
||||
```sh
|
||||
# Remove the RFC and ADR documents from the backport.
|
||||
# We only maintain these on master to avoid confusion.
|
||||
git rm -r docs/rfc docs/architecture
|
||||
|
||||
# Update absolute links to point to the backport.
|
||||
go run ./scripts/linkpatch -recur -target v0.35.x -skip-path docs/DOCS_README.md,docs/README.md docs
|
||||
|
||||
# Create and push the PR.
|
||||
git checkout -b update-docs-v035x
|
||||
git commit -m "Update docs for v0.35.x backport branch." docs
|
||||
git push -u origin update-docs-v035x
|
||||
```
|
||||
|
||||
Be sure to merge this PR before making other changes on the newly-created
|
||||
backport branch.
|
||||
|
||||
After doing these steps, go back to `master` and do the following:
|
||||
|
||||
1. Tag `master` as the dev branch for the _next_ major release and push it up to GitHub.
|
||||
For example:
|
||||
```sh
|
||||
git tag -a v0.36.0-dev -m "Development base for Tendermint v0.36."
|
||||
git push origin v0.36.0-dev
|
||||
```
|
||||
|
||||
2. Create a new workflow to run e2e nightlies for the new backport branch.
|
||||
(See [e2e-nightly-master.yml][e2e] for an example.)
|
||||
|
||||
3. Add a new section to the Mergify config (`.github/mergify.yml`) to enable the
|
||||
backport bot to work on this branch, and add a corresponding `S:backport-to-v0.35.x`
|
||||
[label](https://github.com/tendermint/tendermint/labels) so the bot can be triggered.
|
||||
|
||||
4. Add a new section to the Dependabot config (`.github/dependabot.yml`) to
|
||||
enable automatic update of Go dependencies on this branch. Copy and edit one
|
||||
of the existing branch configurations to set the correct `target-branch`.
|
||||
|
||||
[e2e]: https://github.com/tendermint/tendermint/blob/master/.github/workflows/e2e-nightly-master.yml
|
||||
|
||||
## Release candidates

Before creating an official release, especially a major release, we may want to create a
release candidate (RC) for our friends and partners to test out. We use git tags to
create RCs, and we build them off of backport branches.

Tags for RCs should follow the "standard" release naming conventions, with `-rcX` at the end
(for example, `v0.35.0-rc0`).

(Note that branches and tags _cannot_ have the same names, so it's important that these branches
have distinct names from the tags/release names.)

If this is the first RC for a major release, you'll have to make a new backport branch (see above).
Otherwise:

1. Start from the backport branch (e.g. `v0.35.x`).
2. Run the integration tests and the e2e nightlies
   (which can be triggered from the Github UI;
   e.g., https://github.com/tendermint/tendermint/actions/workflows/e2e-nightly-34x.yml).
3. Prepare the changelog:
   - Move the changes included in `CHANGELOG_PENDING.md` into `CHANGELOG.md`. Each RC should have
     its own changelog section. These will be squashed when the final candidate is released.
   - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
     all PRs.
   - Ensure that `UPGRADING.md` is up-to-date and includes notes on any breaking changes
     or other upgrading flows.
   - Bump TMVersionDefault version in `version.go`.
   - Bump P2P and block protocol versions in `version.go`, if necessary.
     Check the changelog for breaking changes in these components.
   - Bump ABCI protocol version in `version.go`, if necessary.
4. Open a PR with these changes against the backport branch.
5. Once these changes have landed on the backport branch, be sure to pull them back down locally.
6. Once you have the changes locally, create the new tag, specifying a name and a tag "message":
   `git tag -a v0.35.0-rc0 -m "Release Candidate v0.35.0-rc0"`
7. Push the tag back up to origin:
   `git push origin v0.35.0-rc0`
   Now the tag should be available on the repo's releases page.
8. Future RCs will continue to be built off of this branch.

Note that this process should only be used for "true" RCs--
release candidates that, if successful, will be the next release.
For more experimental "RCs," create a new, short-lived branch and tag that instead.

## Major release

This major release process assumes that this release was preceded by release candidates.
If there were no release candidates, begin by creating a backport branch, as described above.

1. Start on the backport branch (e.g. `v0.35.x`)
2. Run integration tests (`make test_integrations`) and the e2e nightlies.
3. Prepare the release:
   - "Squash" changes from the changelog entries for the RCs into a single entry,
     and add all changes included in `CHANGELOG_PENDING.md`.
     (Squashing includes both combining all entries, as well as removing or simplifying
     any intra-RC changes. It may also help to alphabetize the entries by package name.)
   - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
     all PRs
   - Ensure that `UPGRADING.md` is up-to-date and includes notes on any breaking changes
     or other upgrading flows.
   - Bump TMVersionDefault version in `version.go`
   - Bump P2P and block protocol versions in `version.go`, if necessary
   - Bump ABCI protocol version in `version.go`, if necessary
4. Open a PR with these changes against the backport branch.
5. Once these changes are on the backport branch, push a tag with prepared release details.
   This will trigger the actual release `v0.35.0`.
   - `git tag -a v0.35.0 -m 'Release v0.35.0'`
   - `git push origin v0.35.0`
6. Make sure that `master` is updated with the latest `CHANGELOG.md`, `CHANGELOG_PENDING.md`, and `UPGRADING.md`.
7. Add the release to the documentation site generator config (see
   [DOCS_README.md](./docs/DOCS_README.md) for more details). In summary:
   - Start on branch `master`.
   - Add a new line at the bottom of [`docs/versions`](./docs/versions) to
     ensure the newest release is the default for the landing page.
   - Add a new entry to `themeConfig.versions` in
     [`docs/.vuepress/config.js`](./docs/.vuepress/config.js) to include the
     release in the dropdown versions menu.
   - Commit these changes to `master` and backport them into the backport
     branch for this release.

## Minor release (point releases)

Minor releases are done differently from major releases: They are built off of
long-lived backport branches, rather than from master. As non-breaking changes
land on `master`, they should also be backported into these backport branches.

Minor releases don't have release candidates by default, although any tricky
changes may merit a release candidate.

To create a minor release:

1. Check out the long-lived backport branch: `git checkout v0.35.x`
2. Run integration tests (`make test_integrations`) and the nightlies.
3. Check out a new branch and prepare the release:
   - Copy `CHANGELOG_PENDING.md` to the top of `CHANGELOG.md`.
   - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for all issues.
   - Run `bash ./scripts/authors.sh` to get a list of authors since the latest release, and add the GitHub aliases of external contributors to the top of the CHANGELOG. To look up an alias from an email, try `bash ./scripts/authors.sh <email>`.
   - Reset the `CHANGELOG_PENDING.md`.
   - Bump the TMVersionDefault in `version.go`.
   - Bump the ABCI version number, if necessary.
     (Note that ABCI follows semver, and that ABCI versions are the only versions
     which can change during minor releases, and only field additions are valid minor changes.)
4. Open a PR with these changes that will land them back on `v0.35.x`.
5. Once this change has landed on the backport branch, make sure to pull it locally, then push a tag.
   - `git tag -a v0.35.1 -m 'Release v0.35.1'`
   - `git push origin v0.35.1`
6. Create a pull request back to master with the CHANGELOG & version changes from the latest release.
   - Remove all `R:minor` labels from the pull requests that were included in the release.
   - Do not merge the backport branch into master.

@@ -4,7 +4,7 @@

As part of our [Coordinated Vulnerability Disclosure
Policy](https://tendermint.com/security), we operate a [bug
bounty](https://hackerone.com/cosmos).
bounty](https://hackerone.com/tendermint).
See the policy for more details on submissions and rewards, and see "Example Vulnerabilities" (below) for examples of the kinds of bugs we're most interested in.

### Guidelines
@@ -86,7 +86,7 @@ If you are running older versions of Tendermint Core, we encourage you to upgrad

## Scope

The full scope of our bug bounty program is outlined on our [Hacker One program page](https://hackerone.com/cosmos). Please also note that, in the interest of the safety of our users and staff, a few things are explicitly excluded from scope:
The full scope of our bug bounty program is outlined on our [Hacker One program page](https://hackerone.com/tendermint). Please also note that, in the interest of the safety of our users and staff, a few things are explicitly excluded from scope:

* Any third-party services
* Findings from physical testing, such as office access

416
UPGRADING.md
@@ -2,220 +2,7 @@

This guide provides instructions for upgrading to specific versions of Tendermint Core.

## v0.36

### ABCI Changes

#### ResponseCheckTx Parameter Change

`ResponseCheckTx` had fields that are not used by Tendermint; they have now been removed.
In 0.36, we removed the following fields from `ResponseCheckTx`: `Log`, `Info`, `Events`,
`GasUsed` and `MempoolError`.
`MempoolError` was used to signal to operators that a transaction was rejected from the mempool
by Tendermint itself. Now, we return a regular error when this happens.

#### ABCI++

For information on how ABCI++ works, see the
[Specification](spec/abci%2B%2B/README.md).
In particular, the simplest way to upgrade your application is described
[here](spec/abci%2B%2B/abci++_tmint_expected_behavior.md#adapting-existing-applications-that-use-abci).

#### Moving the `app_hash` parameter

The Application's hash (or any data representing the Application's current
state) is known by the time `FinalizeBlock` finishes its execution.
Accordingly, the `app_hash` parameter has been moved from `ResponseCommit` to
`ResponseFinalizeBlock`, since it makes sense for the Application to return
this value as soon as it is known.

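For illustration, the sketch below shows the shape of this change for a hypothetical application type `MyApp`: the hash moves from the `Commit` response to the `FinalizeBlock` response. The `AppHash` field name and the `MyApp` helpers are assumptions based on the description above; check the generated ABCI types on this branch for the exact names.

```go
package myapp

import (
	abci "github.com/tendermint/tendermint/abci/types"
)

// MyApp is a hypothetical application used only to illustrate the change.
type MyApp struct {
	abci.BaseApplication
	stateHash []byte // stand-in for real application state
}

// FinalizeBlock now returns the application hash once block execution is done.
// The AppHash field name is an assumption based on this guide.
func (app *MyApp) FinalizeBlock(req abci.RequestFinalizeBlock) abci.ResponseFinalizeBlock {
	// ... execute the block's transactions against the application state ...
	return abci.ResponseFinalizeBlock{AppHash: app.stateHash}
}

// Commit no longer carries the hash; it only persists state.
func (app *MyApp) Commit() abci.ResponseCommit {
	// ... flush application state to disk ...
	return abci.ResponseCommit{}
}
```
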
#### ABCI Mutex

In previous versions of ABCI, Tendermint was prevented from making
concurrent calls to ABCI implementations by virtue of mutexes in the
implementation of Tendermint's ABCI infrastructure. These mutexes have
been removed from the current implementation and applications will now
be responsible for managing their own concurrency control.

To replicate the prior semantics, ensure that ABCI applications have a
single mutex that protects all ABCI method calls from concurrent
access. You can relax these requirements if your application can
provide safe concurrent access via other means. This safety is an
application concern, so be very sure to test the application thoroughly
using realistic workloads and the race detector to ensure your
application remains correct.

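A minimal sketch of that pattern is below, assuming the context-free `types.Application` method signatures used on this branch. The `SerializedApp` wrapper and package name are illustrative only; just two methods are shown, and the remaining ABCI methods would be wrapped the same way.

```go
package myapp

import (
	"sync"

	"github.com/tendermint/tendermint/abci/types"
)

// SerializedApp wraps another ABCI application and serializes every call
// behind a single mutex, replicating the pre-v0.36 locking behavior.
type SerializedApp struct {
	mtx sync.Mutex
	app types.Application
}

func NewSerializedApp(app types.Application) *SerializedApp {
	return &SerializedApp{app: app}
}

func (s *SerializedApp) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
	s.mtx.Lock()
	defer s.mtx.Unlock()
	return s.app.CheckTx(req)
}

func (s *SerializedApp) FinalizeBlock(req types.RequestFinalizeBlock) types.ResponseFinalizeBlock {
	s.mtx.Lock()
	defer s.mtx.Unlock()
	return s.app.FinalizeBlock(req)
}
```
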
### Config Changes

- We have added a new, experimental tool to help operators migrate
  configuration files created by previous versions of Tendermint.
  To try this tool, run:

  ```shell
  # Install the tool.
  go install github.com/tendermint/tendermint/scripts/confix@latest

  # Run the tool with the old configuration file as input.
  # Replace the -config argument with your path.
  confix -config ~/.tendermint/config/config.toml -out updated.toml
  ```

  This tool should be able to update configurations from v0.34 and v0.35. We
  plan to extend it to handle older configuration files in the future. For now,
  it will report an error (without making any changes) if it does not recognize
  the version that created the file.

- The default configuration for a newly-created node now disables indexing for
  ABCI event metadata. Existing node configurations that already have indexing
  turned on are not affected. Operators who wish to enable indexing for a new
  node, however, must now edit the `config.toml` explicitly.

- The function of seed nodes was modified in the past release. Now, seed nodes
  are treated identically to any other peer; however, they only run the PEX
  reactor. Because of this, `seeds` has been removed from the config. Users
  should add any seed nodes to the list of `bootstrap-peers`.

### RPC Changes

Tendermint v0.36 adds a new RPC event subscription API. The existing event
subscription API based on websockets is now deprecated. It will continue to
work throughout the v0.36 release, but the `subscribe`, `unsubscribe`, and
`unsubscribe_all` methods, along with websocket support, will be removed in
Tendermint v0.37. Callers currently using these features should migrate as
soon as is practical to the new API.

To enable the new API, node operators set a new `event-log-window-size`
parameter in the `[rpc]` section of the `config.toml` file. This defines a
duration of time during which the node will log all events published to the
event bus for use by RPC consumers.

Consumers use the new `events` JSON-RPC method to poll for events matching
their query in the log. Unlike the streaming API, events are not discarded if
the caller is slow, loses its connection, or crashes. As long as the client
recovers before its events expire from the log window, it will be able to
replay and catch up after recovering. Also unlike the streaming API, the client
can tell if it has truly missed events because they have expired from the log.

The `events` method is a normal JSON-RPC method, and does not require any
non-standard response processing (in contrast with the old `subscribe`).
Clients can modify their query at any time, and no longer need to coordinate
subscribe and unsubscribe calls to handle multiple queries.

The Go client implementations in the Tendermint Core repository have all been
updated to add a new `Events` method, including the light client proxy.

A new `rpc/client/eventstream` package has also been added to make it easier
for users to update existing use of the streaming API to use the polling API.
The `eventstream` package handles polling and delivers matching events to a
callback.

For more detailed information, see [ADR 075](https://tinyurl.com/adr075), which
defines and describes the new API in detail.

#### BroadcastTx Methods

All callers should use the new `broadcast_tx` method, which has the
same semantics as the legacy `broadcast_tx_sync` method. The
`broadcast_tx_sync` and `broadcast_tx_async` methods are now
deprecated and will be removed in 0.37.

Additionally, the `broadcast_tx_commit` method is *also* deprecated
and will be removed in 0.37. Client code that submits a transaction
and needs to wait for it to be committed to the chain should poll
Tendermint to observe when the transaction has been committed.

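The helper below is one way to do that polling from Go; it is a sketch, not an official API. It assumes the `Client` interface in `rpc/client` and the result types in `rpc/coretypes` keep the `BroadcastTxSync` and `Tx` methods used elsewhere in this repository; adjust import paths and names to match your version.

```go
package rpcutil

import (
	"context"
	"time"

	"github.com/tendermint/tendermint/rpc/client"
	"github.com/tendermint/tendermint/rpc/coretypes"
	"github.com/tendermint/tendermint/types"
)

// WaitForCommit submits a transaction and then polls the node until the
// transaction shows up in a committed block, replacing a former call to
// broadcast_tx_commit.
func WaitForCommit(ctx context.Context, c client.Client, tx types.Tx) (*coretypes.ResultTx, error) {
	res, err := c.BroadcastTxSync(ctx, tx)
	if err != nil {
		return nil, err
	}

	ticker := time.NewTicker(500 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
		case <-ticker.C:
			// Tx returns an error until the transaction has been indexed
			// in a block, so keep polling until it succeeds.
			if r, err := c.Tx(ctx, res.Hash, false); err == nil {
				return r, nil
			}
		}
	}
}
```
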
### Timeout Parameter Changes

Tendermint v0.36 updates how the Tendermint consensus timing parameters are
configured. These parameters, `timeout-propose`, `timeout-propose-delta`,
`timeout-prevote`, `timeout-prevote-delta`, `timeout-precommit`,
`timeout-precommit-delta`, `timeout-commit`, and `skip-timeout-commit`, were
previously configured in `config.toml`. These timing parameters have moved and
are no longer configured in the `config.toml` file. These parameters have been
migrated into the `ConsensusParams`. Nodes with these parameters set in the
local configuration file will see a warning logged on startup indicating that
these parameters are no longer used.

These parameters have also been pared down. There are no longer separate
parameters for both the `prevote` and `precommit` phases of Tendermint. The
separate `timeout-prevote` and `timeout-precommit` parameters have been merged
into a single `timeout-vote` parameter that configures both of these similar
phases of the consensus protocol.

A set of reasonable defaults has been put in place for these new parameters
that will take effect when the node starts up in version v0.36. New chains
created using v0.36 and beyond will be able to configure these parameters in the
chain's `genesis.json` file. Chains that upgrade to v0.36 from a previous
compatible version of Tendermint will begin running with the default values.
Upgrading applications that wish to use different values from the defaults for
these parameters may do so by setting the `ConsensusParams.Timeout` field of the
`FinalizeBlock` ABCI response.

As a safety measure in case of unusual timing issues during the upgrade to
v0.36, an operator may override the consensus timeout values for a single node.
Note, however, that these overrides will be removed in Tendermint v0.37. See
[configuration](https://github.com/tendermint/tendermint/blob/master/docs/nodes/configuration.md)
for more information about these overrides.

For more discussion of this, see [ADR 074](https://tinyurl.com/adr074), which
lays out the reasoning for the changes, as well as [RFC
009](https://tinyurl.com/rfc009) for a discussion of the complexities of
upgrading consensus parameters.

### RecheckTx Parameter Change

`RecheckTx` was previously enabled by the `recheck` parameter in the mempool
section of the `config.toml`.
Setting it to true made Tendermint invoke another `CheckTx` ABCI call on
each transaction remaining in the mempool following the execution of a block.
Similar to the timeout parameter changes, this parameter makes more sense as a
network-wide coordinated variable, so that applications can be written knowing
that all nodes agree on whether to run `RecheckTx`.

Applications can turn on `RecheckTx` by altering the `ConsensusParams` in the
`FinalizeBlock` ABCI response.

### CLI Changes

The functionality around resetting a node has been extended to make it safer. The
`unsafe-reset-all` command has been replaced by a `reset` command with four
subcommands: `blockchain`, `peers`, `unsafe-signer` and `unsafe-all`.

- `tendermint reset blockchain`: Clears a node of all blocks, consensus state, evidence,
  and indexed transactions. NOTE: This command does not reset application state.
  If you need to roll back the last application state (to recover from application
  nondeterminism), see instead the `tendermint rollback` command.
- `tendermint reset peers`: Clears the peer store, which persists information on peers used
  by the networking layer. This can be used to get rid of stale addresses or to switch
  to a predefined set of static peers.
- `tendermint reset unsafe-signer`: Resets the watermark level of the PrivVal File signer,
  allowing it to sign votes from the genesis height. This should only be used in testing, as
  it can lead to the node double signing.
- `tendermint reset unsafe-all`: A summation of the other three commands. This will delete
  the entire `data` directory, which may include application data as well.

### Go API Changes

#### `crypto` Package Cleanup

The `github.com/tendermint/tendermint/crypto/tmhash` package was removed
to improve clarity. Users are encouraged to use the standard library
`crypto/sha256` package directly. However, as a convenience, some constants
and one function have moved to the Tendermint `crypto` package (see the sketch
after this list):

- The `crypto.Checksum` function returns the sha256 checksum of a
  byte slice. This is a wrapper around `sha256.Sum256` from the
  standard library, but provided as a function to ease type
  requirements (the library function returns a `[32]byte` rather than
  a `[]byte`).
- `tmhash.TruncatedSize` is now `crypto.AddressSize`, which was
  previously an alias for the same value.
- `tmhash.Size` and `tmhash.BlockSize` are now `crypto.HashSize` and
  `crypto.HashSize`.
- `tmhash.SumTruncated` is now available via `crypto.AddressHash` or by
  `crypto.Checksum(<...>)[:crypto.AddressSize]`

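Put together, code that previously imported `tmhash` can typically be migrated like this; the mapping follows the list above, and the input bytes are just an example:

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/crypto"
)

func main() {
	data := []byte("example transaction bytes")

	// Previously: tmhash.Sum(data)
	sum := crypto.Checksum(data) // full sha256 digest as a []byte

	// Previously: tmhash.SumTruncated(data)
	addr := crypto.AddressHash(data)
	alsoAddr := crypto.Checksum(data)[:crypto.AddressSize]

	fmt.Println(len(sum) == crypto.HashSize)         // true
	fmt.Println(len(addr) == crypto.AddressSize)     // true
	fmt.Println(len(alsoAddr) == crypto.AddressSize) // true
}
```
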
## v0.35
## Unreleased

### ABCI Changes

@@ -230,63 +17,25 @@ and one function have moved to the Tendermint `crypto` package:

### Config Changes

* The configuration file field `[fastsync]` has been renamed to `[blocksync]`.

* The top level configuration file field `fast-sync` has moved under the new `[blocksync]`
  field as `blocksync.enable`.

* `blocksync.version = "v1"` and `blocksync.version = "v2"` (previously `fastsync`)
  are no longer supported. Please use `v0` instead. During the v0.35 release cycle, `v0` was
  determined to suit the existing needs and the cost of maintaining the `v1` and `v2` modules
  was determined to be greater than necessary.

* `fast_sync = "v1"` is no longer supported. Please use `v2` instead.

* All config parameters are now hyphen-case (also known as kebab-case) instead of snake_case. Before restarting the node make sure
  you have updated all the variables in your `config.toml` file.

* Added `--mode` flag and `mode` config variable on `config.toml` for setting Mode of the Node: `full` | `validator` | `seed` (default: `full`)
  [ADR-52](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-052-tendermint-mode.md)

* `BootstrapPeers` has been added as part of the new p2p stack. This will eventually replace
  `Seeds`. Bootstrap peers are connected with on startup if needed for peer discovery. Unlike
  persistent peers, there's no guarantee that the node will remain connected with these peers.

* configuration values starting with `priv-validator-` have moved to the new
  `priv-validator` section, without the `priv-validator-` prefix.

* The fast sync process, as well as the blockchain package and service, have all
  been renamed to block sync.

### Database Key Format Changes

The format of all tendermint on-disk database keys changes in
0.35. Upgrading nodes must either re-sync all data or run a migration
script provided in this release.

The script located in
`github.com/tendermint/tendermint/scripts/keymigrate/migrate.go` provides the
function `Migrate(context.Context, db.DB)`, which you can operationalize as
makes sense for your deployment.

For ease of use, the `tendermint` command includes a CLI version of the
migration script, which you can invoke, as in:

    tendermint key-migrate

This reads the configuration file as normal and allows the `--db-backend` and
`--db-dir` flags to override the database location as needed.

The migration operation is intended to be idempotent, and should be safe to
rerun on the same database multiple times. As a safety measure, however, we
recommend that operators test out the migration on a copy of the database
first, if it is practical to do so, before applying it to the production data.

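If you prefer to run the migration from your own tooling rather than the CLI, a minimal sketch looks like the following. The database name, backend, and directory below are placeholders; match them to your node's `db-backend` and `db-dir` settings, and repeat for each of the node's databases.

```go
package main

import (
	"context"
	"log"

	dbm "github.com/tendermint/tm-db"

	"github.com/tendermint/tendermint/scripts/keymigrate"
)

func main() {
	// Open one of the node's databases in place (placeholder name and path).
	db, err := dbm.NewGoLevelDB("blockstore", "/path/to/.tendermint/data")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Migrate rewrites the on-disk keys to the 0.35 format; it is intended
	// to be idempotent, per the notes above.
	if err := keymigrate.Migrate(context.Background(), db); err != nil {
		log.Fatal(err)
	}
}
```
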
### CLI Changes

* You must now specify the node mode (validator|full|seed) in `tendermint init [mode]`

* The `--fast-sync` command line option has been renamed to `--blocksync.enable`

* If you had previously used `tendermint gen_node_key` to generate a new node
  key, keep in mind that it no longer saves the output to a file. You can use
  `tendermint init validator` or pipe the output of `tendermint gen_node_key` to
@@ -301,8 +50,8 @@ first, if it is practical to do so, before applying it to the production data.

### API Changes

The p2p layer was reimplemented as part of the 0.35 release cycle and
all reactors were refactored to accommodate the change. As part of that work these
The p2p layer was reimplemented as part of the 0.35 release cycle, and
all reactors were refactored. As part of that work these
implementations moved into the `internal` package and are no longer
considered part of the public Go API of tendermint. These packages
are:
@@ -314,7 +63,7 @@ are:
- `blockchain`
- `evidence`

Accordingly, the `node` package changed to reduce access to
Accordingly, the space `node` package was changed to reduce access to
tendermint internals: applications that use tendermint as a library
will need to change to accommodate these changes. Most notably:

@@ -325,100 +74,6 @@ will need to change to accommodate these changes. Most notably:
longer exported and have been replaced with `node.New` and
`node.NewDefault` which provide more functional interfaces.

To access any of the functionality previously available via the
`node.Node` type, use the `*local.Local` "RPC" client, which exposes
the full RPC interface provided as direct function calls. Import the
`github.com/tendermint/tendermint/rpc/client/local` package and pass
the node service as in the following:

```go
logger := log.NewNopLogger()

// Construct and start up a node with default settings.
node := node.NewDefault(logger)

// Construct a local (in-memory) RPC client to the node.
client := local.New(logger, node.(local.NodeService))
```

### gRPC Support

gRPC in the RPC layer is deprecated and will be removed in 0.36.

### Peer Management Interface

When running with the new P2P Layer, the `UnsafeDialSeeds` and
`UnsafeDialPeers` RPC methods will always return an error. They are
deprecated and will be removed in 0.36 when the legacy peer stack is
removed.

Additionally, the format of the Peer list returned in the `NetInfo`
method changes in this release to accommodate the different way that
the new stack tracks data about peers. This change affects users of
both stacks.

### Using the updated p2p library

The P2P library was reimplemented in this release. The new implementation is
enabled by default in this version of Tendermint. The legacy implementation is still
included in this version of Tendermint as a backstop to work around unforeseen
production issues. The new and legacy versions are interoperable. If necessary,
you can enable the legacy implementation in the server configuration file.

To make use of the legacy P2P implementation, add or update the following field of
your server's configuration file under the `[p2p]` section:

```toml
[p2p]
...
use-legacy = true
...
```

If you need to do this, please consider filing an issue in the Tendermint repository
to let us know why. We plan to remove the legacy P2P code in the next (v0.36) release.

#### New p2p queue types

The new p2p implementation enables selection of the queue type to be used for
passing messages between peers.

The following values may be used when selecting which queue type to use:

* `fifo`: (**default**) An unbuffered and lossless queue that passes messages through
  in the order in which they were received.

* `priority`: A priority queue of messages.

* `wdrr`: A queue implementing the Weighted Deficit Round Robin algorithm. A
  weighted deficit round robin queue is created per peer. Each queue contains a
  separate 'flow' for each of the channels of communication that exist between any two
  peers. Tendermint maintains a channel per message type between peers. Each WDRR
  queue maintains a shared buffer with a fixed capacity through which messages on different
  flows are passed.
  For more information on WDRR scheduling, see: https://en.wikipedia.org/wiki/Deficit_round_robin

To select a queue type, add or update the following field under the `[p2p]`
section of your server's configuration file.

```toml
[p2p]
...
queue-type = "wdrr"
...
```

### Support for Custom Reactor and Mempool Implementations

The changes to the p2p layer removed existing support for custom
reactors. Based on our understanding of how this functionality was
used, the introduction of the prioritized mempool covers nearly all of
the use cases for custom reactors. If you are currently running custom
reactors and mempools and are having trouble seeing the migration path
for your project, please feel free to reach out to the Tendermint Core
development team directly.

## v0.34.0
|
||||
|
||||
**Upgrading to Tendermint 0.34 requires a blockchain restart.**
|
||||
@@ -435,7 +90,7 @@ Note also that Tendermint 0.34 also requires Go 1.16 or higher.
|
||||
were added to support the new State Sync feature.
|
||||
Previously, syncing a new node to a preexisting network could take days; but with State Sync,
|
||||
new nodes are able to join a network in a matter of seconds.
|
||||
Read [the spec](https://github.com/tendermint/tendermint/blob/master/spec/abci/apps.md)
|
||||
Read [the spec](https://docs.tendermint.com/master/spec/abci/apps.html#state-sync)
|
||||
if you want to learn more about State Sync, or if you'd like your application to use it.
|
||||
(If you don't want to support State Sync in your application, you can just implement these new
|
||||
ABCI methods as no-ops, leaving them empty.)
|
||||
@@ -560,6 +215,7 @@ The `bech32` package has moved to the Cosmos SDK:
|
||||
### CLI
|
||||
|
||||
The `tendermint lite` command has been renamed to `tendermint light` and has a slightly different API.
|
||||
See [the docs](https://docs.tendermint.com/master/tendermint-core/light-client-protocol.html#http-proxy) for details.
|
||||
|
||||
### Light Client
|
||||
|
||||
@@ -571,8 +227,8 @@ Other user-relevant changes include:
|
||||
|
||||
* The old `lite` package was removed; the new light client uses the `light` package.
|
||||
* The `Verifier` was broken up into two pieces:
|
||||
* Core verification logic (pure `VerifyX` functions)
|
||||
* `Client` object, which represents the complete light client
|
||||
* Core verification logic (pure `VerifyX` functions)
|
||||
* `Client` object, which represents the complete light client
|
||||
* The new light clients stores headers & validator sets as `LightBlock`s
|
||||
* The RPC client can be found in the `/rpc` directory.
|
||||
* The HTTP(S) proxy is located in the `/proxy` directory.
|
||||
@@ -704,12 +360,12 @@ Evidence Params has been changed to include duration.
|
||||
### Go API
|
||||
|
||||
* `libs/common` has been removed in favor of specific pkgs.
|
||||
* `async`
|
||||
* `service`
|
||||
* `rand`
|
||||
* `net`
|
||||
* `strings`
|
||||
* `cmap`
|
||||
* `async`
|
||||
* `service`
|
||||
* `rand`
|
||||
* `net`
|
||||
* `strings`
|
||||
* `cmap`
|
||||
* removal of `errors` pkg
|
||||
|
||||
### RPC Changes
|
||||
@@ -778,9 +434,9 @@ Prior to the update, suppose your `ResponseDeliverTx` look like:
|
||||
```go
|
||||
abci.ResponseDeliverTx{
|
||||
Tags: []kv.Pair{
|
||||
{Key: []byte("sender"), Value: []byte("foo")},
|
||||
{Key: []byte("recipient"), Value: []byte("bar")},
|
||||
{Key: []byte("amount"), Value: []byte("35")},
|
||||
{Key: []byte("sender"), Value: []byte("foo")},
|
||||
{Key: []byte("recipient"), Value: []byte("bar")},
|
||||
{Key: []byte("amount"), Value: []byte("35")},
|
||||
}
|
||||
}
|
||||
```
|
||||
@@ -799,14 +455,14 @@ the following `Events`:
|
||||
```go
|
||||
abci.ResponseDeliverTx{
|
||||
Events: []abci.Event{
|
||||
{
|
||||
Type: "transfer",
|
||||
Attributes: kv.Pairs{
|
||||
{Key: []byte("sender"), Value: []byte("foo")},
|
||||
{Key: []byte("recipient"), Value: []byte("bar")},
|
||||
{Key: []byte("amount"), Value: []byte("35")},
|
||||
},
|
||||
}
|
||||
{
|
||||
Type: "transfer",
|
||||
Attributes: kv.Pairs{
|
||||
{Key: []byte("sender"), Value: []byte("foo")},
|
||||
{Key: []byte("recipient"), Value: []byte("bar")},
|
||||
{Key: []byte("amount"), Value: []byte("35")},
|
||||
},
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
@@ -834,7 +490,7 @@ the compilation tag:
|
||||
|
||||
Use `cleveldb` tag instead of `gcc` to compile Tendermint with CLevelDB or
|
||||
use `make build_c` / `make install_c` (full instructions can be found at
|
||||
<https://docs.tendermint.com/v0.35/introduction/install.html)
|
||||
<https://tendermint.com/docs/introduction/install.html#compile-with-cleveldb-support>)
|
||||
|
||||
## v0.31.0
|
||||
|
||||
@@ -854,9 +510,9 @@ In this case, the WS client will receive an error with description:
|
||||
"jsonrpc": "2.0",
|
||||
"id": "{ID}#event",
|
||||
"error": {
|
||||
"code": -32000,
|
||||
"msg": "Server error",
|
||||
"data": "subscription was canceled (reason: client is not pulling messages fast enough)" // or "subscription was canceled (reason: Tendermint exited)"
|
||||
"code": -32000,
|
||||
"msg": "Server error",
|
||||
"data": "subscription was canceled (reason: client is not pulling messages fast enough)" // or "subscription was canceled (reason: Tendermint exited)"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1062,9 +718,9 @@ just the `Data` field set:
|
||||
|
||||
```go
|
||||
[]ProofOp{
|
||||
ProofOp{
|
||||
Data: <proof bytes>,
|
||||
}
|
||||
ProofOp{
|
||||
Data: <proof bytes>,
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
@@ -19,7 +19,7 @@ To get up and running quickly, see the [getting started guide](../docs/app-dev/g
|
||||
|
||||
A detailed description of the ABCI methods and message types is contained in:
|
||||
|
||||
- [The main spec](../spec/abci/abci.md)
|
||||
- [The main spec](https://github.com/tendermint/spec/blob/master/spec/abci/abci.md)
|
||||
- [A protobuf file](../proto/tendermint/abci/types.proto)
|
||||
- [A Go interface](./types/application.go)
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
package abciclient
|
||||
package abcicli
|
||||
|
||||
import (
|
||||
"context"
|
||||
@@ -6,7 +6,7 @@ import (
|
||||
"sync"
|
||||
|
||||
"github.com/tendermint/tendermint/abci/types"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
|
||||
"github.com/tendermint/tendermint/libs/service"
|
||||
)
|
||||
|
||||
@@ -15,57 +15,138 @@ const (
|
||||
echoRetryIntervalSeconds = 1
|
||||
)
|
||||
|
||||
//go:generate ../../scripts/mockery_generate.sh Client
|
||||
//go:generate mockery --case underscore --name Client
|
||||
|
||||
// Client defines the interface for an ABCI client.
|
||||
// Client defines an interface for an ABCI client.
|
||||
//
|
||||
// All `Async` methods return a `ReqRes` object and an error.
|
||||
// All `Sync` methods return the appropriate protobuf ResponseXxx struct and an error.
|
||||
//
|
||||
// NOTE these are client errors, eg. ABCI socket connectivity issues.
|
||||
// Application-related errors are reflected in response via ABCI error codes
|
||||
// and (potentially) error response.
|
||||
// and logs.
|
||||
type Client interface {
|
||||
service.Service
|
||||
types.Application
|
||||
|
||||
SetResponseCallback(Callback)
|
||||
Error() error
|
||||
Flush(context.Context) error
|
||||
Echo(context.Context, string) (*types.ResponseEcho, error)
|
||||
|
||||
// Asynchronous requests
|
||||
FlushAsync(context.Context) (*ReqRes, error)
|
||||
EchoAsync(ctx context.Context, msg string) (*ReqRes, error)
|
||||
InfoAsync(context.Context, types.RequestInfo) (*ReqRes, error)
|
||||
CheckTxAsync(context.Context, types.RequestCheckTx) (*ReqRes, error)
|
||||
QueryAsync(context.Context, types.RequestQuery) (*ReqRes, error)
|
||||
CommitAsync(context.Context) (*ReqRes, error)
|
||||
InitChainAsync(context.Context, types.RequestInitChain) (*ReqRes, error)
|
||||
ListSnapshotsAsync(context.Context, types.RequestListSnapshots) (*ReqRes, error)
|
||||
OfferSnapshotAsync(context.Context, types.RequestOfferSnapshot) (*ReqRes, error)
|
||||
LoadSnapshotChunkAsync(context.Context, types.RequestLoadSnapshotChunk) (*ReqRes, error)
|
||||
ApplySnapshotChunkAsync(context.Context, types.RequestApplySnapshotChunk) (*ReqRes, error)
|
||||
FinalizeBlockAsync(context.Context, types.RequestFinalizeBlock) (*ReqRes, error)
|
||||
|
||||
// Synchronous requests
|
||||
FlushSync(context.Context) error
|
||||
EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error)
|
||||
InfoSync(context.Context, types.RequestInfo) (*types.ResponseInfo, error)
|
||||
CheckTxSync(context.Context, types.RequestCheckTx) (*types.ResponseCheckTx, error)
|
||||
QuerySync(context.Context, types.RequestQuery) (*types.ResponseQuery, error)
|
||||
CommitSync(context.Context) (*types.ResponseCommit, error)
|
||||
InitChainSync(context.Context, types.RequestInitChain) (*types.ResponseInitChain, error)
|
||||
ListSnapshotsSync(context.Context, types.RequestListSnapshots) (*types.ResponseListSnapshots, error)
|
||||
OfferSnapshotSync(context.Context, types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error)
|
||||
LoadSnapshotChunkSync(context.Context, types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error)
|
||||
ApplySnapshotChunkSync(context.Context, types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error)
|
||||
FinalizeBlockSync(context.Context, types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error)
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
|
||||
// NewClient returns a new ABCI client of the specified transport type.
|
||||
// It returns an error if the transport is not "socket" or "grpc"
|
||||
func NewClient(logger log.Logger, addr, transport string, mustConnect bool) (Client, error) {
|
||||
func NewClient(addr, transport string, mustConnect bool) (client Client, err error) {
|
||||
switch transport {
|
||||
case "socket":
|
||||
return NewSocketClient(logger, addr, mustConnect), nil
|
||||
client = NewSocketClient(addr, mustConnect)
|
||||
case "grpc":
|
||||
return NewGRPCClient(logger, addr, mustConnect), nil
|
||||
client = NewGRPCClient(addr, mustConnect)
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown abci transport %s", transport)
|
||||
err = fmt.Errorf("unknown abci transport %s", transport)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
type requestAndResponse struct {
|
||||
type Callback func(*types.Request, *types.Response)
|
||||
|
||||
type ReqRes struct {
|
||||
*types.Request
|
||||
*types.Response
|
||||
*sync.WaitGroup
|
||||
*types.Response // Not set atomically, so be sure to use WaitGroup.
|
||||
|
||||
mtx sync.Mutex
|
||||
signal chan struct{}
|
||||
mtx tmsync.RWMutex
|
||||
done bool // Gets set to true once *after* WaitGroup.Done().
|
||||
cb func(*types.Response) // A single callback that may be set.
|
||||
}
|
||||
|
||||
func makeReqRes(req *types.Request) *requestAndResponse {
|
||||
return &requestAndResponse{
|
||||
Request: req,
|
||||
Response: nil,
|
||||
signal: make(chan struct{}),
|
||||
func NewReqRes(req *types.Request) *ReqRes {
|
||||
return &ReqRes{
|
||||
Request: req,
|
||||
WaitGroup: waitGroup1(),
|
||||
Response: nil,
|
||||
|
||||
done: false,
|
||||
cb: nil,
|
||||
}
|
||||
}
|
||||
|
||||
// markDone marks the ReqRes object as done.
|
||||
func (r *requestAndResponse) markDone() {
|
||||
// Sets sets the callback. If reqRes is already done, it will call the cb
|
||||
// immediately. Note, reqRes.cb should not change if reqRes.done and only one
|
||||
// callback is supported.
|
||||
func (r *ReqRes) SetCallback(cb func(res *types.Response)) {
|
||||
r.mtx.Lock()
|
||||
|
||||
if r.done {
|
||||
r.mtx.Unlock()
|
||||
cb(r.Response)
|
||||
return
|
||||
}
|
||||
|
||||
r.cb = cb
|
||||
r.mtx.Unlock()
|
||||
}
|
||||
|
||||
// InvokeCallback invokes a thread-safe execution of the configured callback
|
||||
// if non-nil.
|
||||
func (r *ReqRes) InvokeCallback() {
|
||||
r.mtx.Lock()
|
||||
defer r.mtx.Unlock()
|
||||
|
||||
close(r.signal)
|
||||
if r.cb != nil {
|
||||
r.cb(r.Response)
|
||||
}
|
||||
}
|
||||
|
||||
// GetCallback returns the configured callback of the ReqRes object which may be
|
||||
// nil. Note, it is not safe to concurrently call this in cases where it is
|
||||
// marked done and SetCallback is called before calling GetCallback as that
|
||||
// will invoke the callback twice and create a potential race condition.
|
||||
//
|
||||
// ref: https://github.com/tendermint/tendermint/issues/5439
|
||||
func (r *ReqRes) GetCallback() func(*types.Response) {
|
||||
r.mtx.RLock()
|
||||
defer r.mtx.RUnlock()
|
||||
return r.cb
|
||||
}
|
||||
|
||||
// SetDone marks the ReqRes object as done.
|
||||
func (r *ReqRes) SetDone() {
|
||||
r.mtx.Lock()
|
||||
defer r.mtx.Unlock()
|
||||
r.done = true
|
||||
}
|
||||
|
||||
func waitGroup1() (wg *sync.WaitGroup) {
|
||||
wg = &sync.WaitGroup{}
|
||||
wg.Add(1)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
// Package abciclient provides an ABCI implementation in Go.
|
||||
// Package abcicli provides an ABCI implementation in Go.
|
||||
//
|
||||
// There are 3 clients available:
|
||||
// 1. socket (unix or TCP)
|
||||
@@ -7,14 +7,23 @@
|
||||
//
|
||||
// ## Socket client
|
||||
//
|
||||
// The client blocks for enqueuing the request, for enqueuing the
|
||||
// Flush to send the request, and for the Flush response to return.
|
||||
// async: the client maintains an internal buffer of a fixed size. when the
|
||||
// buffer becomes full, all Async calls will return an error immediately.
|
||||
//
|
||||
// sync: the client blocks on 1) enqueuing the Sync request 2) enqueuing the
|
||||
// Flush requests 3) waiting for the Flush response
|
||||
//
|
||||
// ## Local client
|
||||
//
|
||||
// The global mutex is locked during each call
|
||||
// async: global mutex is locked during each call (meaning it's not really async!)
|
||||
// sync: global mutex is locked during each call
|
||||
//
|
||||
// ## gRPC client
|
||||
//
|
||||
// The client waits for all calls to complete.
|
||||
package abciclient
|
||||
// async: gRPC is synchronous, but an internal buffer of a fixed size is used
|
||||
// to store responses and later call callbacks (separate goroutine per
|
||||
// response).
|
||||
//
|
||||
// sync: waits for all Async calls to complete (essentially what Flush does in
|
||||
// the socket client) and calls Sync method.
|
||||
package abcicli
|
||||
|
||||
@@ -1,18 +1,16 @@
|
||||
package abciclient
|
||||
package abcicli
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials/insecure"
|
||||
|
||||
"github.com/tendermint/tendermint/abci/types"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
|
||||
tmnet "github.com/tendermint/tendermint/libs/net"
|
||||
"github.com/tendermint/tendermint/libs/service"
|
||||
)
|
||||
@@ -20,16 +18,16 @@ import (
|
||||
// A gRPC client.
|
||||
type grpcClient struct {
|
||||
service.BaseService
|
||||
logger log.Logger
|
||||
|
||||
mustConnect bool
|
||||
|
||||
client types.ABCIApplicationClient
|
||||
conn *grpc.ClientConn
|
||||
client types.ABCIApplicationClient
|
||||
conn *grpc.ClientConn
|
||||
chReqRes chan *ReqRes // dispatches "async" responses to callbacks *in order*, needed by mempool
|
||||
|
||||
mtx sync.Mutex
|
||||
addr string
|
||||
err error
|
||||
mtx tmsync.RWMutex
|
||||
addr string
|
||||
err error
|
||||
resCb func(*types.Request, *types.Response) // listens to all callbacks
|
||||
}
|
||||
|
||||
var _ Client = (*grpcClient)(nil)
|
||||
@@ -37,13 +35,26 @@ var _ Client = (*grpcClient)(nil)
|
||||
// NewGRPCClient creates a gRPC client, which will connect to addr upon the
|
||||
// start. Note Client#Start returns an error if connection is unsuccessful and
|
||||
// mustConnect is true.
|
||||
func NewGRPCClient(logger log.Logger, addr string, mustConnect bool) Client {
|
||||
//
|
||||
// GRPC calls are synchronous, but some callbacks expect to be called
|
||||
// asynchronously (eg. the mempool expects to be able to lock to remove bad txs
|
||||
// from cache). To accommodate, we finish each call in its own go-routine,
|
||||
// which is expensive, but easy - if you want something better, use the socket
|
||||
// protocol! maybe one day, if people really want it, we use grpc streams, but
|
||||
// hopefully not :D
|
||||
func NewGRPCClient(addr string, mustConnect bool) Client {
|
||||
cli := &grpcClient{
|
||||
logger: logger,
|
||||
addr: addr,
|
||||
mustConnect: mustConnect,
|
||||
// Buffering the channel is needed to make calls appear asynchronous,
|
||||
// which is required when the caller makes multiple async calls before
|
||||
// processing callbacks (e.g. due to holding locks). 64 means that a
|
||||
// caller can make up to 64 async calls before a callback must be
|
||||
// processed (otherwise it deadlocks). It also means that we can make 64
|
||||
// gRPC calls while processing a slow callback at the channel head.
|
||||
chReqRes: make(chan *ReqRes, 64),
|
||||
}
|
||||
cli.BaseService = *service.NewBaseService(logger, "grpcClient", cli)
|
||||
cli.BaseService = *service.NewBaseService(nil, "grpcClient", cli)
|
||||
return cli
|
||||
}
|
||||
|
||||
@@ -51,52 +62,61 @@ func dialerFunc(ctx context.Context, addr string) (net.Conn, error) {
|
||||
return tmnet.Connect(addr)
|
||||
}
|
||||
|
||||
func (cli *grpcClient) OnStart(ctx context.Context) error {
|
||||
timer := time.NewTimer(0)
|
||||
defer timer.Stop()
|
||||
func (cli *grpcClient) OnStart() error {
|
||||
// This processes asynchronous request/response messages and dispatches
|
||||
// them to callbacks.
|
||||
go func() {
|
||||
// Use a separate function to use defer for mutex unlocks (this handles panics)
|
||||
callCb := func(reqres *ReqRes) {
|
||||
cli.mtx.Lock()
|
||||
defer cli.mtx.Unlock()
|
||||
|
||||
reqres.SetDone()
|
||||
reqres.Done()
|
||||
|
||||
// Notify client listener if set
|
||||
if cli.resCb != nil {
|
||||
cli.resCb(reqres.Request, reqres.Response)
|
||||
}
|
||||
|
||||
// Notify reqRes listener if set
|
||||
if cb := reqres.GetCallback(); cb != nil {
|
||||
cb(reqres.Response)
|
||||
}
|
||||
}
|
||||
for reqres := range cli.chReqRes {
|
||||
if reqres != nil {
|
||||
callCb(reqres)
|
||||
} else {
|
||||
cli.Logger.Error("Received nil reqres")
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
RETRY_LOOP:
|
||||
for {
|
||||
conn, err := grpc.Dial(cli.addr,
|
||||
grpc.WithTransportCredentials(insecure.NewCredentials()),
|
||||
grpc.WithContextDialer(dialerFunc),
|
||||
)
|
||||
conn, err := grpc.Dial(cli.addr, grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc))
|
||||
if err != nil {
|
||||
if cli.mustConnect {
|
||||
return err
|
||||
}
|
||||
cli.logger.Error(fmt.Sprintf("abci.grpcClient failed to connect to %v. Retrying...\n", cli.addr), "err", err)
|
||||
timer.Reset(time.Second * dialRetryIntervalSeconds)
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
case <-timer.C:
|
||||
continue RETRY_LOOP
|
||||
}
|
||||
cli.Logger.Error(fmt.Sprintf("abci.grpcClient failed to connect to %v. Retrying...\n", cli.addr), "err", err)
|
||||
time.Sleep(time.Second * dialRetryIntervalSeconds)
|
||||
continue RETRY_LOOP
|
||||
}
|
||||
|
||||
cli.logger.Info("Dialed server. Waiting for echo.", "addr", cli.addr)
|
||||
cli.Logger.Info("Dialed server. Waiting for echo.", "addr", cli.addr)
|
||||
client := types.NewABCIApplicationClient(conn)
|
||||
cli.conn = conn
|
||||
|
||||
ENSURE_CONNECTED:
|
||||
for {
|
||||
_, err := client.Echo(ctx, &types.RequestEcho{Message: "hello"}, grpc.WaitForReady(true))
|
||||
_, err := client.Echo(context.Background(), &types.RequestEcho{Message: "hello"}, grpc.WaitForReady(true))
|
||||
if err == nil {
|
||||
break ENSURE_CONNECTED
|
||||
}
|
||||
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
|
||||
return err
|
||||
}
|
||||
|
||||
cli.logger.Error("Echo failed", "err", err)
|
||||
timer.Reset(time.Second * echoRetryIntervalSeconds)
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
case <-timer.C:
|
||||
continue ENSURE_CONNECTED
|
||||
}
|
||||
cli.Logger.Error("Echo failed", "err", err)
|
||||
time.Sleep(time.Second * echoRetryIntervalSeconds)
|
||||
}
|
||||
|
||||
cli.client = client
|
||||
@@ -105,81 +125,343 @@ RETRY_LOOP:
|
||||
}
|
||||
|
||||
func (cli *grpcClient) OnStop() {
|
||||
cli.mtx.Lock()
|
||||
defer cli.mtx.Unlock()
|
||||
|
||||
if cli.conn != nil {
|
||||
cli.err = cli.conn.Close()
|
||||
cli.conn.Close()
|
||||
}
|
||||
close(cli.chReqRes)
|
||||
}
|
||||
|
||||
func (cli *grpcClient) StopForError(err error) {
|
||||
if !cli.IsRunning() {
|
||||
return
|
||||
}
|
||||
|
||||
cli.mtx.Lock()
|
||||
if cli.err == nil {
|
||||
cli.err = err
|
||||
}
|
||||
cli.mtx.Unlock()
|
||||
|
||||
cli.Logger.Error(fmt.Sprintf("Stopping abci.grpcClient for error: %v", err.Error()))
|
||||
if err := cli.Stop(); err != nil {
|
||||
cli.Logger.Error("Error stopping abci.grpcClient", "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (cli *grpcClient) Error() error {
|
||||
cli.mtx.RLock()
|
||||
defer cli.mtx.RUnlock()
|
||||
return cli.err
|
||||
}
|
||||
|
||||
// Set listener for all responses
|
||||
// NOTE: callback may get internally generated flush responses.
|
||||
func (cli *grpcClient) SetResponseCallback(resCb Callback) {
|
||||
cli.mtx.Lock()
|
||||
defer cli.mtx.Unlock()
|
||||
|
||||
return cli.err
|
||||
cli.resCb = resCb
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
|
||||
func (cli *grpcClient) Flush(ctx context.Context) error { return nil }
|
||||
|
||||
func (cli *grpcClient) Echo(ctx context.Context, msg string) (*types.ResponseEcho, error) {
|
||||
return cli.client.Echo(ctx, types.ToRequestEcho(msg).GetEcho(), grpc.WaitForReady(true))
|
||||
// NOTE: call is synchronous, use ctx to break early if needed
|
||||
func (cli *grpcClient) EchoAsync(ctx context.Context, msg string) (*ReqRes, error) {
|
||||
req := types.ToRequestEcho(msg)
|
||||
res, err := cli.client.Echo(ctx, req.GetEcho(), grpc.WaitForReady(true))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Echo{Echo: res}})
|
||||
}
|
||||
|
||||
func (cli *grpcClient) Info(ctx context.Context, params *types.RequestInfo) (*types.ResponseInfo, error) {
|
||||
return cli.client.Info(ctx, types.ToRequestInfo(params).GetInfo(), grpc.WaitForReady(true))
|
||||
// NOTE: call is synchronous, use ctx to break early if needed
|
||||
func (cli *grpcClient) FlushAsync(ctx context.Context) (*ReqRes, error) {
|
||||
req := types.ToRequestFlush()
|
||||
res, err := cli.client.Flush(ctx, req.GetFlush(), grpc.WaitForReady(true))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Flush{Flush: res}})
|
||||
}
|
||||
|
||||
func (cli *grpcClient) CheckTx(ctx context.Context, params *types.RequestCheckTx) (*types.ResponseCheckTx, error) {
|
||||
return cli.client.CheckTx(ctx, types.ToRequestCheckTx(params).GetCheckTx(), grpc.WaitForReady(true))
|
||||
// NOTE: call is synchronous, use ctx to break early if needed
|
||||
func (cli *grpcClient) InfoAsync(ctx context.Context, params types.RequestInfo) (*ReqRes, error) {
|
||||
req := types.ToRequestInfo(params)
|
||||
res, err := cli.client.Info(ctx, req.GetInfo(), grpc.WaitForReady(true))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Info{Info: res}})
|
||||
}
|
||||
|
||||
func (cli *grpcClient) Query(ctx context.Context, params *types.RequestQuery) (*types.ResponseQuery, error) {
|
||||
return cli.client.Query(ctx, types.ToRequestQuery(params).GetQuery(), grpc.WaitForReady(true))
|
||||
// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) CheckTxAsync(ctx context.Context, params types.RequestCheckTx) (*ReqRes, error) {
	req := types.ToRequestCheckTx(params)
	res, err := cli.client.CheckTx(ctx, req.GetCheckTx(), grpc.WaitForReady(true))
	if err != nil {
		return nil, err
	}
	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_CheckTx{CheckTx: res}})
}

func (cli *grpcClient) Commit(ctx context.Context) (*types.ResponseCommit, error) {
	return cli.client.Commit(ctx, types.ToRequestCommit().GetCommit(), grpc.WaitForReady(true))
// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) QueryAsync(ctx context.Context, params types.RequestQuery) (*ReqRes, error) {
	req := types.ToRequestQuery(params)
	res, err := cli.client.Query(ctx, req.GetQuery(), grpc.WaitForReady(true))
	if err != nil {
		return nil, err
	}
	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Query{Query: res}})
}

func (cli *grpcClient) InitChain(ctx context.Context, params *types.RequestInitChain) (*types.ResponseInitChain, error) {
	return cli.client.InitChain(ctx, types.ToRequestInitChain(params).GetInitChain(), grpc.WaitForReady(true))
// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) CommitAsync(ctx context.Context) (*ReqRes, error) {
	req := types.ToRequestCommit()
	res, err := cli.client.Commit(ctx, req.GetCommit(), grpc.WaitForReady(true))
	if err != nil {
		return nil, err
	}
	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Commit{Commit: res}})
}

func (cli *grpcClient) ListSnapshots(ctx context.Context, params *types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
	return cli.client.ListSnapshots(ctx, types.ToRequestListSnapshots(params).GetListSnapshots(), grpc.WaitForReady(true))
// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) InitChainAsync(ctx context.Context, params types.RequestInitChain) (*ReqRes, error) {
	req := types.ToRequestInitChain(params)
	res, err := cli.client.InitChain(ctx, req.GetInitChain(), grpc.WaitForReady(true))
	if err != nil {
		return nil, err
	}
	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_InitChain{InitChain: res}})
}

func (cli *grpcClient) OfferSnapshot(ctx context.Context, params *types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
	return cli.client.OfferSnapshot(ctx, types.ToRequestOfferSnapshot(params).GetOfferSnapshot(), grpc.WaitForReady(true))
// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) ListSnapshotsAsync(ctx context.Context, params types.RequestListSnapshots) (*ReqRes, error) {
	req := types.ToRequestListSnapshots(params)
	res, err := cli.client.ListSnapshots(ctx, req.GetListSnapshots(), grpc.WaitForReady(true))
	if err != nil {
		return nil, err
	}
	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_ListSnapshots{ListSnapshots: res}})
}

func (cli *grpcClient) LoadSnapshotChunk(ctx context.Context, params *types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
	return cli.client.LoadSnapshotChunk(ctx, types.ToRequestLoadSnapshotChunk(params).GetLoadSnapshotChunk(), grpc.WaitForReady(true))
// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) OfferSnapshotAsync(ctx context.Context, params types.RequestOfferSnapshot) (*ReqRes, error) {
	req := types.ToRequestOfferSnapshot(params)
	res, err := cli.client.OfferSnapshot(ctx, req.GetOfferSnapshot(), grpc.WaitForReady(true))
	if err != nil {
		return nil, err
	}
	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_OfferSnapshot{OfferSnapshot: res}})
}

func (cli *grpcClient) ApplySnapshotChunk(ctx context.Context, params *types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
	return cli.client.ApplySnapshotChunk(ctx, types.ToRequestApplySnapshotChunk(params).GetApplySnapshotChunk(), grpc.WaitForReady(true))
// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) LoadSnapshotChunkAsync(
	ctx context.Context,
	params types.RequestLoadSnapshotChunk,
) (*ReqRes, error) {
	req := types.ToRequestLoadSnapshotChunk(params)
	res, err := cli.client.LoadSnapshotChunk(ctx, req.GetLoadSnapshotChunk(), grpc.WaitForReady(true))
	if err != nil {
		return nil, err
	}
	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_LoadSnapshotChunk{LoadSnapshotChunk: res}})
}

func (cli *grpcClient) PrepareProposal(ctx context.Context, params *types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
	return cli.client.PrepareProposal(ctx, types.ToRequestPrepareProposal(params).GetPrepareProposal(), grpc.WaitForReady(true))
// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) ApplySnapshotChunkAsync(
	ctx context.Context,
	params types.RequestApplySnapshotChunk,
) (*ReqRes, error) {
	req := types.ToRequestApplySnapshotChunk(params)
	res, err := cli.client.ApplySnapshotChunk(ctx, req.GetApplySnapshotChunk(), grpc.WaitForReady(true))
	if err != nil {
		return nil, err
	}
	return cli.finishAsyncCall(
		ctx,
		req,
		&types.Response{Value: &types.Response_ApplySnapshotChunk{ApplySnapshotChunk: res}},
	)
}

func (cli *grpcClient) ProcessProposal(ctx context.Context, params *types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
	return cli.client.ProcessProposal(ctx, types.ToRequestProcessProposal(params).GetProcessProposal(), grpc.WaitForReady(true))
func (cli *grpcClient) FinalizeBlockAsync(
	ctx context.Context,
	params types.RequestFinalizeBlock,
) (*ReqRes, error) {
	req := types.ToRequestFinalizeBlock(params)
	res, err := cli.client.FinalizeBlock(ctx, req.GetFinalizeBlock(), grpc.WaitForReady(true))
	if err != nil {
		return nil, err
	}
	return cli.finishAsyncCall(
		ctx,
		req,
		&types.Response{Value: &types.Response_FinalizeBlock{FinalizeBlock: res}},
	)
}

func (cli *grpcClient) ExtendVote(ctx context.Context, params *types.RequestExtendVote) (*types.ResponseExtendVote, error) {
	return cli.client.ExtendVote(ctx, types.ToRequestExtendVote(params).GetExtendVote(), grpc.WaitForReady(true))
// finishAsyncCall creates a ReqRes for an async call, and immediately populates it
// with the response. We don't complete it until it's been ordered via the channel.
func (cli *grpcClient) finishAsyncCall(ctx context.Context, req *types.Request, res *types.Response) (*ReqRes, error) {
	reqres := NewReqRes(req)
	reqres.Response = res
	select {
	case cli.chReqRes <- reqres: // use channel for async responses, since they must be ordered
		return reqres, nil
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}

func (cli *grpcClient) VerifyVoteExtension(ctx context.Context, params *types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) {
	return cli.client.VerifyVoteExtension(ctx, types.ToRequestVerifyVoteExtension(params).GetVerifyVoteExtension(), grpc.WaitForReady(true))
// finishSyncCall waits for an async call to complete. It is necessary to call all
// sync calls asynchronously as well, to maintain call and response ordering via
// the channel, and this method will wait until the async call completes.
func (cli *grpcClient) finishSyncCall(reqres *ReqRes) *types.Response {
	// It's possible that the callback is called twice, since the callback can
	// be called immediately on SetCallback() in addition to after it has been
	// set. This is because completing the ReqRes happens in a separate critical
	// section from the one where the callback is called: there is a race where
	// SetCallback() is called between completing the ReqRes and dispatching the
	// callback.
	//
	// We also buffer the channel with 1 response, since SetCallback() will be
	// called synchronously if the reqres is already completed, in which case
	// it will block on sending to the channel since it hasn't gotten around to
	// receiving from it yet.
	//
	// ReqRes should really handle callback dispatch internally, to guarantee
	// that it's only called once and avoid the above race conditions.
	var once sync.Once
	ch := make(chan *types.Response, 1)
	reqres.SetCallback(func(res *types.Response) {
		once.Do(func() {
			ch <- res
		})
	})
	return <-ch
}
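The two helpers above are the heart of the gRPC client's sync-over-async design: every Sync method funnels through an Async call so responses stay ordered on one channel, and finishSyncCall guards against the double-callback race with a sync.Once plus a channel buffered to one element. As a rough, self-contained illustration of that guard only (the waiter type below is hypothetical and not part of this package):

	package main

	import (
		"fmt"
		"sync"
	)

	// waiter mimics the "deliver exactly once, never block" behaviour described
	// in the comments above; ReqRes/SetCallback are replaced by a plain struct.
	type waiter struct {
		once sync.Once
		ch   chan string // buffered with 1 so a synchronous delivery never blocks
	}

	func newWaiter() *waiter { return &waiter{ch: make(chan string, 1)} }

	// deliver may be called more than once; only the first call is kept.
	func (w *waiter) deliver(res string) {
		w.once.Do(func() { w.ch <- res })
	}

	func main() {
		w := newWaiter()
		w.deliver("response")
		w.deliver("duplicate") // dropped by sync.Once
		fmt.Println(<-w.ch)    // prints "response"
	}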
func (cli *grpcClient) FinalizeBlock(ctx context.Context, params *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
	return cli.client.FinalizeBlock(ctx, types.ToRequestFinalizeBlock(params).GetFinalizeBlock(), grpc.WaitForReady(true))
//----------------------------------------

func (cli *grpcClient) FlushSync(ctx context.Context) error {
	return nil
}

func (cli *grpcClient) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error) {
	reqres, err := cli.EchoAsync(ctx, msg)
	if err != nil {
		return nil, err
	}
	return cli.finishSyncCall(reqres).GetEcho(), cli.Error()
}

func (cli *grpcClient) InfoSync(
	ctx context.Context,
	req types.RequestInfo,
) (*types.ResponseInfo, error) {
	reqres, err := cli.InfoAsync(ctx, req)
	if err != nil {
		return nil, err
	}
	return cli.finishSyncCall(reqres).GetInfo(), cli.Error()
}

func (cli *grpcClient) CheckTxSync(
	ctx context.Context,
	params types.RequestCheckTx,
) (*types.ResponseCheckTx, error) {

	reqres, err := cli.CheckTxAsync(ctx, params)
	if err != nil {
		return nil, err
	}
	return cli.finishSyncCall(reqres).GetCheckTx(), cli.Error()
}

func (cli *grpcClient) QuerySync(
	ctx context.Context,
	req types.RequestQuery,
) (*types.ResponseQuery, error) {
	reqres, err := cli.QueryAsync(ctx, req)
	if err != nil {
		return nil, err
	}
	return cli.finishSyncCall(reqres).GetQuery(), cli.Error()
}

func (cli *grpcClient) CommitSync(ctx context.Context) (*types.ResponseCommit, error) {
	reqres, err := cli.CommitAsync(ctx)
	if err != nil {
		return nil, err
	}
	return cli.finishSyncCall(reqres).GetCommit(), cli.Error()
}

func (cli *grpcClient) InitChainSync(
	ctx context.Context,
	params types.RequestInitChain,
) (*types.ResponseInitChain, error) {

	reqres, err := cli.InitChainAsync(ctx, params)
	if err != nil {
		return nil, err
	}
	return cli.finishSyncCall(reqres).GetInitChain(), cli.Error()
}

func (cli *grpcClient) ListSnapshotsSync(
	ctx context.Context,
	params types.RequestListSnapshots,
) (*types.ResponseListSnapshots, error) {

	reqres, err := cli.ListSnapshotsAsync(ctx, params)
	if err != nil {
		return nil, err
	}
	return cli.finishSyncCall(reqres).GetListSnapshots(), cli.Error()
}

func (cli *grpcClient) OfferSnapshotSync(
	ctx context.Context,
	params types.RequestOfferSnapshot,
) (*types.ResponseOfferSnapshot, error) {

	reqres, err := cli.OfferSnapshotAsync(ctx, params)
	if err != nil {
		return nil, err
	}
	return cli.finishSyncCall(reqres).GetOfferSnapshot(), cli.Error()
}

func (cli *grpcClient) LoadSnapshotChunkSync(
	ctx context.Context,
	params types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {

	reqres, err := cli.LoadSnapshotChunkAsync(ctx, params)
	if err != nil {
		return nil, err
	}
	return cli.finishSyncCall(reqres).GetLoadSnapshotChunk(), cli.Error()
}

func (cli *grpcClient) ApplySnapshotChunkSync(
	ctx context.Context,
	params types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {

	reqres, err := cli.ApplySnapshotChunkAsync(ctx, params)
	if err != nil {
		return nil, err
	}
	return cli.finishSyncCall(reqres).GetApplySnapshotChunk(), cli.Error()
}

func (cli *grpcClient) FinalizeBlockSync(
	ctx context.Context,
	params types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {

	reqres, err := cli.FinalizeBlockAsync(ctx, params)
	if err != nil {
		return nil, err
	}
	return cli.finishSyncCall(reqres).GetFinalizeBlock(), cli.Error()
}
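For orientation only, here is a hypothetical caller-side sketch (not part of this diff) that exercises the Sync flavor of the client interface shown above. It works against any implementation of the interface, and assumes the standard ABCI field names (Tx, Code, Log):

	package example

	import (
		"context"
		"fmt"

		abcicli "github.com/tendermint/tendermint/abci/client"
		"github.com/tendermint/tendermint/abci/types"
	)

	// checkThenCommit is a hypothetical helper: it relies only on the Sync
	// methods of the client interface from this diff.
	func checkThenCommit(ctx context.Context, cli abcicli.Client, tx []byte) error {
		res, err := cli.CheckTxSync(ctx, types.RequestCheckTx{Tx: tx})
		if err != nil {
			return err // transport or client error
		}
		if res.Code != 0 {
			return fmt.Errorf("tx rejected: code %d, log %q", res.Code, res.Log)
		}
		_, err = cli.CommitSync(ctx)
		return err
	}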
@@ -1,10 +1,10 @@
package abciclient
package abcicli

import (
	"context"

	types "github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/libs/log"
	tmsync "github.com/tendermint/tendermint/internal/libs/sync"
	"github.com/tendermint/tendermint/libs/service"
)

@@ -14,7 +14,10 @@ import (
// RPC endpoint), but defers are used everywhere for the sake of consistency.
type localClient struct {
	service.BaseService

	mtx *tmsync.RWMutex
	types.Application
	Callback
}

var _ Client = (*localClient)(nil)
@@ -22,19 +25,293 @@ var _ Client = (*localClient)(nil)
// NewLocalClient creates a local client, which will be directly calling the
// methods of the given app.
//
// The client methods ignore their context argument.
func NewLocalClient(logger log.Logger, app types.Application) Client {
// Both Async and Sync methods ignore the given context.Context parameter.
func NewLocalClient(mtx *tmsync.RWMutex, app types.Application) Client {
	if mtx == nil {
		mtx = &tmsync.RWMutex{}
	}

	cli := &localClient{
		mtx:         mtx,
		Application: app,
	}
	cli.BaseService = *service.NewBaseService(logger, "localClient", cli)

	cli.BaseService = *service.NewBaseService(nil, "localClient", cli)
	return cli
}

func (*localClient) OnStart(context.Context) error { return nil }
func (*localClient) OnStop()                       {}
func (*localClient) Error() error                  { return nil }
func (*localClient) Flush(context.Context) error   { return nil }
func (*localClient) Echo(_ context.Context, msg string) (*types.ResponseEcho, error) {
func (app *localClient) SetResponseCallback(cb Callback) {
	app.mtx.Lock()
	defer app.mtx.Unlock()
	app.Callback = cb
}

// TODO: change types.Application to include Error()?
func (app *localClient) Error() error {
	return nil
}

func (app *localClient) FlushAsync(ctx context.Context) (*ReqRes, error) {
	// Do nothing
	return newLocalReqRes(types.ToRequestFlush(), nil), nil
}

func (app *localClient) EchoAsync(ctx context.Context, msg string) (*ReqRes, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	return app.callback(
		types.ToRequestEcho(msg),
		types.ToResponseEcho(msg),
	), nil
}

func (app *localClient) InfoAsync(ctx context.Context, req types.RequestInfo) (*ReqRes, error) {
	app.mtx.RLock()
	defer app.mtx.RUnlock()

	res := app.Application.Info(req)
	return app.callback(
		types.ToRequestInfo(req),
		types.ToResponseInfo(res),
	), nil
}

func (app *localClient) CheckTxAsync(ctx context.Context, req types.RequestCheckTx) (*ReqRes, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.CheckTx(req)
	return app.callback(
		types.ToRequestCheckTx(req),
		types.ToResponseCheckTx(res),
	), nil
}

func (app *localClient) QueryAsync(ctx context.Context, req types.RequestQuery) (*ReqRes, error) {
	app.mtx.RLock()
	defer app.mtx.RUnlock()

	res := app.Application.Query(req)
	return app.callback(
		types.ToRequestQuery(req),
		types.ToResponseQuery(res),
	), nil
}

func (app *localClient) CommitAsync(ctx context.Context) (*ReqRes, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.Commit()
	return app.callback(
		types.ToRequestCommit(),
		types.ToResponseCommit(res),
	), nil
}

func (app *localClient) InitChainAsync(ctx context.Context, req types.RequestInitChain) (*ReqRes, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.InitChain(req)
	return app.callback(
		types.ToRequestInitChain(req),
		types.ToResponseInitChain(res),
	), nil
}

func (app *localClient) ListSnapshotsAsync(ctx context.Context, req types.RequestListSnapshots) (*ReqRes, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.ListSnapshots(req)
	return app.callback(
		types.ToRequestListSnapshots(req),
		types.ToResponseListSnapshots(res),
	), nil
}

func (app *localClient) OfferSnapshotAsync(ctx context.Context, req types.RequestOfferSnapshot) (*ReqRes, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.OfferSnapshot(req)
	return app.callback(
		types.ToRequestOfferSnapshot(req),
		types.ToResponseOfferSnapshot(res),
	), nil
}

func (app *localClient) LoadSnapshotChunkAsync(
	ctx context.Context,
	req types.RequestLoadSnapshotChunk,
) (*ReqRes, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.LoadSnapshotChunk(req)
	return app.callback(
		types.ToRequestLoadSnapshotChunk(req),
		types.ToResponseLoadSnapshotChunk(res),
	), nil
}

func (app *localClient) ApplySnapshotChunkAsync(
	ctx context.Context,
	req types.RequestApplySnapshotChunk,
) (*ReqRes, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.ApplySnapshotChunk(req)
	return app.callback(
		types.ToRequestApplySnapshotChunk(req),
		types.ToResponseApplySnapshotChunk(res),
	), nil
}

func (app *localClient) FinalizeBlockAsync(
	ctx context.Context,
	req types.RequestFinalizeBlock,
) (*ReqRes, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.FinalizeBlock(req)
	return app.callback(
		types.ToRequestFinalizeBlock(req),
		types.ToResponseFinalizeBlock(res),
	), nil
}

//-------------------------------------------------------

func (app *localClient) FlushSync(ctx context.Context) error {
	return nil
}

func (app *localClient) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error) {
	return &types.ResponseEcho{Message: msg}, nil
}

func (app *localClient) InfoSync(ctx context.Context, req types.RequestInfo) (*types.ResponseInfo, error) {
	app.mtx.RLock()
	defer app.mtx.RUnlock()

	res := app.Application.Info(req)
	return &res, nil
}

func (app *localClient) CheckTxSync(
	ctx context.Context,
	req types.RequestCheckTx,
) (*types.ResponseCheckTx, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.CheckTx(req)
	return &res, nil
}

func (app *localClient) QuerySync(
	ctx context.Context,
	req types.RequestQuery,
) (*types.ResponseQuery, error) {
	app.mtx.RLock()
	defer app.mtx.RUnlock()

	res := app.Application.Query(req)
	return &res, nil
}

func (app *localClient) CommitSync(ctx context.Context) (*types.ResponseCommit, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.Commit()
	return &res, nil
}

func (app *localClient) InitChainSync(
	ctx context.Context,
	req types.RequestInitChain,
) (*types.ResponseInitChain, error) {

	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.InitChain(req)
	return &res, nil
}

func (app *localClient) ListSnapshotsSync(
	ctx context.Context,
	req types.RequestListSnapshots,
) (*types.ResponseListSnapshots, error) {

	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.ListSnapshots(req)
	return &res, nil
}

func (app *localClient) OfferSnapshotSync(
	ctx context.Context,
	req types.RequestOfferSnapshot,
) (*types.ResponseOfferSnapshot, error) {

	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.OfferSnapshot(req)
	return &res, nil
}

func (app *localClient) LoadSnapshotChunkSync(
	ctx context.Context,
	req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {

	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.LoadSnapshotChunk(req)
	return &res, nil
}

func (app *localClient) ApplySnapshotChunkSync(
	ctx context.Context,
	req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {

	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.ApplySnapshotChunk(req)
	return &res, nil
}

func (app *localClient) FinalizeBlockSync(
	ctx context.Context,
	req types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {

	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.FinalizeBlock(req)
	return &res, nil
}

//-------------------------------------------------------

func (app *localClient) callback(req *types.Request, res *types.Response) *ReqRes {
	app.Callback(req, res)
	return newLocalReqRes(req, res)
}

func newLocalReqRes(req *types.Request, res *types.Response) *ReqRes {
	reqRes := NewReqRes(req)
	reqRes.Response = res
	reqRes.SetDone()
	return reqRes
}
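The local client above is essentially a mutex around a types.Application: writes (CheckTx, Commit, InitChain, ...) take the exclusive lock, reads (Info, Query) take the read lock, so an application that is not safe for concurrent use can still be shared by several connections. A minimal sketch of that serialization pattern follows; the guardedApp type is purely illustrative (it is not part of the package, and internal/... packages are only importable from inside the repository):

	package example

	import (
		types "github.com/tendermint/tendermint/abci/types"
		tmsync "github.com/tendermint/tendermint/internal/libs/sync"
	)

	// guardedApp serializes access to a types.Application with a shared RWMutex,
	// mirroring what localClient does above.
	type guardedApp struct {
		mtx *tmsync.RWMutex
		app types.Application
	}

	func (g *guardedApp) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
		g.mtx.Lock() // writes are exclusive
		defer g.mtx.Unlock()
		return g.app.CheckTx(req)
	}

	func (g *guardedApp) Query(req types.RequestQuery) types.ResponseQuery {
		g.mtx.RLock() // reads may proceed concurrently, as in QuerySync above
		defer g.mtx.RUnlock()
		return g.app.Query(req)
	}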
@@ -1,11 +1,16 @@
// Code generated by mockery. DO NOT EDIT.
// Code generated by mockery 2.9.0. DO NOT EDIT.

package mocks

import (
	context "context"

	abcicli "github.com/tendermint/tendermint/abci/client"

	log "github.com/tendermint/tendermint/libs/log"

	mock "github.com/stretchr/testify/mock"

	types "github.com/tendermint/tendermint/abci/types"
)

@@ -14,12 +19,35 @@ type Client struct {
	mock.Mock
}

// ApplySnapshotChunk provides a mock function with given fields: _a0, _a1
func (_m *Client) ApplySnapshotChunk(_a0 context.Context, _a1 *types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
// ApplySnapshotChunkAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) ApplySnapshotChunkAsync(_a0 context.Context, _a1 types.RequestApplySnapshotChunk) (*abcicli.ReqRes, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *abcicli.ReqRes
	if rf, ok := ret.Get(0).(func(context.Context, types.RequestApplySnapshotChunk) *abcicli.ReqRes); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*abcicli.ReqRes)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, types.RequestApplySnapshotChunk) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// ApplySnapshotChunkSync provides a mock function with given fields: _a0, _a1
func (_m *Client) ApplySnapshotChunkSync(_a0 context.Context, _a1 types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *types.ResponseApplySnapshotChunk
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestApplySnapshotChunk) *types.ResponseApplySnapshotChunk); ok {
	if rf, ok := ret.Get(0).(func(context.Context, types.RequestApplySnapshotChunk) *types.ResponseApplySnapshotChunk); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
@@ -28,7 +56,7 @@ func (_m *Client) ApplySnapshotChunk(_a0 context.Context, _a1 *types.RequestAppl
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestApplySnapshotChunk) error); ok {
	if rf, ok := ret.Get(1).(func(context.Context, types.RequestApplySnapshotChunk) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
@@ -37,12 +65,35 @@ func (_m *Client) ApplySnapshotChunk(_a0 context.Context, _a1 *types.RequestAppl
	return r0, r1
}

// CheckTx provides a mock function with given fields: _a0, _a1
func (_m *Client) CheckTx(_a0 context.Context, _a1 *types.RequestCheckTx) (*types.ResponseCheckTx, error) {
// CheckTxAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) CheckTxAsync(_a0 context.Context, _a1 types.RequestCheckTx) (*abcicli.ReqRes, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *abcicli.ReqRes
	if rf, ok := ret.Get(0).(func(context.Context, types.RequestCheckTx) *abcicli.ReqRes); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*abcicli.ReqRes)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, types.RequestCheckTx) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// CheckTxSync provides a mock function with given fields: _a0, _a1
func (_m *Client) CheckTxSync(_a0 context.Context, _a1 types.RequestCheckTx) (*types.ResponseCheckTx, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *types.ResponseCheckTx
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestCheckTx) *types.ResponseCheckTx); ok {
	if rf, ok := ret.Get(0).(func(context.Context, types.RequestCheckTx) *types.ResponseCheckTx); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
@@ -51,7 +102,7 @@ func (_m *Client) CheckTx(_a0 context.Context, _a1 *types.RequestCheckTx) (*type
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestCheckTx) error); ok {
	if rf, ok := ret.Get(1).(func(context.Context, types.RequestCheckTx) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
@@ -60,8 +111,31 @@ func (_m *Client) CheckTx(_a0 context.Context, _a1 *types.RequestCheckTx) (*type
	return r0, r1
}

// Commit provides a mock function with given fields: _a0
func (_m *Client) Commit(_a0 context.Context) (*types.ResponseCommit, error) {
// CommitAsync provides a mock function with given fields: _a0
func (_m *Client) CommitAsync(_a0 context.Context) (*abcicli.ReqRes, error) {
	ret := _m.Called(_a0)

	var r0 *abcicli.ReqRes
	if rf, ok := ret.Get(0).(func(context.Context) *abcicli.ReqRes); ok {
		r0 = rf(_a0)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*abcicli.ReqRes)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context) error); ok {
		r1 = rf(_a0)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// CommitSync provides a mock function with given fields: _a0
func (_m *Client) CommitSync(_a0 context.Context) (*types.ResponseCommit, error) {
	ret := _m.Called(_a0)

	var r0 *types.ResponseCommit
@@ -83,13 +157,36 @@ func (_m *Client) Commit(_a0 context.Context) (*types.ResponseCommit, error) {
	return r0, r1
}

// Echo provides a mock function with given fields: _a0, _a1
func (_m *Client) Echo(_a0 context.Context, _a1 string) (*types.ResponseEcho, error) {
	ret := _m.Called(_a0, _a1)
// EchoAsync provides a mock function with given fields: ctx, msg
func (_m *Client) EchoAsync(ctx context.Context, msg string) (*abcicli.ReqRes, error) {
	ret := _m.Called(ctx, msg)

	var r0 *abcicli.ReqRes
	if rf, ok := ret.Get(0).(func(context.Context, string) *abcicli.ReqRes); ok {
		r0 = rf(ctx, msg)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*abcicli.ReqRes)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
		r1 = rf(ctx, msg)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// EchoSync provides a mock function with given fields: ctx, msg
func (_m *Client) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error) {
	ret := _m.Called(ctx, msg)

	var r0 *types.ResponseEcho
	if rf, ok := ret.Get(0).(func(context.Context, string) *types.ResponseEcho); ok {
		r0 = rf(_a0, _a1)
		r0 = rf(ctx, msg)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseEcho)
@@ -98,7 +195,7 @@ func (_m *Client) Echo(_a0 context.Context, _a1 string) (*types.ResponseEcho, er

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
		r1 = rf(_a0, _a1)
		r1 = rf(ctx, msg)
	} else {
		r1 = ret.Error(1)
	}
@@ -120,21 +217,21 @@ func (_m *Client) Error() error {
	return r0
}

// ExtendVote provides a mock function with given fields: _a0, _a1
func (_m *Client) ExtendVote(_a0 context.Context, _a1 *types.RequestExtendVote) (*types.ResponseExtendVote, error) {
// FinalizeBlockAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) FinalizeBlockAsync(_a0 context.Context, _a1 types.RequestFinalizeBlock) (*abcicli.ReqRes, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *types.ResponseExtendVote
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestExtendVote) *types.ResponseExtendVote); ok {
	var r0 *abcicli.ReqRes
	if rf, ok := ret.Get(0).(func(context.Context, types.RequestFinalizeBlock) *abcicli.ReqRes); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseExtendVote)
			r0 = ret.Get(0).(*abcicli.ReqRes)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestExtendVote) error); ok {
	if rf, ok := ret.Get(1).(func(context.Context, types.RequestFinalizeBlock) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
@@ -143,12 +240,12 @@ func (_m *Client) ExtendVote(_a0 context.Context, _a1 *types.RequestExtendVote)
	return r0, r1
}

// FinalizeBlock provides a mock function with given fields: _a0, _a1
func (_m *Client) FinalizeBlock(_a0 context.Context, _a1 *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
// FinalizeBlockSync provides a mock function with given fields: _a0, _a1
func (_m *Client) FinalizeBlockSync(_a0 context.Context, _a1 types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *types.ResponseFinalizeBlock
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestFinalizeBlock) *types.ResponseFinalizeBlock); ok {
	if rf, ok := ret.Get(0).(func(context.Context, types.RequestFinalizeBlock) *types.ResponseFinalizeBlock); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
@@ -157,7 +254,7 @@ func (_m *Client) FinalizeBlock(_a0 context.Context, _a1 *types.RequestFinalizeB
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestFinalizeBlock) error); ok {
	if rf, ok := ret.Get(1).(func(context.Context, types.RequestFinalizeBlock) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
@@ -166,8 +263,31 @@ func (_m *Client) FinalizeBlock(_a0 context.Context, _a1 *types.RequestFinalizeB
	return r0, r1
}

// Flush provides a mock function with given fields: _a0
func (_m *Client) Flush(_a0 context.Context) error {
// FlushAsync provides a mock function with given fields: _a0
func (_m *Client) FlushAsync(_a0 context.Context) (*abcicli.ReqRes, error) {
	ret := _m.Called(_a0)

	var r0 *abcicli.ReqRes
	if rf, ok := ret.Get(0).(func(context.Context) *abcicli.ReqRes); ok {
		r0 = rf(_a0)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*abcicli.ReqRes)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context) error); ok {
		r1 = rf(_a0)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// FlushSync provides a mock function with given fields: _a0
func (_m *Client) FlushSync(_a0 context.Context) error {
	ret := _m.Called(_a0)

	var r0 error
@@ -180,21 +300,21 @@ func (_m *Client) Flush(_a0 context.Context) error {
	return r0
}

// Info provides a mock function with given fields: _a0, _a1
func (_m *Client) Info(_a0 context.Context, _a1 *types.RequestInfo) (*types.ResponseInfo, error) {
// InfoAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) InfoAsync(_a0 context.Context, _a1 types.RequestInfo) (*abcicli.ReqRes, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *types.ResponseInfo
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestInfo) *types.ResponseInfo); ok {
	var r0 *abcicli.ReqRes
	if rf, ok := ret.Get(0).(func(context.Context, types.RequestInfo) *abcicli.ReqRes); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseInfo)
			r0 = ret.Get(0).(*abcicli.ReqRes)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestInfo) error); ok {
	if rf, ok := ret.Get(1).(func(context.Context, types.RequestInfo) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
@@ -203,12 +323,58 @@ func (_m *Client) Info(_a0 context.Context, _a1 *types.RequestInfo) (*types.Resp
	return r0, r1
}

// InitChain provides a mock function with given fields: _a0, _a1
func (_m *Client) InitChain(_a0 context.Context, _a1 *types.RequestInitChain) (*types.ResponseInitChain, error) {
// InfoSync provides a mock function with given fields: _a0, _a1
func (_m *Client) InfoSync(_a0 context.Context, _a1 types.RequestInfo) (*types.ResponseInfo, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *types.ResponseInfo
	if rf, ok := ret.Get(0).(func(context.Context, types.RequestInfo) *types.ResponseInfo); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseInfo)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, types.RequestInfo) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// InitChainAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) InitChainAsync(_a0 context.Context, _a1 types.RequestInitChain) (*abcicli.ReqRes, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *abcicli.ReqRes
	if rf, ok := ret.Get(0).(func(context.Context, types.RequestInitChain) *abcicli.ReqRes); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*abcicli.ReqRes)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, types.RequestInitChain) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// InitChainSync provides a mock function with given fields: _a0, _a1
func (_m *Client) InitChainSync(_a0 context.Context, _a1 types.RequestInitChain) (*types.ResponseInitChain, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *types.ResponseInitChain
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestInitChain) *types.ResponseInitChain); ok {
	if rf, ok := ret.Get(0).(func(context.Context, types.RequestInitChain) *types.ResponseInitChain); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
@@ -217,7 +383,7 @@ func (_m *Client) InitChain(_a0 context.Context, _a1 *types.RequestInitChain) (*
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestInitChain) error); ok {
	if rf, ok := ret.Get(1).(func(context.Context, types.RequestInitChain) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
@@ -240,12 +406,35 @@ func (_m *Client) IsRunning() bool {
	return r0
}

// ListSnapshots provides a mock function with given fields: _a0, _a1
func (_m *Client) ListSnapshots(_a0 context.Context, _a1 *types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
// ListSnapshotsAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) ListSnapshotsAsync(_a0 context.Context, _a1 types.RequestListSnapshots) (*abcicli.ReqRes, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *abcicli.ReqRes
	if rf, ok := ret.Get(0).(func(context.Context, types.RequestListSnapshots) *abcicli.ReqRes); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*abcicli.ReqRes)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, types.RequestListSnapshots) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// ListSnapshotsSync provides a mock function with given fields: _a0, _a1
func (_m *Client) ListSnapshotsSync(_a0 context.Context, _a1 types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *types.ResponseListSnapshots
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestListSnapshots) *types.ResponseListSnapshots); ok {
	if rf, ok := ret.Get(0).(func(context.Context, types.RequestListSnapshots) *types.ResponseListSnapshots); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
@@ -254,7 +443,7 @@ func (_m *Client) ListSnapshots(_a0 context.Context, _a1 *types.RequestListSnaps
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestListSnapshots) error); ok {
	if rf, ok := ret.Get(1).(func(context.Context, types.RequestListSnapshots) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
@@ -263,12 +452,35 @@ func (_m *Client) ListSnapshots(_a0 context.Context, _a1 *types.RequestListSnaps
	return r0, r1
}

// LoadSnapshotChunk provides a mock function with given fields: _a0, _a1
func (_m *Client) LoadSnapshotChunk(_a0 context.Context, _a1 *types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
// LoadSnapshotChunkAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) LoadSnapshotChunkAsync(_a0 context.Context, _a1 types.RequestLoadSnapshotChunk) (*abcicli.ReqRes, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *abcicli.ReqRes
	if rf, ok := ret.Get(0).(func(context.Context, types.RequestLoadSnapshotChunk) *abcicli.ReqRes); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*abcicli.ReqRes)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, types.RequestLoadSnapshotChunk) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// LoadSnapshotChunkSync provides a mock function with given fields: _a0, _a1
func (_m *Client) LoadSnapshotChunkSync(_a0 context.Context, _a1 types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *types.ResponseLoadSnapshotChunk
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestLoadSnapshotChunk) *types.ResponseLoadSnapshotChunk); ok {
	if rf, ok := ret.Get(0).(func(context.Context, types.RequestLoadSnapshotChunk) *types.ResponseLoadSnapshotChunk); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
@@ -277,7 +489,7 @@ func (_m *Client) LoadSnapshotChunk(_a0 context.Context, _a1 *types.RequestLoadS
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestLoadSnapshotChunk) error); ok {
	if rf, ok := ret.Get(1).(func(context.Context, types.RequestLoadSnapshotChunk) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
@@ -286,12 +498,35 @@ func (_m *Client) LoadSnapshotChunk(_a0 context.Context, _a1 *types.RequestLoadS
	return r0, r1
}

// OfferSnapshot provides a mock function with given fields: _a0, _a1
func (_m *Client) OfferSnapshot(_a0 context.Context, _a1 *types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
// OfferSnapshotAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) OfferSnapshotAsync(_a0 context.Context, _a1 types.RequestOfferSnapshot) (*abcicli.ReqRes, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *abcicli.ReqRes
	if rf, ok := ret.Get(0).(func(context.Context, types.RequestOfferSnapshot) *abcicli.ReqRes); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*abcicli.ReqRes)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, types.RequestOfferSnapshot) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// OfferSnapshotSync provides a mock function with given fields: _a0, _a1
func (_m *Client) OfferSnapshotSync(_a0 context.Context, _a1 types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *types.ResponseOfferSnapshot
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestOfferSnapshot) *types.ResponseOfferSnapshot); ok {
	if rf, ok := ret.Get(0).(func(context.Context, types.RequestOfferSnapshot) *types.ResponseOfferSnapshot); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
@@ -300,7 +535,7 @@ func (_m *Client) OfferSnapshot(_a0 context.Context, _a1 *types.RequestOfferSnap
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestOfferSnapshot) error); ok {
	if rf, ok := ret.Get(1).(func(context.Context, types.RequestOfferSnapshot) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
@@ -309,21 +544,54 @@ func (_m *Client) OfferSnapshot(_a0 context.Context, _a1 *types.RequestOfferSnap
	return r0, r1
}

// PrepareProposal provides a mock function with given fields: _a0, _a1
func (_m *Client) PrepareProposal(_a0 context.Context, _a1 *types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
// OnReset provides a mock function with given fields:
func (_m *Client) OnReset() error {
	ret := _m.Called()

	var r0 error
	if rf, ok := ret.Get(0).(func() error); ok {
		r0 = rf()
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// OnStart provides a mock function with given fields:
func (_m *Client) OnStart() error {
	ret := _m.Called()

	var r0 error
	if rf, ok := ret.Get(0).(func() error); ok {
		r0 = rf()
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// OnStop provides a mock function with given fields:
func (_m *Client) OnStop() {
	_m.Called()
}

// QueryAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) QueryAsync(_a0 context.Context, _a1 types.RequestQuery) (*abcicli.ReqRes, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *types.ResponsePrepareProposal
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestPrepareProposal) *types.ResponsePrepareProposal); ok {
	var r0 *abcicli.ReqRes
	if rf, ok := ret.Get(0).(func(context.Context, types.RequestQuery) *abcicli.ReqRes); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponsePrepareProposal)
			r0 = ret.Get(0).(*abcicli.ReqRes)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestPrepareProposal) error); ok {
	if rf, ok := ret.Get(1).(func(context.Context, types.RequestQuery) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
@@ -332,35 +600,12 @@ func (_m *Client) PrepareProposal(_a0 context.Context, _a1 *types.RequestPrepare
	return r0, r1
}

// ProcessProposal provides a mock function with given fields: _a0, _a1
func (_m *Client) ProcessProposal(_a0 context.Context, _a1 *types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *types.ResponseProcessProposal
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestProcessProposal) *types.ResponseProcessProposal); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseProcessProposal)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestProcessProposal) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// Query provides a mock function with given fields: _a0, _a1
func (_m *Client) Query(_a0 context.Context, _a1 *types.RequestQuery) (*types.ResponseQuery, error) {
// QuerySync provides a mock function with given fields: _a0, _a1
func (_m *Client) QuerySync(_a0 context.Context, _a1 types.RequestQuery) (*types.ResponseQuery, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *types.ResponseQuery
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestQuery) *types.ResponseQuery); ok {
	if rf, ok := ret.Get(0).(func(context.Context, types.RequestQuery) *types.ResponseQuery); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
@@ -369,7 +614,7 @@ func (_m *Client) Query(_a0 context.Context, _a1 *types.RequestQuery) (*types.Re
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestQuery) error); ok {
	if rf, ok := ret.Get(1).(func(context.Context, types.RequestQuery) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
@@ -378,13 +623,29 @@ func (_m *Client) Query(_a0 context.Context, _a1 *types.RequestQuery) (*types.Re
	return r0, r1
}

// Start provides a mock function with given fields: _a0
func (_m *Client) Start(_a0 context.Context) error {
	ret := _m.Called(_a0)
// Quit provides a mock function with given fields:
func (_m *Client) Quit() <-chan struct{} {
	ret := _m.Called()

	var r0 <-chan struct{}
	if rf, ok := ret.Get(0).(func() <-chan struct{}); ok {
		r0 = rf()
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(<-chan struct{})
		}
	}

	return r0
}

// Reset provides a mock function with given fields:
func (_m *Client) Reset() error {
	ret := _m.Called()

	var r0 error
	if rf, ok := ret.Get(0).(func(context.Context) error); ok {
		r0 = rf(_a0)
	if rf, ok := ret.Get(0).(func() error); ok {
		r0 = rf()
	} else {
		r0 = ret.Error(0)
	}
@@ -392,45 +653,59 @@ func (_m *Client) Start(_a0 context.Context) error {
	return r0
}

// VerifyVoteExtension provides a mock function with given fields: _a0, _a1
func (_m *Client) VerifyVoteExtension(_a0 context.Context, _a1 *types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) {
	ret := _m.Called(_a0, _a1)
// SetLogger provides a mock function with given fields: _a0
func (_m *Client) SetLogger(_a0 log.Logger) {
	_m.Called(_a0)
}

	var r0 *types.ResponseVerifyVoteExtension
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestVerifyVoteExtension) *types.ResponseVerifyVoteExtension); ok {
		r0 = rf(_a0, _a1)
// SetResponseCallback provides a mock function with given fields: _a0
func (_m *Client) SetResponseCallback(_a0 abcicli.Callback) {
	_m.Called(_a0)
}

// Start provides a mock function with given fields:
func (_m *Client) Start() error {
	ret := _m.Called()

	var r0 error
	if rf, ok := ret.Get(0).(func() error); ok {
		r0 = rf()
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseVerifyVoteExtension)
		}
		r0 = ret.Error(0)
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestVerifyVoteExtension) error); ok {
		r1 = rf(_a0, _a1)
	return r0
}

// Stop provides a mock function with given fields:
func (_m *Client) Stop() error {
	ret := _m.Called()

	var r0 error
	if rf, ok := ret.Get(0).(func() error); ok {
		r0 = rf()
	} else {
		r1 = ret.Error(1)
		r0 = ret.Error(0)
	}

	return r0, r1
	return r0
}

// String provides a mock function with given fields:
func (_m *Client) String() string {
	ret := _m.Called()

	var r0 string
	if rf, ok := ret.Get(0).(func() string); ok {
		r0 = rf()
	} else {
		r0 = ret.Get(0).(string)
	}

	return r0
}

// Wait provides a mock function with given fields:
func (_m *Client) Wait() {
	_m.Called()
}

type NewClientT interface {
	mock.TestingT
	Cleanup(func())
}

// NewClient creates a new instance of Client. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
func NewClient(t NewClientT) *Client {
	mock := &Client{}
	mock.Mock.Test(t)

	t.Cleanup(func() { mock.AssertExpectations(t) })

	return mock
}
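Since the file above is generated by mockery on top of testify, a test typically wires the mock up roughly as follows. This is a hypothetical sketch; the method name in the expectation has to match whichever side of this diff (Echo vs EchoSync) is actually in use:

	package example

	import (
		"context"
		"testing"

		"github.com/stretchr/testify/mock"
		"github.com/stretchr/testify/require"

		"github.com/tendermint/tendermint/abci/client/mocks"
		"github.com/tendermint/tendermint/abci/types"
	)

	func TestEchoSync(t *testing.T) {
		cli := &mocks.Client{}
		// Expect one EchoSync call with any context and the message "ping".
		cli.On("EchoSync", mock.Anything, "ping").
			Return(&types.ResponseEcho{Message: "ping"}, nil)

		res, err := cli.EchoSync(context.Background(), "ping")
		require.NoError(t, err)
		require.Equal(t, "ping", res.Message)

		cli.AssertExpectations(t)
	}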
@@ -1,4 +1,4 @@
|
||||
package abciclient
|
||||
package abcicli
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
@@ -8,30 +8,45 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"sync"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"github.com/tendermint/tendermint/abci/types"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
|
||||
"github.com/tendermint/tendermint/internal/libs/timer"
|
||||
tmnet "github.com/tendermint/tendermint/libs/net"
|
||||
"github.com/tendermint/tendermint/libs/service"
|
||||
)
|
||||
|
||||
const (
|
||||
// reqQueueSize is the max number of queued async requests.
|
||||
// (memory: 256MB max assuming 1MB transactions)
|
||||
reqQueueSize = 256
|
||||
// Don't wait longer than...
|
||||
flushThrottleMS = 20
|
||||
)
|
||||
|
||||
type reqResWithContext struct {
|
||||
R *ReqRes
|
||||
C context.Context // if context.Err is not nil, reqRes will be thrown away (ignored)
|
||||
}
|
||||
|
||||
// This is goroutine-safe, but users should beware that the application in
|
||||
// general is not meant to be interfaced with concurrent callers.
|
||||
type socketClient struct {
|
||||
service.BaseService
|
||||
logger log.Logger
|
||||
|
||||
addr string
|
||||
mustConnect bool
|
||||
conn net.Conn
|
||||
|
||||
reqQueue chan *requestAndResponse
|
||||
reqQueue chan *reqResWithContext
|
||||
flushTimer *timer.ThrottleTimer
|
||||
|
||||
mtx sync.Mutex
|
||||
mtx tmsync.RWMutex
|
||||
err error
|
||||
reqSent *list.List // list of requests sent, waiting for response
|
||||
reqSent *list.List // list of requests sent, waiting for response
|
||||
resCb func(*types.Request, *types.Response) // called on all requests, if set.
|
||||
}
|
||||
|
||||
var _ Client = (*socketClient)(nil)
|
||||
@@ -39,27 +54,27 @@ var _ Client = (*socketClient)(nil)
|
||||
// NewSocketClient creates a new socket client, which connects to a given
|
||||
// address. If mustConnect is true, the client will return an error upon start
|
||||
// if it fails to connect.
|
||||
func NewSocketClient(logger log.Logger, addr string, mustConnect bool) Client {
|
||||
func NewSocketClient(addr string, mustConnect bool) Client {
|
||||
cli := &socketClient{
|
||||
logger: logger,
|
||||
reqQueue: make(chan *requestAndResponse),
|
||||
reqQueue: make(chan *reqResWithContext, reqQueueSize),
|
||||
flushTimer: timer.NewThrottleTimer("socketClient", flushThrottleMS),
|
||||
mustConnect: mustConnect,
|
||||
addr: addr,
|
||||
reqSent: list.New(),
|
||||
|
||||
addr: addr,
|
||||
reqSent: list.New(),
|
||||
resCb: nil,
|
||||
}
|
||||
cli.BaseService = *service.NewBaseService(logger, "socketClient", cli)
|
||||
cli.BaseService = *service.NewBaseService(nil, "socketClient", cli)
|
||||
return cli
|
||||
}
|
||||
|
||||
// OnStart implements Service by connecting to the server and spawning reading
|
||||
// and writing goroutines.
|
||||
func (cli *socketClient) OnStart(ctx context.Context) error {
|
||||
func (cli *socketClient) OnStart() error {
|
||||
var (
|
||||
err error
|
||||
conn net.Conn
|
||||
)
|
||||
timer := time.NewTimer(0)
|
||||
defer timer.Stop()
|
||||
|
||||
for {
|
||||
conn, err = tmnet.Connect(cli.addr)
|
||||
@@ -67,22 +82,15 @@ func (cli *socketClient) OnStart(ctx context.Context) error {
|
||||
if cli.mustConnect {
|
||||
return err
|
||||
}
|
||||
cli.logger.Error(fmt.Sprintf("abci.socketClient failed to connect to %v. Retrying after %vs...",
|
||||
cli.Logger.Error(fmt.Sprintf("abci.socketClient failed to connect to %v. Retrying after %vs...",
|
||||
cli.addr, dialRetryIntervalSeconds), "err", err)
|
||||
|
||||
timer.Reset(time.Second * dialRetryIntervalSeconds)
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
case <-timer.C:
|
||||
continue
|
||||
}
|
||||
|
||||
time.Sleep(time.Second * dialRetryIntervalSeconds)
|
||||
continue
|
||||
}
|
||||
cli.conn = conn
|
||||
|
||||
go cli.sendRequestsRoutine(ctx, conn)
|
||||
go cli.recvResponseRoutine(ctx, conn)
|
||||
go cli.sendRequestsRoutine(conn)
|
||||
go cli.recvResponseRoutine(conn)
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -93,63 +101,89 @@ func (cli *socketClient) OnStop() {
|
||||
if cli.conn != nil {
|
||||
cli.conn.Close()
|
||||
}
|
||||
cli.drainQueue()
|
||||
|
||||
cli.flushQueue()
|
||||
cli.flushTimer.Stop()
|
||||
}
|
||||
|
||||
// Error returns an error if the client was stopped abruptly.
|
||||
func (cli *socketClient) Error() error {
|
||||
cli.mtx.RLock()
|
||||
defer cli.mtx.RUnlock()
|
||||
return cli.err
|
||||
}
|
||||
|
||||
// SetResponseCallback sets a callback, which will be executed for each
|
||||
// non-error & non-empty response from the server.
|
||||
//
|
||||
// NOTE: callback may get internally generated flush responses.
|
||||
func (cli *socketClient) SetResponseCallback(resCb Callback) {
|
||||
cli.mtx.Lock()
|
||||
defer cli.mtx.Unlock()
|
||||
return cli.err
|
||||
cli.resCb = resCb
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
|
||||
func (cli *socketClient) sendRequestsRoutine(ctx context.Context, conn io.Writer) {
|
||||
bw := bufio.NewWriter(conn)
|
||||
func (cli *socketClient) sendRequestsRoutine(conn io.Writer) {
|
||||
w := bufio.NewWriter(conn)
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case reqres := <-cli.reqQueue:
|
||||
// N.B. We must enqueue before sending out the request, otherwise the
|
||||
// server may reply before we do it, and the receiver will fail for an
|
||||
// unsolicited reply.
|
||||
cli.trackRequest(reqres)
|
||||
// cli.Logger.Debug("Sent request", "requestType", reflect.TypeOf(reqres.Request), "request", reqres.Request)
|
||||
|
||||
if err := types.WriteMessage(reqres.Request, bw); err != nil {
|
||||
if reqres.C.Err() != nil {
|
||||
cli.Logger.Debug("Request's context is done", "req", reqres.R, "err", reqres.C.Err())
|
||||
continue
|
||||
}
|
||||
|
||||
cli.willSendReq(reqres.R)
|
||||
err := types.WriteMessage(reqres.R.Request, w)
|
||||
if err != nil {
|
||||
cli.stopForError(fmt.Errorf("write to buffer: %w", err))
|
||||
return
|
||||
}
|
||||
|
||||
if err := bw.Flush(); err != nil {
|
||||
cli.stopForError(fmt.Errorf("flush buffer: %w", err))
|
||||
return
|
||||
// If it's a flush request, flush the current buffer.
|
||||
if _, ok := reqres.R.Request.Value.(*types.Request_Flush); ok {
|
||||
err = w.Flush()
|
||||
if err != nil {
|
||||
cli.stopForError(fmt.Errorf("flush buffer: %w", err))
|
||||
return
|
||||
}
|
||||
}
|
||||
case <-cli.flushTimer.Ch: // flush queue
|
||||
select {
|
||||
case cli.reqQueue <- &reqResWithContext{R: NewReqRes(types.ToRequestFlush()), C: context.Background()}:
|
||||
default:
|
||||
// Probably will fill the buffer, or retry later.
|
||||
}
|
||||
case <-cli.Quit():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (cli *socketClient) recvResponseRoutine(ctx context.Context, conn io.Reader) {
|
||||
func (cli *socketClient) recvResponseRoutine(conn io.Reader) {
|
||||
r := bufio.NewReader(conn)
|
||||
for {
|
||||
if ctx.Err() != nil {
|
||||
return
|
||||
}
|
||||
res := &types.Response{}
|
||||
|
||||
if err := types.ReadMessage(r, res); err != nil {
|
||||
var res = &types.Response{}
|
||||
err := types.ReadMessage(r, res)
|
||||
if err != nil {
|
||||
cli.stopForError(fmt.Errorf("read message: %w", err))
|
||||
return
|
||||
}
|
||||
|
||||
// cli.Logger.Debug("Received response", "responseType", reflect.TypeOf(res), "response", res)
|
||||
|
||||
switch r := res.Value.(type) {
|
||||
case *types.Response_Exception: // app responded with error
|
||||
// XXX After setting cli.err, release waiters (e.g. reqres.Done())
|
||||
cli.stopForError(errors.New(r.Exception.Error))
|
||||
return
|
||||
default:
|
||||
if err := cli.didRecvResponse(res); err != nil {
|
||||
err := cli.didRecvResponse(res)
|
||||
if err != nil {
|
||||
cli.stopForError(err)
|
||||
return
|
||||
}
|
||||
@@ -157,13 +191,7 @@ func (cli *socketClient) recvResponseRoutine(ctx context.Context, conn io.Reader
|
||||
}
|
||||
}
|
||||
|
||||
func (cli *socketClient) trackRequest(reqres *requestAndResponse) {
|
||||
// N.B. We must NOT hold the client state lock while checking this, or we
|
||||
// may deadlock with shutdown.
|
||||
if !cli.IsRunning() {
|
||||
return
|
||||
}
|
||||
|
||||
func (cli *socketClient) willSendReq(reqres *ReqRes) {
|
||||
cli.mtx.Lock()
|
||||
defer cli.mtx.Unlock()
|
||||
cli.reqSent.PushBack(reqres)
|
||||
@@ -176,189 +204,329 @@ func (cli *socketClient) didRecvResponse(res *types.Response) error {
|
||||
// Get the first ReqRes.
|
||||
next := cli.reqSent.Front()
|
||||
if next == nil {
|
||||
return fmt.Errorf("unexpected %T when nothing expected", res.Value)
|
||||
return fmt.Errorf("unexpected %v when nothing expected", reflect.TypeOf(res.Value))
|
||||
}
|
||||
|
||||
reqres := next.Value.(*requestAndResponse)
|
||||
reqres := next.Value.(*ReqRes)
|
||||
if !resMatchesReq(reqres.Request, res) {
|
||||
return fmt.Errorf("unexpected %T when response to %T expected", res.Value, reqres.Request.Value)
|
||||
return fmt.Errorf("unexpected %v when response to %v expected",
|
||||
reflect.TypeOf(res.Value), reflect.TypeOf(reqres.Request.Value))
|
||||
}
|
||||
|
||||
reqres.Response = res
|
||||
reqres.markDone() // release waiters
|
||||
reqres.Done() // release waiters
|
||||
cli.reqSent.Remove(next) // pop first item from linked list
|
||||
|
||||
// Notify client listener if set (global callback).
|
||||
if cli.resCb != nil {
|
||||
cli.resCb(reqres.Request, res)
|
||||
}
|
||||
|
||||
// Notify reqRes listener if set (request specific callback).
|
||||
//
|
||||
// NOTE: It is possible this callback isn't set on the reqres object at this
// point, in which case it will be called later, once it is set.
|
||||
reqres.InvokeCallback()
|
||||
|
||||
return nil
|
||||
}
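The bookkeeping behind willSendReq/trackRequest and didRecvResponse is a FIFO list of in-flight requests: push on send, pop and match on receive, and treat anything unsolicited or out of order as a protocol error. A hedged sketch of just that bookkeeping, with plain strings standing in for the *ReqRes values the real code stores:

package main

import (
	"container/list"
	"fmt"
	"sync"
)

// pending tracks requests in flight, in send order, so responses can be matched FIFO.
type pending struct {
	mtx  sync.Mutex
	sent *list.List // of string (request kind)
}

func newPending() *pending { return &pending{sent: list.New()} }

func (p *pending) willSend(kind string) {
	p.mtx.Lock()
	defer p.mtx.Unlock()
	p.sent.PushBack(kind)
}

func (p *pending) didRecv(kind string) error {
	p.mtx.Lock()
	defer p.mtx.Unlock()
	front := p.sent.Front()
	if front == nil {
		return fmt.Errorf("unexpected %q when nothing expected", kind)
	}
	want := front.Value.(string)
	if want != kind {
		return fmt.Errorf("unexpected %q when response to %q expected", kind, want)
	}
	p.sent.Remove(front) // pop the matched request
	return nil
}

func main() {
	p := newPending()
	p.willSend("echo")
	p.willSend("info")
	fmt.Println(p.didRecv("echo"))  // <nil>
	fmt.Println(p.didRecv("flush")) // mismatch: response to "info" expected
}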
|
||||
|
||||
//----------------------------------------
|
||||
|
||||
func (cli *socketClient) doRequest(ctx context.Context, req *types.Request) (*types.Response, error) {
|
||||
if !cli.IsRunning() {
|
||||
return nil, errors.New("client has stopped")
|
||||
func (cli *socketClient) EchoAsync(ctx context.Context, msg string) (*ReqRes, error) {
|
||||
return cli.queueRequestAsync(ctx, types.ToRequestEcho(msg))
|
||||
}
|
||||
|
||||
func (cli *socketClient) FlushAsync(ctx context.Context) (*ReqRes, error) {
|
||||
return cli.queueRequestAsync(ctx, types.ToRequestFlush())
|
||||
}
|
||||
|
||||
func (cli *socketClient) InfoAsync(ctx context.Context, req types.RequestInfo) (*ReqRes, error) {
|
||||
return cli.queueRequestAsync(ctx, types.ToRequestInfo(req))
|
||||
}
|
||||
|
||||
func (cli *socketClient) CheckTxAsync(ctx context.Context, req types.RequestCheckTx) (*ReqRes, error) {
|
||||
return cli.queueRequestAsync(ctx, types.ToRequestCheckTx(req))
|
||||
}
|
||||
|
||||
func (cli *socketClient) QueryAsync(ctx context.Context, req types.RequestQuery) (*ReqRes, error) {
|
||||
return cli.queueRequestAsync(ctx, types.ToRequestQuery(req))
|
||||
}
|
||||
|
||||
func (cli *socketClient) CommitAsync(ctx context.Context) (*ReqRes, error) {
|
||||
return cli.queueRequestAsync(ctx, types.ToRequestCommit())
|
||||
}
|
||||
|
||||
func (cli *socketClient) InitChainAsync(ctx context.Context, req types.RequestInitChain) (*ReqRes, error) {
|
||||
return cli.queueRequestAsync(ctx, types.ToRequestInitChain(req))
|
||||
}
|
||||
|
||||
func (cli *socketClient) ListSnapshotsAsync(ctx context.Context, req types.RequestListSnapshots) (*ReqRes, error) {
|
||||
return cli.queueRequestAsync(ctx, types.ToRequestListSnapshots(req))
|
||||
}
|
||||
|
||||
func (cli *socketClient) OfferSnapshotAsync(ctx context.Context, req types.RequestOfferSnapshot) (*ReqRes, error) {
|
||||
return cli.queueRequestAsync(ctx, types.ToRequestOfferSnapshot(req))
|
||||
}
|
||||
|
||||
func (cli *socketClient) LoadSnapshotChunkAsync(
|
||||
ctx context.Context,
|
||||
req types.RequestLoadSnapshotChunk,
|
||||
) (*ReqRes, error) {
|
||||
return cli.queueRequestAsync(ctx, types.ToRequestLoadSnapshotChunk(req))
|
||||
}
|
||||
|
||||
func (cli *socketClient) ApplySnapshotChunkAsync(
|
||||
ctx context.Context,
|
||||
req types.RequestApplySnapshotChunk,
|
||||
) (*ReqRes, error) {
|
||||
return cli.queueRequestAsync(ctx, types.ToRequestApplySnapshotChunk(req))
|
||||
}
|
||||
|
||||
func (cli *socketClient) FinalizeBlockAsync(
|
||||
ctx context.Context,
|
||||
req types.RequestFinalizeBlock,
|
||||
) (*ReqRes, error) {
|
||||
return cli.queueRequestAsync(ctx, types.ToRequestFinalizeBlock(req))
|
||||
}
|
||||
|
||||
//----------------------------------------
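Putting the Async/Sync side of this diff together: a caller queues a request, forces a flush so the server actually sees it, and only then reads the matched response, which is what socket_client_test.go does further down. A usage sketch under those assumptions; the address, the mustConnect flag, and the error handling are illustrative:

package main

import (
	"context"
	"fmt"
	"log"

	abcicli "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/types"
)

func main() {
	// Address and mustConnect=true are illustrative; the test file below uses
	// the same constructor with a random localhost port.
	c := abcicli.NewSocketClient("localhost:26658", true)
	if err := c.Start(); err != nil {
		log.Fatal(err)
	}
	defer func() { _ = c.Stop() }()

	ctx := context.Background()
	reqres, err := c.FinalizeBlockAsync(ctx, types.RequestFinalizeBlock{Txs: [][]byte{[]byte("tx1")}})
	if err != nil {
		log.Fatal(err)
	}
	// Without a flush the queued request may never be written to the socket.
	if err := c.FlushSync(ctx); err != nil {
		log.Fatal(err)
	}
	fmt.Println(reqres.Response.GetFinalizeBlock())
}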
|
||||
|
||||
func (cli *socketClient) FlushSync(ctx context.Context) error {
|
||||
reqRes, err := cli.queueRequest(ctx, types.ToRequestFlush(), true)
|
||||
if err != nil {
|
||||
return queueErr(err)
|
||||
}
|
||||
|
||||
reqres := makeReqRes(req)
|
||||
|
||||
select {
|
||||
case cli.reqQueue <- reqres:
|
||||
case <-ctx.Done():
|
||||
return nil, fmt.Errorf("can't queue req: %w", ctx.Err())
|
||||
if err := cli.Error(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
select {
|
||||
case <-reqres.signal:
|
||||
if err := cli.Error(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
gotResp := make(chan struct{})
|
||||
go func() {
|
||||
// NOTE: if we don't flush the queue, its possible to get stuck here
|
||||
reqRes.Wait()
|
||||
close(gotResp)
|
||||
}()
|
||||
|
||||
return reqres.Response, nil
|
||||
select {
|
||||
case <-gotResp:
|
||||
return cli.Error()
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
return ctx.Err()
|
||||
}
|
||||
}
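FlushSync above waits for the response in a helper goroutine and closes a channel so the caller can still select on the context. The same shape in isolation, with a sync.WaitGroup standing in for ReqRes.Wait; as in the original, the helper goroutine keeps waiting if the context fires first:

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// waitCtx adapts a blocking Wait so the caller can also give up when ctx is done.
func waitCtx(ctx context.Context, wg *sync.WaitGroup) error {
	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()
	select {
	case <-done:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		time.Sleep(50 * time.Millisecond)
		wg.Done()
	}()
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	fmt.Println(waitCtx(ctx, &wg)) // <nil>
}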
|
||||
|
||||
// drainQueue marks as complete and discards all remaining pending requests
|
||||
// from the queue.
|
||||
func (cli *socketClient) drainQueue() {
|
||||
func (cli *socketClient) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error) {
|
||||
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestEcho(msg))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return reqres.Response.GetEcho(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) InfoSync(
|
||||
ctx context.Context,
|
||||
req types.RequestInfo,
|
||||
) (*types.ResponseInfo, error) {
|
||||
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestInfo(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return reqres.Response.GetInfo(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) CheckTxSync(
|
||||
ctx context.Context,
|
||||
req types.RequestCheckTx,
|
||||
) (*types.ResponseCheckTx, error) {
|
||||
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestCheckTx(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return reqres.Response.GetCheckTx(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) QuerySync(
|
||||
ctx context.Context,
|
||||
req types.RequestQuery,
|
||||
) (*types.ResponseQuery, error) {
|
||||
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestQuery(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return reqres.Response.GetQuery(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) CommitSync(ctx context.Context) (*types.ResponseCommit, error) {
|
||||
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestCommit())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return reqres.Response.GetCommit(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) InitChainSync(
|
||||
ctx context.Context,
|
||||
req types.RequestInitChain,
|
||||
) (*types.ResponseInitChain, error) {
|
||||
|
||||
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestInitChain(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return reqres.Response.GetInitChain(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) ListSnapshotsSync(
|
||||
ctx context.Context,
|
||||
req types.RequestListSnapshots,
|
||||
) (*types.ResponseListSnapshots, error) {
|
||||
|
||||
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestListSnapshots(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return reqres.Response.GetListSnapshots(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) OfferSnapshotSync(
|
||||
ctx context.Context,
|
||||
req types.RequestOfferSnapshot,
|
||||
) (*types.ResponseOfferSnapshot, error) {
|
||||
|
||||
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestOfferSnapshot(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return reqres.Response.GetOfferSnapshot(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) LoadSnapshotChunkSync(
|
||||
ctx context.Context,
|
||||
req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
|
||||
|
||||
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestLoadSnapshotChunk(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return reqres.Response.GetLoadSnapshotChunk(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) ApplySnapshotChunkSync(
|
||||
ctx context.Context,
|
||||
req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
|
||||
|
||||
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestApplySnapshotChunk(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return reqres.Response.GetApplySnapshotChunk(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) FinalizeBlockSync(
|
||||
ctx context.Context,
|
||||
req types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
|
||||
|
||||
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestFinalizeBlock(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return reqres.Response.GetFinalizeBlock(), nil
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
|
||||
// queueRequest enqueues req onto the queue. If the queue is full, it either
|
||||
// returns an error (sync=false) or blocks (sync=true).
|
||||
//
|
||||
// When sync=true, ctx can be used to break early. When sync=false, ctx will be
|
||||
// used later to determine if request should be dropped (if ctx.Err is
|
||||
// non-nil).
|
||||
//
|
||||
// The caller is responsible for checking cli.Error.
|
||||
func (cli *socketClient) queueRequest(ctx context.Context, req *types.Request, sync bool) (*ReqRes, error) {
|
||||
reqres := NewReqRes(req)
|
||||
|
||||
if sync {
|
||||
select {
|
||||
case cli.reqQueue <- &reqResWithContext{R: reqres, C: context.Background()}:
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
} else {
|
||||
select {
|
||||
case cli.reqQueue <- &reqResWithContext{R: reqres, C: ctx}:
|
||||
default:
|
||||
return nil, errors.New("buffer is full")
|
||||
}
|
||||
}
|
||||
|
||||
// Maybe auto-flush, or unset auto-flush
|
||||
switch req.Value.(type) {
|
||||
case *types.Request_Flush:
|
||||
cli.flushTimer.Unset()
|
||||
default:
|
||||
cli.flushTimer.Set()
|
||||
}
|
||||
|
||||
return reqres, nil
|
||||
}
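queueRequest supports two enqueue modes: the sync path blocks until there is room on the queue (or the context ends), while the async path fails fast when the buffer is full. A small stand-alone sketch of those two selects, with a string channel standing in for the request queue:

package main

import (
	"context"
	"errors"
	"fmt"
)

// enqueue blocks when sync is true (or gives up when ctx is done);
// otherwise it returns immediately if the buffer is full.
func enqueue(ctx context.Context, queue chan<- string, req string, sync bool) error {
	if sync {
		select {
		case queue <- req:
			return nil
		case <-ctx.Done():
			return ctx.Err()
		}
	}
	select {
	case queue <- req:
		return nil
	default:
		return errors.New("buffer is full")
	}
}

func main() {
	queue := make(chan string, 1)
	fmt.Println(enqueue(context.Background(), queue, "info", false))  // <nil>
	fmt.Println(enqueue(context.Background(), queue, "flush", false)) // buffer is full
}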
|
||||
|
||||
func (cli *socketClient) queueRequestAsync(
|
||||
ctx context.Context,
|
||||
req *types.Request,
|
||||
) (*ReqRes, error) {
|
||||
|
||||
reqres, err := cli.queueRequest(ctx, req, false)
|
||||
if err != nil {
|
||||
return nil, queueErr(err)
|
||||
}
|
||||
|
||||
return reqres, cli.Error()
|
||||
}
|
||||
|
||||
func (cli *socketClient) queueRequestAndFlushSync(
|
||||
ctx context.Context,
|
||||
req *types.Request,
|
||||
) (*ReqRes, error) {
|
||||
|
||||
reqres, err := cli.queueRequest(ctx, req, true)
|
||||
if err != nil {
|
||||
return nil, queueErr(err)
|
||||
}
|
||||
|
||||
if err := cli.FlushSync(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return reqres, cli.Error()
|
||||
}
|
||||
|
||||
func queueErr(e error) error {
|
||||
return fmt.Errorf("can't queue req: %w", e)
|
||||
}
|
||||
|
||||
func (cli *socketClient) flushQueue() {
|
||||
cli.mtx.Lock()
|
||||
defer cli.mtx.Unlock()
|
||||
|
||||
// mark all in-flight messages as resolved (they will get cli.Error())
|
||||
for req := cli.reqSent.Front(); req != nil; req = req.Next() {
|
||||
reqres := req.Value.(*requestAndResponse)
|
||||
reqres.markDone()
|
||||
reqres := req.Value.(*ReqRes)
|
||||
reqres.Done()
|
||||
}
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
|
||||
func (cli *socketClient) Flush(ctx context.Context) error {
|
||||
_, err := cli.doRequest(ctx, types.ToRequestFlush())
|
||||
if err != nil {
|
||||
return err
|
||||
// mark all queued messages as resolved
|
||||
LOOP:
|
||||
for {
|
||||
select {
|
||||
case reqres := <-cli.reqQueue:
|
||||
reqres.R.Done()
|
||||
default:
|
||||
break LOOP
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
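The labeled LOOP/break above is the usual way to empty whatever happens to be buffered on a channel without blocking. The same pattern in isolation:

package main

import "fmt"

// drain empties whatever is currently buffered on the channel without blocking.
func drain(queue chan string) (n int) {
LOOP:
	for {
		select {
		case req := <-queue:
			fmt.Println("resolving leftover request:", req)
			n++
		default:
			break LOOP
		}
	}
	return n
}

func main() {
	q := make(chan string, 4)
	q <- "check_tx"
	q <- "flush"
	fmt.Println("drained:", drain(q))
}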
|
||||
|
||||
func (cli *socketClient) Echo(ctx context.Context, msg string) (*types.ResponseEcho, error) {
|
||||
res, err := cli.doRequest(ctx, types.ToRequestEcho(msg))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.GetEcho(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) Info(ctx context.Context, req *types.RequestInfo) (*types.ResponseInfo, error) {
|
||||
res, err := cli.doRequest(ctx, types.ToRequestInfo(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.GetInfo(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) CheckTx(ctx context.Context, req *types.RequestCheckTx) (*types.ResponseCheckTx, error) {
|
||||
res, err := cli.doRequest(ctx, types.ToRequestCheckTx(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.GetCheckTx(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) Query(ctx context.Context, req *types.RequestQuery) (*types.ResponseQuery, error) {
|
||||
res, err := cli.doRequest(ctx, types.ToRequestQuery(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.GetQuery(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) Commit(ctx context.Context) (*types.ResponseCommit, error) {
|
||||
res, err := cli.doRequest(ctx, types.ToRequestCommit())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.GetCommit(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) InitChain(ctx context.Context, req *types.RequestInitChain) (*types.ResponseInitChain, error) {
|
||||
res, err := cli.doRequest(ctx, types.ToRequestInitChain(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.GetInitChain(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) ListSnapshots(ctx context.Context, req *types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
|
||||
res, err := cli.doRequest(ctx, types.ToRequestListSnapshots(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.GetListSnapshots(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) OfferSnapshot(ctx context.Context, req *types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
|
||||
res, err := cli.doRequest(ctx, types.ToRequestOfferSnapshot(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.GetOfferSnapshot(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) LoadSnapshotChunk(ctx context.Context, req *types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
|
||||
res, err := cli.doRequest(ctx, types.ToRequestLoadSnapshotChunk(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.GetLoadSnapshotChunk(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) ApplySnapshotChunk(ctx context.Context, req *types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
|
||||
res, err := cli.doRequest(ctx, types.ToRequestApplySnapshotChunk(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.GetApplySnapshotChunk(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) PrepareProposal(ctx context.Context, req *types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
|
||||
res, err := cli.doRequest(ctx, types.ToRequestPrepareProposal(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.GetPrepareProposal(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) ProcessProposal(ctx context.Context, req *types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
|
||||
res, err := cli.doRequest(ctx, types.ToRequestProcessProposal(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.GetProcessProposal(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) ExtendVote(ctx context.Context, req *types.RequestExtendVote) (*types.ResponseExtendVote, error) {
|
||||
res, err := cli.doRequest(ctx, types.ToRequestExtendVote(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.GetExtendVote(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) VerifyVoteExtension(ctx context.Context, req *types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) {
|
||||
res, err := cli.doRequest(ctx, types.ToRequestVerifyVoteExtension(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.GetVerifyVoteExtension(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) FinalizeBlock(ctx context.Context, req *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
|
||||
res, err := cli.doRequest(ctx, types.ToRequestFinalizeBlock(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.GetFinalizeBlock(), nil
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
@@ -379,14 +547,6 @@ func resMatchesReq(req *types.Request, res *types.Response) (ok bool) {
|
||||
_, ok = res.Value.(*types.Response_Query)
|
||||
case *types.Request_InitChain:
|
||||
_, ok = res.Value.(*types.Response_InitChain)
|
||||
case *types.Request_ProcessProposal:
|
||||
_, ok = res.Value.(*types.Response_ProcessProposal)
|
||||
case *types.Request_PrepareProposal:
|
||||
_, ok = res.Value.(*types.Response_PrepareProposal)
|
||||
case *types.Request_ExtendVote:
|
||||
_, ok = res.Value.(*types.Response_ExtendVote)
|
||||
case *types.Request_VerifyVoteExtension:
|
||||
_, ok = res.Value.(*types.Response_VerifyVoteExtension)
|
||||
case *types.Request_ApplySnapshotChunk:
|
||||
_, ok = res.Value.(*types.Response_ApplySnapshotChunk)
|
||||
case *types.Request_LoadSnapshotChunk:
|
||||
@@ -410,6 +570,8 @@ func (cli *socketClient) stopForError(err error) {
|
||||
cli.err = err
|
||||
cli.mtx.Unlock()
|
||||
|
||||
cli.logger.Info("Stopping abci.socketClient", "reason", err)
|
||||
cli.Stop()
|
||||
cli.Logger.Info("Stopping abci.socketClient", "reason", err)
|
||||
if err := cli.Stop(); err != nil {
|
||||
cli.Logger.Error("Error stopping abci.socketClient", "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
127 abci/client/socket_client_test.go (new file)
@@ -0,0 +1,127 @@
|
||||
package abcicli_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"math/rand"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
abcicli "github.com/tendermint/tendermint/abci/client"
|
||||
"github.com/tendermint/tendermint/abci/server"
|
||||
"github.com/tendermint/tendermint/abci/types"
|
||||
"github.com/tendermint/tendermint/libs/service"
|
||||
)
|
||||
|
||||
var ctx = context.Background()
|
||||
|
||||
func TestProperSyncCalls(t *testing.T) {
|
||||
app := slowApp{}
|
||||
|
||||
s, c := setupClientServer(t, app)
|
||||
t.Cleanup(func() {
|
||||
if err := s.Stop(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
})
|
||||
t.Cleanup(func() {
|
||||
if err := c.Stop(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
})
|
||||
|
||||
resp := make(chan error, 1)
|
||||
go func() {
|
||||
// This is FinalizeBlockSync unrolled....
|
||||
reqres, err := c.FinalizeBlockAsync(ctx, types.RequestFinalizeBlock{})
|
||||
assert.NoError(t, err)
|
||||
err = c.FlushSync(context.Background())
|
||||
assert.NoError(t, err)
|
||||
res := reqres.Response.GetFinalizeBlock()
|
||||
assert.NotNil(t, res)
|
||||
resp <- c.Error()
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-time.After(time.Second):
|
||||
require.Fail(t, "No response arrived")
|
||||
case err, ok := <-resp:
|
||||
require.True(t, ok, "Must not close channel")
|
||||
assert.NoError(t, err, "This should return success")
|
||||
}
|
||||
}
|
||||
|
||||
func TestHangingSyncCalls(t *testing.T) {
|
||||
app := slowApp{}
|
||||
|
||||
s, c := setupClientServer(t, app)
|
||||
t.Cleanup(func() {
|
||||
if err := s.Stop(); err != nil {
|
||||
t.Log(err)
|
||||
}
|
||||
})
|
||||
t.Cleanup(func() {
|
||||
if err := c.Stop(); err != nil {
|
||||
t.Log(err)
|
||||
}
|
||||
})
|
||||
|
||||
resp := make(chan error, 1)
|
||||
go func() {
|
||||
// Start FinalizeBlock and flush it
|
||||
reqres, err := c.FinalizeBlockAsync(ctx, types.RequestFinalizeBlock{})
|
||||
assert.NoError(t, err)
|
||||
flush, err := c.FlushAsync(ctx)
|
||||
assert.NoError(t, err)
|
||||
// wait 20 ms for all events to travel socket, but
|
||||
// no response yet from server
|
||||
time.Sleep(20 * time.Millisecond)
|
||||
// kill the server, so the connections break
|
||||
err = s.Stop()
|
||||
assert.NoError(t, err)
|
||||
|
||||
// wait for the response from FinalizeBlock
|
||||
reqres.Wait()
|
||||
flush.Wait()
|
||||
resp <- c.Error()
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-time.After(time.Second):
|
||||
require.Fail(t, "No response arrived")
|
||||
case err, ok := <-resp:
|
||||
require.True(t, ok, "Must not close channel")
|
||||
assert.Error(t, err, "We should get EOF error")
|
||||
}
|
||||
}
|
||||
|
||||
func setupClientServer(t *testing.T, app types.Application) (
|
||||
service.Service, abcicli.Client) {
|
||||
// some port between 20k and 30k
|
||||
port := 20000 + rand.Int31()%10000
|
||||
addr := fmt.Sprintf("localhost:%d", port)
|
||||
|
||||
s, err := server.NewServer(addr, "socket", app)
|
||||
require.NoError(t, err)
|
||||
err = s.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
c := abcicli.NewSocketClient(addr, true)
|
||||
err = c.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
return s, c
|
||||
}
|
||||
|
||||
type slowApp struct {
|
||||
types.BaseApplication
|
||||
}
|
||||
|
||||
func (slowApp) FinalizeBlock(req types.RequestFinalizeBlock) types.ResponseFinalizeBlock {
|
||||
time.Sleep(200 * time.Millisecond)
|
||||
return types.ResponseFinalizeBlock{}
|
||||
}
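setupClientServer above picks a pseudo-random port in the 20k-30k range, which can occasionally collide with another process. A common alternative, sketched below, is to let the OS hand out a free port by listening on port 0 and reading back the address; note there is still a short window between closing the probe listener and the server binding, so this is a mitigation rather than a guarantee:

package main

import (
	"fmt"
	"net"
)

// freeTCPAddr asks the OS for an unused localhost port and returns it as host:port.
func freeTCPAddr() (string, error) {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		return "", err
	}
	defer ln.Close()
	return ln.Addr().String(), nil
}

func main() {
	addr, err := freeTCPAddr()
	if err != nil {
		panic(err)
	}
	fmt.Println("use this address for server.NewServer:", addr)
}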
|
||||
@@ -2,33 +2,36 @@ package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/signal"
|
||||
"strings"
|
||||
"syscall"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/version"
|
||||
tmos "github.com/tendermint/tendermint/libs/os"
|
||||
|
||||
abciclient "github.com/tendermint/tendermint/abci/client"
|
||||
abcicli "github.com/tendermint/tendermint/abci/client"
|
||||
"github.com/tendermint/tendermint/abci/example/code"
|
||||
"github.com/tendermint/tendermint/abci/example/counter"
|
||||
"github.com/tendermint/tendermint/abci/example/kvstore"
|
||||
"github.com/tendermint/tendermint/abci/server"
|
||||
servertest "github.com/tendermint/tendermint/abci/tests/server"
|
||||
"github.com/tendermint/tendermint/abci/types"
|
||||
"github.com/tendermint/tendermint/abci/version"
|
||||
"github.com/tendermint/tendermint/proto/tendermint/crypto"
|
||||
)
|
||||
|
||||
// client is a global variable so it can be reused by the console
|
||||
var (
|
||||
client abciclient.Client
|
||||
client abcicli.Client
|
||||
logger log.Logger
|
||||
|
||||
ctx = context.Background()
|
||||
)
|
||||
|
||||
// flags
|
||||
@@ -44,46 +47,52 @@ var (
|
||||
flagHeight int
|
||||
flagProve bool
|
||||
|
||||
// counter
|
||||
flagSerial bool
|
||||
|
||||
// kvstore
|
||||
flagPersist string
|
||||
)
|
||||
|
||||
func RootCmmand(logger log.Logger) *cobra.Command {
|
||||
return &cobra.Command{
|
||||
Use: "abci-cli",
|
||||
Short: "the ABCI CLI tool wraps an ABCI client",
|
||||
Long: "the ABCI CLI tool wraps an ABCI client and is used for testing ABCI servers",
|
||||
PersistentPreRunE: func(cmd *cobra.Command, args []string) (err error) {
|
||||
var RootCmd = &cobra.Command{
|
||||
Use: "abci-cli",
|
||||
Short: "the ABCI CLI tool wraps an ABCI client",
|
||||
Long: "the ABCI CLI tool wraps an ABCI client and is used for testing ABCI servers",
|
||||
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
|
||||
|
||||
switch cmd.Use {
|
||||
case "kvstore", "version":
|
||||
return nil
|
||||
}
|
||||
|
||||
if client == nil {
|
||||
var err error
|
||||
client, err = abciclient.NewClient(logger.With("module", "abci-client"), flagAddress, flagAbci, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := client.Start(cmd.Context()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
switch cmd.Use {
|
||||
case "counter", "kvstore": // for the examples apps, don't pre-run
|
||||
return nil
|
||||
},
|
||||
}
|
||||
case "version": // skip running for version command
|
||||
return nil
|
||||
}
|
||||
|
||||
if logger == nil {
|
||||
logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
|
||||
}
|
||||
|
||||
if client == nil {
|
||||
var err error
|
||||
client, err = abcicli.NewClient(flagAddress, flagAbci, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
client.SetLogger(logger.With("module", "abci-client"))
|
||||
if err := client.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
// Structure for data passed to print response.
|
||||
type response struct {
|
||||
// generic abci response
|
||||
Data []byte
|
||||
Code uint32
|
||||
Info string
|
||||
Log string
|
||||
Status int32
|
||||
Data []byte
|
||||
Code uint32
|
||||
Info string
|
||||
Log string
|
||||
|
||||
Query *queryResponse
|
||||
}
|
||||
@@ -96,48 +105,62 @@ type queryResponse struct {
|
||||
}
|
||||
|
||||
func Execute() error {
|
||||
logger, err := log.NewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cmd := RootCmmand(logger)
|
||||
addGlobalFlags(cmd)
|
||||
addCommands(cmd, logger)
|
||||
return cmd.Execute()
|
||||
addGlobalFlags()
|
||||
addCommands()
|
||||
return RootCmd.Execute()
|
||||
}
|
||||
|
||||
func addGlobalFlags(cmd *cobra.Command) {
|
||||
cmd.PersistentFlags().StringVarP(&flagAddress,
|
||||
func addGlobalFlags() {
|
||||
RootCmd.PersistentFlags().StringVarP(&flagAddress,
|
||||
"address",
|
||||
"",
|
||||
"tcp://0.0.0.0:26658",
|
||||
"address of application socket")
|
||||
cmd.PersistentFlags().StringVarP(&flagAbci, "abci", "", "socket", "either socket or grpc")
|
||||
cmd.PersistentFlags().BoolVarP(&flagVerbose,
|
||||
RootCmd.PersistentFlags().StringVarP(&flagAbci, "abci", "", "socket", "either socket or grpc")
|
||||
RootCmd.PersistentFlags().BoolVarP(&flagVerbose,
|
||||
"verbose",
|
||||
"v",
|
||||
false,
|
||||
"print the command and results as if it were a console session")
|
||||
cmd.PersistentFlags().StringVarP(&flagLogLevel, "log_level", "", "debug", "set the logger level")
|
||||
RootCmd.PersistentFlags().StringVarP(&flagLogLevel, "log_level", "", "debug", "set the logger level")
|
||||
}
|
||||
|
||||
func addCommands(cmd *cobra.Command, logger log.Logger) {
|
||||
cmd.AddCommand(batchCmd)
|
||||
cmd.AddCommand(consoleCmd)
|
||||
cmd.AddCommand(echoCmd)
|
||||
cmd.AddCommand(infoCmd)
|
||||
cmd.AddCommand(finalizeBlockCmd)
|
||||
cmd.AddCommand(checkTxCmd)
|
||||
cmd.AddCommand(commitCmd)
|
||||
cmd.AddCommand(versionCmd)
|
||||
cmd.AddCommand(testCmd)
|
||||
cmd.AddCommand(prepareProposalCmd)
|
||||
cmd.AddCommand(processProposalCmd)
|
||||
cmd.AddCommand(getQueryCmd())
|
||||
func addQueryFlags() {
|
||||
queryCmd.PersistentFlags().StringVarP(&flagPath, "path", "", "/store", "path to prefix query with")
|
||||
queryCmd.PersistentFlags().IntVarP(&flagHeight, "height", "", 0, "height to query the blockchain at")
|
||||
queryCmd.PersistentFlags().BoolVarP(&flagProve,
|
||||
"prove",
|
||||
"",
|
||||
false,
|
||||
"whether or not to return a merkle proof of the query result")
|
||||
}
|
||||
|
||||
func addCounterFlags() {
|
||||
counterCmd.PersistentFlags().BoolVarP(&flagSerial, "serial", "", false, "enforce incrementing (serial) transactions")
|
||||
}
|
||||
|
||||
func addKVStoreFlags() {
|
||||
kvstoreCmd.PersistentFlags().StringVarP(&flagPersist, "persist", "", "", "directory to use for a database")
|
||||
}
|
||||
|
||||
func addCommands() {
|
||||
RootCmd.AddCommand(batchCmd)
|
||||
RootCmd.AddCommand(consoleCmd)
|
||||
RootCmd.AddCommand(echoCmd)
|
||||
RootCmd.AddCommand(infoCmd)
|
||||
RootCmd.AddCommand(deliverTxCmd)
|
||||
RootCmd.AddCommand(checkTxCmd)
|
||||
RootCmd.AddCommand(commitCmd)
|
||||
RootCmd.AddCommand(versionCmd)
|
||||
RootCmd.AddCommand(testCmd)
|
||||
addQueryFlags()
|
||||
RootCmd.AddCommand(queryCmd)
|
||||
|
||||
// examples
|
||||
cmd.AddCommand(getKVStoreCmd(logger))
|
||||
addCounterFlags()
|
||||
RootCmd.AddCommand(counterCmd)
|
||||
addKVStoreFlags()
|
||||
RootCmd.AddCommand(kvstoreCmd)
|
||||
}
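The diff above contrasts a package-level RootCmd with a RootCmmand constructor that takes a logger and wires flags and subcommands onto the returned command. A trimmed-down sketch of that constructor style; newRootCmd and its echo subcommand are illustrative, not the CLI's actual wiring:

package main

import (
	"fmt"
	"io"
	"os"

	"github.com/spf13/cobra"
)

// newRootCmd builds the root command from injected dependencies instead of
// package-level globals, then hangs flags and subcommands off it.
func newRootCmd(out io.Writer) *cobra.Command {
	var verbose bool

	root := &cobra.Command{
		Use:   "abci-cli",
		Short: "the ABCI CLI tool wraps an ABCI client",
	}
	root.PersistentFlags().BoolVarP(&verbose, "verbose", "v", false,
		"print the command and results as if it were a console session")

	root.AddCommand(&cobra.Command{
		Use:   "echo",
		Short: "have the application echo a message",
		Args:  cobra.MaximumNArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			msg := ""
			if len(args) > 0 {
				msg = args[0]
			}
			if verbose {
				fmt.Fprintln(out, ">", cmd.Use, msg)
			}
			fmt.Fprintln(out, "->", msg)
			return nil
		},
	})
	return root
}

func main() {
	if err := newRootCmd(os.Stdout).Execute(); err != nil {
		os.Exit(1)
	}
}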
|
||||
|
||||
var batchCmd = &cobra.Command{
|
||||
@@ -154,11 +177,10 @@ where example.file looks something like:
|
||||
|
||||
check_tx 0x00
|
||||
check_tx 0xff
|
||||
finalize_block 0x00
|
||||
commit
|
||||
deliver_tx 0x00
|
||||
check_tx 0x00
|
||||
finalize_block 0x01 0x04 0xff
|
||||
commit
|
||||
deliver_tx 0x01
|
||||
deliver_tx 0x04
|
||||
info
|
||||
`,
|
||||
Args: cobra.ExactArgs(0),
|
||||
@@ -174,7 +196,7 @@ This command opens an interactive console for running any of the other commands
|
||||
without opening a new connection each time
|
||||
`,
|
||||
Args: cobra.ExactArgs(0),
|
||||
ValidArgs: []string{"echo", "info", "query", "check_tx", "prepare_proposal", "process_proposal", "finalize_block", "commit"},
|
||||
ValidArgs: []string{"echo", "info", "deliver_tx", "check_tx", "commit", "query"},
|
||||
RunE: cmdConsole,
|
||||
}
|
||||
|
||||
@@ -193,12 +215,12 @@ var infoCmd = &cobra.Command{
|
||||
RunE: cmdInfo,
|
||||
}
|
||||
|
||||
var finalizeBlockCmd = &cobra.Command{
|
||||
Use: "finalize_block",
|
||||
Short: "deliver a block of transactions to the application",
|
||||
Long: "deliver a block of transactions to the application",
|
||||
Args: cobra.MinimumNArgs(0),
|
||||
RunE: cmdFinalizeBlock,
|
||||
var deliverTxCmd = &cobra.Command{
|
||||
Use: "deliver_tx",
|
||||
Short: "deliver a new transaction to the application",
|
||||
Long: "deliver a new transaction to the application",
|
||||
Args: cobra.ExactArgs(1),
|
||||
RunE: cmdDeliverTx,
|
||||
}
|
||||
|
||||
var checkTxCmd = &cobra.Command{
|
||||
@@ -223,59 +245,33 @@ var versionCmd = &cobra.Command{
|
||||
Long: "print ABCI console version",
|
||||
Args: cobra.ExactArgs(0),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
fmt.Println(version.ABCIVersion)
|
||||
fmt.Println(version.Version)
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
var prepareProposalCmd = &cobra.Command{
|
||||
Use: "prepare_proposal",
|
||||
Short: "prepare proposal",
|
||||
Long: "prepare proposal",
|
||||
Args: cobra.MinimumNArgs(0),
|
||||
RunE: cmdPrepareProposal,
|
||||
var queryCmd = &cobra.Command{
|
||||
Use: "query",
|
||||
Short: "query the application state",
|
||||
Long: "query the application state",
|
||||
Args: cobra.ExactArgs(1),
|
||||
RunE: cmdQuery,
|
||||
}
|
||||
|
||||
var processProposalCmd = &cobra.Command{
|
||||
Use: "process_proposal",
|
||||
Short: "process proposal",
|
||||
Long: "process proposal",
|
||||
Args: cobra.MinimumNArgs(0),
|
||||
RunE: cmdProcessProposal,
|
||||
var counterCmd = &cobra.Command{
|
||||
Use: "counter",
|
||||
Short: "ABCI demo example",
|
||||
Long: "ABCI demo example",
|
||||
Args: cobra.ExactArgs(0),
|
||||
RunE: cmdCounter,
|
||||
}
|
||||
|
||||
func getQueryCmd() *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "query",
|
||||
Short: "query the application state",
|
||||
Long: "query the application state",
|
||||
Args: cobra.ExactArgs(1),
|
||||
RunE: cmdQuery,
|
||||
}
|
||||
|
||||
cmd.PersistentFlags().StringVarP(&flagPath, "path", "", "/store", "path to prefix query with")
|
||||
cmd.PersistentFlags().IntVarP(&flagHeight, "height", "", 0, "height to query the blockchain at")
|
||||
cmd.PersistentFlags().BoolVarP(&flagProve,
|
||||
"prove",
|
||||
"",
|
||||
false,
|
||||
"whether or not to return a merkle proof of the query result")
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func getKVStoreCmd(logger log.Logger) *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "kvstore",
|
||||
Short: "ABCI demo example",
|
||||
Long: "ABCI demo example",
|
||||
Args: cobra.ExactArgs(0),
|
||||
RunE: makeKVStoreCmd(logger),
|
||||
}
|
||||
|
||||
cmd.PersistentFlags().StringVarP(&flagPersist, "persist", "", "", "directory to use for a database")
|
||||
return cmd
|
||||
|
||||
var kvstoreCmd = &cobra.Command{
|
||||
Use: "kvstore",
|
||||
Short: "ABCI demo example",
|
||||
Long: "ABCI demo example",
|
||||
Args: cobra.ExactArgs(0),
|
||||
RunE: cmdKVStore,
|
||||
}
|
||||
|
||||
var testCmd = &cobra.Command{
|
||||
@@ -316,57 +312,23 @@ func compose(fs []func() error) error {
|
||||
}
|
||||
|
||||
func cmdTest(cmd *cobra.Command, args []string) error {
|
||||
ctx := cmd.Context()
|
||||
return compose(
|
||||
[]func() error{
|
||||
func() error { return servertest.InitChain(ctx, client) },
|
||||
func() error { return servertest.Commit(ctx, client) },
|
||||
func() error { return servertest.InitChain(client) },
|
||||
func() error { return servertest.Commit(client, nil) },
|
||||
func() error { return servertest.DeliverTx(client, []byte("abc"), code.CodeTypeBadNonce, nil) },
|
||||
func() error { return servertest.Commit(client, nil) },
|
||||
func() error { return servertest.DeliverTx(client, []byte{0x00}, code.CodeTypeOK, nil) },
|
||||
func() error { return servertest.Commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 1}) },
|
||||
func() error { return servertest.DeliverTx(client, []byte{0x00}, code.CodeTypeBadNonce, nil) },
|
||||
func() error { return servertest.DeliverTx(client, []byte{0x01}, code.CodeTypeOK, nil) },
|
||||
func() error { return servertest.DeliverTx(client, []byte{0x00, 0x02}, code.CodeTypeOK, nil) },
|
||||
func() error { return servertest.DeliverTx(client, []byte{0x00, 0x03}, code.CodeTypeOK, nil) },
|
||||
func() error { return servertest.DeliverTx(client, []byte{0x00, 0x00, 0x04}, code.CodeTypeOK, nil) },
|
||||
func() error {
|
||||
return servertest.FinalizeBlock(ctx, client, [][]byte{
|
||||
[]byte("abc"),
|
||||
}, []uint32{
|
||||
code.CodeTypeBadNonce,
|
||||
}, nil, nil)
|
||||
},
|
||||
func() error { return servertest.Commit(ctx, client) },
|
||||
func() error {
|
||||
return servertest.FinalizeBlock(ctx, client, [][]byte{
|
||||
{0x00},
|
||||
}, []uint32{
|
||||
code.CodeTypeOK,
|
||||
}, nil, []byte{0, 0, 0, 0, 0, 0, 0, 1})
|
||||
},
|
||||
func() error { return servertest.Commit(ctx, client) },
|
||||
func() error {
|
||||
return servertest.FinalizeBlock(ctx, client, [][]byte{
|
||||
{0x00},
|
||||
{0x01},
|
||||
{0x00, 0x02},
|
||||
{0x00, 0x03},
|
||||
{0x00, 0x00, 0x04},
|
||||
{0x00, 0x00, 0x06},
|
||||
}, []uint32{
|
||||
code.CodeTypeBadNonce,
|
||||
code.CodeTypeOK,
|
||||
code.CodeTypeOK,
|
||||
code.CodeTypeOK,
|
||||
code.CodeTypeOK,
|
||||
code.CodeTypeBadNonce,
|
||||
}, nil, []byte{0, 0, 0, 0, 0, 0, 0, 5})
|
||||
},
|
||||
func() error { return servertest.Commit(ctx, client) },
|
||||
func() error {
|
||||
return servertest.PrepareProposal(ctx, client, [][]byte{
|
||||
{0x01},
|
||||
}, []types.TxRecord_TxAction{
|
||||
types.TxRecord_UNMODIFIED,
|
||||
}, nil)
|
||||
},
|
||||
func() error {
|
||||
return servertest.ProcessProposal(ctx, client, [][]byte{
|
||||
{0x01},
|
||||
}, types.ResponseProcessProposal_ACCEPT)
|
||||
return servertest.DeliverTx(client, []byte{0x00, 0x00, 0x06}, code.CodeTypeBadNonce, nil)
|
||||
},
|
||||
func() error { return servertest.Commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 5}) },
|
||||
})
|
||||
}
|
||||
|
||||
@@ -459,18 +421,14 @@ func muxOnCommands(cmd *cobra.Command, pArgs []string) error {
|
||||
return cmdCheckTx(cmd, actualArgs)
|
||||
case "commit":
|
||||
return cmdCommit(cmd, actualArgs)
|
||||
case "finalize_block":
|
||||
return cmdFinalizeBlock(cmd, actualArgs)
|
||||
case "deliver_tx":
|
||||
return cmdDeliverTx(cmd, actualArgs)
|
||||
case "echo":
|
||||
return cmdEcho(cmd, actualArgs)
|
||||
case "info":
|
||||
return cmdInfo(cmd, actualArgs)
|
||||
case "query":
|
||||
return cmdQuery(cmd, actualArgs)
|
||||
case "prepare_proposal":
|
||||
return cmdPrepareProposal(cmd, actualArgs)
|
||||
case "process_proposal":
|
||||
return cmdProcessProposal(cmd, actualArgs)
|
||||
default:
|
||||
return cmdUnimplemented(cmd, pArgs)
|
||||
}
|
||||
@@ -488,9 +446,12 @@ func cmdUnimplemented(cmd *cobra.Command, args []string) error {
|
||||
})
|
||||
|
||||
fmt.Println("Available commands:")
|
||||
for _, cmd := range cmd.Commands() {
|
||||
fmt.Printf("%s: %s\n", cmd.Use, cmd.Short)
|
||||
}
|
||||
fmt.Printf("%s: %s\n", echoCmd.Use, echoCmd.Short)
|
||||
fmt.Printf("%s: %s\n", infoCmd.Use, infoCmd.Short)
|
||||
fmt.Printf("%s: %s\n", checkTxCmd.Use, checkTxCmd.Short)
|
||||
fmt.Printf("%s: %s\n", deliverTxCmd.Use, deliverTxCmd.Short)
|
||||
fmt.Printf("%s: %s\n", queryCmd.Use, queryCmd.Short)
|
||||
fmt.Printf("%s: %s\n", commitCmd.Use, commitCmd.Short)
|
||||
fmt.Println("Use \"[command] --help\" for more information about a command.")
|
||||
|
||||
return nil
|
||||
@@ -502,15 +463,13 @@ func cmdEcho(cmd *cobra.Command, args []string) error {
|
||||
if len(args) > 0 {
|
||||
msg = args[0]
|
||||
}
|
||||
res, err := client.Echo(cmd.Context(), msg)
|
||||
res, err := client.EchoSync(ctx, msg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
printResponse(cmd, args, response{
|
||||
Data: []byte(res.Message),
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -520,7 +479,7 @@ func cmdInfo(cmd *cobra.Command, args []string) error {
|
||||
if len(args) == 1 {
|
||||
version = args[0]
|
||||
}
|
||||
res, err := client.Info(cmd.Context(), &types.RequestInfo{Version: version})
|
||||
res, err := client.InfoSync(ctx, types.RequestInfo{Version: version})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -532,33 +491,31 @@ func cmdInfo(cmd *cobra.Command, args []string) error {
|
||||
|
||||
const codeBad uint32 = 10
|
||||
|
||||
// Append new txs to application
|
||||
func cmdFinalizeBlock(cmd *cobra.Command, args []string) error {
|
||||
txs := make([][]byte, len(args))
|
||||
for i, arg := range args {
|
||||
txBytes, err := stringOrHexToBytes(arg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
txs[i] = txBytes
|
||||
// Append a new tx to application
|
||||
func cmdDeliverTx(cmd *cobra.Command, args []string) error {
|
||||
if len(args) == 0 {
|
||||
printResponse(cmd, args, response{
|
||||
Code: codeBad,
|
||||
Log: "want the tx",
|
||||
})
|
||||
return nil
|
||||
}
|
||||
res, err := client.FinalizeBlock(cmd.Context(), &types.RequestFinalizeBlock{Txs: txs})
|
||||
txBytes, err := stringOrHexToBytes(args[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
resps := make([]response, 0, len(res.TxResults)+1)
|
||||
for _, tx := range res.TxResults {
|
||||
resps = append(resps, response{
|
||||
res, err := client.FinalizeBlockSync(ctx, types.RequestFinalizeBlock{Txs: [][]byte{txBytes}})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, tx := range res.Txs {
|
||||
printResponse(cmd, args, response{
|
||||
Code: tx.Code,
|
||||
Data: tx.Data,
|
||||
Info: tx.Info,
|
||||
Log: tx.Log,
|
||||
})
|
||||
}
|
||||
resps = append(resps, response{
|
||||
Data: res.AppHash,
|
||||
})
|
||||
printResponse(cmd, args, resps...)
|
||||
return nil
|
||||
}
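Both cmdFinalizeBlock and cmdDeliverTx above turn CLI arguments into tx bytes via stringOrHexToBytes before sending them. Its implementation is not shown in this hunk; the helper below is only a guess at that behaviour (0x-prefixed hex or a quoted literal string), for illustration:

package main

import (
	"encoding/hex"
	"errors"
	"fmt"
	"strings"
)

// argToTxBytes is a hypothetical stand-in for stringOrHexToBytes: 0x-prefixed
// arguments are decoded as hex, "quoted" arguments are taken literally.
// The real helper may behave differently.
func argToTxBytes(arg string) ([]byte, error) {
	switch {
	case strings.HasPrefix(arg, "0x"):
		return hex.DecodeString(arg[2:])
	case len(arg) >= 2 && strings.HasPrefix(arg, "\"") && strings.HasSuffix(arg, "\""):
		return []byte(arg[1 : len(arg)-1]), nil
	default:
		return nil, errors.New("expected a 0x... hex value or a \"quoted\" string")
	}
}

func main() {
	for _, arg := range []string{"0x01", "\"abc\"", "plain"} {
		b, err := argToTxBytes(arg)
		fmt.Printf("%-7s -> %v %v\n", arg, b, err)
	}
}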
|
||||
|
||||
@@ -575,24 +532,28 @@ func cmdCheckTx(cmd *cobra.Command, args []string) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
res, err := client.CheckTx(cmd.Context(), &types.RequestCheckTx{Tx: txBytes})
|
||||
res, err := client.CheckTxSync(ctx, types.RequestCheckTx{Tx: txBytes})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
printResponse(cmd, args, response{
|
||||
Code: res.Code,
|
||||
Data: res.Data,
|
||||
Info: res.Info,
|
||||
Log: res.Log,
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get application Merkle root hash
|
||||
func cmdCommit(cmd *cobra.Command, args []string) error {
|
||||
_, err := client.Commit(cmd.Context())
|
||||
res, err := client.CommitSync(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
printResponse(cmd, args, response{})
|
||||
printResponse(cmd, args, response{
|
||||
Data: res.Data,
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -611,7 +572,7 @@ func cmdQuery(cmd *cobra.Command, args []string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
resQuery, err := client.Query(cmd.Context(), &types.RequestQuery{
|
||||
resQuery, err := client.QuerySync(ctx, types.RequestQuery{
|
||||
Data: queryBytes,
|
||||
Path: flagPath,
|
||||
Height: int64(flagHeight),
|
||||
@@ -634,155 +595,106 @@ func cmdQuery(cmd *cobra.Command, args []string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func inTxArray(txByteArray [][]byte, tx []byte) bool {
|
||||
for _, txTmp := range txByteArray {
|
||||
if bytes.Equal(txTmp, tx) {
|
||||
return true
|
||||
}
|
||||
func cmdCounter(cmd *cobra.Command, args []string) error {
|
||||
app := counter.NewApplication(flagSerial)
|
||||
logger := log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
|
||||
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func cmdPrepareProposal(cmd *cobra.Command, args []string) error {
|
||||
txsBytesArray := make([][]byte, len(args))
|
||||
|
||||
for i, arg := range args {
|
||||
txBytes, err := stringOrHexToBytes(arg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
txsBytesArray[i] = txBytes
|
||||
}
|
||||
|
||||
res, err := client.PrepareProposal(cmd.Context(), &types.RequestPrepareProposal{
|
||||
Txs: txsBytesArray,
|
||||
// kvstore has to have this parameter in order not to reject a tx as the default value is 0
|
||||
MaxTxBytes: 65536,
|
||||
})
|
||||
// Start the listener
|
||||
srv, err := server.NewServer(flagAddress, flagAbci, app)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
resps := make([]response, 0, len(res.TxResults)+1)
|
||||
for _, tx := range res.TxRecords {
|
||||
existingTx := inTxArray(txsBytesArray, tx.Tx)
|
||||
if tx.Action == types.TxRecord_UNKNOWN ||
|
||||
(existingTx && tx.Action == types.TxRecord_ADDED) ||
|
||||
(!existingTx && (tx.Action == types.TxRecord_UNMODIFIED || tx.Action == types.TxRecord_REMOVED)) {
|
||||
resps = append(resps, response{
|
||||
Code: codeBad,
|
||||
Log: "Failed. Tx: " + string(tx.GetTx()) + " action: " + tx.Action.String(),
|
||||
})
|
||||
} else {
|
||||
resps = append(resps, response{
|
||||
Code: code.CodeTypeOK,
|
||||
Log: "Succeeded. Tx: " + string(tx.Tx) + " action: " + tx.Action.String(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
printResponse(cmd, args, resps...)
|
||||
return nil
|
||||
}
|
||||
|
||||
func cmdProcessProposal(cmd *cobra.Command, args []string) error {
|
||||
txsBytesArray := make([][]byte, len(args))
|
||||
|
||||
for i, arg := range args {
|
||||
txBytes, err := stringOrHexToBytes(arg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
txsBytesArray[i] = txBytes
|
||||
}
|
||||
|
||||
res, err := client.ProcessProposal(cmd.Context(), &types.RequestProcessProposal{
|
||||
Txs: txsBytesArray,
|
||||
})
|
||||
if err != nil {
|
||||
srv.SetLogger(logger.With("module", "abci-server"))
|
||||
if err := srv.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
printResponse(cmd, args, response{
|
||||
Status: int32(res.Status),
|
||||
// Stop upon receiving SIGTERM or CTRL-C.
|
||||
tmos.TrapSignal(logger, func() {
|
||||
// Cleanup
|
||||
if err := srv.Stop(); err != nil {
|
||||
logger.Error("Error while stopping server", "err", err)
|
||||
}
|
||||
})
|
||||
return nil
|
||||
|
||||
// Run forever.
|
||||
select {}
|
||||
}
|
||||
|
||||
func makeKVStoreCmd(logger log.Logger) func(*cobra.Command, []string) error {
|
||||
return func(cmd *cobra.Command, args []string) error {
|
||||
// Create the application - in memory or persisted to disk
|
||||
var app types.Application
|
||||
if flagPersist == "" {
|
||||
app = kvstore.NewApplication()
|
||||
} else {
|
||||
app = kvstore.NewPersistentKVStoreApplication(logger, flagPersist)
|
||||
}
|
||||
func cmdKVStore(cmd *cobra.Command, args []string) error {
|
||||
logger := log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
|
||||
|
||||
// Start the listener
|
||||
srv, err := server.NewServer(logger.With("module", "abci-server"), flagAddress, flagAbci, app)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ctx, cancel := signal.NotifyContext(cmd.Context(), syscall.SIGTERM)
|
||||
defer cancel()
|
||||
|
||||
if err := srv.Start(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Run forever.
|
||||
<-ctx.Done()
|
||||
return nil
|
||||
// Create the application - in memory or persisted to disk
|
||||
var app types.Application
|
||||
if flagPersist == "" {
|
||||
app = kvstore.NewApplication()
|
||||
} else {
|
||||
app = kvstore.NewPersistentKVStoreApplication(flagPersist)
|
||||
app.(*kvstore.PersistentKVStoreApplication).SetLogger(logger.With("module", "kvstore"))
|
||||
}
|
||||
|
||||
// Start the listener
|
||||
srv, err := server.NewServer(flagAddress, flagAbci, app)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
srv.SetLogger(logger.With("module", "abci-server"))
|
||||
if err := srv.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Stop upon receiving SIGTERM or CTRL-C.
|
||||
tmos.TrapSignal(logger, func() {
|
||||
// Cleanup
|
||||
if err := srv.Stop(); err != nil {
|
||||
logger.Error("Error while stopping server", "err", err)
|
||||
}
|
||||
})
|
||||
|
||||
// Run forever.
|
||||
select {}
|
||||
}
|
||||
|
||||
//--------------------------------------------------------------------------------
|
||||
|
||||
func printResponse(cmd *cobra.Command, args []string, rsps ...response) {
|
||||
func printResponse(cmd *cobra.Command, args []string, rsp response) {
|
||||
|
||||
if flagVerbose {
|
||||
fmt.Println(">", cmd.Use, strings.Join(args, " "))
|
||||
}
|
||||
|
||||
for _, rsp := range rsps {
|
||||
// Always print the status code.
|
||||
if rsp.Code == types.CodeTypeOK {
|
||||
fmt.Printf("-> code: OK\n")
|
||||
} else {
|
||||
fmt.Printf("-> code: %d\n", rsp.Code)
|
||||
}
|
||||
// Always print the status code.
|
||||
if rsp.Code == types.CodeTypeOK {
|
||||
fmt.Printf("-> code: OK\n")
|
||||
} else {
|
||||
fmt.Printf("-> code: %d\n", rsp.Code)
|
||||
|
||||
if len(rsp.Data) != 0 {
|
||||
// Do not print this line when using the finalize_block command
|
||||
// because the string comes out as gibberish
|
||||
if cmd.Use != "finalize_block" {
|
||||
fmt.Printf("-> data: %s\n", rsp.Data)
|
||||
}
|
||||
fmt.Printf("-> data.hex: 0x%X\n", rsp.Data)
|
||||
}
|
||||
if rsp.Log != "" {
|
||||
fmt.Printf("-> log: %s\n", rsp.Log)
|
||||
}
|
||||
if cmd.Use == "process_proposal" {
|
||||
fmt.Printf("-> status: %s\n", types.ResponseProcessProposal_ProposalStatus_name[rsp.Status])
|
||||
}
|
||||
}
|
||||
|
||||
if rsp.Query != nil {
|
||||
fmt.Printf("-> height: %d\n", rsp.Query.Height)
|
||||
if rsp.Query.Key != nil {
|
||||
fmt.Printf("-> key: %s\n", rsp.Query.Key)
|
||||
fmt.Printf("-> key.hex: %X\n", rsp.Query.Key)
|
||||
}
|
||||
if rsp.Query.Value != nil {
|
||||
fmt.Printf("-> value: %s\n", rsp.Query.Value)
|
||||
fmt.Printf("-> value.hex: %X\n", rsp.Query.Value)
|
||||
}
|
||||
if rsp.Query.ProofOps != nil {
|
||||
fmt.Printf("-> proof: %#v\n", rsp.Query.ProofOps)
|
||||
}
|
||||
if len(rsp.Data) != 0 {
|
||||
// Do not print this line when using the commit command
|
||||
// because the string comes out as gibberish
|
||||
if cmd.Use != "commit" {
|
||||
fmt.Printf("-> data: %s\n", rsp.Data)
|
||||
}
|
||||
fmt.Printf("-> data.hex: 0x%X\n", rsp.Data)
|
||||
}
|
||||
if rsp.Log != "" {
|
||||
fmt.Printf("-> log: %s\n", rsp.Log)
|
||||
}
|
||||
|
||||
if rsp.Query != nil {
|
||||
fmt.Printf("-> height: %d\n", rsp.Query.Height)
|
||||
if rsp.Query.Key != nil {
|
||||
fmt.Printf("-> key: %s\n", rsp.Query.Key)
|
||||
fmt.Printf("-> key.hex: %X\n", rsp.Query.Key)
|
||||
}
|
||||
if rsp.Query.Value != nil {
|
||||
fmt.Printf("-> value: %s\n", rsp.Query.Value)
|
||||
fmt.Printf("-> value.hex: %X\n", rsp.Query.Value)
|
||||
}
|
||||
if rsp.Query.ProofOps != nil {
|
||||
fmt.Printf("-> proof: %#v\n", rsp.Query.ProofOps)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
92 abci/example/counter/counter.go (new file)
@@ -0,0 +1,92 @@
|
||||
package counter
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
|
||||
"github.com/tendermint/tendermint/abci/example/code"
|
||||
"github.com/tendermint/tendermint/abci/types"
|
||||
)
|
||||
|
||||
type Application struct {
|
||||
types.BaseApplication
|
||||
|
||||
hashCount int
|
||||
txCount int
|
||||
serial bool
|
||||
}
|
||||
|
||||
func NewApplication(serial bool) *Application {
|
||||
return &Application{serial: serial}
|
||||
}
|
||||
|
||||
func (app *Application) Info(req types.RequestInfo) types.ResponseInfo {
|
||||
return types.ResponseInfo{Data: fmt.Sprintf("{\"hashes\":%v,\"txs\":%v}", app.hashCount, app.txCount)}
|
||||
}
|
||||
|
||||
func (app *Application) FinalizeBlock(req types.RequestFinalizeBlock) types.ResponseFinalizeBlock {
|
||||
|
||||
if app.serial {
|
||||
for _, tx := range req.Txs {
|
||||
if len(tx) > 8 {
|
||||
return types.ResponseFinalizeBlock{Txs: []*types.ResponseDeliverTx{{
|
||||
Code: code.CodeTypeEncodingError,
|
||||
Log: fmt.Sprintf("Max tx size is 8 bytes, got %d", len(tx))}},
|
||||
}
|
||||
}
|
||||
tx8 := make([]byte, 8)
|
||||
copy(tx8[len(tx8)-len(tx):], tx)
|
||||
txValue := binary.BigEndian.Uint64(tx8)
|
||||
if txValue != uint64(app.txCount) {
|
||||
return types.ResponseFinalizeBlock{
|
||||
Txs: []*types.ResponseDeliverTx{{
|
||||
Code: code.CodeTypeBadNonce,
|
||||
Log: fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.txCount, txValue)}},
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
app.txCount++
|
||||
return types.ResponseFinalizeBlock{Txs: []*types.ResponseDeliverTx{{Code: code.CodeTypeOK}}}
|
||||
}
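The counter app's nonce check right-aligns the (at most 8-byte) tx into an 8-byte buffer and reads it as a big-endian uint64, so short and zero-padded encodings of the same number compare equal. A worked example of just that decoding step:

package main

import (
	"encoding/binary"
	"fmt"
)

// decodeNonce copies the (up to 8-byte) tx into the low-order end of an
// 8-byte buffer and reads it as a big-endian uint64.
func decodeNonce(tx []byte) uint64 {
	tx8 := make([]byte, 8)
	copy(tx8[len(tx8)-len(tx):], tx)
	return binary.BigEndian.Uint64(tx8)
}

func main() {
	fmt.Println(decodeNonce([]byte{0x01}))             // 1
	fmt.Println(decodeNonce([]byte{0x00, 0x00, 0x04})) // 4
}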
|
||||
|
||||
func (app *Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
|
||||
if app.serial {
|
||||
if len(req.Tx) > 8 {
|
||||
return types.ResponseCheckTx{
|
||||
Code: code.CodeTypeEncodingError,
|
||||
Log: fmt.Sprintf("Max tx size is 8 bytes, got %d", len(req.Tx))}
|
||||
}
|
||||
|
||||
tx8 := make([]byte, 8)
|
||||
copy(tx8[len(tx8)-len(req.Tx):], req.Tx)
|
||||
txValue := binary.BigEndian.Uint64(tx8)
|
||||
if txValue < uint64(app.txCount) {
|
||||
return types.ResponseCheckTx{
|
||||
Code: code.CodeTypeBadNonce,
|
||||
Log: fmt.Sprintf("Invalid nonce. Expected >= %v, got %v", app.txCount, txValue)}
|
||||
}
|
||||
}
|
||||
return types.ResponseCheckTx{Code: code.CodeTypeOK}
|
||||
}
|
||||
|
||||
func (app *Application) Commit() (resp types.ResponseCommit) {
|
||||
app.hashCount++
|
||||
if app.txCount == 0 {
|
||||
return types.ResponseCommit{}
|
||||
}
|
||||
hash := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(hash, uint64(app.txCount))
|
||||
return types.ResponseCommit{Data: hash}
|
||||
}
|
||||
|
||||
func (app *Application) Query(reqQuery types.RequestQuery) types.ResponseQuery {
|
||||
switch reqQuery.Path {
|
||||
case "hash":
|
||||
return types.ResponseQuery{Value: []byte(fmt.Sprintf("%v", app.hashCount))}
|
||||
case "tx":
|
||||
return types.ResponseQuery{Value: []byte(fmt.Sprintf("%v", app.txCount))}
|
||||
default:
|
||||
return types.ResponseQuery{Log: fmt.Sprintf("Invalid query path. Expected hash or tx, got %v", reqQuery.Path)}
|
||||
}
|
||||
}
|
||||
@@ -6,18 +6,18 @@ import (
|
||||
"math/rand"
|
||||
"net"
|
||||
"os"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials/insecure"
|
||||
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmnet "github.com/tendermint/tendermint/libs/net"
|
||||
|
||||
abciclient "github.com/tendermint/tendermint/abci/client"
|
||||
abcicli "github.com/tendermint/tendermint/abci/client"
|
||||
"github.com/tendermint/tendermint/abci/example/code"
|
||||
"github.com/tendermint/tendermint/abci/example/kvstore"
|
||||
abciserver "github.com/tendermint/tendermint/abci/server"
|
||||
@@ -29,69 +29,98 @@ func init() {
|
||||
}
|
||||
|
||||
func TestKVStore(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
logger := log.NewNopLogger()
|
||||
|
||||
t.Log("### Testing KVStore")
|
||||
testBulk(ctx, t, logger, kvstore.NewApplication())
|
||||
fmt.Println("### Testing KVStore")
|
||||
testStream(t, kvstore.NewApplication())
|
||||
}
|
||||
|
||||
func TestBaseApp(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
logger := log.NewNopLogger()
|
||||
|
||||
t.Log("### Testing BaseApp")
|
||||
testBulk(ctx, t, logger, types.NewBaseApplication())
|
||||
fmt.Println("### Testing BaseApp")
|
||||
testStream(t, types.NewBaseApplication())
|
||||
}
|
||||
|
||||
func TestGRPC(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
logger := log.NewNopLogger()
|
||||
|
||||
t.Log("### Testing GRPC")
|
||||
testGRPCSync(ctx, t, logger, types.NewBaseApplication())
|
||||
fmt.Println("### Testing GRPC")
|
||||
testGRPCSync(t, types.NewGRPCApplication(types.NewBaseApplication()))
|
||||
}
|
||||
|
||||
func testBulk(ctx context.Context, t *testing.T, logger log.Logger, app types.Application) {
|
||||
t.Helper()
|
||||
|
||||
const numDeliverTxs = 700000
|
||||
func testStream(t *testing.T, app types.Application) {
|
||||
const numDeliverTxs = 20000
|
||||
socketFile := fmt.Sprintf("test-%08x.sock", rand.Int31n(1<<30))
|
||||
defer os.Remove(socketFile)
|
||||
socket := fmt.Sprintf("unix://%v", socketFile)
|
||||
|
||||
// Start the listener
|
||||
server := abciserver.NewSocketServer(logger.With("module", "abci-server"), socket, app)
|
||||
t.Cleanup(server.Wait)
|
||||
err := server.Start(ctx)
|
||||
server := abciserver.NewSocketServer(socket, app)
|
||||
server.SetLogger(log.TestingLogger().With("module", "abci-server"))
|
||||
err := server.Start()
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() {
|
||||
if err := server.Stop(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
})
|
||||
|
||||
// Connect to the socket
|
||||
client := abciclient.NewSocketClient(logger.With("module", "abci-client"), socket, false)
|
||||
t.Cleanup(client.Wait)
|
||||
|
||||
err = client.Start(ctx)
|
||||
client := abcicli.NewSocketClient(socket, false)
|
||||
client.SetLogger(log.TestingLogger().With("module", "abci-client"))
|
||||
err = client.Start()
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() {
|
||||
if err := client.Stop(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
})
|
||||
|
||||
// Construct request
|
||||
rfb := &types.RequestFinalizeBlock{Txs: make([][]byte, numDeliverTxs)}
|
||||
done := make(chan struct{})
|
||||
counter := 0
|
||||
client.SetResponseCallback(func(req *types.Request, res *types.Response) {
|
||||
// Process response
|
||||
switch r := res.Value.(type) {
|
||||
case *types.Response_FinalizeBlock:
|
||||
for _, tx := range r.FinalizeBlock.Txs {
|
||||
counter++
|
||||
if tx.Code != code.CodeTypeOK {
|
||||
t.Error("DeliverTx failed with ret_code", tx.Code)
|
||||
}
|
||||
if counter > numDeliverTxs {
|
||||
t.Fatalf("Too many DeliverTx responses. Got %d, expected %d", counter, numDeliverTxs)
|
||||
}
|
||||
if counter == numDeliverTxs {
|
||||
go func() {
|
||||
time.Sleep(time.Second * 1) // Wait for a bit to allow counter overflow
|
||||
close(done)
|
||||
}()
|
||||
return
|
||||
}
|
||||
}
|
||||
case *types.Response_Flush:
|
||||
// ignore
|
||||
default:
|
||||
t.Error("Unexpected response type", reflect.TypeOf(res.Value))
|
||||
}
|
||||
})
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// Write requests
|
||||
for counter := 0; counter < numDeliverTxs; counter++ {
|
||||
rfb.Txs[counter] = []byte("test")
|
||||
}
|
||||
// Send bulk request
|
||||
res, err := client.FinalizeBlock(ctx, rfb)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, numDeliverTxs, len(res.TxResults), "Number of txs doesn't match")
|
||||
for _, tx := range res.TxResults {
|
||||
require.Equal(t, tx.Code, code.CodeTypeOK, "Tx failed")
|
||||
// Send request
|
||||
tx := []byte("test")
|
||||
_, err = client.FinalizeBlockAsync(ctx, types.RequestFinalizeBlock{Txs: [][]byte{tx}})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Sometimes send flush messages
|
||||
if counter%128 == 0 {
|
||||
err = client.FlushSync(context.Background())
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Send final flush message
|
||||
err = client.Flush(ctx)
|
||||
_, err = client.FlushAsync(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
<-done
|
||||
}
|
||||
|
||||
//-------------------------
|
||||
@@ -101,25 +130,30 @@ func dialerFunc(ctx context.Context, addr string) (net.Conn, error) {
|
||||
return tmnet.Connect(addr)
|
||||
}
|
||||
|
||||
func testGRPCSync(ctx context.Context, t *testing.T, logger log.Logger, app types.Application) {
|
||||
t.Helper()
|
||||
numDeliverTxs := 680000
|
||||
socketFile := fmt.Sprintf("/tmp/test-%08x.sock", rand.Int31n(1<<30))
|
||||
func testGRPCSync(t *testing.T, app types.ABCIApplicationServer) {
|
||||
numDeliverTxs := 2000
|
||||
socketFile := fmt.Sprintf("test-%08x.sock", rand.Int31n(1<<30))
|
||||
defer os.Remove(socketFile)
|
||||
socket := fmt.Sprintf("unix://%v", socketFile)
|
||||
|
||||
// Start the listener
|
||||
server := abciserver.NewGRPCServer(logger.With("module", "abci-server"), socket, app)
|
||||
server := abciserver.NewGRPCServer(socket, app)
|
||||
server.SetLogger(log.TestingLogger().With("module", "abci-server"))
|
||||
if err := server.Start(); err != nil {
|
||||
t.Fatalf("Error starting GRPC server: %v", err.Error())
|
||||
}
|
||||
|
||||
require.NoError(t, server.Start(ctx))
|
||||
t.Cleanup(server.Wait)
|
||||
t.Cleanup(func() {
|
||||
if err := server.Stop(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
})
|
||||
|
||||
// Connect to the socket
|
||||
conn, err := grpc.Dial(socket,
|
||||
grpc.WithTransportCredentials(insecure.NewCredentials()),
|
||||
grpc.WithContextDialer(dialerFunc),
|
||||
)
|
||||
require.NoError(t, err, "Error dialing GRPC server")
|
||||
conn, err := grpc.Dial(socket, grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc))
|
||||
if err != nil {
|
||||
t.Fatalf("Error dialing GRPC server: %v", err.Error())
|
||||
}
|
||||
|
||||
t.Cleanup(func() {
|
||||
if err := conn.Close(); err != nil {
|
||||
@@ -129,17 +163,29 @@ func testGRPCSync(ctx context.Context, t *testing.T, logger log.Logger, app type
|
||||
|
||||
client := types.NewABCIApplicationClient(conn)
|
||||
|
||||
// Construct request
|
||||
rfb := types.RequestFinalizeBlock{Txs: make([][]byte, numDeliverTxs)}
|
||||
// Write requests
|
||||
for counter := 0; counter < numDeliverTxs; counter++ {
|
||||
rfb.Txs[counter] = []byte("test")
|
||||
}
|
||||
// Send request
|
||||
txt := []byte("test")
|
||||
response, err := client.FinalizeBlock(context.Background(), &types.RequestFinalizeBlock{Txs: [][]byte{txt}})
|
||||
if err != nil {
|
||||
t.Fatalf("Error in GRPC DeliverTx: %v", err.Error())
|
||||
}
|
||||
counter++
|
||||
for _, tx := range response.Txs {
|
||||
if tx.Code != code.CodeTypeOK {
|
||||
t.Error("DeliverTx failed with ret_code", tx.Code)
|
||||
}
|
||||
if counter > numDeliverTxs {
|
||||
t.Fatal("Too many DeliverTx responses")
|
||||
}
|
||||
t.Log("response", counter)
|
||||
if counter == numDeliverTxs {
|
||||
go func() {
|
||||
time.Sleep(time.Second * 1) // Wait for a bit to allow counter overflow
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
// Send request
|
||||
response, err := client.FinalizeBlock(ctx, &rfb)
|
||||
require.NoError(t, err, "Error in GRPC FinalizeBlock")
|
||||
require.Equal(t, numDeliverTxs, len(response.TxResults), "Number of txs returned via GRPC doesn't match")
|
||||
for _, tx := range response.TxResults {
|
||||
require.Equal(t, tx.Code, code.CodeTypeOK, "Tx failed")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,7 +4,7 @@ There are two apps here: the KVStoreApplication and the PersistentKVStoreApplic

## KVStoreApplication

The KVStoreApplication is a simple merkle key-value store.
Transactions of the form `key=value` are stored as key-value pairs in the tree.
Transactions without an `=` sign set the value to the key.
The app has no replay protection (other than what the mempool provides).
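For readers skimming the diff, here is a minimal, self-contained sketch of the transaction semantics just described. It mirrors the `handleTx` parsing that appears later in this comparison; `parseKVTx` is an illustrative helper name, not part of the kvstore package.

```go
package main

import (
	"bytes"
	"fmt"
)

// parseKVTx mirrors the semantics described above: "key=value" is stored as
// a key-value pair, while a tx without "=" sets the value to the key itself.
// The helper name is illustrative only.
func parseKVTx(tx []byte) (key, value string) {
	parts := bytes.Split(tx, []byte("="))
	if len(parts) == 2 {
		return string(parts[0]), string(parts[1])
	}
	return string(tx), string(tx)
}

func main() {
	fmt.Println(parseKVTx([]byte("name=satoshi"))) // name satoshi
	fmt.Println(parseKVTx([]byte("ping")))         // ping ping
}
```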
@@ -12,7 +12,7 @@ The app has no replay protection (other than what the mempool provides).

## PersistentKVStoreApplication

The PersistentKVStoreApplication wraps the KVStoreApplication
and provides three additional features:
and provides two additional features:

1) persistence of state across app restarts (using Tendermint's ABCI-Handshake mechanism)
2) validator set changes
@@ -27,4 +27,4 @@ Validator set changes are effected using the following transaction format:

where `pubkeyN` is a base64-encoded 32-byte ed25519 key and `powerN` is a new voting power for the validator with `pubkeyN` (possibly a new one).
To remove a validator from the validator set, set power to `0`.
There is no sybil protection against new validators joining.
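A hedged sketch of building such a validator-set-change transaction. The ed25519 key is generated on the fly purely for illustration; in the example app the equivalent helper is `MakeValSetChangeTx` (shown further down in this diff), which starts from a protobuf public key instead.

```go
package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"encoding/base64"
	"fmt"
)

// Builds a "val:pubkeyN!powerN" transaction as described above.
func main() {
	pub, _, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	power := int64(10) // use 0 to remove the validator from the set
	tx := fmt.Sprintf("val:%s!%d", base64.StdEncoding.EncodeToString(pub), power)
	fmt.Println(tx)
}
```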
@@ -1,7 +1,6 @@
package kvstore

import (
	"context"
	mrand "math/rand"

	"github.com/tendermint/tendermint/abci/types"
@@ -33,9 +32,8 @@ func RandVals(cnt int) []types.ValidatorUpdate {
// InitKVStore initializes the kvstore app with some data,
// which allows tests to pass and is fine as long as you
// don't make any txs that modify the validator state
func InitKVStore(ctx context.Context, app *PersistentKVStoreApplication) error {
	_, err := app.InitChain(ctx, &types.RequestInitChain{
func InitKVStore(app *PersistentKVStoreApplication) {
	app.InitChain(types.RequestInitChain{
		Validators: RandVals(1),
	})
	return err
}
|
||||
|
||||
@@ -2,22 +2,14 @@ package kvstore
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
dbm "github.com/tendermint/tm-db"
|
||||
|
||||
"github.com/tendermint/tendermint/abci/example/code"
|
||||
"github.com/tendermint/tendermint/abci/types"
|
||||
"github.com/tendermint/tendermint/crypto/encoding"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
cryptoproto "github.com/tendermint/tendermint/proto/tendermint/crypto"
|
||||
"github.com/tendermint/tendermint/version"
|
||||
)
|
||||
|
||||
@@ -73,395 +65,114 @@ var _ types.Application = (*Application)(nil)
|
||||
|
||||
type Application struct {
|
||||
types.BaseApplication
|
||||
mu sync.Mutex
|
||||
|
||||
state State
|
||||
RetainBlocks int64 // blocks to retain after commit (via ResponseCommit.RetainHeight)
|
||||
logger log.Logger
|
||||
|
||||
// validator set
|
||||
ValUpdates []types.ValidatorUpdate
|
||||
valAddrToPubKeyMap map[string]cryptoproto.PublicKey
|
||||
}
|
||||
|
||||
func NewApplication() *Application {
|
||||
return &Application{
|
||||
logger: log.NewNopLogger(),
|
||||
state: loadState(dbm.NewMemDB()),
|
||||
valAddrToPubKeyMap: make(map[string]cryptoproto.PublicKey),
|
||||
}
|
||||
state := loadState(dbm.NewMemDB())
|
||||
return &Application{state: state}
|
||||
}
|
||||
|
||||
func (app *Application) InitChain(_ context.Context, req *types.RequestInitChain) (*types.ResponseInitChain, error) {
|
||||
app.mu.Lock()
|
||||
defer app.mu.Unlock()
|
||||
|
||||
for _, v := range req.Validators {
|
||||
r := app.updateValidator(v)
|
||||
if r.IsErr() {
|
||||
app.logger.Error("error updating validators", "r", r)
|
||||
panic("problem updating validators")
|
||||
}
|
||||
}
|
||||
return &types.ResponseInitChain{}, nil
|
||||
}
|
||||
|
||||
func (app *Application) Info(_ context.Context, req *types.RequestInfo) (*types.ResponseInfo, error) {
|
||||
app.mu.Lock()
|
||||
defer app.mu.Unlock()
|
||||
return &types.ResponseInfo{
|
||||
func (app *Application) Info(req types.RequestInfo) (resInfo types.ResponseInfo) {
|
||||
return types.ResponseInfo{
|
||||
Data: fmt.Sprintf("{\"size\":%v}", app.state.Size),
|
||||
Version: version.ABCIVersion,
|
||||
AppVersion: ProtocolVersion,
|
||||
LastBlockHeight: app.state.Height,
|
||||
LastBlockAppHash: app.state.AppHash,
|
||||
}, nil
|
||||
}
|
||||
}
|
||||
|
||||
// tx is either "val:pubkey!power" or "key=value" or just arbitrary bytes
|
||||
func (app *Application) handleTx(tx []byte) *types.ExecTxResult {
|
||||
// if it starts with "val:", update the validator set
|
||||
// format is "val:pubkey!power"
|
||||
if isValidatorTx(tx) {
|
||||
// update validators in the merkle tree
|
||||
// and in app.ValUpdates
|
||||
return app.execValidatorTx(tx)
|
||||
}
|
||||
|
||||
if isPrepareTx(tx) {
|
||||
return app.execPrepareTx(tx)
|
||||
}
|
||||
|
||||
// tx is either "key=value" or just arbitrary bytes
|
||||
func (app *Application) FinalizeBlock(req types.RequestFinalizeBlock) types.ResponseFinalizeBlock {
|
||||
var key, value string
|
||||
parts := bytes.Split(tx, []byte("="))
|
||||
if len(parts) == 2 {
|
||||
key, value = string(parts[0]), string(parts[1])
|
||||
} else {
|
||||
key, value = string(tx), string(tx)
|
||||
}
|
||||
var txs = make([]*types.ResponseDeliverTx, len(req.Txs))
|
||||
|
||||
err := app.state.db.Set(prefixKey([]byte(key)), []byte(value))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
app.state.Size++
|
||||
|
||||
events := []types.Event{
|
||||
{
|
||||
Type: "app",
|
||||
Attributes: []types.EventAttribute{
|
||||
{Key: "creator", Value: "Cosmoshi Netowoko", Index: true},
|
||||
{Key: "key", Value: key, Index: true},
|
||||
{Key: "index_key", Value: "index is working", Index: true},
|
||||
{Key: "noindex_key", Value: "index is working", Index: false},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
return &types.ExecTxResult{Code: code.CodeTypeOK, Events: events}
|
||||
}
|
||||
|
||||
func (app *Application) Close() error {
|
||||
app.mu.Lock()
|
||||
defer app.mu.Unlock()
|
||||
|
||||
return app.state.db.Close()
|
||||
}
|
||||
|
||||
func (app *Application) FinalizeBlock(_ context.Context, req *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
|
||||
app.mu.Lock()
|
||||
defer app.mu.Unlock()
|
||||
|
||||
// reset valset changes
|
||||
app.ValUpdates = make([]types.ValidatorUpdate, 0)
|
||||
|
||||
// Punish validators who committed equivocation.
|
||||
for _, ev := range req.Misbehavior {
|
||||
if ev.Type == types.MisbehaviorType_DUPLICATE_VOTE {
|
||||
addr := string(ev.Validator.Address)
|
||||
if pubKey, ok := app.valAddrToPubKeyMap[addr]; ok {
|
||||
app.updateValidator(types.ValidatorUpdate{
|
||||
PubKey: pubKey,
|
||||
Power: ev.Validator.Power - 1,
|
||||
})
|
||||
app.logger.Info("Decreased val power by 1 because of the equivocation",
|
||||
"val", addr)
|
||||
} else {
|
||||
panic(fmt.Errorf("wanted to punish val %q but can't find it", addr))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
respTxs := make([]*types.ExecTxResult, len(req.Txs))
|
||||
for i, tx := range req.Txs {
|
||||
respTxs[i] = app.handleTx(tx)
|
||||
parts := bytes.Split(tx, []byte("="))
|
||||
if len(parts) == 2 {
|
||||
key, value = string(parts[0]), string(parts[1])
|
||||
} else {
|
||||
key, value = string(tx), string(tx)
|
||||
}
|
||||
|
||||
err := app.state.db.Set(prefixKey([]byte(key)), []byte(value))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
app.state.Size++
|
||||
|
||||
events := []types.Event{
|
||||
{
|
||||
Type: "app",
|
||||
Attributes: []types.EventAttribute{
|
||||
{Key: "creator", Value: "Cosmoshi Netowoko", Index: true},
|
||||
{Key: "key", Value: key, Index: true},
|
||||
{Key: "index_key", Value: "index is working", Index: true},
|
||||
{Key: "noindex_key", Value: "index is working", Index: false},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
txs[i] = &types.ResponseDeliverTx{Code: code.CodeTypeOK, Events: events}
|
||||
}
|
||||
|
||||
return types.ResponseFinalizeBlock{Txs: txs}
|
||||
}
|
||||
|
||||
func (app *Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
|
||||
return types.ResponseCheckTx{Code: code.CodeTypeOK, GasWanted: 1}
|
||||
}
|
||||
|
||||
func (app *Application) Commit() types.ResponseCommit {
|
||||
// Using a memdb - just return the big endian size of the db
|
||||
appHash := make([]byte, 8)
|
||||
binary.PutVarint(appHash, app.state.Size)
|
||||
app.state.AppHash = appHash
|
||||
app.state.Height++
|
||||
|
||||
return &types.ResponseFinalizeBlock{TxResults: respTxs, ValidatorUpdates: app.ValUpdates, AppHash: appHash}, nil
|
||||
}
|
||||
|
||||
func (*Application) CheckTx(_ context.Context, req *types.RequestCheckTx) (*types.ResponseCheckTx, error) {
|
||||
return &types.ResponseCheckTx{Code: code.CodeTypeOK, GasWanted: 1}, nil
|
||||
}
|
||||
|
||||
func (app *Application) Commit(_ context.Context) (*types.ResponseCommit, error) {
|
||||
app.mu.Lock()
|
||||
defer app.mu.Unlock()
|
||||
|
||||
saveState(app.state)
|
||||
|
||||
resp := &types.ResponseCommit{}
|
||||
resp := types.ResponseCommit{Data: appHash}
|
||||
if app.RetainBlocks > 0 && app.state.Height >= app.RetainBlocks {
|
||||
resp.RetainHeight = app.state.Height - app.RetainBlocks + 1
|
||||
}
|
||||
return resp, nil
|
||||
return resp
|
||||
}
|
||||
|
||||
// Returns an associated value or nil if missing.
|
||||
func (app *Application) Query(_ context.Context, reqQuery *types.RequestQuery) (*types.ResponseQuery, error) {
|
||||
app.mu.Lock()
|
||||
defer app.mu.Unlock()
|
||||
|
||||
if reqQuery.Path == "/val" {
|
||||
key := []byte("val:" + string(reqQuery.Data))
|
||||
value, err := app.state.db.Get(key)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return &types.ResponseQuery{
|
||||
Key: reqQuery.Data,
|
||||
Value: value,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (app *Application) Query(reqQuery types.RequestQuery) (resQuery types.ResponseQuery) {
|
||||
if reqQuery.Prove {
|
||||
value, err := app.state.db.Get(prefixKey(reqQuery.Data))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
resQuery := types.ResponseQuery{
|
||||
Index: -1,
|
||||
Key: reqQuery.Data,
|
||||
Value: value,
|
||||
Height: app.state.Height,
|
||||
}
|
||||
|
||||
if value == nil {
|
||||
resQuery.Log = "does not exist"
|
||||
} else {
|
||||
resQuery.Log = "exists"
|
||||
}
|
||||
resQuery.Index = -1 // TODO make Proof return index
|
||||
resQuery.Key = reqQuery.Data
|
||||
resQuery.Value = value
|
||||
resQuery.Height = app.state.Height
|
||||
|
||||
return &resQuery, nil
|
||||
return
|
||||
}
|
||||
|
||||
resQuery.Key = reqQuery.Data
|
||||
value, err := app.state.db.Get(prefixKey(reqQuery.Data))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
resQuery := types.ResponseQuery{
|
||||
Key: reqQuery.Data,
|
||||
Value: value,
|
||||
Height: app.state.Height,
|
||||
}
|
||||
|
||||
if value == nil {
|
||||
resQuery.Log = "does not exist"
|
||||
} else {
|
||||
resQuery.Log = "exists"
|
||||
}
|
||||
resQuery.Value = value
|
||||
resQuery.Height = app.state.Height
|
||||
|
||||
return &resQuery, nil
|
||||
}
|
||||
|
||||
func (app *Application) PrepareProposal(_ context.Context, req *types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
|
||||
app.mu.Lock()
|
||||
defer app.mu.Unlock()
|
||||
|
||||
return &types.ResponsePrepareProposal{
|
||||
TxRecords: app.substPrepareTx(req.Txs, req.MaxTxBytes),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (*Application) ProcessProposal(_ context.Context, req *types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
|
||||
for _, tx := range req.Txs {
|
||||
if len(tx) == 0 || isPrepareTx(tx) {
|
||||
return &types.ResponseProcessProposal{Status: types.ResponseProcessProposal_REJECT}, nil
|
||||
}
|
||||
}
|
||||
return &types.ResponseProcessProposal{Status: types.ResponseProcessProposal_ACCEPT}, nil
|
||||
}
|
||||
|
||||
//---------------------------------------------
|
||||
// update validators
|
||||
|
||||
func (app *Application) Validators() (validators []types.ValidatorUpdate) {
|
||||
app.mu.Lock()
|
||||
defer app.mu.Unlock()
|
||||
|
||||
itr, err := app.state.db.Iterator(nil, nil)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
for ; itr.Valid(); itr.Next() {
|
||||
if isValidatorTx(itr.Key()) {
|
||||
validator := new(types.ValidatorUpdate)
|
||||
err := types.ReadMessage(bytes.NewBuffer(itr.Value()), validator)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
validators = append(validators, *validator)
|
||||
}
|
||||
}
|
||||
if err = itr.Error(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func MakeValSetChangeTx(pubkey cryptoproto.PublicKey, power int64) []byte {
|
||||
pk, err := encoding.PubKeyFromProto(pubkey)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
pubStr := base64.StdEncoding.EncodeToString(pk.Bytes())
|
||||
return []byte(fmt.Sprintf("val:%s!%d", pubStr, power))
|
||||
}
|
||||
|
||||
func isValidatorTx(tx []byte) bool {
|
||||
return strings.HasPrefix(string(tx), ValidatorSetChangePrefix)
|
||||
}
|
||||
|
||||
// format is "val:pubkey!power"
|
||||
// pubkey is a base64-encoded 32-byte ed25519 key
|
||||
func (app *Application) execValidatorTx(tx []byte) *types.ExecTxResult {
|
||||
tx = tx[len(ValidatorSetChangePrefix):]
|
||||
|
||||
// get the pubkey and power
|
||||
pubKeyAndPower := strings.Split(string(tx), "!")
|
||||
if len(pubKeyAndPower) != 2 {
|
||||
return &types.ExecTxResult{
|
||||
Code: code.CodeTypeEncodingError,
|
||||
Log: fmt.Sprintf("Expected 'pubkey!power'. Got %v", pubKeyAndPower)}
|
||||
}
|
||||
pubkeyS, powerS := pubKeyAndPower[0], pubKeyAndPower[1]
|
||||
|
||||
// decode the pubkey
|
||||
pubkey, err := base64.StdEncoding.DecodeString(pubkeyS)
|
||||
if err != nil {
|
||||
return &types.ExecTxResult{
|
||||
Code: code.CodeTypeEncodingError,
|
||||
Log: fmt.Sprintf("Pubkey (%s) is invalid base64", pubkeyS)}
|
||||
}
|
||||
|
||||
// decode the power
|
||||
power, err := strconv.ParseInt(powerS, 10, 64)
|
||||
if err != nil {
|
||||
return &types.ExecTxResult{
|
||||
Code: code.CodeTypeEncodingError,
|
||||
Log: fmt.Sprintf("Power (%s) is not an int", powerS)}
|
||||
}
|
||||
|
||||
// update
|
||||
return app.updateValidator(types.UpdateValidator(pubkey, power, ""))
|
||||
}
|
||||
|
||||
// add, update, or remove a validator
|
||||
func (app *Application) updateValidator(v types.ValidatorUpdate) *types.ExecTxResult {
|
||||
pubkey, err := encoding.PubKeyFromProto(v.PubKey)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("can't decode public key: %w", err))
|
||||
}
|
||||
key := []byte("val:" + string(pubkey.Bytes()))
|
||||
|
||||
if v.Power == 0 {
|
||||
// remove validator
|
||||
hasKey, err := app.state.db.Has(key)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if !hasKey {
|
||||
pubStr := base64.StdEncoding.EncodeToString(pubkey.Bytes())
|
||||
return &types.ExecTxResult{
|
||||
Code: code.CodeTypeUnauthorized,
|
||||
Log: fmt.Sprintf("Cannot remove non-existent validator %s", pubStr)}
|
||||
}
|
||||
if err = app.state.db.Delete(key); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
delete(app.valAddrToPubKeyMap, string(pubkey.Address()))
|
||||
} else {
|
||||
// add or update validator
|
||||
value := bytes.NewBuffer(make([]byte, 0))
|
||||
if err := types.WriteMessage(&v, value); err != nil {
|
||||
return &types.ExecTxResult{
|
||||
Code: code.CodeTypeEncodingError,
|
||||
Log: fmt.Sprintf("error encoding validator: %v", err)}
|
||||
}
|
||||
if err = app.state.db.Set(key, value.Bytes()); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
app.valAddrToPubKeyMap[string(pubkey.Address())] = v.PubKey
|
||||
}
|
||||
|
||||
// we only update the changes array if we successfully updated the tree
|
||||
app.ValUpdates = append(app.ValUpdates, v)
|
||||
|
||||
return &types.ExecTxResult{Code: code.CodeTypeOK}
|
||||
}
|
||||
|
||||
// -----------------------------
|
||||
// prepare proposal machinery
|
||||
|
||||
const PreparePrefix = "prepare"
|
||||
|
||||
func isPrepareTx(tx []byte) bool {
|
||||
return bytes.HasPrefix(tx, []byte(PreparePrefix))
|
||||
}
|
||||
|
||||
// execPrepareTx is a no-op. The tx data is treated as a placeholder
// and is substituted at PrepareProposal time.
|
||||
func (app *Application) execPrepareTx(tx []byte) *types.ExecTxResult {
|
||||
// noop
|
||||
return &types.ExecTxResult{}
|
||||
}
|
||||
|
||||
// substPrepareTx substitutes all the transactions prefixed with 'prepare' in the
|
||||
// proposal for transactions with the prefix stripped.
|
||||
// It marks all of the original transactions as 'REMOVED' so that
|
||||
// Tendermint will remove them from its mempool.
|
||||
func (app *Application) substPrepareTx(blockData [][]byte, maxTxBytes int64) []*types.TxRecord {
|
||||
trs := make([]*types.TxRecord, 0, len(blockData))
|
||||
var removed []*types.TxRecord
|
||||
var totalBytes int64
|
||||
for _, tx := range blockData {
|
||||
txMod := tx
|
||||
action := types.TxRecord_UNMODIFIED
|
||||
if isPrepareTx(tx) {
|
||||
removed = append(removed, &types.TxRecord{
|
||||
Tx: tx,
|
||||
Action: types.TxRecord_REMOVED,
|
||||
})
|
||||
txMod = bytes.TrimPrefix(tx, []byte(PreparePrefix))
|
||||
action = types.TxRecord_ADDED
|
||||
}
|
||||
totalBytes += int64(len(txMod))
|
||||
if totalBytes > maxTxBytes {
|
||||
break
|
||||
}
|
||||
trs = append(trs, &types.TxRecord{
|
||||
Tx: txMod,
|
||||
Action: action,
|
||||
})
|
||||
}
|
||||
|
||||
return append(trs, removed...)
|
||||
return resQuery
|
||||
}
|
||||
|
||||
@@ -3,19 +3,20 @@ package kvstore
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
"github.com/fortytw2/leaktest"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/libs/service"
|
||||
|
||||
abciclient "github.com/tendermint/tendermint/abci/client"
|
||||
abcicli "github.com/tendermint/tendermint/abci/client"
|
||||
"github.com/tendermint/tendermint/abci/example/code"
|
||||
abciserver "github.com/tendermint/tendermint/abci/server"
|
||||
"github.com/tendermint/tendermint/abci/types"
|
||||
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -23,43 +24,41 @@ const (
|
||||
testValue = "def"
|
||||
)
|
||||
|
||||
func testKVStore(ctx context.Context, t *testing.T, app types.Application, tx []byte, key, value string) {
|
||||
req := &types.RequestFinalizeBlock{Txs: [][]byte{tx}}
|
||||
ar, err := app.FinalizeBlock(ctx, req)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(ar.TxResults))
|
||||
require.False(t, ar.TxResults[0].IsErr())
|
||||
// repeating tx doesn't raise error
|
||||
ar, err = app.FinalizeBlock(ctx, req)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(ar.TxResults))
|
||||
require.False(t, ar.TxResults[0].IsErr())
|
||||
// commit
|
||||
_, err = app.Commit(ctx)
|
||||
require.NoError(t, err)
|
||||
var ctx = context.Background()
|
||||
|
||||
info, err := app.Info(ctx, &types.RequestInfo{})
|
||||
require.NoError(t, err)
|
||||
func testKVStore(t *testing.T, app types.Application, tx []byte, key, value string) {
|
||||
req := types.RequestFinalizeBlock{Txs: [][]byte{tx}}
|
||||
ar := app.FinalizeBlock(req)
|
||||
for _, tx := range ar.Txs {
|
||||
require.False(t, tx.IsErr(), ar)
|
||||
}
|
||||
// repeating tx doesn't raise error
|
||||
ar = app.FinalizeBlock(req)
|
||||
for _, tx := range ar.Txs {
|
||||
require.False(t, tx.IsErr(), ar)
|
||||
}
|
||||
// commit
|
||||
app.Commit()
|
||||
|
||||
info := app.Info(types.RequestInfo{})
|
||||
require.NotZero(t, info.LastBlockHeight)
|
||||
|
||||
// make sure query is fine
|
||||
resQuery, err := app.Query(ctx, &types.RequestQuery{
|
||||
resQuery := app.Query(types.RequestQuery{
|
||||
Path: "/store",
|
||||
Data: []byte(key),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, code.CodeTypeOK, resQuery.Code)
|
||||
require.Equal(t, key, string(resQuery.Key))
|
||||
require.Equal(t, value, string(resQuery.Value))
|
||||
require.EqualValues(t, info.LastBlockHeight, resQuery.Height)
|
||||
|
||||
// make sure proof is fine
|
||||
resQuery, err = app.Query(ctx, &types.RequestQuery{
|
||||
resQuery = app.Query(types.RequestQuery{
|
||||
Path: "/store",
|
||||
Data: []byte(key),
|
||||
Prove: true,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.EqualValues(t, code.CodeTypeOK, resQuery.Code)
|
||||
require.Equal(t, key, string(resQuery.Key))
|
||||
require.Equal(t, value, string(resQuery.Value))
|
||||
@@ -67,55 +66,43 @@ func testKVStore(ctx context.Context, t *testing.T, app types.Application, tx []
|
||||
}
|
||||
|
||||
func TestKVStoreKV(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
kvstore := NewApplication()
|
||||
key := testKey
|
||||
value := key
|
||||
tx := []byte(key)
|
||||
testKVStore(ctx, t, kvstore, tx, key, value)
|
||||
testKVStore(t, kvstore, tx, key, value)
|
||||
|
||||
value = testValue
|
||||
tx = []byte(key + "=" + value)
|
||||
testKVStore(ctx, t, kvstore, tx, key, value)
|
||||
testKVStore(t, kvstore, tx, key, value)
|
||||
}
|
||||
|
||||
func TestPersistentKVStoreKV(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
dir := t.TempDir()
|
||||
logger := log.NewNopLogger()
|
||||
|
||||
kvstore := NewPersistentKVStoreApplication(logger, dir)
|
||||
key := testKey
|
||||
value := key
|
||||
tx := []byte(key)
|
||||
testKVStore(ctx, t, kvstore, tx, key, value)
|
||||
|
||||
value = testValue
|
||||
tx = []byte(key + "=" + value)
|
||||
testKVStore(ctx, t, kvstore, tx, key, value)
|
||||
}
|
||||
|
||||
func TestPersistentKVStoreInfo(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
dir := t.TempDir()
|
||||
logger := log.NewNopLogger()
|
||||
|
||||
kvstore := NewPersistentKVStoreApplication(logger, dir)
|
||||
if err := InitKVStore(ctx, kvstore); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
height := int64(0)
|
||||
|
||||
resInfo, err := kvstore.Info(ctx, &types.RequestInfo{})
|
||||
dir, err := ioutil.TempDir("/tmp", "abci-kvstore-test") // TODO
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
kvstore := NewPersistentKVStoreApplication(dir)
|
||||
key := testKey
|
||||
value := key
|
||||
tx := []byte(key)
|
||||
testKVStore(t, kvstore, tx, key, value)
|
||||
|
||||
value = testValue
|
||||
tx = []byte(key + "=" + value)
|
||||
testKVStore(t, kvstore, tx, key, value)
|
||||
}
|
||||
|
||||
func TestPersistentKVStoreInfo(t *testing.T) {
|
||||
dir, err := ioutil.TempDir("/tmp", "abci-kvstore-test") // TODO
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
kvstore := NewPersistentKVStoreApplication(dir)
|
||||
InitKVStore(kvstore)
|
||||
height := int64(0)
|
||||
|
||||
resInfo := kvstore.Info(types.RequestInfo{})
|
||||
if resInfo.LastBlockHeight != height {
|
||||
t.Fatalf("expected height of %d, got %d", height, resInfo.LastBlockHeight)
|
||||
}
|
||||
@@ -123,19 +110,13 @@ func TestPersistentKVStoreInfo(t *testing.T) {
|
||||
// make and apply block
|
||||
height = int64(1)
|
||||
hash := []byte("foo")
|
||||
if _, err := kvstore.FinalizeBlock(ctx, &types.RequestFinalizeBlock{Hash: hash, Height: height}); err != nil {
|
||||
t.Fatal(err)
|
||||
header := tmproto.Header{
|
||||
Height: height,
|
||||
}
|
||||
kvstore.FinalizeBlock(types.RequestFinalizeBlock{Hash: hash, Header: header})
|
||||
kvstore.Commit()
|
||||
|
||||
if _, err := kvstore.Commit(ctx); err != nil {
|
||||
t.Fatal(err)
|
||||
|
||||
}
|
||||
|
||||
resInfo, err = kvstore.Info(ctx, &types.RequestInfo{})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
resInfo = kvstore.Info(types.RequestInfo{})
|
||||
if resInfo.LastBlockHeight != height {
|
||||
t.Fatalf("expected height of %d, got %d", height, resInfo.LastBlockHeight)
|
||||
}
|
||||
@@ -144,22 +125,20 @@ func TestPersistentKVStoreInfo(t *testing.T) {
|
||||
|
||||
// add a validator, remove a validator, update a validator
|
||||
func TestValUpdates(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
kvstore := NewApplication()
|
||||
dir, err := ioutil.TempDir("/tmp", "abci-kvstore-test") // TODO
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
kvstore := NewPersistentKVStoreApplication(dir)
|
||||
|
||||
// init with some validators
|
||||
total := 10
|
||||
nInit := 5
|
||||
vals := RandVals(total)
|
||||
// initialize with the first nInit
|
||||
_, err := kvstore.InitChain(ctx, &types.RequestInitChain{
|
||||
kvstore.InitChain(types.RequestInitChain{
|
||||
Validators: vals[:nInit],
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
vals1, vals2 := vals[:nInit], kvstore.Validators()
|
||||
valsEqual(t, vals1, vals2)
|
||||
@@ -172,7 +151,7 @@ func TestValUpdates(t *testing.T) {
|
||||
tx1 := MakeValSetChangeTx(v1.PubKey, v1.Power)
|
||||
tx2 := MakeValSetChangeTx(v2.PubKey, v2.Power)
|
||||
|
||||
makeApplyBlock(ctx, t, kvstore, 1, diff, tx1, tx2)
|
||||
makeApplyBlock(t, kvstore, 1, diff, tx1, tx2)
|
||||
|
||||
vals1, vals2 = vals[:nInit+2], kvstore.Validators()
|
||||
valsEqual(t, vals1, vals2)
|
||||
@@ -187,7 +166,7 @@ func TestValUpdates(t *testing.T) {
|
||||
tx2 = MakeValSetChangeTx(v2.PubKey, v2.Power)
|
||||
tx3 := MakeValSetChangeTx(v3.PubKey, v3.Power)
|
||||
|
||||
makeApplyBlock(ctx, t, kvstore, 2, diff, tx1, tx2, tx3)
|
||||
makeApplyBlock(t, kvstore, 2, diff, tx1, tx2, tx3)
|
||||
|
||||
vals1 = append(vals[:nInit-2], vals[nInit+1]) // nolint: gocritic
|
||||
vals2 = kvstore.Validators()
|
||||
@@ -203,7 +182,7 @@ func TestValUpdates(t *testing.T) {
|
||||
diff = []types.ValidatorUpdate{v1}
|
||||
tx1 = MakeValSetChangeTx(v1.PubKey, v1.Power)
|
||||
|
||||
makeApplyBlock(ctx, t, kvstore, 3, diff, tx1)
|
||||
makeApplyBlock(t, kvstore, 3, diff, tx1)
|
||||
|
||||
vals1 = append([]types.ValidatorUpdate{v1}, vals1[1:]...)
|
||||
vals2 = kvstore.Validators()
|
||||
@@ -211,23 +190,26 @@ func TestValUpdates(t *testing.T) {
|
||||
|
||||
}
|
||||
|
||||
func makeApplyBlock(ctx context.Context, t *testing.T, kvstore types.Application, heightInt int, diff []types.ValidatorUpdate, txs ...[]byte) {
|
||||
func makeApplyBlock(
|
||||
t *testing.T,
|
||||
kvstore types.Application,
|
||||
heightInt int,
|
||||
diff []types.ValidatorUpdate,
|
||||
txs ...[]byte) {
|
||||
// make and apply block
|
||||
height := int64(heightInt)
|
||||
hash := []byte("foo")
|
||||
resFinalizeBlock, err := kvstore.FinalizeBlock(ctx, &types.RequestFinalizeBlock{
|
||||
Hash: hash,
|
||||
header := tmproto.Header{
|
||||
Height: height,
|
||||
Txs: txs,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
_, err = kvstore.Commit(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
resFinalizeBlock := kvstore.FinalizeBlock(types.RequestFinalizeBlock{
|
||||
Hash: hash,
|
||||
Header: header,
|
||||
Txs: txs,
|
||||
})
|
||||
|
||||
kvstore.Commit()
|
||||
|
||||
valsEqual(t, diff, resFinalizeBlock.ValidatorUpdates)
|
||||
|
||||
@@ -235,7 +217,6 @@ func makeApplyBlock(ctx context.Context, t *testing.T, kvstore types.Application
|
||||
|
||||
// order doesn't matter
|
||||
func valsEqual(t *testing.T, vals1, vals2 []types.ValidatorUpdate) {
|
||||
t.Helper()
|
||||
if len(vals1) != len(vals2) {
|
||||
t.Fatalf("vals dont match in len. got %d, expected %d", len(vals2), len(vals1))
|
||||
}
|
||||
@@ -250,141 +231,139 @@ func valsEqual(t *testing.T, vals1, vals2 []types.ValidatorUpdate) {
|
||||
}
|
||||
}
|
||||
|
||||
func makeSocketClientServer(
|
||||
ctx context.Context,
|
||||
t *testing.T,
|
||||
logger log.Logger,
|
||||
app types.Application,
|
||||
name string,
|
||||
) (abciclient.Client, service.Service, error) {
|
||||
t.Helper()
|
||||
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
t.Cleanup(cancel)
|
||||
t.Cleanup(leaktest.Check(t))
|
||||
|
||||
func makeSocketClientServer(app types.Application, name string) (abcicli.Client, service.Service, error) {
|
||||
// Start the listener
|
||||
socket := fmt.Sprintf("unix://%s.sock", name)
|
||||
logger := log.TestingLogger()
|
||||
|
||||
server := abciserver.NewSocketServer(logger.With("module", "abci-server"), socket, app)
|
||||
if err := server.Start(ctx); err != nil {
|
||||
cancel()
|
||||
server := abciserver.NewSocketServer(socket, app)
|
||||
server.SetLogger(logger.With("module", "abci-server"))
|
||||
if err := server.Start(); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// Connect to the socket
|
||||
client := abciclient.NewSocketClient(logger.With("module", "abci-client"), socket, false)
|
||||
if err := client.Start(ctx); err != nil {
|
||||
cancel()
|
||||
client := abcicli.NewSocketClient(socket, false)
|
||||
client.SetLogger(logger.With("module", "abci-client"))
|
||||
if err := client.Start(); err != nil {
|
||||
if err = server.Stop(); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return client, server, nil
|
||||
}
|
||||
|
||||
func makeGRPCClientServer(
|
||||
ctx context.Context,
|
||||
t *testing.T,
|
||||
logger log.Logger,
|
||||
app types.Application,
|
||||
name string,
|
||||
) (abciclient.Client, service.Service, error) {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
t.Cleanup(cancel)
|
||||
t.Cleanup(leaktest.Check(t))
|
||||
|
||||
func makeGRPCClientServer(app types.Application, name string) (abcicli.Client, service.Service, error) {
|
||||
// Start the listener
|
||||
socket := fmt.Sprintf("unix://%s.sock", name)
|
||||
logger := log.TestingLogger()
|
||||
|
||||
server := abciserver.NewGRPCServer(logger.With("module", "abci-server"), socket, app)
|
||||
|
||||
if err := server.Start(ctx); err != nil {
|
||||
cancel()
|
||||
gapp := types.NewGRPCApplication(app)
|
||||
server := abciserver.NewGRPCServer(socket, gapp)
|
||||
server.SetLogger(logger.With("module", "abci-server"))
|
||||
if err := server.Start(); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
client := abciclient.NewGRPCClient(logger.With("module", "abci-client"), socket, true)
|
||||
|
||||
if err := client.Start(ctx); err != nil {
|
||||
cancel()
|
||||
client := abcicli.NewGRPCClient(socket, true)
|
||||
client.SetLogger(logger.With("module", "abci-client"))
|
||||
if err := client.Start(); err != nil {
|
||||
if err := server.Stop(); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return nil, nil, err
|
||||
}
|
||||
return client, server, nil
|
||||
}
|
||||
|
||||
func TestClientServer(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
logger := log.NewNopLogger()
|
||||
|
||||
// set up socket app
|
||||
kvstore := NewApplication()
|
||||
client, server, err := makeSocketClientServer(ctx, t, logger, kvstore, "kvstore-socket")
|
||||
client, server, err := makeSocketClientServer(kvstore, "kvstore-socket")
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() { cancel(); server.Wait() })
|
||||
t.Cleanup(func() { cancel(); client.Wait() })
|
||||
t.Cleanup(func() {
|
||||
if err := server.Stop(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
})
|
||||
t.Cleanup(func() {
|
||||
if err := client.Stop(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
})
|
||||
|
||||
runClientTests(ctx, t, client)
|
||||
runClientTests(t, client)
|
||||
|
||||
// set up grpc app
|
||||
kvstore = NewApplication()
|
||||
gclient, gserver, err := makeGRPCClientServer(ctx, t, logger, kvstore, "/tmp/kvstore-grpc")
|
||||
gclient, gserver, err := makeGRPCClientServer(kvstore, "kvstore-grpc")
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Cleanup(func() { cancel(); gserver.Wait() })
|
||||
t.Cleanup(func() { cancel(); gclient.Wait() })
|
||||
t.Cleanup(func() {
|
||||
if err := gserver.Stop(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
})
|
||||
t.Cleanup(func() {
|
||||
if err := gclient.Stop(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
})
|
||||
|
||||
runClientTests(ctx, t, gclient)
|
||||
runClientTests(t, gclient)
|
||||
}
|
||||
|
||||
func runClientTests(ctx context.Context, t *testing.T, client abciclient.Client) {
|
||||
func runClientTests(t *testing.T, client abcicli.Client) {
|
||||
// run some tests....
|
||||
key := testKey
|
||||
value := key
|
||||
tx := []byte(key)
|
||||
testClient(ctx, t, client, tx, key, value)
|
||||
testClient(t, client, tx, key, value)
|
||||
|
||||
value = testValue
|
||||
tx = []byte(key + "=" + value)
|
||||
testClient(ctx, t, client, tx, key, value)
|
||||
testClient(t, client, tx, key, value)
|
||||
}
|
||||
|
||||
func testClient(ctx context.Context, t *testing.T, app abciclient.Client, tx []byte, key, value string) {
|
||||
ar, err := app.FinalizeBlock(ctx, &types.RequestFinalizeBlock{Txs: [][]byte{tx}})
|
||||
func testClient(t *testing.T, app abcicli.Client, tx []byte, key, value string) {
|
||||
ar, err := app.FinalizeBlockSync(ctx, types.RequestFinalizeBlock{Txs: [][]byte{tx}})
|
||||
for _, tx := range ar.Txs {
|
||||
require.False(t, tx.IsErr(), ar)
|
||||
}
|
||||
|
||||
ar, err = app.FinalizeBlockSync(ctx, types.RequestFinalizeBlock{Txs: [][]byte{tx}}) // repeating tx doesn't raise error
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(ar.TxResults))
|
||||
require.False(t, ar.TxResults[0].IsErr())
|
||||
// repeating FinalizeBlock doesn't raise error
|
||||
ar, err = app.FinalizeBlock(ctx, &types.RequestFinalizeBlock{Txs: [][]byte{tx}})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(ar.TxResults))
|
||||
require.False(t, ar.TxResults[0].IsErr())
|
||||
for _, tx := range ar.Txs {
|
||||
require.False(t, tx.IsErr(), ar)
|
||||
}
|
||||
// commit
|
||||
_, err = app.Commit(ctx)
|
||||
_, err = app.CommitSync(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
info, err := app.Info(ctx, &types.RequestInfo{})
|
||||
info, err := app.InfoSync(ctx, types.RequestInfo{})
|
||||
require.NoError(t, err)
|
||||
require.NotZero(t, info.LastBlockHeight)
|
||||
|
||||
// make sure query is fine
|
||||
resQuery, err := app.Query(ctx, &types.RequestQuery{
|
||||
resQuery, err := app.QuerySync(ctx, types.RequestQuery{
|
||||
Path: "/store",
|
||||
Data: []byte(key),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, code.CodeTypeOK, resQuery.Code)
|
||||
require.Equal(t, key, string(resQuery.Key))
|
||||
require.Equal(t, value, string(resQuery.Value))
|
||||
require.EqualValues(t, info.LastBlockHeight, resQuery.Height)
|
||||
|
||||
// make sure proof is fine
|
||||
resQuery, err = app.Query(ctx, &types.RequestQuery{
|
||||
resQuery, err = app.QuerySync(ctx, types.RequestQuery{
|
||||
Path: "/store",
|
||||
Data: []byte(key),
|
||||
Prove: true,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, code.CodeTypeOK, resQuery.Code)
|
||||
require.Equal(t, key, string(resQuery.Key))
|
||||
require.Equal(t, value, string(resQuery.Value))
|
||||
|
||||
@@ -1,13 +1,19 @@
|
||||
package kvstore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
dbm "github.com/tendermint/tm-db"
|
||||
|
||||
"github.com/tendermint/tendermint/abci/example/code"
|
||||
"github.com/tendermint/tendermint/abci/types"
|
||||
cryptoenc "github.com/tendermint/tendermint/crypto/encoding"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
cryptoproto "github.com/tendermint/tendermint/proto/tendermint/crypto"
|
||||
pc "github.com/tendermint/tendermint/proto/tendermint/crypto"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -19,28 +25,269 @@ const (
|
||||
var _ types.Application = (*PersistentKVStoreApplication)(nil)
|
||||
|
||||
type PersistentKVStoreApplication struct {
|
||||
*Application
|
||||
app *Application
|
||||
|
||||
// validator set
|
||||
ValUpdates []types.ValidatorUpdate
|
||||
|
||||
valAddrToPubKeyMap map[string]pc.PublicKey
|
||||
|
||||
logger log.Logger
|
||||
}
|
||||
|
||||
func NewPersistentKVStoreApplication(logger log.Logger, dbDir string) *PersistentKVStoreApplication {
|
||||
db, err := dbm.NewGoLevelDB("kvstore", dbDir)
|
||||
func NewPersistentKVStoreApplication(dbDir string) *PersistentKVStoreApplication {
|
||||
name := "kvstore"
|
||||
db, err := dbm.NewGoLevelDB(name, dbDir)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
state := loadState(db)
|
||||
|
||||
return &PersistentKVStoreApplication{
|
||||
Application: &Application{
|
||||
valAddrToPubKeyMap: make(map[string]cryptoproto.PublicKey),
|
||||
state: loadState(db),
|
||||
logger: logger,
|
||||
},
|
||||
app: &Application{state: state},
|
||||
valAddrToPubKeyMap: make(map[string]pc.PublicKey),
|
||||
logger: log.NewNopLogger(),
|
||||
}
|
||||
}
|
||||
|
||||
func (app *PersistentKVStoreApplication) OfferSnapshot(_ context.Context, req *types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
|
||||
return &types.ResponseOfferSnapshot{Result: types.ResponseOfferSnapshot_ABORT}, nil
|
||||
func (app *PersistentKVStoreApplication) Close() error {
|
||||
return app.app.state.db.Close()
|
||||
}
|
||||
|
||||
func (app *PersistentKVStoreApplication) ApplySnapshotChunk(_ context.Context, req *types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
|
||||
return &types.ResponseApplySnapshotChunk{Result: types.ResponseApplySnapshotChunk_ABORT}, nil
|
||||
func (app *PersistentKVStoreApplication) SetLogger(l log.Logger) {
|
||||
app.logger = l
|
||||
}
|
||||
|
||||
func (app *PersistentKVStoreApplication) Info(req types.RequestInfo) types.ResponseInfo {
|
||||
res := app.app.Info(req)
|
||||
res.LastBlockHeight = app.app.state.Height
|
||||
res.LastBlockAppHash = app.app.state.AppHash
|
||||
return res
|
||||
}
|
||||
|
||||
// // tx is either "val:pubkey!power" or "key=value" or just arbitrary bytes
|
||||
// func (app *PersistentKVStoreApplication) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
|
||||
// // if it starts with "val:", update the validator set
|
||||
// // format is "val:pubkey!power"
|
||||
// if isValidatorTx(req.Tx) {
|
||||
// // update validators in the merkle tree
|
||||
// // and in app.ValUpdates
|
||||
// return app.execValidatorTx(req.Tx)
|
||||
// }
|
||||
|
||||
// // otherwise, update the key-value store
|
||||
// return app.app.DeliverTx(req)
|
||||
// }
|
||||
|
||||
func (app *PersistentKVStoreApplication) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
|
||||
return app.app.CheckTx(req)
|
||||
}
|
||||
|
||||
// Commit will panic if InitChain was not called
|
||||
func (app *PersistentKVStoreApplication) Commit() types.ResponseCommit {
|
||||
return app.app.Commit()
|
||||
}
|
||||
|
||||
// When path=/val and data={validator address}, returns the validator update (types.ValidatorUpdate) varint encoded.
|
||||
// For any other path, returns an associated value or nil if missing.
|
||||
func (app *PersistentKVStoreApplication) Query(reqQuery types.RequestQuery) (resQuery types.ResponseQuery) {
|
||||
switch reqQuery.Path {
|
||||
case "/val":
|
||||
key := []byte("val:" + string(reqQuery.Data))
|
||||
value, err := app.app.state.db.Get(key)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
resQuery.Key = reqQuery.Data
|
||||
resQuery.Value = value
|
||||
return
|
||||
default:
|
||||
return app.app.Query(reqQuery)
|
||||
}
|
||||
}
|
||||
|
||||
// Save the validators in the merkle tree
|
||||
func (app *PersistentKVStoreApplication) InitChain(req types.RequestInitChain) types.ResponseInitChain {
|
||||
for _, v := range req.Validators {
|
||||
r := app.updateValidator(v)
|
||||
if r.IsErr() {
|
||||
app.logger.Error("Error updating validators", "r", r)
|
||||
}
|
||||
}
|
||||
return types.ResponseInitChain{}
|
||||
}
|
||||
|
||||
func (app *PersistentKVStoreApplication) ListSnapshots(
|
||||
req types.RequestListSnapshots) types.ResponseListSnapshots {
|
||||
return types.ResponseListSnapshots{}
|
||||
}
|
||||
|
||||
func (app *PersistentKVStoreApplication) LoadSnapshotChunk(
|
||||
req types.RequestLoadSnapshotChunk) types.ResponseLoadSnapshotChunk {
|
||||
return types.ResponseLoadSnapshotChunk{}
|
||||
}
|
||||
|
||||
func (app *PersistentKVStoreApplication) OfferSnapshot(
|
||||
req types.RequestOfferSnapshot) types.ResponseOfferSnapshot {
|
||||
return types.ResponseOfferSnapshot{Result: types.ResponseOfferSnapshot_ABORT}
|
||||
}
|
||||
|
||||
func (app *PersistentKVStoreApplication) ApplySnapshotChunk(
|
||||
req types.RequestApplySnapshotChunk) types.ResponseApplySnapshotChunk {
|
||||
return types.ResponseApplySnapshotChunk{Result: types.ResponseApplySnapshotChunk_ABORT}
|
||||
}
|
||||
|
||||
func (app *PersistentKVStoreApplication) FinalizeBlock(
|
||||
req types.RequestFinalizeBlock) types.ResponseFinalizeBlock {
|
||||
// for i, tx := range req.Txs {
|
||||
// // if it starts with "val:", update the validator set
|
||||
// // format is "val:pubkey!power"
|
||||
// if isValidatorTx(tx) {
|
||||
// // update validators in the merkle tree
|
||||
// // and in app.ValUpdates
|
||||
// return app.execValidatorTx(req.Tx)
|
||||
// }
|
||||
|
||||
// // otherwise, update the key-value store
|
||||
// return app.app.DeliverTx(tx)
|
||||
// }
|
||||
// reset valset changes
|
||||
app.ValUpdates = make([]types.ValidatorUpdate, 0)
|
||||
|
||||
// Punish validators who committed equivocation.
|
||||
for _, ev := range req.ByzantineValidators {
|
||||
if ev.Type == types.EvidenceType_DUPLICATE_VOTE {
|
||||
addr := string(ev.Validator.Address)
|
||||
if pubKey, ok := app.valAddrToPubKeyMap[addr]; ok {
|
||||
app.updateValidator(types.ValidatorUpdate{
|
||||
PubKey: pubKey,
|
||||
Power: ev.Validator.Power - 1,
|
||||
})
|
||||
app.logger.Info("Decreased val power by 1 because of the equivocation",
|
||||
"val", addr)
|
||||
} else {
|
||||
app.logger.Error("Wanted to punish val, but can't find it",
|
||||
"val", addr)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return types.ResponseFinalizeBlock{ValidatorUpdates: app.ValUpdates}
|
||||
}
|
||||
|
||||
//---------------------------------------------
|
||||
// update validators
|
||||
|
||||
func (app *PersistentKVStoreApplication) Validators() (validators []types.ValidatorUpdate) {
|
||||
itr, err := app.app.state.db.Iterator(nil, nil)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
for ; itr.Valid(); itr.Next() {
|
||||
if isValidatorTx(itr.Key()) {
|
||||
validator := new(types.ValidatorUpdate)
|
||||
err := types.ReadMessage(bytes.NewBuffer(itr.Value()), validator)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
validators = append(validators, *validator)
|
||||
}
|
||||
}
|
||||
if err = itr.Error(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func MakeValSetChangeTx(pubkey pc.PublicKey, power int64) []byte {
|
||||
pk, err := cryptoenc.PubKeyFromProto(pubkey)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
pubStr := base64.StdEncoding.EncodeToString(pk.Bytes())
|
||||
return []byte(fmt.Sprintf("val:%s!%d", pubStr, power))
|
||||
}
|
||||
|
||||
func isValidatorTx(tx []byte) bool {
|
||||
return strings.HasPrefix(string(tx), ValidatorSetChangePrefix)
|
||||
}
|
||||
|
||||
// format is "val:pubkey!power"
|
||||
// pubkey is a base64-encoded 32-byte ed25519 key
|
||||
func (app *PersistentKVStoreApplication) execValidatorTx(tx []byte) types.ResponseDeliverTx {
|
||||
tx = tx[len(ValidatorSetChangePrefix):]
|
||||
|
||||
// get the pubkey and power
|
||||
pubKeyAndPower := strings.Split(string(tx), "!")
|
||||
if len(pubKeyAndPower) != 2 {
|
||||
return types.ResponseDeliverTx{
|
||||
Code: code.CodeTypeEncodingError,
|
||||
Log: fmt.Sprintf("Expected 'pubkey!power'. Got %v", pubKeyAndPower)}
|
||||
}
|
||||
pubkeyS, powerS := pubKeyAndPower[0], pubKeyAndPower[1]
|
||||
|
||||
// decode the pubkey
|
||||
pubkey, err := base64.StdEncoding.DecodeString(pubkeyS)
|
||||
if err != nil {
|
||||
return types.ResponseDeliverTx{
|
||||
Code: code.CodeTypeEncodingError,
|
||||
Log: fmt.Sprintf("Pubkey (%s) is invalid base64", pubkeyS)}
|
||||
}
|
||||
|
||||
// decode the power
|
||||
power, err := strconv.ParseInt(powerS, 10, 64)
|
||||
if err != nil {
|
||||
return types.ResponseDeliverTx{
|
||||
Code: code.CodeTypeEncodingError,
|
||||
Log: fmt.Sprintf("Power (%s) is not an int", powerS)}
|
||||
}
|
||||
|
||||
// update
|
||||
return app.updateValidator(types.UpdateValidator(pubkey, power, ""))
|
||||
}
|
||||
|
||||
// add, update, or remove a validator
|
||||
func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate) types.ResponseDeliverTx {
|
||||
pubkey, err := cryptoenc.PubKeyFromProto(v.PubKey)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("can't decode public key: %w", err))
|
||||
}
|
||||
key := []byte("val:" + string(pubkey.Bytes()))
|
||||
|
||||
if v.Power == 0 {
|
||||
// remove validator
|
||||
hasKey, err := app.app.state.db.Has(key)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if !hasKey {
|
||||
pubStr := base64.StdEncoding.EncodeToString(pubkey.Bytes())
|
||||
return types.ResponseDeliverTx{
|
||||
Code: code.CodeTypeUnauthorized,
|
||||
Log: fmt.Sprintf("Cannot remove non-existent validator %s", pubStr)}
|
||||
}
|
||||
if err = app.app.state.db.Delete(key); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
delete(app.valAddrToPubKeyMap, string(pubkey.Address()))
|
||||
} else {
|
||||
// add or update validator
|
||||
value := bytes.NewBuffer(make([]byte, 0))
|
||||
if err := types.WriteMessage(&v, value); err != nil {
|
||||
return types.ResponseDeliverTx{
|
||||
Code: code.CodeTypeEncodingError,
|
||||
Log: fmt.Sprintf("Error encoding validator: %v", err)}
|
||||
}
|
||||
if err = app.app.state.db.Set(key, value.Bytes()); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
app.valAddrToPubKeyMap[string(pubkey.Address())] = v.PubKey
|
||||
}
|
||||
|
||||
// we only update the changes array if we successfully updated the tree
|
||||
app.ValUpdates = append(app.ValUpdates, v)
|
||||
|
||||
return types.ResponseDeliverTx{Code: code.CodeTypeOK}
|
||||
}
|
||||
|
||||
@@ -1,83 +1,61 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
|
||||
"google.golang.org/grpc"
|
||||
|
||||
"github.com/tendermint/tendermint/abci/types"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmnet "github.com/tendermint/tendermint/libs/net"
|
||||
"github.com/tendermint/tendermint/libs/service"
|
||||
)
|
||||
|
||||
type GRPCServer struct {
|
||||
service.BaseService
|
||||
logger log.Logger
|
||||
|
||||
proto string
|
||||
addr string
|
||||
server *grpc.Server
|
||||
proto string
|
||||
addr string
|
||||
listener net.Listener
|
||||
server *grpc.Server
|
||||
|
||||
app types.Application
|
||||
app types.ABCIApplicationServer
|
||||
}
|
||||
|
||||
// NewGRPCServer returns a new gRPC ABCI server
|
||||
func NewGRPCServer(logger log.Logger, protoAddr string, app types.Application) service.Service {
|
||||
func NewGRPCServer(protoAddr string, app types.ABCIApplicationServer) service.Service {
|
||||
proto, addr := tmnet.ProtocolAndAddress(protoAddr)
|
||||
s := &GRPCServer{
|
||||
logger: logger,
|
||||
proto: proto,
|
||||
addr: addr,
|
||||
app: app,
|
||||
proto: proto,
|
||||
addr: addr,
|
||||
listener: nil,
|
||||
app: app,
|
||||
}
|
||||
s.BaseService = *service.NewBaseService(logger, "ABCIServer", s)
|
||||
s.BaseService = *service.NewBaseService(nil, "ABCIServer", s)
|
||||
return s
|
||||
}
|
||||
|
||||
// OnStart starts the gRPC service.
|
||||
func (s *GRPCServer) OnStart(ctx context.Context) error {
|
||||
func (s *GRPCServer) OnStart() error {
|
||||
|
||||
ln, err := net.Listen(s.proto, s.addr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.listener = ln
|
||||
s.server = grpc.NewServer()
|
||||
types.RegisterABCIApplicationServer(s.server, &gRPCApplication{Application: s.app})
|
||||
types.RegisterABCIApplicationServer(s.server, s.app)
|
||||
|
||||
s.logger.Info("Listening", "proto", s.proto, "addr", s.addr)
|
||||
s.Logger.Info("Listening", "proto", s.proto, "addr", s.addr)
|
||||
go func() {
|
||||
go func() {
|
||||
<-ctx.Done()
|
||||
s.server.GracefulStop()
|
||||
}()
|
||||
|
||||
if err := s.server.Serve(ln); err != nil {
|
||||
s.logger.Error("error serving gRPC server", "err", err)
|
||||
if err := s.server.Serve(s.listener); err != nil {
|
||||
s.Logger.Error("Error serving gRPC server", "err", err)
|
||||
}
|
||||
}()
|
||||
return nil
|
||||
}
|
||||
|
||||
// OnStop stops the gRPC server.
|
||||
func (s *GRPCServer) OnStop() { s.server.Stop() }
|
||||
|
||||
//-------------------------------------------------------
|
||||
|
||||
// gRPCApplication is a gRPC shim for Application
|
||||
type gRPCApplication struct {
|
||||
types.Application
|
||||
}
|
||||
|
||||
func (app *gRPCApplication) Echo(_ context.Context, req *types.RequestEcho) (*types.ResponseEcho, error) {
|
||||
return &types.ResponseEcho{Message: req.Message}, nil
|
||||
}
|
||||
|
||||
func (app *gRPCApplication) Flush(_ context.Context, req *types.RequestFlush) (*types.ResponseFlush, error) {
|
||||
return &types.ResponseFlush{}, nil
|
||||
}
|
||||
|
||||
func (app *gRPCApplication) Commit(ctx context.Context, req *types.RequestCommit) (*types.ResponseCommit, error) {
|
||||
return app.Application.Commit(ctx)
|
||||
func (s *GRPCServer) OnStop() {
|
||||
s.server.Stop()
|
||||
}
|
||||
|
||||
@@ -12,18 +12,17 @@ import (
	"fmt"

	"github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/libs/service"
)

func NewServer(logger log.Logger, protoAddr, transport string, app types.Application) (service.Service, error) {
func NewServer(protoAddr, transport string, app types.Application) (service.Service, error) {
	var s service.Service
	var err error
	switch transport {
	case "socket":
		s = NewSocketServer(logger, protoAddr, app)
		s = NewSocketServer(protoAddr, app)
	case "grpc":
		s = NewGRPCServer(logger, protoAddr, app)
		s = NewGRPCServer(protoAddr, types.NewGRPCApplication(app))
	default:
		err = fmt.Errorf("unknown server type %s", transport)
	}
|
||||
|
||||
@@ -2,16 +2,15 @@ package server
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"os"
|
||||
"runtime"
|
||||
"sync"
|
||||
|
||||
"github.com/tendermint/tendermint/abci/types"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
|
||||
tmlog "github.com/tendermint/tendermint/libs/log"
|
||||
tmnet "github.com/tendermint/tendermint/libs/net"
|
||||
"github.com/tendermint/tendermint/libs/service"
|
||||
)
|
||||
@@ -20,298 +19,237 @@ import (
|
||||
|
||||
type SocketServer struct {
|
||||
service.BaseService
|
||||
logger log.Logger
|
||||
isLoggerSet bool
|
||||
|
||||
proto string
|
||||
addr string
|
||||
listener net.Listener
|
||||
|
||||
connsMtx sync.Mutex
|
||||
connsClose map[int]func()
|
||||
connsMtx tmsync.Mutex
|
||||
conns map[int]net.Conn
|
||||
nextConnID int
|
||||
|
||||
app types.Application
|
||||
appMtx tmsync.Mutex
|
||||
app types.Application
|
||||
}
|
||||
|
||||
func NewSocketServer(logger log.Logger, protoAddr string, app types.Application) service.Service {
|
||||
func NewSocketServer(protoAddr string, app types.Application) service.Service {
|
||||
proto, addr := tmnet.ProtocolAndAddress(protoAddr)
|
||||
s := &SocketServer{
|
||||
logger: logger,
|
||||
proto: proto,
|
||||
addr: addr,
|
||||
listener: nil,
|
||||
app: app,
|
||||
connsClose: make(map[int]func()),
|
||||
proto: proto,
|
||||
addr: addr,
|
||||
listener: nil,
|
||||
app: app,
|
||||
conns: make(map[int]net.Conn),
|
||||
}
|
||||
s.BaseService = *service.NewBaseService(logger, "ABCIServer", s)
|
||||
s.BaseService = *service.NewBaseService(nil, "ABCIServer", s)
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *SocketServer) OnStart(ctx context.Context) error {
|
||||
func (s *SocketServer) SetLogger(l tmlog.Logger) {
|
||||
s.BaseService.SetLogger(l)
|
||||
s.isLoggerSet = true
|
||||
}
|
||||
|
||||
func (s *SocketServer) OnStart() error {
|
||||
ln, err := net.Listen(s.proto, s.addr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.listener = ln
|
||||
go s.acceptConnectionsRoutine(ctx)
|
||||
go s.acceptConnectionsRoutine()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *SocketServer) OnStop() {
|
||||
if err := s.listener.Close(); err != nil {
|
||||
s.logger.Error("error closing listener", "err", err)
|
||||
s.Logger.Error("Error closing listener", "err", err)
|
||||
}
|
||||
|
||||
s.connsMtx.Lock()
|
||||
defer s.connsMtx.Unlock()
|
||||
|
||||
for _, closer := range s.connsClose {
|
||||
closer()
|
||||
for id, conn := range s.conns {
|
||||
delete(s.conns, id)
|
||||
if err := conn.Close(); err != nil {
|
||||
s.Logger.Error("Error closing connection", "id", id, "conn", conn, "err", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *SocketServer) addConn(closer func()) int {
|
||||
func (s *SocketServer) addConn(conn net.Conn) int {
|
||||
s.connsMtx.Lock()
|
||||
defer s.connsMtx.Unlock()
|
||||
|
||||
connID := s.nextConnID
|
||||
s.nextConnID++
|
||||
s.connsClose[connID] = closer
|
||||
s.conns[connID] = conn
|
||||
|
||||
return connID
|
||||
}
|
||||
|
||||
// deletes conn even if close errs
|
||||
func (s *SocketServer) rmConn(connID int) {
|
||||
func (s *SocketServer) rmConn(connID int) error {
|
||||
s.connsMtx.Lock()
|
||||
defer s.connsMtx.Unlock()
|
||||
if closer, ok := s.connsClose[connID]; ok {
|
||||
closer()
|
||||
delete(s.connsClose, connID)
|
||||
|
||||
conn, ok := s.conns[connID]
|
||||
if !ok {
|
||||
return fmt.Errorf("connection %d does not exist", connID)
|
||||
}
|
||||
|
||||
delete(s.conns, connID)
|
||||
return conn.Close()
|
||||
}
|
||||
|
||||
func (s *SocketServer) acceptConnectionsRoutine(ctx context.Context) {
|
||||
func (s *SocketServer) acceptConnectionsRoutine() {
|
||||
for {
|
||||
if ctx.Err() != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Accept a connection
|
||||
s.logger.Info("Waiting for new connection...")
|
||||
s.Logger.Info("Waiting for new connection...")
|
||||
conn, err := s.listener.Accept()
|
||||
if err != nil {
|
||||
if !s.IsRunning() {
|
||||
return // Ignore error from listener closing.
|
||||
}
|
||||
s.logger.Error("Failed to accept connection", "err", err)
|
||||
s.Logger.Error("Failed to accept connection", "err", err)
|
||||
continue
|
||||
}
|
||||
|
||||
cctx, ccancel := context.WithCancel(ctx)
|
||||
connID := s.addConn(ccancel)
|
||||
s.Logger.Info("Accepted a new connection")
|
||||
|
||||
s.logger.Info("Accepted a new connection", "id", connID)
|
||||
connID := s.addConn(conn)
|
||||
|
||||
closeConn := make(chan error, 2) // Push to signal connection closed
|
||||
responses := make(chan *types.Response, 1000) // A channel to buffer responses
|
||||
|
||||
once := &sync.Once{}
|
||||
closer := func(err error) {
|
||||
ccancel()
|
||||
once.Do(func() {
|
||||
if cerr := conn.Close(); err != nil {
|
||||
s.logger.Error("error closing connection",
|
||||
"id", connID,
|
||||
"close_err", cerr,
|
||||
"err", err)
|
||||
}
|
||||
s.rmConn(connID)
|
||||
|
||||
switch {
|
||||
case errors.Is(err, context.Canceled):
|
||||
s.logger.Error("Connection terminated",
|
||||
"id", connID,
|
||||
"err", err)
|
||||
case errors.Is(err, context.DeadlineExceeded):
|
||||
s.logger.Error("Connection encountered timeout",
|
||||
"id", connID,
|
||||
"err", err)
|
||||
case errors.Is(err, io.EOF):
|
||||
s.logger.Error("Connection was closed by client",
|
||||
"id", connID)
|
||||
case err != nil:
|
||||
s.logger.Error("Connection error",
|
||||
"id", connID,
|
||||
"err", err)
|
||||
default:
|
||||
s.logger.Error("Connection was closed",
|
||||
"id", connID)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// Read requests from conn and deal with them
|
||||
go s.handleRequests(cctx, closer, conn, responses)
|
||||
go s.handleRequests(closeConn, conn, responses)
|
||||
// Pull responses from 'responses' and write them to conn.
|
||||
go s.handleResponses(cctx, closer, conn, responses)
|
||||
go s.handleResponses(closeConn, conn, responses)
|
||||
|
||||
// Wait until signal to close connection
|
||||
go s.waitForClose(closeConn, connID)
|
||||
}
|
||||
}
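
// [Illustrative sketch, not part of this diff] The new-side accept loop above gives
// every connection a cancelable sub-context plus a closer wrapped in sync.Once, so
// whichever of the reader/writer goroutines fails first closes the connection exactly
// once and the other side just observes ctx.Done(). A stdlib-only sketch of that
// pattern (function and variable names here are placeholders):
package main

import (
	"context"
	"fmt"
	"net"
	"sync"
)

func newConnCloser(ctx context.Context, conn net.Conn) (context.Context, func(error)) {
	cctx, cancel := context.WithCancel(ctx)
	once := &sync.Once{}
	closer := func(cause error) {
		cancel()
		once.Do(func() {
			if cerr := conn.Close(); cerr != nil {
				fmt.Println("close error:", cerr, "cause:", cause)
			}
		})
	}
	return cctx, closer
}

func main() {
	client, server := net.Pipe()
	defer client.Close()

	cctx, closer := newConnCloser(context.Background(), server)
	closer(fmt.Errorf("simulated read error"))
	closer(nil) // no-op: sync.Once guarantees a single Close
	<-cctx.Done()
	fmt.Println("connection context canceled:", cctx.Err())
}
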
|
||||
|
||||
func (s *SocketServer) waitForClose(closeConn chan error, connID int) {
|
||||
err := <-closeConn
|
||||
switch {
|
||||
case err == io.EOF:
|
||||
s.Logger.Error("Connection was closed by client")
|
||||
case err != nil:
|
||||
s.Logger.Error("Connection error", "err", err)
|
||||
default:
|
||||
// never happens
|
||||
s.Logger.Error("Connection was closed")
|
||||
}
|
||||
|
||||
// Close the connection
|
||||
if err := s.rmConn(connID); err != nil {
|
||||
s.Logger.Error("Error closing connection", "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Read requests from conn and deal with them
|
||||
func (s *SocketServer) handleRequests(ctx context.Context, closer func(error), conn io.Reader, responses chan<- *types.Response) {
|
||||
func (s *SocketServer) handleRequests(closeConn chan error, conn io.Reader, responses chan<- *types.Response) {
|
||||
var count int
|
||||
var bufReader = bufio.NewReader(conn)
|
||||
|
||||
defer func() {
|
||||
// make sure to recover from any app-related panics to allow proper socket cleanup
|
||||
if r := recover(); r != nil {
|
||||
r := recover()
|
||||
if r != nil {
|
||||
const size = 64 << 10
|
||||
buf := make([]byte, size)
|
||||
buf = buf[:runtime.Stack(buf, false)]
|
||||
closer(fmt.Errorf("recovered from panic: %v\n%s", r, buf))
|
||||
err := fmt.Errorf("recovered from panic: %v\n%s", r, buf)
|
||||
if !s.isLoggerSet {
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
}
|
||||
closeConn <- err
|
||||
s.appMtx.Unlock()
|
||||
}
|
||||
}()
|
||||
|
||||
for {
|
||||
req := &types.Request{}
|
||||
if err := types.ReadMessage(bufReader, req); err != nil {
|
||||
closer(fmt.Errorf("error reading message: %w", err))
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := s.processRequest(ctx, req)
|
||||
var req = &types.Request{}
|
||||
err := types.ReadMessage(bufReader, req)
|
||||
if err != nil {
|
||||
closer(err)
|
||||
if err == io.EOF {
|
||||
closeConn <- err
|
||||
} else {
|
||||
closeConn <- fmt.Errorf("error reading message: %w", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
closer(ctx.Err())
|
||||
return
|
||||
case responses <- resp:
|
||||
}
|
||||
s.appMtx.Lock()
|
||||
count++
|
||||
s.handleRequest(req, responses)
|
||||
s.appMtx.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
func (s *SocketServer) processRequest(ctx context.Context, req *types.Request) (*types.Response, error) {
|
||||
func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types.Response) {
|
||||
switch r := req.Value.(type) {
|
||||
case *types.Request_Echo:
|
||||
return types.ToResponseEcho(r.Echo.Message), nil
|
||||
responses <- types.ToResponseEcho(r.Echo.Message)
|
||||
case *types.Request_Flush:
|
||||
return types.ToResponseFlush(), nil
|
||||
responses <- types.ToResponseFlush()
|
||||
case *types.Request_Info:
|
||||
res, err := s.app.Info(ctx, r.Info)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return types.ToResponseInfo(res), nil
|
||||
res := s.app.Info(*r.Info)
|
||||
responses <- types.ToResponseInfo(res)
|
||||
case *types.Request_CheckTx:
|
||||
res, err := s.app.CheckTx(ctx, r.CheckTx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return types.ToResponseCheckTx(res), nil
|
||||
res := s.app.CheckTx(*r.CheckTx)
|
||||
responses <- types.ToResponseCheckTx(res)
|
||||
case *types.Request_Commit:
|
||||
res, err := s.app.Commit(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return types.ToResponseCommit(res), nil
|
||||
res := s.app.Commit()
|
||||
responses <- types.ToResponseCommit(res)
|
||||
case *types.Request_Query:
|
||||
res, err := s.app.Query(ctx, r.Query)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return types.ToResponseQuery(res), nil
|
||||
res := s.app.Query(*r.Query)
|
||||
responses <- types.ToResponseQuery(res)
|
||||
case *types.Request_InitChain:
|
||||
res, err := s.app.InitChain(ctx, r.InitChain)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return types.ToResponseInitChain(res), nil
|
||||
res := s.app.InitChain(*r.InitChain)
|
||||
responses <- types.ToResponseInitChain(res)
|
||||
case *types.Request_ListSnapshots:
|
||||
res, err := s.app.ListSnapshots(ctx, r.ListSnapshots)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return types.ToResponseListSnapshots(res), nil
|
||||
res := s.app.ListSnapshots(*r.ListSnapshots)
|
||||
responses <- types.ToResponseListSnapshots(res)
|
||||
case *types.Request_OfferSnapshot:
|
||||
res, err := s.app.OfferSnapshot(ctx, r.OfferSnapshot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return types.ToResponseOfferSnapshot(res), nil
|
||||
case *types.Request_PrepareProposal:
|
||||
res, err := s.app.PrepareProposal(ctx, r.PrepareProposal)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return types.ToResponsePrepareProposal(res), nil
|
||||
case *types.Request_ProcessProposal:
|
||||
res, err := s.app.ProcessProposal(ctx, r.ProcessProposal)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return types.ToResponseProcessProposal(res), nil
|
||||
res := s.app.OfferSnapshot(*r.OfferSnapshot)
|
||||
responses <- types.ToResponseOfferSnapshot(res)
|
||||
case *types.Request_LoadSnapshotChunk:
|
||||
res, err := s.app.LoadSnapshotChunk(ctx, r.LoadSnapshotChunk)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return types.ToResponseLoadSnapshotChunk(res), nil
|
||||
res := s.app.LoadSnapshotChunk(*r.LoadSnapshotChunk)
|
||||
responses <- types.ToResponseLoadSnapshotChunk(res)
|
||||
case *types.Request_ApplySnapshotChunk:
|
||||
res, err := s.app.ApplySnapshotChunk(ctx, r.ApplySnapshotChunk)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return types.ToResponseApplySnapshotChunk(res), nil
|
||||
case *types.Request_ExtendVote:
|
||||
res, err := s.app.ExtendVote(ctx, r.ExtendVote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return types.ToResponseExtendVote(res), nil
|
||||
case *types.Request_VerifyVoteExtension:
|
||||
res, err := s.app.VerifyVoteExtension(ctx, r.VerifyVoteExtension)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return types.ToResponseVerifyVoteExtension(res), nil
|
||||
res := s.app.ApplySnapshotChunk(*r.ApplySnapshotChunk)
|
||||
responses <- types.ToResponseApplySnapshotChunk(res)
|
||||
case *types.Request_FinalizeBlock:
|
||||
res, err := s.app.FinalizeBlock(ctx, r.FinalizeBlock)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return types.ToResponseFinalizeBlock(res), nil
|
||||
res := s.app.FinalizeBlock(*r.FinalizeBlock)
|
||||
responses <- types.ToResponseFinalizeBlock(res)
|
||||
default:
|
||||
return types.ToResponseException("Unknown request"), errors.New("unknown request type")
|
||||
responses <- types.ToResponseException("Unknown request")
|
||||
}
|
||||
}
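
// [Illustrative sketch, not part of this diff] On the new side, each request is handled
// by a function that returns (response, error), and the result is forwarded on the
// responses channel with a select that also honors context cancellation, instead of
// sending directly from a big switch. A stdlib-only sketch of that control flow
// (request/response here are placeholder types, not the ABCI ones):
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

type request struct{ name string }
type response struct{ name string }

func process(_ context.Context, req *request) (*response, error) {
	if req.name == "" {
		return nil, errors.New("unknown request type")
	}
	return &response{name: req.name}, nil
}

func handle(ctx context.Context, reqs <-chan *request, resps chan<- *response) error {
	for req := range reqs {
		resp, err := process(ctx, req)
		if err != nil {
			return fmt.Errorf("processing %q: %w", req.name, err)
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case resps <- resp:
		}
	}
	return nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	reqs := make(chan *request, 2)
	resps := make(chan *response, 2)
	reqs <- &request{name: "echo"}
	reqs <- &request{name: "flush"}
	close(reqs)

	if err := handle(ctx, reqs, resps); err != nil {
		fmt.Println("handler stopped:", err)
		return
	}
	fmt.Println("handled", len(resps), "requests")
}
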
|
||||
|
||||
// Pull responses from 'responses' and write them to conn.
|
||||
func (s *SocketServer) handleResponses(
|
||||
ctx context.Context,
|
||||
closer func(error),
|
||||
conn io.Writer,
|
||||
responses <-chan *types.Response,
|
||||
) {
|
||||
bw := bufio.NewWriter(conn)
|
||||
func (s *SocketServer) handleResponses(closeConn chan error, conn io.Writer, responses <-chan *types.Response) {
|
||||
var count int
|
||||
var bufWriter = bufio.NewWriter(conn)
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
closer(ctx.Err())
|
||||
var res = <-responses
|
||||
err := types.WriteMessage(res, bufWriter)
|
||||
if err != nil {
|
||||
closeConn <- fmt.Errorf("error writing message: %w", err)
|
||||
return
|
||||
case res := <-responses:
|
||||
if err := types.WriteMessage(res, bw); err != nil {
|
||||
closer(fmt.Errorf("error writing message: %w", err))
|
||||
return
|
||||
}
|
||||
if err := bw.Flush(); err != nil {
|
||||
closer(fmt.Errorf("error writing message: %w", err))
|
||||
}
|
||||
if _, ok := res.Value.(*types.Response_Flush); ok {
|
||||
err = bufWriter.Flush()
|
||||
if err != nil {
|
||||
closeConn <- fmt.Errorf("error flushing write buffer: %w", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
count++
|
||||
}
|
||||
}
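
// [Illustrative sketch, not part of this diff] The two sides of this hunk use different
// buffering strategies: the old writer flushes bufio only when a Response_Flush comes
// through, while the new writer flushes after every message. A stdlib-only sketch of
// the difference (the message type is a placeholder, not the ABCI Response):
package main

import (
	"bufio"
	"bytes"
	"fmt"
)

type message struct {
	payload string
	isFlush bool
}

func writeFlushPerMessage(w *bufio.Writer, msgs []message) error {
	for _, m := range msgs {
		if _, err := w.WriteString(m.payload); err != nil {
			return err
		}
		if err := w.Flush(); err != nil { // new-side behavior: flush every write
			return err
		}
	}
	return nil
}

func writeFlushOnMarker(w *bufio.Writer, msgs []message) error {
	for _, m := range msgs {
		if _, err := w.WriteString(m.payload); err != nil {
			return err
		}
		if m.isFlush { // old-side behavior: flush only on an explicit flush message
			if err := w.Flush(); err != nil {
				return err
			}
		}
	}
	return nil
}

func main() {
	msgs := []message{{payload: "a"}, {payload: "b"}} // note: no flush marker

	var buf1, buf2 bytes.Buffer
	_ = writeFlushPerMessage(bufio.NewWriter(&buf1), msgs)
	_ = writeFlushOnMarker(bufio.NewWriter(&buf2), msgs)
	fmt.Printf("per-message flush delivered %d bytes, marker-only flush delivered %d bytes\n", buf1.Len(), buf2.Len())
}
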
|
||||
|
||||
@@ -1,40 +1,27 @@
|
||||
package tests
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/fortytw2/leaktest"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
abciclientent "github.com/tendermint/tendermint/abci/client"
|
||||
abciclient "github.com/tendermint/tendermint/abci/client"
|
||||
"github.com/tendermint/tendermint/abci/example/kvstore"
|
||||
abciserver "github.com/tendermint/tendermint/abci/server"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
)
|
||||
|
||||
func TestClientServerNoAddrPrefix(t *testing.T) {
|
||||
t.Cleanup(leaktest.Check(t))
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
const (
|
||||
addr = "localhost:26658"
|
||||
transport = "socket"
|
||||
)
|
||||
addr := "localhost:26658"
|
||||
transport := "socket"
|
||||
app := kvstore.NewApplication()
|
||||
logger := log.NewTestingLogger(t)
|
||||
|
||||
server, err := abciserver.NewServer(logger, addr, transport, app)
|
||||
server, err := abciserver.NewServer(addr, transport, app)
|
||||
assert.NoError(t, err, "expected no error on NewServer")
|
||||
err = server.Start(ctx)
|
||||
err = server.Start()
|
||||
assert.NoError(t, err, "expected no error on server.Start")
|
||||
t.Cleanup(server.Wait)
|
||||
|
||||
client, err := abciclientent.NewClient(logger, addr, transport, true)
|
||||
client, err := abciclient.NewClient(addr, transport, true)
|
||||
assert.NoError(t, err, "expected no error on NewClient")
|
||||
err = client.Start(ctx)
|
||||
err = client.Start()
|
||||
assert.NoError(t, err, "expected no error on client.Start")
|
||||
t.Cleanup(client.Wait)
|
||||
}
|
||||
|
||||
@@ -7,12 +7,14 @@ import (
|
||||
"fmt"
|
||||
mrand "math/rand"
|
||||
|
||||
abciclient "github.com/tendermint/tendermint/abci/client"
|
||||
abcicli "github.com/tendermint/tendermint/abci/client"
|
||||
"github.com/tendermint/tendermint/abci/types"
|
||||
tmrand "github.com/tendermint/tendermint/libs/rand"
|
||||
)
|
||||
|
||||
func InitChain(ctx context.Context, client abciclient.Client) error {
|
||||
var ctx = context.Background()
|
||||
|
||||
func InitChain(client abcicli.Client) error {
|
||||
total := 10
|
||||
vals := make([]types.ValidatorUpdate, total)
|
||||
for i := 0; i < total; i++ {
|
||||
@@ -21,7 +23,7 @@ func InitChain(ctx context.Context, client abciclient.Client) error {
|
||||
power := mrand.Int()
|
||||
vals[i] = types.UpdateValidator(pubkey, int64(power), "")
|
||||
}
|
||||
_, err := client.InitChain(ctx, &types.RequestInitChain{
|
||||
_, err := client.InitChainSync(ctx, types.RequestInitChain{
|
||||
Validators: vals,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -32,77 +34,51 @@ func InitChain(ctx context.Context, client abciclient.Client) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func Commit(ctx context.Context, client abciclient.Client) error {
|
||||
_, err := client.Commit(ctx)
|
||||
func Commit(client abcicli.Client, hashExp []byte) error {
|
||||
res, err := client.CommitSync(ctx)
|
||||
data := res.Data
|
||||
if err != nil {
|
||||
fmt.Println("Failed test: Commit")
|
||||
fmt.Printf("error while committing: %v\n", err)
|
||||
return err
|
||||
}
|
||||
if !bytes.Equal(data, hashExp) {
|
||||
fmt.Println("Failed test: Commit")
|
||||
fmt.Printf("Commit hash was unexpected. Got %X expected %X\n", data, hashExp)
|
||||
return errors.New("commitTx failed")
|
||||
}
|
||||
fmt.Println("Passed test: Commit")
|
||||
return nil
|
||||
}
|
||||
|
||||
func FinalizeBlock(ctx context.Context, client abciclient.Client, txBytes [][]byte, codeExp []uint32, dataExp []byte, hashExp []byte) error {
|
||||
res, _ := client.FinalizeBlock(ctx, &types.RequestFinalizeBlock{Txs: txBytes})
|
||||
appHash := res.AppHash
|
||||
for i, tx := range res.TxResults {
|
||||
func FinalizeBlock(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
|
||||
res, _ := client.FinalizeBlockSync(ctx, types.RequestFinalizeBlock{Txs: [][]byte{txBytes}})
|
||||
for _, tx := range res.Txs {
|
||||
code, data, log := tx.Code, tx.Data, tx.Log
|
||||
if code != codeExp[i] {
|
||||
fmt.Println("Failed test: FinalizeBlock")
|
||||
fmt.Printf("FinalizeBlock response code was unexpected. Got %v expected %v. Log: %v\n",
|
||||
if code != codeExp {
|
||||
fmt.Println("Failed test: DeliverTx")
|
||||
fmt.Printf("DeliverTx response code was unexpected. Got %v expected %v. Log: %v\n",
|
||||
code, codeExp, log)
|
||||
return errors.New("FinalizeBlock error")
|
||||
return errors.New("deliverTx error")
|
||||
}
|
||||
if !bytes.Equal(data, dataExp) {
|
||||
fmt.Println("Failed test: FinalizeBlock")
|
||||
fmt.Printf("FinalizeBlock response data was unexpected. Got %X expected %X\n",
|
||||
fmt.Println("Failed test: DeliverTx")
|
||||
fmt.Printf("DeliverTx response data was unexpected. Got %X expected %X\n",
|
||||
data, dataExp)
|
||||
return errors.New("FinalizeBlock error")
|
||||
return errors.New("deliverTx error")
|
||||
}
|
||||
}
|
||||
if !bytes.Equal(appHash, hashExp) {
|
||||
fmt.Println("Failed test: FinalizeBlock")
|
||||
fmt.Printf("Application hash was unexpected. Got %X expected %X\n", appHash, hashExp)
|
||||
return errors.New("FinalizeBlock error")
|
||||
}
|
||||
fmt.Println("Passed test: FinalizeBlock")
|
||||
fmt.Println("Passed test: DeliverTx")
|
||||
return nil
|
||||
}
|
||||
|
||||
func PrepareProposal(ctx context.Context, client abciclient.Client, txBytes [][]byte, codeExp []types.TxRecord_TxAction, dataExp []byte) error {
|
||||
res, _ := client.PrepareProposal(ctx, &types.RequestPrepareProposal{Txs: txBytes})
|
||||
for i, tx := range res.TxRecords {
|
||||
if tx.Action != codeExp[i] {
|
||||
fmt.Println("Failed test: PrepareProposal")
|
||||
fmt.Printf("PrepareProposal response code was unexpected. Got %v expected %v.",
|
||||
tx.Action, codeExp)
|
||||
return errors.New("PrepareProposal error")
|
||||
}
|
||||
}
|
||||
fmt.Println("Passed test: PrepareProposal")
|
||||
return nil
|
||||
}
|
||||
|
||||
func ProcessProposal(ctx context.Context, client abciclient.Client, txBytes [][]byte, statusExp types.ResponseProcessProposal_ProposalStatus) error {
|
||||
res, _ := client.ProcessProposal(ctx, &types.RequestProcessProposal{Txs: txBytes})
|
||||
if res.Status != statusExp {
|
||||
fmt.Println("Failed test: ProcessProposal")
|
||||
fmt.Printf("ProcessProposal response status was unexpected. Got %v expected %v.",
|
||||
res.Status, statusExp)
|
||||
return errors.New("ProcessProposal error")
|
||||
}
|
||||
fmt.Println("Passed test: ProcessProposal")
|
||||
return nil
|
||||
}
|
||||
|
||||
func CheckTx(ctx context.Context, client abciclient.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
|
||||
res, _ := client.CheckTx(ctx, &types.RequestCheckTx{Tx: txBytes})
|
||||
code, data := res.Code, res.Data
|
||||
func CheckTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
|
||||
res, _ := client.CheckTxSync(ctx, types.RequestCheckTx{Tx: txBytes})
|
||||
code, data, log := res.Code, res.Data, res.Log
|
||||
if code != codeExp {
|
||||
fmt.Println("Failed test: CheckTx")
|
||||
fmt.Printf("CheckTx response code was unexpected. Got %v expected %v.,",
|
||||
code, codeExp)
|
||||
fmt.Printf("CheckTx response code was unexpected. Got %v expected %v. Log: %v\n",
|
||||
code, codeExp, log)
|
||||
return errors.New("checkTx")
|
||||
}
|
||||
if !bytes.Equal(data, dataExp) {
|
||||
|
||||
68
abci/tests/test_app/app.go
Normal file
@@ -0,0 +1,68 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
abcicli "github.com/tendermint/tendermint/abci/client"
|
||||
"github.com/tendermint/tendermint/abci/types"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
)
|
||||
|
||||
var ctx = context.Background()
|
||||
|
||||
func startClient(abciType string) abcicli.Client {
|
||||
// Start client
|
||||
client, err := abcicli.NewClient("tcp://127.0.0.1:26658", abciType, true)
|
||||
if err != nil {
|
||||
panic(err.Error())
|
||||
}
|
||||
logger := log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
|
||||
client.SetLogger(logger.With("module", "abcicli"))
|
||||
if err := client.Start(); err != nil {
|
||||
panicf("connecting to abci_app: %v", err.Error())
|
||||
}
|
||||
|
||||
return client
|
||||
}
|
||||
|
||||
func commit(client abcicli.Client, hashExp []byte) {
|
||||
res, err := client.CommitSync(ctx)
|
||||
if err != nil {
|
||||
panicf("client error: %v", err)
|
||||
}
|
||||
if !bytes.Equal(res.Data, hashExp) {
|
||||
panicf("Commit hash was unexpected. Got %X expected %X", res.Data, hashExp)
|
||||
}
|
||||
}
|
||||
|
||||
type tx struct {
|
||||
Data []byte
|
||||
CodeExp uint32
|
||||
DataExp []byte
|
||||
}
|
||||
|
||||
func finalizeBlock(client abcicli.Client, txs []tx) {
|
||||
var txsData = make([][]byte, len(txs))
|
||||
for i, tx := range txs {
|
||||
txsData[i] = tx.Data
|
||||
}
|
||||
|
||||
res, err := client.FinalizeBlockSync(ctx, types.RequestFinalizeBlock{Txs: txsData})
|
||||
if err != nil {
|
||||
panicf("client error: %v", err)
|
||||
}
|
||||
for i, tx := range res.Txs {
|
||||
if tx.Code != txs[i].CodeExp {
|
||||
panicf("DeliverTx response code was unexpected. Got %v expected %v. Log: %v", tx.Code, txs[i].CodeExp, tx.Log)
|
||||
}
|
||||
if !bytes.Equal(tx.Data, txs[i].DataExp) {
|
||||
panicf("DeliverTx response data was unexpected. Got %X expected %X", tx.Data, txs[i].DataExp)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func panicf(format string, a ...interface{}) {
|
||||
panic(fmt.Sprintf(format, a...))
|
||||
}
|
||||
95
abci/tests/test_app/main.go
Normal file
@@ -0,0 +1,95 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"time"
|
||||
|
||||
"github.com/tendermint/tendermint/abci/types"
|
||||
)
|
||||
|
||||
var abciType string
|
||||
|
||||
func init() {
|
||||
abciType = os.Getenv("ABCI")
|
||||
if abciType == "" {
|
||||
abciType = "socket"
|
||||
}
|
||||
}
|
||||
|
||||
func main() {
|
||||
testCounter()
|
||||
}
|
||||
|
||||
const (
|
||||
maxABCIConnectTries = 10
|
||||
)
|
||||
|
||||
func ensureABCIIsUp(typ string, n int) error {
|
||||
var err error
|
||||
cmdString := "abci-cli echo hello"
|
||||
if typ == "grpc" {
|
||||
cmdString = "abci-cli --abci grpc echo hello"
|
||||
}
|
||||
|
||||
for i := 0; i < n; i++ {
|
||||
cmd := exec.Command("bash", "-c", cmdString)
|
||||
_, err = cmd.CombinedOutput()
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func testCounter() {
|
||||
abciApp := os.Getenv("ABCI_APP")
|
||||
if abciApp == "" {
|
||||
panic("No ABCI_APP specified")
|
||||
}
|
||||
|
||||
fmt.Printf("Running %s test with abci=%s\n", abciApp, abciType)
|
||||
subCommand := fmt.Sprintf("abci-cli %s", abciApp)
|
||||
cmd := exec.Command("bash", "-c", subCommand)
|
||||
cmd.Stdout = os.Stdout
|
||||
if err := cmd.Start(); err != nil {
|
||||
log.Fatalf("starting %q err: %v", abciApp, err)
|
||||
}
|
||||
defer func() {
|
||||
if err := cmd.Process.Kill(); err != nil {
|
||||
log.Printf("error on process kill: %v", err)
|
||||
}
|
||||
if err := cmd.Wait(); err != nil {
|
||||
log.Printf("error while waiting for cmd to exit: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
if err := ensureABCIIsUp(abciType, maxABCIConnectTries); err != nil {
|
||||
log.Fatalf("echo failed: %v", err) //nolint:gocritic
|
||||
}
|
||||
|
||||
client := startClient(abciType)
|
||||
defer func() {
|
||||
if err := client.Stop(); err != nil {
|
||||
log.Printf("error trying client stop: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
// commit(client, nil)
|
||||
// deliverTx(client, []byte("abc"), code.CodeTypeBadNonce, nil)
|
||||
commit(client, nil)
|
||||
finalizeBlock(client, []tx{{Data: []byte{0x00}, CodeExp: types.CodeTypeOK, DataExp: nil}})
|
||||
commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 1})
|
||||
// deliverTx(client, []byte{0x00}, code.CodeTypeBadNonce, nil)
|
||||
txs := []tx{
|
||||
{Data: []byte{0x01}, DataExp: nil, CodeExp: types.CodeTypeOK},
|
||||
{Data: []byte{0x00, 0x02}, DataExp: nil, CodeExp: types.CodeTypeOK},
|
||||
{Data: []byte{0x00, 0x03}, DataExp: nil, CodeExp: types.CodeTypeOK},
|
||||
{Data: []byte{0x00, 0x00, 0x04}, DataExp: nil, CodeExp: types.CodeTypeOK}}
|
||||
finalizeBlock(client, txs)
|
||||
// deliverTx(client, []byte{0x00, 0x00, 0x06}, code.CodeTypeBadNonce, nil)
|
||||
commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 5})
|
||||
}
|
||||
28
abci/tests/test_app/test.sh
Executable file
@@ -0,0 +1,28 @@
|
||||
#! /bin/bash
|
||||
set -e
|
||||
|
||||
# These tests spawn the counter app and server by execing the ABCI_APP command and run some simple client tests against it
|
||||
|
||||
# Get the directory of where this script is.
|
||||
export PATH="$GOBIN:$PATH"
|
||||
SOURCE="${BASH_SOURCE[0]}"
|
||||
while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done
|
||||
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
|
||||
|
||||
# Change into that dir because we expect that.
|
||||
cd "$DIR"
|
||||
|
||||
echo "RUN COUNTER OVER SOCKET"
|
||||
# test golang counter
|
||||
ABCI_APP="counter" go run -mod=readonly ./*.go
|
||||
echo "----------------------"
|
||||
|
||||
|
||||
echo "RUN COUNTER OVER GRPC"
|
||||
# test golang counter via grpc
|
||||
ABCI_APP="counter --abci=grpc" ABCI="grpc" go run -mod=readonly ./*.go
|
||||
echo "----------------------"
|
||||
|
||||
# test nodejs counter
|
||||
# TODO: fix node app
|
||||
#ABCI_APP="node $GOPATH/src/github.com/tendermint/js-abci/example/app.js" go test -test.run TestCounter
|
||||
@@ -1,18 +1,10 @@
echo hello
info
prepare_proposal "abc"
process_proposal "abc"
finalize_block "abc"
commit
deliver_tx "abc"
info
commit
query "abc"
finalize_block "def=xyz" "ghi=123"
deliver_tx "def=xyz"
commit
query "def"
prepare_proposal "preparedef"
process_proposal "def"
process_proposal "preparedef"
prepare_proposal
process_proposal
finalize_block
commit
@@ -8,78 +8,44 @@
|
||||
-> data: {"size":0}
|
||||
-> data.hex: 0x7B2273697A65223A307D
|
||||
|
||||
> prepare_proposal "abc"
|
||||
-> code: OK
|
||||
-> log: Succeeded. Tx: abc action: UNMODIFIED
|
||||
|
||||
> process_proposal "abc"
|
||||
-> code: OK
|
||||
-> status: ACCEPT
|
||||
|
||||
> finalize_block "abc"
|
||||
-> code: OK
|
||||
-> code: OK
|
||||
-> data.hex: 0x0200000000000000
|
||||
|
||||
> commit
|
||||
-> code: OK
|
||||
-> data.hex: 0x0000000000000000
|
||||
|
||||
> deliver_tx "abc"
|
||||
-> code: OK
|
||||
|
||||
> info
|
||||
-> code: OK
|
||||
-> data: {"size":1}
|
||||
-> data.hex: 0x7B2273697A65223A317D
|
||||
|
||||
> commit
|
||||
-> code: OK
|
||||
-> data.hex: 0x0200000000000000
|
||||
|
||||
> query "abc"
|
||||
-> code: OK
|
||||
-> log: exists
|
||||
-> height: 1
|
||||
-> height: 2
|
||||
-> key: abc
|
||||
-> key.hex: 616263
|
||||
-> value: abc
|
||||
-> value.hex: 616263
|
||||
|
||||
> finalize_block "def=xyz" "ghi=123"
|
||||
> deliver_tx "def=xyz"
|
||||
-> code: OK
|
||||
-> code: OK
|
||||
-> code: OK
|
||||
-> data.hex: 0x0600000000000000
|
||||
|
||||
> commit
|
||||
-> code: OK
|
||||
-> data.hex: 0x0400000000000000
|
||||
|
||||
> query "def"
|
||||
-> code: OK
|
||||
-> log: exists
|
||||
-> height: 2
|
||||
-> height: 3
|
||||
-> key: def
|
||||
-> key.hex: 646566
|
||||
-> value: xyz
|
||||
-> value.hex: 78797A
|
||||
|
||||
> prepare_proposal "preparedef"
|
||||
-> code: OK
|
||||
-> log: Succeeded. Tx: def action: ADDED
|
||||
-> code: OK
|
||||
-> log: Succeeded. Tx: preparedef action: REMOVED
|
||||
|
||||
> process_proposal "def"
|
||||
-> code: OK
|
||||
-> status: ACCEPT
|
||||
|
||||
> process_proposal "preparedef"
|
||||
-> code: OK
|
||||
-> status: REJECT
|
||||
|
||||
> prepare_proposal
|
||||
|
||||
> process_proposal
|
||||
-> code: OK
|
||||
-> status: ACCEPT
|
||||
|
||||
> finalize_block
|
||||
-> code: OK
|
||||
-> data.hex: 0x0600000000000000
|
||||
|
||||
> commit
|
||||
-> code: OK
|
||||
|
||||
|
||||
@@ -1,10 +1,7 @@
check_tx 0x00
check_tx 0xff
finalize_block 0x00
commit
deliver_tx 0x00
check_tx 0x00
finalize_block 0x01
commit
finalize_block 0x04
commit
deliver_tx 0x01
deliver_tx 0x04
info

@@ -4,35 +4,20 @@
|
||||
> check_tx 0xff
|
||||
-> code: OK
|
||||
|
||||
> finalize_block 0x00
|
||||
-> code: OK
|
||||
-> code: OK
|
||||
-> data.hex: 0x0200000000000000
|
||||
|
||||
> commit
|
||||
> deliver_tx 0x00
|
||||
-> code: OK
|
||||
|
||||
> check_tx 0x00
|
||||
-> code: OK
|
||||
|
||||
> finalize_block 0x01
|
||||
-> code: OK
|
||||
-> code: OK
|
||||
-> data.hex: 0x0400000000000000
|
||||
|
||||
> commit
|
||||
> deliver_tx 0x01
|
||||
-> code: OK
|
||||
|
||||
> finalize_block 0x04
|
||||
-> code: OK
|
||||
-> code: OK
|
||||
-> data.hex: 0x0600000000000000
|
||||
|
||||
> commit
|
||||
> deliver_tx 0x04
|
||||
-> code: OK
|
||||
|
||||
> info
|
||||
-> code: OK
|
||||
-> data: {"size":3}
|
||||
-> data.hex: 0x7B2273697A65223A337D
|
||||
-> data: {"hashes":0,"txs":3}
|
||||
-> data.hex: 0x7B22686173686573223A302C22747873223A337D
|
||||
|
||||
|
||||
@@ -30,8 +30,6 @@ function testExample() {
cat "${INPUT}.out.new"
echo "Expected:"
cat "${INPUT}.out"
echo "Diff:"
diff "${INPUT}.out" "${INPUT}.out.new"
exit 1
fi

@@ -39,6 +37,7 @@ function testExample() {
}

testExample 1 tests/test_cli/ex1.abci abci-cli kvstore
testExample 2 tests/test_cli/ex2.abci abci-cli counter

echo ""
echo "PASS"

@@ -1,36 +1,31 @@
|
||||
package types
|
||||
|
||||
import "context"
|
||||
import (
|
||||
"context"
|
||||
)
|
||||
|
||||
//go:generate ../../scripts/mockery_generate.sh Application
|
||||
// Application is an interface that enables any finite, deterministic state machine
|
||||
// to be driven by a blockchain-based replication engine via the ABCI.
|
||||
// All methods take a RequestXxx argument and return a ResponseXxx argument,
|
||||
// except CheckTx/DeliverTx, which take `tx []byte`, and `Commit`, which takes nothing.
|
||||
type Application interface {
|
||||
// Info/Query Connection
|
||||
Info(context.Context, *RequestInfo) (*ResponseInfo, error) // Return application info
|
||||
Query(context.Context, *RequestQuery) (*ResponseQuery, error) // Query for state
|
||||
Info(RequestInfo) ResponseInfo // Return application info
|
||||
Query(RequestQuery) ResponseQuery // Query for state
|
||||
|
||||
// Mempool Connection
|
||||
CheckTx(context.Context, *RequestCheckTx) (*ResponseCheckTx, error) // Validate a tx for the mempool
|
||||
CheckTx(RequestCheckTx) ResponseCheckTx // Validate a tx for the mempool
|
||||
|
||||
// Consensus Connection
|
||||
InitChain(context.Context, *RequestInitChain) (*ResponseInitChain, error) // Initialize blockchain w validators/other info from TendermintCore
|
||||
PrepareProposal(context.Context, *RequestPrepareProposal) (*ResponsePrepareProposal, error)
|
||||
ProcessProposal(context.Context, *RequestProcessProposal) (*ResponseProcessProposal, error)
|
||||
// Commit the state and return the application Merkle root hash
|
||||
Commit(context.Context) (*ResponseCommit, error)
|
||||
// Create application specific vote extension
|
||||
ExtendVote(context.Context, *RequestExtendVote) (*ResponseExtendVote, error)
|
||||
// Verify application's vote extension data
|
||||
VerifyVoteExtension(context.Context, *RequestVerifyVoteExtension) (*ResponseVerifyVoteExtension, error)
|
||||
// Deliver the decided block with its txs to the Application
|
||||
FinalizeBlock(context.Context, *RequestFinalizeBlock) (*ResponseFinalizeBlock, error)
|
||||
InitChain(RequestInitChain) ResponseInitChain // Initialize blockchain w validators/other info from TendermintCore
|
||||
FinalizeBlock(RequestFinalizeBlock) ResponseFinalizeBlock
|
||||
Commit() ResponseCommit // Commit the state and return the application Merkle root hash
|
||||
|
||||
// State Sync Connection
|
||||
ListSnapshots(context.Context, *RequestListSnapshots) (*ResponseListSnapshots, error) // List available snapshots
|
||||
OfferSnapshot(context.Context, *RequestOfferSnapshot) (*ResponseOfferSnapshot, error) // Offer a snapshot to the application
|
||||
LoadSnapshotChunk(context.Context, *RequestLoadSnapshotChunk) (*ResponseLoadSnapshotChunk, error) // Load a snapshot chunk
|
||||
ApplySnapshotChunk(context.Context, *RequestApplySnapshotChunk) (*ResponseApplySnapshotChunk, error) // Apply a snapshot chunk
|
||||
ListSnapshots(RequestListSnapshots) ResponseListSnapshots // List available snapshots
|
||||
OfferSnapshot(RequestOfferSnapshot) ResponseOfferSnapshot // Offer a snapshot to the application
|
||||
LoadSnapshotChunk(RequestLoadSnapshotChunk) ResponseLoadSnapshotChunk // Load a snapshot chunk
|
||||
ApplySnapshotChunk(RequestApplySnapshotChunk) ResponseApplySnapshotChunk // Apply a snapshot chunk
|
||||
}
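
// [Illustrative sketch, not part of this diff] With the new-side interface, an
// application can embed BaseApplication and override only the methods it needs;
// every method now takes a context and a pointer request and returns a pointer
// response plus an error. CountingApp and its field names are hypothetical:
package main

import (
	"context"
	"fmt"

	"github.com/tendermint/tendermint/abci/types"
)

type CountingApp struct {
	types.BaseApplication
	txSeen int
}

func (app *CountingApp) FinalizeBlock(_ context.Context, req *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
	results := make([]*types.ExecTxResult, len(req.Txs))
	for i := range req.Txs {
		app.txSeen++
		results[i] = &types.ExecTxResult{Code: types.CodeTypeOK}
	}
	return &types.ResponseFinalizeBlock{TxResults: results}, nil
}

// Compile-time check that the override still satisfies the interface.
var _ types.Application = (*CountingApp)(nil)

func main() {
	app := &CountingApp{}
	res, err := app.FinalizeBlock(context.Background(), &types.RequestFinalizeBlock{Txs: [][]byte{[]byte("tx1")}})
	fmt.Println(len(res.TxResults), err, app.txSeen)
}
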
|
||||
|
||||
//-------------------------------------------------------
|
||||
@@ -38,84 +33,123 @@ type Application interface {
|
||||
|
||||
var _ Application = (*BaseApplication)(nil)
|
||||
|
||||
type BaseApplication struct{}
|
||||
type BaseApplication struct {
|
||||
}
|
||||
|
||||
func NewBaseApplication() *BaseApplication {
|
||||
return &BaseApplication{}
|
||||
}
|
||||
|
||||
func (BaseApplication) Info(_ context.Context, req *RequestInfo) (*ResponseInfo, error) {
|
||||
return &ResponseInfo{}, nil
|
||||
func (BaseApplication) Info(req RequestInfo) ResponseInfo {
|
||||
return ResponseInfo{}
|
||||
}
|
||||
|
||||
func (BaseApplication) CheckTx(_ context.Context, req *RequestCheckTx) (*ResponseCheckTx, error) {
|
||||
return &ResponseCheckTx{Code: CodeTypeOK}, nil
|
||||
func (BaseApplication) CheckTx(req RequestCheckTx) ResponseCheckTx {
|
||||
return ResponseCheckTx{Code: CodeTypeOK}
|
||||
}
|
||||
|
||||
func (BaseApplication) Commit(_ context.Context) (*ResponseCommit, error) {
|
||||
return &ResponseCommit{}, nil
|
||||
func (BaseApplication) Commit() ResponseCommit {
|
||||
return ResponseCommit{}
|
||||
}
|
||||
|
||||
func (BaseApplication) ExtendVote(_ context.Context, req *RequestExtendVote) (*ResponseExtendVote, error) {
|
||||
return &ResponseExtendVote{}, nil
|
||||
func (BaseApplication) Query(req RequestQuery) ResponseQuery {
|
||||
return ResponseQuery{Code: CodeTypeOK}
|
||||
}
|
||||
|
||||
func (BaseApplication) VerifyVoteExtension(_ context.Context, req *RequestVerifyVoteExtension) (*ResponseVerifyVoteExtension, error) {
|
||||
return &ResponseVerifyVoteExtension{
|
||||
Status: ResponseVerifyVoteExtension_ACCEPT,
|
||||
}, nil
|
||||
func (BaseApplication) InitChain(req RequestInitChain) ResponseInitChain {
|
||||
return ResponseInitChain{}
|
||||
}
|
||||
|
||||
func (BaseApplication) Query(_ context.Context, req *RequestQuery) (*ResponseQuery, error) {
|
||||
return &ResponseQuery{Code: CodeTypeOK}, nil
|
||||
func (BaseApplication) ListSnapshots(req RequestListSnapshots) ResponseListSnapshots {
|
||||
return ResponseListSnapshots{}
|
||||
}
|
||||
|
||||
func (BaseApplication) InitChain(_ context.Context, req *RequestInitChain) (*ResponseInitChain, error) {
|
||||
return &ResponseInitChain{}, nil
|
||||
func (BaseApplication) OfferSnapshot(req RequestOfferSnapshot) ResponseOfferSnapshot {
|
||||
return ResponseOfferSnapshot{}
|
||||
}
|
||||
|
||||
func (BaseApplication) ListSnapshots(_ context.Context, req *RequestListSnapshots) (*ResponseListSnapshots, error) {
|
||||
return &ResponseListSnapshots{}, nil
|
||||
func (BaseApplication) LoadSnapshotChunk(req RequestLoadSnapshotChunk) ResponseLoadSnapshotChunk {
|
||||
return ResponseLoadSnapshotChunk{}
|
||||
}
|
||||
|
||||
func (BaseApplication) OfferSnapshot(_ context.Context, req *RequestOfferSnapshot) (*ResponseOfferSnapshot, error) {
|
||||
return &ResponseOfferSnapshot{}, nil
|
||||
func (BaseApplication) ApplySnapshotChunk(req RequestApplySnapshotChunk) ResponseApplySnapshotChunk {
|
||||
return ResponseApplySnapshotChunk{}
|
||||
}
|
||||
|
||||
func (BaseApplication) LoadSnapshotChunk(_ context.Context, _ *RequestLoadSnapshotChunk) (*ResponseLoadSnapshotChunk, error) {
|
||||
return &ResponseLoadSnapshotChunk{}, nil
|
||||
func (BaseApplication) FinalizeBlock(req RequestFinalizeBlock) ResponseFinalizeBlock {
|
||||
return ResponseFinalizeBlock{}
|
||||
}
|
||||
|
||||
func (BaseApplication) ApplySnapshotChunk(_ context.Context, req *RequestApplySnapshotChunk) (*ResponseApplySnapshotChunk, error) {
|
||||
return &ResponseApplySnapshotChunk{}, nil
|
||||
//-------------------------------------------------------
|
||||
|
||||
// GRPCApplication is a GRPC wrapper for Application
|
||||
type GRPCApplication struct {
|
||||
app Application
|
||||
}
|
||||
|
||||
func (BaseApplication) PrepareProposal(_ context.Context, req *RequestPrepareProposal) (*ResponsePrepareProposal, error) {
|
||||
trs := make([]*TxRecord, 0, len(req.Txs))
|
||||
var totalBytes int64
|
||||
for _, tx := range req.Txs {
|
||||
totalBytes += int64(len(tx))
|
||||
if totalBytes > req.MaxTxBytes {
|
||||
break
|
||||
}
|
||||
trs = append(trs, &TxRecord{
|
||||
Action: TxRecord_UNMODIFIED,
|
||||
Tx: tx,
|
||||
})
|
||||
}
|
||||
return &ResponsePrepareProposal{TxRecords: trs}, nil
|
||||
func NewGRPCApplication(app Application) *GRPCApplication {
|
||||
return &GRPCApplication{app}
|
||||
}
|
||||
|
||||
func (BaseApplication) ProcessProposal(_ context.Context, req *RequestProcessProposal) (*ResponseProcessProposal, error) {
|
||||
return &ResponseProcessProposal{Status: ResponseProcessProposal_ACCEPT}, nil
|
||||
func (app *GRPCApplication) Echo(ctx context.Context, req *RequestEcho) (*ResponseEcho, error) {
|
||||
return &ResponseEcho{Message: req.Message}, nil
|
||||
}
|
||||
|
||||
func (BaseApplication) FinalizeBlock(_ context.Context, req *RequestFinalizeBlock) (*ResponseFinalizeBlock, error) {
|
||||
txs := make([]*ExecTxResult, len(req.Txs))
|
||||
for i := range req.Txs {
|
||||
txs[i] = &ExecTxResult{Code: CodeTypeOK}
|
||||
}
|
||||
return &ResponseFinalizeBlock{
|
||||
TxResults: txs,
|
||||
}, nil
|
||||
func (app *GRPCApplication) Flush(ctx context.Context, req *RequestFlush) (*ResponseFlush, error) {
|
||||
return &ResponseFlush{}, nil
|
||||
}
|
||||
|
||||
func (app *GRPCApplication) Info(ctx context.Context, req *RequestInfo) (*ResponseInfo, error) {
|
||||
res := app.app.Info(*req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *GRPCApplication) CheckTx(ctx context.Context, req *RequestCheckTx) (*ResponseCheckTx, error) {
|
||||
res := app.app.CheckTx(*req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *GRPCApplication) Query(ctx context.Context, req *RequestQuery) (*ResponseQuery, error) {
|
||||
res := app.app.Query(*req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *GRPCApplication) Commit(ctx context.Context, req *RequestCommit) (*ResponseCommit, error) {
|
||||
res := app.app.Commit()
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *GRPCApplication) InitChain(ctx context.Context, req *RequestInitChain) (*ResponseInitChain, error) {
|
||||
res := app.app.InitChain(*req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *GRPCApplication) ListSnapshots(
|
||||
ctx context.Context, req *RequestListSnapshots) (*ResponseListSnapshots, error) {
|
||||
res := app.app.ListSnapshots(*req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *GRPCApplication) OfferSnapshot(
|
||||
ctx context.Context, req *RequestOfferSnapshot) (*ResponseOfferSnapshot, error) {
|
||||
res := app.app.OfferSnapshot(*req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *GRPCApplication) LoadSnapshotChunk(
|
||||
ctx context.Context, req *RequestLoadSnapshotChunk) (*ResponseLoadSnapshotChunk, error) {
|
||||
res := app.app.LoadSnapshotChunk(*req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *GRPCApplication) ApplySnapshotChunk(
|
||||
ctx context.Context, req *RequestApplySnapshotChunk) (*ResponseApplySnapshotChunk, error) {
|
||||
res := app.app.ApplySnapshotChunk(*req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *GRPCApplication) FinalizeBlock(
|
||||
ctx context.Context, req *RequestFinalizeBlock) (*ResponseFinalizeBlock, error) {
|
||||
res := app.app.FinalizeBlock(*req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
@@ -1 +0,0 @@
package types
@@ -4,7 +4,6 @@ import (
"io"

"github.com/gogo/protobuf/proto"

"github.com/tendermint/tendermint/internal/libs/protoio"
)

@@ -16,7 +15,11 @@
func WriteMessage(msg proto.Message, w io.Writer) error {
protoWriter := protoio.NewDelimitedWriter(w)
_, err := protoWriter.WriteMsg(msg)
return err
if err != nil {
return err
}

return nil
}

// ReadMessage reads a varint length-delimited protobuf message.
|
||||
@@ -39,15 +42,15 @@ func ToRequestFlush() *Request {
|
||||
}
|
||||
}
|
||||
|
||||
func ToRequestInfo(req *RequestInfo) *Request {
|
||||
func ToRequestInfo(req RequestInfo) *Request {
|
||||
return &Request{
|
||||
Value: &Request_Info{req},
|
||||
Value: &Request_Info{&req},
|
||||
}
|
||||
}
|
||||
|
||||
func ToRequestCheckTx(req *RequestCheckTx) *Request {
|
||||
func ToRequestCheckTx(req RequestCheckTx) *Request {
|
||||
return &Request{
|
||||
Value: &Request_CheckTx{req},
|
||||
Value: &Request_CheckTx{&req},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -57,69 +60,45 @@ func ToRequestCommit() *Request {
|
||||
}
|
||||
}
|
||||
|
||||
func ToRequestQuery(req *RequestQuery) *Request {
|
||||
func ToRequestQuery(req RequestQuery) *Request {
|
||||
return &Request{
|
||||
Value: &Request_Query{req},
|
||||
Value: &Request_Query{&req},
|
||||
}
|
||||
}
|
||||
|
||||
func ToRequestInitChain(req *RequestInitChain) *Request {
|
||||
func ToRequestInitChain(req RequestInitChain) *Request {
|
||||
return &Request{
|
||||
Value: &Request_InitChain{req},
|
||||
Value: &Request_InitChain{&req},
|
||||
}
|
||||
}
|
||||
|
||||
func ToRequestListSnapshots(req *RequestListSnapshots) *Request {
|
||||
func ToRequestListSnapshots(req RequestListSnapshots) *Request {
|
||||
return &Request{
|
||||
Value: &Request_ListSnapshots{req},
|
||||
Value: &Request_ListSnapshots{&req},
|
||||
}
|
||||
}
|
||||
|
||||
func ToRequestOfferSnapshot(req *RequestOfferSnapshot) *Request {
|
||||
func ToRequestOfferSnapshot(req RequestOfferSnapshot) *Request {
|
||||
return &Request{
|
||||
Value: &Request_OfferSnapshot{req},
|
||||
Value: &Request_OfferSnapshot{&req},
|
||||
}
|
||||
}
|
||||
|
||||
func ToRequestLoadSnapshotChunk(req *RequestLoadSnapshotChunk) *Request {
|
||||
func ToRequestLoadSnapshotChunk(req RequestLoadSnapshotChunk) *Request {
|
||||
return &Request{
|
||||
Value: &Request_LoadSnapshotChunk{req},
|
||||
Value: &Request_LoadSnapshotChunk{&req},
|
||||
}
|
||||
}
|
||||
|
||||
func ToRequestApplySnapshotChunk(req *RequestApplySnapshotChunk) *Request {
|
||||
func ToRequestApplySnapshotChunk(req RequestApplySnapshotChunk) *Request {
|
||||
return &Request{
|
||||
Value: &Request_ApplySnapshotChunk{req},
|
||||
Value: &Request_ApplySnapshotChunk{&req},
|
||||
}
|
||||
}
|
||||
|
||||
func ToRequestExtendVote(req *RequestExtendVote) *Request {
|
||||
func ToRequestFinalizeBlock(req RequestFinalizeBlock) *Request {
|
||||
return &Request{
|
||||
Value: &Request_ExtendVote{req},
|
||||
}
|
||||
}
|
||||
|
||||
func ToRequestVerifyVoteExtension(req *RequestVerifyVoteExtension) *Request {
|
||||
return &Request{
|
||||
Value: &Request_VerifyVoteExtension{req},
|
||||
}
|
||||
}
|
||||
|
||||
func ToRequestPrepareProposal(req *RequestPrepareProposal) *Request {
|
||||
return &Request{
|
||||
Value: &Request_PrepareProposal{req},
|
||||
}
|
||||
}
|
||||
|
||||
func ToRequestProcessProposal(req *RequestProcessProposal) *Request {
|
||||
return &Request{
|
||||
Value: &Request_ProcessProposal{req},
|
||||
}
|
||||
}
|
||||
|
||||
func ToRequestFinalizeBlock(req *RequestFinalizeBlock) *Request {
|
||||
return &Request{
|
||||
Value: &Request_FinalizeBlock{req},
|
||||
Value: &Request_FinalizeBlock{&req},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -143,86 +122,62 @@ func ToResponseFlush() *Response {
|
||||
}
|
||||
}
|
||||
|
||||
func ToResponseInfo(res *ResponseInfo) *Response {
|
||||
func ToResponseInfo(res ResponseInfo) *Response {
|
||||
return &Response{
|
||||
Value: &Response_Info{res},
|
||||
Value: &Response_Info{&res},
|
||||
}
|
||||
}
|
||||
|
||||
func ToResponseCheckTx(res *ResponseCheckTx) *Response {
|
||||
func ToResponseCheckTx(res ResponseCheckTx) *Response {
|
||||
return &Response{
|
||||
Value: &Response_CheckTx{res},
|
||||
Value: &Response_CheckTx{&res},
|
||||
}
|
||||
}
|
||||
|
||||
func ToResponseCommit(res *ResponseCommit) *Response {
|
||||
func ToResponseCommit(res ResponseCommit) *Response {
|
||||
return &Response{
|
||||
Value: &Response_Commit{res},
|
||||
Value: &Response_Commit{&res},
|
||||
}
|
||||
}
|
||||
|
||||
func ToResponseQuery(res *ResponseQuery) *Response {
|
||||
func ToResponseQuery(res ResponseQuery) *Response {
|
||||
return &Response{
|
||||
Value: &Response_Query{res},
|
||||
Value: &Response_Query{&res},
|
||||
}
|
||||
}
|
||||
|
||||
func ToResponseInitChain(res *ResponseInitChain) *Response {
|
||||
func ToResponseInitChain(res ResponseInitChain) *Response {
|
||||
return &Response{
|
||||
Value: &Response_InitChain{res},
|
||||
Value: &Response_InitChain{&res},
|
||||
}
|
||||
}
|
||||
|
||||
func ToResponseListSnapshots(res *ResponseListSnapshots) *Response {
|
||||
func ToResponseListSnapshots(res ResponseListSnapshots) *Response {
|
||||
return &Response{
|
||||
Value: &Response_ListSnapshots{res},
|
||||
Value: &Response_ListSnapshots{&res},
|
||||
}
|
||||
}
|
||||
|
||||
func ToResponseOfferSnapshot(res *ResponseOfferSnapshot) *Response {
|
||||
func ToResponseOfferSnapshot(res ResponseOfferSnapshot) *Response {
|
||||
return &Response{
|
||||
Value: &Response_OfferSnapshot{res},
|
||||
Value: &Response_OfferSnapshot{&res},
|
||||
}
|
||||
}
|
||||
|
||||
func ToResponseLoadSnapshotChunk(res *ResponseLoadSnapshotChunk) *Response {
|
||||
func ToResponseLoadSnapshotChunk(res ResponseLoadSnapshotChunk) *Response {
|
||||
return &Response{
|
||||
Value: &Response_LoadSnapshotChunk{res},
|
||||
Value: &Response_LoadSnapshotChunk{&res},
|
||||
}
|
||||
}
|
||||
|
||||
func ToResponseApplySnapshotChunk(res *ResponseApplySnapshotChunk) *Response {
|
||||
func ToResponseApplySnapshotChunk(res ResponseApplySnapshotChunk) *Response {
|
||||
return &Response{
|
||||
Value: &Response_ApplySnapshotChunk{res},
|
||||
Value: &Response_ApplySnapshotChunk{&res},
|
||||
}
|
||||
}
|
||||
|
||||
func ToResponseExtendVote(res *ResponseExtendVote) *Response {
|
||||
func ToResponseFinalizeBlock(res ResponseFinalizeBlock) *Response {
|
||||
return &Response{
|
||||
Value: &Response_ExtendVote{res},
|
||||
}
|
||||
}
|
||||
|
||||
func ToResponseVerifyVoteExtension(res *ResponseVerifyVoteExtension) *Response {
|
||||
return &Response{
|
||||
Value: &Response_VerifyVoteExtension{res},
|
||||
}
|
||||
}
|
||||
|
||||
func ToResponsePrepareProposal(res *ResponsePrepareProposal) *Response {
|
||||
return &Response{
|
||||
Value: &Response_PrepareProposal{res},
|
||||
}
|
||||
}
|
||||
|
||||
func ToResponseProcessProposal(res *ResponseProcessProposal) *Response {
|
||||
return &Response{
|
||||
Value: &Response_ProcessProposal{res},
|
||||
}
|
||||
}
|
||||
|
||||
func ToResponseFinalizeBlock(res *ResponseFinalizeBlock) *Response {
|
||||
return &Response{
|
||||
Value: &Response_FinalizeBlock{res},
|
||||
Value: &Response_FinalizeBlock{&res},
|
||||
}
|
||||
}
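
// [Illustrative sketch, not part of this diff] The ToRequestXxx/ToResponseXxx helpers
// above wrap concrete messages into the Request/Response oneof used on the socket
// wire. A minimal round trip through the length-delimited format, using only names
// that appear elsewhere in this diff:
package main

import (
	"bytes"
	"fmt"

	"github.com/tendermint/tendermint/abci/types"
)

func main() {
	req := types.ToRequestFlush()

	var buf bytes.Buffer
	if err := types.WriteMessage(req, &buf); err != nil {
		panic(err)
	}

	got := &types.Request{}
	if err := types.ReadMessage(&buf, got); err != nil {
		panic(err)
	}
	fmt.Printf("round-tripped request: %T\n", got.Value)
}
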
|
||||
|
||||
@@ -13,21 +13,29 @@ import (
|
||||
)
|
||||
|
||||
func TestMarshalJSON(t *testing.T) {
|
||||
b, err := json.Marshal(&ExecTxResult{Code: 1})
|
||||
assert.NoError(t, err)
|
||||
b, err := json.Marshal(&ResponseDeliverTx{})
|
||||
assert.Nil(t, err)
|
||||
// include empty fields.
|
||||
assert.True(t, strings.Contains(string(b), "code"))
|
||||
r1 := ResponseCheckTx{
|
||||
Code: 1,
|
||||
Data: []byte("hello"),
|
||||
GasWanted: 43,
|
||||
Events: []Event{
|
||||
{
|
||||
Type: "testEvent",
|
||||
Attributes: []EventAttribute{
|
||||
{Key: "pho", Value: "bo"},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
b, err = json.Marshal(&r1)
|
||||
assert.NoError(t, err)
|
||||
assert.Nil(t, err)
|
||||
|
||||
var r2 ResponseCheckTx
|
||||
err = json.Unmarshal(b, &r2)
|
||||
assert.NoError(t, err)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, r1, r2)
|
||||
}
|
||||
|
||||
@@ -41,11 +49,11 @@ func TestWriteReadMessageSimple(t *testing.T) {
|
||||
for _, c := range cases {
|
||||
buf := new(bytes.Buffer)
|
||||
err := WriteMessage(c, buf)
|
||||
assert.NoError(t, err)
|
||||
assert.Nil(t, err)
|
||||
|
||||
msg := new(RequestEcho)
|
||||
err = ReadMessage(buf, msg)
|
||||
assert.NoError(t, err)
|
||||
assert.Nil(t, err)
|
||||
|
||||
assert.True(t, proto.Equal(c, msg))
|
||||
}
|
||||
@@ -63,11 +71,11 @@ func TestWriteReadMessage(t *testing.T) {
|
||||
for _, c := range cases {
|
||||
buf := new(bytes.Buffer)
|
||||
err := WriteMessage(c, buf)
|
||||
assert.NoError(t, err)
|
||||
assert.Nil(t, err)
|
||||
|
||||
msg := new(tmproto.Header)
|
||||
err = ReadMessage(buf, msg)
|
||||
assert.NoError(t, err)
|
||||
assert.Nil(t, err)
|
||||
|
||||
assert.True(t, proto.Equal(c, msg))
|
||||
}
|
||||
@@ -78,7 +86,16 @@ func TestWriteReadMessage2(t *testing.T) {
|
||||
cases := []proto.Message{
|
||||
&ResponseCheckTx{
|
||||
Data: []byte(phrase),
|
||||
Log: phrase,
|
||||
GasWanted: 10,
|
||||
Events: []Event{
|
||||
{
|
||||
Type: "testEvent",
|
||||
Attributes: []EventAttribute{
|
||||
{Key: "abc", Value: "def"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
// TODO: add the rest
|
||||
}
|
||||
@@ -86,11 +103,11 @@ func TestWriteReadMessage2(t *testing.T) {
|
||||
for _, c := range cases {
|
||||
buf := new(bytes.Buffer)
|
||||
err := WriteMessage(c, buf)
|
||||
assert.NoError(t, err)
|
||||
assert.Nil(t, err)
|
||||
|
||||
msg := new(ResponseCheckTx)
|
||||
err = ReadMessage(buf, msg)
|
||||
assert.NoError(t, err)
|
||||
assert.Nil(t, err)
|
||||
|
||||
assert.True(t, proto.Equal(c, msg))
|
||||
}
|
||||
|
||||
@@ -1,352 +0,0 @@
|
||||
// Code generated by mockery. DO NOT EDIT.
|
||||
|
||||
package mocks
|
||||
|
||||
import (
|
||||
context "context"
|
||||
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
types "github.com/tendermint/tendermint/abci/types"
|
||||
)
|
||||
|
||||
// Application is an autogenerated mock type for the Application type
|
||||
type Application struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
// ApplySnapshotChunk provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Application) ApplySnapshotChunk(_a0 context.Context, _a1 *types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *types.ResponseApplySnapshotChunk
|
||||
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestApplySnapshotChunk) *types.ResponseApplySnapshotChunk); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*types.ResponseApplySnapshotChunk)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestApplySnapshotChunk) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// CheckTx provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Application) CheckTx(_a0 context.Context, _a1 *types.RequestCheckTx) (*types.ResponseCheckTx, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *types.ResponseCheckTx
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestCheckTx) *types.ResponseCheckTx); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseCheckTx)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestCheckTx) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// Commit provides a mock function with given fields: _a0
func (_m *Application) Commit(_a0 context.Context) (*types.ResponseCommit, error) {
	ret := _m.Called(_a0)

	var r0 *types.ResponseCommit
	if rf, ok := ret.Get(0).(func(context.Context) *types.ResponseCommit); ok {
		r0 = rf(_a0)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseCommit)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context) error); ok {
		r1 = rf(_a0)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// ExtendVote provides a mock function with given fields: _a0, _a1
func (_m *Application) ExtendVote(_a0 context.Context, _a1 *types.RequestExtendVote) (*types.ResponseExtendVote, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *types.ResponseExtendVote
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestExtendVote) *types.ResponseExtendVote); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseExtendVote)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestExtendVote) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// FinalizeBlock provides a mock function with given fields: _a0, _a1
func (_m *Application) FinalizeBlock(_a0 context.Context, _a1 *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *types.ResponseFinalizeBlock
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestFinalizeBlock) *types.ResponseFinalizeBlock); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseFinalizeBlock)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestFinalizeBlock) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// Info provides a mock function with given fields: _a0, _a1
func (_m *Application) Info(_a0 context.Context, _a1 *types.RequestInfo) (*types.ResponseInfo, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *types.ResponseInfo
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestInfo) *types.ResponseInfo); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseInfo)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestInfo) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// InitChain provides a mock function with given fields: _a0, _a1
func (_m *Application) InitChain(_a0 context.Context, _a1 *types.RequestInitChain) (*types.ResponseInitChain, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *types.ResponseInitChain
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestInitChain) *types.ResponseInitChain); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseInitChain)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestInitChain) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// ListSnapshots provides a mock function with given fields: _a0, _a1
func (_m *Application) ListSnapshots(_a0 context.Context, _a1 *types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *types.ResponseListSnapshots
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestListSnapshots) *types.ResponseListSnapshots); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseListSnapshots)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestListSnapshots) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// LoadSnapshotChunk provides a mock function with given fields: _a0, _a1
func (_m *Application) LoadSnapshotChunk(_a0 context.Context, _a1 *types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *types.ResponseLoadSnapshotChunk
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestLoadSnapshotChunk) *types.ResponseLoadSnapshotChunk); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseLoadSnapshotChunk)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestLoadSnapshotChunk) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// OfferSnapshot provides a mock function with given fields: _a0, _a1
func (_m *Application) OfferSnapshot(_a0 context.Context, _a1 *types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *types.ResponseOfferSnapshot
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestOfferSnapshot) *types.ResponseOfferSnapshot); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseOfferSnapshot)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestOfferSnapshot) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// PrepareProposal provides a mock function with given fields: _a0, _a1
func (_m *Application) PrepareProposal(_a0 context.Context, _a1 *types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *types.ResponsePrepareProposal
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestPrepareProposal) *types.ResponsePrepareProposal); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponsePrepareProposal)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestPrepareProposal) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// ProcessProposal provides a mock function with given fields: _a0, _a1
func (_m *Application) ProcessProposal(_a0 context.Context, _a1 *types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *types.ResponseProcessProposal
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestProcessProposal) *types.ResponseProcessProposal); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseProcessProposal)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestProcessProposal) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// Query provides a mock function with given fields: _a0, _a1
func (_m *Application) Query(_a0 context.Context, _a1 *types.RequestQuery) (*types.ResponseQuery, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *types.ResponseQuery
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestQuery) *types.ResponseQuery); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseQuery)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestQuery) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// VerifyVoteExtension provides a mock function with given fields: _a0, _a1
func (_m *Application) VerifyVoteExtension(_a0 context.Context, _a1 *types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *types.ResponseVerifyVoteExtension
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestVerifyVoteExtension) *types.ResponseVerifyVoteExtension); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseVerifyVoteExtension)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestVerifyVoteExtension) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

type NewApplicationT interface {
	mock.TestingT
	Cleanup(func())
}

// NewApplication creates a new instance of Application. It also registers a testing interface on the mock and a cleanup function to assert the mock's expectations.
func NewApplication(t NewApplicationT) *Application {
	mock := &Application{}
	mock.Mock.Test(t)

	t.Cleanup(func() { mock.AssertExpectations(t) })

	return mock
}
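For orientation, below is a minimal sketch (not part of the diff) of how a mockery-generated Application mock like the one above is typically driven from a test. The import paths for the mocks package and the stubbed response values are illustrative assumptions.

package mocks_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	"github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/abci/types/mocks"
)

func TestMockedInfo(t *testing.T) {
	// NewApplication registers t on the mock and asserts expectations on cleanup.
	app := mocks.NewApplication(t)

	// Stub Info for any context and request (illustrative return value).
	app.On("Info", mock.Anything, mock.Anything).
		Return(&types.ResponseInfo{LastBlockHeight: 5}, nil)

	resp, err := app.Info(context.Background(), &types.RequestInfo{})
	require.NoError(t, err)
	require.EqualValues(t, 5, resp.LastBlockHeight)
}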
@@ -4,7 +4,7 @@ import (
	fmt "fmt"

	"github.com/tendermint/tendermint/crypto/ed25519"
	"github.com/tendermint/tendermint/crypto/encoding"
	cryptoenc "github.com/tendermint/tendermint/crypto/encoding"
	"github.com/tendermint/tendermint/crypto/secp256k1"
	"github.com/tendermint/tendermint/crypto/sr25519"
)
@@ -12,7 +12,7 @@ import (
func Ed25519ValidatorUpdate(pk []byte, power int64) ValidatorUpdate {
	pke := ed25519.PubKey(pk)

	pkp, err := encoding.PubKeyToProto(pke)
	pkp, err := cryptoenc.PubKeyToProto(pke)
	if err != nil {
		panic(err)
	}
@@ -29,7 +29,7 @@ func UpdateValidator(pk []byte, power int64, keyType string) ValidatorUpdate {
		return Ed25519ValidatorUpdate(pk, power)
	case secp256k1.KeyType:
		pke := secp256k1.PubKey(pk)
		pkp, err := encoding.PubKeyToProto(pke)
		pkp, err := cryptoenc.PubKeyToProto(pke)
		if err != nil {
			panic(err)
		}
@@ -39,7 +39,7 @@ func UpdateValidator(pk []byte, power int64, keyType string) ValidatorUpdate {
		}
	case sr25519.KeyType:
		pke := sr25519.PubKey(pk)
		pkp, err := encoding.PubKeyToProto(pke)
		pkp, err := cryptoenc.PubKeyToProto(pke)
		if err != nil {
			panic(err)
		}
120
abci/types/result.go
Normal file
@@ -0,0 +1,120 @@
package types

import (
	"bytes"
	"encoding/json"

	"github.com/gogo/protobuf/jsonpb"
)

const (
	CodeTypeOK uint32 = 0
)

// IsOK returns true if Code is OK.
func (r ResponseCheckTx) IsOK() bool {
	return r.Code == CodeTypeOK
}

// IsErr returns true if Code is something other than OK.
func (r ResponseCheckTx) IsErr() bool {
	return r.Code != CodeTypeOK
}

// IsOK returns true if Code is OK.
func (r ResponseDeliverTx) IsOK() bool {
	return r.Code == CodeTypeOK
}

// IsErr returns true if Code is something other than OK.
func (r ResponseDeliverTx) IsErr() bool {
	return r.Code != CodeTypeOK
}

// IsOK returns true if Code is OK.
func (r ResponseQuery) IsOK() bool {
	return r.Code == CodeTypeOK
}

// IsErr returns true if Code is something other than OK.
func (r ResponseQuery) IsErr() bool {
	return r.Code != CodeTypeOK
}

//---------------------------------------------------------------------------
// override JSON marshaling so we emit defaults (ie. disable omitempty)

var (
	jsonpbMarshaller = jsonpb.Marshaler{
		EnumsAsInts:  true,
		EmitDefaults: true,
	}
	jsonpbUnmarshaller = jsonpb.Unmarshaler{}
)

func (r *ResponseCheckTx) MarshalJSON() ([]byte, error) {
	s, err := jsonpbMarshaller.MarshalToString(r)
	return []byte(s), err
}

func (r *ResponseCheckTx) UnmarshalJSON(b []byte) error {
	reader := bytes.NewBuffer(b)
	return jsonpbUnmarshaller.Unmarshal(reader, r)
}

func (r *ResponseDeliverTx) MarshalJSON() ([]byte, error) {
	s, err := jsonpbMarshaller.MarshalToString(r)
	return []byte(s), err
}

func (r *ResponseDeliverTx) UnmarshalJSON(b []byte) error {
	reader := bytes.NewBuffer(b)
	return jsonpbUnmarshaller.Unmarshal(reader, r)
}

func (r *ResponseQuery) MarshalJSON() ([]byte, error) {
	s, err := jsonpbMarshaller.MarshalToString(r)
	return []byte(s), err
}

func (r *ResponseQuery) UnmarshalJSON(b []byte) error {
	reader := bytes.NewBuffer(b)
	return jsonpbUnmarshaller.Unmarshal(reader, r)
}

func (r *ResponseCommit) MarshalJSON() ([]byte, error) {
	s, err := jsonpbMarshaller.MarshalToString(r)
	return []byte(s), err
}

func (r *ResponseCommit) UnmarshalJSON(b []byte) error {
	reader := bytes.NewBuffer(b)
	return jsonpbUnmarshaller.Unmarshal(reader, r)
}

func (r *EventAttribute) MarshalJSON() ([]byte, error) {
	s, err := jsonpbMarshaller.MarshalToString(r)
	return []byte(s), err
}

func (r *EventAttribute) UnmarshalJSON(b []byte) error {
	reader := bytes.NewBuffer(b)
	return jsonpbUnmarshaller.Unmarshal(reader, r)
}

// Some compile time assertions to ensure we don't
// have accidental runtime surprises later on.

// jsonEncodingRoundTripper ensures that asserted
// interfaces implement both MarshalJSON and UnmarshalJSON
type jsonRoundTripper interface {
	json.Marshaler
	json.Unmarshaler
}

var _ jsonRoundTripper = (*ResponseCommit)(nil)
var _ jsonRoundTripper = (*ResponseQuery)(nil)
var _ jsonRoundTripper = (*ResponseDeliverTx)(nil)
var _ jsonRoundTripper = (*ResponseCheckTx)(nil)

var _ jsonRoundTripper = (*EventAttribute)(nil)
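As a side note, here is a small sketch (not part of the diff) of what the custom MarshalJSON above buys: because EmitDefaults is set on the jsonpb marshaller, zero-valued fields are still emitted instead of being dropped by omitempty. The field values and the printed output are illustrative.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/tendermint/tendermint/abci/types"
)

func main() {
	// json.Marshal dispatches to the jsonpb-backed MarshalJSON defined on
	// *ResponseCheckTx, so default (zero) fields such as "code" appear in the output.
	res := &types.ResponseCheckTx{Code: 0, Log: "ok"}
	bz, err := json.Marshal(res)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(bz)) // e.g. {"code":0,"log":"ok",...}
}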
@@ -1,239 +0,0 @@
package types

import (
	"bytes"
	"encoding/json"

	"github.com/gogo/protobuf/jsonpb"
	"github.com/tendermint/tendermint/crypto"
	"github.com/tendermint/tendermint/crypto/encoding"
	"github.com/tendermint/tendermint/internal/jsontypes"
)

const (
	CodeTypeOK uint32 = 0
)

// IsOK returns true if Code is OK.
func (r ResponseCheckTx) IsOK() bool {
	return r.Code == CodeTypeOK
}

// IsErr returns true if Code is something other than OK.
func (r ResponseCheckTx) IsErr() bool {
	return r.Code != CodeTypeOK
}

// IsOK returns true if Code is OK.
func (r ResponseDeliverTx) IsOK() bool {
	return r.Code == CodeTypeOK
}

// IsErr returns true if Code is something other than OK.
func (r ResponseDeliverTx) IsErr() bool {
	return r.Code != CodeTypeOK
}

// IsOK returns true if Code is OK.
func (r ExecTxResult) IsOK() bool {
	return r.Code == CodeTypeOK
}

// IsErr returns true if Code is something other than OK.
func (r ExecTxResult) IsErr() bool {
	return r.Code != CodeTypeOK
}

// IsOK returns true if Code is OK.
func (r ResponseQuery) IsOK() bool {
	return r.Code == CodeTypeOK
}

// IsErr returns true if Code is something other than OK.
func (r ResponseQuery) IsErr() bool {
	return r.Code != CodeTypeOK
}

func (r ResponseProcessProposal) IsAccepted() bool {
	return r.Status == ResponseProcessProposal_ACCEPT
}

func (r ResponseProcessProposal) IsStatusUnknown() bool {
	return r.Status == ResponseProcessProposal_UNKNOWN
}

// IsStatusUnknown returns true if Status is Unknown.
func (r ResponseVerifyVoteExtension) IsStatusUnknown() bool {
	return r.Status == ResponseVerifyVoteExtension_UNKNOWN
}

// IsOK returns true if Status is Accept.
func (r ResponseVerifyVoteExtension) IsOK() bool {
	return r.Status == ResponseVerifyVoteExtension_ACCEPT
}

// IsErr returns true if Status is something other than Accept.
func (r ResponseVerifyVoteExtension) IsErr() bool {
	return r.Status != ResponseVerifyVoteExtension_ACCEPT
}

//---------------------------------------------------------------------------
// override JSON marshaling so we emit defaults (ie. disable omitempty)

var (
	jsonpbMarshaller = jsonpb.Marshaler{
		EnumsAsInts:  true,
		EmitDefaults: true,
	}
	jsonpbUnmarshaller = jsonpb.Unmarshaler{}
)

func (r *ResponseCheckTx) MarshalJSON() ([]byte, error) {
	s, err := jsonpbMarshaller.MarshalToString(r)
	return []byte(s), err
}

func (r *ResponseCheckTx) UnmarshalJSON(b []byte) error {
	reader := bytes.NewBuffer(b)
	return jsonpbUnmarshaller.Unmarshal(reader, r)
}

func (r *ResponseDeliverTx) MarshalJSON() ([]byte, error) {
	s, err := jsonpbMarshaller.MarshalToString(r)
	return []byte(s), err
}

func (r *ResponseDeliverTx) UnmarshalJSON(b []byte) error {
	reader := bytes.NewBuffer(b)
	return jsonpbUnmarshaller.Unmarshal(reader, r)
}

func (r *ResponseQuery) MarshalJSON() ([]byte, error) {
	s, err := jsonpbMarshaller.MarshalToString(r)
	return []byte(s), err
}

func (r *ResponseQuery) UnmarshalJSON(b []byte) error {
	reader := bytes.NewBuffer(b)
	return jsonpbUnmarshaller.Unmarshal(reader, r)
}

func (r *ResponseCommit) MarshalJSON() ([]byte, error) {
	s, err := jsonpbMarshaller.MarshalToString(r)
	return []byte(s), err
}

func (r *ResponseCommit) UnmarshalJSON(b []byte) error {
	reader := bytes.NewBuffer(b)
	return jsonpbUnmarshaller.Unmarshal(reader, r)
}

func (r *EventAttribute) MarshalJSON() ([]byte, error) {
	s, err := jsonpbMarshaller.MarshalToString(r)
	return []byte(s), err
}

func (r *EventAttribute) UnmarshalJSON(b []byte) error {
	reader := bytes.NewBuffer(b)
	return jsonpbUnmarshaller.Unmarshal(reader, r)
}

// validatorUpdateJSON is the JSON encoding of a validator update.
//
// It handles translation of public keys from the protobuf representation to
// the legacy Amino-compatible format expected by RPC clients.
type validatorUpdateJSON struct {
	PubKey json.RawMessage `json:"pub_key,omitempty"`
	Power  int64           `json:"power,string"`
}

func (v *ValidatorUpdate) MarshalJSON() ([]byte, error) {
	key, err := encoding.PubKeyFromProto(v.PubKey)
	if err != nil {
		return nil, err
	}
	jkey, err := jsontypes.Marshal(key)
	if err != nil {
		return nil, err
	}
	return json.Marshal(validatorUpdateJSON{
		PubKey: jkey,
		Power:  v.GetPower(),
	})
}

func (v *ValidatorUpdate) UnmarshalJSON(data []byte) error {
	var vu validatorUpdateJSON
	if err := json.Unmarshal(data, &vu); err != nil {
		return err
	}
	var key crypto.PubKey
	if err := jsontypes.Unmarshal(vu.PubKey, &key); err != nil {
		return err
	}
	pkey, err := encoding.PubKeyToProto(key)
	if err != nil {
		return err
	}
	v.PubKey = pkey
	v.Power = vu.Power
	return nil
}

// Some compile time assertions to ensure we don't
// have accidental runtime surprises later on.

// jsonEncodingRoundTripper ensures that asserted
// interfaces implement both MarshalJSON and UnmarshalJSON
type jsonRoundTripper interface {
	json.Marshaler
	json.Unmarshaler
}

var _ jsonRoundTripper = (*ResponseCommit)(nil)
var _ jsonRoundTripper = (*ResponseQuery)(nil)
var _ jsonRoundTripper = (*ResponseDeliverTx)(nil)
var _ jsonRoundTripper = (*ResponseCheckTx)(nil)

var _ jsonRoundTripper = (*EventAttribute)(nil)

// -----------------------------------------------
// construct Result data

func RespondVerifyVoteExtension(ok bool) ResponseVerifyVoteExtension {
	status := ResponseVerifyVoteExtension_REJECT
	if ok {
		status = ResponseVerifyVoteExtension_ACCEPT
	}
	return ResponseVerifyVoteExtension{
		Status: status,
	}
}

// deterministicExecTxResult constructs a copy of response that omits
// non-deterministic fields. The input response is not modified.
func deterministicExecTxResult(response *ExecTxResult) *ExecTxResult {
	return &ExecTxResult{
		Code:      response.Code,
		Data:      response.Data,
		GasWanted: response.GasWanted,
		GasUsed:   response.GasUsed,
	}
}

// MarshalTxResults encodes the TxResults as a list of byte
// slices. It strips off the non-deterministic pieces of the TxResults
// so that the resulting data can be used for hash comparisons and used
// in Merkle proofs.
func MarshalTxResults(r []*ExecTxResult) ([][]byte, error) {
	s := make([][]byte, len(r))
	for i, e := range r {
		d := deterministicExecTxResult(e)
		b, err := d.Marshal()
		if err != nil {
			return nil, err
		}
		s[i] = b
	}
	return s, nil
}
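For context, a short sketch (not part of the diff; package paths and field values are illustrative) of how MarshalTxResults is meant to be used: the deterministic encodings are hashed into a Merkle root, so results that differ only in non-deterministic fields such as Log or Info produce the same root.

package main

import (
	"fmt"

	abci "github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/crypto/merkle"
)

func main() {
	results := []*abci.ExecTxResult{
		{Code: 0, Data: []byte("one"), Log: "non-deterministic log"},
		{Code: 14, Data: []byte("two"), Info: "also ignored by the hash"},
	}

	// Strip non-deterministic fields and encode each result.
	bzs, err := abci.MarshalTxResults(results)
	if err != nil {
		panic(err)
	}

	// The Merkle root over these encodings is what gets compared against
	// the results hash recorded in the block header.
	root := merkle.HashFromByteSlices(bzs)
	fmt.Printf("%X\n", root)
}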
File diff suppressed because it is too large
@@ -1,74 +0,0 @@
package types_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	abci "github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/crypto/merkle"
)

func TestHashAndProveResults(t *testing.T) {
	trs := []*abci.ExecTxResult{
		// Note, these tests rely on the first two entries being in this order.
		{Code: 0, Data: nil},
		{Code: 0, Data: []byte{}},

		{Code: 0, Data: []byte("one")},
		{Code: 14, Data: nil},
		{Code: 14, Data: []byte("foo")},
		{Code: 14, Data: []byte("bar")},
	}

	// Nil and []byte{} should produce the same bytes
	bz0, err := trs[0].Marshal()
	require.NoError(t, err)
	bz1, err := trs[1].Marshal()
	require.NoError(t, err)
	require.Equal(t, bz0, bz1)

	// Make sure that we can get a root hash from results and verify proofs.
	rs, err := abci.MarshalTxResults(trs)
	require.NoError(t, err)
	root := merkle.HashFromByteSlices(rs)
	assert.NotEmpty(t, root)

	_, proofs := merkle.ProofsFromByteSlices(rs)
	for i, tr := range trs {
		bz, err := tr.Marshal()
		require.NoError(t, err)

		valid := proofs[i].Verify(root, bz)
		assert.NoError(t, valid, "%d", i)
	}
}

func TestHashDeterministicFieldsOnly(t *testing.T) {
	tr1 := abci.ExecTxResult{
		Code:      1,
		Data:      []byte("transaction"),
		Log:       "nondeterministic data: abc",
		Info:      "nondeterministic data: abc",
		GasWanted: 1000,
		GasUsed:   1000,
		Events:    []abci.Event{},
		Codespace: "nondeterministic.data.abc",
	}
	tr2 := abci.ExecTxResult{
		Code:      1,
		Data:      []byte("transaction"),
		Log:       "nondeterministic data: def",
		Info:      "nondeterministic data: def",
		GasWanted: 1000,
		GasUsed:   1000,
		Events:    []abci.Event{},
		Codespace: "nondeterministic.data.def",
	}
	r1, err := abci.MarshalTxResults([]*abci.ExecTxResult{&tr1})
	require.NoError(t, err)
	r2, err := abci.MarshalTxResults([]*abci.ExecTxResult{&tr2})
	require.NoError(t, err)
	require.Equal(t, merkle.HashFromByteSlices(r1), merkle.HashFromByteSlices(r2))
}
9
abci/version/version.go
Normal file
@@ -0,0 +1,9 @@
package version

import (
	"github.com/tendermint/tendermint/version"
)

// TODO: eliminate this after some version refactor

const Version = version.ABCIVersion
18
buf.gen.yaml
@@ -1,9 +1,13 @@
version: v1
# The version of the generation template.
# Required.
# The only currently-valid value is v1beta1.
version: v1beta1

# The plugins to run.
plugins:
  # The name of the plugin.
  - name: gogofaster
    out: ./proto/
    opt:
      - Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types
      - Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration
      - plugins=grpc
      - paths=source_relative
    # The relative output directory.
    out: proto
    # Any options to provide to the plugin.
    opt: Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types,Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration,plugins=grpc,paths=source_relative
@@ -1,3 +0,0 @@
version: v1
directories:
  - proto
@@ -1,11 +1,16 @@
version: v1
deps:
  - buf.build/gogo/protobuf
breaking:
  use:
    - FILE
version: v1beta1

build:
  roots:
    - proto
    - third_party/proto
lint:
  use:
    - BASIC
    - FILE_LOWER_SNAKE_CASE
    - UNARY_RPC
  ignore:
    - gogoproto
breaking:
  use:
    - FILE
@@ -6,11 +6,10 @@ import (
	"crypto/x509"
	"flag"
	"fmt"
	"io/ioutil"
	"net"
	"net/http"
	"os"
	"os/signal"
	"syscall"
	"time"

	grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
@@ -21,6 +20,7 @@ import (

	"github.com/tendermint/tendermint/libs/log"
	tmnet "github.com/tendermint/tendermint/libs/net"
	tmos "github.com/tendermint/tendermint/libs/os"
	"github.com/tendermint/tendermint/privval"
	grpcprivval "github.com/tendermint/tendermint/privval/grpc"
	privvalproto "github.com/tendermint/tendermint/proto/tendermint/privval"
@@ -45,19 +45,12 @@ func main() {
		keyFile        = flag.String("keyfile", "", "absolute path to server key")
		rootCA         = flag.String("rootcafile", "", "absolute path to root CA")
		prometheusAddr = flag.String("prometheus-addr", "", "address for prometheus endpoint (host:port)")

		logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false).
			With("module", "priv_val")
	)
	flag.Parse()

	logger, err := log.NewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo)
	if err != nil {
		fmt.Fprintf(os.Stderr, "failed to construct logger: %v", err)
		os.Exit(1)
	}
	logger = logger.With("module", "priv_val")

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	logger.Info(
		"Starting private validator",
		"addr", *addr,
@@ -85,7 +78,7 @@ func main() {
	}

	certPool := x509.NewCertPool()
	bs, err := os.ReadFile(*rootCA)
	bs, err := ioutil.ReadFile(*rootCA)
	if err != nil {
		fmt.Fprintf(os.Stderr, "failed to read client ca cert: %s", err)
		os.Exit(1)
@@ -113,7 +106,7 @@ func main() {
	// add prometheus metrics for unary RPC calls
	opts = append(opts, grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor))

	ss := grpcprivval.NewSignerServer(logger, *chainID, pv)
	ss := grpcprivval.NewSignerServer(*chainID, pv, logger)

	protocol, address := tmnet.ProtocolAndAddress(*addr)

@@ -138,10 +131,9 @@ func main() {
		os.Exit(1)
	}

	opctx, opcancel := signal.NotifyContext(ctx, os.Interrupt, syscall.SIGTERM)
	defer opcancel()
	go func() {
		<-opctx.Done()
	// Stop upon receiving SIGTERM or CTRL-C.
	tmos.TrapSignal(logger, func() {
		logger.Debug("SignerServer: calling Close")
		if *prometheusAddr != "" {
			ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
			defer cancel()
@@ -151,7 +143,7 @@ func main() {
			}
		}
		s.GracefulStop()
	}()
	})

	// Run forever.
	select {}
Some files were not shown because too many files have changed in this diff