Mirror of https://github.com/tendermint/tendermint.git, synced 2026-01-12 15:52:50 +00:00.

Compare commits: `sam/abci++` ... `feature/ab` (112 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 4255d5d233 | |
| | 1332be0831 | |
| | 12f7e31610 | |
| | a749afe8fb | |
| | 9eb3fb050b | |
| | 0bc5399249 | |
| | 03bdcad31e | |
| | aeec999151 | |
| | 78c643718e | |
| | 67d3ce03a4 | |
| | 1d911818e6 | |
| | 70813a80cf | |
| | 21b2801c60 | |
| | 18d38dd409 | |
| | b8975ccff6 | |
| | 4290ad2982 | |
| | 0cbecba3d6 | |
| | 7efdb6362d | |
| | fee53a074d | |
| | 790132a7d8 | |
| | fd49683920 | |
| | c5c2aafad2 | |
| | da204d371d | |
| | 9d01a6880e | |
| | 8fd66a6e8d | |
| | f7bb0659be | |
| | 4af7568f99 | |
| | 3e766984a0 | |
| | 2d036c59fe | |
| | 12f0c4a624 | |
| | 7769467012 | |
| | d16f17569f | |
| | cc0c478c14 | |
| | b9dcddd07a | |
| | 34ca3fb474 | |
| | fc8df9a151 | |
| | b85e13aa0c | |
| | 20ffa4fd32 | |
| | f9bfdf4ce2 | |
| | 001cd50fc7 | |
| | 2b4436d1b4 | |
| | 4d330ab4fc | |
| | 627b77693f | |
| | 755e1474b1 | |
| | b07e1fae89 | |
| | ae164bf533 | |
| | eb14a9564a | |
| | a7dc8aaf91 | |
| | a7c6233d66 | |
| | d324430f82 | |
| | 99a7ac84dc | |
| | f12588aab1 | |
| | d534285bfe | |
| | f8e453a245 | |
| | ffae184b62 | |
| | c6a0dc8559 | |
| | 3aa6c816e5 | |
| | ff0f98892f | |
| | 0beac722b0 | |
| | 45071d1f23 | |
| | 5a9a84eb02 | |
| | f008a275d1 | |
| | 816c6bac00 | |
| | 9ec9085678 | |
| | f58ba4d2f9 | |
| | 6bde634be2 | |
| | d704c0a0b6 | |
| | 629cdc7f3d | |
| | c8f9f061fd | |
| | f138cb9c0c | |
| | 83b7f4ad5b | |
| | 09b8708314 | |
| | d95e423756 | |
| | 95bd4b6701 | |
| | 98ad5f1bf3 | |
| | 82c29db2bc | |
| | 160a33fdb1 | |
| | 13bd4b63f8 | |
| | bc15531e1d | |
| | 716a624d57 | |
| | 58b9e4f03d | |
| | 6a46a76163 | |
| | 0d4db7aae6 | |
| | 40a59d1e10 | |
| | f6709208b0 | |
| | 2c40ca52c1 | |
| | 3136b7a084 | |
| | af2981a2f7 | |
| | 3bd2153136 | |
| | 301211c2cb | |
| | 58ee42ca52 | |
| | 6e38fff9ed | |
| | 93ab364abc | |
| | 1c60efc0bc | |
| | 6768b98568 | |
| | 3cdfbda2eb | |
| | 4552cfc271 | |
| | 91fba07e49 | |
| | 5df9c410ff | |
| | c8f203293d | |
| | b06e1cea54 | |
| | 6ea968d576 | |
| | b42c439776 | |
| | 387bf6795a | |
| | 4f3e87b2e4 | |
| | a371b1e3a8 | |
| | 9dd99e9294 | |
| | 4fd19a275e | |
| | 8d26460f9d | |
| | c0bdb2423a | |
| | cdd3479f20 | |
| | b1dc5a6def | |
.github/CODEOWNERS (vendored, 4 changes)

@@ -7,6 +7,6 @@
 # global owners are only requested if there isn't a more specific
 # codeowner specified below. For this reason, the global codeowners
 # are often repeated in package-level definitions.
-* @ebuchman @tendermint/tendermint-engineering
+* @ebuchman @tendermint/tendermint-engineering @adizere @lasarojc
 
-/spec @ebuchman @tendermint/tendermint-research @tendermint/tendermint-engineering
+/spec @ebuchman @tendermint/tendermint-research @tendermint/tendermint-engineering @adizere @lasarojc
.github/dependabot.yml (vendored, 12 changes)

@@ -53,10 +53,10 @@ updates:
   - package-ecosystem: gomod
     directory: "/"
     schedule:
-      interval: weekly
+      interval: daily
     target-branch: "v0.37.x"
-    # Only allow automated security-related dependency updates until we cut the
-    # final v0.37.0 release.
+    # Only allow automated security-related dependency updates on release
+    # branches.
    open-pull-requests-limit: 0
    labels:
      - T:dependencies
@@ -65,9 +65,11 @@ updates:
   - package-ecosystem: gomod
     directory: "/"
     schedule:
-      interval: weekly
+      interval: daily
     target-branch: "v0.34.x"
-    open-pull-requests-limit: 10
+    # Only allow automated security-related dependency updates on release
+    # branches.
+    open-pull-requests-limit: 0
     labels:
       - T:dependencies
       - S:automerge
.github/workflows/docker.yml (vendored, 6 changes)

@@ -41,17 +41,17 @@ jobs:
           platforms: all

       - name: Set up Docker Build
-        uses: docker/setup-buildx-action@v2.0.0
+        uses: docker/setup-buildx-action@v2.2.1

       - name: Login to DockerHub
         if: ${{ github.event_name != 'pull_request' }}
-        uses: docker/login-action@v2.0.0
+        uses: docker/login-action@v2.1.0
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}

       - name: Publish to Docker Hub
-        uses: docker/build-push-action@v3.1.1
+        uses: docker/build-push-action@v3.2.0
         with:
           context: .
           file: ./DOCKER/Dockerfile
.github/workflows/docs-deployment.yml (vendored, 2 changes)

@@ -35,6 +35,8 @@ jobs:
           # versions will be available for the build.
           fetch-depth: 0
       - name: Build documentation
+        env:
+          NODE_OPTIONS: "--openssl-legacy-provider"
         run: |
           git config --global --add safe.directory "$PWD"
           make build-docs
.github/workflows/e2e-nightly-34x.yml (vendored, 4 changes)

@@ -57,7 +57,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Notify Slack on failure
-        uses: slackapi/slack-github-action@v1.22.0
+        uses: slackapi/slack-github-action@v1.23.0
         env:
           SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
           SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
@@ -72,7 +72,7 @@ jobs:
                 "type": "section",
                 "text": {
                   "type": "mrkdwn",
-                  "text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMIT_URL }}|commit> that caused the failure."
+                  "text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMIT_URL }}|commit> related to the failure."
                 }
               }
             ]
.github/workflows/e2e-nightly-37x.yml (vendored, 4 changes)

@@ -57,7 +57,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Notify Slack on failure
-        uses: slackapi/slack-github-action@v1.22.0
+        uses: slackapi/slack-github-action@v1.23.0
         env:
           SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
           SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
@@ -72,7 +72,7 @@ jobs:
                 "type": "section",
                 "text": {
                   "type": "mrkdwn",
-                  "text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMIT_URL }}|commit> that caused the failure."
+                  "text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMIT_URL }}|commit> related to the failure."
                 }
               }
             ]
.github/workflows/e2e-nightly-main.yml (vendored, 4 changes)

@@ -46,7 +46,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Notify Slack on failure
-        uses: slackapi/slack-github-action@v1.22.0
+        uses: slackapi/slack-github-action@v1.23.0
         env:
           SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
           SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
@@ -61,7 +61,7 @@ jobs:
                 "type": "section",
                 "text": {
                   "type": "mrkdwn",
-                  "text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMIT_URL }}|commit> that caused the failure."
+                  "text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMIT_URL }}|commit> related to the failure."
                 }
               }
             ]
.github/workflows/fuzz-nightly.yml (vendored, 2 changes)

@@ -76,7 +76,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Notify Slack on failure
-        uses: slackapi/slack-github-action@v1.22.0
+        uses: slackapi/slack-github-action@v1.23.0
         env:
           SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
           SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
.github/workflows/gosec.yml (vendored, 41 changes, file deleted)

@@ -1,41 +0,0 @@
-name: Run Gosec
-on:
-  pull_request:
-    paths:
-      - '**/*.go'
-      - 'go.mod'
-      - 'go.sum'
-  push:
-    branches:
-      - main
-      - 'feature/*'
-      - 'v0.37.x'
-      - 'v0.34.x'
-    paths:
-      - '**/*.go'
-      - 'go.mod'
-      - 'go.sum'
-
-jobs:
-  Gosec:
-    permissions:
-      security-events: write
-
-    runs-on: ubuntu-latest
-    env:
-      GO111MODULE: on
-    steps:
-      - name: Checkout Source
-        uses: actions/checkout@v3
-
-      - name: Run Gosec Security Scanner
-        uses: cosmos/gosec@master
-        with:
-          # Let the report trigger a failure with the Github Security scanner features.
-          args: "-no-fail -fmt sarif -out results.sarif ./..."
-
-      - name: Upload SARIF file
-        uses: github/codeql-action/upload-sarif@v2
-        with:
-          # Path to SARIF file relative to the root of the repository
-          sarif_file: results.sarif
.github/workflows/janitor.yml (vendored, 2 changes)

@@ -10,7 +10,7 @@ jobs:
     runs-on: ubuntu-latest
     timeout-minutes: 3
     steps:
-      - uses: styfle/cancel-workflow-action@0.10.1
+      - uses: styfle/cancel-workflow-action@0.11.0
        with:
          workflow_id: 1041851,1401230,2837803
          access_token: ${{ github.token }}
.github/workflows/lint.yml (vendored, 5 changes)

@@ -31,10 +31,7 @@ jobs:
             go.sum
       - uses: golangci/golangci-lint-action@v3
         with:
-          # Required: the version of golangci-lint is required and
-          # must be specified without patch version: we always use the
-          # latest patch version.
-          version: v1.47.3
+          version: v1.50.1
           args: --timeout 10m
           github-token: ${{ secrets.github_token }}
         if: env.GIT_DIFF
.github/workflows/markdown-links.yml (vendored, 19 changes)

@@ -1,23 +1,20 @@
 name: Check Markdown links
 
 on:
-  push:
-    branches:
-      - main
-  pull_request:
-    branches: [main]
   schedule:
     # 2am UTC daily
     - cron: '0 2 * * *'
 
 jobs:
   markdown-link-check:
+    strategy:
+      matrix:
+        branch: ['main', 'v0.37.x', 'v0.34.x']
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v3
-      - uses: technote-space/get-diff-action@v6
-        with:
-          PATTERNS: |
-            **/**.md
-      - uses: creachadair/github-action-markdown-link-check@master
+        with:
+          ref: ${{ matrix.branch }}
+      - uses: informalsystems/github-action-markdown-link-check@main
         with:
-          check-modified-files-only: 'yes'
+          config-file: '.md-link-check.json'
-        if: env.GIT_DIFF
.github/workflows/pre-release.yml (vendored, 27 changes)

@@ -8,7 +8,7 @@ on:
       - "v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+" # e.g. v0.37.0-rc1, v0.38.0-rc10

 jobs:
-  goreleaser:
+  prerelease:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
@@ -38,3 +38,28 @@ jobs:
           args: release --rm-dist --release-notes=../release_notes.md
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+
+  prerelease-success:
+    needs: prerelease
+    if: ${{ success() }}
+    runs-on: ubuntu-latest
+    steps:
+      - name: Notify Slack upon pre-release
+        uses: slackapi/slack-github-action@v1.23.0
+        env:
+          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
+          SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
+          RELEASE_URL: "${{ github.server_url }}/${{ github.repository }}/releases/tag/${{ github.ref_name }}"
+        with:
+          payload: |
+            {
+              "blocks": [
+                {
+                  "type": "section",
+                  "text": {
+                    "type": "mrkdwn",
+                    "text": ":sparkles: New Tendermint pre-release: <${{ env.RELEASE_URL }}|${{ github.ref_name }}>"
+                  }
+                }
+              ]
+            }
.github/workflows/proto-lint.yml (vendored, 2 changes)

@@ -15,7 +15,7 @@ jobs:
     timeout-minutes: 5
     steps:
       - uses: actions/checkout@v3
-      - uses: bufbuild/buf-setup-action@v1.8.0
+      - uses: bufbuild/buf-setup-action@v1.9.0
       - uses: bufbuild/buf-lint-action@v1
         with:
           input: 'proto'
.github/workflows/release.yml (vendored, 27 changes)

@@ -6,7 +6,7 @@ on:
       - "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10

 jobs:
-  goreleaser:
+  release:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
@@ -35,3 +35,28 @@ jobs:
           args: release --rm-dist --release-notes=../release_notes.md
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+
+  release-success:
+    needs: release
+    if: ${{ success() }}
+    runs-on: ubuntu-latest
+    steps:
+      - name: Notify Slack upon release
+        uses: slackapi/slack-github-action@v1.23.0
+        env:
+          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
+          SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
+          RELEASE_URL: "${{ github.server_url }}/${{ github.repository }}/releases/tag/${{ github.ref_name }}"
+        with:
+          payload: |
+            {
+              "blocks": [
+                {
+                  "type": "section",
+                  "text": {
+                    "type": "mrkdwn",
+                    "text": ":rocket: New Tendermint release: <${{ env.RELEASE_URL }}|${{ github.ref_name }}>"
+                  }
+                }
+              ]
+            }
.gitignore (vendored, 2 changes)

@@ -55,3 +55,5 @@ proto/spec/**/*.pb.go
 *.pdf
 *.gz
 *.dvi
+# Python virtual environments
+.venv
.golangci.yml

@@ -2,7 +2,6 @@ linters:
   enable:
     - asciicheck
    - bodyclose
-    - deadcode
    - depguard
    - dogsled
    - dupl
@@ -26,7 +25,6 @@ linters:
    - typecheck
    - unconvert
    - unused
-    - varcheck
 
 issues:
   exclude-rules:
.md-link-check.json

@@ -2,5 +2,16 @@
   "retryOn429": true,
   "retryCount": 5,
   "fallbackRetryDelay": "30s",
-  "aliveStatusCodes": [200, 206, 503]
+  "aliveStatusCodes": [200, 206, 503],
+  "httpHeaders": [
+    {
+      "urls": [
+        "https://docs.github.com/",
+        "https://help.github.com/"
+      ],
+      "headers": {
+        "Accept-Encoding": "zstd, br, gzip, deflate"
+      }
+    }
+  ]
 }
CHANGELOG.md (88 changes)

@@ -2,6 +2,94 @@
 
 Friendly reminder, we have a [bug bounty program](https://hackerone.com/cosmos).
 
+## v0.34.24
+
+*Nov 22, 2022*
+
+Apart from one minor bug fix, this release aims to optimize the output of the
+RPC (both HTTP and WebSocket endpoints). See our [upgrading
+guidelines](./UPGRADING.md#v03424) for more details.
+
+### IMPROVEMENTS
+
+- `[rpc]` [\#9724](https://github.com/tendermint/tendermint/issues/9724) Remove
+  useless whitespace in RPC output (@adizere, @thanethomson)
+
+### BUG FIXES
+
+- `[rpc]` [\#9692](https://github.com/tendermint/tendermint/issues/9692) Remove
+  `Cache-Control` header response from `/check_tx` endpoint (@JayT106)
+
+## v0.34.23
+
+*Nov 9, 2022*
+
+This release introduces some new Prometheus metrics to help in determining what
+kinds of messages are consuming the most P2P bandwidth. This builds towards our
+broader goal of optimizing Tendermint bandwidth consumption, and will give us
+meaningful insights once we can establish these metrics for a number of chains.
+
+We now also return `Cache-Control` headers for select RPC endpoints to help
+facilitate caching.
+
+Special thanks to external contributors on this release: @JayT106
+
+### IMPROVEMENTS
+
+- `[p2p]` [\#9641](https://github.com/tendermint/tendermint/issues/9641) Add new
+  Envelope type and associated methods for sending and receiving Envelopes
+  instead of raw bytes. This also adds new metrics,
+  `tendermint_p2p_message_send_bytes_total` and
+  `tendermint_p2p_message_receive_bytes_total`, that expose how many bytes of
+  each message type have been sent.
+- `[rpc]` [\#9666](https://github.com/tendermint/tendermint/issues/9666) Enable
+  caching of RPC responses (@JayT106)
+
+The following RPC endpoints will return `Cache-Control` headers with a maximum
+age of 1 day:
+
+- `/abci_info`
+- `/block`, if `height` is supplied
+- `/block_by_hash`
+- `/block_results`, if `height` is supplied
+- `/blockchain`
+- `/check_tx`
+- `/commit`, if `height` is supplied
+- `/consensus_params`, if `height` is supplied
+- `/genesis`
+- `/genesis_chunked`
+- `/tx`
+- `/validators`, if `height` is supplied
+
+## v0.34.22
+
+This release includes several bug fixes, [one of
+which](https://github.com/tendermint/tendermint/pull/9518) we discovered while
+building up a baseline for v0.34 against which to compare our upcoming v0.37
+release during our [QA process](./docs/qa/).
+
+Special thanks to external contributors on this release: @RiccardoM
+
+### FEATURES
+
+- [rpc] [\#9423](https://github.com/tendermint/tendermint/pull/9423) Support
+  HTTPS URLs from the WebSocket client (@RiccardoM, @cmwaters)
+
+### BUG FIXES
+
+- [config] [\#9483](https://github.com/tendermint/tendermint/issues/9483)
+  Calling `tendermint init` would incorrectly leave out the new `[storage]`
+  section delimiter in the generated configuration file - this has now been
+  fixed
+- [p2p] [\#9500](https://github.com/tendermint/tendermint/issues/9500) Prevent
+  peers who have errored being added to the peer set (@jmalicevic)
+- [indexer] [\#9473](https://github.com/tendermint/tendermint/issues/9473) Fix
+  bug that caused the psql indexer to index empty blocks whenever one of the
+  transactions returned a non zero code. The relevant deduplication logic has
+  been moved within the kv indexer only (@cmwaters)
+- [blocksync] [\#9518](https://github.com/tendermint/tendermint/issues/9518) A
+  block sync stall was observed during our QA process whereby the node was
+  unable to make progress. Retrying block requests after a timeout fixes this.
+
 ## v0.34.21
 
 Release highlights include:
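The caching behaviour called out in v0.34.23 above is easy to check by hand. Below is a minimal sketch in Go; it assumes a node running locally with the RPC listener on the default `localhost:26657`, and the endpoint comes from the list above:

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// /genesis is one of the endpoints listed as cacheable in v0.34.23.
	resp, err := http.Get("http://localhost:26657/genesis")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// On v0.34.23 and later this is expected to report a max-age of one day.
	fmt.Println("Cache-Control:", resp.Header.Get("Cache-Control"))
}
```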
CHANGELOG_PENDING.md

@@ -7,30 +7,41 @@
 - CLI/RPC/Config
 
 - Apps
+  - [abci] \#9468 Introduce `FinalizeBlock` which condenses `BeginBlock`, `DeliverTx` and `EndBlock`
+    into a single method call (@cmwaters)
 
 - P2P Protocol
 
 - Go API
+  - [p2p] \#9625 Remove unused p2p/trust package (@cmwaters)
 
 - Blockchain Protocol
 
 - Data Storage
   - [state] \#6541 Move pruneBlocks from consensus/state to state/execution. (@JayT106)
 
 - Tooling
   - [tools/tm-signer-harness] \#6498 Set OS home dir to instead of the hardcoded PATH. (@JayT106)
+  - [metrics] \#9682 move state-syncing and block-syncing metrics to their respective packages (@cmwaters)
+    labels have moved from block_syncing -> blocksync_syncing and state_syncing -> statesync_syncing
 
 ### FEATURES
 
+- [config] \#9680 Introduce `BootstrapPeers` to the config to allow nodes to list peers to be added to
+  the addressbook upon start up (@cmwaters)
+
 ### IMPROVEMENTS
 
 - [pubsub] \#7319 Performance improvements for the event query API (@creachadair)
 - [p2p/pex] \#6509 Improve addrBook.hash performance (@cuonglm)
 - [crypto/merkle] \#6443 & \#6513 Improve HashAlternatives performance (@cuonglm, @marbar3778)
+- [rpc] \#9650 Enable caching of RPC responses (@JayT106)
+- [consensus] \#9760 Save peer LastCommit correctly to achieve 50% reduction in gossiped precommits. (@williambanfield)
 
 ### BUG FIXES
 
 - [docker] \#9462 ensure Docker image uses consistent version of Go
+- [abci-cli] \#9717 fix broken abci-cli help command
 
 ## v0.37.0
 
@@ -96,3 +107,4 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi
 
 - [consensus] \#9229 fix round number of `enterPropose` when handling `RoundStepNewRound` timeout. (@fatcat22)
 - [docker] \#9073 enable cross platform build using docker buildx
+- [blocksync] \#9518 handle the case when the sending queue is full: retry block request after a timeout
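The `FinalizeBlock` entry above is the headline ABCI change in this comparison: one call replaces the `BeginBlock`/`DeliverTx`/`EndBlock` sequence. The sketch below only illustrates the shape of that change; the type definitions are local stand-ins, not the branch's actual `abci/types` API:

```go
package main

import "fmt"

// Illustrative stand-ins for the ABCI request/response types; the real
// definitions live in github.com/tendermint/tendermint/abci/types and
// may differ on this branch.
type RequestFinalizeBlock struct {
	Height int64
	Txs    [][]byte
}

type ExecTxResult struct{ Code uint32 }

type ResponseFinalizeBlock struct {
	TxResults []*ExecTxResult
}

type App struct{}

// FinalizeBlock handles what used to be three round trips
// (BeginBlock, one DeliverTx per tx, EndBlock) in a single call.
func (app *App) FinalizeBlock(req *RequestFinalizeBlock) *ResponseFinalizeBlock {
	results := make([]*ExecTxResult, 0, len(req.Txs))
	for range req.Txs {
		results = append(results, &ExecTxResult{Code: 0}) // execute each tx
	}
	return &ResponseFinalizeBlock{TxResults: results}
}

func main() {
	app := &App{}
	resp := app.FinalizeBlock(&RequestFinalizeBlock{Height: 1, Txs: [][]byte{[]byte("tx1")}})
	fmt.Println("tx results:", len(resp.TxResults))
}
```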
CODE_OF_CONDUCT.md

@@ -12,7 +12,7 @@ and hence to Tendermint.
 * We are committed to providing a friendly, safe and welcoming environment for
   all, regardless of level of experience, gender, gender identity and
   expression, sexual orientation, disability, personal appearance, body size,
-  race, ethnicity, age, religion, nationality, or other similar characteristic.
+  race, ethnicity, age, religion, nationality, or other similar characteristics.
 
 * On Slack, please avoid using overtly sexual nicknames or other nicknames that
   might detract from a friendly, safe and welcoming environment for all.
CONTRIBUTING.md

@@ -12,7 +12,7 @@ landing changes in `main`.
 All work on the code base should be motivated by a [Github
 Issue](https://github.com/tendermint/tendermint/issues).
 [Search](https://github.com/tendermint/tendermint/issues?q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22)
-is a good place start when looking for places to contribute. If you
+is a good place to start when looking for places to contribute. If you
 would like to work on an issue which already exists, please indicate so
 by leaving a comment.
 
@@ -213,7 +213,7 @@ Changes with multiple classifications should be doubly included (eg. a bug fix
 that is also a breaking change should be recorded under both).
 
 Breaking changes are further subdivided according to the APIs/users they impact.
-Any change that effects multiple APIs/users should be recorded multiply - for
+Any change that affects multiple APIs/users should be recorded multiply - for
 instance, a change to the `Blockchain Protocol` that removes a field from the
 header should also be recorded under `CLI/RPC/Config` since the field will be
 removed from the header in RPC responses as well.
@@ -247,7 +247,7 @@ To begin contributing, create a development branch either on `github.com/tenderm
 Make changes, and before submitting a pull request, update the `CHANGELOG_PENDING.md` to record your change. Also, run either `git rebase` or `git merge` on top of the latest `main`. (Since pull requests are squash-merged, either is fine!)
 
 Update the `UPGRADING.md` if the change you've made is breaking and the
-instructions should be in place for a user on how he/she can upgrade it's
+instructions should be in place for a user on how he/she can upgrade its
 software (ABCI application, Tendermint-based blockchain, light client, wallet).
 
 Once you have submitted a pull request label the pull request with either `R:minor`, if the change should be included in the next minor release, or `R:major`, if the change is meant for a major release.
Makefile (2 changes)

@@ -271,7 +271,7 @@ format:
 
 lint:
 	@echo "--> Running linter"
-	@golangci-lint run
+	@go run github.com/golangci/golangci-lint/cmd/golangci-lint run
 .PHONY: lint
 
 DESTINATION = ./index.html.md
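Invoking the linter through `go run` pins golangci-lint to the version recorded in `go.mod`, rather than whichever binary happens to be on `PATH`. This is usually paired with a `tools.go` file so the module actually tracks the tool as a dependency. A common sketch (the file name and build tag are Go convention, not something this diff shows):

```go
//go:build tools

// Package tools pins build-time tool dependencies in go.mod so that
// `go run github.com/golangci/golangci-lint/cmd/golangci-lint` resolves
// to a fixed, reviewed version for every contributor.
package tools

import (
	_ "github.com/golangci/golangci-lint/cmd/golangci-lint"
)
```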
README.md (13 changes)

@@ -113,10 +113,15 @@ For more information on upgrading, see [UPGRADING.md](./UPGRADING.md).
 
 ### Supported Versions
 
-Because we are a small core team, we only ship patch updates, including security
-updates, to the most recent minor release and the second-most recent minor
-release. Consequently, we strongly recommend keeping Tendermint up-to-date.
-Upgrading instructions can be found in [UPGRADING.md](./UPGRADING.md).
+Because we are a small core team, we have limited capacity to ship patch
+updates, including security updates. Consequently, we strongly recommend keeping
+Tendermint up-to-date. Upgrading instructions can be found in
+[UPGRADING.md](./UPGRADING.md).
+
+Currently supported versions include:
+
+- v0.34.x
+- v0.37.x (release candidate)
 
 ## Resources
STYLE.md

@@ -98,7 +98,7 @@ Sometimes it's necessary to rename libraries to avoid naming collisions or ambiguity
 * Make use of table driven testing where possible and not-cumbersome
   * [Inspiration](https://dave.cheney.net/2013/06/09/writing-table-driven-tests-in-go)
 * Make use of [assert](https://godoc.org/github.com/stretchr/testify/assert) and [require](https://godoc.org/github.com/stretchr/testify/require)
-* When using mocks, it is recommended to use Testify [mock] (<https://pkg.go.dev/github.com/stretchr/testify/mock>
+* When using mocks, it is recommended to use Testify [mock](<https://pkg.go.dev/github.com/stretchr/testify/mock>
 ) along with [Mockery](https://github.com/vektra/mockery) for autogeneration
 
 ## Errors
UPGRADING.md (17 changes)

@@ -5,6 +5,15 @@ Tendermint Core.
 
 ## Unreleased
 
+## Config Changes
+
+* A new config field, `BootstrapPeers` has been introduced as a means of
+  adding a list of addresses to the addressbook upon initializing a node. This is an
+  alternative to `PersistentPeers`. `PersistentPeers` should only be used for
+  nodes that you want to keep a constant connection with i.e. sentry nodes
+
+----
+
 ### ABCI Changes
 
 * The `ABCIVersion` is now `1.0.0`.
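A small sketch of setting the new field programmatically. The `config` package path is real, but treating `BootstrapPeers` as a comma-separated string shaped like `PersistentPeers` is an assumption based on the wording above, and the peer addresses are placeholders:

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/config"
)

func main() {
	cfg := config.DefaultConfig()

	// Assumption: BootstrapPeers mirrors the PersistentPeers format, i.e. a
	// comma-separated list of id@host:port addresses. IDs are placeholders.
	cfg.P2P.BootstrapPeers = "abcd1234@203.0.113.1:26656,ef567890@203.0.113.2:26656"

	fmt.Println("bootstrap peers:", cfg.P2P.BootstrapPeers)
}
```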
@@ -32,6 +41,14 @@ Tendermint Core.
   this should have no effect on the wire-level encoding for UTF8-encoded
   strings.
 
+## v0.34.24
+
+Note that in [\#9724](https://github.com/tendermint/tendermint/pull/9724) we
+un-prettified the JSON output (i.e. removed all indentation) of the HTTP and
+WebSocket RPC for performance and subscription stability reasons. We recommend
+using a tool such as [jq](https://github.com/stedolan/jq) to obtain prettified
+output if you rely on that prettified output in some way.
+
 ## v0.34.20
 
 ### Feature: Priority Mempool
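For consumers that relied on the old indented output, re-indenting on the client side is cheap. Besides `jq`, Go's standard library can do it. A minimal sketch (the compact input string is made up for illustration):

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	// Stand-in for a compact RPC response body as returned by v0.34.24+.
	compact := []byte(`{"jsonrpc":"2.0","id":-1,"result":{"ok":true}}`)

	var pretty bytes.Buffer
	if err := json.Indent(&pretty, compact, "", "  "); err != nil {
		panic(err)
	}
	fmt.Println(pretty.String())
}
```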
abci/client/client.go

@@ -1,6 +1,7 @@
 package abcicli
 
 import (
+	"context"
 	"fmt"
 	"sync"
 
@@ -16,50 +17,28 @@ const (
 
 //go:generate ../../scripts/mockery_generate.sh Client
 
-// Client defines an interface for an ABCI client.
-// All `Async` methods return a `ReqRes` object.
-// All `Sync` methods return the appropriate protobuf ResponseXxx struct and an error.
-// Note these are client errors, eg. ABCI socket connectivity issues.
-// Application-related errors are reflected in response via ABCI error codes and logs.
+// Client defines the interface for an ABCI client.
+//
+// NOTE these are client errors, eg. ABCI socket connectivity issues.
+// Application-related errors are reflected in response via ABCI error codes
+// and (potentially) error response.
 type Client interface {
 	service.Service
+	types.Application
 
-	SetResponseCallback(Callback)
+	// TODO: remove as each method now returns an error
 	Error() error
+	// TODO: remove as this is not implemented
+	Flush(context.Context) error
+	Echo(context.Context, string) (*types.ResponseEcho, error)
 
-	FlushAsync() *ReqRes
-	EchoAsync(msg string) *ReqRes
-	InfoAsync(types.RequestInfo) *ReqRes
-	DeliverTxAsync(types.RequestDeliverTx) *ReqRes
-	CheckTxAsync(types.RequestCheckTx) *ReqRes
-	QueryAsync(types.RequestQuery) *ReqRes
-	CommitAsync() *ReqRes
-	InitChainAsync(types.RequestInitChain) *ReqRes
-	PrepareProposalAsync(types.RequestPrepareProposal) *ReqRes
-	BeginBlockAsync(types.RequestBeginBlock) *ReqRes
-	EndBlockAsync(types.RequestEndBlock) *ReqRes
-	ListSnapshotsAsync(types.RequestListSnapshots) *ReqRes
-	OfferSnapshotAsync(types.RequestOfferSnapshot) *ReqRes
-	LoadSnapshotChunkAsync(types.RequestLoadSnapshotChunk) *ReqRes
-	ApplySnapshotChunkAsync(types.RequestApplySnapshotChunk) *ReqRes
-	ProcessProposalAsync(types.RequestProcessProposal) *ReqRes
-
-	FlushSync() error
-	EchoSync(msg string) (*types.ResponseEcho, error)
-	InfoSync(types.RequestInfo) (*types.ResponseInfo, error)
-	DeliverTxSync(types.RequestDeliverTx) (*types.ResponseDeliverTx, error)
-	CheckTxSync(types.RequestCheckTx) (*types.ResponseCheckTx, error)
-	QuerySync(types.RequestQuery) (*types.ResponseQuery, error)
-	CommitSync() (*types.ResponseCommit, error)
-	InitChainSync(types.RequestInitChain) (*types.ResponseInitChain, error)
-	PrepareProposalSync(types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error)
-	BeginBlockSync(types.RequestBeginBlock) (*types.ResponseBeginBlock, error)
-	EndBlockSync(types.RequestEndBlock) (*types.ResponseEndBlock, error)
-	ListSnapshotsSync(types.RequestListSnapshots) (*types.ResponseListSnapshots, error)
-	OfferSnapshotSync(types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error)
-	LoadSnapshotChunkSync(types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error)
-	ApplySnapshotChunkSync(types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error)
-	ProcessProposalSync(types.RequestProcessProposal) (*types.ResponseProcessProposal, error)
+	// FIXME: All other operations are run synchronously and rely
+	// on the caller to dictate concurrency (i.e. run a go routine),
+	// with the exception of `CheckTxAsync` which we maintain
+	// for the v0 mempool. We should explore refactoring the
+	// mempool to remove this vestige behavior.
+	SetResponseCallback(Callback)
+	CheckTxAsync(context.Context, *types.RequestCheckTx) (*ReqRes, error)
 }
 
 //----------------------------------------
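For callers, the practical effect of the new interface is that every request takes a `context.Context` and returns an error directly, with `CheckTxAsync` as the one surviving async entry point. A rough usage sketch follows; the method signatures come from the interface above, but the surrounding wiring is illustrative:

```go
package main

import (
	"context"

	abcicli "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/types"
)

// ping drives the new Client interface: plain context-aware calls that
// return errors directly, plus the one remaining async entry point.
func ping(ctx context.Context, client abcicli.Client) error {
	if _, err := client.Echo(ctx, "hello"); err != nil {
		return err
	}

	// CheckTxAsync is kept for the v0 mempool; it returns a *ReqRes
	// handle that a callback can be attached to.
	reqRes, err := client.CheckTxAsync(ctx, &types.RequestCheckTx{Tx: []byte("tx")})
	if err != nil {
		return err
	}
	_ = reqRes
	return nil
}

func main() {}
```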
abci/client/grpc_client.go

@@ -1,18 +1,18 @@
 package abcicli
 
 import (
+	"context"
 	"fmt"
 	"net"
+	"sync"
 	"time"
 
-	"golang.org/x/net/context"
 	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials/insecure"
 
 	"github.com/tendermint/tendermint/abci/types"
 	tmnet "github.com/tendermint/tendermint/libs/net"
 	"github.com/tendermint/tendermint/libs/service"
-	tmsync "github.com/tendermint/tendermint/libs/sync"
 )
 
 var _ Client = (*grpcClient)(nil)
@@ -23,11 +23,11 @@ type grpcClient struct {
 	service.BaseService
 	mustConnect bool
 
-	client   types.ABCIApplicationClient
+	client   types.ABCIClient
 	conn     *grpc.ClientConn
 	chReqRes chan *ReqRes // dispatches "async" responses to callbacks *in order*, needed by mempool
 
-	mtx   tmsync.Mutex
+	mtx   sync.Mutex
 	addr  string
 	err   error
 	resCb func(*types.Request, *types.Response) // listens to all callbacks
@@ -87,8 +87,10 @@ func (cli *grpcClient) OnStart() error {
 
 RETRY_LOOP:
 	for {
-		//nolint:staticcheck // SA1019 Existing use of deprecated but supported dial option.
-		conn, err := grpc.Dial(cli.addr, grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc))
+		conn, err := grpc.Dial(cli.addr,
+			grpc.WithTransportCredentials(insecure.NewCredentials()),
+			grpc.WithContextDialer(dialerFunc),
+		)
 		if err != nil {
 			if cli.mustConnect {
 				return err
@@ -99,7 +101,7 @@ RETRY_LOOP:
 		}
 
 		cli.Logger.Info("Dialed server. Waiting for echo.", "addr", cli.addr)
-		client := types.NewABCIApplicationClient(conn)
+		client := types.NewABCIClient(conn)
 		cli.conn = conn
 
 ENSURE_CONNECTED:
@@ -158,156 +160,14 @@ func (cli *grpcClient) SetResponseCallback(resCb Callback) {
 }
 
 //----------------------------------------
-// GRPC calls are synchronous, but some callbacks expect to be called asynchronously
-// (eg. the mempool expects to be able to lock to remove bad txs from cache).
-// To accommodate, we finish each call in its own go-routine,
-// which is expensive, but easy - if you want something better, use the socket protocol!
-// maybe one day, if people really want it, we use grpc streams,
-// but hopefully not :D
 
-func (cli *grpcClient) EchoAsync(msg string) *ReqRes {
-	req := types.ToRequestEcho(msg)
-	res, err := cli.client.Echo(context.Background(), req.GetEcho(), grpc.WaitForReady(true))
+func (cli *grpcClient) CheckTxAsync(ctx context.Context, req *types.RequestCheckTx) (*ReqRes, error) {
+	res, err := cli.client.CheckTx(ctx, req, grpc.WaitForReady(true))
 	if err != nil {
-		cli.StopForError(err)
+		return nil, err
 	}
-	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Echo{Echo: res}})
-}
-
-func (cli *grpcClient) FlushAsync() *ReqRes {
-	req := types.ToRequestFlush()
-	res, err := cli.client.Flush(context.Background(), req.GetFlush(), grpc.WaitForReady(true))
-	if err != nil {
-		cli.StopForError(err)
-	}
-	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Flush{Flush: res}})
-}
-
-func (cli *grpcClient) InfoAsync(params types.RequestInfo) *ReqRes {
-	req := types.ToRequestInfo(params)
-	res, err := cli.client.Info(context.Background(), req.GetInfo(), grpc.WaitForReady(true))
-	if err != nil {
-		cli.StopForError(err)
-	}
-	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Info{Info: res}})
-}
-
-func (cli *grpcClient) DeliverTxAsync(params types.RequestDeliverTx) *ReqRes {
-	req := types.ToRequestDeliverTx(params)
-	res, err := cli.client.DeliverTx(context.Background(), req.GetDeliverTx(), grpc.WaitForReady(true))
-	if err != nil {
-		cli.StopForError(err)
-	}
-	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_DeliverTx{DeliverTx: res}})
-}
-
-func (cli *grpcClient) CheckTxAsync(params types.RequestCheckTx) *ReqRes {
-	req := types.ToRequestCheckTx(params)
-	res, err := cli.client.CheckTx(context.Background(), req.GetCheckTx(), grpc.WaitForReady(true))
-	if err != nil {
-		cli.StopForError(err)
-	}
-	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_CheckTx{CheckTx: res}})
-}
-
-func (cli *grpcClient) QueryAsync(params types.RequestQuery) *ReqRes {
-	req := types.ToRequestQuery(params)
-	res, err := cli.client.Query(context.Background(), req.GetQuery(), grpc.WaitForReady(true))
-	if err != nil {
-		cli.StopForError(err)
-	}
-	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Query{Query: res}})
-}
-
-func (cli *grpcClient) CommitAsync() *ReqRes {
-	req := types.ToRequestCommit()
-	res, err := cli.client.Commit(context.Background(), req.GetCommit(), grpc.WaitForReady(true))
-	if err != nil {
-		cli.StopForError(err)
-	}
-	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Commit{Commit: res}})
-}
-
-func (cli *grpcClient) InitChainAsync(params types.RequestInitChain) *ReqRes {
-	req := types.ToRequestInitChain(params)
-	res, err := cli.client.InitChain(context.Background(), req.GetInitChain(), grpc.WaitForReady(true))
-	if err != nil {
-		cli.StopForError(err)
-	}
-	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_InitChain{InitChain: res}})
-}
-
-func (cli *grpcClient) BeginBlockAsync(params types.RequestBeginBlock) *ReqRes {
-	req := types.ToRequestBeginBlock(params)
-	res, err := cli.client.BeginBlock(context.Background(), req.GetBeginBlock(), grpc.WaitForReady(true))
-	if err != nil {
-		cli.StopForError(err)
-	}
-	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_BeginBlock{BeginBlock: res}})
-}
-
-func (cli *grpcClient) EndBlockAsync(params types.RequestEndBlock) *ReqRes {
-	req := types.ToRequestEndBlock(params)
-	res, err := cli.client.EndBlock(context.Background(), req.GetEndBlock(), grpc.WaitForReady(true))
-	if err != nil {
-		cli.StopForError(err)
-	}
-	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_EndBlock{EndBlock: res}})
-}
-
-func (cli *grpcClient) ListSnapshotsAsync(params types.RequestListSnapshots) *ReqRes {
-	req := types.ToRequestListSnapshots(params)
-	res, err := cli.client.ListSnapshots(context.Background(), req.GetListSnapshots(), grpc.WaitForReady(true))
-	if err != nil {
-		cli.StopForError(err)
-	}
-	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_ListSnapshots{ListSnapshots: res}})
-}
-
-func (cli *grpcClient) OfferSnapshotAsync(params types.RequestOfferSnapshot) *ReqRes {
-	req := types.ToRequestOfferSnapshot(params)
-	res, err := cli.client.OfferSnapshot(context.Background(), req.GetOfferSnapshot(), grpc.WaitForReady(true))
-	if err != nil {
-		cli.StopForError(err)
-	}
-	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_OfferSnapshot{OfferSnapshot: res}})
-}
-
-func (cli *grpcClient) LoadSnapshotChunkAsync(params types.RequestLoadSnapshotChunk) *ReqRes {
-	req := types.ToRequestLoadSnapshotChunk(params)
-	res, err := cli.client.LoadSnapshotChunk(context.Background(), req.GetLoadSnapshotChunk(), grpc.WaitForReady(true))
-	if err != nil {
-		cli.StopForError(err)
-	}
-	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_LoadSnapshotChunk{LoadSnapshotChunk: res}})
-}
-
-func (cli *grpcClient) ApplySnapshotChunkAsync(params types.RequestApplySnapshotChunk) *ReqRes {
-	req := types.ToRequestApplySnapshotChunk(params)
-	res, err := cli.client.ApplySnapshotChunk(context.Background(), req.GetApplySnapshotChunk(), grpc.WaitForReady(true))
-	if err != nil {
-		cli.StopForError(err)
-	}
-	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_ApplySnapshotChunk{ApplySnapshotChunk: res}})
-}
-
-func (cli *grpcClient) PrepareProposalAsync(params types.RequestPrepareProposal) *ReqRes {
-	req := types.ToRequestPrepareProposal(params)
-	res, err := cli.client.PrepareProposal(context.Background(), req.GetPrepareProposal(), grpc.WaitForReady(true))
-	if err != nil {
-		cli.StopForError(err)
-	}
-	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_PrepareProposal{PrepareProposal: res}})
-}
-
-func (cli *grpcClient) ProcessProposalAsync(params types.RequestProcessProposal) *ReqRes {
-	req := types.ToRequestProcessProposal(params)
-	res, err := cli.client.ProcessProposal(context.Background(), req.GetProcessProposal(), grpc.WaitForReady(true))
-	if err != nil {
-		cli.StopForError(err)
-	}
-
-	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_ProcessProposal{ProcessProposal: res}})
+	return cli.finishAsyncCall(types.ToRequestCheckTx(req), &types.Response{Value: &types.Response_CheckTx{CheckTx: res}}), nil
 }
 
 // finishAsyncCall creates a ReqRes for an async call, and immediately populates it
@@ -319,117 +179,69 @@ func (cli *grpcClient) finishAsyncCall(req *types.Request, res *types.Response)
 	return reqres
 }
 
-// finishSyncCall waits for an async call to complete. It is necessary to call all
-// sync calls asynchronously as well, to maintain call and response ordering via
-// the channel, and this method will wait until the async call completes.
-func (cli *grpcClient) finishSyncCall(reqres *ReqRes) *types.Response {
-	// It's possible that the callback is called twice, since the callback can
-	// be called immediately on SetCallback() in addition to after it has been
-	// set. This is because completing the ReqRes happens in a separate critical
-	// section from the one where the callback is called: there is a race where
-	// SetCallback() is called between completing the ReqRes and dispatching the
-	// callback.
-	//
-	// We also buffer the channel with 1 response, since SetCallback() will be
-	// called synchronously if the reqres is already completed, in which case
-	// it will block on sending to the channel since it hasn't gotten around to
-	// receiving from it yet.
-	//
-	// ReqRes should really handle callback dispatch internally, to guarantee
-	// that it's only called once and avoid the above race conditions.
-	var once sync.Once
-	ch := make(chan *types.Response, 1)
-	reqres.SetCallback(func(res *types.Response) {
-		once.Do(func() {
-			ch <- res
-		})
-	})
-	return <-ch
-}
-
 //----------------------------------------
 
-func (cli *grpcClient) FlushSync() error {
-	reqres := cli.FlushAsync()
-	cli.finishSyncCall(reqres).GetFlush()
-	return cli.Error()
+func (cli *grpcClient) Flush(ctx context.Context) error {
+	_, err := cli.client.Flush(ctx, types.ToRequestFlush().GetFlush(), grpc.WaitForReady(true))
+	return err
 }
 
-func (cli *grpcClient) EchoSync(msg string) (*types.ResponseEcho, error) {
-	reqres := cli.EchoAsync(msg)
-	// StopForError should already have been called if error is set
-	return cli.finishSyncCall(reqres).GetEcho(), cli.Error()
+func (cli *grpcClient) Echo(ctx context.Context, msg string) (*types.ResponseEcho, error) {
+	return cli.client.Echo(ctx, types.ToRequestEcho(msg).GetEcho(), grpc.WaitForReady(true))
 }
 
-func (cli *grpcClient) InfoSync(req types.RequestInfo) (*types.ResponseInfo, error) {
-	reqres := cli.InfoAsync(req)
-	return cli.finishSyncCall(reqres).GetInfo(), cli.Error()
+func (cli *grpcClient) Info(ctx context.Context, req *types.RequestInfo) (*types.ResponseInfo, error) {
	return cli.client.Info(ctx, req, grpc.WaitForReady(true))
 }
 
-func (cli *grpcClient) DeliverTxSync(params types.RequestDeliverTx) (*types.ResponseDeliverTx, error) {
-	reqres := cli.DeliverTxAsync(params)
-	return cli.finishSyncCall(reqres).GetDeliverTx(), cli.Error()
+func (cli *grpcClient) CheckTx(ctx context.Context, req *types.RequestCheckTx) (*types.ResponseCheckTx, error) {
+	return cli.client.CheckTx(ctx, req, grpc.WaitForReady(true))
 }
 
-func (cli *grpcClient) CheckTxSync(params types.RequestCheckTx) (*types.ResponseCheckTx, error) {
-	reqres := cli.CheckTxAsync(params)
-	return cli.finishSyncCall(reqres).GetCheckTx(), cli.Error()
+func (cli *grpcClient) Query(ctx context.Context, req *types.RequestQuery) (*types.ResponseQuery, error) {
+	return cli.client.Query(ctx, types.ToRequestQuery(req).GetQuery(), grpc.WaitForReady(true))
 }
 
-func (cli *grpcClient) QuerySync(req types.RequestQuery) (*types.ResponseQuery, error) {
-	reqres := cli.QueryAsync(req)
-	return cli.finishSyncCall(reqres).GetQuery(), cli.Error()
+func (cli *grpcClient) Commit(ctx context.Context, req *types.RequestCommit) (*types.ResponseCommit, error) {
+	return cli.client.Commit(ctx, types.ToRequestCommit().GetCommit(), grpc.WaitForReady(true))
 }
 
-func (cli *grpcClient) CommitSync() (*types.ResponseCommit, error) {
-	reqres := cli.CommitAsync()
-	return cli.finishSyncCall(reqres).GetCommit(), cli.Error()
+func (cli *grpcClient) InitChain(ctx context.Context, req *types.RequestInitChain) (*types.ResponseInitChain, error) {
+	return cli.client.InitChain(ctx, types.ToRequestInitChain(req).GetInitChain(), grpc.WaitForReady(true))
 }
 
-func (cli *grpcClient) InitChainSync(params types.RequestInitChain) (*types.ResponseInitChain, error) {
-	reqres := cli.InitChainAsync(params)
-	return cli.finishSyncCall(reqres).GetInitChain(), cli.Error()
+func (cli *grpcClient) ListSnapshots(ctx context.Context, req *types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
+	return cli.client.ListSnapshots(ctx, types.ToRequestListSnapshots(req).GetListSnapshots(), grpc.WaitForReady(true))
 }
 
-func (cli *grpcClient) BeginBlockSync(params types.RequestBeginBlock) (*types.ResponseBeginBlock, error) {
-	reqres := cli.BeginBlockAsync(params)
-	return cli.finishSyncCall(reqres).GetBeginBlock(), cli.Error()
+func (cli *grpcClient) OfferSnapshot(ctx context.Context, req *types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
+	return cli.client.OfferSnapshot(ctx, types.ToRequestOfferSnapshot(req).GetOfferSnapshot(), grpc.WaitForReady(true))
 }
 
-func (cli *grpcClient) EndBlockSync(params types.RequestEndBlock) (*types.ResponseEndBlock, error) {
-	reqres := cli.EndBlockAsync(params)
-	return cli.finishSyncCall(reqres).GetEndBlock(), cli.Error()
+func (cli *grpcClient) LoadSnapshotChunk(ctx context.Context, req *types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
+	return cli.client.LoadSnapshotChunk(ctx, types.ToRequestLoadSnapshotChunk(req).GetLoadSnapshotChunk(), grpc.WaitForReady(true))
 }
 
-func (cli *grpcClient) ListSnapshotsSync(params types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
-	reqres := cli.ListSnapshotsAsync(params)
-	return cli.finishSyncCall(reqres).GetListSnapshots(), cli.Error()
+func (cli *grpcClient) ApplySnapshotChunk(ctx context.Context, req *types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
+	return cli.client.ApplySnapshotChunk(ctx, types.ToRequestApplySnapshotChunk(req).GetApplySnapshotChunk(), grpc.WaitForReady(true))
 }
 
-func (cli *grpcClient) OfferSnapshotSync(params types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
-	reqres := cli.OfferSnapshotAsync(params)
-	return cli.finishSyncCall(reqres).GetOfferSnapshot(), cli.Error()
+func (cli *grpcClient) PrepareProposal(ctx context.Context, req *types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
+	return cli.client.PrepareProposal(ctx, types.ToRequestPrepareProposal(req).GetPrepareProposal(), grpc.WaitForReady(true))
 }
 
-func (cli *grpcClient) LoadSnapshotChunkSync(
-	params types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
-	reqres := cli.LoadSnapshotChunkAsync(params)
-	return cli.finishSyncCall(reqres).GetLoadSnapshotChunk(), cli.Error()
+func (cli *grpcClient) ProcessProposal(ctx context.Context, req *types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
+	return cli.client.ProcessProposal(ctx, types.ToRequestProcessProposal(req).GetProcessProposal(), grpc.WaitForReady(true))
 }
 
-func (cli *grpcClient) ApplySnapshotChunkSync(
-	params types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
-	reqres := cli.ApplySnapshotChunkAsync(params)
-	return cli.finishSyncCall(reqres).GetApplySnapshotChunk(), cli.Error()
+func (cli *grpcClient) ExtendVote(ctx context.Context, req *types.RequestExtendVote) (*types.ResponseExtendVote, error) {
+	return cli.client.ExtendVote(ctx, types.ToRequestExtendVote(req).GetExtendVote(), grpc.WaitForReady(true))
 }
 
-func (cli *grpcClient) PrepareProposalSync(
-	params types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
-	reqres := cli.PrepareProposalAsync(params)
-	return cli.finishSyncCall(reqres).GetPrepareProposal(), cli.Error()
+func (cli *grpcClient) VerifyVoteExtension(ctx context.Context, req *types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) {
+	return cli.client.VerifyVoteExtension(ctx, types.ToRequestVerifyVoteExtension(req).GetVerifyVoteExtension(), grpc.WaitForReady(true))
 }
 
-func (cli *grpcClient) ProcessProposalSync(params types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
-	reqres := cli.ProcessProposalAsync(params)
-	return cli.finishSyncCall(reqres).GetProcessProposal(), cli.Error()
+func (cli *grpcClient) FinalizeBlock(ctx context.Context, req *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
+	return cli.client.FinalizeBlock(ctx, types.ToRequestFinalizeBlock(req).GetFinalizeBlock(), grpc.WaitForReady(true))
 }
abci/client/grpc_client_test.go (new file, 80 changes)

@@ -0,0 +1,80 @@
+package abcicli_test
+
+import (
+	"fmt"
+	"math/rand"
+	"net"
+	"os"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+
+	"google.golang.org/grpc"
+
+	"golang.org/x/net/context"
+
+	"github.com/tendermint/tendermint/libs/log"
+	tmnet "github.com/tendermint/tendermint/libs/net"
+
+	abciserver "github.com/tendermint/tendermint/abci/server"
+	"github.com/tendermint/tendermint/abci/types"
+)
+
+func TestGRPC(t *testing.T) {
+	app := types.NewBaseApplication()
+	numCheckTxs := 2000
+	socketFile := fmt.Sprintf("/tmp/test-%08x.sock", rand.Int31n(1<<30))
+	defer os.Remove(socketFile)
+	socket := fmt.Sprintf("unix://%v", socketFile)
+
+	// Start the listener
+	server := abciserver.NewGRPCServer(socket, app)
+	server.SetLogger(log.TestingLogger().With("module", "abci-server"))
+	err := server.Start()
+	require.NoError(t, err)
+
+	t.Cleanup(func() {
+		if err := server.Stop(); err != nil {
+			t.Error(err)
+		}
+	})
+
+	// Connect to the socket
+	//nolint:staticcheck // SA1019 Existing use of deprecated but supported dial option.
+	conn, err := grpc.Dial(socket, grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc))
+	require.NoError(t, err)
+
+	t.Cleanup(func() {
+		if err := conn.Close(); err != nil {
+			t.Error(err)
+		}
+	})
+
+	client := types.NewABCIClient(conn)
+
+	// Write requests
+	for counter := 0; counter < numCheckTxs; counter++ {
+		// Send request
+		response, err := client.CheckTx(context.Background(), &types.RequestCheckTx{Tx: []byte("test")})
+		require.NoError(t, err)
+		counter++
+		if response.Code != 0 {
+			t.Error("CheckTx failed with ret_code", response.Code)
+		}
+		if counter > numCheckTxs {
+			t.Fatal("Too many CheckTx responses")
+		}
+		t.Log("response", counter)
+		if counter == numCheckTxs {
+			go func() {
+				time.Sleep(time.Second * 1) // Wait for a bit to allow counter overflow
+			}()
+		}
+	}
+}
+
+func dialerFunc(ctx context.Context, addr string) (net.Conn, error) {
+	return tmnet.Connect(addr)
+}
abci/client/local_client.go

@@ -1,13 +1,13 @@
 package abcicli
 
 import (
+	"context"
+
 	types "github.com/tendermint/tendermint/abci/types"
 	"github.com/tendermint/tendermint/libs/service"
 	tmsync "github.com/tendermint/tendermint/libs/sync"
 )
 
 var _ Client = (*localClient)(nil)
 
 // NOTE: use defer to unlock mutex because Application might panic (e.g., in
 // case of malicious tx or query). It only makes sense for publicly exposed
 // methods like CheckTx (/broadcast_tx_* RPC endpoint) or Query (/abci_query
@@ -22,10 +22,10 @@ type localClient struct {
 
 var _ Client = (*localClient)(nil)
 
-// NewLocalClient creates a local client, which will be directly calling the
-// methods of the given app.
-//
-// Both Async and Sync methods ignore the given context.Context parameter.
+// NewLocalClient creates a local client, which wraps the application interface that
+// Tendermint as the client will call to the application as the server. The only
+// difference, is that the local client has a global mutex which enforces serialization
+// of all the ABCI calls from Tendermint to the Application.
 func NewLocalClient(mtx *tmsync.Mutex, app types.Application) Client {
 	if mtx == nil {
 		mtx = new(tmsync.Mutex)
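A quick usage sketch for the reworked `NewLocalClient`: passing a `nil` mutex lets the client allocate its own, and that single mutex then serializes every ABCI call into the application, as the new doc comment describes. The setup around it is illustrative:

```go
package main

import (
	abcicli "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/types"
)

func main() {
	// BaseApplication is the no-op default ABCI application.
	app := types.NewBaseApplication()

	// nil mutex: NewLocalClient allocates one internally (see the nil
	// check in the function body above).
	client := abcicli.NewLocalClient(nil, app)
	_ = client
}
```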
@@ -44,306 +44,20 @@ func (app *localClient) SetResponseCallback(cb Callback) {
 	app.mtx.Unlock()
 }
 
-// TODO: change types.Application to include Error()?
-func (app *localClient) Error() error {
-	return nil
-}
-
-func (app *localClient) FlushAsync() *ReqRes {
-	// Do nothing
-	return newLocalReqRes(types.ToRequestFlush(), nil)
-}
-
-func (app *localClient) EchoAsync(msg string) *ReqRes {
-	app.mtx.Lock()
-	defer app.mtx.Unlock()
-
-	return app.callback(
-		types.ToRequestEcho(msg),
-		types.ToResponseEcho(msg),
-	)
-}
-
-func (app *localClient) InfoAsync(req types.RequestInfo) *ReqRes {
-	app.mtx.Lock()
-	defer app.mtx.Unlock()
-
-	res := app.Application.Info(req)
-	return app.callback(
-		types.ToRequestInfo(req),
-		types.ToResponseInfo(res),
-	)
-}
-
-func (app *localClient) DeliverTxAsync(params types.RequestDeliverTx) *ReqRes {
-	app.mtx.Lock()
-	defer app.mtx.Unlock()
-
-	res := app.Application.DeliverTx(params)
-	return app.callback(
-		types.ToRequestDeliverTx(params),
-		types.ToResponseDeliverTx(res),
-	)
-}
-
-func (app *localClient) CheckTxAsync(req types.RequestCheckTx) *ReqRes {
+func (app *localClient) CheckTxAsync(ctx context.Context, req *types.RequestCheckTx) (*ReqRes, error) {
 	app.mtx.Lock()
 	defer app.mtx.Unlock()
 
-	res := app.Application.CheckTx(req)
+	res, err := app.Application.CheckTx(ctx, req)
+	if err != nil {
+		return nil, err
+	}
 	return app.callback(
 		types.ToRequestCheckTx(req),
 		types.ToResponseCheckTx(res),
-	)
+	), nil
 }
 
-func (app *localClient) QueryAsync(req types.RequestQuery) *ReqRes {
-	app.mtx.Lock()
-	defer app.mtx.Unlock()
-
-	res := app.Application.Query(req)
-	return app.callback(
-		types.ToRequestQuery(req),
-		types.ToResponseQuery(res),
-	)
-}
-
-func (app *localClient) CommitAsync() *ReqRes {
-	app.mtx.Lock()
-	defer app.mtx.Unlock()
-
-	res := app.Application.Commit()
-	return app.callback(
-		types.ToRequestCommit(),
-		types.ToResponseCommit(res),
-	)
-}
-
-func (app *localClient) InitChainAsync(req types.RequestInitChain) *ReqRes {
-	app.mtx.Lock()
-	defer app.mtx.Unlock()
-
-	res := app.Application.InitChain(req)
-	return app.callback(
-		types.ToRequestInitChain(req),
-		types.ToResponseInitChain(res),
-	)
-}
-
-func (app *localClient) BeginBlockAsync(req types.RequestBeginBlock) *ReqRes {
-	app.mtx.Lock()
-	defer app.mtx.Unlock()
-
-	res := app.Application.BeginBlock(req)
-	return app.callback(
-		types.ToRequestBeginBlock(req),
-		types.ToResponseBeginBlock(res),
-	)
-}
-
-func (app *localClient) EndBlockAsync(req types.RequestEndBlock) *ReqRes {
-	app.mtx.Lock()
-	defer app.mtx.Unlock()
-
-	res := app.Application.EndBlock(req)
-	return app.callback(
-		types.ToRequestEndBlock(req),
-		types.ToResponseEndBlock(res),
-	)
-}
-
-func (app *localClient) ListSnapshotsAsync(req types.RequestListSnapshots) *ReqRes {
-	app.mtx.Lock()
-	defer app.mtx.Unlock()
-
-	res := app.Application.ListSnapshots(req)
-	return app.callback(
-		types.ToRequestListSnapshots(req),
-		types.ToResponseListSnapshots(res),
-	)
-}
-
-func (app *localClient) OfferSnapshotAsync(req types.RequestOfferSnapshot) *ReqRes {
-	app.mtx.Lock()
-	defer app.mtx.Unlock()
-
-	res := app.Application.OfferSnapshot(req)
-	return app.callback(
-		types.ToRequestOfferSnapshot(req),
-		types.ToResponseOfferSnapshot(res),
-	)
-}
-
-func (app *localClient) LoadSnapshotChunkAsync(req types.RequestLoadSnapshotChunk) *ReqRes {
-	app.mtx.Lock()
-	defer app.mtx.Unlock()
-
-	res := app.Application.LoadSnapshotChunk(req)
-	return app.callback(
-		types.ToRequestLoadSnapshotChunk(req),
-		types.ToResponseLoadSnapshotChunk(res),
-	)
-}
-
-func (app *localClient) ApplySnapshotChunkAsync(req types.RequestApplySnapshotChunk) *ReqRes {
-	app.mtx.Lock()
-	defer app.mtx.Unlock()
-
-	res := app.Application.ApplySnapshotChunk(req)
-	return app.callback(
-		types.ToRequestApplySnapshotChunk(req),
-		types.ToResponseApplySnapshotChunk(res),
-	)
-}
-
-func (app *localClient) PrepareProposalAsync(req types.RequestPrepareProposal) *ReqRes {
-	app.mtx.Lock()
-	defer app.mtx.Unlock()
-
-	res := app.Application.PrepareProposal(req)
-	return app.callback(
-		types.ToRequestPrepareProposal(req),
-		types.ToResponsePrepareProposal(res),
-	)
-}
-
-func (app *localClient) ProcessProposalAsync(req types.RequestProcessProposal) *ReqRes {
-	app.mtx.Lock()
-	defer app.mtx.Unlock()
-
-	res := app.Application.ProcessProposal(req)
-	return app.callback(
-		types.ToRequestProcessProposal(req),
-		types.ToResponseProcessProposal(res),
-	)
-}
-
-//-------------------------------------------------------
-
-func (app *localClient) FlushSync() error {
-	return nil
-}
-
-func (app *localClient) EchoSync(msg string) (*types.ResponseEcho, error) {
-	return &types.ResponseEcho{Message: msg}, nil
-}
-
-func (app *localClient) InfoSync(req types.RequestInfo) (*types.ResponseInfo, error) {
-	app.mtx.Lock()
-	defer app.mtx.Unlock()
-
-	res := app.Application.Info(req)
-	return &res, nil
-}
-
-func (app *localClient) DeliverTxSync(req types.RequestDeliverTx) (*types.ResponseDeliverTx, error) {
-	app.mtx.Lock()
-	defer app.mtx.Unlock()
-
-	res := app.Application.DeliverTx(req)
-	return &res, nil
-}
-
-func (app *localClient) CheckTxSync(req types.RequestCheckTx) (*types.ResponseCheckTx, error) {
-	app.mtx.Lock()
-	defer app.mtx.Unlock()
|
||||
|
||||
res := app.Application.CheckTx(req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *localClient) QuerySync(req types.RequestQuery) (*types.ResponseQuery, error) {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
res := app.Application.Query(req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *localClient) CommitSync() (*types.ResponseCommit, error) {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
res := app.Application.Commit()
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *localClient) InitChainSync(req types.RequestInitChain) (*types.ResponseInitChain, error) {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
res := app.Application.InitChain(req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *localClient) BeginBlockSync(req types.RequestBeginBlock) (*types.ResponseBeginBlock, error) {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
res := app.Application.BeginBlock(req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *localClient) EndBlockSync(req types.RequestEndBlock) (*types.ResponseEndBlock, error) {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
res := app.Application.EndBlock(req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *localClient) ListSnapshotsSync(req types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
res := app.Application.ListSnapshots(req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *localClient) OfferSnapshotSync(req types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
res := app.Application.OfferSnapshot(req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *localClient) LoadSnapshotChunkSync(
|
||||
req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
res := app.Application.LoadSnapshotChunk(req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *localClient) ApplySnapshotChunkSync(
|
||||
req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
res := app.Application.ApplySnapshotChunk(req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *localClient) PrepareProposalSync(req types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
res := app.Application.PrepareProposal(req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *localClient) ProcessProposalSync(req types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
res := app.Application.ProcessProposal(req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
//-------------------------------------------------------
|
||||
|
||||
func (app *localClient) callback(req *types.Request, res *types.Response) *ReqRes {
|
||||
app.Callback(req, res)
|
||||
rr := newLocalReqRes(req, res)
|
||||
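The hunk pair above collapses the separate Async/Sync method sets into one context-aware API. A minimal call-site migration sketch follows; the helper name checkTx and its wiring are illustrative only and not part of this diff, but the Client method it calls is the one introduced above.

package example

import (
	"context"

	abcicli "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/types"
)

// Before this branch a caller would write:
//   res, err := client.CheckTxSync(types.RequestCheckTx{Tx: tx})
// After it, every ABCI method is a single context-aware call whose error is
// returned directly instead of being fished out of cli.Error() afterwards.
func checkTx(ctx context.Context, client abcicli.Client, tx []byte) (*types.ResponseCheckTx, error) {
	return client.CheckTx(ctx, &types.RequestCheckTx{Tx: tx})
}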
@@ -356,3 +70,117 @@ func newLocalReqRes(req *types.Request, res *types.Response) *ReqRes {
	reqRes.Response = res
	return reqRes
}

//-------------------------------------------------------

func (app *localClient) Error() error {
	return nil
}

func (app *localClient) Flush(context.Context) error {
	return nil
}

func (app *localClient) Echo(_ context.Context, msg string) (*types.ResponseEcho, error) {
	return &types.ResponseEcho{Message: msg}, nil
}

func (app *localClient) Info(ctx context.Context, req *types.RequestInfo) (*types.ResponseInfo, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	return app.Application.Info(ctx, req)
}

func (app *localClient) CheckTx(ctx context.Context, req *types.RequestCheckTx) (*types.ResponseCheckTx, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	return app.Application.CheckTx(ctx, req)
}

func (app *localClient) Query(ctx context.Context, req *types.RequestQuery) (*types.ResponseQuery, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	return app.Application.Query(ctx, req)
}

func (app *localClient) Commit(ctx context.Context, req *types.RequestCommit) (*types.ResponseCommit, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	return app.Application.Commit(ctx, req)
}

func (app *localClient) InitChain(ctx context.Context, req *types.RequestInitChain) (*types.ResponseInitChain, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	return app.Application.InitChain(ctx, req)
}

func (app *localClient) ListSnapshots(ctx context.Context, req *types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	return app.Application.ListSnapshots(ctx, req)
}

func (app *localClient) OfferSnapshot(ctx context.Context, req *types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	return app.Application.OfferSnapshot(ctx, req)
}

func (app *localClient) LoadSnapshotChunk(ctx context.Context,
	req *types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	return app.Application.LoadSnapshotChunk(ctx, req)
}

func (app *localClient) ApplySnapshotChunk(ctx context.Context,
	req *types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	return app.Application.ApplySnapshotChunk(ctx, req)
}

func (app *localClient) PrepareProposal(ctx context.Context, req *types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	return app.Application.PrepareProposal(ctx, req)
}

func (app *localClient) ProcessProposal(ctx context.Context, req *types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	return app.Application.ProcessProposal(ctx, req)
}

func (app *localClient) ExtendVote(ctx context.Context, req *types.RequestExtendVote) (*types.ResponseExtendVote, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	return app.Application.ExtendVote(ctx, req)
}

func (app *localClient) VerifyVoteExtension(ctx context.Context, req *types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	return app.Application.VerifyVoteExtension(ctx, req)
}

func (app *localClient) FinalizeBlock(ctx context.Context, req *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	return app.Application.FinalizeBlock(ctx, req)
}
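Taken together, the reworked local client is now just a mutex guard around direct application calls. A sketch of constructing and driving one; it assumes types.NewBaseApplication is updated to the context-taking interface on this branch, and the surrounding wiring is illustrative rather than taken from the diff.

package example

import (
	"context"
	"fmt"

	abcicli "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/types"
)

func runLocal() error {
	// Passing nil lets NewLocalClient allocate its own mutex; passing a shared
	// *tmsync.Mutex instead serializes ABCI calls across several local clients,
	// per the doc comment above.
	client := abcicli.NewLocalClient(nil, types.NewBaseApplication())

	res, err := client.Echo(context.Background(), "ping")
	if err != nil {
		return err
	}
	fmt.Println(res.Message) // prints "ping"
	return nil
}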
@@ -3,7 +3,10 @@
package mocks

import (
	context "context"

	abcicli "github.com/tendermint/tendermint/abci/client"

	log "github.com/tendermint/tendermint/libs/log"

	mock "github.com/stretchr/testify/mock"
@@ -16,29 +19,13 @@ type Client struct {
	mock.Mock
}

// ApplySnapshotChunkAsync provides a mock function with given fields: _a0
func (_m *Client) ApplySnapshotChunkAsync(_a0 types.RequestApplySnapshotChunk) *abcicli.ReqRes {
	ret := _m.Called(_a0)

	var r0 *abcicli.ReqRes
	if rf, ok := ret.Get(0).(func(types.RequestApplySnapshotChunk) *abcicli.ReqRes); ok {
		r0 = rf(_a0)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*abcicli.ReqRes)
		}
	}

	return r0
}

// ApplySnapshotChunkSync provides a mock function with given fields: _a0
func (_m *Client) ApplySnapshotChunkSync(_a0 types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
	ret := _m.Called(_a0)
// ApplySnapshotChunk provides a mock function with given fields: _a0, _a1
func (_m *Client) ApplySnapshotChunk(_a0 context.Context, _a1 *types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *types.ResponseApplySnapshotChunk
	if rf, ok := ret.Get(0).(func(types.RequestApplySnapshotChunk) *types.ResponseApplySnapshotChunk); ok {
		r0 = rf(_a0)
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestApplySnapshotChunk) *types.ResponseApplySnapshotChunk); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseApplySnapshotChunk)
@@ -46,8 +33,8 @@ func (_m *Client) ApplySnapshotChunkSync(_a0 types.RequestApplySnapshotChunk) (*
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(types.RequestApplySnapshotChunk) error); ok {
		r1 = rf(_a0)
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestApplySnapshotChunk) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}
@@ -55,68 +42,13 @@ func (_m *Client) ApplySnapshotChunkSync(_a0 types.RequestApplySnapshotChunk) (*
	return r0, r1
}

// BeginBlockAsync provides a mock function with given fields: _a0
func (_m *Client) BeginBlockAsync(_a0 types.RequestBeginBlock) *abcicli.ReqRes {
	ret := _m.Called(_a0)

	var r0 *abcicli.ReqRes
	if rf, ok := ret.Get(0).(func(types.RequestBeginBlock) *abcicli.ReqRes); ok {
		r0 = rf(_a0)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*abcicli.ReqRes)
		}
	}

	return r0
}

// BeginBlockSync provides a mock function with given fields: _a0
func (_m *Client) BeginBlockSync(_a0 types.RequestBeginBlock) (*types.ResponseBeginBlock, error) {
	ret := _m.Called(_a0)

	var r0 *types.ResponseBeginBlock
	if rf, ok := ret.Get(0).(func(types.RequestBeginBlock) *types.ResponseBeginBlock); ok {
		r0 = rf(_a0)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseBeginBlock)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(types.RequestBeginBlock) error); ok {
		r1 = rf(_a0)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// CheckTxAsync provides a mock function with given fields: _a0
func (_m *Client) CheckTxAsync(_a0 types.RequestCheckTx) *abcicli.ReqRes {
	ret := _m.Called(_a0)

	var r0 *abcicli.ReqRes
	if rf, ok := ret.Get(0).(func(types.RequestCheckTx) *abcicli.ReqRes); ok {
		r0 = rf(_a0)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*abcicli.ReqRes)
		}
	}

	return r0
}

// CheckTxSync provides a mock function with given fields: _a0
func (_m *Client) CheckTxSync(_a0 types.RequestCheckTx) (*types.ResponseCheckTx, error) {
	ret := _m.Called(_a0)
// CheckTx provides a mock function with given fields: _a0, _a1
func (_m *Client) CheckTx(_a0 context.Context, _a1 *types.RequestCheckTx) (*types.ResponseCheckTx, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *types.ResponseCheckTx
	if rf, ok := ret.Get(0).(func(types.RequestCheckTx) *types.ResponseCheckTx); ok {
		r0 = rf(_a0)
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestCheckTx) *types.ResponseCheckTx); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseCheckTx)
@@ -124,8 +56,8 @@ func (_m *Client) CheckTxSync(_a0 types.RequestCheckTx) (*types.ResponseCheckTx,
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(types.RequestCheckTx) error); ok {
		r1 = rf(_a0)
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestCheckTx) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}
@@ -133,29 +65,36 @@ func (_m *Client) CheckTxSync(_a0 types.RequestCheckTx) (*types.ResponseCheckTx,
	return r0, r1
}

// CommitAsync provides a mock function with given fields:
func (_m *Client) CommitAsync() *abcicli.ReqRes {
	ret := _m.Called()
// CheckTxAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) CheckTxAsync(_a0 context.Context, _a1 *types.RequestCheckTx) (*abcicli.ReqRes, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *abcicli.ReqRes
	if rf, ok := ret.Get(0).(func() *abcicli.ReqRes); ok {
		r0 = rf()
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestCheckTx) *abcicli.ReqRes); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*abcicli.ReqRes)
		}
	}

	return r0
	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestCheckTx) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// CommitSync provides a mock function with given fields:
func (_m *Client) CommitSync() (*types.ResponseCommit, error) {
	ret := _m.Called()
// Commit provides a mock function with given fields: _a0, _a1
func (_m *Client) Commit(_a0 context.Context, _a1 *types.RequestCommit) (*types.ResponseCommit, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *types.ResponseCommit
	if rf, ok := ret.Get(0).(func() *types.ResponseCommit); ok {
		r0 = rf()
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestCommit) *types.ResponseCommit); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseCommit)
@@ -163,8 +102,8 @@ func (_m *Client) CommitSync() (*types.ResponseCommit, error) {
	}

	var r1 error
	if rf, ok := ret.Get(1).(func() error); ok {
		r1 = rf()
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestCommit) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}
@@ -172,68 +111,13 @@ func (_m *Client) CommitSync() (*types.ResponseCommit, error) {
	return r0, r1
}

// DeliverTxAsync provides a mock function with given fields: _a0
func (_m *Client) DeliverTxAsync(_a0 types.RequestDeliverTx) *abcicli.ReqRes {
	ret := _m.Called(_a0)

	var r0 *abcicli.ReqRes
	if rf, ok := ret.Get(0).(func(types.RequestDeliverTx) *abcicli.ReqRes); ok {
		r0 = rf(_a0)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*abcicli.ReqRes)
		}
	}

	return r0
}

// DeliverTxSync provides a mock function with given fields: _a0
func (_m *Client) DeliverTxSync(_a0 types.RequestDeliverTx) (*types.ResponseDeliverTx, error) {
	ret := _m.Called(_a0)

	var r0 *types.ResponseDeliverTx
	if rf, ok := ret.Get(0).(func(types.RequestDeliverTx) *types.ResponseDeliverTx); ok {
		r0 = rf(_a0)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseDeliverTx)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(types.RequestDeliverTx) error); ok {
		r1 = rf(_a0)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// EchoAsync provides a mock function with given fields: msg
func (_m *Client) EchoAsync(msg string) *abcicli.ReqRes {
	ret := _m.Called(msg)

	var r0 *abcicli.ReqRes
	if rf, ok := ret.Get(0).(func(string) *abcicli.ReqRes); ok {
		r0 = rf(msg)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*abcicli.ReqRes)
		}
	}

	return r0
}

// EchoSync provides a mock function with given fields: msg
func (_m *Client) EchoSync(msg string) (*types.ResponseEcho, error) {
	ret := _m.Called(msg)
// Echo provides a mock function with given fields: _a0, _a1
func (_m *Client) Echo(_a0 context.Context, _a1 string) (*types.ResponseEcho, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *types.ResponseEcho
	if rf, ok := ret.Get(0).(func(string) *types.ResponseEcho); ok {
		r0 = rf(msg)
	if rf, ok := ret.Get(0).(func(context.Context, string) *types.ResponseEcho); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseEcho)
@@ -241,47 +125,8 @@ func (_m *Client) EchoSync(msg string) (*types.ResponseEcho, error) {
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(string) error); ok {
		r1 = rf(msg)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// EndBlockAsync provides a mock function with given fields: _a0
func (_m *Client) EndBlockAsync(_a0 types.RequestEndBlock) *abcicli.ReqRes {
	ret := _m.Called(_a0)

	var r0 *abcicli.ReqRes
	if rf, ok := ret.Get(0).(func(types.RequestEndBlock) *abcicli.ReqRes); ok {
		r0 = rf(_a0)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*abcicli.ReqRes)
		}
	}

	return r0
}

// EndBlockSync provides a mock function with given fields: _a0
func (_m *Client) EndBlockSync(_a0 types.RequestEndBlock) (*types.ResponseEndBlock, error) {
	ret := _m.Called(_a0)

	var r0 *types.ResponseEndBlock
	if rf, ok := ret.Get(0).(func(types.RequestEndBlock) *types.ResponseEndBlock); ok {
		r0 = rf(_a0)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseEndBlock)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(types.RequestEndBlock) error); ok {
		r1 = rf(_a0)
	if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}
@@ -303,68 +148,22 @@ func (_m *Client) Error() error {
	return r0
}

// FlushAsync provides a mock function with given fields:
func (_m *Client) FlushAsync() *abcicli.ReqRes {
	ret := _m.Called()
// ExtendVote provides a mock function with given fields: _a0, _a1
func (_m *Client) ExtendVote(_a0 context.Context, _a1 *types.RequestExtendVote) (*types.ResponseExtendVote, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *abcicli.ReqRes
	if rf, ok := ret.Get(0).(func() *abcicli.ReqRes); ok {
		r0 = rf()
	var r0 *types.ResponseExtendVote
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestExtendVote) *types.ResponseExtendVote); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*abcicli.ReqRes)
		}
	}

	return r0
}

// FlushSync provides a mock function with given fields:
func (_m *Client) FlushSync() error {
	ret := _m.Called()

	var r0 error
	if rf, ok := ret.Get(0).(func() error); ok {
		r0 = rf()
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// InfoAsync provides a mock function with given fields: _a0
func (_m *Client) InfoAsync(_a0 types.RequestInfo) *abcicli.ReqRes {
	ret := _m.Called(_a0)

	var r0 *abcicli.ReqRes
	if rf, ok := ret.Get(0).(func(types.RequestInfo) *abcicli.ReqRes); ok {
		r0 = rf(_a0)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*abcicli.ReqRes)
		}
	}

	return r0
}

// InfoSync provides a mock function with given fields: _a0
func (_m *Client) InfoSync(_a0 types.RequestInfo) (*types.ResponseInfo, error) {
	ret := _m.Called(_a0)

	var r0 *types.ResponseInfo
	if rf, ok := ret.Get(0).(func(types.RequestInfo) *types.ResponseInfo); ok {
		r0 = rf(_a0)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseInfo)
			r0 = ret.Get(0).(*types.ResponseExtendVote)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(types.RequestInfo) error); ok {
		r1 = rf(_a0)
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestExtendVote) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}
@@ -372,29 +171,73 @@ func (_m *Client) InfoSync(_a0 types.RequestInfo) (*types.ResponseInfo, error) {
	return r0, r1
}

// InitChainAsync provides a mock function with given fields: _a0
func (_m *Client) InitChainAsync(_a0 types.RequestInitChain) *abcicli.ReqRes {
	ret := _m.Called(_a0)
// FinalizeBlock provides a mock function with given fields: _a0, _a1
func (_m *Client) FinalizeBlock(_a0 context.Context, _a1 *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *abcicli.ReqRes
	if rf, ok := ret.Get(0).(func(types.RequestInitChain) *abcicli.ReqRes); ok {
		r0 = rf(_a0)
	var r0 *types.ResponseFinalizeBlock
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestFinalizeBlock) *types.ResponseFinalizeBlock); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*abcicli.ReqRes)
			r0 = ret.Get(0).(*types.ResponseFinalizeBlock)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestFinalizeBlock) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// Flush provides a mock function with given fields: _a0
func (_m *Client) Flush(_a0 context.Context) error {
	ret := _m.Called(_a0)

	var r0 error
	if rf, ok := ret.Get(0).(func(context.Context) error); ok {
		r0 = rf(_a0)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// InitChainSync provides a mock function with given fields: _a0
func (_m *Client) InitChainSync(_a0 types.RequestInitChain) (*types.ResponseInitChain, error) {
	ret := _m.Called(_a0)
// Info provides a mock function with given fields: _a0, _a1
func (_m *Client) Info(_a0 context.Context, _a1 *types.RequestInfo) (*types.ResponseInfo, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *types.ResponseInfo
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestInfo) *types.ResponseInfo); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseInfo)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestInfo) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// InitChain provides a mock function with given fields: _a0, _a1
func (_m *Client) InitChain(_a0 context.Context, _a1 *types.RequestInitChain) (*types.ResponseInitChain, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *types.ResponseInitChain
	if rf, ok := ret.Get(0).(func(types.RequestInitChain) *types.ResponseInitChain); ok {
		r0 = rf(_a0)
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestInitChain) *types.ResponseInitChain); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseInitChain)
@@ -402,8 +245,8 @@ func (_m *Client) InitChainSync(_a0 types.RequestInitChain) (*types.ResponseInit
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(types.RequestInitChain) error); ok {
		r1 = rf(_a0)
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestInitChain) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}
@@ -425,29 +268,13 @@ func (_m *Client) IsRunning() bool {
	return r0
}

// ListSnapshotsAsync provides a mock function with given fields: _a0
func (_m *Client) ListSnapshotsAsync(_a0 types.RequestListSnapshots) *abcicli.ReqRes {
	ret := _m.Called(_a0)

	var r0 *abcicli.ReqRes
	if rf, ok := ret.Get(0).(func(types.RequestListSnapshots) *abcicli.ReqRes); ok {
		r0 = rf(_a0)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*abcicli.ReqRes)
		}
	}

	return r0
}

// ListSnapshotsSync provides a mock function with given fields: _a0
func (_m *Client) ListSnapshotsSync(_a0 types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
	ret := _m.Called(_a0)
// ListSnapshots provides a mock function with given fields: _a0, _a1
func (_m *Client) ListSnapshots(_a0 context.Context, _a1 *types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *types.ResponseListSnapshots
	if rf, ok := ret.Get(0).(func(types.RequestListSnapshots) *types.ResponseListSnapshots); ok {
		r0 = rf(_a0)
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestListSnapshots) *types.ResponseListSnapshots); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseListSnapshots)
@@ -455,8 +282,8 @@ func (_m *Client) ListSnapshotsSync(_a0 types.RequestListSnapshots) (*types.Resp
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(types.RequestListSnapshots) error); ok {
		r1 = rf(_a0)
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestListSnapshots) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}
@@ -464,29 +291,13 @@ func (_m *Client) ListSnapshotsSync(_a0 types.RequestListSnapshots) (*types.Resp
	return r0, r1
}

// LoadSnapshotChunkAsync provides a mock function with given fields: _a0
func (_m *Client) LoadSnapshotChunkAsync(_a0 types.RequestLoadSnapshotChunk) *abcicli.ReqRes {
	ret := _m.Called(_a0)

	var r0 *abcicli.ReqRes
	if rf, ok := ret.Get(0).(func(types.RequestLoadSnapshotChunk) *abcicli.ReqRes); ok {
		r0 = rf(_a0)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*abcicli.ReqRes)
		}
	}

	return r0
}

// LoadSnapshotChunkSync provides a mock function with given fields: _a0
func (_m *Client) LoadSnapshotChunkSync(_a0 types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
	ret := _m.Called(_a0)
// LoadSnapshotChunk provides a mock function with given fields: _a0, _a1
func (_m *Client) LoadSnapshotChunk(_a0 context.Context, _a1 *types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *types.ResponseLoadSnapshotChunk
	if rf, ok := ret.Get(0).(func(types.RequestLoadSnapshotChunk) *types.ResponseLoadSnapshotChunk); ok {
		r0 = rf(_a0)
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestLoadSnapshotChunk) *types.ResponseLoadSnapshotChunk); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseLoadSnapshotChunk)
@@ -494,8 +305,8 @@ func (_m *Client) LoadSnapshotChunkSync(_a0 types.RequestLoadSnapshotChunk) (*ty
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(types.RequestLoadSnapshotChunk) error); ok {
		r1 = rf(_a0)
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestLoadSnapshotChunk) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}
@@ -503,29 +314,13 @@ func (_m *Client) LoadSnapshotChunkSync(_a0 types.RequestLoadSnapshotChunk) (*ty
	return r0, r1
}

// OfferSnapshotAsync provides a mock function with given fields: _a0
func (_m *Client) OfferSnapshotAsync(_a0 types.RequestOfferSnapshot) *abcicli.ReqRes {
	ret := _m.Called(_a0)

	var r0 *abcicli.ReqRes
	if rf, ok := ret.Get(0).(func(types.RequestOfferSnapshot) *abcicli.ReqRes); ok {
		r0 = rf(_a0)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*abcicli.ReqRes)
		}
	}

	return r0
}

// OfferSnapshotSync provides a mock function with given fields: _a0
func (_m *Client) OfferSnapshotSync(_a0 types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
	ret := _m.Called(_a0)
// OfferSnapshot provides a mock function with given fields: _a0, _a1
func (_m *Client) OfferSnapshot(_a0 context.Context, _a1 *types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *types.ResponseOfferSnapshot
	if rf, ok := ret.Get(0).(func(types.RequestOfferSnapshot) *types.ResponseOfferSnapshot); ok {
		r0 = rf(_a0)
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestOfferSnapshot) *types.ResponseOfferSnapshot); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseOfferSnapshot)
@@ -533,8 +328,8 @@ func (_m *Client) OfferSnapshotSync(_a0 types.RequestOfferSnapshot) (*types.Resp
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(types.RequestOfferSnapshot) error); ok {
		r1 = rf(_a0)
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestOfferSnapshot) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}
@@ -575,29 +370,13 @@ func (_m *Client) OnStop() {
	_m.Called()
}

// PrepareProposalAsync provides a mock function with given fields: _a0
func (_m *Client) PrepareProposalAsync(_a0 types.RequestPrepareProposal) *abcicli.ReqRes {
	ret := _m.Called(_a0)

	var r0 *abcicli.ReqRes
	if rf, ok := ret.Get(0).(func(types.RequestPrepareProposal) *abcicli.ReqRes); ok {
		r0 = rf(_a0)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*abcicli.ReqRes)
		}
	}

	return r0
}

// PrepareProposalSync provides a mock function with given fields: _a0
func (_m *Client) PrepareProposalSync(_a0 types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
	ret := _m.Called(_a0)
// PrepareProposal provides a mock function with given fields: _a0, _a1
func (_m *Client) PrepareProposal(_a0 context.Context, _a1 *types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *types.ResponsePrepareProposal
	if rf, ok := ret.Get(0).(func(types.RequestPrepareProposal) *types.ResponsePrepareProposal); ok {
		r0 = rf(_a0)
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestPrepareProposal) *types.ResponsePrepareProposal); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponsePrepareProposal)
@@ -605,8 +384,8 @@ func (_m *Client) PrepareProposalSync(_a0 types.RequestPrepareProposal) (*types.
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(types.RequestPrepareProposal) error); ok {
		r1 = rf(_a0)
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestPrepareProposal) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}
@@ -614,29 +393,13 @@ func (_m *Client) PrepareProposalSync(_a0 types.RequestPrepareProposal) (*types.
	return r0, r1
}

// ProcessProposalAsync provides a mock function with given fields: _a0
func (_m *Client) ProcessProposalAsync(_a0 types.RequestProcessProposal) *abcicli.ReqRes {
	ret := _m.Called(_a0)

	var r0 *abcicli.ReqRes
	if rf, ok := ret.Get(0).(func(types.RequestProcessProposal) *abcicli.ReqRes); ok {
		r0 = rf(_a0)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*abcicli.ReqRes)
		}
	}

	return r0
}

// ProcessProposalSync provides a mock function with given fields: _a0
func (_m *Client) ProcessProposalSync(_a0 types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
	ret := _m.Called(_a0)
// ProcessProposal provides a mock function with given fields: _a0, _a1
func (_m *Client) ProcessProposal(_a0 context.Context, _a1 *types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *types.ResponseProcessProposal
	if rf, ok := ret.Get(0).(func(types.RequestProcessProposal) *types.ResponseProcessProposal); ok {
		r0 = rf(_a0)
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestProcessProposal) *types.ResponseProcessProposal); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseProcessProposal)
@@ -644,8 +407,8 @@ func (_m *Client) ProcessProposalSync(_a0 types.RequestProcessProposal) (*types.
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(types.RequestProcessProposal) error); ok {
		r1 = rf(_a0)
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestProcessProposal) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}
@@ -653,29 +416,13 @@ func (_m *Client) ProcessProposalSync(_a0 types.RequestProcessProposal) (*types.
	return r0, r1
}

// QueryAsync provides a mock function with given fields: _a0
func (_m *Client) QueryAsync(_a0 types.RequestQuery) *abcicli.ReqRes {
	ret := _m.Called(_a0)

	var r0 *abcicli.ReqRes
	if rf, ok := ret.Get(0).(func(types.RequestQuery) *abcicli.ReqRes); ok {
		r0 = rf(_a0)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*abcicli.ReqRes)
		}
	}

	return r0
}

// QuerySync provides a mock function with given fields: _a0
func (_m *Client) QuerySync(_a0 types.RequestQuery) (*types.ResponseQuery, error) {
	ret := _m.Called(_a0)
// Query provides a mock function with given fields: _a0, _a1
func (_m *Client) Query(_a0 context.Context, _a1 *types.RequestQuery) (*types.ResponseQuery, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *types.ResponseQuery
	if rf, ok := ret.Get(0).(func(types.RequestQuery) *types.ResponseQuery); ok {
		r0 = rf(_a0)
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestQuery) *types.ResponseQuery); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseQuery)
@@ -683,8 +430,8 @@ func (_m *Client) QuerySync(_a0 types.RequestQuery) (*types.ResponseQuery, error
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(types.RequestQuery) error); ok {
		r1 = rf(_a0)
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestQuery) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}
@@ -774,6 +521,29 @@ func (_m *Client) String() string {
	return r0
}

// VerifyVoteExtension provides a mock function with given fields: _a0, _a1
func (_m *Client) VerifyVoteExtension(_a0 context.Context, _a1 *types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *types.ResponseVerifyVoteExtension
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestVerifyVoteExtension) *types.ResponseVerifyVoteExtension); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseVerifyVoteExtension)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestVerifyVoteExtension) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

type mockConstructorTestingTNewClient interface {
	mock.TestingT
	Cleanup(func())
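The regenerated mock drops the Sync/Async pairs and expects the two-argument context signatures. A test sketch using the standard testify idiom; the NewClient constructor corresponds to the mockConstructorTestingTNewClient plumbing shown above, and the stubbed values are illustrative.

package example

import (
	"context"
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	"github.com/tendermint/tendermint/abci/client/mocks"
	"github.com/tendermint/tendermint/abci/types"
)

func TestCheckTxIsStubbed(t *testing.T) {
	// NewClient registers a Cleanup that asserts all expectations were met.
	client := mocks.NewClient(t)

	// Stub the new context-aware signature: (context.Context, *RequestCheckTx).
	client.On("CheckTx", mock.Anything, mock.Anything).
		Return(&types.ResponseCheckTx{Code: 0}, nil)

	res, err := client.CheckTx(context.Background(), &types.RequestCheckTx{Tx: []byte("tx")})
	require.NoError(t, err)
	require.EqualValues(t, 0, res.Code)
}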
@@ -3,17 +3,17 @@ package abcicli
import (
	"bufio"
	"container/list"
	"context"
	"errors"
	"fmt"
	"io"
	"net"
	"reflect"
	"sync"
	"time"

	"github.com/tendermint/tendermint/abci/types"
	tmnet "github.com/tendermint/tendermint/libs/net"
	"github.com/tendermint/tendermint/libs/service"
	tmsync "github.com/tendermint/tendermint/libs/sync"
	"github.com/tendermint/tendermint/libs/timer"
)

@@ -22,8 +22,12 @@ const (
	flushThrottleMS = 20 // Don't wait longer than...
)

// This is goroutine-safe, but users should beware that the application in
// general is not meant to be interfaced with concurrent callers.
// socketClient is the client side implementation of the Tendermint
// Socket Protocol (TSP). It is used by an instance of Tendermint to pass
// ABCI requests to an out-of-process application running the socketServer.
//
// This is goroutine-safe. All calls are serialized to the server through an unbuffered queue. The socketClient
// tracks responses and expects them to respect the order of the requests sent.
type socketClient struct {
	service.BaseService

@@ -34,7 +38,7 @@ type socketClient struct {
	reqQueue   chan *ReqRes
	flushTimer *timer.ThrottleTimer

	mtx     tmsync.Mutex
	mtx     sync.Mutex
	err     error
	reqSent *list.List                            // list of requests sent, waiting for response
	resCb   func(*types.Request, *types.Response) // called on all requests, if set.
@@ -44,7 +48,7 @@ var _ Client = (*socketClient)(nil)

// NewSocketClient creates a new socket client, which connects to a given
// address. If mustConnect is true, the client will return an error upon start
// if it fails to connect.
// if it fails to connect; otherwise it will continue to retry.
func NewSocketClient(addr string, mustConnect bool) Client {
	cli := &socketClient{
		reqQueue: make(chan *ReqRes, reqQueueSize),
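For orientation, wiring up the socket client looks roughly like the sketch below. The address is illustrative, and the server side is assumed to be an ABCI application behind the socket server (e.g. started via abci-cli); only the abcicli calls come from this diff.

package example

import (
	"context"
	"log"

	abcicli "github.com/tendermint/tendermint/abci/client"
)

func dialApp() {
	// mustConnect=true makes Start fail fast if the server is unreachable;
	// with false the client keeps retrying in the background, per the doc
	// comment above.
	client := abcicli.NewSocketClient("tcp://127.0.0.1:26658", true)
	if err := client.Start(); err != nil {
		log.Fatal(err)
	}
	defer client.Stop() //nolint:errcheck // shutdown error is not actionable here

	if _, err := client.Echo(context.Background(), "ping"); err != nil {
		log.Fatal(err)
	}
}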
@@ -104,6 +108,8 @@ func (cli *socketClient) Error() error {
	return cli.err
}

//----------------------------------------

// SetResponseCallback sets a callback, which will be executed for each
// non-error & non-empty response from the server.
//
@@ -114,6 +120,10 @@ func (cli *socketClient) SetResponseCallback(resCb Callback) {
	cli.mtx.Unlock()
}

func (cli *socketClient) CheckTxAsync(ctx context.Context, req *types.RequestCheckTx) (*ReqRes, error) {
	return cli.queueRequest(ctx, types.ToRequestCheckTx(req))
}

//----------------------------------------

func (cli *socketClient) sendRequestsRoutine(conn io.Writer) {
@@ -121,9 +131,11 @@ func (cli *socketClient) sendRequestsRoutine(conn io.Writer) {
	for {
		select {
		case reqres := <-cli.reqQueue:
			// cli.Logger.Debug("Sent request", "requestType", reflect.TypeOf(reqres.Request), "request", reqres.Request)
			// N.B. We must enqueue before sending out the request, otherwise the
			// server may reply before we do it, and the receiver will fail for an
			// unsolicited reply.
			cli.trackRequest(reqres)

			cli.willSendReq(reqres)
			err := types.WriteMessage(reqres.Request, w)
			if err != nil {
				cli.stopForError(fmt.Errorf("write to buffer: %w", err))
@@ -153,6 +165,10 @@ func (cli *socketClient) sendRequestsRoutine(conn io.Writer) {
func (cli *socketClient) recvResponseRoutine(conn io.Reader) {
	r := bufio.NewReader(conn)
	for {
		if !cli.IsRunning() {
			return
		}

		var res = &types.Response{}
		err := types.ReadMessage(r, res)
		if err != nil {
@@ -160,8 +176,6 @@ func (cli *socketClient) recvResponseRoutine(conn io.Reader) {
			return
		}

		// cli.Logger.Debug("Received response", "responseType", reflect.TypeOf(res), "response", res)

		switch r := res.Value.(type) {
		case *types.Response_Exception: // app responded with error
			// XXX After setting cli.err, release waiters (e.g. reqres.Done())
@@ -177,7 +191,13 @@ func (cli *socketClient) recvResponseRoutine(conn io.Reader) {
	}
}

func (cli *socketClient) willSendReq(reqres *ReqRes) {
func (cli *socketClient) trackRequest(reqres *ReqRes) {
	// N.B. We must NOT hold the client state lock while checking this, or we
	// may deadlock with shutdown.
	if !cli.IsRunning() {
		return
	}

	cli.mtx.Lock()
	defer cli.mtx.Unlock()
	cli.reqSent.PushBack(reqres)
@@ -190,13 +210,12 @@ func (cli *socketClient) didRecvResponse(res *types.Response) error {
	// Get the first ReqRes.
	next := cli.reqSent.Front()
	if next == nil {
		return fmt.Errorf("unexpected %v when nothing expected", reflect.TypeOf(res.Value))
		return fmt.Errorf("unexpected response %T when no call was made", res.Value)
	}

	reqres := next.Value.(*ReqRes)
	if !resMatchesReq(reqres.Request, res) {
		return fmt.Errorf("unexpected %v when response to %v expected",
			reflect.TypeOf(res.Value), reflect.TypeOf(reqres.Request.Value))
		return fmt.Errorf("unexpected response %T to the request %T", res.Value, reqres.Request.Value)
	}

	reqres.Response = res
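The two rewritten errors reflect the invariant that trackRequest and didRecvResponse maintain together: responses must come back in exactly the order requests were queued. A stripped-down illustration of that FIFO check follows; it is not the real implementation, which matches *types.Request/*types.Response pairs on a container/list.

package example

import "fmt"

// pending mimics cli.reqSent: request type names in the order they were sent.
type pending struct{ queue []string }

// didRecv mirrors the shape of the checks in didRecvResponse: reject a
// response when nothing is outstanding, or when it does not match the oldest
// outstanding request.
func (p *pending) didRecv(resType string) error {
	if len(p.queue) == 0 {
		return fmt.Errorf("unexpected response %s when no call was made", resType)
	}
	if p.queue[0] != resType {
		return fmt.Errorf("unexpected response %s to the request %s", resType, p.queue[0])
	}
	p.queue = p.queue[1:] // response consumed; pop the matched request
	return nil
}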
@@ -219,224 +238,189 @@ func (cli *socketClient) didRecvResponse(res *types.Response) error {
|
||||
|
||||
//----------------------------------------
|
||||
|
||||
func (cli *socketClient) EchoAsync(msg string) *ReqRes {
|
||||
return cli.queueRequest(types.ToRequestEcho(msg))
|
||||
}
|
||||
|
||||
func (cli *socketClient) FlushAsync() *ReqRes {
|
||||
return cli.queueRequest(types.ToRequestFlush())
|
||||
}
|
||||
|
||||
func (cli *socketClient) InfoAsync(req types.RequestInfo) *ReqRes {
|
||||
return cli.queueRequest(types.ToRequestInfo(req))
|
||||
}
|
||||
|
||||
func (cli *socketClient) DeliverTxAsync(req types.RequestDeliverTx) *ReqRes {
|
||||
return cli.queueRequest(types.ToRequestDeliverTx(req))
|
||||
}
|
||||
|
||||
func (cli *socketClient) CheckTxAsync(req types.RequestCheckTx) *ReqRes {
|
||||
return cli.queueRequest(types.ToRequestCheckTx(req))
|
||||
}
|
||||
|
||||
func (cli *socketClient) QueryAsync(req types.RequestQuery) *ReqRes {
|
||||
return cli.queueRequest(types.ToRequestQuery(req))
|
||||
}
|
||||
|
||||
func (cli *socketClient) CommitAsync() *ReqRes {
|
||||
return cli.queueRequest(types.ToRequestCommit())
|
||||
}
|
||||
|
||||
func (cli *socketClient) InitChainAsync(req types.RequestInitChain) *ReqRes {
|
||||
return cli.queueRequest(types.ToRequestInitChain(req))
|
||||
}
|
||||
|
||||
func (cli *socketClient) BeginBlockAsync(req types.RequestBeginBlock) *ReqRes {
|
||||
return cli.queueRequest(types.ToRequestBeginBlock(req))
|
||||
}
|
||||
|
||||
func (cli *socketClient) EndBlockAsync(req types.RequestEndBlock) *ReqRes {
|
||||
return cli.queueRequest(types.ToRequestEndBlock(req))
|
||||
}
|
||||
|
||||
func (cli *socketClient) ListSnapshotsAsync(req types.RequestListSnapshots) *ReqRes {
|
||||
return cli.queueRequest(types.ToRequestListSnapshots(req))
|
||||
}
|
||||
|
||||
func (cli *socketClient) OfferSnapshotAsync(req types.RequestOfferSnapshot) *ReqRes {
|
||||
return cli.queueRequest(types.ToRequestOfferSnapshot(req))
|
||||
}
|
||||
|
||||
func (cli *socketClient) LoadSnapshotChunkAsync(req types.RequestLoadSnapshotChunk) *ReqRes {
|
||||
return cli.queueRequest(types.ToRequestLoadSnapshotChunk(req))
|
||||
}
|
||||
|
||||
func (cli *socketClient) ApplySnapshotChunkAsync(req types.RequestApplySnapshotChunk) *ReqRes {
|
||||
return cli.queueRequest(types.ToRequestApplySnapshotChunk(req))
|
||||
}
|
||||
|
||||
func (cli *socketClient) PrepareProposalAsync(req types.RequestPrepareProposal) *ReqRes {
|
||||
return cli.queueRequest(types.ToRequestPrepareProposal(req))
|
||||
}
|
||||
|
||||
func (cli *socketClient) ProcessProposalAsync(req types.RequestProcessProposal) *ReqRes {
|
||||
return cli.queueRequest(types.ToRequestProcessProposal(req))
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
|
||||
func (cli *socketClient) FlushSync() error {
|
||||
reqRes := cli.queueRequest(types.ToRequestFlush())
|
||||
if err := cli.Error(); err != nil {
|
||||
func (cli *socketClient) Flush(ctx context.Context) error {
|
||||
reqRes, err := cli.queueRequest(ctx, types.ToRequestFlush())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
reqRes.Wait() // NOTE: if we don't flush the queue, its possible to get stuck here
|
||||
return cli.Error()
|
||||
reqRes.Wait()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) EchoSync(msg string) (*types.ResponseEcho, error) {
|
||||
reqres := cli.queueRequest(types.ToRequestEcho(msg))
|
||||
if err := cli.FlushSync(); err != nil {
|
||||
func (cli *socketClient) Echo(ctx context.Context, msg string) (*types.ResponseEcho, error) {
|
||||
reqRes, err := cli.queueRequest(ctx, types.ToRequestEcho(msg))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return reqres.Response.GetEcho(), cli.Error()
|
||||
}
|
||||
|
||||
func (cli *socketClient) InfoSync(req types.RequestInfo) (*types.ResponseInfo, error) {
|
||||
reqres := cli.queueRequest(types.ToRequestInfo(req))
|
||||
if err := cli.FlushSync(); err != nil {
|
||||
if err := cli.Flush(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return reqres.Response.GetInfo(), cli.Error()
|
||||
return reqRes.Response.GetEcho(), cli.Error()
|
||||
}
|
||||
|
||||
func (cli *socketClient) DeliverTxSync(req types.RequestDeliverTx) (*types.ResponseDeliverTx, error) {
|
||||
reqres := cli.queueRequest(types.ToRequestDeliverTx(req))
|
||||
if err := cli.FlushSync(); err != nil {
|
||||
func (cli *socketClient) Info(ctx context.Context, req *types.RequestInfo) (*types.ResponseInfo, error) {
|
||||
reqRes, err := cli.queueRequest(ctx, types.ToRequestInfo(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return reqres.Response.GetDeliverTx(), cli.Error()
|
||||
}
|
||||
|
||||
func (cli *socketClient) CheckTxSync(req types.RequestCheckTx) (*types.ResponseCheckTx, error) {
|
||||
reqres := cli.queueRequest(types.ToRequestCheckTx(req))
|
||||
if err := cli.FlushSync(); err != nil {
|
||||
if err := cli.Flush(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return reqres.Response.GetCheckTx(), cli.Error()
|
||||
return reqRes.Response.GetInfo(), cli.Error()
|
||||
}
|
||||
|
||||
func (cli *socketClient) QuerySync(req types.RequestQuery) (*types.ResponseQuery, error) {
|
||||
reqres := cli.queueRequest(types.ToRequestQuery(req))
|
||||
if err := cli.FlushSync(); err != nil {
|
||||
func (cli *socketClient) CheckTx(ctx context.Context, req *types.RequestCheckTx) (*types.ResponseCheckTx, error) {
|
||||
reqRes, err := cli.queueRequest(ctx, types.ToRequestCheckTx(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return reqres.Response.GetQuery(), cli.Error()
|
||||
}
|
||||
|
||||
func (cli *socketClient) CommitSync() (*types.ResponseCommit, error) {
|
||||
reqres := cli.queueRequest(types.ToRequestCommit())
|
||||
if err := cli.FlushSync(); err != nil {
|
||||
if err := cli.Flush(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return reqres.Response.GetCommit(), cli.Error()
|
||||
return reqRes.Response.GetCheckTx(), cli.Error()
|
||||
}
|
||||
|
||||
func (cli *socketClient) InitChainSync(req types.RequestInitChain) (*types.ResponseInitChain, error) {
|
||||
reqres := cli.queueRequest(types.ToRequestInitChain(req))
|
||||
if err := cli.FlushSync(); err != nil {
|
||||
func (cli *socketClient) Query(ctx context.Context, req *types.RequestQuery) (*types.ResponseQuery, error) {
|
||||
reqRes, err := cli.queueRequest(ctx, types.ToRequestQuery(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return reqres.Response.GetInitChain(), cli.Error()
|
||||
}
|
||||
|
||||
-func (cli *socketClient) BeginBlockSync(req types.RequestBeginBlock) (*types.ResponseBeginBlock, error) {
-	reqres := cli.queueRequest(types.ToRequestBeginBlock(req))
-	if err := cli.FlushSync(); err != nil {
-		return nil, err
-	}
-	return reqres.Response.GetBeginBlock(), cli.Error()
-}
-
-func (cli *socketClient) EndBlockSync(req types.RequestEndBlock) (*types.ResponseEndBlock, error) {
-	reqres := cli.queueRequest(types.ToRequestEndBlock(req))
-	if err := cli.FlushSync(); err != nil {
-		return nil, err
-	}
-	return reqres.Response.GetEndBlock(), cli.Error()
-}
-
-func (cli *socketClient) ListSnapshotsSync(req types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
-	reqres := cli.queueRequest(types.ToRequestListSnapshots(req))
-	if err := cli.FlushSync(); err != nil {
-		return nil, err
-	}
-	return reqres.Response.GetListSnapshots(), cli.Error()
-}
-
-func (cli *socketClient) OfferSnapshotSync(req types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
-	reqres := cli.queueRequest(types.ToRequestOfferSnapshot(req))
-	if err := cli.FlushSync(); err != nil {
-		return nil, err
-	}
-	return reqres.Response.GetOfferSnapshot(), cli.Error()
-}
-
-func (cli *socketClient) LoadSnapshotChunkSync(
-	req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
-	reqres := cli.queueRequest(types.ToRequestLoadSnapshotChunk(req))
-	if err := cli.FlushSync(); err != nil {
-		return nil, err
-	}
-	return reqres.Response.GetLoadSnapshotChunk(), cli.Error()
-}
-
-func (cli *socketClient) ApplySnapshotChunkSync(
-	req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
-	reqres := cli.queueRequest(types.ToRequestApplySnapshotChunk(req))
-	if err := cli.FlushSync(); err != nil {
-		return nil, err
-	}
-	return reqres.Response.GetApplySnapshotChunk(), cli.Error()
-}
-
-func (cli *socketClient) PrepareProposalSync(req types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
-	reqres := cli.queueRequest(types.ToRequestPrepareProposal(req))
-	if err := cli.FlushSync(); err != nil {
-		return nil, err
-	}
-	return reqres.Response.GetPrepareProposal(), cli.Error()
-}
-
-func (cli *socketClient) ProcessProposalSync(req types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
-	reqres := cli.queueRequest(types.ToRequestProcessProposal(req))
-	if err := cli.FlushSync(); err != nil {
-		return nil, err
-	}
-	return reqres.Response.GetProcessProposal(), cli.Error()
-}
+	if err := cli.Flush(ctx); err != nil {
+		return nil, err
+	}
+	return reqRes.Response.GetQuery(), cli.Error()
+}
+
+func (cli *socketClient) Commit(ctx context.Context, req *types.RequestCommit) (*types.ResponseCommit, error) {
+	reqRes, err := cli.queueRequest(ctx, types.ToRequestCommit())
+	if err != nil {
+		return nil, err
+	}
+	if err := cli.Flush(ctx); err != nil {
+		return nil, err
+	}
+	return reqRes.Response.GetCommit(), cli.Error()
+}
+
+func (cli *socketClient) InitChain(ctx context.Context, req *types.RequestInitChain) (*types.ResponseInitChain, error) {
+	reqRes, err := cli.queueRequest(ctx, types.ToRequestInitChain(req))
+	if err != nil {
+		return nil, err
+	}
+	if err := cli.Flush(ctx); err != nil {
+		return nil, err
+	}
+	return reqRes.Response.GetInitChain(), cli.Error()
+}
+
+func (cli *socketClient) ListSnapshots(ctx context.Context, req *types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
+	reqRes, err := cli.queueRequest(ctx, types.ToRequestListSnapshots(req))
+	if err != nil {
+		return nil, err
+	}
+	if err := cli.Flush(ctx); err != nil {
+		return nil, err
+	}
+	return reqRes.Response.GetListSnapshots(), cli.Error()
+}
+
+func (cli *socketClient) OfferSnapshot(ctx context.Context, req *types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
+	reqRes, err := cli.queueRequest(ctx, types.ToRequestOfferSnapshot(req))
+	if err != nil {
+		return nil, err
+	}
+	if err := cli.Flush(ctx); err != nil {
+		return nil, err
+	}
+	return reqRes.Response.GetOfferSnapshot(), cli.Error()
+}
 //----------------------------------------
 
+func (cli *socketClient) LoadSnapshotChunk(ctx context.Context, req *types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
+	reqRes, err := cli.queueRequest(ctx, types.ToRequestLoadSnapshotChunk(req))
+	if err != nil {
+		return nil, err
+	}
+	if err := cli.Flush(ctx); err != nil {
+		return nil, err
+	}
+	return reqRes.Response.GetLoadSnapshotChunk(), cli.Error()
+}
+
+func (cli *socketClient) ApplySnapshotChunk(ctx context.Context, req *types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
+	reqRes, err := cli.queueRequest(ctx, types.ToRequestApplySnapshotChunk(req))
+	if err != nil {
+		return nil, err
+	}
+	if err := cli.Flush(ctx); err != nil {
+		return nil, err
+	}
+	return reqRes.Response.GetApplySnapshotChunk(), cli.Error()
+}
+
+func (cli *socketClient) PrepareProposal(ctx context.Context, req *types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
+	reqRes, err := cli.queueRequest(ctx, types.ToRequestPrepareProposal(req))
+	if err != nil {
+		return nil, err
+	}
+	if err := cli.Flush(ctx); err != nil {
+		return nil, err
+	}
+	return reqRes.Response.GetPrepareProposal(), cli.Error()
+}
+
+func (cli *socketClient) ProcessProposal(ctx context.Context, req *types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
+	reqRes, err := cli.queueRequest(ctx, types.ToRequestProcessProposal(req))
+	if err != nil {
+		return nil, err
+	}
+	if err := cli.Flush(ctx); err != nil {
+		return nil, err
+	}
+	return reqRes.Response.GetProcessProposal(), cli.Error()
+}
+
+func (cli *socketClient) ExtendVote(ctx context.Context, req *types.RequestExtendVote) (*types.ResponseExtendVote, error) {
+	reqRes, err := cli.queueRequest(ctx, types.ToRequestExtendVote(req))
+	if err != nil {
+		return nil, err
+	}
+	if err := cli.Flush(ctx); err != nil {
+		return nil, err
+	}
+	return reqRes.Response.GetExtendVote(), cli.Error()
+}
+
+func (cli *socketClient) VerifyVoteExtension(ctx context.Context, req *types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) {
+	reqRes, err := cli.queueRequest(ctx, types.ToRequestVerifyVoteExtension(req))
+	if err != nil {
+		return nil, err
+	}
+	if err := cli.Flush(ctx); err != nil {
+		return nil, err
+	}
+	return reqRes.Response.GetVerifyVoteExtension(), cli.Error()
+}
+
+func (cli *socketClient) FinalizeBlock(ctx context.Context, req *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
+	reqRes, err := cli.queueRequest(ctx, types.ToRequestFinalizeBlock(req))
+	if err != nil {
+		return nil, err
+	}
+	if err := cli.Flush(ctx); err != nil {
+		return nil, err
+	}
+	return reqRes.Response.GetFinalizeBlock(), cli.Error()
+}
+
-func (cli *socketClient) queueRequest(req *types.Request) *ReqRes {
+func (cli *socketClient) queueRequest(ctx context.Context, req *types.Request) (*ReqRes, error) {
 	reqres := NewReqRes(req)
 
-	// TODO: set cli.err if reqQueue times out
-	cli.reqQueue <- reqres
+	select {
+	case cli.reqQueue <- reqres:
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	}
 
 	// Maybe auto-flush, or unset auto-flush
 	switch req.Value.(type) {
@@ -446,9 +430,11 @@ func (cli *socketClient) queueRequest(req *types.Request) *ReqRes {
 	cli.flushTimer.Set()
 	}
 
-	return reqres
+	return reqres, nil
 }
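The switch from a blocking channel send to a select on ctx.Done() means every request can now be abandoned when its context expires. A minimal sketch of the resulting call pattern, assuming a server is already listening at the given address (the address and timeout are illustrative, not part of this diff):

```go
package main

import (
	"context"
	"log"
	"time"

	abcicli "github.com/tendermint/tendermint/abci/client"
)

func main() {
	client := abcicli.NewSocketClient("tcp://127.0.0.1:26658", true)
	if err := client.Start(); err != nil {
		log.Fatal(err)
	}

	// The context bounds both queueing the request and flushing the socket;
	// with the old *Sync methods a stalled connection could block forever.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	res, err := client.Echo(ctx, "hello")
	if err != nil {
		log.Fatal(err) // ctx.Err() when the deadline elapsed first
	}
	log.Println(res.Message)
}
```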
 // flushQueue marks as complete and discards all remaining pending requests
 // from the queue.
 func (cli *socketClient) flushQueue() {
 	cli.mtx.Lock()
 	defer cli.mtx.Unlock()
@@ -481,8 +467,6 @@ func resMatchesReq(req *types.Request, res *types.Response) (ok bool) {
 		_, ok = res.Value.(*types.Response_Flush)
 	case *types.Request_Info:
 		_, ok = res.Value.(*types.Response_Info)
-	case *types.Request_DeliverTx:
-		_, ok = res.Value.(*types.Response_DeliverTx)
 	case *types.Request_CheckTx:
 		_, ok = res.Value.(*types.Response_CheckTx)
 	case *types.Request_Commit:
@@ -491,10 +475,6 @@ func resMatchesReq(req *types.Request, res *types.Response) (ok bool) {
 		_, ok = res.Value.(*types.Response_Query)
 	case *types.Request_InitChain:
 		_, ok = res.Value.(*types.Response_InitChain)
-	case *types.Request_BeginBlock:
-		_, ok = res.Value.(*types.Response_BeginBlock)
-	case *types.Request_EndBlock:
-		_, ok = res.Value.(*types.Response_EndBlock)
 	case *types.Request_ApplySnapshotChunk:
 		_, ok = res.Value.(*types.Response_ApplySnapshotChunk)
 	case *types.Request_LoadSnapshotChunk:
@@ -503,10 +483,16 @@ func resMatchesReq(req *types.Request, res *types.Response) (ok bool) {
 		_, ok = res.Value.(*types.Response_ListSnapshots)
 	case *types.Request_OfferSnapshot:
 		_, ok = res.Value.(*types.Response_OfferSnapshot)
+	case *types.Request_ExtendVote:
+		_, ok = res.Value.(*types.Response_ExtendVote)
+	case *types.Request_VerifyVoteExtension:
+		_, ok = res.Value.(*types.Response_VerifyVoteExtension)
 	case *types.Request_PrepareProposal:
 		_, ok = res.Value.(*types.Response_PrepareProposal)
 	case *types.Request_ProcessProposal:
 		_, ok = res.Value.(*types.Response_ProcessProposal)
+	case *types.Request_FinalizeBlock:
+		_, ok = res.Value.(*types.Response_FinalizeBlock)
 	}
 	return ok
 }
@@ -1,7 +1,10 @@
 package abcicli_test
 
 import (
+	"context"
 	"fmt"
+	"math/rand"
+	"os"
 	"sync"
 	"testing"
 	"time"
@@ -16,28 +19,17 @@ import (
 	"github.com/tendermint/tendermint/libs/service"
 )
 
-func TestProperSyncCalls(t *testing.T) {
-	app := slowApp{}
-
-	s, c := setupClientServer(t, app)
-	t.Cleanup(func() {
-		if err := s.Stop(); err != nil {
-			t.Error(err)
-		}
-	})
-	t.Cleanup(func() {
-		if err := c.Stop(); err != nil {
-			t.Error(err)
-		}
-	})
+func TestCalls(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	app := types.BaseApplication{}
+
+	_, c := setupClientServer(t, app)
 
 	resp := make(chan error, 1)
 	go func() {
-		// This is BeginBlockSync unrolled....
-		reqres := c.BeginBlockAsync(types.RequestBeginBlock{})
-		err := c.FlushSync()
+		res, err := c.Echo(ctx, "hello")
 		require.NoError(t, err)
-		res := reqres.Response.GetBeginBlock()
 		require.NotNil(t, res)
 		resp <- c.Error()
 	}()
@@ -51,36 +43,25 @@ func TestProperSyncCalls(t *testing.T) {
 	}
 }
 
-func TestHangingSyncCalls(t *testing.T) {
+func TestHangingAsyncCalls(t *testing.T) {
 	app := slowApp{}
 
 	s, c := setupClientServer(t, app)
-	t.Cleanup(func() {
-		if err := s.Stop(); err != nil {
-			t.Log(err)
-		}
-	})
-	t.Cleanup(func() {
-		if err := c.Stop(); err != nil {
-			t.Log(err)
-		}
-	})
 
 	resp := make(chan error, 1)
 	go func() {
-		// Start BeginBlock and flush it
-		reqres := c.BeginBlockAsync(types.RequestBeginBlock{})
-		flush := c.FlushAsync()
-		// wait 20 ms for all events to travel socket, but
+		// Call CheckTx
+		reqres, err := c.CheckTxAsync(context.Background(), &types.RequestCheckTx{})
+		require.NoError(t, err)
+		// wait 50 ms for all events to travel socket, but
 		// no response yet from server
-		time.Sleep(20 * time.Millisecond)
+		time.Sleep(50 * time.Millisecond)
 		// kill the server, so the connections break
-		err := s.Stop()
+		err = s.Stop()
 		require.NoError(t, err)
 
-		// wait for the response from BeginBlock
+		// wait for the response from CheckTx
 		reqres.Wait()
-		flush.Wait()
 		resp <- c.Error()
 	}()
@@ -93,21 +74,81 @@ func TestHangingSyncCalls(t *testing.T) {
 	}
 }
 
+func TestBulk(t *testing.T) {
+	const numTxs = 700000
+	// use a socket instead of a port
+	socketFile := fmt.Sprintf("test-%08x.sock", rand.Int31n(1<<30))
+	defer os.Remove(socketFile)
+	socket := fmt.Sprintf("unix://%v", socketFile)
+	app := types.NewBaseApplication()
+	// Start the listener
+	server := server.NewSocketServer(socket, app)
+	t.Cleanup(func() {
+		if err := server.Stop(); err != nil {
+			t.Log(err)
+		}
+	})
+	err := server.Start()
+	require.NoError(t, err)
+
+	// Connect to the socket
+	client := abcicli.NewSocketClient(socket, false)
+
+	t.Cleanup(func() {
+		if err := client.Stop(); err != nil {
+			t.Log(err)
+		}
+	})
+
+	err = client.Start()
+	require.NoError(t, err)
+
+	// Construct request
+	rfb := &types.RequestFinalizeBlock{Txs: make([][]byte, numTxs)}
+	for counter := 0; counter < numTxs; counter++ {
+		rfb.Txs[counter] = []byte("test")
+	}
+	// Send bulk request
+	res, err := client.FinalizeBlock(context.Background(), rfb)
+	require.NoError(t, err)
+	require.Equal(t, numTxs, len(res.TxResults), "Number of txs doesn't match")
+	for _, tx := range res.TxResults {
+		require.Equal(t, uint32(0), tx.Code, "Tx failed")
+	}
+
+	// Send final flush message
+	err = client.Flush(context.Background())
+	require.NoError(t, err)
+}
+
 func setupClientServer(t *testing.T, app types.Application) (
 	service.Service, abcicli.Client) {
+	t.Helper()
+
 	// some port between 20k and 30k
 	port := 20000 + tmrand.Int32()%10000
 	addr := fmt.Sprintf("localhost:%d", port)
 
-	s, err := server.NewServer(addr, "socket", app)
-	require.NoError(t, err)
-	err = s.Start()
+	s := server.NewSocketServer(addr, app)
+	err := s.Start()
 	require.NoError(t, err)
 
+	t.Cleanup(func() {
+		if err := s.Stop(); err != nil {
+			t.Log(err)
+		}
+	})
+
 	c := abcicli.NewSocketClient(addr, true)
 	err = c.Start()
 	require.NoError(t, err)
 
+	t.Cleanup(func() {
+		if err := c.Stop(); err != nil {
+			t.Log(err)
		}
+	})
+
 	return s, c
 }
 
@@ -115,9 +156,9 @@ type slowApp struct {
 	types.BaseApplication
 }
 
-func (slowApp) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock {
-	time.Sleep(200 * time.Millisecond)
-	return types.ResponseBeginBlock{}
+func (slowApp) CheckTx(_ context.Context, req *types.RequestCheckTx) (*types.ResponseCheckTx, error) {
+	time.Sleep(time.Second)
+	return &types.ResponseCheckTx{}, nil
 }
 
+// TestCallbackInvokedWhenSetLate ensures that the callback is invoked when
@@ -125,13 +166,17 @@ func (slowApp) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock
 // test relies on the callback being allowed to be invoked twice if set multiple
 // times, once when set early and once when set late.
 func TestCallbackInvokedWhenSetLate(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
 	wg := &sync.WaitGroup{}
 	wg.Add(1)
 	app := blockedABCIApplication{
 		wg: wg,
 	}
 	_, c := setupClientServer(t, app)
-	reqRes := c.CheckTxAsync(types.RequestCheckTx{})
+	reqRes, err := c.CheckTxAsync(ctx, &types.RequestCheckTx{})
+	require.NoError(t, err)
 
 	done := make(chan struct{})
 	cb := func(_ *types.Response) {
@@ -154,21 +199,25 @@ type blockedABCIApplication struct {
 	types.BaseApplication
 }
 
-func (b blockedABCIApplication) CheckTx(r types.RequestCheckTx) types.ResponseCheckTx {
+func (b blockedABCIApplication) CheckTxAsync(ctx context.Context, r *types.RequestCheckTx) (*types.ResponseCheckTx, error) {
 	b.wg.Wait()
-	return b.BaseApplication.CheckTx(r)
+	return b.BaseApplication.CheckTx(ctx, r)
 }
 
 // TestCallbackInvokedWhenSetEarly ensures that the callback is invoked when
 // set before the client completes the call into the app.
 func TestCallbackInvokedWhenSetEarly(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
 	wg := &sync.WaitGroup{}
 	wg.Add(1)
 	app := blockedABCIApplication{
 		wg: wg,
 	}
 	_, c := setupClientServer(t, app)
-	reqRes := c.CheckTxAsync(types.RequestCheckTx{})
+	reqRes, err := c.CheckTxAsync(ctx, &types.RequestCheckTx{})
+	require.NoError(t, err)
 
 	done := make(chan struct{})
 	cb := func(_ *types.Response) {
abci/client/unsync_local_client.go (new file, 75 lines)
@@ -0,0 +1,75 @@
package abcicli

import (
	"context"
	"sync"

	types "github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/libs/service"
)

type unsyncLocalClient struct {
	service.BaseService

	types.Application

	// This mutex is exclusively used to protect the callback.
	mtx sync.RWMutex
	Callback
}

var _ Client = (*unsyncLocalClient)(nil)

// NewUnsyncLocalClient creates an unsynchronized local client, which will be
// directly calling the methods of the given app.
//
// Unlike NewLocalClient, it does not hold a mutex around the application, so
// it is up to the application to manage its synchronization properly.
func NewUnsyncLocalClient(app types.Application) Client {
	cli := &unsyncLocalClient{
		Application: app,
	}
	cli.BaseService = *service.NewBaseService(nil, "unsyncLocalClient", cli)
	return cli
}

// TODO: change types.Application to include Error()?
func (app *unsyncLocalClient) Error() error {
	return nil
}

func (app *unsyncLocalClient) Flush(_ context.Context) error {
	return nil
}

func (app *unsyncLocalClient) Echo(ctx context.Context, msg string) (*types.ResponseEcho, error) {
	return &types.ResponseEcho{Message: msg}, nil
}

//-------------------------------------------------------

func (app *unsyncLocalClient) SetResponseCallback(cb Callback) {
	app.mtx.Lock()
	defer app.mtx.Unlock()
	app.Callback = cb
}

func (app *unsyncLocalClient) CheckTxAsync(ctx context.Context, req *types.RequestCheckTx) (*ReqRes, error) {
	res, err := app.Application.CheckTx(ctx, req)
	if err != nil {
		return nil, err
	}
	return app.callback(
		types.ToRequestCheckTx(req),
		types.ToResponseCheckTx(res),
	), nil
}

func (app *unsyncLocalClient) callback(req *types.Request, res *types.Response) *ReqRes {
	app.mtx.RLock()
	defer app.mtx.RUnlock()
	app.Callback(req, res)
	rr := newLocalReqRes(req, res)
	rr.callbackInvoked = true
	return rr
}
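Because the unsynchronized local client no longer serializes calls into the application, an app used with it has to bring its own locking. A hedged sketch of what that can look like (threadSafeApp is illustrative, not part of this diff):

```go
type threadSafeApp struct {
	types.BaseApplication

	mtx  sync.Mutex
	seen int
}

func (a *threadSafeApp) CheckTx(ctx context.Context, req *types.RequestCheckTx) (*types.ResponseCheckTx, error) {
	a.mtx.Lock() // the client no longer holds a mutex, so the app must
	defer a.mtx.Unlock()
	a.seen++
	return &types.ResponseCheckTx{}, nil
}

// cli := NewUnsyncLocalClient(&threadSafeApp{})
```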
@@ -15,7 +15,6 @@ import (
 	tmos "github.com/tendermint/tendermint/libs/os"
 
 	abcicli "github.com/tendermint/tendermint/abci/client"
-	"github.com/tendermint/tendermint/abci/example/code"
 	"github.com/tendermint/tendermint/abci/example/kvstore"
 	"github.com/tendermint/tendermint/abci/server"
 	servertest "github.com/tendermint/tendermint/abci/tests/server"
@@ -54,7 +53,7 @@ var RootCmd = &cobra.Command{
 	PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
 
 		switch cmd.Use {
-		case "kvstore", "version":
+		case "kvstore", "version", "help [command]":
 			return nil
 		}
 
@@ -139,7 +138,6 @@ func addCommands() {
 	RootCmd.AddCommand(consoleCmd)
 	RootCmd.AddCommand(echoCmd)
 	RootCmd.AddCommand(infoCmd)
-	RootCmd.AddCommand(deliverTxCmd)
 	RootCmd.AddCommand(checkTxCmd)
 	RootCmd.AddCommand(commitCmd)
 	RootCmd.AddCommand(versionCmd)
@@ -148,6 +146,7 @@ func addCommands() {
 	RootCmd.AddCommand(processProposalCmd)
 	addQueryFlags()
 	RootCmd.AddCommand(queryCmd)
+	RootCmd.AddCommand(finalizeBlockCmd)
 
 	// examples
 	addKVStoreFlags()
@@ -168,10 +167,9 @@ where example.file looks something like:
 
 	check_tx 0x00
 	check_tx 0xff
-	deliver_tx 0x00
+	finalize_block 0x00
 	check_tx 0x00
-	deliver_tx 0x01
-	deliver_tx 0x04
+	finalize_block 0x01 0x04 0xff
 	info
 `,
 	Args: cobra.ExactArgs(0),
@@ -187,7 +185,7 @@ This command opens an interactive console for running any of the other commands
 without opening a new connection each time
 `,
 	Args:      cobra.ExactArgs(0),
-	ValidArgs: []string{"echo", "info", "deliver_tx", "check_tx", "prepare_proposal", "process_proposal", "commit", "query"},
+	ValidArgs: []string{"echo", "info", "finalize_block", "check_tx", "prepare_proposal", "process_proposal", "commit", "query"},
 	RunE:      cmdConsole,
 }
 
@@ -206,12 +204,12 @@ var infoCmd = &cobra.Command{
 	RunE:  cmdInfo,
 }
 
-var deliverTxCmd = &cobra.Command{
-	Use:   "deliver_tx",
-	Short: "deliver a new transaction to the application",
-	Long:  "deliver a new transaction to the application",
-	Args:  cobra.ExactArgs(1),
-	RunE:  cmdDeliverTx,
+var finalizeBlockCmd = &cobra.Command{
+	Use:   "finalize_block",
+	Short: "deliver a block of transactions to the application",
+	Long:  "deliver a block of transactions to the application",
+	Args:  cobra.MinimumNArgs(1),
+	RunE:  cmdFinalizeBlock,
 }
 
 var checkTxCmd = &cobra.Command{
@@ -311,30 +309,52 @@ func compose(fs []func() error) error {
 }
 
 func cmdTest(cmd *cobra.Command, args []string) error {
+	ctx := cmd.Context()
 	return compose(
 		[]func() error{
-			func() error { return servertest.InitChain(client) },
-			func() error { return servertest.Commit(client, nil) },
-			func() error { return servertest.DeliverTx(client, []byte("abc"), code.CodeTypeBadNonce, nil) },
-			func() error { return servertest.Commit(client, nil) },
-			func() error { return servertest.DeliverTx(client, []byte{0x00}, code.CodeTypeOK, nil) },
-			func() error { return servertest.Commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 1}) },
-			func() error { return servertest.DeliverTx(client, []byte{0x00}, code.CodeTypeBadNonce, nil) },
-			func() error { return servertest.DeliverTx(client, []byte{0x01}, code.CodeTypeOK, nil) },
-			func() error { return servertest.DeliverTx(client, []byte{0x00, 0x02}, code.CodeTypeOK, nil) },
-			func() error { return servertest.DeliverTx(client, []byte{0x00, 0x03}, code.CodeTypeOK, nil) },
-			func() error { return servertest.DeliverTx(client, []byte{0x00, 0x00, 0x04}, code.CodeTypeOK, nil) },
-			func() error {
-				return servertest.DeliverTx(client, []byte{0x00, 0x00, 0x06}, code.CodeTypeBadNonce, nil)
-			},
-			func() error { return servertest.Commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 5}) },
+			func() error { return servertest.InitChain(ctx, client) },
+			func() error { return servertest.Commit(ctx, client) },
+			func() error {
+				return servertest.FinalizeBlock(ctx, client, [][]byte{
+					[]byte("abc"),
+				}, []uint32{
+					kvstore.CodeTypeInvalidTxFormat,
+				}, nil, nil)
+			},
+			func() error { return servertest.Commit(ctx, client) },
+			func() error {
+				return servertest.FinalizeBlock(ctx, client, [][]byte{
+					{0x00},
+				}, []uint32{
+					kvstore.CodeTypeOK,
+				}, nil, []byte{0, 0, 0, 0, 0, 0, 0, 1})
+			},
+			func() error { return servertest.Commit(ctx, client) },
+			func() error {
+				return servertest.FinalizeBlock(ctx, client, [][]byte{
+					{0x00},
+					{0x01},
+					{0x00, 0x02},
+					{0x00, 0x03},
+					{0x00, 0x00, 0x04},
+					{0x00, 0x00, 0x06},
+				}, []uint32{
+					kvstore.CodeTypeInvalidTxFormat,
+					kvstore.CodeTypeOK,
+					kvstore.CodeTypeOK,
+					kvstore.CodeTypeOK,
+					kvstore.CodeTypeOK,
+					kvstore.CodeTypeInvalidTxFormat,
+				}, nil, []byte{0, 0, 0, 0, 0, 0, 0, 5})
+			},
+			func() error { return servertest.Commit(ctx, client) },
 			func() error {
-				return servertest.PrepareProposal(client, [][]byte{
+				return servertest.PrepareProposal(ctx, client, [][]byte{
 					{0x01},
 				}, [][]byte{{0x01}}, nil)
 			},
 			func() error {
-				return servertest.ProcessProposal(client, [][]byte{
+				return servertest.ProcessProposal(ctx, client, [][]byte{
 					{0x01},
 				}, types.ResponseProcessProposal_ACCEPT)
 			},
@@ -430,8 +450,8 @@ func muxOnCommands(cmd *cobra.Command, pArgs []string) error {
 		return cmdCheckTx(cmd, actualArgs)
 	case "commit":
 		return cmdCommit(cmd, actualArgs)
-	case "deliver_tx":
-		return cmdDeliverTx(cmd, actualArgs)
+	case "finalize_block":
+		return cmdFinalizeBlock(cmd, actualArgs)
 	case "echo":
 		return cmdEcho(cmd, actualArgs)
 	case "info":
@@ -462,7 +482,7 @@ func cmdUnimplemented(cmd *cobra.Command, args []string) error {
 	fmt.Printf("%s: %s\n", echoCmd.Use, echoCmd.Short)
 	fmt.Printf("%s: %s\n", infoCmd.Use, infoCmd.Short)
 	fmt.Printf("%s: %s\n", checkTxCmd.Use, checkTxCmd.Short)
-	fmt.Printf("%s: %s\n", deliverTxCmd.Use, deliverTxCmd.Short)
+	fmt.Printf("%s: %s\n", finalizeBlockCmd.Use, finalizeBlockCmd.Short)
 	fmt.Printf("%s: %s\n", queryCmd.Use, queryCmd.Short)
 	fmt.Printf("%s: %s\n", commitCmd.Use, commitCmd.Short)
 	fmt.Println("Use \"[command] --help\" for more information about a command.")
@@ -476,13 +496,15 @@ func cmdEcho(cmd *cobra.Command, args []string) error {
 	if len(args) > 0 {
 		msg = args[0]
 	}
-	res, err := client.EchoSync(msg)
+	res, err := client.Echo(cmd.Context(), msg)
 	if err != nil {
 		return err
 	}
+
 	printResponse(cmd, args, response{
 		Data: []byte(res.Message),
 	})
+
 	return nil
 }
 
@@ -492,7 +514,7 @@ func cmdInfo(cmd *cobra.Command, args []string) error {
 	if len(args) == 1 {
 		version = args[0]
 	}
-	res, err := client.InfoSync(types.RequestInfo{Version: version})
+	res, err := client.Info(cmd.Context(), &types.RequestInfo{Version: version})
 	if err != nil {
 		return err
 	}
@@ -504,29 +526,40 @@ func cmdInfo(cmd *cobra.Command, args []string) error {
 
 const codeBad uint32 = 10
 
-// Append a new tx to application
-func cmdDeliverTx(cmd *cobra.Command, args []string) error {
+// Append new txs to application
+func cmdFinalizeBlock(cmd *cobra.Command, args []string) error {
 	if len(args) == 0 {
 		printResponse(cmd, args, response{
 			Code: codeBad,
-			Log:  "want the tx",
+			Log:  "Must provide at least one transaction",
 		})
 		return nil
 	}
-	txBytes, err := stringOrHexToBytes(args[0])
-	if err != nil {
-		return err
-	}
-	res, err := client.DeliverTxSync(types.RequestDeliverTx{Tx: txBytes})
+	txs := make([][]byte, len(args))
+	for i, arg := range args {
+		txBytes, err := stringOrHexToBytes(arg)
+		if err != nil {
+			return err
+		}
+		txs[i] = txBytes
+	}
+	res, err := client.FinalizeBlock(cmd.Context(), &types.RequestFinalizeBlock{Txs: txs})
 	if err != nil {
 		return err
 	}
-	printResponse(cmd, args, response{
-		Code: res.Code,
-		Data: res.Data,
-		Info: res.Info,
-		Log:  res.Log,
+	resps := make([]response, 0, len(res.TxResults)+1)
+	for _, tx := range res.TxResults {
+		resps = append(resps, response{
+			Code: tx.Code,
+			Data: tx.Data,
+			Info: tx.Info,
+			Log:  tx.Log,
+		})
+	}
+	resps = append(resps, response{
+		Data: res.AgreedAppData,
 	})
+	printResponse(cmd, args, resps...)
 	return nil
 }
 
@@ -543,7 +576,7 @@ func cmdCheckTx(cmd *cobra.Command, args []string) error {
 	if err != nil {
 		return err
 	}
-	res, err := client.CheckTxSync(types.RequestCheckTx{Tx: txBytes})
+	res, err := client.CheckTx(cmd.Context(), &types.RequestCheckTx{Tx: txBytes})
 	if err != nil {
 		return err
 	}
@@ -558,13 +591,11 @@ func cmdCheckTx(cmd *cobra.Command, args []string) error {
 
 // Get application Merkle root hash
 func cmdCommit(cmd *cobra.Command, args []string) error {
-	res, err := client.CommitSync()
+	_, err := client.Commit(cmd.Context(), &types.RequestCommit{})
 	if err != nil {
 		return err
 	}
-	printResponse(cmd, args, response{
-		Data: res.Data,
-	})
+	printResponse(cmd, args, response{})
 	return nil
 }
 
@@ -583,7 +614,7 @@ func cmdQuery(cmd *cobra.Command, args []string) error {
 		return err
 	}
 
-	resQuery, err := client.QuerySync(types.RequestQuery{
+	resQuery, err := client.Query(cmd.Context(), &types.RequestQuery{
 		Data:   queryBytes,
 		Path:   flagPath,
 		Height: int64(flagHeight),
@@ -617,7 +648,7 @@ func cmdPrepareProposal(cmd *cobra.Command, args []string) error {
 		txsBytesArray[i] = txBytes
 	}
 
-	res, err := client.PrepareProposalSync(types.RequestPrepareProposal{
+	res, err := client.PrepareProposal(cmd.Context(), &types.RequestPrepareProposal{
 		Txs: txsBytesArray,
 		// kvstore has to have this parameter in order not to reject a tx as the default value is 0
 		MaxTxBytes: 65536,
@@ -628,7 +659,7 @@ func cmdPrepareProposal(cmd *cobra.Command, args []string) error {
 	resps := make([]response, 0, len(res.Txs))
 	for _, tx := range res.Txs {
 		resps = append(resps, response{
-			Code: code.CodeTypeOK,
+			Code: 0, // CodeOK
 			Log:  "Succeeded. Tx: " + string(tx),
 		})
 	}
@@ -648,7 +679,7 @@ func cmdProcessProposal(cmd *cobra.Command, args []string) error {
 		txsBytesArray[i] = txBytes
 	}
 
-	res, err := client.ProcessProposalSync(types.RequestProcessProposal{
+	res, err := client.ProcessProposal(cmd.Context(), &types.RequestProcessProposal{
 		Txs: txsBytesArray,
 	})
 	if err != nil {
@@ -673,8 +704,7 @@ func cmdKVStore(cmd *cobra.Command, args []string) error {
 			return err
 		}
 	}
-	app = kvstore.NewPersistentKVStoreApplication(flagPersist)
-	app.(*kvstore.PersistentKVStoreApplication).SetLogger(logger.With("module", "kvstore"))
+	app = kvstore.NewPersistentApplication(flagPersist)
 
 	// Start the listener
 	srv, err := server.NewServer(flagAddress, flagAbci, app)
@@ -716,9 +746,9 @@ func printResponse(cmd *cobra.Command, args []string, rsps ...response) {
 	}
 
 	if len(rsp.Data) != 0 {
-		// Do not print this line when using the commit command
+		// Do not print this line when using the finalize_block command
 		// because the string comes out as gibberish
-		if cmd.Use != "commit" {
+		if cmd.Use != "finalize_block" {
 			fmt.Printf("-> data: %s\n", rsp.Data)
 		}
 		fmt.Printf("-> data.hex: 0x%X\n", rsp.Data)
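Taken together, the command changes replace the per-transaction deliver_tx flow with a per-block one. Assuming a kvstore app is being served (for example via `abci-cli kvstore`), the new flow looks roughly like this; the transactions are illustrative:

	abci-cli finalize_block "name=satoshi" 0x01
	abci-cli commit
	abci-cli query "name"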
@@ -1,11 +0,0 @@
package code

// Return codes for the examples
const (
	CodeTypeOK            uint32 = 0
	CodeTypeEncodingError uint32 = 1
	CodeTypeBadNonce      uint32 = 2
	CodeTypeUnauthorized  uint32 = 3
	CodeTypeUnknownError  uint32 = 4
	CodeTypeExecuted      uint32 = 5
)
@@ -1,3 +0,0 @@
package example

// so the go tool doesn't return errors about no buildable go files ...
@@ -1,187 +0,0 @@
package example

import (
	"fmt"
	"math/rand"
	"net"
	"os"
	"reflect"
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"google.golang.org/grpc"

	"golang.org/x/net/context"

	"github.com/tendermint/tendermint/libs/log"
	tmnet "github.com/tendermint/tendermint/libs/net"

	abcicli "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/example/code"
	"github.com/tendermint/tendermint/abci/example/kvstore"
	abciserver "github.com/tendermint/tendermint/abci/server"
	"github.com/tendermint/tendermint/abci/types"
)

func init() {
	rand.Seed(time.Now().UnixNano())
}

func TestKVStore(t *testing.T) {
	fmt.Println("### Testing KVStore")
	testStream(t, kvstore.NewApplication())
}

func TestBaseApp(t *testing.T) {
	fmt.Println("### Testing BaseApp")
	testStream(t, types.NewBaseApplication())
}

func TestGRPC(t *testing.T) {
	fmt.Println("### Testing GRPC")
	testGRPCSync(t, types.NewGRPCApplication(types.NewBaseApplication()))
}

func testStream(t *testing.T, app types.Application) {
	numDeliverTxs := 20000
	socketFile := fmt.Sprintf("test-%08x.sock", rand.Int31n(1<<30))
	defer os.Remove(socketFile)
	socket := fmt.Sprintf("unix://%v", socketFile)

	// Start the listener
	server := abciserver.NewSocketServer(socket, app)
	server.SetLogger(log.TestingLogger().With("module", "abci-server"))
	if err := server.Start(); err != nil {
		require.NoError(t, err, "Error starting socket server")
	}
	t.Cleanup(func() {
		if err := server.Stop(); err != nil {
			t.Error(err)
		}
	})

	// Connect to the socket
	client := abcicli.NewSocketClient(socket, false)
	client.SetLogger(log.TestingLogger().With("module", "abci-client"))
	if err := client.Start(); err != nil {
		t.Fatalf("Error starting socket client: %v", err.Error())
	}
	t.Cleanup(func() {
		if err := client.Stop(); err != nil {
			t.Error(err)
		}
	})

	done := make(chan struct{})
	counter := 0
	client.SetResponseCallback(func(req *types.Request, res *types.Response) {
		// Process response
		switch r := res.Value.(type) {
		case *types.Response_DeliverTx:
			counter++
			if r.DeliverTx.Code != code.CodeTypeOK {
				t.Error("DeliverTx failed with ret_code", r.DeliverTx.Code)
			}
			if counter > numDeliverTxs {
				t.Fatalf("Too many DeliverTx responses. Got %d, expected %d", counter, numDeliverTxs)
			}
			if counter == numDeliverTxs {
				go func() {
					time.Sleep(time.Second * 1) // Wait for a bit to allow counter overflow
					close(done)
				}()
				return
			}
		case *types.Response_Flush:
			// ignore
		default:
			t.Error("Unexpected response type", reflect.TypeOf(res.Value))
		}
	})

	// Write requests
	for counter := 0; counter < numDeliverTxs; counter++ {
		// Send request
		reqRes := client.DeliverTxAsync(types.RequestDeliverTx{Tx: []byte("test")})
		_ = reqRes
		// check err ?

		// Sometimes send flush messages
		if counter%123 == 0 {
			client.FlushAsync()
			// check err ?
		}
	}

	// Send final flush message
	client.FlushAsync()

	<-done
}

//-------------------------
// test grpc

func dialerFunc(ctx context.Context, addr string) (net.Conn, error) {
	return tmnet.Connect(addr)
}

func testGRPCSync(t *testing.T, app types.ABCIApplicationServer) {
	numDeliverTxs := 2000
	socketFile := fmt.Sprintf("/tmp/test-%08x.sock", rand.Int31n(1<<30))
	defer os.Remove(socketFile)
	socket := fmt.Sprintf("unix://%v", socketFile)

	// Start the listener
	server := abciserver.NewGRPCServer(socket, app)
	server.SetLogger(log.TestingLogger().With("module", "abci-server"))
	if err := server.Start(); err != nil {
		t.Fatalf("Error starting GRPC server: %v", err.Error())
	}

	t.Cleanup(func() {
		if err := server.Stop(); err != nil {
			t.Error(err)
		}
	})

	// Connect to the socket
	//nolint:staticcheck // SA1019 Existing use of deprecated but supported dial option.
	conn, err := grpc.Dial(socket, grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc))
	if err != nil {
		t.Fatalf("Error dialing GRPC server: %v", err.Error())
	}

	t.Cleanup(func() {
		if err := conn.Close(); err != nil {
			t.Error(err)
		}
	})

	client := types.NewABCIApplicationClient(conn)

	// Write requests
	for counter := 0; counter < numDeliverTxs; counter++ {
		// Send request
		response, err := client.DeliverTx(context.Background(), &types.RequestDeliverTx{Tx: []byte("test")})
		if err != nil {
			t.Fatalf("Error in GRPC DeliverTx: %v", err.Error())
		}
		counter++
		if response.Code != code.CodeTypeOK {
			t.Error("DeliverTx failed with ret_code", response.Code)
		}
		if counter > numDeliverTxs {
			t.Fatal("Too many DeliverTx responses")
		}
		t.Log("response", counter)
		if counter == numDeliverTxs {
			go func() {
				time.Sleep(time.Second * 1) // Wait for a bit to allow counter overflow
			}()
		}

	}
}
@@ -1,24 +1,10 @@
 # KVStore
 
-There are two app's here: the KVStoreApplication and the PersistentKVStoreApplication.
-
-## KVStoreApplication
-
 The KVStoreApplication is a simple merkle key-value store.
 Transactions of the form `key=value` are stored as key-value pairs in the tree.
+Transactions without an `=` sign set the value to the key.
 The app has no replay protection (other than what the mempool provides).
-
-## PersistentKVStoreApplication
-
-The PersistentKVStoreApplication wraps the KVStoreApplication
-and provides two additional features:
-
-1) persistence of state across app restarts (using Tendermint's ABCI-Handshake mechanism)
-2) validator set changes
-
-The state is persisted in leveldb along with the last block committed,
-and the Handshake allows any necessary blocks to be replayed.
 Validator set changes are effected using the following transaction format:
 
 ```md
@@ -27,4 +13,4 @@ Validator set changes are effected using the following transaction format:
 
 where `pubkeyN` is a base64-encoded 32-byte ed25519 key and `powerN` is a new voting power for the validator with `pubkeyN` (possibly a new one).
 To remove a validator from the validator set, set power to `0`.
-There is no sybil protection against new validators joining.
+There is no sybil protection against new validators joining.
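To make the two transaction shapes concrete, a couple of illustrative examples (not part of the README text itself):

	name=satoshi              stores value "satoshi" under key "name"
	val=<base64-pubkey>!10    sets the validator with that pubkey to voting power 10
	val=<base64-pubkey>!0     removes that validator from the set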
abci/example/kvstore/code.go (new file, 10 lines)
@@ -0,0 +1,10 @@
package kvstore

// Return codes for the examples
const (
	CodeTypeOK              uint32 = 0
	CodeTypeEncodingError   uint32 = 1
	CodeTypeInvalidTxFormat uint32 = 2
	CodeTypeUnauthorized    uint32 = 3
	CodeTypeExecuted        uint32 = 5
)
@@ -1,8 +1,15 @@
 package kvstore
 
 import (
+	"context"
+	"encoding/base64"
+	"fmt"
+	"strings"
 
 	"github.com/tendermint/tendermint/abci/types"
+	cryptoencoding "github.com/tendermint/tendermint/crypto/encoding"
+	tmrand "github.com/tendermint/tendermint/libs/rand"
+	"github.com/tendermint/tendermint/proto/tendermint/crypto"
 )
 
 // RandVal creates one random validator, with a key derived
@@ -29,8 +36,44 @@ func RandVals(cnt int) []types.ValidatorUpdate {
 // InitKVStore initializes the kvstore app with some data,
 // which allows tests to pass and is fine as long as you
 // don't make any tx that modifies the validator state
-func InitKVStore(app *PersistentKVStoreApplication) {
-	app.InitChain(types.RequestInitChain{
+func InitKVStore(ctx context.Context, app *Application) error {
+	_, err := app.InitChain(ctx, &types.RequestInitChain{
 		Validators: RandVals(1),
 	})
+	return err
 }
 
+// Create a new transaction
+func NewTx(key, value string) []byte {
+	return []byte(strings.Join([]string{key, value}, "="))
+}
+
+func NewRandomTx(size int) []byte {
+	if size < 4 {
+		panic("random tx size must be greater than 3")
+	}
+	return NewTx(tmrand.Str(2), tmrand.Str(size-3))
+}
+
+func NewRandomTxs(n int) [][]byte {
+	txs := make([][]byte, n)
+	for i := 0; i < n; i++ {
+		txs[i] = NewRandomTx(10)
+	}
+	return txs
+}
+
+func NewTxFromID(i int) []byte {
+	return []byte(fmt.Sprintf("%d=%d", i, i))
+}
+
+// Create a transaction to add/remove/update a validator
+// To remove, set power to 0.
+func MakeValSetChangeTx(pubkey crypto.PublicKey, power int64) []byte {
+	pk, err := cryptoencoding.PubKeyFromProto(pubkey)
+	if err != nil {
+		panic(err)
+	}
+	pubStr := base64.StdEncoding.EncodeToString(pk.Bytes())
+	return []byte(fmt.Sprintf("%s%s!%d", ValidatorPrefix, pubStr, power))
+}
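A short sketch of how these helpers compose, assuming the context-aware Application introduced elsewhere in this diff (the values are illustrative):

```go
ctx := context.Background()
app := NewInMemoryApplication()
if err := InitKVStore(ctx, app); err != nil { // seeds one random validator
	panic(err)
}

txs := [][]byte{
	NewTx("name", "satoshi"), // "name=satoshi"
	NewTxFromID(7),           // "7=7"
	NewRandomTx(10),          // e.g. "ab=defghij"
}
_ = txs
```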
@@ -2,29 +2,426 @@ package kvstore
 
 import (
 	"bytes"
+	"context"
+	"encoding/base64"
 	"encoding/binary"
 	"encoding/json"
 	"fmt"
+	"strconv"
+	"strings"
 
 	dbm "github.com/tendermint/tm-db"
 
-	"github.com/tendermint/tendermint/abci/example/code"
 	"github.com/tendermint/tendermint/abci/types"
+	cryptoencoding "github.com/tendermint/tendermint/crypto/encoding"
+	"github.com/tendermint/tendermint/libs/log"
+	cryptoproto "github.com/tendermint/tendermint/proto/tendermint/crypto"
 	"github.com/tendermint/tendermint/version"
 )
 
 var (
 	stateKey        = []byte("stateKey")
 	kvPairPrefixKey = []byte("kvPairKey:")
-
-	ProtocolVersion uint64 = 0x1
 )
 
+const (
+	ValidatorPrefix        = "val="
+	AppVersion      uint64 = 1
+)
+
+var _ types.Application = (*Application)(nil)
+// Application is the kvstore state machine. It complies with the abci.Application interface.
+// It takes transactions in the form of key=value and saves them in a database. This is
+// a somewhat trivial example, as there is no real state execution.
+type Application struct {
+	types.BaseApplication
+
+	state        State
+	RetainBlocks int64 // blocks to retain after commit (via ResponseCommit.RetainHeight)
+	stagedTxs    [][]byte
+	logger       log.Logger
+
+	// validator set
+	valUpdates         []types.ValidatorUpdate
+	valAddrToPubKeyMap map[string]cryptoproto.PublicKey
+}
+
+// NewApplication creates an instance of the kvstore from the provided database
+func NewApplication(db dbm.DB) *Application {
+	return &Application{
+		logger:             log.NewNopLogger(),
+		state:              loadState(db),
+		valAddrToPubKeyMap: make(map[string]cryptoproto.PublicKey),
+	}
+}
+
+// NewPersistentApplication creates a new application using the goleveldb database engine
+func NewPersistentApplication(dbDir string) *Application {
+	name := "kvstore"
+	db, err := dbm.NewGoLevelDB(name, dbDir)
+	if err != nil {
+		panic(fmt.Errorf("failed to create persistent app at %s: %w", dbDir, err))
+	}
+	return NewApplication(db)
+}
+
+// NewInMemoryApplication creates a new application from an in memory database.
+// Nothing will be persisted.
+func NewInMemoryApplication() *Application {
+	return NewApplication(dbm.NewMemDB())
+}
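The three constructors trade persistence for convenience: the in-memory variant loses everything on restart, while the persistent one lets the ABCI handshake replay only the missing blocks. A hedged wiring sketch (the socket path is illustrative):

```go
// For tests: nothing survives a restart.
app := kvstore.NewInMemoryApplication()

// For a real node: state and validators persist under ./data.
// app := kvstore.NewPersistentApplication("./data")

srv := server.NewSocketServer("unix://kvstore.sock", app)
if err := srv.Start(); err != nil {
	panic(err)
}
```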
+// Info returns information about the state of the application. This is generally used every time a Tendermint instance
+// begins and lets the application know what Tendermint versions it's interacting with. Based on this information,
+// Tendermint will ensure it is in sync with the application by potentially replaying the blocks it has. If the
+// Application returns a 0 appBlockHeight, Tendermint will call InitChain to initialize the application with consensus related data
+func (app *Application) Info(_ context.Context, req *types.RequestInfo) (*types.ResponseInfo, error) {
+	// Tendermint expects the application to persist validators, on start-up we need to reload them to memory if they exist
+	if len(app.valAddrToPubKeyMap) == 0 && app.state.Height > 0 {
+		validators := app.getValidators()
+		for _, v := range validators {
+			pubkey, err := cryptoencoding.PubKeyFromProto(v.PubKey)
+			if err != nil {
+				panic(fmt.Errorf("can't decode public key: %w", err))
+			}
+			app.valAddrToPubKeyMap[string(pubkey.Address())] = v.PubKey
+		}
+	}
+
+	return &types.ResponseInfo{
+		Data:             fmt.Sprintf("{\"size\":%v}", app.state.Size),
+		Version:          version.ABCIVersion,
+		AppVersion:       AppVersion,
+		LastBlockHeight:  app.state.Height,
+		LastBlockAppHash: app.state.Hash(),
+	}, nil
+}
+
+// InitChain takes the genesis validators and stores them in the kvstore. It returns the application hash in the
+// case that the application starts prepopulated with values. This method is called whenever a new instance of the application
+// starts (i.e. app height = 0).
+func (app *Application) InitChain(_ context.Context, req *types.RequestInitChain) (*types.ResponseInitChain, error) {
+	for _, v := range req.Validators {
+		app.updateValidator(v)
+	}
+	appHash := make([]byte, 8)
+	binary.PutVarint(appHash, app.state.Size)
+	return &types.ResponseInitChain{
+		AppHash: appHash,
+	}, nil
+}
+
+// CheckTx handles inbound transactions or, in the case of recheckTx, assesses old transaction validity after a state transition.
+// As this is called frequently, it's preferable to keep the check as stateless and as quick as possible.
+// Here we check that the transaction has the correct key=value format.
+// For the KVStore we check that each transaction has the valid tx format:
+// - Contains one and only one `=`
+// - `=` is not the first or last byte.
+// - if key is `val` that the validator update transaction is also valid
+func (app *Application) CheckTx(_ context.Context, req *types.RequestCheckTx) (*types.ResponseCheckTx, error) {
+	// If it is a validator update transaction, check that it is correctly formatted
+	if isValidatorTx(req.Tx) {
+		if _, _, err := parseValidatorTx(req.Tx); err != nil {
+			return &types.ResponseCheckTx{Code: CodeTypeInvalidTxFormat}, nil
+		}
+	} else if !isValidTx(req.Tx) {
+		return &types.ResponseCheckTx{Code: CodeTypeInvalidTxFormat}, nil
+	}
+
+	return &types.ResponseCheckTx{Code: CodeTypeOK, GasWanted: 1}, nil
+}
+
+// Tx must have a format like key:value or key=value. That is:
+// - it must have one and only one ":" or "="
+// - It must not begin or end with these special characters
+func isValidTx(tx []byte) bool {
+	if bytes.Count(tx, []byte(":")) == 1 && bytes.Count(tx, []byte("=")) == 0 {
+		if !bytes.HasPrefix(tx, []byte(":")) && !bytes.HasSuffix(tx, []byte(":")) {
+			return true
+		}
+	} else if bytes.Count(tx, []byte("=")) == 1 && bytes.Count(tx, []byte(":")) == 0 {
+		if !bytes.HasPrefix(tx, []byte("=")) && !bytes.HasSuffix(tx, []byte("=")) {
+			return true
+		}
+	}
+	return false
+}
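Concretely, isValidTx accepts exactly one separator, and that separator may be neither the first nor the last byte. A few illustrative cases:

```go
isValidTx([]byte("key=value")) // true
isValidTx([]byte("key:value")) // true
isValidTx([]byte("key=va=ue")) // false: two '='
isValidTx([]byte("key=va:ue")) // false: mixes '=' and ':'
isValidTx([]byte("=value"))    // false: separator is the first byte
```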
+// PrepareProposal is called when the node is a proposer. Tendermint stages a set of transactions to the application. As the
+// KVStore has two accepted formats, `:` and `=`, we modify all instances of `:` with `=` to make it consistent. Note: this is
+// quite a trivial example of transaction modification.
+// NOTE: we assume that Tendermint will never provide more transactions than can fit in a block.
+func (app *Application) PrepareProposal(_ context.Context, req *types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
+	return &types.ResponsePrepareProposal{Txs: formatTxs(req.Txs)}, nil
+}
+
+// formatTxs substitutes all the transactions with x:y to x=y
+func formatTxs(blockData [][]byte) [][]byte {
+	txs := make([][]byte, len(blockData))
+	for idx, tx := range blockData {
+		txs[idx] = bytes.Replace(tx, []byte(":"), []byte("="), 1)
+	}
+	return txs
+}
+
+// ProcessProposal is called whenever a node receives a complete proposal. It allows the application to validate the proposal.
+// Only validators who can vote will have this method called. For the KVStore we reuse CheckTx.
+func (app *Application) ProcessProposal(ctx context.Context, req *types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
+	for _, tx := range req.Txs {
+		// As CheckTx is a full validity check we can simply reuse this
+		if resp, err := app.CheckTx(ctx, &types.RequestCheckTx{Tx: tx}); resp.Code != CodeTypeOK || err != nil {
+			return &types.ResponseProcessProposal{Status: types.ResponseProcessProposal_REJECT}, nil
+		}
+	}
+	return &types.ResponseProcessProposal{Status: types.ResponseProcessProposal_ACCEPT}, nil
+}
+
+// FinalizeBlock executes the block against the application state. It punishes validators who equivocated and
+// updates validators according to transactions in a block. The rest of the transactions are regular key value
+// updates and are cached in memory and will be persisted once Commit is called.
+// ConsensusParams are never changed.
+func (app *Application) FinalizeBlock(_ context.Context, req *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
+	// reset valset changes
+	app.valUpdates = make([]types.ValidatorUpdate, 0)
+	app.stagedTxs = make([][]byte, 0)
+
+	// Punish validators who committed equivocation.
+	for _, ev := range req.Misbehavior {
+		if ev.Type == types.MisbehaviorType_DUPLICATE_VOTE {
+			addr := string(ev.Validator.Address)
+			if pubKey, ok := app.valAddrToPubKeyMap[addr]; ok {
+				app.valUpdates = append(app.valUpdates, types.ValidatorUpdate{
+					PubKey: pubKey,
+					Power:  ev.Validator.Power - 1,
+				})
+				app.logger.Info("Decreased val power by 1 because of the equivocation",
+					"val", addr)
+			} else {
+				panic(fmt.Errorf("wanted to punish val %q but can't find it", addr))
+			}
+		}
+	}
+
+	respTxs := make([]*types.ExecTxResult, len(req.Txs))
+	for i, tx := range req.Txs {
+		if isValidatorTx(tx) {
+			pubKey, power, err := parseValidatorTx(tx)
+			if err != nil {
+				panic(err)
+			}
+			app.valUpdates = append(app.valUpdates, types.UpdateValidator(pubKey, power, ""))
+		} else {
+			app.stagedTxs = append(app.stagedTxs, tx)
+		}
+		respTxs[i] = &types.ExecTxResult{
+			Code: CodeTypeOK,
+			// With every transaction we can emit a series of events. To make it simple, we just emit the same events.
+			Events: []types.Event{
+				{
+					Type: "app",
+					Attributes: []types.EventAttribute{
+						{Key: "creator", Value: "Cosmoshi Netowoko", Index: true},
+						{Key: "index_key", Value: "index is working", Index: true},
+						{Key: "noindex_key", Value: "index is working", Index: false},
+					},
+				},
+			},
+		}
+		app.state.Size++
+	}
+
+	app.state.Height = req.Height
+
+	return &types.ResponseFinalizeBlock{TxResults: respTxs, ValidatorUpdates: app.valUpdates, AgreedAppData: app.state.Hash()}, nil
+}
+
+// Commit is called after FinalizeBlock and after Tendermint state, which includes the updates to
+// AppHash, ConsensusParams and ValidatorSet, has occurred.
+// The KVStore persists the validator updates and the new key values
+func (app *Application) Commit(_ context.Context, _ *types.RequestCommit) (*types.ResponseCommit, error) {
+	// apply the validator updates to state (note this is really the validator set at h + 2)
+	for _, valUpdate := range app.valUpdates {
+		app.updateValidator(valUpdate)
+	}
+
+	// persist all the staged txs in the kvstore
+	for _, tx := range app.stagedTxs {
+		parts := bytes.Split(tx, []byte("="))
+		if len(parts) != 2 {
+			panic(fmt.Sprintf("unexpected tx format. Expected 2 got %d: %s", len(parts), parts))
+		}
+		key, value := string(parts[0]), string(parts[1])
+		err := app.state.db.Set(prefixKey([]byte(key)), []byte(value))
+		if err != nil {
+			panic(err)
+		}
+	}
+
+	// persist the state (i.e. size and height)
+	saveState(app.state)
+
+	resp := &types.ResponseCommit{}
+	if app.RetainBlocks > 0 && app.state.Height >= app.RetainBlocks {
+		resp.RetainHeight = app.state.Height - app.RetainBlocks + 1
+	}
+	return resp, nil
+}
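The comments above pin down the per-height ordering: FinalizeBlock only stages results in memory, and Commit is what writes them to the database. A minimal driver sketch under that assumption:

```go
ctx := context.Background()
app := NewInMemoryApplication()

resp, err := app.FinalizeBlock(ctx, &types.RequestFinalizeBlock{
	Height: 1,
	Txs:    [][]byte{[]byte("name=satoshi")},
})
if err != nil || resp.TxResults[0].Code != CodeTypeOK {
	panic("tx rejected")
}
// Nothing has been written yet; Commit persists the staged txs and state.
if _, err := app.Commit(ctx, &types.RequestCommit{}); err != nil {
	panic(err)
}
```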
+// Returns an associated value or nil if missing.
+func (app *Application) Query(_ context.Context, reqQuery *types.RequestQuery) (*types.ResponseQuery, error) {
+	resQuery := &types.ResponseQuery{}
+
+	if reqQuery.Path == "/val" {
+		key := []byte(ValidatorPrefix + string(reqQuery.Data))
+		value, err := app.state.db.Get(key)
+		if err != nil {
+			panic(err)
+		}
+
+		return &types.ResponseQuery{
+			Key:   reqQuery.Data,
+			Value: value,
+		}, nil
+	}
+
+	if reqQuery.Prove {
+		value, err := app.state.db.Get(prefixKey(reqQuery.Data))
+		if err != nil {
+			panic(err)
+		}
+
+		if value == nil {
+			resQuery.Log = "does not exist"
+		} else {
+			resQuery.Log = "exists"
+		}
+		resQuery.Index = -1 // TODO make Proof return index
+		resQuery.Key = reqQuery.Data
+		resQuery.Value = value
+		resQuery.Height = app.state.Height
+
+		return resQuery, nil
+	}
+
+	resQuery.Key = reqQuery.Data
+	value, err := app.state.db.Get(prefixKey(reqQuery.Data))
+	if err != nil {
+		panic(err)
+	}
+	if value == nil {
+		resQuery.Log = "does not exist"
+	} else {
+		resQuery.Log = "exists"
+	}
+	resQuery.Value = value
+	resQuery.Height = app.state.Height
+
+	return resQuery, nil
+}
+
+func (app *Application) Close() error {
+	return app.state.db.Close()
+}
+
+func isValidatorTx(tx []byte) bool {
+	return strings.HasPrefix(string(tx), ValidatorPrefix)
+}
+
+func parseValidatorTx(tx []byte) ([]byte, int64, error) {
+	tx = tx[len(ValidatorPrefix):]
+
+	// get the pubkey and power
+	pubKeyAndPower := strings.Split(string(tx), "!")
+	if len(pubKeyAndPower) != 2 {
+		return nil, 0, fmt.Errorf("expected 'pubkey!power'. Got %v", pubKeyAndPower)
+	}
+	pubkeyS, powerS := pubKeyAndPower[0], pubKeyAndPower[1]
+
+	// decode the pubkey
+	pubkey, err := base64.StdEncoding.DecodeString(pubkeyS)
+	if err != nil {
+		return nil, 0, fmt.Errorf("pubkey (%s) is invalid base64", pubkeyS)
+	}
+
+	// decode the power
+	power, err := strconv.ParseInt(powerS, 10, 64)
+	if err != nil {
+		return nil, 0, fmt.Errorf("power (%s) is not an int", powerS)
+	}
+
+	if power < 0 {
+		return nil, 0, fmt.Errorf("power can not be less than 0, got %d", power)
+	}
+
+	return pubkey, power, nil
+}
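Round-tripping the val=<base64-pubkey>!<power> format that parseValidatorTx expects (the key bytes are illustrative):

```go
tx := []byte("val=SGVsbG8sIHdvcmxkIQ==!10")
pubkey, power, err := parseValidatorTx(tx)
if err != nil {
	panic(err)
}
_ = pubkey // raw key bytes decoded from the base64 segment
_ = power  // int64(10)
```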
// add, update, or remove a validator
|
||||
func (app *Application) updateValidator(v types.ValidatorUpdate) {
|
||||
pubkey, err := cryptoencoding.PubKeyFromProto(v.PubKey)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("can't decode public key: %w", err))
|
||||
}
|
||||
key := []byte(ValidatorPrefix + string(pubkey.Bytes()))
|
||||
|
||||
if v.Power == 0 {
|
||||
// remove validator
|
||||
hasKey, err := app.state.db.Has(key)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if !hasKey {
|
||||
pubStr := base64.StdEncoding.EncodeToString(pubkey.Bytes())
|
||||
app.logger.Info("tried to remove non existent validator. Skipping...", "pubKey", pubStr)
|
||||
}
|
||||
if err = app.state.db.Delete(key); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
delete(app.valAddrToPubKeyMap, string(pubkey.Address()))
|
||||
} else {
|
||||
// add or update validator
|
||||
value := bytes.NewBuffer(make([]byte, 0))
|
||||
if err := types.WriteMessage(&v, value); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if err = app.state.db.Set(key, value.Bytes()); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
app.valAddrToPubKeyMap[string(pubkey.Address())] = v.PubKey
|
||||
}
|
||||
}
|
||||
|
||||
func (app *Application) getValidators() (validators []types.ValidatorUpdate) {
|
||||
itr, err := app.state.db.Iterator(nil, nil)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
for ; itr.Valid(); itr.Next() {
|
||||
if isValidatorTx(itr.Key()) {
|
||||
validator := new(types.ValidatorUpdate)
|
||||
err := types.ReadMessage(bytes.NewBuffer(itr.Value()), validator)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
validators = append(validators, *validator)
|
||||
}
|
||||
}
|
||||
if err = itr.Error(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// -----------------------------
|
||||
|
||||
type State struct {
|
||||
db dbm.DB
|
||||
Size int64 `json:"size"`
|
||||
Height int64 `json:"height"`
|
||||
AppHash []byte `json:"app_hash"`
|
||||
db dbm.DB
|
||||
// Size is essentially the amount of transactions that have been processes.
|
||||
// This is used for the appHash
|
||||
Size int64 `json:"size"`
|
||||
Height int64 `json:"height"`
|
||||
}
|
||||
|
||||
func loadState(db dbm.DB) State {
|
||||
@@ -55,145 +452,17 @@ func saveState(state State) {
|
||||
}
|
||||
}
|
||||

// Hash returns the hash of the application state. This is computed
// as the number of transactions processed within the state. Note that this
// isn't a strong guarantee of state machine replication, because two states
// could hold different kv values and still have the same size.
// This function is used as the "AgreedAppData".
func (s State) Hash() []byte {
	appHash := make([]byte, 8)
	binary.PutVarint(appHash, s.Size)
	return appHash
}
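Because the hash is nothing more than the varint encoding of `Size`, it can be decoded directly. The standalone sketch below (an illustration, not part of the package) shows that two states with the same transaction count hash identically regardless of their kv contents:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// hashOfSize mirrors State.Hash: the app hash is just the varint of Size.
func hashOfSize(size int64) []byte {
	appHash := make([]byte, 8)
	binary.PutVarint(appHash, size)
	return appHash
}

func main() {
	a, b := hashOfSize(42), hashOfSize(42)
	fmt.Println(bytes.Equal(a, b)) // true, independent of the underlying kv pairs

	// Decoding recovers the transaction count.
	size, _ := binary.Varint(a)
	fmt.Println(size) // 42
}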

func prefixKey(key []byte) []byte {
	return append(kvPairPrefixKey, key...)
}

//---------------------------------------------------

var _ types.Application = (*Application)(nil)

type Application struct {
	types.BaseApplication

	state        State
	RetainBlocks int64 // blocks to retain after commit (via ResponseCommit.RetainHeight)
	txToRemove   map[string]struct{}
}

func NewApplication() *Application {
	state := loadState(dbm.NewMemDB())
	return &Application{state: state}
}

func (app *Application) Info(req types.RequestInfo) (resInfo types.ResponseInfo) {
	return types.ResponseInfo{
		Data:             fmt.Sprintf("{\"size\":%v}", app.state.Size),
		Version:          version.ABCISemVer,
		AppVersion:       ProtocolVersion,
		LastBlockHeight:  app.state.Height,
		LastBlockAppHash: app.state.AppHash,
	}
}

// tx is either "key=value" or just arbitrary bytes
func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
	if isReplacedTx(req.Tx) {
		app.txToRemove[string(req.Tx)] = struct{}{}
	}
	var key, value string

	parts := bytes.Split(req.Tx, []byte("="))
	if len(parts) == 2 {
		key, value = string(parts[0]), string(parts[1])
	} else {
		key, value = string(req.Tx), string(req.Tx)
	}

	err := app.state.db.Set(prefixKey([]byte(key)), []byte(value))
	if err != nil {
		panic(err)
	}
	app.state.Size++

	events := []types.Event{
		{
			Type: "app",
			Attributes: []types.EventAttribute{
				{Key: "creator", Value: "Cosmoshi Netowoko", Index: true},
				{Key: "key", Value: key, Index: true},
				{Key: "index_key", Value: "index is working", Index: true},
				{Key: "noindex_key", Value: "index is working", Index: false},
			},
		},
	}

	return types.ResponseDeliverTx{Code: code.CodeTypeOK, Events: events}
}

func (app *Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
	if req.Type == types.CheckTxType_Recheck {
		if _, ok := app.txToRemove[string(req.Tx)]; ok {
			return types.ResponseCheckTx{Code: code.CodeTypeExecuted, GasWanted: 1}
		}
	}
	return types.ResponseCheckTx{Code: code.CodeTypeOK, GasWanted: 1}
}

func (app *Application) Commit() types.ResponseCommit {
	// Using a memdb - just return the varint-encoded size of the db
	appHash := make([]byte, 8)
	binary.PutVarint(appHash, app.state.Size)
	app.state.AppHash = appHash
	app.state.Height++

	// empty out the set of transactions to remove via rechecktx
	saveState(app.state)

	resp := types.ResponseCommit{Data: appHash}
	if app.RetainBlocks > 0 && app.state.Height >= app.RetainBlocks {
		resp.RetainHeight = app.state.Height - app.RetainBlocks + 1
	}
	return resp
}
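The pruning arithmetic above keeps exactly `RetainBlocks` blocks: once the height reaches the retain window, `RetainHeight` advances so that blocks below `Height - RetainBlocks + 1` may be discarded by Tendermint. A standalone sketch of that arithmetic (illustrative only; the helper name is hypothetical):

package main

import "fmt"

// retainHeight reproduces the RetainHeight computation from Commit above.
func retainHeight(height, retainBlocks int64) int64 {
	if retainBlocks > 0 && height >= retainBlocks {
		return height - retainBlocks + 1
	}
	return 0 // zero means "keep everything"
}

func main() {
	// With RetainBlocks = 5, at height 7 blocks 1 and 2 become prunable.
	fmt.Println(retainHeight(7, 5)) // 3: blocks 3..7 are retained
	fmt.Println(retainHeight(3, 5)) // 0: window not yet full
}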

// Returns an associated value or nil if missing.
func (app *Application) Query(reqQuery types.RequestQuery) (resQuery types.ResponseQuery) {
	if reqQuery.Prove {
		value, err := app.state.db.Get(prefixKey(reqQuery.Data))
		if err != nil {
			panic(err)
		}
		if value == nil {
			resQuery.Log = "does not exist"
		} else {
			resQuery.Log = "exists"
		}
		resQuery.Index = -1 // TODO make Proof return index
		resQuery.Key = reqQuery.Data
		resQuery.Value = value
		resQuery.Height = app.state.Height

		return
	}

	resQuery.Key = reqQuery.Data
	value, err := app.state.db.Get(prefixKey(reqQuery.Data))
	if err != nil {
		panic(err)
	}
	if value == nil {
		resQuery.Log = "does not exist"
	} else {
		resQuery.Log = "exists"
	}
	resQuery.Value = value
	resQuery.Height = app.state.Height

	return resQuery
}

func (app *Application) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock {
	app.txToRemove = map[string]struct{}{}
	return types.ResponseBeginBlock{}
}

func (app *Application) ProcessProposal(
	req types.RequestProcessProposal) types.ResponseProcessProposal {
	for _, tx := range req.Txs {
		if len(tx) == 0 {
			return types.ResponseProcessProposal{Status: types.ResponseProcessProposal_REJECT}
		}
	}
	return types.ResponseProcessProposal{Status: types.ResponseProcessProposal_ACCEPT}
}

@@ -1,8 +1,8 @@
package kvstore

import (
	"context"
	"fmt"
	"os"
	"sort"
	"testing"

@@ -12,10 +12,8 @@ import (
	"github.com/tendermint/tendermint/libs/service"

	abcicli "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/example/code"
	abciserver "github.com/tendermint/tendermint/abci/server"
	"github.com/tendermint/tendermint/abci/types"
	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
)

const (
@@ -23,79 +21,82 @@ const (
	testValue = "def"
)

func testKVStore(t *testing.T, app types.Application, tx []byte, key, value string) {
	req := types.RequestDeliverTx{Tx: tx}
	ar := app.DeliverTx(req)
	require.False(t, ar.IsErr(), ar)
	// repeating tx doesn't raise error
	ar = app.DeliverTx(req)
	require.False(t, ar.IsErr(), ar)
	// commit
	app.Commit()
func TestKVStoreKV(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	info := app.Info(types.RequestInfo{})
	kvstore := NewInMemoryApplication()
	tx := []byte(testKey + ":" + testValue)
	testKVStore(ctx, t, kvstore, tx, testKey, testValue)
	tx = []byte(testKey + "=" + testValue)
	testKVStore(ctx, t, kvstore, tx, testKey, testValue)
}

func testKVStore(ctx context.Context, t *testing.T, app types.Application, tx []byte, key, value string) {
	checkTxResp, err := app.CheckTx(ctx, &types.RequestCheckTx{Tx: tx})
	require.NoError(t, err)
	require.Equal(t, uint32(0), checkTxResp.Code)

	ppResp, err := app.PrepareProposal(ctx, &types.RequestPrepareProposal{Txs: [][]byte{tx}})
	require.NoError(t, err)
	require.Len(t, ppResp.Txs, 1)
	req := &types.RequestFinalizeBlock{Height: 1, Txs: ppResp.Txs}
	ar, err := app.FinalizeBlock(ctx, req)
	require.NoError(t, err)
	require.Equal(t, 1, len(ar.TxResults))
	require.False(t, ar.TxResults[0].IsErr())
	// commit
	_, err = app.Commit(ctx, &types.RequestCommit{})
	require.NoError(t, err)

	info, err := app.Info(ctx, &types.RequestInfo{})
	require.NoError(t, err)
	require.NotZero(t, info.LastBlockHeight)

	// make sure query is fine
	resQuery := app.Query(types.RequestQuery{
	resQuery, err := app.Query(ctx, &types.RequestQuery{
		Path: "/store",
		Data: []byte(key),
	})
	require.Equal(t, code.CodeTypeOK, resQuery.Code)
	require.NoError(t, err)
	require.Equal(t, CodeTypeOK, resQuery.Code)
	require.Equal(t, key, string(resQuery.Key))
	require.Equal(t, value, string(resQuery.Value))
	require.EqualValues(t, info.LastBlockHeight, resQuery.Height)

	// make sure proof is fine
	resQuery = app.Query(types.RequestQuery{
	resQuery, err = app.Query(ctx, &types.RequestQuery{
		Path:  "/store",
		Data:  []byte(key),
		Prove: true,
	})
	require.EqualValues(t, code.CodeTypeOK, resQuery.Code)
	require.NoError(t, err)
	require.EqualValues(t, CodeTypeOK, resQuery.Code)
	require.Equal(t, key, string(resQuery.Key))
	require.Equal(t, value, string(resQuery.Value))
	require.EqualValues(t, info.LastBlockHeight, resQuery.Height)
}

func TestKVStoreKV(t *testing.T) {
	kvstore := NewApplication()
	key := testKey
	value := key
	tx := []byte(key)
	testKVStore(t, kvstore, tx, key, value)

	value = testValue
	tx = []byte(key + "=" + value)
	testKVStore(t, kvstore, tx, key, value)
}

func TestPersistentKVStoreKV(t *testing.T) {
	dir, err := os.MkdirTemp("/tmp", "abci-kvstore-test") // TODO
	if err != nil {
		t.Fatal(err)
	}
	kvstore := NewPersistentKVStoreApplication(dir)
	key := testKey
	value := key
	tx := []byte(key)
	testKVStore(t, kvstore, tx, key, value)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	value = testValue
	tx = []byte(key + "=" + value)
	testKVStore(t, kvstore, tx, key, value)
	kvstore := NewPersistentApplication(t.TempDir())
	key := testKey
	value := testValue
	testKVStore(ctx, t, kvstore, NewTx(key, value), key, value)
}

func TestPersistentKVStoreInfo(t *testing.T) {
	dir, err := os.MkdirTemp("/tmp", "abci-kvstore-test") // TODO
	if err != nil {
		t.Fatal(err)
	}
	kvstore := NewPersistentKVStoreApplication(dir)
	InitKVStore(kvstore)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	kvstore := NewPersistentApplication(t.TempDir())
	require.NoError(t, InitKVStore(ctx, kvstore))
	height := int64(0)

	resInfo := kvstore.Info(types.RequestInfo{})
	resInfo, err := kvstore.Info(ctx, &types.RequestInfo{})
	require.NoError(t, err)
	if resInfo.LastBlockHeight != height {
		t.Fatalf("expected height of %d, got %d", height, resInfo.LastBlockHeight)
	}
@@ -103,38 +104,37 @@ func TestPersistentKVStoreInfo(t *testing.T) {
	// make and apply block
	height = int64(1)
	hash := []byte("foo")
	header := tmproto.Header{
		Height: height,
	if _, err := kvstore.FinalizeBlock(ctx, &types.RequestFinalizeBlock{Hash: hash, Height: height}); err != nil {
		t.Fatal(err)
	}
	kvstore.BeginBlock(types.RequestBeginBlock{Hash: hash, Header: header})
	kvstore.EndBlock(types.RequestEndBlock{Height: header.Height})
	kvstore.Commit()

	resInfo = kvstore.Info(types.RequestInfo{})
	if resInfo.LastBlockHeight != height {
		t.Fatalf("expected height of %d, got %d", height, resInfo.LastBlockHeight)
	}
	_, err = kvstore.Commit(ctx, &types.RequestCommit{})
	require.NoError(t, err)

	resInfo, err = kvstore.Info(ctx, &types.RequestInfo{})
	require.NoError(t, err)
	require.Equal(t, height, resInfo.LastBlockHeight)

}

// add a validator, remove a validator, update a validator
func TestValUpdates(t *testing.T) {
	dir, err := os.MkdirTemp("/tmp", "abci-kvstore-test") // TODO
	if err != nil {
		t.Fatal(err)
	}
	kvstore := NewPersistentKVStoreApplication(dir)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	kvstore := NewInMemoryApplication()

	// init with some validators
	total := 10
	nInit := 5
	vals := RandVals(total)
	// initialize with the first nInit
	kvstore.InitChain(types.RequestInitChain{
	_, err := kvstore.InitChain(ctx, &types.RequestInitChain{
		Validators: vals[:nInit],
	})
	require.NoError(t, err)

	vals1, vals2 := vals[:nInit], kvstore.Validators()
	vals1, vals2 := vals[:nInit], kvstore.getValidators()
	valsEqual(t, vals1, vals2)

	var v1, v2, v3 types.ValidatorUpdate
@@ -145,9 +145,9 @@ func TestValUpdates(t *testing.T) {
	tx1 := MakeValSetChangeTx(v1.PubKey, v1.Power)
	tx2 := MakeValSetChangeTx(v2.PubKey, v2.Power)

	makeApplyBlock(t, kvstore, 1, diff, tx1, tx2)
	makeApplyBlock(ctx, t, kvstore, 1, diff, tx1, tx2)

	vals1, vals2 = vals[:nInit+2], kvstore.Validators()
	vals1, vals2 = vals[:nInit+2], kvstore.getValidators()
	valsEqual(t, vals1, vals2)

	// remove some validators
@@ -160,10 +160,10 @@ func TestValUpdates(t *testing.T) {
	tx2 = MakeValSetChangeTx(v2.PubKey, v2.Power)
	tx3 := MakeValSetChangeTx(v3.PubKey, v3.Power)

	makeApplyBlock(t, kvstore, 2, diff, tx1, tx2, tx3)
	makeApplyBlock(ctx, t, kvstore, 2, diff, tx1, tx2, tx3)

	vals1 = append(vals[:nInit-2], vals[nInit+1]) //nolint: gocritic
	vals2 = kvstore.Validators()
	vals2 = kvstore.getValidators()
	valsEqual(t, vals1, vals2)

	// update some validators
@@ -176,15 +176,62 @@ func TestValUpdates(t *testing.T) {
	diff = []types.ValidatorUpdate{v1}
	tx1 = MakeValSetChangeTx(v1.PubKey, v1.Power)

	makeApplyBlock(t, kvstore, 3, diff, tx1)
	makeApplyBlock(ctx, t, kvstore, 3, diff, tx1)

	vals1 = append([]types.ValidatorUpdate{v1}, vals1[1:]...)
	vals2 = kvstore.Validators()
	vals2 = kvstore.getValidators()
	valsEqual(t, vals1, vals2)

}

func TestCheckTx(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	kvstore := NewInMemoryApplication()

	val := RandVal(1)

	testCases := []struct {
		expCode uint32
		tx      []byte
	}{
		{CodeTypeOK, NewTx("hello", "world")},
		{CodeTypeInvalidTxFormat, []byte("hello")},
		{CodeTypeOK, []byte("space:jam")},
		{CodeTypeInvalidTxFormat, []byte("=hello")},
		{CodeTypeInvalidTxFormat, []byte("hello=")},
		{CodeTypeOK, []byte("a=b")},
		{CodeTypeInvalidTxFormat, []byte("val=hello")},
		{CodeTypeInvalidTxFormat, []byte("val=hi!5")},
		{CodeTypeOK, MakeValSetChangeTx(val.PubKey, 10)},
	}

	for idx, tc := range testCases {
		resp, err := kvstore.CheckTx(ctx, &types.RequestCheckTx{Tx: tc.tx})
		require.NoError(t, err, idx)
		fmt.Println(string(tc.tx))
		require.Equal(t, tc.expCode, resp.Code, idx)
	}
}

func TestClientServer(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	// set up socket app
	kvstore := NewInMemoryApplication()
	client, _, err := makeClientServer(t, kvstore, "kvstore-socket", "socket")
	require.NoError(t, err)
	runClientTests(ctx, t, client)

	// set up grpc app
	kvstore = NewInMemoryApplication()
	gclient, _, err := makeClientServer(t, kvstore, t.TempDir(), "grpc")
	require.NoError(t, err)
	runClientTests(ctx, t, gclient)
}

func makeApplyBlock(
	ctx context.Context,
	t *testing.T,
	kvstore types.Application,
	heightInt int,
@@ -193,25 +240,23 @@ func makeApplyBlock(
	// make and apply block
	height := int64(heightInt)
	hash := []byte("foo")
	header := tmproto.Header{
	resFinalizeBlock, err := kvstore.FinalizeBlock(ctx, &types.RequestFinalizeBlock{
		Hash:   hash,
		Height: height,
	}
		Txs: txs,
	})
	require.NoError(t, err)

	kvstore.BeginBlock(types.RequestBeginBlock{Hash: hash, Header: header})
	for _, tx := range txs {
		if r := kvstore.DeliverTx(types.RequestDeliverTx{Tx: tx}); r.IsErr() {
			t.Fatal(r)
		}
	}
	resEndBlock := kvstore.EndBlock(types.RequestEndBlock{Height: header.Height})
	kvstore.Commit()
	_, err = kvstore.Commit(ctx, &types.RequestCommit{})
	require.NoError(t, err)

	valsEqual(t, diff, resEndBlock.ValidatorUpdates)
	valsEqual(t, diff, resFinalizeBlock.ValidatorUpdates)

}

// order doesn't matter
func valsEqual(t *testing.T, vals1, vals2 []types.ValidatorUpdate) {
	t.Helper()
	if len(vals1) != len(vals2) {
		t.Fatalf("vals don't match in length. got %d, expected %d", len(vals2), len(vals1))
	}
@@ -226,138 +271,50 @@ func valsEqual(t *testing.T, vals1, vals2 []types.ValidatorUpdate) {
	}
}

func makeSocketClientServer(app types.Application, name string) (abcicli.Client, service.Service, error) {
func makeClientServer(t *testing.T, app types.Application, name, transport string) (abcicli.Client, service.Service, error) {
	// Start the listener
	socket := fmt.Sprintf("unix://%s.sock", name)
	addr := fmt.Sprintf("unix://%s.sock", name)
	logger := log.TestingLogger()

	server := abciserver.NewSocketServer(socket, app)
	server.SetLogger(logger.With("module", "abci-server"))
	if err := server.Start(); err != nil {
		return nil, nil, err
	}

	// Connect to the socket
	client := abcicli.NewSocketClient(socket, false)
	client.SetLogger(logger.With("module", "abci-client"))
	if err := client.Start(); err != nil {
		if err = server.Stop(); err != nil {
			return nil, nil, err
		}
		return nil, nil, err
	}

	return client, server, nil
}

func makeGRPCClientServer(app types.Application, name string) (abcicli.Client, service.Service, error) {
	// Start the listener
	socket := fmt.Sprintf("unix://%s.sock", name)
	logger := log.TestingLogger()

	gapp := types.NewGRPCApplication(app)
	server := abciserver.NewGRPCServer(socket, gapp)
	server.SetLogger(logger.With("module", "abci-server"))
	if err := server.Start(); err != nil {
		return nil, nil, err
	}

	client := abcicli.NewGRPCClient(socket, true)
	client.SetLogger(logger.With("module", "abci-client"))
	if err := client.Start(); err != nil {
		if err := server.Stop(); err != nil {
			return nil, nil, err
		}
		return nil, nil, err
	}
	return client, server, nil
}

func TestClientServer(t *testing.T) {
	// set up socket app
	kvstore := NewApplication()
	client, server, err := makeSocketClientServer(kvstore, "kvstore-socket")
	server, err := abciserver.NewServer(addr, transport, app)
	require.NoError(t, err)
	server.SetLogger(logger.With("module", "abci-server"))
	if err := server.Start(); err != nil {
		return nil, nil, err
	}

	t.Cleanup(func() {
		if err := server.Stop(); err != nil {
			t.Error(err)
		}
	})

	// Connect to the client
	client, err := abcicli.NewClient(addr, transport, false)
	require.NoError(t, err)
	client.SetLogger(logger.With("module", "abci-client"))
	if err := client.Start(); err != nil {
		return nil, nil, err
	}

	t.Cleanup(func() {
		if err := client.Stop(); err != nil {
			t.Error(err)
		}
	})

	runClientTests(t, client)

	// set up grpc app
	kvstore = NewApplication()
	gclient, gserver, err := makeGRPCClientServer(kvstore, "/tmp/kvstore-grpc")
	require.NoError(t, err)

	t.Cleanup(func() {
		if err := gserver.Stop(); err != nil {
			t.Error(err)
		}
	})
	t.Cleanup(func() {
		if err := gclient.Stop(); err != nil {
			t.Error(err)
		}
	})

	runClientTests(t, gclient)
	return client, server, nil
}

func runClientTests(t *testing.T, client abcicli.Client) {
func runClientTests(ctx context.Context, t *testing.T, client abcicli.Client) {
	// run some tests....
	key := testKey
	value := key
	tx := []byte(key)
	testClient(t, client, tx, key, value)

	value = testValue
	tx = []byte(key + "=" + value)
	testClient(t, client, tx, key, value)
	tx := []byte(testKey + ":" + testValue)
	testKVStore(ctx, t, client, tx, testKey, testValue)
	tx = []byte(testKey + "=" + testValue)
	testKVStore(ctx, t, client, tx, testKey, testValue)
}

func testClient(t *testing.T, app abcicli.Client, tx []byte, key, value string) {
	ar, err := app.DeliverTxSync(types.RequestDeliverTx{Tx: tx})
	require.NoError(t, err)
	require.False(t, ar.IsErr(), ar)
	// repeating tx doesn't raise error
	ar, err = app.DeliverTxSync(types.RequestDeliverTx{Tx: tx})
	require.NoError(t, err)
	require.False(t, ar.IsErr(), ar)
	// commit
	_, err = app.CommitSync()
	require.NoError(t, err)

	info, err := app.InfoSync(types.RequestInfo{})
	require.NoError(t, err)
	require.NotZero(t, info.LastBlockHeight)

	// make sure query is fine
	resQuery, err := app.QuerySync(types.RequestQuery{
		Path: "/store",
		Data: []byte(key),
	})
	require.Nil(t, err)
	require.Equal(t, code.CodeTypeOK, resQuery.Code)
	require.Equal(t, key, string(resQuery.Key))
	require.Equal(t, value, string(resQuery.Value))
	require.EqualValues(t, info.LastBlockHeight, resQuery.Height)

	// make sure proof is fine
	resQuery, err = app.QuerySync(types.RequestQuery{
		Path:  "/store",
		Data:  []byte(key),
		Prove: true,
	})
	require.Nil(t, err)
	require.Equal(t, code.CodeTypeOK, resQuery.Code)
	require.Equal(t, key, string(resQuery.Key))
	require.Equal(t, value, string(resQuery.Value))
	require.EqualValues(t, info.LastBlockHeight, resQuery.Height)
func TestTxGeneration(t *testing.T) {
	require.Len(t, NewRandomTx(20), 20)
	require.Len(t, NewRandomTxs(10), 10)
}

@@ -1,343 +0,0 @@
package kvstore

import (
	"bytes"
	"encoding/base64"
	"fmt"
	"strconv"
	"strings"

	dbm "github.com/tendermint/tm-db"

	"github.com/tendermint/tendermint/abci/example/code"
	"github.com/tendermint/tendermint/abci/types"
	cryptoenc "github.com/tendermint/tendermint/crypto/encoding"
	"github.com/tendermint/tendermint/libs/log"
	pc "github.com/tendermint/tendermint/proto/tendermint/crypto"
)

const (
	ValidatorSetChangePrefix string = "val:"
)

//-----------------------------------------

var _ types.Application = (*PersistentKVStoreApplication)(nil)

type PersistentKVStoreApplication struct {
	app *Application

	// validator set
	ValUpdates []types.ValidatorUpdate

	valAddrToPubKeyMap map[string]pc.PublicKey

	logger log.Logger
}

func NewPersistentKVStoreApplication(dbDir string) *PersistentKVStoreApplication {
	name := "kvstore"
	db, err := dbm.NewGoLevelDB(name, dbDir)
	if err != nil {
		panic(err)
	}

	state := loadState(db)

	return &PersistentKVStoreApplication{
		app: &Application{
			state:      state,
			txToRemove: map[string]struct{}{},
		},
		valAddrToPubKeyMap: make(map[string]pc.PublicKey),
		logger:             log.NewNopLogger(),
	}
}

func (app *PersistentKVStoreApplication) SetLogger(l log.Logger) {
	app.logger = l
}

func (app *PersistentKVStoreApplication) Info(req types.RequestInfo) types.ResponseInfo {
	res := app.app.Info(req)
	res.LastBlockHeight = app.app.state.Height
	res.LastBlockAppHash = app.app.state.AppHash
	return res
}

// tx is either "val:pubkey!power" or "key=value" or just arbitrary bytes
func (app *PersistentKVStoreApplication) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
	// if it starts with "val:", update the validator set
	// format is "val:pubkey!power"
	if isValidatorTx(req.Tx) {
		// update validators in the merkle tree
		// and in app.ValUpdates
		return app.execValidatorTx(req.Tx)
	}

	if isPrepareTx(req.Tx) {
		return app.execPrepareTx(req.Tx)
	}

	// otherwise, update the key-value store
	return app.app.DeliverTx(req)
}

func (app *PersistentKVStoreApplication) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
	return app.app.CheckTx(req)
}

// Commit will panic if InitChain was not called
func (app *PersistentKVStoreApplication) Commit() types.ResponseCommit {
	return app.app.Commit()
}

// When path=/val and data={validator address}, returns the validator update (types.ValidatorUpdate) varint encoded.
// For any other path, returns an associated value or nil if missing.
func (app *PersistentKVStoreApplication) Query(reqQuery types.RequestQuery) (resQuery types.ResponseQuery) {
	switch reqQuery.Path {
	case "/val":
		key := []byte("val:" + string(reqQuery.Data))
		value, err := app.app.state.db.Get(key)
		if err != nil {
			panic(err)
		}

		resQuery.Key = reqQuery.Data
		resQuery.Value = value
		return
	default:
		return app.app.Query(reqQuery)
	}
}

// Save the validators in the merkle tree
func (app *PersistentKVStoreApplication) InitChain(req types.RequestInitChain) types.ResponseInitChain {
	for _, v := range req.Validators {
		r := app.updateValidator(v)
		if r.IsErr() {
			app.logger.Error("Error updating validators", "r", r)
		}
	}
	return types.ResponseInitChain{}
}

// Track the block hash and header information
func (app *PersistentKVStoreApplication) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock {
	// reset valset changes
	app.ValUpdates = make([]types.ValidatorUpdate, 0)

	// Punish validators who committed equivocation.
	for _, ev := range req.ByzantineValidators {
		if ev.Type == types.MisbehaviorType_DUPLICATE_VOTE {
			addr := string(ev.Validator.Address)
			if pubKey, ok := app.valAddrToPubKeyMap[addr]; ok {
				app.updateValidator(types.ValidatorUpdate{
					PubKey: pubKey,
					Power:  ev.Validator.Power - 1,
				})
				app.logger.Info("Decreased val power by 1 because of the equivocation",
					"val", addr)
			} else {
				app.logger.Error("Wanted to punish val, but can't find it",
					"val", addr)
			}
		}
	}

	return app.app.BeginBlock(req)
}

// Update the validator set
func (app *PersistentKVStoreApplication) EndBlock(req types.RequestEndBlock) types.ResponseEndBlock {
	return types.ResponseEndBlock{ValidatorUpdates: app.ValUpdates}
}

func (app *PersistentKVStoreApplication) ListSnapshots(
	req types.RequestListSnapshots) types.ResponseListSnapshots {
	return types.ResponseListSnapshots{}
}

func (app *PersistentKVStoreApplication) LoadSnapshotChunk(
	req types.RequestLoadSnapshotChunk) types.ResponseLoadSnapshotChunk {
	return types.ResponseLoadSnapshotChunk{}
}

func (app *PersistentKVStoreApplication) OfferSnapshot(
	req types.RequestOfferSnapshot) types.ResponseOfferSnapshot {
	return types.ResponseOfferSnapshot{Result: types.ResponseOfferSnapshot_ABORT}
}

func (app *PersistentKVStoreApplication) ApplySnapshotChunk(
	req types.RequestApplySnapshotChunk) types.ResponseApplySnapshotChunk {
	return types.ResponseApplySnapshotChunk{Result: types.ResponseApplySnapshotChunk_ABORT}
}

func (app *PersistentKVStoreApplication) PrepareProposal(
	req types.RequestPrepareProposal) types.ResponsePrepareProposal {
	return types.ResponsePrepareProposal{Txs: app.substPrepareTx(req.Txs, req.MaxTxBytes)}
}

func (app *PersistentKVStoreApplication) ProcessProposal(
	req types.RequestProcessProposal) types.ResponseProcessProposal {
	for _, tx := range req.Txs {
		if len(tx) == 0 || isPrepareTx(tx) {
			return types.ResponseProcessProposal{Status: types.ResponseProcessProposal_REJECT}
		}
	}
	return types.ResponseProcessProposal{Status: types.ResponseProcessProposal_ACCEPT}
}

//---------------------------------------------
// update validators

func (app *PersistentKVStoreApplication) Validators() (validators []types.ValidatorUpdate) {
	itr, err := app.app.state.db.Iterator(nil, nil)
	if err != nil {
		panic(err)
	}
	for ; itr.Valid(); itr.Next() {
		if isValidatorTx(itr.Key()) {
			validator := new(types.ValidatorUpdate)
			err := types.ReadMessage(bytes.NewBuffer(itr.Value()), validator)
			if err != nil {
				panic(err)
			}
			validators = append(validators, *validator)
		}
	}
	if err = itr.Error(); err != nil {
		panic(err)
	}
	return
}

func MakeValSetChangeTx(pubkey pc.PublicKey, power int64) []byte {
	pk, err := cryptoenc.PubKeyFromProto(pubkey)
	if err != nil {
		panic(err)
	}
	pubStr := base64.StdEncoding.EncodeToString(pk.Bytes())
	return []byte(fmt.Sprintf("val:%s!%d", pubStr, power))
}

func isValidatorTx(tx []byte) bool {
	return strings.HasPrefix(string(tx), ValidatorSetChangePrefix)
}

// format is "val:pubkey!power"
// pubkey is a base64-encoded 32-byte ed25519 key
func (app *PersistentKVStoreApplication) execValidatorTx(tx []byte) types.ResponseDeliverTx {
	tx = tx[len(ValidatorSetChangePrefix):]

	// get the pubkey and power
	pubKeyAndPower := strings.Split(string(tx), "!")
	if len(pubKeyAndPower) != 2 {
		return types.ResponseDeliverTx{
			Code: code.CodeTypeEncodingError,
			Log:  fmt.Sprintf("Expected 'pubkey!power'. Got %v", pubKeyAndPower)}
	}
	pubkeyS, powerS := pubKeyAndPower[0], pubKeyAndPower[1]

	// decode the pubkey
	pubkey, err := base64.StdEncoding.DecodeString(pubkeyS)
	if err != nil {
		return types.ResponseDeliverTx{
			Code: code.CodeTypeEncodingError,
			Log:  fmt.Sprintf("Pubkey (%s) is invalid base64", pubkeyS)}
	}

	// decode the power
	power, err := strconv.ParseInt(powerS, 10, 64)
	if err != nil {
		return types.ResponseDeliverTx{
			Code: code.CodeTypeEncodingError,
			Log:  fmt.Sprintf("Power (%s) is not an int", powerS)}
	}

	// update
	return app.updateValidator(types.UpdateValidator(pubkey, power, ""))
}

// add, update, or remove a validator
func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate) types.ResponseDeliverTx {
	pubkey, err := cryptoenc.PubKeyFromProto(v.PubKey)
	if err != nil {
		panic(fmt.Errorf("can't decode public key: %w", err))
	}
	key := []byte("val:" + string(pubkey.Bytes()))

	if v.Power == 0 {
		// remove validator
		hasKey, err := app.app.state.db.Has(key)
		if err != nil {
			panic(err)
		}
		if !hasKey {
			pubStr := base64.StdEncoding.EncodeToString(pubkey.Bytes())
			return types.ResponseDeliverTx{
				Code: code.CodeTypeUnauthorized,
				Log:  fmt.Sprintf("Cannot remove non-existent validator %s", pubStr)}
		}
		if err = app.app.state.db.Delete(key); err != nil {
			panic(err)
		}
		delete(app.valAddrToPubKeyMap, string(pubkey.Address()))
	} else {
		// add or update validator
		value := bytes.NewBuffer(make([]byte, 0))
		if err := types.WriteMessage(&v, value); err != nil {
			return types.ResponseDeliverTx{
				Code: code.CodeTypeEncodingError,
				Log:  fmt.Sprintf("Error encoding validator: %v", err)}
		}
		if err = app.app.state.db.Set(key, value.Bytes()); err != nil {
			panic(err)
		}
		app.valAddrToPubKeyMap[string(pubkey.Address())] = v.PubKey
	}

	// we only update the changes array if we successfully updated the tree
	app.ValUpdates = append(app.ValUpdates, v)

	return types.ResponseDeliverTx{Code: code.CodeTypeOK}
}

// -----------------------------

const (
	PreparePrefix = "prepare"
	ReplacePrefix = "replace"
)

func isPrepareTx(tx []byte) bool { return bytes.HasPrefix(tx, []byte(PreparePrefix)) }

func isReplacedTx(tx []byte) bool {
	return bytes.HasPrefix(tx, []byte(ReplacePrefix))
}

// execPrepareTx is a no-op. The tx data is treated as a placeholder
// and is substituted during PrepareProposal.
func (app *PersistentKVStoreApplication) execPrepareTx(tx []byte) types.ResponseDeliverTx {
	// noop
	return types.ResponseDeliverTx{}
}

// substPrepareTx replaces the 'prepare' prefix of every such transaction in the
// proposal with the 'replace' prefix, dropping transactions once maxTxBytes is exceeded.
func (app *PersistentKVStoreApplication) substPrepareTx(blockData [][]byte, maxTxBytes int64) [][]byte {
	txs := make([][]byte, 0, len(blockData))
	var totalBytes int64
	for _, tx := range blockData {
		txMod := tx
		if isPrepareTx(tx) {
			txMod = bytes.Replace(tx, []byte(PreparePrefix), []byte(ReplacePrefix), 1)
		}
		totalBytes += int64(len(txMod))
		if totalBytes > maxTxBytes {
			break
		}
		txs = append(txs, txMod)
	}
	return txs
}
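The substitution above only ever touches the leading occurrence of the prefix; everything after it is carried through verbatim, and transactions past the byte budget are dropped. A minimal standalone demonstration of the same `bytes.Replace` call (illustration only):

package main

import (
	"bytes"
	"fmt"
)

func main() {
	tx := []byte("preparekey=value")
	// Replace only the first occurrence of the prefix, as substPrepareTx does.
	out := bytes.Replace(tx, []byte("prepare"), []byte("replace"), 1)
	fmt.Println(string(out)) // replacekey=value

	// Non-prepare transactions pass through untouched.
	other := bytes.Replace([]byte("key=value"), []byte("prepare"), []byte("replace"), 1)
	fmt.Println(string(other)) // key=value
}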
@@ -1,573 +0,0 @@
package orderbook

import (
	"bytes"
	"encoding/binary"
	fmt "fmt"

	"github.com/cosmos/gogoproto/proto"
	dbm "github.com/tendermint/tm-db"

	"github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/crypto/ed25519"
	"github.com/tendermint/tendermint/crypto/tmhash"
)

var _ types.Application = (*StateMachine)(nil)

const Version = 1

const (
	// In tendermint a zero code is okay and all non zero codes are errors
	StatusOK = iota
	StatusErrDecoding
	StatusErrUnknownMessage
	StatusErrValidateBasic
	StatusErrNoAccount
	StatusErrAccountExists
	StatusErrNoPair
	StatusErrPairExists
	StatusErrInvalidOrder
	StatusErrUnacceptableMessage
	StatusErrNoCommodity
)

var (
	stateKey   = []byte("state")
	accountKey = []byte("account")
	pairKey    = []byte("pair")
)

// StateMachine is the main struct that encompasses the logic of the orderbook
type StateMachine struct {
	// inherit all the abci methods so we don't have to implement everything
	types.BaseApplication

	// persisted state which is a key value store containing:
	// accountID -> account
	// pairID -> pair
	db dbm.DB

	// in-memory state
	lastHeight int64  // the last height that was persisted
	lastHash   []byte // the last hash that was persisted
	// list of accounts (this is used for the app hash)
	accounts    []*Account
	pairs       map[string]*Pair    // lookup pairs
	commodities map[string]struct{} // lookup commodities
	publicKeys  map[string]struct{} // lookup existence of an account
	// the accounts that have been modified by the most recent block
	// and will need to result in an update to the db
	touchedAccounts map[uint64]struct{}
	// new pairs added in this block which will need to be added to the
	// db on "Commit"
	newPairs []*Pair

	// app-side mempool (also ephemeral)
	// this takes ask and bid transactions from `CheckTx`
	// and matches them as a "MatchedOrder" which is
	// then proposed in a block
	//
	// it's important to note that there is no garbage collection
	// here. Bids and asks, potentially even invalid, will
	// continue to stay here until matched
	markets map[string]*Market // i.e. ATOM/USDC
}
// New creates a StateMachine from a given database. If the database is
// empty a fresh instance is created, else the accounts, pairs and
// state are loaded into memory.
func New(db dbm.DB) (*StateMachine, error) {
	// iterate over all the keys
	iter, err := db.Iterator(nil, nil)
	if err != nil {
		return nil, err
	}
	defer iter.Close()

	var (
		accounts    = make([]*Account, 0)
		publicKeys  = make(map[string]struct{})
		commodities = make(map[string]struct{})
		pairs       = make(map[string]*Pair)
		markets     = make(map[string]*Market)
		lastHeight  uint64
		lastHash    []byte
	)

	for ; iter.Valid(); iter.Next() {
		if bytes.HasPrefix(iter.Key(), pairKey) {
			var pair Pair
			if err := proto.Unmarshal(iter.Value(), &pair); err != nil {
				return nil, err
			}
			pairs[pair.String()] = &pair
			commodities[pair.BuyersDenomination] = struct{}{}
			commodities[pair.SellersDenomination] = struct{}{}
			markets[pair.String()] = NewMarket(&pair)
		}

		if bytes.HasPrefix(iter.Key(), accountKey) {
			var acc Account
			if err := proto.Unmarshal(iter.Value(), &acc); err != nil {
				return nil, err
			}

			accounts = append(accounts, &acc)
			publicKeys[string(acc.PublicKey)] = struct{}{}
		}

		if bytes.HasPrefix(iter.Key(), stateKey) {
			state := iter.Value()
			// the state record is an 8-byte big-endian height followed by
			// the hash (see updateState below)
			lastHeight = binary.BigEndian.Uint64(state[:8])
			lastHash = state[8:]
		}
	}

	return &StateMachine{
		accounts:        accounts,
		pairs:           pairs,
		commodities:     commodities,
		publicKeys:      publicKeys,
		markets:         markets,
		lastHeight:      int64(lastHeight),
		lastHash:        lastHash,
		db:              db,
		touchedAccounts: make(map[uint64]struct{}),
		newPairs:        make([]*Pair, 0),
	}, nil
}

// Info is used by Tendermint to understand the state of the application.
// This is useful for replay and syncing modes.
func (sm *StateMachine) Info(req types.RequestInfo) types.ResponseInfo {
	return types.ResponseInfo{
		AppVersion:       Version,
		LastBlockHeight:  sm.lastHeight,
		LastBlockAppHash: sm.lastHash,
	}
}

// CheckTx indicates which transactions should be accepted in the mempool. It is
// not a perfect validity check because we're unsure of the state that the transaction
// will be executed against. We should treat this as a gatekeeper to the mempool.
// Apart from adding transactions to the app-side mempool, this check is stateless.
func (sm *StateMachine) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
	var msg = new(Msg)

	err := proto.Unmarshal(req.Tx, msg)
	if err != nil {
		return types.ResponseCheckTx{Code: StatusErrDecoding, Log: err.Error()} // decoding error
	}

	if err := msg.ValidateBasic(); err != nil {
		return types.ResponseCheckTx{Code: StatusErrValidateBasic, Log: err.Error()}
	}

	// add either bids or asks to the market which will match them in PrepareProposal
	switch m := msg.Sum.(type) {
	case *Msg_MsgAsk:
		market, ok := sm.markets[m.MsgAsk.Pair.String()]
		if !ok {
			return types.ResponseCheckTx{Code: StatusErrNoPair}
		}
		market.AddAsk(m.MsgAsk.AskOrder)
	case *Msg_MsgBid:
		market, ok := sm.markets[m.MsgBid.Pair.String()]
		if !ok {
			return types.ResponseCheckTx{Code: StatusErrNoPair}
		}
		market.AddBid(m.MsgBid.BidOrder)
	}

	return types.ResponseCheckTx{Code: StatusOK}
}
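CheckTx therefore does double duty: gatekeeping the mempool and feeding the app-side order books. The sketch below illustrates the shape of that buffering with deliberately simplified, hypothetical `order`/`market` types; the real `Market`, `OrderAsk`, and `OrderBid` are protobuf-generated and live elsewhere in this package:

package main

import "fmt"

// order is a hypothetical, simplified stand-in for OrderAsk/OrderBid.
type order struct {
	price    int64
	quantity int64
}

// market buffers unmatched bids and asks, as the app-side mempool does.
type market struct {
	bids, asks []order
}

func (m *market) AddBid(o order) { m.bids = append(m.bids, o) }
func (m *market) AddAsk(o order) { m.asks = append(m.asks, o) }

// match pairs the first bid willing to pay at least some ask's price.
// There is no garbage collection: unmatched orders simply stay buffered.
func (m *market) match() (order, order, bool) {
	for i, bid := range m.bids {
		for j, ask := range m.asks {
			if bid.price >= ask.price {
				m.bids = append(m.bids[:i], m.bids[i+1:]...)
				m.asks = append(m.asks[:j], m.asks[j+1:]...)
				return bid, ask, true
			}
		}
	}
	return order{}, order{}, false
}

func main() {
	m := &market{}
	m.AddAsk(order{price: 5, quantity: 10}) // buffered by CheckTx
	m.AddBid(order{price: 7, quantity: 10}) // buffered by CheckTx
	if bid, ask, ok := m.match(); ok {      // drained by PrepareProposal
		fmt.Printf("matched bid@%d with ask@%d\n", bid.price, ask.price)
	}
}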

// ValidateTx validates a transaction against state.
func (sm *StateMachine) ValidateTx(msg *Msg) uint32 {
	if err := msg.ValidateBasic(); err != nil {
		return StatusErrValidateBasic
	}

	switch m := msg.Sum.(type) {
	case *Msg_MsgRegisterPair:
		pair := m.MsgRegisterPair.Pair
		if _, ok := sm.pairs[pair.String()]; ok {
			return StatusErrPairExists
		}

		reversePair := &Pair{BuyersDenomination: pair.SellersDenomination, SellersDenomination: pair.BuyersDenomination}
		if _, ok := sm.pairs[reversePair.String()]; ok {
			return StatusErrPairExists
		}

	case *Msg_MsgAsk, *Msg_MsgBid: // MsgAsk and MsgBid are not allowed individually - they need to be matched as a TradeSet
		return StatusErrUnacceptableMessage // TODO: add logic around msg ask and bid to allow them

	case *Msg_MsgCreateAccount:
		// check for duplicate accounts in state machine
		if _, ok := sm.publicKeys[string(m.MsgCreateAccount.PublicKey)]; ok {
			return StatusErrAccountExists
		}

		// check that each of the commodities is present in at least one trading pair
		for _, commodity := range m.MsgCreateAccount.Commodities {
			if _, exists := sm.commodities[commodity.Denom]; !exists {
				return StatusErrNoCommodity
			}
		}

	case *Msg_MsgTradeSet:
		// check the pair exists
		if _, ok := sm.pairs[m.MsgTradeSet.TradeSet.Pair.String()]; !ok {
			return StatusErrNoPair
		}

		for _, order := range m.MsgTradeSet.TradeSet.MatchedOrders {
			// validate matched order i.e. users have funds and signatures are valid
			if !sm.isMatchedOrderValid(order, m.MsgTradeSet.TradeSet.Pair) {
				return StatusErrInvalidOrder
			}
		}

	default:
		return StatusErrUnknownMessage
	}

	return StatusOK
}

// PrepareProposal is called whenever this validator is the proposer for the round. First, it adds the non-order
// transactions provided by Tendermint. The orderbook then loops through each market and tries to match as many
// orders as possible. For each new transaction it checks that the max bytes has not been exceeded.
func (sm *StateMachine) PrepareProposal(req types.RequestPrepareProposal) types.ResponsePrepareProposal {
	txs := make([][]byte, 0, len(req.Txs))
	// running total of proposal bytes, so we never exceed req.MaxTxBytes
	var totalBytes int64

	// go through the transactions passed up via Tendermint first
	for _, tx := range req.Txs {
		var msg = new(Msg)
		err := proto.Unmarshal(tx, msg)
		if err != nil {
			panic(err)
		}

		// skip over the bids and asks that are proposed. We already have them
		if _, ok := msg.Sum.(*Msg_MsgBid); ok {
			continue
		}
		if _, ok := msg.Sum.(*Msg_MsgAsk); ok {
			continue
		}

		// make sure we're proposing valid transactions
		if status := sm.ValidateTx(msg); status != StatusOK {
			continue
		}

		totalBytes += int64(len(tx))
		if totalBytes > req.MaxTxBytes {
			return types.ResponsePrepareProposal{Txs: txs}
		}
		txs = append(txs, tx)
	}

	// fetch and match all the bids and asks for each market, marshal the
	// resulting tradesets into bytes, and add those bytes as transactions
	for _, market := range sm.markets {
		tradeSet := market.Match()
		if tradeSet == nil {
			continue
		}

		fmt.Println("we have a tradeset")

		tradeSet = sm.validateTradeSetAgainstState(tradeSet)
		if tradeSet == nil || len(tradeSet.MatchedOrders) == 0 {
			continue
		}

		fmt.Println("we have a valid tradeset")

		// wrap this as a message type
		msgTradeSet := &MsgTradeSet{TradeSet: tradeSet}
		bz, err := proto.Marshal(msgTradeSet)
		if err != nil {
			panic(err)
		}

		// check to see that we don't overpopulate the block
		totalBytes += int64(len(bz))
		if totalBytes > req.MaxTxBytes {
			return types.ResponsePrepareProposal{Txs: txs}
		}
		txs = append(txs, bz)
	}

	return types.ResponsePrepareProposal{Txs: txs}
}

// ProcessProposal either accepts or rejects a proposal.
//
// It uses the same validity function as PrepareProposal. This ensures the coherence property
// is adhered to, i.e. all honest validators must accept a proposal by an honest proposer.
func (sm *StateMachine) ProcessProposal(req types.RequestProcessProposal) types.ResponseProcessProposal {
	for _, tx := range req.Txs {
		var msg = new(Msg)
		err := proto.Unmarshal(tx, msg)
		if err != nil {
			return rejectProposal()
		}

		if status := sm.ValidateTx(msg); status != StatusOK {
			fmt.Printf("tx failed validation, status: %d\n", status)
			return rejectProposal()
		}
	}

	return acceptProposal()
}

func (sm *StateMachine) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock {
	// reset the new pairs
	sm.newPairs = make([]*Pair, 0)
	return types.ResponseBeginBlock{}
}

// DeliverTx is called for each tx in a block once it has been finalized. This is where the
// execution code lives. Most importantly it's where we update the user accounts following
// a successful order.
func (sm *StateMachine) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
	var msg = new(Msg)

	err := proto.Unmarshal(req.Tx, msg)
	if err != nil {
		return types.ResponseDeliverTx{Code: StatusErrDecoding, Log: err.Error()} // decoding error
	}

	if status := sm.ValidateTx(msg); status != StatusOK {
		return types.ResponseDeliverTx{Code: status}
	}

	switch m := msg.Sum.(type) {
	case *Msg_MsgRegisterPair:
		sm.markets[m.MsgRegisterPair.Pair.String()] = NewMarket(m.MsgRegisterPair.Pair)
		sm.pairs[m.MsgRegisterPair.Pair.String()] = m.MsgRegisterPair.Pair
		sm.commodities[m.MsgRegisterPair.Pair.BuyersDenomination] = struct{}{}
		sm.commodities[m.MsgRegisterPair.Pair.SellersDenomination] = struct{}{}
		sm.newPairs = append(sm.newPairs, m.MsgRegisterPair.Pair)

	case *Msg_MsgCreateAccount:
		nextAccountID := uint64(len(sm.accounts))
		sm.accounts = append(sm.accounts, &Account{
			Index:       nextAccountID,
			PublicKey:   m.MsgCreateAccount.PublicKey,
			Commodities: m.MsgCreateAccount.Commodities,
		})
		sm.touchedAccounts[nextAccountID] = struct{}{}
		sm.publicKeys[string(m.MsgCreateAccount.PublicKey)] = struct{}{}

	case *Msg_MsgTradeSet:
		pair := m.MsgTradeSet.TradeSet.Pair
		for _, order := range m.MsgTradeSet.TradeSet.MatchedOrders {
			buyer := sm.accounts[order.OrderBid.OwnerId]
			seller := sm.accounts[order.OrderAsk.OwnerId]

			// the buyer gets quantity of the asset that the seller was selling
			buyer.AddCommodity(NewCommodity(pair.SellersDenomination, order.OrderAsk.Quantity))
			// the buyer gives up quantity * ask price of the buyers denomination
			buyer.SubtractCommodity(NewCommodity(pair.BuyersDenomination, order.OrderAsk.Quantity*order.OrderAsk.AskPrice))

			// the seller gets quantity * ask price of the asset that the buyer was paying with
			seller.AddCommodity(NewCommodity(pair.BuyersDenomination, order.OrderAsk.Quantity*order.OrderAsk.AskPrice))
			// the seller gives up quantity of the commodity they were selling
			seller.SubtractCommodity(NewCommodity(pair.SellersDenomination, order.OrderAsk.Quantity))

			// mark that these accounts have been touched
			sm.touchedAccounts[order.OrderBid.OwnerId] = struct{}{}
			sm.touchedAccounts[order.OrderAsk.OwnerId] = struct{}{}
		}

	default:
		return types.ResponseDeliverTx{Code: StatusErrUnknownMessage}
	}

	return types.ResponseDeliverTx{Code: 0}
}

// EndBlock is used to update consensus params and the validator set. For the orderbook,
// we keep both the same.
func (sm *StateMachine) EndBlock(req types.RequestEndBlock) types.ResponseEndBlock {
	return types.ResponseEndBlock{}
}

// Commit is called to tell the app it is safe to persist state to disk.
// We now take the in-memory representation and update the parts that have
// changed on to disk.
func (sm *StateMachine) Commit() types.ResponseCommit {
	batch := sm.db.NewBatch()

	// write the accounts that were modified by the last block
	for accountID := range sm.touchedAccounts {
		value, err := proto.Marshal(sm.accounts[accountID])
		if err != nil {
			panic(err)
		}
		key := binary.BigEndian.AppendUint64(accountKey, accountID)
		if err := batch.Set(key, value); err != nil {
			panic(err)
		}
	}

	// write the new pairs that were added by the last block
	pairID := len(sm.pairs) - len(sm.newPairs)
	for id, pair := range sm.newPairs {
		value, err := proto.Marshal(pair)
		if err != nil {
			panic(err)
		}
		key := binary.BigEndian.AppendUint64(pairKey, uint64(pairID+id))
		if err := batch.Set(key, value); err != nil {
			panic(err)
		}
	}

	hash := sm.hash()
	err := sm.updateState(batch, sm.lastHeight+1, hash)
	if err != nil {
		panic(err)
	}
	err = batch.WriteSync()
	if err != nil {
		panic(err)
	}

	return types.ResponseCommit{Data: hash}
}

// hash is just the sha256 of the byte representation of all accounts.
// remember that this needs to be deterministic across all state machines
func (sm *StateMachine) hash() []byte {
	digest := bytes.NewBuffer(nil)
	for _, account := range sm.accounts {
		bz, err := proto.Marshal(account)
		if err != nil {
			panic(err)
		}
		digest.Write(bz)
	}
	return tmhash.Sum(digest.Bytes())
}

func (sm *StateMachine) updateState(batch dbm.Batch, height int64, hash []byte) error {
	sm.lastHash = hash
	sm.lastHeight = height
	heightBytes := make([]byte, 8)
	binary.BigEndian.PutUint64(heightBytes, uint64(height))
	return batch.Set(stateKey, append(heightBytes, hash...))
}
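The state record written here has a fixed layout: an 8-byte big-endian height followed by the raw hash, which is exactly how New slices it back apart when reloading. A standalone round-trip of that encoding (illustration only):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	hash := []byte("apphash")

	// Encode as updateState does: 8 bytes of height, then the hash.
	record := make([]byte, 8)
	binary.BigEndian.PutUint64(record, 1234)
	record = append(record, hash...)

	// Decode as New does when reloading the state key.
	height := binary.BigEndian.Uint64(record[:8])
	fmt.Println(height, string(record[8:])) // 1234 apphash
}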

func (sm *StateMachine) validateTradeSetAgainstState(tradeSet *TradeSet) *TradeSet {
	output := &TradeSet{Pair: tradeSet.Pair}

	for _, matchedOrder := range tradeSet.MatchedOrders {
		if !sm.isMatchedOrderValid(matchedOrder, tradeSet.Pair) {
			continue
		}

		// this matched order is still valid and can be executed
		output.MatchedOrders = append(output.MatchedOrders, matchedOrder)
	}

	return output
}

// isMatchedOrderValid is a check against current state to ensure that the order
// is valid and can execute.
//
// This method is also called when preparing a proposal, since `CheckTx` doesn't have
// strict validity guarantees and there could be invalid transactions within the mempool.
//
// Note: if one of the two orders is invalid we discard both. In the future we could
// improve this by adding back the part of the order that might still be valid.
func (sm *StateMachine) isMatchedOrderValid(order *MatchedOrder, pair *Pair) bool {
	if int(order.OrderBid.OwnerId) >= len(sm.accounts) {
		return false
	}
	bidOwner := sm.accounts[order.OrderBid.OwnerId]
	if bidOwner == nil {
		return false
	}

	if int(order.OrderAsk.OwnerId) >= len(sm.accounts) {
		return false
	}
	askOwner := sm.accounts[order.OrderAsk.OwnerId]
	if askOwner == nil {
		return false
	}

	askCommodities := askOwner.FindCommidity(pair.SellersDenomination)
	if askCommodities == nil {
		return false
	}
	buyCommodities := bidOwner.FindCommidity(pair.BuyersDenomination)
	if buyCommodities == nil {
		return false
	}

	// Seller has enough of the commodity
	if askCommodities.Quantity-order.OrderAsk.Quantity < 0 {
		return false
	}

	// Buyer has enough of the buying commodity
	if buyCommodities.Quantity-(order.OrderAsk.AskPrice*order.OrderAsk.Quantity) < 0 {
		return false
	}

	if !order.OrderAsk.ValidateSignature(ed25519.PubKey(askOwner.PublicKey), pair) {
		return false
	}
	if !order.OrderBid.ValidateSignature(ed25519.PubKey(bidOwner.PublicKey), pair) {
		return false
	}

	return true
}

// InitDB takes an empty DB instance and populates it with the
// provided pairs and accounts. Note that the order here is important.
func InitDB(db dbm.DB, pairs []*Pair, accounts []*Account) error {
	batch := db.NewBatch()

	for id, account := range accounts {
		value, err := proto.Marshal(account)
		if err != nil {
			return err
		}
		key := binary.BigEndian.AppendUint64(accountKey, uint64(id))
		if err := batch.Set(key, value); err != nil {
			return err
		}
	}

	for id, pair := range pairs {
		value, err := proto.Marshal(pair)
		if err != nil {
			return err
		}
		key := binary.BigEndian.AppendUint64(pairKey, uint64(id))
		fmt.Println(key)
		if err := batch.Set(key, value); err != nil {
			return err
		}
	}

	return batch.WriteSync()
}

func rejectProposal() types.ResponseProcessProposal {
	return types.ResponseProcessProposal{Status: types.ResponseProcessProposal_REJECT}
}

func acceptProposal() types.ResponseProcessProposal {
	return types.ResponseProcessProposal{Status: types.ResponseProcessProposal_ACCEPT}
}
@@ -1,327 +0,0 @@
package orderbook_test

import (
    "fmt"
    "testing"

    "github.com/cosmos/gogoproto/proto"
    "github.com/stretchr/testify/require"
    dbm "github.com/tendermint/tm-db"

    "github.com/tendermint/tendermint/abci/example/orderbook"
    "github.com/tendermint/tendermint/abci/types"
    "github.com/tendermint/tendermint/crypto"
    "github.com/tendermint/tendermint/crypto/ed25519"
    params "github.com/tendermint/tendermint/types"
)

// TODO: we should also check that CheckTx adds bids and asks to the app-side mempool
func TestCheckTx(t *testing.T) {
    db := dbm.NewMemDB()
    require.NoError(t, orderbook.InitDB(db, []*orderbook.Pair{testPair}, nil))
    app, err := orderbook.New(db)
    require.NoError(t, err)

    testCases := []struct {
        name         string
        msg          *orderbook.Msg
        responseCode uint32
        expOrderSize int
    }{
        {
            name:         "test empty tx",
            msg:          &orderbook.Msg{},
            responseCode: orderbook.StatusErrValidateBasic,
            expOrderSize: 0,
        },
        {
            name: "test msg ask",
            msg: &orderbook.Msg{
                Sum: &orderbook.Msg_MsgAsk{
                    MsgAsk: &orderbook.MsgAsk{
                        Pair: testPair,
                        AskOrder: &orderbook.OrderAsk{
                            Quantity:  10,
                            AskPrice:  1,
                            OwnerId:   1,
                            Signature: crypto.CRandBytes(ed25519.SignatureSize),
                        },
                    },
                },
            },
            responseCode: orderbook.StatusOK,
            expOrderSize: 1,
        },
        {
            name: "test msg ask wrong signature",
            msg: &orderbook.Msg{
                Sum: &orderbook.Msg_MsgAsk{
                    MsgAsk: &orderbook.MsgAsk{
                        Pair: testPair,
                        AskOrder: &orderbook.OrderAsk{
                            Quantity:  10,
                            AskPrice:  1,
                            OwnerId:   1,
                            Signature: crypto.CRandBytes(62),
                        },
                    },
                },
            },
            responseCode: orderbook.StatusErrValidateBasic,
            expOrderSize: 1,
        },
        {
            name: "test msg bid",
            msg: &orderbook.Msg{Sum: &orderbook.Msg_MsgBid{MsgBid: &orderbook.MsgBid{
                Pair: testPair,
                BidOrder: &orderbook.OrderBid{
                    MaxQuantity: 15,
                    MaxPrice:    5,
                    OwnerId:     1,
                    Signature:   crypto.CRandBytes(ed25519.SignatureSize),
                },
            }}},
            responseCode: orderbook.StatusOK,
            expOrderSize: 2,
        },
        {
            name: "test msg bid blank",
            msg: &orderbook.Msg{Sum: &orderbook.Msg_MsgBid{MsgBid: &orderbook.MsgBid{
                Pair: testPair,
                BidOrder: &orderbook.OrderBid{
                    MaxQuantity: 0,
                    MaxPrice:    0,
                    OwnerId:     0,
                    Signature:   crypto.CRandBytes(ed25519.SignatureSize),
                },
            }}},
            responseCode: orderbook.StatusErrValidateBasic,
            expOrderSize: 2,
        },
        {
            name: "test msg register duplicate pair",
            msg: &orderbook.Msg{Sum: &orderbook.Msg_MsgRegisterPair{MsgRegisterPair: &orderbook.MsgRegisterPair{
                Pair: &orderbook.Pair{BuyersDenomination: "ATOM", SellersDenomination: "ATOM"},
            }}},
            responseCode: orderbook.StatusErrValidateBasic,
            expOrderSize: 2,
        },
    }

    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            bz, err := proto.Marshal(tc.msg)
            require.NoError(t, err)
            resp := app.CheckTx(types.RequestCheckTx{Tx: bz})
            require.Equal(t, tc.responseCode, resp.Code, resp.Log)
            bids, asks := app.Orders(testPair)
            require.Equal(t, tc.expOrderSize, len(bids)+len(asks))
        })
    }
}
// func ValidateTx(t *testing.T) {
//     db := dbm.NewMemDB()
//     require.NoError(t, orderbook.InitDB(db, []*orderbook.Pair{testPair}, nil))
//     app, err := orderbook.New(db)
//     require.NoError(t, err)

//     for _, tc := range testCases {
//         t.Run(tc.name, func(t *testing.T) {
//             bz, err := proto.Marshal(tc.msg)
//             require.NoError(t, err)
//             resp := app.CheckTx(types.RequestCheckTx{Tx: bz})
//             require.Equal(t, tc.responseCode, resp.Code, resp.Log)
//             bids, asks := app.Orders(testPair)
//             require.Equal(t, tc.expOrderSize, len(bids)+len(asks))
//         })
//     }
// }

// TODO: we should check that transactions in
// a market are being validated and added to the proposal,
// and that other transactions get in
// func TestPrepareProposal(t *testing.T) {
//     db := dbm.NewMemDB()
//     require.NoError(t, orderbook.InitDB(db, []*orderbook.Pair{testPair}, nil))
//     app, err := orderbook.New(db)
//     require.NoError(t, err)

//     for _, tc := range testCases {
//         t.Run(tc.name, func(t *testing.T) {
//             bz, err := proto.Marshal(tc.msg)
//             require.NoError(t, err)
//             resp := app.CheckTx(types.RequestCheckTx{Tx: bz})
//             require.Equal(t, tc.responseCode, resp.Code, resp.Log)
//             bids, asks := app.Orders(testPair)
//             require.Equal(t, tc.expOrderSize, len(bids)+len(asks))
//         })
//     }
// }

// {
//     name: "test msg register pair",
//     msg: &orderbook.Msg{Sum: &orderbook.Msg_MsgRegisterPair{MsgRegisterPair: &orderbook.MsgRegisterPair{
//         Pair: &orderbook.Pair{BuyersDenomination: "ATOM", SellersDenomination: "AUD"},
//     }}},
//     responseCode: orderbook.StatusOK,
//     expOrderSize: 2,
//     pairSize:     2,
// },

// TODO: we should test that transactions are
// always valid i.e. ValidateTx. We could potentially
// combine this with PrepareProposal
// func TestProcessProposal(t *testing.T) {
//     app := orderbook.New(dbm.NewMemDB())
// }

// TODO: we should test that a matched order
// correctly updates the accounts. We should
// also test that committing a block persists
// it to the database and that we can now
// query the new state
// func TestFinalizeBlock(t *testing.T) {
//     app := orderbook.New(dbm.NewMemDB())
// }

// TODO: test that we can start from new
// and from existing state
// func TestNewStateMachine(t *testing.T) {}
func TestEndToEnd(t *testing.T) {
    db := dbm.NewMemDB()
    app, err := orderbook.New(db)
    require.NoError(t, err)

    var (
        maxBytes        = params.DefaultConsensusParams().Block.MaxBytes
        commodityNZD    = &orderbook.Commodity{Denom: "NZD", Quantity: 100}
        commodityAUD    = &orderbook.Commodity{Denom: "AUD", Quantity: 100}
        registerPairMsg = newRegisterPair("NZD", "AUD")
        pair            = registerPairMsg.GetMsgRegisterPair().Pair
        pkAlice         = ed25519.GenPrivKey()
        pkBob           = ed25519.GenPrivKey()
        pubKeyAlice     = pkAlice.PubKey().Bytes()
        pubKeyBob       = pkBob.PubKey().Bytes()
        registerAlice   = newRegisterAccount(pubKeyAlice, []*orderbook.Commodity{commodityAUD})
        registerBob     = newRegisterAccount(pubKeyBob, []*orderbook.Commodity{commodityNZD})
        // bob is asking for 25 AUD for 5 NZD
        ask = &orderbook.Msg{Sum: &orderbook.Msg_MsgAsk{MsgAsk: orderbook.NewMsgAsk(pair, 5, 5, 1)}}
        // alice is bidding 25 AUD for 5 NZD
        bid = &orderbook.Msg{Sum: &orderbook.Msg_MsgBid{MsgBid: orderbook.NewMsgBid(pair, 5, 5, 0)}}
    )

    require.NoError(t, ask.GetMsgAsk().Sign(pkBob))
    require.NoError(t, bid.GetMsgBid().Sign(pkAlice))

    testCases := []struct {
        txs      [][]byte
        accepted bool
        // assertions to be made about the state of the application
        // after each block
        assertions func(t *testing.T, app *orderbook.StateMachine)
    }{
        {
            // block 1 sets up the trading pair
            txs:      asTxs(registerPairMsg),
            accepted: true,
            assertions: func(t *testing.T, app *orderbook.StateMachine) {
                pairs := app.Pairs()
                require.Len(t, pairs, 1)
                require.Equal(t, pair, &pairs[0])
            },
        },
        {
            // block 2 registers two accounts: alice and bob
            txs:      asTxs(registerAlice, registerBob),
            accepted: true,
            assertions: func(t *testing.T, app *orderbook.StateMachine) {
                alice := app.Account(0)
                require.False(t, alice.IsEmpty(), alice)
                require.Equal(t, pubKeyAlice, alice.PublicKey)
                require.Len(t, alice.Commodities, 1)
                require.Equal(t, alice.Commodities[0], commodityAUD)
                bob := app.Account(1)
                require.False(t, bob.IsEmpty(), bob)
                require.Equal(t, pubKeyBob, bob.PublicKey)
                require.Len(t, bob.Commodities, 1)
                require.Equal(t, bob.Commodities[0], commodityNZD)
                require.True(t, app.Account(2).IsEmpty())
            },
        },
        {
            // block 3 performs a trade between alice and bob
            txs:      asTxs(ask, bid),
            accepted: true,
            assertions: func(t *testing.T, app *orderbook.StateMachine) {
                alice := app.Account(0)
                require.EqualValues(t, 75, alice.Commodities[0].Quantity) // 75 AUD
                require.EqualValues(t, 5, alice.Commodities[1].Quantity)  // 5 NZD
                bob := app.Account(1)
                require.EqualValues(t, 95, bob.Commodities[0].Quantity) // 95 NZD
                require.EqualValues(t, 25, bob.Commodities[1].Quantity) // 25 AUD
            },
        },
    }

    for idx, tc := range testCases {
        for _, tx := range tc.txs {
            resp := app.CheckTx(types.RequestCheckTx{Tx: tx})
            require.EqualValues(t, orderbook.StatusOK, resp.Code)
        }
        txs := app.PrepareProposal(types.RequestPrepareProposal{MaxTxBytes: maxBytes, Txs: tc.txs}).Txs
        require.Equal(t, txs, tc.txs)
        if idx == 2 {
            fmt.Print(tc.txs)
            fmt.Println()
            fmt.Print(txs)
        }

        result := app.ProcessProposal(types.RequestProcessProposal{Txs: txs})
        if tc.accepted {
            require.Equal(t, types.ResponseProcessProposal_ACCEPT, result.Status)
        } else {
            require.Equal(t, types.ResponseProcessProposal_REJECT, result.Status)
            continue
        }

        app.BeginBlock(types.RequestBeginBlock{})
        for _, tx := range txs {
            app.DeliverTx(types.RequestDeliverTx{Tx: tx})
        }
        app.EndBlock(types.RequestEndBlock{})
        app.Commit()

        if tc.assertions != nil {
            tc.assertions(t, app)
        }
    }
}

func asTxs(msgs ...*orderbook.Msg) [][]byte {
    output := make([][]byte, len(msgs))
    for i, msg := range msgs {
        bz, err := proto.Marshal(msg)
        if err != nil {
            panic(err)
        }
        output[i] = bz
    }
    return output
}

func newRegisterPair(d1, d2 string) *orderbook.Msg {
    return &orderbook.Msg{Sum: &orderbook.Msg_MsgRegisterPair{MsgRegisterPair: &orderbook.MsgRegisterPair{
        Pair: &orderbook.Pair{BuyersDenomination: d1, SellersDenomination: d2},
    }}}
}

func newRegisterAccount(pubkey []byte, commodities []*orderbook.Commodity) *orderbook.Msg {
    return &orderbook.Msg{Sum: &orderbook.Msg_MsgCreateAccount{MsgCreateAccount: &orderbook.MsgCreateAccount{
        PublicKey:   pubkey,
        Commodities: commodities,
    }}}
}
@@ -1,9 +0,0 @@
version: v1
plugins:
  - name: gogofaster
    out: .
    opt:
      - Mgoogle/protobuf/timestamp.proto=github.com/cosmos/gogoproto/types
      - Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration
      - plugins=grpc
      - paths=source_relative
@@ -1,243 +0,0 @@
package main

import (
    "fmt"
    "os"
    "path/filepath"

    "github.com/spf13/cobra"
    "github.com/spf13/viper"

    "github.com/tendermint/tendermint/abci/example/orderbook"
    cfg "github.com/tendermint/tendermint/config"
    "github.com/tendermint/tendermint/libs/log"
    tmos "github.com/tendermint/tendermint/libs/os"
    tmrand "github.com/tendermint/tendermint/libs/rand"
    "github.com/tendermint/tendermint/node"
    "github.com/tendermint/tendermint/p2p"
    "github.com/tendermint/tendermint/privval"
    "github.com/tendermint/tendermint/proxy"
    "github.com/tendermint/tendermint/types"
    tmtime "github.com/tendermint/tendermint/types/time"
)

func main() {
    NewCLI().Run()
}

type CLI struct {
    root   *cobra.Command
    config *cfg.Config
}

func NewCLI() *CLI {
    cli := &CLI{}
    cli.root = &cobra.Command{
        Use:   "orderbook",
        Short: "orderbook abci++ example",
    }
    cli.root.AddCommand(&cobra.Command{
        Use:   "init",
        Short: "initialize the file system for an orderbook node",
        PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
            root, err := os.Getwd()
            if err != nil {
                return err
            }

            viper.AddConfigPath(filepath.Join(root, "config"))
            viper.SetConfigName("config")

            config := cfg.DefaultConfig()

            if err := viper.ReadInConfig(); err != nil {
                if _, ok := err.(viper.ConfigFileNotFoundError); ok {
                    // Config file not found; use the default.
                    // This often happens when initializing a config for the first time
                } else {
                    return err
                }
            } else {
                if err := viper.Unmarshal(config); err != nil {
                    return err
                }
            }

            config.SetRoot(root)
            cli.config = config
            return nil
        },
        RunE: func(cmd *cobra.Command, args []string) error {
            privValKeyFile := cli.config.PrivValidatorKeyFile()
            privValStateFile := cli.config.PrivValidatorStateFile()
            var pv *privval.FilePV
            if tmos.FileExists(privValKeyFile) {
                pv = privval.LoadFilePV(privValKeyFile, privValStateFile)
                fmt.Print("Found private validator", "keyFile", privValKeyFile,
                    "stateFile", privValStateFile)
            } else {
                pv = privval.GenFilePV(privValKeyFile, privValStateFile)
                pv.Save()
                fmt.Print("Generated private validator", "keyFile", privValKeyFile,
                    "stateFile", privValStateFile)
            }

            nodeKeyFile := cli.config.NodeKeyFile()
            if tmos.FileExists(nodeKeyFile) {
                fmt.Print("Found node key", "path", nodeKeyFile)
            } else {
                if _, err := p2p.LoadOrGenNodeKey(nodeKeyFile); err != nil {
                    return err
                }
                fmt.Print("Generated node key", "path", nodeKeyFile)
            }

            // genesis file
            genFile := cli.config.GenesisFile()
            if tmos.FileExists(genFile) {
                fmt.Print("Found genesis file", "path", genFile)
            } else {
                genDoc := types.GenesisDoc{
                    ChainID:         fmt.Sprintf("orderbook-chain-%v", tmrand.Int()),
                    GenesisTime:     tmtime.Now(),
                    ConsensusParams: types.DefaultConsensusParams(),
                }
                pubKey, err := pv.GetPubKey()
                if err != nil {
                    return fmt.Errorf("can't get pubkey: %w", err)
                }
                genDoc.Validators = []types.GenesisValidator{{
                    Address: pubKey.Address(),
                    PubKey:  pubKey,
                    Power:   10,
                }}

                if err := genDoc.SaveAs(genFile); err != nil {
                    return err
                }
                fmt.Print("Generated genesis file", "path", genFile)
            }

            return nil
        },
    })
    cli.root.AddCommand(&cobra.Command{
        Use:   "run",
        Short: "runs an orderbook node",
        RunE: func(cmd *cobra.Command, args []string) error {
            dbProvider := node.DefaultDBProvider
            appDB, err := dbProvider(&node.DBContext{ID: "orderbook", Config: cli.config})
            if err != nil {
                return err
            }
            app, err := orderbook.New(appDB)
            if err != nil {
                return err
            }

            nodeKey, err := p2p.LoadOrGenNodeKey(cli.config.NodeKeyFile())
            if err != nil {
                return fmt.Errorf("failed to load or gen node key %s: %w", cli.config.NodeKeyFile(), err)
            }

            logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
            n, err := node.NewNode(
                cli.config,
                privval.LoadOrGenFilePV(cli.config.PrivValidatorKeyFile(), cli.config.PrivValidatorStateFile()),
                nodeKey,
                proxy.NewLocalClientCreator(app),
                node.DefaultGenesisDocProviderFunc(cli.config),
                dbProvider,
                node.DefaultMetricsProvider(cli.config.Instrumentation),
                logger,
            )
            if err != nil {
                return err
            }

            if err := n.Start(); err != nil {
                return err
            }

            tmos.TrapSignal(logger, func() {
                if err := n.Stop(); err != nil {
                    logger.Error("unable to stop the node", "error", err)
                }
            })

            return nil
        },
    })
    cli.root.AddCommand(&cobra.Command{
        Use:     "create-account [commodities...]",
        Short:   "creates a new account message and submits it to the chain",
        Example: "create-account 500BTC 10000USD",
        RunE: func(cmd *cobra.Command, args []string) error {
            return nil
        },
    })
    cli.root.AddCommand(&cobra.Command{
        Use:     "create-pair buyers-denomination sellers-denomination",
        Short:   "creates a new pair message and submits it to the chain",
        Example: "create-pair BTC USD",
        RunE: func(cmd *cobra.Command, args []string) error {
            return nil
        },
    })
    cli.root.AddCommand(&cobra.Command{
        Use:     "bid buying-commodity price",
        Short:   "creates a bid message and submits it to the chain",
        Example: "bid 10BTC 15000BTC/USD",
        RunE: func(cmd *cobra.Command, args []string) error {
            return nil
        },
    })
    cli.root.AddCommand(&cobra.Command{
        Use:     "ask selling-commodity price",
        Short:   "creates an ask message and submits it to the chain",
        Example: "ask 5BTC 12000BTC/USD",
        RunE: func(cmd *cobra.Command, args []string) error {
            return nil
        },
    })
    querySubcommand := &cobra.Command{
        Use:   "query",
        Short: "query the state of the orderbook",
        RunE: func(cmd *cobra.Command, args []string) error {
            return nil
        },
    }
    querySubcommand.AddCommand(&cobra.Command{
        Use:   "account pubkey|id",
        Short: "query the balance of an account",
        RunE: func(cmd *cobra.Command, args []string) error {
            return nil
        },
    })
    querySubcommand.AddCommand(&cobra.Command{
        Use:   "pairs",
        Short: "list all the trading pairs",
        RunE: func(cmd *cobra.Command, args []string) error {
            return nil
        },
    })
    querySubcommand.AddCommand(&cobra.Command{
        Use:     "orders pair",
        Short:   "list all current orders for a given pair",
        Example: "orders BTC/USD",
        RunE: func(cmd *cobra.Command, args []string) error {
            return nil
        },
    })
    cli.root.AddCommand(querySubcommand)

    return cli
}

// Run runs the CLI.
func (cli *CLI) Run() {
    if err := cli.root.Execute(); err != nil {
        fmt.Print(err)
        os.Exit(1)
    }
}
@@ -1,19 +0,0 @@
//go:generate go install github.com/bufbuild/buf/cmd/buf
//go:generate buf generate

// The orderbook presents a more advanced example of a Tendermint application than the simple kvstore.
//
// An orderbook is a tool used in financial markets for enabling trading of various commodities. Without
// delving into too much detail, an orderbook is made up of two types of transactions: bids and asks. An ask
// is an offer by a seller of n amount of a commodity at an AskPrice, and a bid is an offer from a buyer
// for m amount of a commodity at a BidPrice. When the bid price meets or exceeds the ask price, and the
// buyer's quantity is greater than or equal to the seller's quantity, the order is matched. In actual terms,
// we neglect the underlying denomination (i.e. USD) and effectively both participants are simultaneously a
// buyer and a seller.
//
// This example falls far short of being a decentralized orderbook, but it demonstrates how one can build an
// app-side mempool, how one can use PrepareProposal and ProcessProposal to craft complex transactions, how
// signatures can be used to validate transactions against state, how applications can manage concurrency,
// and the lifecycle of transactions from RPC -> CheckTx -> Mempool -> PrepareProposal -> ProcessProposal
// -> DeliverTx -> Commit -> Querying.

package orderbook
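To make the matching rule above concrete, here is a small illustrative sketch (not part of the package) built on the Market API from market.go, which appears later in this diff; the pair and the numbers are invented for the example:

    // Hypothetical example: the bid's max price covers the ask price (10 >= 8)
    // and its max quantity covers the ask quantity (5 >= 5), so Match fills it.
    pair := &Pair{BuyersDenomination: "EUR", SellersDenomination: "USD"}
    market := NewMarket(pair)
    market.AddAsk(&OrderAsk{Quantity: 5, AskPrice: 8})     // seller: 5 EUR at 8 USD each
    market.AddBid(&OrderBid{MaxQuantity: 5, MaxPrice: 10}) // buyer: up to 5 EUR at up to 10 USD each
    trades := market.Match() // returns a TradeSet with one MatchedOrder
    _ = trades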
@@ -1,254 +0,0 @@
package orderbook

import (
    "container/heap"
    "sync"
)

type Market struct {
    // immutable
    pair *Pair // i.e. EUR/USD (a market is bidirectional)

    mtx        sync.RWMutex
    askOrders  *AskOrders // i.e. selling EUR for USD
    lowestAsk  float64
    bidOrders  *BidOrders // i.e. buying EUR for USD
    highestBid float64
}

func NewMarket(p *Pair) *Market {
    askOrders := make(AskOrders, 0)
    bidOrders := make(BidOrders, 0)
    return &Market{pair: p, askOrders: &askOrders, bidOrders: &bidOrders}
}

func (m *Market) AddBid(b *OrderBid) {
    m.mtx.Lock()
    defer m.mtx.Unlock()
    heap.Push(m.bidOrders, b)
    if b.MaxPrice > m.highestBid {
        m.highestBid = b.MaxPrice
    }
}

func (m *Market) AddAsk(a *OrderAsk) {
    m.mtx.Lock()
    defer m.mtx.Unlock()
    heap.Push(m.askOrders, a)
    if a.AskPrice < m.lowestAsk || m.lowestAsk == 0 {
        m.lowestAsk = a.AskPrice
    }
}

// Match takes the set of bids and asks and matches them together.
// A bid matches an ask when the MaxPrice is greater than or equal to the
// AskPrice and the MaxQuantity is greater than or equal to the Quantity.
func (m *Market) Match() *TradeSet {
    m.mtx.Lock()
    defer m.mtx.Unlock()
    // if one side doesn't have any orders then there is nothing to match
    // and we return early
    if m.askOrders.Len() == 0 || m.bidOrders.Len() == 0 {
        return nil
    }

    if m.highestBid < m.lowestAsk {
        // no orders match, we return early.
        return nil
    }

    t := &TradeSet{Pair: m.pair}
    bids := make([]*OrderBid, 0)
    asks := make([]*OrderAsk, 0)

    // get all the bids that are greater than the lowest ask, in order from highest bid to lowest bid
    for m.bidOrders.Len() > 0 {
        bid := heap.Pop(m.bidOrders).(*OrderBid)
        if bid.MaxPrice < m.lowestAsk {
            // we've reached the limit, push the bid back and break the loop
            heap.Push(m.bidOrders, bid)
            break
        } else {
            bids = append(bids, bid)
        }
    }

    // get all the asks that are lower than the highest bid in the bids set, ordered from lowest to highest ask
    for m.askOrders.Len() > 0 {
        ask := heap.Pop(m.askOrders).(*OrderAsk)
        if ask.AskPrice > bids[0].MaxPrice {
            // the ask price is greater than the highest bid; push the ask back and break the loop
            heap.Push(m.askOrders, ask)
            break
        } else {
            asks = append(asks, ask)
        }
    }

    // this is to keep track of the index of the bids that have been matched
    reserved := make(map[int]struct{})

    // start from the highest ask and the highest bid and for each ask loop downwards through the slice of
    // bids until one is matched
OUTER_LOOP:
    for i := len(asks) - 1; i >= 0; i-- {
        ask := asks[i]

        // start with the highest bid and increment down since we're more likely to find a match
        for j := len(bids) - 1; j >= 0; j-- {
            if _, ok := reserved[j]; ok {
                // skip over the bids that have already been reserved
                continue
            }

            bid := bids[j]
            if bid.MaxPrice >= ask.AskPrice {
                if bid.MaxQuantity >= ask.Quantity {
                    // yay! we have a match
                    t.AddFilledOrder(ask, bid)

                    // reserve the bid so we don't rematch it with another ask
                    reserved[j] = struct{}{}
                    continue OUTER_LOOP
                }
            } else {
                // once we've dropped below the ask price there are no more possible bids and so we break
                break
            }
        }

        // as we go from highest to lowest, asks that aren't matched become the new lowest ask price
        m.lowestAsk = ask.AskPrice

        // no match found, add the ask order back into the heap
        heap.Push(m.askOrders, ask)
    }

    // if all available asks were matched then we never have the
    // opportunity to update the lowest ask, so we reset it to 0
    if m.askOrders.Len() == 0 {
        m.lowestAsk = 0
    }

    // add back the unmatched bids to the heap so they can be matched again in a later round.
    // We also need to recalculate the new highest bid. First we tackle an edge case whereby all
    // selected bids were matched. In this case we grab the next highest and set that as the new
    // highest bid
    m.highestBid = 0
    if len(reserved) == len(bids) && m.bidOrders.Len() > 0 {
        newHighestBid := heap.Pop(m.bidOrders).(*OrderBid)
        m.highestBid = newHighestBid.MaxPrice
        heap.Push(m.bidOrders, newHighestBid)
    }
    for j := 0; j < len(bids); j++ {
        if _, ok := reserved[j]; !ok {
            if bids[j].MaxPrice > m.highestBid {
                m.highestBid = bids[j].MaxPrice
            }
            heap.Push(m.bidOrders, bids[j])
        }
    }

    if len(t.MatchedOrders) == 0 {
        return nil
    }
    return t
}

func (m *Market) LowestAsk() float64 {
    m.mtx.RLock()
    defer m.mtx.RUnlock()
    return m.lowestAsk
}

func (m *Market) HighestBid() float64 {
    m.mtx.RLock()
    defer m.mtx.RUnlock()
    return m.highestBid
}

func (m *Market) GetBids() []OrderBid {
    m.mtx.RLock()
    defer m.mtx.RUnlock()
    orders := make([]OrderBid, m.bidOrders.Len())
    for idx, order := range *m.bidOrders {
        orders[idx] = *order
    }
    return orders
}

func (m *Market) GetAsks() []OrderAsk {
    m.mtx.RLock()
    defer m.mtx.RUnlock()
    orders := make([]OrderAsk, m.askOrders.Len())
    for idx, order := range *m.askOrders {
        orders[idx] = *order
    }
    return orders
}

// Heap ordered by lowest price
type AskOrders []*OrderAsk

var _ heap.Interface = (*AskOrders)(nil)

func (a AskOrders) Len() int { return len(a) }

func (a AskOrders) Less(i, j int) bool {
    return a[i].AskPrice < a[j].AskPrice
}

func (a AskOrders) Swap(i, j int) {
    a[i], a[j] = a[j], a[i]
}

func (a *AskOrders) Push(x any) {
    item := x.(*OrderAsk)
    *a = append(*a, item)
}

func (a *AskOrders) Pop() any {
    old := *a
    n := len(old)
    item := old[n-1]
    old[n-1] = nil
    *a = old[0 : n-1]
    return item
}

// Heap ordered by highest price
type BidOrders []*OrderBid

var _ heap.Interface = (*BidOrders)(nil)

func (b BidOrders) Len() int { return len(b) }

func (b BidOrders) Less(i, j int) bool {
    return b[i].MaxPrice > b[j].MaxPrice
}

func (b BidOrders) Swap(i, j int) {
    b[i], b[j] = b[j], b[i]
}

func (b *BidOrders) Push(x any) {
    item := x.(*OrderBid)
    *b = append(*b, item)
}

func (b *BidOrders) Pop() any {
    old := *b
    n := len(old)
    item := old[n-1]
    old[n-1] = nil
    *b = old[0 : n-1]
    return item
}

func (t *TradeSet) AddFilledOrder(ask *OrderAsk, bid *OrderBid) {
    t.MatchedOrders = append(t.MatchedOrders, &MatchedOrder{
        OrderAsk: ask,
        OrderBid: bid,
    })
}
@@ -1,179 +0,0 @@
package orderbook_test

import (
    "testing"

    "github.com/stretchr/testify/require"
    "github.com/tendermint/tendermint/abci/example/orderbook"
)

var testPair = &orderbook.Pair{BuyersDenomination: "ATOM", SellersDenomination: "USD"}

func testBid(price, quantity float64) *orderbook.OrderBid {
    return &orderbook.OrderBid{
        MaxPrice:    price,
        MaxQuantity: quantity,
    }
}

func testAsk(price, quantity float64) *orderbook.OrderAsk {
    return &orderbook.OrderAsk{
        AskPrice: price,
        Quantity: quantity,
    }
}

func TestTrackLowestAndHighestPrices(t *testing.T) {
    market := orderbook.NewMarket(testPair)
    require.Zero(t, market.LowestAsk())
    require.Zero(t, market.HighestBid())

    market.AddBid(testBid(100, 10))
    require.EqualValues(t, 100, market.HighestBid())

    market.AddAsk(testAsk(50, 10))
    require.EqualValues(t, 50, market.LowestAsk())

    market.AddAsk(testAsk(30, 10))
    require.EqualValues(t, 30, market.LowestAsk())

    market.AddAsk(testAsk(40, 10))
    require.EqualValues(t, 30, market.LowestAsk())
}

func TestSimpleOrderMatching(t *testing.T) {
    testcases := []struct {
        bid   *orderbook.OrderBid
        ask   *orderbook.OrderAsk
        match bool
    }{
        {
            bid:   testBid(50, 10),
            ask:   testAsk(50, 10),
            match: true,
        },
        {
            bid:   testBid(60, 10),
            ask:   testAsk(50, 10),
            match: true,
        },
        {
            bid:   testBid(50, 10),
            ask:   testAsk(60, 10),
            match: false,
        },
        {
            bid:   testBid(50, 5),
            ask:   testAsk(40, 10),
            match: false,
        },
        {
            bid:   testBid(50, 15),
            ask:   testAsk(40, 10),
            match: true,
        },
    }

    for idx, tc := range testcases {
        market := orderbook.NewMarket(testPair)
        market.AddAsk(tc.ask)
        market.AddBid(tc.bid)
        resp := market.Match()
        if tc.match {
            require.Len(t, resp.MatchedOrders, 1, idx)
        } else {
            require.Nil(t, resp)
        }
    }
}

func TestMultiOrderMatching(t *testing.T) {
    testcases := []struct {
        bids               []*orderbook.OrderBid
        asks               []*orderbook.OrderAsk
        expected           []*orderbook.MatchedOrder
        expectedLowestAsk  float64
        expectedHighestBid float64
    }{
        {
            bids: []*orderbook.OrderBid{
                testBid(50, 20),
                testBid(40, 10),
                testBid(30, 15),
            },
            asks: []*orderbook.OrderAsk{
                testAsk(30, 15),
                testAsk(30, 5),
            },
            expected: []*orderbook.MatchedOrder{
                {
                    OrderAsk: testAsk(30, 5),
                    OrderBid: testBid(30, 15),
                },
                {
                    OrderAsk: testAsk(30, 15),
                    OrderBid: testBid(50, 20),
                },
            },
            expectedLowestAsk:  0,
            expectedHighestBid: 40,
        },
        {
            bids: []*orderbook.OrderBid{
                testBid(60, 20),
                testBid(80, 5),
            },
            asks: []*orderbook.OrderAsk{
                testAsk(60, 15),
                testAsk(70, 10),
                testAsk(50, 20),
            },
            expected: []*orderbook.MatchedOrder{
                {
                    OrderAsk: testAsk(60, 15),
                    OrderBid: testBid(60, 20),
                },
            },
            expectedLowestAsk:  50,
            expectedHighestBid: 80,
        },
        {
            bids: []*orderbook.OrderBid{
                testBid(60, 20),
                testBid(80, 5),
            },
            asks:               []*orderbook.OrderAsk{},
            expected:           []*orderbook.MatchedOrder{},
            expectedLowestAsk:  0,
            expectedHighestBid: 80,
        },
        {
            bids: []*orderbook.OrderBid{},
            asks: []*orderbook.OrderAsk{
                testAsk(70, 10),
                testAsk(50, 20),
            },
            expected:           []*orderbook.MatchedOrder{},
            expectedLowestAsk:  50,
            expectedHighestBid: 0,
        },
    }

    for idx, tc := range testcases {
        market := orderbook.NewMarket(testPair)
        for _, ask := range tc.asks {
            market.AddAsk(ask)
        }
        for _, bid := range tc.bids {
            market.AddBid(bid)
        }
        resp := market.Match()
        if len(tc.expected) == 0 {
            require.Nil(t, resp, idx)
        } else {
            require.Equal(t, tc.expected, resp.MatchedOrders, idx)
        }
        require.EqualValues(t, tc.expectedLowestAsk, market.LowestAsk(), idx)
        require.EqualValues(t, tc.expectedHighestBid, market.HighestBid(), idx)
    }
}
File diff suppressed because it is too large
@@ -1,40 +0,0 @@
syntax = "proto3";

package orderbook;
option go_package = "github.com/tendermint/tendermint/abci/example/orderbook";

import "wire.proto";

message MsgBid {
  Pair pair = 1;
  OrderBid bid_order = 2;
}

message MsgAsk {
  Pair pair = 1;
  OrderAsk ask_order = 2;
}

message MsgCreateAccount {
  bytes public_key = 1;
  repeated Commodity commodities = 2;
}

message MsgRegisterPair {
  Pair pair = 1;
}

message MsgTradeSet {
  TradeSet trade_set = 1;
}

message Msg {
  // a Msg has to be one of the below
  oneof sum {
    MsgBid msg_bid = 1;
    MsgAsk msg_ask = 2;
    MsgRegisterPair msg_register_pair = 3;
    MsgCreateAccount msg_create_account = 4;
    MsgTradeSet msg_trade_set = 5;
  }
}
Binary file not shown.
@@ -1,31 +0,0 @@
package orderbook

// Query the state of an account (returns a concrete copy)
func (sm *StateMachine) Account(id uint64) Account {
    if int(id) >= len(sm.accounts) {
        return Account{}
    }
    return *sm.accounts[id]
}

// Query all the pairs that the orderbook has (returns a concrete copy)
func (sm *StateMachine) Pairs() []Pair {
    pairs := make([]Pair, len(sm.pairs))
    idx := 0
    for _, pair := range sm.pairs {
        pairs[idx] = *pair
        idx++
    }
    return pairs
}

// Query the current orders for a pair (returns concrete copies)
func (sm *StateMachine) Orders(pair *Pair) ([]OrderBid, []OrderAsk) {
    market, ok := sm.markets[pair.String()]
    if !ok {
        return nil, nil
    }
    return market.GetBids(), market.GetAsks()
}

func (sm *StateMachine) Height() int64 { return sm.lastHeight }
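For orientation, a hedged sketch of how these read-only accessors might be used; app is assumed here to be the *StateMachine returned by orderbook.New:

    account := app.Account(0)           // concrete copy; IsEmpty() reports an unknown id
    pairs := app.Pairs()                // every registered trading pair
    bids, asks := app.Orders(&pairs[0]) // resting orders, assuming at least one pair is registered
    _, _, _ = account, bids, asks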
@@ -1,299 +0,0 @@
package orderbook

import (
    "bytes"
    "encoding/binary"
    "errors"
    "fmt"
    "math"

    "github.com/tendermint/tendermint/crypto"
    "github.com/tendermint/tendermint/crypto/ed25519"
)

func NewMsgBid(pair *Pair, maxPrice, maxQuantity float64, ownerId uint64) *MsgBid {
    return &MsgBid{
        Pair: pair,
        BidOrder: &OrderBid{
            MaxPrice:    maxPrice,
            MaxQuantity: maxQuantity,
            OwnerId:     ownerId,
        },
    }
}

func (msg *MsgBid) Sign(pk crypto.PrivKey) error {
    sig, err := pk.Sign(msg.BidOrder.DeterministicSignatureBytes(msg.Pair))
    if err != nil {
        return err
    }
    msg.BidOrder.Signature = sig
    return nil
}

func (msg *MsgBid) ValidateBasic() error {
    if err := msg.BidOrder.ValidateBasic(); err != nil {
        return err
    }

    if err := msg.Pair.ValidateBasic(); err != nil {
        return err
    }

    if len(msg.BidOrder.Signature) != ed25519.SignatureSize {
        return errors.New("invalid signature size")
    }

    return nil
}

func NewMsgAsk(pair *Pair, askPrice, quantity float64, ownerId uint64) *MsgAsk {
    return &MsgAsk{
        Pair: pair,
        AskOrder: &OrderAsk{
            AskPrice: askPrice,
            Quantity: quantity,
            OwnerId:  ownerId,
        },
    }
}

func (msg *MsgAsk) Sign(pk crypto.PrivKey) error {
    sig, err := pk.Sign(msg.AskOrder.DeterministicSignatureBytes(msg.Pair))
    if err != nil {
        return err
    }
    msg.AskOrder.Signature = sig
    return nil
}

func (msg *MsgAsk) ValidateBasic() error {
    if err := msg.AskOrder.ValidateBasic(); err != nil {
        return err
    }

    if err := msg.Pair.ValidateBasic(); err != nil {
        return err
    }

    return nil
}

func NewMsgCreateAccount(commodities ...*Commodity) (*MsgCreateAccount, crypto.PrivKey) {
    pk := ed25519.GenPrivKey()
    return &MsgCreateAccount{
        PublicKey:   pk.PubKey().Bytes(),
        Commodities: commodities,
    }, pk
}

func (msg *MsgCreateAccount) ValidateBasic() error {
    if len(msg.PublicKey) != ed25519.PubKeySize {
        return errors.New("invalid pub key size")
    }

    uniqueMap := make(map[string]struct{}, len(msg.Commodities))
    for _, c := range msg.Commodities {
        if err := c.ValidateBasic(); err != nil {
            return err
        }

        if _, ok := uniqueMap[c.Denom]; ok {
            return fmt.Errorf("commodity %s declared twice", c.Denom)
        }
        uniqueMap[c.Denom] = struct{}{}
    }

    return nil
}

func NewMsgRegisterPair(pair *Pair) *MsgRegisterPair {
    return &MsgRegisterPair{Pair: pair}
}

func (msg *MsgRegisterPair) ValidateBasic() error {
    return msg.Pair.ValidateBasic()
}

func NewCommodity(denom string, quantity float64) *Commodity {
    return &Commodity{
        Denom:    denom,
        Quantity: quantity,
    }
}

func (c *Commodity) ValidateBasic() error {
    if c.Quantity <= 0 {
        return errors.New("quantity must be greater than zero")
    }

    return nil
}

func (p *Pair) ValidateBasic() error {
    if p.BuyersDenomination == "" || p.SellersDenomination == "" {
        return errors.New("inbound and outbound commodities must be present")
    }

    if p.BuyersDenomination == p.SellersDenomination {
        return errors.New("commodities must not be the same")
    }

    return nil
}

func (o *OrderBid) ValidateBasic() error {
    if o.MaxQuantity == 0 {
        return errors.New("max quantity must be non zero")
    }

    if o.MaxPrice <= 0 {
        return errors.New("max price must be greater than 0")
    }

    if len(o.Signature) != ed25519.SignatureSize {
        return errors.New("invalid signature size")
    }

    return nil
}

func (o *OrderBid) ValidateSignature(pk crypto.PubKey, pair *Pair) bool {
    return pk.VerifySignature(o.DeterministicSignatureBytes(pair), o.Signature)
}

func (o *OrderBid) DeterministicSignatureBytes(pair *Pair) []byte {
    buf := bytes.NewBuffer(nil)
    buf.WriteString(pair.SellersDenomination)
    buf.WriteString(pair.BuyersDenomination)
    bz := buf.Bytes()
    bz = binary.BigEndian.AppendUint64(bz, math.Float64bits(o.MaxQuantity))
    bz = binary.BigEndian.AppendUint64(bz, math.Float64bits(o.MaxPrice))
    return bz
}

func (m *MatchedOrder) ValidateBasic() error {
    if err := m.OrderAsk.ValidateBasic(); err != nil {
        return err
    }

    if err := m.OrderBid.ValidateBasic(); err != nil {
        return err
    }

    return nil
}

func (t *TradeSet) ValidateBasic() error {
    for _, matchedOrder := range t.MatchedOrders {
        if err := matchedOrder.ValidateBasic(); err != nil {
            return err
        }
        // checking if there is an account
        if matchedOrder.OrderAsk.OwnerId == 0 {
            return errors.New("owner id must be greater than zero")
        }
    }
    // validate that the pair is valid
    if err := t.Pair.ValidateBasic(); err != nil {
        return err
    }

    return nil
}

func (o *OrderAsk) ValidateBasic() error {
    if o.Quantity == 0 {
        return errors.New("outbound quantity must be non zero")
    }

    if o.AskPrice <= 0 {
        return errors.New("ask price must be greater than 0")
    }

    if len(o.Signature) != ed25519.SignatureSize {
        return errors.New("invalid signature size")
    }

    return nil
}

func (o *OrderAsk) ValidateSignature(pk crypto.PubKey, pair *Pair) bool {
    return pk.VerifySignature(o.DeterministicSignatureBytes(pair), o.Signature)
}

func (o *OrderAsk) DeterministicSignatureBytes(pair *Pair) []byte {
    buf := bytes.NewBuffer(nil)
    buf.WriteString(pair.BuyersDenomination)
    buf.WriteString(pair.SellersDenomination)
    bz := buf.Bytes()
    bz = binary.BigEndian.AppendUint64(bz, math.Float64bits(o.Quantity))
    bz = binary.BigEndian.AppendUint64(bz, math.Float64bits(o.AskPrice))
    return bz
}

func (a Account) IsEmpty() bool {
    return len(a.PublicKey) == 0
}

func (a *Account) FindCommidity(denom string) *Commodity {
    for _, c := range a.Commodities {
        if c.Denom == denom {
            return c
        }
    }

    return nil
}

func (a *Account) AddCommodity(c *Commodity) {
    curr := a.FindCommidity(c.Denom)
    if curr == nil {
        a.Commodities = append(a.Commodities, c)
    } else {
        curr.Quantity += c.Quantity
    }
}

func (a *Account) SubtractCommodity(c *Commodity) {
    curr := a.FindCommidity(c.Denom)
    if curr == nil {
        panic("trying to remove a commodity the account does not have")
    }
    curr.Quantity -= c.Quantity
}

func (msg *Msg) ValidateBasic() error {
    switch m := msg.Sum.(type) {
    case *Msg_MsgRegisterPair:
        if err := m.MsgRegisterPair.ValidateBasic(); err != nil {
            return err
        }

    case *Msg_MsgCreateAccount:
        if err := m.MsgCreateAccount.ValidateBasic(); err != nil {
            return err
        }

    case *Msg_MsgBid:
        if err := m.MsgBid.ValidateBasic(); err != nil {
            return err
        }

    case *Msg_MsgAsk:
        if err := m.MsgAsk.ValidateBasic(); err != nil {
            return err
        }

    case *Msg_MsgTradeSet:
        if err := m.MsgTradeSet.TradeSet.ValidateBasic(); err != nil {
            return err
        }

    default:
        return errors.New("unknown tx")
    }

    return nil
}
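Taken together, the constructors, Sign, and ValidateSignature above form a deterministic sign/verify round trip. A minimal sketch, assuming pair is a valid *Pair and using only helpers defined in this file (the values are illustrative):

    pk := ed25519.GenPrivKey()
    msg := NewMsgAsk(pair, 8, 5, 1) // ask: 5 units at a price of 8, owned by account 1
    if err := msg.Sign(pk); err != nil {
        panic(err)
    }
    // true, because signer and verifier derive the same DeterministicSignatureBytes
    ok := msg.AskOrder.ValidateSignature(pk.PubKey(), pair)
    _ = ok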
File diff suppressed because it is too large
@@ -1,52 +0,0 @@
syntax = "proto3";

package orderbook;
option go_package = "github.com/tendermint/tendermint/abci/example/orderbook";

message OrderAsk {
  double quantity = 1;
  double ask_price = 2;
  uint64 owner_id = 3;
  bytes signature = 4;
}

message OrderBid {
  double max_quantity = 1;
  double max_price = 2;
  uint64 owner_id = 3;
  bytes signature = 4;
}

message Pair {
  // the denomination that the buyer receives i.e. EUR
  string buyers_denomination = 1;
  // the denomination that the seller receives i.e. USD
  string sellers_denomination = 2;
}

message Commodity {
  string denom = 1;
  double quantity = 2;
}

// Account is the atomic piece of information that is persisted to disk.
message Account {
  uint64 index = 1;
  bytes public_key = 2;
  // the set of commodities that the account has
  repeated Commodity commodities = 3;
}

// TradeSet is the transaction that is eventually committed in a block.
// It is derived from a group of MsgBids and MsgAsks.
message TradeSet {
  Pair pair = 1; // i.e. EUR/USD
  // the set of matched trades for that pair
  repeated MatchedOrder matched_orders = 2;
}

message MatchedOrder {
  OrderAsk order_ask = 1;
  OrderBid order_bid = 2;
}
@@ -1,6 +1,7 @@
 package server

 import (
+	"context"
 	"net"

 	"google.golang.org/grpc"
@@ -18,11 +19,11 @@ type GRPCServer struct {
 	listener net.Listener
 	server   *grpc.Server

-	app types.ABCIApplicationServer
+	app types.Application
 }

 // NewGRPCServer returns a new gRPC ABCI server
-func NewGRPCServer(protoAddr string, app types.ABCIApplicationServer) service.Service {
+func NewGRPCServer(protoAddr string, app types.Application) service.Service {
 	proto, addr := tmnet.ProtocolAndAddress(protoAddr)
 	s := &GRPCServer{
 		proto: proto,
@@ -44,7 +45,7 @@ func (s *GRPCServer) OnStart() error {

 	s.listener = ln
 	s.server = grpc.NewServer()
-	types.RegisterABCIApplicationServer(s.server, s.app)
+	types.RegisterABCIServer(s.server, &gRPCApplication{s.app})

 	s.Logger.Info("Listening", "proto", s.proto, "addr", s.addr)
 	go func() {
@@ -59,3 +60,18 @@ func (s *GRPCServer) OnStart() error {
 func (s *GRPCServer) OnStop() {
 	s.server.Stop()
 }
+
+//-------------------------------------------------------
+
+// gRPCApplication is a gRPC shim for Application
+type gRPCApplication struct {
+	types.Application
+}
+
+func (app *gRPCApplication) Echo(_ context.Context, req *types.RequestEcho) (*types.ResponseEcho, error) {
+	return &types.ResponseEcho{Message: req.Message}, nil
+}
+
+func (app *gRPCApplication) Flush(_ context.Context, req *types.RequestFlush) (*types.ResponseFlush, error) {
+	return &types.ResponseFlush{}, nil
+}
@@ -14,6 +14,8 @@ import (
 	"github.com/tendermint/tendermint/libs/service"
 )

+// NewServer is a utility function for out-of-process applications to set up either a socket or
+// gRPC server that can listen to requests from the equivalent Tendermint client.
 func NewServer(protoAddr, transport string, app types.Application) (service.Service, error) {
 	var s service.Service
 	var err error
@@ -21,7 +23,7 @@ func NewServer(protoAddr, transport string, app types.Application) (service.Service, error) {
 	case "socket":
 		s = NewSocketServer(protoAddr, app)
 	case "grpc":
-		s = NewGRPCServer(protoAddr, types.NewGRPCApplication(app))
+		s = NewGRPCServer(protoAddr, app)
 	default:
 		err = fmt.Errorf("unknown server type %s", transport)
 	}
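A hedged sketch of standing up an out-of-process ABCI server with this helper; the address and transport strings are illustrative, and app is assumed to be any value implementing types.Application:

    srv, err := server.NewServer("tcp://0.0.0.0:26658", "socket", app)
    if err != nil {
        panic(err)
    }
    if err := srv.Start(); err != nil {
        panic(err)
    }
    defer func() { _ = srv.Stop() }()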
@@ -2,6 +2,8 @@ package server

 import (
 	"bufio"
+	"context"
+	"errors"
 	"fmt"
 	"io"
 	"net"
@@ -15,8 +17,11 @@ import (
 	tmsync "github.com/tendermint/tendermint/libs/sync"
 )

-// var maxNumberConnections = 2
-
+// SocketServer is the server-side implementation of the TSP (Tendermint Socket Protocol)
+// for out-of-process go applications. Note, in the case of an application written in golang,
+// the developer may also run both Tendermint and the application within the same process.
+//
+// The socket server delivers requests to the application serially and streams the responses back.
 type SocketServer struct {
 	service.BaseService
 	isLoggerSet bool
@@ -33,6 +38,9 @@ type SocketServer struct {
 	app types.Application
 }

+const responseBufferSize = 1000
+
+// NewSocketServer creates a server from a golang-based out-of-process application.
 func NewSocketServer(protoAddr string, app types.Application) service.Service {
 	proto, addr := tmnet.ProtocolAndAddress(protoAddr)
 	s := &SocketServer{
@@ -120,8 +128,8 @@ func (s *SocketServer) acceptConnectionsRoutine() {

 	connID := s.addConn(conn)

-	closeConn := make(chan error, 2)              // Push to signal connection closed
-	responses := make(chan *types.Response, 1000) // A channel to buffer responses
+	closeConn := make(chan error, 2)                            // Push to signal connection closed
+	responses := make(chan *types.Response, responseBufferSize) // A channel to buffer responses

 	// Read requests from conn and deal with them
 	go s.handleRequests(closeConn, conn, responses)
@@ -157,7 +165,9 @@ func (s *SocketServer) handleRequests(closeConn chan error, conn io.Reader, responses chan<- *types.Response) {
 	var bufReader = bufio.NewReader(conn)

 	defer func() {
-		// make sure to recover from any app-related panics to allow proper socket cleanup
+		// make sure to recover from any app-related panics to allow proper socket cleanup.
+		// In the case of a panic, we do not notify the client by passing back an exception, so
+		// we presume that the client is still running and retrying to connect
 		r := recover()
 		if r != nil {
 			const size = 64 << 10
@@ -186,61 +196,112 @@ func (s *SocketServer) handleRequests(closeConn chan error, conn io.Reader, responses chan<- *types.Response) {
 		}
 		s.appMtx.Lock()
 		count++
-		s.handleRequest(req, responses)
+		resp, err := s.handleRequest(context.TODO(), req)
+		if err != nil {
+			// any error either from the application or because of an unknown request
+			// throws an exception back to the client. This will stop the server and
+			// should also halt the client.
+			responses <- types.ToResponseException(err.Error())
+		} else {
+			responses <- resp
+		}
 		s.appMtx.Unlock()
 	}
 }

-func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types.Response) {
+// handleRequest takes a request, calls the application, and passes back the returned
+// response or error.
+func (s *SocketServer) handleRequest(ctx context.Context, req *types.Request) (*types.Response, error) {
 	switch r := req.Value.(type) {
 	case *types.Request_Echo:
-		responses <- types.ToResponseEcho(r.Echo.Message)
+		return types.ToResponseEcho(r.Echo.Message), nil
 	case *types.Request_Flush:
-		responses <- types.ToResponseFlush()
+		return types.ToResponseFlush(), nil
 	case *types.Request_Info:
-		res := s.app.Info(*r.Info)
-		responses <- types.ToResponseInfo(res)
-	case *types.Request_DeliverTx:
-		res := s.app.DeliverTx(*r.DeliverTx)
-		responses <- types.ToResponseDeliverTx(res)
+		res, err := s.app.Info(ctx, r.Info)
+		if err != nil {
+			return nil, err
+		}
+		return types.ToResponseInfo(res), nil
 	case *types.Request_CheckTx:
-		res := s.app.CheckTx(*r.CheckTx)
-		responses <- types.ToResponseCheckTx(res)
+		res, err := s.app.CheckTx(ctx, r.CheckTx)
+		if err != nil {
+			return nil, err
+		}
+		return types.ToResponseCheckTx(res), nil
 	case *types.Request_Commit:
-		res := s.app.Commit()
-		responses <- types.ToResponseCommit(res)
+		res, err := s.app.Commit(ctx, r.Commit)
+		if err != nil {
+			return nil, err
+		}
+		return types.ToResponseCommit(res), nil
 	case *types.Request_Query:
-		res := s.app.Query(*r.Query)
-		responses <- types.ToResponseQuery(res)
+		res, err := s.app.Query(ctx, r.Query)
+		if err != nil {
+			return nil, err
+		}
+		return types.ToResponseQuery(res), nil
 	case *types.Request_InitChain:
-		res := s.app.InitChain(*r.InitChain)
-		responses <- types.ToResponseInitChain(res)
-	case *types.Request_BeginBlock:
-		res := s.app.BeginBlock(*r.BeginBlock)
-		responses <- types.ToResponseBeginBlock(res)
-	case *types.Request_EndBlock:
-		res := s.app.EndBlock(*r.EndBlock)
-		responses <- types.ToResponseEndBlock(res)
+		res, err := s.app.InitChain(ctx, r.InitChain)
+		if err != nil {
+			return nil, err
+		}
+		return types.ToResponseInitChain(res), nil
+	case *types.Request_FinalizeBlock:
+		res, err := s.app.FinalizeBlock(ctx, r.FinalizeBlock)
+		if err != nil {
+			return nil, err
+		}
+		return types.ToResponseFinalizeBlock(res), nil
 	case *types.Request_ListSnapshots:
-		res := s.app.ListSnapshots(*r.ListSnapshots)
-		responses <- types.ToResponseListSnapshots(res)
+		res, err := s.app.ListSnapshots(ctx, r.ListSnapshots)
+		if err != nil {
+			return nil, err
+		}
+		return types.ToResponseListSnapshots(res), nil
 	case *types.Request_OfferSnapshot:
-		res := s.app.OfferSnapshot(*r.OfferSnapshot)
-		responses <- types.ToResponseOfferSnapshot(res)
+		res, err := s.app.OfferSnapshot(ctx, r.OfferSnapshot)
+		if err != nil {
+			return nil, err
+		}
+		return types.ToResponseOfferSnapshot(res), nil
 	case *types.Request_PrepareProposal:
-		res := s.app.PrepareProposal(*r.PrepareProposal)
-		responses <- types.ToResponsePrepareProposal(res)
+		res, err := s.app.PrepareProposal(ctx, r.PrepareProposal)
+		if err != nil {
+			return nil, err
+		}
+		return types.ToResponsePrepareProposal(res), nil
 	case *types.Request_ProcessProposal:
-		res := s.app.ProcessProposal(*r.ProcessProposal)
-		responses <- types.ToResponseProcessProposal(res)
+		res, err := s.app.ProcessProposal(ctx, r.ProcessProposal)
+		if err != nil {
+			return nil, err
+		}
+		return types.ToResponseProcessProposal(res), nil
 	case *types.Request_LoadSnapshotChunk:
-		res := s.app.LoadSnapshotChunk(*r.LoadSnapshotChunk)
-		responses <- types.ToResponseLoadSnapshotChunk(res)
+		res, err := s.app.LoadSnapshotChunk(ctx, r.LoadSnapshotChunk)
+		if err != nil {
+			return nil, err
+		}
+		return types.ToResponseLoadSnapshotChunk(res), nil
 	case *types.Request_ApplySnapshotChunk:
-		res := s.app.ApplySnapshotChunk(*r.ApplySnapshotChunk)
-		responses <- types.ToResponseApplySnapshotChunk(res)
+		res, err := s.app.ApplySnapshotChunk(ctx, r.ApplySnapshotChunk)
+		if err != nil {
+			return nil, err
+		}
+		return types.ToResponseApplySnapshotChunk(res), nil
+	case *types.Request_ExtendVote:
+		res, err := s.app.ExtendVote(ctx, r.ExtendVote)
+		if err != nil {
+			return nil, err
+		}
+		return types.ToResponseExtendVote(res), nil
+	case *types.Request_VerifyVoteExtension:
+		res, err := s.app.VerifyVoteExtension(ctx, r.VerifyVoteExtension)
+		if err != nil {
+			return nil, err
+		}
+		return types.ToResponseVerifyVoteExtension(res), nil
 	default:
-		responses <- types.ToResponseException("Unknown request")
+		return nil, fmt.Errorf("unknown request from client: %T", req)
 	}
 }
@@ -262,6 +323,13 @@ func (s *SocketServer) handleResponses(closeConn chan error, conn io.Writer, responses <-chan *types.Response) {
 				return
 			}
 		}
+
+		// If the application has responded with an exception, the server returns the error
+		// back to the client and closes the connection. The receiving Tendermint client should
+		// log the error and gracefully terminate.
+		if e, ok := res.Value.(*types.Response_Exception); ok {
+			closeConn <- errors.New(e.Exception.Error)
+		}
 		count++
 	}
 }
@@ -11,20 +11,29 @@ import (
 )

 func TestClientServerNoAddrPrefix(t *testing.T) {
+	t.Helper()
+
 	addr := "localhost:26658"
 	transport := "socket"
-	app := kvstore.NewApplication()
+	app := kvstore.NewInMemoryApplication()

 	server, err := abciserver.NewServer(addr, transport, app)
 	assert.NoError(t, err, "expected no error on NewServer")
 	err = server.Start()
 	assert.NoError(t, err, "expected no error on server.Start")
-	defer func() { _ = server.Stop() }()
+	t.Cleanup(func() {
+		if err := server.Stop(); err != nil {
+			t.Error(err)
+		}
+	})

 	client, err := abciclient.NewClient(addr, transport, true)
 	assert.NoError(t, err, "expected no error on NewClient")
 	err = client.Start()
 	assert.NoError(t, err, "expected no error on client.Start")

-	_ = client.Stop()
+	t.Cleanup(func() {
+		if err := client.Stop(); err != nil {
+			t.Error(err)
+		}
+	})
 }
@@ -2,6 +2,7 @@ package testsuite
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
@@ -10,7 +11,7 @@ import (
|
||||
tmrand "github.com/tendermint/tendermint/libs/rand"
|
||||
)
|
||||
|
||||
func InitChain(client abcicli.Client) error {
|
||||
func InitChain(ctx context.Context, client abcicli.Client) error {
|
||||
total := 10
|
||||
vals := make([]types.ValidatorUpdate, total)
|
||||
for i := 0; i < total; i++ {
|
||||
@@ -18,7 +19,7 @@ func InitChain(client abcicli.Client) error {
|
||||
power := tmrand.Int()
|
||||
vals[i] = types.UpdateValidator(pubkey, int64(power), "")
|
||||
}
|
||||
_, err := client.InitChainSync(types.RequestInitChain{
|
||||
_, err := client.InitChain(ctx, &types.RequestInitChain{
|
||||
Validators: vals,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -29,44 +30,46 @@ func InitChain(client abcicli.Client) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func Commit(client abcicli.Client, hashExp []byte) error {
	res, err := client.CommitSync()
	data := res.Data
func Commit(ctx context.Context, client abcicli.Client) error {
	_, err := client.Commit(ctx, &types.RequestCommit{})
	if err != nil {
		fmt.Println("Failed test: Commit")
		fmt.Printf("error while committing: %v\n", err)
		return err
	}
	if !bytes.Equal(data, hashExp) {
		fmt.Println("Failed test: Commit")
		fmt.Printf("Commit hash was unexpected. Got %X expected %X\n", data, hashExp)
		return errors.New("commitTx failed")
	}
	fmt.Println("Passed test: Commit")
	return nil
}

func DeliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
	res, _ := client.DeliverTxSync(types.RequestDeliverTx{Tx: txBytes})
	code, data, log := res.Code, res.Data, res.Log
	if code != codeExp {
		fmt.Println("Failed test: DeliverTx")
		fmt.Printf("DeliverTx response code was unexpected. Got %v expected %v. Log: %v\n",
			code, codeExp, log)
		return errors.New("deliverTx error")
func FinalizeBlock(ctx context.Context, client abcicli.Client, txBytes [][]byte, codeExp []uint32, dataExp []byte, hashExp []byte) error {
	res, _ := client.FinalizeBlock(ctx, &types.RequestFinalizeBlock{Txs: txBytes})
	appHash := res.AgreedAppData
	for i, tx := range res.TxResults {
		code, data, log := tx.Code, tx.Data, tx.Log
		if code != codeExp[i] {
			fmt.Println("Failed test: FinalizeBlock")
			fmt.Printf("FinalizeBlock response code was unexpected. Got %v expected %v. Log: %v\n",
				code, codeExp, log)
			return errors.New("FinalizeBlock error")
		}
		if !bytes.Equal(data, dataExp) {
			fmt.Println("Failed test: FinalizeBlock")
			fmt.Printf("FinalizeBlock response data was unexpected. Got %X expected %X\n",
				data, dataExp)
			return errors.New("FinalizeBlock error")
		}
	}
	if !bytes.Equal(data, dataExp) {
		fmt.Println("Failed test: DeliverTx")
		fmt.Printf("DeliverTx response data was unexpected. Got %X expected %X\n",
			data, dataExp)
		return errors.New("deliverTx error")
	if !bytes.Equal(appHash, hashExp) {
		fmt.Println("Failed test: FinalizeBlock")
		fmt.Printf("Application hash was unexpected. Got %X expected %X\n", appHash, hashExp)
		return errors.New("FinalizeBlock error")
	}
	fmt.Println("Passed test: DeliverTx")
	fmt.Println("Passed test: FinalizeBlock")
	return nil
}

func PrepareProposal(client abcicli.Client, txBytes [][]byte, txExpected [][]byte, dataExp []byte) error {
	res, _ := client.PrepareProposalSync(types.RequestPrepareProposal{Txs: txBytes})
func PrepareProposal(ctx context.Context, client abcicli.Client, txBytes [][]byte, txExpected [][]byte, dataExp []byte) error {
	res, _ := client.PrepareProposal(ctx, &types.RequestPrepareProposal{Txs: txBytes})
	for i, tx := range res.Txs {
		if !bytes.Equal(tx, txExpected[i]) {
			fmt.Println("Failed test: PrepareProposal")
@@ -79,8 +82,8 @@ func PrepareProposal(client abcicli.Client, txBytes [][]byte, txExpected [][]byt
	return nil
}

func ProcessProposal(client abcicli.Client, txBytes [][]byte, statusExp types.ResponseProcessProposal_ProposalStatus) error {
	res, _ := client.ProcessProposalSync(types.RequestProcessProposal{Txs: txBytes})
func ProcessProposal(ctx context.Context, client abcicli.Client, txBytes [][]byte, statusExp types.ResponseProcessProposal_ProposalStatus) error {
	res, _ := client.ProcessProposal(ctx, &types.RequestProcessProposal{Txs: txBytes})
	if res.Status != statusExp {
		fmt.Println("Failed test: ProcessProposal")
		fmt.Printf("ProcessProposal response status was unexpected. Got %v expected %v.",
@@ -91,8 +94,8 @@ func ProcessProposal(client abcicli.Client, txBytes [][]byte, statusExp types.Re
	return nil
}

func CheckTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
	res, _ := client.CheckTxSync(types.RequestCheckTx{Tx: txBytes})
func CheckTx(ctx context.Context, client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
	res, _ := client.CheckTx(ctx, &types.RequestCheckTx{Tx: txBytes})
	code, data, log := res.Code, res.Data, res.Log
	if code != codeExp {
		fmt.Println("Failed test: CheckTx")

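Taken together, the rewritten helpers give a context-first flow through the new ABCI++ surface. A sketch of how a driver might chain them, assuming the testsuite import path used by this repository and an already-started abcicli.Client; the expected codes, data, and app hash below are placeholders that depend entirely on the application under test:

package example

import (
	"context"

	abcicli "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/tests/server" // package testsuite; path assumed
)

func runSuite(ctx context.Context, client abcicli.Client) error {
	if err := testsuite.InitChain(ctx, client); err != nil {
		return err
	}
	tx := []byte("abc=123")
	// Code 0 is CodeTypeOK; the nil data/hash arguments are placeholders,
	// not real expectations for any particular application.
	if err := testsuite.CheckTx(ctx, client, tx, 0, nil); err != nil {
		return err
	}
	if err := testsuite.FinalizeBlock(ctx, client, [][]byte{tx}, []uint32{0}, nil, nil); err != nil {
		return err
	}
	return testsuite.Commit(ctx, client)
}
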
@@ -1,18 +1,12 @@
echo hello
info
prepare_proposal "abc"
process_proposal "abc"
prepare_proposal "abc=123"
process_proposal "abc=123"
finalize_block "abc=123"
commit
deliver_tx "abc"
info
commit
query "abc"
deliver_tx "def=xyz"
finalize_block "def=xyz" "ghi=123"
commit
query "def"
prepare_proposal "preparedef"
process_proposal "replacedef"
process_proposal "preparedef"
prepare_proposal
process_proposal
commit

@@ -8,19 +8,20 @@
-> data: {"size":0}
-> data.hex: 0x7B2273697A65223A307D

> prepare_proposal "abc"
> prepare_proposal "abc=123"
-> code: OK
-> log: Succeeded. Tx: abc
-> log: Succeeded. Tx: abc=123

> process_proposal "abc"
> process_proposal "abc=123"
-> code: OK
-> status: ACCEPT

> commit
> finalize_block "abc=123"
-> code: OK
-> data.hex: 0x0000000000000000
-> code: OK
-> data.hex: 0x0200000000000000

> deliver_tx "abc"
> commit
-> code: OK

> info
@@ -28,54 +29,30 @@
-> data: {"size":1}
-> data.hex: 0x7B2273697A65223A317D

> commit
-> code: OK
-> data.hex: 0x0200000000000000

> query "abc"
-> code: OK
-> log: exists
-> height: 2
-> height: 0
-> key: abc
-> key.hex: 616263
-> value: abc
-> value.hex: 616263
-> value: 123
-> value.hex: 313233

> deliver_tx "def=xyz"
> finalize_block "def=xyz" "ghi=123"
-> code: OK
-> code: OK
-> code: OK
-> data.hex: 0x0600000000000000

> commit
-> code: OK
-> data.hex: 0x0400000000000000

> query "def"
-> code: OK
-> log: exists
-> height: 3
-> height: 0
-> key: def
-> key.hex: 646566
-> value: xyz
-> value.hex: 78797A

> prepare_proposal "preparedef"
-> code: OK
-> log: Succeeded. Tx: replacedef

> process_proposal "replacedef"
-> code: OK
-> status: ACCEPT

> process_proposal "preparedef"
-> code: OK
-> status: REJECT

> prepare_proposal

> process_proposal
-> code: OK
-> status: ACCEPT

> commit
-> code: OK
-> data.hex: 0x0400000000000000

@@ -1,7 +1,9 @@
check_tx 0x00
check_tx 0xff
deliver_tx 0x00
check_tx 0x00
deliver_tx 0x01
deliver_tx 0x04
check_tx "abc"
check_tx "def=567"
finalize_block "def=567"
commit
finalize_block "hello=world"
commit
finalize_block "first=second"
commit
info

@@ -1,19 +1,31 @@
> check_tx 0x00
> check_tx "abc"
-> code: 2

> check_tx "def=567"
-> code: OK

> check_tx 0xff
> finalize_block "def=567"
-> code: OK
-> code: OK
-> data.hex: 0x0200000000000000

> commit
-> code: OK

> deliver_tx 0x00
> finalize_block "hello=world"
-> code: OK
-> code: OK
-> data.hex: 0x0400000000000000

> commit
-> code: OK

> check_tx 0x00
> finalize_block "first=second"
-> code: OK

> deliver_tx 0x01
-> code: OK
-> data.hex: 0x0600000000000000

> deliver_tx 0x04
> commit
-> code: OK

> info

@@ -38,8 +38,34 @@ function testExample() {
	rm "${INPUT}".out.new
}

function testHelp() {
	INPUT=$1
	APP="$2 $3"

	echo "Test: $APP"
	$APP &> "${INPUT}.new" &
	sleep 2

	pre=$(shasum < "${INPUT}")
	post=$(shasum < "${INPUT}.new")

	if [[ "$pre" != "$post" ]]; then
		echo "You broke the tutorial"
		echo "Got:"
		cat "${INPUT}.new"
		echo "Expected:"
		cat "${INPUT}"
		echo "Diff:"
		diff "${INPUT}" "${INPUT}.new"
		exit 1
	fi

	rm "${INPUT}".new
}

testExample 1 tests/test_cli/ex1.abci abci-cli kvstore
testExample 2 tests/test_cli/ex2.abci abci-cli kvstore
testHelp tests/test_cli/testHelp.out abci-cli help

echo ""
echo "PASS"

30  abci/tests/test_cli/testHelp.out  Normal file
@@ -0,0 +1,30 @@
the ABCI CLI tool wraps an ABCI client and is used for testing ABCI servers

Usage:
  abci-cli [command]

Available Commands:
  batch             run a batch of abci commands against an application
  check_tx          validate a transaction
  commit            commit the application state and return the Merkle root hash
  completion        Generate the autocompletion script for the specified shell
  console           start an interactive ABCI console for multiple commands
  echo              have the application echo a message
  finalize_block    deliver a block of transactions to the application
  help              Help about any command
  info              get some info about the application
  kvstore           ABCI demo example
  prepare_proposal  prepare proposal
  process_proposal  process proposal
  query             query the application state
  test              run integration tests
  version           print ABCI console version

Flags:
      --abci string        either socket or grpc (default "socket")
      --address string     address of application socket (default "tcp://0.0.0.0:26658")
  -h, --help               help for abci-cli
      --log_level string   set the logger level (default "debug")
  -v, --verbose            print the command and results as if it were a console session

Use "abci-cli [command] --help" for more information about a command.

@@ -1,37 +1,37 @@
package types

import (
	context "golang.org/x/net/context"
)
import "context"

//go:generate ../../scripts/mockery_generate.sh Application

// Application is an interface that enables any finite, deterministic state machine
// to be driven by a blockchain-based replication engine via the ABCI.
// All methods take a RequestXxx argument and return a ResponseXxx argument,
// except CheckTx/DeliverTx, which take `tx []byte`, and `Commit`, which takes nothing.
type Application interface {
	// Info/Query Connection
	Info(RequestInfo) ResponseInfo    // Return application info
	Query(RequestQuery) ResponseQuery // Query for state
	Info(context.Context, *RequestInfo) (*ResponseInfo, error)    // Return application info
	Query(context.Context, *RequestQuery) (*ResponseQuery, error) // Query for state

	// Mempool Connection
	CheckTx(RequestCheckTx) ResponseCheckTx // Validate a tx for the mempool
	CheckTx(context.Context, *RequestCheckTx) (*ResponseCheckTx, error) // Validate a tx for the mempool

	// Consensus Connection
	InitChain(RequestInitChain) ResponseInitChain // Initialize blockchain w validators/other info from TendermintCore
	PrepareProposal(RequestPrepareProposal) ResponsePrepareProposal
	ProcessProposal(RequestProcessProposal) ResponseProcessProposal
	BeginBlock(RequestBeginBlock) ResponseBeginBlock // Signals the beginning of a block
	DeliverTx(RequestDeliverTx) ResponseDeliverTx    // Deliver a tx for full processing
	EndBlock(RequestEndBlock) ResponseEndBlock       // Signals the end of a block, returns changes to the validator set
	Commit() ResponseCommit                          // Commit the state and return the application Merkle root hash
	InitChain(context.Context, *RequestInitChain) (*ResponseInitChain, error) // Initialize blockchain w validators/other info from TendermintCore
	PrepareProposal(context.Context, *RequestPrepareProposal) (*ResponsePrepareProposal, error)
	ProcessProposal(context.Context, *RequestProcessProposal) (*ResponseProcessProposal, error)
	// Deliver the decided block with its txs to the Application
	FinalizeBlock(context.Context, *RequestFinalizeBlock) (*ResponseFinalizeBlock, error)
	// Create application specific vote extension
	ExtendVote(context.Context, *RequestExtendVote) (*ResponseExtendVote, error)
	// Verify application's vote extension data
	VerifyVoteExtension(context.Context, *RequestVerifyVoteExtension) (*ResponseVerifyVoteExtension, error)
	// Commit the state and return the application Merkle root hash
	Commit(context.Context, *RequestCommit) (*ResponseCommit, error)

	// State Sync Connection
	ListSnapshots(RequestListSnapshots) ResponseListSnapshots                // List available snapshots
	OfferSnapshot(RequestOfferSnapshot) ResponseOfferSnapshot                // Offer a snapshot to the application
	LoadSnapshotChunk(RequestLoadSnapshotChunk) ResponseLoadSnapshotChunk    // Load a snapshot chunk
	ApplySnapshotChunk(RequestApplySnapshotChunk) ResponseApplySnapshotChunk // Apply a snapshot chunk
	ListSnapshots(context.Context, *RequestListSnapshots) (*ResponseListSnapshots, error)                // List available snapshots
	OfferSnapshot(context.Context, *RequestOfferSnapshot) (*ResponseOfferSnapshot, error)                // Offer a snapshot to the application
	LoadSnapshotChunk(context.Context, *RequestLoadSnapshotChunk) (*ResponseLoadSnapshotChunk, error)    // Load a snapshot chunk
	ApplySnapshotChunk(context.Context, *RequestApplySnapshotChunk) (*ResponseApplySnapshotChunk, error) // Apply a snapshot chunk
}

//-------------------------------------------------------

@@ -39,62 +39,49 @@ type Application interface {

var _ Application = (*BaseApplication)(nil)

type BaseApplication struct {
}
type BaseApplication struct{}

func NewBaseApplication() *BaseApplication {
	return &BaseApplication{}
}

func (BaseApplication) Info(req RequestInfo) ResponseInfo {
	return ResponseInfo{}
func (BaseApplication) Info(_ context.Context, req *RequestInfo) (*ResponseInfo, error) {
	return &ResponseInfo{}, nil
}

func (BaseApplication) DeliverTx(req RequestDeliverTx) ResponseDeliverTx {
	return ResponseDeliverTx{Code: CodeTypeOK}
func (BaseApplication) CheckTx(_ context.Context, req *RequestCheckTx) (*ResponseCheckTx, error) {
	return &ResponseCheckTx{Code: CodeTypeOK}, nil
}

func (BaseApplication) CheckTx(req RequestCheckTx) ResponseCheckTx {
	return ResponseCheckTx{Code: CodeTypeOK}
func (BaseApplication) Commit(_ context.Context, req *RequestCommit) (*ResponseCommit, error) {
	return &ResponseCommit{}, nil
}

func (BaseApplication) Commit() ResponseCommit {
	return ResponseCommit{}
func (BaseApplication) Query(_ context.Context, req *RequestQuery) (*ResponseQuery, error) {
	return &ResponseQuery{Code: CodeTypeOK}, nil
}

func (BaseApplication) Query(req RequestQuery) ResponseQuery {
	return ResponseQuery{Code: CodeTypeOK}
func (BaseApplication) InitChain(_ context.Context, req *RequestInitChain) (*ResponseInitChain, error) {
	return &ResponseInitChain{}, nil
}

func (BaseApplication) InitChain(req RequestInitChain) ResponseInitChain {
	return ResponseInitChain{}
func (BaseApplication) ListSnapshots(_ context.Context, req *RequestListSnapshots) (*ResponseListSnapshots, error) {
	return &ResponseListSnapshots{}, nil
}

func (BaseApplication) BeginBlock(req RequestBeginBlock) ResponseBeginBlock {
	return ResponseBeginBlock{}
func (BaseApplication) OfferSnapshot(_ context.Context, req *RequestOfferSnapshot) (*ResponseOfferSnapshot, error) {
	return &ResponseOfferSnapshot{}, nil
}

func (BaseApplication) EndBlock(req RequestEndBlock) ResponseEndBlock {
	return ResponseEndBlock{}
func (BaseApplication) LoadSnapshotChunk(_ context.Context, _ *RequestLoadSnapshotChunk) (*ResponseLoadSnapshotChunk, error) {
	return &ResponseLoadSnapshotChunk{}, nil
}

func (BaseApplication) ListSnapshots(req RequestListSnapshots) ResponseListSnapshots {
	return ResponseListSnapshots{}
func (BaseApplication) ApplySnapshotChunk(_ context.Context, req *RequestApplySnapshotChunk) (*ResponseApplySnapshotChunk, error) {
	return &ResponseApplySnapshotChunk{}, nil
}

func (BaseApplication) OfferSnapshot(req RequestOfferSnapshot) ResponseOfferSnapshot {
	return ResponseOfferSnapshot{}
}

func (BaseApplication) LoadSnapshotChunk(req RequestLoadSnapshotChunk) ResponseLoadSnapshotChunk {
	return ResponseLoadSnapshotChunk{}
}

func (BaseApplication) ApplySnapshotChunk(req RequestApplySnapshotChunk) ResponseApplySnapshotChunk {
	return ResponseApplySnapshotChunk{}
}

func (BaseApplication) PrepareProposal(req RequestPrepareProposal) ResponsePrepareProposal {
func (BaseApplication) PrepareProposal(_ context.Context, req *RequestPrepareProposal) (*ResponsePrepareProposal, error) {
	txs := make([][]byte, 0, len(req.Txs))
	var totalBytes int64
	for _, tx := range req.Txs {
@@ -104,105 +91,29 @@ func (BaseApplication) PrepareProposal(req RequestPrepareProposal) ResponsePrepa
		}
		txs = append(txs, tx)
	}
	return ResponsePrepareProposal{Txs: txs}
	return &ResponsePrepareProposal{Txs: txs}, nil
}

func (BaseApplication) ProcessProposal(req RequestProcessProposal) ResponseProcessProposal {
	return ResponseProcessProposal{
		Status: ResponseProcessProposal_ACCEPT}
func (BaseApplication) ProcessProposal(_ context.Context, req *RequestProcessProposal) (*ResponseProcessProposal, error) {
	return &ResponseProcessProposal{Status: ResponseProcessProposal_ACCEPT}, nil
}

//-------------------------------------------------------

// GRPCApplication is a GRPC wrapper for Application
type GRPCApplication struct {
	app Application
func (BaseApplication) ExtendVote(_ context.Context, req *RequestExtendVote) (*ResponseExtendVote, error) {
	return &ResponseExtendVote{}, nil
}

func NewGRPCApplication(app Application) *GRPCApplication {
	return &GRPCApplication{app}
func (BaseApplication) VerifyVoteExtension(_ context.Context, req *RequestVerifyVoteExtension) (*ResponseVerifyVoteExtension, error) {
	return &ResponseVerifyVoteExtension{
		Status: ResponseVerifyVoteExtension_ACCEPT,
	}, nil
}

func (app *GRPCApplication) Echo(ctx context.Context, req *RequestEcho) (*ResponseEcho, error) {
	return &ResponseEcho{Message: req.Message}, nil
}

func (app *GRPCApplication) Flush(ctx context.Context, req *RequestFlush) (*ResponseFlush, error) {
	return &ResponseFlush{}, nil
}

func (app *GRPCApplication) Info(ctx context.Context, req *RequestInfo) (*ResponseInfo, error) {
	res := app.app.Info(*req)
	return &res, nil
}

func (app *GRPCApplication) DeliverTx(ctx context.Context, req *RequestDeliverTx) (*ResponseDeliverTx, error) {
	res := app.app.DeliverTx(*req)
	return &res, nil
}

func (app *GRPCApplication) CheckTx(ctx context.Context, req *RequestCheckTx) (*ResponseCheckTx, error) {
	res := app.app.CheckTx(*req)
	return &res, nil
}

func (app *GRPCApplication) Query(ctx context.Context, req *RequestQuery) (*ResponseQuery, error) {
	res := app.app.Query(*req)
	return &res, nil
}

func (app *GRPCApplication) Commit(ctx context.Context, req *RequestCommit) (*ResponseCommit, error) {
	res := app.app.Commit()
	return &res, nil
}

func (app *GRPCApplication) InitChain(ctx context.Context, req *RequestInitChain) (*ResponseInitChain, error) {
	res := app.app.InitChain(*req)
	return &res, nil
}

func (app *GRPCApplication) BeginBlock(ctx context.Context, req *RequestBeginBlock) (*ResponseBeginBlock, error) {
	res := app.app.BeginBlock(*req)
	return &res, nil
}

func (app *GRPCApplication) EndBlock(ctx context.Context, req *RequestEndBlock) (*ResponseEndBlock, error) {
	res := app.app.EndBlock(*req)
	return &res, nil
}

func (app *GRPCApplication) ListSnapshots(
	ctx context.Context, req *RequestListSnapshots) (*ResponseListSnapshots, error) {
	res := app.app.ListSnapshots(*req)
	return &res, nil
}

func (app *GRPCApplication) OfferSnapshot(
	ctx context.Context, req *RequestOfferSnapshot) (*ResponseOfferSnapshot, error) {
	res := app.app.OfferSnapshot(*req)
	return &res, nil
}

func (app *GRPCApplication) LoadSnapshotChunk(
	ctx context.Context, req *RequestLoadSnapshotChunk) (*ResponseLoadSnapshotChunk, error) {
	res := app.app.LoadSnapshotChunk(*req)
	return &res, nil
}

func (app *GRPCApplication) ApplySnapshotChunk(
	ctx context.Context, req *RequestApplySnapshotChunk) (*ResponseApplySnapshotChunk, error) {
	res := app.app.ApplySnapshotChunk(*req)
	return &res, nil
}

func (app *GRPCApplication) PrepareProposal(
	ctx context.Context, req *RequestPrepareProposal) (*ResponsePrepareProposal, error) {
	res := app.app.PrepareProposal(*req)
	return &res, nil
}

func (app *GRPCApplication) ProcessProposal(
	ctx context.Context, req *RequestProcessProposal) (*ResponseProcessProposal, error) {
	res := app.app.ProcessProposal(*req)
	return &res, nil
func (BaseApplication) FinalizeBlock(_ context.Context, req *RequestFinalizeBlock) (*ResponseFinalizeBlock, error) {
	txs := make([]*ExecTxResult, len(req.Txs))
	for i := range req.Txs {
		txs[i] = &ExecTxResult{Code: CodeTypeOK}
	}
	return &ResponseFinalizeBlock{
		TxResults: txs,
	}, nil
}

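Embedding BaseApplication is the intended way to satisfy this widened interface while overriding only the methods that matter; everything not overridden falls through to the accept-all defaults above. A minimal sketch using only types shown in this diff (the CounterApp name and its counting behavior are illustrative):

package example

import (
	"context"

	"github.com/tendermint/tendermint/abci/types"
)

// CounterApp overrides FinalizeBlock and inherits every other method,
// including the accept-all CheckTx and ProcessProposal, from BaseApplication.
type CounterApp struct {
	types.BaseApplication
	txCount int64
}

var _ types.Application = (*CounterApp)(nil)

func (app *CounterApp) FinalizeBlock(_ context.Context, req *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
	results := make([]*types.ExecTxResult, len(req.Txs))
	for i := range req.Txs {
		app.txCount++
		results[i] = &types.ExecTxResult{Code: types.CodeTypeOK}
	}
	return &types.ResponseFinalizeBlock{TxResults: results}, nil
}
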
@@ -4,6 +4,7 @@ import (
	"io"

	"github.com/cosmos/gogoproto/proto"

	"github.com/tendermint/tendermint/libs/protoio"
)

@@ -15,11 +16,7 @@ const (
func WriteMessage(msg proto.Message, w io.Writer) error {
	protoWriter := protoio.NewDelimitedWriter(w)
	_, err := protoWriter.WriteMsg(msg)
	if err != nil {
		return err
	}

	return nil
	return err
}

// ReadMessage reads a varint length-delimited protobuf message.

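WriteMessage now simply propagates the writer's error instead of unwrapping it by hand. Together with ReadMessage it forms a varint length-delimited round trip over any io.Writer/io.Reader pair; a small sketch, assuming ReadMessage keeps its (io.Reader, proto.Message) error shape:

package main

import (
	"bytes"
	"fmt"

	"github.com/tendermint/tendermint/abci/types"
)

func main() {
	var buf bytes.Buffer

	// Length-delimited write of a request...
	if err := types.WriteMessage(&types.RequestEcho{Message: "hello"}, &buf); err != nil {
		panic(err)
	}

	// ...and the matching read from the same stream.
	var echo types.RequestEcho
	if err := types.ReadMessage(&buf, &echo); err != nil {
		panic(err)
	}
	fmt.Println(echo.Message) // prints "hello"
}
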
@@ -42,21 +39,15 @@ func ToRequestFlush() *Request {
	}
}

func ToRequestInfo(req RequestInfo) *Request {
func ToRequestInfo(req *RequestInfo) *Request {
	return &Request{
		Value: &Request_Info{&req},
		Value: &Request_Info{req},
	}
}

func ToRequestDeliverTx(req RequestDeliverTx) *Request {
func ToRequestCheckTx(req *RequestCheckTx) *Request {
	return &Request{
		Value: &Request_DeliverTx{&req},
	}
}

func ToRequestCheckTx(req RequestCheckTx) *Request {
	return &Request{
		Value: &Request_CheckTx{&req},
		Value: &Request_CheckTx{req},
	}
}

@@ -66,63 +57,69 @@ func ToRequestCommit() *Request {
	}
}

func ToRequestQuery(req RequestQuery) *Request {
func ToRequestQuery(req *RequestQuery) *Request {
	return &Request{
		Value: &Request_Query{&req},
		Value: &Request_Query{req},
	}
}

func ToRequestInitChain(req RequestInitChain) *Request {
func ToRequestInitChain(req *RequestInitChain) *Request {
	return &Request{
		Value: &Request_InitChain{&req},
		Value: &Request_InitChain{req},
	}
}

func ToRequestBeginBlock(req RequestBeginBlock) *Request {
func ToRequestListSnapshots(req *RequestListSnapshots) *Request {
	return &Request{
		Value: &Request_BeginBlock{&req},
		Value: &Request_ListSnapshots{req},
	}
}

func ToRequestEndBlock(req RequestEndBlock) *Request {
func ToRequestOfferSnapshot(req *RequestOfferSnapshot) *Request {
	return &Request{
		Value: &Request_EndBlock{&req},
		Value: &Request_OfferSnapshot{req},
	}
}

func ToRequestListSnapshots(req RequestListSnapshots) *Request {
func ToRequestLoadSnapshotChunk(req *RequestLoadSnapshotChunk) *Request {
	return &Request{
		Value: &Request_ListSnapshots{&req},
		Value: &Request_LoadSnapshotChunk{req},
	}
}

func ToRequestOfferSnapshot(req RequestOfferSnapshot) *Request {
func ToRequestApplySnapshotChunk(req *RequestApplySnapshotChunk) *Request {
	return &Request{
		Value: &Request_OfferSnapshot{&req},
		Value: &Request_ApplySnapshotChunk{req},
	}
}

func ToRequestLoadSnapshotChunk(req RequestLoadSnapshotChunk) *Request {
func ToRequestPrepareProposal(req *RequestPrepareProposal) *Request {
	return &Request{
		Value: &Request_LoadSnapshotChunk{&req},
		Value: &Request_PrepareProposal{req},
	}
}

func ToRequestApplySnapshotChunk(req RequestApplySnapshotChunk) *Request {
func ToRequestProcessProposal(req *RequestProcessProposal) *Request {
	return &Request{
		Value: &Request_ApplySnapshotChunk{&req},
		Value: &Request_ProcessProposal{req},
	}
}

func ToRequestPrepareProposal(req RequestPrepareProposal) *Request {
func ToRequestExtendVote(req *RequestExtendVote) *Request {
	return &Request{
		Value: &Request_PrepareProposal{&req},
		Value: &Request_ExtendVote{req},
	}
}

func ToRequestProcessProposal(req RequestProcessProposal) *Request {
func ToRequestVerifyVoteExtension(req *RequestVerifyVoteExtension) *Request {
	return &Request{
		Value: &Request_ProcessProposal{&req},
		Value: &Request_VerifyVoteExtension{req},
	}
}

func ToRequestFinalizeBlock(req *RequestFinalizeBlock) *Request {
	return &Request{
		Value: &Request_FinalizeBlock{req},
	}
}

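The converters now share the caller's request pointer instead of copying the struct and taking its address. Unwrapping goes through the same Value oneof that the socket server's handleRequest switches on; a short sketch (the dispatch function is illustrative, not part of the change):

package example

import "github.com/tendermint/tendermint/abci/types"

func dispatch(req *types.Request) {
	switch r := req.Value.(type) {
	case *types.Request_CheckTx:
		// r.CheckTx is the same *types.RequestCheckTx that was passed to
		// ToRequestCheckTx; no copy was made along the way.
		_ = r.CheckTx.Tx
	case *types.Request_FinalizeBlock:
		_ = r.FinalizeBlock.Txs
	default:
		// the remaining request types would be dispatched here
	}
}
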
@@ -146,86 +143,86 @@ func ToResponseFlush() *Response {
	}
}

func ToResponseInfo(res ResponseInfo) *Response {
func ToResponseInfo(res *ResponseInfo) *Response {
	return &Response{
		Value: &Response_Info{&res},
		Value: &Response_Info{res},
	}
}

func ToResponseDeliverTx(res ResponseDeliverTx) *Response {
func ToResponseCheckTx(res *ResponseCheckTx) *Response {
	return &Response{
		Value: &Response_DeliverTx{&res},
		Value: &Response_CheckTx{res},
	}
}

func ToResponseCheckTx(res ResponseCheckTx) *Response {
func ToResponseCommit(res *ResponseCommit) *Response {
	return &Response{
		Value: &Response_CheckTx{&res},
		Value: &Response_Commit{res},
	}
}

func ToResponseCommit(res ResponseCommit) *Response {
func ToResponseQuery(res *ResponseQuery) *Response {
	return &Response{
		Value: &Response_Commit{&res},
		Value: &Response_Query{res},
	}
}

func ToResponseQuery(res ResponseQuery) *Response {
func ToResponseInitChain(res *ResponseInitChain) *Response {
	return &Response{
		Value: &Response_Query{&res},
		Value: &Response_InitChain{res},
	}
}

func ToResponseInitChain(res ResponseInitChain) *Response {
func ToResponseListSnapshots(res *ResponseListSnapshots) *Response {
	return &Response{
		Value: &Response_InitChain{&res},
		Value: &Response_ListSnapshots{res},
	}
}

func ToResponseBeginBlock(res ResponseBeginBlock) *Response {
func ToResponseOfferSnapshot(res *ResponseOfferSnapshot) *Response {
	return &Response{
		Value: &Response_BeginBlock{&res},
		Value: &Response_OfferSnapshot{res},
	}
}

func ToResponseEndBlock(res ResponseEndBlock) *Response {
func ToResponseLoadSnapshotChunk(res *ResponseLoadSnapshotChunk) *Response {
	return &Response{
		Value: &Response_EndBlock{&res},
		Value: &Response_LoadSnapshotChunk{res},
	}
}

func ToResponseListSnapshots(res ResponseListSnapshots) *Response {
func ToResponseApplySnapshotChunk(res *ResponseApplySnapshotChunk) *Response {
	return &Response{
		Value: &Response_ListSnapshots{&res},
		Value: &Response_ApplySnapshotChunk{res},
	}
}

func ToResponseOfferSnapshot(res ResponseOfferSnapshot) *Response {
func ToResponsePrepareProposal(res *ResponsePrepareProposal) *Response {
	return &Response{
		Value: &Response_OfferSnapshot{&res},
		Value: &Response_PrepareProposal{res},
	}
}

func ToResponseLoadSnapshotChunk(res ResponseLoadSnapshotChunk) *Response {
func ToResponseProcessProposal(res *ResponseProcessProposal) *Response {
	return &Response{
		Value: &Response_LoadSnapshotChunk{&res},
		Value: &Response_ProcessProposal{res},
	}
}

func ToResponseApplySnapshotChunk(res ResponseApplySnapshotChunk) *Response {
func ToResponseExtendVote(res *ResponseExtendVote) *Response {
	return &Response{
		Value: &Response_ApplySnapshotChunk{&res},
		Value: &Response_ExtendVote{res},
	}
}

func ToResponsePrepareProposal(res ResponsePrepareProposal) *Response {
func ToResponseVerifyVoteExtension(res *ResponseVerifyVoteExtension) *Response {
	return &Response{
		Value: &Response_PrepareProposal{&res},
		Value: &Response_VerifyVoteExtension{res},
	}
}

func ToResponseProcessProposal(res ResponseProcessProposal) *Response {
func ToResponseFinalizeBlock(res *ResponseFinalizeBlock) *Response {
	return &Response{
		Value: &Response_ProcessProposal{&res},
		Value: &Response_FinalizeBlock{res},
	}
}

@@ -13,8 +13,8 @@ import (
)

func TestMarshalJSON(t *testing.T) {
	b, err := json.Marshal(&ResponseDeliverTx{})
	assert.Nil(t, err)
	b, err := json.Marshal(&ExecTxResult{Code: 1})
	assert.NoError(t, err)
	// include empty fields.
	assert.True(t, strings.Contains(string(b), "code"))
	r1 := ResponseCheckTx{

@@ -3,6 +3,8 @@
package mocks

import (
	context "context"

	mock "github.com/stretchr/testify/mock"
	types "github.com/tendermint/tendermint/abci/types"
)
@@ -12,200 +14,326 @@ type Application struct {
	mock.Mock
}

// ApplySnapshotChunk provides a mock function with given fields: _a0
func (_m *Application) ApplySnapshotChunk(_a0 types.RequestApplySnapshotChunk) types.ResponseApplySnapshotChunk {
	ret := _m.Called(_a0)
// ApplySnapshotChunk provides a mock function with given fields: _a0, _a1
func (_m *Application) ApplySnapshotChunk(_a0 context.Context, _a1 *types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
	ret := _m.Called(_a0, _a1)

	var r0 types.ResponseApplySnapshotChunk
	if rf, ok := ret.Get(0).(func(types.RequestApplySnapshotChunk) types.ResponseApplySnapshotChunk); ok {
		r0 = rf(_a0)
	var r0 *types.ResponseApplySnapshotChunk
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestApplySnapshotChunk) *types.ResponseApplySnapshotChunk); ok {
		r0 = rf(_a0, _a1)
	} else {
		r0 = ret.Get(0).(types.ResponseApplySnapshotChunk)
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseApplySnapshotChunk)
		}
	}

	return r0
	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestApplySnapshotChunk) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// BeginBlock provides a mock function with given fields: _a0
func (_m *Application) BeginBlock(_a0 types.RequestBeginBlock) types.ResponseBeginBlock {
	ret := _m.Called(_a0)
// CheckTx provides a mock function with given fields: _a0, _a1
func (_m *Application) CheckTx(_a0 context.Context, _a1 *types.RequestCheckTx) (*types.ResponseCheckTx, error) {
	ret := _m.Called(_a0, _a1)

	var r0 types.ResponseBeginBlock
	if rf, ok := ret.Get(0).(func(types.RequestBeginBlock) types.ResponseBeginBlock); ok {
		r0 = rf(_a0)
	var r0 *types.ResponseCheckTx
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestCheckTx) *types.ResponseCheckTx); ok {
		r0 = rf(_a0, _a1)
	} else {
		r0 = ret.Get(0).(types.ResponseBeginBlock)
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseCheckTx)
		}
	}

	return r0
	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestCheckTx) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// CheckTx provides a mock function with given fields: _a0
func (_m *Application) CheckTx(_a0 types.RequestCheckTx) types.ResponseCheckTx {
	ret := _m.Called(_a0)
// Commit provides a mock function with given fields: _a0, _a1
func (_m *Application) Commit(_a0 context.Context, _a1 *types.RequestCommit) (*types.ResponseCommit, error) {
	ret := _m.Called(_a0, _a1)

	var r0 types.ResponseCheckTx
	if rf, ok := ret.Get(0).(func(types.RequestCheckTx) types.ResponseCheckTx); ok {
		r0 = rf(_a0)
	var r0 *types.ResponseCommit
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestCommit) *types.ResponseCommit); ok {
		r0 = rf(_a0, _a1)
	} else {
		r0 = ret.Get(0).(types.ResponseCheckTx)
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseCommit)
		}
	}

	return r0
	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestCommit) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// Commit provides a mock function with given fields:
func (_m *Application) Commit() types.ResponseCommit {
	ret := _m.Called()
// ExtendVote provides a mock function with given fields: _a0, _a1
func (_m *Application) ExtendVote(_a0 context.Context, _a1 *types.RequestExtendVote) (*types.ResponseExtendVote, error) {
	ret := _m.Called(_a0, _a1)

	var r0 types.ResponseCommit
	if rf, ok := ret.Get(0).(func() types.ResponseCommit); ok {
		r0 = rf()
	var r0 *types.ResponseExtendVote
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestExtendVote) *types.ResponseExtendVote); ok {
		r0 = rf(_a0, _a1)
	} else {
		r0 = ret.Get(0).(types.ResponseCommit)
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseExtendVote)
		}
	}

	return r0
	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestExtendVote) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// DeliverTx provides a mock function with given fields: _a0
func (_m *Application) DeliverTx(_a0 types.RequestDeliverTx) types.ResponseDeliverTx {
	ret := _m.Called(_a0)
// FinalizeBlock provides a mock function with given fields: _a0, _a1
func (_m *Application) FinalizeBlock(_a0 context.Context, _a1 *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
	ret := _m.Called(_a0, _a1)

	var r0 types.ResponseDeliverTx
	if rf, ok := ret.Get(0).(func(types.RequestDeliverTx) types.ResponseDeliverTx); ok {
		r0 = rf(_a0)
	var r0 *types.ResponseFinalizeBlock
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestFinalizeBlock) *types.ResponseFinalizeBlock); ok {
		r0 = rf(_a0, _a1)
	} else {
		r0 = ret.Get(0).(types.ResponseDeliverTx)
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseFinalizeBlock)
		}
	}

	return r0
	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestFinalizeBlock) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// EndBlock provides a mock function with given fields: _a0
func (_m *Application) EndBlock(_a0 types.RequestEndBlock) types.ResponseEndBlock {
	ret := _m.Called(_a0)
// Info provides a mock function with given fields: _a0, _a1
func (_m *Application) Info(_a0 context.Context, _a1 *types.RequestInfo) (*types.ResponseInfo, error) {
	ret := _m.Called(_a0, _a1)

	var r0 types.ResponseEndBlock
	if rf, ok := ret.Get(0).(func(types.RequestEndBlock) types.ResponseEndBlock); ok {
		r0 = rf(_a0)
	var r0 *types.ResponseInfo
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestInfo) *types.ResponseInfo); ok {
		r0 = rf(_a0, _a1)
	} else {
		r0 = ret.Get(0).(types.ResponseEndBlock)
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseInfo)
		}
	}

	return r0
	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestInfo) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// Info provides a mock function with given fields: _a0
func (_m *Application) Info(_a0 types.RequestInfo) types.ResponseInfo {
	ret := _m.Called(_a0)
// InitChain provides a mock function with given fields: _a0, _a1
func (_m *Application) InitChain(_a0 context.Context, _a1 *types.RequestInitChain) (*types.ResponseInitChain, error) {
	ret := _m.Called(_a0, _a1)

	var r0 types.ResponseInfo
	if rf, ok := ret.Get(0).(func(types.RequestInfo) types.ResponseInfo); ok {
		r0 = rf(_a0)
	var r0 *types.ResponseInitChain
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestInitChain) *types.ResponseInitChain); ok {
		r0 = rf(_a0, _a1)
	} else {
		r0 = ret.Get(0).(types.ResponseInfo)
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseInitChain)
		}
	}

	return r0
	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestInitChain) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// InitChain provides a mock function with given fields: _a0
func (_m *Application) InitChain(_a0 types.RequestInitChain) types.ResponseInitChain {
	ret := _m.Called(_a0)
// ListSnapshots provides a mock function with given fields: _a0, _a1
func (_m *Application) ListSnapshots(_a0 context.Context, _a1 *types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
	ret := _m.Called(_a0, _a1)

	var r0 types.ResponseInitChain
	if rf, ok := ret.Get(0).(func(types.RequestInitChain) types.ResponseInitChain); ok {
		r0 = rf(_a0)
	var r0 *types.ResponseListSnapshots
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestListSnapshots) *types.ResponseListSnapshots); ok {
		r0 = rf(_a0, _a1)
	} else {
		r0 = ret.Get(0).(types.ResponseInitChain)
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseListSnapshots)
		}
	}

	return r0
	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestListSnapshots) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// ListSnapshots provides a mock function with given fields: _a0
func (_m *Application) ListSnapshots(_a0 types.RequestListSnapshots) types.ResponseListSnapshots {
	ret := _m.Called(_a0)
// LoadSnapshotChunk provides a mock function with given fields: _a0, _a1
func (_m *Application) LoadSnapshotChunk(_a0 context.Context, _a1 *types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
	ret := _m.Called(_a0, _a1)

	var r0 types.ResponseListSnapshots
	if rf, ok := ret.Get(0).(func(types.RequestListSnapshots) types.ResponseListSnapshots); ok {
		r0 = rf(_a0)
	var r0 *types.ResponseLoadSnapshotChunk
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestLoadSnapshotChunk) *types.ResponseLoadSnapshotChunk); ok {
		r0 = rf(_a0, _a1)
	} else {
		r0 = ret.Get(0).(types.ResponseListSnapshots)
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseLoadSnapshotChunk)
		}
	}

	return r0
	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestLoadSnapshotChunk) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// LoadSnapshotChunk provides a mock function with given fields: _a0
func (_m *Application) LoadSnapshotChunk(_a0 types.RequestLoadSnapshotChunk) types.ResponseLoadSnapshotChunk {
	ret := _m.Called(_a0)
// OfferSnapshot provides a mock function with given fields: _a0, _a1
func (_m *Application) OfferSnapshot(_a0 context.Context, _a1 *types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
	ret := _m.Called(_a0, _a1)

	var r0 types.ResponseLoadSnapshotChunk
	if rf, ok := ret.Get(0).(func(types.RequestLoadSnapshotChunk) types.ResponseLoadSnapshotChunk); ok {
		r0 = rf(_a0)
	var r0 *types.ResponseOfferSnapshot
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestOfferSnapshot) *types.ResponseOfferSnapshot); ok {
		r0 = rf(_a0, _a1)
	} else {
		r0 = ret.Get(0).(types.ResponseLoadSnapshotChunk)
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseOfferSnapshot)
		}
	}

	return r0
	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestOfferSnapshot) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// OfferSnapshot provides a mock function with given fields: _a0
func (_m *Application) OfferSnapshot(_a0 types.RequestOfferSnapshot) types.ResponseOfferSnapshot {
	ret := _m.Called(_a0)
// PrepareProposal provides a mock function with given fields: _a0, _a1
func (_m *Application) PrepareProposal(_a0 context.Context, _a1 *types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
	ret := _m.Called(_a0, _a1)

	var r0 types.ResponseOfferSnapshot
	if rf, ok := ret.Get(0).(func(types.RequestOfferSnapshot) types.ResponseOfferSnapshot); ok {
		r0 = rf(_a0)
	var r0 *types.ResponsePrepareProposal
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestPrepareProposal) *types.ResponsePrepareProposal); ok {
		r0 = rf(_a0, _a1)
	} else {
		r0 = ret.Get(0).(types.ResponseOfferSnapshot)
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponsePrepareProposal)
		}
	}

	return r0
	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestPrepareProposal) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// PrepareProposal provides a mock function with given fields: _a0
func (_m *Application) PrepareProposal(_a0 types.RequestPrepareProposal) types.ResponsePrepareProposal {
	ret := _m.Called(_a0)
// ProcessProposal provides a mock function with given fields: _a0, _a1
func (_m *Application) ProcessProposal(_a0 context.Context, _a1 *types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
	ret := _m.Called(_a0, _a1)

	var r0 types.ResponsePrepareProposal
	if rf, ok := ret.Get(0).(func(types.RequestPrepareProposal) types.ResponsePrepareProposal); ok {
		r0 = rf(_a0)
	var r0 *types.ResponseProcessProposal
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestProcessProposal) *types.ResponseProcessProposal); ok {
		r0 = rf(_a0, _a1)
	} else {
		r0 = ret.Get(0).(types.ResponsePrepareProposal)
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseProcessProposal)
		}
	}

	return r0
	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestProcessProposal) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// ProcessProposal provides a mock function with given fields: _a0
func (_m *Application) ProcessProposal(_a0 types.RequestProcessProposal) types.ResponseProcessProposal {
	ret := _m.Called(_a0)
// Query provides a mock function with given fields: _a0, _a1
func (_m *Application) Query(_a0 context.Context, _a1 *types.RequestQuery) (*types.ResponseQuery, error) {
	ret := _m.Called(_a0, _a1)

	var r0 types.ResponseProcessProposal
	if rf, ok := ret.Get(0).(func(types.RequestProcessProposal) types.ResponseProcessProposal); ok {
		r0 = rf(_a0)
	var r0 *types.ResponseQuery
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestQuery) *types.ResponseQuery); ok {
		r0 = rf(_a0, _a1)
	} else {
		r0 = ret.Get(0).(types.ResponseProcessProposal)
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseQuery)
		}
	}

	return r0
	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestQuery) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// Query provides a mock function with given fields: _a0
func (_m *Application) Query(_a0 types.RequestQuery) types.ResponseQuery {
	ret := _m.Called(_a0)
// VerifyVoteExtension provides a mock function with given fields: _a0, _a1
func (_m *Application) VerifyVoteExtension(_a0 context.Context, _a1 *types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) {
	ret := _m.Called(_a0, _a1)

	var r0 types.ResponseQuery
	if rf, ok := ret.Get(0).(func(types.RequestQuery) types.ResponseQuery); ok {
		r0 = rf(_a0)
	var r0 *types.ResponseVerifyVoteExtension
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestVerifyVoteExtension) *types.ResponseVerifyVoteExtension); ok {
		r0 = rf(_a0, _a1)
	} else {
		r0 = ret.Get(0).(types.ResponseQuery)
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseVerifyVoteExtension)
		}
	}

	return r0
	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestVerifyVoteExtension) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

type mockConstructorTestingTNewApplication interface {

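The regenerated mock follows mockery's two-value convention, so tests stub individual methods with testify's On/Return. A sketch of typical usage, assuming the mocks package lives under abci/types/mocks and that the generated NewApplication(t) constructor implied by the interface above exists:

package mocks_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	"github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/abci/types/mocks"
)

func TestCheckTxRejected(t *testing.T) {
	app := mocks.NewApplication(t)
	// Any context and any request produce a rejection with code 1.
	app.On("CheckTx", mock.Anything, mock.Anything).
		Return(&types.ResponseCheckTx{Code: 1}, nil)

	res, err := app.CheckTx(context.Background(), &types.RequestCheckTx{Tx: []byte("bad")})
	require.NoError(t, err)
	require.True(t, res.IsErr())
}
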
@@ -1,187 +0,0 @@
package mocks

import (
	types "github.com/tendermint/tendermint/abci/types"
)

// BaseMock provides a wrapper around the generated Application mock and a BaseApplication.
// BaseMock first tries to use the mock's implementation of the method.
// If no functionality was provided for the mock by the user, BaseMock dispatches
// to the BaseApplication and uses its functionality.
// BaseMock allows users to provide mocked functionality for only the methods that matter
// for their test while avoiding a panic if the code calls Application methods that are
// not relevant to the test.
type BaseMock struct {
	base *types.BaseApplication
	*Application
}

func NewBaseMock() BaseMock {
	return BaseMock{
		base:        types.NewBaseApplication(),
		Application: new(Application),
	}
}

// Info/Query Connection
// Return application info
func (m BaseMock) Info(input types.RequestInfo) types.ResponseInfo {
	var ret types.ResponseInfo
	defer func() {
		if r := recover(); r != nil {
			ret = m.base.Info(input)
		}
	}()
	ret = m.Application.Info(input)
	return ret
}

func (m BaseMock) Query(input types.RequestQuery) types.ResponseQuery {
	var ret types.ResponseQuery
	defer func() {
		if r := recover(); r != nil {
			ret = m.base.Query(input)
		}
	}()
	ret = m.Application.Query(input)
	return ret
}

// Mempool Connection
// Validate a tx for the mempool
func (m BaseMock) CheckTx(input types.RequestCheckTx) types.ResponseCheckTx {
	var ret types.ResponseCheckTx
	defer func() {
		if r := recover(); r != nil {
			ret = m.base.CheckTx(input)
		}
	}()
	ret = m.Application.CheckTx(input)
	return ret
}

// Consensus Connection
// Initialize blockchain w validators/other info from TendermintCore
func (m BaseMock) InitChain(input types.RequestInitChain) types.ResponseInitChain {
	var ret types.ResponseInitChain
	defer func() {
		if r := recover(); r != nil {
			ret = m.base.InitChain(input)
		}
	}()
	ret = m.Application.InitChain(input)
	return ret
}

func (m BaseMock) PrepareProposal(input types.RequestPrepareProposal) types.ResponsePrepareProposal {
	var ret types.ResponsePrepareProposal
	defer func() {
		if r := recover(); r != nil {
			ret = m.base.PrepareProposal(input)
		}
	}()
	ret = m.Application.PrepareProposal(input)
	return ret
}

func (m BaseMock) ProcessProposal(input types.RequestProcessProposal) types.ResponseProcessProposal {
	var ret types.ResponseProcessProposal
	defer func() {
		if r := recover(); r != nil {
			ret = m.base.ProcessProposal(input)
		}
	}()
	ret = m.Application.ProcessProposal(input)
	return ret
}

// Commit the state and return the application Merkle root hash
func (m BaseMock) Commit() types.ResponseCommit {
	var ret types.ResponseCommit
	defer func() {
		if r := recover(); r != nil {
			ret = m.base.Commit()
		}
	}()
	ret = m.Application.Commit()
	return ret
}

// State Sync Connection
// List available snapshots
func (m BaseMock) ListSnapshots(input types.RequestListSnapshots) types.ResponseListSnapshots {
	var ret types.ResponseListSnapshots
	defer func() {
		if r := recover(); r != nil {
			ret = m.base.ListSnapshots(input)
		}
	}()
	ret = m.Application.ListSnapshots(input)
	return ret
}

func (m BaseMock) OfferSnapshot(input types.RequestOfferSnapshot) types.ResponseOfferSnapshot {
	var ret types.ResponseOfferSnapshot
	defer func() {
		if r := recover(); r != nil {
			ret = m.base.OfferSnapshot(input)
		}
	}()
	ret = m.Application.OfferSnapshot(input)
	return ret
}

func (m BaseMock) LoadSnapshotChunk(input types.RequestLoadSnapshotChunk) types.ResponseLoadSnapshotChunk {
	var ret types.ResponseLoadSnapshotChunk
	defer func() {
		if r := recover(); r != nil {
			ret = m.base.LoadSnapshotChunk(input)
		}
	}()
	ret = m.Application.LoadSnapshotChunk(input)
	return ret
}

func (m BaseMock) ApplySnapshotChunk(input types.RequestApplySnapshotChunk) types.ResponseApplySnapshotChunk {
	var ret types.ResponseApplySnapshotChunk
	defer func() {
		if r := recover(); r != nil {
			ret = m.base.ApplySnapshotChunk(input)
		}
	}()
	ret = m.Application.ApplySnapshotChunk(input)
	return ret
}

func (m BaseMock) BeginBlock(input types.RequestBeginBlock) types.ResponseBeginBlock {
	var ret types.ResponseBeginBlock
	defer func() {
		if r := recover(); r != nil {
			ret = m.base.BeginBlock(input)
		}
	}()
	ret = m.Application.BeginBlock(input)
	return ret
}

func (m BaseMock) DeliverTx(input types.RequestDeliverTx) types.ResponseDeliverTx {
	var ret types.ResponseDeliverTx
	defer func() {
		if r := recover(); r != nil {
			ret = m.base.DeliverTx(input)
		}
	}()
	ret = m.Application.DeliverTx(input)
	return ret
}

func (m BaseMock) EndBlock(input types.RequestEndBlock) types.ResponseEndBlock {
	var ret types.ResponseEndBlock
	defer func() {
		if r := recover(); r != nil {
			ret = m.base.EndBlock(input)
		}
	}()
	ret = m.Application.EndBlock(input)
	return ret
}

@@ -22,12 +22,12 @@ func (r ResponseCheckTx) IsErr() bool {
 }
 
 // IsOK returns true if Code is OK.
-func (r ResponseDeliverTx) IsOK() bool {
+func (r ExecTxResult) IsOK() bool {
     return r.Code == CodeTypeOK
 }
 
 // IsErr returns true if Code is something other than OK.
-func (r ResponseDeliverTx) IsErr() bool {
+func (r ExecTxResult) IsErr() bool {
     return r.Code != CodeTypeOK
 }
 
@@ -51,6 +51,10 @@ func (r ResponseProcessProposal) IsStatusUnknown() bool {
     return r.Status == ResponseProcessProposal_UNKNOWN
 }
 
+func (r ResponseVerifyVoteExtension) IsAccepted() bool {
+    return r.Status == ResponseVerifyVoteExtension_ACCEPT
+}
+
 //---------------------------------------------------------------------------
 // override JSON marshaling so we emit defaults (ie. disable omitempty)
 
@@ -72,12 +76,12 @@ func (r *ResponseCheckTx) UnmarshalJSON(b []byte) error {
     return jsonpbUnmarshaller.Unmarshal(reader, r)
 }
 
-func (r *ResponseDeliverTx) MarshalJSON() ([]byte, error) {
+func (r *ExecTxResult) MarshalJSON() ([]byte, error) {
     s, err := jsonpbMarshaller.MarshalToString(r)
     return []byte(s), err
 }
 
-func (r *ResponseDeliverTx) UnmarshalJSON(b []byte) error {
+func (r *ExecTxResult) UnmarshalJSON(b []byte) error {
     reader := bytes.NewBuffer(b)
     return jsonpbUnmarshaller.Unmarshal(reader, r)
 }
@@ -124,7 +128,38 @@ type jsonRoundTripper interface {
 
 var _ jsonRoundTripper = (*ResponseCommit)(nil)
 var _ jsonRoundTripper = (*ResponseQuery)(nil)
-var _ jsonRoundTripper = (*ResponseDeliverTx)(nil)
+var _ jsonRoundTripper = (*ExecTxResult)(nil)
 var _ jsonRoundTripper = (*ResponseCheckTx)(nil)
 
 var _ jsonRoundTripper = (*EventAttribute)(nil)
 
+// deterministicExecTxResult constructs a copy of response that omits
+// non-deterministic fields. The input response is not modified.
+func deterministicExecTxResult(response *ExecTxResult) *ExecTxResult {
+    return &ExecTxResult{
+        Code:      response.Code,
+        Data:      response.Data,
+        GasWanted: response.GasWanted,
+        GasUsed:   response.GasUsed,
+    }
+}
+
+// MarshalTxResults encodes the TxResults as a list of byte slices.
+// It strips off the non-deterministic pieces of the TxResults so that
+// the resulting data can be used for hash comparisons and in Merkle proofs.
+func MarshalTxResults(r []*ExecTxResult) ([][]byte, error) {
+    s := make([][]byte, len(r))
+    for i, e := range r {
+        d := deterministicExecTxResult(e)
+        b, err := d.Marshal()
+        if err != nil {
+            return nil, err
+        }
+        s[i] = b
+    }
+    return s, nil
+}
+
 // -----------------------------------------------
 // construct Result data
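Together, deterministicExecTxResult and MarshalTxResults let a node commit to only the consensus-relevant fields of each transaction result. A short usage sketch built on the APIs introduced above (import paths as used elsewhere in this diff):

package main

import (
    "fmt"

    abci "github.com/tendermint/tendermint/abci/types"
    "github.com/tendermint/tendermint/crypto/merkle"
)

func main() {
    results := []*abci.ExecTxResult{
        {Code: 0, Data: []byte("ok"), Log: "free-form log; excluded from the hash"},
        {Code: 1, GasWanted: 100, GasUsed: 90},
    }
    // Log, Info, Events and Codespace are stripped before marshaling, so the
    // root commits only to Code, Data, GasWanted and GasUsed.
    bzs, err := abci.MarshalTxResults(results)
    if err != nil {
        panic(err)
    }
    root := merkle.HashFromByteSlices(bzs)
    fmt.Printf("results root: %X\n", root)
}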
File diff suppressed because it is too large.

abci/types/types_test.go (new file, 74 lines)
@@ -0,0 +1,74 @@
package types_test

import (
    "testing"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"

    abci "github.com/tendermint/tendermint/abci/types"
    "github.com/tendermint/tendermint/crypto/merkle"
)

func TestHashAndProveResults(t *testing.T) {
    trs := []*abci.ExecTxResult{
        // Note, these tests rely on the first two entries being in this order.
        {Code: 0, Data: nil},
        {Code: 0, Data: []byte{}},

        {Code: 0, Data: []byte("one")},
        {Code: 14, Data: nil},
        {Code: 14, Data: []byte("foo")},
        {Code: 14, Data: []byte("bar")},
    }

    // Nil and []byte{} should produce the same bytes.
    bz0, err := trs[0].Marshal()
    require.NoError(t, err)
    bz1, err := trs[1].Marshal()
    require.NoError(t, err)
    require.Equal(t, bz0, bz1)

    // Make sure that we can get a root hash from results and verify proofs.
    rs, err := abci.MarshalTxResults(trs)
    require.NoError(t, err)
    root := merkle.HashFromByteSlices(rs)
    assert.NotEmpty(t, root)

    _, proofs := merkle.ProofsFromByteSlices(rs)
    for i, tr := range trs {
        bz, err := tr.Marshal()
        require.NoError(t, err)

        valid := proofs[i].Verify(root, bz)
        assert.NoError(t, valid, "%d", i)
    }
}

func TestHashDeterministicFieldsOnly(t *testing.T) {
    tr1 := abci.ExecTxResult{
        Code:      1,
        Data:      []byte("transaction"),
        Log:       "nondeterministic data: abc",
        Info:      "nondeterministic data: abc",
        GasWanted: 1000,
        GasUsed:   1000,
        Events:    []abci.Event{},
        Codespace: "nondeterministic.data.abc",
    }
    tr2 := abci.ExecTxResult{
        Code:      1,
        Data:      []byte("transaction"),
        Log:       "nondeterministic data: def",
        Info:      "nondeterministic data: def",
        GasWanted: 1000,
        GasUsed:   1000,
        Events:    []abci.Event{},
        Codespace: "nondeterministic.data.def",
    }
    r1, err := abci.MarshalTxResults([]*abci.ExecTxResult{&tr1})
    require.NoError(t, err)
    r2, err := abci.MarshalTxResults([]*abci.ExecTxResult{&tr2})
    require.NoError(t, err)
    require.Equal(t, merkle.HashFromByteSlices(r1), merkle.HashFromByteSlices(r2))
}
blocksync/metrics.gen.go (new file, 30 lines)
@@ -0,0 +1,30 @@
// Code generated by metricsgen. DO NOT EDIT.

package blocksync

import (
    "github.com/go-kit/kit/metrics/discard"
    prometheus "github.com/go-kit/kit/metrics/prometheus"
    stdprometheus "github.com/prometheus/client_golang/prometheus"
)

func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
    labels := []string{}
    for i := 0; i < len(labelsAndValues); i += 2 {
        labels = append(labels, labelsAndValues[i])
    }
    return &Metrics{
        Syncing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
            Namespace: namespace,
            Subsystem: MetricsSubsystem,
            Name:      "syncing",
            Help:      "Whether or not a node is block syncing. 1 if yes, 0 if no.",
        }, labels).With(labelsAndValues...),
    }
}

func NopMetrics() *Metrics {
    return &Metrics{
        Syncing: discard.NewGauge(),
    }
}
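PrometheusMetrics takes alternating label keys and values: the loop keeps the even-indexed entries as label names and passes the full list to With for the values. A small wiring sketch (namespace and label values here are illustrative):

package main

import "github.com/tendermint/tendermint/blocksync"

func main() {
    // Prometheus-backed: should surface as tendermint_blocksync_syncing{chain_id="test-chain"}.
    m := blocksync.PrometheusMetrics("tendermint", "chain_id", "test-chain")
    m.Syncing.Set(1)

    // No-op variant for tests or when instrumentation is disabled.
    n := blocksync.NopMetrics()
    n.Syncing.Set(1) // silently discarded
}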
blocksync/metrics.go (new file, 19 lines)
@@ -0,0 +1,19 @@
package blocksync

import (
    "github.com/go-kit/kit/metrics"
)

const (
    // MetricsSubsystem is a subsystem shared by all metrics exposed by this
    // package.
    MetricsSubsystem = "blocksync"
)

//go:generate go run ../scripts/metricsgen -struct=Metrics

// Metrics contains metrics exposed by this package.
type Metrics struct {
    // Whether or not a node is block syncing. 1 if yes, 0 if no.
    Syncing metrics.Gauge
}
@@ -19,58 +19,6 @@ const (
     BlockResponseMessageFieldKeySize
 )
 
-// EncodeMsg encodes a Protobuf message
-func EncodeMsg(pb proto.Message) ([]byte, error) {
-    msg := bcproto.Message{}
-
-    switch pb := pb.(type) {
-    case *bcproto.BlockRequest:
-        msg.Sum = &bcproto.Message_BlockRequest{BlockRequest: pb}
-    case *bcproto.BlockResponse:
-        msg.Sum = &bcproto.Message_BlockResponse{BlockResponse: pb}
-    case *bcproto.NoBlockResponse:
-        msg.Sum = &bcproto.Message_NoBlockResponse{NoBlockResponse: pb}
-    case *bcproto.StatusRequest:
-        msg.Sum = &bcproto.Message_StatusRequest{StatusRequest: pb}
-    case *bcproto.StatusResponse:
-        msg.Sum = &bcproto.Message_StatusResponse{StatusResponse: pb}
-    default:
-        return nil, fmt.Errorf("unknown message type %T", pb)
-    }
-
-    bz, err := proto.Marshal(&msg)
-    if err != nil {
-        return nil, fmt.Errorf("unable to marshal %T: %w", pb, err)
-    }
-
-    return bz, nil
-}
-
-// DecodeMsg decodes a Protobuf message.
-func DecodeMsg(bz []byte) (proto.Message, error) {
-    pb := &bcproto.Message{}
-
-    err := proto.Unmarshal(bz, pb)
-    if err != nil {
-        return nil, err
-    }
-
-    switch msg := pb.Sum.(type) {
-    case *bcproto.Message_BlockRequest:
-        return msg.BlockRequest, nil
-    case *bcproto.Message_BlockResponse:
-        return msg.BlockResponse, nil
-    case *bcproto.Message_NoBlockResponse:
-        return msg.NoBlockResponse, nil
-    case *bcproto.Message_StatusRequest:
-        return msg.StatusRequest, nil
-    case *bcproto.Message_StatusResponse:
-        return msg.StatusResponse, nil
-    default:
-        return nil, fmt.Errorf("unknown message type %T", msg)
-    }
-}
-
 // ValidateMsg validates a message.
 func ValidateMsg(pb proto.Message) error {
     if pb == nil {
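EncodeMsg and DecodeMsg can be deleted because, in this branch, reactors hand typed messages to the p2p layer inside an Envelope and the switch does the proto (un)marshaling itself (see the Reactor changes further down). A sketch of the replacement call shape; the channel constant's value here is an assumption, the real constant lives in this package:

package blocksyncsketch

import (
    bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"

    "github.com/tendermint/tendermint/p2p"
)

const BlocksyncChannel = byte(0x40) // assumed to match the package's constant

// requestStatus shows the post-change shape: no EncodeMsg round-trip; the
// switch marshals the typed message when it writes to the wire.
func requestStatus(peer p2p.Peer) bool {
    return peer.TrySend(p2p.Envelope{
        ChannelID: BlocksyncChannel,
        Message:   &bcproto.StatusRequest{},
    })
}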
@@ -80,7 +80,7 @@ func TestBcStatusResponseMessageValidateBasic(t *testing.T) {
 }
 
 //nolint:lll // ignore line length in tests
-func TestBlockchainMessageVectors(t *testing.T) {
+func TestBlocksyncMessageVectors(t *testing.T) {
     block := types.MakeBlock(int64(3), []types.Tx{types.Tx("Hello World")}, nil, nil)
     block.Version.Block = 11 // overwrite updated protocol version
 
@@ -32,6 +32,7 @@ const (
     maxTotalRequesters        = 600
     maxPendingRequests        = maxTotalRequesters
     maxPendingRequestsPerPeer = 20
+    requestRetrySeconds       = 30
 
     // Minimum recv rate to ensure we're receiving blocks from a peer fast
     // enough. If a peer is not sending us data at at least that rate, we
@@ -185,16 +186,20 @@ func (pool *BlockPool) IsCaughtUp() bool {
     return isCaughtUp
 }
 
-// PeekTwoBlocks returns blocks at pool.height and pool.height+1.
-// We need to see the second block's Commit to validate the first block.
-// So we peek two blocks at a time.
-func (pool *BlockPool) PeekTwoBlocks() (first *types.Block, second *types.Block) {
+// PeekTwoBlocks returns blocks at pool.height and pool.height+1. We need to
+// see the second block's Commit to validate the first block. So we peek two
+// blocks at a time. We return an extended commit, containing vote extensions
+// and their associated signatures, as this is critical to consensus in ABCI++
+// as we switch from block sync to consensus mode.
+//
+// The caller will verify the commit.
+func (pool *BlockPool) PeekTwoBlocks() (first, second *types.Block, firstExtCommit *types.ExtendedCommit) {
     pool.mtx.Lock()
     defer pool.mtx.Unlock()
 
     if r := pool.requesters[pool.height]; r != nil {
         first = r.getBlock()
+        firstExtCommit = r.getExtendedCommit()
     }
     if r := pool.requesters[pool.height+1]; r != nil {
         second = r.getBlock()
@@ -203,7 +208,8 @@ func (pool *BlockPool) PeekTwoBlocks() (first *types.Block, second *types.Block)
 }
 
 // PopRequest pops the first block at pool.height.
-// It must have been validated by 'second'.Commit from PeekTwoBlocks().
+// It must have been validated by the second Commit from PeekTwoBlocks.
+// TODO(thane): (?) and its corresponding ExtendedCommit.
 func (pool *BlockPool) PopRequest() {
     pool.mtx.Lock()
     defer pool.mtx.Unlock()
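PeekTwoBlocks and PopRequest are consumed in a strict peek-verify-pop order: the caller peeks at heights H and H+1, uses the second block's LastCommit (which signs the first block) to verify, and only then pops H. A condensed sketch of that consumption step; the BlockID construction is simplified (the real code also includes the part-set header):

package blocksyncsketch

import (
    "github.com/tendermint/tendermint/blocksync"
    sm "github.com/tendermint/tendermint/state"
    "github.com/tendermint/tendermint/types"
)

func trySyncOne(pool *blocksync.BlockPool, state sm.State, chainID string) {
    first, second, extCommit := pool.PeekTwoBlocks()
    if first == nil || second == nil {
        return // need two consecutive blocks before anything can be verified
    }
    _ = extCommit // carried along so consensus can start with vote extensions
    firstID := types.BlockID{Hash: first.Hash()} // simplified
    // second.LastCommit carries +2/3 signatures over `first`.
    if err := state.Validators.VerifyCommitLight(chainID, firstID, first.Height, second.LastCommit); err != nil {
        pool.RedoRequest(first.Height) // refetch from a different peer
        return
    }
    pool.PopRequest() // `first` is now safe to persist and apply
}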
@@ -240,12 +246,23 @@ func (pool *BlockPool) RedoRequest(height int64) p2p.ID {
     return peerID
 }
 
-// AddBlock validates that the block comes from the peer it was expected from and calls the requester to store it.
+// AddBlock validates that the block comes from the peer it was expected from
+// and calls the requester to store it.
+//
+// This requires an extended commit at the same height as the supplied block -
+// the block contains the last commit, but we need the latest commit in case we
+// need to switch over from block sync to consensus at this height. If the
+// height of the extended commit and the height of the block do not match, we
+// do not add the block and return an error.
 // TODO: ensure that blocks come in order for each peer.
-func (pool *BlockPool) AddBlock(peerID p2p.ID, block *types.Block, blockSize int) {
+func (pool *BlockPool) AddBlock(peerID p2p.ID, block *types.Block, extCommit *types.ExtendedCommit, blockSize int) error {
     pool.mtx.Lock()
     defer pool.mtx.Unlock()
 
+    if extCommit != nil && block.Height != extCommit.Height {
+        return fmt.Errorf("heights don't match, not adding block (block height: %d, commit height: %d)", block.Height, extCommit.Height)
+    }
+
     requester := pool.requesters[block.Height]
     if requester == nil {
         pool.Logger.Info(
@@ -263,19 +280,22 @@ func (pool *BlockPool) AddBlock(peerID p2p.ID, block *types.Block, blockSize int
         if diff > maxDiffBetweenCurrentAndReceivedBlockHeight {
             pool.sendError(errors.New("peer sent us a block we didn't expect with a height too far ahead/behind"), peerID)
         }
-        return
+        return fmt.Errorf("peer sent us a block we didn't expect (peer: %s, current height: %d, block height: %d)", peerID, pool.height, block.Height)
     }
 
-    if requester.setBlock(block, peerID) {
+    if requester.setBlock(block, extCommit, peerID) {
         atomic.AddInt32(&pool.numPending, -1)
         peer := pool.peers[peerID]
        if peer != nil {
            peer.decrPending(blockSize)
        }
    } else {
-        pool.Logger.Info("invalid peer", "peer", peerID, "blockHeight", block.Height)
-        pool.sendError(errors.New("invalid peer"), peerID)
+        err := errors.New("requester is different or block already exists")
+        pool.sendError(err, peerID)
+        return fmt.Errorf("%w (peer: %s, requester: %s, block height: %d)", err, peerID, requester.getPeerID(), block.Height)
    }
 
+    return nil
 }
 
 // MaxPeerHeight returns the highest reported height.
@@ -424,6 +444,7 @@ func (pool *BlockPool) debug() string {
         } else {
             str += fmt.Sprintf("H(%v):", h)
             str += fmt.Sprintf("B?(%v) ", pool.requesters[h].block != nil)
+            str += fmt.Sprintf("C?(%v) ", pool.requesters[h].extCommit != nil)
         }
     }
     return str
@@ -512,9 +533,10 @@ type bpRequester struct {
     gotBlockCh chan struct{}
     redoCh     chan p2p.ID // redo may send multitime, add peerId to identify repeat
 
-    mtx    tmsync.Mutex
-    peerID p2p.ID
-    block  *types.Block
+    mtx       tmsync.Mutex
+    peerID    p2p.ID
+    block     *types.Block
+    extCommit *types.ExtendedCommit
 }
 
 func newBPRequester(pool *BlockPool, height int64) *bpRequester {
@@ -537,13 +559,14 @@ func (bpr *bpRequester) OnStart() error {
 }
 
 // Returns true if the peer matches and block doesn't already exist.
-func (bpr *bpRequester) setBlock(block *types.Block, peerID p2p.ID) bool {
+func (bpr *bpRequester) setBlock(block *types.Block, extCommit *types.ExtendedCommit, peerID p2p.ID) bool {
     bpr.mtx.Lock()
     if bpr.block != nil || bpr.peerID != peerID {
         bpr.mtx.Unlock()
         return false
     }
     bpr.block = block
+    bpr.extCommit = extCommit
     bpr.mtx.Unlock()
 
     select {
@@ -559,6 +582,12 @@ func (bpr *bpRequester) getBlock() *types.Block {
     return bpr.block
 }
 
+func (bpr *bpRequester) getExtendedCommit() *types.ExtendedCommit {
+    bpr.mtx.Lock()
+    defer bpr.mtx.Unlock()
+    return bpr.extCommit
+}
+
 func (bpr *bpRequester) getPeerID() p2p.ID {
     bpr.mtx.Lock()
     defer bpr.mtx.Unlock()
@@ -576,6 +605,7 @@ func (bpr *bpRequester) reset() {
 
     bpr.peerID = ""
     bpr.block = nil
+    bpr.extCommit = nil
 }
 
 // Tells bpRequester to pick another peer and try again.
@@ -602,7 +632,7 @@ OUTER_LOOP:
         }
         peer = bpr.pool.pickIncrAvailablePeer(bpr.height)
         if peer == nil {
-            // log.Info("No peers available", "height", height)
+            bpr.Logger.Debug("No peers currently available; will retry shortly", "height", bpr.height)
             time.Sleep(requestIntervalMS * time.Millisecond)
             continue PICK_PEER_LOOP
         }
@@ -612,6 +642,7 @@ OUTER_LOOP:
         bpr.peerID = peer.id
         bpr.mtx.Unlock()
 
+        to := time.NewTimer(requestRetrySeconds * time.Second)
         // Send request and wait.
         bpr.pool.sendRequest(bpr.height, peer.id)
     WAIT_LOOP:
@@ -624,6 +655,11 @@ OUTER_LOOP:
                 return
             case <-bpr.Quit():
                 return
+            case <-to.C:
+                bpr.Logger.Debug("Retrying block request after timeout", "height", bpr.height, "peer", bpr.peerID)
+                // Simulate a redo
+                bpr.reset()
+                continue OUTER_LOOP
             case peerID := <-bpr.redoCh:
                 if peerID == bpr.peerID {
                     bpr.reset()
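The new requestRetrySeconds timer bounds how long a requester waits on a single peer before resetting and re-picking. The select shape in isolation (the timeout is shortened so this example terminates quickly):

package main

import (
    "fmt"
    "time"
)

func main() {
    redoCh := make(chan string)                // stands in for bpr.redoCh
    to := time.NewTimer(50 * time.Millisecond) // requestRetrySeconds in the real code
    defer to.Stop()

    select {
    case <-to.C:
        fmt.Println("timed out; reset and pick another peer")
    case peerID := <-redoCh:
        fmt.Println("redo requested by pool for peer", peerID)
    }
}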
@@ -42,7 +42,10 @@ func (p testPeer) runInputRoutine() {
 // Request desired, pretend like we got the block immediately.
 func (p testPeer) simulateInput(input inputData) {
     block := &types.Block{Header: types.Header{Height: input.request.Height}}
-    input.pool.AddBlock(input.request.PeerID, block, 123)
+    extCommit := &types.ExtendedCommit{
+        Height: input.request.Height,
+    }
+    _ = input.pool.AddBlock(input.request.PeerID, block, extCommit, 123)
     // TODO: uncommenting this creates a race which is detected by:
     // https://github.com/golang/go/blob/2bd767b1022dd3254bcec469f0ee164024726486/src/testing/testing.go#L854-L856
     // see: https://github.com/tendermint/tendermint/issues/3390#issue-418379890
@@ -112,7 +115,7 @@ func TestBlockPoolBasic(t *testing.T) {
             if !pool.IsRunning() {
                 return
             }
-            first, second := pool.PeekTwoBlocks()
+            first, second, _ := pool.PeekTwoBlocks()
             if first != nil && second != nil {
                 pool.PopRequest()
             } else {
@@ -171,7 +174,7 @@ func TestBlockPoolTimeout(t *testing.T) {
             if !pool.IsRunning() {
                 return
             }
-            first, second := pool.PeekTwoBlocks()
+            first, second, _ := pool.PeekTwoBlocks()
             if first != nil && second != nil {
                 pool.PopRequest()
             } else {
@@ -30,7 +30,7 @@ const (
 )
 
 type consensusReactor interface {
-    // for when we switch from blockchain reactor and block sync to
+    // for when we switch from blocksync reactor and block sync to
     // the consensus machine
     SwitchToConsensus(state sm.State, skipWAL bool)
 }
@@ -52,17 +52,21 @@ type Reactor struct {
     initialState sm.State
 
     blockExec *sm.BlockExecutor
-    store     *store.BlockStore
+    store     sm.BlockStore
     pool      *BlockPool
     blockSync bool
 
     requestsCh <-chan BlockRequest
     errorsCh   <-chan peerError
 
+    switchToConsensusMs int
+
+    metrics *Metrics
 }
 
 // NewReactor returns new reactor instance.
 func NewReactor(state sm.State, blockExec *sm.BlockExecutor, store *store.BlockStore,
-    blockSync bool) *Reactor {
+    blockSync bool, metrics *Metrics) *Reactor {
 
     if state.LastBlockHeight != store.Height() {
         panic(fmt.Sprintf("state (%v) and store (%v) height mismatch", state.LastBlockHeight,
@@ -88,6 +92,7 @@ func NewReactor(state sm.State, blockExec *sm.BlockExecutor, store *store.BlockS
         blockSync:  blockSync,
         requestsCh: requestsCh,
         errorsCh:   errorsCh,
+        metrics:    metrics,
     }
     bcR.BaseReactor = *p2p.NewBaseReactor("Reactor", bcR)
     return bcR
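With metrics now a constructor argument, callers decide at wiring time whether the reactor reports to Prometheus. A sketch of both choices, with store and executor set up as elsewhere in this diff:

package blocksyncsketch

import (
    "github.com/tendermint/tendermint/blocksync"
    sm "github.com/tendermint/tendermint/state"
    "github.com/tendermint/tendermint/store"
)

func newBlocksyncReactor(state sm.State, blockExec *sm.BlockExecutor, bs *store.BlockStore, instrumented bool) *blocksync.Reactor {
    m := blocksync.NopMetrics() // safe default: all writes discarded
    if instrumented {
        m = blocksync.PrometheusMetrics("tendermint")
    }
    return blocksync.NewReactor(state, blockExec, bs, true, m)
}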
@@ -143,21 +148,20 @@ func (bcR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
             SendQueueCapacity:   1000,
             RecvBufferCapacity:  50 * 4096,
             RecvMessageCapacity: MaxMsgSize,
+            MessageType:         &bcproto.Message{},
         },
     }
 }
 
 // AddPeer implements Reactor by sending our state to peer.
 func (bcR *Reactor) AddPeer(peer p2p.Peer) {
-    msgBytes, err := EncodeMsg(&bcproto.StatusResponse{
-        Base:   bcR.store.Base(),
-        Height: bcR.store.Height()})
-    if err != nil {
-        bcR.Logger.Error("could not convert msg to protobuf", "err", err)
-        return
-    }
-
-    peer.Send(BlocksyncChannel, msgBytes)
+    peer.Send(p2p.Envelope{
+        ChannelID: BlocksyncChannel,
+        Message: &bcproto.StatusResponse{
+            Base:   bcR.store.Base(),
+            Height: bcR.store.Height(),
+        },
+    })
     // it's OK if send fails. will try later in poolRoutine
 
     // peer is added to the pool once we receive the first
@@ -175,76 +179,91 @@ func (bcR *Reactor) respondToPeer(msg *bcproto.BlockRequest,
     src p2p.Peer) (queued bool) {
 
     block := bcR.store.LoadBlock(msg.Height)
-    if block != nil {
-        bl, err := block.ToProto()
-        if err != nil {
-            bcR.Logger.Error("could not convert msg to protobuf", "err", err)
-            return false
-        }
-
-        msgBytes, err := EncodeMsg(&bcproto.BlockResponse{Block: bl})
-        if err != nil {
-            bcR.Logger.Error("could not marshal msg", "err", err)
-            return false
-        }
-
-        return src.TrySend(BlocksyncChannel, msgBytes)
+    if block == nil {
+        bcR.Logger.Info("Peer asking for a block we don't have", "src", src, "height", msg.Height)
+        return src.TrySend(p2p.Envelope{
+            ChannelID: BlocksyncChannel,
+            Message:   &bcproto.NoBlockResponse{Height: msg.Height},
+        })
     }
 
-    bcR.Logger.Info("Peer asking for a block we don't have", "src", src, "height", msg.Height)
+    state, err := bcR.blockExec.Store().Load()
+    if err != nil {
+        bcR.Logger.Error("loading state", "err", err)
+        return false
+    }
+    var extCommit *types.ExtendedCommit
+    if state.ConsensusParams.ABCI.VoteExtensionsEnabled(msg.Height) {
+        extCommit = bcR.store.LoadBlockExtendedCommit(msg.Height)
+        if extCommit == nil {
+            bcR.Logger.Error("found block in store with no extended commit", "block", block)
+            return false
+        }
+    }
 
-    msgBytes, err := EncodeMsg(&bcproto.NoBlockResponse{Height: msg.Height})
+    bl, err := block.ToProto()
     if err != nil {
         bcR.Logger.Error("could not convert msg to protobuf", "err", err)
         return false
     }
 
-    return src.TrySend(BlocksyncChannel, msgBytes)
+    return src.TrySend(p2p.Envelope{
+        ChannelID: BlocksyncChannel,
+        Message: &bcproto.BlockResponse{
+            Block:     bl,
+            ExtCommit: extCommit.ToProto(),
+        },
+    })
 }
 
 // Receive implements Reactor by handling 4 types of messages (look below).
-func (bcR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
-    msg, err := DecodeMsg(msgBytes)
-    if err != nil {
-        bcR.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err)
-        bcR.Switch.StopPeerForError(src, err)
+func (bcR *Reactor) Receive(e p2p.Envelope) {
+    if err := ValidateMsg(e.Message); err != nil {
+        bcR.Logger.Error("Peer sent us invalid msg", "peer", e.Src, "msg", e.Message, "err", err)
+        bcR.Switch.StopPeerForError(e.Src, err)
         return
     }
 
-    if err = ValidateMsg(msg); err != nil {
-        bcR.Logger.Error("Peer sent us invalid msg", "peer", src, "msg", msg, "err", err)
-        bcR.Switch.StopPeerForError(src, err)
-        return
-    }
+    bcR.Logger.Debug("Receive", "e.Src", e.Src, "chID", e.ChannelID, "msg", e.Message)
 
-    bcR.Logger.Debug("Receive", "src", src, "chID", chID, "msg", msg)
-
-    switch msg := msg.(type) {
+    switch msg := e.Message.(type) {
     case *bcproto.BlockRequest:
-        bcR.respondToPeer(msg, src)
+        bcR.respondToPeer(msg, e.Src)
     case *bcproto.BlockResponse:
         bi, err := types.BlockFromProto(msg.Block)
         if err != nil {
             bcR.Logger.Error("Block content is invalid", "err", err)
             return
         }
-        bcR.pool.AddBlock(src.ID(), bi, len(msgBytes))
+        var extCommit *types.ExtendedCommit
+        if msg.ExtCommit != nil {
+            var err error
+            extCommit, err = types.ExtendedCommitFromProto(msg.ExtCommit)
+            if err != nil {
+                bcR.Logger.Error("failed to convert extended commit from proto",
+                    "peer", e.Src,
+                    "err", err)
+                return
+            }
+        }
+
+        if err := bcR.pool.AddBlock(e.Src.ID(), bi, extCommit, msg.Block.Size()); err != nil {
+            bcR.Logger.Error("failed to add block", "err", err)
+        }
     case *bcproto.StatusRequest:
         // Send peer our state.
-        msgBytes, err := EncodeMsg(&bcproto.StatusResponse{
-            Height: bcR.store.Height(),
-            Base:   bcR.store.Base(),
-        })
-        if err != nil {
-            bcR.Logger.Error("could not convert msg to protobuf", "err", err)
-            return
-        }
-        src.TrySend(BlocksyncChannel, msgBytes)
+        e.Src.TrySend(p2p.Envelope{
+            ChannelID: BlocksyncChannel,
+            Message: &bcproto.StatusResponse{
+                Height: bcR.store.Height(),
+                Base:   bcR.store.Base(),
+            },
+        })
     case *bcproto.StatusResponse:
         // Got a peer status. Unverified.
-        bcR.pool.SetPeerRange(src.ID(), msg.Base, msg.Height)
+        bcR.pool.SetPeerRange(e.Src.ID(), msg.Base, msg.Height)
     case *bcproto.NoBlockResponse:
-        bcR.Logger.Debug("Peer does not have requested block", "peer", src, "height", msg.Height)
+        bcR.Logger.Debug("Peer does not have requested block", "peer", e.Src, "height", msg.Height)
     default:
         bcR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
     }
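Both paths above pivot on one predicate, state.ConsensusParams.ABCI.VoteExtensionsEnabled(h): when it holds, a block at height h must travel with its extended commit, and a missing one is treated as store corruption rather than a soft failure. The serving-side decision as a standalone helper (a sketch, not the code this diff adds):

package blocksyncsketch

import (
    "fmt"

    sm "github.com/tendermint/tendermint/state"
    "github.com/tendermint/tendermint/store"
    "github.com/tendermint/tendermint/types"
)

func extendedCommitFor(state sm.State, bs *store.BlockStore, h int64) (*types.ExtendedCommit, error) {
    if !state.ConsensusParams.ABCI.VoteExtensionsEnabled(h) {
        return nil, nil // extensions were not required at h; nil is legitimate
    }
    extCommit := bs.LoadBlockExtendedCommit(h)
    if extCommit == nil {
        return nil, fmt.Errorf("height %d: block stored without its extended commit", h)
    }
    return extCommit, nil
}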
@@ -253,6 +272,8 @@ func (bcR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
 // Handle messages from the poolReactor telling the reactor what to do.
 // NOTE: Don't sleep in the FOR_LOOP or otherwise slow it down!
 func (bcR *Reactor) poolRoutine(stateSynced bool) {
+    bcR.metrics.Syncing.Set(1)
+    defer bcR.metrics.Syncing.Set(0)
 
     trySyncTicker := time.NewTicker(trySyncIntervalMS * time.Millisecond)
     defer trySyncTicker.Stop()
@@ -260,7 +281,10 @@
     statusUpdateTicker := time.NewTicker(statusUpdateIntervalSeconds * time.Second)
     defer statusUpdateTicker.Stop()
 
-    switchToConsensusTicker := time.NewTicker(switchToConsensusIntervalSeconds * time.Second)
+    if bcR.switchToConsensusMs == 0 {
+        bcR.switchToConsensusMs = switchToConsensusIntervalSeconds * 1000
+    }
+    switchToConsensusTicker := time.NewTicker(time.Duration(bcR.switchToConsensusMs) * time.Millisecond)
     defer switchToConsensusTicker.Stop()
 
     blocksSynced := uint64(0)
@@ -273,6 +297,8 @@
 
     didProcessCh := make(chan struct{}, 1)
 
+    initialCommitHasExtensions := (bcR.initialState.LastBlockHeight > 0 && bcR.store.LoadBlockExtendedCommit(bcR.initialState.LastBlockHeight) != nil)
+
     go func() {
         for {
             select {
@@ -285,13 +311,10 @@
                 if peer == nil {
                     continue
                 }
-                msgBytes, err := EncodeMsg(&bcproto.BlockRequest{Height: request.Height})
-                if err != nil {
-                    bcR.Logger.Error("could not convert msg to proto", "err", err)
-                    continue
-                }
-
-                queued := peer.TrySend(BlocksyncChannel, msgBytes)
+                queued := peer.TrySend(p2p.Envelope{
+                    ChannelID: BlocksyncChannel,
+                    Message:   &bcproto.BlockRequest{Height: request.Height},
+                })
                 if !queued {
                     bcR.Logger.Debug("Send queue is full, drop block request", "peer", peer.ID(), "height", request.Height)
                 }
@@ -303,7 +326,7 @@
 
             case <-statusUpdateTicker.C:
                 // ask for status updates
-                go bcR.BroadcastStatusRequest() //nolint: errcheck
+                go bcR.BroadcastStatusRequest()
 
             }
         }
@@ -316,7 +339,44 @@ FOR_LOOP:
             height, numPending, lenRequesters := bcR.pool.GetStatus()
             outbound, inbound, _ := bcR.Switch.NumPeers()
             bcR.Logger.Debug("Consensus ticker", "numPending", numPending, "total", lenRequesters,
-                "outbound", outbound, "inbound", inbound)
+                "outbound", outbound, "inbound", inbound, "lastHeight", state.LastBlockHeight)
+
+            // The "if" statement below is a bit confusing, so here is a breakdown
+            // of its logic and purpose:
+            //
+            // If we are at genesis (no block in the chain), we don't need VoteExtensions
+            // because the first block's LastCommit is empty anyway.
+            //
+            // If VoteExtensions were disabled for the previous height then we don't need
+            // VoteExtensions.
+            //
+            // If we have sync'd at least one block, then we are guaranteed to have extensions
+            // if we need them by the logic inside loop FOR_LOOP: it requires that the blocks
+            // it fetches have extensions if extensions were enabled during the height.
+            //
+            // If we already had extensions for the initial height (e.g. we are recovering),
+            // then we are guaranteed to have extensions for the last block (if required) even
+            // if we did not blocksync any block.
+            //
+            missingExtension := true
+            if state.LastBlockHeight == 0 ||
+                !state.ConsensusParams.ABCI.VoteExtensionsEnabled(state.LastBlockHeight) ||
+                blocksSynced > 0 ||
+                initialCommitHasExtensions {
+                missingExtension = false
+            }
+
+            // If we require extensions but don't have them yet, we cannot switch to consensus.
+            if missingExtension {
+                bcR.Logger.Info(
+                    "no extended commit yet",
+                    "height", height,
+                    "last_block_height", state.LastBlockHeight,
+                    "initial_height", state.InitialHeight,
+                    "max_peer_height", bcR.pool.MaxPeerHeight(),
+                )
+                continue FOR_LOOP
+            }
             if bcR.pool.IsCaughtUp() {
                 bcR.Logger.Info("Time to switch to consensus reactor!", "height", height)
                 if err := bcR.pool.Stop(); err != nil {
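The long comment reduces to a single predicate: an extension is only "missing" when there is a previous block, extensions were enabled for it, nothing was synced this session, and no extended commit was on disk at startup. The same logic as a pure function (parameter names mirror the loop's locals; a sketch, not code from the diff):

package blocksyncsketch

func missingExtension(
    lastBlockHeight int64,
    extensionsEnabled func(int64) bool,
    blocksSynced uint64,
    initialCommitHasExtensions bool,
) bool {
    switch {
    case lastBlockHeight == 0:
        return false // genesis: the first block's LastCommit is empty anyway
    case !extensionsEnabled(lastBlockHeight):
        return false // extensions were not required at that height
    case blocksSynced > 0:
        return false // FOR_LOOP guarantees synced blocks carried extensions
    case initialCommitHasExtensions:
        return false // recovering with extensions already on disk
    default:
        return true
    }
}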
@@ -349,15 +409,28 @@
             // routine.
 
             // See if there are any blocks to sync.
-            first, second := bcR.pool.PeekTwoBlocks()
-            // bcR.Logger.Info("TrySync peeked", "first", first, "second", second)
+            first, second, extCommit := bcR.pool.PeekTwoBlocks()
             if first == nil || second == nil {
-                // We need both to sync the first block.
+                // we need to have fetched two consecutive blocks in order to
+                // perform blocksync verification
                 continue FOR_LOOP
-            } else {
-                // Try again quickly next loop.
-                didProcessCh <- struct{}{}
             }
+            // Some sanity checks on heights
+            if state.LastBlockHeight > 0 && state.LastBlockHeight+1 != first.Height {
+                // Panicking because the block pool's height MUST keep consistent with the state; the block pool is totally under our control
+                panic(fmt.Errorf("peeked first block has unexpected height; expected %d, got %d", state.LastBlockHeight+1, first.Height))
+            }
+            if first.Height+1 != second.Height {
+                // Panicking because this is an obvious bug in the block pool, which is totally under our control
+                panic(fmt.Errorf("heights of first and second block are not consecutive; expected %d, got %d", state.LastBlockHeight, first.Height))
+            }
+            if extCommit == nil && state.ConsensusParams.ABCI.VoteExtensionsEnabled(first.Height) {
+                // See https://github.com/tendermint/tendermint/pull/8433#discussion_r866790631
+                panic(fmt.Errorf("peeked first block without extended commit at height %d - possible node store corruption", first.Height))
+            }
+
+            // Try again quickly next loop.
+            didProcessCh <- struct{}{}
 
             firstParts, err := first.MakePartSet(types.BlockPartSizeBytes)
             if err != nil {
@@ -372,6 +445,7 @@
             // NOTE: we can probably make this more efficient, but note that calling
             // first.Hash() doesn't verify the tx contents, so MakePartSet() is
             // currently necessary.
+            // TODO(sergio): Should we also validate against the extended commit?
             err = state.Validators.VerifyCommitLight(
                 chainID, firstID, first.Height, second.LastCommit)
 
@@ -379,6 +453,10 @@
                 // validate the block before we persist it
                 err = bcR.blockExec.ValidateBlock(state, first)
             }
+            if err == nil && state.ConsensusParams.ABCI.VoteExtensionsEnabled(first.Height) {
+                // if vote extensions were required at this height, ensure they exist.
+                err = extCommit.EnsureExtensions()
+            }
 
             if err != nil {
                 bcR.Logger.Error("Error in validation", "err", err)
@@ -402,7 +480,15 @@
                 bcR.pool.PopRequest()
 
                 // TODO: batch saves so we dont persist to disk every block
-                bcR.store.SaveBlock(first, firstParts, second.LastCommit)
+                if state.ConsensusParams.ABCI.VoteExtensionsEnabled(first.Height) {
+                    bcR.store.SaveBlockWithExtendedCommit(first, firstParts, extCommit)
+                } else {
+                    // We use LastCommit here instead of extCommit. extCommit is not
+                    // guaranteed to be populated by the peer if extensions are not enabled.
+                    // Currently, the peer should provide an extCommit even if the vote extension data are absent
+                    // but this may change so using second.LastCommit is safer.
+                    bcR.store.SaveBlock(first, firstParts, second.LastCommit)
+                }
 
                 // TODO: same thing for app - but we would need a way to
                 // get the hash without persisting the state
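The save path splits on the same predicate: heights with extensions enabled must persist the extended commit, while everything else keeps using the next block's LastCommit. As a helper (a sketch of the branch above, not code the diff adds):

package blocksyncsketch

import (
    sm "github.com/tendermint/tendermint/state"
    "github.com/tendermint/tendermint/store"
    "github.com/tendermint/tendermint/types"
)

func persistBlock(bs *store.BlockStore, state sm.State, block *types.Block,
    parts *types.PartSet, nextLastCommit *types.Commit, extCommit *types.ExtendedCommit) {
    if state.ConsensusParams.ABCI.VoteExtensionsEnabled(block.Height) {
        // Extensions were required here; store them for a later consensus start.
        bs.SaveBlockWithExtendedCommit(block, parts, extCommit)
        return
    }
    // Without extensions, the plain commit from the following block suffices.
    bs.SaveBlock(block, parts, nextLastCommit)
}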
@@ -429,14 +515,9 @@
 }
 
 // BroadcastStatusRequest broadcasts `BlockStore` base and height.
-func (bcR *Reactor) BroadcastStatusRequest() error {
-    bm, err := EncodeMsg(&bcproto.StatusRequest{})
-    if err != nil {
-        bcR.Logger.Error("could not convert msg to proto", "err", err)
-        return fmt.Errorf("could not convert msg to proto: %w", err)
-    }
-
-    bcR.Switch.Broadcast(BlocksyncChannel, bm)
-
-    return nil
+func (bcR *Reactor) BroadcastStatusRequest() {
+    bcR.Switch.Broadcast(p2p.Envelope{
+        ChannelID: BlocksyncChannel,
+        Message:   &bcproto.StatusRequest{},
+    })
 }
@@ -19,6 +19,7 @@ import (
     "github.com/tendermint/tendermint/libs/log"
     mpmocks "github.com/tendermint/tendermint/mempool/mocks"
     "github.com/tendermint/tendermint/p2p"
+    tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
     "github.com/tendermint/tendermint/proxy"
     sm "github.com/tendermint/tendermint/state"
     "github.com/tendermint/tendermint/store"
@@ -41,10 +42,13 @@ func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.G
     }
     sort.Sort(types.PrivValidatorsByAddress(privValidators))
 
+    consPar := types.DefaultConsensusParams()
+    consPar.ABCI.VoteExtensionsEnableHeight = 1
     return &types.GenesisDoc{
-        GenesisTime: tmtime.Now(),
-        ChainID:     test.DefaultTestChainID,
-        Validators:  validators,
+        GenesisTime:     tmtime.Now(),
+        ChainID:         test.DefaultTestChainID,
+        Validators:      validators,
+        ConsensusParams: consPar,
     }, privValidators
 }
 
@@ -63,7 +67,7 @@ func newReactor(
         panic("only support one validator")
     }
 
-    app := &testApp{}
+    app := abci.NewBaseApplication()
     cc := proxy.NewLocalClientCreator(app)
     proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics())
     err := proxyApp.Start()
@@ -109,50 +113,62 @@
         panic(err)
     }
 
+    // The commit we are building for the current height.
+    seenExtCommit := &types.ExtendedCommit{}
+
     // let's add some blocks in
     for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ {
-        lastCommit := types.NewCommit(blockHeight-1, 0, types.BlockID{}, nil)
-        if blockHeight > 1 {
-            lastBlockMeta := blockStore.LoadBlockMeta(blockHeight - 1)
-            lastBlock := blockStore.LoadBlock(blockHeight - 1)
-
-            vote, err := types.MakeVote(
-                lastBlock.Header.Height,
-                lastBlockMeta.BlockID,
-                state.Validators,
-                privVals[0],
-                lastBlock.Header.ChainID,
-                time.Now(),
-            )
-            if err != nil {
-                panic(err)
-            }
-            lastCommit = types.NewCommit(vote.Height, vote.Round,
-                lastBlockMeta.BlockID, []types.CommitSig{vote.CommitSig()})
-        }
+        lastExtCommit := seenExtCommit.Clone()
 
-        thisBlock := state.MakeBlock(blockHeight, nil, lastCommit, nil, state.Validators.Proposer.Address)
+        thisBlock := state.MakeBlock(blockHeight, nil, lastExtCommit.ToCommit(), nil, state.Validators.Proposer.Address)
 
         thisParts, err := thisBlock.MakePartSet(types.BlockPartSizeBytes)
         require.NoError(t, err)
         blockID := types.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()}
 
+        // Simulate a commit for the current height
+        pubKey, err := privVals[0].GetPubKey()
+        if err != nil {
+            panic(err)
+        }
+        addr := pubKey.Address()
+        idx, _ := state.Validators.GetByAddress(addr)
+        vote, err := types.MakeVote(
+            privVals[0],
+            thisBlock.Header.ChainID,
+            idx,
+            thisBlock.Header.Height,
+            0,
+            tmproto.PrecommitType,
+            blockID,
+            time.Now(),
+        )
+        if err != nil {
+            panic(err)
+        }
+        seenExtCommit = &types.ExtendedCommit{
+            Height:             vote.Height,
+            Round:              vote.Round,
+            BlockID:            blockID,
+            ExtendedSignatures: []types.ExtendedCommitSig{vote.ExtendedCommitSig()},
+        }
+
         state, err = blockExec.ApplyBlock(state, blockID, thisBlock)
         if err != nil {
            panic(fmt.Errorf("error apply block: %w", err))
        }
 
-        blockStore.SaveBlock(thisBlock, thisParts, lastCommit)
+        blockStore.SaveBlockWithExtendedCommit(thisBlock, thisParts, seenExtCommit)
     }
 
-    bcReactor := NewReactor(state.Copy(), blockExec, blockStore, fastSync)
-    bcReactor.SetLogger(logger.With("module", "blockchain"))
+    bcReactor := NewReactor(state.Copy(), blockExec, blockStore, fastSync, NopMetrics())
+    bcReactor.SetLogger(logger.With("module", "blocksync"))
 
     return ReactorPair{bcReactor, proxyApp}
 }
 
 func TestNoBlockResponse(t *testing.T) {
-    config = test.ResetTestRoot("blockchain_reactor_test")
+    config = test.ResetTestRoot("blocksync_reactor_test")
     defer os.RemoveAll(config.RootDir)
     genDoc, privVals := randGenesisDoc(1, false, 30)
@@ -164,7 +180,7 @@ func TestNoBlockResponse(t *testing.T) {
     reactorPairs[1] = newReactor(t, log.TestingLogger(), genDoc, privVals, 0)
 
     p2p.MakeConnectedSwitches(config.P2P, 2, func(i int, s *p2p.Switch) *p2p.Switch {
-        s.AddReactor("BLOCKCHAIN", reactorPairs[i].reactor)
+        s.AddReactor("BLOCKSYNC", reactorPairs[i].reactor)
         return s
 
     }, p2p.Connect2Switches)
@@ -214,7 +230,7 @@ func TestNoBlockResponse(t *testing.T) {
 // Alternatively we could actually dial a TCP conn but
 // that seems extreme.
 func TestBadBlockStopsPeer(t *testing.T) {
-    config = test.ResetTestRoot("blockchain_reactor_test")
+    config = test.ResetTestRoot("blocksync_reactor_test")
     defer os.RemoveAll(config.RootDir)
     genDoc, privVals := randGenesisDoc(1, false, 30)
 
@@ -239,7 +255,7 @@ func TestBadBlockStopsPeer(t *testing.T) {
     reactorPairs[3] = newReactor(t, log.TestingLogger(), genDoc, privVals, 0)
 
     switches := p2p.MakeConnectedSwitches(config.P2P, 4, func(i int, s *p2p.Switch) *p2p.Switch {
-        s.AddReactor("BLOCKCHAIN", reactorPairs[i].reactor)
+        s.AddReactor("BLOCKSYNC", reactorPairs[i].reactor)
         return s
 
     }, p2p.Connect2Switches)
@@ -278,7 +294,7 @@ func TestBadBlockStopsPeer(t *testing.T) {
     reactorPairs = append(reactorPairs, lastReactorPair)
 
     switches = append(switches, p2p.MakeConnectedSwitches(config.P2P, 1, func(i int, s *p2p.Switch) *p2p.Switch {
-        s.AddReactor("BLOCKCHAIN", reactorPairs[len(reactorPairs)-1].reactor)
+        s.AddReactor("BLOCKSYNC", reactorPairs[len(reactorPairs)-1].reactor)
         return s
 
     }, p2p.Connect2Switches)...)
@@ -298,36 +314,65 @@ func TestBadBlockStopsPeer(t *testing.T) {
     assert.True(t, lastReactorPair.reactor.Switch.Peers().Size() < len(reactorPairs)-1)
 }
 
-type testApp struct {
-    abci.BaseApplication
-}
-
-var _ abci.Application = (*testApp)(nil)
-
-func (app *testApp) Info(req abci.RequestInfo) (resInfo abci.ResponseInfo) {
-    return abci.ResponseInfo{}
-}
-
-func (app *testApp) BeginBlock(req abci.RequestBeginBlock) abci.ResponseBeginBlock {
-    return abci.ResponseBeginBlock{}
-}
-
-func (app *testApp) EndBlock(req abci.RequestEndBlock) abci.ResponseEndBlock {
-    return abci.ResponseEndBlock{}
-}
-
-func (app *testApp) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx {
-    return abci.ResponseDeliverTx{Events: []abci.Event{}}
-}
-
-func (app *testApp) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx {
-    return abci.ResponseCheckTx{}
-}
-
-func (app *testApp) Commit() abci.ResponseCommit {
-    return abci.ResponseCommit{}
-}
-
-func (app *testApp) Query(reqQuery abci.RequestQuery) (resQuery abci.ResponseQuery) {
-    return
+func TestCheckSwitchToConsensusLastHeightZero(t *testing.T) {
+    const maxBlockHeight = int64(45)
+
+    config = test.ResetTestRoot("blocksync_reactor_test")
+    defer os.RemoveAll(config.RootDir)
+    genDoc, privVals := randGenesisDoc(1, false, 30)
+
+    reactorPairs := make([]ReactorPair, 1, 2)
+    reactorPairs[0] = newReactor(t, log.TestingLogger(), genDoc, privVals, 0)
+    reactorPairs[0].reactor.switchToConsensusMs = 50
+    defer func() {
+        for _, r := range reactorPairs {
+            err := r.reactor.Stop()
+            require.NoError(t, err)
+            err = r.app.Stop()
+            require.NoError(t, err)
+        }
+    }()
+
+    reactorPairs = append(reactorPairs, newReactor(t, log.TestingLogger(), genDoc, privVals, maxBlockHeight))
+
+    var switches []*p2p.Switch
+    for _, r := range reactorPairs {
+        switches = append(switches, p2p.MakeConnectedSwitches(config.P2P, 1, func(i int, s *p2p.Switch) *p2p.Switch {
+            s.AddReactor("BLOCKSYNC", r.reactor)
+            return s
+        }, p2p.Connect2Switches)...)
+    }
+
+    time.Sleep(60 * time.Millisecond)
+
+    // Connect both switches
+    p2p.Connect2Switches(switches, 0, 1)
+
+    startTime := time.Now()
+    for {
+        time.Sleep(20 * time.Millisecond)
+        caughtUp := true
+        for _, r := range reactorPairs {
+            if !r.reactor.pool.IsCaughtUp() {
+                caughtUp = false
+                break
+            }
+        }
+        if caughtUp {
+            break
+        }
+        if time.Since(startTime) > 90*time.Second {
+            msg := "timeout: reactors didn't catch up;"
+            for i, r := range reactorPairs {
+                h, p, lr := r.reactor.pool.GetStatus()
+                c := r.reactor.pool.IsCaughtUp()
+                msg += fmt.Sprintf(" reactor#%d (h %d, p %d, lr %d, c %t);", i, h, p, lr, c)
+            }
+            require.Fail(t, msg)
+        }
+    }
+
+    for _, r := range reactorPairs {
+        assert.GreaterOrEqual(t, r.reactor.store.Height(), maxBlockHeight-2)
+    }
 }
@@ -67,7 +67,8 @@ func copyConfig(home, dir string) error {
 func dumpProfile(dir, addr, profile string, debug int) error {
     endpoint := fmt.Sprintf("%s/debug/pprof/%s?debug=%d", addr, profile, debug)
 
-    resp, err := http.Get(endpoint) //nolint: gosec
+    //nolint:gosec,nolintlint
+    resp, err := http.Get(endpoint)
     if err != nil {
         return fmt.Errorf("failed to query for %s profile: %w", profile, err)
     }
 
@@ -143,38 +143,38 @@ func eventReIndex(cmd *cobra.Command, args eventReIndexArgs) error {
 
     fmt.Println("start re-indexing events:")
     defer bar.Finish()
-    for i := args.startHeight; i <= args.endHeight; i++ {
+    for height := args.startHeight; height <= args.endHeight; height++ {
         select {
         case <-cmd.Context().Done():
-            return fmt.Errorf("event re-index terminated at height %d: %w", i, cmd.Context().Err())
+            return fmt.Errorf("event re-index terminated at height %d: %w", height, cmd.Context().Err())
         default:
-            b := args.blockStore.LoadBlock(i)
-            if b == nil {
-                return fmt.Errorf("not able to load block at height %d from the blockstore", i)
+            block := args.blockStore.LoadBlock(height)
+            if block == nil {
+                return fmt.Errorf("not able to load block at height %d from the blockstore", height)
             }
 
-            r, err := args.stateStore.LoadABCIResponses(i)
+            resp, err := args.stateStore.LoadFinalizeBlockResponse(height)
             if err != nil {
-                return fmt.Errorf("not able to load ABCI Response at height %d from the statestore", i)
+                return fmt.Errorf("not able to load ABCI Response at height %d from the statestore", height)
             }
 
-            e := types.EventDataNewBlockHeader{
-                Header:           b.Header,
-                NumTxs:           int64(len(b.Txs)),
-                ResultBeginBlock: *r.BeginBlock,
-                ResultEndBlock:   *r.EndBlock,
+            e := types.EventDataNewBlockEvents{
+                Height: height,
+                Events: resp.Events,
             }
 
+            numTxs := len(resp.TxResults)
+
             var batch *txindex.Batch
-            if e.NumTxs > 0 {
-                batch = txindex.NewBatch(e.NumTxs)
+            if numTxs > 0 {
+                batch = txindex.NewBatch(int64(numTxs))
 
-                for i := range b.Data.Txs {
+                for idx, txResult := range resp.TxResults {
                     tr := abcitypes.TxResult{
-                        Height: b.Height,
-                        Index:  uint32(i),
-                        Tx:     b.Data.Txs[i],
-                        Result: *(r.DeliverTxs[i]),
+                        Height: height,
+                        Index:  uint32(idx),
+                        Tx:     block.Txs[idx],
+                        Result: *txResult,
                     }
 
                     if err = batch.Add(&tr); err != nil {
@@ -183,16 +183,16 @@ func eventReIndex(cmd *cobra.Command, args eventReIndexArgs) error {
                 }
 
                 if err := args.txIndexer.AddBatch(batch); err != nil {
-                    return fmt.Errorf("tx event re-index at height %d failed: %w", i, err)
+                    return fmt.Errorf("tx event re-index at height %d failed: %w", height, err)
                 }
             }
 
             if err := args.blockIndexer.Index(e); err != nil {
-                return fmt.Errorf("block event re-index at height %d failed: %w", i, err)
+                return fmt.Errorf("block event re-index at height %d failed: %w", height, err)
             }
         }
 
-        bar.Play(i)
+        bar.Play(height)
     }
 
     return nil
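Everything the re-index loop needs now comes from one ResponseFinalizeBlock: block-level events feed the block indexer, and each ExecTxResult is paired with its transaction for the tx indexer. The batch-building step in isolation (a condensed sketch):

package reindexsketch

import (
    abcitypes "github.com/tendermint/tendermint/abci/types"
    "github.com/tendermint/tendermint/state/txindex"
    "github.com/tendermint/tendermint/types"
)

func buildBatch(height int64, block *types.Block, resp *abcitypes.ResponseFinalizeBlock) (*txindex.Batch, error) {
    if len(resp.TxResults) == 0 {
        return nil, nil // nothing to index for this block
    }
    batch := txindex.NewBatch(int64(len(resp.TxResults)))
    for idx, txResult := range resp.TxResults {
        tr := abcitypes.TxResult{
            Height: height,
            Index:  uint32(idx),
            Tx:     block.Txs[idx],
            Result: *txResult,
        }
        if err := batch.Add(&tr); err != nil {
            return nil, err
        }
    }
    return batch, nil
}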
@@ -14,7 +14,6 @@ import (
     abcitypes "github.com/tendermint/tendermint/abci/types"
     tmcfg "github.com/tendermint/tendermint/config"
     "github.com/tendermint/tendermint/internal/test"
-    prototmstate "github.com/tendermint/tendermint/proto/tendermint/state"
     blockmocks "github.com/tendermint/tendermint/state/indexer/mocks"
     "github.com/tendermint/tendermint/state/mocks"
     txmocks "github.com/tendermint/tendermint/state/txindex/mocks"
@@ -141,25 +140,24 @@ func TestReIndexEvent(t *testing.T) {
         On("LoadBlock", base).Return(&types.Block{Data: types.Data{Txs: types.Txs{make(types.Tx, 1)}}}).
         On("LoadBlock", height).Return(&types.Block{Data: types.Data{Txs: types.Txs{make(types.Tx, 1)}}})
 
-    dtx := abcitypes.ResponseDeliverTx{}
-    abciResp := &prototmstate.ABCIResponses{
-        DeliverTxs: []*abcitypes.ResponseDeliverTx{&dtx},
-        EndBlock:   &abcitypes.ResponseEndBlock{},
-        BeginBlock: &abcitypes.ResponseBeginBlock{},
+    abciResp := &abcitypes.ResponseFinalizeBlock{
+        TxResults: []*abcitypes.ExecTxResult{
+            {Code: 1},
+        },
     }
 
     mockBlockIndexer.
-        On("Index", mock.AnythingOfType("types.EventDataNewBlockHeader")).Return(errors.New("")).Once().
-        On("Index", mock.AnythingOfType("types.EventDataNewBlockHeader")).Return(nil)
+        On("Index", mock.AnythingOfType("types.EventDataNewBlockEvents")).Return(errors.New("")).Once().
+        On("Index", mock.AnythingOfType("types.EventDataNewBlockEvents")).Return(nil)
 
     mockTxIndexer.
         On("AddBatch", mock.AnythingOfType("*txindex.Batch")).Return(errors.New("")).Once().
         On("AddBatch", mock.AnythingOfType("*txindex.Batch")).Return(nil)
 
     mockStateStore.
-        On("LoadABCIResponses", base).Return(nil, errors.New("")).Once().
-        On("LoadABCIResponses", base).Return(abciResp, nil).
-        On("LoadABCIResponses", height).Return(abciResp, nil)
+        On("LoadFinalizeBlockResponse", base).Return(nil, errors.New("")).Once().
+        On("LoadFinalizeBlockResponse", base).Return(abciResp, nil).
+        On("LoadFinalizeBlockResponse", height).Return(abciResp, nil)
 
     testCases := []struct {
         startHeight int64
@@ -167,7 +165,7 @@ func TestReIndexEvent(t *testing.T) {
         reIndexErr  bool
     }{
        {base, height, true}, // LoadBlock error
-        {base, height, true}, // LoadABCIResponses error
+        {base, height, true}, // LoadFinalizeBlockResponse error
        {base, height, true}, // index block event error
        {base, height, true}, // index tx event error
        {base, base, false},
@@ -14,7 +14,7 @@ import (
     "github.com/tendermint/tendermint/store"
 )
 
-var removeBlock bool = false
+var removeBlock = false
 
 func init() {
     RollbackStateCmd.Flags().BoolVar(&removeBlock, "hard", false, "remove last block as well as state")
 
@@ -66,6 +66,7 @@ func AddNodeFlags(cmd *cobra.Command) {
     cmd.Flags().String("p2p.external-address", config.P2P.ExternalAddress, "ip:port address to advertise to peers for them to dial")
     cmd.Flags().String("p2p.seeds", config.P2P.Seeds, "comma-delimited ID@host:port seed nodes")
     cmd.Flags().String("p2p.persistent_peers", config.P2P.PersistentPeers, "comma-delimited ID@host:port persistent peers")
+    cmd.Flags().String("p2p.bootstrap_peers", config.P2P.BootstrapPeers, "comma-delimited ID@host:port peers to be added to the addressbook on startup")
     cmd.Flags().String("p2p.unconditional_peer_ids",
         config.P2P.UnconditionalPeerIDs, "comma-delimited IDs of unconditional peers")
     cmd.Flags().Bool("p2p.upnp", config.P2P.UPNP, "enable/disable UPNP port forwarding")
@@ -423,6 +423,7 @@ type RPCConfig struct {
     TLSKeyFile string `mapstructure:"tls_key_file"`
 
     // pprof listen address (https://golang.org/pkg/net/http/pprof)
+    // FIXME: This should be moved under the instrumentation section
     PprofListenAddress string `mapstructure:"pprof_laddr"`
 }
 
@@ -506,6 +507,10 @@ func (cfg *RPCConfig) IsCorsEnabled() bool {
     return len(cfg.CORSAllowedOrigins) != 0
 }
 
+func (cfg *RPCConfig) IsPprofEnabled() bool {
+    return len(cfg.PprofListenAddress) != 0
+}
+
 func (cfg RPCConfig) KeyFile() string {
     path := cfg.TLSKeyFile
     if filepath.IsAbs(path) {
@@ -543,6 +548,11 @@ type P2PConfig struct { //nolint: maligned
     // We only use these if we can’t connect to peers in the addrbook
     Seeds string `mapstructure:"seeds"`
 
+    // Comma separated list of peers to be added to the peer store
+    // on startup. Either BootstrapPeers or PersistentPeers are
+    // needed for peer discovery
+    BootstrapPeers string `mapstructure:"bootstrap_peers"`
+
     // Comma separated list of nodes to keep persistent connections to
     PersistentPeers string `mapstructure:"persistent_peers"`
@@ -703,14 +713,28 @@ type MempoolConfig struct {
     // Mempool version to use:
     // 1) "v0" - (default) FIFO mempool.
     // 2) "v1" - prioritized mempool.
-    Version   string `mapstructure:"version"`
-    RootDir   string `mapstructure:"home"`
-    Recheck   bool   `mapstructure:"recheck"`
-    Broadcast bool   `mapstructure:"broadcast"`
-    WalPath   string `mapstructure:"wal_dir"`
+    // WARNING: There's a known memory leak with the prioritized mempool
+    // that the team is working on. Read more here:
+    // https://github.com/tendermint/tendermint/issues/8775
+    Version string `mapstructure:"version"`
+    // RootDir is the root directory for all data. This should be configured via
+    // the $TMHOME env variable or --home cmd flag rather than overriding this
+    // struct field.
+    RootDir string `mapstructure:"home"`
+    // Recheck (default: true) defines whether Tendermint should recheck the
+    // validity for all remaining transactions in the mempool after a block.
+    // Since a block affects the application state, some transactions in the
+    // mempool may become invalid. If this does not apply to your application,
+    // you can disable rechecking.
+    Recheck bool `mapstructure:"recheck"`
+    // Broadcast (default: true) defines whether the mempool should relay
+    // transactions to other peers. Setting this to false will stop the mempool
+    // from relaying transactions to other peers until they are included in a
+    // block. In other words, if Broadcast is disabled, only the peer you send
+    // the tx to will see it until it is included in a block.
+    Broadcast bool `mapstructure:"broadcast"`
+    // WalPath (default: "") configures the location of the Write Ahead Log
+    // (WAL) for the mempool. The WAL is disabled by default. To enable, set
+    // WalPath to where you want the WAL to be written (e.g.
+    // "data/mempool.wal").
+    WalPath string `mapstructure:"wal_dir"`
     // Maximum number of transactions in the mempool
     Size int `mapstructure:"size"`
     // Limit the total size of all txs in the mempool.
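The new field comments map one-to-one onto config.toml keys (see the template change below). Programmatically, the same knobs are set on the struct; a sketch with the documented defaults spelled out:

package configsketch

import "github.com/tendermint/tendermint/config"

func mempoolConfig() *config.MempoolConfig {
    cfg := config.DefaultMempoolConfig()
    cfg.Recheck = true   // revalidate remaining txs after each block (default)
    cfg.Broadcast = true // gossip txs to peers (default)
    cfg.WalPath = ""     // empty keeps the mempool WAL disabled
    cfg.Size = 5000      // assumed default; check DefaultMempoolConfig
    return cfg
}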
@@ -1204,6 +1228,10 @@ func (cfg *InstrumentationConfig) ValidateBasic() error {
     return nil
 }
 
+func (cfg *InstrumentationConfig) IsPrometheusEnabled() bool {
+    return cfg.Prometheus && cfg.PrometheusListenAddr != ""
+}
+
 //-----------------------------------------------------------------------------
 // Utils
 
@@ -283,6 +283,11 @@ external_address = "{{ .P2P.ExternalAddress }}"
 # Comma separated list of seed nodes to connect to
 seeds = "{{ .P2P.Seeds }}"
 
+# Comma separated list of peers to be added to the peer store
+# on startup. Either BootstrapPeers or PersistentPeers are
+# needed for peer discovery
+bootstrap_peers = "{{ .P2P.BootstrapPeers }}"
+
 # Comma separated list of nodes to keep persistent connections to
 persistent_peers = "{{ .P2P.PersistentPeers }}"
 
@@ -349,8 +354,24 @@ dial_timeout = "{{ .P2P.DialTimeout }}"
 # 2) "v1" - prioritized mempool.
 version = "{{ .Mempool.Version }}"
 
+# Recheck (default: true) defines whether Tendermint should recheck the
+# validity for all remaining transactions in the mempool after a block.
+# Since a block affects the application state, some transactions in the
+# mempool may become invalid. If this does not apply to your application,
+# you can disable rechecking.
+recheck = {{ .Mempool.Recheck }}
+
+# Broadcast (default: true) defines whether the mempool should relay
+# transactions to other peers. Setting this to false will stop the mempool
+# from relaying transactions to other peers until they are included in a
+# block. In other words, if Broadcast is disabled, only the peer you send
+# the tx to will see it until it is included in a block.
+broadcast = {{ .Mempool.Broadcast }}
+
+# WalPath (default: "") configures the location of the Write Ahead Log
+# (WAL) for the mempool. The WAL is disabled by default. To enable, set
+# WalPath to where you want the WAL to be written (e.g.
+# "data/mempool.wal").
+wal_dir = "{{ js .Mempool.WalPath }}"
+
 # Maximum number of transactions in the mempool
@@ -436,7 +457,7 @@ chunk_fetchers = "{{ .StateSync.ChunkFetchers }}"
 [blocksync]
 
 # Block Sync version to use:
 #
+# In v0.37, v1 and v2 of the block sync protocols were deprecated.
+# Please use v0 instead.
+#
@@ -21,11 +21,13 @@ import (
|
||||
"github.com/tendermint/tendermint/libs/service"
|
||||
tmsync "github.com/tendermint/tendermint/libs/sync"
|
||||
mempl "github.com/tendermint/tendermint/mempool"
|
||||
"github.com/tendermint/tendermint/proxy"
|
||||
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
mempoolv0 "github.com/tendermint/tendermint/mempool/v0"
|
||||
mempoolv1 "github.com/tendermint/tendermint/mempool/v1"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus"
|
||||
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/store"
|
||||
@@ -44,7 +46,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
tickerFunc := newMockTickerFunc(true)
appFunc := newKVStore

genDoc, privVals := randGenesisDoc(nValidators, false, 30)
genDoc, privVals := randGenesisDoc(nValidators, false, 30, nil)
css := make([]*State, nValidators)

for i := 0; i < nValidators; i++ {
@@ -59,15 +61,16 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
app := appFunc()
vals := types.TM2PB.ValidatorUpdates(state.Validators)
app.InitChain(abci.RequestInitChain{Validators: vals})
_, err := app.InitChain(context.Background(), &abci.RequestInitChain{Validators: vals})
require.NoError(t, err)

blockDB := dbm.NewMemDB()
blockStore := store.NewBlockStore(blockDB)

mtx := new(tmsync.Mutex)
// one for mempool, one for consensus
proxyAppConnCon := abcicli.NewLocalClient(mtx, app)
proxyAppConnConMem := abcicli.NewLocalClient(mtx, app)
proxyAppConnCon := proxy.NewAppConnConsensus(abcicli.NewLocalClient(mtx, app), proxy.NopMetrics())
proxyAppConnMem := proxy.NewAppConnMempool(abcicli.NewLocalClient(mtx, app), proxy.NopMetrics())

// Make Mempool
var mempool mempl.Mempool
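A recurring pattern in these hunks: ABCI methods now take a context.Context plus a pointer request and return (response, error) instead of a bare response value, so every call site gains an error check. A minimal sketch of driving an application directly under the new signatures — assuming the ABCI 2.0 interface and the kvstore example app shown in this diff; this is illustrative, not the repo's own code:

    package main

    import (
        "context"
        "log"

        "github.com/tendermint/tendermint/abci/example/kvstore"
        abci "github.com/tendermint/tendermint/abci/types"
    )

    func main() {
        app := kvstore.NewInMemoryApplication()
        // InitChain must now be checked for an error; the old API could not fail.
        if _, err := app.InitChain(context.Background(), &abci.RequestInitChain{ChainId: "test-chain"}); err != nil {
            log.Fatal(err)
        }
    }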
@@ -75,14 +78,14 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
switch thisConfig.Mempool.Version {
case cfg.MempoolV0:
mempool = mempoolv0.NewCListMempool(config.Mempool,
proxyAppConnConMem,
proxyAppConnMem,
state.LastBlockHeight,
mempoolv0.WithPreCheck(sm.TxPreCheck(state)),
mempoolv0.WithPostCheck(sm.TxPostCheck(state)))
case cfg.MempoolV1:
mempool = mempoolv1.NewTxMempool(logger,
config.Mempool,
proxyAppConnConMem,
proxyAppConnMem,
state.LastBlockHeight,
mempoolv1.WithPreCheck(sm.TxPreCheck(state)),
mempoolv1.WithPostCheck(sm.TxPostCheck(state)),
@@ -165,10 +168,16 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
for i, peer := range peerList {
if i < len(peerList)/2 {
bcs.Logger.Info("Signed and pushed vote", "vote", prevote1, "peer", peer)
peer.Send(VoteChannel, MustEncode(&VoteMessage{prevote1}))
peer.Send(p2p.Envelope{
Message: &tmcons.Vote{Vote: prevote1.ToProto()},
ChannelID: VoteChannel,
})
} else {
bcs.Logger.Info("Signed and pushed vote", "vote", prevote2, "peer", peer)
peer.Send(VoteChannel, MustEncode(&VoteMessage{prevote2}))
peer.Send(p2p.Envelope{
Message: &tmcons.Vote{Vote: prevote2.ToProto()},
ChannelID: VoteChannel,
})
}
}
} else {
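The migration visible here replaces the byte-oriented peer.Send(channelID, MustEncode(msg)) with a typed p2p.Envelope: the caller hands the p2p layer a proto message plus a channel ID and the wrapping/marshaling happens inside the stack. A hedged sketch of the new send path, written as if inside the consensus package (VoteChannel, tmcons, and types come from the surrounding imports; the bool return mirrors the old API):

    // sendVote pushes a single vote to a peer over the consensus vote channel
    // using the typed envelope API introduced in this diff.
    func sendVote(peer p2p.Peer, vote *types.Vote) bool {
        return peer.Send(p2p.Envelope{
            ChannelID: VoteChannel, // consensus vote channel, as in the hunks above
            Message:   &tmcons.Vote{Vote: vote.ToProto()},
        })
    }

One consequence of this design: reactors no longer need a MustEncode-style panic helper at every call site, since marshaling errors are handled once in the p2p layer.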
@@ -188,22 +197,22 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
panic("entered createProposalBlock with privValidator being nil")
}

var commit *types.Commit
var extCommit *types.ExtendedCommit
switch {
case lazyProposer.Height == lazyProposer.state.InitialHeight:
// We're creating a proposal for the first block.
// The commit is empty, but not nil.
commit = types.NewCommit(0, 0, types.BlockID{}, nil)
extCommit = &types.ExtendedCommit{}
case lazyProposer.LastCommit.HasTwoThirdsMajority():
// Make the commit from LastCommit
commit = lazyProposer.LastCommit.MakeCommit()
extCommit = lazyProposer.LastCommit.MakeExtendedCommit()
default: // This shouldn't happen.
lazyProposer.Logger.Error("enterPropose: Cannot propose anything: No commit for the previous block")
return
}

// omit the last signature in the commit
commit.Signatures[len(commit.Signatures)-1] = types.NewCommitSigAbsent()
extCommit.ExtendedSignatures[len(extCommit.ExtendedSignatures)-1] = types.NewExtendedCommitSigAbsent()

if lazyProposer.privValidatorPubKey == nil {
// If this node is a validator & proposer in the current round, it will
@@ -214,7 +223,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
proposerAddr := lazyProposer.privValidatorPubKey.Address()

block, err := lazyProposer.blockExec.CreateProposalBlock(
lazyProposer.Height, lazyProposer.state, commit, proposerAddr, nil)
lazyProposer.Height, lazyProposer.state, extCommit, proposerAddr)
require.NoError(t, err)
blockParts, err := block.MakePartSet(types.BlockPartSizeBytes)
require.NoError(t, err)
@@ -291,9 +300,6 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
}
}
case <-time.After(20 * time.Second):
for i, reactor := range reactors {
t.Logf("Consensus Reactor %d\n%v", i, reactor)
}
t.Fatalf("Timed out waiting for validators to commit evidence")
}
}
@@ -307,7 +313,7 @@ func TestByzantineConflictingProposalsWithPartition(t *testing.T) {
N := 4
logger := consensusLogger().With("test", "byzantine")
app := newKVStore
css, cleanup := randConsensusNet(N, "consensus_byzantine_test", newMockTickerFunc(false), app)
css, cleanup := randConsensusNet(t, N, "consensus_byzantine_test", newMockTickerFunc(false), app)
defer cleanup()

// give the byzantine validator a normal ticker
@@ -479,7 +485,7 @@ func byzantineDecideProposalFunc(t *testing.T, height int64, round int32, cs *St
proposal1.Signature = p1.Signature

// some new transactions come in (this ensures that the proposals are different)
deliverTxsRange(cs, 0, 1)
deliverTxsRange(t, cs, 0, 1)

// Create a new proposal block from state/txs from the mempool.
block2, err := cs.createProposalBlock()
@@ -520,18 +526,26 @@ func sendProposalAndParts(
parts *types.PartSet,
) {
// proposal
msg := &ProposalMessage{Proposal: proposal}
peer.Send(DataChannel, MustEncode(msg))
peer.Send(p2p.Envelope{
ChannelID: DataChannel,
Message: &tmcons.Proposal{Proposal: *proposal.ToProto()},
})

// parts
for i := 0; i < int(parts.Total()); i++ {
part := parts.GetPart(i)
msg := &BlockPartMessage{
Height: height, // This tells peer that this part applies to us.
Round: round, // This tells peer that this part applies to us.
Part: part,
pp, err := part.ToProto()
if err != nil {
panic(err) // TODO: wbanfield better error handling
}
peer.Send(DataChannel, MustEncode(msg))
peer.Send(p2p.Envelope{
ChannelID: DataChannel,
Message: &tmcons.BlockPart{
Height: height, // This tells peer that this part applies to us.
Round: round, // This tells peer that this part applies to us.
Part: *pp,
},
})
}

// votes
@@ -539,9 +553,14 @@ func sendProposalAndParts(
prevote, _ := cs.signVote(tmproto.PrevoteType, blockHash, parts.Header())
precommit, _ := cs.signVote(tmproto.PrecommitType, blockHash, parts.Header())
cs.mtx.Unlock()

peer.Send(VoteChannel, MustEncode(&VoteMessage{prevote}))
peer.Send(VoteChannel, MustEncode(&VoteMessage{precommit}))
peer.Send(p2p.Envelope{
ChannelID: VoteChannel,
Message: &tmcons.Vote{Vote: prevote.ToProto()},
})
peer.Send(p2p.Envelope{
ChannelID: VoteChannel,
Message: &tmcons.Vote{Vote: precommit.ToProto()},
})
}

//----------------------------------------
@@ -579,7 +598,7 @@ func (br *ByzantineReactor) AddPeer(peer p2p.Peer) {
func (br *ByzantineReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
br.reactor.RemovePeer(peer, reason)
}
func (br *ByzantineReactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {
br.reactor.Receive(chID, peer, msgBytes)
func (br *ByzantineReactor) Receive(e p2p.Envelope) {
br.reactor.Receive(e)
}
func (br *ByzantineReactor) InitPeer(peer p2p.Peer) p2p.Peer { return peer }

@@ -36,6 +36,7 @@ import (
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/privval"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
@@ -92,7 +93,8 @@ func newValidatorStub(privValidator types.PrivValidator, valIndex int32) *valida
func (vs *validatorStub) signVote(
voteType tmproto.SignedMsgType,
hash []byte,
header types.PartSetHeader) (*types.Vote, error) {
header types.PartSetHeader,
voteExtension []byte) (*types.Vote, error) {

pubKey, err := vs.PrivValidator.GetPubKey()
if err != nil {
@@ -100,34 +102,43 @@ func (vs *validatorStub) signVote(
}

vote := &types.Vote{
ValidatorIndex: vs.Index,
ValidatorAddress: pubKey.Address(),
Type: voteType,
Height: vs.Height,
Round: vs.Round,
Timestamp: tmtime.Now(),
Type: voteType,
BlockID: types.BlockID{Hash: hash, PartSetHeader: header},
Timestamp: tmtime.Now(),
ValidatorAddress: pubKey.Address(),
ValidatorIndex: vs.Index,
Extension: voteExtension,
}
v := vote.ToProto()
if err := vs.PrivValidator.SignVote(test.DefaultTestChainID, v); err != nil {
if err = vs.PrivValidator.SignVote(test.DefaultTestChainID, v); err != nil {
return nil, fmt.Errorf("sign vote failed: %w", err)
}

// ref: signVote in FilePV, the vote should use the privious vote info when the sign data is the same.
// ref: signVote in FilePV, the vote should use the previous vote info when the sign data is the same.
if signDataIsEqual(vs.lastVote, v) {
v.Signature = vs.lastVote.Signature
v.Timestamp = vs.lastVote.Timestamp
v.ExtensionSignature = vs.lastVote.ExtensionSignature
}

vote.Signature = v.Signature
vote.Timestamp = v.Timestamp
vote.ExtensionSignature = v.ExtensionSignature

return vote, err
}

// Sign vote for type/hash/header
func signVote(vs *validatorStub, voteType tmproto.SignedMsgType, hash []byte, header types.PartSetHeader) *types.Vote {
v, err := vs.signVote(voteType, hash, header)
var ext []byte
// Only non-nil precommits are allowed to carry vote extensions.
if voteType == tmproto.PrecommitType && len(hash) != 0 {
ext = []byte("extension")
}
v, err := vs.signVote(voteType, hash, header, ext)

if err != nil {
panic(fmt.Errorf("failed to sign vote: %v", err))
}
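The hunks above thread a voteExtension argument through vote signing and record its separate ExtensionSignature. The rule they encode is worth stating explicitly: extensions may ride only on precommits for a real block; prevotes and nil precommits must not carry one. A minimal sketch of that gate, using the same test fixture value as the diff:

    // extensionFor returns the vote extension to attach, or nil when the
    // vote type/target is not allowed to carry one.
    func extensionFor(voteType tmproto.SignedMsgType, blockHash []byte) []byte {
        if voteType == tmproto.PrecommitType && len(blockHash) != 0 {
            return []byte("extension") // fixture value used by signVote in this diff
        }
        return nil
    }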
@@ -311,43 +322,27 @@ func validatePrecommit(
}
}

rs := cs.GetRoundState()
if lockedBlockHash == nil {
if cs.LockedRound != lockRound || cs.LockedBlock != nil {
if rs.LockedRound != lockRound || rs.LockedBlock != nil {
panic(fmt.Sprintf(
"Expected to be locked on nil at round %d. Got locked at round %d with block %v",
lockRound,
cs.LockedRound,
cs.LockedBlock))
rs.LockedRound,
rs.LockedBlock))
}
} else {
if cs.LockedRound != lockRound || !bytes.Equal(cs.LockedBlock.Hash(), lockedBlockHash) {
if rs.LockedRound != lockRound || !bytes.Equal(rs.LockedBlock.Hash(), lockedBlockHash) {
panic(fmt.Sprintf(
"Expected block to be locked on round %d, got %d. Got locked block %X, expected %X",
lockRound,
cs.LockedRound,
cs.LockedBlock.Hash(),
rs.LockedRound,
rs.LockedBlock.Hash(),
lockedBlockHash))
}
}
}

func validatePrevoteAndPrecommit(
t *testing.T,
cs *State,
thisRound,
lockRound int32,
privVal *validatorStub,
votedBlockHash,
lockedBlockHash []byte,
) {
// verify the prevote
validatePrevote(t, cs, thisRound, privVal, votedBlockHash)
// verify precommit
cs.mtx.Lock()
validatePrecommit(t, cs, thisRound, lockRound, privVal, votedBlockHash, lockedBlockHash)
cs.mtx.Unlock()
}

func subscribeToVoter(cs *State, addr []byte) <-chan tmpubsub.Message {
votesSub, err := cs.eventBus.SubscribeUnbuffered(context.Background(), testSubscriber, types.EventQueryVote)
if err != nil {
@@ -397,8 +392,8 @@ func newStateWithConfigAndBlockStore(
// one for mempool, one for consensus
mtx := new(tmsync.Mutex)

proxyAppConnCon := abcicli.NewLocalClient(mtx, app)
proxyAppConnConMem := abcicli.NewLocalClient(mtx, app)
proxyAppConnCon := proxy.NewAppConnConsensus(abcicli.NewLocalClient(mtx, app), proxy.NopMetrics())
proxyAppConnMem := proxy.NewAppConnMempool(abcicli.NewLocalClient(mtx, app), proxy.NopMetrics())
// Make Mempool
memplMetrics := mempl.NopMetrics()

@@ -408,7 +403,7 @@ func newStateWithConfigAndBlockStore(
switch config.Mempool.Version {
case cfg.MempoolV0:
mempool = mempoolv0.NewCListMempool(config.Mempool,
proxyAppConnConMem,
proxyAppConnMem,
state.LastBlockHeight,
mempoolv0.WithMetrics(memplMetrics),
mempoolv0.WithPreCheck(sm.TxPreCheck(state)),
@@ -417,7 +412,7 @@ func newStateWithConfigAndBlockStore(
logger := consensusLogger()
mempool = mempoolv1.NewTxMempool(logger,
config.Mempool,
proxyAppConnConMem,
proxyAppConnMem,
state.LastBlockHeight,
mempoolv1.WithMetrics(memplMetrics),
mempoolv1.WithPreCheck(sm.TxPreCheck(state)),
@@ -435,6 +430,7 @@ func newStateWithConfigAndBlockStore(
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})

if err := stateStore.Save(state); err != nil { // for save height 1's validators info
panic(err)
}
@@ -464,12 +460,30 @@ func loadPrivValidator(config *cfg.Config) *privval.FilePV {
}

func randState(nValidators int) (*State, []*validatorStub) {
return randStateWithApp(nValidators, kvstore.NewApplication())
return randStateWithApp(nValidators, kvstore.NewInMemoryApplication())
}

func randStateWithAppWithHeight(
nValidators int,
app abci.Application,
height int64,
) (*State, []*validatorStub) {
c := test.ConsensusParams()
c.ABCI.VoteExtensionsEnableHeight = height
return randStateWithAppImpl(nValidators, app, c)
}
func randStateWithApp(nValidators int, app abci.Application) (*State, []*validatorStub) {
c := test.ConsensusParams()
return randStateWithAppImpl(nValidators, app, c)
}

func randStateWithAppImpl(
nValidators int,
app abci.Application,
consensusParams *types.ConsensusParams,
) (*State, []*validatorStub) {
// Get State
state, privVals := randGenesisState(nValidators, false, 10)
state, privVals := randGenesisState(nValidators, false, 10, consensusParams)

vss := make([]*validatorStub, nValidators)

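As randStateWithAppWithHeight shows, vote extensions are gated by a consensus parameter rather than a compile-time flag, so a test (or a chain's genesis document) opts in by setting the enable height. A hedged sketch using the helpers from this diff (test.ConsensusParams, randGenesisDoc with its new consensusParams argument):

    // genesisWithExtensionsEnabled builds a genesis doc in which vote
    // extensions become mandatory starting at the given height; height 0
    // would leave them disabled.
    func genesisWithExtensionsEnabled(height int64) (*types.GenesisDoc, []types.PrivValidator) {
        c := test.ConsensusParams()
        c.ABCI.VoteExtensionsEnableHeight = height
        return randGenesisDoc(4, false, 30, c)
    }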
@@ -499,7 +513,7 @@ func ensureNoNewEvent(ch <-chan tmpubsub.Message, timeout time.Duration,
func ensureNoNewEventOnChannel(ch <-chan tmpubsub.Message) {
ensureNoNewEvent(
ch,
ensureTimeout,
ensureTimeout*8/10, // 20% leniency for goroutine scheduling uncertainty
"We should be stuck waiting, not receiving new event on the channel")
}

@@ -693,6 +707,11 @@ func ensurePrevoteMatch(t *testing.T, voteCh <-chan tmpubsub.Message, height int
ensureVoteMatch(t, voteCh, height, round, hash, tmproto.PrevoteType)
}

func ensurePrecommitMatch(t *testing.T, voteCh <-chan tmpubsub.Message, height int64, round int32, hash []byte) {
t.Helper()
ensureVoteMatch(t, voteCh, height, round, hash, tmproto.PrecommitType)
}

func ensureVoteMatch(t *testing.T, voteCh <-chan tmpubsub.Message, height int64, round int32, hash []byte, voteType tmproto.SignedMsgType) {
t.Helper()
select {
@@ -725,7 +744,7 @@ func ensurePrecommitTimeout(ch <-chan tmpubsub.Message) {

func ensureNewEventOnChannel(ch <-chan tmpubsub.Message) {
select {
case <-time.After(ensureTimeout):
case <-time.After(ensureTimeout * 12 / 10): // 20% leniency for goroutine scheduling uncertainty
panic("Timeout expired while waiting for new activity on the channel")
case <-ch:
}
@@ -747,9 +766,10 @@ func consensusLogger() log.Logger {
}).With("module", "consensus")
}

func randConsensusNet(nValidators int, testName string, tickerFunc func() TimeoutTicker,
func randConsensusNet(t *testing.T, nValidators int, testName string, tickerFunc func() TimeoutTicker,
appFunc func() abci.Application, configOpts ...func(*cfg.Config)) ([]*State, cleanupFunc) {
genDoc, privVals := randGenesisDoc(nValidators, false, 30)
t.Helper()
genDoc, privVals := randGenesisDoc(nValidators, false, 30, nil)
css := make([]*State, nValidators)
logger := consensusLogger()
configRootDirs := make([]string, 0, nValidators)
@@ -767,7 +787,8 @@ func randConsensusNet(nValidators int, testName string, tickerFunc func() Timeou
ensureDir(filepath.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
app := appFunc()
vals := types.TM2PB.ValidatorUpdates(state.Validators)
app.InitChain(abci.RequestInitChain{Validators: vals})
_, err := app.InitChain(context.Background(), &abci.RequestInitChain{Validators: vals})
require.NoError(t, err)

css[i] = newStateWithConfigAndBlockStore(thisConfig, state, privVals[i], app, stateDB)
css[i].SetTimeoutTicker(tickerFunc())
@@ -782,13 +803,15 @@ func randConsensusNet(nValidators int, testName string, tickerFunc func() Timeou

// nPeers = nValidators + nNotValidator
func randConsensusNetWithPeers(
t *testing.T,
nValidators,
nPeers int,
testName string,
tickerFunc func() TimeoutTicker,
appFunc func(string) abci.Application,
) ([]*State, *types.GenesisDoc, *cfg.Config, cleanupFunc) {
genDoc, privVals := randGenesisDoc(nValidators, false, testMinPower)
c := test.ConsensusParams()
genDoc, privVals := randGenesisDoc(nValidators, false, testMinPower, c)
css := make([]*State, nPeers)
logger := consensusLogger()
var peer0Config *cfg.Config
@@ -798,6 +821,7 @@ func randConsensusNetWithPeers(
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
t.Cleanup(func() { _ = stateStore.Close() })
state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc)
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
configRootDirs = append(configRootDirs, thisConfig.RootDir)
@@ -823,12 +847,12 @@ func randConsensusNetWithPeers(

app := appFunc(path.Join(config.DBDir(), fmt.Sprintf("%s_%d", testName, i)))
vals := types.TM2PB.ValidatorUpdates(state.Validators)
if _, ok := app.(*kvstore.PersistentKVStoreApplication); ok {
if _, ok := app.(*kvstore.Application); ok {
// simulate handshake, receive app version. If don't do this, replay test will fail
state.Version.Consensus.App = kvstore.ProtocolVersion
state.Version.Consensus.App = kvstore.AppVersion
}
app.InitChain(abci.RequestInitChain{Validators: vals})
// sm.SaveState(stateDB,state) //height 1's validatorsInfo already saved in LoadStateFromDBOrGenesisDoc above
_, err := app.InitChain(context.Background(), &abci.RequestInitChain{Validators: vals})
require.NoError(t, err)

css[i] = newStateWithConfig(thisConfig, state, privVal, app)
css[i].SetTimeoutTicker(tickerFunc())
@@ -853,7 +877,11 @@ func getSwitchIndex(switches []*p2p.Switch, peer p2p.Peer) int {
//-------------------------------------------------------------------------------
// genesis

func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.GenesisDoc, []types.PrivValidator) {
func randGenesisDoc(numValidators int,
randPower bool,
minPower int64,
consensusParams *types.ConsensusParams,
) (*types.GenesisDoc, []types.PrivValidator) {
validators := make([]types.GenesisValidator, numValidators)
privValidators := make([]types.PrivValidator, numValidators)
for i := 0; i < numValidators; i++ {
@@ -867,15 +895,21 @@ func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.G
sort.Sort(types.PrivValidatorsByAddress(privValidators))

return &types.GenesisDoc{
GenesisTime: tmtime.Now(),
InitialHeight: 1,
ChainID: test.DefaultTestChainID,
Validators: validators,
GenesisTime: tmtime.Now(),
InitialHeight: 1,
ChainID: test.DefaultTestChainID,
Validators: validators,
ConsensusParams: consensusParams,
}, privValidators
}

func randGenesisState(numValidators int, randPower bool, minPower int64) (sm.State, []types.PrivValidator) {
genDoc, privValidators := randGenesisDoc(numValidators, randPower, minPower)
func randGenesisState(
numValidators int,
randPower bool,
minPower int64,
consensusParams *types.ConsensusParams,
) (sm.State, []types.PrivValidator) {
genDoc, privValidators := randGenesisDoc(numValidators, randPower, minPower, consensusParams)
s0, _ := sm.MakeGenesisState(genDoc)
return s0, privValidators
}
@@ -933,15 +967,15 @@ func newPersistentKVStore() abci.Application {
if err != nil {
panic(err)
}
return kvstore.NewPersistentKVStoreApplication(dir)
return kvstore.NewPersistentApplication(dir)
}

func newKVStore() abci.Application {
return kvstore.NewApplication()
return kvstore.NewInMemoryApplication()
}

func newPersistentKVStoreWithPath(dbDir string) abci.Application {
return kvstore.NewPersistentKVStoreApplication(dbDir)
return kvstore.NewPersistentApplication(dbDir)
}

func signDataIsEqual(v1 *types.Vote, v2 *tmproto.Vote) bool {
@@ -954,5 +988,6 @@ func signDataIsEqual(v1 *types.Vote, v2 *tmproto.Vote) bool {
v1.Height == v2.GetHeight() &&
v1.Round == v2.Round &&
bytes.Equal(v1.ValidatorAddress.Bytes(), v2.GetValidatorAddress()) &&
v1.ValidatorIndex == v2.GetValidatorIndex()
v1.ValidatorIndex == v2.GetValidatorIndex() &&
bytes.Equal(v1.Extension, v2.Extension)
}

@@ -2,11 +2,14 @@ package consensus

import (
"testing"
"time"

cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/libs/bytes"
"github.com/tendermint/tendermint/libs/log"
tmrand "github.com/tendermint/tendermint/libs/rand"
"github.com/tendermint/tendermint/p2p"
tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
"github.com/tendermint/tendermint/types"
)
@@ -18,10 +21,15 @@ import (
// Ensure a testnet makes blocks
func TestReactorInvalidPrecommit(t *testing.T) {
N := 4
css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore)
css, cleanup := randConsensusNet(t, N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore,
func(c *cfg.Config) {
c.Consensus.TimeoutPropose = 3000 * time.Millisecond
c.Consensus.TimeoutPrevote = 1000 * time.Millisecond
c.Consensus.TimeoutPrecommit = 1000 * time.Millisecond
})
defer cleanup()

for i := 0; i < 4; i++ {
for i := 0; i < N; i++ {
ticker := NewTimeoutTicker()
ticker.SetLogger(css[i].Logger)
css[i].SetTimeoutTicker(ticker)
@@ -29,9 +37,10 @@ func TestReactorInvalidPrecommit(t *testing.T) {
}

reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N)
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

// this val sends a random precommit at each height
byzValIdx := 0
byzValIdx := N - 1
byzVal := css[byzValIdx]
byzR := reactors[byzValIdx]

@@ -43,7 +52,6 @@ func TestReactorInvalidPrecommit(t *testing.T) {
invalidDoPrevoteFunc(t, height, round, byzVal, byzR.Switch, pv)
}
byzVal.mtx.Unlock()
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

// wait for a bunch of blocks
// TODO: make this tighter by ensuring the halt happens by block 2
@@ -61,6 +69,7 @@ func invalidDoPrevoteFunc(t *testing.T, height int64, round int32, cs *State, sw
// - disable privValidator (so we don't do normal precommits)
go func() {
cs.mtx.Lock()
defer cs.mtx.Unlock()
cs.privValidator = pv
pubKey, err := cs.privValidator.GetPubKey()
if err != nil {
@@ -88,13 +97,16 @@ func invalidDoPrevoteFunc(t *testing.T, height int64, round int32, cs *State, sw
t.Error(err)
}
precommit.Signature = p.Signature
precommit.ExtensionSignature = p.ExtensionSignature
cs.privValidator = nil // disable priv val so we don't do normal votes
cs.mtx.Unlock()

peers := sw.Peers().List()
for _, peer := range peers {
cs.Logger.Info("Sending bad vote", "block", blockHash, "peer", peer)
peer.Send(VoteChannel, MustEncode(&VoteMessage{precommit}))
peer.Send(p2p.Envelope{
Message: &tmcons.Vote{Vote: precommit.ToProto()},
ChannelID: VoteChannel,
})
}
}()
}

@@ -1,7 +1,7 @@
package consensus

import (
"encoding/binary"
"context"
"fmt"
"os"
"testing"
@@ -12,9 +12,10 @@ import (

dbm "github.com/tendermint/tm-db"

"github.com/tendermint/tendermint/abci/example/code"
"github.com/tendermint/tendermint/abci/example/kvstore"
abci "github.com/tendermint/tendermint/abci/types"
mempl "github.com/tendermint/tendermint/mempool"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
)
@@ -28,8 +29,12 @@ func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) {
config := ResetConfig("consensus_mempool_txs_available_test")
defer os.RemoveAll(config.RootDir)
config.Consensus.CreateEmptyBlocks = false
state, privVals := randGenesisState(1, false, 10)
cs := newStateWithConfig(config, state, privVals[0], NewCounterApplication())
state, privVals := randGenesisState(1, false, 10, nil)
app := kvstore.NewInMemoryApplication()
resp, err := app.Info(context.Background(), proxy.RequestInfo)
require.NoError(t, err)
state.AppHash = resp.LastBlockAppHash
cs := newStateWithConfig(config, state, privVals[0], app)
assertMempool(cs.txNotifier).EnableTxsAvailable()
height, round := cs.Height, cs.Round
newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock)
@@ -37,7 +42,7 @@ func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) {

ensureNewEventOnChannel(newBlockCh) // first block gets committed
ensureNoNewEventOnChannel(newBlockCh)
deliverTxsRange(cs, 0, 1)
deliverTxsRange(t, cs, 0, 1)
ensureNewEventOnChannel(newBlockCh) // commit txs
ensureNewEventOnChannel(newBlockCh) // commit updated app hash
ensureNoNewEventOnChannel(newBlockCh)
@@ -48,8 +53,12 @@ func TestMempoolProgressAfterCreateEmptyBlocksInterval(t *testing.T) {
defer os.RemoveAll(config.RootDir)

config.Consensus.CreateEmptyBlocksInterval = ensureTimeout
state, privVals := randGenesisState(1, false, 10)
cs := newStateWithConfig(config, state, privVals[0], NewCounterApplication())
state, privVals := randGenesisState(1, false, 10, nil)
app := kvstore.NewInMemoryApplication()
resp, err := app.Info(context.Background(), proxy.RequestInfo)
require.NoError(t, err)
state.AppHash = resp.LastBlockAppHash
cs := newStateWithConfig(config, state, privVals[0], app)

assertMempool(cs.txNotifier).EnableTxsAvailable()

@@ -65,8 +74,8 @@ func TestMempoolProgressInHigherRound(t *testing.T) {
config := ResetConfig("consensus_mempool_txs_available_test")
defer os.RemoveAll(config.RootDir)
config.Consensus.CreateEmptyBlocks = false
state, privVals := randGenesisState(1, false, 10)
cs := newStateWithConfig(config, state, privVals[0], NewCounterApplication())
state, privVals := randGenesisState(1, false, 10, nil)
cs := newStateWithConfig(config, state, privVals[0], kvstore.NewInMemoryApplication())
assertMempool(cs.txNotifier).EnableTxsAvailable()
height, round := cs.Height, cs.Round
newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock)
@@ -90,7 +99,7 @@ func TestMempoolProgressInHigherRound(t *testing.T) {
round = 0

ensureNewRound(newRoundCh, height, round) // first round at next height
deliverTxsRange(cs, 0, 1) // we deliver txs, but dont set a proposal so we get the next round
deliverTxsRange(t, cs, 0, 1) // we deliver txs, but dont set a proposal so we get the next round
ensureNewTimeout(timeoutCh, height, round, cs.config.TimeoutPropose.Nanoseconds())

round++ // moving to the next round
@@ -98,36 +107,32 @@ func TestMempoolProgressInHigherRound(t *testing.T) {
ensureNewEventOnChannel(newBlockCh) // now we can commit the block
}

func deliverTxsRange(cs *State, start, end int) {
func deliverTxsRange(t *testing.T, cs *State, start, end int) {
// Deliver some txs.
for i := start; i < end; i++ {
txBytes := make([]byte, 8)
binary.BigEndian.PutUint64(txBytes, uint64(i))
err := assertMempool(cs.txNotifier).CheckTx(txBytes, nil, mempl.TxInfo{})
if err != nil {
panic(fmt.Sprintf("Error after CheckTx: %v", err))
}
err := assertMempool(cs.txNotifier).CheckTx(kvstore.NewTx(fmt.Sprintf("%d", i), "true"), nil, mempl.TxInfo{})
require.NoError(t, err)
}
}

func TestMempoolTxConcurrentWithCommit(t *testing.T) {
state, privVals := randGenesisState(1, false, 10)
state, privVals := randGenesisState(1, false, 10, nil)
blockDB := dbm.NewMemDB()
stateStore := sm.NewStore(blockDB, sm.StoreOptions{DiscardABCIResponses: false})
cs := newStateWithConfigAndBlockStore(config, state, privVals[0], NewCounterApplication(), blockDB)
cs := newStateWithConfigAndBlockStore(config, state, privVals[0], kvstore.NewInMemoryApplication(), blockDB)
err := stateStore.Save(state)
require.NoError(t, err)
newBlockHeaderCh := subscribe(cs.eventBus, types.EventQueryNewBlockHeader)
newBlockEventsCh := subscribe(cs.eventBus, types.EventQueryNewBlockEvents)

const numTxs int64 = 3000
go deliverTxsRange(cs, 0, int(numTxs))
go deliverTxsRange(t, cs, 0, int(numTxs))

startTestRound(cs, cs.Height, cs.Round)
for n := int64(0); n < numTxs; {
select {
case msg := <-newBlockHeaderCh:
headerEvent := msg.Data().(types.EventDataNewBlockHeader)
n += headerEvent.NumTxs
case msg := <-newBlockEventsCh:
event := msg.Data().(types.EventDataNewBlockEvents)
n += event.NumTxs
case <-time.After(30 * time.Second):
t.Fatal("Timed out waiting 30s to commit blocks with transactions")
}
@@ -135,8 +140,8 @@ func TestMempoolTxConcurrentWithCommit(t *testing.T) {
}

func TestMempoolRmBadTx(t *testing.T) {
state, privVals := randGenesisState(1, false, 10)
app := NewCounterApplication()
state, privVals := randGenesisState(1, false, 10, nil)
app := kvstore.NewInMemoryApplication()
blockDB := dbm.NewMemDB()
stateStore := sm.NewStore(blockDB, sm.StoreOptions{DiscardABCIResponses: false})
cs := newStateWithConfigAndBlockStore(config, state, privVals[0], app, blockDB)
@@ -144,14 +149,14 @@ func TestMempoolRmBadTx(t *testing.T) {
require.NoError(t, err)

// increment the counter by 1
txBytes := make([]byte, 8)
binary.BigEndian.PutUint64(txBytes, uint64(0))
txBytes := kvstore.NewTx("key", "value")
res, err := app.FinalizeBlock(context.Background(), &abci.RequestFinalizeBlock{Txs: [][]byte{txBytes}})
require.NoError(t, err)
assert.False(t, res.TxResults[0].IsErr())
assert.True(t, len(res.AgreedAppData) > 0)

resDeliver := app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes})
assert.False(t, resDeliver.IsErr(), fmt.Sprintf("expected no error. got %v", resDeliver))

resCommit := app.Commit()
assert.True(t, len(resCommit.Data) > 0)
_, err = app.Commit(context.Background(), &abci.RequestCommit{})
require.NoError(t, err)

emptyMempoolCh := make(chan struct{})
checkTxRespCh := make(chan struct{})
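These hunks replace the bespoke counter application with the kvstore example app, whose transactions are "key=value" strings built with kvstore.NewTx, and collapse the old DeliverTx/Commit pair into FinalizeBlock plus Commit. A hedged sketch of the new flow, assuming the ABCI 2.0 kvstore API used in the diff (including the AgreedAppData response field shown above):

    // commitOneTx executes a single kvstore transaction the ABCI 2.0 way:
    // one FinalizeBlock call runs the whole block, Commit persists it.
    func commitOneTx() {
        app := kvstore.NewInMemoryApplication()
        tx := kvstore.NewTx("key", "value") // encodes a "key=value" transaction
        res, err := app.FinalizeBlock(context.Background(), &abci.RequestFinalizeBlock{Txs: [][]byte{tx}})
        if err != nil || res.TxResults[0].IsErr() {
            panic("tx was expected to succeed")
        }
        if _, err := app.Commit(context.Background(), &abci.RequestCommit{}); err != nil {
            panic(err)
        }
    }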
@@ -159,9 +164,10 @@ func TestMempoolRmBadTx(t *testing.T) {
// Try to send the tx through the mempool.
// CheckTx should not err, but the app should return a bad abci code
// and the tx should get removed from the pool
err := assertMempool(cs.txNotifier).CheckTx(txBytes, func(r *abci.Response) {
if r.GetCheckTx().Code != code.CodeTypeBadNonce {
t.Errorf("expected checktx to return bad nonce, got %v", r)
invalidTx := []byte("invalidTx")
err := assertMempool(cs.txNotifier).CheckTx(invalidTx, func(r *abci.ResponseCheckTx) {
if r.Code != kvstore.CodeTypeInvalidTxFormat {
t.Errorf("expected checktx to return invalid format, got %v", r)
return
}
checkTxRespCh <- struct{}{}
@@ -173,7 +179,7 @@ func TestMempoolRmBadTx(t *testing.T) {

// check for the tx
for {
txs := assertMempool(cs.txNotifier).ReapMaxBytesMaxGas(int64(len(txBytes)), -1)
txs := assertMempool(cs.txNotifier).ReapMaxBytesMaxGas(int64(len(invalidTx)), -1)
if len(txs) == 0 {
emptyMempoolCh <- struct{}{}
return
@@ -202,76 +208,3 @@ func TestMempoolRmBadTx(t *testing.T) {
return
}
}

// CounterApplication that maintains a mempool state and resets it upon commit
type CounterApplication struct {
abci.BaseApplication

txCount int
mempoolTxCount int
}

func NewCounterApplication() *CounterApplication {
return &CounterApplication{}
}

func (app *CounterApplication) Info(req abci.RequestInfo) abci.ResponseInfo {
return abci.ResponseInfo{Data: fmt.Sprintf("txs:%v", app.txCount)}
}

func (app *CounterApplication) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx {
txValue := txAsUint64(req.Tx)
if txValue != uint64(app.txCount) {
return abci.ResponseDeliverTx{
Code: code.CodeTypeBadNonce,
Log: fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.txCount, txValue)}
}
app.txCount++
return abci.ResponseDeliverTx{Code: code.CodeTypeOK}
}

func (app *CounterApplication) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx {
txValue := txAsUint64(req.Tx)
if txValue != uint64(app.mempoolTxCount) {
return abci.ResponseCheckTx{
Code: code.CodeTypeBadNonce,
Log: fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.mempoolTxCount, txValue)}
}
app.mempoolTxCount++
return abci.ResponseCheckTx{Code: code.CodeTypeOK}
}

func txAsUint64(tx []byte) uint64 {
tx8 := make([]byte, 8)
copy(tx8[len(tx8)-len(tx):], tx)
return binary.BigEndian.Uint64(tx8)
}

func (app *CounterApplication) Commit() abci.ResponseCommit {
app.mempoolTxCount = app.txCount
if app.txCount == 0 {
return abci.ResponseCommit{}
}
hash := make([]byte, 8)
binary.BigEndian.PutUint64(hash, uint64(app.txCount))
return abci.ResponseCommit{Data: hash}
}

func (app *CounterApplication) PrepareProposal(
req abci.RequestPrepareProposal) abci.ResponsePrepareProposal {
txs := make([][]byte, 0, len(req.Txs))
var totalBytes int64
for _, tx := range req.Txs {
totalBytes += int64(len(tx))
if totalBytes > req.MaxTxBytes {
break
}
txs = append(txs, tx)
}
return abci.ResponsePrepareProposal{Txs: txs}
}

func (app *CounterApplication) ProcessProposal(
req abci.RequestProcessProposal) abci.ResponseProcessProposal {
return abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_ACCEPT}
}

@@ -118,18 +118,6 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
Name: "latest_block_height",
Help: "The latest block height.",
}, labels).With(labelsAndValues...),
BlockSyncing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: namespace,
Subsystem: MetricsSubsystem,
Name: "block_syncing",
Help: "Whether or not a node is block syncing. 1 if yes, 0 if no.",
}, labels).With(labelsAndValues...),
StateSyncing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: namespace,
Subsystem: MetricsSubsystem,
Name: "state_syncing",
Help: "Whether or not a node is state syncing. 1 if yes, 0 if no.",
}, labels).With(labelsAndValues...),
BlockParts: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
Namespace: namespace,
Subsystem: MetricsSubsystem,
@@ -208,8 +196,6 @@ func NopMetrics() *Metrics {
BlockSizeBytes: discard.NewGauge(),
TotalTxs: discard.NewGauge(),
CommittedHeight: discard.NewGauge(),
BlockSyncing: discard.NewGauge(),
StateSyncing: discard.NewGauge(),
BlockParts: discard.NewCounter(),
StepDurationSeconds: discard.NewHistogram(),
BlockGossipPartsReceived: discard.NewCounter(),

@@ -61,10 +61,6 @@ type Metrics struct {
TotalTxs metrics.Gauge
// The latest block height.
CommittedHeight metrics.Gauge `metrics_name:"latest_block_height"`
// Whether or not a node is block syncing. 1 if yes, 0 if no.
BlockSyncing metrics.Gauge
// Whether or not a node is state syncing. 1 if yes, 0 if no.
StateSyncing metrics.Gauge

// Number of block parts transmitted by each peer.
BlockParts metrics.Counter `metrics_labels:"peer_id"`

@@ -15,173 +15,147 @@ import (
"github.com/tendermint/tendermint/types"
)

// MsgToProto takes a consensus message type and returns the proto defined consensus message
func MsgToProto(msg Message) (*tmcons.Message, error) {
// MsgToProto takes a consensus message type and returns the proto defined consensus message.
//
// TODO: This needs to be removed, but WALToProto depends on this.
func MsgToProto(msg Message) (proto.Message, error) {
if msg == nil {
return nil, errors.New("consensus: message is nil")
}
var pb tmcons.Message
var pb proto.Message

switch msg := msg.(type) {
case *NewRoundStepMessage:
pb = tmcons.Message{
Sum: &tmcons.Message_NewRoundStep{
NewRoundStep: &tmcons.NewRoundStep{
Height: msg.Height,
Round: msg.Round,
Step: uint32(msg.Step),
SecondsSinceStartTime: msg.SecondsSinceStartTime,
LastCommitRound: msg.LastCommitRound,
},
},
pb = &tmcons.NewRoundStep{
Height: msg.Height,
Round: msg.Round,
Step: uint32(msg.Step),
SecondsSinceStartTime: msg.SecondsSinceStartTime,
LastCommitRound: msg.LastCommitRound,
}

case *NewValidBlockMessage:
pbPartSetHeader := msg.BlockPartSetHeader.ToProto()
pbBits := msg.BlockParts.ToProto()
pb = tmcons.Message{
Sum: &tmcons.Message_NewValidBlock{
NewValidBlock: &tmcons.NewValidBlock{
Height: msg.Height,
Round: msg.Round,
BlockPartSetHeader: pbPartSetHeader,
BlockParts: pbBits,
IsCommit: msg.IsCommit,
},
},
pb = &tmcons.NewValidBlock{
Height: msg.Height,
Round: msg.Round,
BlockPartSetHeader: pbPartSetHeader,
BlockParts: pbBits,
IsCommit: msg.IsCommit,
}

case *ProposalMessage:
pbP := msg.Proposal.ToProto()
pb = tmcons.Message{
Sum: &tmcons.Message_Proposal{
Proposal: &tmcons.Proposal{
Proposal: *pbP,
},
},
pb = &tmcons.Proposal{
Proposal: *pbP,
}

case *ProposalPOLMessage:
pbBits := msg.ProposalPOL.ToProto()
pb = tmcons.Message{
Sum: &tmcons.Message_ProposalPol{
ProposalPol: &tmcons.ProposalPOL{
Height: msg.Height,
ProposalPolRound: msg.ProposalPOLRound,
ProposalPol: *pbBits,
},
},
pb = &tmcons.ProposalPOL{
Height: msg.Height,
ProposalPolRound: msg.ProposalPOLRound,
ProposalPol: *pbBits,
}

case *BlockPartMessage:
parts, err := msg.Part.ToProto()
if err != nil {
return nil, fmt.Errorf("msg to proto error: %w", err)
}
pb = tmcons.Message{
Sum: &tmcons.Message_BlockPart{
BlockPart: &tmcons.BlockPart{
Height: msg.Height,
Round: msg.Round,
Part: *parts,
},
},
pb = &tmcons.BlockPart{
Height: msg.Height,
Round: msg.Round,
Part: *parts,
}

case *VoteMessage:
vote := msg.Vote.ToProto()
pb = tmcons.Message{
Sum: &tmcons.Message_Vote{
Vote: &tmcons.Vote{
Vote: vote,
},
},
pb = &tmcons.Vote{
Vote: vote,
}

case *HasVoteMessage:
pb = tmcons.Message{
Sum: &tmcons.Message_HasVote{
HasVote: &tmcons.HasVote{
Height: msg.Height,
Round: msg.Round,
Type: msg.Type,
Index: msg.Index,
},
},
pb = &tmcons.HasVote{
Height: msg.Height,
Round: msg.Round,
Type: msg.Type,
Index: msg.Index,
}

case *VoteSetMaj23Message:
bi := msg.BlockID.ToProto()
pb = tmcons.Message{
Sum: &tmcons.Message_VoteSetMaj23{
VoteSetMaj23: &tmcons.VoteSetMaj23{
Height: msg.Height,
Round: msg.Round,
Type: msg.Type,
BlockID: bi,
},
},
pb = &tmcons.VoteSetMaj23{
Height: msg.Height,
Round: msg.Round,
Type: msg.Type,
BlockID: bi,
}

case *VoteSetBitsMessage:
bi := msg.BlockID.ToProto()
bits := msg.Votes.ToProto()

vsb := &tmcons.Message_VoteSetBits{
VoteSetBits: &tmcons.VoteSetBits{
Height: msg.Height,
Round: msg.Round,
Type: msg.Type,
BlockID: bi,
},
vsb := &tmcons.VoteSetBits{
Height: msg.Height,
Round: msg.Round,
Type: msg.Type,
BlockID: bi,
}

if bits != nil {
vsb.VoteSetBits.Votes = *bits
vsb.Votes = *bits
}

pb = tmcons.Message{
Sum: vsb,
}
pb = vsb

default:
return nil, fmt.Errorf("consensus: message not recognized: %T", msg)
}

return &pb, nil
return pb, nil
}

// MsgFromProto takes a consensus proto message and returns the native go type
func MsgFromProto(msg *tmcons.Message) (Message, error) {
if msg == nil {
func MsgFromProto(p proto.Message) (Message, error) {
if p == nil {
return nil, errors.New("consensus: nil message")
}
var pb Message

switch msg := msg.Sum.(type) {
case *tmcons.Message_NewRoundStep:
rs, err := tmmath.SafeConvertUint8(int64(msg.NewRoundStep.Step))
switch msg := p.(type) {
case *tmcons.NewRoundStep:
rs, err := tmmath.SafeConvertUint8(int64(msg.Step))
// deny message based on possible overflow
if err != nil {
return nil, fmt.Errorf("denying message due to possible overflow: %w", err)
}
pb = &NewRoundStepMessage{
Height: msg.NewRoundStep.Height,
Round: msg.NewRoundStep.Round,
Height: msg.Height,
Round: msg.Round,
Step: cstypes.RoundStepType(rs),
SecondsSinceStartTime: msg.NewRoundStep.SecondsSinceStartTime,
LastCommitRound: msg.NewRoundStep.LastCommitRound,
SecondsSinceStartTime: msg.SecondsSinceStartTime,
LastCommitRound: msg.LastCommitRound,
}
case *tmcons.Message_NewValidBlock:
pbPartSetHeader, err := types.PartSetHeaderFromProto(&msg.NewValidBlock.BlockPartSetHeader)
case *tmcons.NewValidBlock:
pbPartSetHeader, err := types.PartSetHeaderFromProto(&msg.BlockPartSetHeader)
if err != nil {
return nil, fmt.Errorf("parts to proto error: %w", err)
}

pbBits := new(bits.BitArray)
pbBits.FromProto(msg.NewValidBlock.BlockParts)
pbBits.FromProto(msg.BlockParts)

pb = &NewValidBlockMessage{
Height: msg.NewValidBlock.Height,
Round: msg.NewValidBlock.Round,
Height: msg.Height,
Round: msg.Round,
BlockPartSetHeader: *pbPartSetHeader,
BlockParts: pbBits,
IsCommit: msg.NewValidBlock.IsCommit,
IsCommit: msg.IsCommit,
}
case *tmcons.Message_Proposal:
pbP, err := types.ProposalFromProto(&msg.Proposal.Proposal)
case *tmcons.Proposal:
pbP, err := types.ProposalFromProto(&msg.Proposal)
if err != nil {
return nil, fmt.Errorf("proposal msg to proto error: %w", err)
}
@@ -189,26 +163,28 @@ func MsgFromProto(msg *tmcons.Message) (Message, error) {
pb = &ProposalMessage{
Proposal: pbP,
}
case *tmcons.Message_ProposalPol:
case *tmcons.ProposalPOL:
pbBits := new(bits.BitArray)
pbBits.FromProto(&msg.ProposalPol.ProposalPol)
pbBits.FromProto(&msg.ProposalPol)
pb = &ProposalPOLMessage{
Height: msg.ProposalPol.Height,
ProposalPOLRound: msg.ProposalPol.ProposalPolRound,
Height: msg.Height,
ProposalPOLRound: msg.ProposalPolRound,
ProposalPOL: pbBits,
}
case *tmcons.Message_BlockPart:
parts, err := types.PartFromProto(&msg.BlockPart.Part)
case *tmcons.BlockPart:
parts, err := types.PartFromProto(&msg.Part)
if err != nil {
return nil, fmt.Errorf("blockpart msg to proto error: %w", err)
}
pb = &BlockPartMessage{
Height: msg.BlockPart.Height,
Round: msg.BlockPart.Round,
Height: msg.Height,
Round: msg.Round,
Part: parts,
}
case *tmcons.Message_Vote:
vote, err := types.VoteFromProto(msg.Vote.Vote)
case *tmcons.Vote:
// Vote validation will be handled in the vote message ValidateBasic
// call below.
vote, err := types.VoteFromProto(msg.Vote)
if err != nil {
return nil, fmt.Errorf("vote msg to proto error: %w", err)
}
@@ -216,36 +192,36 @@ func MsgFromProto(msg *tmcons.Message) (Message, error) {
pb = &VoteMessage{
Vote: vote,
}
case *tmcons.Message_HasVote:
case *tmcons.HasVote:
pb = &HasVoteMessage{
Height: msg.HasVote.Height,
Round: msg.HasVote.Round,
Type: msg.HasVote.Type,
Index: msg.HasVote.Index,
Height: msg.Height,
Round: msg.Round,
Type: msg.Type,
Index: msg.Index,
}
case *tmcons.Message_VoteSetMaj23:
bi, err := types.BlockIDFromProto(&msg.VoteSetMaj23.BlockID)
case *tmcons.VoteSetMaj23:
bi, err := types.BlockIDFromProto(&msg.BlockID)
if err != nil {
return nil, fmt.Errorf("voteSetMaj23 msg to proto error: %w", err)
}
pb = &VoteSetMaj23Message{
Height: msg.VoteSetMaj23.Height,
Round: msg.VoteSetMaj23.Round,
Type: msg.VoteSetMaj23.Type,
Height: msg.Height,
Round: msg.Round,
Type: msg.Type,
BlockID: *bi,
}
case *tmcons.Message_VoteSetBits:
bi, err := types.BlockIDFromProto(&msg.VoteSetBits.BlockID)
case *tmcons.VoteSetBits:
bi, err := types.BlockIDFromProto(&msg.BlockID)
if err != nil {
return nil, fmt.Errorf("voteSetBits msg to proto error: %w", err)
}
bits := new(bits.BitArray)
bits.FromProto(&msg.VoteSetBits.Votes)
bits.FromProto(&msg.Votes)

pb = &VoteSetBitsMessage{
Height: msg.VoteSetBits.Height,
Round: msg.VoteSetBits.Round,
Type: msg.VoteSetBits.Type,
Height: msg.Height,
Round: msg.Round,
Type: msg.Type,
BlockID: *bi,
Votes: bits,
}
@@ -260,20 +236,6 @@ func MsgFromProto(msg *tmcons.Message) (Message, error) {
return pb, nil
}

// MustEncode takes the reactors msg, makes it proto and marshals it
// this mimics `MustMarshalBinaryBare` in that is panics on error
func MustEncode(msg Message) []byte {
pb, err := MsgToProto(msg)
if err != nil {
panic(err)
}
enc, err := proto.Marshal(pb)
if err != nil {
panic(err)
}
return enc
}

// WALToProto takes a WAL message and return a proto walMessage and error
func WALToProto(msg WALMessage) (*tmcons.WALMessage, error) {
var pb tmcons.WALMessage
@@ -294,10 +256,14 @@ func WALToProto(msg WALMessage) (*tmcons.WALMessage, error) {
if err != nil {
return nil, err
}
if w, ok := consMsg.(p2p.Wrapper); ok {
consMsg = w.Wrap()
}
cm := consMsg.(*tmcons.Message)
pb = tmcons.WALMessage{
Sum: &tmcons.WALMessage_MsgInfo{
MsgInfo: &tmcons.MsgInfo{
Msg: *consMsg,
Msg: *cm,
PeerID: string(msg.PeerID),
},
},
@@ -343,7 +309,11 @@ func WALFromProto(msg *tmcons.WALMessage) (WALMessage, error) {
Step: msg.EventDataRoundState.Step,
}
case *tmcons.WALMessage_MsgInfo:
walMsg, err := MsgFromProto(&msg.MsgInfo.Msg)
um, err := msg.MsgInfo.Msg.Unwrap()
if err != nil {
return nil, fmt.Errorf("unwrap message: %w", err)
}
walMsg, err := MsgFromProto(um)
if err != nil {
return nil, fmt.Errorf("msgInfo from proto error: %w", err)
}

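The refactor above has MsgToProto return a bare proto.Message; the tmcons.Message Sum-envelope is now applied only where it is actually needed (the WAL and the wire), via the p2p.Wrapper interface's Wrap and the envelope's Unwrap, both visible in the WAL hunks. A hedged sketch of that round trip, inside the consensus package; the type assertions mirror the diff but the exact Wrap/Unwrap signatures are an assumption:

    // roundTrip folds a concrete consensus proto message into the
    // tmcons.Message envelope and recovers it again, as the WAL code does.
    func roundTrip() {
        pb, err := MsgToProto(&HasVoteMessage{Height: 1, Round: 0, Type: tmproto.PrevoteType, Index: 0})
        if err != nil {
            panic(err)
        }
        if w, ok := pb.(p2p.Wrapper); ok {
            wrapped := w.Wrap().(*tmcons.Message) // envelope for the WAL / wire
            unwrapped, err := wrapped.Unwrap()    // back to the concrete proto message
            if err != nil {
                panic(err)
            }
            _ = unwrapped
        }
    }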
@@ -57,21 +57,23 @@ func TestMsgToProto(t *testing.T) {
}
pbProposal := proposal.ToProto()

pv := types.NewMockPV()
pk, err := pv.GetPubKey()
require.NoError(t, err)
val := types.NewValidator(pk, 100)

vote, err := types.MakeVote(
1, types.BlockID{}, &types.ValidatorSet{Proposer: val, Validators: []*types.Validator{val}},
pv, "chainID", time.Now())
require.NoError(t, err)
vote := types.MakeVoteNoError(
t,
types.NewMockPV(),
"chainID",
0,
1,
0,
tmproto.PrecommitType,
bi,
time.Now(),
)
pbVote := vote.ToProto()

testsCases := []struct {
testName string
msg Message
want *tmcons.Message
want proto.Message
wantErr bool
}{
{"successful NewRoundStepMessage", &NewRoundStepMessage{
@@ -80,17 +82,15 @@ func TestMsgToProto(t *testing.T) {
Step: 1,
SecondsSinceStartTime: 1,
LastCommitRound: 2,
}, &tmcons.Message{
Sum: &tmcons.Message_NewRoundStep{
NewRoundStep: &tmcons.NewRoundStep{
Height: 2,
Round: 1,
Step: 1,
SecondsSinceStartTime: 1,
LastCommitRound: 2,
},
},
}, false},
}, &tmcons.NewRoundStep{
Height: 2,
Round: 1,
Step: 1,
SecondsSinceStartTime: 1,
LastCommitRound: 2,
},

false},

{"successful NewValidBlockMessage", &NewValidBlockMessage{
Height: 1,
@@ -98,92 +98,78 @@ func TestMsgToProto(t *testing.T) {
BlockPartSetHeader: psh,
BlockParts: bits,
IsCommit: false,
}, &tmcons.Message{
Sum: &tmcons.Message_NewValidBlock{
NewValidBlock: &tmcons.NewValidBlock{
Height: 1,
Round: 1,
BlockPartSetHeader: pbPsh,
BlockParts: pbBits,
IsCommit: false,
},
},
}, false},
}, &tmcons.NewValidBlock{
Height: 1,
Round: 1,
BlockPartSetHeader: pbPsh,
BlockParts: pbBits,
IsCommit: false,
},

false},
{"successful BlockPartMessage", &BlockPartMessage{
Height: 100,
Round: 1,
Part: &parts,
}, &tmcons.Message{
Sum: &tmcons.Message_BlockPart{
BlockPart: &tmcons.BlockPart{
Height: 100,
Round: 1,
Part: *pbParts,
},
},
}, false},
}, &tmcons.BlockPart{
Height: 100,
Round: 1,
Part: *pbParts,
},

false},
{"successful ProposalPOLMessage", &ProposalPOLMessage{
Height: 1,
ProposalPOLRound: 1,
ProposalPOL: bits,
}, &tmcons.Message{
Sum: &tmcons.Message_ProposalPol{
ProposalPol: &tmcons.ProposalPOL{
Height: 1,
ProposalPolRound: 1,
ProposalPol: *pbBits,
},
}}, false},
}, &tmcons.ProposalPOL{
Height: 1,
ProposalPolRound: 1,
ProposalPol: *pbBits,
},
false},
{"successful ProposalMessage", &ProposalMessage{
Proposal: &proposal,
}, &tmcons.Message{
Sum: &tmcons.Message_Proposal{
Proposal: &tmcons.Proposal{
Proposal: *pbProposal,
},
},
}, false},
}, &tmcons.Proposal{
Proposal: *pbProposal,
},

false},
{"successful VoteMessage", &VoteMessage{
Vote: vote,
}, &tmcons.Message{
Sum: &tmcons.Message_Vote{
Vote: &tmcons.Vote{
Vote: pbVote,
},
},
}, false},
}, &tmcons.Vote{
Vote: pbVote,
},

false},
{"successful VoteSetMaj23", &VoteSetMaj23Message{
Height: 1,
Round: 1,
Type: 1,
BlockID: bi,
}, &tmcons.Message{
Sum: &tmcons.Message_VoteSetMaj23{
VoteSetMaj23: &tmcons.VoteSetMaj23{
Height: 1,
Round: 1,
Type: 1,
BlockID: pbBi,
},
},
}, false},
}, &tmcons.VoteSetMaj23{
Height: 1,
Round: 1,
Type: 1,
BlockID: pbBi,
},

false},
{"successful VoteSetBits", &VoteSetBitsMessage{
Height: 1,
Round: 1,
Type: 1,
BlockID: bi,
Votes: bits,
}, &tmcons.Message{
Sum: &tmcons.Message_VoteSetBits{
VoteSetBits: &tmcons.VoteSetBits{
Height: 1,
Round: 1,
Type: 1,
BlockID: pbBi,
Votes: *pbBits,
},
},
}, false},
}, &tmcons.VoteSetBits{
Height: 1,
Round: 1,
Type: 1,
BlockID: pbBi,
Votes: *pbBits,
},

false},
{"failure", nil, &tmcons.Message{}, true},
}
for _, tt := range testsCases {
@@ -365,6 +351,8 @@ func TestConsMsgsVectors(t *testing.T) {
BlockID: bi,
}
vpb := v.ToProto()
v.Extension = []byte("extension")
vextPb := v.ToProto()

testCases := []struct {
testName string
@@ -397,9 +385,12 @@ func TestConsMsgsVectors(t *testing.T) {
{"BlockPart", &tmcons.Message{Sum: &tmcons.Message_BlockPart{
BlockPart: &tmcons.BlockPart{Height: 1, Round: 1, Part: *pbParts}}},
"2a36080110011a3008011204746573741a26080110011a206164645f6d6f72655f6578636c616d6174696f6e5f6d61726b735f636f64652d"},
{"Vote", &tmcons.Message{Sum: &tmcons.Message_Vote{
{"Vote_without_ext", &tmcons.Message{Sum: &tmcons.Message_Vote{
Vote: &tmcons.Vote{Vote: vpb}}},
"32700a6e0802100122480a206164645f6d6f72655f6578636c616d6174696f6e5f6d61726b735f636f64652d1224080112206164645f6d6f72655f6578636c616d6174696f6e5f6d61726b735f636f64652d2a0608c0b89fdc0532146164645f6d6f72655f6578636c616d6174696f6e3801"},
{"Vote_with_ext", &tmcons.Message{Sum: &tmcons.Message_Vote{
Vote: &tmcons.Vote{Vote: vextPb}}},
"327b0a790802100122480a206164645f6d6f72655f6578636c616d6174696f6e5f6d61726b735f636f64652d1224080112206164645f6d6f72655f6578636c616d6174696f6e5f6d61726b735f636f64652d2a0608c0b89fdc0532146164645f6d6f72655f6578636c616d6174696f6e38014a09657874656e73696f6e"},
{"HasVote", &tmcons.Message{Sum: &tmcons.Message_HasVote{
HasVote: &tmcons.HasVote{Height: 1, Round: 1, Type: tmproto.PrevoteType, Index: 1}}},
"3a080801100118012001"},