Compare commits
30 Commits
lasarojc-p...sam/abci++
28a7cbe97e
f5bfacd2cf
22d04dd19d
5651a21adb
bd75306a93
b76eb6906c
b0e07355bd
f7184192a0
a0c68b4941
662517b0db
38897de6d7
e1f3d5c58c
07804ea061
1f6a6176c0
b8162782c3
18382f97eb
5bfc6d7a3b
43ce473402
3b4c1b903a
8a40180248
920c5ad813
bbf1169aea
67d3a6e45b
0c2483e9f3
b837ead457
944e9ebdc0
28d47d9a38
7a81dd25bf
94c874da32
e7e48024ca
.github/CODEOWNERS  (4 changes; vendored)
@@ -7,6 +7,6 @@
  # global owners are only requested if there isn't a more specific
  # codeowner specified below. For this reason, the global codeowners
  # are often repeated in package-level definitions.
- * @ebuchman @tendermint/tendermint-engineering @adizere @lasarojc
+ * @ebuchman @tendermint/tendermint-engineering

- /spec @ebuchman @tendermint/tendermint-research @tendermint/tendermint-engineering @adizere @lasarojc
+ /spec @ebuchman @tendermint/tendermint-research @tendermint/tendermint-engineering
.github/dependabot.yml  (12 changes; vendored)
@@ -53,10 +53,10 @@ updates:
   - package-ecosystem: gomod
     directory: "/"
     schedule:
-      interval: daily
+      interval: weekly
     target-branch: "v0.37.x"
-    # Only allow automated security-related dependency updates on release
-    # branches.
+    # Only allow automated security-related dependency updates until we cut the
+    # final v0.37.0 release.
     open-pull-requests-limit: 0
     labels:
       - T:dependencies
@@ -65,11 +65,9 @@ updates:
   - package-ecosystem: gomod
     directory: "/"
     schedule:
-      interval: daily
+      interval: weekly
     target-branch: "v0.34.x"
-    # Only allow automated security-related dependency updates on release
-    # branches.
-    open-pull-requests-limit: 0
+    open-pull-requests-limit: 10
     labels:
       - T:dependencies
-      - S:automerge
.github/workflows/docker.yml  (6 changes; vendored)
@@ -41,17 +41,17 @@ jobs:
           platforms: all

       - name: Set up Docker Build
-        uses: docker/setup-buildx-action@v2.2.1
+        uses: docker/setup-buildx-action@v2.0.0

       - name: Login to DockerHub
         if: ${{ github.event_name != 'pull_request' }}
-        uses: docker/login-action@v2.1.0
+        uses: docker/login-action@v2.0.0
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

       - name: Publish to Docker Hub
-        uses: docker/build-push-action@v3.2.0
+        uses: docker/build-push-action@v3.1.1
        with:
          context: .
          file: ./DOCKER/Dockerfile
.github/workflows/e2e-nightly-34x.yml  (4 changes; vendored)
@@ -57,7 +57,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Notify Slack on failure
-        uses: slackapi/slack-github-action@v1.23.0
+        uses: slackapi/slack-github-action@v1.22.0
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
          SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
@@ -72,7 +72,7 @@ jobs:
                 "type": "section",
                 "text": {
                   "type": "mrkdwn",
-                  "text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMIT_URL }}|commit> related to the failure."
+                  "text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMIT_URL }}|commit> that caused the failure."
                 }
               }
             ]
.github/workflows/e2e-nightly-37x.yml  (4 changes; vendored)
@@ -57,7 +57,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Notify Slack on failure
-        uses: slackapi/slack-github-action@v1.23.0
+        uses: slackapi/slack-github-action@v1.22.0
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
          SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
@@ -72,7 +72,7 @@ jobs:
                 "type": "section",
                 "text": {
                   "type": "mrkdwn",
-                  "text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMIT_URL }}|commit> related to the failure."
+                  "text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMIT_URL }}|commit> that caused the failure."
                 }
               }
             ]
.github/workflows/e2e-nightly-main.yml  (4 changes; vendored)
@@ -46,7 +46,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Notify Slack on failure
-        uses: slackapi/slack-github-action@v1.23.0
+        uses: slackapi/slack-github-action@v1.22.0
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
          SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
@@ -61,7 +61,7 @@ jobs:
                 "type": "section",
                 "text": {
                   "type": "mrkdwn",
-                  "text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMIT_URL }}|commit> related to the failure."
+                  "text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMIT_URL }}|commit> that caused the failure."
                 }
               }
             ]
.github/workflows/fuzz-nightly.yml  (2 changes; vendored)
@@ -76,7 +76,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Notify Slack on failure
-        uses: slackapi/slack-github-action@v1.23.0
+        uses: slackapi/slack-github-action@v1.22.0
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
          SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
.github/workflows/gosec.yml  (41 changes; vendored; new file)
@@ -0,0 +1,41 @@
name: Run Gosec
on:
  pull_request:
    paths:
      - '**/*.go'
      - 'go.mod'
      - 'go.sum'
  push:
    branches:
      - main
      - 'feature/*'
      - 'v0.37.x'
      - 'v0.34.x'
    paths:
      - '**/*.go'
      - 'go.mod'
      - 'go.sum'

jobs:
  Gosec:
    permissions:
      security-events: write

    runs-on: ubuntu-latest
    env:
      GO111MODULE: on
    steps:
      - name: Checkout Source
        uses: actions/checkout@v3

      - name: Run Gosec Security Scanner
        uses: cosmos/gosec@master
        with:
          # Let the report trigger a failure with the Github Security scanner features.
          args: "-no-fail -fmt sarif -out results.sarif ./..."

      - name: Upload SARIF file
        uses: github/codeql-action/upload-sarif@v2
        with:
          # Path to SARIF file relative to the root of the repository
          sarif_file: results.sarif
.github/workflows/janitor.yml  (2 changes; vendored)
@@ -10,7 +10,7 @@ jobs:
     runs-on: ubuntu-latest
     timeout-minutes: 3
     steps:
-      - uses: styfle/cancel-workflow-action@0.11.0
+      - uses: styfle/cancel-workflow-action@0.10.1
        with:
          workflow_id: 1041851,1401230,2837803
          access_token: ${{ github.token }}
.github/workflows/lint.yml  (5 changes; vendored)
@@ -31,7 +31,10 @@ jobs:
            go.sum
      - uses: golangci/golangci-lint-action@v3
        with:
-          version: v1.50.1
+          # Required: the version of golangci-lint is required and
+          # must be specified without patch version: we always use the
+          # latest patch version.
+          version: v1.47.3
          args: --timeout 10m
          github-token: ${{ secrets.github_token }}
        if: env.GIT_DIFF
.github/workflows/markdown-links.yml  (19 changes; vendored)
@@ -1,20 +1,23 @@
 name: Check Markdown links

 on:
-  schedule:
-    # 2am UTC daily
-    - cron: '0 2 * * *'
+  push:
+    branches:
+      - main
+  pull_request:
+    branches: [main]

 jobs:
   markdown-link-check:
-    strategy:
-      matrix:
-        branch: ['main', 'v0.37.x', 'v0.34.x']
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v3
-        with:
-          ref: ${{ matrix.branch }}
-      - uses: informalsystems/github-action-markdown-link-check@main
+      - uses: technote-space/get-diff-action@v6
+        with:
+          PATTERNS: |
+            **/**.md
+      - uses: creachadair/github-action-markdown-link-check@master
+        with:
+          check-modified-files-only: 'yes'
+          config-file: '.md-link-check.json'
+        if: env.GIT_DIFF
.github/workflows/pre-release.yml  (27 changes; vendored)
@@ -8,7 +8,7 @@ on:
     - "v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+" # e.g. v0.37.0-rc1, v0.38.0-rc10

 jobs:
-  prerelease:
+  goreleaser:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
@@ -38,28 +38,3 @@ jobs:
           args: release --rm-dist --release-notes=../release_notes.md
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-
-  prerelease-success:
-    needs: prerelease
-    if: ${{ success() }}
-    runs-on: ubuntu-latest
-    steps:
-      - name: Notify Slack upon pre-release
-        uses: slackapi/slack-github-action@v1.23.0
-        env:
-          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
-          SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
-          RELEASE_URL: "${{ github.server_url }}/${{ github.repository }}/releases/tag/${{ github.ref_name }}"
-        with:
-          payload: |
-            {
-              "blocks": [
-                {
-                  "type": "section",
-                  "text": {
-                    "type": "mrkdwn",
-                    "text": ":sparkles: New Tendermint pre-release: <${{ env.RELEASE_URL }}|${{ github.ref_name }}>"
-                  }
-                }
-              ]
-            }
.github/workflows/proto-lint.yml  (2 changes; vendored)
@@ -15,7 +15,7 @@ jobs:
     timeout-minutes: 5
     steps:
       - uses: actions/checkout@v3
-      - uses: bufbuild/buf-setup-action@v1.9.0
+      - uses: bufbuild/buf-setup-action@v1.8.0
      - uses: bufbuild/buf-lint-action@v1
        with:
          input: 'proto'
.github/workflows/release.yml  (27 changes; vendored)
@@ -6,7 +6,7 @@ on:
     - "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10

 jobs:
-  release:
+  goreleaser:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
@@ -35,28 +35,3 @@ jobs:
           args: release --rm-dist --release-notes=../release_notes.md
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-
-  release-success:
-    needs: release
-    if: ${{ success() }}
-    runs-on: ubuntu-latest
-    steps:
-      - name: Notify Slack upon release
-        uses: slackapi/slack-github-action@v1.23.0
-        env:
-          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
-          SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
-          RELEASE_URL: "${{ github.server_url }}/${{ github.repository }}/releases/tag/${{ github.ref_name }}"
-        with:
-          payload: |
-            {
-              "blocks": [
-                {
-                  "type": "section",
-                  "text": {
-                    "type": "mrkdwn",
-                    "text": ":rocket: New Tendermint release: <${{ env.RELEASE_URL }}|${{ github.ref_name }}>"
-                  }
-                }
-              ]
-            }
.gitignore  (2 changes; vendored)
@@ -55,5 +55,3 @@ proto/spec/**/*.pb.go
 *.pdf
 *.gz
 *.dvi
- # Python virtual environments
- .venv
@@ -2,6 +2,7 @@ linters:
 enable:
   - asciicheck
   - bodyclose
+  - deadcode
   - depguard
   - dogsled
   - dupl
@@ -25,6 +26,7 @@ linters:
   - typecheck
   - unconvert
   - unused
+  - varcheck

 issues:
   exclude-rules:
@@ -2,16 +2,5 @@
   "retryOn429": true,
   "retryCount": 5,
   "fallbackRetryDelay": "30s",
-  "aliveStatusCodes": [200, 206, 503],
-  "httpHeaders": [
-    {
-      "urls": [
-        "https://docs.github.com/",
-        "https://help.github.com/"
-      ],
-      "headers": {
-        "Accept-Encoding": "zstd, br, gzip, deflate"
-      }
-    }
-  ]
+  "aliveStatusCodes": [200, 206, 503]
 }
CHANGELOG.md  (70 changes)
@@ -2,76 +2,6 @@

 Friendly reminder, we have a [bug bounty program](https://hackerone.com/cosmos).

- ## v0.34.23
-
- *Nov 9, 2022*
-
- This release introduces some new Prometheus metrics to help in determining what
- kinds of messages are consuming the most P2P bandwidth. This builds towards our
- broader goal of optimizing Tendermint bandwidth consumption, and will give us
- meaningful insights once we can establish these metrics for a number of chains.
-
- We now also return `Cache-Control` headers for select RPC endpoints to help
- facilitate caching.
-
- Special thanks to external contributors on this release: @JayT106
-
- ### IMPROVEMENTS
-
- - `[p2p]` [\#9641](https://github.com/tendermint/tendermint/issues/9641) Add new
-   Envelope type and associated methods for sending and receiving Envelopes
-   instead of raw bytes. This also adds new metrics,
-   `tendermint_p2p_message_send_bytes_total` and
-   `tendermint_p2p_message_receive_bytes_total`, that expose how many bytes of
-   each message type have been sent.
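The two counters above hang off the new Envelope-based send and receive path. As a rough, self-contained illustration only — the Envelope struct here is a local stand-in, since the real type and its exact fields live in tendermint's p2p package and may differ — this sketch shows per-message-type byte accounting in the spirit of \#9641:

package main

import (
	"fmt"

	"github.com/cosmos/gogoproto/proto"
)

// Envelope is a hypothetical stand-in mirroring the shape the release
// notes describe: a decoded proto message plus its channel ID.
type Envelope struct {
	ChannelID byte
	Message   proto.Message
}

// recordSize shows where such metrics hook in: every envelope's
// marshaled size is added to a per-message-type counter, which can
// then back a Prometheus counter vector labeled by message type.
func recordSize(sizes map[string]int, e Envelope) {
	bz, err := proto.Marshal(e.Message)
	if err != nil {
		return
	}
	sizes[proto.MessageName(e.Message)] += len(bz)
}

func main() {
	sizes := make(map[string]int)
	_ = recordSize // wired into the send/receive path in practice
	fmt.Println("per-type byte counters back the *_message_send_bytes_total metrics", sizes)
}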
- - `[rpc]` [\#9666](https://github.com/tendermint/tendermint/issues/9666) Enable
-   caching of RPC responses (@JayT106)
-
-   The following RPC endpoints will return `Cache-Control` headers with a maximum
-   age of 1 day:
-
-   - `/abci_info`
-   - `/block`, if `height` is supplied
-   - `/block_by_hash`
-   - `/block_results`, if `height` is supplied
-   - `/blockchain`
-   - `/check_tx`
-   - `/commit`, if `height` is supplied
-   - `/consensus_params`, if `height` is supplied
-   - `/genesis`
-   - `/genesis_chunked`
-   - `/tx`
-   - `/validators`, if `height` is supplied
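A quick way to observe the caching behaviour described in this entry against a node running v0.34.23 is to inspect the response headers directly. A minimal sketch, assuming a node listening on the default RPC address localhost:26657:

package main

import (
	"fmt"
	"net/http"
)

func main() {
	// /abci_info is one of the cacheable endpoints listed above.
	resp, err := http.Get("http://localhost:26657/abci_info")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Per the release notes, a cacheable response carries a
	// Cache-Control header with a maximum age of one day.
	fmt.Println("Cache-Control:", resp.Header.Get("Cache-Control"))
}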
- ## v0.34.22
-
- This release includes several bug fixes, [one of
- which](https://github.com/tendermint/tendermint/pull/9518) we discovered while
- building up a baseline for v0.34 against which to compare our upcoming v0.37
- release during our [QA process](./docs/qa/).
-
- Special thanks to external contributors on this release: @RiccardoM
-
- ### FEATURES
-
- - [rpc] [\#9423](https://github.com/tendermint/tendermint/pull/9423) Support
-   HTTPS URLs from the WebSocket client (@RiccardoM, @cmwaters)
-
- ### BUG FIXES
-
- - [config] [\#9483](https://github.com/tendermint/tendermint/issues/9483)
-   Calling `tendermint init` would incorrectly leave out the new `[storage]`
-   section delimiter in the generated configuration file - this has now been
-   fixed
- - [p2p] [\#9500](https://github.com/tendermint/tendermint/issues/9500) Prevent
-   peers who have errored from being added to the peer set (@jmalicevic)
- - [indexer] [\#9473](https://github.com/tendermint/tendermint/issues/9473) Fix
-   a bug that caused the psql indexer to index empty blocks whenever one of the
-   transactions returned a non-zero code. The relevant deduplication logic has
-   been moved within the kv indexer only (@cmwaters)
- - [blocksync] [\#9518](https://github.com/tendermint/tendermint/issues/9518) A
-   block sync stall was observed during our QA process whereby the node was
-   unable to make progress. Retrying block requests after a timeout fixes this.
-
 ## v0.34.21

 Release highlights include:
@@ -11,7 +11,6 @@
 - P2P Protocol

 - Go API
-   - [p2p] \#9625 Remove unused p2p/trust package (@cmwaters)

 - Blockchain Protocol

@@ -20,20 +19,14 @@

 - Tooling
   - [tools/tm-signer-harness] \#6498 Set OS home dir instead of the hardcoded PATH. (@JayT106)
-   - [metrics] \#9682 move state-syncing and block-syncing metrics to their respective packages (@cmwaters);
-     labels have moved from block_syncing -> blocksync_syncing and state_syncing -> statesync_syncing
-
- ### FEATURES
-
- - [config] \#9680 Introduce `BootstrapPeers` to the config to allow nodes to list peers to be added to
-   the addressbook upon start up (@cmwaters)

 ### IMPROVEMENTS

 - [pubsub] \#7319 Performance improvements for the event query API (@creachadair)
 - [p2p/pex] \#6509 Improve addrBook.hash performance (@cuonglm)
 - [crypto/merkle] \#6443 & \#6513 Improve HashAlternatives performance (@cuonglm, @marbar3778)
- - [rpc] \#9650 Enable caching of RPC responses (@JayT106)

 ### BUG FIXES

@@ -103,4 +96,3 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi

 - [consensus] \#9229 fix round number of `enterPropose` when handling `RoundStepNewRound` timeout. (@fatcat22)
 - [docker] \#9073 enable cross platform build using docker buildx
- - [blocksync] \#9518 handle the case when the sending queue is full: retry block request after a timeout
@@ -12,7 +12,7 @@ and hence to Tendermint.

 * We are committed to providing a friendly, safe and welcoming environment for
   all, regardless of level of experience, gender, gender identity and
   expression, sexual orientation, disability, personal appearance, body size,
-   race, ethnicity, age, religion, nationality, or other similar characteristics.
+   race, ethnicity, age, religion, nationality, or other similar characteristic.

 * On Slack, please avoid using overtly sexual nicknames or other nicknames that
   might detract from a friendly, safe and welcoming environment for all.
@@ -12,7 +12,7 @@ landing changes in `main`.

 All work on the code base should be motivated by a [Github
 Issue](https://github.com/tendermint/tendermint/issues).
 [Search](https://github.com/tendermint/tendermint/issues?q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22)
- is a good place to start when looking for places to contribute. If you
+ is a good place start when looking for places to contribute. If you
 would like to work on an issue which already exists, please indicate so
 by leaving a comment.

@@ -213,7 +213,7 @@ Changes with multiple classifications should be doubly included (eg. a bug fix
 that is also a breaking change should be recorded under both).

 Breaking changes are further subdivided according to the APIs/users they impact.
- Any change that affects multiple APIs/users should be recorded multiply - for
+ Any change that effects multiple APIs/users should be recorded multiply - for
 instance, a change to the `Blockchain Protocol` that removes a field from the
 header should also be recorded under `CLI/RPC/Config` since the field will be
 removed from the header in RPC responses as well.

@@ -247,7 +247,7 @@ To begin contributing, create a development branch either on `github.com/tenderm
 Make changes, and before submitting a pull request, update the `CHANGELOG_PENDING.md` to record your change. Also, run either `git rebase` or `git merge` on top of the latest `main`. (Since pull requests are squash-merged, either is fine!)

 Update the `UPGRADING.md` if the change you've made is breaking and the
- instructions should be in place for a user on how he/she can upgrade its
+ instructions should be in place for a user on how he/she can upgrade it's
 software (ABCI application, Tendermint-based blockchain, light client, wallet).

 Once you have submitted a pull request label the pull request with either `R:minor`, if the change should be included in the next minor release, or `R:major`, if the change is meant for a major release.
Makefile  (2 changes)
@@ -271,7 +271,7 @@ format:

 lint:
 	@echo "--> Running linter"
- 	@go run github.com/golangci/golangci-lint/cmd/golangci-lint run
+ 	@golangci-lint run
 .PHONY: lint

 DESTINATION = ./index.html.md
README.md  (13 changes)
@@ -113,15 +113,10 @@ For more information on upgrading, see [UPGRADING.md](./UPGRADING.md).

 ### Supported Versions

- Because we are a small core team, we have limited capacity to ship patch
- updates, including security updates. Consequently, we strongly recommend keeping
- Tendermint up-to-date. Upgrading instructions can be found in
- [UPGRADING.md](./UPGRADING.md).
-
- Currently supported versions include:
-
- - v0.34.x
- - v0.37.x (release candidate)
+ Because we are a small core team, we only ship patch updates, including security
+ updates, to the most recent minor release and the second-most recent minor
+ release. Consequently, we strongly recommend keeping Tendermint up-to-date.
+ Upgrading instructions can be found in [UPGRADING.md](./UPGRADING.md).

 ## Resources
@@ -98,7 +98,7 @@ Sometimes it's necessary to rename libraries to avoid naming collisions or ambig
 * Make use of table driven testing where possible and not-cumbersome
   * [Inspiration](https://dave.cheney.net/2013/06/09/writing-table-driven-tests-in-go)
 * Make use of [assert](https://godoc.org/github.com/stretchr/testify/assert) and [require](https://godoc.org/github.com/stretchr/testify/require)
- * When using mocks, it is recommended to use Testify [mock](<https://pkg.go.dev/github.com/stretchr/testify/mock>
+ * When using mocks, it is recommended to use Testify [mock] (<https://pkg.go.dev/github.com/stretchr/testify/mock>
 ) along with [Mockery](https://github.com/vektra/mockery) for autogeneration

 ## Errors
@@ -5,15 +5,6 @@ Tendermint Core.

 ## Unreleased

- ## Config Changes
-
- * A new config field, `BootstrapPeers`, has been introduced as a means of
-   adding a list of addresses to the addressbook upon initializing a node. This is an
-   alternative to `PersistentPeers`. `PersistentPeers` should only be used for
-   nodes that you want to keep a constant connection with, i.e. sentry nodes.
-
- ----
-
 ### ABCI Changes

 * The `ABCIVersion` is now `1.0.0`.
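For reference, a minimal sketch of setting the `BootstrapPeers` entry from the removed section programmatically. It assumes the field mirrors `PersistentPeers` as a comma-separated string on the P2P section of the config — the exact field placement is an assumption, not confirmed by this diff, and the addresses are hypothetical:

package main

import "github.com/tendermint/tendermint/config"

func main() {
	cfg := config.DefaultConfig()
	// Bootstrap peers are only dialed to populate the address book at
	// startup; persistent peers are redialed for the node's lifetime.
	// (Field name/location assumed to mirror PersistentPeers.)
	cfg.P2P.BootstrapPeers = "id1@10.0.0.1:26656,id2@10.0.0.2:26656"
	_ = cfg
}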
@@ -6,6 +6,8 @@ import (
 	tmsync "github.com/tendermint/tendermint/libs/sync"
 )

+ var _ Client = (*localClient)(nil)
+
 // NOTE: use defer to unlock mutex because Application might panic (e.g., in
 // case of malicious tx or query). It only makes sense for publicly exposed
 // methods like CheckTx (/broadcast_tx_* RPC endpoint) or Query (/abci_query
@@ -22,6 +24,8 @@ var _ Client = (*localClient)(nil)
 // NewLocalClient creates a local client, which will be directly calling the
 // methods of the given app.
+ //
+ // Both Async and Sync methods ignore the given context.Context parameter.
 func NewLocalClient(mtx *tmsync.Mutex, app types.Application) Client {
 	if mtx == nil {
 		mtx = new(tmsync.Mutex)
@@ -305,8 +309,7 @@ func (app *localClient) OfferSnapshotSync(req types.RequestOfferSnapshot) (*type
 }

 func (app *localClient) LoadSnapshotChunkSync(
- 	req types.RequestLoadSnapshotChunk,
- ) (*types.ResponseLoadSnapshotChunk, error) {
+ 	req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
 	app.mtx.Lock()
 	defer app.mtx.Unlock()

@@ -315,8 +318,7 @@ func (app *localClient) LoadSnapshotChunkSync(
 }

 func (app *localClient) ApplySnapshotChunkSync(
- 	req types.RequestApplySnapshotChunk,
- ) (*types.ResponseApplySnapshotChunk, error) {
+ 	req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
 	app.mtx.Lock()
 	defer app.mtx.Unlock()
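For context on the hunks above, a minimal sketch of constructing the synchronized local client against the bundled kvstore example application. Passing a nil mutex lets NewLocalClient allocate its own, per the code shown; the kvstore app is used here purely for illustration:

package main

import (
	abcicli "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/example/kvstore"
)

func main() {
	app := kvstore.NewApplication()
	// All Sync/Async calls on this client serialize through a single
	// mutex before invoking the application's methods directly.
	client := abcicli.NewLocalClient(nil, app)
	_ = client
}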
@@ -1,263 +0,0 @@
package abcicli

import (
	"sync"

	types "github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/libs/service"
)

type unsyncLocalClient struct {
	service.BaseService

	types.Application

	// This mutex is exclusively used to protect the callback.
	mtx sync.RWMutex
	Callback
}

var _ Client = (*unsyncLocalClient)(nil)

// NewUnsyncLocalClient creates an unsynchronized local client, which will be
// directly calling the methods of the given app.
//
// Unlike NewLocalClient, it does not hold a mutex around the application, so
// it is up to the application to manage its synchronization properly.
func NewUnsyncLocalClient(app types.Application) Client {
	cli := &unsyncLocalClient{
		Application: app,
	}
	cli.BaseService = *service.NewBaseService(nil, "unsyncLocalClient", cli)
	return cli
}

func (app *unsyncLocalClient) SetResponseCallback(cb Callback) {
	app.mtx.Lock()
	defer app.mtx.Unlock()
	app.Callback = cb
}

// TODO: change types.Application to include Error()?
func (app *unsyncLocalClient) Error() error {
	return nil
}

func (app *unsyncLocalClient) FlushAsync() *ReqRes {
	// Do nothing
	return newLocalReqRes(types.ToRequestFlush(), nil)
}

func (app *unsyncLocalClient) EchoAsync(msg string) *ReqRes {
	return app.callback(
		types.ToRequestEcho(msg),
		types.ToResponseEcho(msg),
	)
}

func (app *unsyncLocalClient) InfoAsync(req types.RequestInfo) *ReqRes {
	res := app.Application.Info(req)
	return app.callback(
		types.ToRequestInfo(req),
		types.ToResponseInfo(res),
	)
}

func (app *unsyncLocalClient) DeliverTxAsync(params types.RequestDeliverTx) *ReqRes {
	res := app.Application.DeliverTx(params)
	return app.callback(
		types.ToRequestDeliverTx(params),
		types.ToResponseDeliverTx(res),
	)
}

func (app *unsyncLocalClient) CheckTxAsync(req types.RequestCheckTx) *ReqRes {
	res := app.Application.CheckTx(req)
	return app.callback(
		types.ToRequestCheckTx(req),
		types.ToResponseCheckTx(res),
	)
}

func (app *unsyncLocalClient) QueryAsync(req types.RequestQuery) *ReqRes {
	res := app.Application.Query(req)
	return app.callback(
		types.ToRequestQuery(req),
		types.ToResponseQuery(res),
	)
}

func (app *unsyncLocalClient) CommitAsync() *ReqRes {
	res := app.Application.Commit()
	return app.callback(
		types.ToRequestCommit(),
		types.ToResponseCommit(res),
	)
}

func (app *unsyncLocalClient) InitChainAsync(req types.RequestInitChain) *ReqRes {
	res := app.Application.InitChain(req)
	return app.callback(
		types.ToRequestInitChain(req),
		types.ToResponseInitChain(res),
	)
}

func (app *unsyncLocalClient) BeginBlockAsync(req types.RequestBeginBlock) *ReqRes {
	res := app.Application.BeginBlock(req)
	return app.callback(
		types.ToRequestBeginBlock(req),
		types.ToResponseBeginBlock(res),
	)
}

func (app *unsyncLocalClient) EndBlockAsync(req types.RequestEndBlock) *ReqRes {
	res := app.Application.EndBlock(req)
	return app.callback(
		types.ToRequestEndBlock(req),
		types.ToResponseEndBlock(res),
	)
}

func (app *unsyncLocalClient) ListSnapshotsAsync(req types.RequestListSnapshots) *ReqRes {
	res := app.Application.ListSnapshots(req)
	return app.callback(
		types.ToRequestListSnapshots(req),
		types.ToResponseListSnapshots(res),
	)
}

func (app *unsyncLocalClient) OfferSnapshotAsync(req types.RequestOfferSnapshot) *ReqRes {
	res := app.Application.OfferSnapshot(req)
	return app.callback(
		types.ToRequestOfferSnapshot(req),
		types.ToResponseOfferSnapshot(res),
	)
}

func (app *unsyncLocalClient) LoadSnapshotChunkAsync(req types.RequestLoadSnapshotChunk) *ReqRes {
	res := app.Application.LoadSnapshotChunk(req)
	return app.callback(
		types.ToRequestLoadSnapshotChunk(req),
		types.ToResponseLoadSnapshotChunk(res),
	)
}

func (app *unsyncLocalClient) ApplySnapshotChunkAsync(req types.RequestApplySnapshotChunk) *ReqRes {
	res := app.Application.ApplySnapshotChunk(req)
	return app.callback(
		types.ToRequestApplySnapshotChunk(req),
		types.ToResponseApplySnapshotChunk(res),
	)
}

func (app *unsyncLocalClient) PrepareProposalAsync(req types.RequestPrepareProposal) *ReqRes {
	res := app.Application.PrepareProposal(req)
	return app.callback(
		types.ToRequestPrepareProposal(req),
		types.ToResponsePrepareProposal(res),
	)
}

func (app *unsyncLocalClient) ProcessProposalAsync(req types.RequestProcessProposal) *ReqRes {
	res := app.Application.ProcessProposal(req)
	return app.callback(
		types.ToRequestProcessProposal(req),
		types.ToResponseProcessProposal(res),
	)
}

//-------------------------------------------------------

func (app *unsyncLocalClient) FlushSync() error {
	return nil
}

func (app *unsyncLocalClient) EchoSync(msg string) (*types.ResponseEcho, error) {
	return &types.ResponseEcho{Message: msg}, nil
}

func (app *unsyncLocalClient) InfoSync(req types.RequestInfo) (*types.ResponseInfo, error) {
	res := app.Application.Info(req)
	return &res, nil
}

func (app *unsyncLocalClient) DeliverTxSync(req types.RequestDeliverTx) (*types.ResponseDeliverTx, error) {
	res := app.Application.DeliverTx(req)
	return &res, nil
}

func (app *unsyncLocalClient) CheckTxSync(req types.RequestCheckTx) (*types.ResponseCheckTx, error) {
	res := app.Application.CheckTx(req)
	return &res, nil
}

func (app *unsyncLocalClient) QuerySync(req types.RequestQuery) (*types.ResponseQuery, error) {
	res := app.Application.Query(req)
	return &res, nil
}

func (app *unsyncLocalClient) CommitSync() (*types.ResponseCommit, error) {
	res := app.Application.Commit()
	return &res, nil
}

func (app *unsyncLocalClient) InitChainSync(req types.RequestInitChain) (*types.ResponseInitChain, error) {
	res := app.Application.InitChain(req)
	return &res, nil
}

func (app *unsyncLocalClient) BeginBlockSync(req types.RequestBeginBlock) (*types.ResponseBeginBlock, error) {
	res := app.Application.BeginBlock(req)
	return &res, nil
}

func (app *unsyncLocalClient) EndBlockSync(req types.RequestEndBlock) (*types.ResponseEndBlock, error) {
	res := app.Application.EndBlock(req)
	return &res, nil
}

func (app *unsyncLocalClient) ListSnapshotsSync(req types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
	res := app.Application.ListSnapshots(req)
	return &res, nil
}

func (app *unsyncLocalClient) OfferSnapshotSync(req types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
	res := app.Application.OfferSnapshot(req)
	return &res, nil
}

func (app *unsyncLocalClient) LoadSnapshotChunkSync(
	req types.RequestLoadSnapshotChunk,
) (*types.ResponseLoadSnapshotChunk, error) {
	res := app.Application.LoadSnapshotChunk(req)
	return &res, nil
}

func (app *unsyncLocalClient) ApplySnapshotChunkSync(
	req types.RequestApplySnapshotChunk,
) (*types.ResponseApplySnapshotChunk, error) {
	res := app.Application.ApplySnapshotChunk(req)
	return &res, nil
}

func (app *unsyncLocalClient) PrepareProposalSync(req types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
	res := app.Application.PrepareProposal(req)
	return &res, nil
}

func (app *unsyncLocalClient) ProcessProposalSync(req types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
	res := app.Application.ProcessProposal(req)
	return &res, nil
}

//-------------------------------------------------------

func (app *unsyncLocalClient) callback(req *types.Request, res *types.Response) *ReqRes {
	app.mtx.RLock()
	defer app.mtx.RUnlock()
	app.Callback(req, res)
	rr := newLocalReqRes(req, res)
	rr.callbackInvoked = true
	return rr
}
abci/example/orderbook/app.go  (573 changes; new file)
@@ -0,0 +1,573 @@
package orderbook

import (
	"bytes"
	"encoding/binary"
	fmt "fmt"

	"github.com/cosmos/gogoproto/proto"
	dbm "github.com/tendermint/tm-db"

	"github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/crypto/ed25519"
	"github.com/tendermint/tendermint/crypto/tmhash"
)

var _ types.Application = (*StateMachine)(nil)

const Version = 1

const (
	// In tendermint a zero code is okay and all non-zero codes are errors
	StatusOK = iota
	StatusErrDecoding
	StatusErrUnknownMessage
	StatusErrValidateBasic
	StatusErrNoAccount
	StatusErrAccountExists
	StatusErrNoPair
	StatusErrPairExists
	StatusErrInvalidOrder
	StatusErrUnacceptableMessage
	StatusErrNoCommodity
)

var (
	stateKey   = []byte("state")
	accountKey = []byte("account")
	pairKey    = []byte("pair")
)

// StateMachine is the main struct that encompasses the logic of the orderbook
type StateMachine struct {
	// inherit all the abci methods so we don't have to implement everything
	types.BaseApplication

	// persisted state, which is a key-value store containing:
	// accountID -> account
	// pairID -> pair
	db dbm.DB

	// in-memory state
	lastHeight int64  // the last height that was persisted
	lastHash   []byte // the last hash that was persisted
	// list of accounts (this is used for the app hash)
	accounts    []*Account
	pairs       map[string]*Pair    // lookup pairs
	commodities map[string]struct{} // lookup commodities
	publicKeys  map[string]struct{} // lookup existence of an account
	// accounts that have been modified by the most recent block
	// and will need to result in an update to the db
	touchedAccounts map[uint64]struct{}
	// new pairs added in this block which will need to be added to the
	// db on "Commit"
	newPairs []*Pair

	// app-side mempool (also ephemeral)
	// this takes ask and bid transactions from `CheckTx`
	// and matches them as a "MatchedOrder" which is
	// then proposed in a block
	//
	// it's important to note that there is no garbage collection
	// here. Bids and asks, potentially even invalid, will
	// continue to stay here until matched
	markets map[string]*Market // i.e. ATOM/USDC
}

// New creates a StateMachine from a given database. If the database is
// empty a fresh instance is created, else the accounts, pairs and
// state are loaded into memory.
func New(db dbm.DB) (*StateMachine, error) {
	// iterate over all the keys in the database
	iter, err := db.Iterator(nil, nil)
	if err != nil {
		return nil, err
	}
	defer iter.Close()

	var (
		accounts    = make([]*Account, 0)
		publicKeys  = make(map[string]struct{})
		commodities = make(map[string]struct{})
		pairs       = make(map[string]*Pair)
		markets     = make(map[string]*Market)
		lastHeight  uint64
		lastHash    []byte
	)

	for ; iter.Valid(); iter.Next() {
		if bytes.HasPrefix(iter.Key(), pairKey) {
			var pair Pair
			if err := proto.Unmarshal(iter.Value(), &pair); err != nil {
				return nil, err
			}
			pairs[pair.String()] = &pair
			commodities[pair.BuyersDenomination] = struct{}{}
			markets[pair.String()] = NewMarket(&pair)
		}

		if bytes.HasPrefix(iter.Key(), accountKey) {
			var acc Account
			if err := proto.Unmarshal(iter.Value(), &acc); err != nil {
				return nil, err
			}

			accounts = append(accounts, &acc)
			publicKeys[string(acc.PublicKey)] = struct{}{}
		}

		if bytes.HasPrefix(iter.Key(), stateKey) {
			// state is an 8-byte big-endian height followed by the hash
			// (see updateState)
			state := iter.Value()
			lastHeight = binary.BigEndian.Uint64(state[:8])
			lastHash = state[8:]
		}
	}

	return &StateMachine{
		accounts:        accounts,
		pairs:           pairs,
		commodities:     commodities,
		publicKeys:      publicKeys,
		markets:         markets,
		lastHeight:      int64(lastHeight),
		lastHash:        lastHash,
		db:              db,
		touchedAccounts: make(map[uint64]struct{}),
		newPairs:        make([]*Pair, 0),
	}, nil
}

// Info is used by Tendermint to understand the state of the application.
// This is useful for replay and syncing modes.
func (sm *StateMachine) Info(req types.RequestInfo) types.ResponseInfo {
	return types.ResponseInfo{
		AppVersion:       Version,
		LastBlockHeight:  sm.lastHeight,
		LastBlockAppHash: sm.lastHash,
	}
}

// CheckTx indicates which transactions should be accepted in the mempool. It is
// not a perfect validity check because we're unsure of the state that the transaction
// will be executed against. We should treat this as a gatekeeper to the mempool.
// Apart from adding transactions to the app-side mempool, this check is stateless.
func (sm *StateMachine) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
	var msg = new(Msg)

	err := proto.Unmarshal(req.Tx, msg)
	if err != nil {
		return types.ResponseCheckTx{Code: StatusErrDecoding, Log: err.Error()} // decoding error
	}

	if err := msg.ValidateBasic(); err != nil {
		return types.ResponseCheckTx{Code: StatusErrValidateBasic, Log: err.Error()}
	}

	// add either bids or asks to the market which will match them in PrepareProposal
	switch m := msg.Sum.(type) {
	case *Msg_MsgAsk:
		market, ok := sm.markets[m.MsgAsk.Pair.String()]
		if !ok {
			return types.ResponseCheckTx{Code: StatusErrNoPair}
		}
		market.AddAsk(m.MsgAsk.AskOrder)
	case *Msg_MsgBid:
		market, ok := sm.markets[m.MsgBid.Pair.String()]
		if !ok {
			return types.ResponseCheckTx{Code: StatusErrNoPair}
		}
		market.AddBid(m.MsgBid.BidOrder)
	}

	return types.ResponseCheckTx{Code: StatusOK}
}

// ValidateTx validates the transactions against state.
func (sm *StateMachine) ValidateTx(msg *Msg) uint32 {
	if err := msg.ValidateBasic(); err != nil {
		return StatusErrValidateBasic
	}

	switch m := msg.Sum.(type) {
	case *Msg_MsgRegisterPair:
		pair := m.MsgRegisterPair.Pair
		if _, ok := sm.pairs[pair.String()]; ok {
			return StatusErrPairExists
		}

		reversePair := &Pair{BuyersDenomination: pair.SellersDenomination, SellersDenomination: pair.BuyersDenomination}
		if _, ok := sm.pairs[reversePair.String()]; ok {
			return StatusErrPairExists
		}

	case *Msg_MsgAsk, *Msg_MsgBid: // MsgAsk and MsgBid are not allowed individually - they need to be matched as a TradeSet
		return StatusErrUnacceptableMessage // TODO: add logic around msg ask and bid to allow them

	case *Msg_MsgCreateAccount:
		// check for duplicate accounts in state machine
		if _, ok := sm.publicKeys[string(m.MsgCreateAccount.PublicKey)]; ok {
			return StatusErrAccountExists
		}

		// check that each of the commodities is present in at least one trading pair
		for _, commodity := range m.MsgCreateAccount.Commodities {
			if _, exists := sm.commodities[commodity.Denom]; !exists {
				return StatusErrNoCommodity
			}
		}

	case *Msg_MsgTradeSet:
		// check the pair exists
		if _, ok := sm.pairs[m.MsgTradeSet.TradeSet.Pair.String()]; !ok {
			return StatusErrNoPair
		}

		for _, order := range m.MsgTradeSet.TradeSet.MatchedOrders {
			// validate matched order i.e. users have funds and signatures are valid
			if !sm.isMatchedOrderValid(order, m.MsgTradeSet.TradeSet.Pair) {
				return StatusErrInvalidOrder
			}
		}

	default:
		return StatusErrUnknownMessage
	}

	return StatusOK
}

// PrepareProposal is called whenever the validator is the proposer for that round. First, it adds the non-order
// transactions provided by tendermint. The orderbook then loops through each market and tries to match as many
// transactions as possible. For each new transaction it checks that the max bytes has not been exceeded.
func (sm *StateMachine) PrepareProposal(req types.RequestPrepareProposal) types.ResponsePrepareProposal {
	// start with an empty list of transactions and track the proposal size
	txs := make([][]byte, 0)
	totalBytes := 0

	// go through the transactions passed up via Tendermint first
	for _, tx := range req.Txs {
		var msg = new(Msg)
		err := proto.Unmarshal(tx, msg)
		if err != nil {
			panic(err)
		}

		// skip over the bids and asks that are proposed. We already have them
		if _, ok := msg.Sum.(*Msg_MsgBid); ok {
			continue
		}
		if _, ok := msg.Sum.(*Msg_MsgAsk); ok {
			continue
		}

		// make sure we're proposing valid transactions
		if status := sm.ValidateTx(msg); status != StatusOK {
			continue
		}

		if totalBytes+len(tx) > int(req.MaxTxBytes) {
			return types.ResponsePrepareProposal{Txs: txs}
		}
		txs = append(txs, tx)
		totalBytes += len(tx)
	}

	// fetch and match all the bids and asks for each market and add these
	for _, market := range sm.markets {
		tradeSet := market.Match()
		if tradeSet == nil {
			continue
		}

		fmt.Println("we have a tradeset")

		tradeSet = sm.validateTradeSetAgainstState(tradeSet)
		if tradeSet == nil || len(tradeSet.MatchedOrders) == 0 {
			continue
		}

		fmt.Println("we have a valid tradeset")

		// wrap this as a message type, turning the tradeset into bytes
		// and the bytes into a transaction
		msgTradeSet := &MsgTradeSet{TradeSet: tradeSet}
		bz, err := proto.Marshal(msgTradeSet)
		if err != nil {
			panic(err)
		}

		// check to see that we don't overpopulate the block
		if totalBytes+len(bz) > int(req.MaxTxBytes) {
			return types.ResponsePrepareProposal{Txs: txs}
		}
		txs = append(txs, bz)
		totalBytes += len(bz)
	}

	return types.ResponsePrepareProposal{Txs: txs}
}

// ProcessProposal either rejects or accepts transactions.
//
// It uses the same validity function as PrepareProposal. This ensures the coherence property
// is adhered to, i.e. all honest validators must accept a proposal by an honest proposer.
func (sm *StateMachine) ProcessProposal(req types.RequestProcessProposal) types.ResponseProcessProposal {
	for _, tx := range req.Txs {
		var msg = new(Msg)
		err := proto.Unmarshal(tx, msg)
		if err != nil {
			return rejectProposal()
		}

		if status := sm.ValidateTx(msg); status != StatusOK {
			fmt.Printf("tx failed validation, status: %d\n", status)
			return rejectProposal()
		}
	}

	return acceptProposal()
}

func (sm *StateMachine) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock {
	// reset the new pairs
	sm.newPairs = make([]*Pair, 0)
	return types.ResponseBeginBlock{}
}

// DeliverTx is called for each tx in a block once it has been finalized. This is where the
// execution code lives. Most importantly it's where we update the user accounts following
// a successful order.
func (sm *StateMachine) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
	var msg = new(Msg)

	err := proto.Unmarshal(req.Tx, msg)
	if err != nil {
		return types.ResponseDeliverTx{Code: StatusErrDecoding, Log: err.Error()} // decoding error
	}

	if status := sm.ValidateTx(msg); status != StatusOK {
		return types.ResponseDeliverTx{Code: status}
	}

	switch m := msg.Sum.(type) {
	case *Msg_MsgRegisterPair:
		sm.markets[m.MsgRegisterPair.Pair.String()] = NewMarket(m.MsgRegisterPair.Pair)
		sm.pairs[m.MsgRegisterPair.Pair.String()] = m.MsgRegisterPair.Pair
		sm.commodities[m.MsgRegisterPair.Pair.BuyersDenomination] = struct{}{}
		sm.commodities[m.MsgRegisterPair.Pair.SellersDenomination] = struct{}{}
		sm.newPairs = append(sm.newPairs, m.MsgRegisterPair.Pair)

	case *Msg_MsgCreateAccount:
		nextAccountID := uint64(len(sm.accounts))
		sm.accounts = append(sm.accounts, &Account{
			Index:       nextAccountID,
			PublicKey:   m.MsgCreateAccount.PublicKey,
			Commodities: m.MsgCreateAccount.Commodities,
		})
		sm.touchedAccounts[nextAccountID] = struct{}{}
		sm.publicKeys[string(m.MsgCreateAccount.PublicKey)] = struct{}{}

	case *Msg_MsgTradeSet:
		pair := m.MsgTradeSet.TradeSet.Pair
		for _, order := range m.MsgTradeSet.TradeSet.MatchedOrders {
			buyer := sm.accounts[order.OrderBid.OwnerId]
			seller := sm.accounts[order.OrderAsk.OwnerId]

			// the buyer gets the quantity of the asset that the seller was selling
			buyer.AddCommodity(NewCommodity(pair.SellersDenomination, order.OrderAsk.Quantity))
			// the buyer gives up quantity * ask price of the buyers denomination
			buyer.SubtractCommodity(NewCommodity(pair.BuyersDenomination, order.OrderAsk.Quantity*order.OrderAsk.AskPrice))

			// the seller gets quantity * ask price of the asset that the buyer was paying with
			seller.AddCommodity(NewCommodity(pair.BuyersDenomination, order.OrderAsk.Quantity*order.OrderAsk.AskPrice))
			// the seller gives up the quantity of the commodity they were selling
			seller.SubtractCommodity(NewCommodity(pair.SellersDenomination, order.OrderAsk.Quantity))

			// mark that these accounts have been touched
			sm.touchedAccounts[order.OrderBid.OwnerId] = struct{}{}
			sm.touchedAccounts[order.OrderAsk.OwnerId] = struct{}{}
		}

	default:
		return types.ResponseDeliverTx{Code: StatusErrUnknownMessage}
	}

	return types.ResponseDeliverTx{Code: 0}
}

// EndBlock is used to update consensus params and the validator set. For the orderbook,
// we keep both the same.
func (sm *StateMachine) EndBlock(req types.RequestEndBlock) types.ResponseEndBlock {
	return types.ResponseEndBlock{}
}

// Commit is called to tell the app it is safe to persist state to disk.
// We now take the in-memory representation and update the parts that have
// changed on to disk.
func (sm *StateMachine) Commit() types.ResponseCommit {
	batch := sm.db.NewBatch()

	// write the accounts that were modified by the last block
	for accountID := range sm.touchedAccounts {
		value, err := proto.Marshal(sm.accounts[accountID])
		if err != nil {
			panic(err)
		}
		key := binary.BigEndian.AppendUint64(accountKey, accountID)
		if err := batch.Set(key, value); err != nil {
			panic(err)
		}
	}

	// write the new pairs that were added by the last block
	pairID := len(sm.pairs) - len(sm.newPairs)
	for id, pair := range sm.newPairs {
		value, err := proto.Marshal(pair)
		if err != nil {
			panic(err)
		}
		key := binary.BigEndian.AppendUint64(pairKey, uint64(pairID+id))
		if err := batch.Set(key, value); err != nil {
			panic(err)
		}
	}

	hash := sm.hash()
	err := sm.updateState(batch, sm.lastHeight+1, hash)
	if err != nil {
		panic(err)
	}
	err = batch.WriteSync()
	if err != nil {
		panic(err)
	}

	return types.ResponseCommit{Data: hash}
}

// hash is just the sha256 of the byte representation of all accounts.
// remember that this needs to be deterministic for all state machines
func (sm *StateMachine) hash() []byte {
	digest := bytes.NewBuffer(nil)
	for _, account := range sm.accounts {
		bz, err := proto.Marshal(account)
		if err != nil {
			panic(err)
		}
		digest.Write(bz)
	}
	return tmhash.Sum(digest.Bytes())
}

func (sm *StateMachine) updateState(batch dbm.Batch, height int64, hash []byte) error {
	sm.lastHash = hash
	sm.lastHeight = height
	heightBytes := make([]byte, 8)
	binary.BigEndian.PutUint64(heightBytes, uint64(height))
	return batch.Set(stateKey, append(heightBytes, hash...))
}

func (sm *StateMachine) validateTradeSetAgainstState(tradeSet *TradeSet) *TradeSet {
	output := &TradeSet{Pair: tradeSet.Pair}

	for _, matchedOrder := range tradeSet.MatchedOrders {
		if !sm.isMatchedOrderValid(matchedOrder, tradeSet.Pair) {
			continue
		}

		// yayy! this matched order is still valid and can be executed
		output.MatchedOrders = append(output.MatchedOrders, matchedOrder)
	}

	return output
}

// isMatchedOrderValid is a check against current state to ensure that the order
// is valid and can execute.
//
// This method is also called when preparing a proposal since `CheckTx` doesn't have
// strict validity guarantees and there could be invalid transactions within the mempool.
//
// Note: if one of the two orders is invalid we discard both. In the future we could
// improve this by adding back the part of the order that might still be valid.
func (sm *StateMachine) isMatchedOrderValid(order *MatchedOrder, pair *Pair) bool {
	if int(order.OrderBid.OwnerId) >= len(sm.accounts) {
		return false
	}
	bidOwner := sm.accounts[order.OrderBid.OwnerId]
	if bidOwner == nil {
		return false
	}

	if int(order.OrderAsk.OwnerId) >= len(sm.accounts) {
		return false
	}
	askOwner := sm.accounts[order.OrderAsk.OwnerId]
	if askOwner == nil {
		return false
	}

	askCommodities := askOwner.FindCommidity(pair.SellersDenomination)
	if askCommodities == nil {
		return false
	}
	buyCommodities := bidOwner.FindCommidity(pair.BuyersDenomination)
	if buyCommodities == nil {
		return false
	}

	// Seller has enough of the commodity
	if askCommodities.Quantity-order.OrderAsk.Quantity < 0 {
		return false
	}

	// Buyer has enough of the buying commodity
	if buyCommodities.Quantity-(order.OrderAsk.AskPrice*order.OrderAsk.Quantity) < 0 {
		return false
	}

	if !order.OrderAsk.ValidateSignature(ed25519.PubKey(askOwner.PublicKey), pair) {
		return false
	}
	if !order.OrderBid.ValidateSignature(ed25519.PubKey(bidOwner.PublicKey), pair) {
		return false
	}

	return true
}

// InitDB takes an empty DB instance and populates it with the
// provided pairs and accounts. Note that the order here is important
func InitDB(db dbm.DB, pairs []*Pair, accounts []*Account) error {
	batch := db.NewBatch()

	for id, account := range accounts {
		value, err := proto.Marshal(account)
		if err != nil {
			return err
		}
		key := binary.BigEndian.AppendUint64(accountKey, uint64(id))
		if err := batch.Set(key, value); err != nil {
			return err
		}
	}

	for id, pair := range pairs {
		value, err := proto.Marshal(pair)
		if err != nil {
			return err
		}
		key := binary.BigEndian.AppendUint64(pairKey, uint64(id))
		fmt.Println(key)
		if err := batch.Set(key, value); err != nil {
			return err
		}
	}

	return batch.WriteSync()
}

func rejectProposal() types.ResponseProcessProposal {
	return types.ResponseProcessProposal{Status: types.ResponseProcessProposal_REJECT}
}

func acceptProposal() types.ResponseProcessProposal {
	return types.ResponseProcessProposal{Status: types.ResponseProcessProposal_ACCEPT}
}
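For orientation, a minimal sketch of serving this StateMachine to a Tendermint node over the standard ABCI socket server. The address and wiring here are illustrative assumptions, not part of the diff above:

package main

import (
	"github.com/tendermint/tendermint/abci/example/orderbook"
	"github.com/tendermint/tendermint/abci/server"
	dbm "github.com/tendermint/tm-db"
)

func main() {
	// An in-memory DB keeps the sketch self-contained; a real node
	// would use a persistent backend.
	db := dbm.NewMemDB()
	app, err := orderbook.New(db)
	if err != nil {
		panic(err)
	}

	// Tendermint connects to this address via its proxy_app setting
	// (address and transport are assumptions for the example).
	srv, err := server.NewServer("tcp://0.0.0.0:26658", "socket", app)
	if err != nil {
		panic(err)
	}
	if err := srv.Start(); err != nil {
		panic(err)
	}
	defer srv.Stop() //nolint:errcheck

	// Run until the process is killed.
	select {}
}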
abci/example/orderbook/app_test.go  (327 changes; new file)
@@ -0,0 +1,327 @@
package orderbook_test

import (
	fmt "fmt"
	"testing"

	"github.com/cosmos/gogoproto/proto"
	"github.com/stretchr/testify/require"
	dbm "github.com/tendermint/tm-db"

	"github.com/tendermint/tendermint/abci/example/orderbook"
	"github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/crypto"
	"github.com/tendermint/tendermint/crypto/ed25519"
	params "github.com/tendermint/tendermint/types"
)

// TODO: we should also check that CheckTx adds bids and asks to the app-side mempool
func TestCheckTx(t *testing.T) {
	db := dbm.NewMemDB()
	require.NoError(t, orderbook.InitDB(db, []*orderbook.Pair{testPair}, nil))
	app, err := orderbook.New(db)
	require.NoError(t, err)

	testCases := []struct {
		name         string
		msg          *orderbook.Msg
		responseCode uint32
		expOrderSize int
	}{
		{
			name:         "test empty tx",
			msg:          &orderbook.Msg{},
			responseCode: orderbook.StatusErrValidateBasic,
			expOrderSize: 0,
		},
		{
			name: "test msg ask",
			msg: &orderbook.Msg{
				Sum: &orderbook.Msg_MsgAsk{
					MsgAsk: &orderbook.MsgAsk{
						Pair: testPair,
						AskOrder: &orderbook.OrderAsk{
							Quantity:  10,
							AskPrice:  1,
							OwnerId:   1,
							Signature: crypto.CRandBytes(ed25519.SignatureSize),
						},
					},
				},
			},
			responseCode: orderbook.StatusOK,
			expOrderSize: 1,
		},
		{
			name: "test msg ask wrong signature",
			msg: &orderbook.Msg{
				Sum: &orderbook.Msg_MsgAsk{
					MsgAsk: &orderbook.MsgAsk{
						Pair: testPair,
						AskOrder: &orderbook.OrderAsk{
							Quantity:  10,
							AskPrice:  1,
							OwnerId:   1,
							Signature: crypto.CRandBytes(62),
						},
					},
				},
			},
			responseCode: orderbook.StatusErrValidateBasic,
			expOrderSize: 1,
		},
		{
			name: "test msg bid",
			msg: &orderbook.Msg{Sum: &orderbook.Msg_MsgBid{MsgBid: &orderbook.MsgBid{
				Pair: testPair,
				BidOrder: &orderbook.OrderBid{
					MaxQuantity: 15,
					MaxPrice:    5,
					OwnerId:     1,
					Signature:   crypto.CRandBytes(ed25519.SignatureSize),
				},
			}}},
			responseCode: orderbook.StatusOK,
			expOrderSize: 2,
		},
		{
			name: "test msg bid blank",
			msg: &orderbook.Msg{Sum: &orderbook.Msg_MsgBid{MsgBid: &orderbook.MsgBid{
				Pair: testPair,
				BidOrder: &orderbook.OrderBid{
					MaxQuantity: 0,
					MaxPrice:    0,
					OwnerId:     0,
					Signature:   crypto.CRandBytes(ed25519.SignatureSize),
				},
			}}},
			responseCode: orderbook.StatusErrValidateBasic,
			expOrderSize: 2,
		},
		{
			name: "test msg register duplicate pair",
			msg: &orderbook.Msg{Sum: &orderbook.Msg_MsgRegisterPair{MsgRegisterPair: &orderbook.MsgRegisterPair{
				Pair: &orderbook.Pair{BuyersDenomination: "ATOM", SellersDenomination: "ATOM"},
			}}},
			responseCode: orderbook.StatusErrValidateBasic,
			expOrderSize: 2,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			bz, err := proto.Marshal(tc.msg)
			require.NoError(t, err)
			resp := app.CheckTx(types.RequestCheckTx{Tx: bz})
			require.Equal(t, tc.responseCode, resp.Code, resp.Log)
			bids, asks := app.Orders(testPair)
			require.Equal(t, tc.expOrderSize, len(bids)+len(asks))
		})
	}
}

// func ValidateTx(t *testing.T) {
// 	db := dbm.NewMemDB()
// 	require.NoError(t, orderbook.InitDB(db, []*orderbook.Pair{testPair}, nil))
// 	app, err := orderbook.New(db)
// 	require.NoError(t, err)

// 	for _, tc := range testCases {
// 		t.Run(tc.name, func(t *testing.T) {
// 			bz, err := proto.Marshal(tc.msg)
// 			require.NoError(t, err)
// 			resp := app.CheckTx(types.RequestCheckTx{Tx: bz})
// 			require.Equal(t, tc.responseCode, resp.Code, resp.Log)
// 			bids, asks := app.Orders(testPair)
// 			require.Equal(t, tc.expOrderSize, len(bids)+len(asks))
// 		})
// 	}
// }

// TODO: we should check that transactions in
// a market are being validated and added to the proposal
// and that other transactions get in
// func TestPrepareProposal(t *testing.T) {
// 	db := dbm.NewMemDB()
// 	require.NoError(t, orderbook.InitDB(db, []*orderbook.Pair{testPair}, nil))
// 	app, err := orderbook.New(db)
// 	require.NoError(t, err)

// 	for _, tc := range testCases {
// 		t.Run(tc.name, func(t *testing.T) {
// 			bz, err := proto.Marshal(tc.msg)
// 			require.NoError(t, err)
// 			resp := app.CheckTx(types.RequestCheckTx{Tx: bz})
// 			require.Equal(t, tc.responseCode, resp.Code, resp.Log)
// 			bids, asks := app.Orders(testPair)
// 			require.Equal(t, tc.expOrderSize, len(bids)+len(asks))
// 		})
// 	}
// }

// {
// 	name: "test msg register pair",
// 	msg: &orderbook.Msg{Sum: &orderbook.Msg_MsgRegisterPair{MsgRegisterPair: &orderbook.MsgRegisterPair{
// 		Pair: &orderbook.Pair{BuyersDenomination: "ATOM", SellersDenomination: "AUD"},
// 	}}},
// 	responseCode: orderbook.StatusOK,
// 	expOrderSize: 2,
// 	pairSize:     2,
// },

// TODO: we should test that transactions are
// always valid i.e. ValidateTx. We could potentially
// combine this with PrepareProposal
// func TestProcessProposal(t *testing.T) {
// 	app := orderbook.New(dbm.NewMemDB())
// }

// TODO: we should test that a matched order
// correctly updates the accounts. We should
// also test that committing a block persists
// it to the database and that we can now
// query the new state
// func TestFinalizeBlock(t *testing.T) {
// 	app := orderbook.New(dbm.NewMemDB())
// }

// TODO: test that we can start from new
// and from existing state
// func TestNewStateMachine(t *testing.T) {}

func TestEndToEnd(t *testing.T) {
	db := dbm.NewMemDB()
|
||||
app, err := orderbook.New(db)
|
||||
require.NoError(t, err)
|
||||
|
||||
|
||||
var (
|
||||
maxBytes = params.DefaultConsensusParams().Block.MaxBytes
|
||||
commodityNZD = &orderbook.Commodity{Denom: "NZD", Quantity: 100}
|
||||
commodityAUD = &orderbook.Commodity{Denom: "AUD", Quantity: 100}
|
||||
registerPairMsg = newRegisterPair("NZD", "AUD")
|
||||
pair = registerPairMsg.GetMsgRegisterPair().Pair
|
||||
pkAlice = ed25519.GenPrivKey()
|
||||
pkBob = ed25519.GenPrivKey()
|
||||
pubKeyAlice = pkAlice.PubKey().Bytes()
|
||||
pubKeyBob = pkBob.PubKey().Bytes()
|
||||
registerAlice = newRegisterAccount(pubKeyAlice, []*orderbook.Commodity{commodityAUD})
|
||||
registerBob = newRegisterAccount(pubKeyBob, []*orderbook.Commodity{commodityNZD})
|
||||
// bob is asking for 25 AUD for 5 NZD
|
||||
ask = &orderbook.Msg{Sum: &orderbook.Msg_MsgAsk{MsgAsk: orderbook.NewMsgAsk(pair, 5, 5, 1)}}
|
||||
// alice is bidding for 5 NZD for 25 AUD
|
||||
bid = &orderbook.Msg{Sum: &orderbook.Msg_MsgBid{MsgBid: orderbook.NewMsgBid(pair, 5, 5, 0)}}
|
||||
)
|
||||
|
||||
require.NoError(t, ask.GetMsgAsk().Sign(pkBob))
|
||||
require.NoError(t, bid.GetMsgBid().Sign(pkAlice))
|
||||
|
||||
testCases := []struct {
|
||||
txs [][]byte
|
||||
accepted bool
|
||||
// assertions to be made about the state of the application
|
||||
// after each block
|
||||
assertions func(t *testing.T, app *orderbook.StateMachine)
|
||||
}{
|
||||
{
|
||||
// block 1 sets up the trading pair
|
||||
txs: asTxs(registerPairMsg),
|
||||
accepted: true,
|
||||
assertions: func(t *testing.T, app *orderbook.StateMachine) {
|
||||
pairs := app.Pairs()
|
||||
require.Len(t, pairs, 1)
|
||||
require.Equal(t, pair, &pairs[0])
|
||||
},
|
||||
},
|
||||
{
|
||||
// block 2 registers two accounts: alice and bob
|
||||
txs: asTxs(registerAlice, registerBob),
|
||||
accepted: true,
|
||||
assertions: func(t *testing.T, app *orderbook.StateMachine) {
|
||||
alice := app.Account(0)
|
||||
require.False(t, alice.IsEmpty(), alice)
|
||||
require.Equal(t, pubKeyAlice, alice.PublicKey)
|
||||
require.Len(t, alice.Commodities, 1)
|
||||
require.Equal(t, alice.Commodities[0], commodityAUD)
|
||||
bob := app.Account(1)
|
||||
require.False(t, bob.IsEmpty(), bob)
|
||||
require.Equal(t, pubKeyBob, bob.PublicKey)
|
||||
require.Len(t, bob.Commodities, 1)
|
||||
require.Equal(t, bob.Commodities[0], commodityNZD)
|
||||
require.True(t, app.Account(2).IsEmpty())
|
||||
},
|
||||
},
|
||||
{
|
||||
// block 3 performs a trade between alice and bob
|
||||
txs: asTxs(ask, bid),
|
||||
accepted: true,
|
||||
assertions: func(t *testing.T, app *orderbook.StateMachine) {
|
||||
alice := app.Account(0)
|
||||
require.EqualValues(t, 75, alice.Commodities[0].Quantity) // 75 AUD
|
||||
require.EqualValues(t, 5, alice.Commodities[1].Quantity) // 5 NZD
|
||||
bob := app.Account(1)
|
||||
require.EqualValues(t, 95, bob.Commodities[0].Quantity) // 95 NZD
|
||||
require.EqualValues(t, 25, bob.Commodities[1].Quantity) // 25 AUD received from alice
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for idx, tc := range testCases {
|
||||
for _, tx := range tc.txs {
|
||||
resp := app.CheckTx(types.RequestCheckTx{Tx: tx})
|
||||
require.EqualValues(t, orderbook.StatusOK, resp.Code)
|
||||
}
|
||||
txs := app.PrepareProposal(types.RequestPrepareProposal{MaxTxBytes: maxBytes, Txs: tc.txs}).Txs
|
||||
require.Equal(t, txs, tc.txs)
|
||||
|
||||
result := app.ProcessProposal(types.RequestProcessProposal{Txs: txs})
|
||||
if tc.accepted {
|
||||
require.Equal(t, types.ResponseProcessProposal_ACCEPT, result.Status)
|
||||
} else {
|
||||
require.Equal(t, types.ResponseProcessProposal_REJECT, result.Status)
|
||||
continue
|
||||
}
|
||||
|
||||
app.BeginBlock(types.RequestBeginBlock{})
|
||||
for _, tx := range txs {
|
||||
app.DeliverTx(types.RequestDeliverTx{Tx: tx})
|
||||
}
|
||||
app.EndBlock(types.RequestEndBlock{})
|
||||
app.Commit()
|
||||
|
||||
if tc.assertions != nil {
|
||||
tc.assertions(t, app)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func asTxs(msgs ...*orderbook.Msg) [][]byte {
|
||||
output := make([][]byte, len(msgs))
|
||||
for i, msg := range msgs {
|
||||
bz, err := proto.Marshal(msg)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
output[i] = bz
|
||||
}
|
||||
return output
|
||||
}
|
||||
|
||||
func newRegisterPair(d1, d2 string) *orderbook.Msg {
|
||||
return &orderbook.Msg{Sum: &orderbook.Msg_MsgRegisterPair{MsgRegisterPair: &orderbook.MsgRegisterPair{
|
||||
Pair: &orderbook.Pair{BuyersDenomination: d1, SellersDenomination: d2},
|
||||
}}}
|
||||
}
|
||||
|
||||
func newRegisterAccount(pubkey []byte, commodities []*orderbook.Commodity) *orderbook.Msg {
|
||||
return &orderbook.Msg{Sum: &orderbook.Msg_MsgCreateAccount{MsgCreateAccount: &orderbook.MsgCreateAccount{
|
||||
PublicKey: pubkey,
|
||||
Commodities: commodities,
|
||||
}}}
|
||||
|
||||
}
|
||||
9
abci/example/orderbook/buf.gen.yaml
Normal file
@@ -0,0 +1,9 @@
|
||||
version: v1
|
||||
plugins:
|
||||
- name: gogofaster
|
||||
out: .
|
||||
opt:
|
||||
- Mgoogle/protobuf/timestamp.proto=github.com/cosmos/gogoproto/types
|
||||
- Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration
|
||||
- plugins=grpc
|
||||
- paths=source_relative
|
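With this config in place, running `buf generate` from the package directory regenerates msgs.pb.go and wire.pb.go; doc.go below wires the same step behind `go generate` via its `//go:generate` directives.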
||||
243
abci/example/orderbook/cmd/orderbook.go
Normal file
@@ -0,0 +1,243 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
|
||||
"github.com/tendermint/tendermint/abci/example/orderbook"
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmos "github.com/tendermint/tendermint/libs/os"
|
||||
tmrand "github.com/tendermint/tendermint/libs/rand"
|
||||
"github.com/tendermint/tendermint/node"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
"github.com/tendermint/tendermint/privval"
|
||||
"github.com/tendermint/tendermint/proxy"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
tmtime "github.com/tendermint/tendermint/types/time"
|
||||
)
|
||||
|
||||
func main() {
|
||||
NewCLI().Run()
|
||||
}
|
||||
|
||||
type CLI struct {
|
||||
root *cobra.Command
|
||||
config *cfg.Config
|
||||
}
|
||||
|
||||
func NewCLI() *CLI {
|
||||
cli := &CLI{}
|
||||
cli.root = &cobra.Command{
|
||||
Use: "orderbook",
|
||||
Short: "orderbook abci++ example",
|
||||
}
|
||||
cli.root.AddCommand(&cobra.Command{
|
||||
Use: "init",
|
||||
Short: "initialize the file system for an orderbook node",
|
||||
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
|
||||
root, err := os.Getwd()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
viper.AddConfigPath(filepath.Join(root, "config"))
|
||||
viper.SetConfigName("config")
|
||||
|
||||
config := cfg.DefaultConfig()
|
||||
|
||||
if err := viper.ReadInConfig(); err != nil {
|
||||
if _, ok := err.(viper.ConfigFileNotFoundError); ok {
|
||||
// Config file not found; use default
|
||||
// This often happens when initializing a config for the first time
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err := viper.Unmarshal(config); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
config.SetRoot(root)
|
||||
cli.config = config
|
||||
return nil
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
privValKeyFile := cli.config.PrivValidatorKeyFile()
|
||||
privValStateFile := cli.config.PrivValidatorStateFile()
|
||||
var pv *privval.FilePV
|
||||
if tmos.FileExists(privValKeyFile) {
|
||||
pv = privval.LoadFilePV(privValKeyFile, privValStateFile)
|
||||
fmt.Print("found private validator", "keyFile", privValKeyFile,
|
||||
"stateFile", privValStateFile)
|
||||
} else {
|
||||
pv = privval.GenFilePV(privValKeyFile, privValStateFile)
|
||||
pv.Save()
|
||||
fmt.Print("Generated private validator", "keyFile", privValKeyFile,
|
||||
"stateFile", privValStateFile)
|
||||
}
|
||||
|
||||
nodeKeyFile := cli.config.NodeKeyFile()
|
||||
if tmos.FileExists(nodeKeyFile) {
|
||||
fmt.Print("Found node key", "path", nodeKeyFile)
|
||||
} else {
|
||||
if _, err := p2p.LoadOrGenNodeKey(nodeKeyFile); err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Print("Generated node key", "path", nodeKeyFile)
|
||||
}
|
||||
|
||||
// genesis file
|
||||
genFile := cli.config.GenesisFile()
|
||||
if tmos.FileExists(genFile) {
|
||||
fmt.Print("Found genesis file", "path", genFile)
|
||||
} else {
|
||||
genDoc := types.GenesisDoc{
|
||||
ChainID: fmt.Sprintf("orderbook-chain-%v", tmrand.Int()),
|
||||
GenesisTime: tmtime.Now(),
|
||||
ConsensusParams: types.DefaultConsensusParams(),
|
||||
}
|
||||
pubKey, err := pv.GetPubKey()
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't get pubkey: %w", err)
|
||||
}
|
||||
genDoc.Validators = []types.GenesisValidator{{
|
||||
Address: pubKey.Address(),
|
||||
PubKey: pubKey,
|
||||
Power: 10,
|
||||
}}
|
||||
|
||||
if err := genDoc.SaveAs(genFile); err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Print("Generated genesis file", "path", genFile)
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
})
|
||||
cli.root.AddCommand(&cobra.Command{
|
||||
Use: "run",
|
||||
Short: "runs an orderbook node",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
dbProvider := node.DefaultDBProvider
|
||||
appDB, err := dbProvider(&node.DBContext{"orderbook", cli.config})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
app, err := orderbook.New(appDB)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
nodeKey, err := p2p.LoadOrGenNodeKey(cli.config.NodeKeyFile())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to load or gen node key %s: %w", cli.config.NodeKeyFile(), err)
|
||||
}
|
||||
|
||||
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
|
||||
n, err := node.NewNode(
|
||||
cli.config,
|
||||
privval.LoadOrGenFilePV(cli.config.PrivValidatorKeyFile(), cli.config.PrivValidatorStateFile()),
|
||||
nodeKey,
|
||||
proxy.NewLocalClientCreator(app),
|
||||
node.DefaultGenesisDocProviderFunc(cli.config),
|
||||
dbProvider,
|
||||
node.DefaultMetricsProvider(cli.config.Instrumentation),
|
||||
logger,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := n.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tmos.TrapSignal(logger, func() {
|
||||
if err := n.Stop(); err != nil {
|
||||
logger.Error("unable to stop the node", "error", err)
|
||||
}
|
||||
})
|
||||
|
||||
return nil
|
||||
},
|
||||
})
|
||||
cli.root.AddCommand(&cobra.Command{
|
||||
Use: "create-account [commodities...]",
|
||||
Short: "creates a new account message and submits it to the chain",
|
||||
Example: "create-account 500BTC 10000USD",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return nil
|
||||
},
|
||||
})
|
||||
cli.root.AddCommand(&cobra.Command{
|
||||
Use: "create-pair buyers-denomination sellers-denomination",
|
||||
Short: "creates a new pair message and submits it to the chain",
|
||||
Example: "create-pair BTC USD",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return nil
|
||||
},
|
||||
})
|
||||
cli.root.AddCommand(&cobra.Command{
|
||||
Use: "bid buying-commodity price",
|
||||
Short: "creates a bid message and submits it to the chain",
|
||||
Example: "bid 10BTC 15000BTC/USD",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return nil
|
||||
},
|
||||
})
|
||||
cli.root.AddCommand(&cobra.Command{
|
||||
Use: "ask selling-commodity price",
|
||||
Short: "creates an ask message and submits it to the chain",
|
||||
Example: "ask 5BTC 12000BTC/USD",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return nil
|
||||
},
|
||||
})
|
||||
querySubcommand := &cobra.Command{
|
||||
Use: "query",
|
||||
Short: "query the bal",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return nil
|
||||
},
|
||||
}
|
||||
querySubcommand.AddCommand(&cobra.Command{
|
||||
Use: "account pubkey|id",
|
||||
Short: "query the balance of an account",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return nil
|
||||
},
|
||||
})
|
||||
querySubcommand.AddCommand(&cobra.Command{
|
||||
Use: "pairs",
|
||||
Short: "list all the trading pairs",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return nil
|
||||
},
|
||||
})
|
||||
querySubcommand.AddCommand(&cobra.Command{
|
||||
Use: "orders pair",
|
||||
Short: "list all current orders for a given pair",
|
||||
Example: "orders BTC/USD",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return nil
|
||||
},
|
||||
})
|
||||
cli.root.AddCommand(querySubcommand)
|
||||
|
||||
return cli
|
||||
}
|
||||
|
||||
// Run runs the CLI.
|
||||
func (cli *CLI) Run() {
|
||||
if err := cli.root.Execute(); err != nil {
|
||||
fmt.Print(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
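Taken together, a node is bootstrapped with `orderbook init` and started with `orderbook run`; note that the trading subcommands (`create-account`, `create-pair`, `bid`, `ask`) and the `query` family are still stubs that immediately return nil.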
||||
19
abci/example/orderbook/doc.go
Normal file
@@ -0,0 +1,19 @@
|
||||
//go:generate go install github.com/bufbuild/buf/cmd/buf
|
||||
//go:generate buf generate
|
||||
|
||||
// The orderbook presents a more advanced example of a Tendermint application than the simple kvstore
|
||||
//
|
||||
// An orderbook is a tool used in financial markets for enabling trading of various commodities. Without
|
||||
// delving into too much detail, an orderbook is made of two types of transactions: Bids and Asks. An Ask
|
||||
// is an offer by a seller of n units of a commodity at an AskPrice, and a bid is an offer from a buyer
|
||||
// for up to m units of a commodity at a BidPrice. When the bid price meets or exceeds the ask price, and
|
||||
// the buyer's quantity is at least the seller's quantity, the order is matched. In this model we neglect an
|
||||
// underlying settlement denomination (i.e. USD), so each participant is effectively both a buyer and a seller.
|
||||
//
|
||||
// This example falls far short of being a decentralized orderbook, but it demonstrates how to build an
|
||||
// app-side mempool, how to use PrepareProposal and ProcessProposal to craft complex transactions,
|
||||
// how to use signatures to validate transactions against state, how an application can manage
|
||||
// concurrency, and the lifecycle of a transaction from RPC -> CheckTx -> Mempool -> PrepareProposal
|
||||
// -> ProcessProposal -> DeliverTx -> Commit -> Querying.
|
||||
|
||||
package orderbook
|
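Before the market implementation below, a small self-contained sketch of the matching predicate just described (names are illustrative, not part of the package's API):

```go
package main

import "fmt"

// matches reports whether a bid can fill an ask under the rule in the
// package comment: the bid price covers the ask price and the bid
// quantity covers the ask quantity.
func matches(bidPrice, bidQty, askPrice, askQty float64) bool {
	return bidPrice >= askPrice && bidQty >= askQty
}

func main() {
	fmt.Println(matches(50, 15, 40, 10)) // true
	fmt.Println(matches(50, 5, 40, 10))  // false: bid quantity too small
	fmt.Println(matches(50, 10, 60, 10)) // false: bid price below ask price
}
```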
||||
254
abci/example/orderbook/market.go
Normal file
@@ -0,0 +1,254 @@
|
||||
package orderbook
|
||||
|
||||
import (
|
||||
"container/heap"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
type Market struct {
|
||||
// immutable
|
||||
pair *Pair // i.e. EUR/USD (a market is bidirectional)
|
||||
|
||||
mtx sync.RWMutex
|
||||
askOrders *AskOrders // i.e. buying EUR for USD
|
||||
lowestAsk float64
|
||||
bidOrders *BidOrders // i.e. selling EUR for USD or buying USD for EUR
|
||||
highestBid float64
|
||||
}
|
||||
|
||||
func NewMarket(p *Pair) *Market {
|
||||
askOrders := make(AskOrders, 0)
|
||||
bidOrders := make(BidOrders, 0)
|
||||
return &Market{pair: p, askOrders: &askOrders, bidOrders: &bidOrders}
|
||||
}
|
||||
|
||||
func (m *Market) AddBid(b *OrderBid) {
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
heap.Push(m.bidOrders, b)
|
||||
if b.MaxPrice > m.highestBid {
|
||||
m.highestBid = b.MaxPrice
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Market) AddAsk(a *OrderAsk) {
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
heap.Push(m.askOrders, a)
|
||||
if a.AskPrice < m.lowestAsk || m.lowestAsk == 0 {
|
||||
m.lowestAsk = a.AskPrice
|
||||
}
|
||||
}
|
||||
|
||||
// Match takes the set of bids and asks and matches them together.
|
||||
// A bid matches an ask when the bid's MaxPrice is at least the AskPrice
|
||||
// and the bid's MaxQuantity is at least the ask's Quantity.
|
||||
func (m *Market) Match() *TradeSet {
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
// if one side doesn't have any orders then there is nothing to match
|
||||
// and we return early
|
||||
if m.askOrders.Len() == 0 || m.bidOrders.Len() == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
if m.highestBid < m.lowestAsk {
|
||||
// no orders match, we return early.
|
||||
return nil
|
||||
}
|
||||
|
||||
t := &TradeSet{Pair: m.pair}
|
||||
bids := make([]*OrderBid, 0)
|
||||
asks := make([]*OrderAsk, 0)
|
||||
|
||||
// collect all the bids at or above the lowest ask, in order from highest bid to lowest bid
|
||||
for m.bidOrders.Len() > 0 {
|
||||
bid := heap.Pop(m.bidOrders).(*OrderBid)
|
||||
if bid.MaxPrice < m.lowestAsk {
|
||||
// we've reached the limit, push the bid back and break the loop
|
||||
heap.Push(m.bidOrders, bid)
|
||||
break
|
||||
} else {
|
||||
bids = append(bids, bid)
|
||||
}
|
||||
}
|
||||
|
||||
// collect all the asks at or below the highest bid in the bids set, ordered from lowest to highest ask
|
||||
for m.askOrders.Len() > 0 {
|
||||
ask := heap.Pop(m.askOrders).(*OrderAsk)
|
||||
if ask.AskPrice > bids[0].MaxPrice {
|
||||
// the ask price is greater than the highest bid; push the ask back and break the loop
|
||||
heap.Push(m.askOrders, ask)
|
||||
break
|
||||
} else {
|
||||
asks = append(asks, ask)
|
||||
}
|
||||
}
|
||||
|
||||
// this is to keep track of the index of the bids that have been matched
|
||||
reserved := make(map[int]struct{})
|
||||
|
||||
// start from the highest ask and the highest bid and for each ask loop downwards through the slice of
|
||||
// bids until one is matched
|
||||
OUTER_LOOP:
|
||||
for i := len(asks) - 1; i >= 0; i-- {
|
||||
ask := asks[i]
|
||||
|
||||
// bids are sorted from highest to lowest, so iterating j downwards walks from the lowest collected bid up towards the highest
|
||||
for j := len(bids) - 1; j >= 0; j-- {
|
||||
if _, ok := reserved[j]; ok {
|
||||
// skip over the bids that have already been reserved
|
||||
continue
|
||||
}
|
||||
|
||||
bid := bids[j]
|
||||
if bid.MaxPrice >= ask.AskPrice {
|
||||
if bid.MaxQuantity >= ask.Quantity {
|
||||
// yay! we have a match
|
||||
t.AddFilledOrder(ask, bid)
|
||||
|
||||
// reserve the bid so we don't rematch it with another ask
|
||||
reserved[j] = struct{}{}
|
||||
continue OUTER_LOOP
|
||||
}
|
||||
} else {
|
||||
// this bid is below the ask price, but higher bids sit earlier in the descending slice, so keep scanning
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// as we go from highest to lowest, asks that aren't matched become the new lowest ask price
|
||||
m.lowestAsk = ask.AskPrice
|
||||
|
||||
// no match found, add the ask order back into the heap
|
||||
heap.Push(m.askOrders, ask)
|
||||
}
|
||||
|
||||
// if all available asks were matched then
|
||||
// we never have the opportunity to update the lowest ask.
|
||||
// Now we reset it to 0
|
||||
if m.askOrders.Len() == 0 {
|
||||
m.lowestAsk = 0
|
||||
}
|
||||
|
||||
// add back the unmatched bids to the heap so they can be matched again in a later round.
|
||||
// We also need to recalculate the new highest bid. First we tackle an edge case whereby all
|
||||
// selected bids were matched. In this case we grab the next highest and set that as the new
|
||||
// highest bid
|
||||
m.highestBid = 0
|
||||
if len(reserved) == len(bids) && m.bidOrders.Len() > 0 {
|
||||
newHighestBid := heap.Pop(m.bidOrders).(*OrderBid)
|
||||
m.highestBid = newHighestBid.MaxPrice
|
||||
heap.Push(m.bidOrders, newHighestBid)
|
||||
}
|
||||
for j := 0; j < len(bids); j++ {
|
||||
if _, ok := reserved[j]; !ok {
|
||||
if bids[j].MaxPrice > m.highestBid {
|
||||
m.highestBid = bids[j].MaxPrice
|
||||
}
|
||||
heap.Push(m.bidOrders, bids[j])
|
||||
}
|
||||
}
|
||||
|
||||
if len(t.MatchedOrders) == 0 {
|
||||
return nil
|
||||
}
|
||||
return t
|
||||
}
|
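A hedged usage sketch of a single matching round with the API defined in this file, mirroring the cases exercised in market_test.go below:

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/abci/example/orderbook"
)

func main() {
	pair := &orderbook.Pair{BuyersDenomination: "EUR", SellersDenomination: "USD"}
	m := orderbook.NewMarket(pair)
	m.AddAsk(&orderbook.OrderAsk{AskPrice: 40, Quantity: 10})
	m.AddBid(&orderbook.OrderBid{MaxPrice: 50, MaxQuantity: 15})
	// The bid's price and quantity both cover the ask, so Match returns
	// a TradeSet with one matched order; with no match it returns nil.
	if ts := m.Match(); ts != nil {
		fmt.Println(len(ts.MatchedOrders)) // 1
	}
}
```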
||||
|
||||
func (m *Market) LowestAsk() float64 {
|
||||
m.mtx.RLock()
|
||||
defer m.mtx.RUnlock()
|
||||
return m.lowestAsk
|
||||
}
|
||||
|
||||
func (m *Market) HighestBid() float64 {
|
||||
m.mtx.RLock()
|
||||
defer m.mtx.RUnlock()
|
||||
return m.highestBid
|
||||
}
|
||||
|
||||
func (m *Market) GetBids() []OrderBid {
|
||||
m.mtx.RLock()
|
||||
defer m.mtx.RUnlock()
|
||||
orders := make([]OrderBid, m.bidOrders.Len())
|
||||
for idx, order := range *m.bidOrders {
|
||||
orders[idx] = *order
|
||||
}
|
||||
return orders
|
||||
}
|
||||
|
||||
func (m *Market) GetAsks() []OrderAsk {
|
||||
m.mtx.RLock()
|
||||
defer m.mtx.RUnlock()
|
||||
orders := make([]OrderAsk, m.askOrders.Len())
|
||||
for idx, order := range *m.askOrders {
|
||||
orders[idx] = *order
|
||||
}
|
||||
return orders
|
||||
}
|
||||
|
||||
// AskOrders is a min-heap ordered by lowest ask price
|
||||
type AskOrders []*OrderAsk
|
||||
|
||||
var _ heap.Interface = (*AskOrders)(nil)
|
||||
|
||||
func (a AskOrders) Len() int { return len(a) }
|
||||
|
||||
func (a AskOrders) Less(i, j int) bool {
|
||||
return a[i].AskPrice < a[j].AskPrice
|
||||
}
|
||||
|
||||
func (a AskOrders) Swap(i, j int) {
|
||||
a[i], a[j] = a[j], a[i]
|
||||
}
|
||||
|
||||
func (a *AskOrders) Push(x any) {
|
||||
item := x.(*OrderAsk)
|
||||
*a = append(*a, item)
|
||||
}
|
||||
|
||||
func (a *AskOrders) Pop() any {
|
||||
old := *a
|
||||
n := len(old)
|
||||
item := old[n-1]
|
||||
old[n-1] = nil
|
||||
*a = old[0 : n-1]
|
||||
return item
|
||||
}
|
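Push and Pop here are the low-level plumbing that heap.Interface requires; callers should always go through the container/heap package functions, as Market does above. A quick sketch:

```go
package main

import (
	"container/heap"
	"fmt"

	"github.com/tendermint/tendermint/abci/example/orderbook"
)

func main() {
	asks := make(orderbook.AskOrders, 0)
	// heap.Push maintains the heap invariant; calling asks.Push
	// directly would not.
	heap.Push(&asks, &orderbook.OrderAsk{AskPrice: 50})
	heap.Push(&asks, &orderbook.OrderAsk{AskPrice: 30})
	heap.Push(&asks, &orderbook.OrderAsk{AskPrice: 40})
	lowest := heap.Pop(&asks).(*orderbook.OrderAsk)
	fmt.Println(lowest.AskPrice) // 30: the cheapest ask pops first
}
```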
||||
|
||||
// BidOrders is a max-heap ordered by highest bid price
|
||||
type BidOrders []*OrderBid
|
||||
|
||||
var _ heap.Interface = (*BidOrders)(nil)
|
||||
|
||||
func (b BidOrders) Len() int { return len(b) }
|
||||
|
||||
func (b BidOrders) Less(i, j int) bool {
|
||||
return b[i].MaxPrice > b[j].MaxPrice
|
||||
}
|
||||
|
||||
func (b BidOrders) Swap(i, j int) {
|
||||
b[i], b[j] = b[j], b[i]
|
||||
}
|
||||
|
||||
func (b *BidOrders) Push(x any) {
|
||||
item := x.(*OrderBid)
|
||||
*b = append(*b, item)
|
||||
}
|
||||
|
||||
func (b *BidOrders) Pop() any {
|
||||
old := *b
|
||||
n := len(old)
|
||||
item := old[n-1]
|
||||
old[n-1] = nil
|
||||
*b = old[0 : n-1]
|
||||
return item
|
||||
}
|
||||
|
||||
func (t *TradeSet) AddFilledOrder(ask *OrderAsk, bid *OrderBid) {
|
||||
t.MatchedOrders = append(t.MatchedOrders, &MatchedOrder{
|
||||
OrderAsk: ask,
|
||||
OrderBid: bid,
|
||||
})
|
||||
}
|
||||
179
abci/example/orderbook/market_test.go
Normal file
@@ -0,0 +1,179 @@
|
||||
package orderbook_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/tendermint/tendermint/abci/example/orderbook"
|
||||
)
|
||||
|
||||
var testPair = &orderbook.Pair{BuyersDenomination: "ATOM", SellersDenomination: "USD"}
|
||||
|
||||
func testBid(price, quantity float64) *orderbook.OrderBid {
|
||||
return &orderbook.OrderBid{
|
||||
MaxPrice: price,
|
||||
MaxQuantity: quantity,
|
||||
}
|
||||
}
|
||||
|
||||
func testAsk(price, quantity float64) *orderbook.OrderAsk {
|
||||
return &orderbook.OrderAsk{
|
||||
AskPrice: price,
|
||||
Quantity: quantity,
|
||||
}
|
||||
}
|
||||
|
||||
func TestTrackLowestAndHighestPrices(t *testing.T) {
|
||||
market := orderbook.NewMarket(testPair)
|
||||
require.Zero(t, market.LowestAsk())
|
||||
require.Zero(t, market.HighestBid())
|
||||
|
||||
market.AddBid(testBid(100, 10))
|
||||
require.EqualValues(t, 100, market.HighestBid())
|
||||
|
||||
market.AddAsk(testAsk(50, 10))
|
||||
require.EqualValues(t, 50, market.LowestAsk())
|
||||
|
||||
market.AddAsk(testAsk(30, 10))
|
||||
require.EqualValues(t, 30, market.LowestAsk())
|
||||
|
||||
market.AddAsk(testAsk(40, 10))
|
||||
require.EqualValues(t, 30, market.LowestAsk())
|
||||
}
|
||||
|
||||
func TestSimpleOrderMatching(t *testing.T) {
|
||||
testcases := []struct {
|
||||
bid *orderbook.OrderBid
|
||||
ask *orderbook.OrderAsk
|
||||
match bool
|
||||
}{
|
||||
{
|
||||
bid: testBid(50, 10),
|
||||
ask: testAsk(50, 10),
|
||||
match: true,
|
||||
},
|
||||
{
|
||||
bid: testBid(60, 10),
|
||||
ask: testAsk(50, 10),
|
||||
match: true,
|
||||
},
|
||||
{
|
||||
bid: testBid(50, 10),
|
||||
ask: testAsk(60, 10),
|
||||
match: false,
|
||||
},
|
||||
{
|
||||
bid: testBid(50, 5),
|
||||
ask: testAsk(40, 10),
|
||||
match: false,
|
||||
},
|
||||
{
|
||||
bid: testBid(50, 15),
|
||||
ask: testAsk(40, 10),
|
||||
match: true,
|
||||
},
|
||||
}
|
||||
|
||||
for idx, tc := range testcases {
|
||||
market := orderbook.NewMarket(testPair)
|
||||
market.AddAsk(tc.ask)
|
||||
market.AddBid(tc.bid)
|
||||
resp := market.Match()
|
||||
if tc.match {
|
||||
require.Len(t, resp.MatchedOrders, 1, idx)
|
||||
} else {
|
||||
require.Nil(t, resp)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMultiOrderMatching(t *testing.T) {
|
||||
testcases := []struct {
|
||||
bids []*orderbook.OrderBid
|
||||
asks []*orderbook.OrderAsk
|
||||
expected []*orderbook.MatchedOrder
|
||||
expectedLowestAsk float64
|
||||
expectedHighestBid float64
|
||||
}{
|
||||
{
|
||||
bids: []*orderbook.OrderBid{
|
||||
testBid(50, 20),
|
||||
testBid(40, 10),
|
||||
testBid(30, 15),
|
||||
},
|
||||
asks: []*orderbook.OrderAsk{
|
||||
testAsk(30, 15),
|
||||
testAsk(30, 5),
|
||||
},
|
||||
expected: []*orderbook.MatchedOrder{
|
||||
{
|
||||
OrderAsk: testAsk(30, 5),
|
||||
OrderBid: testBid(30, 15),
|
||||
},
|
||||
{
|
||||
OrderAsk: testAsk(30, 15),
|
||||
OrderBid: testBid(50, 20),
|
||||
},
|
||||
},
|
||||
expectedLowestAsk: 0,
|
||||
expectedHighestBid: 40,
|
||||
},
|
||||
{
|
||||
bids: []*orderbook.OrderBid{
|
||||
testBid(60, 20),
|
||||
testBid(80, 5),
|
||||
},
|
||||
asks: []*orderbook.OrderAsk{
|
||||
testAsk(60, 15),
|
||||
testAsk(70, 10),
|
||||
testAsk(50, 20),
|
||||
},
|
||||
expected: []*orderbook.MatchedOrder{
|
||||
{
|
||||
OrderAsk: testAsk(60, 15),
|
||||
OrderBid: testBid(60, 20),
|
||||
},
|
||||
},
|
||||
expectedLowestAsk: 50,
|
||||
expectedHighestBid: 80,
|
||||
},
|
||||
{
|
||||
bids: []*orderbook.OrderBid{
|
||||
testBid(60, 20),
|
||||
testBid(80, 5),
|
||||
},
|
||||
asks: []*orderbook.OrderAsk{},
|
||||
expected: []*orderbook.MatchedOrder{},
|
||||
expectedLowestAsk: 0,
|
||||
expectedHighestBid: 80,
|
||||
},
|
||||
{
|
||||
bids: []*orderbook.OrderBid{},
|
||||
asks: []*orderbook.OrderAsk{
|
||||
testAsk(70, 10),
|
||||
testAsk(50, 20),
|
||||
},
|
||||
expected: []*orderbook.MatchedOrder{},
|
||||
expectedLowestAsk: 50,
|
||||
expectedHighestBid: 0,
|
||||
},
|
||||
}
|
||||
|
||||
for idx, tc := range testcases {
|
||||
market := orderbook.NewMarket(testPair)
|
||||
for _, ask := range tc.asks {
|
||||
market.AddAsk(ask)
|
||||
}
|
||||
for _, bid := range tc.bids {
|
||||
market.AddBid(bid)
|
||||
}
|
||||
resp := market.Match()
|
||||
if len(tc.expected) == 0 {
|
||||
require.Nil(t, resp, idx)
|
||||
} else {
|
||||
require.Equal(t, tc.expected, resp.MatchedOrders, idx)
|
||||
}
|
||||
require.EqualValues(t, tc.expectedLowestAsk, market.LowestAsk(), idx)
|
||||
require.EqualValues(t, tc.expectedHighestBid, market.HighestBid(), idx)
|
||||
}
|
||||
}
|
||||
1795
abci/example/orderbook/msgs.pb.go
Normal file
40
abci/example/orderbook/msgs.proto
Normal file
@@ -0,0 +1,40 @@
|
||||
€syntax = "proto3";
|
||||
|
||||
package orderbook;
|
||||
option go_package = "github.com/tendermint/tendermint/abci/example/orderbook";
|
||||
|
||||
import "wire.proto";
|
||||
|
||||
message MsgBid {
|
||||
Pair pair = 1;
|
||||
OrderBid bid_order = 2;
|
||||
}
|
||||
|
||||
message MsgAsk {
|
||||
Pair pair = 1;
|
||||
OrderAsk ask_order = 2;
|
||||
}
|
||||
|
||||
message MsgCreateAccount {
|
||||
bytes public_key = 1;
|
||||
repeated Commodity commodities = 2;
|
||||
}
|
||||
|
||||
message MsgRegisterPair {
|
||||
Pair pair = 1;
|
||||
}
|
||||
|
||||
message MsgTradeSet {
|
||||
TradeSet trade_set = 1;
|
||||
}
|
||||
|
||||
message Msg {
|
||||
// a Msg must be exactly one of the variants below
|
||||
oneof sum {
|
||||
MsgBid msg_bid = 1;
|
||||
MsgAsk msg_ask = 2;
|
||||
MsgRegisterPair msg_register_pair = 3;
|
||||
MsgCreateAccount msg_create_account = 4;
|
||||
MsgTradeSet msg_trade_set = 5;
|
||||
}
|
||||
}
|
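Transactions in this example are a Msg carrying exactly one variant of this oneof. A hedged sketch of wrapping a concrete message and marshaling it into the tx bytes that CheckTx expects, mirroring app_test.go above:

```go
package main

import (
	"fmt"

	"github.com/cosmos/gogoproto/proto"

	"github.com/tendermint/tendermint/abci/example/orderbook"
)

func main() {
	// Wrap a MsgRegisterPair in the Msg oneof, then marshal it into
	// the raw bytes submitted as a transaction.
	msg := &orderbook.Msg{Sum: &orderbook.Msg_MsgRegisterPair{
		MsgRegisterPair: orderbook.NewMsgRegisterPair(
			&orderbook.Pair{BuyersDenomination: "EUR", SellersDenomination: "USD"},
		),
	}}
	bz, err := proto.Marshal(msg)
	fmt.Println(len(bz) > 0, err) // true <nil>
}
```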
||||
BIN
abci/example/orderbook/orderbook
Executable file
31
abci/example/orderbook/query.go
Normal file
@@ -0,0 +1,31 @@
|
||||
package orderbook
|
||||
|
||||
// Query the state of an account (returns a concrete copy)
|
||||
func (sm *StateMachine) Account(id uint64) Account {
|
||||
if int(id) >= len(sm.accounts) {
|
||||
return Account{}
|
||||
}
|
||||
return *sm.accounts[id]
|
||||
}
|
||||
|
||||
// Query all the pairs that the orderbook has (returns a concrete copy)
|
||||
func (sm *StateMachine) Pairs() []Pair {
|
||||
pairs := make([]Pair, len(sm.pairs))
|
||||
idx := 0
|
||||
for _, pair := range sm.pairs {
|
||||
pairs[idx] = *pair
|
||||
idx++
|
||||
}
|
||||
return pairs
|
||||
}
|
||||
|
||||
// Query the current orders for a pair (returns concrete copies)
|
||||
func (sm *StateMachine) Orders(pair *Pair) ([]OrderBid, []OrderAsk) {
|
||||
market, ok := sm.markets[pair.String()]
|
||||
if !ok {
|
||||
return nil, nil
|
||||
}
|
||||
return market.GetBids(), market.GetAsks()
|
||||
}
|
||||
|
||||
func (sm *StateMachine) Height() int64 { return sm.lastHeight }
|
||||
299
abci/example/orderbook/types.go
Normal file
@@ -0,0 +1,299 @@
|
||||
package orderbook
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
"github.com/tendermint/tendermint/crypto/ed25519"
|
||||
)
|
||||
|
||||
func NewMsgBid(pair *Pair, maxPrice, maxQuantity float64, ownerId uint64) *MsgBid {
|
||||
return &MsgBid{
|
||||
Pair: pair,
|
||||
BidOrder: &OrderBid{
|
||||
MaxPrice: maxPrice,
|
||||
MaxQuantity: maxQuantity,
|
||||
OwnerId: ownerId,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (msg *MsgBid) Sign(pk crypto.PrivKey) error {
|
||||
sig, err := pk.Sign(msg.BidOrder.DeterministicSignatureBytes(msg.Pair))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
msg.BidOrder.Signature = sig
|
||||
return nil
|
||||
}
|
||||
|
||||
func (msg *MsgBid) ValidateBasic() error {
|
||||
if err := msg.BidOrder.ValidateBasic(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := msg.Pair.ValidateBasic(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(msg.BidOrder.Signature) != ed25519.SignatureSize {
|
||||
return errors.New("invalid signature size")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func NewMsgAsk(pair *Pair, askPrice, quantity float64, ownerId uint64) *MsgAsk {
|
||||
return &MsgAsk{
|
||||
Pair: pair,
|
||||
AskOrder: &OrderAsk{
|
||||
AskPrice: askPrice,
|
||||
Quantity: quantity,
|
||||
OwnerId: ownerId,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (msg *MsgAsk) Sign(pk crypto.PrivKey) error {
|
||||
sig, err := pk.Sign(msg.AskOrder.DeterministicSignatureBytes(msg.Pair))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
msg.AskOrder.Signature = sig
|
||||
return nil
|
||||
}
|
||||
|
||||
func (msg *MsgAsk) ValidateBasic() error {
|
||||
if err := msg.AskOrder.ValidateBasic(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := msg.Pair.ValidateBasic(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func NewMsgCreateAccount(commodities ...*Commodity) (*MsgCreateAccount, crypto.PrivKey) {
|
||||
pk := ed25519.GenPrivKey()
|
||||
return &MsgCreateAccount{
|
||||
PublicKey: pk.PubKey().Bytes(),
|
||||
Commodities: commodities,
|
||||
}, pk
|
||||
}
|
||||
|
||||
func (msg *MsgCreateAccount) ValidateBasic() error {
|
||||
if len(msg.PublicKey) != ed25519.PubKeySize {
|
||||
return errors.New("invalid pub key size")
|
||||
}
|
||||
|
||||
uniqueMap := make(map[string]struct{}, len(msg.Commodities))
|
||||
for _, c := range msg.Commodities {
|
||||
if err := c.ValidateBasic(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, ok := uniqueMap[c.Denom]; ok {
|
||||
return fmt.Errorf("commodity %s declared twice", c.Denom)
|
||||
}
|
||||
uniqueMap[c.Denom] = struct{}{}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func NewMsgRegisterPair(pair *Pair) *MsgRegisterPair {
|
||||
return &MsgRegisterPair{Pair: pair}
|
||||
}
|
||||
|
||||
func (msg *MsgRegisterPair) ValidateBasic() error {
|
||||
return msg.Pair.ValidateBasic()
|
||||
}
|
||||
|
||||
func NewCommodity(denom string, quantity float64) *Commodity {
|
||||
return &Commodity{
|
||||
Denom: denom,
|
||||
Quantity: quantity,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Commodity) ValidateBasic() error {
|
||||
if c.Quantity <= 0 {
|
||||
return errors.New("quantity must be greater than zero")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Pair) ValidateBasic() error {
|
||||
if p.BuyersDenomination == "" || p.SellersDenomination == "" {
|
||||
return errors.New("inbound and outbound commodities must be present")
|
||||
}
|
||||
|
||||
if p.BuyersDenomination == p.SellersDenomination {
|
||||
return errors.New("commodities must not be the same")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *OrderBid) ValidateBasic() error {
|
||||
if o.MaxQuantity == 0 {
|
||||
return errors.New("max quantity must be non zero")
|
||||
}
|
||||
|
||||
if o.MaxPrice <= 0 {
|
||||
return errors.New("min price must be greater than 0")
|
||||
}
|
||||
|
||||
if len(o.Signature) != ed25519.SignatureSize {
|
||||
return errors.New("invalid signature size")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *OrderBid) ValidateSignature(pk crypto.PubKey, pair *Pair) bool {
|
||||
return pk.VerifySignature(o.DeterministicSignatureBytes(pair), o.Signature)
|
||||
}
|
||||
|
||||
func (o *OrderBid) DeterministicSignatureBytes(pair *Pair) []byte {
|
||||
buf := bytes.NewBuffer(nil)
|
||||
buf.WriteString(pair.SellersDenomination)
|
||||
buf.WriteString(pair.BuyersDenomination)
|
||||
bz := buf.Bytes()
|
||||
bz = binary.BigEndian.AppendUint64(bz, math.Float64bits(o.MaxQuantity))
|
||||
bz = binary.BigEndian.AppendUint64(bz, math.Float64bits(o.MaxPrice))
|
||||
return bz
|
||||
}
|
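Since the signature is computed over these deterministic bytes, a sign/verify round trip only needs the same pair and order fields on both sides. A hedged sketch using the helpers defined in this file:

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/abci/example/orderbook"
	"github.com/tendermint/tendermint/crypto/ed25519"
)

func main() {
	pair := &orderbook.Pair{BuyersDenomination: "EUR", SellersDenomination: "USD"}
	bid := orderbook.NewMsgBid(pair, 5, 10, 0) // max price 5, max quantity 10
	pk := ed25519.GenPrivKey()
	if err := bid.Sign(pk); err != nil {
		panic(err)
	}
	// Any holder of the public key can verify the order against the
	// same deterministic bytes.
	fmt.Println(bid.BidOrder.ValidateSignature(pk.PubKey(), pair)) // true
}
```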
||||
|
||||
func (m *MatchedOrder) ValidateBasic() error {
|
||||
if err := m.OrderAsk.ValidateBasic(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := m.OrderBid.ValidateBasic(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *TradeSet) ValidateBasic() error {
|
||||
for _, matchedOrder := range t.MatchedOrders {
|
||||
if err := matchedOrder.ValidateBasic(); err != nil {
|
||||
return err
|
||||
}
|
||||
// checking if there is an account
|
||||
if matchedOrder.OrderAsk.OwnerId == 0 {
|
||||
return errors.New("must have an owner id more than zero")
|
||||
}
|
||||
}
|
||||
// validate the pairs are valid
|
||||
if err := t.Pair.ValidateBasic(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *OrderAsk) ValidateBasic() error {
|
||||
if o.Quantity == 0 {
|
||||
return errors.New("quantity outbound must be non zero")
|
||||
}
|
||||
|
||||
if o.AskPrice <= 0 {
|
||||
return errors.New("min price must be greater than 0")
|
||||
}
|
||||
|
||||
if len(o.Signature) != ed25519.SignatureSize {
|
||||
return errors.New("invalid signature size")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *OrderAsk) ValidateSignature(pk crypto.PubKey, pair *Pair) bool {
|
||||
return pk.VerifySignature(o.DeterministicSignatureBytes(pair), o.Signature)
|
||||
}
|
||||
|
||||
func (o *OrderAsk) DeterministicSignatureBytes(pair *Pair) []byte {
|
||||
buf := bytes.NewBuffer(nil)
|
||||
buf.WriteString(pair.BuyersDenomination)
|
||||
buf.WriteString(pair.SellersDenomination)
|
||||
bz := buf.Bytes()
|
||||
bz = binary.BigEndian.AppendUint64(bz, math.Float64bits(o.Quantity))
|
||||
bz = binary.BigEndian.AppendUint64(bz, math.Float64bits(o.AskPrice))
|
||||
return bz
|
||||
}
|
||||
|
||||
func (a Account) IsEmpty() bool {
|
||||
return len(a.PublicKey) == 0
|
||||
}
|
||||
|
||||
func (a *Account) FindCommodity(denom string) *Commodity {
|
||||
for _, c := range a.Commodities {
|
||||
if c.Denom == denom {
|
||||
return c
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *Account) AddCommodity(c *Commodity) {
|
||||
curr := a.FindCommodity(c.Denom)
|
||||
if curr == nil {
|
||||
a.Commodities = append(a.Commodities, c)
|
||||
} else {
|
||||
curr.Quantity += c.Quantity
|
||||
}
|
||||
}
|
||||
|
||||
func (a *Account) SubtractCommodity(c *Commodity) {
|
||||
curr := a.FindCommodity(c.Denom)
|
||||
if curr == nil {
|
||||
panic("trying to remove a commodity the account does not have")
|
||||
}
|
||||
curr.Quantity -= c.Quantity
|
||||
}
|
||||
|
||||
func (msg *Msg) ValidateBasic() error {
|
||||
switch m := msg.Sum.(type) {
|
||||
case *Msg_MsgRegisterPair:
|
||||
if err := m.MsgRegisterPair.ValidateBasic(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
case *Msg_MsgCreateAccount:
|
||||
if err := m.MsgCreateAccount.ValidateBasic(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
case *Msg_MsgBid:
|
||||
if err := m.MsgBid.ValidateBasic(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
case *Msg_MsgAsk:
|
||||
if err := m.MsgAsk.ValidateBasic(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
case *Msg_MsgTradeSet:
|
||||
if err := m.MsgTradeSet.TradeSet.ValidateBasic(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
default:
|
||||
return errors.New("unknown tx")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
1867
abci/example/orderbook/wire.pb.go
Normal file
52
abci/example/orderbook/wire.proto
Normal file
@@ -0,0 +1,52 @@
|
||||
syntax = "proto3";
|
||||
|
||||
package orderbook;
|
||||
option go_package = "github.com/tendermint/tendermint/abci/example/orderbook";
|
||||
|
||||
|
||||
message OrderAsk {
|
||||
double quantity = 1;
|
||||
double ask_price = 2;
|
||||
uint64 owner_id = 3;
|
||||
bytes signature = 4;
|
||||
}
|
||||
|
||||
message OrderBid {
|
||||
double max_quantity = 1;
|
||||
double max_price = 2;
|
||||
uint64 owner_id = 3;
|
||||
bytes signature = 4;
|
||||
}
|
||||
|
||||
message Pair {
|
||||
// the denomination that the buyer receives i.e. EUR
|
||||
string buyers_denomination = 1;
|
||||
// the denomination that the seller receives i.e. USD
|
||||
string sellers_denomination = 2;
|
||||
}
|
||||
|
||||
message Commodity {
|
||||
string denom = 1;
|
||||
double quantity = 2;
|
||||
}
|
||||
|
||||
// Account is the atomic piece of information that is persisted to disk.
|
||||
message Account {
|
||||
uint64 index = 1;
|
||||
bytes public_key = 2;
|
||||
// the set of commodities that the account has
|
||||
repeated Commodity commodities = 3;
|
||||
}
|
||||
|
||||
// TradeSet is the transaction that is eventually committed in a block.
|
||||
// It is derived from a group of MsgBids and MsgAsks.
|
||||
message TradeSet {
|
||||
Pair pair = 1; // i.e. EUR/USD
|
||||
// the set of matched orders for that pair
|
||||
repeated MatchedOrder matched_orders = 2;
|
||||
}
|
||||
|
||||
message MatchedOrder {
|
||||
OrderAsk order_ask = 1;
|
||||
OrderBid order_bid = 2;
|
||||
}
|
||||
@@ -1,30 +0,0 @@
|
||||
// Code generated by metricsgen. DO NOT EDIT.
|
||||
|
||||
package blocksync
|
||||
|
||||
import (
|
||||
"github.com/go-kit/kit/metrics/discard"
|
||||
prometheus "github.com/go-kit/kit/metrics/prometheus"
|
||||
stdprometheus "github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
|
||||
labels := []string{}
|
||||
for i := 0; i < len(labelsAndValues); i += 2 {
|
||||
labels = append(labels, labelsAndValues[i])
|
||||
}
|
||||
return &Metrics{
|
||||
Syncing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "syncing",
|
||||
Help: "Whether or not a node is block syncing. 1 if yes, 0 if no.",
|
||||
}, labels).With(labelsAndValues...),
|
||||
}
|
||||
}
|
||||
|
||||
func NopMetrics() *Metrics {
|
||||
return &Metrics{
|
||||
Syncing: discard.NewGauge(),
|
||||
}
|
||||
}
|
||||
@@ -1,19 +0,0 @@
|
||||
package blocksync
|
||||
|
||||
import (
|
||||
"github.com/go-kit/kit/metrics"
|
||||
)
|
||||
|
||||
const (
|
||||
// MetricsSubsystem is a subsystem shared by all metrics exposed by this
|
||||
// package.
|
||||
MetricsSubsystem = "blocksync"
|
||||
)
|
||||
|
||||
//go:generate go run ../scripts/metricsgen -struct=Metrics
|
||||
|
||||
// Metrics contains metrics exposed by this package.
|
||||
type Metrics struct {
|
||||
// Whether or not a node is block syncing. 1 if yes, 0 if no.
|
||||
Syncing metrics.Gauge
|
||||
}
|
||||
@@ -19,6 +19,58 @@ const (
|
||||
BlockResponseMessageFieldKeySize
|
||||
)
|
||||
|
||||
// EncodeMsg encodes a Protobuf message
|
||||
func EncodeMsg(pb proto.Message) ([]byte, error) {
|
||||
msg := bcproto.Message{}
|
||||
|
||||
switch pb := pb.(type) {
|
||||
case *bcproto.BlockRequest:
|
||||
msg.Sum = &bcproto.Message_BlockRequest{BlockRequest: pb}
|
||||
case *bcproto.BlockResponse:
|
||||
msg.Sum = &bcproto.Message_BlockResponse{BlockResponse: pb}
|
||||
case *bcproto.NoBlockResponse:
|
||||
msg.Sum = &bcproto.Message_NoBlockResponse{NoBlockResponse: pb}
|
||||
case *bcproto.StatusRequest:
|
||||
msg.Sum = &bcproto.Message_StatusRequest{StatusRequest: pb}
|
||||
case *bcproto.StatusResponse:
|
||||
msg.Sum = &bcproto.Message_StatusResponse{StatusResponse: pb}
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown message type %T", pb)
|
||||
}
|
||||
|
||||
bz, err := proto.Marshal(&msg)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to marshal %T: %w", pb, err)
|
||||
}
|
||||
|
||||
return bz, nil
|
||||
}
|
||||
|
||||
// DecodeMsg decodes a Protobuf message.
|
||||
func DecodeMsg(bz []byte) (proto.Message, error) {
|
||||
pb := &bcproto.Message{}
|
||||
|
||||
err := proto.Unmarshal(bz, pb)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch msg := pb.Sum.(type) {
|
||||
case *bcproto.Message_BlockRequest:
|
||||
return msg.BlockRequest, nil
|
||||
case *bcproto.Message_BlockResponse:
|
||||
return msg.BlockResponse, nil
|
||||
case *bcproto.Message_NoBlockResponse:
|
||||
return msg.NoBlockResponse, nil
|
||||
case *bcproto.Message_StatusRequest:
|
||||
return msg.StatusRequest, nil
|
||||
case *bcproto.Message_StatusResponse:
|
||||
return msg.StatusResponse, nil
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown message type %T", msg)
|
||||
}
|
||||
}
|
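A hedged round-trip sketch for these two helpers, written as a test in the same package (assumes the surrounding imports plus testify's require):

```go
func TestEncodeDecodeMsgRoundTrip(t *testing.T) {
	bz, err := EncodeMsg(&bcproto.StatusRequest{})
	require.NoError(t, err)

	msg, err := DecodeMsg(bz)
	require.NoError(t, err)
	// DecodeMsg unwraps the oneof and returns the concrete message.
	require.IsType(t, &bcproto.StatusRequest{}, msg)
}
```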
||||
|
||||
// ValidateMsg validates a message.
|
||||
func ValidateMsg(pb proto.Message) error {
|
||||
if pb == nil {
|
||||
|
||||
@@ -80,7 +80,7 @@ func TestBcStatusResponseMessageValidateBasic(t *testing.T) {
|
||||
}
|
||||
|
||||
//nolint:lll // ignore line length in tests
|
||||
func TestBlocksyncMessageVectors(t *testing.T) {
|
||||
func TestBlockchainMessageVectors(t *testing.T) {
|
||||
block := types.MakeBlock(int64(3), []types.Tx{types.Tx("Hello World")}, nil, nil)
|
||||
block.Version.Block = 11 // overwrite updated protocol version
|
||||
|
||||
|
||||
@@ -32,7 +32,6 @@ const (
|
||||
maxTotalRequesters = 600
|
||||
maxPendingRequests = maxTotalRequesters
|
||||
maxPendingRequestsPerPeer = 20
|
||||
requestRetrySeconds = 30
|
||||
|
||||
// Minimum recv rate to ensure we're receiving blocks from a peer fast
|
||||
// enough. If a peer is not sending us data at at least that rate, we
|
||||
@@ -603,7 +602,7 @@ OUTER_LOOP:
|
||||
}
|
||||
peer = bpr.pool.pickIncrAvailablePeer(bpr.height)
|
||||
if peer == nil {
|
||||
bpr.Logger.Debug("No peers currently available; will retry shortly", "height", bpr.height)
|
||||
// log.Info("No peers available", "height", height)
|
||||
time.Sleep(requestIntervalMS * time.Millisecond)
|
||||
continue PICK_PEER_LOOP
|
||||
}
|
||||
@@ -613,7 +612,6 @@ OUTER_LOOP:
|
||||
bpr.peerID = peer.id
|
||||
bpr.mtx.Unlock()
|
||||
|
||||
to := time.NewTimer(requestRetrySeconds * time.Second)
|
||||
// Send request and wait.
|
||||
bpr.pool.sendRequest(bpr.height, peer.id)
|
||||
WAIT_LOOP:
|
||||
@@ -626,11 +624,6 @@ OUTER_LOOP:
|
||||
return
|
||||
case <-bpr.Quit():
|
||||
return
|
||||
case <-to.C:
|
||||
bpr.Logger.Debug("Retrying block request after timeout", "height", bpr.height, "peer", bpr.peerID)
|
||||
// Simulate a redo
|
||||
bpr.reset()
|
||||
continue OUTER_LOOP
|
||||
case peerID := <-bpr.redoCh:
|
||||
if peerID == bpr.peerID {
|
||||
bpr.reset()
|
||||
|
||||
@@ -30,7 +30,7 @@ const (
|
||||
)
|
||||
|
||||
type consensusReactor interface {
|
||||
// for when we switch from blocksync reactor and block sync to
|
||||
// for when we switch from blockchain reactor and block sync to
|
||||
// the consensus machine
|
||||
SwitchToConsensus(state sm.State, skipWAL bool)
|
||||
}
|
||||
@@ -58,13 +58,11 @@ type Reactor struct {
|
||||
|
||||
requestsCh <-chan BlockRequest
|
||||
errorsCh <-chan peerError
|
||||
|
||||
metrics *Metrics
|
||||
}
|
||||
|
||||
// NewReactor returns new reactor instance.
|
||||
func NewReactor(state sm.State, blockExec *sm.BlockExecutor, store *store.BlockStore,
|
||||
blockSync bool, metrics *Metrics) *Reactor {
|
||||
blockSync bool) *Reactor {
|
||||
|
||||
if state.LastBlockHeight != store.Height() {
|
||||
panic(fmt.Sprintf("state (%v) and store (%v) height mismatch", state.LastBlockHeight,
|
||||
@@ -90,7 +88,6 @@ func NewReactor(state sm.State, blockExec *sm.BlockExecutor, store *store.BlockS
|
||||
blockSync: blockSync,
|
||||
requestsCh: requestsCh,
|
||||
errorsCh: errorsCh,
|
||||
metrics: metrics,
|
||||
}
|
||||
bcR.BaseReactor = *p2p.NewBaseReactor("Reactor", bcR)
|
||||
return bcR
|
||||
@@ -146,20 +143,21 @@ func (bcR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
|
||||
SendQueueCapacity: 1000,
|
||||
RecvBufferCapacity: 50 * 4096,
|
||||
RecvMessageCapacity: MaxMsgSize,
|
||||
MessageType: &bcproto.Message{},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// AddPeer implements Reactor by sending our state to peer.
|
||||
func (bcR *Reactor) AddPeer(peer p2p.Peer) {
|
||||
peer.Send(p2p.Envelope{
|
||||
ChannelID: BlocksyncChannel,
|
||||
Message: &bcproto.StatusResponse{
|
||||
Base: bcR.store.Base(),
|
||||
Height: bcR.store.Height(),
|
||||
},
|
||||
})
|
||||
msgBytes, err := EncodeMsg(&bcproto.StatusResponse{
|
||||
Base: bcR.store.Base(),
|
||||
Height: bcR.store.Height()})
|
||||
if err != nil {
|
||||
bcR.Logger.Error("could not convert msg to protobuf", "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
peer.Send(BlocksyncChannel, msgBytes)
|
||||
// it's OK if send fails. will try later in poolRoutine
|
||||
|
||||
// peer is added to the pool once we receive the first
|
||||
@@ -184,53 +182,69 @@ func (bcR *Reactor) respondToPeer(msg *bcproto.BlockRequest,
|
||||
return false
|
||||
}
|
||||
|
||||
return src.TrySend(p2p.Envelope{
|
||||
ChannelID: BlocksyncChannel,
|
||||
Message: &bcproto.BlockResponse{Block: bl},
|
||||
})
|
||||
msgBytes, err := EncodeMsg(&bcproto.BlockResponse{Block: bl})
|
||||
if err != nil {
|
||||
bcR.Logger.Error("could not marshal msg", "err", err)
|
||||
return false
|
||||
}
|
||||
|
||||
return src.TrySend(BlocksyncChannel, msgBytes)
|
||||
}
|
||||
|
||||
bcR.Logger.Info("Peer asking for a block we don't have", "src", src, "height", msg.Height)
|
||||
return src.TrySend(p2p.Envelope{
|
||||
ChannelID: BlocksyncChannel,
|
||||
Message: &bcproto.NoBlockResponse{Height: msg.Height},
|
||||
})
|
||||
|
||||
msgBytes, err := EncodeMsg(&bcproto.NoBlockResponse{Height: msg.Height})
|
||||
if err != nil {
|
||||
bcR.Logger.Error("could not convert msg to protobuf", "err", err)
|
||||
return false
|
||||
}
|
||||
|
||||
return src.TrySend(BlocksyncChannel, msgBytes)
|
||||
}
|
||||
|
||||
// Receive implements Reactor by handling 4 types of messages (look below).
|
||||
func (bcR *Reactor) Receive(e p2p.Envelope) {
|
||||
if err := ValidateMsg(e.Message); err != nil {
|
||||
bcR.Logger.Error("Peer sent us invalid msg", "peer", e.Src, "msg", e.Message, "err", err)
|
||||
bcR.Switch.StopPeerForError(e.Src, err)
|
||||
func (bcR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
|
||||
msg, err := DecodeMsg(msgBytes)
|
||||
if err != nil {
|
||||
bcR.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err)
|
||||
bcR.Switch.StopPeerForError(src, err)
|
||||
return
|
||||
}
|
||||
|
||||
bcR.Logger.Debug("Receive", "e.Src", e.Src, "chID", e.ChannelID, "msg", e.Message)
|
||||
if err = ValidateMsg(msg); err != nil {
|
||||
bcR.Logger.Error("Peer sent us invalid msg", "peer", src, "msg", msg, "err", err)
|
||||
bcR.Switch.StopPeerForError(src, err)
|
||||
return
|
||||
}
|
||||
|
||||
switch msg := e.Message.(type) {
|
||||
bcR.Logger.Debug("Receive", "src", src, "chID", chID, "msg", msg)
|
||||
|
||||
switch msg := msg.(type) {
|
||||
case *bcproto.BlockRequest:
|
||||
bcR.respondToPeer(msg, e.Src)
|
||||
bcR.respondToPeer(msg, src)
|
||||
case *bcproto.BlockResponse:
|
||||
bi, err := types.BlockFromProto(msg.Block)
|
||||
if err != nil {
|
||||
bcR.Logger.Error("Block content is invalid", "err", err)
|
||||
return
|
||||
}
|
||||
bcR.pool.AddBlock(e.Src.ID(), bi, msg.Block.Size())
|
||||
bcR.pool.AddBlock(src.ID(), bi, len(msgBytes))
|
||||
case *bcproto.StatusRequest:
|
||||
// Send peer our state.
|
||||
e.Src.TrySend(p2p.Envelope{
|
||||
ChannelID: BlocksyncChannel,
|
||||
Message: &bcproto.StatusResponse{
|
||||
Height: bcR.store.Height(),
|
||||
Base: bcR.store.Base(),
|
||||
},
|
||||
msgBytes, err := EncodeMsg(&bcproto.StatusResponse{
|
||||
Height: bcR.store.Height(),
|
||||
Base: bcR.store.Base(),
|
||||
})
|
||||
if err != nil {
|
||||
bcR.Logger.Error("could not convert msg to protobut", "err", err)
|
||||
return
|
||||
}
|
||||
src.TrySend(BlocksyncChannel, msgBytes)
|
||||
case *bcproto.StatusResponse:
|
||||
// Got a peer status. Unverified.
|
||||
bcR.pool.SetPeerRange(e.Src.ID(), msg.Base, msg.Height)
|
||||
bcR.pool.SetPeerRange(src.ID(), msg.Base, msg.Height)
|
||||
case *bcproto.NoBlockResponse:
|
||||
bcR.Logger.Debug("Peer does not have requested block", "peer", e.Src, "height", msg.Height)
|
||||
bcR.Logger.Debug("Peer does not have requested block", "peer", src, "height", msg.Height)
|
||||
default:
|
||||
bcR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
|
||||
}
|
||||
@@ -239,8 +253,6 @@ func (bcR *Reactor) Receive(e p2p.Envelope) {
|
||||
// Handle messages from the poolReactor telling the reactor what to do.
|
||||
// NOTE: Don't sleep in the FOR_LOOP or otherwise slow it down!
|
||||
func (bcR *Reactor) poolRoutine(stateSynced bool) {
|
||||
bcR.metrics.Syncing.Set(1)
|
||||
defer bcR.metrics.Syncing.Set(0)
|
||||
|
||||
trySyncTicker := time.NewTicker(trySyncIntervalMS * time.Millisecond)
|
||||
defer trySyncTicker.Stop()
|
||||
@@ -273,10 +285,13 @@ func (bcR *Reactor) poolRoutine(stateSynced bool) {
|
||||
if peer == nil {
|
||||
continue
|
||||
}
|
||||
queued := peer.TrySend(p2p.Envelope{
|
||||
ChannelID: BlocksyncChannel,
|
||||
Message: &bcproto.BlockRequest{Height: request.Height},
|
||||
})
|
||||
msgBytes, err := EncodeMsg(&bcproto.BlockRequest{Height: request.Height})
|
||||
if err != nil {
|
||||
bcR.Logger.Error("could not convert msg to proto", "err", err)
|
||||
continue
|
||||
}
|
||||
|
||||
queued := peer.TrySend(BlocksyncChannel, msgBytes)
|
||||
if !queued {
|
||||
bcR.Logger.Debug("Send queue is full, drop block request", "peer", peer.ID(), "height", request.Height)
|
||||
}
|
||||
@@ -288,7 +303,7 @@ func (bcR *Reactor) poolRoutine(stateSynced bool) {
|
||||
|
||||
case <-statusUpdateTicker.C:
|
||||
// ask for status updates
|
||||
go bcR.BroadcastStatusRequest()
|
||||
go bcR.BroadcastStatusRequest() //nolint: errcheck
|
||||
|
||||
}
|
||||
}
|
||||
@@ -414,9 +429,14 @@ FOR_LOOP:
|
||||
}
|
||||
|
||||
// BroadcastStatusRequest broadcasts `BlockStore` base and height.
|
||||
func (bcR *Reactor) BroadcastStatusRequest() {
|
||||
bcR.Switch.Broadcast(p2p.Envelope{
|
||||
ChannelID: BlocksyncChannel,
|
||||
Message: &bcproto.StatusRequest{},
|
||||
})
|
||||
func (bcR *Reactor) BroadcastStatusRequest() error {
|
||||
bm, err := EncodeMsg(&bcproto.StatusRequest{})
|
||||
if err != nil {
|
||||
bcR.Logger.Error("could not convert msg to proto", "err", err)
|
||||
return fmt.Errorf("could not convert msg to proto: %w", err)
|
||||
}
|
||||
|
||||
bcR.Switch.Broadcast(BlocksyncChannel, bm)
|
||||
|
||||
return nil
|
||||
}

@@ -145,14 +145,14 @@ func newReactor(
blockStore.SaveBlock(thisBlock, thisParts, lastCommit)
}

bcReactor := NewReactor(state.Copy(), blockExec, blockStore, fastSync, NopMetrics())
bcReactor.SetLogger(logger.With("module", "blocksync"))
bcReactor := NewReactor(state.Copy(), blockExec, blockStore, fastSync)
bcReactor.SetLogger(logger.With("module", "blockchain"))

return ReactorPair{bcReactor, proxyApp}
}

func TestNoBlockResponse(t *testing.T) {
config = test.ResetTestRoot("blocksync_reactor_test")
config = test.ResetTestRoot("blockchain_reactor_test")
defer os.RemoveAll(config.RootDir)
genDoc, privVals := randGenesisDoc(1, false, 30)

@@ -164,7 +164,7 @@ func TestNoBlockResponse(t *testing.T) {
reactorPairs[1] = newReactor(t, log.TestingLogger(), genDoc, privVals, 0)

p2p.MakeConnectedSwitches(config.P2P, 2, func(i int, s *p2p.Switch) *p2p.Switch {
s.AddReactor("BLOCKSYNC", reactorPairs[i].reactor)
s.AddReactor("BLOCKCHAIN", reactorPairs[i].reactor)
return s

}, p2p.Connect2Switches)
@@ -214,7 +214,7 @@ func TestNoBlockResponse(t *testing.T) {
// Alternatively we could actually dial a TCP conn but
// that seems extreme.
func TestBadBlockStopsPeer(t *testing.T) {
config = test.ResetTestRoot("blocksync_reactor_test")
config = test.ResetTestRoot("blockchain_reactor_test")
defer os.RemoveAll(config.RootDir)
genDoc, privVals := randGenesisDoc(1, false, 30)

@@ -239,7 +239,7 @@ func TestBadBlockStopsPeer(t *testing.T) {
reactorPairs[3] = newReactor(t, log.TestingLogger(), genDoc, privVals, 0)

switches := p2p.MakeConnectedSwitches(config.P2P, 4, func(i int, s *p2p.Switch) *p2p.Switch {
s.AddReactor("BLOCKSYNC", reactorPairs[i].reactor)
s.AddReactor("BLOCKCHAIN", reactorPairs[i].reactor)
return s

}, p2p.Connect2Switches)
@@ -278,7 +278,7 @@ func TestBadBlockStopsPeer(t *testing.T) {
reactorPairs = append(reactorPairs, lastReactorPair)

switches = append(switches, p2p.MakeConnectedSwitches(config.P2P, 1, func(i int, s *p2p.Switch) *p2p.Switch {
s.AddReactor("BLOCKSYNC", reactorPairs[len(reactorPairs)-1].reactor)
s.AddReactor("BLOCKCHAIN", reactorPairs[len(reactorPairs)-1].reactor)
return s

}, p2p.Connect2Switches)...)

@@ -67,8 +67,7 @@ func copyConfig(home, dir string) error {
func dumpProfile(dir, addr, profile string, debug int) error {
endpoint := fmt.Sprintf("%s/debug/pprof/%s?debug=%d", addr, profile, debug)

//nolint:gosec,nolintlint
resp, err := http.Get(endpoint)
resp, err := http.Get(endpoint) //nolint: gosec
if err != nil {
return fmt.Errorf("failed to query for %s profile: %w", profile, err)
}
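
For context, a hypothetical invocation of this helper; the directory, address, and debug level below are illustrative, not taken from the diff:

// Hypothetical usage sketch: fetch a goroutine profile from a node's
// pprof server and store it under the given directory.
if err := dumpProfile("/tmp/debug-dump", "http://localhost:6060", "goroutine", 2); err != nil {
	fmt.Println("profile dump failed:", err)
}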

@@ -14,7 +14,7 @@ import (
"github.com/tendermint/tendermint/store"
)

var removeBlock = false
var removeBlock bool = false

func init() {
RollbackStateCmd.Flags().BoolVar(&removeBlock, "hard", false, "remove last block as well as state")

@@ -66,7 +66,6 @@ func AddNodeFlags(cmd *cobra.Command) {
cmd.Flags().String("p2p.external-address", config.P2P.ExternalAddress, "ip:port address to advertise to peers for them to dial")
cmd.Flags().String("p2p.seeds", config.P2P.Seeds, "comma-delimited ID@host:port seed nodes")
cmd.Flags().String("p2p.persistent_peers", config.P2P.PersistentPeers, "comma-delimited ID@host:port persistent peers")
cmd.Flags().String("p2p.bootstrap_peers", config.P2P.BootstrapPeers, "comma-delimited ID@host:port peers to be added to the addressbook on startup")
cmd.Flags().String("p2p.unconditional_peer_ids",
config.P2P.UnconditionalPeerIDs, "comma-delimited IDs of unconditional peers")
cmd.Flags().Bool("p2p.upnp", config.P2P.UPNP, "enable/disable UPNP port forwarding")

@@ -423,7 +423,6 @@ type RPCConfig struct {
TLSKeyFile string `mapstructure:"tls_key_file"`

// pprof listen address (https://golang.org/pkg/net/http/pprof)
// FIXME: This should be moved under the instrumentation section
PprofListenAddress string `mapstructure:"pprof_laddr"`
}

@@ -507,10 +506,6 @@ func (cfg *RPCConfig) IsCorsEnabled() bool {
return len(cfg.CORSAllowedOrigins) != 0
}

func (cfg *RPCConfig) IsPprofEnabled() bool {
return len(cfg.PprofListenAddress) != 0
}
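
`IsPprofEnabled` just checks that `pprof_laddr` is non-empty, so profiling is toggled entirely from the config file. A sketch of the corresponding config entry; the listen address is an assumed example:

# illustrative value; any non-empty address enables the pprof server
pprof_laddr = "localhost:6060"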

func (cfg RPCConfig) KeyFile() string {
path := cfg.TLSKeyFile
if filepath.IsAbs(path) {
@@ -548,11 +543,6 @@ type P2PConfig struct { //nolint: maligned
// We only use these if we can’t connect to peers in the addrbook
Seeds string `mapstructure:"seeds"`

// Comma separated list of peers to be added to the peer store
// on startup. Either BootstrapPeers or PersistentPeers are
// needed for peer discovery
BootstrapPeers string `mapstructure:"bootstrap_peers"`

// Comma separated list of nodes to keep persistent connections to
PersistentPeers string `mapstructure:"persistent_peers"`

@@ -713,28 +703,14 @@ type MempoolConfig struct {
// Mempool version to use:
// 1) "v0" - (default) FIFO mempool.
// 2) "v1" - prioritized mempool.
Version string `mapstructure:"version"`
// RootDir is the root directory for all data. This should be configured via
// the $TMHOME env variable or --home cmd flag rather than overriding this
// struct field.
RootDir string `mapstructure:"home"`
// Recheck (default: true) defines whether Tendermint should recheck the
// validity for all remaining transaction in the mempool after a block.
// Since a block affects the application state, some transactions in the
// mempool may become invalid. If this does not apply to your application,
// you can disable rechecking.
Recheck bool `mapstructure:"recheck"`
// Broadcast (default: true) defines whether the mempool should relay
// transactions to other peers. Setting this to false will stop the mempool
// from relaying transactions to other peers until they are included in a
// block. In other words, if Broadcast is disabled, only the peer you send
// the tx to will see it until it is included in a block.
Broadcast bool `mapstructure:"broadcast"`
// WalPath (default: "") configures the location of the Write Ahead Log
// (WAL) for the mempool. The WAL is disabled by default. To enable, set
// WalPath to where you want the WAL to be written (e.g.
// "data/mempool.wal").
WalPath string `mapstructure:"wal_dir"`
// WARNING: There's a known memory leak with the prioritized mempool
// that the team are working on. Read more here:
// https://github.com/tendermint/tendermint/issues/8775
Version string `mapstructure:"version"`
RootDir string `mapstructure:"home"`
Recheck bool `mapstructure:"recheck"`
Broadcast bool `mapstructure:"broadcast"`
WalPath string `mapstructure:"wal_dir"`
// Maximum number of transactions in the mempool
Size int `mapstructure:"size"`
// Limit the total size of all txs in the mempool.
@@ -1228,10 +1204,6 @@ func (cfg *InstrumentationConfig) ValidateBasic() error {
return nil
}

func (cfg *InstrumentationConfig) IsPrometheusEnabled() bool {
return cfg.Prometheus && cfg.PrometheusListenAddr != ""
}

//-----------------------------------------------------------------------------
// Utils


@@ -283,11 +283,6 @@ external_address = "{{ .P2P.ExternalAddress }}"
# Comma separated list of seed nodes to connect to
seeds = "{{ .P2P.Seeds }}"

# Comma separated list of peers to be added to the peer store
# on startup. Either BootstrapPeers or PersistentPeers are
# needed for peer discovery
bootstrap_peers = "{{ .P2P.BootstrapPeers }}"

# Comma separated list of nodes to keep persistent connections to
persistent_peers = "{{ .P2P.PersistentPeers }}"
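
Both keys take the comma-delimited `ID@host:port` form described by the CLI flags earlier in this diff. A filled-in sketch; the node IDs and addresses are made up:

# illustrative values only; node IDs and addresses are invented
bootstrap_peers = "f9baeaa15fedf5e1ef7448dd60f46c01f1a9e9c4@1.2.3.4:26656"
persistent_peers = "429fcf25974313b95673f58d77eacdd434402665@10.0.0.5:26656"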

@@ -354,24 +349,8 @@ dial_timeout = "{{ .P2P.DialTimeout }}"
# 2) "v1" - prioritized mempool.
version = "{{ .Mempool.Version }}"

# Recheck (default: true) defines whether Tendermint should recheck the
# validity for all remaining transaction in the mempool after a block.
# Since a block affects the application state, some transactions in the
# mempool may become invalid. If this does not apply to your application,
# you can disable rechecking.
recheck = {{ .Mempool.Recheck }}

# Broadcast (default: true) defines whether the mempool should relay
# transactions to other peers. Setting this to false will stop the mempool
# from relaying transactions to other peers until they are included in a
# block. In other words, if Broadcast is disabled, only the peer you send
# the tx to will see it until it is included in a block.
broadcast = {{ .Mempool.Broadcast }}

# WalPath (default: "") configures the location of the Write Ahead Log
# (WAL) for the mempool. The WAL is disabled by default. To enable, set
# WalPath to where you want the WAL to be written (e.g.
# "data/mempool.wal").
wal_dir = "{{ js .Mempool.WalPath }}"

# Maximum number of transactions in the mempool
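
Rendered with the documented defaults, the trimmed `[mempool]` section would come out roughly as follows; the `size` value is an assumption, the rest are the defaults stated in the comments above:

[mempool]
version = "v0"   # FIFO mempool (default)
recheck = true
broadcast = true
wal_dir = ""     # WAL disabled
size = 5000      # assumed default; not stated in this diff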
@@ -457,7 +436,7 @@ chunk_fetchers = "{{ .StateSync.ChunkFetchers }}"
[blocksync]

# Block Sync version to use:
#
#
# In v0.37, v1 and v2 of the block sync protocols were deprecated.
# Please use v0 instead.
#

@@ -26,7 +26,6 @@ import (
mempoolv0 "github.com/tendermint/tendermint/mempool/v0"
mempoolv1 "github.com/tendermint/tendermint/mempool/v1"
"github.com/tendermint/tendermint/p2p"
tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/store"
@@ -166,16 +165,10 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
for i, peer := range peerList {
if i < len(peerList)/2 {
bcs.Logger.Info("Signed and pushed vote", "vote", prevote1, "peer", peer)
peer.Send(p2p.Envelope{
Message: &tmcons.Vote{Vote: prevote1.ToProto()},
ChannelID: VoteChannel,
})
peer.Send(VoteChannel, MustEncode(&VoteMessage{prevote1}))
} else {
bcs.Logger.Info("Signed and pushed vote", "vote", prevote2, "peer", peer)
peer.Send(p2p.Envelope{
Message: &tmcons.Vote{Vote: prevote2.ToProto()},
ChannelID: VoteChannel,
})
peer.Send(VoteChannel, MustEncode(&VoteMessage{prevote2}))
}
}
} else {
@@ -527,26 +520,18 @@ func sendProposalAndParts(
parts *types.PartSet,
) {
// proposal
peer.Send(p2p.Envelope{
ChannelID: DataChannel,
Message: &tmcons.Proposal{Proposal: *proposal.ToProto()},
})
msg := &ProposalMessage{Proposal: proposal}
peer.Send(DataChannel, MustEncode(msg))

// parts
for i := 0; i < int(parts.Total()); i++ {
part := parts.GetPart(i)
pp, err := part.ToProto()
if err != nil {
panic(err) // TODO: wbanfield better error handling
msg := &BlockPartMessage{
Height: height, // This tells peer that this part applies to us.
Round: round, // This tells peer that this part applies to us.
Part: part,
}
peer.Send(p2p.Envelope{
ChannelID: DataChannel,
Message: &tmcons.BlockPart{
Height: height, // This tells peer that this part applies to us.
Round: round, // This tells peer that this part applies to us.
Part: *pp,
},
})
peer.Send(DataChannel, MustEncode(msg))
}

// votes
@@ -554,14 +539,9 @@ func sendProposalAndParts(
prevote, _ := cs.signVote(tmproto.PrevoteType, blockHash, parts.Header())
precommit, _ := cs.signVote(tmproto.PrecommitType, blockHash, parts.Header())
cs.mtx.Unlock()
peer.Send(p2p.Envelope{
ChannelID: VoteChannel,
Message: &tmcons.Vote{Vote: prevote.ToProto()},
})
peer.Send(p2p.Envelope{
ChannelID: VoteChannel,
Message: &tmcons.Vote{Vote: precommit.ToProto()},
})

peer.Send(VoteChannel, MustEncode(&VoteMessage{prevote}))
peer.Send(VoteChannel, MustEncode(&VoteMessage{precommit}))
}

//----------------------------------------
@@ -599,7 +579,7 @@ func (br *ByzantineReactor) AddPeer(peer p2p.Peer) {
func (br *ByzantineReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
br.reactor.RemovePeer(peer, reason)
}
func (br *ByzantineReactor) Receive(e p2p.Envelope) {
br.reactor.Receive(e)
func (br *ByzantineReactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {
br.reactor.Receive(chID, peer, msgBytes)
}
func (br *ByzantineReactor) InitPeer(peer p2p.Peer) p2p.Peer { return peer }

@@ -7,7 +7,6 @@ import (
"github.com/tendermint/tendermint/libs/log"
tmrand "github.com/tendermint/tendermint/libs/rand"
"github.com/tendermint/tendermint/p2p"
tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
"github.com/tendermint/tendermint/types"
)
@@ -95,10 +94,7 @@ func invalidDoPrevoteFunc(t *testing.T, height int64, round int32, cs *State, sw
peers := sw.Peers().List()
for _, peer := range peers {
cs.Logger.Info("Sending bad vote", "block", blockHash, "peer", peer)
peer.Send(p2p.Envelope{
Message: &tmcons.Vote{Vote: precommit.ToProto()},
ChannelID: VoteChannel,
})
peer.Send(VoteChannel, MustEncode(&VoteMessage{precommit}))
}
}()
}

@@ -118,6 +118,18 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
Name: "latest_block_height",
Help: "The latest block height.",
}, labels).With(labelsAndValues...),
BlockSyncing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: namespace,
Subsystem: MetricsSubsystem,
Name: "block_syncing",
Help: "Whether or not a node is block syncing. 1 if yes, 0 if no.",
}, labels).With(labelsAndValues...),
StateSyncing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: namespace,
Subsystem: MetricsSubsystem,
Name: "state_syncing",
Help: "Whether or not a node is state syncing. 1 if yes, 0 if no.",
}, labels).With(labelsAndValues...),
BlockParts: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
Namespace: namespace,
Subsystem: MetricsSubsystem,
@@ -196,6 +208,8 @@ func NopMetrics() *Metrics {
BlockSizeBytes: discard.NewGauge(),
TotalTxs: discard.NewGauge(),
CommittedHeight: discard.NewGauge(),
BlockSyncing: discard.NewGauge(),
StateSyncing: discard.NewGauge(),
BlockParts: discard.NewCounter(),
StepDurationSeconds: discard.NewHistogram(),
BlockGossipPartsReceived: discard.NewCounter(),

@@ -61,6 +61,10 @@ type Metrics struct {
TotalTxs metrics.Gauge
// The latest block height.
CommittedHeight metrics.Gauge `metrics_name:"latest_block_height"`
// Whether or not a node is block syncing. 1 if yes, 0 if no.
BlockSyncing metrics.Gauge
// Whether or not a node is state syncing. 1 if yes, 0 if no.
StateSyncing metrics.Gauge

// Number of block parts transmitted by each peer.
BlockParts metrics.Counter `metrics_labels:"peer_id"`
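
The reactors are expected to drive these gauges at sync-state transitions; `SwitchToConsensus` later in this diff clears both. A minimal sketch of that usage:

// Sketch: flag the node as block syncing, then clear it when
// consensus takes over (cf. SwitchToConsensus further down).
m := PrometheusMetrics("tendermint")
m.BlockSyncing.Set(1)
m.StateSyncing.Set(0)
// ... blocksync finishes ...
m.BlockSyncing.Set(0)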

@@ -5,6 +5,7 @@ import (
"fmt"

"github.com/cosmos/gogoproto/proto"

cstypes "github.com/tendermint/tendermint/consensus/types"
"github.com/tendermint/tendermint/libs/bits"
tmmath "github.com/tendermint/tendermint/libs/math"
@@ -14,147 +15,173 @@ import (
"github.com/tendermint/tendermint/types"
)

// MsgToProto takes a consensus message type and returns the proto defined consensus message.
//
// TODO: This needs to be removed, but WALToProto depends on this.
func MsgToProto(msg Message) (proto.Message, error) {
// MsgToProto takes a consensus message type and returns the proto defined consensus message
func MsgToProto(msg Message) (*tmcons.Message, error) {
if msg == nil {
return nil, errors.New("consensus: message is nil")
}
var pb proto.Message
var pb tmcons.Message

switch msg := msg.(type) {
case *NewRoundStepMessage:
pb = &tmcons.NewRoundStep{
Height: msg.Height,
Round: msg.Round,
Step: uint32(msg.Step),
SecondsSinceStartTime: msg.SecondsSinceStartTime,
LastCommitRound: msg.LastCommitRound,
pb = tmcons.Message{
Sum: &tmcons.Message_NewRoundStep{
NewRoundStep: &tmcons.NewRoundStep{
Height: msg.Height,
Round: msg.Round,
Step: uint32(msg.Step),
SecondsSinceStartTime: msg.SecondsSinceStartTime,
LastCommitRound: msg.LastCommitRound,
},
},
}

case *NewValidBlockMessage:
pbPartSetHeader := msg.BlockPartSetHeader.ToProto()
pbBits := msg.BlockParts.ToProto()
pb = &tmcons.NewValidBlock{
Height: msg.Height,
Round: msg.Round,
BlockPartSetHeader: pbPartSetHeader,
BlockParts: pbBits,
IsCommit: msg.IsCommit,
pb = tmcons.Message{
Sum: &tmcons.Message_NewValidBlock{
NewValidBlock: &tmcons.NewValidBlock{
Height: msg.Height,
Round: msg.Round,
BlockPartSetHeader: pbPartSetHeader,
BlockParts: pbBits,
IsCommit: msg.IsCommit,
},
},
}

case *ProposalMessage:
pbP := msg.Proposal.ToProto()
pb = &tmcons.Proposal{
Proposal: *pbP,
pb = tmcons.Message{
Sum: &tmcons.Message_Proposal{
Proposal: &tmcons.Proposal{
Proposal: *pbP,
},
},
}

case *ProposalPOLMessage:
pbBits := msg.ProposalPOL.ToProto()
pb = &tmcons.ProposalPOL{
Height: msg.Height,
ProposalPolRound: msg.ProposalPOLRound,
ProposalPol: *pbBits,
pb = tmcons.Message{
Sum: &tmcons.Message_ProposalPol{
ProposalPol: &tmcons.ProposalPOL{
Height: msg.Height,
ProposalPolRound: msg.ProposalPOLRound,
ProposalPol: *pbBits,
},
},
}

case *BlockPartMessage:
parts, err := msg.Part.ToProto()
if err != nil {
return nil, fmt.Errorf("msg to proto error: %w", err)
}
pb = &tmcons.BlockPart{
Height: msg.Height,
Round: msg.Round,
Part: *parts,
pb = tmcons.Message{
Sum: &tmcons.Message_BlockPart{
BlockPart: &tmcons.BlockPart{
Height: msg.Height,
Round: msg.Round,
Part: *parts,
},
},
}

case *VoteMessage:
vote := msg.Vote.ToProto()
pb = &tmcons.Vote{
Vote: vote,
pb = tmcons.Message{
Sum: &tmcons.Message_Vote{
Vote: &tmcons.Vote{
Vote: vote,
},
},
}

case *HasVoteMessage:
pb = &tmcons.HasVote{
Height: msg.Height,
Round: msg.Round,
Type: msg.Type,
Index: msg.Index,
pb = tmcons.Message{
Sum: &tmcons.Message_HasVote{
HasVote: &tmcons.HasVote{
Height: msg.Height,
Round: msg.Round,
Type: msg.Type,
Index: msg.Index,
},
},
}

case *VoteSetMaj23Message:
bi := msg.BlockID.ToProto()
pb = &tmcons.VoteSetMaj23{
Height: msg.Height,
Round: msg.Round,
Type: msg.Type,
BlockID: bi,
pb = tmcons.Message{
Sum: &tmcons.Message_VoteSetMaj23{
VoteSetMaj23: &tmcons.VoteSetMaj23{
Height: msg.Height,
Round: msg.Round,
Type: msg.Type,
BlockID: bi,
},
},
}

case *VoteSetBitsMessage:
bi := msg.BlockID.ToProto()
bits := msg.Votes.ToProto()

vsb := &tmcons.VoteSetBits{
Height: msg.Height,
Round: msg.Round,
Type: msg.Type,
BlockID: bi,
vsb := &tmcons.Message_VoteSetBits{
VoteSetBits: &tmcons.VoteSetBits{
Height: msg.Height,
Round: msg.Round,
Type: msg.Type,
BlockID: bi,
},
}

if bits != nil {
vsb.Votes = *bits
vsb.VoteSetBits.Votes = *bits
}

pb = vsb
pb = tmcons.Message{
Sum: vsb,
}

default:
return nil, fmt.Errorf("consensus: message not recognized: %T", msg)
}

return pb, nil
return &pb, nil
}

// MsgFromProto takes a consensus proto message and returns the native go type
func MsgFromProto(p proto.Message) (Message, error) {
if p == nil {
func MsgFromProto(msg *tmcons.Message) (Message, error) {
if msg == nil {
return nil, errors.New("consensus: nil message")
}
var pb Message

switch msg := p.(type) {
case *tmcons.NewRoundStep:
rs, err := tmmath.SafeConvertUint8(int64(msg.Step))
switch msg := msg.Sum.(type) {
case *tmcons.Message_NewRoundStep:
rs, err := tmmath.SafeConvertUint8(int64(msg.NewRoundStep.Step))
// deny message based on possible overflow
if err != nil {
return nil, fmt.Errorf("denying message due to possible overflow: %w", err)
}
pb = &NewRoundStepMessage{
Height: msg.Height,
Round: msg.Round,
Height: msg.NewRoundStep.Height,
Round: msg.NewRoundStep.Round,
Step: cstypes.RoundStepType(rs),
SecondsSinceStartTime: msg.SecondsSinceStartTime,
LastCommitRound: msg.LastCommitRound,
SecondsSinceStartTime: msg.NewRoundStep.SecondsSinceStartTime,
LastCommitRound: msg.NewRoundStep.LastCommitRound,
}
case *tmcons.NewValidBlock:
pbPartSetHeader, err := types.PartSetHeaderFromProto(&msg.BlockPartSetHeader)
case *tmcons.Message_NewValidBlock:
pbPartSetHeader, err := types.PartSetHeaderFromProto(&msg.NewValidBlock.BlockPartSetHeader)
if err != nil {
return nil, fmt.Errorf("parts to proto error: %w", err)
}

pbBits := new(bits.BitArray)
pbBits.FromProto(msg.BlockParts)
pbBits.FromProto(msg.NewValidBlock.BlockParts)

pb = &NewValidBlockMessage{
Height: msg.Height,
Round: msg.Round,
Height: msg.NewValidBlock.Height,
Round: msg.NewValidBlock.Round,
BlockPartSetHeader: *pbPartSetHeader,
BlockParts: pbBits,
IsCommit: msg.IsCommit,
IsCommit: msg.NewValidBlock.IsCommit,
}
case *tmcons.Proposal:
pbP, err := types.ProposalFromProto(&msg.Proposal)
case *tmcons.Message_Proposal:
pbP, err := types.ProposalFromProto(&msg.Proposal.Proposal)
if err != nil {
return nil, fmt.Errorf("proposal msg to proto error: %w", err)
}
@@ -162,26 +189,26 @@ func MsgFromProto(p proto.Message) (Message, error) {
pb = &ProposalMessage{
Proposal: pbP,
}
case *tmcons.ProposalPOL:
case *tmcons.Message_ProposalPol:
pbBits := new(bits.BitArray)
pbBits.FromProto(&msg.ProposalPol)
pbBits.FromProto(&msg.ProposalPol.ProposalPol)
pb = &ProposalPOLMessage{
Height: msg.Height,
ProposalPOLRound: msg.ProposalPolRound,
Height: msg.ProposalPol.Height,
ProposalPOLRound: msg.ProposalPol.ProposalPolRound,
ProposalPOL: pbBits,
}
case *tmcons.BlockPart:
parts, err := types.PartFromProto(&msg.Part)
case *tmcons.Message_BlockPart:
parts, err := types.PartFromProto(&msg.BlockPart.Part)
if err != nil {
return nil, fmt.Errorf("blockpart msg to proto error: %w", err)
}
pb = &BlockPartMessage{
Height: msg.Height,
Round: msg.Round,
Height: msg.BlockPart.Height,
Round: msg.BlockPart.Round,
Part: parts,
}
case *tmcons.Vote:
vote, err := types.VoteFromProto(msg.Vote)
case *tmcons.Message_Vote:
vote, err := types.VoteFromProto(msg.Vote.Vote)
if err != nil {
return nil, fmt.Errorf("vote msg to proto error: %w", err)
}
@@ -189,36 +216,36 @@ func MsgFromProto(p proto.Message) (Message, error) {
pb = &VoteMessage{
Vote: vote,
}
case *tmcons.HasVote:
case *tmcons.Message_HasVote:
pb = &HasVoteMessage{
Height: msg.Height,
Round: msg.Round,
Type: msg.Type,
Index: msg.Index,
Height: msg.HasVote.Height,
Round: msg.HasVote.Round,
Type: msg.HasVote.Type,
Index: msg.HasVote.Index,
}
case *tmcons.VoteSetMaj23:
bi, err := types.BlockIDFromProto(&msg.BlockID)
case *tmcons.Message_VoteSetMaj23:
bi, err := types.BlockIDFromProto(&msg.VoteSetMaj23.BlockID)
if err != nil {
return nil, fmt.Errorf("voteSetMaj23 msg to proto error: %w", err)
}
pb = &VoteSetMaj23Message{
Height: msg.Height,
Round: msg.Round,
Type: msg.Type,
Height: msg.VoteSetMaj23.Height,
Round: msg.VoteSetMaj23.Round,
Type: msg.VoteSetMaj23.Type,
BlockID: *bi,
}
case *tmcons.VoteSetBits:
bi, err := types.BlockIDFromProto(&msg.BlockID)
case *tmcons.Message_VoteSetBits:
bi, err := types.BlockIDFromProto(&msg.VoteSetBits.BlockID)
if err != nil {
return nil, fmt.Errorf("voteSetBits msg to proto error: %w", err)
}
bits := new(bits.BitArray)
bits.FromProto(&msg.Votes)
bits.FromProto(&msg.VoteSetBits.Votes)

pb = &VoteSetBitsMessage{
Height: msg.Height,
Round: msg.Round,
Type: msg.Type,
Height: msg.VoteSetBits.Height,
Round: msg.VoteSetBits.Round,
Type: msg.VoteSetBits.Type,
BlockID: *bi,
Votes: bits,
}
@@ -233,6 +260,20 @@ func MsgFromProto(p proto.Message) (Message, error) {
return pb, nil
}

// MustEncode takes the reactors msg, makes it proto and marshals it
// this mimics `MustMarshalBinaryBare` in that is panics on error
func MustEncode(msg Message) []byte {
pb, err := MsgToProto(msg)
if err != nil {
panic(err)
}
enc, err := proto.Marshal(pb)
if err != nil {
panic(err)
}
return enc
}
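
A quick round-trip sketch, using `decodeMsg` (defined further down in this diff) to reverse the encoding; the message values are arbitrary:

// Sketch: encode a consensus message to bytes and decode it back.
func exampleRoundTrip() {
	orig := &HasVoteMessage{Height: 1, Round: 1, Index: 1, Type: tmproto.PrevoteType}
	bz := MustEncode(orig)     // MsgToProto + proto.Marshal, panics on error
	back, err := decodeMsg(bz) // proto.Unmarshal into tmcons.Message, then MsgFromProto
	if err != nil {
		panic(err)
	}
	_ = back.(*HasVoteMessage) // same message, recovered from the wire bytes
}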

// WALToProto takes a WAL message and return a proto walMessage and error
func WALToProto(msg WALMessage) (*tmcons.WALMessage, error) {
var pb tmcons.WALMessage
@@ -253,14 +294,10 @@ func WALToProto(msg WALMessage) (*tmcons.WALMessage, error) {
if err != nil {
return nil, err
}
if w, ok := consMsg.(p2p.Wrapper); ok {
consMsg = w.Wrap()
}
cm := consMsg.(*tmcons.Message)
pb = tmcons.WALMessage{
Sum: &tmcons.WALMessage_MsgInfo{
MsgInfo: &tmcons.MsgInfo{
Msg: *cm,
Msg: *consMsg,
PeerID: string(msg.PeerID),
},
},
@@ -306,11 +343,7 @@ func WALFromProto(msg *tmcons.WALMessage) (WALMessage, error) {
Step: msg.EventDataRoundState.Step,
}
case *tmcons.WALMessage_MsgInfo:
um, err := msg.MsgInfo.Msg.Unwrap()
if err != nil {
return nil, fmt.Errorf("unwrap message: %w", err)
}
walMsg, err := MsgFromProto(um)
walMsg, err := MsgFromProto(&msg.MsgInfo.Msg)
if err != nil {
return nil, fmt.Errorf("msgInfo from proto error: %w", err)
}

@@ -71,7 +71,7 @@ func TestMsgToProto(t *testing.T) {
testsCases := []struct {
testName string
msg Message
want proto.Message
want *tmcons.Message
wantErr bool
}{
{"successful NewRoundStepMessage", &NewRoundStepMessage{
@@ -80,15 +80,17 @@ func TestMsgToProto(t *testing.T) {
Step: 1,
SecondsSinceStartTime: 1,
LastCommitRound: 2,
}, &tmcons.NewRoundStep{
Height: 2,
Round: 1,
Step: 1,
SecondsSinceStartTime: 1,
LastCommitRound: 2,
},

false},
}, &tmcons.Message{
Sum: &tmcons.Message_NewRoundStep{
NewRoundStep: &tmcons.NewRoundStep{
Height: 2,
Round: 1,
Step: 1,
SecondsSinceStartTime: 1,
LastCommitRound: 2,
},
},
}, false},

{"successful NewValidBlockMessage", &NewValidBlockMessage{
Height: 1,
@@ -96,78 +98,92 @@ func TestMsgToProto(t *testing.T) {
BlockPartSetHeader: psh,
BlockParts: bits,
IsCommit: false,
}, &tmcons.NewValidBlock{
Height: 1,
Round: 1,
BlockPartSetHeader: pbPsh,
BlockParts: pbBits,
IsCommit: false,
},

false},
}, &tmcons.Message{
Sum: &tmcons.Message_NewValidBlock{
NewValidBlock: &tmcons.NewValidBlock{
Height: 1,
Round: 1,
BlockPartSetHeader: pbPsh,
BlockParts: pbBits,
IsCommit: false,
},
},
}, false},
{"successful BlockPartMessage", &BlockPartMessage{
Height: 100,
Round: 1,
Part: &parts,
}, &tmcons.BlockPart{
Height: 100,
Round: 1,
Part: *pbParts,
},

false},
}, &tmcons.Message{
Sum: &tmcons.Message_BlockPart{
BlockPart: &tmcons.BlockPart{
Height: 100,
Round: 1,
Part: *pbParts,
},
},
}, false},
{"successful ProposalPOLMessage", &ProposalPOLMessage{
Height: 1,
ProposalPOLRound: 1,
ProposalPOL: bits,
}, &tmcons.ProposalPOL{
Height: 1,
ProposalPolRound: 1,
ProposalPol: *pbBits,
},
false},
}, &tmcons.Message{
Sum: &tmcons.Message_ProposalPol{
ProposalPol: &tmcons.ProposalPOL{
Height: 1,
ProposalPolRound: 1,
ProposalPol: *pbBits,
},
}}, false},
{"successful ProposalMessage", &ProposalMessage{
Proposal: &proposal,
}, &tmcons.Proposal{
Proposal: *pbProposal,
},

false},
}, &tmcons.Message{
Sum: &tmcons.Message_Proposal{
Proposal: &tmcons.Proposal{
Proposal: *pbProposal,
},
},
}, false},
{"successful VoteMessage", &VoteMessage{
Vote: vote,
}, &tmcons.Vote{
Vote: pbVote,
},

false},
}, &tmcons.Message{
Sum: &tmcons.Message_Vote{
Vote: &tmcons.Vote{
Vote: pbVote,
},
},
}, false},
{"successful VoteSetMaj23", &VoteSetMaj23Message{
Height: 1,
Round: 1,
Type: 1,
BlockID: bi,
}, &tmcons.VoteSetMaj23{
Height: 1,
Round: 1,
Type: 1,
BlockID: pbBi,
},

false},
}, &tmcons.Message{
Sum: &tmcons.Message_VoteSetMaj23{
VoteSetMaj23: &tmcons.VoteSetMaj23{
Height: 1,
Round: 1,
Type: 1,
BlockID: pbBi,
},
},
}, false},
{"successful VoteSetBits", &VoteSetBitsMessage{
Height: 1,
Round: 1,
Type: 1,
BlockID: bi,
Votes: bits,
}, &tmcons.VoteSetBits{
Height: 1,
Round: 1,
Type: 1,
BlockID: pbBi,
Votes: *pbBits,
},

false},
}, &tmcons.Message{
Sum: &tmcons.Message_VoteSetBits{
VoteSetBits: &tmcons.VoteSetBits{
Height: 1,
Round: 1,
Type: 1,
BlockID: pbBi,
Votes: *pbBits,
},
},
}, false},
{"failure", nil, &tmcons.Message{}, true},
}
for _, tt := range testsCases {

@@ -7,6 +7,8 @@ import (
"sync"
"time"

"github.com/cosmos/gogoproto/proto"

cstypes "github.com/tendermint/tendermint/consensus/types"
"github.com/tendermint/tendermint/libs/bits"
tmevents "github.com/tendermint/tendermint/libs/events"
@@ -119,6 +121,8 @@ func (conR *Reactor) SwitchToConsensus(state sm.State, skipWAL bool) {
conR.mtx.Lock()
conR.waitSync = false
conR.mtx.Unlock()
conR.Metrics.BlockSyncing.Set(0)
conR.Metrics.StateSyncing.Set(0)

if skipWAL {
conR.conS.doWALCatchup = false
@@ -144,7 +148,6 @@ func (conR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
Priority: 6,
SendQueueCapacity: 100,
RecvMessageCapacity: maxMsgSize,
MessageType: &tmcons.Message{},
},
{
ID: DataChannel, // maybe split between gossiping current block and catchup stuff
@@ -153,7 +156,6 @@ func (conR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
SendQueueCapacity: 100,
RecvBufferCapacity: 50 * 4096,
RecvMessageCapacity: maxMsgSize,
MessageType: &tmcons.Message{},
},
{
ID: VoteChannel,
@@ -161,7 +163,6 @@ func (conR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
SendQueueCapacity: 100,
RecvBufferCapacity: 100 * 100,
RecvMessageCapacity: maxMsgSize,
MessageType: &tmcons.Message{},
},
{
ID: VoteSetBitsChannel,
@@ -169,7 +170,6 @@ func (conR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
SendQueueCapacity: 2,
RecvBufferCapacity: 1024,
RecvMessageCapacity: maxMsgSize,
MessageType: &tmcons.Message{},
},
}
}
@@ -223,33 +223,34 @@ func (conR *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) {
// Peer state updates can happen in parallel, but processing of
// proposals, block parts, and votes are ordered by the receiveRoutine
// NOTE: blocks on consensus state for proposals, block parts, and votes
func (conR *Reactor) Receive(e p2p.Envelope) {
func (conR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
if !conR.IsRunning() {
conR.Logger.Debug("Receive", "src", e.Src, "chId", e.ChannelID)
conR.Logger.Debug("Receive", "src", src, "chId", chID, "bytes", msgBytes)
return
}
msg, err := MsgFromProto(e.Message)

msg, err := decodeMsg(msgBytes)
if err != nil {
conR.Logger.Error("Error decoding message", "src", e.Src, "chId", e.ChannelID, "err", err)
conR.Switch.StopPeerForError(e.Src, err)
conR.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err)
conR.Switch.StopPeerForError(src, err)
return
}

if err = msg.ValidateBasic(); err != nil {
conR.Logger.Error("Peer sent us invalid msg", "peer", e.Src, "msg", e.Message, "err", err)
conR.Switch.StopPeerForError(e.Src, err)
conR.Logger.Error("Peer sent us invalid msg", "peer", src, "msg", msg, "err", err)
conR.Switch.StopPeerForError(src, err)
return
}

conR.Logger.Debug("Receive", "src", e.Src, "chId", e.ChannelID, "msg", msg)
conR.Logger.Debug("Receive", "src", src, "chId", chID, "msg", msg)

// Get peer states
ps, ok := e.Src.Get(types.PeerStateKey).(*PeerState)
ps, ok := src.Get(types.PeerStateKey).(*PeerState)
if !ok {
panic(fmt.Sprintf("Peer %v has no state", e.Src))
panic(fmt.Sprintf("Peer %v has no state", src))
}

switch e.ChannelID {
switch chID {
case StateChannel:
switch msg := msg.(type) {
case *NewRoundStepMessage:
@@ -257,8 +258,8 @@ func (conR *Reactor) Receive(e p2p.Envelope) {
initialHeight := conR.conS.state.InitialHeight
conR.conS.mtx.Unlock()
if err = msg.ValidateHeight(initialHeight); err != nil {
conR.Logger.Error("Peer sent us invalid msg", "peer", e.Src, "msg", msg, "err", err)
conR.Switch.StopPeerForError(e.Src, err)
conR.Logger.Error("Peer sent us invalid msg", "peer", src, "msg", msg, "err", err)
conR.Switch.StopPeerForError(src, err)
return
}
ps.ApplyNewRoundStepMessage(msg)
@@ -277,7 +278,7 @@ func (conR *Reactor) Receive(e p2p.Envelope) {
// Peer claims to have a maj23 for some BlockID at H,R,S,
err := votes.SetPeerMaj23(msg.Round, msg.Type, ps.peer.ID(), msg.BlockID)
if err != nil {
conR.Switch.StopPeerForError(e.Src, err)
conR.Switch.StopPeerForError(src, err)
return
}
// Respond with a VoteSetBitsMessage showing which votes we have.
@@ -291,19 +292,13 @@ func (conR *Reactor) Receive(e p2p.Envelope) {
default:
panic("Bad VoteSetBitsMessage field Type. Forgot to add a check in ValidateBasic?")
}
eMsg := &tmcons.VoteSetBits{
src.TrySend(VoteSetBitsChannel, MustEncode(&VoteSetBitsMessage{
Height: msg.Height,
Round: msg.Round,
Type: msg.Type,
BlockID: msg.BlockID.ToProto(),
}
if votes := ourVotes.ToProto(); votes != nil {
eMsg.Votes = *votes
}
e.Src.TrySend(p2p.Envelope{
ChannelID: VoteSetBitsChannel,
Message: eMsg,
})
BlockID: msg.BlockID,
Votes: ourVotes,
}))
default:
conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
}
@@ -316,13 +311,13 @@ func (conR *Reactor) Receive(e p2p.Envelope) {
switch msg := msg.(type) {
case *ProposalMessage:
ps.SetHasProposal(msg.Proposal)
conR.conS.peerMsgQueue <- msgInfo{msg, e.Src.ID()}
conR.conS.peerMsgQueue <- msgInfo{msg, src.ID()}
case *ProposalPOLMessage:
ps.ApplyProposalPOLMessage(msg)
case *BlockPartMessage:
ps.SetHasProposalBlockPart(msg.Height, msg.Round, int(msg.Part.Index))
conR.Metrics.BlockParts.With("peer_id", string(e.Src.ID())).Add(1)
conR.conS.peerMsgQueue <- msgInfo{msg, e.Src.ID()}
conR.Metrics.BlockParts.With("peer_id", string(src.ID())).Add(1)
conR.conS.peerMsgQueue <- msgInfo{msg, src.ID()}
default:
conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
}
@@ -342,7 +337,7 @@ func (conR *Reactor) Receive(e p2p.Envelope) {
ps.EnsureVoteBitArrays(height-1, lastCommitSize)
ps.SetHasVote(msg.Vote)

cs.peerMsgQueue <- msgInfo{msg, e.Src.ID()}
cs.peerMsgQueue <- msgInfo{msg, src.ID()}

default:
// don't punish (leave room for soft upgrades)
@@ -381,7 +376,7 @@ func (conR *Reactor) Receive(e p2p.Envelope) {
}

default:
conR.Logger.Error(fmt.Sprintf("Unknown chId %X", e.ChannelID))
conR.Logger.Error(fmt.Sprintf("Unknown chId %X", chID))
}
}

@@ -435,39 +430,29 @@ func (conR *Reactor) unsubscribeFromBroadcastEvents() {

func (conR *Reactor) broadcastNewRoundStepMessage(rs *cstypes.RoundState) {
nrsMsg := makeRoundStepMessage(rs)
conR.Switch.Broadcast(p2p.Envelope{
ChannelID: StateChannel,
Message: nrsMsg,
})
conR.Switch.Broadcast(StateChannel, MustEncode(nrsMsg))
}

func (conR *Reactor) broadcastNewValidBlockMessage(rs *cstypes.RoundState) {
psh := rs.ProposalBlockParts.Header()
csMsg := &tmcons.NewValidBlock{
csMsg := &NewValidBlockMessage{
Height: rs.Height,
Round: rs.Round,
BlockPartSetHeader: psh.ToProto(),
BlockParts: rs.ProposalBlockParts.BitArray().ToProto(),
BlockPartSetHeader: rs.ProposalBlockParts.Header(),
BlockParts: rs.ProposalBlockParts.BitArray(),
IsCommit: rs.Step == cstypes.RoundStepCommit,
}
conR.Switch.Broadcast(p2p.Envelope{
ChannelID: StateChannel,
Message: csMsg,
})
conR.Switch.Broadcast(StateChannel, MustEncode(csMsg))
}

// Broadcasts HasVoteMessage to peers that care.
func (conR *Reactor) broadcastHasVoteMessage(vote *types.Vote) {
msg := &tmcons.HasVote{
msg := &HasVoteMessage{
Height: vote.Height,
Round: vote.Round,
Type: vote.Type,
Index: vote.ValidatorIndex,
}
conR.Switch.Broadcast(p2p.Envelope{
ChannelID: StateChannel,
Message: msg,
})
conR.Switch.Broadcast(StateChannel, MustEncode(msg))
/*
// TODO: Make this broadcast more selective.
for _, peer := range conR.Switch.Peers().List() {
@@ -478,11 +463,7 @@ func (conR *Reactor) broadcastHasVoteMessage(vote *types.Vote) {
prs := ps.GetRoundState()
if prs.Height == vote.Height {
// TODO: Also filter on round?
e := p2p.Envelope{
ChannelID: StateChannel, struct{ ConsensusMessage }{msg},
Message: p,
}
peer.TrySend(e)
peer.TrySend(StateChannel, struct{ ConsensusMessage }{msg})
} else {
// Height doesn't match
// TODO: check a field, maybe CatchupCommitRound?
@@ -492,11 +473,11 @@ func (conR *Reactor) broadcastHasVoteMessage(vote *types.Vote) {
*/
}

func makeRoundStepMessage(rs *cstypes.RoundState) (nrsMsg *tmcons.NewRoundStep) {
nrsMsg = &tmcons.NewRoundStep{
func makeRoundStepMessage(rs *cstypes.RoundState) (nrsMsg *NewRoundStepMessage) {
nrsMsg = &NewRoundStepMessage{
Height: rs.Height,
Round: rs.Round,
Step: uint32(rs.Step),
Step: rs.Step,
SecondsSinceStartTime: int64(time.Since(rs.StartTime).Seconds()),
LastCommitRound: rs.LastCommit.GetRound(),
}
@@ -506,10 +487,7 @@ func makeRoundStepMessage(rs *cstypes.RoundState) (nrsMsg *tmcons.NewRoundStep)
func (conR *Reactor) sendNewRoundStepMessage(peer p2p.Peer) {
rs := conR.getRoundState()
nrsMsg := makeRoundStepMessage(rs)
peer.Send(p2p.Envelope{
ChannelID: StateChannel,
Message: nrsMsg,
})
peer.Send(StateChannel, MustEncode(nrsMsg))
}

func (conR *Reactor) updateRoundStateRoutine() {
@@ -548,19 +526,13 @@ OUTER_LOOP:
if rs.ProposalBlockParts.HasHeader(prs.ProposalBlockPartSetHeader) {
if index, ok := rs.ProposalBlockParts.BitArray().Sub(prs.ProposalBlockParts.Copy()).PickRandom(); ok {
part := rs.ProposalBlockParts.GetPart(index)
parts, err := part.ToProto()
if err != nil {
panic(err)
msg := &BlockPartMessage{
Height: rs.Height, // This tells peer that this part applies to us.
Round: rs.Round, // This tells peer that this part applies to us.
Part: part,
}
logger.Debug("Sending block part", "height", prs.Height, "round", prs.Round)
if peer.Send(p2p.Envelope{
ChannelID: DataChannel,
Message: &tmcons.BlockPart{
Height: rs.Height, // This tells peer that this part applies to us.
Round: rs.Round, // This tells peer that this part applies to us.
Part: *parts,
},
}) {
if peer.Send(DataChannel, MustEncode(msg)) {
ps.SetHasProposalBlockPart(prs.Height, prs.Round, index)
}
continue OUTER_LOOP
@@ -606,11 +578,9 @@ OUTER_LOOP:
if rs.Proposal != nil && !prs.Proposal {
// Proposal: share the proposal metadata with peer.
{
msg := &ProposalMessage{Proposal: rs.Proposal}
logger.Debug("Sending proposal", "height", prs.Height, "round", prs.Round)
if peer.Send(p2p.Envelope{
ChannelID: DataChannel,
Message: &tmcons.Proposal{Proposal: *rs.Proposal.ToProto()},
}) {
if peer.Send(DataChannel, MustEncode(msg)) {
// NOTE[ZM]: A peer might have received different proposal msg so this Proposal msg will be rejected!
ps.SetHasProposal(rs.Proposal)
}
@@ -620,15 +590,13 @@ OUTER_LOOP:
// rs.Proposal was validated, so rs.Proposal.POLRound <= rs.Round,
// so we definitely have rs.Votes.Prevotes(rs.Proposal.POLRound).
if 0 <= rs.Proposal.POLRound {
msg := &ProposalPOLMessage{
Height: rs.Height,
ProposalPOLRound: rs.Proposal.POLRound,
ProposalPOL: rs.Votes.Prevotes(rs.Proposal.POLRound).BitArray(),
}
logger.Debug("Sending POL", "height", prs.Height, "round", prs.Round)
peer.Send(p2p.Envelope{
ChannelID: DataChannel,
Message: &tmcons.ProposalPOL{
Height: rs.Height,
ProposalPolRound: rs.Proposal.POLRound,
ProposalPol: *rs.Votes.Prevotes(rs.Proposal.POLRound).BitArray().ToProto(),
},
})
peer.Send(DataChannel, MustEncode(msg))
}
continue OUTER_LOOP
}
@@ -665,20 +633,13 @@ func (conR *Reactor) gossipDataForCatchup(logger log.Logger, rs *cstypes.RoundSt
return
}
// Send the part
logger.Debug("Sending block part for catchup", "round", prs.Round, "index", index)
pp, err := part.ToProto()
if err != nil {
logger.Error("Could not convert part to proto", "index", index, "error", err)
return
msg := &BlockPartMessage{
Height: prs.Height, // Not our height, so it doesn't matter.
Round: prs.Round, // Not our height, so it doesn't matter.
Part: part,
}
if peer.Send(p2p.Envelope{
ChannelID: DataChannel,
Message: &tmcons.BlockPart{
Height: prs.Height, // Not our height, so it doesn't matter.
Round: prs.Round, // Not our height, so it doesn't matter.
Part: *pp,
},
}) {
logger.Debug("Sending block part for catchup", "round", prs.Round, "index", index)
if peer.Send(DataChannel, MustEncode(msg)) {
ps.SetHasProposalBlockPart(prs.Height, prs.Round, index)
} else {
logger.Debug("Sending block part for catchup failed")
@@ -837,16 +798,12 @@ OUTER_LOOP:
prs := ps.GetRoundState()
if rs.Height == prs.Height {
if maj23, ok := rs.Votes.Prevotes(prs.Round).TwoThirdsMajority(); ok {

peer.TrySend(p2p.Envelope{
ChannelID: StateChannel,
Message: &tmcons.VoteSetMaj23{
Height: prs.Height,
Round: prs.Round,
Type: tmproto.PrevoteType,
BlockID: maj23.ToProto(),
},
})
peer.TrySend(StateChannel, MustEncode(&VoteSetMaj23Message{
Height: prs.Height,
Round: prs.Round,
Type: tmproto.PrevoteType,
BlockID: maj23,
}))
time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
}
}
@@ -858,15 +815,12 @@ OUTER_LOOP:
prs := ps.GetRoundState()
if rs.Height == prs.Height {
if maj23, ok := rs.Votes.Precommits(prs.Round).TwoThirdsMajority(); ok {
peer.TrySend(p2p.Envelope{
ChannelID: StateChannel,
Message: &tmcons.VoteSetMaj23{
Height: prs.Height,
Round: prs.Round,
Type: tmproto.PrecommitType,
BlockID: maj23.ToProto(),
},
})
peer.TrySend(StateChannel, MustEncode(&VoteSetMaj23Message{
Height: prs.Height,
Round: prs.Round,
Type: tmproto.PrecommitType,
BlockID: maj23,
}))
time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
}
}
@@ -878,16 +832,12 @@ OUTER_LOOP:
prs := ps.GetRoundState()
if rs.Height == prs.Height && prs.ProposalPOLRound >= 0 {
if maj23, ok := rs.Votes.Prevotes(prs.ProposalPOLRound).TwoThirdsMajority(); ok {

peer.TrySend(p2p.Envelope{
ChannelID: StateChannel,
Message: &tmcons.VoteSetMaj23{
Height: prs.Height,
Round: prs.ProposalPOLRound,
Type: tmproto.PrevoteType,
BlockID: maj23.ToProto(),
},
})
peer.TrySend(StateChannel, MustEncode(&VoteSetMaj23Message{
Height: prs.Height,
Round: prs.ProposalPOLRound,
Type: tmproto.PrevoteType,
BlockID: maj23,
}))
time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
}
}
@@ -902,15 +852,12 @@ OUTER_LOOP:
if prs.CatchupCommitRound != -1 && prs.Height > 0 && prs.Height <= conR.conS.blockStore.Height() &&
prs.Height >= conR.conS.blockStore.Base() {
if commit := conR.conS.LoadCommit(prs.Height); commit != nil {
peer.TrySend(p2p.Envelope{
ChannelID: StateChannel,
Message: &tmcons.VoteSetMaj23{
Height: prs.Height,
Round: commit.Round,
Type: tmproto.PrecommitType,
BlockID: commit.BlockID.ToProto(),
},
})
peer.TrySend(StateChannel, MustEncode(&VoteSetMaj23Message{
Height: prs.Height,
Round: commit.Round,
Type: tmproto.PrecommitType,
BlockID: commit.BlockID,
}))
time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
}
}
@@ -1124,13 +1071,9 @@ func (ps *PeerState) SetHasProposalBlockPart(height int64, round int32, index in
// Returns true if vote was sent.
func (ps *PeerState) PickSendVote(votes types.VoteSetReader) bool {
if vote, ok := ps.PickVoteToSend(votes); ok {
msg := &VoteMessage{vote}
ps.logger.Debug("Sending vote message", "ps", ps, "vote", vote)
if ps.peer.Send(p2p.Envelope{
ChannelID: VoteChannel,
Message: &tmcons.Vote{
Vote: vote.ToProto(),
},
}) {
if ps.peer.Send(VoteChannel, MustEncode(msg)) {
ps.SetHasVote(vote)
return true
}
@@ -1496,6 +1439,15 @@ func init() {
tmjson.RegisterType(&VoteSetBitsMessage{}, "tendermint/VoteSetBits")
}

func decodeMsg(bz []byte) (msg Message, err error) {
pb := &tmcons.Message{}
if err = proto.Unmarshal(bz, pb); err != nil {
return msg, err
}

return MsgFromProto(pb)
}

//-------------------------------------

// NewRoundStepMessage is sent for every step taken in the ConsensusState.

@@ -33,7 +33,6 @@ import (
mempoolv1 "github.com/tendermint/tendermint/mempool/v1"
"github.com/tendermint/tendermint/p2p"
p2pmock "github.com/tendermint/tendermint/p2p/mock"
tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
sm "github.com/tendermint/tendermint/state"
statemocks "github.com/tendermint/tendermint/state/mocks"
@@ -266,18 +265,15 @@ func TestReactorReceiveDoesNotPanicIfAddPeerHasntBeenCalledYet(t *testing.T) {
var (
reactor = reactors[0]
peer = p2pmock.NewPeer(nil)
msg = MustEncode(&HasVoteMessage{Height: 1,
Round: 1, Index: 1, Type: tmproto.PrevoteType})
)

reactor.InitPeer(peer)

// simulate switch calling Receive before AddPeer
assert.NotPanics(t, func() {
reactor.Receive(p2p.Envelope{
ChannelID: StateChannel,
Src: peer,
Message: &tmcons.HasVote{Height: 1,
Round: 1, Index: 1, Type: tmproto.PrevoteType},
})
reactor.Receive(StateChannel, peer, msg)
reactor.AddPeer(peer)
})
}
@@ -292,18 +288,15 @@ func TestReactorReceivePanicsIfInitPeerHasntBeenCalledYet(t *testing.T) {
var (
reactor = reactors[0]
peer = p2pmock.NewPeer(nil)
msg = MustEncode(&HasVoteMessage{Height: 1,
Round: 1, Index: 1, Type: tmproto.PrevoteType})
)

// we should call InitPeer here

// simulate switch calling Receive before AddPeer
assert.Panics(t, func() {
reactor.Receive(p2p.Envelope{
ChannelID: StateChannel,
Src: peer,
Message: &tmcons.HasVote{Height: 1,
Round: 1, Index: 1, Type: tmproto.PrevoteType},
})
reactor.Receive(StateChannel, peer, msg)
})
}


@@ -99,4 +99,4 @@ configuration file that we can update with PRs.
Because the build processes are identical (as is the information contained
herein), this file should be kept in sync as much as possible with its
[counterpart in the Cosmos SDK
repo](https://github.com/cosmos/cosmos-sdk/blob/main/docs/README.md).
repo](https://github.com/cosmos/cosmos-sdk/blob/master/docs/DOCS_README.md).

@@ -78,7 +78,6 @@ Note the context/background should be written in the present tense.
- [ADR-039: Peer-Behaviour](./adr-039-peer-behaviour.md)
- [ADR-063: Privval-gRPC](./adr-063-privval-grpc.md)
- [ADR-067: Mempool Refactor](./adr-067-mempool-refactor.md)
- [ADR-071: Proposer-Based Timestamps](./adr-071-proposer-based-timestamps.md)
- [ADR-075: RPC Event Subscription Interface](./adr-075-rpc-subscription.md)
- [ADR-079: Ed25519 Verification](./adr-079-ed25519-verification.md)
- [ADR-081: Protocol Buffers Management](./adr-081-protobuf-mgmt.md)
@@ -115,6 +114,7 @@ None
- [ADR-064: Batch Verification](./adr-064-batch-verification.md)
- [ADR-068: Reverse-Sync](./adr-068-reverse-sync.md)
- [ADR-069: Node Initialization](./adr-069-flexible-node-initialization.md)
- [ADR-071: Proposer-Based Timestamps](./adr-071-proposer-based-timestamps.md)
- [ADR-073: Adopt LibP2P](./adr-073-libp2p.md)
- [ADR-074: Migrate Timeout Parameters to Consensus Parameters](./adr-074-timeout-params.md)
- [ADR-080: Reverse Sync](./adr-080-reverse-sync.md)

@@ -61,7 +61,7 @@ The following protocols and application features require a reliable source of ti
* Tendermint Light Clients [rely on correspondence between their known time](https://github.com/tendermint/tendermint/blob/main/spec/light-client/verification/README.md#definitions-1) and the block time for block verification.
* Tendermint Evidence validity is determined [either in terms of heights or in terms of time](https://github.com/tendermint/tendermint/blob/8029cf7a0fcc89a5004e173ec065aa48ad5ba3c8/spec/consensus/evidence.md#verification).
* Unbonding of staked assets in the Cosmos Hub [occurs after a period of 21 days](https://github.com/cosmos/governance/blob/ce75de4019b0129f6efcbb0e752cd2cc9e6136d3/params-change/Staking.md#unbondingtime).
* IBC packets can use either a [timestamp or a height to timeout packet delivery](https://docs.cosmos.network/v0.45/ibc/overview.html#acknowledgements)
* IBC packets can use either a [timestamp or a height to timeout packet delivery](https://docs.cosmos.network/v0.44/ibc/overview.html#acknowledgements)

Finally, inflation distribution in the Cosmos Hub uses an approximation of time to calculate an annual percentage rate.
This approximation of time is calculated using [block heights with an estimated number of blocks produced in a year](https://github.com/cosmos/governance/blob/master/params-change/Mint.md#blocksperyear).
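
For a concrete sense of the approximation: assuming an average block time of about 6 seconds (an illustrative figure, not from the source), blocks_per_year ≈ 31,536,000 s / 6 s ≈ 5,256,000 blocks, and the realized rate drifts whenever actual block times diverge from that estimate.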
|
||||
|
||||
@@ -1,23 +0,0 @@
|
||||
---
|
||||
order: 1
|
||||
parent:
|
||||
title: Tendermint Quality Assurance
|
||||
description: This is a report on the process followed and results obtained when running v0.34.x on testnets
|
||||
order: 2
|
||||
---
|
||||
|
||||
# Tendermint Quality Assurance
|
||||
|
||||
This directory keeps track of the process followed by the Tendermint Core team
|
||||
for Quality Assurance before cutting a release.
|
||||
This directory is to live in multiple branches. On each release branch,
|
||||
the contents of this directory reflect the status of the process
|
||||
at the time the Quality Assurance process was applied for that release.
|
||||
|
||||
File [method](./method.md) keeps track of the process followed to obtain the results
|
||||
used to decide if a release is passing the Quality Assurance process.
|
||||
The results obtained in each release are stored in their own directory.
|
||||
The following releases have undergone the Quality Assurance process:
|
||||
|
||||
* [v0.34.x](./v034/), which was tested just before releasing v0.34.22
|
||||
* [v0.37.x](./v037/), with v.34.x acting as a baseline
|
||||
@@ -1,214 +0,0 @@
---
order: 1
title: Method
---

# Method

This document provides a detailed description of the QA process.
It is intended to be used by engineers reproducing the experimental setup for future tests of Tendermint.

The (first iteration of the) QA process as described [in the RELEASES.md document][releases]
was applied to version v0.34.x in order to have a set of results acting as a benchmarking baseline.
This baseline is then compared with results obtained in later versions.

Out of the testnet-based test cases described in [the releases document][releases], we focused on two:
the _200 Node Test_ and the _Rotating Nodes Test_.

[releases]: https://github.com/tendermint/tendermint/blob/v0.37.x/RELEASES.md#large-scale-testnets
## Software Dependencies

### Infrastructure Requirements to Run the Tests

* An account at Digital Ocean (DO), with a high droplet limit (>202)
* The machine used to orchestrate the tests should have the following installed:
    * A clone of the [testnet repository][testnet-repo]
        * This repository contains all the scripts mentioned in the remainder of this section
    * [Digital Ocean CLI][doctl]
    * [Terraform CLI][Terraform]
    * [Ansible CLI][Ansible]

[testnet-repo]: https://github.com/interchainio/tendermint-testnet
[Ansible]: https://docs.ansible.com/ansible/latest/index.html
[Terraform]: https://www.terraform.io/docs
[doctl]: https://docs.digitalocean.com/reference/doctl/how-to/install/

### Requirements for Result Extraction

* Matlab or Octave
* [Prometheus][prometheus] server installed
* blockstore DB of one of the full nodes in the testnet
* Prometheus DB

[prometheus]: https://prometheus.io/
## 200 Node Testnet

### Running the test

This section explains how the tests were carried out, for reproducibility purposes.

1. [If you haven't done it before]
   Follow steps 1-4 of the `README.md` at the top of the testnet repository to configure Terraform, and `doctl`.
2. Copy file `testnets/testnet200.toml` onto `testnet.toml` (do NOT commit this change)
3. Set the variable `VERSION_TAG` in the `Makefile` to the git hash that is to be tested.
4. Follow steps 5-10 of the `README.md` to configure and start the 200 node testnet
    * WARNING: Do NOT forget to run `make terraform-destroy` as soon as you are done with the tests (see step 9)
5. As a sanity check, connect to the Prometheus node's web interface and check the graph for the `tendermint_consensus_height` metric.
   All nodes should be increasing their heights.
6. `ssh` into the `testnet-load-runner`, then copy script `script/200-node-loadscript.sh` and run it from the load runner node.
    * Before running it, you need to edit the script to provide the IP address of a full node.
      This node will receive all transactions from the load runner node.
    * This script takes about 40 minutes to run.
    * It runs 90-second-long experiments in a loop with different loads.
7. Run `make retrieve-data` to gather all relevant data from the testnet into the orchestrating machine
8. Verify that the data was collected without errors
    * at least one blockstore DB for a Tendermint validator
    * the Prometheus database from the Prometheus node
    * for extra care, you can run `zip -T` on the `prometheus.zip` file and (one of) the `blockstore.db.zip` file(s)
9. **Run `make terraform-destroy`**
    * Don't forget to type `yes`! Otherwise you're in trouble.
### Result Extraction

The method for extracting the results described here is highly manual (and exploratory) at this stage.
The Core team should improve it at every iteration to increase the amount of automation.

#### Steps

1. Unzip the blockstore into a directory
2. Extract the latency report and the raw latencies for all the experiments. Run these commands from the directory containing the blockstore
    * `go run github.com/tendermint/tendermint/test/loadtime/cmd/report@3ec6e424d --database-type goleveldb --data-dir ./ > results/report.txt`
    * `go run github.com/tendermint/tendermint/test/loadtime/cmd/report@3ec6e424d --database-type goleveldb --data-dir ./ --csv results/raw.csv`
3. File `report.txt` contains an unordered list of experiments with varying concurrent connections and transaction rate
    * Create files `report01.txt`, `report02.txt`, `report04.txt` and, for each experiment in file `report.txt`,
      copy its related lines to the filename that matches the number of connections.
    * Sort the experiments in `report01.txt` in ascending tx rate order. Likewise for `report02.txt` and `report04.txt`.
4. Generate file `report_tabbed.txt` by showing the contents of `report01.txt`, `report02.txt`, and `report04.txt` side by side (see the sketch below)
    * This effectively creates a table where rows are a particular tx rate and columns are a particular number of websocket connections.
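    A quick way to produce the side-by-side view is GNU `pr`; a minimal sketch (any column-merging tool, such as `paste`, would work equally well):

    ```bash
    # Merge the three per-connection reports into parallel columns, without headers
    pr -m -t -w 180 report01.txt report02.txt report04.txt > report_tabbed.txt
    ```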
5. Extract the raw latencies from file `raw.csv` using the following bash loop. This creates a `.csv` file and a `.dat` file per experiment.
   The format of the `.dat` files is amenable to loading them as matrices in Octave.

    ```bash
    # Collect the experiment UUIDs from the per-connection reports (sorted by ascending tx rate)
    uuids=($(cat report01.txt report02.txt report04.txt | grep '^Experiment ID: ' | awk '{ print $3 }'))
    c=0 # bash arrays are 0-indexed; starting at 1 would skip the first experiment and overrun the last
    for i in 01 02 04; do
        for j in 0025 0050 0100 0200; do
            echo $i $j $c "${uuids[$c]}"
            filename=c${i}_r${j}
            grep "${uuids[$c]}" raw.csv > ${filename}.csv
            # Keep the timestamp and latency columns (both in nanoseconds), space-separated, for Octave
            cat ${filename}.csv | tr , ' ' | awk '{ print $2, $3 }' > ${filename}.dat
            c=$((c + 1))
        done
    done
    ```
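    As a sanity check before moving to Octave, the line counts of the generated files should roughly match the `Total Valid Tx` figures in the reports (i.e., close to $c \cdot r \cdot 89$ per experiment below saturation):

    ```bash
    # Each experiment's file should contain one line per transaction
    wc -l c*_r*.csv c*_r*.dat
    ```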
6. Enter Octave
7. Load all `.dat` files generated in step 5 into matrices using this Octave code snippet

    ```octave
    conns = { "01"; "02"; "04" };
    rates = { "0025"; "0050"; "0100"; "0200" };
    for i = 1:length(conns)
      for j = 1:length(rates)
        filename = strcat("c", conns{i}, "_r", rates{j}, ".dat");
        % Each file is loaded into a matrix named after the file (e.g., c01_r0025)
        load("-ascii", filename);
      endfor
    endfor
    ```
8. Set variable `release` to the current release undergoing QA

    ```octave
    release = "v0.34.x";
    ```
9. Generate a plot with all (or some) experiments, where the X axis is the experiment time,
   and the Y axis is the latency of transactions.
   The following snippet plots all experiments.

    ```octave
    legends = {};
    hold off;
    for i = 1:length(conns)
      for j = 1:length(rates)
        data_name = strcat("c", conns{i}, "_r", rates{j});
        l = strcat("c=", conns{i}, " r=", rates{j});
        % Timestamps and latencies are in nanoseconds; divide by 1e+9 to plot seconds
        m = eval(data_name); plot((m(:,1) - min(m(:,1))) / 1e+9, m(:,2) / 1e+9, ".");
        hold on;
        legends(1, end+1) = l;
      endfor
    endfor
    legend(legends, "location", "northeastoutside");
    xlabel("experiment time (s)");
    ylabel("latency (s)");
    t = sprintf("200-node testnet - %s", release);
    title(t);
    ```
10. Consider adjusting the axes, for instance if you want to compare your results to the baseline

    ```octave
    axis([0, 100, 0, 30], "tic");
    ```
11. Use Octave's GUI menu to save the plot (e.g., as `.png`)

12. Repeat steps 9 and 10 to obtain as many plots as deemed necessary.

13. To generate a latency vs throughput plot, using the raw CSV file generated
    in step 2, follow the instructions for the [`latency_throughput.py`] script.

[`latency_throughput.py`]: ../../scripts/qa/reporting/README.md
#### Extracting Prometheus Metrics

1. Stop the Prometheus server if it is running as a service (e.g., a `systemd` unit).
2. Unzip the Prometheus database retrieved from the testnet, and move it to replace the
   local Prometheus database.
3. Start the Prometheus server and make sure no error logs appear at startup.
4. Introduce the metrics you want to gather or plot.
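For illustration, the steps above might look as follows on the orchestrating machine. This is a sketch only, assuming Prometheus runs as a `systemd` service and the paths match your environment:

```bash
# Step 1: stop the local server (example assumes a systemd service)
sudo systemctl stop prometheus
# Step 2: unpack the database retrieved from the testnet
unzip prometheus.zip -d /tmp/qa-prometheus
# Step 3: start the server against the restored data; watch the logs for errors
# (a long retention keeps the testnet's old samples from being purged)
prometheus \
    --config.file=/etc/prometheus/prometheus.yml \
    --storage.tsdb.path=/tmp/qa-prometheus \
    --storage.tsdb.retention.time=1y
```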
## Rotating Node Testnet

### Running the test

This section explains how the tests were carried out, for reproducibility purposes.

1. [If you haven't done it before]
   Follow steps 1-4 of the `README.md` at the top of the testnet repository to configure Terraform, and `doctl`.
2. Copy file `testnet_rotating.toml` onto `testnet.toml` (do NOT commit this change)
3. Set variable `VERSION_TAG` to the git hash that is to be tested.
4. Run `make terraform-apply EPHEMERAL_SIZE=25`
    * WARNING: Do NOT forget to run `make terraform-destroy` as soon as you are done with the tests
5. Follow steps 6-10 of the `README.md` to configure and start the "stable" part of the rotating node testnet
6. As a sanity check, connect to the Prometheus node's web interface and check the graph for the `tendermint_consensus_height` metric.
   All nodes should be increasing their heights.
7. On a different shell,
    * run `make runload ROTATE_CONNECTIONS=X ROTATE_TX_RATE=Y` (see the example after this list)
    * `X` and `Y` should reflect a load below the saturation point (see, e.g.,
      [this paragraph](./v034/README.md#finding-the-saturation-point) for further info)
8. Run `make rotate` to start the script that creates the ephemeral nodes, and kills them when they are caught up.
    * WARNING: If you run this command from your laptop, the laptop needs to be up and connected for the full length
      of the experiment.
9. When the height of the chain reaches 3000, stop the `make runload` script
10. When the rotate script has made two iterations (i.e., all ephemeral nodes have caught up twice)
    after height 3000 was reached, stop `make rotate`
11. Run `make retrieve-data` to gather all relevant data from the testnet into the orchestrating machine
12. Verify that the data was collected without errors
    * at least one blockstore DB for a Tendermint validator
    * the Prometheus database from the Prometheus node
    * for extra care, you can run `zip -T` on the `prometheus.zip` file and (one of) the `blockstore.db.zip` file(s)
13. **Run `make terraform-destroy`**
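As an example of step 7, this is the load used in the QA runs of `v0.34.x` and `v0.37.x`, which is below the saturation point for this testnet size:

```bash
make runload ROTATE_CONNECTIONS=4 ROTATE_TX_RATE=800
```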
Steps 8 to 10 are highly manual at the moment and will be improved in future iterations.
### Result Extraction

In order to obtain a latency plot, follow the instructions above for the 200 node experiment, but note that:

* The `report.txt` file contains only one experiment
* Therefore, there is no need for any `for` loops (see the sketch below)
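A sketch of the single-experiment extraction, derived from the loop used in the 200 node test (assuming the report was saved as `report.txt`):

```bash
# There is exactly one experiment, so extract its UUID directly
uuid=$(grep '^Experiment ID: ' report.txt | awk '{ print $3 }')
grep "$uuid" raw.csv | tr , ' ' | awk '{ print $2, $3 }' > rotating.dat
```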
As for Prometheus, the same method as for the 200 node experiment can be applied.
@@ -1,278 +0,0 @@
---
order: 1
parent:
  title: Tendermint Quality Assurance Results for v0.34.x
  description: This is a report on the results obtained when running v0.34.x on testnets
  order: 2
---

# v0.34.x
## 200 Node Testnet

### Finding the Saturation Point

The first goal when examining the results of the tests is identifying the saturation point.
The saturation point is reached with a transaction load big enough to prevent the testnet
from being stable: the load runner tries to produce slightly more transactions than can
be processed by the testnet.

The following table summarizes the results for v0.34.x, for the different experiments
(extracted from file [`v034_report_tabbed.txt`](./img/v034_report_tabbed.txt)).

The X axis of this table is `c`, the number of connections created by the load runner process to the target node.
The Y axis of this table is `r`, the rate or number of transactions issued per second.

|       |  c=1  |  c=2  |  c=4  |
| :---  | ----: | ----: | ----: |
| r=25  |  2225 |  4450 |  8900 |
| r=50  |  4450 |  8900 | 17800 |
| r=100 |  8900 | 17800 | 35600 |
| r=200 | 17800 | 35600 | 38660 |

The table shows the number of 1024-byte-long transactions that were produced by the load runner,
and processed by Tendermint, during the 90 seconds of the experiment's duration.
Each cell in the table refers to an experiment with a particular number of websocket connections (`c`)
to a chosen validator, and the number of transactions per second that the load runner
tries to produce (`r`). Note that the overall load that the tool attempts to generate is $c \cdot r$.
We can see that the saturation point is beyond the diagonal that spans cells

* `r=200,c=2`
* `r=100,c=4`

given that the total number of transactions should be close to the product of the rate, the number of connections,
and the experiment time (89 seconds, since the last batch never gets sent).

All experiments below the saturation diagonal (`r=200,c=4`) have in common that the total
number of transactions processed is noticeably less than the product $c \cdot r \cdot 89$,
which is the expected number of transactions when the system is able to deal well with the
load.
With `r=200,c=4`, we obtained 38660 whereas the theoretical number of transactions should
have been $200 \cdot 4 \cdot 89 = 71200$.

At this point, we chose an experiment at the limit of the saturation diagonal
in order to further study the performance of this release.
**The chosen experiment is `r=200,c=2`**.

This is a plot of the CPU load (average over 1 minute, as output by `top`) of the load runner for `r=200,c=2`,
where we can see that the load stays close to 0 most of the time.

![load-load-runner](./img/v034_r200c2_load-runner.png)
### Examining latencies

The method described [here](../method.md) allows us to plot the latencies of transactions
for all experiments.

![all-latencies](./img/v034_200node_latencies.png)

As we can see, even the experiments beyond the saturation diagonal managed to keep
transaction latency stable (i.e., not constantly increasing).
Our interpretation for this is that contention within Tendermint was propagated,
via the websockets, to the load runner,
hence the load runner could only produce a fraction of the target load.

Further examination of the Prometheus data (see below) showed that the mempool contained many transactions
at steady state, but did not grow much without quickly returning to this steady state. This demonstrates
that the transactions were processed by the Tendermint network at least as quickly as they
were submitted to the mempool. Finally, the test script made sure that, at the end of an experiment, the
mempool was empty, so that all transactions submitted to the chain were processed.

Note also that the number of points present in the plot appears to be much smaller than expected given the
number of transactions in each experiment, particularly close to or above the saturation diagonal.
This is a visual effect of the plot; what appear to be points are actually potentially huge
clusters of points. To corroborate this, we have zoomed in on the plot above by setting (carefully chosen)
tiny axis intervals. The cluster shown below looks like a single point in the plot above.

![all-latencies-zoomed](./img/v034_200node_latencies_zoomed.png)

The plot of latencies can be used as a baseline to compare with other releases.

The following plot summarizes average latencies versus overall throughputs
across different numbers of WebSocket connections to the node into which
transactions are being loaded.

![latency-vs-throughput](./img/v034_latency_throughput.png)
### Prometheus Metrics on the Chosen Experiment

As mentioned [above](#finding-the-saturation-point), the chosen experiment is `r=200,c=2`.
This section further examines key metrics for this experiment extracted from Prometheus data.

#### Mempool Size

The mempool size, a count of the number of transactions in the mempool, was shown to be stable and homogeneous
at all full nodes. It did not exhibit any unconstrained growth.
The plot below shows the evolution over time of the cumulative number of transactions inside all full nodes' mempools
at a given time.
The two spikes that can be observed correspond to a period where consensus instances proceeded beyond the initial round
at some nodes.

![mempool-cumulative](./img/v034_r200c2_mempool_size.png)

The plot below shows the evolution of the average over all full nodes, which oscillates between 1500 and 2000
outstanding transactions.

![mempool-avg](./img/v034_r200c2_mempool_size_avg.png)

The peaks observed coincide with the moments when some nodes proceeded beyond the initial round of consensus (see below).
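Both mempool views can be reproduced with queries against the Prometheus HTTP API. A sketch, assuming the Prometheus server restored during result extraction listens on `localhost:9090`:

```bash
# Cumulative mempool size over all nodes, and the average per node
curl -s 'http://localhost:9090/api/v1/query?query=sum(tendermint_mempool_size)'
curl -s 'http://localhost:9090/api/v1/query?query=avg(tendermint_mempool_size)'
```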
#### Peers

The number of peers was stable at all nodes.
It was higher for the seed nodes (around 140) than for the rest (between 21 and 74).
The fact that non-seed nodes reach more than 50 peers is due to #9548.

![peers](./img/v034_r200c2_peers.png)
#### Consensus Rounds per Height

Most heights took just one round, but some nodes needed to advance to round 1 at some point.

![rounds](./img/v034_r200c2_rounds.png)
#### Blocks Produced per Minute, Transactions Processed per Minute

The blocks produced per minute are the slope of this plot.

![height](./img/v034_r200c2_height.png)

Over a period of 2 minutes, the height goes from 530 to 569.
This results in an average of 19.5 blocks produced per minute.

The transactions processed per minute are the slope of this plot.

![total-txs](./img/v034_r200c2_total-txs.png)

Over a period of 2 minutes, the total goes from 64525 to 100125 transactions,
resulting in 17800 transactions per minute. However, we can see in the plot that
all transactions in the load are processed long before the two minutes.
If we adjust the time window when transactions are processed (approx. 105 seconds),
we obtain 20343 transactions per minute.
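The same slope can also be read directly from Prometheus. A sketch of the blocks-per-minute computation (the height metric is a gauge, hence `delta` rather than `rate`), again assuming a local server on `localhost:9090`:

```bash
# Blocks per minute, averaged over a 2-minute window
curl -s 'http://localhost:9090/api/v1/query?query=delta(tendermint_consensus_height[2m])/2'
```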
#### Memory Resident Set Size

The Resident Set Size of all monitored processes is plotted below.

![rss](./img/v034_r200c2_rss.png)

The average over all processes oscillates around 1.2 GiB and does not demonstrate unconstrained growth.

![rss-avg](./img/v034_r200c2_rss_avg.png)
#### CPU utilization

The best metric from Prometheus to gauge CPU utilization in a Unix machine is `load1`,
as it usually appears in the
[output of `top`](https://www.digitalocean.com/community/tutorials/load-average-in-linux).

![cpu](./img/v034_r200c2_load1.png)

It is contained below 5 in most cases, which is generally considered acceptable load.

### Test Result

**Result: N/A** (v0.34.x is the baseline)

Date: 2022-10-14

Version: 3ec6e424d6ae4c96867c2dcf8310572156068bb6
## Rotating Node Testnet

For this testnet, we will use a load that can safely be considered below the saturation
point for the size of this testnet (between 13 and 38 full nodes): `c=4,r=800`.

N.B.: The version of Tendermint used for these tests is affected by #9539.
However, the reduced load that reaches the mempools is orthogonal to the functionality
we are focusing on here.
### Latencies

The plot of all latencies can be seen in the following plot.

![rotating-all-latencies](./img/v034_rotating_latencies.png)

We can observe some very high latencies towards the end of the test.
Suspecting duplicate transactions, we examined the raw latencies file
and discovered more than 100K duplicate transactions.

The following plot shows the latencies file where all duplicate transactions have
been removed, i.e., only the first occurrence of a duplicate transaction is kept.

![rotating-all-latencies-uniq](./img/v034_rotating_latencies_uniq.png)

This problem, present in `v0.34.x`, will need to be addressed, perhaps in the same way
we addressed it when running the 200 node test with high loads: by increasing the `cache_size`
configuration parameter.
### Prometheus Metrics

The set of metrics shown here is smaller than for the 200 node experiment.
We are only interested in those on which the catch-up process (blocksync) may have an impact.
#### Blocks and Transactions per minute

Just as shown for the 200 node test, the blocks produced per minute are the gradient of this plot.

![rotating-heights](./img/v034_rotating_heights.png)

Over a period of 5229 seconds, the height goes from 2 to 3638.
This results in an average of 41 blocks produced per minute.

The following plot shows only the heights reported by ephemeral nodes
(which are also included in the plot above). Note that the _height_ metric
is only shown _once the node has switched to consensus_, hence the gaps
when nodes are killed, wiped out, started from scratch, and catching up.

![rotating-heights-ephe](./img/v034_rotating_heights_ephe.png)

The transactions processed per minute are the gradient of this plot.

![rotating-total-txs](./img/v034_rotating_total-txs.png)

The small lines we see periodically close to `y=0` are the transactions that
ephemeral nodes start processing when they are caught up.

Over a period of 5229 seconds, the total goes from 0 to 387697 transactions,
resulting in 4449 transactions per minute. We can see some abrupt changes in
the plot's gradient. This will need to be investigated.
#### Peers

The plot below shows the evolution in peers throughout the experiment.
The periodic changes observed are due to the ephemeral nodes being stopped,
wiped out, and recreated.

![rotating-peers](./img/v034_rotating_peers.png)

The validators' plots are concentrated at the higher part of the graph, whereas the ephemeral nodes
are mostly at the lower part.
#### Memory Resident Set Size

The average Resident Set Size (RSS) over all processes seems stable, growing slightly toward the end.
This might be related to the increase in transaction load observed above.

![rotating-rss-avg](./img/v034_rotating_rss_avg.png)

The memory taken by the validators and the ephemeral nodes (when they are up) is comparable.
#### CPU utilization

The plot shows metric `load1` for all nodes.

![rotating-load1](./img/v034_rotating_load1.png)

It is contained under 5 most of the time, which is considered normal load.
The purple line, which follows a different pattern, is the validator receiving all
transactions, via RPC, from the load runner process.

### Test Result

**Result: N/A**

Date: 2022-10-10

Version: a28c987f5a604ff66b515dd415270063e6fb069d
@@ -1,52 +0,0 @@
Experiment ID: 3d5cf4ef-1a1a-4b46-aa2d-da5643d2e81e │Experiment ID: 80e472ec-13a1-4772-a827-3b0c907fb51d │Experiment ID: 07aca6cf-c5a4-4696-988f-e3270fc6333b
│ │
Connections: 1 │ Connections: 2 │ Connections: 4
Rate: 25 │ Rate: 25 │ Rate: 25
Size: 1024 │ Size: 1024 │ Size: 1024
│ │
Total Valid Tx: 2225 │ Total Valid Tx: 4450 │ Total Valid Tx: 8900
Total Negative Latencies: 0 │ Total Negative Latencies: 0 │ Total Negative Latencies: 0
Minimum Latency: 599.404362ms │ Minimum Latency: 448.145181ms │ Minimum Latency: 412.485729ms
Maximum Latency: 3.539686885s │ Maximum Latency: 3.237392049s │ Maximum Latency: 12.026665368s
Average Latency: 1.441485349s │ Average Latency: 1.441267946s │ Average Latency: 2.150192457s
Standard Deviation: 541.049869ms │ Standard Deviation: 525.040007ms │ Standard Deviation: 2.233852478s
│ │
Experiment ID: 953dc544-dd40-40e8-8712-20c34c3ce45e │Experiment ID: d31fc258-16e7-45cd-9dc8-13ab87bc0b0a │Experiment ID: 15d90a7e-b941-42f4-b411-2f15f857739e
│ │
Connections: 1 │ Connections: 2 │ Connections: 4
Rate: 50 │ Rate: 50 │ Rate: 50
Size: 1024 │ Size: 1024 │ Size: 1024
│ │
Total Valid Tx: 4450 │ Total Valid Tx: 8900 │ Total Valid Tx: 17800
Total Negative Latencies: 0 │ Total Negative Latencies: 0 │ Total Negative Latencies: 0
Minimum Latency: 482.046942ms │ Minimum Latency: 435.458913ms │ Minimum Latency: 510.746448ms
Maximum Latency: 3.761483455s │ Maximum Latency: 7.175583584s │ Maximum Latency: 6.551497882s
Average Latency: 1.450408183s │ Average Latency: 1.681673116s │ Average Latency: 1.738083875s
Standard Deviation: 587.560056ms │ Standard Deviation: 1.147902047s │ Standard Deviation: 943.46522ms
│ │
Experiment ID: 9a0b9980-9ce6-4db5-a80a-65ca70294b87 │Experiment ID: df8fa4f4-80af-4ded-8a28-356d15018b43 │Experiment ID: d0e41c2c-89c0-4f38-8e34-ca07adae593a
│ │
Connections: 1 │ Connections: 2 │ Connections: 4
Rate: 100 │ Rate: 100 │ Rate: 100
Size: 1024 │ Size: 1024 │ Size: 1024
│ │
Total Valid Tx: 8900 │ Total Valid Tx: 17800 │ Total Valid Tx: 35600
Total Negative Latencies: 0 │ Total Negative Latencies: 0 │ Total Negative Latencies: 0
Minimum Latency: 477.417219ms │ Minimum Latency: 564.29247ms │ Minimum Latency: 840.71089ms
Maximum Latency: 6.63744785s │ Maximum Latency: 6.988553219s │ Maximum Latency: 9.555312398s
Average Latency: 1.561216103s │ Average Latency: 1.76419063s │ Average Latency: 3.200941683s
Standard Deviation: 1.011333552s │ Standard Deviation: 1.068459423s │ Standard Deviation: 1.732346601s
│ │
Experiment ID: 493df3ee-4a36-4bce-80f8-6d65da66beda │Experiment ID: 13060525-f04f-46f6-8ade-286684b2fe50 │Experiment ID: 1777cbd2-8c96-42e4-9ec7-9b21f2225e4d
│ │
Connections: 1 │ Connections: 2 │ Connections: 4
Rate: 200 │ Rate: 200 │ Rate: 200
Size: 1024 │ Size: 1024 │ Size: 1024
│ │
Total Valid Tx: 17800 │ Total Valid Tx: 35600 │ Total Valid Tx: 38660
Total Negative Latencies: 0 │ Total Negative Latencies: 0 │ Total Negative Latencies: 0
Minimum Latency: 493.705261ms │ Minimum Latency: 955.090573ms │ Minimum Latency: 1.9485821s
Maximum Latency: 7.440921872s │ Maximum Latency: 10.086673491s │ Maximum Latency: 17.73103976s
Average Latency: 1.875510582s │ Average Latency: 3.438130099s │ Average Latency: 8.143862237s
Standard Deviation: 1.304336995s │ Standard Deviation: 1.966391574s │ Standard Deviation: 3.943140002s
@@ -1,326 +0,0 @@
---
order: 1
parent:
  title: Tendermint Quality Assurance Results for v0.37.x
  description: This is a report on the results obtained when running v0.37.x on testnets
  order: 2
---

# v0.37.x
## Issues discovered

During this iteration of the QA process, the following issues were found:

* (critical, fixed) [\#9533] - This bug caused full nodes to sometimes get stuck
  when blocksyncing, requiring a manual restart to unblock them. Importantly,
  this bug was also present in v0.34.x and the fix was also backported in
  [\#9534].
* (critical, fixed) [\#9539] - `loadtime` is very likely to include more than
  one "=" character in transactions, which is rejected by the e2e application.
* (critical, fixed) [\#9581] - An absent Prometheus label makes Tendermint crash
  when enabling Prometheus metric collection.
* (non-critical, not fixed) [\#9548] - Full nodes can go over 50 connected
  peers, which is not intended by the default configuration.
* (non-critical, not fixed) [\#9537] - With the default mempool cache setting,
  duplicated transactions are not rejected when gossipped and eventually flood
  all mempools. The 200 node testnets were thus run with a cache size of 200000 (as
  opposed to the default 10000; see the sketch below).
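A minimal sketch of the workaround applied for [\#9537], assuming the node's home directory is `$TMHOME` and the default `config.toml` layout:

```bash
# Bump the mempool cache so gossipped duplicates are still recognized as such
# (the QA testnets used 200000 instead of the default 10000)
sed -i 's/^cache_size = .*/cache_size = 200000/' "$TMHOME/config/config.toml"
```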
## 200 Node Testnet

### Finding the Saturation Point

The first goal is to identify the saturation point and compare it with the baseline (v0.34.x).
For further details, see [this paragraph](../v034/README.md#finding-the-saturation-point)
in the baseline version.

The following table summarizes the results for v0.37.x, for the different experiments
(extracted from file [`v037_report_tabbed.txt`](./img/v037_report_tabbed.txt)).

The X axis of this table is `c`, the number of connections created by the load runner process to the target node.
The Y axis of this table is `r`, the rate or number of transactions issued per second.

|       |  c=1  |  c=2  |  c=4  |
| :---  | ----: | ----: | ----: |
| r=25  |  2225 |  4450 |  8900 |
| r=50  |  4450 |  8900 | 17800 |
| r=100 |  8900 | 17800 | 35600 |
| r=200 | 17800 | 35600 | 38660 |

For comparison, this is the table with the baseline version.

|       |  c=1  |  c=2  |  c=4  |
| :---  | ----: | ----: | ----: |
| r=25  |  2225 |  4450 |  8900 |
| r=50  |  4450 |  8900 | 17800 |
| r=100 |  8900 | 17800 | 35400 |
| r=200 | 17800 | 35600 | 37358 |
The saturation point is beyond the diagonal:

* `r=200,c=2`
* `r=100,c=4`

which is at the same place as the baseline. For more details on the saturation point, see
[this paragraph](../v034/README.md#finding-the-saturation-point) in the baseline version.

The experiment chosen to examine Prometheus metrics is the same as in the baseline:
**`r=200,c=2`**.

The load runner's CPU load was negligible (near 0) when running `r=200,c=2`.
### Examining latencies

The method described [here](../method.md) allows us to plot the latencies of transactions
for all experiments.

![all-latencies](./img/v037_200node_latencies.png)

The data seen in the plot is similar to that of the baseline.

![all-latencies](./img/v034_200node_latencies.png)

Therefore, for further details on these plots,
see [this paragraph](../v034/README.md#examining-latencies) in the baseline version.

The following plot summarizes average latencies versus overall throughputs
across different numbers of WebSocket connections to the node into which
transactions are being loaded.

![latency-vs-throughput](./img/v037_latency_throughput.png)

This is similar to the baseline plot:

![latency-vs-throughput](./img/v034_latency_throughput.png)
### Prometheus Metrics on the Chosen Experiment

As mentioned [above](#finding-the-saturation-point), the chosen experiment is `r=200,c=2`.
This section further examines key metrics for this experiment extracted from Prometheus data.

#### Mempool Size

The mempool size, a count of the number of transactions in the mempool, was shown to be stable and homogeneous
at all full nodes. It did not exhibit any unconstrained growth.
The plot below shows the evolution over time of the cumulative number of transactions inside all full nodes' mempools
at a given time.

![mempool-cumulative](./img/v037_r200c2_mempool_size.png)

The plot below shows the evolution of the average over all full nodes, which oscillates between 1500 and 2000
outstanding transactions.

![mempool-avg](./img/v037_r200c2_mempool_size_avg.png)

The peaks observed coincide with the moments when some nodes reached round 1 of consensus (see below).

**These plots yield similar results to the baseline**:

![mempool-cumulative](./img/v034_r200c2_mempool_size.png)

![mempool-avg](./img/v034_r200c2_mempool_size_avg.png)
#### Peers

The number of peers was stable at all nodes.
It was higher for the seed nodes (around 140) than for the rest (between 16 and 78).

![peers](./img/v037_r200c2_peers.png)

Just as in the baseline, the fact that non-seed nodes reach more than 50 peers is due to [\#9548].

**This plot yields similar results to the baseline**:

![peers](./img/v034_r200c2_peers.png)
#### Consensus Rounds per Height

Most heights took just one round, but some nodes needed to advance to round 1 at some point.

![rounds](./img/v037_r200c2_rounds.png)

**This plot yields slightly better results than the baseline**:

![rounds](./img/v034_r200c2_rounds.png)
#### Blocks Produced per Minute, Transactions Processed per Minute

The blocks produced per minute are the gradient of this plot.

![height](./img/v037_r200c2_height.png)

Over a period of 2 minutes, the height goes from 477 to 524.
This results in an average of 23.5 blocks produced per minute.

The transactions processed per minute are the gradient of this plot.

![total-txs](./img/v037_r200c2_total-txs.png)

Over a period of 2 minutes, the total goes from 64525 to 100125 transactions,
resulting in 17800 transactions per minute. However, we can see in the plot that
all transactions in the load are processed long before the two minutes.
If we adjust the time window when transactions are processed (approx. 90 seconds),
we obtain 23733 transactions per minute.

**These plots yield similar results to the baseline**:

![height](./img/v034_r200c2_height.png)

![total-txs](./img/v034_r200c2_total-txs.png)
#### Memory Resident Set Size

The Resident Set Size of all monitored processes is plotted below.

![rss](./img/v037_r200c2_rss.png)

The average over all processes oscillates around 380 MiB and does not demonstrate unconstrained growth.

![rss-avg](./img/v037_r200c2_rss_avg.png)

**These plots yield similar results to the baseline**:

![rss](./img/v034_r200c2_rss.png)

![rss-avg](./img/v034_r200c2_rss_avg.png)
#### CPU utilization

The best metric from Prometheus to gauge CPU utilization in a Unix machine is `load1`,
as it usually appears in the
[output of `top`](https://www.digitalocean.com/community/tutorials/load-average-in-linux).

![load1](./img/v037_r200c2_load1.png)

It is contained below 5 on most nodes.

**This plot yields similar results to the baseline**:

![load1](./img/v034_r200c2_load1.png)

### Test Result

**Result: PASS**

Date: 2022-10-14

Version: 1cf9d8e276afe8595cba960b51cd056514965fd1
## Rotating Node Testnet

We use the same load as in the baseline: `c=4,r=800`.

Just as in the baseline tests, the version of Tendermint used for these tests is affected by [\#9539].
See this paragraph in the [baseline report](../v034/README.md#rotating-node-testnet) for further details.
Finally, note that this setup allows for a fairer comparison between this version and the baseline.
### Latencies

The plot of all latencies can be seen here.

![rotating-all-latencies](./img/v037_rotating_latencies.png)

This is similar to the baseline:

![rotating-all-latencies](./img/v034_rotating_latencies_uniq.png)

Note that we are comparing against the baseline plot with _unique_
transactions. This is because the problem with duplicate transactions
detected during the baseline experiment did not show up for `v0.37`;
this is, however, _not_ proof that the problem is absent in `v0.37`.
### Prometheus Metrics

The set of metrics shown here matches those shown for the baseline (`v0.34`) for the same experiment.
We also show the baseline results for comparison.
#### Blocks and Transactions per minute

The blocks produced per minute are the gradient of this plot.

![height](./img/v037_rotating_height.png)

Over a period of 4446 seconds, the height goes from 5 to 3323.
This results in an average of 45 blocks produced per minute,
which is similar to the baseline, shown below.

![height](./img/v034_rotating_height.png)

The following two plots show only the heights reported by ephemeral nodes.
The second plot is the baseline plot for comparison.

![height-ephe](./img/v037_rotating_height_ephe.png)

![height-ephe](./img/v034_rotating_height_ephe.png)

By the length of the segments, we can see that ephemeral nodes in `v0.37`
catch up slightly faster.

The transactions processed per minute are the gradient of this plot.

![total-txs](./img/v037_rotating_total-txs.png)

Over a period of 3852 seconds, the total goes from 597 to 267298 transactions in one of the validators,
resulting in 4154 transactions per minute, which is slightly lower than the baseline,
although the baseline had to deal with duplicate transactions.

For comparison, this is the baseline plot.

![total-txs](./img/v034_rotating_total-txs.png)
#### Peers

The plot below shows the evolution of the number of peers throughout the experiment.

![peers](./img/v037_rotating_peers.png)

This is the baseline plot, for comparison.

![peers](./img/v034_rotating_peers.png)

The plotted values and their evolution are comparable in both plots.

For further details on these plots, see the baseline report.
#### Memory Resident Set Size

The average Resident Set Size (RSS) over all processes looks slightly more stable
on `v0.37` (first plot) than on the baseline (second plot).

![rss-avg](./img/v037_rotating_rss_avg.png)

![rss-avg](./img/v034_rotating_rss_avg.png)

The memory taken by the validators and the ephemeral nodes when they are up is comparable (not shown in the plots),
just as observed in the baseline.
#### CPU utilization

The plot shows metric `load1` for all nodes.

![load1](./img/v037_rotating_load1.png)

This is the baseline plot.

![load1](./img/v034_rotating_load1.png)

In both cases, it is contained under 5 most of the time, which is considered normal load.
The green line in the `v0.37` plot and the purple line in the baseline plot (`v0.34`)
correspond to the validator receiving all transactions, via RPC, from the load runner process.
In both cases, they oscillate around 5 (normal load). The main difference is that the other
nodes are generally less loaded in `v0.37`.

### Test Result

**Result: PASS**

Date: 2022-10-10

Version: 155110007b9d8b83997a799016c1d0844c8efbaf
[\#9533]: https://github.com/tendermint/tendermint/pull/9533
[\#9534]: https://github.com/tendermint/tendermint/pull/9534
[\#9539]: https://github.com/tendermint/tendermint/issues/9539
[\#9548]: https://github.com/tendermint/tendermint/issues/9548
[\#9537]: https://github.com/tendermint/tendermint/issues/9537
[\#9581]: https://github.com/tendermint/tendermint/issues/9581