Mirror of https://github.com/tendermint/tendermint.git (synced 2026-01-11 23:32:50 +00:00)

Compare commits: stepper-de...wb/issue-9 (20 commits)
bf1a8e180a, 8b57f6bb7c, 114e521e9a, b490437bb2, 63181ba692,
0ba6562578, 735171419e, 9ad3132dc6, e768dee0cb, 006e14e8ea,
f02d22cf8f, 729d3e1885, 8960a19822, d6b8a8c052, 9df9615bbf,
7775dfa7f9, 2ff11e5bc2, 70fd943bdf, f1835338d9, bdf0217bd8
.github/dependabot.yml (vendored, 20 changed lines)
@@ -10,6 +10,16 @@ updates:
       - T:dependencies
       - S:automerge
+
+  - package-ecosystem: github-actions
+    directory: "/"
+    schedule:
+      interval: weekly
+    target-branch: "v0.37.x"
+    open-pull-requests-limit: 10
+    labels:
+      - T:dependencies
+      - S:automerge

   - package-ecosystem: github-actions
     directory: "/"
     schedule:

@@ -40,6 +50,16 @@ updates:
       - T:dependencies
      - S:automerge
+
+  - package-ecosystem: gomod
+    directory: "/"
+    schedule:
+      interval: weekly
+    target-branch: "v0.37.x"
+    open-pull-requests-limit: 10
+    labels:
+      - T:dependencies
+      - S:automerge

   - package-ecosystem: gomod
     directory: "/"
     schedule:
.github/mergify.yml (vendored, 8 changed lines)
@@ -17,6 +17,14 @@ pull_request_rules:
         {{ title }} (#{{ number }})

         {{ body }}
+  - name: backport patches to v0.37.x branch
+    conditions:
+      - base=main
+      - label=S:backport-to-v0.37.x
+    actions:
+      backport:
+        branches:
+          - v0.37.x
   - name: backport patches to v0.34.x branch
     conditions:
       - base=main
.github/workflows/e2e-nightly-37x.yml (vendored, new file, 104 lines)
@@ -0,0 +1,104 @@
# Runs randomly generated E2E testnets nightly on the v0.37.x branch.

# !! This file should be kept in sync with the e2e-nightly-main.yml file,
# modulo changes to the version labels.

name: e2e-nightly-37x
on:
  schedule:
    - cron: '0 2 * * *'

jobs:
  e2e-nightly-test:
    # Run parallel jobs for the listed testnet groups (must match the
    # ./build/generator -g flag)
    strategy:
      fail-fast: false
      matrix:
        group: ['00', '01', '02', '03', '04']
    runs-on: ubuntu-latest
    timeout-minutes: 60
    steps:
      - uses: actions/setup-go@v3
        with:
          go-version: '1.18'

      - uses: actions/checkout@v3
        with:
          ref: 'v0.37.x'

      - name: Capture git repo info
        id: git-info
        run: |
          echo "::set-output name=branch::`git branch --show-current`"
          echo "::set-output name=commit::`git rev-parse HEAD`"

      - name: Build
        working-directory: test/e2e
        # Run make jobs in parallel, since we can't run steps in parallel.
        run: make -j2 docker generator runner tests

      - name: Generate testnets
        working-directory: test/e2e
        # When changing -g, also change the matrix groups above
        run: ./build/generator -g 5 -d networks/nightly/

      - name: Run ${{ matrix.p2p }} p2p testnets
        working-directory: test/e2e
        run: ./run-multiple.sh networks/nightly/*-group${{ matrix.group }}-*.toml

    outputs:
      git-branch: ${{ steps.git-info.outputs.branch }}
      git-commit: ${{ steps.git-info.outputs.commit }}

  e2e-nightly-fail:
    needs: e2e-nightly-test
    if: ${{ failure() }}
    runs-on: ubuntu-latest
    steps:
      - name: Notify Slack on failure
        uses: slackapi/slack-github-action@v1.21.0
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
          SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
          BRANCH: ${{ needs.e2e-nightly-test.outputs.git-branch }}
          RUN_URL: "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
          COMMIT_URL: "${{ github.server_url }}/${{ github.repository }}/commit/${{ needs.e2e-nightly-test.outputs.git-commit }}"
        with:
          payload: |
            {
              "blocks": [
                {
                  "type": "section",
                  "text": {
                    "type": "mrkdwn",
                    "text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMIT_URL }}|commit> that caused the failure."
                  }
                }
              ]
            }

  e2e-nightly-success:  # may turn this off once they seem to pass consistently
    needs: e2e-nightly-test
    if: ${{ success() }}
    runs-on: ubuntu-latest
    steps:
      - name: Notify Slack on success
        uses: slackapi/slack-github-action@v1.21.0
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
          SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
          BRANCH: ${{ needs.e2e-nightly-test.outputs.git-branch }}
        with:
          payload: |
            {
              "blocks": [
                {
                  "type": "section",
                  "text": {
                    "type": "mrkdwn",
                    "text": ":white_check_mark: Nightly E2E tests for `${{ env.BRANCH }}` passed."
                  }
                }
              ]
            }
.gitignore (vendored, 1 changed line)
@@ -38,6 +38,7 @@ terraform.tfstate
 terraform.tfstate.backup
 terraform.tfstate.d
 test/app/grpc_client
+test/loadtime/build
 test/e2e/build
 test/e2e/networks/*/
 test/logs
@@ -36,6 +36,7 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi

 ### IMPROVEMENTS

+- [rpc] \#9276 Added `header` and `header_by_hash` queries to the RPC client (@samricotta)
 - [abci] \#5706 Added `AbciVersion` to `RequestInfo` allowing applications to check ABCI version when connecting to Tendermint. (@marbar3778)

 ### BUG FIXES
RELEASES.md (91 changed lines)
@@ -22,8 +22,8 @@ We use Mergify's [backport feature](https://mergify.io/features/backports) to
 automatically backport to the needed branch. There should be a label for any
 backport branch that you'll be targeting. To notify the bot to backport a pull
 request, mark the pull request with the label corresponding to the correct
-backport branch. For example, to backport to v0.35.x, add the label
-`S:backport-to-v0.35.x`. Once the original pull request is merged, the bot will
+backport branch. For example, to backport to v0.38.x, add the label
+`S:backport-to-v0.38.x`. Once the original pull request is merged, the bot will
 try to cherry-pick the pull request to the backport branch. If the bot fails to
 backport, it will open a pull request. The author of the original pull request
 is responsible for solving the conflicts and merging the pull request.

@@ -40,37 +40,52 @@ branches tags. See [#6072](https://github.com/tendermint/tendermint/pull/6072)
 for more context.

 In the following example, we'll assume that we're making a backport branch for
-the 0.35.x line.
+the 0.38.x line.

 1. Start on `main`

-2. Create and push the backport branch:
+2. Ensure that there is a [branch protection
+   rule](https://github.com/tendermint/tendermint/settings/branches) for the
+   branch you are about to create (you will need admin access to the repository
+   in order to do this).
+
+3. Create and push the backport branch:
    ```sh
-   git checkout -b v0.35.x
-   git push origin v0.35.x
+   git checkout -b v0.38.x
+   git push origin v0.38.x
    ```

-3. Create a PR to update the documentation directory for the backport branch.
+4. Create a PR to update the documentation directory for the backport branch.

-   We only maintain RFC and ADR documents on main, to avoid confusion. In
-   addition, we rewrite Markdown URLs pointing to main to point to the
-   backport branch, so that generated documentation will link to the correct
-   versions of files elsewhere in the repository. For context on the latter, see
-   https://github.com/tendermint/tendermint/issues/7675.
+   We rewrite any URLs pointing to `main` to point to the backport branch,
+   so that generated documentation will link to the correct versions of files
+   elsewhere in the repository. The following files are to be excluded from this
+   search:

-   To prepare the PR:
+   * [`README.md`](./README.md)
+   * [`CHANGELOG.md`](./CHANGELOG.md)
+   * [`UPGRADING.md`](./UPGRADING.md)
+
+   The following links are to always point to `main`, regardless of where they
+   occur in the codebase:
+
+   * `https://github.com/tendermint/tendermint/blob/main/LICENSE`
+
+   Be sure to search for all of the following links and replace `main` with your
+   corresponding branch label or version (e.g. `v0.38.x` or `v0.38`):
+
+   * `github.com/tendermint/tendermint/blob/main` ->
+     `github.com/tendermint/tendermint/blob/v0.38.x`
+   * `github.com/tendermint/tendermint/tree/main` ->
+     `github.com/tendermint/tendermint/tree/v0.38.x`
+   * `docs.tendermint.com/main` -> `docs.tendermint.com/v0.38`
+
+   Once you have updated all of the relevant documentation:
    ```sh
    # Remove the RFC and ADR documents from the backport.
    # We only maintain these on main to avoid confusion.
    git rm -r docs/rfc docs/architecture

    # Update absolute links to point to the backport.
    go run ./scripts/linkpatch -recur -target v0.35.x -skip-path docs/DOCS_README.md,docs/README.md docs

    # Create and push the PR.
-   git checkout -b update-docs-v035x
-   git commit -m "Update docs for v0.35.x backport branch." docs
-   git push -u origin update-docs-v035x
+   git checkout -b update-docs-v038x
+   git commit -m "Update docs for v0.38.x backport branch."
+   git push -u origin update-docs-v038x
    ```

 Be sure to merge this PR before making other changes on the newly-created

@@ -82,15 +97,15 @@ After doing these steps, go back to `main` and do the following:
    it up to GitHub.
    For example:
    ```sh
-   git tag -a v0.36.0-dev -m "Development base for Tendermint v0.36."
-   git push origin v0.36.0-dev
+   git tag -a v0.39.0-dev -m "Development base for Tendermint v0.39."
+   git push origin v0.39.0-dev
    ```

 2. Create a new workflow to run e2e nightlies for the new backport branch. (See
    [e2e-nightly-main.yml][e2e] for an example.)

 3. Add a new section to the Mergify config (`.github/mergify.yml`) to enable the
-   backport bot to work on this branch, and add a corresponding `S:backport-to-v0.35.x`
+   backport bot to work on this branch, and add a corresponding `S:backport-to-v0.38.x`
    [label](https://github.com/tendermint/tendermint/labels) so the bot can be triggered.

 4. Add a new section to the Dependabot config (`.github/dependabot.yml`) to

@@ -106,7 +121,7 @@ create a release candidate (RC) for our friends and partners to test out. We use
 git tags to create RCs, and we build them off of backport branches.

 Tags for RCs should follow the "standard" release naming conventions, with
-`-rcX` at the end (for example, `v0.35.0-rc0`).
+`-rcX` at the end (for example, `v0.38.0-rc0`).

 (Note that branches and tags _cannot_ have the same names, so it's important
 that these branches have distinct names from the tags/release names.)

@@ -114,7 +129,7 @@ that these branches have distinct names from the tags/release names.)
 If this is the first RC for a minor release, you'll have to make a new backport
 branch (see above). Otherwise:

-1. Start from the backport branch (e.g. `v0.35.x`).
+1. Start from the backport branch (e.g. `v0.38.x`).
 2. Run the integration tests and the e2e nightlies
    (which can be triggered from the Github UI;
    e.g., https://github.com/tendermint/tendermint/actions/workflows/e2e-nightly-34x.yml).

@@ -137,9 +152,9 @@ branch (see above). Otherwise:
 4. Open a PR with these changes against the backport branch.
 5. Once these changes have landed on the backport branch, be sure to pull them back down locally.
 6. Once you have the changes locally, create the new tag, specifying a name and a tag "message":
-   `git tag -a v0.35.0-rc0 -m "Release Candidate v0.35.0-rc0"`
+   `git tag -a v0.38.0-rc0 -m "Release Candidate v0.38.0-rc0"`
 7. Push the tag back up to origin:
-   `git push origin v0.35.0-rc0`
+   `git push origin v0.38.0-rc0`
 Now the tag should be available on the repo's releases page.
 8. Future RCs will continue to be built off of this branch.

@@ -156,7 +171,7 @@ branch, as described above.
 Before performing these steps, be sure the
 [Minor Release Checklist](#minor-release-checklist) has been completed.

-1. Start on the backport branch (e.g. `v0.35.x`)
+1. Start on the backport branch (e.g. `v0.38.x`)
 2. Run integration tests (`make test_integrations`) and the e2e nightlies.
 3. Prepare the release:
    - "Squash" changes from the changelog entries for the RCs into a single entry,

@@ -172,9 +187,9 @@ Before performing these steps, be sure the
    - Bump ABCI protocol version in `version.go`, if necessary
 4. Open a PR with these changes against the backport branch.
 5. Once these changes are on the backport branch, push a tag with prepared release details.
-   This will trigger the actual release `v0.35.0`.
-   - `git tag -a v0.35.0 -m 'Release v0.35.0'`
-   - `git push origin v0.35.0`
+   This will trigger the actual release `v0.38.0`.
+   - `git tag -a v0.38.0 -m 'Release v0.38.0'`
+   - `git push origin v0.38.0`
 6. Make sure that `main` is updated with the latest `CHANGELOG.md`, `CHANGELOG_PENDING.md`, and `UPGRADING.md`.
 7. Add the release to the documentation site generator config (see
    [DOCS_README.md](./docs/DOCS_README.md) for more details). In summary:

@@ -198,7 +213,7 @@ changes may merit a release candidate.

 To create a patch release:

-1. Checkout the long-lived backport branch: `git checkout v0.35.x`
+1. Checkout the long-lived backport branch: `git checkout v0.38.x`
 2. Run integration tests (`make test_integrations`) and the nightlies.
 3. Check out a new branch and prepare the release:
    - Copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md`

@@ -209,10 +224,10 @@ To create a patch release:
    - Bump the ABCI version number, if necessary.
      (Note that ABCI follows semver, and that ABCI versions are the only versions
      which can change during patch releases, and only field additions are valid patch changes.)
-4. Open a PR with these changes that will land them back on `v0.35.x`
+4. Open a PR with these changes that will land them back on `v0.38.x`
 5. Once this change has landed on the backport branch, make sure to pull it locally, then push a tag.
-   - `git tag -a v0.35.1 -m 'Release v0.35.1'`
-   - `git push origin v0.35.1`
+   - `git tag -a v0.38.1 -m 'Release v0.38.1'`
+   - `git push origin v0.38.1`
 6. Create a pull request back to main with the CHANGELOG & version changes from the latest release.
    - Remove all `R:patch` labels from the pull requests that were included in the release.
    - Do not merge the backport branch into main.
@@ -1164,6 +1164,7 @@ func (bs *mockBlockStore) LoadBlock(height int64) *types.Block { return bs.chain
 func (bs *mockBlockStore) LoadBlockByHash(hash []byte) *types.Block {
 	return bs.chain[int64(len(bs.chain))-1]
 }
+func (bs *mockBlockStore) LoadBlockMetaByHash(hash []byte) *types.BlockMeta { return nil }
 func (bs *mockBlockStore) LoadBlockMeta(height int64) *types.BlockMeta {
 	block := bs.chain[height-1]
 	bps, err := block.MakePartSet(types.BlockPartSizeBytes)
go.mod (9 changed lines)
@@ -27,9 +27,9 @@ require (
 	github.com/ory/dockertest v3.3.5+incompatible
 	github.com/pkg/errors v0.9.1
 	github.com/prometheus/client_golang v1.13.0
-	github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0
+	github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475
 	github.com/rs/cors v1.8.2
-	github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa
+	github.com/sasha-s/go-deadlock v0.3.1
 	github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa
 	github.com/spf13/cobra v1.5.0
 	github.com/spf13/viper v1.12.0

@@ -147,6 +147,7 @@ require (
 	github.com/hashicorp/hcl v1.0.0 // indirect
 	github.com/hexops/gotextdiff v1.0.3 // indirect
 	github.com/inconshreveable/mousetrap v1.0.0 // indirect
+	github.com/informalsystems/tm-load-test v1.0.0 // indirect
 	github.com/jdxcode/netrc v0.0.0-20210204082910-926c7f70242a // indirect
 	github.com/jgautheron/goconst v1.5.1 // indirect
 	github.com/jhump/protocompile v0.0.0-20220216033700-d705409f108f // indirect

@@ -175,7 +176,7 @@ require (
 	github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
 	github.com/mbilski/exhaustivestruct v1.2.0 // indirect
 	github.com/mgechev/revive v1.2.1 // indirect
-	github.com/mimoo/StrobeGo v0.0.0-20181016162300-f8f6d4d2b643 // indirect
+	github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0 // indirect
 	github.com/mitchellh/go-homedir v1.1.0 // indirect
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
 	github.com/moby/buildkit v0.10.3 // indirect

@@ -211,6 +212,7 @@ require (
 	github.com/ryanrolds/sqlclosecheck v0.3.0 // indirect
 	github.com/sanposhiho/wastedassign/v2 v2.0.6 // indirect
 	github.com/sashamelentyev/usestdlibvars v1.8.0 // indirect
+	github.com/satori/go.uuid v1.2.0 // indirect
 	github.com/securego/gosec/v2 v2.12.0 // indirect
 	github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c // indirect
 	github.com/sirupsen/logrus v1.9.0 // indirect

@@ -255,6 +257,7 @@ require (
 	golang.org/x/term v0.0.0-20220722155259-a9ba230a4035 // indirect
 	golang.org/x/text v0.3.7 // indirect
 	golang.org/x/tools v0.1.12 // indirect
+	gonum.org/v1/gonum v0.11.0 // indirect
 	google.golang.org/genproto v0.0.0-20220725144611-272f38e5d71b // indirect
 	google.golang.org/protobuf v1.28.1 // indirect
 	gopkg.in/ini.v1 v1.66.6 // indirect
@@ -25,6 +25,8 @@ func RPCRoutes(c *lrpc.Client) map[string]*rpcserver.RPCFunc {
 	"genesis":         rpcserver.NewRPCFunc(makeGenesisFunc(c), ""),
 	"genesis_chunked": rpcserver.NewRPCFunc(makeGenesisChunkedFunc(c), ""),
 	"block":           rpcserver.NewRPCFunc(makeBlockFunc(c), "height"),
+	"header":          rpcserver.NewRPCFunc(makeHeaderFunc(c), "height"),
+	"header_by_hash":  rpcserver.NewRPCFunc(makeHeaderByHashFunc(c), "hash"),
 	"block_by_hash":   rpcserver.NewRPCFunc(makeBlockByHashFunc(c), "hash"),
 	"block_results":   rpcserver.NewRPCFunc(makeBlockResultsFunc(c), "height"),
 	"commit":          rpcserver.NewRPCFunc(makeCommitFunc(c), "height"),

@@ -108,6 +110,22 @@ func makeBlockFunc(c *lrpc.Client) rpcBlockFunc {
 	}
 }

+type rpcHeaderFunc func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultHeader, error)
+
+func makeHeaderFunc(c *lrpc.Client) rpcHeaderFunc {
+	return func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultHeader, error) {
+		return c.Header(ctx.Context(), height)
+	}
+}
+
+type rpcHeaderByHashFunc func(ctx *rpctypes.Context, hash []byte) (*ctypes.ResultHeader, error)
+
+func makeHeaderByHashFunc(c *lrpc.Client) rpcHeaderByHashFunc {
+	return func(ctx *rpctypes.Context, hash []byte) (*ctypes.ResultHeader, error) {
+		return c.HeaderByHash(ctx.Context(), hash)
+	}
+}
+
 type rpcBlockByHashFunc func(ctx *rpctypes.Context, hash []byte) (*ctypes.ResultBlock, error)

 func makeBlockByHashFunc(c *lrpc.Client) rpcBlockByHashFunc {
@@ -441,6 +441,40 @@ func (c *Client) BlockResults(ctx context.Context, height *int64) (*ctypes.Resul
 	return res, nil
 }

+// Header fetches and verifies the header directly via the light client
+func (c *Client) Header(ctx context.Context, height *int64) (*ctypes.ResultHeader, error) {
+	lb, err := c.updateLightClientIfNeededTo(ctx, height)
+	if err != nil {
+		return nil, err
+	}
+
+	return &ctypes.ResultHeader{Header: lb.Header}, nil
+}
+
+// HeaderByHash calls rpcclient#HeaderByHash and updates the client if it's falling behind.
+func (c *Client) HeaderByHash(ctx context.Context, hash tmbytes.HexBytes) (*ctypes.ResultHeader, error) {
+	res, err := c.next.HeaderByHash(ctx, hash)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := res.Header.ValidateBasic(); err != nil {
+		return nil, err
+	}
+
+	lb, err := c.updateLightClientIfNeededTo(ctx, &res.Header.Height)
+	if err != nil {
+		return nil, err
+	}
+
+	if !bytes.Equal(lb.Header.Hash(), res.Header.Hash()) {
+		return nil, fmt.Errorf("primary header hash does not match trusted header hash. (%X != %X)",
+			lb.Header.Hash(), res.Header.Hash())
+	}
+
+	return res, nil
+}
+
 func (c *Client) Commit(ctx context.Context, height *int64) (*ctypes.ResultCommit, error) {
 	// Update the light client if we're behind and retrieve the light block at the requested height
 	// or at the latest height if no height is provided.
@@ -98,9 +98,11 @@ type baseRPCClient struct {
 	caller jsonrpcclient.Caller
 }

-var _ rpcClient = (*HTTP)(nil)
-var _ rpcClient = (*BatchHTTP)(nil)
-var _ rpcClient = (*baseRPCClient)(nil)
+var (
+	_ rpcClient = (*HTTP)(nil)
+	_ rpcClient = (*BatchHTTP)(nil)
+	_ rpcClient = (*baseRPCClient)(nil)
+)

 //-----------------------------------------------------------------------------
 // HTTP

@@ -444,6 +446,31 @@ func (c *baseRPCClient) BlockResults(
 	return result, nil
 }

+func (c *baseRPCClient) Header(ctx context.Context, height *int64) (*ctypes.ResultHeader, error) {
+	result := new(ctypes.ResultHeader)
+	params := make(map[string]interface{})
+	if height != nil {
+		params["height"] = height
+	}
+	_, err := c.caller.Call(ctx, "header", params, result)
+	if err != nil {
+		return nil, err
+	}
+	return result, nil
+}
+
+func (c *baseRPCClient) HeaderByHash(ctx context.Context, hash bytes.HexBytes) (*ctypes.ResultHeader, error) {
+	result := new(ctypes.ResultHeader)
+	params := map[string]interface{}{
+		"hash": hash,
+	}
+	_, err := c.caller.Call(ctx, "header_by_hash", params, result)
+	if err != nil {
+		return nil, err
+	}
+	return result, nil
+}
+
 func (c *baseRPCClient) Commit(ctx context.Context, height *int64) (*ctypes.ResultCommit, error) {
 	result := new(ctypes.ResultCommit)
 	params := make(map[string]interface{})
@@ -67,6 +67,8 @@ type SignClient interface {
 	Block(ctx context.Context, height *int64) (*ctypes.ResultBlock, error)
 	BlockByHash(ctx context.Context, hash []byte) (*ctypes.ResultBlock, error)
 	BlockResults(ctx context.Context, height *int64) (*ctypes.ResultBlockResults, error)
+	Header(ctx context.Context, height *int64) (*ctypes.ResultHeader, error)
+	HeaderByHash(ctx context.Context, hash bytes.HexBytes) (*ctypes.ResultHeader, error)
 	Commit(ctx context.Context, height *int64) (*ctypes.ResultCommit, error)
 	Validators(ctx context.Context, height *int64, page, perPage *int) (*ctypes.ResultValidators, error)
 	Tx(ctx context.Context, hash []byte, prove bool) (*ctypes.ResultTx, error)
@@ -169,6 +169,14 @@ func (c *Local) BlockResults(ctx context.Context, height *int64) (*ctypes.Result
 	return core.BlockResults(c.ctx, height)
 }

+func (c *Local) Header(ctx context.Context, height *int64) (*ctypes.ResultHeader, error) {
+	return core.Header(c.ctx, height)
+}
+
+func (c *Local) HeaderByHash(ctx context.Context, hash bytes.HexBytes) (*ctypes.ResultHeader, error) {
+	return core.HeaderByHash(c.ctx, hash)
+}
+
 func (c *Local) Commit(ctx context.Context, height *int64) (*ctypes.ResultCommit, error) {
 	return core.Commit(c.ctx, height)
 }
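
With these additions every client flavor (HTTP, local, and light) exposes the same header API. As a usage illustration, here is a minimal sketch; it is not part of the change, and it assumes a node listening on the default RPC address, using the `Header`/`HeaderByHash` signatures shown above:

```go
package main

import (
	"context"
	"fmt"

	rpchttp "github.com/tendermint/tendermint/rpc/client/http"
)

func main() {
	// Connect to a locally running node (assumed address).
	c, err := rpchttp.New("http://localhost:26657", "/websocket")
	if err != nil {
		panic(err)
	}
	ctx := context.Background()

	// A nil height fetches the latest header.
	latest, err := c.Header(ctx, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println("latest height:", latest.Header.Height)

	// The same header can be fetched back by its hash.
	byHash, err := c.HeaderByHash(ctx, latest.Header.Hash())
	if err != nil {
		panic(err)
	}
	fmt.Println("heights match:", byHash.Header.Height == latest.Header.Height)
}
```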
@@ -458,6 +458,51 @@ func (_m *Client) GenesisChunked(_a0 context.Context, _a1 uint) (*coretypes.Resu

 	return r0, r1
 }
+
+// Header provides a mock function with given fields: ctx, height
+func (_m *Client) Header(ctx context.Context, height *int64) (*coretypes.ResultHeader, error) {
+	ret := _m.Called(ctx, height)
+
+	var r0 *coretypes.ResultHeader
+	if rf, ok := ret.Get(0).(func(context.Context, *int64) *coretypes.ResultHeader); ok {
+		r0 = rf(ctx, height)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*coretypes.ResultHeader)
+		}
+	}
+
+	var r1 error
+	if rf, ok := ret.Get(1).(func(context.Context, *int64) error); ok {
+		r1 = rf(ctx, height)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
+// HeaderByHash provides a mock function with given fields: ctx, hash
+func (_m *Client) HeaderByHash(ctx context.Context, hash bytes.HexBytes) (*coretypes.ResultHeader, error) {
+	ret := _m.Called(ctx, hash)
+
+	var r0 *coretypes.ResultHeader
+	if rf, ok := ret.Get(0).(func(context.Context, bytes.HexBytes) *coretypes.ResultHeader); ok {
+		r0 = rf(ctx, hash)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*coretypes.ResultHeader)
+		}
+	}
+
+	var r1 error
+	if rf, ok := ret.Get(1).(func(context.Context, bytes.HexBytes) error); ok {
+		r1 = rf(ctx, hash)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}

 // Health provides a mock function with given fields: _a0
 func (_m *Client) Health(_a0 context.Context) (*coretypes.ResultHealth, error) {
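
In tests, the regenerated mock can be programmed in the usual mockery/testify style. A hedged sketch (the `mocks` and `coretypes` import paths are assumptions based on the generated code above):

```go
package mocks_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	"github.com/tendermint/tendermint/rpc/client/mocks"
	coretypes "github.com/tendermint/tendermint/rpc/core/types"
)

func TestHeaderMock(t *testing.T) {
	m := new(mocks.Client)
	// Stub Header for any context and a nil height.
	m.On("Header", mock.Anything, (*int64)(nil)).
		Return(&coretypes.ResultHeader{}, nil)

	res, err := m.Header(context.Background(), nil)
	require.NoError(t, err)
	require.NotNil(t, res)
}
```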
@@ -285,6 +285,15 @@ func TestAppCalls(t *testing.T) {
 	require.NoError(err)
 	require.Equal(block, blockByHash)

+	// check that the header matches the block hash
+	header, err := c.Header(context.Background(), &apph)
+	require.NoError(err)
+	require.Equal(block.Block.Header, *header.Header)
+
+	headerByHash, err := c.HeaderByHash(context.Background(), block.BlockID.Hash)
+	require.NoError(err)
+	require.Equal(header, headerByHash)
+
 	// now check the results
 	blockResults, err := c.BlockResults(context.Background(), &txh)
 	require.Nil(err, "%d: %+v", i, err)
@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"sort"

+	"github.com/tendermint/tendermint/libs/bytes"
 	tmmath "github.com/tendermint/tendermint/libs/math"
 	tmquery "github.com/tendermint/tendermint/libs/pubsub/query"
 	ctypes "github.com/tendermint/tendermint/rpc/core/types"

@@ -75,6 +76,38 @@ func filterMinMax(base, height, min, max, limit int64) (int64, int64, error) {
 	return min, max, nil
 }

+// Header gets block header at a given height.
+// If no height is provided, it will fetch the latest header.
+// More: https://docs.tendermint.com/master/rpc/#/Info/header
+func Header(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultHeader, error) {
+	height, err := getHeight(env.BlockStore.Height(), heightPtr)
+	if err != nil {
+		return nil, err
+	}
+
+	blockMeta := env.BlockStore.LoadBlockMeta(height)
+	if blockMeta == nil {
+		return &ctypes.ResultHeader{}, nil
+	}
+
+	return &ctypes.ResultHeader{Header: &blockMeta.Header}, nil
+}
+
+// HeaderByHash gets header by hash.
+// More: https://docs.tendermint.com/master/rpc/#/Info/header_by_hash
+func HeaderByHash(ctx *rpctypes.Context, hash bytes.HexBytes) (*ctypes.ResultHeader, error) {
+	// N.B. The hash parameter is HexBytes so that the reflective parameter
+	// decoding logic in the HTTP service will correctly translate from JSON.
+	// See https://github.com/tendermint/tendermint/issues/6802 for context.
+
+	blockMeta := env.BlockStore.LoadBlockMetaByHash(hash)
+	if blockMeta == nil {
+		return &ctypes.ResultHeader{}, nil
+	}
+
+	return &ctypes.ResultHeader{Header: &blockMeta.Header}, nil
+}
+
 // Block gets block at a given height.
 // If no height is provided, it will fetch the latest block.
 // More: https://docs.tendermint.com/main/rpc/#/Info/block
@@ -14,7 +14,7 @@ import (
 	ctypes "github.com/tendermint/tendermint/rpc/core/types"
 	rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
 	sm "github.com/tendermint/tendermint/state"
-	"github.com/tendermint/tendermint/types"
+	"github.com/tendermint/tendermint/state/mocks"
 )

 func TestBlockchainInfo(t *testing.T) {

@@ -86,7 +86,10 @@ func TestBlockResults(t *testing.T) {
 	})
 	err := env.StateStore.SaveABCIResponses(100, results)
 	require.NoError(t, err)
-	env.BlockStore = mockBlockStore{height: 100}
+	mockstore := &mocks.BlockStore{}
+	mockstore.On("Height").Return(int64(100))
+	mockstore.On("Base").Return(int64(1))
+	env.BlockStore = mockstore

 	testCases := []struct {
 		height int64

@@ -116,21 +119,3 @@ func TestBlockResults(t *testing.T) {
 		}
 	}
 }
-
-type mockBlockStore struct {
-	height int64
-}
-
-func (mockBlockStore) Base() int64                                       { return 1 }
-func (store mockBlockStore) Height() int64                               { return store.height }
-func (store mockBlockStore) Size() int64                                 { return store.height }
-func (mockBlockStore) LoadBaseMeta() *types.BlockMeta                    { return nil }
-func (mockBlockStore) LoadBlockMeta(height int64) *types.BlockMeta       { return nil }
-func (mockBlockStore) LoadBlock(height int64) *types.Block               { return nil }
-func (mockBlockStore) LoadBlockByHash(hash []byte) *types.Block          { return nil }
-func (mockBlockStore) LoadBlockPart(height int64, index int) *types.Part { return nil }
-func (mockBlockStore) LoadBlockCommit(height int64) *types.Commit        { return nil }
-func (mockBlockStore) LoadSeenCommit(height int64) *types.Commit         { return nil }
-func (mockBlockStore) PruneBlocks(height int64) (uint64, error)          { return 0, nil }
-func (mockBlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) {
-}
@@ -24,6 +24,8 @@ var Routes = map[string]*rpc.RPCFunc{
 	"block_by_hash":  rpc.NewRPCFunc(BlockByHash, "hash"),
 	"block_results":  rpc.NewRPCFunc(BlockResults, "height"),
 	"commit":         rpc.NewRPCFunc(Commit, "height"),
+	"header":         rpc.NewRPCFunc(Header, "height"),
+	"header_by_hash": rpc.NewRPCFunc(HeaderByHash, "hash"),
 	"check_tx":       rpc.NewRPCFunc(CheckTx, "tx"),
 	"tx":             rpc.NewRPCFunc(Tx, "hash,prove"),
 	"tx_search":      rpc.NewRPCFunc(TxSearch, "query,prove,page,per_page,order_by"),
@@ -39,6 +39,11 @@ type ResultBlock struct {
 	Block *types.Block `json:"block"`
 }

+// ResultHeader represents the response for a Header RPC Client query
+type ResultHeader struct {
+	Header *types.Header `json:"header"`
+}
+
 // Commit and Header
 type ResultCommit struct {
 	types.SignedHeader `json:"signed_header"`
@@ -500,7 +500,7 @@ paths:
             $ref: "#/components/schemas/ErrorResponse"
   /net_info:
     get:
-      summary: Network informations
+      summary: Network information
       operationId: net_info
       tags:
         - Info

@@ -637,6 +637,64 @@ paths:
           application/json:
             schema:
               $ref: "#/components/schemas/ErrorResponse"
+  /header:
+    get:
+      summary: Get header at a specified height
+      operationId: header
+      parameters:
+        - in: query
+          name: height
+          schema:
+            type: integer
+            default: 0
+          example: 1
+          description: height to return. If no height is provided, it will fetch the latest header.
+      tags:
+        - Info
+      description: |
+        Get Header.
+      responses:
+        "200":
+          description: Header information.
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/BlockHeader"
+        "500":
+          description: Error
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/ErrorResponse"
+  /header_by_hash:
+    get:
+      summary: Get header by hash
+      operationId: header_by_hash
+      parameters:
+        - in: query
+          name: hash
+          description: header hash
+          required: true
+          schema:
+            type: string
+          example: "0xD70952032620CC4E2737EB8AC379806359D8E0B17B0488F627997A0B043ABDED"
+      tags:
+        - Info
+      description: |
+        Get Header By Hash.
+      responses:
+        "200":
+          description: Header information.
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/BlockHeader"
+        "500":
+          description: Error
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/ErrorResponse"
   /block:
     get:
       summary: Get block at a specified height
@@ -121,6 +121,22 @@ func (_m *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta {
 	return r0
 }

+// LoadBlockMetaByHash provides a mock function with given fields: hash
+func (_m *BlockStore) LoadBlockMetaByHash(hash []byte) *types.BlockMeta {
+	ret := _m.Called(hash)
+
+	var r0 *types.BlockMeta
+	if rf, ok := ret.Get(0).(func([]byte) *types.BlockMeta); ok {
+		r0 = rf(hash)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*types.BlockMeta)
+		}
+	}
+
+	return r0
+}
+
 // LoadBlockPart provides a mock function with given fields: height, index
 func (_m *BlockStore) LoadBlockPart(height int64, index int) *types.Part {
 	ret := _m.Called(height, index)
@@ -29,6 +29,7 @@ type BlockStore interface {
 	PruneBlocks(height int64) (uint64, error)

 	LoadBlockByHash(hash []byte) *types.Block
+	LoadBlockMetaByHash(hash []byte) *types.BlockMeta
 	LoadBlockPart(height int64, index int) *types.Part

 	LoadBlockCommit(height int64) *types.Commit
@@ -196,6 +196,26 @@ func (bs *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta {
 	return blockMeta
 }

+// LoadBlockMetaByHash returns the blockmeta whose header corresponds to the given
+// hash. If none is found, returns nil.
+func (bs *BlockStore) LoadBlockMetaByHash(hash []byte) *types.BlockMeta {
+	bz, err := bs.db.Get(calcBlockHashKey(hash))
+	if err != nil {
+		panic(err)
+	}
+	if len(bz) == 0 {
+		return nil
+	}
+
+	s := string(bz)
+	height, err := strconv.ParseInt(s, 10, 64)
+	if err != nil {
+		panic(fmt.Sprintf("failed to extract height from %s: %v", s, err))
+	}
+	return bs.LoadBlockMeta(height)
+}
+
 // LoadBlockCommit returns the Commit for the given height.
 // This commit consists of the +2/3 and other Precommit-votes for block at `height`,
 // and it comes from the block.LastCommit for `height+1`.
@@ -476,6 +476,7 @@ func TestPruneBlocks(t *testing.T) {
 	require.Nil(t, bs.LoadBlockByHash(prunedBlock.Hash()))
 	require.Nil(t, bs.LoadBlockCommit(1199))
 	require.Nil(t, bs.LoadBlockMeta(1199))
+	require.Nil(t, bs.LoadBlockMetaByHash(prunedBlock.Hash()))
 	require.Nil(t, bs.LoadBlockPart(1199, 1))

 	for i := int64(1); i < 1200; i++ {

@@ -552,6 +553,28 @@ func TestLoadBlockMeta(t *testing.T) {
 	}
 }

+func TestLoadBlockMetaByHash(t *testing.T) {
+	config := cfg.ResetTestRoot("blockchain_reactor_test")
+	defer os.RemoveAll(config.RootDir)
+	stateStore := sm.NewStore(dbm.NewMemDB(), sm.StoreOptions{
+		DiscardABCIResponses: false,
+	})
+	state, err := stateStore.LoadFromDBOrGenesisFile(config.GenesisFile())
+	require.NoError(t, err)
+	bs := NewBlockStore(dbm.NewMemDB())
+
+	b1 := state.MakeBlock(state.LastBlockHeight+1, test.MakeNTxs(state.LastBlockHeight+1, 10), new(types.Commit), nil, state.Validators.GetProposer().Address)
+	partSet, err := b1.MakePartSet(2)
+	require.NoError(t, err)
+	seenCommit := makeTestCommit(1, tmtime.Now())
+	bs.SaveBlock(b1, partSet, seenCommit)
+
+	baseBlock := bs.LoadBlockMetaByHash(b1.Hash())
+	assert.EqualValues(t, b1.Header.Height, baseBlock.Header.Height)
+	assert.EqualValues(t, b1.Header.LastBlockID, baseBlock.Header.LastBlockID)
+	assert.EqualValues(t, b1.Header.ChainID, baseBlock.Header.ChainID)
+}
+
 func TestBlockFetchAtHeight(t *testing.T) {
 	state, bs, cleanup := makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer)))
 	defer cleanup()
test/loadtime/Makefile (new file, 33 lines)
@@ -0,0 +1,33 @@
GOMOD="github.com/tendermint/tendermint/test/loadtime"
OUTPUT?=build/

build:
	go build $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o $(OUTPUT)load ./cmd/load/
	go build $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o $(OUTPUT)report ./cmd/report/
.PHONY: build

check-proto-gen-deps:
ifeq (,$(shell which protoc))
	$(error "protoc is required for Protobuf generation. See instructions for your platform on how to install it.")
endif
ifeq (,$(shell which protoc-gen-go))
	$(error "protoc-gen-go is required for Protobuf generation. See instructions for your platform on how to install it.")
endif
.PHONY: check-proto-gen-deps

check-proto-format-deps:
ifeq (,$(shell which clang-format))
	$(error "clang-format is required for Protobuf formatting. See instructions for your platform on how to install it.")
endif
.PHONY: check-proto-format-deps

proto-format: check-proto-format-deps
	@echo "Formatting Protobuf files"
	@find . -name '*.proto' -exec clang-format -i {} \;
.PHONY: proto-format

proto-gen: check-proto-gen-deps
	@echo "Generating Protobuf files"
	@find . -name '*.proto' -exec protoc \
		--go_out=paths=source_relative:. {} \;
.PHONY: proto-gen
test/loadtime/README.md (new file, 66 lines)
@@ -0,0 +1,66 @@
# loadtime

This directory contains the `loadtime` tools, a set of tools for generating
transaction load against Tendermint and measuring the resulting latency.
`loadtime` generates transactions that contain the timestamp of when they were
generated, as well as additional metadata to track the variables used when
generating the load.

## Building the tool set

The `Makefile` contains a target for building the `loadtime` tools.

The following command will build the tools and place the resulting binaries in `./build/`.

```bash
make build
```

## `load`

The `load` binary is built when `make build` is invoked. The `load` tool generates
transactions and broadcasts them to Tendermint.

`load` leverages the [tm-load-test](https://github.com/informalsystems/tm-load-test)
framework. As a result, all `tm-load-test` flags and options also apply to
`load`.

Below is a basic invocation for generating load against a Tendermint websocket running
on `localhost:26657`:

```bash
./build/load \
    -c 1 -T 10 -r 1000 -s 1024 \
    --broadcast-tx-method sync \
    --endpoints ws://localhost:26657/websocket
```

## `report`

The `report` binary is built when `make build` is invoked. The `report` tool
reads all of the blocks from the specified blockstore database and calculates
transaction latency metrics. `report` reads transactions generated by `load`
and uses the difference between the timestamp contained in the transaction and
the timestamp of the block the transaction was executed in to determine transaction latency.
`report` outputs a set of metrics calculated on the list of latencies, including
minimum, maximum, and average latency as well as the standard deviation.
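
The latency computation itself is simple. A minimal sketch (hedged: the function and package placement here are illustrative only; it assumes a decoded `payload.Payload` and the timestamp of the block that included the transaction):

```go
package report

import (
	"time"

	"github.com/tendermint/tendermint/test/loadtime/payload"
)

// txLatency sketches the core of what report computes: the gap between
// the block's creation time and the generation timestamp carried inside
// the transaction payload.
func txLatency(blockTime time.Time, p *payload.Payload) time.Duration {
	return blockTime.Sub(p.Time.AsTime())
}
```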

Below is a basic invocation of the report tool with a data directory under `/home/test/.tendermint/data/`
where the data was saved in a `goleveldb` database.

```bash
./build/report --database-type goleveldb --data-dir ~/.tendermint/data
```

The `report` tool also supports outputting the raw data as `csv`. This can be
useful if you want to use a more powerful tool to aggregate and analyze the data.

Below is an invocation of the report tool that outputs the data to a `csv` file
in `out.csv`:

```bash
./build/report --database-type goleveldb --data-dir ~/.tendermint/data --csv out.csv
```
test/loadtime/basic.sh (new executable file, 11 lines)
@@ -0,0 +1,11 @@
#!/bin/sh

set -euo pipefail

# A basic invocation of the loadtime tool.

./build/load \
    -c 1 -T 10 -r 1000 -s 1024 \
    --broadcast-tx-method sync \
    --endpoints ws://localhost:26657/websocket
test/loadtime/cmd/load/main.go (new file, 65 lines)
@@ -0,0 +1,65 @@
package main

import (
	"fmt"

	"github.com/informalsystems/tm-load-test/pkg/loadtest"
	"github.com/tendermint/tendermint/test/loadtime/payload"
)

// Ensure all of the interfaces are correctly satisfied.
var (
	_ loadtest.ClientFactory = (*ClientFactory)(nil)
	_ loadtest.Client        = (*TxGenerator)(nil)
)

// ClientFactory implements the loadtest.ClientFactory interface.
type ClientFactory struct{}

// TxGenerator is responsible for generating transactions.
// TxGenerator holds the set of information that will be used to generate
// each transaction.
type TxGenerator struct {
	conns uint64
	rate  uint64
	size  uint64
}

func main() {
	if err := loadtest.RegisterClientFactory("loadtime-client", &ClientFactory{}); err != nil {
		panic(err)
	}
	loadtest.Run(&loadtest.CLIConfig{
		AppName:              "loadtime",
		AppShortDesc:         "Generate timestamped transaction load.",
		AppLongDesc:          "loadtime generates transaction load for the purpose of measuring the end-to-end latency of a transaction from submission to execution in a Tendermint network.",
		DefaultClientFactory: "loadtime-client",
	})
}

func (f *ClientFactory) ValidateConfig(cfg loadtest.Config) error {
	psb, err := payload.MaxUnpaddedSize()
	if err != nil {
		return err
	}
	if psb > cfg.Size {
		return fmt.Errorf("payload size exceeds configured size")
	}
	return nil
}

func (f *ClientFactory) NewClient(cfg loadtest.Config) (loadtest.Client, error) {
	return &TxGenerator{
		conns: uint64(cfg.Connections),
		rate:  uint64(cfg.Rate),
		size:  uint64(cfg.Size),
	}, nil
}

func (c *TxGenerator) GenerateTx() ([]byte, error) {
	return payload.NewBytes(&payload.Payload{
		Connections: c.conns,
		Rate:        c.rate,
		Size:        c.size,
	})
}
test/loadtime/cmd/report/main.go (new file, 87 lines)
@@ -0,0 +1,87 @@
package main

import (
	"encoding/csv"
	"flag"
	"fmt"
	"log"
	"os"
	"strconv"
	"strings"
	"time"

	"github.com/tendermint/tendermint/store"
	"github.com/tendermint/tendermint/test/loadtime/report"
	dbm "github.com/tendermint/tm-db"
)

var (
	db     = flag.String("database-type", "goleveldb", "the type of database holding the blockstore")
	dir    = flag.String("data-dir", "", "path to the directory containing the tendermint databases")
	csvOut = flag.String("csv", "", "dump the extracted latencies as raw csv for use in additional tooling")
)

func main() {
	flag.Parse()
	if *db == "" {
		log.Fatalf("must specify a database-type")
	}
	if *dir == "" {
		log.Fatalf("must specify a data-dir")
	}
	d := strings.TrimPrefix(*dir, "~/")
	if d != *dir {
		h, err := os.UserHomeDir()
		if err != nil {
			panic(err)
		}
		d = h + "/" + d
	}
	_, err := os.Stat(d)
	if err != nil {
		panic(err)
	}
	dbType := dbm.BackendType(*db)
	db, err := dbm.NewDB("blockstore", dbType, d)
	if err != nil {
		panic(err)
	}
	s := store.NewBlockStore(db)
	defer s.Close()
	r, err := report.GenerateFromBlockStore(s)
	if err != nil {
		panic(err)
	}
	if *csvOut != "" {
		cf, err := os.Create(*csvOut)
		if err != nil {
			panic(err)
		}
		w := csv.NewWriter(cf)
		err = w.WriteAll(toRecords(r.All))
		if err != nil {
			panic(err)
		}
		return
	}

	fmt.Printf(""+
		"Total Valid Tx: %d\n"+
		"Total Invalid Tx: %d\n"+
		"Total Negative Latencies: %d\n"+
		"Minimum Latency: %s\n"+
		"Maximum Latency: %s\n"+
		"Average Latency: %s\n"+
		"Standard Deviation: %s\n", len(r.All), r.ErrorCount, r.NegativeCount, r.Min, r.Max, r.Avg, r.StdDev)
}

func toRecords(l []time.Duration) [][]string {
	res := make([][]string, len(l)+1)

	res[0] = make([]string, 1)
	res[0][0] = "duration_ns"
	for i, v := range l {
		res[1+i] = []string{strconv.FormatInt(int64(v), 10)}
	}
	return res
}
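
// Illustrative CSV output (values invented for the example): given the record
// layout above, the file has a single "duration_ns" header row followed by
// one nanosecond latency per transaction:
//
//	duration_ns
//	12500000
//	9800000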
test/loadtime/payload/payload.go (new file, 87 lines)
@@ -0,0 +1,87 @@
package payload

import (
	"bytes"
	"crypto/rand"
	"errors"
	"fmt"
	"math"

	"google.golang.org/protobuf/proto"
	timestamppb "google.golang.org/protobuf/types/known/timestamppb"
)

const keyPrefix = "a="

// NewBytes generates a new payload and returns the encoded representation of
// the payload as a slice of bytes. NewBytes uses the fields on the Payload
// to create the payload.
func NewBytes(p *Payload) ([]byte, error) {
	p.Padding = make([]byte, 1)
	if p.Time == nil {
		p.Time = timestamppb.Now()
	}
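	// Size the message with the 1-byte Padding placeholder first; the gap
	// between p.Size and this unpadded size is filled with random padding below.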
	us, err := CalculateUnpaddedSize(p)
	if err != nil {
		return nil, err
	}
	if p.Size < uint64(us) {
		return nil, fmt.Errorf("configured size %d not large enough to fit unpadded transaction of size %d", p.Size, us)
	}
	p.Padding = make([]byte, p.Size-uint64(us))
	_, err = rand.Read(p.Padding)
	if err != nil {
		return nil, err
	}
	b, err := proto.Marshal(p)
	if err != nil {
		return nil, err
	}

	// prepend a single key so that the kv store only ever stores a single
	// transaction instead of storing all tx and ballooning in size.
	return append([]byte(keyPrefix), b...), nil
}

// FromBytes extracts a payload from the byte representation of the payload.
// FromBytes leaves the padding untouched, returning it to the caller to handle
// or discard per their preference.
func FromBytes(b []byte) (*Payload, error) {
	p := &Payload{}
	tr := bytes.TrimPrefix(b, []byte(keyPrefix))
	if bytes.Equal(b, tr) {
		return nil, errors.New("payload bytes missing key prefix")
	}
	err := proto.Unmarshal(tr, p)
	if err != nil {
		return nil, err
	}
	return p, nil
}

// MaxUnpaddedSize returns the maximum size that a payload may be if no padding
// is included.
func MaxUnpaddedSize() (int, error) {
	p := &Payload{
		Time:        timestamppb.Now(),
		Connections: math.MaxUint64,
		Rate:        math.MaxUint64,
		Size:        math.MaxUint64,
		Padding:     make([]byte, 1),
	}
	return CalculateUnpaddedSize(p)
}

// CalculateUnpaddedSize calculates the size of the passed-in payload for the
// purpose of determining how much padding to add to reach the target size.
// CalculateUnpaddedSize returns an error if the payload Padding field is longer than 1.
func CalculateUnpaddedSize(p *Payload) (int, error) {
	if len(p.Padding) != 1 {
		return 0, fmt.Errorf("expected length of padding to be 1, received %d", len(p.Padding))
	}
	b, err := proto.Marshal(p)
	if err != nil {
		return 0, err
	}
	return len(b) + len(keyPrefix), nil
}
test/loadtime/payload/payload.pb.go (new file, 190 lines)
@@ -0,0 +1,190 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.27.1
|
||||
// protoc v3.20.1
|
||||
// source: payload/payload.proto
|
||||
|
||||
package payload
|
||||
|
||||
import (
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
type Payload struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Connections uint64 `protobuf:"varint,1,opt,name=connections,proto3" json:"connections,omitempty"`
|
||||
Rate uint64 `protobuf:"varint,2,opt,name=rate,proto3" json:"rate,omitempty"`
|
||||
Size uint64 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"`
|
||||
Time *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=time,proto3" json:"time,omitempty"`
|
||||
Padding []byte `protobuf:"bytes,5,opt,name=padding,proto3" json:"padding,omitempty"`
|
||||
}
|
||||
|
||||
func (x *Payload) Reset() {
|
||||
*x = Payload{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_payload_payload_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *Payload) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*Payload) ProtoMessage() {}
|
||||
|
||||
func (x *Payload) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_payload_payload_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use Payload.ProtoReflect.Descriptor instead.
|
||||
func (*Payload) Descriptor() ([]byte, []int) {
    return file_payload_payload_proto_rawDescGZIP(), []int{0}
}

func (x *Payload) GetConnections() uint64 {
    if x != nil {
        return x.Connections
    }
    return 0
}

func (x *Payload) GetRate() uint64 {
    if x != nil {
        return x.Rate
    }
    return 0
}

func (x *Payload) GetSize() uint64 {
    if x != nil {
        return x.Size
    }
    return 0
}

func (x *Payload) GetTime() *timestamppb.Timestamp {
    if x != nil {
        return x.Time
    }
    return nil
}

func (x *Payload) GetPadding() []byte {
    if x != nil {
        return x.Padding
    }
    return nil
}

var File_payload_payload_proto protoreflect.FileDescriptor

var file_payload_payload_proto_rawDesc = []byte{
    0x0a, 0x15, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61,
    0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x10, 0x6c, 0x6f, 0x61, 0x64, 0x74, 0x69, 0x6d,
    0x65, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
    0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73,
    0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x9d, 0x01, 0x0a, 0x07, 0x50,
    0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63,
    0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x63, 0x6f, 0x6e,
    0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x61, 0x74, 0x65,
    0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x72, 0x61, 0x74, 0x65, 0x12, 0x12, 0x0a, 0x04,
    0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65,
    0x12, 0x2e, 0x0a, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
    0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
    0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x04, 0x74, 0x69, 0x6d, 0x65,
    0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x64, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28,
    0x0c, 0x52, 0x07, 0x70, 0x61, 0x64, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x38, 0x5a, 0x36, 0x67, 0x69,
    0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x6d,
    0x69, 0x6e, 0x74, 0x2f, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x74, 0x2f, 0x74,
    0x65, 0x73, 0x74, 0x2f, 0x6c, 0x6f, 0x61, 0x64, 0x74, 0x69, 0x6d, 0x65, 0x2f, 0x70, 0x61, 0x79,
    0x6c, 0x6f, 0x61, 0x64, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}

var (
    file_payload_payload_proto_rawDescOnce sync.Once
    file_payload_payload_proto_rawDescData = file_payload_payload_proto_rawDesc
)

func file_payload_payload_proto_rawDescGZIP() []byte {
    file_payload_payload_proto_rawDescOnce.Do(func() {
        file_payload_payload_proto_rawDescData = protoimpl.X.CompressGZIP(file_payload_payload_proto_rawDescData)
    })
    return file_payload_payload_proto_rawDescData
}

var file_payload_payload_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
var file_payload_payload_proto_goTypes = []interface{}{
    (*Payload)(nil),               // 0: loadtime.payload.Payload
    (*timestamppb.Timestamp)(nil), // 1: google.protobuf.Timestamp
}
var file_payload_payload_proto_depIdxs = []int32{
    1, // 0: loadtime.payload.Payload.time:type_name -> google.protobuf.Timestamp
    1, // [1:1] is the sub-list for method output_type
    1, // [1:1] is the sub-list for method input_type
    1, // [1:1] is the sub-list for extension type_name
    1, // [1:1] is the sub-list for extension extendee
    0, // [0:1] is the sub-list for field type_name
}

func init() { file_payload_payload_proto_init() }
func file_payload_payload_proto_init() {
    if File_payload_payload_proto != nil {
        return
    }
    if !protoimpl.UnsafeEnabled {
        file_payload_payload_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
            switch v := v.(*Payload); i {
            case 0:
                return &v.state
            case 1:
                return &v.sizeCache
            case 2:
                return &v.unknownFields
            default:
                return nil
            }
        }
    }
    type x struct{}
    out := protoimpl.TypeBuilder{
        File: protoimpl.DescBuilder{
            GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
            RawDescriptor: file_payload_payload_proto_rawDesc,
            NumEnums:      0,
            NumMessages:   1,
            NumExtensions: 0,
            NumServices:   0,
        },
        GoTypes:           file_payload_payload_proto_goTypes,
        DependencyIndexes: file_payload_payload_proto_depIdxs,
        MessageInfos:      file_payload_payload_proto_msgTypes,
    }.Build()
    File_payload_payload_proto = out.File
    file_payload_payload_proto_rawDesc = nil
    file_payload_payload_proto_goTypes = nil
    file_payload_payload_proto_depIdxs = nil
}
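A note on the generated accessors above: each getter checks the receiver for nil before touching a field, so a nil *Payload can be read safely and simply reports zero values. A minimal sketch demonstrating this behavior (the main function is illustrative only; the import path is the package this diff adds):

package main

import (
    "fmt"

    "github.com/tendermint/tendermint/test/loadtime/payload"
)

func main() {
    // Pointer-receiver methods in Go may be called on a nil pointer, and
    // the generated getters rely on exactly that: each returns the
    // field's zero value when the receiver is nil.
    var p *payload.Payload
    fmt.Println(p.GetConnections(), p.GetRate(), p.GetSize()) // 0 0 0
    fmt.Println(p.GetTime(), p.GetPadding())                  // <nil> []
}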
17
test/loadtime/payload/payload.proto
Normal file
@@ -0,0 +1,17 @@
syntax = "proto3";
package loadtime.payload;

option go_package = "github.com/tendermint/tendermint/test/loadtime/payload";

import "google/protobuf/timestamp.proto";

// Payload is the structure of the loadtime transaction. Proto has a compact
// encoded representation, making it well suited to the loadtime use case,
// which aims to keep the generated transactions small.
message Payload {
  uint64 connections = 1;
  uint64 rate = 2;
  uint64 size = 3;
  google.protobuf.Timestamp time = 4;
  bytes padding = 5;
}
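To make the "compact encoded representation" claim concrete, here is a small sketch (illustrative, not part of the diff) that encodes a Payload with google.golang.org/protobuf/proto and prints its unpadded wire size; the exact byte count depends on the field values:

package main

import (
    "fmt"

    "google.golang.org/protobuf/proto"
    "google.golang.org/protobuf/types/known/timestamppb"

    "github.com/tendermint/tendermint/test/loadtime/payload"
)

func main() {
    p := &payload.Payload{
        Connections: 4,
        Rate:        1000,
        Size:        1024,
        Time:        timestamppb.Now(),
    }
    b, err := proto.Marshal(p)
    if err != nil {
        panic(err)
    }
    // Scalars are varint-encoded and unset fields cost nothing on the
    // wire, so the unpadded message stays far below the 1 KiB target;
    // the padding field exists to fill the remaining space.
    fmt.Printf("unpadded encoded size: %d bytes\n", len(b))
}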
50
test/loadtime/payload/payload_test.go
Normal file
@@ -0,0 +1,50 @@
package payload_test

import (
    "testing"

    "github.com/tendermint/tendermint/test/loadtime/payload"
)

const payloadSizeTarget = 1024 // 1kb

func TestSize(t *testing.T) {
    s, err := payload.MaxUnpaddedSize()
    if err != nil {
        t.Fatalf("calculating max unpadded size %s", err)
    }
    if s > payloadSizeTarget {
        t.Fatalf("unpadded payload size %d exceeds target %d", s, payloadSizeTarget)
    }
}

func TestRoundTrip(t *testing.T) {
    const (
        testConns = 512
        testRate  = 4
    )
    b, err := payload.NewBytes(&payload.Payload{
        Size:        payloadSizeTarget,
        Connections: testConns,
        Rate:        testRate,
    })
    if err != nil {
        t.Fatalf("generating payload %s", err)
    }
    if len(b) < payloadSizeTarget {
        t.Fatalf("payload size in bytes %d less than expected %d", len(b), payloadSizeTarget)
    }
    p, err := payload.FromBytes(b)
    if err != nil {
        t.Fatalf("reading payload %s", err)
    }
    if p.Size != payloadSizeTarget {
        t.Fatalf("payload size value %d does not match expected %d", p.Size, payloadSizeTarget)
    }
    if p.Connections != testConns {
        t.Fatalf("payload connections value %d does not match expected %d", p.Connections, testConns)
    }
    if p.Rate != testRate {
        t.Fatalf("payload rate value %d does not match expected %d", p.Rate, testRate)
    }
}
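TestSize and TestRoundTrip together pin down the contract of the payload helpers: an unpadded message must fit inside the 1024-byte target, and NewBytes must pad the encoding up to at least the requested Size. A hypothetical sketch of that padding step, assuming it works by encoding once to measure and then growing Padding; newBytesSketch is not the diff's NewBytes and the real implementation may differ:

package main

import (
    "fmt"

    "google.golang.org/protobuf/proto"

    "github.com/tendermint/tendermint/test/loadtime/payload"
)

// newBytesSketch is a hypothetical illustration. It encodes the message once
// to learn its unpadded size, then fills Padding to close the gap. The
// padding field's tag and length prefix add a few extra bytes, which is
// consistent with TestRoundTrip checking len(b) >= payloadSizeTarget rather
// than strict equality.
func newBytesSketch(p *payload.Payload) ([]byte, error) {
    unpadded, err := proto.Marshal(p)
    if err != nil {
        return nil, err
    }
    if gap := int(p.Size) - len(unpadded); gap > 0 {
        p.Padding = make([]byte, gap)
    }
    return proto.Marshal(p)
}

func main() {
    b, err := newBytesSketch(&payload.Payload{Size: 1024})
    if err != nil {
        panic(err)
    }
    fmt.Println(len(b) >= 1024) // true
}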
153
test/loadtime/report/report.go
Normal file
@@ -0,0 +1,153 @@
package report

import (
    "math"
    "sync"
    "time"

    "github.com/tendermint/tendermint/test/loadtime/payload"
    "github.com/tendermint/tendermint/types"
    "gonum.org/v1/gonum/stat"
)

// BlockStore defines the set of methods needed by the report generator from
// Tendermint's store.Blockstore type. Using an interface allows for tests to
// more easily simulate the required behavior without having to use the more
// complex real API.
type BlockStore interface {
    Height() int64
    Base() int64
    LoadBlock(int64) *types.Block
}

// Report contains the data calculated from reading the timestamped transactions
// of each block found in the blockstore.
type Report struct {
    Max, Min, Avg, StdDev time.Duration

    // ErrorCount is the number of parsing errors encountered while reading the
    // transaction data. Parsing errors may occur if a transaction not generated
    // by the payload package is submitted to the chain.
    ErrorCount int

    // NegativeCount is the number of negative durations encountered while
    // reading the transaction data. A negative duration means that
    // a transaction timestamp was greater than the timestamp of the block it
    // was included in and likely indicates an issue with the experimental
    // setup.
    NegativeCount int

    // All contains all data points gathered from all valid transactions.
    // The order of the contents of All is not guaranteed to match the order
    // of transactions in the chain.
    All []time.Duration
}

// GenerateFromBlockStore creates a Report using the data in the provided
// BlockStore.
func GenerateFromBlockStore(s BlockStore) (Report, error) {
    type payloadData struct {
        l   time.Duration
        err error
    }
    type txData struct {
        tx []byte
        bt time.Time
    }

    // Deserializing to proto can be slow but does not depend on other data
    // and can therefore be done in parallel.
    // Deserializing in parallel does mean that the resulting data is
    // not guaranteed to be delivered in the same order it was given to the
    // worker pool.
    const poolSize = 16

    txc := make(chan txData)
    pdc := make(chan payloadData, poolSize)

    wg := &sync.WaitGroup{}
    wg.Add(poolSize)
    for i := 0; i < poolSize; i++ {
        go func() {
            defer wg.Done()
            for b := range txc {
                p, err := payload.FromBytes(b.tx)
                if err != nil {
                    pdc <- payloadData{err: err}
                    continue
                }

                l := b.bt.Sub(p.Time.AsTime())
                pdc <- payloadData{l: l}
            }
        }()
    }
    go func() {
        wg.Wait()
        close(pdc)
    }()

    r := Report{
        Max: 0,
        Min: math.MaxInt64,
    }
    var sum int64
    go func() {
        base, height := s.Base(), s.Height()
        prev := s.LoadBlock(base)
        for i := base + 1; i < height; i++ {
            // Data from two adjacent blocks are used here simultaneously,
            // blocks of height H and H+1. The transactions of the block of
            // height H are used with the timestamp from the block of height
            // H+1. This is done because the timestamp from H+1 is calculated
            // by using the precommits submitted at height H. The timestamp in
            // block H+1 represents the time at which block H was committed.
            //
            // In the (very unlikely) event that the very last block of the
            // chain contains payload transactions, those transactions will not
            // be used in the latency calculations because the last block whose
            // transactions are used is the block one before the last.
            cur := s.LoadBlock(i)
            for _, tx := range prev.Data.Txs {
                txc <- txData{tx: tx, bt: cur.Time}
            }
            prev = cur
        }
        close(txc)
    }()
    for pd := range pdc {
        if pd.err != nil {
            r.ErrorCount++
            continue
        }
        r.All = append(r.All, pd.l)
        if pd.l > r.Max {
            r.Max = pd.l
        }
        if pd.l < r.Min {
            r.Min = pd.l
        }
        if int64(pd.l) < 0 {
            r.NegativeCount++
        }
        // Using an int64 here makes an assumption about the scale and
        // quantity of the data we are processing. If all latencies were
        // 2 seconds, we would need around 4 billion records to overflow this.
        // We are therefore assuming that the data does not exceed these bounds.
        sum += int64(pd.l)
    }
    if len(r.All) == 0 {
        r.Min = 0
        return r, nil
    }
    r.Avg = time.Duration(sum / int64(len(r.All)))
    r.StdDev = time.Duration(int64(stat.StdDev(toFloat(r.All), nil)))
    return r, nil
}

func toFloat(in []time.Duration) []float64 {
    r := make([]float64, len(in))
    for i, v := range in {
        r[i] = float64(int64(v))
    }
    return r
}
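A minimal usage sketch for GenerateFromBlockStore: any value exposing Height, Base, and LoadBlock satisfies the BlockStore interface, so a report can be produced from the real store.BlockStore or, as below, from a hypothetical in-memory stub (stubStore is illustrative and not part of the diff):

package main

import (
    "fmt"

    "github.com/tendermint/tendermint/test/loadtime/report"
    "github.com/tendermint/tendermint/types"
)

// stubStore is a hypothetical two-block store; with no payload transactions
// in it, the generated report simply comes back empty.
type stubStore struct{ blocks []*types.Block }

func (s *stubStore) Height() int64 { return int64(len(s.blocks)) }
func (s *stubStore) Base() int64   { return 1 }
func (s *stubStore) LoadBlock(i int64) *types.Block { return s.blocks[i-1] }

func main() {
    r, err := report.GenerateFromBlockStore(&stubStore{
        blocks: []*types.Block{{}, {}},
    })
    if err != nil {
        panic(err)
    }
    fmt.Printf("min=%s avg=%s max=%s stddev=%s points=%d errors=%d\n",
        r.Min, r.Avg, r.Max, r.StdDev, len(r.All), r.ErrorCount)
}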
114
test/loadtime/report/report_test.go
Normal file
@@ -0,0 +1,114 @@
package report_test

import (
    "testing"
    "time"

    "github.com/tendermint/tendermint/test/loadtime/payload"
    "github.com/tendermint/tendermint/test/loadtime/report"
    "github.com/tendermint/tendermint/types"
    "google.golang.org/protobuf/types/known/timestamppb"
)

type mockBlockStore struct {
    base   int64
    blocks []*types.Block
}

func (m *mockBlockStore) Height() int64 {
    return m.base + int64(len(m.blocks))
}

func (m *mockBlockStore) Base() int64 {
    return m.base
}

func (m *mockBlockStore) LoadBlock(i int64) *types.Block {
    return m.blocks[i-m.base]
}

func TestGenerateReport(t *testing.T) {
    t1 := time.Now()
    b1, err := payload.NewBytes(&payload.Payload{
        Time: timestamppb.New(t1.Add(-10 * time.Second)),
        Size: 1024,
    })
    if err != nil {
        t.Fatalf("generating payload %s", err)
    }
    b2, err := payload.NewBytes(&payload.Payload{
        Time: timestamppb.New(t1.Add(-4 * time.Second)),
        Size: 1024,
    })
    if err != nil {
        t.Fatalf("generating payload %s", err)
    }
    b3, err := payload.NewBytes(&payload.Payload{
        Time: timestamppb.New(t1.Add(2 * time.Second)),
        Size: 1024,
    })
    t2 := t1.Add(time.Second)
    if err != nil {
        t.Fatalf("generating payload %s", err)
    }
    s := &mockBlockStore{
        blocks: []*types.Block{
            {
                Data: types.Data{
                    Txs: []types.Tx{b1, b2},
                },
            },
            {
                // The timestamp from block H+1 is used to calculate the
                // latency for the transactions in block H.
                Header: types.Header{
                    Time: t1,
                },
                Data: types.Data{
                    Txs: []types.Tx{[]byte("error")},
                },
            },
            {
                Data: types.Data{
                    Txs: []types.Tx{b3, b3},
                },
            },
            {
                Header: types.Header{
                    Time: t2,
                },
                Data: types.Data{
                    Txs: []types.Tx{},
                },
            },
        },
    }
    r, err := report.GenerateFromBlockStore(s)
    if err != nil {
        t.Fatalf("generating report %s", err)
    }
    if len(r.All) != 4 {
        t.Fatalf("report contained a different number of data points than expected. Expected %d but contained %d", 4, len(r.All))
    }
    if r.ErrorCount != 1 {
        t.Fatalf("ErrorCount did not match expected. Expected %d but contained %d", 1, r.ErrorCount)
    }
    if r.NegativeCount != 2 {
        t.Fatalf("NegativeCount did not match expected. Expected %d but contained %d", 2, r.NegativeCount)
    }
    if r.Avg != 3*time.Second {
        t.Fatalf("Avg did not match expected. Expected %s but contained %s", 3*time.Second, r.Avg)
    }
    if r.Min != -time.Second {
        t.Fatalf("Min did not match expected. Expected %s but contained %s", -time.Second, r.Min)
    }
    if r.Max != 10*time.Second {
        t.Fatalf("Max did not match expected. Expected %s but contained %s", 10*time.Second, r.Max)
    }
    // Verified using online standard deviation calculator:
    // https://www.calculator.net/standard-deviation-calculator.html?numberinputs=10%2C+4%2C+-1%2C+-1&ctype=s&x=45&y=12
    expectedStdDev := 5228129047 * time.Nanosecond
    if r.StdDev != expectedStdDev {
        t.Fatalf("StdDev did not match expected. Expected %s but contained %s", expectedStdDev, r.StdDev)
    }
}
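For reference, the expected values in this test follow directly from the four valid latencies (10s, 4s, -1s, -1s): the mean is (10 + 4 - 1 - 1)/4 = 3s, the squared deviations from it are 49, 1, 16, and 16, the sample variance is (49 + 1 + 16 + 16)/3 = 82/3 ~= 27.33, and its square root, ~= 5.228129047s, is the expectedStdDev constant above. gonum's stat.StdDev computes the sample (n-1) form, matching the linked calculator's setting.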