Mirror of https://github.com/tendermint/tendermint.git, synced 2026-01-14 16:52:49 +00:00

Compare commits: 28 commits (feature/ab ... main_backu)
| Author | SHA1 | Date |
|---|---|---|
|  | a9feb1c023 |  |
|  | 188069b1d5 |  |
|  | b1fbe26ffa |  |
|  | 9dd34ce4a1 |  |
|  | bbcd128846 |  |
|  | 8b7ae932ff |  |
|  | 82ec855be4 |  |
|  | 6878b38812 |  |
|  | 1a5d5ed63b |  |
|  | 189d120c9f |  |
|  | 8895854e95 |  |
|  | de29f6e46d |  |
|  | 9998dff847 |  |
|  | 2f4f224faa |  |
|  | dfb6589d8c |  |
|  | 365af06517 |  |
|  | 3324f49fb7 |  |
|  | ac48630fdb |  |
|  | 49502dae92 |  |
|  | c3302b0dc9 |  |
|  | 5ba0d131c4 |  |
|  | 654e5652e4 |  |
|  | 0952a99f45 |  |
|  | d09f4f503d |  |
|  | ba84060b07 |  |
|  | 739b92bf01 |  |
|  | 1c0995c809 |  |
|  | 17c94bb0dc |  |
6 .github/dependabot.yml (vendored)

@@ -30,12 +30,6 @@ updates:
      - T:dependencies
      - S:automerge

  - package-ecosystem: npm
    directory: "/docs"
    schedule:
      interval: weekly
    open-pull-requests-limit: 10

###################################
##
## Update All Go Dependencies
38 .github/workflows/e2e-manual-multiversion.yml (vendored, new file)

@@ -0,0 +1,38 @@
# Manually run the nightly E2E tests for a particular branch, but test with
# multiple versions.
name: e2e-manual-multiversion
on:
  workflow_dispatch:

jobs:
  e2e-manual-multiversion-test:
    # Run parallel jobs for the listed testnet groups (must match the
    # ./build/generator -g flag)
    strategy:
      fail-fast: false
      matrix:
        group: ['00', '01', '02', '03', '04']
    runs-on: ubuntu-latest
    timeout-minutes: 60
    steps:
      - uses: actions/setup-go@v3
        with:
          go-version: '1.18'

      - uses: actions/checkout@v3

      - name: Build
        working-directory: test/e2e
        # Run make jobs in parallel, since we can't run steps in parallel.
        run: make -j2 docker generator runner tests

      - name: Generate testnets
        working-directory: test/e2e
        # When changing -g, also change the matrix groups above
        # Generate multi-version tests with double the quantity of E2E nodes
        # based on the current branch as compared to the latest version.
        run: ./build/generator -g 5 -m "latest:1,local:2" -d networks/nightly/

      - name: Run ${{ matrix.p2p }} p2p testnets
        working-directory: test/e2e
        run: ./run-multiple.sh networks/nightly/*-group${{ matrix.group }}-*.toml
2 .github/workflows/e2e-manual.yml (vendored)

@@ -11,7 +11,7 @@ jobs:
    strategy:
      fail-fast: false
      matrix:
        group: ['00', '01', '02', '03']
        group: ['00', '01', '02', '03', '04']
    runs-on: ubuntu-latest
    timeout-minutes: 60
    steps:
8 .github/workflows/e2e-nightly-34x.yml (vendored)

@@ -30,8 +30,7 @@ jobs:
      - name: Capture git repo info
        id: git-info
        run: |
          echo "::set-output name=branch::`git branch --show-current`"
          echo "::set-output name=commit::`git rev-parse HEAD`"
          echo "branch=`git branch --show-current`" >> $GITHUB_OUTPUT

      - name: Build
        working-directory: test/e2e
@@ -49,7 +48,6 @@ jobs:

    outputs:
      git-branch: ${{ steps.git-info.outputs.branch }}
      git-commit: ${{ steps.git-info.outputs.commit }}

  e2e-nightly-fail:
    needs: e2e-nightly-test
@@ -63,7 +61,7 @@ jobs:
          SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
          BRANCH: ${{ needs.e2e-nightly-test.outputs.git-branch }}
          RUN_URL: "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
          COMMIT_URL: "${{ github.server_url }}/${{ github.repository }}/commit/${{ needs.e2e-nightly-test.outputs.git-commit }}"
          COMMITS_URL: "${{ github.server_url }}/${{ github.repository }}/commits/${{ needs.e2e-nightly-test.outputs.git-branch }}"
        with:
          payload: |
            {
@@ -72,7 +70,7 @@ jobs:
              "type": "section",
              "text": {
                "type": "mrkdwn",
                "text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMIT_URL }}|commit> related to the failure."
                "text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMITS_URL }}|latest commits> possibly related to the failure."
              }
            }
          ]
8 .github/workflows/e2e-nightly-37x.yml (vendored)

@@ -30,8 +30,7 @@ jobs:
      - name: Capture git repo info
        id: git-info
        run: |
          echo "::set-output name=branch::`git branch --show-current`"
          echo "::set-output name=commit::`git rev-parse HEAD`"
          echo "branch=`git branch --show-current`" >> $GITHUB_OUTPUT

      - name: Build
        working-directory: test/e2e
@@ -49,7 +48,6 @@ jobs:

    outputs:
      git-branch: ${{ steps.git-info.outputs.branch }}
      git-commit: ${{ steps.git-info.outputs.commit }}

  e2e-nightly-fail:
    needs: e2e-nightly-test
@@ -63,7 +61,7 @@ jobs:
          SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
          BRANCH: ${{ needs.e2e-nightly-test.outputs.git-branch }}
          RUN_URL: "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
          COMMIT_URL: "${{ github.server_url }}/${{ github.repository }}/commit/${{ needs.e2e-nightly-test.outputs.git-commit }}"
          COMMITS_URL: "${{ github.server_url }}/${{ github.repository }}/commits/${{ needs.e2e-nightly-test.outputs.git-branch }}"
        with:
          payload: |
            {
@@ -72,7 +70,7 @@ jobs:
              "type": "section",
              "text": {
                "type": "mrkdwn",
                "text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMIT_URL }}|commit> related to the failure."
                "text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMITS_URL }}|latest commits> possibly related to the failure."
              }
            }
          ]
4 .github/workflows/e2e-nightly-main.yml (vendored)

@@ -52,7 +52,7 @@ jobs:
          SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
          BRANCH: ${{ github.ref_name }}
          RUN_URL: "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
          COMMIT_URL: "${{ github.server_url }}/${{ github.repository }}/commit/${{ github.sha }}"
          COMMITS_URL: "${{ github.server_url }}/${{ github.repository }}/commits/${{ github.ref_name }}"
        with:
          payload: |
            {
@@ -61,7 +61,7 @@ jobs:
              "type": "section",
              "text": {
                "type": "mrkdwn",
                "text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMIT_URL }}|commit> related to the failure."
                "text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMITS_URL }}|latest commits> possibly related to the failure."
              }
            }
          ]
2 .github/workflows/fuzz-nightly.yml (vendored)

@@ -64,7 +64,7 @@ jobs:

      - name: Set crashers count
        working-directory: test/fuzz
        run: echo "::set-output name=count::$(find . -type d -name 'crashers' | xargs -I % sh -c 'ls % | wc -l' | awk '{total += $1} END {print total}')"
        run: echo "count=$(find . -type d -name 'crashers' | xargs -I % sh -c 'ls % | wc -l' | awk '{total += $1} END {print total}')" >> $GITHUB_OUTPUT
        id: set-crashers-count

    outputs:
31 .github/workflows/govulncheck.yml (vendored, new file)

@@ -0,0 +1,31 @@
name: Check for Go vulnerabilities
# Runs https://pkg.go.dev/golang.org/x/vuln/cmd/govulncheck to proactively
# check for vulnerabilities in code packages if there were any changes made to
# any Go code or dependencies.
#
# Run `make vulncheck` from the root of the repo to run this workflow locally.
on:
  pull_request:
  push:
    branches:
      - main
      - release/**

jobs:
  govulncheck:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/setup-go@v3
        with:
          go-version: "1.18"
      - uses: actions/checkout@v3
      - uses: technote-space/get-diff-action@v6
        with:
          PATTERNS: |
            **/*.go
            go.mod
            go.sum
            Makefile
      - name: govulncheck
        run: make vulncheck
        if: "env.GIT_DIFF != ''"
4 .github/workflows/pre-release.yml (vendored)

@@ -21,7 +21,7 @@ jobs:
          go-version: '1.18'

      - name: Build
        uses: goreleaser/goreleaser-action@v3
        uses: goreleaser/goreleaser-action@v4
        if: ${{ github.event_name == 'pull_request' }}
        with:
          version: latest
@@ -31,7 +31,7 @@ jobs:
      - run: echo https://github.com/tendermint/tendermint/blob/${GITHUB_REF#refs/tags/}/CHANGELOG_PENDING.md > ../release_notes.md

      - name: Release
        uses: goreleaser/goreleaser-action@v3
        uses: goreleaser/goreleaser-action@v4
        if: startsWith(github.ref, 'refs/tags/')
        with:
          version: latest
2 .github/workflows/proto-lint.yml (vendored)

@@ -15,7 +15,7 @@ jobs:
    timeout-minutes: 5
    steps:
      - uses: actions/checkout@v3
      - uses: bufbuild/buf-setup-action@v1.9.0
      - uses: bufbuild/buf-setup-action@v1.10.0
      - uses: bufbuild/buf-lint-action@v1
        with:
          input: 'proto'
4 .github/workflows/release.yml (vendored)

@@ -19,7 +19,7 @@ jobs:
          go-version: '1.18'

      - name: Build
        uses: goreleaser/goreleaser-action@v3
        uses: goreleaser/goreleaser-action@v4
        if: ${{ github.event_name == 'pull_request' }}
        with:
          version: latest
@@ -28,7 +28,7 @@ jobs:
      - run: echo https://github.com/tendermint/tendermint/blob/${GITHUB_REF#refs/tags/}/CHANGELOG.md#${GITHUB_REF#refs/tags/} > ../release_notes.md

      - name: Release
        uses: goreleaser/goreleaser-action@v3
        uses: goreleaser/goreleaser-action@v4
        if: startsWith(github.ref, 'refs/tags/')
        with:
          version: latest

@@ -33,7 +33,7 @@
          if [[ $VERSION =~ ^v[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then
            TAGS="$TAGS,${DOCKER_IMAGE}:${VERSION}"
          fi
          echo ::set-output name=tags::${TAGS}
          echo "tags=${TAGS}" >> $GITHUB_OUTPUT

      - name: Set up QEMU
        uses: docker/setup-qemu-action@master
60 .github/workflows/testapp-docker.yml (vendored, new file)

@@ -0,0 +1,60 @@
name: Docker E2E Node
# Build & Push rebuilds the e2e Testapp docker image on every push to main and creation of tags
# and pushes the image to https://hub.docker.com/r/tendermint/e2e-node
on:
  push:
    branches:
      - main
    tags:
      - "v[0-9]+.[0-9]+.[0-9]+"               # Push events to matching v*, i.e. v1.0, v20.15.10
      - "v[0-9]+.[0-9]+.[0-9]+-alpha.[0-9]+"  # e.g. v0.37.0-alpha.1, v0.38.0-alpha.10
      - "v[0-9]+.[0-9]+.[0-9]+-beta.[0-9]+"   # e.g. v0.37.0-beta.1, v0.38.0-beta.10
      - "v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+"      # e.g. v0.37.0-rc1, v0.38.0-rc10

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - name: Prepare
        id: prep
        run: |
          DOCKER_IMAGE=tendermint/e2e-node
          VERSION=noop
          if [[ $GITHUB_REF == refs/tags/* ]]; then
            VERSION=${GITHUB_REF#refs/tags/}
          elif [[ $GITHUB_REF == refs/heads/* ]]; then
            VERSION=$(echo ${GITHUB_REF#refs/heads/} | sed -r 's#/+#-#g')
            if [ "${{ github.event.repository.default_branch }}" = "$VERSION" ]; then
              VERSION=latest
            fi
          fi
          TAGS="${DOCKER_IMAGE}:${VERSION}"
          if [[ $VERSION =~ ^v[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then
            TAGS="$TAGS,${DOCKER_IMAGE}:${VERSION}"
          fi
          echo "tags=${TAGS}" >> $GITHUB_OUTPUT

      - name: Set up QEMU
        uses: docker/setup-qemu-action@master
        with:
          platforms: all

      - name: Set up Docker Build
        uses: docker/setup-buildx-action@v2.2.1

      - name: Login to DockerHub
        if: ${{ github.event_name != 'pull_request' }}
        uses: docker/login-action@v2.1.0
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Publish to Docker Hub
        uses: docker/build-push-action@v3.2.0
        with:
          context: .
          file: ./test/e2e/docker/Dockerfile
          platforms: linux/amd64,linux/arm64
          push: ${{ github.event_name != 'beep_boop' }}
          tags: ${{ steps.prep.outputs.tags }}
@@ -12,19 +12,25 @@

- Go API
  - [p2p] \#9625 Remove unused p2p/trust package (@cmwaters)
  - [rpc] \#9655 Remove global environment and replace with constructor. (@williambanfield,@tychoish)
  - [node] \#9655 Move DBContext and DBProvider from the node package to the config package. (@williambanfield,@tychoish)

- Blockchain Protocol

- Data Storage
  - [state] \#6541 Move pruneBlocks from consensus/state to state/execution. (@JayT106)
  - [state] \#6541 Move pruneBlocks from consensus/state to state/execution. (@JayT106)

- Tooling
  - [tools/tm-signer-harness] \#6498 Set OS home dir to instead of the hardcoded PATH. (@JayT106)
  - [metrics] \#9682 move state-syncing and block-syncing metrics to their respective packages (@cmwaters)
  - [tools/tm-signer-harness] \#6498 Set OS home dir to instead of the hardcoded PATH. (@JayT106)
  - [metrics] \#9682 move state-syncing and block-syncing metrics to their respective packages (@cmwaters)
    labels have moved from block_syncing -> blocksync_syncing and state_syncing -> statesync_syncing
  - [inspect] \#9655 Add a new `inspect` command for introspecting the state and block store of a crashed tendermint node. (@williambanfield)

### FEATURES

- [proxy] \#9830 Introduce `NewUnsyncLocalClientCreator`, which allows local
  ABCI clients to have the same concurrency model as remote clients (i.e. one
  mutex per client "connection", for each of the four ABCI "connections").
- [config] \#9680 Introduce `BootstrapPeers` to the config to allow nodes to list peers to be added to
  the addressbook upon start up (@cmwaters)

@@ -50,24 +56,24 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi
### BREAKING CHANGES

- CLI/RPC/Config
  - [config] \#9259 Rename the fastsync section and the fast_sync key blocksync and block_sync respectively
  - [config] \#9259 Rename the fastsync section and the fast_sync key blocksync and block_sync respectively

- Apps
  - [abci/counter] \#6684 Delete counter example app
  - [abci] \#5783 Make length delimiter encoding consistent (`uint64`) between ABCI and P2P wire-level protocols
  - [abci] \#9145 Removes unused Response/Request `SetOption` from ABCI (@samricotta)
  - [abci/params] \#9287 Deduplicate `ConsensusParams` and `BlockParams` so only `types` proto definitions are used (@cmwaters)
    - Remove `TimeIotaMs` and use a hard-coded 1 millisecond value to ensure monotonically increasing block times.
    - Rename `AppVersion` to `App` so as to not stutter.
  - [types] \#9287 Reduce the use of protobuf types in core logic. (@cmwaters)
    - `ConsensusParams`, `BlockParams`, `ValidatorParams`, `EvidenceParams`, `VersionParams` have become native types.
  - [abci/counter] \#6684 Delete counter example app
  - [abci] \#5783 Make length delimiter encoding consistent (`uint64`) between ABCI and P2P wire-level protocols
  - [abci] \#9145 Removes unused Response/Request `SetOption` from ABCI (@samricotta)
  - [abci/params] \#9287 Deduplicate `ConsensusParams` and `BlockParams` so only `types` proto definitions are used (@cmwaters)
    - Remove `TimeIotaMs` and use a hard-coded 1 millisecond value to ensure monotonically increasing block times.
    - Rename `AppVersion` to `App` so as to not stutter.
  - [types] \#9287 Reduce the use of protobuf types in core logic. (@cmwaters)
    - `ConsensusParams`, `BlockParams`, `ValidatorParams`, `EvidenceParams`, `VersionParams` have become native types.
      They still utilize protobuf when being sent over the wire or written to disk.
    - Moved `ValidateConsensusParams` inside (now native type) `ConsensusParams`, and renamed it to `ValidateBasic`.
  - [abci] \#9301 New ABCI methods `PrepareProposal` and `ProcessProposal` which give the app control over transactions proposed and allows for verification of proposed blocks.
  - [abci] \#8216 Renamed `EvidenceType` to `MisbehaviorType` and `Evidence` to `Misbehavior` as a more accurate label of their contents. (@williambanfield, @sergio-mena)
  - [abci] \#9122 Renamed `LastCommitInfo` to `CommitInfo` in preparation for vote extensions. (@cmwaters)
  - [abci] \#8656, \#8901 Added cli commands for `PrepareProposal` and `ProcessProposal`. (@jmalicevic, @hvanz)
  - [abci] \#6403 Change the `key` and `value` fields from `[]byte` to `string` in the `EventAttribute` type. (@alexanderbez)
    - Moved `ValidateConsensusParams` inside (now native type) `ConsensusParams`, and renamed it to `ValidateBasic`.
  - [abci] \#9301 New ABCI methods `PrepareProposal` and `ProcessProposal` which give the app control over transactions proposed and allows for verification of proposed blocks.
  - [abci] \#8216 Renamed `EvidenceType` to `MisbehaviorType` and `Evidence` to `Misbehavior` as a more accurate label of their contents. (@williambanfield, @sergio-mena)
  - [abci] \#9122 Renamed `LastCommitInfo` to `CommitInfo` in preparation for vote extensions. (@cmwaters)
  - [abci] \#8656, \#8901 Added cli commands for `PrepareProposal` and `ProcessProposal`. (@jmalicevic, @hvanz)
  - [abci] \#6403 Change the `key` and `value` fields from `[]byte` to `string` in the `EventAttribute` type. (@alexanderbez)

- P2P Protocol

@@ -83,6 +89,7 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi
- [abci] \#9301 New ABCI methods `PrepareProposal` and `ProcessProposal` which give the app control over transactions proposed and allows for verification of proposed blocks.

### IMPROVEMENTS

- [crypto] \#9250 Update to use btcec v2 and the latest btcutil. (@wcsiu)

- [cli] \#9171 add `--hard` flag to rollback command (and a boolean to the `RollbackState` method). This will rollback
4 Makefile

@@ -274,6 +274,10 @@ lint:
	@go run github.com/golangci/golangci-lint/cmd/golangci-lint run
.PHONY: lint

vulncheck:
	@go run golang.org/x/vuln/cmd/govulncheck@latest ./...
.PHONY: vulncheck

DESTINATION = ./index.html.md

###############################################################################
@@ -148,7 +148,7 @@ Currently supported versions include:
- [Tendermint Core Blog](https://medium.com/tendermint/tagged/tendermint-core)
- [Cosmos Blog](https://blog.cosmos.network/tendermint/home)

## Join us!
## Join us

Tendermint Core is maintained by [Interchain GmbH](https://interchain.io).
If you'd like to work full-time on Tendermint Core,
64 RELEASES.md

@@ -50,6 +50,7 @@ the 0.38.x line.
   in order to do this).

3. Create and push the backport branch:

   ```sh
   git checkout -b v0.38.x
   git push origin v0.38.x
@@ -81,6 +82,7 @@ the 0.38.x line.
   * `docs.tendermint.com/main` -> `docs.tendermint.com/v0.38`

   Once you have updated all of the relevant documentation:

   ```sh
   # Create and push the PR.
   git checkout -b update-docs-v038x
@@ -113,7 +115,7 @@ create an alpha or beta version, or release candidate (RC) for our friends and
partners to test out. We use git tags to create pre-releases, and we build them
off of backport branches, for example:

- `v0.38.0-alpha.1` - The first alpha release of `v0.38.0`. Subsequent alpha
* `v0.38.0-alpha.1` - The first alpha release of `v0.38.0`. Subsequent alpha
  releases will be numbered `v0.38.0-alpha.2`, `v0.38.0-alpha.3`, etc.

  Alpha releases are to be considered the _most_ unstable of pre-releases, and
@@ -121,14 +123,14 @@ off of backport branches, for example:
  adopters to start integrating and testing new functionality before we're done
  with QA.

- `v0.38.0-beta.1` - The first beta release of `v0.38.0`. Subsequent beta
* `v0.38.0-beta.1` - The first beta release of `v0.38.0`. Subsequent beta
  releases will be numbered `v0.38.0-beta.2`, `v0.38.0-beta.3`, etc.

  Beta releases can be considered more stable than alpha releases in that we
  will have QA'd them better than alpha releases, but there still may be
  minor breaking API changes if users have strong demands for such changes.

- `v0.38.0-rc1` - The first release candidate (RC) of `v0.38.0`. Subsequent RCs
* `v0.38.0-rc1` - The first release candidate (RC) of `v0.38.0`. Subsequent RCs
  will be numbered `v0.38.0-rc2`, `v0.38.0-rc3`, etc.

  RCs are considered more stable than beta releases in that we will have
@@ -146,18 +148,18 @@ backport branch (see above). Otherwise:
1. Start from the backport branch (e.g. `v0.38.x`).
2. Run the integration tests and the E2E nightlies
   (which can be triggered from the GitHub UI;
   e.g., https://github.com/tendermint/tendermint/actions/workflows/e2e-nightly-37x.yml).
   e.g., <https://github.com/tendermint/tendermint/actions/workflows/e2e-nightly-37x.yml>).
3. Prepare the pre-release documentation:
   - Ensure that all relevant changes are in the `CHANGELOG_PENDING.md` file.
   * Ensure that all relevant changes are in the `CHANGELOG_PENDING.md` file.
     This file's contents must only be included in the `CHANGELOG.md` when we
     cut final releases.
   - Ensure that `UPGRADING.md` is up-to-date and includes notes on any breaking changes
   * Ensure that `UPGRADING.md` is up-to-date and includes notes on any breaking changes
     or other upgrading flows.
4. Prepare the versioning:
   - Bump TMVersionDefault version in `version.go`
   - Bump P2P and block protocol versions in `version.go`, if necessary.
   * Bump TMVersionDefault version in `version.go`
   * Bump P2P and block protocol versions in `version.go`, if necessary.
     Check the changelog for breaking changes in these components.
   - Bump ABCI protocol version in `version.go`, if necessary
   * Bump ABCI protocol version in `version.go`, if necessary
5. Open a PR with these changes against the backport branch.
6. Once these changes have landed on the backport branch, be sure to pull them back down locally.
7. Once you have the changes locally, create the new tag, specifying a name and a tag "message":
@@ -179,33 +181,33 @@ Before performing these steps, be sure the
1. Start on the backport branch (e.g. `v0.38.x`)
2. Run integration tests (`make test_integrations`) and the e2e nightlies.
3. Prepare the release:
   - "Squash" changes from the changelog entries for the pre-releases into a
   * "Squash" changes from the changelog entries for the pre-releases into a
     single entry, and add all changes included in `CHANGELOG_PENDING.md`.
     (Squashing includes both combining all entries, as well as removing or
     simplifying any intra-pre-release changes. It may also help to alphabetize
     the entries by package name.)
   - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
   * Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
     all PRs
   - Ensure that `UPGRADING.md` is up-to-date and includes notes on any breaking changes
   * Ensure that `UPGRADING.md` is up-to-date and includes notes on any breaking changes
     or other upgrading flows.
   - Bump TMVersionDefault version in `version.go`
   - Bump P2P and block protocol versions in `version.go`, if necessary
   - Bump ABCI protocol version in `version.go`, if necessary
   * Bump TMVersionDefault version in `version.go`
   * Bump P2P and block protocol versions in `version.go`, if necessary
   * Bump ABCI protocol version in `version.go`, if necessary
4. Open a PR with these changes against the backport branch.
5. Once these changes are on the backport branch, push a tag with prepared release details.
   This will trigger the actual release `v0.38.0`.
   - `git tag -a v0.38.0 -m 'Release v0.38.0'`
   - `git push origin v0.38.0`
   * `git tag -a v0.38.0 -m 'Release v0.38.0'`
   * `git push origin v0.38.0`
6. Make sure that `main` is updated with the latest `CHANGELOG.md`, `CHANGELOG_PENDING.md`, and `UPGRADING.md`.
7. Add the release to the documentation site generator config (see
   [DOCS\_README.md](./docs/DOCS_README.md) for more details). In summary:
   - Start on branch `main`.
   - Add a new line at the bottom of [`docs/versions`](./docs/versions) to
   * Start on branch `main`.
   * Add a new line at the bottom of [`docs/versions`](./docs/versions) to
     ensure the newest release is the default for the landing page.
   - Add a new entry to `themeConfig.versions` in
   * Add a new entry to `themeConfig.versions` in
     [`docs/.vuepress/config.js`](./docs/.vuepress/config.js) to include the
     release in the dropdown versions menu.
   - Commit these changes to `main` and backport them into the backport
   * Commit these changes to `main` and backport them into the backport
     branch for this release.

## Patch release

@@ -222,21 +224,21 @@ To create a patch release:
1. Checkout the long-lived backport branch: `git checkout v0.38.x`
2. Run integration tests (`make test_integrations`) and the nightlies.
3. Check out a new branch and prepare the release:
   - Copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md`
   - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for all issues
   - Run `bash ./scripts/authors.sh` to get a list of authors since the latest release, and add the GitHub aliases of external contributors to the top of the CHANGELOG. To lookup an alias from an email, try `bash ./scripts/authors.sh <email>`
   - Reset the `CHANGELOG_PENDING.md`
   - Bump the TMDefaultVersion in `version.go`
   - Bump the ABCI version number, if necessary.
   * Copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md`
   * Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for all issues
   * Run `bash ./scripts/authors.sh` to get a list of authors since the latest release, and add the GitHub aliases of external contributors to the top of the CHANGELOG. To lookup an alias from an email, try `bash ./scripts/authors.sh <email>`
   * Reset the `CHANGELOG_PENDING.md`
   * Bump the TMDefaultVersion in `version.go`
   * Bump the ABCI version number, if necessary.
     (Note that ABCI follows semver, and that ABCI versions are the only versions
     which can change during patch releases, and only field additions are valid patch changes.)
4. Open a PR with these changes that will land them back on `v0.38.x`
5. Once this change has landed on the backport branch, make sure to pull it locally, then push a tag.
   - `git tag -a v0.38.1 -m 'Release v0.38.1'`
   - `git push origin v0.38.1`
   * `git tag -a v0.38.1 -m 'Release v0.38.1'`
   * `git push origin v0.38.1`
6. Create a pull request back to main with the CHANGELOG & version changes from the latest release.
   - Remove all `R:patch` labels from the pull requests that were included in the release.
   - Do not merge the backport branch into main.
   * Remove all `R:patch` labels from the pull requests that were included in the release.
   * Do not merge the backport branch into main.

## Minor Release Checklist
@@ -1,263 +0,0 @@
package abcicli

import (
    "sync"

    types "github.com/tendermint/tendermint/abci/types"
    "github.com/tendermint/tendermint/libs/service"
)

type unsyncLocalClient struct {
    service.BaseService

    types.Application

    // This mutex is exclusively used to protect the callback.
    mtx sync.RWMutex
    Callback
}

var _ Client = (*unsyncLocalClient)(nil)

// NewUnsyncLocalClient creates an unsynchronized local client, which will be
// directly calling the methods of the given app.
//
// Unlike NewLocalClient, it does not hold a mutex around the application, so
// it is up to the application to manage its synchronization properly.
func NewUnsyncLocalClient(app types.Application) Client {
    cli := &unsyncLocalClient{
        Application: app,
    }
    cli.BaseService = *service.NewBaseService(nil, "unsyncLocalClient", cli)
    return cli
}

func (app *unsyncLocalClient) SetResponseCallback(cb Callback) {
    app.mtx.Lock()
    defer app.mtx.Unlock()
    app.Callback = cb
}

// TODO: change types.Application to include Error()?
func (app *unsyncLocalClient) Error() error {
    return nil
}

func (app *unsyncLocalClient) FlushAsync() *ReqRes {
    // Do nothing
    return newLocalReqRes(types.ToRequestFlush(), nil)
}

func (app *unsyncLocalClient) EchoAsync(msg string) *ReqRes {
    return app.callback(
        types.ToRequestEcho(msg),
        types.ToResponseEcho(msg),
    )
}

func (app *unsyncLocalClient) InfoAsync(req types.RequestInfo) *ReqRes {
    res := app.Application.Info(req)
    return app.callback(
        types.ToRequestInfo(req),
        types.ToResponseInfo(res),
    )
}

func (app *unsyncLocalClient) DeliverTxAsync(params types.RequestDeliverTx) *ReqRes {
    res := app.Application.DeliverTx(params)
    return app.callback(
        types.ToRequestDeliverTx(params),
        types.ToResponseDeliverTx(res),
    )
}

func (app *unsyncLocalClient) CheckTxAsync(req types.RequestCheckTx) *ReqRes {
    res := app.Application.CheckTx(req)
    return app.callback(
        types.ToRequestCheckTx(req),
        types.ToResponseCheckTx(res),
    )
}

func (app *unsyncLocalClient) QueryAsync(req types.RequestQuery) *ReqRes {
    res := app.Application.Query(req)
    return app.callback(
        types.ToRequestQuery(req),
        types.ToResponseQuery(res),
    )
}

func (app *unsyncLocalClient) CommitAsync() *ReqRes {
    res := app.Application.Commit()
    return app.callback(
        types.ToRequestCommit(),
        types.ToResponseCommit(res),
    )
}

func (app *unsyncLocalClient) InitChainAsync(req types.RequestInitChain) *ReqRes {
    res := app.Application.InitChain(req)
    return app.callback(
        types.ToRequestInitChain(req),
        types.ToResponseInitChain(res),
    )
}

func (app *unsyncLocalClient) BeginBlockAsync(req types.RequestBeginBlock) *ReqRes {
    res := app.Application.BeginBlock(req)
    return app.callback(
        types.ToRequestBeginBlock(req),
        types.ToResponseBeginBlock(res),
    )
}

func (app *unsyncLocalClient) EndBlockAsync(req types.RequestEndBlock) *ReqRes {
    res := app.Application.EndBlock(req)
    return app.callback(
        types.ToRequestEndBlock(req),
        types.ToResponseEndBlock(res),
    )
}

func (app *unsyncLocalClient) ListSnapshotsAsync(req types.RequestListSnapshots) *ReqRes {
    res := app.Application.ListSnapshots(req)
    return app.callback(
        types.ToRequestListSnapshots(req),
        types.ToResponseListSnapshots(res),
    )
}

func (app *unsyncLocalClient) OfferSnapshotAsync(req types.RequestOfferSnapshot) *ReqRes {
    res := app.Application.OfferSnapshot(req)
    return app.callback(
        types.ToRequestOfferSnapshot(req),
        types.ToResponseOfferSnapshot(res),
    )
}

func (app *unsyncLocalClient) LoadSnapshotChunkAsync(req types.RequestLoadSnapshotChunk) *ReqRes {
    res := app.Application.LoadSnapshotChunk(req)
    return app.callback(
        types.ToRequestLoadSnapshotChunk(req),
        types.ToResponseLoadSnapshotChunk(res),
    )
}

func (app *unsyncLocalClient) ApplySnapshotChunkAsync(req types.RequestApplySnapshotChunk) *ReqRes {
    res := app.Application.ApplySnapshotChunk(req)
    return app.callback(
        types.ToRequestApplySnapshotChunk(req),
        types.ToResponseApplySnapshotChunk(res),
    )
}

func (app *unsyncLocalClient) PrepareProposalAsync(req types.RequestPrepareProposal) *ReqRes {
    res := app.Application.PrepareProposal(req)
    return app.callback(
        types.ToRequestPrepareProposal(req),
        types.ToResponsePrepareProposal(res),
    )
}

func (app *unsyncLocalClient) ProcessProposalAsync(req types.RequestProcessProposal) *ReqRes {
    res := app.Application.ProcessProposal(req)
    return app.callback(
        types.ToRequestProcessProposal(req),
        types.ToResponseProcessProposal(res),
    )
}

//-------------------------------------------------------

func (app *unsyncLocalClient) FlushSync() error {
    return nil
}

func (app *unsyncLocalClient) EchoSync(msg string) (*types.ResponseEcho, error) {
    return &types.ResponseEcho{Message: msg}, nil
}

func (app *unsyncLocalClient) InfoSync(req types.RequestInfo) (*types.ResponseInfo, error) {
    res := app.Application.Info(req)
    return &res, nil
}

func (app *unsyncLocalClient) DeliverTxSync(req types.RequestDeliverTx) (*types.ResponseDeliverTx, error) {
    res := app.Application.DeliverTx(req)
    return &res, nil
}

func (app *unsyncLocalClient) CheckTxSync(req types.RequestCheckTx) (*types.ResponseCheckTx, error) {
    res := app.Application.CheckTx(req)
    return &res, nil
}

func (app *unsyncLocalClient) QuerySync(req types.RequestQuery) (*types.ResponseQuery, error) {
    res := app.Application.Query(req)
    return &res, nil
}

func (app *unsyncLocalClient) CommitSync() (*types.ResponseCommit, error) {
    res := app.Application.Commit()
    return &res, nil
}

func (app *unsyncLocalClient) InitChainSync(req types.RequestInitChain) (*types.ResponseInitChain, error) {
    res := app.Application.InitChain(req)
    return &res, nil
}

func (app *unsyncLocalClient) BeginBlockSync(req types.RequestBeginBlock) (*types.ResponseBeginBlock, error) {
    res := app.Application.BeginBlock(req)
    return &res, nil
}

func (app *unsyncLocalClient) EndBlockSync(req types.RequestEndBlock) (*types.ResponseEndBlock, error) {
    res := app.Application.EndBlock(req)
    return &res, nil
}

func (app *unsyncLocalClient) ListSnapshotsSync(req types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
    res := app.Application.ListSnapshots(req)
    return &res, nil
}

func (app *unsyncLocalClient) OfferSnapshotSync(req types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
    res := app.Application.OfferSnapshot(req)
    return &res, nil
}

func (app *unsyncLocalClient) LoadSnapshotChunkSync(
    req types.RequestLoadSnapshotChunk,
) (*types.ResponseLoadSnapshotChunk, error) {
    res := app.Application.LoadSnapshotChunk(req)
    return &res, nil
}

func (app *unsyncLocalClient) ApplySnapshotChunkSync(
    req types.RequestApplySnapshotChunk,
) (*types.ResponseApplySnapshotChunk, error) {
    res := app.Application.ApplySnapshotChunk(req)
    return &res, nil
}

func (app *unsyncLocalClient) PrepareProposalSync(req types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
    res := app.Application.PrepareProposal(req)
    return &res, nil
}

func (app *unsyncLocalClient) ProcessProposalSync(req types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
    res := app.Application.ProcessProposal(req)
    return &res, nil
}

//-------------------------------------------------------

func (app *unsyncLocalClient) callback(req *types.Request, res *types.Response) *ReqRes {
    app.mtx.RLock()
    defer app.mtx.RUnlock()
    app.Callback(req, res)
    rr := newLocalReqRes(req, res)
    rr.callbackInvoked = true
    return rr
}
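For orientation, below is a minimal sketch of how the unsynchronized local client shown above could be wired up. It assumes the `abcicli` package API exactly as it appears in this diff plus the example `kvstore` application; because `NewUnsyncLocalClient` holds no mutex around the application, the application itself must be safe for concurrent use.

```go
package main

import (
	"fmt"

	abcicli "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/example/kvstore"
)

func main() {
	// The client only guards its response callback with the RWMutex above;
	// the application is responsible for its own synchronization.
	app := kvstore.NewApplication()
	client := abcicli.NewUnsyncLocalClient(app)

	if err := client.Start(); err != nil {
		panic(err)
	}
	defer client.Stop() //nolint:errcheck // illustrative sketch only

	res, err := client.EchoSync("hello")
	if err != nil {
		panic(err)
	}
	fmt.Println(res.Message) // prints "hello"
}
```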
@@ -460,11 +460,14 @@ func cmdUnimplemented(cmd *cobra.Command, args []string) error {

    fmt.Println("Available commands:")
    fmt.Printf("%s: %s\n", echoCmd.Use, echoCmd.Short)
    fmt.Printf("%s: %s\n", infoCmd.Use, infoCmd.Short)
    fmt.Printf("%s: %s\n", checkTxCmd.Use, checkTxCmd.Short)
    fmt.Printf("%s: %s\n", deliverTxCmd.Use, deliverTxCmd.Short)
    fmt.Printf("%s: %s\n", queryCmd.Use, queryCmd.Short)
    fmt.Printf("%s: %s\n", commitCmd.Use, commitCmd.Short)
    fmt.Printf("%s: %s\n", deliverTxCmd.Use, deliverTxCmd.Short)
    fmt.Printf("%s: %s\n", infoCmd.Use, infoCmd.Short)
    fmt.Printf("%s: %s\n", queryCmd.Use, queryCmd.Short)
    fmt.Printf("%s: %s\n", prepareProposalCmd.Use, prepareProposalCmd.Short)
    fmt.Printf("%s: %s\n", processProposalCmd.Use, processProposalCmd.Short)

    fmt.Println("Use \"[command] --help\" for more information about a command.")

    return nil
@@ -8,4 +8,5 @@ const (
    CodeTypeUnauthorized uint32 = 3
    CodeTypeUnknownError uint32 = 4
    CodeTypeExecuted     uint32 = 5
    CodeTypeRejected     uint32 = 6
)
@@ -4,7 +4,7 @@ There are two app's here: the KVStoreApplication and the PersistentKVStoreApplic

## KVStoreApplication

The KVStoreApplication is a simple merkle key-value store.
The KVStoreApplication is a simple merkle key-value store.
Transactions of the form `key=value` are stored as key-value pairs in the tree.
Transactions without an `=` sign set the value to the key.
The app has no replay protection (other than what the mempool provides).
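To illustrate the `key=value` transaction format described above, here is a small, hedged sketch (it is not the application's actual parsing code): transactions containing `=` map the key to the value, while a bare key maps to itself.

```go
package main

import (
	"bytes"
	"fmt"
)

// parseTx splits a kvstore-style transaction into its key and value:
// "key=value" stores value under key, while a bare "key" stores the key
// as its own value, as described in the README above.
func parseTx(tx []byte) (key, value []byte) {
	if i := bytes.IndexByte(tx, '='); i >= 0 {
		return tx[:i], tx[i+1:]
	}
	return tx, tx
}

func main() {
	k, v := parseTx([]byte("name=satoshi"))
	fmt.Printf("%s -> %s\n", k, v) // name -> satoshi
}
```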
@@ -27,4 +27,4 @@ Validator set changes are effected using the following transaction format:

where `pubkeyN` is a base64-encoded 32-byte ed25519 key and `powerN` is a new voting power for the validator with `pubkeyN` (possibly a new one).
To remove a validator from the validator set, set power to `0`.
There is no sybil protection against new validators joining.
There is no sybil protection against new validators joining.
@@ -122,6 +122,10 @@ func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeli
}

func (app *Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
    if len(req.Tx) == 0 {
        return types.ResponseCheckTx{Code: code.CodeTypeRejected}
    }

    if req.Type == types.CheckTxType_Recheck {
        if _, ok := app.txToRemove[string(req.Tx)]; ok {
            return types.ResponseCheckTx{Code: code.CodeTypeExecuted, GasWanted: 1}
@@ -70,6 +70,24 @@ func TestKVStoreKV(t *testing.T) {
    testKVStore(t, kvstore, tx, key, value)
}

func TestPersistentKVStoreEmptyTX(t *testing.T) {
    dir, err := os.MkdirTemp("/tmp", "abci-kvstore-test") // TODO
    if err != nil {
        t.Fatal(err)
    }
    kvstore := NewPersistentKVStoreApplication(dir)
    tx := []byte("")
    reqCheck := types.RequestCheckTx{Tx: tx}
    resCheck := kvstore.CheckTx(reqCheck)
    require.Equal(t, resCheck.Code, code.CodeTypeRejected)

    txs := make([][]byte, 0, 4)
    txs = append(txs, []byte("key=value"), []byte("key"), []byte(""), []byte("kee=value"))
    reqPrepare := types.RequestPrepareProposal{Txs: txs, MaxTxBytes: 10 * 1024}
    resPrepare := kvstore.PrepareProposal(reqPrepare)
    require.Equal(t, len(reqPrepare.Txs), len(resPrepare.Txs)+1, "Empty transaction not properly removed")
}

func TestPersistentKVStoreKV(t *testing.T) {
    dir, err := os.MkdirTemp("/tmp", "abci-kvstore-test") // TODO
    if err != nil {
@@ -324,11 +324,15 @@ func (app *PersistentKVStoreApplication) execPrepareTx(tx []byte) types.Response
}

// substPrepareTx substitutes all the transactions prefixed with 'prepare' in the
// proposal for transactions with the prefix stripped.
// proposal for transactions with the prefix stripped, while discarding invalid empty transactions.
func (app *PersistentKVStoreApplication) substPrepareTx(blockData [][]byte, maxTxBytes int64) [][]byte {
    txs := make([][]byte, 0, len(blockData))
    var totalBytes int64
    for _, tx := range blockData {
        if len(tx) == 0 {
            continue
        }

        txMod := tx
        if isPrepareTx(tx) {
            txMod = bytes.Replace(tx, []byte(PreparePrefix), []byte(ReplacePrefix), 1)
87 cmd/tendermint/commands/inspect.go (new file)

@@ -0,0 +1,87 @@
package commands

import (
    "context"
    "os"
    "os/signal"
    "syscall"

    "github.com/spf13/cobra"

    cfg "github.com/tendermint/tendermint/config"
    "github.com/tendermint/tendermint/inspect"
    "github.com/tendermint/tendermint/state"
    "github.com/tendermint/tendermint/state/indexer/block"
    "github.com/tendermint/tendermint/store"
    "github.com/tendermint/tendermint/types"
)

// InspectCmd is the command for starting an inspect server.
var InspectCmd = &cobra.Command{
    Use:   "inspect",
    Short: "Run an inspect server for investigating Tendermint state",
    Long: `
inspect runs a subset of Tendermint's RPC endpoints that are useful for debugging
issues with Tendermint.

When the Tendermint consensus engine detects inconsistent state, it will crash the
Tendermint process. Tendermint will not start up while in this inconsistent state.
The inspect command can be used to query the block and state store using Tendermint
RPC calls to debug issues of inconsistent state.
`,

    RunE: runInspect,
}

func init() {
    InspectCmd.Flags().
        String("rpc.laddr",
            config.RPC.ListenAddress, "RPC listenener address. Port required")
    InspectCmd.Flags().
        String("db-backend",
            config.DBBackend, "database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb")
    InspectCmd.Flags().
        String("db-dir", config.DBPath, "database directory")
}

func runInspect(cmd *cobra.Command, args []string) error {
    ctx, cancel := context.WithCancel(cmd.Context())
    defer cancel()

    c := make(chan os.Signal, 1)
    signal.Notify(c, syscall.SIGTERM, syscall.SIGINT)
    go func() {
        <-c
        cancel()
    }()

    blockStoreDB, err := cfg.DefaultDBProvider(&cfg.DBContext{ID: "blockstore", Config: config})
    if err != nil {
        return err
    }
    blockStore := store.NewBlockStore(blockStoreDB)
    defer blockStore.Close()

    stateDB, err := cfg.DefaultDBProvider(&cfg.DBContext{ID: "state", Config: config})
    if err != nil {
        return err
    }
    stateStore := state.NewStore(stateDB, state.StoreOptions{DiscardABCIResponses: false})
    defer stateStore.Close()

    genDoc, err := types.GenesisDocFromFile(config.GenesisFile())
    if err != nil {
        return err
    }
    txIndexer, blockIndexer, err := block.IndexerFromConfig(config, cfg.DefaultDBProvider, genDoc.ChainID)
    if err != nil {
        return err
    }
    ins := inspect.New(config.RPC, blockStore, stateStore, txIndexer, blockIndexer, logger)

    logger.Info("starting inspect server")
    if err := ins.Run(ctx); err != nil {
        return err
    }
    return nil
}
@@ -30,6 +30,7 @@ func main() {
    cmd.VersionCmd,
    cmd.RollbackStateCmd,
    cmd.CompactGoLevelDBCmd,
    cmd.InspectCmd,
    debug.DebugCmd,
    cli.NewCompletionCmd(rootCmd, true),
)
30 config/db.go (new file)

@@ -0,0 +1,30 @@
package config

import (
    "context"

    dbm "github.com/tendermint/tm-db"

    "github.com/tendermint/tendermint/libs/log"
    "github.com/tendermint/tendermint/libs/service"
)

// ServiceProvider takes a config and a logger and returns a ready to go Node.
type ServiceProvider func(context.Context, *Config, log.Logger) (service.Service, error)

// DBContext specifies config information for loading a new DB.
type DBContext struct {
    ID     string
    Config *Config
}

// DBProvider takes a DBContext and returns an instantiated DB.
type DBProvider func(*DBContext) (dbm.DB, error)

// DefaultDBProvider returns a database using the DBBackend and DBDir
// specified in the Config.
func DefaultDBProvider(ctx *DBContext) (dbm.DB, error) {
    dbType := dbm.BackendType(ctx.Config.DBBackend)

    return dbm.NewDB(ctx.ID, dbType, ctx.Config.DBDir())
}
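A minimal, illustrative sketch of calling the new `config.DefaultDBProvider` (mirroring its use in `inspect.go` above); the root directory here is purely hypothetical.

```go
package main

import (
	"fmt"

	cfg "github.com/tendermint/tendermint/config"
)

func main() {
	// Construct a config; DefaultConfig uses the default backend and data dir.
	config := cfg.DefaultConfig()
	config.SetRoot("/tmp/tm-example") // hypothetical root directory

	// Open the block store database via the provider moved into the config package.
	db, err := cfg.DefaultDBProvider(&cfg.DBContext{ID: "blockstore", Config: config})
	if err != nil {
		panic(err)
	}
	defer db.Close()

	fmt.Println("opened blockstore DB with backend", config.DBBackend)
}
```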
@@ -1,3 +1,3 @@
# Consensus
# Consensus

See the [consensus spec](https://github.com/tendermint/tendermint/tree/main/spec/consensus) for more information.
See the [consensus spec](https://github.com/tendermint/tendermint/tree/main/spec/consensus) for more information.
@@ -54,7 +54,7 @@ func (cs *State) readReplayMessage(msg *TimedWALMessage, newStepSub types.Subscr
        if m.Height != m2.Height || m.Round != m2.Round || m.Step != m2.Step {
            return fmt.Errorf("roundState mismatch. Got %v; Expected %v", m2, m)
        }
    case <-newStepSub.Cancelled():
    case <-newStepSub.Canceled():
        return fmt.Errorf("failed to read off newStepSub.Out(). newStepSub was canceled")
    case <-ticker:
        return fmt.Errorf("failed to read off newStepSub.Out()")
@@ -97,7 +97,7 @@ func startNewStateAndWaitForBlock(t *testing.T, consensusReplayConfig *cfg.Confi
    require.NoError(t, err)
    select {
    case <-newBlockSub.Out():
    case <-newBlockSub.Cancelled():
    case <-newBlockSub.Canceled():
        t.Fatal("newBlockSub was canceled")
    case <-time.After(120 * time.Second):
        t.Fatal("Timed out waiting for new block (see trace above)")
@@ -1198,6 +1198,7 @@ func (bs *mockBlockStore) PruneBlocks(height int64, state sm.State) (uint64, int
}

func (bs *mockBlockStore) DeleteLatestBlock() error { return nil }
func (bs *mockBlockStore) Close() error { return nil }

//---------------------------------------
// Test handshake/init chain
@@ -1,4 +1,4 @@
# Merkle Tree

For smaller static data structures that don't require immutable snapshots or mutability;
For smaller static data structures that don't require immutable snapshots or mutability;
for instance the transactions and validation signatures of a block can be hashed using this simple merkle tree logic.
@@ -262,7 +262,7 @@ deployment, the role of sending messages is taken by Tendermint, which
connects to the app using three separate connections, each with its own
pattern of messages.

For examples of running an ABCI app with Tendermint, see the
For examples of running an ABCI app with Tendermint, see the
[getting started guide](./getting-started.md).

## Bounties
@@ -23,7 +23,7 @@ using Tendermint.

The first apps we will work with are written in Go. To install them, you
need to [install Go](https://golang.org/doc/install), put
`$GOPATH/bin` in your `$PATH` and enable go modules. If you use `bash`,
`$GOPATH/bin` in your `$PATH` and enable go modules. If you use `bash`,
follow these instructions:

```bash
@@ -84,7 +84,7 @@ the `psql` indexer type.
Example:

```shell
$ psql ... -f state/indexer/sink/psql/schema.sql
psql ... -f state/indexer/sink/psql/schema.sql
```

## Default Indexes
@@ -39,7 +39,7 @@ When writing a p2p service, there are two primary responsibilities:
The first responsibility is handled by the Switch:

- Responsible for routing connections between peers
- Notably _only handles TCP connections_; RPC/HTTP is separate
- Notably *only handles TCP connections*; RPC/HTTP is separate
- Is a dependency for every reactor; all reactors expose a function `setSwitch`
- Holds onto channels (channels on the TCP connection--NOT Go channels) and uses them to route
- Is a global object, with a global namespace for messages
@@ -56,7 +56,7 @@ The second responsibility is handled by a combination of the PEX and the Address
Here are some relevant facts about TCP:

1. All TCP connections have a "frame window size" which represents the packet size to the "confidence;" i.e., if you are sending packets along a new connection, you must start out with small packets. As the packets are received successfully, you can start to send larger and larger packets. (This curve is illustrated below.) This means that TCP connections are slow to spin up.
2. The syn/ack process also means that there's a high overhead for small, frequent messages
2. The syn/ack process also means that there's a high overhead for small, frequent messages
3. Sockets are represented by file descriptors.


@@ -114,7 +114,7 @@ Furthermore, all reactors expose:

The `receive` method can be called many times by the mconnection. It has the same signature across all reactors.

The `addReactor` call does a for loop over all the channels on the reactor and creates a map of channel IDs->reactors. The switch holds onto this map, and passes it to the _transport_, a thin wrapper around TCP connections.
The `addReactor` call does a for loop over all the channels on the reactor and creates a map of channel IDs->reactors. The switch holds onto this map, and passes it to the *transport*, a thin wrapper around TCP connections.

The following is an exhaustive (?) list of reactors:
@@ -22,7 +22,7 @@ reformalization of BFT in a more modern setting, with emphasis on
peer-to-peer networking and cryptographic authentication. The name
derives from the way transactions are batched in blocks, where each
block contains a cryptographic hash of the previous one, forming a
chain.
chain.

Tendermint consists of two chief technical components: a blockchain
consensus engine and a generic application interface. The consensus
@@ -55,7 +55,7 @@ atop a classical, non-BFT consensus algorithm. Zookeeper uses an
algorithm called Zookeeper Atomic Broadcast, while etcd and consul use
the Raft log replication algorithm. A
typical cluster contains 3-5 machines, and can tolerate crash failures
in less than 1/2 of the machines (e.g., 1 out of 3 or 2 out of 5),
in less than 1/2 of the machines (e.g., 1 out of 3 or 2 out of 5),
but even a single Byzantine fault can jeopardize the whole system.

Each offering provides a slightly different implementation of a
@@ -142,7 +142,7 @@ in design and suffers from "spaghetti code".
Another problem with monolithic design is that it limits you to the
language of the blockchain stack (or vice versa). In the case of
Ethereum which supports a Turing-complete bytecode virtual-machine, it
limits you to languages that compile down to that bytecode; while the
limits you to languages that compile down to that bytecode; while the
[list](https://github.com/pirapira/awesome-ethereum-virtual-machine#programming-languages-that-compile-into-evm)
is growing, it is still very limited.
@@ -156,7 +156,7 @@ protocol.

[Tendermint Core](https://github.com/tendermint/tendermint), the
"consensus engine", communicates with the application via a socket
protocol that satisfies the ABCI, the Tendermint Socket Protocol
protocol that satisfies the ABCI, the Tendermint Socket Protocol
(TSP, or Teaspoon).

To draw an analogy, lets talk about a well-known cryptocurrency,
@@ -267,7 +267,7 @@ committed in a chain, with one block at each **height**. A block may
fail to be committed, in which case the protocol moves to the next
**round**, and a new validator gets to propose a block for that height.
Two stages of voting are required to successfully commit a block; we
call them **pre-vote** and **pre-commit**.
call them **pre-vote** and **pre-commit**.

There is a picture of a couple doing the polka because validators are
doing something like a polka dance. When more than two-thirds of the
@@ -62,5 +62,6 @@ sections.
- [RFC-023: Semi-permanent Testnet](./rfc-023-semi-permanent-testnet.md)
- [RFC-024: Block Structure Consolidation](./rfc-024-block-structure-consolidation.md)
- [RFC-025: Application Defined Transaction Storage](./rfc-025-support-app-side-mempool.md)
- [RFC-027: P2P Message Bandwidth Report](./rfc-027-p2p-message-bandwidth-report.md)

<!-- - [RFC-NNN: Title](./rfc-NNN-title.md) -->
BIN docs/rfc/images/receive-rate-all.png (new file, 204 KiB, binary file not shown)
BIN docs/rfc/images/send-rate-all.png (new file, 213 KiB, binary file not shown)
BIN docs/rfc/images/top-3-percent-receive.png (new file, 78 KiB, binary file not shown)
BIN docs/rfc/images/top-3-percent-send.png (new file, 63 KiB, binary file not shown)
@@ -332,11 +332,11 @@ some advantages over gRPC for our domain. Specifically:
- We can still get the benefits of generated data types using protocol buffers, even
  without using gRPC:

  - Protobuf defines a standard JSON encoding for all message types so
    languages with protobuf support do not need to worry about type mapping
    oddities (a brief sketch of this encoding follows below).

  - Using JSON means that even languages _without_ good protobuf support can
    implement the protocol with a bit more work, and I expect this situation to
    be rare.

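As a concrete illustration of that JSON point, here is a minimal Go sketch (not part of the RFC or of Tendermint) that prints the canonical protobuf JSON form of a message using the `protojson` package; the message type is arbitrary and chosen only so the example is self-contained.

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// Any generated protobuf message works here; durationpb ships with
	// protobuf-go, so no extra code generation is needed for the example.
	msg := durationpb.New(1500 * time.Millisecond)

	// protojson implements the canonical JSON mapping defined by the protobuf
	// spec, so every compliant implementation produces the same shape.
	out, err := protojson.Marshal(msg)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // "1.500s"
}
```
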
@@ -68,13 +68,13 @@ a series of metrics to the P2P layer to better understand the inefficiencies it

The following metrics can help us understand the sources of latency in the Tendermint P2P stack:

- Number of messages sent and received per second
- Time of a message spent on the P2P layer send and receive queues

The following metrics exist and should be leveraged in addition to those added:

- Number of peers node's connected to
- Number of bytes per channel sent and received from each peer

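The queue-time metric proposed above could be recorded with the Prometheus client library that Tendermint already uses for its other metrics. The sketch below is illustrative only: the metric name, labels, and helper are hypothetical, not existing Tendermint code.

```go
package p2pmetrics

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

// sendQueueSeconds is a hypothetical histogram of how long a message sat on a
// peer's send queue before being written to the wire.
var sendQueueSeconds = prometheus.NewHistogramVec(
	prometheus.HistogramOpts{
		Namespace: "tendermint",
		Subsystem: "p2p",
		Name:      "send_queue_seconds",
		Help:      "Time a message spent on the P2P send queue before transmission.",
	},
	[]string{"channel"},
)

func init() { prometheus.MustRegister(sendQueueSeconds) }

// ObserveSend would be called by the connection when a queued message is
// finally written out, given the time at which it was enqueued.
func ObserveSend(channel string, enqueuedAt time.Time) {
	sendQueueSeconds.WithLabelValues(channel).Observe(time.Since(enqueuedAt).Seconds())
}
```

An equivalent histogram on the receive path would cover the other half of the proposed measurement.
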
### Sync
@@ -88,6 +88,7 @@ over a network so that we understand how much overhead Tendermint actually adds.

The operation is likely to be _incredibly_ dependent on the environment in which
the node is being run. The factors that will influence syncing include:

1. Number of peers that a syncing node may fetch from.
2. Speed of the disk that a validator is writing to.
3. Speed of the network connection between the different peers that node is
@@ -103,9 +104,9 @@ how much overhead Tendermint incurs.
We should additionally add metrics to the blocksync operation to more clearly pinpoint
slow operations. The following metrics should be added to the block syncing operation:

- Time to fetch and validate each block
- Time to execute a block
- Blocks sync'd per unit time

### Application

@@ -146,7 +147,7 @@ usecase of Tendermint and do not necessarily need to be addressed at this time.

### RPC

#### Claim: The Query API is slow

The query API locks a mutex across the ABCI connections. This causes consensus to
slow during queries, as ABCI is no longer able to make progress. This is known
@@ -192,11 +193,11 @@ it takes for popular chains with many validators to gather all votes within a st

Metrics that would improve visibility into this include:

- Amount of time for a node to gather votes in a step.
- Amount of time for a node to gather all block parts.
- Number of votes each node sends to gossip (i.e. not its own votes, but votes it is
  transmitting for a peer).
- Total number of votes each node receives (A node may receive duplicate votes
  so understanding how frequently this occurs will be valuable in evaluating the performance
  of the gossip system).

@@ -261,17 +262,17 @@ event system. This has implications for the consensus system, which [publishes e
To better understand the performance of the event system, we should add metrics to track the timing of
event sends. The following metrics would be a good start for tracking this performance:

- Time in event send, labeled by Event Type
- Time in event receive, labeled by subscriber
- Event throughput, measured in events per unit time.

### References
|
||||
|
||||
[modular-hashing]: https://github.com/tendermint/tendermint/pull/6773
|
||||
[issue-2186]: https://github.com/tendermint/tendermint/issues/2186
|
||||
[issue-2187]: https://github.com/tendermint/tendermint/issues/2187
|
||||
[rfc-002]: https://github.com/tendermint/tendermint/pull/6913
|
||||
[adr-57]: https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-057-RPC.md
|
||||
[issue-1319]: https://github.com/tendermint/tendermint/issues/1319
|
||||
[abci-commit-description]: https://github.com/tendermint/tendermint/blob/main/spec/abci/abci++_methods.md#commit
|
||||
[abci-local-client-code]: https://github.com/tendermint/tendermint/blob/511bd3eb7f037855a793a27ff4c53c12f085b570/abci/client/local_client.go#L84
|
||||
[hub-signature]: https://github.com/cosmos/gaia/blob/0ecb6ed8a244d835807f1ced49217d54a9ca2070/docs/resources/genesis.md#consensus-parameters
|
||||
|
||||
@@ -44,13 +44,13 @@ Proto allows fields to be encoded in any order and even be repeated.

3. Presence or absence of default values.

   Types in proto have defined default values similar to Go's zero values.
   Writing or omitting a default value are both legal ways of encoding a wire message.

4. Serialization of 'unknown' fields.

   Unknown fields can be present when a message is created by a binary with a newer
   version of the proto that contains fields that the deserializer in a different
   binary does not yet know about. Deserializers in binaries that do not know about the field
   will maintain the bytes of the unknown field but not place them into the deserialized structure.

@@ -60,26 +60,26 @@ We have a few options to consider when producing this stable representation.

#### Use only compliant serializers and constrain field usage

According to [Cosmos-SDK ADR-27][cosmos-sdk-adr-27], when message types obey a simple
set of rules, gogoproto produces a consistent byte representation of serialized messages.
This seems promising, although more research is needed to guarantee gogoproto always
produces a consistent set of bytes on serialized messages. This would solve the problem
within Tendermint as written in Go, but would require ensuring that there are similar
serializers written in other languages that produce the same output as gogoproto.

#### Reorder serialized bytes to ensure determinism

The serialized form of a proto message can be transformed into a canonical representation
by applying simple rules to the serialized bytes. Re-ordering the serialized bytes
would allow Tendermint to produce a canonical byte representation without having to
simultaneously maintain a custom proto marshaller.

This could be implemented as a function in many languages that performed the following
steps before producing bytes to sign or hashing (a rough sketch follows the list below):

1. Does not add any of the data from unknown fields into the type to hash.

   Tendermint should not run into a case where it needs to verify the integrity of
   data with unknown fields for the following reasons:

   The purpose of checking hash equality within Tendermint is to ensure that
@@ -91,7 +91,7 @@ despite not understanding what their internal structure is. It's not clear what
mean to verify that a block contains data that a process does not know about.

The same reasoning applies for signature verification within Tendermint. Processes
verify a digital signature produced over a set of bytes by locally reconstructing the
data structure that the digital signature signed, using the process's local data.

2. Reordered all message fields to be in tag-sorted order.
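
The re-ordering in step 2 can be done directly on the serialized bytes. Below is a rough Go sketch, not Tendermint code, using the low-level `protowire` package; it re-orders only the top-level fields of a message and does not recurse into nested messages, so it is a starting point rather than a complete canonicalizer.

```go
package canonical

import (
	"fmt"
	"sort"

	"google.golang.org/protobuf/encoding/protowire"
)

// SortFields returns the serialized message with its top-level fields
// re-emitted in tag-sorted order. A stable sort preserves the relative
// order of repeated fields that share a tag.
func SortFields(msg []byte) ([]byte, error) {
	type field struct {
		num protowire.Number
		raw []byte
	}
	var fields []field
	for len(msg) > 0 {
		num, _, n := protowire.ConsumeField(msg)
		if n < 0 {
			return nil, fmt.Errorf("malformed field: %w", protowire.ParseError(n))
		}
		fields = append(fields, field{num: num, raw: msg[:n]})
		msg = msg[n:]
	}
	sort.SliceStable(fields, func(i, j int) bool { return fields[i].num < fields[j].num })
	var out []byte
	for _, f := range fields {
		out = append(out, f.raw...)
	}
	return out, nil
}
```
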
@@ -133,8 +133,6 @@ properly.
|
||||
### References
|
||||
|
||||
[proto-spec-encoding]: https://developers.google.com/protocol-buffers/docs/encoding
|
||||
[spec-issue]: https://github.com/tendermint/tendermint/issues/5005
|
||||
[cosmos-sdk-adr-27]: https://github.com/cosmos/cosmos-sdk/blob/master/docs/architecture/adr-027-deterministic-protobuf-serialization.md
|
||||
[cer-proto-3]: https://github.com/regen-network/canonical-proto3
|
||||
[wire-pb]: https://github.com/creachadair/wirepb
|
||||
|
||||
|
||||
@@ -66,7 +66,7 @@ have been updated until a height at which the parameters existed.
#### Only Update HashedParams on Hash-Breaking Releases

An alternate solution to never hashing defaults is to not update the hashed
parameters on non-hash-breaking releases. This means that when new consensus
parameters are added to Tendermint, there may be a release that makes use of the
parameters but does not verify that they are the same across all validators by
referencing them in the hash. This seems reasonably safe given the fact that
@@ -107,7 +107,7 @@ default values of the new parameters for a single height.

As documented in the upcoming [ADR-74][adr-74], popular chains often simply use the default
values. Additionally, great care is being taken to ensure that logic governed by upcoming
consensus parameters is not liveness-breaking. This means that, at worst-case,
chains will experience a single slow height while waiting for the new values to
be applied.

@@ -156,7 +156,5 @@ gossiped along with the transaction.
|
||||
[cosmos-sdk-gas]: https://github.com/cosmos/cosmos-sdk/blob/c00cedb1427240a730d6eb2be6f7cb01f43869d3/docs/basics/gas-fees.md
|
||||
[cosmos-sdk-fees]: https://github.com/cosmos/cosmos-sdk/blob/c00cedb1427240a730d6eb2be6f7cb01f43869d3/docs/basics/tx-lifecycle.md#gas-and-fees
|
||||
[anoma-gas]: https://github.com/anoma/anoma/blob/6974fe1532a59db3574fc02e7f7e65d1216c1eb2/docs/src/specs/ledger.md#transaction-execution
|
||||
[cosmos-sdk-fee]: https://github.com/cosmos/cosmos-sdk/blob/c00cedb1427240a730d6eb2be6f7cb01f43869d3/types/tx/tx.pb.go#L780-L794
|
||||
[issue-7750]: https://github.com/tendermint/tendermint/issues/7750
|
||||
[reap-max-bytes-max-gas]: https://github.com/tendermint/tendermint/blob/1ac58469f32a98f1c0e2905ca1773d9eac7b7103/internal/mempool/types.go#L45
|
||||
[add-mempool-error]: https://github.com/tendermint/tendermint/blob/205bfca66f6da1b2dded381efb9ad3792f9404cf/rpc/coretypes/responses.go#L239
|
||||
|
||||
@@ -329,7 +329,7 @@ something like this (subject to refinement):
|
||||
- [KV transaction indexer][kv-index]
|
||||
- [Pluggable custom event indexing][i7135] (#7135)
|
||||
- [PostgreSQL event sink][psql]
|
||||
- [PostgreSQL database][postgres]
|
||||
- [PostgreSQL database][postgres]
|
||||
- [Query filter language][query]
|
||||
- [Stream events to postgres for indexing][i1161] (#1161)
|
||||
- [Unbuffered event subscription slow down the consensus][i7247] (#7247)
|
||||
@@ -346,7 +346,6 @@ something like this (subject to refinement):
|
||||
[kv-index]: https://github.com/tendermint/tendermint/blob/main/state/indexer/block/kv
|
||||
[postgres]: https://postgresql.org/
|
||||
[psql]: https://github.com/tendermint/tendermint/tree/main/state/indexer/sink/psql
|
||||
[psql]: https://github.com/tendermint/tendermint/tree/main/state/indexer/sink/psql
|
||||
[query]: https://pkg.go.dev/github.com/tendermint/tendermint/internal/pubsub/query/syntax
|
||||
[sdk]: https://github.com/cosmos/cosmos-sdk
|
||||
[tmdb]: https://pkg.go.dev/github.com/tendermint/tm-db#DB
|
||||
|
||||
@@ -77,7 +77,7 @@ from the mempool, so this would be a pretty straightforward change.
Transaction replacement would enable applications to aggregate or disaggregate transactions.

For aggregation, a set of transactions that all perform related work, such as transferring
tokens between the same two accounts, could be replaced with a single transaction,
i.e. one that transfers a single sum from one account to the other.
Applications that make frequent use of aggregation may be able to achieve a higher throughput.
Aggregation would decrease the space occupied by a single client-submitted transaction in the block, allowing
@@ -189,7 +189,7 @@ it did not want executed.
|
||||
In this validation scheme, applications that allow replacement would check that
|
||||
each listed replaced transaction was correctly reflected in the replacement transaction.
|
||||
In order to perform such validation, the node would need to have the replaced transactions
|
||||
locally. This could be accomplished one of a few ways: by querying the mempool,
|
||||
locally. This could be accomplished one of a few ways: by querying the mempool,
|
||||
by adding an additional p2p gossip channel for transaction replacements, or by including the replaced transactions
|
||||
in the block. Replacement validation via mempool querying would require the node
|
||||
to have received all of the replaced transactions in the mempool which is far from
|
||||
@@ -255,7 +255,5 @@ power instead of immediately solving the problem for them.
|
||||
### References
|
||||
|
||||
[inclusion-proof]: https://github.com/tendermint/tendermint/blob/0fcfaa4568cb700e27c954389c1fcd0b9e786332/types/tx.go#L67
|
||||
[tx-serach-result]: https://github.com/tendermint/tendermint/blob/0fcfaa4568cb700e27c954389c1fcd0b9e786332/rpc/coretypes/responses.go#L267
|
||||
[tx-rpc-func]: https://github.com/tendermint/tendermint/blob/0fcfaa4568cb700e27c954389c1fcd0b9e786332/internal/rpc/core/tx.go#L21
|
||||
[tx-result-index]: https://github.com/tendermint/tendermint/blob/0fcfaa4568cb700e27c954389c1fcd0b9e786332/internal/state/indexer/tx/kv/kv.go#L90
|
||||
[abci-event-type]: https://github.com/tendermint/tendermint/blob/0fcfaa4568cb700e27c954389c1fcd0b9e786332/abci/types/types.pb.go#L3168
|
||||
|
||||
@@ -16,29 +16,29 @@ become confusing quickly. Different sources appear to use slightly different
meanings of each term and this can certainly add to the confusion. Below is
a brief glossary that may be helpful in understanding the discussion that follows.

- **Short Signature**: A signature that does not vary in length with the
  number of signers.
- **Multi-Signature**: A signature generated over a single message
  where, given the message and signature, a verifier is able to determine that
  all parties signed the message. May be short or may vary with the number of signers.
- **Aggregated Signature**: A _short_ signature generated over messages with
  possibly different content where, given the messages and signature, a verifier
  should be able to determine that all the parties signed the designated messages.
- **Threshold Signature**: A _short_ signature generated from multiple signers
  where, given a message and the signature, a verifier is able to determine that
  a large enough share of the parties signed the message. The identities of the
  parties that contributed to the signature are not revealed.
- **BLS Signature**: An elliptic-curve pairing-based signature system that
  has some nice properties for short multi-signatures. May stand for
  _Boneh-Lynn-Schacham_ or _Barreto-Lynn-Scott_ depending on the context. A
  BLS signature is a type of signature scheme that is distinct from other forms
  of elliptic-curve signatures such as ECDSA and EdDSA.
- **Interactive**: Cryptographic scheme where parties need to perform one or
  more request-response cycles to produce the cryptographic material. For
  example, an interactive signature scheme may require the signer and the
  verifier to cooperate to create and/or verify the signature, rather than a
  signature being created ahead of time.
- **Non-interactive**: Cryptographic scheme where parties do not need to
  perform any request-response cycles to produce the cryptographic material.

### Brief notes on pairing-based elliptic-curve cryptography
@@ -89,11 +89,11 @@ depth discussion, see the specific paper on BLS12-381, [Short signatures from

BLS signatures have already gained traction within several popular projects.

- Algorand is working on an implementation.
- [Zcash][zcash-adoption] has adopted BLS12-381 into the protocol.
- [Ethereum 2.0][eth-2-adoption] has adopted BLS12-381 into the protocol.
- [Chia Network][chia-adoption] has adopted BLS for signing blocks.
- [Ostracon][line-ostracon-pr], a fork of Tendermint, has adopted BLS for signing blocks.

### What systems may be affected by adding aggregated signatures?

@@ -106,7 +106,7 @@ overhead. How costly this is is still subject to further investigation and
|
||||
performance testing.
|
||||
|
||||
Even if vote signatures were aggregated before gossip, each validator would still
|
||||
need to receive and verify vote extension data from each (individual) peer validator in
|
||||
need to receive and verify vote extension data from each (individual) peer validator in
|
||||
order for consensus to proceed. That displaces any advantage gained by aggregating signatures across the vote message in the presence of vote extensions.
|
||||
|
||||
#### Block Creation
|
||||
@@ -190,7 +190,7 @@ check an aggregated signature from 1024 validators versus our ed25519 library's
|
||||
|
||||
#### Reduce Light-Client Verification Time
|
||||
|
||||
The light client aims to be a faster and lighter-weight way to verify that a
|
||||
The light client aims to be a faster and lighter-weight way to verify that a
|
||||
block was voted on by a Tendermint network. The light client fetches
|
||||
Tendermint block headers and commit signatures, performing public key
|
||||
verification to ensure that the associated validator set signed the block.
|
||||
@@ -212,7 +212,7 @@ able to check if some singular validator's key signed the block.
|
||||
##### Vote Gossip
|
||||
|
||||
It is possible to aggregate subsets of signatures during voting, so that the
|
||||
network need not gossip all *n* validator signatures to all *n* validators.
|
||||
network need not gossip all _n_ validator signatures to all _n_ validators.
|
||||
Theoretically, subsets of the signatures could be aggregated during consensus
|
||||
and vote messages could carry those aggregated signatures. Implementing this
|
||||
would certainly increase the complexity of the gossip layer but could possibly
|
||||
@@ -243,12 +243,12 @@ possible for storing highly sensitive private key material.
|
||||
|
||||
Below is a list of popular HSMs along with their support for BLS signatures.
|
||||
|
||||
* YubiKey
|
||||
* [No support][yubi-key-bls-support]
|
||||
* Amazon Cloud HSM
|
||||
* [No support][cloud-hsm-support]
|
||||
* Ledger
|
||||
* [Lists support for the BLS12-381 curve][ledger-bls-announce]
|
||||
- YubiKey
|
||||
- [No support][yubi-key-bls-support]
|
||||
- Amazon Cloud HSM
|
||||
- [No support][cloud-hsm-support]
|
||||
- Ledger
|
||||
- [Lists support for the BLS12-381 curve][ledger-bls-announce]
|
||||
|
||||
I cannot find support listed for Google Cloud, although perhaps it exists.
|
||||
|
||||
@@ -261,7 +261,7 @@ reasonably unclear benefit.
|
||||
|
||||
### Can aggregated signatures be added as soft-upgrades?
|
||||
|
||||
In my estimation, yes. With the implementation of proposer-based timestamps,
|
||||
In my estimation, yes. With the implementation of proposer-based timestamps,
|
||||
all validators now produce signatures on only one of two messages:
|
||||
|
||||
1. A [CanonicalVote][canonical-vote-proto] where the BlockID is the hash of the block or
|
||||
@@ -409,7 +409,7 @@ the block.
|
||||
|
||||
### Library Support
|
||||
|
||||
Libraries for BLS signature creation are limited in number, although active
|
||||
Libraries for BLS signature creation are limited in number, although active
|
||||
development appears to be ongoing. Cryptographic algorithms are difficult to
|
||||
implement correctly and correctness issues are extremely serious and dangerous.
|
||||
No further exploration of BLS should be undertaken without strong assurance of
|
||||
@@ -422,7 +422,7 @@ and is supported by funds from the Ethereum foundation, adopting a new cryptogra
|
||||
library presents some serious risks. Namely, if the support for the library were
|
||||
to be discontinued, Tendermint may become saddled with the requirement of supporting
|
||||
a very complex piece of software or force a massive ecosystem-wide migration away
|
||||
from BLS signatures.
|
||||
from BLS signatures.
|
||||
|
||||
This is one of the more serious reasons to avoid adopting BLS signatures at this
|
||||
time. There is no gold standard library. Some projects look promising, but no
|
||||
@@ -472,7 +472,7 @@ of re-aggregating the public key. Aggregation is _not_ constant time in the
|
||||
number of keys and instead grows linearly. When [benchmarked locally][blst-verify-bench-agg],
|
||||
blst public key aggregation of 128 keys took 2.43 milliseconds. This, along with
|
||||
the 1.5 milliseconds to verify a signature would raise light client signature
|
||||
verification time to 3.9 milliseconds, a time above the previously mentioned
|
||||
verification time to 3.9 milliseconds, a time above the previously mentioned
|
||||
batch verification time using our ed25519 library of 2.0 milliseconds.
|
||||
|
||||
Schemes to cache aggregated subsets of keys could certainly cut this time down at the
|
||||
@@ -501,8 +501,8 @@ the associated protocols.
|
||||
|
||||
## Open Questions
|
||||
|
||||
* *Q*: Can you aggregate Ed25519 signatures in Tendermint?
|
||||
* There is a suggested scheme in github issue [7892][suggested-ed25519-agg],
|
||||
- _Q_: Can you aggregate Ed25519 signatures in Tendermint?
|
||||
- There is a suggested scheme in github issue [7892][suggested-ed25519-agg],
|
||||
but additional rigor would be required to fully verify its correctness.
|
||||
|
||||
## Current Consideration
|
||||
@@ -523,9 +523,7 @@ standards develop.
|
||||
|
||||
### References
|
||||
|
||||
[line-ostracon-repo]: https://github.com/line/ostracon
|
||||
[line-ostracon-pr]: https://github.com/line/ostracon/pull/117
|
||||
[mit-BLS-lecture]: https://youtu.be/BFwc2XA8rSk?t=2521
|
||||
[gcp-storage-pricing]: https://cloud.google.com/storage/pricing#north-america_2
|
||||
[yubi-key-bls-support]: https://github.com/Yubico/yubihsm-shell/issues/66
|
||||
[cloud-hsm-support]: https://docs.aws.amazon.com/cloudhsm/latest/userguide/pkcs11-key-types.html
|
||||
@@ -533,7 +531,6 @@ standards develop.
|
||||
[bls-ietf-terms]: https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-bls-signature-04#section-1.3
|
||||
[bls-ietf-pop]: https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-bls-signature-04#section-3.3
|
||||
[multi-signatures-smaller-blockchains]: https://eprint.iacr.org/2018/483.pdf
|
||||
[ibc-tendermint]: https://github.com/cosmos/ibc/tree/master/spec/client/ics-007-tendermint-client
|
||||
[zcash-adoption]: https://github.com/zcash/zcash/issues/2502
|
||||
[chia-adoption]: https://github.com/Chia-Network/chia-blockchain#chia-blockchain
|
||||
[bls-ietf-ecdsa-compare]: https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-bls-signature-04#section-1.1
|
||||
@@ -551,5 +548,4 @@ standards develop.
|
||||
[bls-weil-pairing]: https://www.iacr.org/archive/asiacrypt2001/22480516.pdf
|
||||
[summing-zero-paper]: https://eprint.iacr.org/2021/323.pdf
|
||||
[circl]: https://github.com/cloudflare/circl
|
||||
[light-client-evidence]: https://github.com/tendermint/tendermint/blob/a6fd1fe20116d4b1f7e819cded81cece8e5c1ac7/types/evidence.go#L245
|
||||
[suggested-ed25519-agg]: https://github.com/tendermint/tendermint/issues/7892
|
||||
|
||||
@@ -85,7 +85,7 @@ applications using it. In particular:
|
||||
the server that are not clearly documented anywhere, and it is very easy for
|
||||
small changes in both the client and the server to lead to tricky deadlocks,
|
||||
panics, race conditions, and slowdowns. As a recent example of this, see
|
||||
https://github.com/tendermint/tendermint/pull/8581.
|
||||
<https://github.com/tendermint/tendermint/pull/8581>.
|
||||
|
||||
These limitations are fixable, but one important question is whether it is
|
||||
worthwhile to fix them. We can add request and method identifiers, for
|
||||
@@ -190,28 +190,28 @@ design.

- **Standardize on gRPC**

  - ✅ Addresses existing performance and operational issues.
  - ✅ Replaces custom code with a well-maintained widely-used library.
  - ✅ Aligns with Cosmos SDK, which already uses gRPC extensively.
  - ✅ Aligns with priv validator interface, for which the socket protocol is already deprecated for gRPC.
  - ❓ Applications will be hard to implement in a language without gRPC support.
  - ⛔ All users of the socket protocol have to migrate to gRPC, and we believe most current out-of-process applications use the socket protocol.

- **Standardize on socket protocol**

  - ✅ Less immediate impact for existing users (but see below).
  - ✅ Simplifies ABCI API surface by removing gRPC.
  - ❓ Users of the socket protocol will have a (smaller) migration.
  - ❓ Potentially easier to implement for languages that do not have support.
  - ⛔ Need to do all the work to fix the socket protocol (which will require existing users to update anyway later).
  - ⛔ Ongoing maintenance burden for per-language server implementations.

- **Keep both options**

  - ✅ Less immediate impact for existing users (but see below).
  - ❓ Users of the socket protocol will have a (smaller) migration.
  - ⛔ Still need to do all the work to fix the socket protocol (which will require existing users to update anyway later).
  - ⛔ Requires ongoing maintenance and support of both gRPC and socket protocol integrations.

## References
|
||||
|
||||
@@ -42,6 +42,7 @@ Once the decision has been made to run and operate a service,
|
||||
one of the next strategic questions is that of deploying said service.
|
||||
The author strongly holds the opinion that, when possible,
|
||||
a continuous delivery model offers the most compelling set of advantages:
|
||||
|
||||
- The code on a particular branch (likely `main` or `master`) is exactly what is,
|
||||
or what will very soon be, running in production
|
||||
- There are no manual steps involved in deploying -- other than merging your pull request,
|
||||
@@ -50,6 +51,7 @@ a continuous delivery model offers the most compelling set of advantages:
|
||||
|
||||
In summary, if the tendermint authors build, maintain, and continuously deliver an application
|
||||
intended to serve as a long-lived testnet, they will be able to state with confidence:
|
||||
|
||||
- We operate the software in a production-like environment and we have observed it to be
|
||||
stable and performant to our requirements
|
||||
- We have discovered issues in production before any external parties have consumed our software,
|
||||
|
||||
@@ -140,7 +140,7 @@ these are not its primary concern.
|
||||
|
||||
### Data to consider removing
|
||||
|
||||
This section proposes a list of data that could be completely removed from the
|
||||
This section proposes a list of data that could be completely removed from the
|
||||
Merkle tree with no loss to the functionality of our consensus algorithm.
|
||||
|
||||
Where the change is possible but would hamper external protocols or make
|
||||
@@ -241,7 +241,7 @@ only one header.
|
||||
validation. The light client uses this field to ensure that the validator set
|
||||
it fetched from a full node is correct. It can be sure of the correctness of
|
||||
the retrieved structure by hashing it and checking the hash against the `ValidatorsHash`
|
||||
of the block it is verifying. Because a validator that the light client trusts
|
||||
of the block it is verifying. Because a validator that the light client trusts
|
||||
signed over the `ValidatorsHash`, it can be certain of the validity of the
|
||||
structure. Without this check, phony validator sets could be handed to the light
|
||||
client and the code tricked into believing a different validator set was present
|
||||
@@ -268,7 +268,7 @@ of basic information about the chain.
|
||||
|
||||
#### ProofOfLockRound
|
||||
|
||||
The *proof of lock round* is the round of consensus for a height in which the
|
||||
The _proof of lock round_ is the round of consensus for a height in which the
|
||||
Tendermint algorithm observed a super majority of voting power on the network for
|
||||
a block.
|
||||
|
||||
@@ -318,7 +318,7 @@ or unchanged from previous blocks_. For example, we propagate the `ValidatorAddr
|
||||
for each block in the `CommitSig` structure even when it never changed from a
|
||||
previous height. We could achieve a speed-up in many cases by communicating the
|
||||
hashes _first_ and letting peers request additional information when they do not
|
||||
recognize the communicated hash.
|
||||
recognize the communicated hash.
|
||||
|
||||
For example, in the case of the `ValidatorAddress`es, the node would first
|
||||
communicate the `ValidatorsHash` of the block to its peers. The peers would
|
||||
@@ -353,12 +353,10 @@ this would not be light client breaking.
|
||||
|
||||
## References
|
||||
|
||||
[light-verify-trusting]: https://github.com/tendermint/tendermint/blob/208a15dadf01e4e493c187d8c04a55a61758c3cc/types/validation.go#L124
|
||||
[part-set-header]: https://github.com/tendermint/tendermint/blob/208a15dadf01e4e493c187d8c04a55a61758c3cc/types/part_set.go#L94
|
||||
[block-id]: https://github.com/tendermint/tendermint/blob/208a15dadf01e4e493c187d8c04a55a61758c3cc/types/block.go#L1090
|
||||
[psh-check]: https://github.com/tendermint/tendermint/blob/208a15dadf01e4e493c187d8c04a55a61758c3cc/types/part_set.go#L116
|
||||
[proposer-selection]: https://github.com/tendermint/tendermint/blob/208a15dadf01e4e493c187d8c04a55a61758c3cc/spec/consensus/proposer-selection.md
|
||||
[chain-experiment]: https://github.com/williambanfield/tmtools/blob/master/hash-changes/RUN.txt
|
||||
[val-hash]: https://github.com/tendermint/tendermint/blob/29e5fbcc648510e4763bd0af0b461aed92c21f30/types/validator.go#L160
|
||||
[proposer-check]: https://github.com/tendermint/tendermint/blob/29e5fbcc648510e4763bd0af0b461aed92c21f30/internal/state/validation.go#L102
|
||||
[save-block]: https://github.com/tendermint/tendermint/blob/59f0236b845c83009bffa62ed44053b04370b8a9/internal/store/store.go#L490
|
||||
|
||||
@@ -12,7 +12,7 @@ of the Tendermint mempool becomes much less clear. This RFC discusses possible
|
||||
changes that should be considered to Tendermint to better support applications
|
||||
that intend to use `PrepareProposal` to implement much more powerful transaction
|
||||
ordering and filtering functionality than Tendermint can provide. It proposes
|
||||
scoping down the responsibilities of Tendermint to suit this new use case.
|
||||
scoping down the responsibilities of Tendermint to suit this new use case.
|
||||
|
||||
## Background
|
||||
|
||||
@@ -293,7 +293,6 @@ future designs are made in this area:
|
||||
|
||||
## References
|
||||
|
||||
[mempool-cache]:https://github.com/tendermint/tendermint/blob/c8302c5fcb7f1ffafdefc5014a26047df1d27c99/mempool/v1/mempool.go#L41
|
||||
[cache-when-clear]:https://github.com/tendermint/tendermint/issues/7723
|
||||
[update-remove-from-cache]:https://github.com/tendermint/tendermint/pull/233
|
||||
[update-keep-in-cache]:https://github.com/tendermint/tendermint/issues/2855
|
||||
|
||||
287
docs/rfc/rfc-027-p2p-message-bandwidth-report.md
Normal file
@@ -0,0 +1,287 @@
# RFC 27: P2P Message Bandwidth Report
|
||||
|
||||
## Changelog
|
||||
|
||||
- Nov 7, 2022: initial draft (@williambanfield)
|
||||
- Nov 15, 2022: draft completed (@williambanfield)
|
||||
|
||||
## Abstract
|
||||
|
||||
Node operators and application developers complain that Tendermint nodes consume
|
||||
large amounts of network bandwidth. This RFC catalogues the major sources of bandwidth
|
||||
consumption within Tendermint and suggests modifications to Tendermint that may reduce
|
||||
bandwidth consumption for nodes.
|
||||
|
||||
## Background
|
||||
Multiple teams running validators in production report that the validator
|
||||
consumes a lot of bandwidth. They report that operators running on a network
|
||||
with hundreds of validators consume multiple terabytes of bandwidth per day.
|
||||
Prometheus data collected from a validator node running on the Osmosis chain
|
||||
shows that Tendermint sends and receives large amounts of data to peers. In the
|
||||
nearly three hours of observation, Tendermint sent nearly 42 gigabytes and
|
||||
received about 26 gigabytes, for an estimated 366 gigabytes sent daily and 208
|
||||
gigabytes received daily. While this is shy of the reported terabytes number,
|
||||
operators running multiple nodes for a 'sentry' pattern could easily send and
|
||||
receive a terabyte of data.
|
||||
|
||||
Sending and receiving large amounts of data has a cost for node operators. Most
|
||||
cloud platforms charge for network traffic egress. Google Cloud charges between
|
||||
[$.05 to $.12 per gigabyte of egress traffic][gcloud-pricing], and ingress is
|
||||
free. Hetzner [charges 1€ per TB used over the 10-20TB base bandwidth per
|
||||
month][hetzner-pricing], which will be easily hit if multiple terabytes are
|
||||
sent and received per day. Using the values collected from the validator on
|
||||
Osmosis, a single node running on Google Cloud may cost $18 to $44 a day.
On Hetzner, the estimated 18TB a month of both sending and
|
||||
receiving may cost between 0 and 10 Euro a month per node.
|
||||
|
||||
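A quick back-of-the-envelope check of the Google Cloud figure, assuming only egress is billed at the quoted $0.05 to $0.12 per gigabyte:

```go
package main

import "fmt"

func main() {
	const egressGBPerDay = 366.0 // estimated daily sent traffic from the observation above

	low := egressGBPerDay * 0.05  // cheapest quoted egress tier
	high := egressGBPerDay * 0.12 // most expensive quoted egress tier

	fmt.Printf("estimated egress cost: $%.0f to $%.0f per day\n", low, high) // ~$18 to $44
}
```
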
## Discussion
|
||||
|
||||
### Overview of Major Bandwidth Usage
|
||||
|
||||
To determine which components of Tendermint were consuming the most bandwidth,
|
||||
I gathered prometheus metrics from the [Blockpane][blockpane] validator running
|
||||
on the Osmosis network for several hours. The data reveal that three message
|
||||
types account for 98% of the total bandwidth consumed. These message types are
|
||||
as follows:
|
||||
|
||||
|
||||
1. [consensus.BlockPart][block-part-message]
|
||||
2. [mempool.Txs][mempool-txs-message]
|
||||
3. [consensus.Vote][vote-message]
|
||||
|
||||
|
||||
The images below, of p2p data collected from the Blockpane validator, illustrate
|
||||
the total bandwidth consumption of these three message types.
|
||||
|
||||
|
||||
#### Send:
|
||||
|
||||
##### Top 3 Percent:
|
||||
|
||||

|
||||
|
||||
##### Rate For All Messages:
|
||||
|
||||

|
||||
|
||||
#### Receive:
|
||||
|
||||
##### Top 3 Percent:
|
||||
|
||||

|
||||
|
||||
##### Rate For All Messages:
|
||||
|
||||

|
||||
|
||||
### Investigation of Message Usage
|
||||
|
||||
This section discusses the usage of each of the three highest consumption messages.
|
||||
#### BlockPart Transmission
|
||||
|
||||
Sending `BlockPart` messages consumes the most bandwidth out of all p2p
|
||||
message types as observed in the Blockpane Osmosis validator. In the almost 3
|
||||
hour observation, the validator sent about 20 gigabytes of `BlockPart`
|
||||
messages.
|
||||
|
||||
A block is proposed each round of Tendermint consensus. The paper does not
|
||||
define a specific way that the block is to be transmitted, just that all
|
||||
participants will receive it via a gossip network.
|
||||
|
||||
The Go implementation of Tendermint transmits the block in 'parts'. It
|
||||
serializes the block to wire-format proto and splits this byte representation
|
||||
into a set of 4 kilobyte arrays and sends these arrays to its peers, each in a
|
||||
separate message.
|
||||
|
||||
The logic for sending `BlockPart` messages resides in the code for the
|
||||
[consensus.Reactor][gossip-data-routine]. The consensus reactor starts a new
|
||||
`gossipDataRoutine` for each peer it connects to. This routine repeatedly picks
|
||||
a part of the block that Tendermint believes the peer does not know about yet
|
||||
and gossips it to the peer. The set of `BlockParts` that Tendermint considers
|
||||
its peer as having is only updated in one of four ways:
|
||||
|
||||
|
||||
1. Our peer tells us they have entered a new round [via a `NewRoundStep`
|
||||
message][new-round-step-message-send]. This message is only sent when a node
|
||||
moves to a new round or height and only resets the data we collect about a
|
||||
peer's blockpart state.
|
||||
1. [We receive a block part from the peer][block-part-receive].
|
||||
1. [We send][block-part-send-1] [the peer a block part][block-part-send-2].
|
||||
1. Our peer tells us about the block parts they have [via `NewValidBlock`
|
||||
messages][new-valid-block-message-send]. This message is only sent when the
|
||||
peer has a quorum of prevotes or precommits for a block.
|
||||
|
||||
Each node receives block parts from all of its peers. The particular block part
|
||||
to send at any given time is randomly selected from the set of parts that the
|
||||
peer node is not yet known to have. Given that these are the only times that
|
||||
Tendermint learns of its peers' block parts, it's very likely that a node has
|
||||
an incomplete understanding of its peers' block parts and is transmitting block
|
||||
parts to a peer that the peer has received from some other node.
|
||||
|
||||
Multiple potential mechanisms exist to reduce the number of duplicate block
|
||||
parts a node receives. One set of mechanisms relies on more frequently
|
||||
communicating the set of block parts a node needs to its peers. Another
|
||||
potential mechanism requires a larger overhaul to the way blocks are gossiped
|
||||
in the network.
|
||||
|
||||
#### Mempool Tx Transmission
|
||||
|
||||
The Tendermint mempool stages transactions that are yet to be committed to the
|
||||
blockchain and communicates these transactions to its peers. Each message
|
||||
contains one transaction. Data collected from the Blockpane node running on
|
||||
Osmosis indicates that the validator sent about 12 gigabytes of `Txs` messages
|
||||
during the nearly 3 hour observation period.
|
||||
|
||||
The Tendermint mempool starts a new [broadcastTxRoutine][broadcast-tx-routine]
|
||||
for each peer that it is informed of. The routine sends all transactions that
|
||||
the mempool is aware of to all peers with few exceptions. The only exception is
|
||||
if the mempool received a transaction from a peer, then it marks it as such and
|
||||
won't resend to that peer. Otherwise, it retains no information about which
|
||||
transactions it already sent to a peer. In some cases it may therefore resend
|
||||
transactions the peer already has. This can occur if the mempool removes a
|
||||
transaction from the `CList` data structure used to store the list of
|
||||
transactions while it is about to be sent and if the transaction was the tail
|
||||
of the `CList` during removal. This will be more likely to occur if a large
|
||||
number of transactions from the end of the list are removed during `RecheckTx`,
|
||||
since multiple transactions will become the tail and then be deleted. It is
|
||||
unclear at the moment how frequently this occurs on production chains.
|
||||
|
||||
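One way to make such resends rarer, sketched below purely for illustration (this is not the current mempool implementation and the names are hypothetical), is to remember per peer which transaction keys have already been sent, independently of the `CList` position:

```go
package mempool

import "sync"

// txKey identifies a transaction, e.g. by the hash of its raw bytes.
type txKey [32]byte

// peerSendCache remembers which transactions were already sent to one peer so
// that the broadcast routine can skip them even if the CList tail is removed
// and re-read.
type peerSendCache struct {
	mtx  sync.Mutex
	sent map[txKey]struct{}
}

// MarkSent records that the transaction was transmitted to this peer.
func (c *peerSendCache) MarkSent(k txKey) {
	c.mtx.Lock()
	defer c.mtx.Unlock()
	if c.sent == nil {
		c.sent = make(map[txKey]struct{})
	}
	c.sent[k] = struct{}{}
}

// ShouldSend reports whether the transaction is still new for this peer.
func (c *peerSendCache) ShouldSend(k txKey) bool {
	c.mtx.Lock()
	defer c.mtx.Unlock()
	_, ok := c.sent[k]
	return !ok
}
```

Such a cache trades memory (one entry per peer per in-flight transaction) for fewer duplicate `Txs` messages.
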
Beyond ensuring that transactions are rebroadcast to peers less frequently,
|
||||
there is not a simple scheme to communicate fewer transactions to peers. Peers
|
||||
cannot communicate what transactions they need since they do not know which
|
||||
transactions exist on the network.
|
||||
|
||||
#### Vote Transmission
|
||||
|
||||
Tendermint votes, both prevotes and precommits, are central to Tendermint
|
||||
consensus and are gossiped by all nodes to all peers during each consensus
|
||||
round. Data collected from the Blockpane node running on Osmosis indicates that
|
||||
about 9 gigabytes of `Vote` messages were sent during the nearly 3 hour period
|
||||
of observation. Examination of the [Vote message][vote-msg] indicates that it
|
||||
contains 184 bytes of data, with the proto encoding adding a few additional
|
||||
bytes when transmitting.
|
||||
|
||||
The Tendermint consensus reactor starts a new
|
||||
[gossipVotesRoutine][gossip-votes-routine] for each peer that it connects to.
|
||||
The reactor sends all votes to all peers unless it knows that the peer already
|
||||
has the vote or the reactor learns that the peer is in a different round and
|
||||
that the vote thus no longer applies. Tendermint learns that a peer has a vote
|
||||
in one of 4 ways:
|
||||
|
||||
1. Tendermint sent the peer the vote.
|
||||
1. Tendermint received the vote from the peer.
|
||||
1. The peer [sent a `HasVote` message][apply-has-vote]. This message is broadcast
|
||||
to all peers [each time a validator receives a vote it hasn't seen before
|
||||
corresponding to its current height and round][publish-event-vote].
|
||||
1. The peer [sent a `VoteSetBits` message][apply-vote-set-bits]. This message is
|
||||
[sent as a response to a peer that sends a `VoteSetMaj23`][vote-set-bits-send].
|
||||
|
||||
Given that Tendermint informs all peers of _each_ vote message it receives, all
|
||||
nodes should be well informed of which votes their peers have. Given that the
|
||||
vote messages were the third largest consumer of bandwidth in the observation
|
||||
on Osmosis, it's possible that this system is not currently working correctly.
|
||||
Further analysis should examine where votes may be being retransmitted.
|
||||
|
||||
### Suggested Improvements to Lower Message Transmission Bandwidth
|
||||
|
||||
#### Gossip Known BlockPart Data
|
||||
|
||||
The `BlockPart` messages, by far, account for the majority of the data sent to
|
||||
each peer. At the moment, peers do not inform the node of which block parts
|
||||
they already have. This means that each block part is _very likely_ to be
|
||||
transmitted many times to each node. This frivolous consumption is even worse
|
||||
in networks with large blocks.
|
||||
|
||||
The very simple solution to this issue is to copy the technique used in
|
||||
consensus for informing peers when the node receives a vote. The consensus
|
||||
reactor can be augmented with a `HasBlockPart` message that is broadcast to
|
||||
each peer every time the node receives a block part. By informing each peer
|
||||
every time the node receives a block part, we can drastically reduce the amount
|
||||
of duplicate data sent to each node. There would be no algorithmic way of
|
||||
enforcing that a peer accurately reports its block parts, so providing this
|
||||
message would be a somewhat altruistic action on the part of the node. Such a
|
||||
system [has been proposed in the past][i627] as well, so this is certainly not
|
||||
totally new ground.
|
||||
|
||||
Measuring the volume of duplicate blockparts received before and after this
|
||||
change would help validate this approach.
|
||||
|
||||
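For concreteness, a hypothetical `HasBlockPart` notification mirroring the existing `HasVote` message might carry no more than the following fields; this Go struct is illustrative only and is not part of Tendermint today:

```go
package consensus

// HasBlockPart is a hypothetical gossip hint, broadcast to every peer right
// after a node stores a received block part, so peers can drop that part from
// their set of candidates to send us.
type HasBlockPart struct {
	Height int64 // height of the proposed block
	Round  int32 // consensus round the block part belongs to
	Index  int32 // index of the block part that was just received
}
```

At a few bytes per part, the hint itself is negligible next to the 4-kilobyte parts it would prevent from being retransmitted.
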
#### Compress Transmitted Data
|
||||
|
||||
Tendermint's data is sent uncompressed on the wire. The messages are not
|
||||
compressed before sending and the transport performs no compression either.
|
||||
Some of the information communicated by Tendermint is a poor candidate for
|
||||
compression: Data such as digital signatures and hashes have high entropy and
|
||||
therefore do not compress well. However, transactions may contain lots of
|
||||
information that has less entropy. Compression within Tendermint may be added
|
||||
at several levels. Compression may be performed at the [Tendermint 'packet'
|
||||
level][must-wrap-packet] or at the [Tendermint message send
|
||||
level][message-send].
|
||||
|
||||
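As a sketch of what packet-level compression could look like, the following uses only the Go standard library's gzip; it is illustrative, not how Tendermint currently sends data, and the real choice of algorithm and placement would need benchmarking:

```go
package p2p

import (
	"bytes"
	"compress/gzip"
)

// compressPayload compresses a serialized packet before it is handed to the
// transport. High-entropy fields (hashes, signatures) will barely shrink, but
// transaction payloads often will.
func compressPayload(raw []byte) ([]byte, error) {
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	if _, err := zw.Write(raw); err != nil {
		return nil, err
	}
	// Close flushes remaining data and writes the gzip footer.
	if err := zw.Close(); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
```
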
#### Transmit Less Data During Block Gossip
|
||||
|
||||
Block, vote, and mempool gossiping transmit much of the same data. The mempool
|
||||
reactor gossips candidate transactions to each peer. The consensus reactor,
|
||||
when gossiping the votes, sends vote metadata and the digital signature that
signs over that metadata. Finally, when a block is proposed, the proposing node
|
||||
amalgamates the received votes and a set of transactions, and adds a header to
|
||||
produce the block. This block is then serialized and gossiped as a list of
|
||||
bytes. However, the data that the block contains, namely the votes and the
|
||||
transactions, were most likely _already transmitted to the nodes on the network_
|
||||
via mempool transaction gossip and consensus vote gossip.
|
||||
|
||||
Therefore, block gossip can be updated to transmit a representation of the data
|
||||
contained in the block that assumes the peers will already have most of this
|
||||
data. Namely, the block gossip can be updated to only send 1) a list of
|
||||
transaction hashes and 2) a bit array of votes selected for the block along
|
||||
with the header and other required block metadata.
|
||||
|
||||
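A rough sketch of the reduced payload described above, with every name hypothetical and shown only to indicate what would replace the current stream of full `BlockPart` bytes:

```go
package consensus

import "github.com/tendermint/tendermint/types"

// CompactBlock is a hypothetical block-gossip payload: the header and other
// required metadata travel in full, while transactions and precommits are
// referenced by hash and by position in the validator set respectively.
type CompactBlock struct {
	Header     types.Header // full block header
	TxHashes   [][]byte     // hashes of the transactions included in the block
	VoteBitmap []byte       // bit array marking which validators' precommits are included
}
```
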
This new proposed method for gossiping block data could accompany a slight
|
||||
update to the mempool transaction gossip and consensus vote gossip. Since all
|
||||
of the contents of each block will not be gossiped together, it's possible that
|
||||
some nodes are missing a proposed transaction or the vote of a validator
|
||||
indicated in the new block gossip format during block gossip. The mempool and
|
||||
consensus reactors may therefore be updated to provide a `NeedTxs` and
|
||||
`NeedVotes` message. Each of these messages would allow a node to request a set
|
||||
of data from their peers. When a node receives one of these, it will then
|
||||
transmit the Tx/Votes indicated in the associated message regardless of whether
|
||||
it believes it has transmitted them to the peer before. The gossip layer will
|
||||
ensure that each peer eventually receives all of the data in the block.
|
||||
However, if a transaction is needed immediately by a peer so that it can verify
|
||||
and execute a block during consensus, a mechanism such as the `NeedTxs` and
|
||||
`NeedVotes` messages should be added to ensure it receives the messages
|
||||
quickly.
|
||||
|
||||
The same logic may be applied to evidence transmission as well, since all nodes
|
||||
should receive evidence and therefore do not need to re-transmit it in a block
|
||||
part.
|
||||
|
||||
A similar idea has been proposed in the past as [Compact Block
|
||||
Propagation][compact-block-propagation].
|
||||
|
||||
|
||||
## References
|
||||
|
||||
[blockpane]: https://www.mintscan.io/osmosis/validators/osmovaloper1z0sh4s80u99l6y9d3vfy582p8jejeeu6tcucs2
|
||||
[block-part-message]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/proto/tendermint/consensus/types.proto#L44
|
||||
[mempool-txs-message]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/proto/tendermint/mempool/types.proto#L6
|
||||
[vote-message]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/proto/tendermint/consensus/types.proto#L51
|
||||
[gossip-data-routine]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/consensus/reactor.go#L537
|
||||
[block-part-receive]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/consensus/reactor.go#L324
|
||||
[block-part-send-1]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/consensus/reactor.go#L566
|
||||
[block-part-send-2]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/consensus/reactor.go#L684.
|
||||
[new-valid-block-message-send]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/consensus/reactor.go#L268
|
||||
[new-round-step-message-send]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/consensus/reactor.go#L266
|
||||
[broadcast-tx-routine]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/mempool/v0/reactor.go#L197
|
||||
[gossip-votes-routine]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/consensus/reactor.go#L694
|
||||
[apply-has-vote]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/consensus/reactor.go#L1429
|
||||
[apply-vote-set-bits]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/consensus/reactor.go#L1445
|
||||
[publish-event-vote]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/consensus/state.go#L2083
|
||||
[vote-set-bits-send]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/consensus/reactor.go#L306
|
||||
[must-wrap-packet]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/p2p/conn/connection.go#L889-L918
|
||||
[message-send]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/p2p/peer.go#L285
|
||||
[gcloud-pricing]: https://cloud.google.com/vpc/network-pricing#vpc-pricing
|
||||
[hetzner-pricing]: https://docs.hetzner.com/robot/general/traffic
|
||||
[vote-msg]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/proto/tendermint/types/types.pb.go#L468
|
||||
[i627]: https://github.com/tendermint/tendermint/issues/627
|
||||
[compact-block-propagation]: https://github.com/tendermint/tendermint/issues/7932
|
||||
@@ -3,6 +3,7 @@ order: 10
|
||||
---
|
||||
|
||||
# Block Sync
|
||||
|
||||
*Formerly known as Fast Sync*
|
||||
|
||||
In a proof of work blockchain, syncing with the chain is the same
|
||||
|
||||
@@ -18,52 +18,52 @@ Listen address can be changed in the config file (see

The following metrics are available:

| **Name** | **Type** | **Tags** | **Description** |
|----------|----------|----------|------------------|
| `abci_connection_method_timing_seconds` | Histogram | `method`, `type` | Timings for each of the ABCI methods |
| `blocksync_syncing` | Gauge | | Either 0 (not block syncing) or 1 (syncing) |
| `consensus_height` | Gauge | | Height of the chain |
| `consensus_validators` | Gauge | | Number of validators |
| `consensus_validators_power` | Gauge | | Total voting power of all validators |
| `consensus_validator_power` | Gauge | | Voting power of the node if in the validator set |
| `consensus_validator_last_signed_height` | Gauge | | Last height the node signed a block, if the node is a validator |
| `consensus_validator_missed_blocks` | Gauge | | Total amount of blocks missed for the node, if the node is a validator |
| `consensus_missing_validators` | Gauge | | Number of validators who did not sign |
| `consensus_missing_validators_power` | Gauge | | Total voting power of the missing validators |
| `consensus_byzantine_validators` | Gauge | | Number of validators who tried to double sign |
| `consensus_byzantine_validators_power` | Gauge | | Total voting power of the byzantine validators |
| `consensus_block_interval_seconds` | Histogram | | Time between this and last block (Block.Header.Time) in seconds |
| `consensus_rounds` | Gauge | | Number of rounds |
| `consensus_num_txs` | Gauge | | Number of transactions |
| `consensus_total_txs` | Gauge | | Total number of transactions committed |
| `consensus_block_parts` | Counter | `peer_id` | Number of blockparts transmitted by peer |
| `consensus_latest_block_height` | Gauge | | /status sync\_info number |
| `consensus_block_size_bytes` | Gauge | | Block size in bytes |
| `consensus_step_duration` | Histogram | `step` | Histogram of durations for each step in the consensus protocol |
| `consensus_round_duration` | Histogram | | Histogram of durations for all the rounds that have occurred since the process started |
| `consensus_block_gossip_parts_received` | Counter | `matches_current` | Number of block parts received by the node |
| `consensus_quorum_prevote_delay` | Gauge | | Interval in seconds between the proposal timestamp and the timestamp of the earliest prevote that achieved a quorum |
| `consensus_full_prevote_delay` | Gauge | | Interval in seconds between the proposal timestamp and the timestamp of the latest prevote in a round where all validators voted |
| `consensus_proposal_receive_count` | Counter | `status` | Total number of proposals received by the node since process start |
| `consensus_proposal_create_count` | Counter | | Total number of proposals created by the node since process start |
| `consensus_round_voting_power_percent` | Gauge | `vote_type` | A value between 0 and 1.0 representing the percentage of the total voting power per vote type received within a round |
| `consensus_late_votes` | Counter | `vote_type` | Number of votes received by the node since process start that correspond to earlier heights and rounds than this node is currently in |
| `p2p_message_send_bytes_total` | Counter | `message_type` | Number of bytes sent to all peers per message type |
| `p2p_message_receive_bytes_total` | Counter | `message_type` | Number of bytes received from all peers per message type |
| `p2p_peers` | Gauge | | Number of peers the node is connected to |
| `p2p_peer_receive_bytes_total` | Counter | `peer_id`, `chID` | Number of bytes per channel received from a given peer |
| `p2p_peer_send_bytes_total` | Counter | `peer_id`, `chID` | Number of bytes per channel sent to a given peer |
| `p2p_peer_pending_send_bytes` | Gauge | `peer_id` | Number of pending bytes to be sent to a given peer |
| `p2p_num_txs` | Gauge | `peer_id` | Number of transactions submitted by each peer\_id |
| `p2p_pending_send_bytes` | Gauge | `peer_id` | Amount of data pending to be sent to peer |
| `mempool_size` | Gauge | | Number of uncommitted transactions |
| `mempool_tx_size_bytes` | Histogram | | Transaction sizes in bytes |
| `mempool_failed_txs` | Counter | | Number of failed transactions |
| `mempool_recheck_times` | Counter | | Number of transactions rechecked in the mempool |
| `state_block_processing_time` | Histogram | | Time between BeginBlock and EndBlock in ms |
| `state_consensus_param_updates` | Counter | | Number of consensus parameter updates returned by the application since process start |
| `state_validator_set_updates` | Counter | | Number of validator set updates returned by the application since process start |
| `statesync_syncing` | Gauge | | Either 0 (not state syncing) or 1 (syncing) |
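
For a quick check that metrics are actually being exported, you can scrape the endpoint yourself. The sketch below is not part of the original docs: it assumes `prometheus = true` is set in `config.toml`, that the node uses the default `prometheus_listen_addr` of `:26660`, and that metric names carry the default `tendermint` namespace prefix.

```go
package main

import (
	"bufio"
	"fmt"
	"net/http"
	"strings"
)

// Print the consensus height gauge from the node's Prometheus endpoint.
// The address and metric prefix are assumptions; adjust them to your config.
func main() {
	resp, err := http.Get("http://localhost:26660/metrics")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	scanner := bufio.NewScanner(resp.Body)
	for scanner.Scan() {
		if line := scanner.Text(); strings.HasPrefix(line, "tendermint_consensus_height") {
			fmt.Println(line)
		}
	}
	if err := scanner.Err(); err != nil {
		panic(err)
	}
}
```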

## Useful queries
@@ -220,8 +220,8 @@ Recovering from data corruption can be hard and time-consuming. Here are two app

./scripts/wal2json/wal2json "$TMHOME/data/cs.wal/wal" > /tmp/corrupted_wal
```

3) Search for a "CORRUPTED MESSAGE" line.
4) By looking at the previous message and the message after the corrupted one
   and looking at the logs, try to rebuild the message. If the subsequent
   messages are marked as corrupted too (this may happen if the length header
   got corrupted or some writes did not make it to the WAL ~ truncation),

@@ -232,7 +232,7 @@ Recovering from data corruption can be hard and time-consuming. Here are two app

$EDITOR /tmp/corrupted_wal
```

5) After editing, convert this file back into binary form by running:

```sh
./scripts/json2wal/json2wal /tmp/corrupted_wal $TMHOME/data/cs.wal/wal
@@ -4,41 +4,41 @@ order: 11

# State Sync

With block sync a node is downloading all of the data of an application from genesis and verifying it.
With state sync your node will download data related to the head or near the head of the chain and verify the data.
This leads to drastically shorter times for joining a network.

## Using State Sync

State sync will continuously work in the background to supply nodes with chunked data when bootstrapping.

> NOTE: Before trying to use state sync, see if the application you are operating a node for supports it.

Under the state sync section in `config.toml` you will find multiple settings that need to be configured in order for your node to use state sync.

Let's break down the settings:

- `enable`: Set to `true` to inform the node that it should use state sync to bootstrap.
- `rpc_servers`: RPC servers are needed because state sync utilizes the light client for verification.
    - At least 2 servers are required; more is always helpful.
- `temp_dir`: Temporary directory used to store the chunks in the machine's local storage. If nothing is set, a directory will be created under `/tmp`.

The next pieces of information you will need to acquire through publicly exposed RPCs or a block explorer that you trust.

- `trust_height`: Trusted height defines at which height your node should trust the chain.
- `trust_hash`: Trusted hash is the hash in the `BlockID` corresponding to the trusted height.
- `trust_period`: Trust period is the period in which headers can be verified.
    > :warning: This value should be significantly smaller than the unbonding period.

If you are relying on publicly exposed RPCs to get the needed information, you can use `curl`.

Example:

```bash
curl -s https://233.123.0.140:26657/commit | jq "{height: .result.signed_header.header.height, hash: .result.signed_header.commit.block_id.hash}"
```

The response will be:

```json
{
@@ -47,8 +47,8 @@ definition](https://github.com/tendermint/tendermint/blob/main/types/genesis.go)

- `chain_id`: ID of the blockchain. **This must be unique for
  every blockchain.** If your testnet blockchains do not have unique
  chain IDs, you will have a bad time. The ChainID must be less than 50 symbols.
- `initial_height`: Height at which Tendermint should begin. If a blockchain is conducting a network upgrade,
  starting from the stopped height brings uniqueness to previous heights.
- `consensus_params` [spec](https://github.com/tendermint/tendermint/blob/main/spec/core/state.md#consensusparams)
    - `block`
        - `max_bytes`: Max block size, in bytes.
@@ -55,3 +55,47 @@ given destination directory. Each archive will contain:

Note: goroutine.out and heap.out will only be written if a profile address is
provided and is operational. This command is blocking and will log any error.

## Tendermint Inspect

Tendermint includes an `inspect` command for querying Tendermint's state store and block
store over Tendermint RPC.

When the Tendermint consensus engine detects inconsistent state, it will crash the
entire Tendermint process.
While in this inconsistent state, a node running Tendermint's consensus engine will not start up.
The `inspect` command runs only a subset of Tendermint's RPC endpoints for querying the block store
and state store.
`inspect` allows operators to query a read-only view of the state.
`inspect` does not run the consensus engine at all and can therefore be used to debug
processes that have crashed due to inconsistent state.

### Running inspect

Start up the `inspect` tool on the machine where Tendermint crashed using:

```bash
tendermint inspect --home=</path/to/app.d>
```

`inspect` will use the data directory specified in your Tendermint configuration file.
`inspect` will also run the RPC server at the address specified in your Tendermint configuration file.

### Using inspect

With the `inspect` server running, you can access RPC endpoints that are critically important
for debugging.
Calling the `/status`, `/consensus_state` and `/dump_consensus_state` RPC endpoints
will return useful information about the Tendermint consensus state.

To start the `inspect` process, run

```bash
tendermint inspect
```

### RPC endpoints

The list of available RPC endpoints can be found by making a request to the RPC port.
For an `inspect` process running on `127.0.0.1:26657`, navigate your browser to
`http://127.0.0.1:26657/` to retrieve the list of enabled RPC endpoints.

Additional information on the Tendermint RPC endpoints can be found in the [rpc documentation](https://docs.tendermint.com/master/rpc).
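
The same endpoints can also be queried programmatically. The following is only a sketch: it assumes the `inspect` server is listening on the default `127.0.0.1:26657` and uses the two-argument constructor of the `rpc/client/http` package as found in the v0.34/v0.37 line; check that package for the exact signature in the version you are running.

```go
package main

import (
	"context"
	"fmt"

	rpchttp "github.com/tendermint/tendermint/rpc/client/http"
)

// Query /status on a running `tendermint inspect` server.
func main() {
	client, err := rpchttp.New("tcp://127.0.0.1:26657", "/websocket")
	if err != nil {
		panic(err)
	}

	status, err := client.Status(context.Background())
	if err != nil {
		panic(err)
	}

	fmt.Println("latest block height:", status.SyncInfo.LatestBlockHeight)
	fmt.Println("catching up:", status.SyncInfo.CatchingUp)
}
```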
@@ -13,10 +13,10 @@ experience with Tendermint Core.

Tendermint Core is a service that provides a Byzantine Fault Tolerant consensus engine
for state-machine replication. The replicated state-machine, or "application", can be written
in any language that can send and receive protocol buffer messages in a client-server model.
Applications written in Go can also use Tendermint as a library and run the service in the same
process as the application.

By following along with this tutorial you will create a Tendermint Core application called kvstore,
a (very) simple distributed BFT key-value store.
The application will be written in Go and
some understanding of the Go programming language is expected.

@@ -30,15 +30,15 @@ We strongly advise against using unreleased commits for your development.

### Built-in app vs external app

On the one hand, to get maximum performance you can run your application in
the same process as Tendermint Core, as long as your application is written in Go.
[Cosmos SDK](https://github.com/cosmos/cosmos-sdk) is written
this way.
This is the approach followed in this tutorial.

On the other hand, having a separate application might give you better security
guarantees as the two processes would be communicating via an established binary protocol.
Tendermint Core will not have access to the application's state.
If that is the way you wish to proceed, use the [Creating an application in Go](./go.md) guide instead of this one.

@@ -90,7 +90,7 @@ go mod init kvstore

go get github.com/tendermint/tendermint@latest
```

After running the above commands you will see two generated files, `go.mod` and `go.sum`.
The go.mod file should look similar to:

```go
@@ -114,11 +114,11 @@ go build

## 1.3 Writing a Tendermint Core application

Tendermint Core communicates with the application through the Application
BlockChain Interface (ABCI). The messages exchanged through the interface are
defined in the ABCI [protobuf
file](https://github.com/tendermint/tendermint/blob/main/proto/tendermint/abci/types.proto).

We begin by creating the basic scaffolding for an ABCI application by
creating a new type, `KVStoreApplication`, which implements the
methods defined by the `abcitypes.Application` interface.

@@ -203,7 +203,7 @@ to the project when you ran `go get`. If your IDE is not recognizing the types,

go get github.com/tendermint/tendermint@latest
```

Now go back to `main.go` and modify the `main` function so it matches the following,
where an instance of the `KVStoreApplication` type is created.

```go
@@ -221,17 +221,18 @@ and to start it along with the Tendermint Service.


### 1.3.1 Add a persistent data store

Our application will need to write its state out to persistent storage so that it
can stop and start without losing all of its data.

For this tutorial, we will use [BadgerDB](https://github.com/dgraph-io/badger),
a fast embedded key-value store.

First, add Badger as a dependency of your go module using the `go get` command:

`go get github.com/dgraph-io/badger/v3`

Next, let's update the application and its constructor to receive a handle to the database, as follows:

```go
type KVStoreApplication struct {
@@ -246,7 +247,7 @@ func NewKVStoreApplication(db *badger.DB) *KVStoreApplication {
}
```

The `onGoingBlock` keeps track of the Badger transaction that will update the application's state when a block
is completed. Don't worry about it for now, we'll get to that later.

Next, update the `import` stanza at the top to include the Badger library:

@@ -265,7 +266,8 @@ Finally, update the `main.go` file to invoke the updated constructor:

```

### 1.3.2 CheckTx

When Tendermint Core receives a new transaction from a client, or from another full node, Tendermint asks the application if
the transaction is acceptable, using the `CheckTx` method.
Invalid transactions will not be shared with other nodes and will not become part of any blocks and, therefore, will not be executed by the application.

@@ -295,17 +297,17 @@ func (app *KVStoreApplication) CheckTx(req abcitypes.RequestCheckTx) abcitypes.R

}
```

While this `CheckTx` is simple and only validates that the transaction is well-formed,
it is very common for `CheckTx` to make more complex use of the state of an application.
For example, you may refuse to overwrite an existing value, or you can associate
versions with the key/value pairs and allow the caller to specify a version to
perform a conditional update.

Depending on the checks and on the conditions violated, the function may return
different values, but any response with a non-zero code will be considered invalid
by Tendermint. Our `CheckTx` logic returns 0 to Tendermint when a transaction passes
its validation checks. The specific value of the code is meaningless to Tendermint.
Non-zero codes are logged by Tendermint so applications can provide more specific
information on why the transaction was rejected.

Note that `CheckTx` does not execute the transaction, it only verifies that the transaction could be executed. We do not know yet if the rest of the network has agreed to accept this transaction into a block.
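
To make the idea of distinct non-zero codes more concrete, here is a hypothetical variant of `CheckTx` (not the guide's implementation, which is shown above) that rejects malformed transactions and refuses to overwrite existing keys, using different, arbitrary codes. It assumes the application keeps its `*badger.DB` handle in a `db` field, as the constructor suggests, and that `bytes` and `badger` are imported in `app.go`.

```go
// Hypothetical variant: the code values are arbitrary and only serve to let
// operators distinguish failure modes in the logs.
func (app *KVStoreApplication) CheckTx(req abcitypes.RequestCheckTx) abcitypes.ResponseCheckTx {
	parts := bytes.SplitN(req.Tx, []byte("="), 2)
	if len(parts) != 2 || len(parts[0]) == 0 {
		// Malformed: not of the form key=value.
		return abcitypes.ResponseCheckTx{Code: 1}
	}

	// State-dependent check: refuse to overwrite an existing key.
	err := app.db.View(func(txn *badger.Txn) error {
		_, err := txn.Get(parts[0])
		return err
	})
	switch err {
	case badger.ErrKeyNotFound:
		return abcitypes.ResponseCheckTx{Code: 0} // acceptable
	case nil:
		return abcitypes.ResponseCheckTx{Code: 2} // key already exists
	default:
		return abcitypes.ResponseCheckTx{Code: 3} // unexpected database error
	}
}
```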

@@ -334,7 +336,7 @@ receive a block.

- `EndBlock` is called once to indicate to the application that no more transactions
  will be delivered to the application within this block.

Note that, to implement these calls in our application we're going to make use of Badger's
transaction mechanism. We will always refer to these as Badger transactions, not to
confuse them with the transactions included in the blocks delivered by Tendermint,
the _application transactions_.

@@ -372,26 +374,26 @@ func (app *KVStoreApplication) DeliverTx(req abcitypes.RequestDeliverTx) abcityp

Note that we check the validity of the transaction _again_ during `DeliverTx`.
Transactions are not guaranteed to be valid when they are delivered to an
application, even if they were valid when they were proposed.
This can happen if the application state is used to determine transaction
validity. Application state may have changed between the initial execution of `CheckTx`
and the transaction delivery in `DeliverTx` in a way that rendered the transaction
no longer valid.

`EndBlock` is called to inform the application that the full block has been delivered
and give the application a chance to perform any other computation needed, before the
effects of the transactions become permanent.

Note that `EndBlock` **cannot** yet commit the Badger transaction we were building
in during `DeliverTx`.
Since other methods, such as `Query`, rely on a consistent view of the application's
state, the application should only update its state by committing the Badger transactions
when the full block has been delivered and the `Commit` method is invoked.

The `Commit` method tells the application to make permanent the effects of
the application transactions.
Let's update the method to terminate the pending Badger transaction and
persist the resulting state:

```go
func (app *KVStoreApplication) Commit() abcitypes.ResponseCommit {
@@ -416,10 +418,11 @@ import (

You may have noticed that the application we are writing will crash if it receives
an unexpected error from the Badger database during the `DeliverTx` or `Commit` methods.
This is not an accident. If the application received an error from the database, there
is no deterministic way for it to make progress so the only safe option is to terminate.

### 1.3.4 Query

When a client tries to read some information from the `kvstore`, the request will be
handled in the `Query` method. To do this, let's rewrite the `Query` method in `app.go`:

@@ -454,14 +457,16 @@ Since it reads only committed data from the store, transactions that are part of

that is being processed are not reflected in the query result.

### 1.3.5 PrepareProposal and ProcessProposal

`PrepareProposal` and `ProcessProposal` are methods introduced in Tendermint v0.37.0
to give the application more control over the construction and processing of transaction blocks.

When Tendermint Core sees that valid transactions (validated through `CheckTx`) are available to be
included in blocks, it groups some of these transactions and then gives the application a chance
to modify the group by invoking `PrepareProposal`.

The application is free to modify the group before returning from the call, as long as the resulting set
does not use more bytes than `RequestPrepareProposal.max_tx_bytes`.
For example, the application may reorder, add, or even remove transactions from the group to improve the
execution of the block once accepted.
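
As a hedged illustration of that size limit, the sketch below (not part of the guide) keeps the original order but stops adding transactions once `max_tx_bytes` would be exceeded:

```go
// Illustrative only: trim the proposal to fit RequestPrepareProposal.MaxTxBytes.
func (app *KVStoreApplication) PrepareProposal(proposal abcitypes.RequestPrepareProposal) abcitypes.ResponsePrepareProposal {
	var totalBytes int64
	txs := make([][]byte, 0, len(proposal.Txs))
	for _, tx := range proposal.Txs {
		totalBytes += int64(len(tx))
		if totalBytes > proposal.MaxTxBytes {
			break
		}
		txs = append(txs, tx)
	}
	return abcitypes.ResponsePrepareProposal{Txs: txs}
}
```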

In the following code, the application simply returns the unmodified group of transactions:

@@ -475,7 +480,7 @@ func (app *KVStoreApplication) PrepareProposal(proposal abcitypes.RequestPrepare

Once a proposed block is received by a node, the proposal is passed to the application to give
its blessing before voting to accept the proposal.

This mechanism may be used for different reasons, for example to deal with blocks manipulated
by malicious nodes, in which case the block should not be considered valid.
The following code simply accepts all proposals:

@@ -654,6 +659,7 @@ messages). Normally, you would use `SignerRemote` to connect to an external

	return nil, fmt.Errorf("failed to load node's key: %w", err)
}
```

Now we have everything set up to run the Tendermint node. We construct
a node by passing it the configuration, the logger, a handle to our application and
the genesis information:

@@ -683,6 +689,7 @@ Finally, we start the node, i.e., the Tendermint Core service inside our applica

	node.Wait()
}()
```

The additional logic at the end of the file allows the program to catch SIGTERM. This means that the node can shut down gracefully when an operator tries to kill the program:

```go
@@ -757,7 +764,8 @@ Open another terminal window and run the following curl command:

```bash
curl -s 'localhost:26657/broadcast_tx_commit?tx="tendermint=rocks"'
```

If everything went well, you should see a response indicating which height the
transaction was included in the blockchain.

Finally, let's make sure that transaction really was persisted by the application.

@@ -778,7 +786,7 @@ The request returns a `json` object with a `key` and `value` field set.

```

Those values don't look like the `key` and `value` we sent to Tendermint.
What's going on here?

The response contains a `base64` encoded representation of the data we submitted.
To get the original value out of this data, we can use the `base64` command line utility:
@@ -13,10 +13,10 @@ experience with Tendermint Core.

Tendermint Core is a service that provides a Byzantine Fault Tolerant consensus engine
for state-machine replication. The replicated state-machine, or "application", can be written
in any language that can send and receive protocol buffer messages in a client-server model.
Applications written in Go can also use Tendermint as a library and run the service in the same
process as the application.

By following along with this tutorial you will create a Tendermint Core application called kvstore,
a (very) simple distributed BFT key-value store.
The application will be written in Go and
some understanding of the Go programming language is expected.

@@ -28,15 +28,16 @@ Note: Please use the latest released version of this guide and of Tendermint.

We strongly advise against using unreleased commits for your development.

### Built-in app vs external app

On the one hand, to get maximum performance you can run your application in
the same process as Tendermint Core, as long as your application is written in Go.
[Cosmos SDK](https://github.com/cosmos/cosmos-sdk) is written
this way.
If that is the way you wish to proceed, use the [Creating a built-in application in Go](./go-built-in.md) guide instead of this one.

On the other hand, having a separate application might give you better security
guarantees as the two processes would be communicating via an established binary protocol.
Tendermint Core will not have access to the application's state.
This is the approach followed in this tutorial.
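
To make the two-process picture concrete, here is a rough sketch of the wiring this guide builds up to: the application exposes an ABCI socket server, and a separately started Tendermint node is pointed at that socket (for example with `--proxy_app`). The package paths, the socket address, and the constructor call are assumptions for illustration; the full, working version is developed step by step later in this guide.

```go
package main

import (
	"fmt"
	"os"

	abciserver "github.com/tendermint/tendermint/abci/server"
	"github.com/tendermint/tendermint/libs/log"
)

func main() {
	// nil stands in for the Badger handle that is set up later in this guide.
	app := NewKVStoreApplication(nil)

	logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))

	// Serve ABCI over a unix socket; Tendermint Core connects as the client.
	server := abciserver.NewSocketServer("unix://example.sock", app)
	server.SetLogger(logger)
	if err := server.Start(); err != nil {
		fmt.Fprintf(os.Stderr, "error starting socket server: %v\n", err)
		os.Exit(1)
	}

	// The real version waits for SIGTERM instead of blocking forever.
	select {}
}
```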

## 1.1 Installing Go

@@ -87,7 +88,7 @@ go mod init kvstore

go get github.com/tendermint/tendermint@latest
```

After running the above commands you will see two generated files, `go.mod` and `go.sum`.
The go.mod file should look similar to:

```go
@@ -112,11 +113,11 @@ go build

## 1.3 Writing a Tendermint Core application

Tendermint Core communicates with the application through the Application
BlockChain Interface (ABCI). The messages exchanged through the interface are
defined in the ABCI [protobuf
file](https://github.com/tendermint/tendermint/blob/main/proto/tendermint/abci/types.proto).

We begin by creating the basic scaffolding for an ABCI application by
creating a new type, `KVStoreApplication`, which implements the
methods defined by the `abcitypes.Application` interface.

@@ -201,7 +202,7 @@ to the project when you ran `go get`. If your IDE is not recognizing the types,

go get github.com/tendermint/tendermint@latest
```

Now go back to `main.go` and modify the `main` function so it matches the following,
where an instance of the `KVStoreApplication` type is created.

@@ -220,17 +221,18 @@ and to start it along with the Tendermint Service.


### 1.3.1 Add a persistent data store

Our application will need to write its state out to persistent storage so that it
can stop and start without losing all of its data.

For this tutorial, we will use [BadgerDB](https://github.com/dgraph-io/badger),
a fast embedded key-value store.

First, add Badger as a dependency of your go module using the `go get` command:

`go get github.com/dgraph-io/badger/v3`

Next, let's update the application and its constructor to receive a handle to the database, as follows:

```go
type KVStoreApplication struct {
@@ -245,7 +247,7 @@ func NewKVStoreApplication(db *badger.DB) *KVStoreApplication {
}
```

The `onGoingBlock` keeps track of the Badger transaction that will update the application's state when a block
is completed. Don't worry about it for now, we'll get to that later.

Next, update the `import` stanza at the top to include the Badger library:

@@ -265,7 +267,8 @@ Finally, update the `main.go` file to invoke the updated constructor:

### 1.3.2 CheckTx

When Tendermint Core receives a new transaction from a client, Tendermint asks the application if
the transaction is acceptable, using the `CheckTx` method.

In our application, a transaction is a string with the form `key=value`, indicating a key and value to write to the store.
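
As a tiny, hypothetical illustration of what "well-formed" means for such a transaction (the guide's actual `CheckTx` follows), a helper could look like this, assuming `bytes` is imported:

```go
// isWellFormed reports whether tx looks like "key=value" with a non-empty key.
// The helper name is illustrative; it is not part of the guide's code.
func isWellFormed(tx []byte) bool {
	parts := bytes.SplitN(tx, []byte("="), 2)
	return len(parts) == 2 && len(parts[0]) > 0
}
```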

@@ -294,17 +297,17 @@ func (app *KVStoreApplication) CheckTx(req abcitypes.RequestCheckTx) abcitypes.R

}
```

While this `CheckTx` is simple and only validates that the transaction is well-formed,
it is very common for `CheckTx` to make more complex use of the state of an application.
For example, you may refuse to overwrite an existing value, or you can associate
versions with the key/value pairs and allow the caller to specify a version to
perform a conditional update.

Depending on the checks and on the conditions violated, the function may return
different values, but any response with a non-zero code will be considered invalid
by Tendermint. Our `CheckTx` logic returns 0 to Tendermint when a transaction passes
its validation checks. The specific value of the code is meaningless to Tendermint.
Non-zero codes are logged by Tendermint so applications can provide more specific
information on why the transaction was rejected.

Note that `CheckTx` does not execute the transaction, it only verifies that the transaction could be executed. We do not know yet if the rest of the network has agreed to accept this transaction into a block.

@@ -333,7 +336,7 @@ receive a block.

- `EndBlock` is called once to indicate to the application that no more transactions
  will be delivered to the application within this block.

Note that, to implement these calls in our application we're going to make use of Badger's
transaction mechanism. We will always refer to these as Badger transactions, not to
confuse them with the transactions included in the blocks delivered by Tendermint,
the _application transactions_.

@@ -371,26 +374,26 @@ func (app *KVStoreApplication) DeliverTx(req abcitypes.RequestDeliverTx) abcityp

Note that we check the validity of the transaction _again_ during `DeliverTx`.
Transactions are not guaranteed to be valid when they are delivered to an
application, even if they were valid when they were proposed.
This can happen if the application state is used to determine transaction
validity. Application state may have changed between the initial execution of `CheckTx`
and the transaction delivery in `DeliverTx` in a way that rendered the transaction
no longer valid.

`EndBlock` is called to inform the application that the full block has been delivered
and give the application a chance to perform any other computation needed, before the
effects of the transactions become permanent.

Note that `EndBlock` **cannot** yet commit the Badger transaction we were building
in during `DeliverTx`.
Since other methods, such as `Query`, rely on a consistent view of the application's
state, the application should only update its state by committing the Badger transactions
when the full block has been delivered and the `Commit` method is invoked.

The `Commit` method tells the application to make permanent the effects of
the application transactions.
Let's update the method to terminate the pending Badger transaction and
persist the resulting state:

```go
func (app *KVStoreApplication) Commit() abcitypes.ResponseCommit {
@@ -415,10 +418,11 @@ import (

You may have noticed that the application we are writing will crash if it receives
an unexpected error from the Badger database during the `DeliverTx` or `Commit` methods.
This is not an accident. If the application received an error from the database, there
is no deterministic way for it to make progress so the only safe option is to terminate.

### 1.3.4 Query

When a client tries to read some information from the `kvstore`, the request will be
handled in the `Query` method. To do this, let's rewrite the `Query` method in `app.go`:

@@ -453,14 +457,16 @@ Since it reads only committed data from the store, transactions that are part of

that is being processed are not reflected in the query result.

### 1.3.5 PrepareProposal and ProcessProposal

`PrepareProposal` and `ProcessProposal` are methods introduced in Tendermint v0.37.0
to give the application more control over the construction and processing of transaction blocks.

When Tendermint Core sees that valid transactions (validated through `CheckTx`) are available to be
included in blocks, it groups some of these transactions and then gives the application a chance
to modify the group by invoking `PrepareProposal`.

The application is free to modify the group before returning from the call, as long as the resulting set
does not use more bytes than `RequestPrepareProposal.max_tx_bytes`.
For example, the application may reorder, add, or even remove transactions from the group to improve the
execution of the block once accepted.
In the following code, the application simply returns the unmodified group of transactions:

@@ -474,7 +480,7 @@ func (app *KVStoreApplication) PrepareProposal(proposal abcitypes.RequestPrepare

Once a proposed block is received by a node, the proposal is passed to the application to give
its blessing before voting to accept the proposal.

This mechanism may be used for different reasons, for example to deal with blocks manipulated
by malicious nodes, in which case the block should not be considered valid.
The following code simply accepts all proposals:

@@ -588,6 +594,7 @@ which connects to our server and send us transactions and other messages.

	signal.Notify(c, os.Interrupt, syscall.SIGTERM)
	<-c
```

## 1.5 Initializing and Running

Our application is almost ready to run, but first we'll need to populate the Tendermint Core configuration files.

@@ -630,8 +637,8 @@ I[2022-11-09|17:01:28.726] service start msg="Sta

I[2022-11-09|17:01:28.726] Waiting for new connection...
```

Then we need to start the Tendermint Core service and point it to our application.
Open a new terminal window and cd to the same folder where the app is running.
Then execute the following command:

```bash
@@ -671,7 +678,8 @@ Open another terminal window and run the following curl command:

```bash
curl -s 'localhost:26657/broadcast_tx_commit?tx="tendermint=rocks"'
```

If everything went well, you should see a response indicating which height the
transaction was included in the blockchain.

Finally, let's make sure that transaction really was persisted by the application.

@@ -692,7 +700,7 @@ The request returns a `json` object with a `key` and `value` field set.

```

Those values don't look like the `key` and `value` we sent to Tendermint.
What's going on here?

The response contains a `base64` encoded representation of the data we submitted.
To get the original value out of this data, we can use the `base64` command line utility:
66
go.mod
@@ -23,7 +23,7 @@ require (
github.com/pointlander/peg v1.0.1
github.com/prometheus/client_golang v1.14.0
github.com/prometheus/client_model v0.3.0
github.com/prometheus/common v0.37.0
github.com/prometheus/common v0.39.0
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475
github.com/rs/cors v1.8.2
github.com/sasha-s/go-deadlock v0.3.1
@@ -32,25 +32,28 @@ require (
github.com/spf13/viper v1.14.0
github.com/stretchr/testify v1.8.1
github.com/tendermint/tm-db v0.6.6
golang.org/x/crypto v0.3.0
golang.org/x/net v0.2.0
golang.org/x/crypto v0.4.0
golang.org/x/net v0.4.0
google.golang.org/grpc v1.51.0
)

require (
github.com/bufbuild/buf v1.9.0
github.com/bufbuild/buf v1.10.0
github.com/creachadair/taskgroup v0.3.2
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7
)

require (
github.com/Masterminds/semver/v3 v3.2.0
github.com/btcsuite/btcd/btcec/v2 v2.3.2
github.com/btcsuite/btcd/btcutil v1.1.3
github.com/cosmos/gogoproto v1.4.3
github.com/go-git/go-git/v5 v5.5.1
github.com/gofrs/uuid v4.3.1+incompatible
github.com/google/uuid v1.3.0
github.com/oasisprotocol/curve25519-voi v0.0.0-20220708102147-0a8a51822cae
github.com/vektra/mockery/v2 v2.15.0
golang.org/x/sync v0.1.0
gonum.org/v1/gonum v0.12.0
google.golang.org/protobuf v1.28.2-0.20220831092852-f930b1dc76e8
)
@@ -68,6 +71,8 @@ require (
github.com/Microsoft/go-winio v0.6.0 // indirect
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect
github.com/OpenPeeDeeP/depguard v1.1.1 // indirect
github.com/ProtonMail/go-crypto v0.0.0-20221026131551-cf6655e29de4 // indirect
github.com/acomagu/bufpipe v1.0.3 // indirect
github.com/alexkohler/prealloc v1.0.0 // indirect
github.com/alingse/asasalint v0.0.11 // indirect
github.com/ashanbrown/forbidigo v1.3.0 // indirect
@@ -78,14 +83,15 @@ require (
github.com/bombsimon/wsl/v3 v3.3.0 // indirect
github.com/breml/bidichk v0.2.3 // indirect
github.com/breml/errchkjson v0.3.0 // indirect
github.com/bufbuild/connect-go v1.0.0 // indirect
github.com/bufbuild/connect-go v1.1.0 // indirect
github.com/bufbuild/protocompile v0.1.0 // indirect
github.com/butuzov/ireturn v0.1.1 // indirect
github.com/cespare/xxhash v1.1.0 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/charithe/durationcheck v0.0.9 // indirect
github.com/chavacava/garif v0.0.0-20220630083739-93517212f375 // indirect
github.com/containerd/containerd v1.6.8 // indirect
github.com/cloudflare/circl v1.3.1 // indirect
github.com/containerd/containerd v1.6.9 // indirect
github.com/containerd/continuity v0.3.0 // indirect
github.com/containerd/typeurl v1.0.2 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
@@ -98,19 +104,23 @@ require (
github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de // indirect
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 // indirect
github.com/docker/distribution v2.8.1+incompatible // indirect
github.com/docker/docker v20.10.19+incompatible // indirect
github.com/docker/docker v20.10.21+incompatible // indirect
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/dustin/go-humanize v1.0.0 // indirect
github.com/emirpasic/gods v1.18.1 // indirect
github.com/esimonov/ifshort v1.0.4 // indirect
github.com/ettle/strcase v0.1.1 // indirect
github.com/fatih/color v1.13.0 // indirect
github.com/fatih/structtag v1.2.0 // indirect
github.com/felixge/fgprof v0.9.3 // indirect
github.com/firefart/nonamedreturns v1.0.4 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/fzipp/gocyclo v0.6.0 // indirect
github.com/go-chi/chi/v5 v5.0.7 // indirect
github.com/go-critic/go-critic v0.6.5 // indirect
github.com/go-git/gcfg v1.5.0 // indirect
github.com/go-git/go-billy/v5 v5.3.1 // indirect
github.com/go-logr/logr v1.2.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-toolsmith/astcast v1.0.0 // indirect
@@ -137,6 +147,7 @@ require (
github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 // indirect
github.com/google/btree v1.0.0 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/pprof v0.0.0-20221102093814-76f304f74e5e // indirect
github.com/gordonklaus/ineffassign v0.0.0-20210914165742-4cc7213b9bc8 // indirect
github.com/gostaticanalysis/analysisutil v0.7.1 // indirect
github.com/gostaticanalysis/comment v1.4.2 // indirect
@@ -148,17 +159,20 @@ require (
github.com/hashicorp/go-version v1.6.0 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/hexops/gotextdiff v1.0.3 // indirect
github.com/imdario/mergo v0.3.13 // indirect
github.com/inconshreveable/mousetrap v1.0.1 // indirect
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
github.com/jdxcode/netrc v0.0.0-20210204082910-926c7f70242a // indirect
github.com/jgautheron/goconst v1.5.1 // indirect
github.com/jingyugao/rowserrcheck v1.1.1 // indirect
github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af // indirect
github.com/jmhodges/levigo v1.0.0 // indirect
github.com/julz/importas v0.1.0 // indirect
github.com/kevinburke/ssh_config v1.2.0 // indirect
github.com/kisielk/errcheck v1.6.2 // indirect
github.com/kisielk/gotool v1.0.0 // indirect
github.com/kkHAIKE/contextcheck v1.1.3 // indirect
github.com/klauspost/compress v1.15.11 // indirect
github.com/klauspost/compress v1.15.12 // indirect
github.com/klauspost/pgzip v1.2.5 // indirect
github.com/kulti/thelper v0.6.3 // indirect
github.com/kunwardeep/paralleltest v1.0.6 // indirect
@@ -174,12 +188,12 @@ require (
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.16 // indirect
github.com/mattn/go-runewidth v0.0.9 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mbilski/exhaustivestruct v1.2.0 // indirect
github.com/mgechev/revive v1.2.4 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/moby/buildkit v0.10.4 // indirect
github.com/moby/buildkit v0.10.5 // indirect
github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae // indirect
github.com/moricho/tparallel v0.2.1 // indirect
github.com/morikuni/aec v1.0.0 // indirect
@@ -195,8 +209,9 @@ require (
github.com/pelletier/go-toml/v2 v2.0.5 // indirect
github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 // indirect
github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d // indirect
github.com/pjbgf/sha1cd v0.2.3 // indirect
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect
github.com/pkg/profile v1.6.0 // indirect
github.com/pkg/profile v1.7.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/pointlander/compress v1.1.1-0.20190518213731-ff44bd196cc3 // indirect
github.com/pointlander/jetset v1.0.1-0.20190518214125-eee7eff80bd4 // indirect
@@ -215,11 +230,13 @@ require (
github.com/sashamelentyev/usestdlibvars v1.20.0 // indirect
github.com/satori/go.uuid v1.2.0 // indirect
github.com/securego/gosec/v2 v2.13.1 // indirect
github.com/sergi/go-diff v1.2.0 // indirect
github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c // indirect
github.com/sirupsen/logrus v1.9.0 // indirect
github.com/sivchari/containedctx v1.0.2 // indirect
github.com/sivchari/nosnakecase v1.7.0 // indirect
github.com/sivchari/tenv v1.7.0 // indirect
github.com/skeema/knownhosts v1.1.0 // indirect
github.com/sonatard/noctx v0.0.1 // indirect
github.com/sourcegraph/go-diff v0.6.1 // indirect
github.com/spf13/afero v1.9.2 // indirect
@@ -240,28 +257,29 @@ require (
github.com/ultraware/funlen v0.0.3 // indirect
github.com/ultraware/whitespace v0.0.5 // indirect
github.com/uudashr/gocognit v1.0.6 // indirect
github.com/xanzy/ssh-agent v0.3.3 // indirect
github.com/yagipy/maintidx v1.0.0 // indirect
github.com/yeya24/promlinter v0.2.0 // indirect
gitlab.com/bosi/decorder v0.2.3 // indirect
go.etcd.io/bbolt v1.3.6 // indirect
go.opencensus.io v0.23.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.36.3 // indirect
go.opentelemetry.io/otel v1.11.0 // indirect
go.opentelemetry.io/otel/metric v0.32.3 // indirect
go.opentelemetry.io/otel/trace v1.11.0 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.36.4 // indirect
go.opentelemetry.io/otel v1.11.1 // indirect
go.opentelemetry.io/otel/metric v0.33.0 // indirect
go.opentelemetry.io/otel/trace v1.11.1 // indirect
go.uber.org/atomic v1.10.0 // indirect
go.uber.org/multierr v1.8.0 // indirect
go.uber.org/zap v1.23.0 // indirect
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect
golang.org/x/exp v0.0.0-20221031165847-c99f073a8326 // indirect
golang.org/x/exp/typeparams v0.0.0-20220827204233-334a2380cb91 // indirect
golang.org/x/mod v0.6.0 // indirect
golang.org/x/sync v0.1.0 // indirect
golang.org/x/sys v0.2.0 // indirect
golang.org/x/term v0.2.0 // indirect
golang.org/x/text v0.4.0 // indirect
golang.org/x/tools v0.2.0 // indirect
google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e // indirect
golang.org/x/mod v0.7.0 // indirect
golang.org/x/sys v0.3.0 // indirect
golang.org/x/term v0.3.0 // indirect
golang.org/x/text v0.5.0 // indirect
golang.org/x/tools v0.4.0 // indirect
google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
honnef.co/go/tools v0.3.3 // indirect
160
go.sum
@@ -23,7 +23,7 @@ cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPT
cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
cloud.google.com/go v0.104.0 h1:gSmWO7DY1vOm0MVU6DNXM11BWHHsTUmsC5cv1fuW5X8=
cloud.google.com/go v0.105.0 h1:DNtEKRBAAzeS4KyIory52wWHuClNaXJ5x1F7xa4q+5Y=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
@@ -73,8 +73,11 @@ github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXY
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g=
github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg=
github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE=
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw=
@@ -83,11 +86,15 @@ github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/OpenPeeDeeP/depguard v1.1.1 h1:TSUznLjvp/4IUP+OQ0t/4jF4QUyxIcVX8YnghZdunyA=
github.com/OpenPeeDeeP/depguard v1.1.1/go.mod h1:JtAMzWkmFEzDPyAd+W0NHl1lvpQKTvT9jnRVsohBKpc=
github.com/ProtonMail/go-crypto v0.0.0-20221026131551-cf6655e29de4 h1:ra2OtmuW0AE5csawV4YXMNGNQQXvLRps3z2Z59OPO+I=
github.com/ProtonMail/go-crypto v0.0.0-20221026131551-cf6655e29de4/go.mod h1:UBYPn8k0D56RtnR8RFQMjmh4KrZzWJ5o7Z9SYjossQ8=
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE=
github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
github.com/Workiva/go-datastructures v1.0.52/go.mod h1:Z+F2Rca0qCsVYDS8z7bAGm8f3UkzuWYS/oBZz5a7VVA=
github.com/acomagu/bufpipe v1.0.3 h1:fxAGrHZTgQ9w5QqVItgzwj235/uYZYgbXitB+dLupOk=
github.com/acomagu/bufpipe v1.0.3/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4=
github.com/adlio/schema v1.1.13/go.mod h1:L5Z7tw+7lRK1Fnpi/LT/ooCP1elkXn0krMWBQHUhEDE=
github.com/adlio/schema v1.3.3 h1:oBJn8I02PyTB466pZO1UZEn1TV5XLlifBSyMrmHl/1I=
github.com/adlio/schema v1.3.3/go.mod h1:1EsRssiv9/Ce2CMzq5DoL7RiMshhuigQxrR4DMV9fHg=
@@ -103,6 +110,8 @@ github.com/alexkohler/prealloc v1.0.0 h1:Hbq0/3fJPQhNkN0dR95AVrr6R7tou91y0uHG5pO
github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE=
github.com/alingse/asasalint v0.0.11 h1:SFwnQXJ49Kx/1GghOFz1XGqHYKp21Kq1nHad/0WQRnw=
github.com/alingse/asasalint v0.0.11/go.mod h1:nCaoMhw7a9kSJObvQyVzNTPBDbNpdocqrSP7t/cW5+I=
||||
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
|
||||
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
|
||||
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
|
||||
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
|
||||
github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
|
||||
@@ -112,6 +121,8 @@ github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmV
|
||||
github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
|
||||
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
|
||||
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
|
||||
github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
|
||||
github.com/ashanbrown/forbidigo v1.3.0 h1:VkYIwb/xxdireGAdJNZoo24O4lmnEWkactplBlWTShc=
|
||||
github.com/ashanbrown/forbidigo v1.3.0/go.mod h1:vVW7PEdqEFqapJe95xHkTfB1+XvZXBFg8t0sG2FIxmI=
|
||||
@@ -172,14 +183,15 @@ github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku
|
||||
github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
|
||||
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
|
||||
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
|
||||
github.com/bufbuild/buf v1.9.0 h1:8a60qapVuRj6crerWR0rny4UUV/MhZSL5gagJuBxmx8=
|
||||
github.com/bufbuild/buf v1.9.0/go.mod h1:1Q+rMHiMVcfgScEF/GOldxmu4o9TrQ2sQQh58K6MscE=
|
||||
github.com/bufbuild/connect-go v1.0.0 h1:htSflKUT8y1jxhoPhPYTZMrsY3ipUXjjrbcZR5O2cVo=
|
||||
github.com/bufbuild/connect-go v1.0.0/go.mod h1:9iNvh/NOsfhNBUH5CtvXeVUskQO1xsrEviH7ZArwZ3I=
|
||||
github.com/bufbuild/buf v1.10.0 h1:t6rV4iP1cs/sJH5SYvcLanOshLvmtvwSC+Mt+GfG05s=
|
||||
github.com/bufbuild/buf v1.10.0/go.mod h1:79BrOWh8uX1a0SVSoPyeYgtP0+Y0n5J3Tt6kjTSkLoU=
|
||||
github.com/bufbuild/connect-go v1.1.0 h1:AUgqqO2ePdOJSpPOep6BPYz5v2moW1Lb8sQh0EeRzQ8=
|
||||
github.com/bufbuild/connect-go v1.1.0/go.mod h1:9iNvh/NOsfhNBUH5CtvXeVUskQO1xsrEviH7ZArwZ3I=
|
||||
github.com/bufbuild/protocompile v0.1.0 h1:HjgJBI85hY/qmW5tw/66sNDZ7z0UDdVSi/5r40WHw4s=
|
||||
github.com/bufbuild/protocompile v0.1.0/go.mod h1:ix/MMMdsT3fzxfw91dvbfzKW3fRRnuPCP47kpAm5m/4=
|
||||
github.com/butuzov/ireturn v0.1.1 h1:QvrO2QF2+/Cx1WA/vETCIYBKtRjc30vesdoPUNo1EbY=
|
||||
github.com/butuzov/ireturn v0.1.1/go.mod h1:Wh6Zl3IMtTpaIKbmwzqi6olnM9ptYQxxVacMsOEFPoc=
|
||||
github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
|
||||
github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
|
||||
github.com/casbin/casbin/v2 v2.37.0/go.mod h1:vByNa/Fchek0KZUgG5wEsl7iFsiviAYKRtgrQfcJqHg=
|
||||
github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4=
|
||||
@@ -208,6 +220,9 @@ github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp
|
||||
github.com/clbanning/mxj v1.8.4/go.mod h1:BVjHeAH+rl9rs6f+QIpeRl0tfu10SXn1pUSa5PVGJng=
|
||||
github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtMxxK7fi4I=
|
||||
github.com/cloudflare/circl v1.3.1 h1:4OVCZRL62ijwEwxnF6I7hLwxvIYi3VaZt8TflkqtrtA=
|
||||
github.com/cloudflare/circl v1.3.1/go.mod h1:+CauBF6R70Jqcyl8N2hC8pAXYbWkGIezuSbuGLtRhnw=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
@@ -220,8 +235,8 @@ github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:z
|
||||
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
|
||||
github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ=
|
||||
github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
|
||||
github.com/containerd/containerd v1.6.8 h1:h4dOFDwzHmqFEP754PgfgTeVXFnLiRc6kiqC7tplDJs=
|
||||
github.com/containerd/containerd v1.6.8/go.mod h1:By6p5KqPK0/7/CgO/A6t/Gz+CUYUu2zf1hUaaymVXB0=
|
||||
github.com/containerd/containerd v1.6.9 h1:IN/r8DUes/B5lEGTNfIiUkfZBtIQJGx2ai703dV6lRA=
|
||||
github.com/containerd/containerd v1.6.9/go.mod h1:XVicUvkxOrftE2Q1YWUXgZwkkAxwQYNOFzYWvfVfEfQ=
|
||||
github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
|
||||
github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM=
|
||||
github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg=
|
||||
@@ -284,8 +299,8 @@ github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8
|
||||
github.com/docker/cli v20.10.17+incompatible h1:eO2KS7ZFeov5UJeaDmIs1NFEDRf32PaqRpvoEkKBy5M=
|
||||
github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
|
||||
github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/docker v20.10.19+incompatible h1:lzEmjivyNHFHMNAFLXORMBXyGIhw/UP4DvJwvyKYq64=
|
||||
github.com/docker/docker v20.10.19+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker v20.10.21+incompatible h1:UTLdBmHk3bEY+w8qeO5KttOhy6OmXWsl/FEet9Uswog=
|
||||
github.com/docker/docker v20.10.21+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
|
||||
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
|
||||
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
@@ -298,6 +313,8 @@ github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5m
|
||||
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
|
||||
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
|
||||
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
|
||||
github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
|
||||
github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
|
||||
github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g=
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
@@ -325,6 +342,8 @@ github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
|
||||
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
|
||||
github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4=
|
||||
github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94=
|
||||
github.com/felixge/fgprof v0.9.3 h1:VvyZxILNuCiUCSXtPtYmmtGvb65nqXh2QFWc0Wpf2/g=
|
||||
github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw=
|
||||
github.com/firefart/nonamedreturns v1.0.4 h1:abzI1p7mAEPYuR4A+VLKn4eNDOycjYo2phmY9sfv40Y=
|
||||
github.com/firefart/nonamedreturns v1.0.4/go.mod h1:TDhe/tjI1BXo48CmYbUduTV7BdIga8MAO/xbKdcVsGI=
|
||||
github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
|
||||
@@ -342,10 +361,20 @@ github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbS
|
||||
github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo=
|
||||
github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/gliderlabs/ssh v0.3.5 h1:OcaySEmAQJgyYcArR+gGGTHCyE7nvhEMTlYY+Dp8CpY=
|
||||
github.com/gliderlabs/ssh v0.3.5/go.mod h1:8XB4KraRrX39qHhT6yxPsHedjA08I/uBVwj4xC+/+z4=
|
||||
github.com/go-chi/chi/v5 v5.0.7 h1:rDTPXLDHGATaeHvVlLcR4Qe0zftYethFucbjVQ1PxU8=
|
||||
github.com/go-chi/chi/v5 v5.0.7/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
|
||||
github.com/go-critic/go-critic v0.6.5 h1:fDaR/5GWURljXwF8Eh31T2GZNz9X4jeboS912mWF8Uo=
|
||||
github.com/go-critic/go-critic v0.6.5/go.mod h1:ezfP/Lh7MA6dBNn4c6ab5ALv3sKnZVLx37tr00uuaOY=
|
||||
github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4=
|
||||
github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E=
|
||||
github.com/go-git/go-billy/v5 v5.3.1 h1:CPiOUAzKtMRvolEKw+bG1PLRpT7D3LIs3/3ey4Aiu34=
|
||||
github.com/go-git/go-billy/v5 v5.3.1/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0=
|
||||
github.com/go-git/go-git-fixtures/v4 v4.3.1 h1:y5z6dd3qi8Hl+stezc8p3JxDkoTRqMAlKnXHuzrfjTQ=
|
||||
github.com/go-git/go-git-fixtures/v4 v4.3.1/go.mod h1:8LHG1a3SRW71ettAD/jW13h8c6AqjVSeL11RAdgaqpo=
|
||||
github.com/go-git/go-git/v5 v5.5.1 h1:5vtv2TB5PM/gPM+EvsHJ16hJh4uAkdGcKilcwY7FYwo=
|
||||
github.com/go-git/go-git/v5 v5.5.1/go.mod h1:uz5PQ3d0gz7mSgzZhSJToM6ALPaKCdSnl58/Xb5hzr8=
|
||||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
@@ -506,6 +535,9 @@ github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLe
|
||||
github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg=
|
||||
github.com/google/pprof v0.0.0-20221102093814-76f304f74e5e h1:F1LLQqQ8WoIbyoxLUY+JUZe1kuHdxThM6CPUATzE6Io=
|
||||
github.com/google/pprof v0.0.0-20221102093814-76f304f74e5e/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
|
||||
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
@@ -606,7 +638,9 @@ github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmK
|
||||
github.com/hudl/fargo v1.4.0/go.mod h1:9Ai6uvFy5fQNq6VPKtg+Ceq1+eTY4nKUlR2JElEOcDo=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
|
||||
github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk=
|
||||
github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc=
|
||||
github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||
@@ -614,13 +648,16 @@ github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod
|
||||
github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
|
||||
github.com/informalsystems/tm-load-test v1.0.0 h1:e1IeUw8701HWCMuOM1vLM/XcpH2Lrb88GNWdFAPDmmA=
|
||||
github.com/informalsystems/tm-load-test v1.0.0/go.mod h1:WVaSKaQdfZK3v0C74EMzn7//+3aeCZF8wkIKBz2/M74=
|
||||
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
|
||||
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
|
||||
github.com/jdxcode/netrc v0.0.0-20210204082910-926c7f70242a h1:d4+I1YEKVmWZrgkt6jpXBnLgV2ZjO0YxEtLDdfIZfH4=
|
||||
github.com/jdxcode/netrc v0.0.0-20210204082910-926c7f70242a/go.mod h1:Zi/ZFkEqFHTm7qkjyNJjaWH4LQA9LQhGJyF0lTYGpxw=
|
||||
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4=
|
||||
github.com/jgautheron/goconst v1.5.1 h1:HxVbL1MhydKs8R8n/HE5NPvzfaYmQJA3o879lE4+WcM=
|
||||
github.com/jgautheron/goconst v1.5.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4=
|
||||
github.com/jhump/protoreflect v1.13.1-0.20220928232736-101791cb1b4c h1:XImQJfpJLmGEEd8ll5yPVyL/aEvmgGHW4WYTyNseLOM=
|
||||
github.com/jhump/protoreflect v1.14.0 h1:MBbQK392K3u8NTLbKOCIi3XdI+y+c6yt5oMq0X3xviw=
|
||||
github.com/jingyugao/rowserrcheck v1.1.1 h1:zibz55j/MJtLsjP1OF4bSdgXxwL1b+Vn7Tjzq7gFzUs=
|
||||
github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c=
|
||||
github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af h1:KA9BjwUk7KlCh6S9EAGWBt1oExIUv9WyNCiRz5amv48=
|
||||
@@ -649,6 +686,8 @@ github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8
|
||||
github.com/julz/importas v0.1.0 h1:F78HnrsjY3cR7j0etXy5+TU1Zuy7Xt08X/1aJnH5xXY=
|
||||
github.com/julz/importas v0.1.0/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0=
|
||||
github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
|
||||
github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4=
|
||||
github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
|
||||
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/errcheck v1.6.2 h1:uGQ9xI8/pgc9iOoCe7kWQgRE6SBTrCGmTSf0LrEtY7c=
|
||||
@@ -660,8 +699,8 @@ github.com/kkHAIKE/contextcheck v1.1.3/go.mod h1:PG/cwd6c0705/LM0KTr1acO2gORUxkS
|
||||
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
|
||||
github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
|
||||
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
|
||||
github.com/klauspost/compress v1.15.11 h1:Lcadnb3RKGin4FYM/orgq0qde+nc15E5Cbqg4B9Sx9c=
|
||||
github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
|
||||
github.com/klauspost/compress v1.15.12 h1:YClS/PImqYbn+UILDnqxQCZ3RehC9N318SU3kElDUEM=
|
||||
github.com/klauspost/compress v1.15.12/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
|
||||
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
|
||||
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
@@ -711,6 +750,7 @@ github.com/maratori/testpackage v1.1.0 h1:GJY4wlzQhuBusMF1oahQCBtUV/AQ/k69IZ68vx
|
||||
github.com/maratori/testpackage v1.1.0/go.mod h1:PeAhzU8qkCwdGEMTEupsHJNlQu2gZopMC6RjbhmHeDc=
|
||||
github.com/matoous/godox v0.0.0-20210227103229-6504466cf951 h1:pWxk9e//NbPwfxat7RXkts09K+dEBJWakUWwICVqYbA=
|
||||
github.com/matoous/godox v0.0.0-20210227103229-6504466cf951/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s=
|
||||
github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA=
|
||||
github.com/matryer/is v1.4.0 h1:sosSmIWwkYITGrxZ25ULNDeKiMNzFSr4V/eqBQP0PeE=
|
||||
github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU=
|
||||
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||
@@ -736,8 +776,8 @@ github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m
|
||||
github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
|
||||
github.com/mattn/go-sqlite3 v1.14.9 h1:10HX2Td0ocZpYEjhilsuo6WWtUqttj2Kb0KtD86/KYA=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
|
||||
github.com/mbilski/exhaustivestruct v1.2.0 h1:wCBmUnSYufAHO6J4AVWY6ff+oxWxsVFrwgOdMUQePUo=
|
||||
github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc=
|
||||
github.com/mgechev/revive v1.2.4 h1:+2Hd/S8oO2H0Ikq2+egtNwQsVhAeELHjxjIUFX5ajLI=
|
||||
@@ -764,8 +804,8 @@ github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR
|
||||
github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
||||
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/moby/buildkit v0.10.4 h1:FvC+buO8isGpUFZ1abdSLdGHZVqg9sqI4BbFL8tlzP4=
|
||||
github.com/moby/buildkit v0.10.4/go.mod h1:Yajz9vt1Zw5q9Pp4pdb3TCSUXJBIroIQGQ3TTs/sLug=
|
||||
github.com/moby/buildkit v0.10.5 h1:d9krS/lG3dn6N7y+R8o9PTgIixlYAaDk35f3/B4jZOw=
|
||||
github.com/moby/buildkit v0.10.5/go.mod h1:Yajz9vt1Zw5q9Pp4pdb3TCSUXJBIroIQGQ3TTs/sLug=
|
||||
github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
|
||||
github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
|
||||
github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae h1:O4SWKdcHVCvYqyDV+9CJA1fcDN2L11Bule0iFy3YlAI=
|
||||
@@ -799,7 +839,6 @@ github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV
|
||||
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
|
||||
github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 h1:4kuARK6Y6FxaNu/BnU2OAaLF86eTVhP2hjTB6iMvItA=
|
||||
github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8=
|
||||
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
|
||||
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
|
||||
github.com/nishanths/exhaustive v0.8.3 h1:pw5O09vwg8ZaditDp/nQRqVnrMczSJDxRDJMowvhsrM=
|
||||
github.com/nishanths/exhaustive v0.8.3/go.mod h1:qj+zJJUgJ76tR92+25+03oYUhzF4R7/2Wk7fGTfCHmg=
|
||||
@@ -880,6 +919,8 @@ github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d h1:CdDQnGF8Nq9oc
|
||||
github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw=
|
||||
github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
|
||||
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
|
||||
github.com/pjbgf/sha1cd v0.2.3 h1:uKQP/7QOzNtKYH7UTohZLcjF5/55EnTw0jO/Ru4jZwI=
|
||||
github.com/pjbgf/sha1cd v0.2.3/go.mod h1:HOK9QrgzdHpbc2Kzip0Q1yi3M2MFGPADtR6HjG65m5M=
|
||||
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU=
|
||||
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
@@ -887,8 +928,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
|
||||
github.com/pkg/profile v1.6.0 h1:hUDfIISABYI59DyeB3OTay/HxSRwTQ8rB/H83k6r5dM=
|
||||
github.com/pkg/profile v1.6.0/go.mod h1:qBsxPvzyUincmltOk6iyRVxHYg4adc0OFOv72ZdLa18=
|
||||
github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA=
|
||||
github.com/pkg/profile v1.7.0/go.mod h1:8Uer0jas47ZQMJ7VD+OHknK4YDY07LPUC6dEvqDjvNo=
|
||||
github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
|
||||
github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
@@ -934,8 +975,8 @@ github.com/prometheus/common v0.14.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16
|
||||
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
|
||||
github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
|
||||
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
|
||||
github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
|
||||
github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
|
||||
github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI=
|
||||
github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
@@ -1001,6 +1042,9 @@ github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvW
|
||||
github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
|
||||
github.com/securego/gosec/v2 v2.13.1 h1:7mU32qn2dyC81MH9L2kefnQyRMUarfDER3iQyMHcjYM=
|
||||
github.com/securego/gosec/v2 v2.13.1/go.mod h1:EO1sImBMBWFjOTFzMWfTRrZW6M15gm60ljzrmy/wtHo=
|
||||
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
|
||||
github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
|
||||
github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
|
||||
github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c h1:W65qqJCIOVP4jpqPQ0YvHYKwcMEMVWIzWC5iNQQfBTU=
|
||||
github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs=
|
||||
github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
|
||||
@@ -1020,6 +1064,8 @@ github.com/sivchari/nosnakecase v1.7.0 h1:7QkpWIRMe8x25gckkFd2A5Pi6Ymo0qgr4JrhGt
|
||||
github.com/sivchari/nosnakecase v1.7.0/go.mod h1:CwDzrzPea40/GB6uynrNLiorAlgFRvRbFSgJx2Gs+QY=
|
||||
github.com/sivchari/tenv v1.7.0 h1:d4laZMBK6jpe5PWepxlV9S+LC0yXqvYHiq8E6ceoVVE=
|
||||
github.com/sivchari/tenv v1.7.0/go.mod h1:64yStXKSOxDfX47NlhVwND4dHwfZDdbp2Lyl018Icvg=
|
||||
github.com/skeema/knownhosts v1.1.0 h1:Wvr9V0MxhjRbl3f9nMnKnFfiWTJmtECJ9Njkea3ysW0=
|
||||
github.com/skeema/knownhosts v1.1.0/go.mod h1:sKFq3RD6/TKZkSWn8boUbDC7Qkgcv+8XXijpFO6roag=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||
github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa h1:YJfZp12Z3AFhSBeXOlv4BO55RMwPn2NoQeDsrdWnBtY=
|
||||
@@ -1133,6 +1179,8 @@ github.com/vektra/mockery/v2 v2.15.0 h1:5Egbxoancm1hhkJUoAF+cf0FBzC9oxS28LL/ZKbC
|
||||
github.com/vektra/mockery/v2 v2.15.0/go.mod h1:RswGtsqDbCR9j4UcgBQuAZY7OFxI+TgtHevc0gR0kCY=
|
||||
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
|
||||
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
|
||||
github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM=
|
||||
github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
|
||||
github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
|
||||
@@ -1169,16 +1217,17 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
|
||||
go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
|
||||
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.36.3 h1:syAz40OyelLZo42+3U68Phisvrx4qh+4wpdZw7eUUdY=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.36.3/go.mod h1:Dts42MGkzZne2yCru741+bFiTMWkIj/LLRizad7b9tw=
|
||||
go.opentelemetry.io/otel v1.11.0 h1:kfToEGMDq6TrVrJ9Vht84Y8y9enykSZzDDZglV0kIEk=
|
||||
go.opentelemetry.io/otel v1.11.0/go.mod h1:H2KtuEphyMvlhZ+F7tg9GRhAOe60moNx61Ex+WmiKkk=
|
||||
go.opentelemetry.io/otel/metric v0.32.3 h1:dMpnJYk2KULXr0j8ph6N7+IcuiIQXlPXD4kix9t7L9c=
|
||||
go.opentelemetry.io/otel/metric v0.32.3/go.mod h1:pgiGmKohxHyTPHGOff+vrtIH39/R9fiO/WoenUQ3kcc=
|
||||
go.opentelemetry.io/otel/trace v1.11.0 h1:20U/Vj42SX+mASlXLmSGBg6jpI1jQtv682lZtTAOVFI=
|
||||
go.opentelemetry.io/otel/trace v1.11.0/go.mod h1:nyYjis9jy0gytE9LXGU+/m1sHTKbRY0fX0hulNNDP1U=
|
||||
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
|
||||
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.36.4 h1:PRXhsszxTt5bbPriTjmaweWUsAnJYeWBhUMLRetUgBU=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.36.4/go.mod h1:05eWWy6ZWzmpeImD3UowLTB3VjDMU1yxQ+ENuVWDM3c=
|
||||
go.opentelemetry.io/otel v1.11.1 h1:4WLLAmcfkmDk2ukNXJyq3/kiz/3UzCaYq6PskJsaou4=
|
||||
go.opentelemetry.io/otel v1.11.1/go.mod h1:1nNhXBbWSD0nsL38H6btgnFN2k4i0sNLHNNMZMSbUGE=
|
||||
go.opentelemetry.io/otel/metric v0.33.0 h1:xQAyl7uGEYvrLAiV/09iTJlp1pZnQ9Wl793qbVvED1E=
|
||||
go.opentelemetry.io/otel/metric v0.33.0/go.mod h1:QlTYc+EnYNq/M2mNk1qDDMRLpqCOj2f/r5c7Fd5FYaI=
|
||||
go.opentelemetry.io/otel/trace v1.11.1 h1:ofxdnzsNrGBYXbP7t7zpUK281+go5rF7dvdIZXF8gdQ=
|
||||
go.opentelemetry.io/otel/trace v1.11.1/go.mod h1:f/Q9G7vzk5u91PhbmKbg1Qn0rzH1LJ4vbPHFGkTPtOk=
|
||||
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
|
||||
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
@@ -1225,8 +1274,12 @@ golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5y
|
||||
golang.org/x/crypto v0.0.0-20210915214749-c084706c2272/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.3.0 h1:a06MkbcxBrEFc0w0QIZWXrH/9cCX6KJyWbBOIwAn+7A=
|
||||
golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.0.0-20220826181053-bd7e27e6170d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
|
||||
golang.org/x/crypto v0.4.0 h1:UVQgzMY87xqpKNgb+kDsll2Igd33HszWHFLmpaRMq/8=
|
||||
golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80=
|
||||
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
@@ -1240,8 +1293,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
|
||||
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
|
||||
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
||||
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA=
|
||||
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA=
|
||||
golang.org/x/exp v0.0.0-20221031165847-c99f073a8326 h1:QfTh0HpN6hlw6D3vu8DAwC8pBIwikq0AI1evdm+FksE=
|
||||
golang.org/x/exp v0.0.0-20221031165847-c99f073a8326/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
|
||||
golang.org/x/exp/typeparams v0.0.0-20220428152302-39d4317da171/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
|
||||
golang.org/x/exp/typeparams v0.0.0-20220827204233-334a2380cb91 h1:Ic/qN6TEifvObMGQy72k0n1LlJr7DjWWEi+MOsDOiSk=
|
||||
golang.org/x/exp/typeparams v0.0.0-20220827204233-334a2380cb91/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
|
||||
@@ -1274,8 +1327,8 @@ golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.6.0 h1:b9gGHsz9/HhJ3HF5DHQytPpuwocVTChQJK3AvoLRD5I=
|
||||
golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
|
||||
golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA=
|
||||
golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
@@ -1331,11 +1384,12 @@ golang.org/x/net v0.0.0-20210903162142-ad29c8ab022f/go.mod h1:9nx3DQGgdP8bBQD5qx
|
||||
golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.2.0 h1:sZfSu1wtKLGlWI4ZZayP0ck9Y73K1ynO6gqzTdBVdPU=
|
||||
golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
|
||||
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
|
||||
golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU=
|
||||
golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
@@ -1349,8 +1403,7 @@ golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ
|
||||
golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
|
||||
golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 h1:nt+Q6cXKz4MosCSpnbMtqiQ8Oz0pxTef2B4Vca2lvfk=
|
||||
golang.org/x/oauth2 v0.3.0 h1:6l90koy8/LaBLmLu8jpHeHexzMwEita0zFfYlggy2F8=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
@@ -1452,26 +1505,31 @@ golang.org/x/sys v0.0.0-20210903071746-97244b99971b/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211031064116-611d5d643895/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211105183446-c75c47738b0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220702020025-31831981b65f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220825204002-c680a09ffe64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A=
|
||||
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ=
|
||||
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.2.0 h1:z85xZCsEl7bi/KwbNADeBYoOP0++7W1ipu+aGnpwzRM=
|
||||
golang.org/x/term v0.0.0-20220722155259-a9ba230a4035/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
|
||||
golang.org/x/term v0.3.0 h1:qoo4akIqOcDME5bhc/NgxUdovd6BSS2uMsVjB56q1xI=
|
||||
golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
@@ -1481,8 +1539,9 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg=
|
||||
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM=
|
||||
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
@@ -1583,8 +1642,8 @@ golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
|
||||
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
|
||||
golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.2.0 h1:G6AHpWxTMGY1KyEYoAQ5WTtIekUUvDNjan3ugu60JvE=
|
||||
golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
|
||||
golang.org/x/tools v0.4.0 h1:7mTAgkunk3fr4GAloyyCasadO6h9zSsQZbwvcaIciV4=
|
||||
golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
@@ -1674,8 +1733,8 @@ google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaE
|
||||
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
|
||||
google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
|
||||
google.golang.org/genproto v0.0.0-20211101144312-62acf1d99145/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e h1:S9GbmC1iCgvbLyAokVCwiO6tVIrU9Y7c5oMx1V/ki/Y=
|
||||
google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s=
|
||||
google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c h1:QgY/XxIAIeccR+Ca/rDdKubLIU9rcJ3xfy1DC/Wd2Oo=
|
||||
google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo=
|
||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
|
||||
@@ -1727,8 +1786,9 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b h1:QRR6H1YWRnHb4Y/HeNFCTJLFVxaq6wH4YuVdsUOr75U=
|
||||
gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
@@ -1740,6 +1800,7 @@ gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
|
||||
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
|
||||
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
@@ -1753,6 +1814,7 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
|
||||
|
||||
36
inspect/doc.go
Normal file
@@ -0,0 +1,36 @@
/*
Package inspect provides a tool for investigating the state of a
failed Tendermint node.

This package provides the Inspector type. The Inspector type runs a subset of the Tendermint
RPC endpoints that are useful for debugging issues with Tendermint consensus.

When a node running the Tendermint consensus engine detects an inconsistent consensus state,
the entire node will crash. The Tendermint consensus engine cannot run in this
inconsistent state, so the node will not be able to start up again.

The RPC endpoints provided by the Inspector type allow a node operator to inspect
the block store and state store to better understand what may have caused the inconsistent state.

The Inspector type's lifecycle is controlled by a context.Context

	ins := inspect.NewFromConfig(rpcConfig)
	ctx, cancelFunc := context.WithCancel(context.Background())

	// Run blocks until the Inspector server is shut down.
	go ins.Run(ctx)
	...

	// calling the cancel function will stop the running inspect server
	cancelFunc()

Inspector serves its RPC endpoints on the address configured in the RPC configuration

	rpcConfig.ListenAddress = "tcp://127.0.0.1:26657"
	ins := inspect.NewFromConfig(rpcConfig)
	go ins.Run(ctx)

The list of available RPC endpoints can then be viewed by navigating to
http://127.0.0.1:26657/ in the web browser.
*/
package inspect
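As a companion to the package documentation above, the following is a minimal, self-contained sketch (not part of the diff) of wiring the Inspector into a small program and stopping it on SIGINT/SIGTERM. The home-directory path is hypothetical, and preparing the configuration with config.DefaultConfig and SetRoot is an assumption standing in for however the node's real config is loaded; the only inspect APIs used are NewFromConfig and Run, as introduced in inspect/inspect.go below.

package main

import (
	"context"
	"log"
	"os/signal"
	"syscall"

	"github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/inspect"
)

func main() {
	// Assumption: the node's configuration lives under this (hypothetical) home
	// directory; a real deployment would load the node's actual config file.
	cfg := config.DefaultConfig()
	cfg.SetRoot("/path/to/node/home")

	ins, err := inspect.NewFromConfig(cfg)
	if err != nil {
		log.Fatalf("failed to construct inspector: %v", err)
	}

	// Cancel the context on SIGINT/SIGTERM; Run blocks until the RPC servers stop.
	ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
	defer stop()

	if err := ins.Run(ctx); err != nil {
		log.Fatalf("inspector exited with error: %v", err)
	}
}

Because Run closes the block store and state store on return, the sketch lets the signal-driven context cancellation be the only shutdown path rather than calling any explicit Stop method.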
138
inspect/inspect.go
Normal file
@@ -0,0 +1,138 @@
package inspect

import (
	"context"
	"errors"
	"net"
	"os"

	"github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/inspect/rpc"
	"github.com/tendermint/tendermint/libs/log"
	tmstrings "github.com/tendermint/tendermint/libs/strings"
	rpccore "github.com/tendermint/tendermint/rpc/core"
	"github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/state/indexer"
	"github.com/tendermint/tendermint/state/indexer/block"
	"github.com/tendermint/tendermint/state/txindex"
	"github.com/tendermint/tendermint/store"
	"github.com/tendermint/tendermint/types"

	"golang.org/x/sync/errgroup"
)

var (
	logger = log.NewTMLogger(log.NewSyncWriter(os.Stdout))
)

// Inspector manages an RPC service that exports methods to debug a failed node.
// After a node shuts down due to a consensus failure, it will no longer start up,
// and its state cannot easily be inspected. An Inspector value provides a similar interface
// to the node, using the underlying Tendermint data stores, without bringing up
// any other components. A caller can query the Inspector service to inspect the
// persisted state and debug the failure.
type Inspector struct {
	routes rpccore.RoutesMap

	config *config.RPCConfig

	logger log.Logger

	// References to the state store and block store are maintained to enable
	// the Inspector to safely close them on shutdown.
	ss state.Store
	bs state.BlockStore
}

// New returns an Inspector that serves RPC on the specified BlockStore and StateStore.
// The Inspector type does not modify the state or block stores.
// The sinks are used to enable block and transaction querying via the RPC server.
// The caller is responsible for starting and stopping the Inspector service.
//
//nolint:lll
func New(cfg *config.RPCConfig, bs state.BlockStore, ss state.Store, txidx txindex.TxIndexer, blkidx indexer.BlockIndexer, lg log.Logger) *Inspector {
	routes := rpc.Routes(*cfg, ss, bs, txidx, blkidx, logger)
	eb := types.NewEventBus()
	eb.SetLogger(logger.With("module", "events"))
	return &Inspector{
		routes: routes,
		config: cfg,
		logger: logger,
		ss:     ss,
		bs:     bs,
	}
}

// NewFromConfig constructs an Inspector using the values defined in the passed-in config.
func NewFromConfig(cfg *config.Config) (*Inspector, error) {
	bsDB, err := config.DefaultDBProvider(&config.DBContext{ID: "blockstore", Config: cfg})
	if err != nil {
		return nil, err
	}
	bs := store.NewBlockStore(bsDB)
	sDB, err := config.DefaultDBProvider(&config.DBContext{ID: "state", Config: cfg})
	if err != nil {
		return nil, err
	}
	genDoc, err := types.GenesisDocFromFile(cfg.GenesisFile())
	if err != nil {
		return nil, err
	}
	txidx, blkidx, err := block.IndexerFromConfig(cfg, config.DefaultDBProvider, genDoc.ChainID)
	if err != nil {
		return nil, err
	}
	lg := logger.With("module", "inspect")
	ss := state.NewStore(sDB, state.StoreOptions{})
	return New(cfg.RPC, bs, ss, txidx, blkidx, lg), nil
}

// Run starts the Inspector servers and blocks until the servers shut down. The passed
// in context is used to control the lifecycle of the servers.
func (ins *Inspector) Run(ctx context.Context) error {
	defer ins.bs.Close()
	defer ins.ss.Close()

	return startRPCServers(ctx, ins.config, ins.logger, ins.routes)
}

func startRPCServers(ctx context.Context, cfg *config.RPCConfig, logger log.Logger, routes rpccore.RoutesMap) error {
	g, tctx := errgroup.WithContext(ctx)
	listenAddrs := tmstrings.SplitAndTrimEmpty(cfg.ListenAddress, ",", " ")
	rh := rpc.Handler(cfg, routes, logger)
	for _, listenerAddr := range listenAddrs {
		server := rpc.Server{
			Logger:  logger,
			Config:  cfg,
			Handler: rh,
			Addr:    listenerAddr,
		}
		if cfg.IsTLSEnabled() {
			keyFile := cfg.KeyFile()
			certFile := cfg.CertFile()
			listenerAddr := listenerAddr
			g.Go(func() error {
				logger.Info("RPC HTTPS server starting", "address", listenerAddr,
					"certfile", certFile, "keyfile", keyFile)
				err := server.ListenAndServeTLS(tctx, certFile, keyFile)
				if !errors.Is(err, net.ErrClosed) {
					return err
				}
				logger.Info("RPC HTTPS server stopped", "address", listenerAddr)
				return nil
			})
		} else {
			listenerAddr := listenerAddr
			g.Go(func() error {
				logger.Info("RPC HTTP server starting", "address", listenerAddr)
				err := server.ListenAndServe(tctx)
				if !errors.Is(err, net.ErrClosed) {
					return err
				}
				logger.Info("RPC HTTP server stopped", "address", listenerAddr)
				return nil
			})
		}
	}
	return g.Wait()
}
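The shutdown behavior of startRPCServers above follows a common Go pattern: each listener runs in its own errgroup goroutine, and the error produced when a listener is closed by a context-driven shutdown is treated as a clean stop rather than a failure. The sketch below is not part of the diff; it illustrates the same pattern with the standard net/http server, where http.ErrServerClosed plays the role that net.ErrClosed plays for the inspect rpc.Server, and the package name serveutil is hypothetical.

// Package serveutil is a hypothetical helper used only for illustration.
package serveutil

import (
	"context"
	"errors"
	"net/http"
	"time"

	"golang.org/x/sync/errgroup"
)

// ServeAll runs one HTTP server per address and stops them all when ctx is canceled.
func ServeAll(ctx context.Context, addrs []string, handler http.Handler) error {
	g, gctx := errgroup.WithContext(ctx)
	for _, addr := range addrs {
		srv := &http.Server{Addr: addr, Handler: handler}

		// Serve until shut down; ErrServerClosed signals a clean, deliberate stop.
		g.Go(func() error {
			if err := srv.ListenAndServe(); !errors.Is(err, http.ErrServerClosed) {
				return err
			}
			return nil
		})

		// Shut the server down once the group context is canceled.
		g.Go(func() error {
			<-gctx.Done()
			sctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
			defer cancel()
			return srv.Shutdown(sctx)
		})
	}
	// Wait returns the first non-nil error, or nil if every server stopped cleanly.
	return g.Wait()
}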
605
inspect/inspect_test.go
Normal file
605
inspect/inspect_test.go
Normal file
@@ -0,0 +1,605 @@
package inspect_test

import (
	"context"
	"fmt"
	"net"
	"os"
	"strings"
	"sync"
	"testing"
	"time"

	"github.com/fortytw2/leaktest"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
	abcitypes "github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/inspect"
	"github.com/tendermint/tendermint/internal/test"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/libs/pubsub/query"
	"github.com/tendermint/tendermint/proto/tendermint/state"
	httpclient "github.com/tendermint/tendermint/rpc/client/http"
	indexermocks "github.com/tendermint/tendermint/state/indexer/mocks"
	statemocks "github.com/tendermint/tendermint/state/mocks"
	txindexmocks "github.com/tendermint/tendermint/state/txindex/mocks"
	"github.com/tendermint/tendermint/types"
)

func TestInspectConstructor(t *testing.T) {
	cfg := test.ResetTestRoot("test")
	t.Cleanup(leaktest.Check(t))
	defer func() { _ = os.RemoveAll(cfg.RootDir) }()
	t.Run("from config", func(t *testing.T) {
		d, err := inspect.NewFromConfig(cfg)
		require.NoError(t, err)
		require.NotNil(t, d)
	})
}

func TestInspectRun(t *testing.T) {
	cfg := test.ResetTestRoot("test")
	t.Cleanup(leaktest.Check(t))
	defer func() { _ = os.RemoveAll(cfg.RootDir) }()
	t.Run("from config", func(t *testing.T) {
		d, err := inspect.NewFromConfig(cfg)
		require.NoError(t, err)
		ctx, cancel := context.WithCancel(context.Background())
		stoppedWG := &sync.WaitGroup{}
		stoppedWG.Add(1)
		go func() {
			require.NoError(t, d.Run(ctx))
			stoppedWG.Done()
		}()
		cancel()
		stoppedWG.Wait()
	})
}
|
||||
|
||||
func TestBlock(t *testing.T) {
|
||||
testHeight := int64(1)
|
||||
testBlock := new(types.Block)
|
||||
testBlock.Header.Height = testHeight
|
||||
testBlock.Header.LastCommitHash = []byte("test hash")
|
||||
stateStoreMock := &statemocks.Store{}
|
||||
stateStoreMock.On("Close").Return(nil)
|
||||
|
||||
blockStoreMock := &statemocks.BlockStore{}
|
||||
blockStoreMock.On("Height").Return(testHeight)
|
||||
blockStoreMock.On("Base").Return(int64(0))
|
||||
blockStoreMock.On("LoadBlockMeta", testHeight).Return(&types.BlockMeta{})
|
||||
blockStoreMock.On("LoadBlock", testHeight).Return(testBlock)
|
||||
blockStoreMock.On("Close").Return(nil)
|
||||
|
||||
txIndexerMock := &txindexmocks.TxIndexer{}
|
||||
blkIdxMock := &indexermocks.BlockIndexer{}
|
||||
|
||||
rpcConfig := config.TestRPCConfig()
|
||||
l := log.TestingLogger()
|
||||
d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock, l)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
wg := &sync.WaitGroup{}
|
||||
wg.Add(1)
|
||||
|
||||
startedWG := &sync.WaitGroup{}
|
||||
startedWG.Add(1)
|
||||
go func() {
|
||||
startedWG.Done()
|
||||
defer wg.Done()
|
||||
require.NoError(t, d.Run(ctx))
|
||||
}()
|
||||
// FIXME: used to induce context switch.
|
||||
// Determine more deterministic method for prompting a context switch
|
||||
startedWG.Wait()
|
||||
requireConnect(t, rpcConfig.ListenAddress, 20)
|
||||
cli, err := httpclient.New(rpcConfig.ListenAddress, "/websocket")
|
||||
require.NoError(t, err)
|
||||
resultBlock, err := cli.Block(context.Background(), &testHeight)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, testBlock.Height, resultBlock.Block.Height)
|
||||
require.Equal(t, testBlock.LastCommitHash, resultBlock.Block.LastCommitHash)
|
||||
cancel()
|
||||
wg.Wait()
|
||||
|
||||
blockStoreMock.AssertExpectations(t)
|
||||
stateStoreMock.AssertExpectations(t)
|
||||
}
|
||||
|
||||
func TestTxSearch(t *testing.T) {
|
||||
testHash := []byte("test")
|
||||
testTx := []byte("tx")
|
||||
testQuery := fmt.Sprintf("tx.hash='%s'", string(testHash))
|
||||
testTxResult := &abcitypes.TxResult{
|
||||
Height: 1,
|
||||
Index: 100,
|
||||
Tx: testTx,
|
||||
}
|
||||
|
||||
stateStoreMock := &statemocks.Store{}
|
||||
stateStoreMock.On("Close").Return(nil)
|
||||
blockStoreMock := &statemocks.BlockStore{}
|
||||
blockStoreMock.On("Close").Return(nil)
|
||||
txIndexerMock := &txindexmocks.TxIndexer{}
|
||||
blkIdxMock := &indexermocks.BlockIndexer{}
|
||||
txIndexerMock.On("Search", mock.Anything,
|
||||
mock.MatchedBy(func(q *query.Query) bool {
|
||||
return testQuery == strings.ReplaceAll(q.String(), " ", "")
|
||||
})).
|
||||
Return([]*abcitypes.TxResult{testTxResult}, nil)
|
||||
|
||||
rpcConfig := config.TestRPCConfig()
|
||||
l := log.TestingLogger()
|
||||
d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock, l)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
wg := &sync.WaitGroup{}
|
||||
wg.Add(1)
|
||||
|
||||
startedWG := &sync.WaitGroup{}
|
||||
startedWG.Add(1)
|
||||
go func() {
|
||||
startedWG.Done()
|
||||
defer wg.Done()
|
||||
require.NoError(t, d.Run(ctx))
|
||||
}()
|
||||
// FIXME: used to induce context switch.
|
||||
// Determine more deterministic method for prompting a context switch
|
||||
startedWG.Wait()
|
||||
requireConnect(t, rpcConfig.ListenAddress, 20)
|
||||
cli, err := httpclient.New(rpcConfig.ListenAddress, "/websocket")
|
||||
require.NoError(t, err)
|
||||
|
||||
var page = 1
|
||||
resultTxSearch, err := cli.TxSearch(context.Background(), testQuery, false, &page, &page, "")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, resultTxSearch.Txs, 1)
|
||||
require.Equal(t, types.Tx(testTx), resultTxSearch.Txs[0].Tx)
|
||||
|
||||
cancel()
|
||||
wg.Wait()
|
||||
|
||||
txIndexerMock.AssertExpectations(t)
|
||||
stateStoreMock.AssertExpectations(t)
|
||||
blockStoreMock.AssertExpectations(t)
|
||||
}
|
||||
func TestTx(t *testing.T) {
|
||||
testHash := []byte("test")
|
||||
testTx := []byte("tx")
|
||||
|
||||
stateStoreMock := &statemocks.Store{}
|
||||
stateStoreMock.On("Close").Return(nil)
|
||||
blockStoreMock := &statemocks.BlockStore{}
|
||||
blockStoreMock.On("Close").Return(nil)
|
||||
blkIdxMock := &indexermocks.BlockIndexer{}
|
||||
txIndexerMock := &txindexmocks.TxIndexer{}
|
||||
txIndexerMock.On("Get", testHash).Return(&abcitypes.TxResult{
|
||||
Tx: testTx,
|
||||
}, nil)
|
||||
|
||||
rpcConfig := config.TestRPCConfig()
|
||||
l := log.TestingLogger()
|
||||
d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock, l)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
wg := &sync.WaitGroup{}
|
||||
wg.Add(1)
|
||||
|
||||
startedWG := &sync.WaitGroup{}
|
||||
startedWG.Add(1)
|
||||
go func() {
|
||||
startedWG.Done()
|
||||
defer wg.Done()
|
||||
require.NoError(t, d.Run(ctx))
|
||||
}()
|
||||
// FIXME: used to induce context switch.
|
||||
// Determine more deterministic method for prompting a context switch
|
||||
startedWG.Wait()
|
||||
requireConnect(t, rpcConfig.ListenAddress, 20)
|
||||
cli, err := httpclient.New(rpcConfig.ListenAddress, "/websocket")
|
||||
require.NoError(t, err)
|
||||
|
||||
res, err := cli.Tx(context.Background(), testHash, false)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, types.Tx(testTx), res.Tx)
|
||||
|
||||
cancel()
|
||||
wg.Wait()
|
||||
|
||||
txIndexerMock.AssertExpectations(t)
|
||||
stateStoreMock.AssertExpectations(t)
|
||||
blockStoreMock.AssertExpectations(t)
|
||||
}
|
||||
func TestConsensusParams(t *testing.T) {
|
||||
testHeight := int64(1)
|
||||
testMaxGas := int64(55)
|
||||
stateStoreMock := &statemocks.Store{}
|
||||
stateStoreMock.On("Close").Return(nil)
|
||||
blockStoreMock := &statemocks.BlockStore{}
|
||||
blockStoreMock.On("Close").Return(nil)
|
||||
blockStoreMock.On("Height").Return(testHeight)
|
||||
blockStoreMock.On("Base").Return(int64(0))
|
||||
stateStoreMock.On("LoadConsensusParams", testHeight).Return(types.ConsensusParams{
|
||||
Block: types.BlockParams{
|
||||
MaxGas: testMaxGas,
|
||||
},
|
||||
}, nil)
|
||||
txIndexerMock := &txindexmocks.TxIndexer{}
|
||||
blkIdxMock := &indexermocks.BlockIndexer{}
|
||||
rpcConfig := config.TestRPCConfig()
|
||||
l := log.TestingLogger()
|
||||
d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock, l)
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
wg := &sync.WaitGroup{}
|
||||
wg.Add(1)
|
||||
|
||||
startedWG := &sync.WaitGroup{}
|
||||
startedWG.Add(1)
|
||||
go func() {
|
||||
startedWG.Done()
|
||||
defer wg.Done()
|
||||
require.NoError(t, d.Run(ctx))
|
||||
}()
|
||||
// FIXME: used to induce context switch.
|
||||
// Determine more deterministic method for prompting a context switch
|
||||
startedWG.Wait()
|
||||
requireConnect(t, rpcConfig.ListenAddress, 20)
|
||||
cli, err := httpclient.New(rpcConfig.ListenAddress, "/websocket")
|
||||
require.NoError(t, err)
|
||||
params, err := cli.ConsensusParams(context.Background(), &testHeight)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.ConsensusParams.Block.MaxGas, testMaxGas)
|
||||
|
||||
cancel()
|
||||
wg.Wait()
|
||||
|
||||
blockStoreMock.AssertExpectations(t)
|
||||
stateStoreMock.AssertExpectations(t)
|
||||
}
|
||||
|
||||
func TestBlockResults(t *testing.T) {
|
||||
testHeight := int64(1)
|
||||
testGasUsed := int64(100)
|
||||
stateStoreMock := &statemocks.Store{}
|
||||
stateStoreMock.On("Close").Return(nil)
|
||||
// tmstate "github.com/tendermint/tendermint/proto/tendermint/state"
|
||||
stateStoreMock.On("LoadABCIResponses", testHeight).Return(&state.ABCIResponses{
|
||||
DeliverTxs: []*abcitypes.ResponseDeliverTx{
|
||||
{
|
||||
GasUsed: testGasUsed,
|
||||
},
|
||||
},
|
||||
EndBlock: &abcitypes.ResponseEndBlock{},
|
||||
BeginBlock: &abcitypes.ResponseBeginBlock{},
|
||||
}, nil)
|
||||
blockStoreMock := &statemocks.BlockStore{}
|
||||
blockStoreMock.On("Close").Return(nil)
|
||||
blockStoreMock.On("Base").Return(int64(0))
|
||||
blockStoreMock.On("Height").Return(testHeight)
|
||||
txIndexerMock := &txindexmocks.TxIndexer{}
|
||||
blkIdxMock := &indexermocks.BlockIndexer{}
|
||||
rpcConfig := config.TestRPCConfig()
|
||||
l := log.TestingLogger()
|
||||
d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock, l)
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
wg := &sync.WaitGroup{}
|
||||
wg.Add(1)
|
||||
|
||||
startedWG := &sync.WaitGroup{}
|
||||
startedWG.Add(1)
|
||||
go func() {
|
||||
startedWG.Done()
|
||||
defer wg.Done()
|
||||
require.NoError(t, d.Run(ctx))
|
||||
}()
|
||||
// FIXME: used to induce context switch.
|
||||
// Determine more deterministic method for prompting a context switch
|
||||
startedWG.Wait()
|
||||
requireConnect(t, rpcConfig.ListenAddress, 20)
|
||||
cli, err := httpclient.New(rpcConfig.ListenAddress, "/websocket")
|
||||
require.NoError(t, err)
|
||||
res, err := cli.BlockResults(context.Background(), &testHeight)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, res.TxsResults[0].GasUsed, testGasUsed)
|
||||
|
||||
cancel()
|
||||
wg.Wait()
|
||||
|
||||
blockStoreMock.AssertExpectations(t)
|
||||
stateStoreMock.AssertExpectations(t)
|
||||
}
|
||||
|
||||
func TestCommit(t *testing.T) {
|
||||
testHeight := int64(1)
|
||||
testRound := int32(101)
|
||||
stateStoreMock := &statemocks.Store{}
|
||||
stateStoreMock.On("Close").Return(nil)
|
||||
blockStoreMock := &statemocks.BlockStore{}
|
||||
blockStoreMock.On("Close").Return(nil)
|
||||
blockStoreMock.On("Base").Return(int64(0))
|
||||
blockStoreMock.On("Height").Return(testHeight)
|
||||
blockStoreMock.On("LoadBlockMeta", testHeight).Return(&types.BlockMeta{}, nil)
|
||||
blockStoreMock.On("LoadSeenCommit", testHeight).Return(&types.Commit{
|
||||
Height: testHeight,
|
||||
Round: testRound,
|
||||
}, nil)
|
||||
txIndexerMock := &txindexmocks.TxIndexer{}
|
||||
blkIdxMock := &indexermocks.BlockIndexer{}
|
||||
rpcConfig := config.TestRPCConfig()
|
||||
l := log.TestingLogger()
|
||||
d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock, l)
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
wg := &sync.WaitGroup{}
|
||||
wg.Add(1)
|
||||
|
||||
startedWG := &sync.WaitGroup{}
|
||||
startedWG.Add(1)
|
||||
go func() {
|
||||
startedWG.Done()
|
||||
defer wg.Done()
|
||||
require.NoError(t, d.Run(ctx))
|
||||
}()
|
||||
// FIXME: used to induce context switch.
|
||||
// Determine more deterministic method for prompting a context switch
|
||||
startedWG.Wait()
|
||||
requireConnect(t, rpcConfig.ListenAddress, 20)
|
||||
cli, err := httpclient.New(rpcConfig.ListenAddress, "/websocket")
|
||||
require.NoError(t, err)
|
||||
res, err := cli.Commit(context.Background(), &testHeight)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, res)
|
||||
require.Equal(t, res.SignedHeader.Commit.Round, testRound)
|
||||
|
||||
cancel()
|
||||
wg.Wait()
|
||||
|
||||
blockStoreMock.AssertExpectations(t)
|
||||
stateStoreMock.AssertExpectations(t)
|
||||
}
|
||||
|
||||
func TestBlockByHash(t *testing.T) {
|
||||
testHeight := int64(1)
|
||||
testHash := []byte("test hash")
|
||||
testBlock := new(types.Block)
|
||||
testBlock.Header.Height = testHeight
|
||||
testBlock.Header.LastCommitHash = testHash
|
||||
stateStoreMock := &statemocks.Store{}
|
||||
stateStoreMock.On("Close").Return(nil)
|
||||
blockStoreMock := &statemocks.BlockStore{}
|
||||
blockStoreMock.On("Close").Return(nil)
|
||||
blockStoreMock.On("LoadBlockMeta", testHeight).Return(&types.BlockMeta{
|
||||
BlockID: types.BlockID{
|
||||
Hash: testHash,
|
||||
},
|
||||
Header: types.Header{
|
||||
Height: testHeight,
|
||||
},
|
||||
}, nil)
|
||||
blockStoreMock.On("LoadBlockByHash", testHash).Return(testBlock, nil)
|
||||
txIndexerMock := &txindexmocks.TxIndexer{}
|
||||
blkIdxMock := &indexermocks.BlockIndexer{}
|
||||
rpcConfig := config.TestRPCConfig()
|
||||
l := log.TestingLogger()
|
||||
d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock, l)
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
wg := &sync.WaitGroup{}
|
||||
wg.Add(1)
|
||||
|
||||
startedWG := &sync.WaitGroup{}
|
||||
startedWG.Add(1)
|
||||
go func() {
|
||||
startedWG.Done()
|
||||
defer wg.Done()
|
||||
require.NoError(t, d.Run(ctx))
|
||||
}()
|
||||
// FIXME: used to induce context switch.
|
||||
// Determine more deterministic method for prompting a context switch
|
||||
startedWG.Wait()
|
||||
requireConnect(t, rpcConfig.ListenAddress, 20)
|
||||
cli, err := httpclient.New(rpcConfig.ListenAddress, "/websocket")
|
||||
require.NoError(t, err)
|
||||
res, err := cli.BlockByHash(context.Background(), testHash)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, res)
|
||||
require.Equal(t, []byte(res.BlockID.Hash), testHash)
|
||||
|
||||
cancel()
|
||||
wg.Wait()
|
||||
|
||||
blockStoreMock.AssertExpectations(t)
|
||||
stateStoreMock.AssertExpectations(t)
|
||||
}
|
||||
|
||||
func TestBlockchain(t *testing.T) {
|
||||
testHeight := int64(1)
|
||||
testBlock := new(types.Block)
|
||||
testBlockHash := []byte("test hash")
|
||||
testBlock.Header.Height = testHeight
|
||||
testBlock.Header.LastCommitHash = testBlockHash
|
||||
stateStoreMock := &statemocks.Store{}
|
||||
stateStoreMock.On("Close").Return(nil)
|
||||
|
||||
blockStoreMock := &statemocks.BlockStore{}
|
||||
blockStoreMock.On("Close").Return(nil)
|
||||
blockStoreMock.On("Height").Return(testHeight)
|
||||
blockStoreMock.On("Base").Return(int64(0))
|
||||
blockStoreMock.On("LoadBlockMeta", testHeight).Return(&types.BlockMeta{
|
||||
BlockID: types.BlockID{
|
||||
Hash: testBlockHash,
|
||||
},
|
||||
})
|
||||
txIndexerMock := &txindexmocks.TxIndexer{}
|
||||
blkIdxMock := &indexermocks.BlockIndexer{}
|
||||
rpcConfig := config.TestRPCConfig()
|
||||
l := log.TestingLogger()
|
||||
d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock, l)
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
wg := &sync.WaitGroup{}
|
||||
wg.Add(1)
|
||||
|
||||
startedWG := &sync.WaitGroup{}
|
||||
startedWG.Add(1)
|
||||
go func() {
|
||||
startedWG.Done()
|
||||
defer wg.Done()
|
||||
require.NoError(t, d.Run(ctx))
|
||||
}()
|
||||
// FIXME: used to induce context switch.
|
||||
// Determine more deterministic method for prompting a context switch
|
||||
startedWG.Wait()
|
||||
requireConnect(t, rpcConfig.ListenAddress, 20)
|
||||
cli, err := httpclient.New(rpcConfig.ListenAddress, "/websocket")
|
||||
require.NoError(t, err)
|
||||
res, err := cli.BlockchainInfo(context.Background(), 0, 100)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, res)
|
||||
require.Equal(t, testBlockHash, []byte(res.BlockMetas[0].BlockID.Hash))
|
||||
|
||||
cancel()
|
||||
wg.Wait()
|
||||
|
||||
blockStoreMock.AssertExpectations(t)
|
||||
stateStoreMock.AssertExpectations(t)
|
||||
}
|
||||
|
||||
func TestValidators(t *testing.T) {
|
||||
testHeight := int64(1)
|
||||
testVotingPower := int64(100)
|
||||
testValidators := types.ValidatorSet{
|
||||
Validators: []*types.Validator{
|
||||
{
|
||||
VotingPower: testVotingPower,
|
||||
},
|
||||
},
|
||||
}
|
||||
stateStoreMock := &statemocks.Store{}
|
||||
stateStoreMock.On("Close").Return(nil)
|
||||
stateStoreMock.On("LoadValidators", testHeight).Return(&testValidators, nil)
|
||||
|
||||
blockStoreMock := &statemocks.BlockStore{}
|
||||
blockStoreMock.On("Close").Return(nil)
|
||||
blockStoreMock.On("Height").Return(testHeight)
|
||||
blockStoreMock.On("Base").Return(int64(0))
|
||||
txIndexerMock := &txindexmocks.TxIndexer{}
|
||||
blkIdxMock := &indexermocks.BlockIndexer{}
|
||||
rpcConfig := config.TestRPCConfig()
|
||||
l := log.TestingLogger()
|
||||
d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock, l)
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
wg := &sync.WaitGroup{}
|
||||
wg.Add(1)
|
||||
|
||||
startedWG := &sync.WaitGroup{}
|
||||
startedWG.Add(1)
|
||||
go func() {
|
||||
startedWG.Done()
|
||||
defer wg.Done()
|
||||
require.NoError(t, d.Run(ctx))
|
||||
}()
|
||||
// FIXME: used to induce context switch.
|
||||
// Determine more deterministic method for prompting a context switch
|
||||
startedWG.Wait()
|
||||
requireConnect(t, rpcConfig.ListenAddress, 20)
|
||||
cli, err := httpclient.New(rpcConfig.ListenAddress, "/websocket")
|
||||
require.NoError(t, err)
|
||||
|
||||
testPage := 1
|
||||
testPerPage := 100
|
||||
res, err := cli.Validators(context.Background(), &testHeight, &testPage, &testPerPage)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, res)
|
||||
require.Equal(t, testVotingPower, res.Validators[0].VotingPower)
|
||||
|
||||
cancel()
|
||||
wg.Wait()
|
||||
|
||||
blockStoreMock.AssertExpectations(t)
|
||||
stateStoreMock.AssertExpectations(t)
|
||||
}
|
||||
|
||||
func TestBlockSearch(t *testing.T) {
|
||||
testHeight := int64(1)
|
||||
testBlockHash := []byte("test hash")
|
||||
testQuery := "block.height = 1"
|
||||
stateStoreMock := &statemocks.Store{}
|
||||
stateStoreMock.On("Close").Return(nil)
|
||||
|
||||
blockStoreMock := &statemocks.BlockStore{}
|
||||
blockStoreMock.On("Close").Return(nil)
|
||||
|
||||
txIndexerMock := &txindexmocks.TxIndexer{}
|
||||
blkIdxMock := &indexermocks.BlockIndexer{}
|
||||
blockStoreMock.On("LoadBlock", testHeight).Return(&types.Block{
|
||||
Header: types.Header{
|
||||
Height: testHeight,
|
||||
},
|
||||
}, nil)
|
||||
blockStoreMock.On("LoadBlockMeta", testHeight).Return(&types.BlockMeta{
|
||||
BlockID: types.BlockID{
|
||||
Hash: testBlockHash,
|
||||
},
|
||||
})
|
||||
blkIdxMock.On("Search", mock.Anything,
|
||||
mock.MatchedBy(func(q *query.Query) bool { return testQuery == q.String() })).
|
||||
Return([]int64{testHeight}, nil)
|
||||
rpcConfig := config.TestRPCConfig()
|
||||
l := log.TestingLogger()
|
||||
d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock, l)
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
wg := &sync.WaitGroup{}
|
||||
wg.Add(1)
|
||||
|
||||
startedWG := &sync.WaitGroup{}
|
||||
startedWG.Add(1)
|
||||
go func() {
|
||||
startedWG.Done()
|
||||
defer wg.Done()
|
||||
require.NoError(t, d.Run(ctx))
|
||||
}()
|
||||
// FIXME: used to induce context switch.
|
||||
// Determine more deterministic method for prompting a context switch
|
||||
startedWG.Wait()
|
||||
requireConnect(t, rpcConfig.ListenAddress, 20)
|
||||
cli, err := httpclient.New(rpcConfig.ListenAddress, "/websocket")
|
||||
require.NoError(t, err)
|
||||
|
||||
testPage := 1
|
||||
testPerPage := 100
|
||||
testOrderBy := "desc"
|
||||
res, err := cli.BlockSearch(context.Background(), testQuery, &testPage, &testPerPage, testOrderBy)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, res)
|
||||
require.Equal(t, testBlockHash, []byte(res.Blocks[0].BlockID.Hash))
|
||||
|
||||
cancel()
|
||||
wg.Wait()
|
||||
|
||||
blockStoreMock.AssertExpectations(t)
|
||||
stateStoreMock.AssertExpectations(t)
|
||||
}

func requireConnect(t testing.TB, addr string, retries int) {
	parts := strings.SplitN(addr, "://", 2)
	if len(parts) != 2 {
		t.Fatalf("malformed address to dial: %s", addr)
	}
	var err error
	for i := 0; i < retries; i++ {
		var conn net.Conn
		conn, err = net.Dial(parts[0], parts[1])
		if err == nil {
			conn.Close()
			return
		}
		// FIXME attempt to yield and let the other goroutine continue execution.
		time.Sleep(time.Microsecond * 100)
	}
	t.Fatalf("unable to connect to server %s after %d tries: %s", addr, retries, err)
}

128	inspect/rpc/rpc.go	Normal file
@@ -0,0 +1,128 @@
package rpc

import (
	"context"
	"net/http"
	"time"

	"github.com/rs/cors"

	"github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/rpc/core"
	"github.com/tendermint/tendermint/rpc/jsonrpc/server"
	"github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/state/indexer"
	"github.com/tendermint/tendermint/state/txindex"
)

// Server defines parameters for running an Inspector rpc server.
type Server struct {
	Addr    string // TCP address to listen on, ":http" if empty
	Handler http.Handler
	Logger  log.Logger
	Config  *config.RPCConfig
}

// Routes returns the set of routes used by the Inspector server.
func Routes(cfg config.RPCConfig, s state.Store, bs state.BlockStore, txidx txindex.TxIndexer, blkidx indexer.BlockIndexer, logger log.Logger) core.RoutesMap { //nolint: lll
	env := &core.Environment{
		Config:           cfg,
		BlockIndexer:     blkidx,
		TxIndexer:        txidx,
		StateStore:       s,
		BlockStore:       bs,
		ConsensusReactor: waitSyncCheckerImpl{},
		Logger:           logger,
	}
	return core.RoutesMap{
		"blockchain":       server.NewRPCFunc(env.BlockchainInfo, "minHeight,maxHeight"),
		"consensus_params": server.NewRPCFunc(env.ConsensusParams, "height"),
		"block":            server.NewRPCFunc(env.Block, "height"),
		"block_by_hash":    server.NewRPCFunc(env.BlockByHash, "hash"),
		"block_results":    server.NewRPCFunc(env.BlockResults, "height"),
		"commit":           server.NewRPCFunc(env.Commit, "height"),
		"header":           server.NewRPCFunc(env.Header, "height"),
		"header_by_hash":   server.NewRPCFunc(env.HeaderByHash, "hash"),
		"validators":       server.NewRPCFunc(env.Validators, "height,page,per_page"),
		"tx":               server.NewRPCFunc(env.Tx, "hash,prove"),
		"tx_search":        server.NewRPCFunc(env.TxSearch, "query,prove,page,per_page,order_by"),
		"block_search":     server.NewRPCFunc(env.BlockSearch, "query,page,per_page,order_by"),
	}
}

// Handler returns the http.Handler configured for use with an Inspector server. Handler
// registers the routes on the http.Handler and also registers the websocket handler
// and the CORS handler if specified by the configuration options.
func Handler(rpcConfig *config.RPCConfig, routes core.RoutesMap, logger log.Logger) http.Handler {
	mux := http.NewServeMux()
	wmLogger := logger.With("protocol", "websocket")
	wm := server.NewWebsocketManager(routes,
		server.ReadLimit(rpcConfig.MaxBodyBytes))
	wm.SetLogger(wmLogger)
	mux.HandleFunc("/websocket", wm.WebsocketHandler)

	server.RegisterRPCFuncs(mux, routes, logger)
	var rootHandler http.Handler = mux
	if rpcConfig.IsCorsEnabled() {
		rootHandler = addCORSHandler(rpcConfig, mux)
	}
	return rootHandler
}

func addCORSHandler(rpcConfig *config.RPCConfig, h http.Handler) http.Handler {
	corsMiddleware := cors.New(cors.Options{
		AllowedOrigins: rpcConfig.CORSAllowedOrigins,
		AllowedMethods: rpcConfig.CORSAllowedMethods,
		AllowedHeaders: rpcConfig.CORSAllowedHeaders,
	})
	h = corsMiddleware.Handler(h)
	return h
}

type waitSyncCheckerImpl struct{}

func (waitSyncCheckerImpl) WaitSync() bool {
	return false
}

// ListenAndServe listens on the address specified in srv.Addr and handles any
// incoming requests over HTTP using the Inspector rpc handler specified on the server.
func (srv *Server) ListenAndServe(ctx context.Context) error {
	listener, err := server.Listen(srv.Addr, srv.Config.MaxOpenConnections)
	if err != nil {
		return err
	}
	go func() {
		<-ctx.Done()
		listener.Close()
	}()
	return server.Serve(listener, srv.Handler, srv.Logger, serverRPCConfig(srv.Config))
}

// ListenAndServeTLS listens on the address specified in srv.Addr. ListenAndServeTLS handles
// incoming requests over HTTPS using the Inspector rpc handler specified on the server.
func (srv *Server) ListenAndServeTLS(ctx context.Context, certFile, keyFile string) error {
	listener, err := server.Listen(srv.Addr, srv.Config.MaxOpenConnections)
	if err != nil {
		return err
	}
	go func() {
		<-ctx.Done()
		listener.Close()
	}()
	return server.ServeTLS(listener, srv.Handler, certFile, keyFile, srv.Logger, serverRPCConfig(srv.Config))
}

func serverRPCConfig(r *config.RPCConfig) *server.Config {
	cfg := server.DefaultConfig()
	cfg.MaxBodyBytes = r.MaxBodyBytes
	cfg.MaxHeaderBytes = r.MaxHeaderBytes
	// If necessary adjust global WriteTimeout to ensure it's greater than
	// TimeoutBroadcastTxCommit.
	// See https://github.com/tendermint/tendermint/issues/3435
	if cfg.WriteTimeout <= r.TimeoutBroadcastTxCommit {
		cfg.WriteTimeout = r.TimeoutBroadcastTxCommit + 1*time.Second
	}
	return cfg
}
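
Taken together, Routes, Handler and Server can also be composed by a caller directly, which is essentially what the Inspector's startRPCServers does per listen address. A sketch only: the function name serveInspectorRPC and the listen address are illustrative, and the stores, indexers, logger and RPC config (plus the errors, net, and inspect/rpc imports) are assumed to be constructed elsewhere.

// Sketch: wire Routes, Handler and Server by hand for a single address.
func serveInspectorRPC(ctx context.Context, cfg *config.RPCConfig,
	ss state.Store, bs state.BlockStore,
	txidx txindex.TxIndexer, blkidx indexer.BlockIndexer, logger log.Logger) error {

	routes := rpc.Routes(*cfg, ss, bs, txidx, blkidx, logger)
	handler := rpc.Handler(cfg, routes, logger)

	srv := rpc.Server{
		Addr:    "tcp://127.0.0.1:26657", // illustrative listen address
		Handler: handler,
		Logger:  logger,
		Config:  cfg,
	}

	// net.ErrClosed signals a clean, context-driven shutdown.
	if err := srv.ListenAndServe(ctx); err != nil && !errors.Is(err, net.ErrClosed) {
		return err
	}
	return nil
}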
|
||||
@@ -24,7 +24,7 @@ Pub-Sub in go with event caching
|
||||
|
||||
### Package files
|
||||
|
||||
[event_cache.go](./event_cache.go) [events.go](./events.go)
|
||||
[event_cache.go](./event_cache.go) [events.go](./events.go)
|
||||
|
||||
|
||||
## Type [EventCache](./event_cache.go?s=116:179#L5)
|
||||
|
||||
@@ -27,7 +27,7 @@
|
||||
// select {
|
||||
// case msg <- subscription.Out():
|
||||
// // handle msg.Data() and msg.Events()
|
||||
// case <-subscription.Cancelled():
|
||||
// case <-subscription.Canceled():
|
||||
// return subscription.Err()
|
||||
// }
|
||||
// }
|
||||
|
||||
@@ -431,7 +431,7 @@ func benchmarkNClients(n int, b *testing.B) {
|
||||
select {
|
||||
case <-subscription.Out():
|
||||
continue
|
||||
case <-subscription.Cancelled():
|
||||
case <-subscription.Canceled():
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -472,7 +472,7 @@ func benchmarkNClientsOneQuery(n int, b *testing.B) {
|
||||
select {
|
||||
case <-subscription.Out():
|
||||
continue
|
||||
case <-subscription.Cancelled():
|
||||
case <-subscription.Canceled():
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -501,7 +501,7 @@ func assertReceive(t *testing.T, expected interface{}, ch <-chan pubsub.Message,
|
||||
}
|
||||
|
||||
func assertCancelled(t *testing.T, subscription *pubsub.Subscription, err error) {
|
||||
_, ok := <-subscription.Cancelled()
|
||||
_, ok := <-subscription.Canceled()
|
||||
assert.False(t, ok)
|
||||
assert.Equal(t, err, subscription.Err())
|
||||
}
|
||||
|
||||
@@ -43,11 +43,9 @@ func (s *Subscription) Out() <-chan Message {
|
||||
return s.out
|
||||
}
|
||||
|
||||
// Cancelled returns a channel that's closed when the subscription is
|
||||
// Canceled returns a channel that's closed when the subscription is
|
||||
// terminated and supposed to be used in a select statement.
|
||||
//
|
||||
//nolint:misspell
|
||||
func (s *Subscription) Cancelled() <-chan struct{} {
|
||||
func (s *Subscription) Canceled() <-chan struct{} {
|
||||
return s.canceled
|
||||
}
|
||||
|
||||
|
||||
@@ -32,6 +32,27 @@ func SplitAndTrim(s, sep, cutset string) []string {
	return spl
}

// SplitAndTrimEmpty slices s into all subslices separated by sep and returns a
// slice of the string s with all leading and trailing Unicode code points
// contained in cutset removed. If sep is empty, SplitAndTrimEmpty splits after
// each UTF-8 sequence. The first part is equivalent to strings.SplitN with a
// count of -1. Empty strings are filtered out; only non-empty strings are returned.
func SplitAndTrimEmpty(s, sep, cutset string) []string {
	if s == "" {
		return []string{}
	}

	spl := strings.Split(s, sep)
	nonEmptyStrings := make([]string, 0, len(spl))
	for i := 0; i < len(spl); i++ {
		element := strings.Trim(spl[i], cutset)
		if element != "" {
			nonEmptyStrings = append(nonEmptyStrings, element)
		}
	}
	return nonEmptyStrings
}
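
For context, this is the helper the Inspector uses to parse the comma-separated RPC listen addresses. A small usage sketch; the input string is purely illustrative, and the tmstrings import path matches how startRPCServers imports the package:

package main

import (
	"fmt"

	tmstrings "github.com/tendermint/tendermint/libs/strings"
)

func main() {
	// Illustrative input; in the Inspector the value comes from cfg.ListenAddress.
	addrs := tmstrings.SplitAndTrimEmpty("tcp://0.0.0.0:26657, ,unix:///var/run/tm.sock", ",", " ")
	fmt.Println(addrs) // [tcp://0.0.0.0:26657 unix:///var/run/tm.sock]
}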
|
||||
|
||||
// Returns true if s is a non-empty printable non-tab ascii character.
|
||||
func IsASCIIText(s string) bool {
|
||||
if len(s) == 0 {
|
||||
|
||||
@@ -113,7 +113,7 @@ func (p *Proxy) listen() (net.Listener, *http.ServeMux, error) {
|
||||
}
|
||||
|
||||
// 4) Start listening for new connections.
|
||||
listener, err := rpcserver.Listen(p.Addr, p.Config)
|
||||
listener, err := rpcserver.Listen(p.Addr, p.Config.MaxOpenConnections)
|
||||
if err != nil {
|
||||
return nil, mux, err
|
||||
}
|
||||
|
||||
36	node/node.go
@@ -141,7 +141,7 @@ func NewNode(config *cfg.Config,
|
||||
nodeKey *p2p.NodeKey,
|
||||
clientCreator proxy.ClientCreator,
|
||||
genesisDocProvider GenesisDocProvider,
|
||||
dbProvider DBProvider,
|
||||
dbProvider cfg.DBProvider,
|
||||
metricsProvider MetricsProvider,
|
||||
logger log.Logger,
|
||||
options ...Option,
|
||||
@@ -503,12 +503,12 @@ func (n *Node) OnStop() {
|
||||
}
|
||||
|
||||
// ConfigureRPC makes sure RPC has all the objects it needs to operate.
|
||||
func (n *Node) ConfigureRPC() error {
|
||||
func (n *Node) ConfigureRPC() (*rpccore.Environment, error) {
|
||||
pubKey, err := n.privValidator.GetPubKey()
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't get pubkey: %w", err)
|
||||
if pubKey == nil || err != nil {
|
||||
return nil, fmt.Errorf("can't get pubkey: %w", err)
|
||||
}
|
||||
rpccore.SetEnvironment(&rpccore.Environment{
|
||||
rpcCoreEnv := rpccore.Environment{
|
||||
ProxyAppQuery: n.proxyApp.Query(),
|
||||
ProxyAppMempool: n.proxyApp.Mempool(),
|
||||
|
||||
@@ -518,8 +518,8 @@ func (n *Node) ConfigureRPC() error {
|
||||
ConsensusState: n.consensusState,
|
||||
P2PPeers: n.sw,
|
||||
P2PTransport: n,
|
||||
PubKey: pubKey,
|
||||
|
||||
PubKey: pubKey,
|
||||
GenDoc: n.genesisDoc,
|
||||
TxIndexer: n.txIndexer,
|
||||
BlockIndexer: n.blockIndexer,
|
||||
@@ -530,24 +530,24 @@ func (n *Node) ConfigureRPC() error {
|
||||
Logger: n.Logger.With("module", "rpc"),
|
||||
|
||||
Config: *n.config.RPC,
|
||||
})
|
||||
if err := rpccore.InitGenesisChunks(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
if err := rpcCoreEnv.InitGenesisChunks(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &rpcCoreEnv, nil
|
||||
}
|
||||
|
||||
func (n *Node) startRPC() ([]net.Listener, error) {
|
||||
err := n.ConfigureRPC()
|
||||
env, err := n.ConfigureRPC()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
listenAddrs := splitAndTrimEmpty(n.config.RPC.ListenAddress, ",", " ")
|
||||
routes := env.GetRoutes()
|
||||
|
||||
if n.config.RPC.Unsafe {
|
||||
rpccore.AddUnsafeRoutes()
|
||||
env.AddUnsafeRoutes(routes)
|
||||
}
|
||||
|
||||
config := rpcserver.DefaultConfig()
|
||||
@@ -567,7 +567,7 @@ func (n *Node) startRPC() ([]net.Listener, error) {
|
||||
mux := http.NewServeMux()
|
||||
rpcLogger := n.Logger.With("module", "rpc-server")
|
||||
wmLogger := rpcLogger.With("protocol", "websocket")
|
||||
wm := rpcserver.NewWebsocketManager(rpccore.Routes,
|
||||
wm := rpcserver.NewWebsocketManager(routes,
|
||||
rpcserver.OnDisconnect(func(remoteAddr string) {
|
||||
err := n.eventBus.UnsubscribeAll(context.Background(), remoteAddr)
|
||||
if err != nil && err != tmpubsub.ErrSubscriptionNotFound {
|
||||
@@ -579,10 +579,10 @@ func (n *Node) startRPC() ([]net.Listener, error) {
|
||||
)
|
||||
wm.SetLogger(wmLogger)
|
||||
mux.HandleFunc("/websocket", wm.WebsocketHandler)
|
||||
rpcserver.RegisterRPCFuncs(mux, rpccore.Routes, rpcLogger)
|
||||
rpcserver.RegisterRPCFuncs(mux, routes, rpcLogger)
|
||||
listener, err := rpcserver.Listen(
|
||||
listenAddr,
|
||||
config,
|
||||
config.MaxOpenConnections,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -640,12 +640,12 @@ func (n *Node) startRPC() ([]net.Listener, error) {
|
||||
if config.WriteTimeout <= n.config.RPC.TimeoutBroadcastTxCommit {
|
||||
config.WriteTimeout = n.config.RPC.TimeoutBroadcastTxCommit + 1*time.Second
|
||||
}
|
||||
listener, err := rpcserver.Listen(grpcListenAddr, config)
|
||||
listener, err := rpcserver.Listen(grpcListenAddr, config.MaxOpenConnections)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
go func() {
|
||||
if err := grpccore.StartGRPCServer(listener); err != nil {
|
||||
if err := grpccore.StartGRPCServer(env, listener); err != nil {
|
||||
n.Logger.Error("Error starting gRPC server", "err", err)
|
||||
}
|
||||
}()
|
||||
|
||||
@@ -53,7 +53,7 @@ func TestNodeStartStop(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
select {
|
||||
case <-blocksSub.Out():
|
||||
case <-blocksSub.Cancelled():
|
||||
case <-blocksSub.Canceled():
|
||||
t.Fatal("blocksSub was canceled")
|
||||
case <-time.After(10 * time.Second):
|
||||
t.Fatal("timed out waiting for the node to produce a block")
|
||||
@@ -461,7 +461,7 @@ func TestNodeNewNodeCustomReactors(t *testing.T) {
|
||||
nodeKey,
|
||||
proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()),
|
||||
DefaultGenesisDocProviderFunc(config),
|
||||
DefaultDBProvider,
|
||||
cfg.DefaultDBProvider,
|
||||
DefaultMetricsProvider(config.Instrumentation),
|
||||
log.TestingLogger(),
|
||||
CustomReactors(map[string]p2p.Reactor{"FOO": cr, "BLOCKSYNC": customBlocksyncReactor}),
|
||||
|
||||
@@ -7,6 +7,8 @@ import (
|
||||
"fmt"
|
||||
"net"
|
||||
"strings"
|
||||
|
||||
_ "net/http/pprof" //nolint: gosec // securely exposed on separate, optional port
|
||||
"time"
|
||||
|
||||
dbm "github.com/tendermint/tm-db"
|
||||
@@ -17,6 +19,7 @@ import (
|
||||
cs "github.com/tendermint/tendermint/consensus"
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
"github.com/tendermint/tendermint/evidence"
|
||||
"github.com/tendermint/tendermint/statesync"
|
||||
|
||||
tmjson "github.com/tendermint/tendermint/libs/json"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
@@ -30,13 +33,8 @@ import (
|
||||
"github.com/tendermint/tendermint/proxy"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/state/indexer"
|
||||
blockidxkv "github.com/tendermint/tendermint/state/indexer/block/kv"
|
||||
blockidxnull "github.com/tendermint/tendermint/state/indexer/block/null"
|
||||
"github.com/tendermint/tendermint/state/indexer/sink/psql"
|
||||
"github.com/tendermint/tendermint/state/indexer/block"
|
||||
"github.com/tendermint/tendermint/state/txindex"
|
||||
"github.com/tendermint/tendermint/state/txindex/kv"
|
||||
"github.com/tendermint/tendermint/state/txindex/null"
|
||||
"github.com/tendermint/tendermint/statesync"
|
||||
"github.com/tendermint/tendermint/store"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
"github.com/tendermint/tendermint/version"
|
||||
@@ -44,24 +42,8 @@ import (
|
||||
_ "github.com/lib/pq" // provide the psql db driver
|
||||
)
|
||||
|
||||
// DBContext specifies config information for loading a new DB.
|
||||
type DBContext struct {
|
||||
ID string
|
||||
Config *cfg.Config
|
||||
}
|
||||
|
||||
// DBProvider takes a DBContext and returns an instantiated DB.
|
||||
type DBProvider func(*DBContext) (dbm.DB, error)
|
||||
|
||||
const readHeaderTimeout = 10 * time.Second
|
||||
|
||||
// DefaultDBProvider returns a database using the DBBackend and DBDir
|
||||
// specified in the ctx.Config.
|
||||
func DefaultDBProvider(ctx *DBContext) (dbm.DB, error) {
|
||||
dbType := dbm.BackendType(ctx.Config.DBBackend)
|
||||
return dbm.NewDB(ctx.ID, dbType, ctx.Config.DBDir())
|
||||
}
|
||||
|
||||
// GenesisDocProvider returns a GenesisDoc.
|
||||
// It allows the GenesisDoc to be pulled from sources other than the
|
||||
// filesystem, for instance from a distributed key-value store cluster.
|
||||
@@ -92,7 +74,7 @@ func DefaultNewNode(config *cfg.Config, logger log.Logger) (*Node, error) {
|
||||
nodeKey,
|
||||
proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()),
|
||||
DefaultGenesisDocProviderFunc(config),
|
||||
DefaultDBProvider,
|
||||
cfg.DefaultDBProvider,
|
||||
DefaultMetricsProvider(config.Instrumentation),
|
||||
logger,
|
||||
)
|
||||
@@ -124,15 +106,15 @@ type blockSyncReactor interface {
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
func initDBs(config *cfg.Config, dbProvider DBProvider) (blockStore *store.BlockStore, stateDB dbm.DB, err error) {
|
||||
func initDBs(config *cfg.Config, dbProvider cfg.DBProvider) (blockStore *store.BlockStore, stateDB dbm.DB, err error) {
|
||||
var blockStoreDB dbm.DB
|
||||
blockStoreDB, err = dbProvider(&DBContext{"blockstore", config})
|
||||
blockStoreDB, err = dbProvider(&cfg.DBContext{ID: "blockstore", Config: config})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
blockStore = store.NewBlockStore(blockStoreDB)
|
||||
|
||||
stateDB, err = dbProvider(&DBContext{"state", config})
|
||||
stateDB, err = dbProvider(&cfg.DBContext{ID: "state", Config: config})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
@@ -161,7 +143,7 @@ func createAndStartEventBus(logger log.Logger) (*types.EventBus, error) {
|
||||
func createAndStartIndexerService(
|
||||
config *cfg.Config,
|
||||
chainID string,
|
||||
dbProvider DBProvider,
|
||||
dbProvider cfg.DBProvider,
|
||||
eventBus *types.EventBus,
|
||||
logger log.Logger,
|
||||
) (*txindex.IndexerService, txindex.TxIndexer, indexer.BlockIndexer, error) {
|
||||
@@ -169,31 +151,9 @@ func createAndStartIndexerService(
|
||||
txIndexer txindex.TxIndexer
|
||||
blockIndexer indexer.BlockIndexer
|
||||
)
|
||||
|
||||
switch config.TxIndex.Indexer {
|
||||
case "kv":
|
||||
store, err := dbProvider(&DBContext{"tx_index", config})
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
|
||||
txIndexer = kv.NewTxIndex(store)
|
||||
blockIndexer = blockidxkv.New(dbm.NewPrefixDB(store, []byte("block_events")))
|
||||
|
||||
case "psql":
|
||||
if config.TxIndex.PsqlConn == "" {
|
||||
return nil, nil, nil, errors.New(`no psql-conn is set for the "psql" indexer`)
|
||||
}
|
||||
es, err := psql.NewEventSink(config.TxIndex.PsqlConn, chainID)
|
||||
if err != nil {
|
||||
return nil, nil, nil, fmt.Errorf("creating psql indexer: %w", err)
|
||||
}
|
||||
txIndexer = es.TxIndexer()
|
||||
blockIndexer = es.BlockIndexer()
|
||||
|
||||
default:
|
||||
txIndexer = &null.TxIndex{}
|
||||
blockIndexer = &blockidxnull.BlockerIndexer{}
|
||||
txIndexer, blockIndexer, err := block.IndexerFromConfig(config, dbProvider, chainID)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
|
||||
indexerService := txindex.NewIndexerService(txIndexer, blockIndexer, eventBus, false)
|
||||
@@ -266,6 +226,7 @@ func createMempoolAndMempoolReactor(
|
||||
memplMetrics *mempl.Metrics,
|
||||
logger log.Logger,
|
||||
) (mempl.Mempool, p2p.Reactor) {
|
||||
logger = logger.With("module", "mempool")
|
||||
switch config.Mempool.Version {
|
||||
case cfg.MempoolV1:
|
||||
mp := mempoolv1.NewTxMempool(
|
||||
@@ -285,6 +246,7 @@ func createMempoolAndMempoolReactor(
|
||||
if config.Consensus.WaitForTxs() {
|
||||
mp.EnableTxsAvailable()
|
||||
}
|
||||
reactor.SetLogger(logger)
|
||||
|
||||
return mp, reactor
|
||||
|
||||
@@ -307,6 +269,7 @@ func createMempoolAndMempoolReactor(
|
||||
if config.Consensus.WaitForTxs() {
|
||||
mp.EnableTxsAvailable()
|
||||
}
|
||||
reactor.SetLogger(logger)
|
||||
|
||||
return mp, reactor
|
||||
|
||||
@@ -315,10 +278,10 @@ func createMempoolAndMempoolReactor(
|
||||
}
|
||||
}
|
||||
|
||||
func createEvidenceReactor(config *cfg.Config, dbProvider DBProvider,
|
||||
func createEvidenceReactor(config *cfg.Config, dbProvider cfg.DBProvider,
|
||||
stateStore sm.Store, blockStore *store.BlockStore, logger log.Logger,
|
||||
) (*evidence.Reactor, *evidence.Pool, error) {
|
||||
evidenceDB, err := dbProvider(&DBContext{"evidence", config})
|
||||
evidenceDB, err := dbProvider(&cfg.DBContext{ID: "evidence", Config: config})
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
@@ -26,8 +26,12 @@ type localClientCreator struct {
|
||||
app types.Application
|
||||
}
|
||||
|
||||
// NewLocalClientCreator returns a ClientCreator for the given app,
|
||||
// which will be running locally.
|
||||
// NewLocalClientCreator returns a [ClientCreator] for the given app, which
|
||||
// will be running locally.
|
||||
//
|
||||
// Maintains a single mutex over all new clients created with NewABCIClient.
|
||||
// For a local client creator that uses a single mutex per new client, rather
|
||||
// use [NewUnsyncLocalClientCreator].
|
||||
func NewLocalClientCreator(app types.Application) ClientCreator {
|
||||
return &localClientCreator{
|
||||
mtx: new(tmsync.Mutex),
|
||||
@@ -39,24 +43,26 @@ func (l *localClientCreator) NewABCIClient() (abcicli.Client, error) {
|
||||
return abcicli.NewLocalClient(l.mtx, l.app), nil
|
||||
}
|
||||
|
||||
//---------------------------------------------------------------
|
||||
// unsynchronized local proxy on an in-proc app (no mutex)
|
||||
//----------------------------------------------------
|
||||
// local proxy creates a new mutex for each client
|
||||
|
||||
type unsyncLocalClientCreator struct {
|
||||
app types.Application
|
||||
}
|
||||
|
||||
// NewUnsyncLocalClientCreator returns a ClientCreator for the given app, which
|
||||
// will be running locally. Unlike NewLocalClientCreator, this leaves
|
||||
// synchronization up to the application.
|
||||
// NewUnsyncLocalClientCreator returns a [ClientCreator] for the given app.
|
||||
// Unlike [NewLocalClientCreator], each call to NewABCIClient returns an ABCI
|
||||
// client that maintains its own mutex over the application.
|
||||
func NewUnsyncLocalClientCreator(app types.Application) ClientCreator {
|
||||
return &unsyncLocalClientCreator{
|
||||
app: app,
|
||||
}
|
||||
}
|
||||
|
||||
func (l *unsyncLocalClientCreator) NewABCIClient() (abcicli.Client, error) {
|
||||
return abcicli.NewUnsyncLocalClient(l.app), nil
|
||||
func (c *unsyncLocalClientCreator) NewABCIClient() (abcicli.Client, error) {
|
||||
// Specifying nil for the mutex causes each instance to create its own
|
||||
// mutex.
|
||||
return abcicli.NewLocalClient(nil, c.app), nil
|
||||
}
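
A short sketch of how a caller would pick between the two creators. The kvstore application is just the usual example app; whether a given application tolerates unsynchronized access is an assumption the caller must verify.

package main

import (
	"fmt"

	"github.com/tendermint/tendermint/abci/example/kvstore"
	"github.com/tendermint/tendermint/proxy"
)

func main() {
	app := kvstore.NewApplication()

	// One mutex shared across every client made by this creator.
	syncCreator := proxy.NewLocalClientCreator(app)

	// Each client gets its own mutex; the application is responsible for its
	// own synchronization across concurrent connections.
	unsyncCreator := proxy.NewUnsyncLocalClientCreator(app)

	c1, err := syncCreator.NewABCIClient()
	fmt.Println(c1 != nil, err)
	c2, err := unsyncCreator.NewABCIClient()
	fmt.Println(c2 != nil, err)
}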
|
||||
|
||||
//---------------------------------------------------------------
|
||||
@@ -88,23 +94,33 @@ func (r *remoteClientCreator) NewABCIClient() (abcicli.Client, error) {
|
||||
return remoteApp, nil
|
||||
}
|
||||
|
||||
// DefaultClientCreator returns a default ClientCreator, which will create a
|
||||
// local client if addr is one of: 'kvstore',
|
||||
// 'persistent_kvstore' or 'noop', otherwise - a remote client.
|
||||
// DefaultClientCreator returns a default [ClientCreator], which will create a
|
||||
// local client if addr is one of "kvstore", "persistent_kvstore", "e2e",
|
||||
// "noop".
|
||||
//
|
||||
// Otherwise a remote client will be created.
|
||||
//
|
||||
// Each of "kvstore", "persistent_kvstore" and "e2e" also currently have an
|
||||
// "_unsync" variant (i.e. "kvstore_unsync", etc.), which attempts to replicate
|
||||
// the same concurrency model as the remote client.
|
||||
func DefaultClientCreator(addr, transport, dbDir string) ClientCreator {
|
||||
switch addr {
|
||||
case "kvstore":
|
||||
return NewLocalClientCreator(kvstore.NewApplication())
|
||||
case "kvstore_unsync":
|
||||
return NewUnsyncLocalClientCreator(kvstore.NewApplication())
|
||||
case "persistent_kvstore":
|
||||
return NewLocalClientCreator(kvstore.NewPersistentKVStoreApplication(dbDir))
|
||||
case "persistent_kvstore_unsync":
|
||||
return NewUnsyncLocalClientCreator(kvstore.NewPersistentKVStoreApplication(dbDir))
|
||||
case "e2e":
|
||||
app, err := e2e.NewApplication(e2e.DefaultConfig(dbDir))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return NewLocalClientCreator(app)
|
||||
case "e2e_sync":
|
||||
app, err := e2e.NewSyncApplication(e2e.DefaultConfig(dbDir))
|
||||
case "e2e_unsync":
|
||||
app, err := e2e.NewApplication(e2e.DefaultConfig(dbDir))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
@@ -41,22 +41,20 @@ type Local struct {
|
||||
*types.EventBus
|
||||
Logger log.Logger
|
||||
ctx *rpctypes.Context
|
||||
env *core.Environment
|
||||
}
|
||||
|
||||
// NewLocal configures a client that calls the Node directly.
|
||||
//
|
||||
// Note that given how rpc/core works with package singletons, that
|
||||
// you can only have one node per process. So make sure test cases
|
||||
// don't run in parallel, or try to simulate an entire network in
|
||||
// one process...
|
||||
func New(node *nm.Node) *Local {
|
||||
if err := node.ConfigureRPC(); err != nil {
|
||||
env, err := node.ConfigureRPC()
|
||||
if err != nil {
|
||||
node.Logger.Error("Error configuring RPC", "err", err)
|
||||
}
|
||||
return &Local{
|
||||
EventBus: node.EventBus(),
|
||||
Logger: log.NewNopLogger(),
|
||||
ctx: &rpctypes.Context{},
|
||||
env: env,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -68,11 +66,11 @@ func (c *Local) SetLogger(l log.Logger) {
|
||||
}
|
||||
|
||||
func (c *Local) Status(ctx context.Context) (*ctypes.ResultStatus, error) {
|
||||
return core.Status(c.ctx)
|
||||
return c.env.Status(c.ctx)
|
||||
}
|
||||
|
||||
func (c *Local) ABCIInfo(ctx context.Context) (*ctypes.ResultABCIInfo, error) {
|
||||
return core.ABCIInfo(c.ctx)
|
||||
return c.env.ABCIInfo(c.ctx)
|
||||
}
|
||||
|
||||
func (c *Local) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*ctypes.ResultABCIQuery, error) {
|
||||
@@ -84,55 +82,55 @@ func (c *Local) ABCIQueryWithOptions(
|
||||
path string,
|
||||
data bytes.HexBytes,
|
||||
opts rpcclient.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) {
|
||||
return core.ABCIQuery(c.ctx, path, data, opts.Height, opts.Prove)
|
||||
return c.env.ABCIQuery(c.ctx, path, data, opts.Height, opts.Prove)
|
||||
}
|
||||
|
||||
func (c *Local) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
|
||||
return core.BroadcastTxCommit(c.ctx, tx)
|
||||
return c.env.BroadcastTxCommit(c.ctx, tx)
|
||||
}
|
||||
|
||||
func (c *Local) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
|
||||
return core.BroadcastTxAsync(c.ctx, tx)
|
||||
return c.env.BroadcastTxAsync(c.ctx, tx)
|
||||
}
|
||||
|
||||
func (c *Local) BroadcastTxSync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
|
||||
return core.BroadcastTxSync(c.ctx, tx)
|
||||
return c.env.BroadcastTxSync(c.ctx, tx)
|
||||
}
|
||||
|
||||
func (c *Local) UnconfirmedTxs(ctx context.Context, limit *int) (*ctypes.ResultUnconfirmedTxs, error) {
|
||||
return core.UnconfirmedTxs(c.ctx, limit)
|
||||
return c.env.UnconfirmedTxs(c.ctx, limit)
|
||||
}
|
||||
|
||||
func (c *Local) NumUnconfirmedTxs(ctx context.Context) (*ctypes.ResultUnconfirmedTxs, error) {
|
||||
return core.NumUnconfirmedTxs(c.ctx)
|
||||
return c.env.NumUnconfirmedTxs(c.ctx)
|
||||
}
|
||||
|
||||
func (c *Local) CheckTx(ctx context.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) {
|
||||
return core.CheckTx(c.ctx, tx)
|
||||
return c.env.CheckTx(c.ctx, tx)
|
||||
}
|
||||
|
||||
func (c *Local) NetInfo(ctx context.Context) (*ctypes.ResultNetInfo, error) {
|
||||
return core.NetInfo(c.ctx)
|
||||
return c.env.NetInfo(c.ctx)
|
||||
}
|
||||
|
||||
func (c *Local) DumpConsensusState(ctx context.Context) (*ctypes.ResultDumpConsensusState, error) {
|
||||
return core.DumpConsensusState(c.ctx)
|
||||
return c.env.DumpConsensusState(c.ctx)
|
||||
}
|
||||
|
||||
func (c *Local) ConsensusState(ctx context.Context) (*ctypes.ResultConsensusState, error) {
|
||||
return core.ConsensusState(c.ctx)
|
||||
return c.env.GetConsensusState(c.ctx)
|
||||
}
|
||||
|
||||
func (c *Local) ConsensusParams(ctx context.Context, height *int64) (*ctypes.ResultConsensusParams, error) {
|
||||
return core.ConsensusParams(c.ctx, height)
|
||||
return c.env.ConsensusParams(c.ctx, height)
|
||||
}
|
||||
|
||||
func (c *Local) Health(ctx context.Context) (*ctypes.ResultHealth, error) {
|
||||
return core.Health(c.ctx)
|
||||
return c.env.Health(c.ctx)
|
||||
}
|
||||
|
||||
func (c *Local) DialSeeds(ctx context.Context, seeds []string) (*ctypes.ResultDialSeeds, error) {
|
||||
return core.UnsafeDialSeeds(c.ctx, seeds)
|
||||
return c.env.UnsafeDialSeeds(c.ctx, seeds)
|
||||
}
|
||||
|
||||
func (c *Local) DialPeers(
|
||||
@@ -142,51 +140,51 @@ func (c *Local) DialPeers(
|
||||
unconditional,
|
||||
private bool,
|
||||
) (*ctypes.ResultDialPeers, error) {
|
||||
return core.UnsafeDialPeers(c.ctx, peers, persistent, unconditional, private)
|
||||
return c.env.UnsafeDialPeers(c.ctx, peers, persistent, unconditional, private)
|
||||
}
|
||||
|
||||
func (c *Local) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) {
|
||||
return core.BlockchainInfo(c.ctx, minHeight, maxHeight)
|
||||
return c.env.BlockchainInfo(c.ctx, minHeight, maxHeight)
|
||||
}
|
||||
|
||||
func (c *Local) Genesis(ctx context.Context) (*ctypes.ResultGenesis, error) {
|
||||
return core.Genesis(c.ctx)
|
||||
return c.env.Genesis(c.ctx)
|
||||
}
|
||||
|
||||
func (c *Local) GenesisChunked(ctx context.Context, id uint) (*ctypes.ResultGenesisChunk, error) {
|
||||
return core.GenesisChunked(c.ctx, id)
|
||||
return c.env.GenesisChunked(c.ctx, id)
|
||||
}
|
||||
|
||||
func (c *Local) Block(ctx context.Context, height *int64) (*ctypes.ResultBlock, error) {
|
||||
return core.Block(c.ctx, height)
|
||||
return c.env.Block(c.ctx, height)
|
||||
}
|
||||
|
||||
func (c *Local) BlockByHash(ctx context.Context, hash []byte) (*ctypes.ResultBlock, error) {
|
||||
return core.BlockByHash(c.ctx, hash)
|
||||
return c.env.BlockByHash(c.ctx, hash)
|
||||
}
|
||||
|
||||
func (c *Local) BlockResults(ctx context.Context, height *int64) (*ctypes.ResultBlockResults, error) {
|
||||
return core.BlockResults(c.ctx, height)
|
||||
return c.env.BlockResults(c.ctx, height)
|
||||
}
|
||||
|
||||
func (c *Local) Header(ctx context.Context, height *int64) (*ctypes.ResultHeader, error) {
|
||||
return core.Header(c.ctx, height)
|
||||
return c.env.Header(c.ctx, height)
|
||||
}
|
||||
|
||||
func (c *Local) HeaderByHash(ctx context.Context, hash bytes.HexBytes) (*ctypes.ResultHeader, error) {
|
||||
return core.HeaderByHash(c.ctx, hash)
|
||||
return c.env.HeaderByHash(c.ctx, hash)
|
||||
}
|
||||
|
||||
func (c *Local) Commit(ctx context.Context, height *int64) (*ctypes.ResultCommit, error) {
|
||||
return core.Commit(c.ctx, height)
|
||||
return c.env.Commit(c.ctx, height)
|
||||
}
|
||||
|
||||
func (c *Local) Validators(ctx context.Context, height *int64, page, perPage *int) (*ctypes.ResultValidators, error) {
|
||||
return core.Validators(c.ctx, height, page, perPage)
|
||||
return c.env.Validators(c.ctx, height, page, perPage)
|
||||
}
|
||||
|
||||
func (c *Local) Tx(ctx context.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) {
|
||||
return core.Tx(c.ctx, hash, prove)
|
||||
return c.env.Tx(c.ctx, hash, prove)
|
||||
}
|
||||
|
||||
func (c *Local) TxSearch(
|
||||
@@ -197,7 +195,7 @@ func (c *Local) TxSearch(
|
||||
perPage *int,
|
||||
orderBy string,
|
||||
) (*ctypes.ResultTxSearch, error) {
|
||||
return core.TxSearch(c.ctx, query, prove, page, perPage, orderBy)
|
||||
return c.env.TxSearch(c.ctx, query, prove, page, perPage, orderBy)
|
||||
}
|
||||
|
||||
func (c *Local) BlockSearch(
|
||||
@@ -206,11 +204,11 @@ func (c *Local) BlockSearch(
|
||||
page, perPage *int,
|
||||
orderBy string,
|
||||
) (*ctypes.ResultBlockSearch, error) {
|
||||
return core.BlockSearch(c.ctx, query, page, perPage, orderBy)
|
||||
return c.env.BlockSearch(c.ctx, query, page, perPage, orderBy)
|
||||
}
|
||||
|
||||
func (c *Local) BroadcastEvidence(ctx context.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) {
|
||||
return core.BroadcastEvidence(c.ctx, ev)
|
||||
return c.env.BroadcastEvidence(c.ctx, ev)
|
||||
}
|
||||
|
||||
func (c *Local) Subscribe(
|
||||
@@ -262,7 +260,7 @@ func (c *Local) eventsRoutine(
|
||||
c.Logger.Error("wanted to publish ResultEvent, but out channel is full", "result", result, "query", result.Query)
|
||||
}
|
||||
}
|
||||
case <-sub.Cancelled():
|
||||
case <-sub.Canceled():
|
||||
if sub.Err() == tmpubsub.ErrUnsubscribed {
|
||||
return
|
||||
}
|
||||
|
||||
@@ -28,10 +28,6 @@ import (
)

// Client wraps arbitrary implementations of the various interfaces.
//
// We provide a few choices to mock out each one in this package.
// Nothing hidden here, so no New function, just construct it from
// some parts, and swap them out them during the tests.
type Client struct {
client.ABCIClient
client.SignClient
@@ -41,6 +37,14 @@ type Client struct {
client.EvidenceClient
client.MempoolClient
service.Service

env *core.Environment
}

func New() Client {
return Client{
env: &core.Environment{},
}
}

var _ client.Client = Client{}
@@ -80,11 +84,11 @@ func (c Call) GetResponse(args interface{}) (interface{}, error) {
}

func (c Client) Status(ctx context.Context) (*ctypes.ResultStatus, error) {
return core.Status(&rpctypes.Context{})
return c.env.Status(&rpctypes.Context{})
}

func (c Client) ABCIInfo(ctx context.Context) (*ctypes.ResultABCIInfo, error) {
return core.ABCIInfo(&rpctypes.Context{})
return c.env.ABCIInfo(&rpctypes.Context{})
}

func (c Client) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*ctypes.ResultABCIQuery, error) {
@@ -96,47 +100,47 @@ func (c Client) ABCIQueryWithOptions(
path string,
data bytes.HexBytes,
opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) {
return core.ABCIQuery(&rpctypes.Context{}, path, data, opts.Height, opts.Prove)
return c.env.ABCIQuery(&rpctypes.Context{}, path, data, opts.Height, opts.Prove)
}

func (c Client) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
return core.BroadcastTxCommit(&rpctypes.Context{}, tx)
return c.env.BroadcastTxCommit(&rpctypes.Context{}, tx)
}

func (c Client) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
return core.BroadcastTxAsync(&rpctypes.Context{}, tx)
return c.env.BroadcastTxAsync(&rpctypes.Context{}, tx)
}

func (c Client) BroadcastTxSync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
return core.BroadcastTxSync(&rpctypes.Context{}, tx)
return c.env.BroadcastTxSync(&rpctypes.Context{}, tx)
}

func (c Client) CheckTx(ctx context.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) {
return core.CheckTx(&rpctypes.Context{}, tx)
return c.env.CheckTx(&rpctypes.Context{}, tx)
}

func (c Client) NetInfo(ctx context.Context) (*ctypes.ResultNetInfo, error) {
return core.NetInfo(&rpctypes.Context{})
return c.env.NetInfo(&rpctypes.Context{})
}

func (c Client) ConsensusState(ctx context.Context) (*ctypes.ResultConsensusState, error) {
return core.ConsensusState(&rpctypes.Context{})
return c.env.GetConsensusState(&rpctypes.Context{})
}

func (c Client) DumpConsensusState(ctx context.Context) (*ctypes.ResultDumpConsensusState, error) {
return core.DumpConsensusState(&rpctypes.Context{})
return c.env.DumpConsensusState(&rpctypes.Context{})
}

func (c Client) ConsensusParams(ctx context.Context, height *int64) (*ctypes.ResultConsensusParams, error) {
return core.ConsensusParams(&rpctypes.Context{}, height)
return c.env.ConsensusParams(&rpctypes.Context{}, height)
}

func (c Client) Health(ctx context.Context) (*ctypes.ResultHealth, error) {
return core.Health(&rpctypes.Context{})
return c.env.Health(&rpctypes.Context{})
}

func (c Client) DialSeeds(ctx context.Context, seeds []string) (*ctypes.ResultDialSeeds, error) {
return core.UnsafeDialSeeds(&rpctypes.Context{}, seeds)
return c.env.UnsafeDialSeeds(&rpctypes.Context{}, seeds)
}

func (c Client) DialPeers(
@@ -146,33 +150,33 @@ func (c Client) DialPeers(
unconditional,
private bool,
) (*ctypes.ResultDialPeers, error) {
return core.UnsafeDialPeers(&rpctypes.Context{}, peers, persistent, unconditional, private)
return c.env.UnsafeDialPeers(&rpctypes.Context{}, peers, persistent, unconditional, private)
}

func (c Client) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) {
return core.BlockchainInfo(&rpctypes.Context{}, minHeight, maxHeight)
return c.env.BlockchainInfo(&rpctypes.Context{}, minHeight, maxHeight)
}

func (c Client) Genesis(ctx context.Context) (*ctypes.ResultGenesis, error) {
return core.Genesis(&rpctypes.Context{})
return c.env.Genesis(&rpctypes.Context{})
}

func (c Client) Block(ctx context.Context, height *int64) (*ctypes.ResultBlock, error) {
return core.Block(&rpctypes.Context{}, height)
return c.env.Block(&rpctypes.Context{}, height)
}

func (c Client) BlockByHash(ctx context.Context, hash []byte) (*ctypes.ResultBlock, error) {
return core.BlockByHash(&rpctypes.Context{}, hash)
return c.env.BlockByHash(&rpctypes.Context{}, hash)
}

func (c Client) Commit(ctx context.Context, height *int64) (*ctypes.ResultCommit, error) {
return core.Commit(&rpctypes.Context{}, height)
return c.env.Commit(&rpctypes.Context{}, height)
}

func (c Client) Validators(ctx context.Context, height *int64, page, perPage *int) (*ctypes.ResultValidators, error) {
return core.Validators(&rpctypes.Context{}, height, page, perPage)
return c.env.Validators(&rpctypes.Context{}, height, page, perPage)
}

func (c Client) BroadcastEvidence(ctx context.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) {
return core.BroadcastEvidence(&rpctypes.Context{}, ev)
return c.env.BroadcastEvidence(&rpctypes.Context{}, ev)
}

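Editor's note (hedged sketch, not part of the diff): with the mock client now carrying its own *core.Environment instead of relying on package-level state, test code is expected to build it from parts roughly as below; the import path is an assumption based on the hunks above.

    import "github.com/tendermint/tendermint/rpc/client/mock"

    func newTestClient() mock.Client {
        c := mock.New() // fills in the private core.Environment
        // In a test you would typically swap individual embedded pieces before
        // exercising the methods you care about, e.g.:
        // c.ABCIClient = myFakeABCIClient
        return c
    }
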
@@ -9,8 +9,8 @@ import (
)

// ABCIQuery queries the application for some information.
// More: https://docs.tendermint.com/main/rpc/#/ABCI/abci_query
func ABCIQuery(
// More: https://docs.tendermint.com/master/rpc/#/ABCI/abci_query
func (env *Environment) ABCIQuery(
ctx *rpctypes.Context,
path string,
data bytes.HexBytes,
@@ -31,8 +31,8 @@ func ABCIQuery(
}

// ABCIInfo gets some info about the application.
// More: https://docs.tendermint.com/main/rpc/#/ABCI/abci_info
func ABCIInfo(ctx *rpctypes.Context) (*ctypes.ResultABCIInfo, error) {
// More: https://docs.tendermint.com/master/rpc/#/ABCI/abci_info
func (env *Environment) ABCIInfo(ctx *rpctypes.Context) (*ctypes.ResultABCIInfo, error) {
resInfo, err := env.ProxyAppQuery.InfoSync(proxy.RequestInfo)
if err != nil {
return nil, err

@@ -15,10 +15,19 @@ import (
)

// BlockchainInfo gets block headers for minHeight <= height <= maxHeight.
// Block headers are returned in descending order (highest first).
// More: https://docs.tendermint.com/main/rpc/#/Info/blockchain
func BlockchainInfo(ctx *rpctypes.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) {
// maximum 20 block metas
//
// If maxHeight does not yet exist, blocks up to the current height will be
// returned. If minHeight does not exist (due to pruning), earliest existing
// height will be used.
//
// At most 20 items will be returned. Block headers are returned in descending
// order (highest first).
//
// More: https://docs.tendermint.com/master/rpc/#/Info/blockchain
func (env *Environment) BlockchainInfo(
ctx *rpctypes.Context,
minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) {

const limit int64 = 20
var err error
minHeight, maxHeight, err = filterMinMax(
@@ -79,8 +88,8 @@ func filterMinMax(base, height, min, max, limit int64) (int64, int64, error) {
// Header gets block header at a given height.
// If no height is provided, it will fetch the latest header.
// More: https://docs.tendermint.com/master/rpc/#/Info/header
func Header(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultHeader, error) {
height, err := getHeight(env.BlockStore.Height(), heightPtr)
func (env *Environment) Header(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultHeader, error) {
height, err := env.getHeight(env.BlockStore.Height(), heightPtr)
if err != nil {
return nil, err
}
@@ -95,7 +104,7 @@ func Header(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultHeader, erro

// HeaderByHash gets header by hash.
// More: https://docs.tendermint.com/master/rpc/#/Info/header_by_hash
func HeaderByHash(ctx *rpctypes.Context, hash bytes.HexBytes) (*ctypes.ResultHeader, error) {
func (env *Environment) HeaderByHash(ctx *rpctypes.Context, hash bytes.HexBytes) (*ctypes.ResultHeader, error) {
// N.B. The hash parameter is HexBytes so that the reflective parameter
// decoding logic in the HTTP service will correctly translate from JSON.
// See https://github.com/tendermint/tendermint/issues/6802 for context.
@@ -110,9 +119,9 @@ func HeaderByHash(ctx *rpctypes.Context, hash bytes.HexBytes) (*ctypes.ResultHea

// Block gets block at a given height.
// If no height is provided, it will fetch the latest block.
// More: https://docs.tendermint.com/main/rpc/#/Info/block
func Block(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlock, error) {
height, err := getHeight(env.BlockStore.Height(), heightPtr)
// More: https://docs.tendermint.com/master/rpc/#/Info/block
func (env *Environment) Block(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlock, error) {
height, err := env.getHeight(env.BlockStore.Height(), heightPtr)
if err != nil {
return nil, err
}
@@ -126,8 +135,8 @@ func Block(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlock, error)
}

// BlockByHash gets block by hash.
// More: https://docs.tendermint.com/main/rpc/#/Info/block_by_hash
func BlockByHash(ctx *rpctypes.Context, hash []byte) (*ctypes.ResultBlock, error) {
// More: https://docs.tendermint.com/master/rpc/#/Info/block_by_hash
func (env *Environment) BlockByHash(ctx *rpctypes.Context, hash []byte) (*ctypes.ResultBlock, error) {
block := env.BlockStore.LoadBlockByHash(hash)
if block == nil {
return &ctypes.ResultBlock{BlockID: types.BlockID{}, Block: nil}, nil
@@ -139,9 +148,9 @@ func BlockByHash(ctx *rpctypes.Context, hash []byte) (*ctypes.ResultBlock, error

// Commit gets block commit at a given height.
// If no height is provided, it will fetch the commit for the latest block.
// More: https://docs.tendermint.com/main/rpc/#/Info/commit
func Commit(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultCommit, error) {
height, err := getHeight(env.BlockStore.Height(), heightPtr)
// More: https://docs.tendermint.com/master/rpc/#/Info/commit
func (env *Environment) Commit(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultCommit, error) {
height, err := env.getHeight(env.BlockStore.Height(), heightPtr)
if err != nil {
return nil, err
}
@@ -170,9 +179,9 @@ func Commit(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultCommit, erro
// Results are for the height of the block containing the txs.
// Thus response.results.deliver_tx[5] is the results of executing
// getBlock(h).Txs[5]
// More: https://docs.tendermint.com/main/rpc/#/Info/block_results
func BlockResults(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlockResults, error) {
height, err := getHeight(env.BlockStore.Height(), heightPtr)
// More: https://docs.tendermint.com/master/rpc/#/Info/block_results
func (env *Environment) BlockResults(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlockResults, error) {
height, err := env.getHeight(env.BlockStore.Height(), heightPtr)
if err != nil {
return nil, err
}
@@ -194,7 +203,7 @@ func BlockResults(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlockR

// BlockSearch searches for a paginated set of blocks matching BeginBlock and
// EndBlock event search criteria.
func BlockSearch(
func (env *Environment) BlockSearch(
ctx *rpctypes.Context,
query string,
pagePtr, perPagePtr *int,
@@ -230,7 +239,7 @@ func BlockSearch(

// paginate results
totalCount := len(results)
perPage := validatePerPage(perPagePtr)
perPage := env.validatePerPage(perPagePtr)

page, err := validatePage(pagePtr, perPage, totalCount)
if err != nil {

@@ -80,7 +80,7 @@ func TestBlockResults(t *testing.T) {
BeginBlock: &abci.ResponseBeginBlock{},
}

env = &Environment{}
env := &Environment{}
env.StateStore = sm.NewStore(dbm.NewMemDB(), sm.StoreOptions{
DiscardABCIResponses: false,
})
@@ -110,7 +110,7 @@ func TestBlockResults(t *testing.T) {
}

for _, tc := range testCases {
res, err := BlockResults(&rpctypes.Context{}, &tc.height)
res, err := env.BlockResults(&rpctypes.Context{}, &tc.height)
if tc.wantErr {
assert.Error(t, err)
} else {

@@ -14,10 +14,14 @@ import (
// validators are sorted by their voting power - this is the canonical order
// for the validators in the set as used in computing their Merkle root.
//
// More: https://docs.tendermint.com/main/rpc/#/Info/validators
func Validators(ctx *rpctypes.Context, heightPtr *int64, pagePtr, perPagePtr *int) (*ctypes.ResultValidators, error) {
// More: https://docs.tendermint.com/master/rpc/#/Info/validators
func (env *Environment) Validators(
ctx *rpctypes.Context,
heightPtr *int64,
pagePtr, perPagePtr *int) (*ctypes.ResultValidators, error) {

// The latest validator that we know is the NextValidator of the last block.
height, err := getHeight(latestUncommittedHeight(), heightPtr)
height, err := env.getHeight(env.latestUncommittedHeight(), heightPtr)
if err != nil {
return nil, err
}
@@ -28,7 +32,7 @@ func Validators(ctx *rpctypes.Context, heightPtr *int64, pagePtr, perPagePtr *in
}

totalCount := len(validators.Validators)
perPage := validatePerPage(perPagePtr)
perPage := env.validatePerPage(perPagePtr)
page, err := validatePage(pagePtr, perPage, totalCount)
if err != nil {
return nil, err
@@ -47,8 +51,8 @@ func Validators(ctx *rpctypes.Context, heightPtr *int64, pagePtr, perPagePtr *in

// DumpConsensusState dumps consensus state.
// UNSTABLE
// More: https://docs.tendermint.com/main/rpc/#/Info/dump_consensus_state
func DumpConsensusState(ctx *rpctypes.Context) (*ctypes.ResultDumpConsensusState, error) {
// More: https://docs.tendermint.com/master/rpc/#/Info/dump_consensus_state
func (env *Environment) DumpConsensusState(ctx *rpctypes.Context) (*ctypes.ResultDumpConsensusState, error) {
// Get Peer consensus states.
peers := env.P2PPeers.Peers().List()
peerStates := make([]ctypes.PeerStateInfo, len(peers))
@@ -80,8 +84,8 @@ func DumpConsensusState(ctx *rpctypes.Context) (*ctypes.ResultDumpConsensusState

// ConsensusState returns a concise summary of the consensus state.
// UNSTABLE
// More: https://docs.tendermint.com/main/rpc/#/Info/consensus_state
func ConsensusState(ctx *rpctypes.Context) (*ctypes.ResultConsensusState, error) {
// More: https://docs.tendermint.com/master/rpc/#/Info/consensus_state
func (env *Environment) GetConsensusState(ctx *rpctypes.Context) (*ctypes.ResultConsensusState, error) {
// Get self round state.
bz, err := env.ConsensusState.GetRoundStateSimpleJSON()
return &ctypes.ResultConsensusState{RoundState: bz}, err
@@ -89,11 +93,14 @@ func ConsensusState(ctx *rpctypes.Context) (*ctypes.ResultConsensusState, error)

// ConsensusParams gets the consensus parameters at the given block height.
// If no height is provided, it will fetch the latest consensus params.
// More: https://docs.tendermint.com/main/rpc/#/Info/consensus_params
func ConsensusParams(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultConsensusParams, error) {
// More: https://docs.tendermint.com/master/rpc/#/Info/consensus_params
func (env *Environment) ConsensusParams(
ctx *rpctypes.Context,
heightPtr *int64) (*ctypes.ResultConsensusParams, error) {

// The latest consensus params that we know is the consensus params after the
// last block.
height, err := getHeight(latestUncommittedHeight(), heightPtr)
height, err := env.getHeight(env.latestUncommittedHeight(), heightPtr)
if err != nil {
return nil, err
}

@@ -6,7 +6,7 @@ import (
)

// UnsafeFlushMempool removes all transactions from the mempool.
func UnsafeFlushMempool(ctx *rpctypes.Context) (*ctypes.ResultUnsafeFlushMempool, error) {
func (env *Environment) UnsafeFlushMempool(ctx *rpctypes.Context) (*ctypes.ResultUnsafeFlushMempool, error) {
env.Mempool.Flush()
return &ctypes.ResultUnsafeFlushMempool{}, nil
}

@@ -6,7 +6,6 @@ import (
"time"

cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/consensus"
"github.com/tendermint/tendermint/crypto"
tmjson "github.com/tendermint/tendermint/libs/json"
"github.com/tendermint/tendermint/libs/log"
@@ -33,17 +32,6 @@ const (
genesisChunkSize = 16 * 1024 * 1024 // 16
)

var (
// set by Node
env *Environment
)

// SetEnvironment sets up the given Environment.
// It will race if multiple Node call SetEnvironment.
func SetEnvironment(e *Environment) {
env = e
}

//----------------------------------------------
// These interfaces are used by RPC and must be thread safe

@@ -69,6 +57,10 @@ type peers interface {
Peers() p2p.IPeerSet
}

type consensusReactor interface {
WaitSync() bool
}

// ----------------------------------------------
// Environment contains objects and interfaces used by the RPC. It is expected
// to be setup once during startup.
@@ -78,21 +70,21 @@ type Environment struct {
ProxyAppMempool proxy.AppConnMempool

// interfaces defined in types and above
StateStore sm.Store
BlockStore sm.BlockStore
EvidencePool sm.EvidencePool
ConsensusState Consensus
P2PPeers peers
P2PTransport transport
StateStore sm.Store
BlockStore sm.BlockStore
EvidencePool sm.EvidencePool
ConsensusState Consensus
ConsensusReactor consensusReactor
P2PPeers peers
P2PTransport transport

// objects
PubKey crypto.PubKey
GenDoc *types.GenesisDoc // cache the genesis structure
TxIndexer txindex.TxIndexer
BlockIndexer indexer.BlockIndexer
ConsensusReactor *consensus.Reactor
EventBus *types.EventBus // thread safe
Mempool mempl.Mempool
PubKey crypto.PubKey
GenDoc *types.GenesisDoc // cache the genesis structure
TxIndexer txindex.TxIndexer
BlockIndexer indexer.BlockIndexer
EventBus *types.EventBus // thread safe
Mempool mempl.Mempool

Logger log.Logger

@@ -125,7 +117,7 @@ func validatePage(pagePtr *int, perPage, totalCount int) (int, error) {
return page, nil
}

func validatePerPage(perPagePtr *int) int {
func (env *Environment) validatePerPage(perPagePtr *int) int {
if perPagePtr == nil { // no per_page parameter
return defaultPerPage
}
@@ -141,7 +133,7 @@ func validatePerPage(perPagePtr *int) int {

// InitGenesisChunks configures the environment and should be called on service
// startup.
func InitGenesisChunks() error {
func (env *Environment) InitGenesisChunks() error {
if env.genChunks != nil {
return nil
}
@@ -178,7 +170,7 @@ func validateSkipCount(page, perPage int) int {
}

// latestHeight can be either latest committed or uncommitted (+1) height.
func getHeight(latestHeight int64, heightPtr *int64) (int64, error) {
func (env *Environment) getHeight(latestHeight int64, heightPtr *int64) (int64, error) {
if heightPtr != nil {
height := *heightPtr
if height <= 0 {
@@ -198,7 +190,7 @@ func getHeight(latestHeight int64, heightPtr *int64) (int64, error) {
return latestHeight, nil
}

func latestUncommittedHeight() int64 {
func (env *Environment) latestUncommittedHeight() int64 {
nodeIsSyncing := env.ConsensusReactor.WaitSync()
if nodeIsSyncing {
return env.BlockStore.Height()

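Editor's note (hedged sketch, not part of the diff): the hunks above drop the package-level env global and SetEnvironment in favour of an explicit *Environment that the node builds and passes around. The wiring might look roughly like this; the field set is abbreviated and the helper signature is an assumption.

    // sketch: constructing the RPC environment during node startup
    func setupRPCEnv(stateStore sm.Store, blockStore sm.BlockStore, logger log.Logger) (*core.Environment, error) {
        env := &core.Environment{
            StateStore: stateStore,
            BlockStore: blockStore,
            Logger:     logger,
            // ... ProxyAppQuery, Mempool, EventBus, P2PPeers, etc.
        }
        if err := env.InitGenesisChunks(); err != nil {
            return nil, err
        }
        return env, nil
    }
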
@@ -70,13 +70,13 @@ func TestPaginationPerPage(t *testing.T) {
{5, maxPerPage, maxPerPage},
{5, maxPerPage + 1, maxPerPage},
}

env := &Environment{}
for _, c := range cases {
p := validatePerPage(&c.perPage)
p := env.validatePerPage(&c.perPage)
assert.Equal(t, c.newPerPage, p, fmt.Sprintf("%v", c))
}

// nil case
p := validatePerPage(nil)
p := env.validatePerPage(nil)
assert.Equal(t, defaultPerPage, p)
}

@@ -19,8 +19,8 @@ const (
)

// Subscribe for events via WebSocket.
// More: https://docs.tendermint.com/main/rpc/#/Websocket/subscribe
func Subscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultSubscribe, error) {
// More: https://docs.tendermint.com/master/rpc/#/Websocket/subscribe
func (env *Environment) Subscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultSubscribe, error) {
addr := ctx.RemoteAddr()

if env.EventBus.NumClients() >= env.Config.MaxSubscriptionClients {
@@ -76,7 +76,7 @@ func Subscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultSubscribe, er
return
}
}
case <-sub.Cancelled():
case <-sub.Canceled():
if sub.Err() != tmpubsub.ErrUnsubscribed {
var reason string
if sub.Err() == nil {
@@ -102,8 +102,8 @@ func Subscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultSubscribe, er
}

// Unsubscribe from events via WebSocket.
// More: https://docs.tendermint.com/main/rpc/#/Websocket/unsubscribe
func Unsubscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultUnsubscribe, error) {
// More: https://docs.tendermint.com/master/rpc/#/Websocket/unsubscribe
func (env *Environment) Unsubscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultUnsubscribe, error) {
addr := ctx.RemoteAddr()
env.Logger.Info("Unsubscribe from query", "remote", addr, "query", query)
q, err := tmquery.New(query)
@@ -118,8 +118,8 @@ func Unsubscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultUnsubscribe
}

// UnsubscribeAll from all events via WebSocket.
// More: https://docs.tendermint.com/main/rpc/#/Websocket/unsubscribe_all
func UnsubscribeAll(ctx *rpctypes.Context) (*ctypes.ResultUnsubscribe, error) {
// More: https://docs.tendermint.com/master/rpc/#/Websocket/unsubscribe_all
func (env *Environment) UnsubscribeAll(ctx *rpctypes.Context) (*ctypes.ResultUnsubscribe, error) {
addr := ctx.RemoteAddr()
env.Logger.Info("Unsubscribe from all", "remote", addr)
err := env.EventBus.UnsubscribeAll(context.Background(), addr)

@@ -10,8 +10,11 @@ import (
)

// BroadcastEvidence broadcasts evidence of the misbehavior.
// More: https://docs.tendermint.com/main/rpc/#/Info/broadcast_evidence
func BroadcastEvidence(ctx *rpctypes.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) {
// More: https://docs.tendermint.com/master/rpc/#/Evidence/broadcast_evidence
func (env *Environment) BroadcastEvidence(
ctx *rpctypes.Context,
ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) {

if ev == nil {
return nil, errors.New("no evidence was provided")
}

@@ -7,7 +7,7 @@ import (

// Health gets node health. Returns empty result (200 OK) on success, no
// response - in case of an error.
// More: https://docs.tendermint.com/main/rpc/#/Info/health
func Health(ctx *rpctypes.Context) (*ctypes.ResultHealth, error) {
// More: https://docs.tendermint.com/master/rpc/#/Info/health
func (env *Environment) Health(ctx *rpctypes.Context) (*ctypes.ResultHealth, error) {
return &ctypes.ResultHealth{}, nil
}

@@ -18,8 +18,8 @@ import (

// BroadcastTxAsync returns right away, with no response. Does not wait for
// CheckTx nor DeliverTx results.
// More: https://docs.tendermint.com/main/rpc/#/Tx/broadcast_tx_async
func BroadcastTxAsync(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
// More: https://docs.tendermint.com/master/rpc/#/Tx/broadcast_tx_async
func (env *Environment) BroadcastTxAsync(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
err := env.Mempool.CheckTx(tx, nil, mempl.TxInfo{})

if err != nil {
@@ -30,8 +30,8 @@ func BroadcastTxAsync(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadca

// BroadcastTxSync returns with the response from CheckTx. Does not wait for
// DeliverTx result.
// More: https://docs.tendermint.com/main/rpc/#/Tx/broadcast_tx_sync
func BroadcastTxSync(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
// More: https://docs.tendermint.com/master/rpc/#/Tx/broadcast_tx_sync
func (env *Environment) BroadcastTxSync(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
resCh := make(chan *abci.Response, 1)
err := env.Mempool.CheckTx(tx, func(res *abci.Response) {
select {
@@ -60,8 +60,8 @@ func BroadcastTxSync(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcas
}

// BroadcastTxCommit returns with the responses from CheckTx and DeliverTx.
// More: https://docs.tendermint.com/main/rpc/#/Tx/broadcast_tx_commit
func BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
// More: https://docs.tendermint.com/master/rpc/#/Tx/broadcast_tx_commit
func (env *Environment) BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
subscriber := ctx.RemoteAddr()

if env.EventBus.NumClients() >= env.Config.MaxSubscriptionClients {
@@ -121,7 +121,7 @@ func BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadc
Hash: tx.Hash(),
Height: deliverTxRes.Height,
}, nil
case <-deliverTxSub.Cancelled():
case <-deliverTxSub.Canceled():
var reason string
if deliverTxSub.Err() == nil {
reason = "Tendermint exited"
@@ -149,10 +149,10 @@ func BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadc

// UnconfirmedTxs gets unconfirmed transactions (maximum ?limit entries)
// including their number.
// More: https://docs.tendermint.com/main/rpc/#/Info/unconfirmed_txs
func UnconfirmedTxs(ctx *rpctypes.Context, limitPtr *int) (*ctypes.ResultUnconfirmedTxs, error) {
// More: https://docs.tendermint.com/master/rpc/#/Info/unconfirmed_txs
func (env *Environment) UnconfirmedTxs(ctx *rpctypes.Context, limitPtr *int) (*ctypes.ResultUnconfirmedTxs, error) {
// reuse per_page validator
limit := validatePerPage(limitPtr)
limit := env.validatePerPage(limitPtr)

txs := env.Mempool.ReapMaxTxs(limit)
return &ctypes.ResultUnconfirmedTxs{
@@ -163,8 +163,8 @@ func UnconfirmedTxs(ctx *rpctypes.Context, limitPtr *int) (*ctypes.ResultUnconfi
}

// NumUnconfirmedTxs gets number of unconfirmed transactions.
// More: https://docs.tendermint.com/main/rpc/#/Info/num_unconfirmed_txs
func NumUnconfirmedTxs(ctx *rpctypes.Context) (*ctypes.ResultUnconfirmedTxs, error) {
// More: https://docs.tendermint.com/master/rpc/#/Info/num_unconfirmed_txs
func (env *Environment) NumUnconfirmedTxs(ctx *rpctypes.Context) (*ctypes.ResultUnconfirmedTxs, error) {
return &ctypes.ResultUnconfirmedTxs{
Count: env.Mempool.Size(),
Total: env.Mempool.Size(),
@@ -173,8 +173,8 @@ func NumUnconfirmedTxs(ctx *rpctypes.Context) (*ctypes.ResultUnconfirmedTxs, err

// CheckTx checks the transaction without executing it. The transaction won't
// be added to the mempool either.
// More: https://docs.tendermint.com/main/rpc/#/Tx/check_tx
func CheckTx(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) {
// More: https://docs.tendermint.com/master/rpc/#/Tx/check_tx
func (env *Environment) CheckTx(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) {
res, err := env.ProxyAppMempool.CheckTxSync(abci.RequestCheckTx{Tx: tx})
if err != nil {
return nil, err

@@ -11,8 +11,8 @@ import (
)

// NetInfo returns network info.
// More: https://docs.tendermint.com/main/rpc/#/Info/net_info
func NetInfo(ctx *rpctypes.Context) (*ctypes.ResultNetInfo, error) {
// More: https://docs.tendermint.com/master/rpc/#/Info/net_info
func (env *Environment) NetInfo(ctx *rpctypes.Context) (*ctypes.ResultNetInfo, error) {
peersList := env.P2PPeers.Peers().List()
peers := make([]ctypes.Peer, 0, len(peersList))
for _, peer := range peersList {
@@ -39,7 +39,7 @@ func NetInfo(ctx *rpctypes.Context) (*ctypes.ResultNetInfo, error) {
}

// UnsafeDialSeeds dials the given seeds (comma-separated id@IP:PORT).
func UnsafeDialSeeds(ctx *rpctypes.Context, seeds []string) (*ctypes.ResultDialSeeds, error) {
func (env *Environment) UnsafeDialSeeds(ctx *rpctypes.Context, seeds []string) (*ctypes.ResultDialSeeds, error) {
if len(seeds) == 0 {
return &ctypes.ResultDialSeeds{}, errors.New("no seeds provided")
}
@@ -52,8 +52,11 @@ func UnsafeDialSeeds(ctx *rpctypes.Context, seeds []string) (*ctypes.ResultDialS

// UnsafeDialPeers dials the given peers (comma-separated id@IP:PORT),
// optionally making them persistent.
func UnsafeDialPeers(ctx *rpctypes.Context, peers []string, persistent, unconditional, private bool) (
*ctypes.ResultDialPeers, error) {
func (env *Environment) UnsafeDialPeers(
ctx *rpctypes.Context,
peers []string,
persistent, unconditional, private bool) (*ctypes.ResultDialPeers, error) {

if len(peers) == 0 {
return &ctypes.ResultDialPeers{}, errors.New("no peers provided")
}
@@ -92,8 +95,8 @@ func UnsafeDialPeers(ctx *rpctypes.Context, peers []string, persistent, uncondit
}

// Genesis returns genesis file.
// More: https://docs.tendermint.com/main/rpc/#/Info/genesis
func Genesis(ctx *rpctypes.Context) (*ctypes.ResultGenesis, error) {
// More: https://docs.tendermint.com/master/rpc/#/Info/genesis
func (env *Environment) Genesis(ctx *rpctypes.Context) (*ctypes.ResultGenesis, error) {
if len(env.genChunks) > 1 {
return nil, errors.New("genesis response is large, please use the genesis_chunked API instead")
}
@@ -101,7 +104,7 @@ func Genesis(ctx *rpctypes.Context) (*ctypes.ResultGenesis, error) {
return &ctypes.ResultGenesis{Genesis: env.GenDoc}, nil
}

func GenesisChunked(ctx *rpctypes.Context, chunk uint) (*ctypes.ResultGenesisChunk, error) {
func (env *Environment) GenesisChunked(ctx *rpctypes.Context, chunk uint) (*ctypes.ResultGenesisChunk, error) {
if env.genChunks == nil {
return nil, fmt.Errorf("service configuration error, genesis chunks are not initialized")
}

@@ -23,6 +23,7 @@ func TestUnsafeDialSeeds(t *testing.T) {
}
})

env := &Environment{}
env.Logger = log.TestingLogger()
env.P2PPeers = sw

@@ -36,7 +37,7 @@ func TestUnsafeDialSeeds(t *testing.T) {
}

for _, tc := range testCases {
res, err := UnsafeDialSeeds(&rpctypes.Context{}, tc.seeds)
res, err := env.UnsafeDialSeeds(&rpctypes.Context{}, tc.seeds)
if tc.isErr {
assert.Error(t, err)
} else {
@@ -62,6 +63,7 @@ func TestUnsafeDialPeers(t *testing.T) {
}
})

env := &Environment{}
env.Logger = log.TestingLogger()
env.P2PPeers = sw

@@ -76,7 +78,7 @@ func TestUnsafeDialPeers(t *testing.T) {
}

for _, tc := range testCases {
res, err := UnsafeDialPeers(&rpctypes.Context{}, tc.peers, tc.persistence, tc.unconditional, tc.private)
res, err := env.UnsafeDialPeers(&rpctypes.Context{}, tc.peers, tc.persistence, tc.unconditional, tc.private)
if tc.isErr {
assert.Error(t, err)
} else {

@@ -6,54 +6,58 @@ import (

// TODO: better system than "unsafe" prefix

type RoutesMap map[string]*rpc.RPCFunc

// Routes is a map of available routes.
var Routes = map[string]*rpc.RPCFunc{
// subscribe/unsubscribe are reserved for websocket events.
"subscribe": rpc.NewWSRPCFunc(Subscribe, "query"),
"unsubscribe": rpc.NewWSRPCFunc(Unsubscribe, "query"),
"unsubscribe_all": rpc.NewWSRPCFunc(UnsubscribeAll, ""),
func (env *Environment) GetRoutes() RoutesMap {
return RoutesMap{
// subscribe/unsubscribe are reserved for websocket events.
"subscribe": rpc.NewWSRPCFunc(env.Subscribe, "query"),
"unsubscribe": rpc.NewWSRPCFunc(env.Unsubscribe, "query"),
"unsubscribe_all": rpc.NewWSRPCFunc(env.UnsubscribeAll, ""),

// info API
"health": rpc.NewRPCFunc(Health, ""),
"status": rpc.NewRPCFunc(Status, ""),
"net_info": rpc.NewRPCFunc(NetInfo, ""),
"blockchain": rpc.NewRPCFunc(BlockchainInfo, "minHeight,maxHeight", rpc.Cacheable()),
"genesis": rpc.NewRPCFunc(Genesis, "", rpc.Cacheable()),
"genesis_chunked": rpc.NewRPCFunc(GenesisChunked, "chunk", rpc.Cacheable()),
"block": rpc.NewRPCFunc(Block, "height", rpc.Cacheable("height")),
"block_by_hash": rpc.NewRPCFunc(BlockByHash, "hash", rpc.Cacheable()),
"block_results": rpc.NewRPCFunc(BlockResults, "height", rpc.Cacheable("height")),
"commit": rpc.NewRPCFunc(Commit, "height", rpc.Cacheable("height")),
"header": rpc.NewRPCFunc(Header, "height", rpc.Cacheable("height")),
"header_by_hash": rpc.NewRPCFunc(HeaderByHash, "hash", rpc.Cacheable()),
"check_tx": rpc.NewRPCFunc(CheckTx, "tx"),
"tx": rpc.NewRPCFunc(Tx, "hash,prove", rpc.Cacheable()),
"tx_search": rpc.NewRPCFunc(TxSearch, "query,prove,page,per_page,order_by"),
"block_search": rpc.NewRPCFunc(BlockSearch, "query,page,per_page,order_by"),
"validators": rpc.NewRPCFunc(Validators, "height,page,per_page", rpc.Cacheable("height")),
"dump_consensus_state": rpc.NewRPCFunc(DumpConsensusState, ""),
"consensus_state": rpc.NewRPCFunc(ConsensusState, ""),
"consensus_params": rpc.NewRPCFunc(ConsensusParams, "height", rpc.Cacheable("height")),
"unconfirmed_txs": rpc.NewRPCFunc(UnconfirmedTxs, "limit"),
"num_unconfirmed_txs": rpc.NewRPCFunc(NumUnconfirmedTxs, ""),
// info API
"health": rpc.NewRPCFunc(env.Health, ""),
"status": rpc.NewRPCFunc(env.Status, ""),
"net_info": rpc.NewRPCFunc(env.NetInfo, ""),
"blockchain": rpc.NewRPCFunc(env.BlockchainInfo, "minHeight,maxHeight", rpc.Cacheable()),
"genesis": rpc.NewRPCFunc(env.Genesis, "", rpc.Cacheable()),
"genesis_chunked": rpc.NewRPCFunc(env.GenesisChunked, "chunk", rpc.Cacheable()),
"block": rpc.NewRPCFunc(env.Block, "height", rpc.Cacheable("height")),
"block_by_hash": rpc.NewRPCFunc(env.BlockByHash, "hash", rpc.Cacheable()),
"block_results": rpc.NewRPCFunc(env.BlockResults, "height", rpc.Cacheable("height")),
"commit": rpc.NewRPCFunc(env.Commit, "height", rpc.Cacheable("height")),
"header": rpc.NewRPCFunc(env.Header, "height", rpc.Cacheable("height")),
"header_by_hash": rpc.NewRPCFunc(env.HeaderByHash, "hash", rpc.Cacheable()),
"check_tx": rpc.NewRPCFunc(env.CheckTx, "tx"),
"tx": rpc.NewRPCFunc(env.Tx, "hash,prove", rpc.Cacheable()),
"tx_search": rpc.NewRPCFunc(env.TxSearch, "query,prove,page,per_page,order_by"),
"block_search": rpc.NewRPCFunc(env.BlockSearch, "query,page,per_page,order_by"),
"validators": rpc.NewRPCFunc(env.Validators, "height,page,per_page", rpc.Cacheable("height")),
"dump_consensus_state": rpc.NewRPCFunc(env.DumpConsensusState, ""),
"consensus_state": rpc.NewRPCFunc(env.GetConsensusState, ""),
"consensus_params": rpc.NewRPCFunc(env.ConsensusParams, "height", rpc.Cacheable("height")),
"unconfirmed_txs": rpc.NewRPCFunc(env.UnconfirmedTxs, "limit"),
"num_unconfirmed_txs": rpc.NewRPCFunc(env.NumUnconfirmedTxs, ""),

// tx broadcast API
"broadcast_tx_commit": rpc.NewRPCFunc(BroadcastTxCommit, "tx"),
"broadcast_tx_sync": rpc.NewRPCFunc(BroadcastTxSync, "tx"),
"broadcast_tx_async": rpc.NewRPCFunc(BroadcastTxAsync, "tx"),
// tx broadcast API
"broadcast_tx_commit": rpc.NewRPCFunc(env.BroadcastTxCommit, "tx"),
"broadcast_tx_sync": rpc.NewRPCFunc(env.BroadcastTxSync, "tx"),
"broadcast_tx_async": rpc.NewRPCFunc(env.BroadcastTxAsync, "tx"),

// abci API
"abci_query": rpc.NewRPCFunc(ABCIQuery, "path,data,height,prove"),
"abci_info": rpc.NewRPCFunc(ABCIInfo, "", rpc.Cacheable()),
// abci API
"abci_query": rpc.NewRPCFunc(env.ABCIQuery, "path,data,height,prove"),
"abci_info": rpc.NewRPCFunc(env.ABCIInfo, "", rpc.Cacheable()),

// evidence API
"broadcast_evidence": rpc.NewRPCFunc(BroadcastEvidence, "evidence"),
// evidence API
"broadcast_evidence": rpc.NewRPCFunc(env.BroadcastEvidence, "evidence"),
}
}

// AddUnsafeRoutes adds unsafe routes.
func AddUnsafeRoutes() {
func (env *Environment) AddUnsafeRoutes(routes RoutesMap) {
// control API
Routes["dial_seeds"] = rpc.NewRPCFunc(UnsafeDialSeeds, "seeds")
Routes["dial_peers"] = rpc.NewRPCFunc(UnsafeDialPeers, "peers,persistent,unconditional,private")
Routes["unsafe_flush_mempool"] = rpc.NewRPCFunc(UnsafeFlushMempool, "")
routes["dial_seeds"] = rpc.NewRPCFunc(env.UnsafeDialSeeds, "seeds")
routes["dial_peers"] = rpc.NewRPCFunc(env.UnsafeDialPeers, "peers,persistent,unconditional,private")
routes["unsafe_flush_mempool"] = rpc.NewRPCFunc(env.UnsafeFlushMempool, "")
}

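Editor's note (hedged sketch, not part of the diff): with routes built per Environment rather than stored in a package-level map, a server would obtain and optionally extend them along these lines; the cfg.RPC.Unsafe flag name is an assumption.

    routes := env.GetRoutes()
    if cfg.RPC.Unsafe { // assumption: unsafe RPC endpoints enabled via config
        env.AddUnsafeRoutes(routes) // registers dial_seeds, dial_peers, unsafe_flush_mempool
    }
    wm := server.NewWebsocketManager(routes)
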
@@ -12,8 +12,8 @@ import (

// Status returns Tendermint status including node info, pubkey, latest block
// hash, app hash, block height and time.
// More: https://docs.tendermint.com/main/rpc/#/Info/status
func Status(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) {
// More: https://docs.tendermint.com/master/rpc/#/Info/status
func (env *Environment) Status(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) {
var (
earliestBlockHeight int64
earliestBlockHash tmbytes.HexBytes
@@ -47,7 +47,7 @@ func Status(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) {
// Return the very last voting power, not the voting power of this validator
// during the last block.
var votingPower int64
if val := validatorAtHeight(latestUncommittedHeight()); val != nil {
if val := env.validatorAtHeight(env.latestUncommittedHeight()); val != nil {
votingPower = val.VotingPower
}

@@ -74,12 +74,12 @@ func Status(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) {
return result, nil
}

func validatorAtHeight(h int64) *types.Validator {
vals, err := env.StateStore.LoadValidators(h)
func (env *Environment) validatorAtHeight(h int64) *types.Validator {
valsWithH, err := env.StateStore.LoadValidators(h)
if err != nil {
return nil
}
privValAddress := env.PubKey.Address()
_, val := vals.GetByAddress(privValAddress)
_, val := valsWithH.GetByAddress(privValAddress)
return val
}

@@ -17,7 +17,7 @@ import (
// transaction is in the mempool, invalidated, or was not sent in the first
// place.
// More: https://docs.tendermint.com/main/rpc/#/Info/tx
func Tx(ctx *rpctypes.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) {
func (env *Environment) Tx(ctx *rpctypes.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) {
// if index is disabled, return error
if _, ok := env.TxIndexer.(*null.TxIndex); ok {
return nil, fmt.Errorf("transaction indexing is disabled")
@@ -51,7 +51,7 @@ func Tx(ctx *rpctypes.Context, hash []byte, prove bool) (*ctypes.ResultTx, error
// TxSearch allows you to query for multiple transactions results. It returns a
// list of transactions (maximum ?per_page entries) and the total count.
// More: https://docs.tendermint.com/main/rpc/#/Info/tx_search
func TxSearch(
func (env *Environment) TxSearch(
ctx *rpctypes.Context,
query string,
prove bool,
@@ -98,7 +98,7 @@ func TxSearch(

// paginate results
totalCount := len(results)
perPage := validatePerPage(perPagePtr)
perPage := env.validatePerPage(perPagePtr)

page, err := validatePage(pagePtr, perPage, totalCount)
if err != nil {

@@ -9,6 +9,7 @@ import (
)

type broadcastAPI struct {
env *core.Environment
}

func (bapi *broadcastAPI) Ping(ctx context.Context, req *RequestPing) (*ResponsePing, error) {
@@ -19,7 +20,7 @@ func (bapi *broadcastAPI) Ping(ctx context.Context, req *RequestPing) (*Response
func (bapi *broadcastAPI) BroadcastTx(ctx context.Context, req *RequestBroadcastTx) (*ResponseBroadcastTx, error) {
// NOTE: there's no way to get client's remote address
// see https://stackoverflow.com/questions/33684570/session-and-remote-ip-address-in-grpc-go
res, err := core.BroadcastTxCommit(&rpctypes.Context{}, req.Tx)
res, err := bapi.env.BroadcastTxCommit(&rpctypes.Context{}, req.Tx)
if err != nil {
return nil, err
}

@@ -7,6 +7,7 @@ import (
"google.golang.org/grpc"

tmnet "github.com/tendermint/tendermint/libs/net"
"github.com/tendermint/tendermint/rpc/core"
)

// Config is an gRPC server configuration.
@@ -17,16 +18,16 @@ type Config struct {
// StartGRPCServer starts a new gRPC BroadcastAPIServer using the given
// net.Listener.
// NOTE: This function blocks - you may want to call it in a go-routine.
func StartGRPCServer(ln net.Listener) error {
func StartGRPCServer(env *core.Environment, ln net.Listener) error {
grpcServer := grpc.NewServer()
RegisterBroadcastAPIServer(grpcServer, &broadcastAPI{})
RegisterBroadcastAPIServer(grpcServer, &broadcastAPI{env: env})
return grpcServer.Serve(ln)
}

// StartGRPCClient dials the gRPC server using protoAddr and returns a new
// BroadcastAPIClient.
func StartGRPCClient(protoAddr string) BroadcastAPIClient {
//nolint:staticcheck // SA1019 Existing use of deprecated but supported dial option.
//nolint: staticcheck // SA1019 Existing use of deprecated but supported dial option.
conn, err := grpc.Dial(protoAddr, grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc))
if err != nil {
panic(err)

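Editor's note (hedged sketch, not part of the diff): StartGRPCServer now takes the Environment explicitly and still blocks, so a caller would typically run it in a goroutine; the grpccore package alias and the logger are assumptions.

    // assuming ln is a net.Listener already bound for the gRPC endpoint
    go func() {
        if err := grpccore.StartGRPCServer(env, ln); err != nil {
            logger.Error("gRPC server terminated", "err", err)
        }
    }()
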
@@ -138,7 +138,7 @@ func setup() {
wm.SetLogger(tcpLogger)
mux.HandleFunc(websocketEndpoint, wm.WebsocketHandler)
config := server.DefaultConfig()
listener1, err := server.Listen(tcpAddr, config)
listener1, err := server.Listen(tcpAddr, config.MaxOpenConnections)
if err != nil {
panic(err)
}
@@ -154,7 +154,7 @@ func setup() {
wm = server.NewWebsocketManager(Routes)
wm.SetLogger(unixLogger)
mux2.HandleFunc(websocketEndpoint, wm.WebsocketHandler)
listener2, err := server.Listen(unixAddr, config)
listener2, err := server.Listen(unixAddr, config.MaxOpenConnections)
if err != nil {
panic(err)
}

@@ -258,7 +258,7 @@ func (h maxBytesHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {

// Listen starts a new net.Listener on the given address.
// It returns an error if the address is invalid or the call to Listen() fails.
func Listen(addr string, config *Config) (listener net.Listener, err error) {
func Listen(addr string, maxOpenConnections int) (listener net.Listener, err error) {
parts := strings.SplitN(addr, "://", 2)
if len(parts) != 2 {
return nil, fmt.Errorf(
@@ -271,8 +271,8 @@ func Listen(addr string, config *Config) (listener net.Listener, err error) {
if err != nil {
return nil, fmt.Errorf("failed to listen on %v: %v", addr, err)
}
if config.MaxOpenConnections > 0 {
listener = netutil.LimitListener(listener, config.MaxOpenConnections)
if maxOpenConnections > 0 {
listener = netutil.LimitListener(listener, maxOpenConnections)
}

return listener, nil

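Editor's note (hedged sketch, not part of the diff): Listen now takes the connection cap directly instead of the whole *Config, so callers pass config.MaxOpenConnections themselves:

    cfg := server.DefaultConfig()
    ln, err := server.Listen("tcp://127.0.0.1:26657", cfg.MaxOpenConnections)
    if err != nil {
        return err
    }
    defer ln.Close()
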