Mirror of https://github.com/tendermint/tendermint.git
Synced 2026-01-13 08:12:50 +00:00

Comparing commits: `cal/node-c` ... `wb/add-mul` (84 commits)
| SHA1 |
|---|
| f468eefed5 |
| dcd2705f30 |
| b1fbe26ffa |
| 9dd34ce4a1 |
| bbcd128846 |
| 8b7ae932ff |
| 82ec855be4 |
| 6878b38812 |
| 1a5d5ed63b |
| 189d120c9f |
| 8895854e95 |
| de29f6e46d |
| 9998dff847 |
| 2f4f224faa |
| 72e686352c |
| dfb6589d8c |
| 42c2747817 |
| 6e4f57120a |
| 365af06517 |
| 7a3697fb30 |
| 3324f49fb7 |
| a76f742107 |
| ac48630fdb |
| 49502dae92 |
| c3302b0dc9 |
| 5ba0d131c4 |
| 654e5652e4 |
| 0952a99f45 |
| d09f4f503d |
| ba84060b07 |
| 739b92bf01 |
| 1c0995c809 |
| 17c94bb0dc |
| 21b2801c60 |
| 18d38dd409 |
| b8975ccff6 |
| 4290ad2982 |
| 0cbecba3d6 |
| 7efdb6362d |
| fee53a074d |
| 790132a7d8 |
| fd49683920 |
| da204d371d |
| 9d01a6880e |
| 8fd66a6e8d |
| f7bb0659be |
| 4af7568f99 |
| 3e766984a0 |
| 2d036c59fe |
| 12f0c4a624 |
| 7769467012 |
| d16f17569f |
| cc0c478c14 |
| b9dcddd07a |
| 34ca3fb474 |
| fc8df9a151 |
| b85e13aa0c |
| 20ffa4fd32 |
| f9bfdf4ce2 |
| 2b4436d1b4 |
| 627b77693f |
| 755e1474b1 |
| b07e1fae89 |
| ae164bf533 |
| eb14a9564a |
| a7dc8aaf91 |
| d324430f82 |
| 99a7ac84dc |
| f12588aab1 |
| d534285bfe |
| ffae184b62 |
| c6a0dc8559 |
| 3aa6c816e5 |
| ff0f98892f |
| 0beac722b0 |
| 45071d1f23 |
| 5a9a84eb02 |
| f008a275d1 |
| 816c6bac00 |
| 9ec9085678 |
| f58ba4d2f9 |
| 6bde634be2 |
| d704c0a0b6 |
| 629cdc7f3d |
.github/CODEOWNERS (vendored, 4 changes)

@@ -7,6 +7,6 @@
 # global owners are only requested if there isn't a more specific
 # codeowner specified below. For this reason, the global codeowners
 # are often repeated in package-level definitions.
-* @ebuchman @tendermint/tendermint-engineering
+* @ebuchman @tendermint/tendermint-engineering @adizere @lasarojc

-/spec @ebuchman @tendermint/tendermint-research @tendermint/tendermint-engineering
+/spec @ebuchman @tendermint/tendermint-research @tendermint/tendermint-engineering @adizere @lasarojc
.github/dependabot.yml (vendored, 6 changes)

@@ -30,12 +30,6 @@ updates:
       - T:dependencies
       - S:automerge

-  - package-ecosystem: npm
-    directory: "/docs"
-    schedule:
-      interval: weekly
-    open-pull-requests-limit: 10
-
 ###################################
 ##
 ## Update All Go Dependencies
.github/workflows/docs-deployment.yml (vendored, 2 changes)

@@ -35,6 +35,8 @@ jobs:
           # versions will be available for the build.
           fetch-depth: 0
       - name: Build documentation
+        env:
+          NODE_OPTIONS: "--openssl-legacy-provider"
         run: |
           git config --global --add safe.directory "$PWD"
           make build-docs
.github/workflows/e2e-manual.yml (vendored, 2 changes)

@@ -29,7 +29,7 @@ jobs:
       - name: Generate testnets
        working-directory: test/e2e
        # When changing -g, also change the matrix groups above
-        run: ./build/generator -g 5 -d networks/nightly/
+        run: ./generate-multiversion.sh -g 5 -d networks/nightly/

      - name: Run ${{ matrix.p2p }} p2p testnets
        working-directory: test/e2e
.github/workflows/e2e-nightly-34x.yml (vendored, 10 changes)

@@ -30,8 +30,7 @@ jobs:
      - name: Capture git repo info
        id: git-info
        run: |
-          echo "::set-output name=branch::`git branch --show-current`"
-          echo "::set-output name=commit::`git rev-parse HEAD`"
+          echo "branch=`git branch --show-current`" >> $GITHUB_OUTPUT

      - name: Build
        working-directory: test/e2e

@@ -41,7 +40,7 @@ jobs:
      - name: Generate testnets
        working-directory: test/e2e
        # When changing -g, also change the matrix groups above
-        run: ./build/generator -g 2 -d networks/nightly
+        run: ./generate-multiversion.sh -g 2 -d networks/nightly

      - name: Run testnets in group ${{ matrix.group }}
        working-directory: test/e2e

@@ -49,7 +48,6 @@ jobs:
    outputs:
      git-branch: ${{ steps.git-info.outputs.branch }}
-      git-commit: ${{ steps.git-info.outputs.commit }}

  e2e-nightly-fail:
    needs: e2e-nightly-test

@@ -63,7 +61,7 @@ jobs:
          SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
          BRANCH: ${{ needs.e2e-nightly-test.outputs.git-branch }}
          RUN_URL: "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
-          COMMIT_URL: "${{ github.server_url }}/${{ github.repository }}/commit/${{ needs.e2e-nightly-test.outputs.git-commit }}"
+          COMMITS_URL: "${{ github.server_url }}/${{ github.repository }}/commits/${{ needs.e2e-nightly-test.outputs.git-branch }}"
        with:
          payload: |
            {

@@ -72,7 +70,7 @@ jobs:
                  "type": "section",
                  "text": {
                    "type": "mrkdwn",
-                    "text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMIT_URL }}|commit> related to the failure."
+                    "text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMITS_URL }}|latest commits> possibly related to the failure."
                  }
                }
              ]
.github/workflows/e2e-nightly-37x.yml (vendored, 10 changes)

@@ -30,8 +30,7 @@ jobs:
      - name: Capture git repo info
        id: git-info
        run: |
-          echo "::set-output name=branch::`git branch --show-current`"
-          echo "::set-output name=commit::`git rev-parse HEAD`"
+          echo "branch=`git branch --show-current`" >> $GITHUB_OUTPUT

      - name: Build
        working-directory: test/e2e

@@ -41,7 +40,7 @@ jobs:
      - name: Generate testnets
        working-directory: test/e2e
        # When changing -g, also change the matrix groups above
-        run: ./build/generator -g 5 -d networks/nightly/
+        run: ./generate-multiversion.sh -g 5 -d networks/nightly/

      - name: Run ${{ matrix.p2p }} p2p testnets
        working-directory: test/e2e

@@ -49,7 +48,6 @@ jobs:
    outputs:
      git-branch: ${{ steps.git-info.outputs.branch }}
-      git-commit: ${{ steps.git-info.outputs.commit }}

  e2e-nightly-fail:
    needs: e2e-nightly-test

@@ -63,7 +61,7 @@ jobs:
          SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
          BRANCH: ${{ needs.e2e-nightly-test.outputs.git-branch }}
          RUN_URL: "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
-          COMMIT_URL: "${{ github.server_url }}/${{ github.repository }}/commit/${{ needs.e2e-nightly-test.outputs.git-commit }}"
+          COMMITS_URL: "${{ github.server_url }}/${{ github.repository }}/commits/${{ needs.e2e-nightly-test.outputs.git-branch }}"
        with:
          payload: |
            {

@@ -72,7 +70,7 @@ jobs:
                  "type": "section",
                  "text": {
                    "type": "mrkdwn",
-                    "text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMIT_URL }}|commit> related to the failure."
+                    "text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMITS_URL }}|latest commits> possibly related to the failure."
                  }
                }
              ]
.github/workflows/e2e-nightly-main.yml (vendored, 6 changes)

@@ -34,7 +34,7 @@ jobs:
      - name: Generate testnets
        working-directory: test/e2e
        # When changing -g, also change the matrix groups above
-        run: ./build/generator -g 5 -d networks/nightly/
+        run: ./generate-multiversion.sh -g 5 -d networks/nightly/

      - name: Run ${{ matrix.p2p }} p2p testnets
        working-directory: test/e2e

@@ -52,7 +52,7 @@ jobs:
          SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
          BRANCH: ${{ github.ref_name }}
          RUN_URL: "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
-          COMMIT_URL: "${{ github.server_url }}/${{ github.repository }}/commit/${{ github.sha }}"
+          COMMITS_URL: "${{ github.server_url }}/${{ github.repository }}/commits/${{ github.ref_name }}"
        with:
          payload: |
            {

@@ -61,7 +61,7 @@ jobs:
                  "type": "section",
                  "text": {
                    "type": "mrkdwn",
-                    "text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMIT_URL }}|commit> related to the failure."
+                    "text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMITS_URL }}|latest commits> possibly related to the failure."
                  }
                }
              ]
.github/workflows/fuzz-nightly.yml (vendored, 2 changes)

@@ -64,7 +64,7 @@ jobs:

      - name: Set crashers count
        working-directory: test/fuzz
-        run: echo "::set-output name=count::$(find . -type d -name 'crashers' | xargs -I % sh -c 'ls % | wc -l' | awk '{total += $1} END {print total}')"
+        run: echo "count=$(find . -type d -name 'crashers' | xargs -I % sh -c 'ls % | wc -l' | awk '{total += $1} END {print total}')" >> $GITHUB_OUTPUT
        id: set-crashers-count

    outputs:
.github/workflows/govulncheck.yml (vendored, new file, 31 lines)

@@ -0,0 +1,31 @@
+name: Check for Go vulnerabilities
+# Runs https://pkg.go.dev/golang.org/x/vuln/cmd/govulncheck to proactively
+# check for vulnerabilities in code packages if there were any changes made to
+# any Go code or dependencies.
+#
+# Run `make vulncheck` from the root of the repo to run this workflow locally.
+on:
+  pull_request:
+  push:
+    branches:
+      - main
+      - release/**
+
+jobs:
+  govulncheck:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/setup-go@v3
+        with:
+          go-version: "1.18"
+      - uses: actions/checkout@v3
+      - uses: technote-space/get-diff-action@v6
+        with:
+          PATTERNS: |
+            **/*.go
+            go.mod
+            go.sum
+            Makefile
+      - name: govulncheck
+        run: make vulncheck
+        if: "env.GIT_DIFF != ''"
.github/workflows/pre-release.yml (vendored, 6 changes)

@@ -21,7 +21,7 @@ jobs:
          go-version: '1.18'

      - name: Build
-        uses: goreleaser/goreleaser-action@v3
+        uses: goreleaser/goreleaser-action@v4
        if: ${{ github.event_name == 'pull_request' }}
        with:
          version: latest

@@ -31,7 +31,7 @@ jobs:
      - run: echo https://github.com/tendermint/tendermint/blob/${GITHUB_REF#refs/tags/}/CHANGELOG_PENDING.md > ../release_notes.md

      - name: Release
-        uses: goreleaser/goreleaser-action@v3
+        uses: goreleaser/goreleaser-action@v4
        if: startsWith(github.ref, 'refs/tags/')
        with:
          version: latest

@@ -45,7 +45,7 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Notify Slack upon pre-release
-        uses: slackapi/slack-github-action@v1.22.0
+        uses: slackapi/slack-github-action@v1.23.0
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
          SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
.github/workflows/proto-lint.yml (vendored, 2 changes)

@@ -15,7 +15,7 @@ jobs:
    timeout-minutes: 5
    steps:
      - uses: actions/checkout@v3
-      - uses: bufbuild/buf-setup-action@v1.9.0
+      - uses: bufbuild/buf-setup-action@v1.10.0
      - uses: bufbuild/buf-lint-action@v1
        with:
          input: 'proto'
.github/workflows/release.yml (vendored, 6 changes)

@@ -19,7 +19,7 @@ jobs:
          go-version: '1.18'

      - name: Build
-        uses: goreleaser/goreleaser-action@v3
+        uses: goreleaser/goreleaser-action@v4
        if: ${{ github.event_name == 'pull_request' }}
        with:
          version: latest

@@ -28,7 +28,7 @@ jobs:
      - run: echo https://github.com/tendermint/tendermint/blob/${GITHUB_REF#refs/tags/}/CHANGELOG.md#${GITHUB_REF#refs/tags/} > ../release_notes.md

      - name: Release
-        uses: goreleaser/goreleaser-action@v3
+        uses: goreleaser/goreleaser-action@v4
        if: startsWith(github.ref, 'refs/tags/')
        with:
          version: latest

@@ -42,7 +42,7 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Notify Slack upon release
-        uses: slackapi/slack-github-action@v1.22.0
+        uses: slackapi/slack-github-action@v1.23.0
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
          SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
@@ -33,7 +33,7 @@ jobs:
          if [[ $VERSION =~ ^v[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then
            TAGS="$TAGS,${DOCKER_IMAGE}:${VERSION}"
          fi
-          echo ::set-output name=tags::${TAGS}
+          echo "tags=${TAGS}" >> $GITHUB_OUTPUT

      - name: Set up QEMU
        uses: docker/setup-qemu-action@master
.github/workflows/testapp-docker.yml (vendored, new file, 60 lines)

@@ -0,0 +1,60 @@
+name: Docker E2E Node
+# Build & Push rebuilds the e2e Testapp docker image on every push to main and creation of tags
+# and pushes the image to https://hub.docker.com/r/tendermint/e2e-node
+on:
+  push:
+    branches:
+      - main
+    tags:
+      - "v[0-9]+.[0-9]+.[0-9]+"               # Push events to matching v*, i.e. v1.0, v20.15.10
+      - "v[0-9]+.[0-9]+.[0-9]+-alpha.[0-9]+"  # e.g. v0.37.0-alpha.1, v0.38.0-alpha.10
+      - "v[0-9]+.[0-9]+.[0-9]+-beta.[0-9]+"   # e.g. v0.37.0-beta.1, v0.38.0-beta.10
+      - "v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+"      # e.g. v0.37.0-rc1, v0.38.0-rc10
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - name: Prepare
+        id: prep
+        run: |
+          DOCKER_IMAGE=tendermint/e2e-node
+          VERSION=noop
+          if [[ $GITHUB_REF == refs/tags/* ]]; then
+            VERSION=${GITHUB_REF#refs/tags/}
+          elif [[ $GITHUB_REF == refs/heads/* ]]; then
+            VERSION=$(echo ${GITHUB_REF#refs/heads/} | sed -r 's#/+#-#g')
+            if [ "${{ github.event.repository.default_branch }}" = "$VERSION" ]; then
+              VERSION=latest
+            fi
+          fi
+          TAGS="${DOCKER_IMAGE}:${VERSION}"
+          if [[ $VERSION =~ ^v[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then
+            TAGS="$TAGS,${DOCKER_IMAGE}:${VERSION}"
+          fi
+          echo "tags=${TAGS}" >> $GITHUB_OUTPUT
+
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@master
+        with:
+          platforms: all
+
+      - name: Set up Docker Build
+        uses: docker/setup-buildx-action@v2.2.1
+
+      - name: Login to DockerHub
+        if: ${{ github.event_name != 'pull_request' }}
+        uses: docker/login-action@v2.1.0
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+
+      - name: Publish to Docker Hub
+        uses: docker/build-push-action@v3.2.0
+        with:
+          context: .
+          file: ./test/e2e/docker/Dockerfile
+          platforms: linux/amd64,linux/arm64
+          push: ${{ github.event_name != 'beep_boop' }}
+          tags: ${{ steps.prep.outputs.tags }}
CHANGELOG.md (58 changes)

@@ -2,6 +2,64 @@

 Friendly reminder, we have a [bug bounty program](https://hackerone.com/cosmos).

+## v0.34.24
+
+*Nov 22, 2022*
+
+Apart from one minor bug fix, this release aims to optimize the output of the
+RPC (both HTTP and WebSocket endpoints). See our [upgrading
+guidelines](./UPGRADING.md#v03424) for more details.
+
+### IMPROVEMENTS
+
+- `[rpc]` [\#9724](https://github.com/tendermint/tendermint/issues/9724) Remove
+  useless whitespace in RPC output (@adizere, @thanethomson)
+
+### BUG FIXES
+
+- `[rpc]` [\#9692](https://github.com/tendermint/tendermint/issues/9692) Remove
+  `Cache-Control` header response from `/check_tx` endpoint (@JayT106)
+
+## v0.34.23
+
+*Nov 9, 2022*
+
+This release introduces some new Prometheus metrics to help in determining what
+kinds of messages are consuming the most P2P bandwidth. This builds towards our
+broader goal of optimizing Tendermint bandwidth consumption, and will give us
+meaningful insights once we can establish these metrics for a number of chains.
+
+We now also return `Cache-Control` headers for select RPC endpoints to help
+facilitate caching.
+
+Special thanks to external contributors on this release: @JayT106
+
+### IMPROVEMENTS
+
+- `[p2p]` [\#9641](https://github.com/tendermint/tendermint/issues/9641) Add new
+  Envelope type and associated methods for sending and receiving Envelopes
+  instead of raw bytes. This also adds new metrics,
+  `tendermint_p2p_message_send_bytes_total` and
+  `tendermint_p2p_message_receive_bytes_total`, that expose how many bytes of
+  each message type have been sent.
+- `[rpc]` [\#9666](https://github.com/tendermint/tendermint/issues/9666) Enable
+  caching of RPC responses (@JayT106)
+
+The following RPC endpoints will return `Cache-Control` headers with a maximum
+age of 1 day:
+
+- `/abci_info`
+- `/block`, if `height` is supplied
+- `/block_by_hash`
+- `/block_results`, if `height` is supplied
+- `/blockchain`
+- `/check_tx`
+- `/commit`, if `height` is supplied
+- `/consensus_params`, if `height` is supplied
+- `/genesis`
+- `/genesis_chunked`
+- `/tx`
+- `/validators`, if `height` is supplied
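The cacheable-endpoint list above is easy to verify from a Go client. A minimal, self-contained sketch, assuming a locally running node with the RPC on the default port 26657 (the endpoint and the expected max-age of one day come from the entry above):

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// /abci_info is one of the endpoints listed above as cacheable.
	resp, err := http.Get("http://localhost:26657/abci_info")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// For cacheable endpoints this header should report a max-age of one day.
	fmt.Println("Cache-Control:", resp.Header.Get("Cache-Control"))
}
```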

 ## v0.34.22

 This release includes several bug fixes, [one of
@@ -12,26 +12,40 @@

 - Go API
   - [p2p] \#9625 Remove unused p2p/trust package (@cmwaters)
   - [rpc] \#9655 Remove global environment and replace with constructor. (@williambanfield,@tychoish)
   - [node] \#9655 Move DBContext and DBProvider from the node package to the config package. (@williambanfield,@tychoish)

 - Blockchain Protocol

 - Data Storage
   - [state] \#6541 Move pruneBlocks from consensus/state to state/execution. (@JayT106)

 - Tooling
   - [tools/tm-signer-harness] \#6498 Set OS home dir to instead of the hardcoded PATH. (@JayT106)
   - [metrics] \#9682 move state-syncing and block-syncing metrics to their respective packages (@cmwaters);
     labels have moved from block_syncing -> blocksync_syncing and state_syncing -> statesync_syncing
   - [inspect] \#9655 Add a new `inspect` command for introspecting the state and block store of a crashed tendermint node. (@williambanfield)

 ### FEATURES

 - [proxy] \#9830 Introduce `NewUnsyncLocalClientCreator`, which allows local
   ABCI clients to have the same concurrency model as remote clients (i.e. one
   mutex per client "connection", for each of the four ABCI "connections").
 - [config] \#9680 Introduce `BootstrapPeers` to the config to allow nodes to list peers to be added to
   the addressbook upon start up (@cmwaters)
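The `NewUnsyncLocalClientCreator` entry above lends itself to a short illustration. This is only a sketch based on the entry's description of \#9830, not a definitive usage pattern; it assumes the creator is accepted anywhere a `proxy.ClientCreator` is, such as when constructing a node:

```go
package main

import (
	"github.com/tendermint/tendermint/abci/example/kvstore"
	"github.com/tendermint/tendermint/proxy"
)

func main() {
	app := kvstore.NewApplication()

	// Unlike a plain local client creator, which serializes the four ABCI
	// "connections" behind one shared mutex, each client created here gets
	// its own mutex, matching the concurrency model of remote clients.
	clientCreator := proxy.NewUnsyncLocalClientCreator(app)

	// Pass to node construction in place of proxy.NewLocalClientCreator(app).
	_ = clientCreator
}
```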

 ### IMPROVEMENTS

 - [pubsub] \#7319 Performance improvements for the event query API (@creachadair)
 - [p2p/pex] \#6509 Improve addrBook.hash performance (@cuonglm)
 - [crypto/merkle] \#6443 & \#6513 Improve HashAlternatives performance (@cuonglm, @marbar3778)
 - [rpc] \#9650 Enable caching of RPC responses (@JayT106)
 - [consensus] \#9760 Save peer LastCommit correctly to achieve 50% reduction in gossiped precommits. (@williambanfield)

 ### BUG FIXES

 - [docker] \#9462 ensure Docker image uses consistent version of Go
 - [abci-cli] \#9717 fix broken abci-cli help command

 ## v0.37.0

@@ -97,4 +111,4 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi

 - [consensus] \#9229 fix round number of `enterPropose` when handling `RoundStepNewRound` timeout. (@fatcat22)
 - [docker] \#9073 enable cross platform build using docker buildx
 - [blocksync] \#9518 handle the case when the sending queue is full: retry block request after a timeout
 - [blocksync] \#9518 handle the case when the sending queue is full: retry block request after a timeout
Makefile (4 changes)

@@ -274,6 +274,10 @@ lint:
 	@go run github.com/golangci/golangci-lint/cmd/golangci-lint run
 .PHONY: lint

+vulncheck:
+	@go run golang.org/x/vuln/cmd/govulncheck@latest ./...
+.PHONY: vulncheck
+
 DESTINATION = ./index.html.md

 ###############################################################################
README.md (13 changes)

@@ -113,10 +113,15 @@ For more information on upgrading, see [UPGRADING.md](./UPGRADING.md).

 ### Supported Versions

-Because we are a small core team, we only ship patch updates, including security
-updates, to the most recent minor release and the second-most recent minor
-release. Consequently, we strongly recommend keeping Tendermint up-to-date.
-Upgrading instructions can be found in [UPGRADING.md](./UPGRADING.md).
+Because we are a small core team, we have limited capacity to ship patch
+updates, including security updates. Consequently, we strongly recommend keeping
+Tendermint up-to-date. Upgrading instructions can be found in
+[UPGRADING.md](./UPGRADING.md).
+
+Currently supported versions include:
+
+- v0.34.x
+- v0.37.x (release candidate)

 ## Resources
@@ -98,7 +98,7 @@ Sometimes it's necessary to rename libraries to avoid naming collisions or ambig

 * Make use of table driven testing where possible and not-cumbersome
   * [Inspiration](https://dave.cheney.net/2013/06/09/writing-table-driven-tests-in-go)
 * Make use of [assert](https://godoc.org/github.com/stretchr/testify/assert) and [require](https://godoc.org/github.com/stretchr/testify/require)
-* When using mocks, it is recommended to use Testify [mock] (<https://pkg.go.dev/github.com/stretchr/testify/mock>
+* When using mocks, it is recommended to use Testify [mock](<https://pkg.go.dev/github.com/stretchr/testify/mock>
 ) along with [Mockery](https://github.com/vektra/mockery) for autogeneration

 ## Errors
UPGRADING.md (17 changes)

@@ -5,6 +5,15 @@ Tendermint Core.

 ## Unreleased

+## Config Changes
+
+* A new config field, `BootstrapPeers`, has been introduced as a means of
+  adding a list of addresses to the addressbook upon initializing a node. This is an
+  alternative to `PersistentPeers`. `PersistentPeers` should only be used for
+  nodes that you want to keep a constant connection with, i.e. sentry nodes.
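A sketch of what this looks like in practice; the field and key names (`BootstrapPeers`, `bootstrap_peers`, `p2p.bootstrap_peers`) are taken from the config changes later in this diff, and the peer ID and address below are placeholders:

```go
package main

import "github.com/tendermint/tendermint/config"

func main() {
	cfg := config.DefaultConfig()

	// Comma-delimited ID@host:port entries, added to the address book at
	// startup. Equivalent to `bootstrap_peers` in config.toml or the
	// --p2p.bootstrap_peers flag.
	cfg.P2P.BootstrapPeers = "f9baeaa15fedf5e1ef7448dd60f46c01f1a9e9c4@203.0.113.10:26656"
}
```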

 ----

 ### ABCI Changes

 * The `ABCIVersion` is now `1.0.0`.

@@ -32,6 +41,14 @@ Tendermint Core.
 this should have no effect on the wire-level encoding for UTF8-encoded
 strings.

+## v0.34.24
+
+Note that in [\#9724](https://github.com/tendermint/tendermint/pull/9724) we
+un-prettified the JSON output (i.e. removed all indentation) of the HTTP and
+WebSocket RPC for performance and subscription stability reasons. We recommend
+using a tool such as [jq](https://github.com/stedolan/jq) to obtain prettified
+output if you rely on that prettified output in some way.
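For Go clients that relied on the prettified output, the standard library can re-indent the now-compact responses; a minimal, self-contained sketch:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	// A compact response body, as returned by the RPC after #9724.
	compact := []byte(`{"jsonrpc":"2.0","id":-1,"result":{"ok":true}}`)

	var pretty bytes.Buffer
	if err := json.Indent(&pretty, compact, "", "  "); err != nil {
		panic(err)
	}
	fmt.Println(pretty.String())
}
```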

 ## v0.34.20

 ### Feature: Priority Mempool
@@ -6,8 +6,6 @@ import (
 	tmsync "github.com/tendermint/tendermint/libs/sync"
 )

-var _ Client = (*localClient)(nil)
-
 // NOTE: use defer to unlock mutex because Application might panic (e.g., in
 // case of malicious tx or query). It only makes sense for publicly exposed
 // methods like CheckTx (/broadcast_tx_* RPC endpoint) or Query (/abci_query

@@ -24,8 +22,6 @@ var _ Client = (*localClient)(nil)

 // NewLocalClient creates a local client, which will be directly calling the
 // methods of the given app.
-//
-// Both Async and Sync methods ignore the given context.Context parameter.
 func NewLocalClient(mtx *tmsync.Mutex, app types.Application) Client {
 	if mtx == nil {
 		mtx = new(tmsync.Mutex)

@@ -309,7 +305,8 @@ func (app *localClient) OfferSnapshotSync(req types.RequestOfferSnapshot) (*type
 }

 func (app *localClient) LoadSnapshotChunkSync(
-	req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
+	req types.RequestLoadSnapshotChunk,
+) (*types.ResponseLoadSnapshotChunk, error) {
 	app.mtx.Lock()
 	defer app.mtx.Unlock()

@@ -318,7 +315,8 @@ func (app *localClient) LoadSnapshotChunkSync(
 }

 func (app *localClient) ApplySnapshotChunkSync(
-	req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
+	req types.RequestApplySnapshotChunk,
+) (*types.ResponseApplySnapshotChunk, error) {
 	app.mtx.Lock()
 	defer app.mtx.Unlock()
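A usage sketch for the `NewLocalClient` signature shown above (the signature comes from this diff; the kvstore application is just a convenient stand-in):

```go
package main

import (
	abcicli "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/example/kvstore"
)

func main() {
	app := kvstore.NewApplication()

	// Passing nil makes the client allocate its own mutex; callers that want
	// several local clients serialized together can share one explicitly.
	client := abcicli.NewLocalClient(nil, app)
	_ = client
}
```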

@@ -54,7 +54,7 @@ var RootCmd = &cobra.Command{
 	PersistentPreRunE: func(cmd *cobra.Command, args []string) error {

 		switch cmd.Use {
-		case "kvstore", "version":
+		case "kvstore", "version", "help [command]":
 			return nil
 		}

@@ -460,11 +460,14 @@ func cmdUnimplemented(cmd *cobra.Command, args []string) error {

 	fmt.Println("Available commands:")
 	fmt.Printf("%s: %s\n", echoCmd.Use, echoCmd.Short)
-	fmt.Printf("%s: %s\n", infoCmd.Use, infoCmd.Short)
 	fmt.Printf("%s: %s\n", checkTxCmd.Use, checkTxCmd.Short)
-	fmt.Printf("%s: %s\n", deliverTxCmd.Use, deliverTxCmd.Short)
-	fmt.Printf("%s: %s\n", queryCmd.Use, queryCmd.Short)
 	fmt.Printf("%s: %s\n", commitCmd.Use, commitCmd.Short)
+	fmt.Printf("%s: %s\n", deliverTxCmd.Use, deliverTxCmd.Short)
+	fmt.Printf("%s: %s\n", infoCmd.Use, infoCmd.Short)
+	fmt.Printf("%s: %s\n", queryCmd.Use, queryCmd.Short)
+	fmt.Printf("%s: %s\n", prepareProposalCmd.Use, prepareProposalCmd.Short)
+	fmt.Printf("%s: %s\n", processProposalCmd.Use, processProposalCmd.Short)

 	fmt.Println("Use \"[command] --help\" for more information about a command.")

 	return nil
@@ -8,4 +8,5 @@ const (
 	CodeTypeUnauthorized uint32 = 3
 	CodeTypeUnknownError uint32 = 4
 	CodeTypeExecuted     uint32 = 5
+	CodeTypeRejected     uint32 = 6
 )
@@ -122,6 +122,10 @@ func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeli
 }

 func (app *Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
+	if len(req.Tx) == 0 {
+		return types.ResponseCheckTx{Code: code.CodeTypeRejected}
+	}
+
 	if req.Type == types.CheckTxType_Recheck {
 		if _, ok := app.txToRemove[string(req.Tx)]; ok {
 			return types.ResponseCheckTx{Code: code.CodeTypeExecuted, GasWanted: 1}
@@ -70,6 +70,24 @@ func TestKVStoreKV(t *testing.T) {
 	testKVStore(t, kvstore, tx, key, value)
 }

+func TestPersistentKVStoreEmptyTX(t *testing.T) {
+	dir, err := os.MkdirTemp("/tmp", "abci-kvstore-test") // TODO
+	if err != nil {
+		t.Fatal(err)
+	}
+	kvstore := NewPersistentKVStoreApplication(dir)
+	tx := []byte("")
+	reqCheck := types.RequestCheckTx{Tx: tx}
+	resCheck := kvstore.CheckTx(reqCheck)
+	require.Equal(t, resCheck.Code, code.CodeTypeRejected)
+
+	txs := make([][]byte, 0, 4)
+	txs = append(txs, []byte("key=value"), []byte("key"), []byte(""), []byte("kee=value"))
+	reqPrepare := types.RequestPrepareProposal{Txs: txs, MaxTxBytes: 10 * 1024}
+	resPrepare := kvstore.PrepareProposal(reqPrepare)
+	require.Equal(t, len(reqPrepare.Txs), len(resPrepare.Txs)+1, "Empty transaction not properly removed")
+}
+
 func TestPersistentKVStoreKV(t *testing.T) {
 	dir, err := os.MkdirTemp("/tmp", "abci-kvstore-test") // TODO
 	if err != nil {
@@ -324,11 +324,15 @@ func (app *PersistentKVStoreApplication) execPrepareTx(tx []byte) types.Response
 }

 // substPrepareTx substitutes all the transactions prefixed with 'prepare' in the
-// proposal for transactions with the prefix stripped.
+// proposal for transactions with the prefix stripped, while discarding invalid empty transactions.
 func (app *PersistentKVStoreApplication) substPrepareTx(blockData [][]byte, maxTxBytes int64) [][]byte {
 	txs := make([][]byte, 0, len(blockData))
 	var totalBytes int64
 	for _, tx := range blockData {
+		if len(tx) == 0 {
+			continue
+		}
+
 		txMod := tx
 		if isPrepareTx(tx) {
 			txMod = bytes.Replace(tx, []byte(PreparePrefix), []byte(ReplacePrefix), 1)
@@ -38,8 +38,34 @@ function testExample() {
 	rm "${INPUT}".out.new
 }

+function testHelp() {
+	INPUT=$1
+	APP="$2 $3"
+
+	echo "Test: $APP"
+	$APP &> "${INPUT}.new" &
+	sleep 2
+
+	pre=$(shasum < "${INPUT}")
+	post=$(shasum < "${INPUT}.new")
+
+	if [[ "$pre" != "$post" ]]; then
+		echo "You broke the tutorial"
+		echo "Got:"
+		cat "${INPUT}.new"
+		echo "Expected:"
+		cat "${INPUT}"
+		echo "Diff:"
+		diff "${INPUT}" "${INPUT}.new"
+		exit 1
+	fi
+
+	rm "${INPUT}".new
+}
+
 testExample 1 tests/test_cli/ex1.abci abci-cli kvstore
 testExample 2 tests/test_cli/ex2.abci abci-cli kvstore
+testHelp tests/test_cli/testHelp.out abci-cli help

 echo ""
 echo "PASS"
abci/tests/test_cli/testHelp.out (new file, 30 lines)

@@ -0,0 +1,30 @@
+the ABCI CLI tool wraps an ABCI client and is used for testing ABCI servers
+
+Usage:
+  abci-cli [command]
+
+Available Commands:
+  batch            run a batch of abci commands against an application
+  check_tx         validate a transaction
+  commit           commit the application state and return the Merkle root hash
+  completion       Generate the autocompletion script for the specified shell
+  console          start an interactive ABCI console for multiple commands
+  deliver_tx       deliver a new transaction to the application
+  echo             have the application echo a message
+  help             Help about any command
+  info             get some info about the application
+  kvstore          ABCI demo example
+  prepare_proposal prepare proposal
+  process_proposal process proposal
+  query            query the application state
+  test             run integration tests
+  version          print ABCI console version
+
+Flags:
+      --abci string        either socket or grpc (default "socket")
+      --address string     address of application socket (default "tcp://0.0.0.0:26658")
+  -h, --help               help for abci-cli
+      --log_level string   set the logger level (default "debug")
+  -v, --verbose            print the command and results as if it were a console session
+
+Use "abci-cli [command] --help" for more information about a command.
@@ -4,6 +4,7 @@ import (
 	"io"

+	"github.com/cosmos/gogoproto/proto"

 	"github.com/tendermint/tendermint/libs/protoio"
 )
@@ -99,9 +99,6 @@ func NewBlockPool(start int64, requestsCh chan<- BlockRequest, errorsCh chan<- p
 // OnStart implements service.Service by spawning requesters routine and recording
 // pool's start time.
 func (pool *BlockPool) OnStart() error {
-	if pool.height == 0 {
-		return errors.New("height not set")
-	}
 	go pool.makeRequestersRoutine()
 	pool.startTime = time.Now()
 	return nil

@@ -114,19 +111,22 @@ func (pool *BlockPool) makeRequestersRoutine() {
 			break
 		}

-		height, maxPeerHeight, numPending, lenRequesters := pool.GetStatus()
-		if height >= maxPeerHeight ||
-			numPending >= maxPendingRequests ||
-			lenRequesters >= maxTotalRequesters {
+		_, numPending, lenRequesters := pool.GetStatus()
+		switch {
+		case numPending >= maxPendingRequests:
 			// sleep for a bit.
 			time.Sleep(requestIntervalMS * time.Millisecond)
 			// check for timed out peers
 			pool.removeTimedoutPeers()
-			continue
+		case lenRequesters >= maxTotalRequesters:
+			// sleep for a bit.
+			time.Sleep(requestIntervalMS * time.Millisecond)
+			// check for timed out peers
+			pool.removeTimedoutPeers()
+		default:
+			// request for more blocks.
+			pool.makeNextRequester()
 		}
-
-		// request for more blocks.
-		pool.makeNextRequester()
 	}
 }

@@ -156,11 +156,11 @@ func (pool *BlockPool) removeTimedoutPeers() {

 // GetStatus returns pool's height, numPending requests and the number of
 // requesters.
-func (pool *BlockPool) GetStatus() (height, maxPeerHeight int64, numPending int32, lenRequesters int) {
+func (pool *BlockPool) GetStatus() (height int64, numPending int32, lenRequesters int) {
 	pool.mtx.Lock()
 	defer pool.mtx.Unlock()

-	return pool.height, pool.maxPeerHeight, atomic.LoadInt32(&pool.numPending), len(pool.requesters)
+	return pool.height, atomic.LoadInt32(&pool.numPending), len(pool.requesters)
 }

 // IsCaughtUp returns true if this node is caught up, false - otherwise.

@@ -302,7 +302,6 @@ func (pool *BlockPool) SetPeerRange(peerID p2p.ID, base int64, height int64) {
 	}

 	if height > pool.maxPeerHeight {
-		pool.Logger.Info("new max peer height", "height", height)
 		pool.maxPeerHeight = height
 	}
 }

@@ -389,7 +388,7 @@ func (pool *BlockPool) makeNextRequester() {

 	err := request.Start()
 	if err != nil {
-		pool.Logger.Error("Error starting request", "err", err)
+		request.Logger.Error("Error starting request", "err", err)
 	}
 }
@@ -32,7 +32,7 @@ const (
 type consensusReactor interface {
 	// for when we switch from blocksync reactor and block sync to
 	// the consensus machine
-	SwitchToConsensus(state sm.State, skipWAL bool) error
+	SwitchToConsensus(state sm.State, skipWAL bool)
 }

 type peerError struct {

@@ -54,6 +54,7 @@ type Reactor struct {
 	blockExec *sm.BlockExecutor
 	store     *store.BlockStore
 	pool      *BlockPool
+	blockSync bool

 	requestsCh <-chan BlockRequest
 	errorsCh   <-chan peerError

@@ -62,7 +63,9 @@
 }

 // NewReactor returns new reactor instance.
-func NewReactor(state sm.State, blockExec *sm.BlockExecutor, store *store.BlockStore, metrics *Metrics) *Reactor {
+func NewReactor(state sm.State, blockExec *sm.BlockExecutor, store *store.BlockStore,
+	blockSync bool, metrics *Metrics) *Reactor {
+
 	if state.LastBlockHeight != store.Height() {
 		panic(fmt.Sprintf("state (%v) and store (%v) height mismatch", state.LastBlockHeight,
 			store.Height()))

@@ -84,6 +87,7 @@ func NewReactor(state sm.State, blockExec *sm.BlockExecutor, store *store.BlockS
 		blockExec:  blockExec,
 		store:      store,
 		pool:       pool,
+		blockSync:  blockSync,
 		requestsCh: requestsCh,
 		errorsCh:   errorsCh,
 		metrics:    metrics,

@@ -100,22 +104,22 @@ func (bcR *Reactor) SetLogger(l log.Logger) {

 // OnStart implements service.Service.
 func (bcR *Reactor) OnStart() error {
 	if bcR.blockSync {
 		err := bcR.pool.Start()
 		if err != nil {
 			return err
 		}
 		go bcR.poolRoutine(false)
 	}
 	return nil
 }

 // IsSyncing returns whether the node is using blocksync to advance heights
 func (bcR *Reactor) IsSyncing() bool {
 	return bcR.pool.IsRunning()
 }

 // SwitchToBlockSync is called by the state sync reactor when switching to block sync.
 func (bcR *Reactor) SwitchToBlockSync(state sm.State) error {
 	bcR.blockSync = true
 	bcR.initialState = state
 	if state.LastBlockHeight == 0 {
 		bcR.pool.height = state.InitialHeight
 	} else {
 		bcR.pool.height = state.LastBlockHeight + 1
 	}

 	bcR.pool.height = state.LastBlockHeight + 1
 	err := bcR.pool.Start()
 	if err != nil {
 		return err

@@ -126,7 +130,7 @@ func (bcR *Reactor) SwitchToBlockSync(state sm.State) error {

 // OnStop implements service.Service.
 func (bcR *Reactor) OnStop() {
-	if bcR.pool.IsRunning() {
+	if bcR.blockSync {
 		if err := bcR.pool.Stop(); err != nil {
 			bcR.Logger.Error("Error stopping pool", "err", err)
 		}

@@ -294,28 +298,24 @@ FOR_LOOP:
 	for {
 		select {
 		case <-switchToConsensusTicker.C:
-			height, peerHeight, numPending, lenRequesters := bcR.pool.GetStatus()
+			height, numPending, lenRequesters := bcR.pool.GetStatus()
 			outbound, inbound, _ := bcR.Switch.NumPeers()
 			bcR.Logger.Debug("Consensus ticker", "numPending", numPending, "total", lenRequesters,
-				"outbound", outbound, "inbound", inbound, "peerHeight", peerHeight)
+				"outbound", outbound, "inbound", inbound)
 			if bcR.pool.IsCaughtUp() {
 				bcR.Logger.Info("Time to switch to consensus reactor!", "height", height)
 				if err := bcR.pool.Stop(); err != nil {
 					bcR.Logger.Error("Error stopping pool", "err", err)
 				}

 				// TODO: node struct should be responsible for switching from block sync to
 				// consensus. It's messy to have to grab the consensus reactor from the switch
 				conR, ok := bcR.Switch.Reactor("CONSENSUS").(consensusReactor)
 				if ok {
-					err := conR.SwitchToConsensus(state, blocksSynced > 0 || stateSynced)
-					bcR.Logger.Error("failed to switch to consensus", "err", err)
+					conR.SwitchToConsensus(state, blocksSynced > 0 || stateSynced)
 				}
-				// else {
-				// should only happen during testing
-				// }

-				return
+				break FOR_LOOP
 			}

 		case <-trySyncTicker.C: // chan time
@@ -95,6 +95,14 @@ func newReactor(
 		mock.Anything,
 		mock.Anything).Return(nil)

+	// Make the Reactor itself.
+	// NOTE we have to create and commit the blocks first because
+	// pool.height is determined from the store.
+	fastSync := true
+	db := dbm.NewMemDB()
+	stateStore = sm.NewStore(db, sm.StoreOptions{
+		DiscardABCIResponses: false,
+	})
 	blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(),
 		mp, sm.EmptyEvidencePool{}, blockStore)
 	if err = stateStore.Save(state); err != nil {

@@ -137,7 +145,7 @@ func newReactor(
 		blockStore.SaveBlock(thisBlock, thisParts, lastCommit)
 	}

-	bcReactor := NewReactor(state.Copy(), blockExec, blockStore, NopMetrics())
+	bcReactor := NewReactor(state.Copy(), blockExec, blockStore, fastSync, NopMetrics())
 	bcReactor.SetLogger(logger.With("module", "blocksync"))

 	return ReactorPair{bcReactor, proxyApp}

@@ -148,9 +156,6 @@ func TestNoBlockResponse(t *testing.T) {
 	defer os.RemoveAll(config.RootDir)
 	genDoc, privVals := randGenesisDoc(1, false, 30)

-	state, err := sm.MakeGenesisState(genDoc)
-	require.NoError(t, err)
-
 	maxBlockHeight := int64(65)

 	reactorPairs := make([]ReactorPair, 2)

@@ -164,12 +169,6 @@ func TestNoBlockResponse(t *testing.T) {

 	}, p2p.Connect2Switches)

-	for _, reactor := range reactorPairs {
-		// turn on the syncing algorithm
-		err := reactor.reactor.SwitchToBlockSync(state)
-		require.NoError(t, err)
-	}
-
 	defer func() {
 		for _, r := range reactorPairs {
 			err := r.reactor.Stop()

@@ -219,9 +218,6 @@ func TestBadBlockStopsPeer(t *testing.T) {
 	defer os.RemoveAll(config.RootDir)
 	genDoc, privVals := randGenesisDoc(1, false, 30)

-	state, err := sm.MakeGenesisState(genDoc)
-	require.NoError(t, err)
-
 	maxBlockHeight := int64(148)

 	// Other chain needs a different validator set

@@ -248,12 +244,6 @@ func TestBadBlockStopsPeer(t *testing.T) {

 	}, p2p.Connect2Switches)

-	for _, reactor := range reactorPairs {
-		// turn on the syncing algorithm
-		err := reactor.reactor.SwitchToBlockSync(state)
-		require.NoError(t, err)
-	}
-
 	defer func() {
 		for _, r := range reactorPairs {
 			err := r.reactor.Stop()

@@ -297,11 +287,6 @@ func TestBadBlockStopsPeer(t *testing.T) {
 		p2p.Connect2Switches(switches, i, len(reactorPairs)-1)
 	}

-	otherState, err := sm.MakeGenesisState(otherGenDoc)
-	require.NoError(t, err)
-	err = lastReactorPair.reactor.SwitchToBlockSync(otherState)
-	require.NoError(t, err)
-
 	for {
 		if lastReactorPair.reactor.pool.IsCaughtUp() || lastReactorPair.reactor.Switch.Peers().Size() == 0 {
 			break
@@ -67,7 +67,7 @@ func copyConfig(home, dir string) error {

 func dumpProfile(dir, addr, profile string, debug int) error {
 	endpoint := fmt.Sprintf("%s/debug/pprof/%s?debug=%d", addr, profile, debug)

-	//nolint:all
+	//nolint:gosec,nolintlint
 	resp, err := http.Get(endpoint)
 	if err != nil {
 		return fmt.Errorf("failed to query for %s profile: %w", profile, err)
cmd/tendermint/commands/inspect.go (new file, 87 lines)

@@ -0,0 +1,87 @@
+package commands
+
+import (
+	"context"
+	"os"
+	"os/signal"
+	"syscall"
+
+	"github.com/spf13/cobra"
+
+	cfg "github.com/tendermint/tendermint/config"
+	"github.com/tendermint/tendermint/inspect"
+	"github.com/tendermint/tendermint/state"
+	"github.com/tendermint/tendermint/state/indexer/block"
+	"github.com/tendermint/tendermint/store"
+	"github.com/tendermint/tendermint/types"
+)
+
+// InspectCmd is the command for starting an inspect server.
+var InspectCmd = &cobra.Command{
+	Use:   "inspect",
+	Short: "Run an inspect server for investigating Tendermint state",
+	Long: `
+inspect runs a subset of Tendermint's RPC endpoints that are useful for debugging
+issues with Tendermint.
+
+When the Tendermint consensus engine detects inconsistent state, it will crash the
+Tendermint process. Tendermint will not start up while in this inconsistent state.
+The inspect command can be used to query the block and state store using Tendermint
+RPC calls to debug issues of inconsistent state.
+`,
+	RunE: runInspect,
+}
+
+func init() {
+	InspectCmd.Flags().
+		String("rpc.laddr",
+			config.RPC.ListenAddress, "RPC listenener address. Port required")
+	InspectCmd.Flags().
+		String("db-backend",
+			config.DBBackend, "database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb")
+	InspectCmd.Flags().
+		String("db-dir", config.DBPath, "database directory")
+}
+
+func runInspect(cmd *cobra.Command, args []string) error {
+	ctx, cancel := context.WithCancel(cmd.Context())
+	defer cancel()
+
+	c := make(chan os.Signal, 1)
+	signal.Notify(c, syscall.SIGTERM, syscall.SIGINT)
+	go func() {
+		<-c
+		cancel()
+	}()
+
+	blockStoreDB, err := cfg.DefaultDBProvider(&cfg.DBContext{ID: "blockstore", Config: config})
+	if err != nil {
+		return err
+	}
+	blockStore := store.NewBlockStore(blockStoreDB)
+	defer blockStore.Close()
+
+	stateDB, err := cfg.DefaultDBProvider(&cfg.DBContext{ID: "state", Config: config})
+	if err != nil {
+		return err
+	}
+	stateStore := state.NewStore(stateDB, state.StoreOptions{DiscardABCIResponses: false})
+	defer stateStore.Close()
+
+	genDoc, err := types.GenesisDocFromFile(config.GenesisFile())
+	if err != nil {
+		return err
+	}
+	txIndexer, blockIndexer, err := block.IndexerFromConfig(config, cfg.DefaultDBProvider, genDoc.ChainID)
+	if err != nil {
+		return err
+	}
+	ins := inspect.New(config.RPC, blockStore, stateStore, txIndexer, blockIndexer, logger)
+
+	logger.Info("starting inspect server")
+	if err := ins.Run(ctx); err != nil {
+		return err
+	}
+	return nil
+}
@@ -66,6 +66,7 @@ func AddNodeFlags(cmd *cobra.Command) {
 	cmd.Flags().String("p2p.external-address", config.P2P.ExternalAddress, "ip:port address to advertise to peers for them to dial")
 	cmd.Flags().String("p2p.seeds", config.P2P.Seeds, "comma-delimited ID@host:port seed nodes")
 	cmd.Flags().String("p2p.persistent_peers", config.P2P.PersistentPeers, "comma-delimited ID@host:port persistent peers")
+	cmd.Flags().String("p2p.bootstrap_peers", config.P2P.BootstrapPeers, "comma-delimited ID@host:port peers to be added to the addressbook on startup")
 	cmd.Flags().String("p2p.unconditional_peer_ids",
 		config.P2P.UnconditionalPeerIDs, "comma-delimited IDs of unconditional peers")
 	cmd.Flags().Bool("p2p.upnp", config.P2P.UPNP, "enable/disable UPNP port forwarding")
@@ -30,6 +30,7 @@ func main() {
 		cmd.VersionCmd,
 		cmd.RollbackStateCmd,
 		cmd.CompactGoLevelDBCmd,
+		cmd.InspectCmd,
 		debug.DebugCmd,
 		cli.NewCompletionCmd(rootCmd, true),
 	)
@@ -548,6 +548,11 @@ type P2PConfig struct { //nolint: maligned
 	// We only use these if we can’t connect to peers in the addrbook
 	Seeds string `mapstructure:"seeds"`

+	// Comma separated list of peers to be added to the peer store
+	// on startup. Either BootstrapPeers or PersistentPeers are
+	// needed for peer discovery
+	BootstrapPeers string `mapstructure:"bootstrap_peers"`
+
 	// Comma separated list of nodes to keep persistent connections to
 	PersistentPeers string `mapstructure:"persistent_peers"`

@@ -708,11 +713,28 @@ type MempoolConfig struct {
 	// Mempool version to use:
 	// 1) "v0" - (default) FIFO mempool.
 	// 2) "v1" - prioritized mempool.
-	Version string `mapstructure:"version"`
-	RootDir string `mapstructure:"home"`
-	Recheck bool `mapstructure:"recheck"`
-	Broadcast bool `mapstructure:"broadcast"`
-	WalPath string `mapstructure:"wal_dir"`
+	Version string `mapstructure:"version"`
+	// RootDir is the root directory for all data. This should be configured via
+	// the $TMHOME env variable or --home cmd flag rather than overriding this
+	// struct field.
+	RootDir string `mapstructure:"home"`
+	// Recheck (default: true) defines whether Tendermint should recheck the
+	// validity for all remaining transaction in the mempool after a block.
+	// Since a block affects the application state, some transactions in the
+	// mempool may become invalid. If this does not apply to your application,
+	// you can disable rechecking.
+	Recheck bool `mapstructure:"recheck"`
+	// Broadcast (default: true) defines whether the mempool should relay
+	// transactions to other peers. Setting this to false will stop the mempool
+	// from relaying transactions to other peers until they are included in a
+	// block. In other words, if Broadcast is disabled, only the peer you send
+	// the tx to will see it until it is included in a block.
+	Broadcast bool `mapstructure:"broadcast"`
+	// WalPath (default: "") configures the location of the Write Ahead Log
+	// (WAL) for the mempool. The WAL is disabled by default. To enable, set
+	// WalPath to where you want the WAL to be written (e.g.
+	// "data/mempool.wal").
+	WalPath string `mapstructure:"wal_dir"`
 	// Maximum number of transactions in the mempool
 	Size int `mapstructure:"size"`
 	// Limit the total size of all txs in the mempool.
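The newly documented mempool knobs can also be set programmatically; a sketch using the field names from the struct above (most operators would set these in config.toml instead):

```go
package main

import "github.com/tendermint/tendermint/config"

func main() {
	cfg := config.DefaultMempoolConfig()

	cfg.Recheck = true   // re-validate remaining txs after every block
	cfg.Broadcast = true // relay txs to peers before they land in a block

	// Empty (the default) disables the mempool WAL.
	cfg.WalPath = "data/mempool.wal"
}
```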
config/db.go (new file, 30 lines)

@@ -0,0 +1,30 @@
+package config
+
+import (
+	"context"
+
+	dbm "github.com/tendermint/tm-db"
+
+	"github.com/tendermint/tendermint/libs/log"
+	"github.com/tendermint/tendermint/libs/service"
+)
+
+// ServiceProvider takes a config and a logger and returns a ready to go Node.
+type ServiceProvider func(context.Context, *Config, log.Logger) (service.Service, error)
+
+// DBContext specifies config information for loading a new DB.
+type DBContext struct {
+	ID     string
+	Config *Config
+}
+
+// DBProvider takes a DBContext and returns an instantiated DB.
+type DBProvider func(*DBContext) (dbm.DB, error)
+
+// DefaultDBProvider returns a database using the DBBackend and DBDir
+// specified in the Config.
+func DefaultDBProvider(ctx *DBContext) (dbm.DB, error) {
+	dbType := dbm.BackendType(ctx.Config.DBBackend)
+
+	return dbm.NewDB(ctx.ID, dbType, ctx.Config.DBDir())
+}
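A usage sketch for `DefaultDBProvider`, mirroring how the new `inspect` command opens its stores earlier in this diff:

```go
package main

import (
	cfg "github.com/tendermint/tendermint/config"
)

func main() {
	conf := cfg.DefaultConfig()

	// Opens (or creates) a database named "blockstore" under conf.DBDir(),
	// using the backend selected by conf.DBBackend.
	db, err := cfg.DefaultDBProvider(&cfg.DBContext{ID: "blockstore", Config: conf})
	if err != nil {
		panic(err)
	}
	defer db.Close()
}
```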

@@ -283,6 +283,11 @@ external_address = "{{ .P2P.ExternalAddress }}"
 # Comma separated list of seed nodes to connect to
 seeds = "{{ .P2P.Seeds }}"

+# Comma separated list of peers to be added to the peer store
+# on startup. Either BootstrapPeers or PersistentPeers are
+# needed for peer discovery
+bootstrap_peers = "{{ .P2P.BootstrapPeers }}"
+
 # Comma separated list of nodes to keep persistent connections to
 persistent_peers = "{{ .P2P.PersistentPeers }}"

@@ -349,8 +354,24 @@ dial_timeout = "{{ .P2P.DialTimeout }}"
 # 2) "v1" - prioritized mempool.
 version = "{{ .Mempool.Version }}"

+# Recheck (default: true) defines whether Tendermint should recheck the
+# validity for all remaining transaction in the mempool after a block.
+# Since a block affects the application state, some transactions in the
+# mempool may become invalid. If this does not apply to your application,
+# you can disable rechecking.
 recheck = {{ .Mempool.Recheck }}

+# Broadcast (default: true) defines whether the mempool should relay
+# transactions to other peers. Setting this to false will stop the mempool
+# from relaying transactions to other peers until they are included in a
+# block. In other words, if Broadcast is disabled, only the peer you send
+# the tx to will see it until it is included in a block.
 broadcast = {{ .Mempool.Broadcast }}

+# WalPath (default: "") configures the location of the Write Ahead Log
+# (WAL) for the mempool. The WAL is disabled by default. To enable, set
+# WalPath to where you want the WAL to be written (e.g.
+# "data/mempool.wal").
 wal_dir = "{{ js .Mempool.WalPath }}"

 # Maximum number of transactions in the mempool

@@ -436,7 +457,7 @@ chunk_fetchers = "{{ .StateSync.ChunkFetchers }}"
 [blocksync]

 # Block Sync version to use:
 #
 #
 # In v0.37, v1 and v2 of the block sync protocols were deprecated.
 # Please use v0 instead.
 #
@@ -46,8 +46,6 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
|
||||
appFunc := newKVStore
|
||||
|
||||
genDoc, privVals := randGenesisDoc(nValidators, false, 30)
|
||||
state, err := sm.MakeGenesisState(genDoc)
|
||||
require.NoError(t, err)
|
||||
css := make([]*State, nValidators)
|
||||
|
||||
for i := 0; i < nValidators; i++ {
|
||||
@@ -56,6 +54,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
|
||||
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
|
||||
DiscardABCIResponses: false,
|
||||
})
|
||||
state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc)
|
||||
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
|
||||
defer os.RemoveAll(thisConfig.RootDir)
|
||||
ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
|
||||
@@ -103,7 +102,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
|
||||
|
||||
// Make State
|
||||
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool, blockStore)
|
||||
cs := NewState(thisConfig.Consensus, blockExec, blockStore, mempool, evpool, WithState(state.Copy()))
|
||||
cs := NewState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool)
|
||||
cs.SetLogger(cs.Logger)
|
||||
// set private validator
|
||||
pv := privVals[i]
|
||||
@@ -126,8 +125,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
|
||||
blocksSubs := make([]types.Subscription, 0)
|
||||
eventBuses := make([]*types.EventBus, nValidators)
|
||||
for i := 0; i < nValidators; i++ {
|
||||
// Note, we dont start the consensus states
|
||||
reactors[i] = NewReactor(css[i])
|
||||
reactors[i] = NewReactor(css[i], true) // so we dont start the consensus states
|
||||
reactors[i].SetLogger(css[i].Logger)
|
||||
|
||||
// eventBus is already started with the cs
|
||||
@@ -256,7 +254,8 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
|
||||
|
||||
// start the consensus reactors
|
||||
for i := 0; i < nValidators; i++ {
|
||||
require.NoError(t, reactors[i].SwitchToConsensus(state.Copy(), false))
|
||||
s := reactors[i].conS.GetState()
|
||||
reactors[i].SwitchToConsensus(s, false)
|
||||
}
|
||||
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
|
||||
|
||||
@@ -315,7 +314,7 @@ func TestByzantineConflictingProposalsWithPartition(t *testing.T) {
N := 4
logger := consensusLogger().With("test", "byzantine")
app := newKVStore
-css, cleanup := randConsensusNet(t, N, "consensus_byzantine_test", newMockTickerFunc(false), app)
+css, cleanup := randConsensusNet(N, "consensus_byzantine_test", newMockTickerFunc(false), app)
defer cleanup()

// give the byzantine validator a normal ticker

@@ -364,8 +363,7 @@ func TestByzantineConflictingProposalsWithPartition(t *testing.T) {
blocksSubs[i], err = eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock)
require.NoError(t, err)

-// Note, we don't start the consensus states
-conR := NewReactor(css[i])
+conR := NewReactor(css[i], true) // so we don't start the consensus states
conR.SetLogger(logger.With("validator", i))
conR.SetEventBus(eventBus)

@@ -409,13 +407,13 @@ func TestByzantineConflictingProposalsWithPartition(t *testing.T) {
// note these must be started before the byz
for i := 1; i < N; i++ {
cr := reactors[i].(*Reactor)
-require.NoError(t, cr.SwitchToConsensus(cr.conS.GetState(), false))
+cr.SwitchToConsensus(cr.conS.GetState(), false)
}

// start the byzantine state machine
byzR := reactors[0].(*ByzantineReactor)
s := byzR.reactor.conS.GetState()
-require.NoError(t, byzR.reactor.SwitchToConsensus(s, false))
+byzR.reactor.SwitchToConsensus(s, false)

// byz proposer sends one block to peers[0]
// and the other block to peers[1] and peers[2].

@@ -594,7 +592,7 @@ func (br *ByzantineReactor) AddPeer(peer p2p.Peer) {

// Send our state to peer.
// If we're syncing, broadcast a RoundStepMessage later upon SwitchToConsensus().
-if br.reactor.conS.IsRunning() {
+if !br.reactor.waitSync {
br.reactor.sendNewRoundStepMessage(peer)
}
}

@@ -440,7 +440,7 @@ func newStateWithConfigAndBlockStore(
}

blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool, blockStore)
-cs := NewState(thisConfig.Consensus, blockExec, blockStore, mempool, evpool, WithState(state.Copy()))
+cs := NewState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool)
cs.SetLogger(log.TestingLogger().With("module", "consensus"))
cs.SetPrivValidator(pv)

@@ -747,17 +747,18 @@ func consensusLogger() log.Logger {
}).With("module", "consensus")
}

-func randConsensusNet(t *testing.T, nValidators int, testName string, tickerFunc func() TimeoutTicker,
+func randConsensusNet(nValidators int, testName string, tickerFunc func() TimeoutTicker,
appFunc func() abci.Application, configOpts ...func(*cfg.Config)) ([]*State, cleanupFunc) {
-t.Helper()
genDoc, privVals := randGenesisDoc(nValidators, false, 30)
-state, err := sm.MakeGenesisState(genDoc)
-require.NoError(t, err)
css := make([]*State, nValidators)
logger := consensusLogger()
configRootDirs := make([]string, 0, nValidators)
for i := 0; i < nValidators; i++ {
stateDB := dbm.NewMemDB() // each state needs its own db
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
+state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc)
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
configRootDirs = append(configRootDirs, thisConfig.RootDir)
for _, opt := range configOpts {

@@ -771,7 +772,6 @@ func randConsensusNet(t *testing.T, nValidators int, testName string, tickerFunc
css[i] = newStateWithConfigAndBlockStore(thisConfig, state, privVals[i], app, stateDB)
css[i].SetTimeoutTicker(tickerFunc())
css[i].SetLogger(logger.With("validator", i, "module", "consensus"))
-css[i].updateToState(state.Copy())
}
return css, func() {
for _, dir := range configRootDirs {

@@ -19,7 +19,7 @@ import (
// Ensure a testnet makes blocks
func TestReactorInvalidPrecommit(t *testing.T) {
N := 4
-css, cleanup := randConsensusNet(t, N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore)
+css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore)
defer cleanup()

for i := 0; i < 4; i++ {

@@ -5,6 +5,7 @@ import (
"fmt"

"github.com/cosmos/gogoproto/proto"

cstypes "github.com/tendermint/tendermint/consensus/types"
"github.com/tendermint/tendermint/libs/bits"
tmmath "github.com/tendermint/tendermint/libs/math"

@@ -42,6 +42,7 @@ type Reactor struct {
conS *State

mtx tmsync.RWMutex
+waitSync bool
eventBus *types.EventBus
rs *cstypes.RoundState

@@ -52,11 +53,12 @@ type ReactorOption func(*Reactor)

// NewReactor returns a new Reactor with the given
// consensusState.
-func NewReactor(consensusState *State, options ...ReactorOption) *Reactor {
+func NewReactor(consensusState *State, waitSync bool, options ...ReactorOption) *Reactor {
conR := &Reactor{
-conS: consensusState,
-rs: consensusState.GetRoundState(),
-Metrics: NopMetrics(),
+conS: consensusState,
+waitSync: waitSync,
+rs: consensusState.GetRoundState(),
+Metrics: NopMetrics(),
}
conR.BaseReactor = *p2p.NewBaseReactor("Consensus", conR)

@@ -70,12 +72,21 @@ func NewReactor(consensusState *State, options ...ReactorOption) *Reactor {
// OnStart implements BaseService by subscribing to events, which later will be
// broadcasted to other peers and starting state if we're not in block sync.
func (conR *Reactor) OnStart() error {
+conR.Logger.Info("Reactor ", "waitSync", conR.WaitSync())

// start routine that computes peer statistics for evaluating peer quality
go conR.peerStatsRoutine()

conR.subscribeToBroadcastEvents()
go conR.updateRoundStateRoutine()

+if !conR.WaitSync() {
+err := conR.conS.Start()
+if err != nil {
+return err
+}
+}

return nil
}

@@ -83,34 +94,45 @@ func (conR *Reactor) OnStart() error {
// state.
func (conR *Reactor) OnStop() {
conR.unsubscribeFromBroadcastEvents()
-if conR.conS.IsRunning() {
-if err := conR.conS.Stop(); err != nil {
-conR.Logger.Error("Error stopping consensus state", "err", err)
-}
+if err := conR.conS.Stop(); err != nil {
+conR.Logger.Error("Error stopping consensus state", "err", err)
+}
+if !conR.WaitSync() {
+conR.conS.Wait()
+}
}

-func (conR *Reactor) IsConsensusRunning() bool {
-return conR.conS.IsRunning()
-}

// SwitchToConsensus switches from block_sync mode to consensus mode.
// It resets the state, turns off block_sync, and starts the consensus state-machine
-func (conR *Reactor) SwitchToConsensus(state sm.State, skipWAL bool) error {
+func (conR *Reactor) SwitchToConsensus(state sm.State, skipWAL bool) {
conR.Logger.Info("SwitchToConsensus")

// We have no votes, so reconstruct LastCommit from SeenCommit.
-if state.LastBlockHeight > state.InitialHeight {
+if state.LastBlockHeight > 0 {
conR.conS.reconstructLastCommit(state)
}

// NOTE: The line below causes broadcastNewRoundStepRoutine() to broadcast a
// NewRoundStepMessage.
conR.conS.updateToState(state)

+conR.mtx.Lock()
+conR.waitSync = false
+conR.mtx.Unlock()

if skipWAL {
conR.conS.doWALCatchup = false
}
-return conR.conS.Start()
+err := conR.conS.Start()
+if err != nil {
+panic(fmt.Sprintf(`Failed to start consensus state: %v

conS:
%+v

conR:
%+v`, err, conR.conS, conR))
+}
}

// GetChannels implements Reactor
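
The reactor hunks above restore a two-phase startup: the reactor is constructed in wait-sync mode, then explicitly switched into consensus once block sync finishes. Here is a minimal wiring sketch, assuming only the signatures shown in this diff (`NewState`, `NewReactor`, `SwitchToConsensus`, `GetState`) and placeholder variables for everything else; it is an illustration, not code from the repository:

```go
// Hypothetical wiring based on the signatures in this diff; config,
// state, blockExec, blockStore, mempool, and evpool are assumed to
// exist with the types used by NewState above.
cs := NewState(config.Consensus, state, blockExec, blockStore, mempool, evpool)

// waitSync=true: the reactor starts without starting the consensus
// state machine, e.g. while block sync is still running.
conR := NewReactor(cs, true)

// ...later, once block sync completes. Note that SwitchToConsensus no
// longer returns an error; per the hunk above, it panics internally if
// the consensus state fails to start.
conR.SwitchToConsensus(cs.GetState(), false)
```
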
@@ -177,7 +199,7 @@ func (conR *Reactor) AddPeer(peer p2p.Peer) {

// Send our state to peer.
// If we're block_syncing, broadcast a RoundStepMessage later upon SwitchToConsensus().
-if conR.conS.IsRunning() {
+if !conR.WaitSync() {
conR.sendNewRoundStepMessage(peer)
}
}

@@ -287,7 +309,7 @@ func (conR *Reactor) Receive(e p2p.Envelope) {
}

case DataChannel:
-if !conR.conS.IsRunning() {
+if conR.WaitSync() {
conR.Logger.Info("Ignoring message received during sync", "msg", msg)
return
}

@@ -306,7 +328,7 @@ func (conR *Reactor) Receive(e p2p.Envelope) {
}

case VoteChannel:
-if !conR.conS.IsRunning() {
+if conR.WaitSync() {
conR.Logger.Info("Ignoring message received during sync", "msg", msg)
return
}

@@ -328,7 +350,7 @@ func (conR *Reactor) Receive(e p2p.Envelope) {
}

case VoteSetBitsChannel:
-if !conR.conS.IsRunning() {
+if conR.WaitSync() {
conR.Logger.Info("Ignoring message received during sync", "msg", msg)
return
}

@@ -369,6 +391,13 @@ func (conR *Reactor) SetEventBus(b *types.EventBus) {
conR.conS.SetEventBus(b)
}

+// WaitSync returns whether the consensus reactor is waiting for state/block sync.
+func (conR *Reactor) WaitSync() bool {
+conR.mtx.RLock()
+defer conR.mtx.RUnlock()
+return conR.waitSync
+}

//--------------------------------------

// subscribeToBroadcastEvents subscribes for new round steps and votes

@@ -512,11 +541,6 @@ OUTER_LOOP:
if !peer.IsRunning() || !conR.IsRunning() {
return
}
-if !conR.IsConsensusRunning() {
-time.Sleep(conR.conS.config.PeerGossipSleepDuration)
-continue OUTER_LOOP
-}

rs := conR.getRoundState()
prs := ps.GetRoundState()

@@ -661,7 +685,7 @@ func (conR *Reactor) gossipDataForCatchup(logger log.Logger, rs *cstypes.RoundSt
}
return
}

// logger.Info("No parts to send in catch-up, sleeping")
time.Sleep(conR.conS.config.PeerGossipSleepDuration)
}

@@ -671,16 +695,12 @@ func (conR *Reactor) gossipVotesRoutine(peer p2p.Peer, ps *PeerState) {
// Simple hack to throttle logs upon sleep.
var sleeping = 0

OUTER_LOOP:
for {
// Manage disconnects from self or peer.
if !peer.IsRunning() || !conR.IsRunning() {
return
}
-if !conR.IsConsensusRunning() {
-time.Sleep(conR.conS.config.PeerGossipSleepDuration)
-continue
-}

rs := conR.getRoundState()
prs := ps.GetRoundState()

@@ -691,11 +711,14 @@ func (conR *Reactor) gossipVotesRoutine(peer p2p.Peer, ps *PeerState) {
sleeping = 0
}

// logger.Debug("gossipVotesRoutine", "rsHeight", rs.Height, "rsRound", rs.Round,
// "prsHeight", prs.Height, "prsRound", prs.Round, "prsStep", prs.Step)

// If height matches, then send LastCommit, Prevotes, Precommits.
if rs.Height == prs.Height {
heightLogger := logger.With("height", prs.Height)
if conR.gossipVotesForHeight(heightLogger, rs, prs, ps) {
-continue
+continue OUTER_LOOP
}
}

@@ -704,7 +727,7 @@ func (conR *Reactor) gossipVotesRoutine(peer p2p.Peer, ps *PeerState) {
if prs.Height != 0 && rs.Height == prs.Height+1 {
if ps.PickSendVote(rs.LastCommit) {
logger.Debug("Picked rs.LastCommit to send", "height", prs.Height)
-continue
+continue OUTER_LOOP
}
}

@@ -717,7 +740,7 @@ func (conR *Reactor) gossipVotesRoutine(peer p2p.Peer, ps *PeerState) {
if commit := conR.conS.blockStore.LoadBlockCommit(prs.Height); commit != nil {
if ps.PickSendVote(commit) {
logger.Debug("Picked Catchup commit to send", "height", prs.Height)
-continue
+continue OUTER_LOOP
}
}
}

@@ -734,7 +757,7 @@ func (conR *Reactor) gossipVotesRoutine(peer p2p.Peer, ps *PeerState) {
}

time.Sleep(conR.conS.config.PeerGossipSleepDuration)
-continue
+continue OUTER_LOOP
}
}

@@ -808,11 +831,6 @@ OUTER_LOOP:
return
}

-if !conR.IsConsensusRunning() {
-time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
-continue OUTER_LOOP
-}

// Maybe send Height/Round/Prevotes
{
rs := conR.getRoundState()

@@ -898,6 +916,8 @@ OUTER_LOOP:
}
}

time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)

+continue OUTER_LOOP
}
}

@@ -1331,6 +1351,7 @@ func (ps *PeerState) ApplyNewRoundStepMessage(msg *NewRoundStepMessage) {
psRound := ps.PRS.Round
psCatchupCommitRound := ps.PRS.CatchupCommitRound
psCatchupCommit := ps.PRS.CatchupCommit
+lastPrecommits := ps.PRS.Precommits

startTime := tmtime.Now().Add(-1 * time.Duration(msg.SecondsSinceStartTime) * time.Second)
ps.PRS.Height = msg.Height

@@ -1358,7 +1379,7 @@ func (ps *PeerState) ApplyNewRoundStepMessage(msg *NewRoundStepMessage) {
// Shift Precommits to LastCommit.
if psHeight+1 == msg.Height && psRound == msg.LastCommitRound {
ps.PRS.LastCommitRound = msg.LastCommitRound
-ps.PRS.LastCommit = ps.PRS.Precommits
+ps.PRS.LastCommit = lastPrecommits
} else {
ps.PRS.LastCommitRound = msg.LastCommitRound
ps.PRS.LastCommit = nil
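
The new `lastPrecommits` local is not cosmetic: `ps.PRS` is overwritten between the capture at the top of `ApplyNewRoundStepMessage` and the use in the LastCommit shift, so reading `ps.PRS.Precommits` at the later point would observe the already-updated struct. A self-contained toy illustration of the capture-before-mutation pattern (hypothetical types, not the real `PeerState`):

```go
package main

import "fmt"

type peerRoundState struct{ Precommits []string }

func main() {
	prs := peerRoundState{Precommits: []string{"precommit-A", "precommit-B"}}

	// Capture before mutation, as the hunk above now does.
	lastPrecommits := prs.Precommits

	// Simulate the struct being reset for the new height/round.
	prs.Precommits = nil

	fmt.Println(lastPrecommits) // [precommit-A precommit-B]: the capture survives
	fmt.Println(prs.Precommits) // []: reading through prs after the reset loses it
}
```
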
@@ -55,8 +55,9 @@ func startConsensusNet(t *testing.T, css []*State, n int) (
blocksSubs := make([]types.Subscription, 0)
eventBuses := make([]*types.EventBus, n)
for i := 0; i < n; i++ {
-// Note, we dont start the consensus states
-reactors[i] = NewReactor(css[i])
+/*logger, err := tmflags.ParseLogLevel("consensus:info,*:error", logger, "info")
+if err != nil { t.Fatal(err)}*/
+reactors[i] = NewReactor(css[i], true) // so we dont start the consensus states
reactors[i].SetLogger(css[i].Logger)

// eventBus is already started with the cs

@@ -87,8 +88,7 @@ func startConsensusNet(t *testing.T, css []*State, n int) (
// TODO: is this still true with new pubsub?
for i := 0; i < n; i++ {
s := reactors[i].conS.GetState()
-err := reactors[i].SwitchToConsensus(s, false)
-require.NoError(t, err)
+reactors[i].SwitchToConsensus(s, false)
}
return reactors, blocksSubs, eventBuses
}

@@ -113,7 +113,7 @@ func stopConsensusNet(logger log.Logger, reactors []*Reactor, eventBuses []*type
// Ensure a testnet makes blocks
func TestReactorBasic(t *testing.T) {
N := 4
-css, cleanup := randConsensusNet(t, N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore)
+css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore)
defer cleanup()
reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N)
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

@@ -135,8 +135,6 @@ func TestReactorWithEvidence(t *testing.T) {
// css := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore)

genDoc, privVals := randGenesisDoc(nValidators, false, 30)
-state, err := sm.MakeGenesisState(genDoc)
-require.NoError(t, err)
css := make([]*State, nValidators)
logger := consensusLogger()
for i := 0; i < nValidators; i++ {

@@ -144,6 +142,7 @@ func TestReactorWithEvidence(t *testing.T) {
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
+state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc)
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
defer os.RemoveAll(thisConfig.RootDir)
ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal

@@ -204,7 +203,7 @@ func TestReactorWithEvidence(t *testing.T) {

// Make State
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool, blockStore)
-cs := NewState(thisConfig.Consensus, blockExec, blockStore, mempool, evpool2, WithState(state.Copy()))
+cs := NewState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool2)
cs.SetLogger(log.TestingLogger().With("module", "consensus"))
cs.SetPrivValidator(pv)

@@ -238,7 +237,7 @@ func TestReactorWithEvidence(t *testing.T) {
// Ensure a testnet makes blocks when there are txs
func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {
N := 4
-css, cleanup := randConsensusNet(t, N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore,
+css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore,
func(c *cfg.Config) {
c.Consensus.CreateEmptyBlocks = false
})

@@ -259,7 +258,7 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {

func TestReactorReceiveDoesNotPanicIfAddPeerHasntBeenCalledYet(t *testing.T) {
N := 1
-css, cleanup := randConsensusNet(t, N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore)
+css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore)
defer cleanup()
reactors, _, eventBuses := startConsensusNet(t, css, N)
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

@@ -285,7 +284,7 @@ func TestReactorReceiveDoesNotPanicIfAddPeerHasntBeenCalledYet(t *testing.T) {

func TestReactorReceivePanicsIfInitPeerHasntBeenCalledYet(t *testing.T) {
N := 1
-css, cleanup := randConsensusNet(t, N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore)
+css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore)
defer cleanup()
reactors, _, eventBuses := startConsensusNet(t, css, N)
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

@@ -311,7 +310,7 @@ func TestReactorReceivePanicsIfInitPeerHasntBeenCalledYet(t *testing.T) {
// Test we record stats about votes and block parts from other peers.
func TestReactorRecordsVotesAndBlockParts(t *testing.T) {
N := 4
-css, cleanup := randConsensusNet(t, N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore)
+css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore)
defer cleanup()
reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N)
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

@@ -337,7 +336,6 @@ func TestReactorVotingPowerChange(t *testing.T) {
nVals := 4
logger := log.TestingLogger()
css, cleanup := randConsensusNet(
-t,
nVals,
"consensus_voting_power_changes_test",
newMockTickerFunc(true),

@@ -533,7 +531,7 @@ func TestReactorValidatorSetChanges(t *testing.T) {
// Check we can make blocks with skip_timeout_commit=false
func TestReactorWithTimeoutCommit(t *testing.T) {
N := 4
-css, cleanup := randConsensusNet(t, N, "consensus_reactor_with_timeout_commit_test", newMockTickerFunc(false), newKVStore)
+css, cleanup := randConsensusNet(N, "consensus_reactor_with_timeout_commit_test", newMockTickerFunc(false), newKVStore)
defer cleanup()
// override default SkipTimeoutCommit == true for tests
for i := 0; i < N; i++ {

@@ -54,7 +54,7 @@ func (cs *State) readReplayMessage(msg *TimedWALMessage, newStepSub types.Subscr
if m.Height != m2.Height || m.Round != m2.Round || m.Step != m2.Step {
return fmt.Errorf("roundState mismatch. Got %v; Expected %v", m2, m)
}
-case <-newStepSub.Cancelled():
+case <-newStepSub.Canceled():
return fmt.Errorf("failed to read off newStepSub.Out(). newStepSub was canceled")
case <-ticker:
return fmt.Errorf("failed to read off newStepSub.Out()")

@@ -254,7 +254,7 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error {

h.logger.Info("ABCI Handshake App Info",
"height", blockHeight,
-"hash", appHash,
+"hash", log.NewLazySprintf("%X", appHash),
"software-version", res.Version,
"protocol-version", res.AppVersion,
)

@@ -271,7 +271,7 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error {
}

h.logger.Info("Completed ABCI Handshake - Tendermint and App are synced",
-"appHeight", blockHeight, "appHash", appHash)
+"appHeight", blockHeight, "appHash", log.NewLazySprintf("%X", appHash))

// TODO: (on restart) replay mempool

@@ -129,8 +129,8 @@ func (pb *playback) replayReset(count int, newStepSub types.Subscription) error
}
pb.cs.Wait()

-newCS := NewState(pb.cs.config, pb.cs.blockExec,
-pb.cs.blockStore, pb.cs.txNotifier, pb.cs.evpool, WithState(pb.genesisState.Copy()))
+newCS := NewState(pb.cs.config, pb.genesisState.Copy(), pb.cs.blockExec,
+pb.cs.blockStore, pb.cs.txNotifier, pb.cs.evpool)
newCS.SetEventBus(pb.cs.eventBus)
newCS.startForReplay()

@@ -332,8 +332,8 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo
mempool, evpool := emptyMempool{}, sm.EmptyEvidencePool{}
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool, blockStore)

-consensusState := NewState(csConfig, blockExec,
-blockStore, mempool, evpool, WithState(state.Copy()))
+consensusState := NewState(csConfig, state.Copy(), blockExec,
+blockStore, mempool, evpool)

consensusState.SetEventBus(eventBus)
return consensusState

@@ -67,8 +67,7 @@ func TestMain(m *testing.M) {
func startNewStateAndWaitForBlock(t *testing.T, consensusReplayConfig *cfg.Config,
lastBlockHeight int64, blockDB dbm.DB, stateStore sm.Store) {
logger := log.TestingLogger()
-state, err := stateStore.LoadFromDBOrGenesisFile(consensusReplayConfig.GenesisFile())
-require.NoError(t, err)
+state, _ := stateStore.LoadFromDBOrGenesisFile(consensusReplayConfig.GenesisFile())
privValidator := loadPrivValidator(consensusReplayConfig)
cs := newStateWithConfigAndBlockStore(
consensusReplayConfig,

@@ -82,7 +81,7 @@ func startNewStateAndWaitForBlock(t *testing.T, consensusReplayConfig *cfg.Confi
bytes, _ := os.ReadFile(cs.config.WalFile())
t.Logf("====== WAL: \n\r%X\n", bytes)

-err = cs.Start()
+err := cs.Start()
require.NoError(t, err)
defer func() {
if err := cs.Stop(); err != nil {

@@ -98,7 +97,7 @@ func startNewStateAndWaitForBlock(t *testing.T, consensusReplayConfig *cfg.Confi
require.NoError(t, err)
select {
case <-newBlockSub.Out():
-case <-newBlockSub.Cancelled():
+case <-newBlockSub.Canceled():
t.Fatal("newBlockSub was canceled")
case <-time.After(120 * time.Second):
t.Fatal("Timed out waiting for new block (see trace above)")

@@ -556,40 +555,40 @@ func TestSimulateValidatorsChange(t *testing.T) {
// Sync from scratch
func TestHandshakeReplayAll(t *testing.T) {
for _, m := range modes {
-testHandshakeReplay(t, 0, m, false)
+testHandshakeReplay(t, config, 0, m, false)
}
for _, m := range modes {
-testHandshakeReplay(t, 0, m, true)
+testHandshakeReplay(t, config, 0, m, true)
}
}

// Sync many, not from scratch
func TestHandshakeReplaySome(t *testing.T) {
for _, m := range modes {
-testHandshakeReplay(t, 2, m, false)
+testHandshakeReplay(t, config, 2, m, false)
}
for _, m := range modes {
-testHandshakeReplay(t, 2, m, true)
+testHandshakeReplay(t, config, 2, m, true)
}
}

// Sync from lagging by one
func TestHandshakeReplayOne(t *testing.T) {
for _, m := range modes {
-testHandshakeReplay(t, numBlocks-1, m, false)
+testHandshakeReplay(t, config, numBlocks-1, m, false)
}
for _, m := range modes {
-testHandshakeReplay(t, numBlocks-1, m, true)
+testHandshakeReplay(t, config, numBlocks-1, m, true)
}
}

// Sync from caught up
func TestHandshakeReplayNone(t *testing.T) {
for _, m := range modes {
-testHandshakeReplay(t, numBlocks, m, false)
+testHandshakeReplay(t, config, numBlocks, m, false)
}
for _, m := range modes {
-testHandshakeReplay(t, numBlocks, m, true)
+testHandshakeReplay(t, config, numBlocks, m, true)
}
}

@@ -661,27 +660,25 @@ func tempWALWithData(data []byte) string {

// Make some blocks. Start a fresh app and apply nBlocks blocks.
// Then restart the app and sync it up with the remaining blocks
-func testHandshakeReplay(t *testing.T, nBlocks int, mode uint, testValidatorsChange bool) {
-var (
-chain []*types.Block
-commits []*types.Commit
-store *mockBlockStore
-stateDB dbm.DB
-genesisState sm.State
-config *cfg.Config
-)
+func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uint, testValidatorsChange bool) {
+var chain []*types.Block
+var commits []*types.Commit
+var store *mockBlockStore
+var stateDB dbm.DB
+var genesisState sm.State
if testValidatorsChange {
-config = ResetConfig(fmt.Sprintf("%s_%v_m", t.Name(), mode))
-defer os.RemoveAll(config.RootDir)
+testConfig := ResetConfig(fmt.Sprintf("%s_%v_m", t.Name(), mode))
+defer os.RemoveAll(testConfig.RootDir)
stateDB = dbm.NewMemDB()

genesisState = sim.GenesisState
config = sim.Config
chain = append([]*types.Block{}, sim.Chain...) // copy chain
commits = sim.Commits
store = newMockBlockStore(t, config, genesisState.ConsensusParams)
} else { // test single node
-config = ResetConfig(fmt.Sprintf("%s_%v_s", t.Name(), mode))
-defer os.RemoveAll(config.RootDir)
+testConfig := ResetConfig(fmt.Sprintf("%s_%v_s", t.Name(), mode))
+defer os.RemoveAll(testConfig.RootDir)
walBody, err := WALWithNBlocks(t, numBlocks)
require.NoError(t, err)
walFile := tempWALWithData(walBody)

@@ -814,11 +811,14 @@ func buildAppStateFromChain(t *testing.T, proxyApp proxy.AppConns, stateStore sm

state.Version.Consensus.App = kvstore.ProtocolVersion // simulate handshake, receive app version
validators := types.TM2PB.ValidatorUpdates(state.Validators)
-_, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{
+if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{
Validators: validators,
-})
-require.NoError(t, err)
-require.NoError(t, stateStore.Save(state))
+}); err != nil {
+panic(err)
+}
+if err := stateStore.Save(state); err != nil { // save height 1's validatorsInfo
+panic(err)
+}
switch mode {
case 0:
for i := 0; i < nBlocks; i++ {

@@ -1198,6 +1198,7 @@ func (bs *mockBlockStore) PruneBlocks(height int64, state sm.State) (uint64, int
}

func (bs *mockBlockStore) DeleteLatestBlock() error { return nil }
+func (bs *mockBlockStore) Close() error             { return nil }

//---------------------------------------
// Test handshake/init chain

@@ -148,6 +148,7 @@ type StateOption func(*State)
// NewState returns a new State.
func NewState(
config *cfg.ConsensusConfig,
+state sm.State,
blockExec *sm.BlockExecutor,
blockStore sm.BlockStore,
txNotifier txNotifier,

@@ -176,6 +177,13 @@ func NewState(
cs.doPrevote = cs.defaultDoPrevote
cs.setProposal = cs.defaultSetProposal

+// We have no votes, so reconstruct LastCommit from SeenCommit.
+if state.LastBlockHeight > 0 {
+cs.reconstructLastCommit(state)
+}
+
+cs.updateToState(state)

// NOTE: we do not call scheduleRound0 yet, we do that upon Start()

cs.BaseService = *service.NewBaseService(nil, "State", cs)

@@ -199,19 +207,10 @@ func (cs *State) SetEventBus(b *types.EventBus) {
}

// StateMetrics sets the metrics.
-func WithMetrics(metrics *Metrics) StateOption {
+func StateMetrics(metrics *Metrics) StateOption {
return func(cs *State) { cs.metrics = metrics }
}

-func WithState(state sm.State) StateOption {
-return func(cs *State) {
-if state.LastBlockHeight > 0 {
-cs.reconstructLastCommit(state)
-}
-cs.updateToState(state)
-}
-}

// String returns a string.
func (cs *State) String() string {
// better not to access shared variables

@@ -298,10 +297,6 @@ func (cs *State) LoadCommit(height int64) *types.Commit {
// OnStart loads the latest state via the WAL, and starts the timeout and
// receive routines.
func (cs *State) OnStart() error {
-if cs.state.IsEmpty() {
-return errors.New("no state to commence consensus on")
-}

// We may set the WAL in testing before calling Start, so only OpenWAL if its
// still the nilWAL.
if _, ok := cs.wal.(nilWAL); ok {

@@ -617,7 +612,7 @@ func (cs *State) updateToState(state sm.State) {
// signal the new round step, because other services (eg. txNotifier)
// depend on having an up-to-date peer state!
if state.LastBlockHeight <= cs.state.LastBlockHeight {
-cs.Logger.Info(
+cs.Logger.Debug(
"ignoring updateToState()",
"new_height", state.LastBlockHeight+1,
"old_height", cs.state.LastBlockHeight+1,

@@ -2280,10 +2275,11 @@ func (cs *State) signAddVote(msgType tmproto.SignedMsgType, hash []byte, header
return nil
}

+// TODO: pass pubKey to signVote
vote, err := cs.signVote(msgType, hash, header)
if err == nil {
cs.sendInternalMessage(msgInfo{&VoteMessage{vote}, ""})
-cs.Logger.Info("signed and pushed vote", "height", cs.Height, "round", cs.Round, "vote", vote)
+cs.Logger.Debug("signed and pushed vote", "height", cs.Height, "round", cs.Round, "vote", vote)
return vote
}

@@ -121,7 +121,7 @@ func (hvs *HeightVoteSet) AddVote(vote *types.Vote, peerID p2p.ID) (added bool,
return
}
voteSet := hvs.getVoteSet(vote.Round, vote.Type)
-if voteSet.IsEmpty() {
+if voteSet == nil {
if rndz := hvs.peerCatchupRounds[peerID]; len(rndz) < 2 {
hvs.addRound(vote.Round)
voteSet = hvs.getVoteSet(vote.Round, vote.Type)

@@ -166,7 +166,7 @@ func (hvs *HeightVoteSet) POLInfo() (polRound int32, polBlockID types.BlockID) {
func (hvs *HeightVoteSet) getVoteSet(round int32, voteType tmproto.SignedMsgType) *types.VoteSet {
rvs, ok := hvs.roundVoteSets[round]
if !ok {
-return &types.VoteSet{}
+return nil
}
switch voteType {
case tmproto.PrevoteType:
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/crypto/tmhash"
|
||||
"github.com/tendermint/tendermint/internal/test"
|
||||
@@ -31,23 +30,30 @@ func TestPeerCatchupRounds(t *testing.T) {
|
||||
|
||||
vote999_0 := makeVoteHR(t, 1, 0, 999, privVals)
|
||||
added, err := hvs.AddVote(vote999_0, "peer1")
|
||||
require.NoError(t, err)
|
||||
require.True(t, added)
|
||||
if !added || err != nil {
|
||||
t.Error("Expected to successfully add vote from peer", added, err)
|
||||
}
|
||||
|
||||
vote1000_0 := makeVoteHR(t, 1, 0, 1000, privVals)
|
||||
added, err = hvs.AddVote(vote1000_0, "peer1")
|
||||
require.NoError(t, err)
|
||||
require.True(t, added)
|
||||
if !added || err != nil {
|
||||
t.Error("Expected to successfully add vote from peer", added, err)
|
||||
}
|
||||
|
||||
vote1001_0 := makeVoteHR(t, 1, 0, 1001, privVals)
|
||||
added, err = hvs.AddVote(vote1001_0, "peer1")
|
||||
require.Error(t, err)
|
||||
require.Equal(t, ErrGotVoteFromUnwantedRound, err)
|
||||
require.False(t, added)
|
||||
if err != ErrGotVoteFromUnwantedRound {
|
||||
t.Errorf("expected GotVoteFromUnwantedRoundError, but got %v", err)
|
||||
}
|
||||
if added {
|
||||
t.Error("Expected to *not* add vote from peer, too many catchup rounds.")
|
||||
}
|
||||
|
||||
added, err = hvs.AddVote(vote1001_0, "peer2")
|
||||
require.NoError(t, err)
|
||||
require.True(t, added)
|
||||
if !added || err != nil {
|
||||
t.Error("Expected to successfully add vote from another peer")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func makeVoteHR(t *testing.T, height int64, valIndex, round int32, privVals []types.PrivValidator) *types.Vote {
|
||||
|
||||
@@ -86,7 +86,7 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) {
mempool := emptyMempool{}
evpool := sm.EmptyEvidencePool{}
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool, blockStore)
-consensusState := NewState(config.Consensus, blockExec, blockStore, mempool, evpool, WithState(state.Copy()))
+consensusState := NewState(config.Consensus, state.Copy(), blockExec, blockStore, mempool, evpool)
consensusState.SetLogger(logger)
consensusState.SetEventBus(eventBus)
if privValidator != nil {

@@ -9,8 +9,8 @@ module.exports = {
editLinks: true,
label: 'core',
algolia: {
-id: "BH4D9OD16A",
-key: "59f0e2deb984aa9cdf2b3a5fd24ac501",
+id: "QQFROLBNZC",
+key: "f1b68b96fb31d8aa4a54412c44917a26",
index: "tendermint"
},
versions: [

@@ -27,22 +27,28 @@ Usage:
  abci-cli [command]

Available Commands:
-  batch       Run a batch of abci commands against an application
-  check_tx    Validate a tx
-  commit      Commit the application state and return the Merkle root hash
-  console     Start an interactive abci console for multiple commands
-  deliver_tx  Deliver a new tx to the application
-  kvstore     ABCI demo example
-  echo        Have the application echo a message
-  help        Help about any command
-  info        Get some info about the application
-  query       Query the application state
+  batch            run a batch of abci commands against an application
+  check_tx         validate a transaction
+  commit           commit the application state and return the Merkle root hash
+  completion       Generate the autocompletion script for the specified shell
+  console          start an interactive ABCI console for multiple commands
+  deliver_tx       deliver a new transaction to the application
+  echo             have the application echo a message
+  help             Help about any command
+  info             get some info about the application
+  kvstore          ABCI demo example
+  prepare_proposal prepare proposal
+  process_proposal process proposal
+  query            query the application state
   test             run integration tests
   version          print ABCI console version

Flags:
-      --abci string      socket or grpc (default "socket")
-      --address string   address of application socket (default "tcp://127.0.0.1:26658")
-  -h, --help             help for abci-cli
-  -v, --verbose          print the command and results as if it were a console session
+      --abci string        either socket or grpc (default "socket")
+      --address string     address of application socket (default "tcp://0.0.0.0:26658")
+  -h, --help               help for abci-cli
+      --log_level string   set the logger level (default "debug")
+  -v, --verbose            print the command and results as if it were a console session

Use "abci-cli [command] --help" for more information about a command.
```

@@ -58,47 +64,51 @@ purposes.

We'll start a kvstore application, which was installed at the same time
as `abci-cli` above. The kvstore just stores transactions in a merkle
-tree.
-
-Its code can be found
-[here](https://github.com/tendermint/tendermint/blob/v0.34.x/abci/cmd/abci-cli/abci-cli.go)
-and looks like:
+tree. Its code can be found
+[here](https://github.com/tendermint/tendermint/blob/main/abci/cmd/abci-cli/abci-cli.go)
+and looks like the following:

```go
func cmdKVStore(cmd *cobra.Command, args []string) error {
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))

// Create the application - in memory or persisted to disk
var app types.Application
if flagPersist == "" {
-app = kvstore.NewKVStoreApplication()
-} else {
-app = kvstore.NewPersistentKVStoreApplication(flagPersist)
-app.(*kvstore.PersistentKVStoreApplication).SetLogger(logger.With("module", "kvstore"))
+var err error
+flagPersist, err = os.MkdirTemp("", "persistent_kvstore_tmp")
+if err != nil {
+return err
+}
}
+app = kvstore.NewPersistentKVStoreApplication(flagPersist)
+app.(*kvstore.PersistentKVStoreApplication).SetLogger(logger.With("module", "kvstore"))

// Start the listener
-srv, err := server.NewServer(flagAddrD, flagAbci, app)
+srv, err := server.NewServer(flagAddress, flagAbci, app)
if err != nil {
return err
}
srv.SetLogger(logger.With("module", "abci-server"))
if err := srv.Start(); err != nil {
return err
}

// Stop upon receiving SIGTERM or CTRL-C.
tmos.TrapSignal(logger, func() {
// Cleanup
-srv.Stop()
+if err := srv.Stop(); err != nil {
+logger.Error("Error while stopping server", "err", err)
+}
})

// Run forever.
select {}
}
```

-Start by running:
+Start the application by running:

```sh
abci-cli kvstore

@@ -163,32 +173,32 @@ Try running these commands:
-> data: hello
-> data.hex: 0x68656C6C6F

> info
-> code: OK
-> data: {"size":0}
-> data.hex: 0x7B2273697A65223A307D

> prepare_proposal "abc"
-> code: OK
-> log: Succeeded. Tx: abc action: UNMODIFIED
-> log: Succeeded. Tx: abc

> process_proposal "abc"
-> code: OK
-> status: ACCEPT

> commit
-> code: OK
-> data.hex: 0x0000000000000000

> deliver_tx "abc"
-> code: OK

> info
-> code: OK
-> data: {"size":1}
-> data.hex: 0x7B2273697A65223A317D

> commit
-> code: OK
-> data.hex: 0x0200000000000000

@@ -204,7 +214,7 @@ Try running these commands:
> deliver_tx "def=xyz"
-> code: OK

> commit
-> code: OK
-> data.hex: 0x0400000000000000

@@ -219,11 +229,9 @@ Try running these commands:

> prepare_proposal "preparedef"
-> code: OK
-> log: Succeeded. Tx: def action: ADDED
-> code: OK
-> log: Succeeded. Tx: preparedef action: REMOVED
-> log: Succeeded. Tx: replacedef

> process_proposal "def"
> process_proposal "replacedef"
-> code: OK
-> status: ACCEPT

@@ -245,21 +253,21 @@ Try running these commands:
Note that if we do `deliver_tx "abc"` it will store `(abc, abc)`, but if
we do `deliver_tx "abc=efg"` it will store `(abc, efg)`.

-Similarly, you could put the commands in a file and run
+You could put the commands in a file and run
`abci-cli --verbose batch < myfile`.

+Note that the `abci-cli` is designed strictly for testing and debugging. In a real
+deployment, the role of sending messages is taken by Tendermint, which
+connects to the app using three separate connections, each with its own
+pattern of messages.
+
+For examples of running an ABCI app with Tendermint, see the
+[getting started guide](./getting-started.md).

-## Bounties
-
-Want to write an app in your favorite language?! We'd be happy
-to add you to our [ecosystem](https://github.com/tendermint/awesome#ecosystem)!
-See [funding](https://github.com/interchainio/funding) opportunities from the
-[Interchain Foundation](https://interchain.io) for implementations in new languages and more.
-
-The `abci-cli` is designed strictly for testing and debugging. In a real
-deployment, the role of sending messages is taken by Tendermint, which
-connects to the app using three separate connections, each with its own
-pattern of messages.
-
-For examples of running an ABCI app with
-Tendermint, see the [getting started guide](./getting-started.md).
+Next is the ABCI specification.

@@ -11,9 +11,10 @@ application you want to run. So, to run a complete blockchain that does
something useful, you must start two programs: one is Tendermint Core,
the other is your application, which can be written in any programming
language. Recall from [the intro to
-ABCI](../introduction/what-is-tendermint.md#abci-overview) that Tendermint Core handles all the p2p and consensus stuff, and just forwards transactions to the
+ABCI](../introduction/what-is-tendermint.md#abci-overview) that Tendermint Core
+handles all the p2p and consensus stuff, and just forwards transactions to the
application when they need to be validated, or when they're ready to be
-committed to a block.
+executed and committed.

In this guide, we show you some examples of how to run an application
using Tendermint.

@@ -22,7 +23,8 @@ using Tendermint.

The first apps we will work with are written in Go. To install them, you
need to [install Go](https://golang.org/doc/install), put
-`$GOPATH/bin` in your `$PATH` and enable go modules with these instructions:
+`$GOPATH/bin` in your `$PATH` and enable go modules. If you use `bash`,
+follow these instructions:

```bash
echo export GOPATH=\"\$HOME/go\" >> ~/.bash_profile

@@ -31,17 +33,48 @@ echo export PATH=\"\$PATH:\$GOPATH/bin\" >> ~/.bash_profile

Then run

-```sh
+```bash
go get github.com/tendermint/tendermint
cd $GOPATH/src/github.com/tendermint/tendermint
make install_abci
```

-Now you should have the `abci-cli` installed; you'll notice the `kvstore`
-command, an example application written
-in Go. See below for an application written in JavaScript.
+Now you should have the `abci-cli` installed; run `abci-cli` to see the list of commands:

-Now, let's run some apps!
+```
+Usage:
+  abci-cli [command]
+
+Available Commands:
+  batch            run a batch of abci commands against an application
+  check_tx         validate a transaction
+  commit           commit the application state and return the Merkle root hash
+  completion       Generate the autocompletion script for the specified shell
+  console          start an interactive ABCI console for multiple commands
+  deliver_tx       deliver a new transaction to the application
+  echo             have the application echo a message
+  help             Help about any command
+  info             get some info about the application
+  kvstore          ABCI demo example
+  prepare_proposal prepare proposal
+  process_proposal process proposal
+  query            query the application state
+  test             run integration tests
+  version          print ABCI console version
+
+Flags:
+      --abci string        either socket or grpc (default "socket")
+      --address string     address of application socket (default "tcp://0.0.0.0:26658")
+  -h, --help               help for abci-cli
+      --log_level string   set the logger level (default "debug")
+  -v, --verbose            print the command and results as if it were a console session
+
+Use "abci-cli [command] --help" for more information about a command.
+```
+
+You'll notice the `kvstore` command, an example application written in Go.
+
+Now, let's run an app!

## KVStore - A First Example

@@ -68,7 +101,7 @@ tendermint node
```

If you have used Tendermint, you may want to reset the data for a new
-blockchain by running `tendermint unsafe_reset_all`. Then you can run
+blockchain by running `tendermint unsafe-reset-all`. Then you can run
`tendermint node` to start Tendermint, and connect to the app. For more
details, see [the guide on using Tendermint](../tendermint-core/using-tendermint.md).

@@ -164,47 +197,3 @@ curl -s 'localhost:26657/abci_query?data="name"'

Try some other transactions and queries to make sure everything is
working!

-## CounterJS - Example in Another Language
-
-We also want to run applications in another language - in this case,
-we'll run a Javascript version of the `counter`. To run it, you'll need
-to [install node](https://nodejs.org/en/download/).
-
-You'll also need to fetch the relevant repository, from
-[here](https://github.com/tendermint/js-abci), then install it:
-
-```sh
-git clone https://github.com/tendermint/js-abci.git
-cd js-abci
-npm install abci
-```
-
-Kill the previous `counter` and `tendermint` processes. Now run the app:
-
-```sh
-node example/counter.js
-```
-
-In another window, reset and start `tendermint`:
-
-```sh
-tendermint unsafe_reset_all
-tendermint node
-```
-
-Once again, you should see blocks streaming by - but now, our
-application is written in Javascript! Try sending some transactions, and
-like before - the results should be the same:
-
-```sh
-# ok
-curl localhost:26657/broadcast_tx_commit?tx=0x00
-# invalid nonce
-curl localhost:26657/broadcast_tx_commit?tx=0x05
-# ok
-curl localhost:26657/broadcast_tx_commit?tx=0x01
-```
-
-Neat, eh?

@@ -78,6 +78,7 @@ Note the context/background should be written in the present tense.
- [ADR-039: Peer-Behaviour](./adr-039-peer-behaviour.md)
- [ADR-063: Privval-gRPC](./adr-063-privval-grpc.md)
- [ADR-067: Mempool Refactor](./adr-067-mempool-refactor.md)
+- [ADR-071: Proposer-Based Timestamps](./adr-071-proposer-based-timestamps.md)
- [ADR-075: RPC Event Subscription Interface](./adr-075-rpc-subscription.md)
- [ADR-079: Ed25519 Verification](./adr-079-ed25519-verification.md)
- [ADR-081: Protocol Buffers Management](./adr-081-protobuf-mgmt.md)

@@ -114,7 +115,6 @@ None
- [ADR-064: Batch Verification](./adr-064-batch-verification.md)
- [ADR-068: Reverse-Sync](./adr-068-reverse-sync.md)
- [ADR-069: Node Initialization](./adr-069-flexible-node-initialization.md)
-- [ADR-071: Proposer-Based Timestamps](./adr-071-proposer-based-timestamps.md)
- [ADR-073: Adopt LibP2P](./adr-073-libp2p.md)
- [ADR-074: Migrate Timeout Parameters to Consensus Parameters](./adr-074-timeout-params.md)
- [ADR-080: Reverse Sync](./adr-080-reverse-sync.md)

@@ -6,7 +6,7 @@ order: 4

Tendermint is software for securely and consistently replicating an
application on many machines. By securely, we mean that Tendermint works
-even if up to 1/3 of machines fail in arbitrary ways. By consistently,
+as long as less than 1/3 of machines fail in arbitrary ways. By consistently,
we mean that every non-faulty machine sees the same transaction log and
computes the same state. Secure and consistent replication is a
fundamental problem in distributed systems; it plays a critical role in

@@ -22,15 +22,14 @@ reformalization of BFT in a more modern setting, with emphasis on
peer-to-peer networking and cryptographic authentication. The name
derives from the way transactions are batched in blocks, where each
block contains a cryptographic hash of the previous one, forming a
-chain. In practice, the blockchain data structure actually optimizes BFT
-design.
+chain.

Tendermint consists of two chief technical components: a blockchain
consensus engine and a generic application interface. The consensus
engine, called Tendermint Core, ensures that the same transactions are
recorded on every machine in the same order. The application interface,
-called the Application BlockChain Interface (ABCI), enables the
-transactions to be processed in any programming language. Unlike other
+called the Application BlockChain Interface (ABCI), delivers the transactions
+to applications for processing. Unlike other
blockchain and consensus solutions, which come pre-packaged with built
in state machines (like a fancy key-value store, or a quirky scripting
language), developers can use Tendermint for BFT state machine

@@ -51,13 +50,13 @@ Hyperledger's Burrow.

### Zookeeper, etcd, consul

-Zookeeper, etcd, and consul are all implementations of a key-value store
-atop a classical, non-BFT consensus algorithm. Zookeeper uses a version
-of Paxos called Zookeeper Atomic Broadcast, while etcd and consul use
-the Raft consensus algorithm, which is much younger and simpler. A
+Zookeeper, etcd, and consul are all implementations of key-value stores
+atop a classical, non-BFT consensus algorithm. Zookeeper uses an
+algorithm called Zookeeper Atomic Broadcast, while etcd and consul use
+the Raft log replication algorithm. A
typical cluster contains 3-5 machines, and can tolerate crash failures
-in up to 1/2 of the machines, but even a single Byzantine fault can
-destroy the system.
+in less than 1/2 of the machines (e.g., 1 out of 3 or 2 out of 5),
+but even a single Byzantine fault can jeopardize the whole system.

Each offering provides a slightly different implementation of a
featureful key-value store, but all are generally focused around

@@ -66,8 +65,8 @@ configuration, service discovery, locking, leader-election, and so on.

Tendermint is in essence similar software, but with two key differences:

-- It is Byzantine Fault Tolerant, meaning it can only tolerate up to a
-  1/3 of failures, but those failures can include arbitrary behavior -
+- It is Byzantine Fault Tolerant, meaning it can only tolerate less than 1/3
+  of machines failing, but those failures can include arbitrary behavior -
  including hacking and malicious attacks. - It does not specify a
  particular application, like a fancy key-value store. Instead, it
  focuses on arbitrary state machine replication, so developers can build

@@ -106,8 +105,8 @@ docker containers, modules it calls "chaincode". It uses an
implementation of [PBFT](http://pmg.csail.mit.edu/papers/osdi99.pdf)
from a team at IBM that is [augmented to handle potentially
non-deterministic
-chaincode](https://drops.dagstuhl.de/opus/volltexte/2017/7093/pdf/LIPIcs-OPODIS-2016-24.pdf) It is
-possible to implement this docker-based behavior as a ABCI app in
+chaincode](https://drops.dagstuhl.de/opus/volltexte/2017/7093/pdf/LIPIcs-OPODIS-2016-24.pdf).
+It is possible to implement this docker-based behavior as an ABCI app in
Tendermint, though extending Tendermint to handle non-determinism
remains for future work.

@@ -143,24 +142,22 @@ in design and suffers from "spaghetti code".

Another problem with monolithic design is that it limits you to the
language of the blockchain stack (or vice versa). In the case of
Ethereum which supports a Turing-complete bytecode virtual-machine, it
-limits you to languages that compile down to that bytecode; today, those
-are Serpent and Solidity.
+limits you to languages that compile down to that bytecode; while the
+[list](https://github.com/pirapira/awesome-ethereum-virtual-machine#programming-languages-that-compile-into-evm)
+is growing, it is still very limited.

In contrast, our approach is to decouple the consensus engine and P2P
-layers from the details of the application state of the particular
+layers from the details of the state of the particular
blockchain application. We do this by abstracting away the details of
the application to an interface, which is implemented as a socket
protocol.

-Thus we have an interface, the Application BlockChain Interface (ABCI),
-and its primary implementation, the Tendermint Socket Protocol (TSP, or
-Teaspoon).

### Intro to ABCI

-[Tendermint Core](https://github.com/tendermint/tendermint) (the
-"consensus engine") communicates with the application via a socket
-protocol that satisfies the ABCI.
+[Tendermint Core](https://github.com/tendermint/tendermint), the
+"consensus engine", communicates with the application via a socket
+protocol that satisfies the ABCI, the Tendermint Socket Protocol
+(TSP, or Teaspoon).

To draw an analogy, lets talk about a well-known cryptocurrency,
Bitcoin. Bitcoin is a cryptocurrency blockchain where each node

@@ -180,7 +177,7 @@ The application will be responsible for
- Allowing clients to query the UTXO database.

Tendermint is able to decompose the blockchain design by offering a very
-simple API (ie. the ABCI) between the application process and consensus
+simple API (i.e. the ABCI) between the application process and consensus
process.

The ABCI consists of 3 primary message types that get delivered from the

@@ -239,8 +236,7 @@ Solidity on Ethereum is a great language of choice for blockchain
applications because, among other reasons, it is a completely
deterministic programming language. However, it's also possible to
create deterministic applications using existing popular languages like
-Java, C++, Python, or Go. Game programmers and blockchain developers are
-already familiar with creating deterministic programs by avoiding
+Java, C++, Python, or Go, by avoiding
sources of non-determinism such as:

- random number generators (without deterministic seeding)
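
The excerpt above cuts the list of non-determinism sources short, but one classic Go-specific source deserves a concrete sketch (an editorial illustration, not part of the document's own list): map iteration order, which Go deliberately randomizes. A deterministic application iterates over sorted keys instead:

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	balances := map[string]int{"carol": 3, "alice": 1, "bob": 2}

	// Ranging over the map directly is non-deterministic: Go randomizes
	// iteration order, so two nodes hashing state this way can diverge.
	// for k, v := range balances { ... }

	// Deterministic alternative: collect and sort the keys first.
	keys := make([]string, 0, len(balances))
	for k := range balances {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	for _, k := range keys {
		fmt.Println(k, balances[k])
	}
}
```
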
@@ -271,14 +267,15 @@ committed in a chain, with one block at each **height**. A block may
fail to be committed, in which case the protocol moves to the next
**round**, and a new validator gets to propose a block for that height.
Two stages of voting are required to successfully commit a block; we
-call them **pre-vote** and **pre-commit**. A block is committed when
-more than 2/3 of validators pre-commit for the same block in the same
-round.
+call them **pre-vote** and **pre-commit**.

There is a picture of a couple doing the polka because validators are
doing something like a polka dance. When more than two-thirds of the
validators pre-vote for the same block, we call that a **polka**. Every
pre-commit must be justified by a polka in the same round.

+A block is committed when
+more than 2/3 of validators pre-commit for the same block in the same
+round.

Validators may fail to commit a block for a number of reasons; the
current proposer may be offline, or the network may be slow. Tendermint

@@ -1,3 +1,4 @@
#!/bin/bash

-cp -a ../rpc/openapi/ .vuepress/public/rpc/
+mkdir -p .vuepress/public/rpc/
+cp -a ../rpc/openapi/* .vuepress/public/rpc/

@@ -62,5 +62,6 @@ sections.
- [RFC-023: Semi-permanent Testnet](./rfc-023-semi-permanent-testnet.md)
- [RFC-024: Block Structure Consolidation](./rfc-024-block-structure-consolidation.md)
- [RFC-025: Application Defined Transaction Storage](./rfc-025-support-app-side-mempool.md)
+- [RFC-027: P2P Message Bandwidth Report](./rfc-027-p2p-message-bandwidth-report.md)

<!-- - [RFC-NNN: Title](./rfc-NNN-title.md) -->

BIN docs/rfc/images/receive-rate-all.png (new binary file, 204 KiB; not shown)
BIN docs/rfc/images/send-rate-all.png (new binary file, 213 KiB; not shown)
BIN docs/rfc/images/top-3-percent-receive.png (new binary file, 78 KiB; not shown)
BIN docs/rfc/images/top-3-percent-send.png (new binary file, 63 KiB; not shown)

docs/rfc/rfc-027-p2p-message-bandwidth-report.md (new file, 287 lines)
@@ -0,0 +1,287 @@
# RFC 27: P2P Message Bandwidth Report

## Changelog

- Nov 7, 2022: initial draft (@williambanfield)
- Nov 15, 2022: draft completed (@williambanfield)

## Abstract

Node operators and application developers complain that Tendermint nodes consume
large amounts of network bandwidth. This RFC catalogues the major sources of bandwidth
consumption within Tendermint and suggests modifications to Tendermint that may reduce
bandwidth consumption for nodes.

## Background

Multiple teams running validators in production report that their validators
consume a lot of bandwidth. They report that operators running on a network
with hundreds of validators consume multiple terabytes of bandwidth per day.
Prometheus data collected from a validator node running on the Osmosis chain
shows that Tendermint sends and receives large amounts of data to peers. In the
nearly three hours of observation, Tendermint sent nearly 42 gigabytes and
received about 26 gigabytes, for an estimated 366 gigabytes sent daily and 208
gigabytes received daily. While this is shy of the reported terabytes number,
operators running multiple nodes for a 'sentry' pattern could easily send and
receive a terabyte of data.

Sending and receiving large amounts of data has a cost for node operators. Most
cloud platforms charge for network traffic egress. Google Cloud charges between
[$.05 to $.12 per gigabyte of egress traffic][gcloud-pricing], and ingress is
free. Hetzner [charges 1€ per TB used over the 10-20TB base bandwidth per
month][hetzner-pricing], which will be easily hit if multiple terabytes are
sent and received per day. Using the values collected from the validator on
Osmosis, a single node running on Google Cloud may cost $18 to $44 a day. On
Hetzner, the estimated 18TB a month of both sending and receiving may cost
between 0 and 10 Euro a month per node.

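To make the extrapolation explicit, here is a back-of-the-envelope sketch of the arithmetic above (the observation window is approximated as 2.75 hours, since only "nearly three hours" is reported):

```go
package main

import "fmt"

func main() {
	// Observed on the Osmosis validator: ~42 GB sent over roughly 2.75 hours.
	const sentGB, windowHours = 42.0, 2.75

	daily := sentGB * 24 / windowHours // extrapolated gigabytes sent per day
	fmt.Printf("estimated egress: %.0f GB/day\n", daily)

	// Google Cloud egress pricing cited above: $0.05 to $0.12 per gigabyte.
	fmt.Printf("estimated cost: $%.0f to $%.0f per day\n", daily*0.05, daily*0.12)
}
```
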
## Discussion

### Overview of Major Bandwidth Usage

To determine which components of Tendermint were consuming the most bandwidth,
I gathered Prometheus metrics from the [Blockpane][blockpane] validator running
on the Osmosis network for several hours. The data reveal that three message
types account for 98% of the total bandwidth consumed. These message types are
as follows:

1. [consensus.BlockPart][block-part-message]
2. [mempool.Txs][mempool-txs-message]
3. [consensus.Vote][vote-message]

The images below, of p2p data collected from the Blockpane validator, illustrate
the total bandwidth consumption of these three message types.

#### Send:

##### Top 3 Percent:

![top-3-percent-send](./images/top-3-percent-send.png)

##### Rate For All Messages:

![send-rate-all](./images/send-rate-all.png)

#### Receive:

##### Top 3 Percent:

![top-3-percent-receive](./images/top-3-percent-receive.png)

##### Rate For All Messages:

![receive-rate-all](./images/receive-rate-all.png)

### Investigation of Message Usage

This section discusses the usage of each of the three highest-consumption message types.

#### BlockPart Transmission

Sending `BlockPart` messages consumes the most bandwidth out of all p2p
message types as observed on the Blockpane Osmosis validator. In the almost
3-hour observation, the validator sent about 20 gigabytes of `BlockPart`
messages.

A block is proposed each round of Tendermint consensus. The paper does not
define a specific way that the block is to be transmitted, just that all
participants will receive it via a gossip network.

The Go implementation of Tendermint transmits the block in 'parts'. It
serializes the block to wire-format proto and splits this byte representation
into a set of 4 kilobyte arrays and sends these arrays to its peers, each in a
separate message.

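As a rough sketch of that chunking step (the actual part-set code in the `types` package also computes a Merkle proof for each part, which is omitted here), assuming the 4-kilobyte part size cited above:

```go
package blockparts

// splitIntoParts cuts the serialized block bytes into fixed-size chunks.
// Each chunk is what gets wrapped in a BlockPart message and gossiped to a
// peer individually.
func splitIntoParts(blockBytes []byte, partSize int) [][]byte {
	parts := make([][]byte, 0, (len(blockBytes)+partSize-1)/partSize)
	for start := 0; start < len(blockBytes); start += partSize {
		end := start + partSize
		if end > len(blockBytes) {
			end = len(blockBytes)
		}
		parts = append(parts, blockBytes[start:end])
	}
	return parts
}
```
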
The logic for sending `BlockPart` messages resides in the code for the
[consensus.Reactor][gossip-data-routine]. The consensus reactor starts a new
`gossipDataRoutine` for each peer it connects to. This routine repeatedly picks
a part of the block that Tendermint believes the peer does not know about yet
and gossips it to the peer. The set of `BlockParts` that Tendermint considers
its peer to have is only updated in one of four ways:

1. Our peer tells us they have entered a new round [via a `NewRoundStep`
   message][new-round-step-message-send]. This message is only sent when a node
   moves to a new round or height, and it only resets the data we collect about
   a peer's block-part state.
1. [We receive a block part from the peer][block-part-receive].
1. [We send][block-part-send-1] [the peer a block part][block-part-send-2].
1. Our peer tells us which parts of the block they have [via `NewValidBlock`
   messages][new-valid-block-message-send]. This message is only sent when the
   peer has a quorum of prevotes or precommits for a block.

Each node receives block parts from all of its peers. The particular block part
to send at any given time is randomly selected from the set of parts that the
peer node is not yet known to have. Given that these are the only times that
Tendermint learns of its peers' block parts, it's very likely that a node has
an incomplete understanding of its peers' block parts and is transmitting block
parts to a peer that the peer has received from some other node.

Multiple potential mechanisms exist to reduce the number of duplicate block
parts a node receives. One set of mechanisms relies on more frequently
communicating the set of block parts a node needs to its peers. Another
potential mechanism requires a larger overhaul to the way blocks are gossiped
in the network.

#### Mempool Tx Transmission

The Tendermint mempool stages transactions that are yet to be committed to the
blockchain and communicates these transactions to its peers. Each message
contains one transaction. Data collected from the Blockpane node running on
Osmosis indicates that the validator sent about 12 gigabytes of `Txs` messages
during the nearly 3-hour observation period.

The Tendermint mempool starts a new [broadcastTxRoutine][broadcast-tx-routine]
for each peer that it is informed of. The routine sends all transactions that
the mempool is aware of to all peers, with one exception: if the mempool
received a transaction from a peer, it marks it as such and will not resend it
to that peer. Otherwise, it retains no information about which transactions it
already sent to a peer. In some cases it may therefore resend transactions the
peer already has. This can occur if the mempool removes a transaction from the
`CList` data structure used to store the list of transactions while that
transaction is about to be sent, and the transaction was the tail of the
`CList` at the time of removal. This will be more likely to occur if a large
number of transactions from the end of the list are removed during `RecheckTx`,
since multiple transactions will become the tail and then be deleted. It is
unclear at the moment how frequently this occurs on production chains.

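A simplified sketch of the one piece of per-transaction state this implies (the names here are illustrative, not the actual mempool types): the mempool remembers which peers a transaction arrived from, and skips only those peers when broadcasting.

```go
package mempoolexample

// txEntry is an illustrative stand-in for a mempool transaction entry.
type txEntry struct {
	tx      []byte
	senders map[string]bool // peer IDs this tx was received from
}

// shouldSend reports whether the broadcast routine should send the tx to the
// given peer. Only the peers that delivered the tx to us are skipped; every
// other peer may receive it again, which is one way duplicates arise.
func shouldSend(e *txEntry, peerID string) bool {
	return !e.senders[peerID]
}
```
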
Beyond ensuring that transactions are rebroadcast to peers less frequently,
there is not a simple scheme to communicate fewer transactions to peers. Peers
cannot communicate which transactions they need, since they do not know which
transactions exist on the network.

#### Vote Transmission

Tendermint votes, both prevotes and precommits, are central to Tendermint
consensus and are gossiped by all nodes to all peers during each consensus
round. Data collected from the Blockpane node running on Osmosis indicates that
about 9 gigabytes of `Vote` messages were sent during the nearly 3-hour period
of observation. Examination of the [Vote message][vote-msg] indicates that it
contains 184 bytes of data, with the proto encoding adding a few additional
bytes when transmitting.

The Tendermint consensus reactor starts a new
[gossipVotesRoutine][gossip-votes-routine] for each peer that it connects to.
The reactor sends all votes to all peers unless it knows that the peer already
has the vote, or the reactor learns that the peer is in a different round and
that the vote thus no longer applies. Tendermint learns that a peer has a vote
in one of four ways:

1. Tendermint sent the peer the vote.
1. Tendermint received the vote from the peer.
1. The peer [sent a `HasVote` message][apply-has-vote]. This message is broadcast
   to all peers [each time a validator receives a vote it hasn't seen before
   corresponding to its current height and round][publish-event-vote].
1. The peer [sent a `VoteSetBits` message][apply-vote-set-bits]. This message is
   [sent as a response to a peer that sends a `VoteSetMaj23`][vote-set-bits-send].

Given that Tendermint informs all peers of _each_ vote message it receives, all
nodes should be well informed of which votes their peers have. Given that the
vote messages were the third-largest consumer of bandwidth in the observation
on Osmosis, it's possible that this system is not currently working correctly.
Further analysis should examine where votes may be getting retransmitted.

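The bookkeeping described above can be pictured as a per-peer bit array per (height, round, vote type), with all four events reducing to the same "mark this vote as known" operation. A simplified sketch (the real `PeerState` in the consensus reactor is more elaborate):

```go
package consensusexample

// peerVoteState is an illustrative sketch of per-peer vote bookkeeping: one
// flag per validator for each vote type at the peer's current height/round.
type peerVoteState struct {
	height, round int64
	prevotes      []bool // indexed by validator position
	precommits    []bool
}

// setHasVote marks a vote as known to the peer. Sending a vote, receiving a
// vote, and the HasVote/VoteSetBits messages all funnel into an update like
// this one.
func (ps *peerVoteState) setHasVote(height, round int64, precommit bool, valIndex int) {
	if height != ps.height || round != ps.round {
		return // information about another height/round is dropped
	}
	if precommit {
		ps.precommits[valIndex] = true
	} else {
		ps.prevotes[valIndex] = true
	}
}
```
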
### Suggested Improvements to Lower Message Transmission Bandwidth

#### Gossip Known BlockPart Data

The `BlockPart` messages, by far, account for the majority of the data sent to
each peer. At the moment, peers do not inform the node of which block parts
they already have. This means that each block part is _very likely_ to be
transmitted many times to each node. This wasteful consumption is even worse
in networks with large blocks.

The very simple solution to this issue is to copy the technique used in
consensus for informing peers when the node receives a vote. The consensus
reactor can be augmented with a `HasBlockPart` message that is broadcast to
each peer every time the node receives a block part. By informing each peer
every time the node receives a block part, we can drastically reduce the amount
of duplicate data sent to each node. There would be no algorithmic way of
enforcing that a peer accurately reports its block parts, so providing this
message would be a somewhat altruistic action on the part of the node. Such a
system [has been proposed in the past][i627] as well, so this is certainly not
totally new ground.

Measuring the volume of duplicate block parts received before and after this
change would help validate this approach.
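
One plausible shape for such a message, mirroring the fields of the existing `HasVote` message (this type is a sketch of the proposal and does not exist in Tendermint today):

```go
package consensusexample

// HasBlockPart would be broadcast to every peer each time a node receives a
// block part, letting peers drop that part from their gossip candidates.
type HasBlockPart struct {
	Height int64 // height of the block the part belongs to
	Round  int32 // round in which the block was proposed
	Index  int32 // index of the part within the block's part set
}
```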

#### Compress Transmitted Data

Tendermint's data is sent uncompressed on the wire. The messages are not
compressed before sending, and the transport performs no compression either.
Some of the information communicated by Tendermint is a poor candidate for
compression: data such as digital signatures and hashes have high entropy and
therefore do not compress well. However, transactions may contain lots of
information with lower entropy. Compression within Tendermint may be added
at several levels. Compression may be performed at the [Tendermint 'packet'
level][must-wrap-packet] or at the [Tendermint message send
level][message-send].

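As a sketch of what the packet-level option might look like, using the standard library's gzip (a real implementation would likely choose a faster codec and benchmark the CPU cost against the bandwidth saved):

```go
package compressexample

import (
	"bytes"
	"compress/gzip"
)

// compressPacket gzip-compresses serialized packet bytes before they are
// handed to the transport. Signature- and hash-heavy payloads will barely
// shrink; low-entropy transaction bytes may compress well.
func compressPacket(packet []byte) ([]byte, error) {
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	if _, err := zw.Write(packet); err != nil {
		return nil, err
	}
	if err := zw.Close(); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
```
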
#### Transmit Less Data During Block Gossip

Block, vote, and mempool gossiping transmit much of the same data. The mempool
reactor gossips candidate transactions to each peer. The consensus reactor,
when gossiping the votes, sends vote metadata and the digital signature that
signs over that metadata. Finally, when a block is proposed, the proposing node
amalgamates the received votes and a set of transactions, and adds a header to
produce the block. This block is then serialized and gossiped as a list of
bytes. However, the data that the block contains, namely the votes and the
transactions, was most likely _already transmitted to the nodes on the network_
via mempool transaction gossip and consensus vote gossip.

Therefore, block gossip can be updated to transmit a representation of the data
contained in the block that assumes the peers will already have most of this
data. Namely, the block gossip can be updated to only send 1) a list of
transaction hashes and 2) a bit array of votes selected for the block, along
with the header and other required block metadata.

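A sketch of what such a compact representation could look like (none of these types exist today; they only illustrate the shape of the proposal):

```go
package blockgossip

// CompactBlock carries the header plus references to data that peers have
// most likely already received via mempool and vote gossip.
type CompactBlock struct {
	Header   []byte   // serialized header and other required block metadata
	TxHashes [][]byte // hashes identifying the block's transactions
	VoteBits []bool   // which validators' precommits are included in the block
}
```
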
This new proposed method for gossiping block data could accompany a slight
update to the mempool transaction gossip and consensus vote gossip. Since all
of the contents of each block will not be gossiped together, it's possible
that, during block gossip, some nodes are missing a proposed transaction or the
vote of a validator indicated in the new block-gossip format. The mempool and
consensus reactors may therefore be updated to provide `NeedTxs` and
`NeedVotes` messages. Each of these messages would allow a node to request a
set of data from its peers. When a node receives one of these, it will then
transmit the Txs/Votes indicated in the associated message, regardless of
whether it believes it has transmitted them to the peer before. The gossip
layer will ensure that each peer eventually receives all of the data in the
block. However, if a transaction is needed immediately by a peer so that it can
verify and execute a block during consensus, a mechanism such as the `NeedTxs`
and `NeedVotes` messages is required to ensure it receives that data quickly.

The same logic may be applied to evidence transmission as well, since all nodes
should receive evidence and therefore do not need to re-transmit it in a block
part.

A similar idea has been proposed in the past as [Compact Block
Propagation][compact-block-propagation].

## References

[blockpane]: https://www.mintscan.io/osmosis/validators/osmovaloper1z0sh4s80u99l6y9d3vfy582p8jejeeu6tcucs2
[block-part-message]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/proto/tendermint/consensus/types.proto#L44
[mempool-txs-message]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/proto/tendermint/mempool/types.proto#L6
[vote-message]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/proto/tendermint/consensus/types.proto#L51
[gossip-data-routine]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/consensus/reactor.go#L537
[block-part-receive]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/consensus/reactor.go#L324
[block-part-send-1]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/consensus/reactor.go#L566
[block-part-send-2]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/consensus/reactor.go#L684
[new-valid-block-message-send]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/consensus/reactor.go#L268
[new-round-step-message-send]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/consensus/reactor.go#L266
[broadcast-tx-routine]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/mempool/v0/reactor.go#L197
[gossip-votes-routine]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/consensus/reactor.go#L694
[apply-has-vote]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/consensus/reactor.go#L1429
[apply-vote-set-bits]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/consensus/reactor.go#L1445
[publish-event-vote]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/consensus/state.go#L2083
[vote-set-bits-send]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/consensus/reactor.go#L306
[must-wrap-packet]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/p2p/conn/connection.go#L889-L918
[message-send]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/p2p/peer.go#L285
[gcloud-pricing]: https://cloud.google.com/vpc/network-pricing#vpc-pricing
[hetzner-pricing]: https://docs.hetzner.com/robot/general/traffic
[vote-msg]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/proto/tendermint/types/types.pb.go#L468
[i627]: https://github.com/tendermint/tendermint/issues/627
[compact-block-propagation]: https://github.com/tendermint/tendermint/issues/7932

@@ -18,50 +18,52 @@ Listen address can be changed in the config file (see

The following metrics are available:

| **Name** | **Type** | **Tags** | **Description** |
|----------|----------|----------|-----------------|
| abci_connection_method_timing_seconds | Histogram | method, type | Timings for each of the ABCI methods |
| consensus_height | Gauge | | Height of the chain |
| consensus_validators | Gauge | | Number of validators |
| consensus_validators_power | Gauge | | Total voting power of all validators |
| consensus_validator_power | Gauge | | Voting power of the node if in the validator set |
| consensus_validator_last_signed_height | Gauge | | Last height the node signed a block, if the node is a validator |
| consensus_validator_missed_blocks | Gauge | | Total amount of blocks missed for the node, if the node is a validator |
| consensus_missing_validators | Gauge | | Number of validators who did not sign |
| consensus_missing_validators_power | Gauge | | Total voting power of the missing validators |
| consensus_byzantine_validators | Gauge | | Number of validators who tried to double sign |
| consensus_byzantine_validators_power | Gauge | | Total voting power of the byzantine validators |
| consensus_block_interval_seconds | Histogram | | Time between this and last block (Block.Header.Time) in seconds |
| consensus_rounds | Gauge | | Number of rounds |
| consensus_num_txs | Gauge | | Number of transactions |
| consensus_total_txs | Gauge | | Total number of transactions committed |
| consensus_block_parts | counter | peer_id | number of blockparts transmitted by peer |
| consensus_latest_block_height | gauge | | /status sync_info number |
| consensus_block_syncing | gauge | | either 0 (not block syncing) or 1 (syncing) |
| consensus_state_syncing | gauge | | either 0 (not state syncing) or 1 (syncing) |
| consensus_block_size_bytes | Gauge | | Block size in bytes |
| consensus_step_duration | Histogram | step | Histogram of durations for each step in the consensus protocol |
| consensus_round_duration | Histogram | | Histogram of durations for all the rounds that have occurred since the process started |
| consensus_block_gossip_parts_received | Counter | matches_current | Number of block parts received by the node |
| consensus_quorum_prevote_delay | Gauge | | Interval in seconds between the proposal timestamp and the timestamp of the earliest prevote that achieved a quorum |
| consensus_full_prevote_delay | Gauge | | Interval in seconds between the proposal timestamp and the timestamp of the latest prevote in a round where all validators voted |
| consensus_proposal_receive_count | Counter | status | Total number of proposals received by the node since process start |
| consensus_proposal_create_count | Counter | | Total number of proposals created by the node since process start |
| consensus_round_voting_power_percent | Gauge | vote_type | A value between 0 and 1.0 representing the percentage of the total voting power per vote type received within a round |
| consensus_late_votes | Counter | vote_type | Number of votes received by the node since process start that correspond to earlier heights and rounds than this node is currently in. |
| p2p_peers | Gauge | | Number of peers node's connected to |
| p2p_peer_receive_bytes_total | counter | peer_id, chID | number of bytes per channel received from a given peer |
| p2p_peer_send_bytes_total | counter | peer_id, chID | number of bytes per channel sent to a given peer |
| p2p_peer_pending_send_bytes | gauge | peer_id | number of pending bytes to be sent to a given peer |
| p2p_num_txs | gauge | peer_id | number of transactions submitted by each peer_id |
| p2p_pending_send_bytes | gauge | peer_id | amount of data pending to be sent to peer |
| mempool_size | Gauge | | Number of uncommitted transactions |
| mempool_tx_size_bytes | histogram | | transaction sizes in bytes |
| mempool_failed_txs | counter | | number of failed transactions |
| mempool_recheck_times | counter | | number of transactions rechecked in the mempool |
| state_block_processing_time | histogram | | time between BeginBlock and EndBlock in ms |
| state_consensus_param_updates | Counter | | number of consensus parameter updates returned by the application since process start |
| state_validator_set_updates | Counter | | number of validator set updates returned by the application since process start |
| **Name** | **Type** | **Tags** | **Description** |
|----------|----------|----------|-----------------|
| abci\_connection\_method\_timing\_seconds | Histogram | method, type | Timings for each of the ABCI methods |
| blocksync\_syncing | Gauge | | Either 0 (not block syncing) or 1 (syncing) |
| consensus\_height | Gauge | | Height of the chain |
| consensus\_validators | Gauge | | Number of validators |
| consensus\_validators\_power | Gauge | | Total voting power of all validators |
| consensus\_validator\_power | Gauge | | Voting power of the node if in the validator set |
| consensus\_validator\_last\_signed\_height | Gauge | | Last height the node signed a block, if the node is a validator |
| consensus\_validator\_missed\_blocks | Gauge | | Total amount of blocks missed for the node, if the node is a validator |
| consensus\_missing\_validators | Gauge | | Number of validators who did not sign |
| consensus\_missing\_validators\_power | Gauge | | Total voting power of the missing validators |
| consensus\_byzantine\_validators | Gauge | | Number of validators who tried to double sign |
| consensus\_byzantine\_validators\_power | Gauge | | Total voting power of the byzantine validators |
| consensus\_block\_interval\_seconds | Histogram | | Time between this and last block (Block.Header.Time) in seconds |
| consensus\_rounds | Gauge | | Number of rounds |
| consensus\_num\_txs | Gauge | | Number of transactions |
| consensus\_total\_txs | Gauge | | Total number of transactions committed |
| consensus\_block\_parts | Counter | peer\_id | Number of blockparts transmitted by peer |
| consensus\_latest\_block\_height | Gauge | | /status sync\_info number |
| consensus\_block\_size\_bytes | Gauge | | Block size in bytes |
| consensus\_step\_duration | Histogram | step | Histogram of durations for each step in the consensus protocol |
| consensus\_round\_duration | Histogram | | Histogram of durations for all the rounds that have occurred since the process started |
| consensus\_block\_gossip\_parts\_received | Counter | matches\_current | Number of block parts received by the node |
| consensus\_quorum\_prevote\_delay | Gauge | | Interval in seconds between the proposal timestamp and the timestamp of the earliest prevote that achieved a quorum |
| consensus\_full\_prevote\_delay | Gauge | | Interval in seconds between the proposal timestamp and the timestamp of the latest prevote in a round where all validators voted |
| consensus\_proposal\_receive\_count | Counter | status | Total number of proposals received by the node since process start |
| consensus\_proposal\_create\_count | Counter | | Total number of proposals created by the node since process start |
| consensus\_round\_voting\_power\_percent | Gauge | vote\_type | A value between 0 and 1.0 representing the percentage of the total voting power per vote type received within a round |
| consensus\_late\_votes | Counter | vote\_type | Number of votes received by the node since process start that correspond to earlier heights and rounds than this node is currently in. |
| p2p\_message\_send\_bytes\_total | Counter | message\_type | Number of bytes sent to all peers per message type |
| p2p\_message\_receive\_bytes\_total | Counter | message\_type | Number of bytes received from all peers per message type |
| p2p\_peers | Gauge | | Number of peers node's connected to |
| p2p\_peer\_receive\_bytes\_total | Counter | peer\_id, chID | Number of bytes per channel received from a given peer |
| p2p\_peer\_send\_bytes\_total | Counter | peer\_id, chID | Number of bytes per channel sent to a given peer |
| p2p\_peer\_pending\_send\_bytes | Gauge | peer\_id | Number of pending bytes to be sent to a given peer |
| p2p\_num\_txs | Gauge | peer\_id | Number of transactions submitted by each peer\_id |
| p2p\_pending\_send\_bytes | Gauge | peer\_id | Amount of data pending to be sent to peer |
| mempool\_size | Gauge | | Number of uncommitted transactions |
| mempool\_tx\_size\_bytes | Histogram | | Transaction sizes in bytes |
| mempool\_failed\_txs | Counter | | Number of failed transactions |
| mempool\_recheck\_times | Counter | | Number of transactions rechecked in the mempool |
| state\_block\_processing\_time | Histogram | | Time between BeginBlock and EndBlock in ms |
| state\_consensus\_param\_updates | Counter | | Number of consensus parameter updates returned by the application since process start |
| state\_validator\_set\_updates | Counter | | Number of validator set updates returned by the application since process start |
| statesync\_syncing | Gauge | | Either 0 (not state syncing) or 1 (syncing) |

## Useful queries

@@ -55,3 +55,47 @@ given destination directory. Each archive will contain:

Note: goroutine.out and heap.out will only be written if a profile address is
provided and is operational. This command is blocking and will log any error.

## Tendermint Inspect

Tendermint includes an `inspect` command for querying Tendermint's state store and block
store over Tendermint RPC.

When the Tendermint consensus engine detects inconsistent state, it will crash the
entire Tendermint process.
While in this inconsistent state, a node running Tendermint's consensus engine will not start up.
The `inspect` command runs only a subset of Tendermint's RPC endpoints for querying the block store
and state store.
`inspect` allows operators to query a read-only view of the state.
`inspect` does not run the consensus engine at all and can therefore be used to debug
processes that have crashed due to inconsistent state.

### Running inspect

Start up the `inspect` tool on the machine where Tendermint crashed using:
```bash
tendermint inspect --home=</path/to/app.d>
```

`inspect` will use the data directory specified in your Tendermint configuration file.
`inspect` will also run the RPC server at the address specified in your Tendermint configuration file.

### Using inspect

With the `inspect` server running, you can access RPC endpoints that are critically important
for debugging.
Calling the `/status`, `/consensus_state` and `/dump_consensus_state` RPC endpoints
will return useful information about the Tendermint consensus state.

To start the `inspect` process, run
```bash
tendermint inspect
```

### RPC endpoints

The list of available RPC endpoints can be found by making a request to the RPC port.
For an `inspect` process running on `127.0.0.1:26657`, navigate your browser to
`http://127.0.0.1:26657/` to retrieve the list of enabled RPC endpoints.

Additional information on the Tendermint RPC endpoints can be found in the [rpc documentation](https://docs.tendermint.com/master/rpc).
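
For illustration, any HTTP client can be used against these endpoints once `inspect` is running; a minimal Go client (assuming the default RPC listen address of `127.0.0.1:26657`) might look like:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Query the node status from a running `tendermint inspect` process.
	resp, err := http.Get("http://127.0.0.1:26657/status")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body)) // JSON-RPC response with node and sync info
}
```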

File diff suppressed because it is too large

@@ -5,6 +5,7 @@ import (
	"time"

	"github.com/cosmos/gogoproto/proto"

	clist "github.com/tendermint/tendermint/libs/clist"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/p2p"

70 go.mod

@@ -21,36 +21,37 @@ require (
	github.com/ory/dockertest v3.3.5+incompatible
	github.com/pkg/errors v0.9.1
	github.com/pointlander/peg v1.0.1
	github.com/prometheus/client_golang v1.13.0
	github.com/prometheus/client_golang v1.14.0
	github.com/prometheus/client_model v0.3.0
	github.com/prometheus/common v0.37.0
	github.com/prometheus/common v0.39.0
	github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475
	github.com/rs/cors v1.8.2
	github.com/sasha-s/go-deadlock v0.3.1
	github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa
	github.com/spf13/cobra v1.6.1
	github.com/spf13/viper v1.13.0
	github.com/spf13/viper v1.14.0
	github.com/stretchr/testify v1.8.1
	github.com/tendermint/tm-db v0.6.6
	golang.org/x/crypto v0.1.0
	golang.org/x/net v0.1.0
	google.golang.org/grpc v1.50.1
	golang.org/x/crypto v0.4.0
	golang.org/x/net v0.4.0
	google.golang.org/grpc v1.51.0
)

require (
	github.com/bufbuild/buf v1.9.0
	github.com/bufbuild/buf v1.10.0
	github.com/creachadair/taskgroup v0.3.2
	github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7
)

require (
	github.com/btcsuite/btcd/btcec/v2 v2.3.0
	github.com/btcsuite/btcd/btcutil v1.1.2
	github.com/cosmos/gogoproto v1.4.2
	github.com/gofrs/uuid v4.3.0+incompatible
	github.com/btcsuite/btcd/btcec/v2 v2.3.2
	github.com/btcsuite/btcd/btcutil v1.1.3
	github.com/cosmos/gogoproto v1.4.3
	github.com/gofrs/uuid v4.3.1+incompatible
	github.com/google/uuid v1.3.0
	github.com/oasisprotocol/curve25519-voi v0.0.0-20220708102147-0a8a51822cae
	github.com/vektra/mockery/v2 v2.14.1
	github.com/vektra/mockery/v2 v2.15.0
	golang.org/x/sync v0.1.0
	gonum.org/v1/gonum v0.12.0
	google.golang.org/protobuf v1.28.2-0.20220831092852-f930b1dc76e8
)
@@ -78,14 +79,14 @@ require (
	github.com/bombsimon/wsl/v3 v3.3.0 // indirect
	github.com/breml/bidichk v0.2.3 // indirect
	github.com/breml/errchkjson v0.3.0 // indirect
	github.com/bufbuild/connect-go v1.0.0 // indirect
	github.com/bufbuild/connect-go v1.1.0 // indirect
	github.com/bufbuild/protocompile v0.1.0 // indirect
	github.com/butuzov/ireturn v0.1.1 // indirect
	github.com/cespare/xxhash v1.1.0 // indirect
	github.com/cespare/xxhash/v2 v2.1.2 // indirect
	github.com/charithe/durationcheck v0.0.9 // indirect
	github.com/chavacava/garif v0.0.0-20220630083739-93517212f375 // indirect
	github.com/containerd/containerd v1.6.8 // indirect
	github.com/containerd/containerd v1.6.9 // indirect
	github.com/containerd/continuity v0.3.0 // indirect
	github.com/containerd/typeurl v1.0.2 // indirect
	github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
@@ -98,7 +99,7 @@ require (
	github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de // indirect
	github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 // indirect
	github.com/docker/distribution v2.8.1+incompatible // indirect
	github.com/docker/docker v20.10.19+incompatible // indirect
	github.com/docker/docker v20.10.21+incompatible // indirect
	github.com/docker/go-connections v0.4.0 // indirect
	github.com/docker/go-units v0.5.0 // indirect
	github.com/dustin/go-humanize v1.0.0 // indirect
@@ -106,8 +107,9 @@ require (
	github.com/ettle/strcase v0.1.1 // indirect
	github.com/fatih/color v1.13.0 // indirect
	github.com/fatih/structtag v1.2.0 // indirect
	github.com/felixge/fgprof v0.9.3 // indirect
	github.com/firefart/nonamedreturns v1.0.4 // indirect
	github.com/fsnotify/fsnotify v1.5.4 // indirect
	github.com/fsnotify/fsnotify v1.6.0 // indirect
	github.com/fzipp/gocyclo v0.6.0 // indirect
	github.com/go-chi/chi/v5 v5.0.7 // indirect
	github.com/go-critic/go-critic v0.6.5 // indirect
@@ -137,6 +139,7 @@ require (
	github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 // indirect
	github.com/google/btree v1.0.0 // indirect
	github.com/google/go-cmp v0.5.9 // indirect
	github.com/google/pprof v0.0.0-20221102093814-76f304f74e5e // indirect
	github.com/gordonklaus/ineffassign v0.0.0-20210914165742-4cc7213b9bc8 // indirect
	github.com/gostaticanalysis/analysisutil v0.7.1 // indirect
	github.com/gostaticanalysis/comment v1.4.2 // indirect
@@ -158,7 +161,7 @@ require (
	github.com/kisielk/errcheck v1.6.2 // indirect
	github.com/kisielk/gotool v1.0.0 // indirect
	github.com/kkHAIKE/contextcheck v1.1.3 // indirect
	github.com/klauspost/compress v1.15.11 // indirect
	github.com/klauspost/compress v1.15.12 // indirect
	github.com/klauspost/pgzip v1.2.5 // indirect
	github.com/kulti/thelper v0.6.3 // indirect
	github.com/kunwardeep/paralleltest v1.0.6 // indirect
@@ -174,12 +177,12 @@ require (
	github.com/mattn/go-colorable v0.1.13 // indirect
	github.com/mattn/go-isatty v0.0.16 // indirect
	github.com/mattn/go-runewidth v0.0.9 // indirect
	github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
	github.com/mbilski/exhaustivestruct v1.2.0 // indirect
	github.com/mgechev/revive v1.2.4 // indirect
	github.com/mitchellh/go-homedir v1.1.0 // indirect
	github.com/mitchellh/mapstructure v1.5.0 // indirect
	github.com/moby/buildkit v0.10.4 // indirect
	github.com/moby/buildkit v0.10.5 // indirect
	github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae // indirect
	github.com/moricho/tparallel v0.2.1 // indirect
	github.com/morikuni/aec v1.0.0 // indirect
@@ -196,7 +199,7 @@ require (
	github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 // indirect
	github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d // indirect
	github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect
	github.com/pkg/profile v1.6.0 // indirect
	github.com/pkg/profile v1.7.0 // indirect
	github.com/pmezard/go-difflib v1.0.0 // indirect
	github.com/pointlander/compress v1.1.1-0.20190518213731-ff44bd196cc3 // indirect
	github.com/pointlander/jetset v1.0.1-0.20190518214125-eee7eff80bd4 // indirect
@@ -222,7 +225,7 @@ require (
	github.com/sivchari/tenv v1.7.0 // indirect
	github.com/sonatard/noctx v0.0.1 // indirect
	github.com/sourcegraph/go-diff v0.6.1 // indirect
	github.com/spf13/afero v1.8.2 // indirect
	github.com/spf13/afero v1.9.2 // indirect
	github.com/spf13/cast v1.5.0 // indirect
	github.com/spf13/jwalterweatherman v1.1.0 // indirect
	github.com/spf13/pflag v1.0.5 // indirect
@@ -244,23 +247,22 @@ require (
	github.com/yeya24/promlinter v0.2.0 // indirect
	gitlab.com/bosi/decorder v0.2.3 // indirect
	go.etcd.io/bbolt v1.3.6 // indirect
	go.opencensus.io v0.23.0 // indirect
	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.36.3 // indirect
	go.opentelemetry.io/otel v1.11.0 // indirect
	go.opentelemetry.io/otel/metric v0.32.3 // indirect
	go.opentelemetry.io/otel/trace v1.11.0 // indirect
	go.opencensus.io v0.24.0 // indirect
	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.36.4 // indirect
	go.opentelemetry.io/otel v1.11.1 // indirect
	go.opentelemetry.io/otel/metric v0.33.0 // indirect
	go.opentelemetry.io/otel/trace v1.11.1 // indirect
	go.uber.org/atomic v1.10.0 // indirect
	go.uber.org/multierr v1.8.0 // indirect
	go.uber.org/zap v1.23.0 // indirect
	golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect
	golang.org/x/exp v0.0.0-20221031165847-c99f073a8326 // indirect
	golang.org/x/exp/typeparams v0.0.0-20220827204233-334a2380cb91 // indirect
	golang.org/x/mod v0.6.0 // indirect
	golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0 // indirect
	golang.org/x/sys v0.1.0 // indirect
	golang.org/x/term v0.1.0 // indirect
	golang.org/x/text v0.4.0 // indirect
	golang.org/x/tools v0.2.0 // indirect
	google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a // indirect
	golang.org/x/mod v0.7.0 // indirect
	golang.org/x/sys v0.3.0 // indirect
	golang.org/x/term v0.3.0 // indirect
	golang.org/x/text v0.5.0 // indirect
	golang.org/x/tools v0.3.0 // indirect
	google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c // indirect
	gopkg.in/ini.v1 v1.67.0 // indirect
	gopkg.in/yaml.v2 v2.4.0 // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect

162 go.sum

@@ -23,14 +23,15 @@ cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPT
cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
cloud.google.com/go v0.100.2 h1:t9Iw5QH5v4XtlEQaCtUY7x6sCABps8sW0acw7e2WQ6Y=
cloud.google.com/go v0.105.0 h1:DNtEKRBAAzeS4KyIory52wWHuClNaXJ5x1F7xa4q+5Y=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/compute v1.6.1 h1:2sMmt8prCn7DPaG4Pmh0N3Inmc8cT8ae5k1M6VJ9Wqc=
cloud.google.com/go/compute v1.12.1 h1:gKVJMEyqV5c/UnpzjjQbo3Rjvvqpr9B1DFSbJC4OXr0=
cloud.google.com/go/compute/metadata v0.2.1 h1:efOwf5ymceDhK6PKMnnrTHP4pppY5L22mle96M1yP48=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
@@ -151,12 +152,12 @@ github.com/btcsuite/btcd v0.23.0 h1:V2/ZgjfDFIygAX3ZapeigkVBoVUtOJKSwrhZdlpSvaA=
github.com/btcsuite/btcd v0.23.0/go.mod h1:0QJIIN1wwIXF/3G/m87gIwGniDMDQqjVn4SZgnFpsYY=
github.com/btcsuite/btcd/btcec/v2 v2.1.0/go.mod h1:2VzYrv4Gm4apmbVVsSq5bqf1Ec8v56E48Vt0Y/umPgA=
github.com/btcsuite/btcd/btcec/v2 v2.1.3/go.mod h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE=
github.com/btcsuite/btcd/btcec/v2 v2.3.0 h1:S/6K1GEwlEsFzZP4cOOl5mg6PEd/pr0zz7hvXcaxhJ4=
github.com/btcsuite/btcd/btcec/v2 v2.3.0/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04=
github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U=
github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04=
github.com/btcsuite/btcd/btcutil v1.0.0/go.mod h1:Uoxwv0pqYWhD//tfTiipkxNfdhG9UrLwaeswfjfdF0A=
github.com/btcsuite/btcd/btcutil v1.1.0/go.mod h1:5OapHB7A2hBBWLm48mmw4MOHNJCcUBTwmWH/0Jn8VHE=
github.com/btcsuite/btcd/btcutil v1.1.2 h1:XLMbX8JQEiwMcYft2EGi8zPUkoa0abKIU6/BJSRsjzQ=
github.com/btcsuite/btcd/btcutil v1.1.2/go.mod h1:UR7dsSJzJUfMmFiiLlIrMq1lS9jh9EdCV7FStZSnpi0=
github.com/btcsuite/btcd/btcutil v1.1.3 h1:xfbtw8lwpp0G6NwSHb+UE67ryTFHJAiNuipusjXSohQ=
github.com/btcsuite/btcd/btcutil v1.1.3/go.mod h1:UR7dsSJzJUfMmFiiLlIrMq1lS9jh9EdCV7FStZSnpi0=
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U=
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
@@ -171,10 +172,10 @@ github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku
github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
github.com/bufbuild/buf v1.9.0 h1:8a60qapVuRj6crerWR0rny4UUV/MhZSL5gagJuBxmx8=
github.com/bufbuild/buf v1.9.0/go.mod h1:1Q+rMHiMVcfgScEF/GOldxmu4o9TrQ2sQQh58K6MscE=
github.com/bufbuild/connect-go v1.0.0 h1:htSflKUT8y1jxhoPhPYTZMrsY3ipUXjjrbcZR5O2cVo=
github.com/bufbuild/connect-go v1.0.0/go.mod h1:9iNvh/NOsfhNBUH5CtvXeVUskQO1xsrEviH7ZArwZ3I=
github.com/bufbuild/buf v1.10.0 h1:t6rV4iP1cs/sJH5SYvcLanOshLvmtvwSC+Mt+GfG05s=
github.com/bufbuild/buf v1.10.0/go.mod h1:79BrOWh8uX1a0SVSoPyeYgtP0+Y0n5J3Tt6kjTSkLoU=
github.com/bufbuild/connect-go v1.1.0 h1:AUgqqO2ePdOJSpPOep6BPYz5v2moW1Lb8sQh0EeRzQ8=
github.com/bufbuild/connect-go v1.1.0/go.mod h1:9iNvh/NOsfhNBUH5CtvXeVUskQO1xsrEviH7ZArwZ3I=
github.com/bufbuild/protocompile v0.1.0 h1:HjgJBI85hY/qmW5tw/66sNDZ7z0UDdVSi/5r40WHw4s=
github.com/bufbuild/protocompile v0.1.0/go.mod h1:ix/MMMdsT3fzxfw91dvbfzKW3fRRnuPCP47kpAm5m/4=
github.com/butuzov/ireturn v0.1.1 h1:QvrO2QF2+/Cx1WA/vETCIYBKtRjc30vesdoPUNo1EbY=
@@ -219,8 +220,8 @@ github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:z
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ=
github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
github.com/containerd/containerd v1.6.8 h1:h4dOFDwzHmqFEP754PgfgTeVXFnLiRc6kiqC7tplDJs=
github.com/containerd/containerd v1.6.8/go.mod h1:By6p5KqPK0/7/CgO/A6t/Gz+CUYUu2zf1hUaaymVXB0=
github.com/containerd/containerd v1.6.9 h1:IN/r8DUes/B5lEGTNfIiUkfZBtIQJGx2ai703dV6lRA=
github.com/containerd/containerd v1.6.9/go.mod h1:XVicUvkxOrftE2Q1YWUXgZwkkAxwQYNOFzYWvfVfEfQ=
github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM=
github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg=
@@ -240,8 +241,8 @@ github.com/coreos/go-systemd/v22 v22.3.3-0.20220203105225-a9a7ef127534/go.mod h1
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cosmos/go-bip39 v0.0.0-20180819234021-555e2067c45d/go.mod h1:tSxLoYXyBmiFeKpvmq4dzayMdCjCnu8uqmCysIGBT2Y=
github.com/cosmos/gogoproto v1.4.2 h1:UeGRcmFW41l0G0MiefWhkPEVEwvu78SZsHBvI78dAYw=
github.com/cosmos/gogoproto v1.4.2/go.mod h1:cLxOsn1ljAHSV527CHOtaIP91kK6cCrZETRBrkzItWU=
github.com/cosmos/gogoproto v1.4.3 h1:RP3yyVREh9snv/lsOvmsAPQt8f44LgL281X0IOIhhcI=
github.com/cosmos/gogoproto v1.4.3/go.mod h1:0hLIG5TR7IvV1fme1HCFKjfzW9X2x0Mo+RooWXCnOWU=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
@@ -283,8 +284,8 @@ github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8
github.com/docker/cli v20.10.17+incompatible h1:eO2KS7ZFeov5UJeaDmIs1NFEDRf32PaqRpvoEkKBy5M=
github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v20.10.19+incompatible h1:lzEmjivyNHFHMNAFLXORMBXyGIhw/UP4DvJwvyKYq64=
github.com/docker/docker v20.10.19+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v20.10.21+incompatible h1:UTLdBmHk3bEY+w8qeO5KttOhy6OmXWsl/FEet9Uswog=
github.com/docker/docker v20.10.21+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
@@ -324,6 +325,8 @@ github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4=
github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94=
github.com/felixge/fgprof v0.9.3 h1:VvyZxILNuCiUCSXtPtYmmtGvb65nqXh2QFWc0Wpf2/g=
github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw=
github.com/firefart/nonamedreturns v1.0.4 h1:abzI1p7mAEPYuR4A+VLKn4eNDOycjYo2phmY9sfv40Y=
github.com/firefart/nonamedreturns v1.0.4/go.mod h1:TDhe/tjI1BXo48CmYbUduTV7BdIga8MAO/xbKdcVsGI=
github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
@@ -336,8 +339,8 @@ github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM
github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI=
github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo=
github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
@@ -398,8 +401,8 @@ github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5x
github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
github.com/gofrs/uuid v4.3.0+incompatible h1:CaSVZxm5B+7o45rtab4jC2G37WGYX1zQfuU2i6DSvnc=
github.com/gofrs/uuid v4.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gofrs/uuid v4.3.1+incompatible h1:0/KbAdpx3UXAx1kEOWHJeOkpbgRFGHVgv+CFIY7dBJI=
github.com/gofrs/uuid v4.3.1+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
@@ -505,6 +508,9 @@ github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLe
github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg=
github.com/google/pprof v0.0.0-20221102093814-76f304f74e5e h1:F1LLQqQ8WoIbyoxLUY+JUZe1kuHdxThM6CPUATzE6Io=
github.com/google/pprof v0.0.0-20221102093814-76f304f74e5e/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -605,6 +611,7 @@ github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmK
github.com/hudl/fargo v1.4.0/go.mod h1:9Ai6uvFy5fQNq6VPKtg+Ceq1+eTY4nKUlR2JElEOcDo=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc=
@@ -619,7 +626,7 @@ github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jgautheron/goconst v1.5.1 h1:HxVbL1MhydKs8R8n/HE5NPvzfaYmQJA3o879lE4+WcM=
github.com/jgautheron/goconst v1.5.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4=
github.com/jhump/protoreflect v1.13.1-0.20220928232736-101791cb1b4c h1:XImQJfpJLmGEEd8ll5yPVyL/aEvmgGHW4WYTyNseLOM=
github.com/jhump/protoreflect v1.14.0 h1:MBbQK392K3u8NTLbKOCIi3XdI+y+c6yt5oMq0X3xviw=
github.com/jingyugao/rowserrcheck v1.1.1 h1:zibz55j/MJtLsjP1OF4bSdgXxwL1b+Vn7Tjzq7gFzUs=
github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c=
github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af h1:KA9BjwUk7KlCh6S9EAGWBt1oExIUv9WyNCiRz5amv48=
@@ -659,8 +666,8 @@ github.com/kkHAIKE/contextcheck v1.1.3/go.mod h1:PG/cwd6c0705/LM0KTr1acO2gORUxkS
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.15.11 h1:Lcadnb3RKGin4FYM/orgq0qde+nc15E5Cbqg4B9Sx9c=
github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
github.com/klauspost/compress v1.15.12 h1:YClS/PImqYbn+UILDnqxQCZ3RehC9N318SU3kElDUEM=
github.com/klauspost/compress v1.15.12/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -735,8 +742,8 @@ github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m
github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-sqlite3 v1.14.9 h1:10HX2Td0ocZpYEjhilsuo6WWtUqttj2Kb0KtD86/KYA=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/mbilski/exhaustivestruct v1.2.0 h1:wCBmUnSYufAHO6J4AVWY6ff+oxWxsVFrwgOdMUQePUo=
github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc=
github.com/mgechev/revive v1.2.4 h1:+2Hd/S8oO2H0Ikq2+egtNwQsVhAeELHjxjIUFX5ajLI=
@@ -763,8 +770,8 @@ github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR
github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/moby/buildkit v0.10.4 h1:FvC+buO8isGpUFZ1abdSLdGHZVqg9sqI4BbFL8tlzP4=
github.com/moby/buildkit v0.10.4/go.mod h1:Yajz9vt1Zw5q9Pp4pdb3TCSUXJBIroIQGQ3TTs/sLug=
github.com/moby/buildkit v0.10.5 h1:d9krS/lG3dn6N7y+R8o9PTgIixlYAaDk35f3/B4jZOw=
github.com/moby/buildkit v0.10.5/go.mod h1:Yajz9vt1Zw5q9Pp4pdb3TCSUXJBIroIQGQ3TTs/sLug=
github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae h1:O4SWKdcHVCvYqyDV+9CJA1fcDN2L11Bule0iFy3YlAI=
@@ -798,7 +805,6 @@ github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 h1:4kuARK6Y6FxaNu/BnU2OAaLF86eTVhP2hjTB6iMvItA=
github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nishanths/exhaustive v0.8.3 h1:pw5O09vwg8ZaditDp/nQRqVnrMczSJDxRDJMowvhsrM=
github.com/nishanths/exhaustive v0.8.3/go.mod h1:qj+zJJUgJ76tR92+25+03oYUhzF4R7/2Wk7fGTfCHmg=
@@ -886,8 +892,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
github.com/pkg/profile v1.6.0 h1:hUDfIISABYI59DyeB3OTay/HxSRwTQ8rB/H83k6r5dM=
github.com/pkg/profile v1.6.0/go.mod h1:qBsxPvzyUincmltOk6iyRVxHYg4adc0OFOv72ZdLa18=
github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA=
github.com/pkg/profile v1.7.0/go.mod h1:8Uer0jas47ZQMJ7VD+OHknK4YDY07LPUC6dEvqDjvNo=
github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
@@ -912,8 +918,8 @@ github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP
|
||||
github.com/prometheus/client_golang v1.8.0/go.mod h1:O9VU6huf47PktckDQfMTX0Y8tY0/7TSWwj+ITvv0TnM=
|
||||
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
|
||||
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
|
||||
github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU=
|
||||
github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ=
|
||||
github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
|
||||
github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
@@ -933,8 +939,8 @@ github.com/prometheus/common v0.14.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16
|
||||
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
|
||||
github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
|
||||
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
|
||||
github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
|
||||
github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
|
||||
github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI=
|
||||
github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
@@ -1034,8 +1040,8 @@ github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0b
|
||||
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
|
||||
github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
|
||||
github.com/spf13/afero v1.8.2 h1:xehSyVa0YnHWsJ49JFljMpg1HX19V6NDZ1fkm1Xznbo=
|
||||
github.com/spf13/afero v1.8.2/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo=
|
||||
github.com/spf13/afero v1.9.2 h1:j49Hj62F0n+DaZ1dDCvhABaPNSGNkt32oRFxI33IEMw=
|
||||
github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y=
|
||||
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=
|
||||
@@ -1059,8 +1065,8 @@ github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/y
|
||||
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
|
||||
github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
|
||||
github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
|
||||
github.com/spf13/viper v1.13.0 h1:BWSJ/M+f+3nmdz9bxB+bWX28kkALN2ok11D0rSo8EJU=
|
||||
github.com/spf13/viper v1.13.0/go.mod h1:Icm2xNL3/8uyh/wFuB1jI7TiTNKp8632Nwegu+zgdYw=
|
||||
github.com/spf13/viper v1.14.0 h1:Rg7d3Lo706X9tHsJMUjdiwMpHB7W8WnSVOssIY+JElU=
|
||||
github.com/spf13/viper v1.14.0/go.mod h1:WT//axPky3FdvXHzGw33dNdXXXfFQqmEalje+egj8As=
|
||||
github.com/ssgreg/nlreturn/v2 v2.2.1 h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YEwQ0=
|
||||
github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I=
|
||||
github.com/stbenjam/no-sprintf-host-port v0.1.1 h1:tYugd/yrm1O0dV+ThCbaKZh195Dfm07ysF0U6JQXczc=
|
||||
@@ -1128,8 +1134,8 @@ github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijb
|
||||
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
||||
github.com/uudashr/gocognit v1.0.6 h1:2Cgi6MweCsdB6kpcVQp7EW4U23iBFQWfTXiWlyp842Y=
|
||||
github.com/uudashr/gocognit v1.0.6/go.mod h1:nAIUuVBnYU7pcninia3BHOvQkpQCeO76Uscky5BOwcY=
|
||||
github.com/vektra/mockery/v2 v2.14.1 h1:Xamr4zUkFBDGdZhJ6iCiJ1AwkGRmUgZd8zkwjRXt+TU=
|
||||
github.com/vektra/mockery/v2 v2.14.1/go.mod h1:bnD1T8tExSgPD1ripLkDbr60JA9VtQeu12P3wgLZd7M=
|
||||
github.com/vektra/mockery/v2 v2.15.0 h1:5Egbxoancm1hhkJUoAF+cf0FBzC9oxS28LL/ZKbC980=
|
||||
github.com/vektra/mockery/v2 v2.15.0/go.mod h1:RswGtsqDbCR9j4UcgBQuAZY7OFxI+TgtHevc0gR0kCY=
|
||||
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
|
||||
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
|
||||
@@ -1168,16 +1174,17 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
|
||||
go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
|
||||
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.36.3 h1:syAz40OyelLZo42+3U68Phisvrx4qh+4wpdZw7eUUdY=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.36.3/go.mod h1:Dts42MGkzZne2yCru741+bFiTMWkIj/LLRizad7b9tw=
|
||||
go.opentelemetry.io/otel v1.11.0 h1:kfToEGMDq6TrVrJ9Vht84Y8y9enykSZzDDZglV0kIEk=
|
||||
go.opentelemetry.io/otel v1.11.0/go.mod h1:H2KtuEphyMvlhZ+F7tg9GRhAOe60moNx61Ex+WmiKkk=
|
||||
go.opentelemetry.io/otel/metric v0.32.3 h1:dMpnJYk2KULXr0j8ph6N7+IcuiIQXlPXD4kix9t7L9c=
|
||||
go.opentelemetry.io/otel/metric v0.32.3/go.mod h1:pgiGmKohxHyTPHGOff+vrtIH39/R9fiO/WoenUQ3kcc=
|
||||
go.opentelemetry.io/otel/trace v1.11.0 h1:20U/Vj42SX+mASlXLmSGBg6jpI1jQtv682lZtTAOVFI=
|
||||
go.opentelemetry.io/otel/trace v1.11.0/go.mod h1:nyYjis9jy0gytE9LXGU+/m1sHTKbRY0fX0hulNNDP1U=
|
||||
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
|
||||
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.36.4 h1:PRXhsszxTt5bbPriTjmaweWUsAnJYeWBhUMLRetUgBU=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.36.4/go.mod h1:05eWWy6ZWzmpeImD3UowLTB3VjDMU1yxQ+ENuVWDM3c=
|
||||
go.opentelemetry.io/otel v1.11.1 h1:4WLLAmcfkmDk2ukNXJyq3/kiz/3UzCaYq6PskJsaou4=
|
||||
go.opentelemetry.io/otel v1.11.1/go.mod h1:1nNhXBbWSD0nsL38H6btgnFN2k4i0sNLHNNMZMSbUGE=
|
||||
go.opentelemetry.io/otel/metric v0.33.0 h1:xQAyl7uGEYvrLAiV/09iTJlp1pZnQ9Wl793qbVvED1E=
|
||||
go.opentelemetry.io/otel/metric v0.33.0/go.mod h1:QlTYc+EnYNq/M2mNk1qDDMRLpqCOj2f/r5c7Fd5FYaI=
|
||||
go.opentelemetry.io/otel/trace v1.11.1 h1:ofxdnzsNrGBYXbP7t7zpUK281+go5rF7dvdIZXF8gdQ=
|
||||
go.opentelemetry.io/otel/trace v1.11.1/go.mod h1:f/Q9G7vzk5u91PhbmKbg1Qn0rzH1LJ4vbPHFGkTPtOk=
|
||||
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
|
||||
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
@@ -1224,8 +1231,8 @@ golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5y
|
||||
golang.org/x/crypto v0.0.0-20210915214749-c084706c2272/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU=
|
||||
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
|
||||
golang.org/x/crypto v0.4.0 h1:UVQgzMY87xqpKNgb+kDsll2Igd33HszWHFLmpaRMq/8=
|
||||
golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80=
|
||||
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
@@ -1239,8 +1246,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
|
||||
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
|
||||
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
||||
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA=
|
||||
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA=
|
||||
golang.org/x/exp v0.0.0-20221031165847-c99f073a8326 h1:QfTh0HpN6hlw6D3vu8DAwC8pBIwikq0AI1evdm+FksE=
|
||||
golang.org/x/exp v0.0.0-20221031165847-c99f073a8326/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
|
||||
golang.org/x/exp/typeparams v0.0.0-20220428152302-39d4317da171/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
|
||||
golang.org/x/exp/typeparams v0.0.0-20220827204233-334a2380cb91 h1:Ic/qN6TEifvObMGQy72k0n1LlJr7DjWWEi+MOsDOiSk=
|
||||
golang.org/x/exp/typeparams v0.0.0-20220827204233-334a2380cb91/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
|
||||
@@ -1273,8 +1280,8 @@ golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.6.0 h1:b9gGHsz9/HhJ3HF5DHQytPpuwocVTChQJK3AvoLRD5I=
|
||||
golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
|
||||
golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA=
|
||||
golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
@@ -1330,11 +1337,9 @@ golang.org/x/net v0.0.0-20210903162142-ad29c8ab022f/go.mod h1:9nx3DQGgdP8bBQD5qx
|
||||
golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.1.0 h1:hZ/3BUoy5aId7sCpA/Tc5lt8DkFgdVS2onTpJsZ/fl0=
|
||||
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
|
||||
golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU=
|
||||
golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
@@ -1348,8 +1353,7 @@ golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ
|
||||
golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
|
||||
golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5 h1:OSnWWcOd/CtWQC2cYSBgbTSJv3ciqd8r54ySIW2y3RE=
|
||||
golang.org/x/oauth2 v0.3.0 h1:6l90koy8/LaBLmLu8jpHeHexzMwEita0zFfYlggy2F8=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
@@ -1362,8 +1366,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0 h1:cu5kTvlzcw1Q5S9f5ip1/cpiB4nXvw1XYzFPGgzLUOY=
|
||||
golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
@@ -1451,26 +1455,26 @@ golang.org/x/sys v0.0.0-20210903071746-97244b99971b/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211031064116-611d5d643895/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211105183446-c75c47738b0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220702020025-31831981b65f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.1.0 h1:kunALQeHf1/185U1i0GOB/fy1IPRDDpuoOOqRReG57U=
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ=
|
||||
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.1.0 h1:g6Z6vPFA9dYBAF7DWcH6sCcOntplXsDKcliusYijMlw=
|
||||
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.3.0 h1:qoo4akIqOcDME5bhc/NgxUdovd6BSS2uMsVjB56q1xI=
|
||||
golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
@@ -1480,15 +1484,15 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg=
|
||||
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM=
|
||||
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs=
|
||||
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20220609170525-579cf78fd858 h1:Dpdu/EMxGMFgq0CeYMh4fazTD2vtlZRYE7wyynxJb9U=
|
||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
@@ -1582,8 +1586,8 @@ golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
|
||||
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
|
||||
golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.2.0 h1:G6AHpWxTMGY1KyEYoAQ5WTtIekUUvDNjan3ugu60JvE=
|
||||
golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
|
||||
golang.org/x/tools v0.3.0 h1:SrNbZl6ECOS1qFzgTdQfWXZM9XBkiA6tkFrH9YSTPHM=
|
||||
golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
@@ -1673,8 +1677,8 @@ google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaE
|
||||
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
|
||||
google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
|
||||
google.golang.org/genproto v0.0.0-20211101144312-62acf1d99145/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a h1:GH6UPn3ixhWcKDhpnEC55S75cerLPdpp3hrhfKYjZgw=
|
||||
google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM=
|
||||
google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c h1:QgY/XxIAIeccR+Ca/rDdKubLIU9rcJ3xfy1DC/Wd2Oo=
|
||||
google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo=
|
||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
|
||||
@@ -1704,8 +1708,8 @@ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ
|
||||
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
|
||||
google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=
|
||||
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
|
||||
google.golang.org/grpc v1.50.1 h1:DS/BukOZWp8s6p4Dt/tOaJaTQyPyOoCcrjroHuCeLzY=
|
||||
google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
|
||||
google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U=
|
||||
google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
@@ -1726,8 +1730,8 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b h1:QRR6H1YWRnHb4Y/HeNFCTJLFVxaq6wH4YuVdsUOr75U=
|
||||
gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
|
||||
36
inspect/doc.go
Normal file
@@ -0,0 +1,36 @@
/*
Package inspect provides a tool for investigating the state of a
failed Tendermint node.

This package provides the Inspector type. The Inspector type runs a subset of the Tendermint
RPC endpoints that are useful for debugging issues with Tendermint consensus.

When a node running the Tendermint consensus engine detects an inconsistent consensus state,
the entire node will crash. The Tendermint consensus engine cannot run in this
inconsistent state, so the node will not be able to start up again.

The RPC endpoints provided by the Inspector type allow a node operator to inspect
the block store and state store to better understand what may have caused the inconsistent state.

The Inspector type's lifecycle is controlled by a context.Context:

	ins := inspect.NewFromConfig(rpcConfig)
	ctx, cancelFunc := context.WithCancel(context.Background())

	// Run blocks until the Inspector server is shut down.
	go ins.Run(ctx)
	...

	// calling the cancel function will stop the running inspect server
	cancelFunc()

Inspector serves its RPC endpoints on the address configured in the RPC configuration:

	rpcConfig.ListenAddress = "tcp://127.0.0.1:26657"
	ins := inspect.NewFromConfig(rpcConfig)
	go ins.Run(ctx)

The list of available RPC endpoints can then be viewed by navigating to
http://127.0.0.1:26657/ in a web browser.
*/
package inspect
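For orientation, the same endpoints can also be exercised programmatically with the repository's RPC HTTP client, just as the new tests further down do. The following is a minimal standalone sketch, not part of this diff; it assumes an inspect server is already listening on the default address via `go ins.Run(ctx)` as in the package documentation above:

	package main

	import (
		"context"
		"fmt"

		httpclient "github.com/tendermint/tendermint/rpc/client/http"
	)

	func main() {
		// Connect to the inspect server on the default RPC listen address.
		cli, err := httpclient.New("tcp://127.0.0.1:26657", "/websocket")
		if err != nil {
			panic(err)
		}
		// Fetch block 1 from the (otherwise crashed) node's block store.
		height := int64(1)
		res, err := cli.Block(context.Background(), &height)
		if err != nil {
			panic(err)
		}
		fmt.Println("last commit hash:", res.Block.LastCommitHash)
	}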
138
inspect/inspect.go
Normal file
@@ -0,0 +1,138 @@
package inspect

import (
	"context"
	"errors"
	"net"
	"os"

	"github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/inspect/rpc"
	"github.com/tendermint/tendermint/libs/log"
	tmstrings "github.com/tendermint/tendermint/libs/strings"
	rpccore "github.com/tendermint/tendermint/rpc/core"
	"github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/state/indexer"
	"github.com/tendermint/tendermint/state/indexer/block"
	"github.com/tendermint/tendermint/state/txindex"
	"github.com/tendermint/tendermint/store"
	"github.com/tendermint/tendermint/types"

	"golang.org/x/sync/errgroup"
)

var (
	logger = log.NewTMLogger(log.NewSyncWriter(os.Stdout))
)

// Inspector manages an RPC service that exports methods to debug a failed node.
// After a node shuts down due to a consensus failure, it will no longer start
// up, and its state cannot easily be inspected. An Inspector value provides a similar interface
// to the node, using the underlying Tendermint data stores, without bringing up
// any other components. A caller can query the Inspector service to inspect the
// persisted state and debug the failure.
type Inspector struct {
	routes rpccore.RoutesMap

	config *config.RPCConfig

	logger log.Logger

	// References to the state store and block store are maintained to enable
	// the Inspector to safely close them on shutdown.
	ss state.Store
	bs state.BlockStore
}

// New returns an Inspector that serves RPC on the specified BlockStore and StateStore.
// The Inspector type does not modify the state or block stores.
// The sinks are used to enable block and transaction querying via the RPC server.
// The caller is responsible for starting and stopping the Inspector service.
//
//nolint:lll
func New(cfg *config.RPCConfig, bs state.BlockStore, ss state.Store, txidx txindex.TxIndexer, blkidx indexer.BlockIndexer, lg log.Logger) *Inspector {
	routes := rpc.Routes(*cfg, ss, bs, txidx, blkidx, logger)
	eb := types.NewEventBus()
	eb.SetLogger(logger.With("module", "events"))
	return &Inspector{
		routes: routes,
		config: cfg,
		logger: logger,
		ss:     ss,
		bs:     bs,
	}
}

// NewFromConfig constructs an Inspector using the values defined in the passed-in config.
func NewFromConfig(cfg *config.Config) (*Inspector, error) {
	bsDB, err := config.DefaultDBProvider(&config.DBContext{ID: "blockstore", Config: cfg})
	if err != nil {
		return nil, err
	}
	bs := store.NewBlockStore(bsDB)
	sDB, err := config.DefaultDBProvider(&config.DBContext{ID: "state", Config: cfg})
	if err != nil {
		return nil, err
	}
	genDoc, err := types.GenesisDocFromFile(cfg.GenesisFile())
	if err != nil {
		return nil, err
	}
	txidx, blkidx, err := block.IndexerFromConfig(cfg, config.DefaultDBProvider, genDoc.ChainID)
	if err != nil {
		return nil, err
	}
	lg := logger.With("module", "inspect")
	ss := state.NewStore(sDB, state.StoreOptions{})
	return New(cfg.RPC, bs, ss, txidx, blkidx, lg), nil
}

// Run starts the Inspector servers and blocks until the servers shut down. The passed
// in context is used to control the lifecycle of the servers.
func (ins *Inspector) Run(ctx context.Context) error {
	defer ins.bs.Close()
	defer ins.ss.Close()

	return startRPCServers(ctx, ins.config, ins.logger, ins.routes)
}

func startRPCServers(ctx context.Context, cfg *config.RPCConfig, logger log.Logger, routes rpccore.RoutesMap) error {
	g, tctx := errgroup.WithContext(ctx)
	listenAddrs := tmstrings.SplitAndTrimEmpty(cfg.ListenAddress, ",", " ")
	rh := rpc.Handler(cfg, routes, logger)
	for _, listenerAddr := range listenAddrs {
		server := rpc.Server{
			Logger:  logger,
			Config:  cfg,
			Handler: rh,
			Addr:    listenerAddr,
		}
		if cfg.IsTLSEnabled() {
			keyFile := cfg.KeyFile()
			certFile := cfg.CertFile()
			listenerAddr := listenerAddr
			g.Go(func() error {
				logger.Info("RPC HTTPS server starting", "address", listenerAddr,
					"certfile", certFile, "keyfile", keyFile)
				err := server.ListenAndServeTLS(tctx, certFile, keyFile)
				if !errors.Is(err, net.ErrClosed) {
					return err
				}
				logger.Info("RPC HTTPS server stopped", "address", listenerAddr)
				return nil
			})
		} else {
			listenerAddr := listenerAddr
			g.Go(func() error {
				logger.Info("RPC HTTP server starting", "address", listenerAddr)
				err := server.ListenAndServe(tctx)
				if !errors.Is(err, net.ErrClosed) {
					return err
				}
				logger.Info("RPC HTTP server stopped", "address", listenerAddr)
				return nil
			})
		}
	}
	return g.Wait()
}
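Note that startRPCServers splits cfg.ListenAddress on commas, so one Inspector can serve several listeners at once. A minimal sketch of wiring this into a main function follows; it is not part of this diff, and config.DefaultConfig, SetRoot, and signal.NotifyContext are the only APIs assumed beyond the ones above:

	package main

	import (
		"context"
		"os"
		"os/signal"

		"github.com/tendermint/tendermint/config"
		"github.com/tendermint/tendermint/inspect"
	)

	func main() {
		// Point the config at the failed node's home directory so the
		// Inspector opens that node's block store and state store.
		cfg := config.DefaultConfig().SetRoot(os.ExpandEnv("$HOME/.tendermint"))
		// Comma-separated addresses each get their own server goroutine.
		cfg.RPC.ListenAddress = "tcp://127.0.0.1:26657"

		ins, err := inspect.NewFromConfig(cfg)
		if err != nil {
			panic(err)
		}

		// Run blocks until the context is canceled and all servers stop.
		ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt)
		defer cancel()
		if err := ins.Run(ctx); err != nil {
			panic(err)
		}
	}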
605
inspect/inspect_test.go
Normal file
@@ -0,0 +1,605 @@
package inspect_test

import (
	"context"
	"fmt"
	"net"
	"os"
	"strings"
	"sync"
	"testing"
	"time"

	"github.com/fortytw2/leaktest"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
	abcitypes "github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/inspect"
	"github.com/tendermint/tendermint/internal/test"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/libs/pubsub/query"
	"github.com/tendermint/tendermint/proto/tendermint/state"
	httpclient "github.com/tendermint/tendermint/rpc/client/http"
	indexermocks "github.com/tendermint/tendermint/state/indexer/mocks"
	statemocks "github.com/tendermint/tendermint/state/mocks"
	txindexmocks "github.com/tendermint/tendermint/state/txindex/mocks"
	"github.com/tendermint/tendermint/types"
)

func TestInspectConstructor(t *testing.T) {
	cfg := test.ResetTestRoot("test")
	t.Cleanup(leaktest.Check(t))
	defer func() { _ = os.RemoveAll(cfg.RootDir) }()
	t.Run("from config", func(t *testing.T) {
		d, err := inspect.NewFromConfig(cfg)
		require.NoError(t, err)
		require.NotNil(t, d)
	})

}

func TestInspectRun(t *testing.T) {
	cfg := test.ResetTestRoot("test")
	t.Cleanup(leaktest.Check(t))
	defer func() { _ = os.RemoveAll(cfg.RootDir) }()
	t.Run("from config", func(t *testing.T) {
		d, err := inspect.NewFromConfig(cfg)
		require.NoError(t, err)
		ctx, cancel := context.WithCancel(context.Background())
		stoppedWG := &sync.WaitGroup{}
		stoppedWG.Add(1)
		go func() {
			require.NoError(t, d.Run(ctx))
			stoppedWG.Done()
		}()
		cancel()
		stoppedWG.Wait()
	})

}

func TestBlock(t *testing.T) {
	testHeight := int64(1)
	testBlock := new(types.Block)
	testBlock.Header.Height = testHeight
	testBlock.Header.LastCommitHash = []byte("test hash")
	stateStoreMock := &statemocks.Store{}
	stateStoreMock.On("Close").Return(nil)

	blockStoreMock := &statemocks.BlockStore{}
	blockStoreMock.On("Height").Return(testHeight)
	blockStoreMock.On("Base").Return(int64(0))
	blockStoreMock.On("LoadBlockMeta", testHeight).Return(&types.BlockMeta{})
	blockStoreMock.On("LoadBlock", testHeight).Return(testBlock)
	blockStoreMock.On("Close").Return(nil)

	txIndexerMock := &txindexmocks.TxIndexer{}
	blkIdxMock := &indexermocks.BlockIndexer{}

	rpcConfig := config.TestRPCConfig()
	l := log.TestingLogger()
	d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock, l)
	ctx, cancel := context.WithCancel(context.Background())
	wg := &sync.WaitGroup{}
	wg.Add(1)

	startedWG := &sync.WaitGroup{}
	startedWG.Add(1)
	go func() {
		startedWG.Done()
		defer wg.Done()
		require.NoError(t, d.Run(ctx))
	}()
	// FIXME: used to induce context switch.
	// Determine more deterministic method for prompting a context switch
	startedWG.Wait()
	requireConnect(t, rpcConfig.ListenAddress, 20)
	cli, err := httpclient.New(rpcConfig.ListenAddress, "/websocket")
	require.NoError(t, err)
	resultBlock, err := cli.Block(context.Background(), &testHeight)
	require.NoError(t, err)
	require.Equal(t, testBlock.Height, resultBlock.Block.Height)
	require.Equal(t, testBlock.LastCommitHash, resultBlock.Block.LastCommitHash)
	cancel()
	wg.Wait()

	blockStoreMock.AssertExpectations(t)
	stateStoreMock.AssertExpectations(t)
}

func TestTxSearch(t *testing.T) {
	testHash := []byte("test")
	testTx := []byte("tx")
	testQuery := fmt.Sprintf("tx.hash='%s'", string(testHash))
	testTxResult := &abcitypes.TxResult{
		Height: 1,
		Index:  100,
		Tx:     testTx,
	}

	stateStoreMock := &statemocks.Store{}
	stateStoreMock.On("Close").Return(nil)
	blockStoreMock := &statemocks.BlockStore{}
	blockStoreMock.On("Close").Return(nil)
	txIndexerMock := &txindexmocks.TxIndexer{}
	blkIdxMock := &indexermocks.BlockIndexer{}
	txIndexerMock.On("Search", mock.Anything,
		mock.MatchedBy(func(q *query.Query) bool {
			return testQuery == strings.ReplaceAll(q.String(), " ", "")
		})).
		Return([]*abcitypes.TxResult{testTxResult}, nil)

	rpcConfig := config.TestRPCConfig()
	l := log.TestingLogger()
	d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock, l)
	ctx, cancel := context.WithCancel(context.Background())
	wg := &sync.WaitGroup{}
	wg.Add(1)

	startedWG := &sync.WaitGroup{}
	startedWG.Add(1)
	go func() {
		startedWG.Done()
		defer wg.Done()
		require.NoError(t, d.Run(ctx))
	}()
	// FIXME: used to induce context switch.
	// Determine more deterministic method for prompting a context switch
	startedWG.Wait()
	requireConnect(t, rpcConfig.ListenAddress, 20)
	cli, err := httpclient.New(rpcConfig.ListenAddress, "/websocket")
	require.NoError(t, err)

	var page = 1
	resultTxSearch, err := cli.TxSearch(context.Background(), testQuery, false, &page, &page, "")
	require.NoError(t, err)
	require.Len(t, resultTxSearch.Txs, 1)
	require.Equal(t, types.Tx(testTx), resultTxSearch.Txs[0].Tx)

	cancel()
	wg.Wait()

	txIndexerMock.AssertExpectations(t)
	stateStoreMock.AssertExpectations(t)
	blockStoreMock.AssertExpectations(t)
}
func TestTx(t *testing.T) {
	testHash := []byte("test")
	testTx := []byte("tx")

	stateStoreMock := &statemocks.Store{}
	stateStoreMock.On("Close").Return(nil)
	blockStoreMock := &statemocks.BlockStore{}
	blockStoreMock.On("Close").Return(nil)
	blkIdxMock := &indexermocks.BlockIndexer{}
	txIndexerMock := &txindexmocks.TxIndexer{}
	txIndexerMock.On("Get", testHash).Return(&abcitypes.TxResult{
		Tx: testTx,
	}, nil)

	rpcConfig := config.TestRPCConfig()
	l := log.TestingLogger()
	d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock, l)
	ctx, cancel := context.WithCancel(context.Background())
	wg := &sync.WaitGroup{}
	wg.Add(1)

	startedWG := &sync.WaitGroup{}
	startedWG.Add(1)
	go func() {
		startedWG.Done()
		defer wg.Done()
		require.NoError(t, d.Run(ctx))
	}()
	// FIXME: used to induce context switch.
	// Determine more deterministic method for prompting a context switch
	startedWG.Wait()
	requireConnect(t, rpcConfig.ListenAddress, 20)
	cli, err := httpclient.New(rpcConfig.ListenAddress, "/websocket")
	require.NoError(t, err)

	res, err := cli.Tx(context.Background(), testHash, false)
	require.NoError(t, err)
	require.Equal(t, types.Tx(testTx), res.Tx)

	cancel()
	wg.Wait()

	txIndexerMock.AssertExpectations(t)
	stateStoreMock.AssertExpectations(t)
	blockStoreMock.AssertExpectations(t)
}
func TestConsensusParams(t *testing.T) {
	testHeight := int64(1)
	testMaxGas := int64(55)
	stateStoreMock := &statemocks.Store{}
	stateStoreMock.On("Close").Return(nil)
	blockStoreMock := &statemocks.BlockStore{}
	blockStoreMock.On("Close").Return(nil)
	blockStoreMock.On("Height").Return(testHeight)
	blockStoreMock.On("Base").Return(int64(0))
	stateStoreMock.On("LoadConsensusParams", testHeight).Return(types.ConsensusParams{
		Block: types.BlockParams{
			MaxGas: testMaxGas,
		},
	}, nil)
	txIndexerMock := &txindexmocks.TxIndexer{}
	blkIdxMock := &indexermocks.BlockIndexer{}
	rpcConfig := config.TestRPCConfig()
	l := log.TestingLogger()
	d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock, l)

	ctx, cancel := context.WithCancel(context.Background())
	wg := &sync.WaitGroup{}
	wg.Add(1)

	startedWG := &sync.WaitGroup{}
	startedWG.Add(1)
	go func() {
		startedWG.Done()
		defer wg.Done()
		require.NoError(t, d.Run(ctx))
	}()
	// FIXME: used to induce context switch.
	// Determine more deterministic method for prompting a context switch
	startedWG.Wait()
	requireConnect(t, rpcConfig.ListenAddress, 20)
	cli, err := httpclient.New(rpcConfig.ListenAddress, "/websocket")
	require.NoError(t, err)
	params, err := cli.ConsensusParams(context.Background(), &testHeight)
	require.NoError(t, err)
	require.Equal(t, params.ConsensusParams.Block.MaxGas, testMaxGas)

	cancel()
	wg.Wait()

	blockStoreMock.AssertExpectations(t)
	stateStoreMock.AssertExpectations(t)
}

func TestBlockResults(t *testing.T) {
	testHeight := int64(1)
	testGasUsed := int64(100)
	stateStoreMock := &statemocks.Store{}
	stateStoreMock.On("Close").Return(nil)
	// tmstate "github.com/tendermint/tendermint/proto/tendermint/state"
	stateStoreMock.On("LoadABCIResponses", testHeight).Return(&state.ABCIResponses{
		DeliverTxs: []*abcitypes.ResponseDeliverTx{
			{
				GasUsed: testGasUsed,
			},
		},
		EndBlock:   &abcitypes.ResponseEndBlock{},
		BeginBlock: &abcitypes.ResponseBeginBlock{},
	}, nil)
	blockStoreMock := &statemocks.BlockStore{}
	blockStoreMock.On("Close").Return(nil)
	blockStoreMock.On("Base").Return(int64(0))
	blockStoreMock.On("Height").Return(testHeight)
	txIndexerMock := &txindexmocks.TxIndexer{}
	blkIdxMock := &indexermocks.BlockIndexer{}
	rpcConfig := config.TestRPCConfig()
	l := log.TestingLogger()
	d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock, l)

	ctx, cancel := context.WithCancel(context.Background())
	wg := &sync.WaitGroup{}
	wg.Add(1)

	startedWG := &sync.WaitGroup{}
	startedWG.Add(1)
	go func() {
		startedWG.Done()
		defer wg.Done()
		require.NoError(t, d.Run(ctx))
	}()
	// FIXME: used to induce context switch.
	// Determine more deterministic method for prompting a context switch
	startedWG.Wait()
	requireConnect(t, rpcConfig.ListenAddress, 20)
	cli, err := httpclient.New(rpcConfig.ListenAddress, "/websocket")
	require.NoError(t, err)
	res, err := cli.BlockResults(context.Background(), &testHeight)
	require.NoError(t, err)
	require.Equal(t, res.TxsResults[0].GasUsed, testGasUsed)

	cancel()
	wg.Wait()

	blockStoreMock.AssertExpectations(t)
	stateStoreMock.AssertExpectations(t)
}

func TestCommit(t *testing.T) {
	testHeight := int64(1)
	testRound := int32(101)
	stateStoreMock := &statemocks.Store{}
	stateStoreMock.On("Close").Return(nil)
	blockStoreMock := &statemocks.BlockStore{}
	blockStoreMock.On("Close").Return(nil)
	blockStoreMock.On("Base").Return(int64(0))
	blockStoreMock.On("Height").Return(testHeight)
	blockStoreMock.On("LoadBlockMeta", testHeight).Return(&types.BlockMeta{}, nil)
	blockStoreMock.On("LoadSeenCommit", testHeight).Return(&types.Commit{
		Height: testHeight,
		Round:  testRound,
	}, nil)
	txIndexerMock := &txindexmocks.TxIndexer{}
	blkIdxMock := &indexermocks.BlockIndexer{}
	rpcConfig := config.TestRPCConfig()
	l := log.TestingLogger()
	d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock, l)

	ctx, cancel := context.WithCancel(context.Background())
	wg := &sync.WaitGroup{}
	wg.Add(1)

	startedWG := &sync.WaitGroup{}
	startedWG.Add(1)
	go func() {
		startedWG.Done()
		defer wg.Done()
		require.NoError(t, d.Run(ctx))
	}()
	// FIXME: used to induce context switch.
	// Determine more deterministic method for prompting a context switch
	startedWG.Wait()
	requireConnect(t, rpcConfig.ListenAddress, 20)
	cli, err := httpclient.New(rpcConfig.ListenAddress, "/websocket")
	require.NoError(t, err)
	res, err := cli.Commit(context.Background(), &testHeight)
	require.NoError(t, err)
	require.NotNil(t, res)
	require.Equal(t, res.SignedHeader.Commit.Round, testRound)

	cancel()
	wg.Wait()

	blockStoreMock.AssertExpectations(t)
	stateStoreMock.AssertExpectations(t)
}

func TestBlockByHash(t *testing.T) {
	testHeight := int64(1)
	testHash := []byte("test hash")
	testBlock := new(types.Block)
	testBlock.Header.Height = testHeight
	testBlock.Header.LastCommitHash = testHash
	stateStoreMock := &statemocks.Store{}
	stateStoreMock.On("Close").Return(nil)
	blockStoreMock := &statemocks.BlockStore{}
	blockStoreMock.On("Close").Return(nil)
	blockStoreMock.On("LoadBlockMeta", testHeight).Return(&types.BlockMeta{
		BlockID: types.BlockID{
			Hash: testHash,
		},
		Header: types.Header{
			Height: testHeight,
		},
	}, nil)
	blockStoreMock.On("LoadBlockByHash", testHash).Return(testBlock, nil)
	txIndexerMock := &txindexmocks.TxIndexer{}
	blkIdxMock := &indexermocks.BlockIndexer{}
	rpcConfig := config.TestRPCConfig()
	l := log.TestingLogger()
	d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock, l)

	ctx, cancel := context.WithCancel(context.Background())
	wg := &sync.WaitGroup{}
	wg.Add(1)

	startedWG := &sync.WaitGroup{}
	startedWG.Add(1)
	go func() {
		startedWG.Done()
		defer wg.Done()
		require.NoError(t, d.Run(ctx))
	}()
	// FIXME: used to induce context switch.
	// Determine more deterministic method for prompting a context switch
	startedWG.Wait()
	requireConnect(t, rpcConfig.ListenAddress, 20)
	cli, err := httpclient.New(rpcConfig.ListenAddress, "/websocket")
	require.NoError(t, err)
	res, err := cli.BlockByHash(context.Background(), testHash)
	require.NoError(t, err)
	require.NotNil(t, res)
	require.Equal(t, []byte(res.BlockID.Hash), testHash)

	cancel()
	wg.Wait()

	blockStoreMock.AssertExpectations(t)
	stateStoreMock.AssertExpectations(t)
}

func TestBlockchain(t *testing.T) {
	testHeight := int64(1)
	testBlock := new(types.Block)
	testBlockHash := []byte("test hash")
	testBlock.Header.Height = testHeight
	testBlock.Header.LastCommitHash = testBlockHash
	stateStoreMock := &statemocks.Store{}
	stateStoreMock.On("Close").Return(nil)

	blockStoreMock := &statemocks.BlockStore{}
	blockStoreMock.On("Close").Return(nil)
	blockStoreMock.On("Height").Return(testHeight)
	blockStoreMock.On("Base").Return(int64(0))
	blockStoreMock.On("LoadBlockMeta", testHeight).Return(&types.BlockMeta{
		BlockID: types.BlockID{
			Hash: testBlockHash,
		},
	})
	txIndexerMock := &txindexmocks.TxIndexer{}
	blkIdxMock := &indexermocks.BlockIndexer{}
	rpcConfig := config.TestRPCConfig()
	l := log.TestingLogger()
	d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock, l)

	ctx, cancel := context.WithCancel(context.Background())
	wg := &sync.WaitGroup{}
	wg.Add(1)

	startedWG := &sync.WaitGroup{}
	startedWG.Add(1)
	go func() {
		startedWG.Done()
		defer wg.Done()
		require.NoError(t, d.Run(ctx))
	}()
	// FIXME: used to induce context switch.
	// Determine more deterministic method for prompting a context switch
	startedWG.Wait()
	requireConnect(t, rpcConfig.ListenAddress, 20)
	cli, err := httpclient.New(rpcConfig.ListenAddress, "/websocket")
	require.NoError(t, err)
	res, err := cli.BlockchainInfo(context.Background(), 0, 100)
	require.NoError(t, err)
	require.NotNil(t, res)
	require.Equal(t, testBlockHash, []byte(res.BlockMetas[0].BlockID.Hash))

	cancel()
	wg.Wait()

	blockStoreMock.AssertExpectations(t)
	stateStoreMock.AssertExpectations(t)
}

func TestValidators(t *testing.T) {
	testHeight := int64(1)
	testVotingPower := int64(100)
	testValidators := types.ValidatorSet{
		Validators: []*types.Validator{
			{
				VotingPower: testVotingPower,
			},
		},
	}
	stateStoreMock := &statemocks.Store{}
	stateStoreMock.On("Close").Return(nil)
	stateStoreMock.On("LoadValidators", testHeight).Return(&testValidators, nil)

	blockStoreMock := &statemocks.BlockStore{}
	blockStoreMock.On("Close").Return(nil)
	blockStoreMock.On("Height").Return(testHeight)
	blockStoreMock.On("Base").Return(int64(0))
	txIndexerMock := &txindexmocks.TxIndexer{}
	blkIdxMock := &indexermocks.BlockIndexer{}
	rpcConfig := config.TestRPCConfig()
	l := log.TestingLogger()
	d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock, l)

	ctx, cancel := context.WithCancel(context.Background())
	wg := &sync.WaitGroup{}
	wg.Add(1)

	startedWG := &sync.WaitGroup{}
	startedWG.Add(1)
	go func() {
		startedWG.Done()
		defer wg.Done()
		require.NoError(t, d.Run(ctx))
	}()
	// FIXME: used to induce context switch.
	// Determine more deterministic method for prompting a context switch
	startedWG.Wait()
	requireConnect(t, rpcConfig.ListenAddress, 20)
	cli, err := httpclient.New(rpcConfig.ListenAddress, "/websocket")
	require.NoError(t, err)

	testPage := 1
	testPerPage := 100
	res, err := cli.Validators(context.Background(), &testHeight, &testPage, &testPerPage)
	require.NoError(t, err)
	require.NotNil(t, res)
	require.Equal(t, testVotingPower, res.Validators[0].VotingPower)

	cancel()
	wg.Wait()

	blockStoreMock.AssertExpectations(t)
	stateStoreMock.AssertExpectations(t)
}

func TestBlockSearch(t *testing.T) {
	testHeight := int64(1)
	testBlockHash := []byte("test hash")
	testQuery := "block.height = 1"
	stateStoreMock := &statemocks.Store{}
	stateStoreMock.On("Close").Return(nil)

	blockStoreMock := &statemocks.BlockStore{}
	blockStoreMock.On("Close").Return(nil)

	txIndexerMock := &txindexmocks.TxIndexer{}
	blkIdxMock := &indexermocks.BlockIndexer{}
	blockStoreMock.On("LoadBlock", testHeight).Return(&types.Block{
		Header: types.Header{
			Height: testHeight,
		},
	}, nil)
	blockStoreMock.On("LoadBlockMeta", testHeight).Return(&types.BlockMeta{
		BlockID: types.BlockID{
			Hash: testBlockHash,
		},
	})
	blkIdxMock.On("Search", mock.Anything,
		mock.MatchedBy(func(q *query.Query) bool { return testQuery == q.String() })).
		Return([]int64{testHeight}, nil)
	rpcConfig := config.TestRPCConfig()
	l := log.TestingLogger()
	d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock, l)

	ctx, cancel := context.WithCancel(context.Background())
	wg := &sync.WaitGroup{}
	wg.Add(1)

	startedWG := &sync.WaitGroup{}
	startedWG.Add(1)
	go func() {
		startedWG.Done()
		defer wg.Done()
		require.NoError(t, d.Run(ctx))
	}()
	// FIXME: used to induce context switch.
	// Determine more deterministic method for prompting a context switch
	startedWG.Wait()
	requireConnect(t, rpcConfig.ListenAddress, 20)
	cli, err := httpclient.New(rpcConfig.ListenAddress, "/websocket")
	require.NoError(t, err)

	testPage := 1
	testPerPage := 100
	testOrderBy := "desc"
	res, err := cli.BlockSearch(context.Background(), testQuery, &testPage, &testPerPage, testOrderBy)
	require.NoError(t, err)
	require.NotNil(t, res)
	require.Equal(t, testBlockHash, []byte(res.Blocks[0].BlockID.Hash))

	cancel()
	wg.Wait()

	blockStoreMock.AssertExpectations(t)
	stateStoreMock.AssertExpectations(t)
}

func requireConnect(t testing.TB, addr string, retries int) {
	parts := strings.SplitN(addr, "://", 2)
	if len(parts) != 2 {
		t.Fatalf("malformed address to dial: %s", addr)
	}
	var err error
	for i := 0; i < retries; i++ {
		var conn net.Conn
		conn, err = net.Dial(parts[0], parts[1])
		if err == nil {
			conn.Close()
			return
		}
		// FIXME attempt to yield and let the other goroutine continue execution.
		time.Sleep(time.Microsecond * 100)
	}
	t.Fatalf("unable to connect to server %s after %d tries: %s", addr, retries, err)
}
128
inspect/rpc/rpc.go
Normal file
@@ -0,0 +1,128 @@
package rpc

import (
	"context"
	"net/http"
	"time"

	"github.com/rs/cors"

	"github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/rpc/core"
	"github.com/tendermint/tendermint/rpc/jsonrpc/server"
	"github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/state/indexer"
	"github.com/tendermint/tendermint/state/txindex"
)

// Server defines parameters for running an Inspector rpc server.
type Server struct {
	Addr    string // TCP address to listen on, ":http" if empty
	Handler http.Handler
	Logger  log.Logger
	Config  *config.RPCConfig
}

// Routes returns the set of routes used by the Inspector server.
func Routes(cfg config.RPCConfig, s state.Store, bs state.BlockStore, txidx txindex.TxIndexer, blkidx indexer.BlockIndexer, logger log.Logger) core.RoutesMap { //nolint: lll
	env := &core.Environment{
		Config:           cfg,
		BlockIndexer:     blkidx,
		TxIndexer:        txidx,
		StateStore:       s,
		BlockStore:       bs,
		ConsensusReactor: waitSyncCheckerImpl{},
		Logger:           logger,
	}
	return core.RoutesMap{
		"blockchain":       server.NewRPCFunc(env.BlockchainInfo, "minHeight,maxHeight"),
		"consensus_params": server.NewRPCFunc(env.ConsensusParams, "height"),
		"block":            server.NewRPCFunc(env.Block, "height"),
		"block_by_hash":    server.NewRPCFunc(env.BlockByHash, "hash"),
		"block_results":    server.NewRPCFunc(env.BlockResults, "height"),
		"commit":           server.NewRPCFunc(env.Commit, "height"),
		"header":           server.NewRPCFunc(env.Header, "height"),
		"header_by_hash":   server.NewRPCFunc(env.HeaderByHash, "hash"),
		"validators":       server.NewRPCFunc(env.Validators, "height,page,per_page"),
		"tx":               server.NewRPCFunc(env.Tx, "hash,prove"),
		"tx_search":        server.NewRPCFunc(env.TxSearch, "query,prove,page,per_page,order_by"),
		"block_search":     server.NewRPCFunc(env.BlockSearch, "query,page,per_page,order_by"),
	}
}

// Handler returns the http.Handler configured for use with an Inspector server. Handler
// registers the routes on the http.Handler and also registers the websocket handler
// and the CORS handler if specified by the configuration options.
func Handler(rpcConfig *config.RPCConfig, routes core.RoutesMap, logger log.Logger) http.Handler {
	mux := http.NewServeMux()
	wmLogger := logger.With("protocol", "websocket")
	wm := server.NewWebsocketManager(routes,
		server.ReadLimit(rpcConfig.MaxBodyBytes))
	wm.SetLogger(wmLogger)
	mux.HandleFunc("/websocket", wm.WebsocketHandler)

	server.RegisterRPCFuncs(mux, routes, logger)
	var rootHandler http.Handler = mux
	if rpcConfig.IsCorsEnabled() {
		rootHandler = addCORSHandler(rpcConfig, mux)
	}
	return rootHandler
}

func addCORSHandler(rpcConfig *config.RPCConfig, h http.Handler) http.Handler {
	corsMiddleware := cors.New(cors.Options{
		AllowedOrigins: rpcConfig.CORSAllowedOrigins,
		AllowedMethods: rpcConfig.CORSAllowedMethods,
		AllowedHeaders: rpcConfig.CORSAllowedHeaders,
	})
	h = corsMiddleware.Handler(h)
	return h
}

type waitSyncCheckerImpl struct{}

func (waitSyncCheckerImpl) WaitSync() bool {
	return false
}

// ListenAndServe listens on the address specified in srv.Addr and handles any
// incoming requests over HTTP using the Inspector rpc handler specified on the server.
func (srv *Server) ListenAndServe(ctx context.Context) error {
	listener, err := server.Listen(srv.Addr, srv.Config.MaxOpenConnections)
	if err != nil {
		return err
	}
	go func() {
		<-ctx.Done()
		listener.Close()
	}()
	return server.Serve(listener, srv.Handler, srv.Logger, serverRPCConfig(srv.Config))
}

// ListenAndServeTLS listens on the address specified in srv.Addr. ListenAndServeTLS handles
// incoming requests over HTTPS using the Inspector rpc handler specified on the server.
func (srv *Server) ListenAndServeTLS(ctx context.Context, certFile, keyFile string) error {
	listener, err := server.Listen(srv.Addr, srv.Config.MaxOpenConnections)
	if err != nil {
		return err
	}
	go func() {
		<-ctx.Done()
		listener.Close()
	}()
	return server.ServeTLS(listener, srv.Handler, certFile, keyFile, srv.Logger, serverRPCConfig(srv.Config))
}

func serverRPCConfig(r *config.RPCConfig) *server.Config {
	cfg := server.DefaultConfig()
	cfg.MaxBodyBytes = r.MaxBodyBytes
	cfg.MaxHeaderBytes = r.MaxHeaderBytes
	// If necessary adjust global WriteTimeout to ensure it's greater than
	// TimeoutBroadcastTxCommit.
	// See https://github.com/tendermint/tendermint/issues/3435
	if cfg.WriteTimeout <= r.TimeoutBroadcastTxCommit {
		cfg.WriteTimeout = r.TimeoutBroadcastTxCommit + 1*time.Second
	}
	return cfg
}

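For orientation, a caller could wire these pieces together roughly as follows. This is an illustrative sketch, not code from the diff; the stateStore, blockStore, txIndexer, and blockIndexer values are assumed to be constructed elsewhere (e.g. from the node's databases).

	// Illustrative only: wiring the inspect RPC server by hand.
	rpcConfig := config.DefaultRPCConfig()
	logger := log.TestingLogger()

	routes := rpc.Routes(*rpcConfig, stateStore, blockStore, txIndexer, blockIndexer, logger)
	srv := &rpc.Server{
		Addr:    rpcConfig.ListenAddress,
		Handler: rpc.Handler(rpcConfig, routes, logger),
		Logger:  logger,
		Config:  rpcConfig,
	}

	// Canceling the context closes the listener and stops the server.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	if err := srv.ListenAndServe(ctx); err != nil {
		logger.Error("inspect RPC server terminated", "err", err)
	}
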
@@ -27,7 +27,7 @@
//	select {
//	case msg <- subscription.Out():
//		// handle msg.Data() and msg.Events()
//	case <-subscription.Cancelled():
//	case <-subscription.Canceled():
//		return subscription.Err()
//	}
// }

@@ -431,7 +431,7 @@ func benchmarkNClients(n int, b *testing.B) {
			select {
			case <-subscription.Out():
				continue
			case <-subscription.Cancelled():
			case <-subscription.Canceled():
				return
			}
		}
@@ -472,7 +472,7 @@ func benchmarkNClientsOneQuery(n int, b *testing.B) {
			select {
			case <-subscription.Out():
				continue
			case <-subscription.Cancelled():
			case <-subscription.Canceled():
				return
			}
		}
@@ -501,7 +501,7 @@ func assertReceive(t *testing.T, expected interface{}, ch <-chan pubsub.Message,
}

func assertCancelled(t *testing.T, subscription *pubsub.Subscription, err error) {
	_, ok := <-subscription.Cancelled()
	_, ok := <-subscription.Canceled()
	assert.False(t, ok)
	assert.Equal(t, err, subscription.Err())
}

@@ -9,6 +9,7 @@ import (
	"time"

	"github.com/stretchr/testify/require"

	"github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/libs/pubsub"
	"github.com/tendermint/tendermint/libs/pubsub/query"

@@ -43,11 +43,9 @@ func (s *Subscription) Out() <-chan Message {
	return s.out
}

// Cancelled returns a channel that's closed when the subscription is
// Canceled returns a channel that's closed when the subscription is
// terminated and supposed to be used in a select statement.
//
//nolint:misspell
func (s *Subscription) Cancelled() <-chan struct{} {
func (s *Subscription) Canceled() <-chan struct{} {
	return s.canceled
}

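A typical consumer loop against the renamed API would look roughly like this (a sketch; subscription is assumed to come from an earlier Subscribe call, and handle is a hypothetical callback):

	// Sketch: drain a subscription until it is terminated.
	for {
		select {
		case msg := <-subscription.Out():
			// Process the message payload and its events.
			handle(msg.Data(), msg.Events())
		case <-subscription.Canceled():
			// Closed on termination; Err reports the reason.
			return subscription.Err()
		}
	}
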
@@ -32,6 +32,27 @@ func SplitAndTrim(s, sep, cutset string) []string {
	return spl
}

// SplitAndTrimEmpty slices s into all subslices separated by sep and returns a
// slice of the string s with all leading and trailing Unicode code points
// contained in cutset removed. If sep is empty, SplitAndTrim splits after each
// UTF-8 sequence. The first part is equivalent to strings.SplitN with a count
// of -1. It also filters out empty strings, returning only non-empty strings.
func SplitAndTrimEmpty(s, sep, cutset string) []string {
	if s == "" {
		return []string{}
	}

	spl := strings.Split(s, sep)
	nonEmptyStrings := make([]string, 0, len(spl))
	for i := 0; i < len(spl); i++ {
		element := strings.Trim(spl[i], cutset)
		if element != "" {
			nonEmptyStrings = append(nonEmptyStrings, element)
		}
	}
	return nonEmptyStrings
}

// Returns true if s is a non-empty string of printable, non-tab ASCII characters.
func IsASCIIText(s string) bool {
	if len(s) == 0 {

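As a quick illustration of the new helper's behavior (my example, not part of the diff):

	// Empty elements are dropped after trimming, unlike with strings.Split.
	parts := SplitAndTrimEmpty("a, b, , c", ",", " ")
	// parts == []string{"a", "b", "c"}

	// For comparison, strings.Split("a, b, , c", ",")
	// returns []string{"a", " b", " ", " c"}.
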
@@ -23,7 +23,7 @@ import (
// trace that was produced from the primary. If successful, it produces two sets of evidence
// and sends them to the opposite provider before halting.
//
// If there are no conflictinge headers, the light client deems the verified target header
// If there are no conflicting headers, the light client deems the verified target header
// trusted and saves it to the trusted store.
func (c *Client) detectDivergence(ctx context.Context, primaryTrace []*types.LightBlock, now time.Time) error {
	if primaryTrace == nil || len(primaryTrace) < 2 {
@@ -74,7 +74,7 @@ func (c *Client) detectDivergence(ctx context.Context, primaryTrace []*types.Lig
			witnessesToRemove = append(witnessesToRemove, e.WitnessIndex)

		case errBadWitness:
			// these are all melevolent errors and should result in removing the
			// these are all malevolent errors and should result in removing the
			// witness
			c.logger.Info("witness returned an error during header comparison, removing...",
				"witness", c.witnesses[e.WitnessIndex], "err", err)
@@ -217,7 +217,7 @@ func (c *Client) sendEvidence(ctx context.Context, ev *types.LightClientAttackEv
func (c *Client) handleConflictingHeaders(
	ctx context.Context,
	primaryTrace []*types.LightBlock,
	challendingBlock *types.LightBlock,
	challengingBlock *types.LightBlock,
	witnessIndex int,
	now time.Time,
) error {
@@ -225,7 +225,7 @@ func (c *Client) handleConflictingHeaders(
	witnessTrace, primaryBlock, err := c.examineConflictingHeaderAgainstTrace(
		ctx,
		primaryTrace,
		challendingBlock,
		challengingBlock,
		supportingWitness,
		now,
	)
@@ -374,7 +374,7 @@ func (c *Client) examineConflictingHeaderAgainstTrace(
}

// getTargetBlockOrLatest gets the latest height, if it is greater than the target height then it queries
// the target heght else it returns the latest. returns true if it successfully managed to acquire the target
// the target height else it returns the latest. returns true if it successfully managed to acquire the target
// height.
func (c *Client) getTargetBlockOrLatest(
	ctx context.Context,
@@ -394,7 +394,7 @@ func (c *Client) getTargetBlockOrLatest(

	if lightBlock.Height > height {
		// the witness has caught up. We recursively call the function again. However in order
		// to avoud a wild goose chase where the witness sends us one header below and one header
		// to avoid a wild goose chase where the witness sends us one header below and one header
		// above the height we set a timeout to the context
		lightBlock, err := witness.LightBlock(ctx, height)
		return true, lightBlock, err

@@ -113,7 +113,7 @@ func (p *Proxy) listen() (net.Listener, *http.ServeMux, error) {
	}

	// 4) Start listening for new connections.
	listener, err := rpcserver.Listen(p.Addr, p.Config)
	listener, err := rpcserver.Listen(p.Addr, p.Config.MaxOpenConnections)
	if err != nil {
		return nil, mux, err
	}

@@ -21,22 +21,22 @@ func RPCRoutes(c *lrpc.Client) map[string]*rpcserver.RPCFunc {
		"health":   rpcserver.NewRPCFunc(makeHealthFunc(c), ""),
		"status":   rpcserver.NewRPCFunc(makeStatusFunc(c), ""),
		"net_info": rpcserver.NewRPCFunc(makeNetInfoFunc(c), ""),
		"blockchain":      rpcserver.NewRPCFunc(makeBlockchainInfoFunc(c), "minHeight,maxHeight"),
		"genesis":         rpcserver.NewRPCFunc(makeGenesisFunc(c), ""),
		"genesis_chunked": rpcserver.NewRPCFunc(makeGenesisChunkedFunc(c), ""),
		"block":           rpcserver.NewRPCFunc(makeBlockFunc(c), "height"),
		"header":          rpcserver.NewRPCFunc(makeHeaderFunc(c), "height"),
		"header_by_hash":  rpcserver.NewRPCFunc(makeHeaderByHashFunc(c), "hash"),
		"block_by_hash":   rpcserver.NewRPCFunc(makeBlockByHashFunc(c), "hash"),
		"block_results":   rpcserver.NewRPCFunc(makeBlockResultsFunc(c), "height"),
		"commit":          rpcserver.NewRPCFunc(makeCommitFunc(c), "height"),
		"tx":              rpcserver.NewRPCFunc(makeTxFunc(c), "hash,prove"),
		"blockchain":      rpcserver.NewRPCFunc(makeBlockchainInfoFunc(c), "minHeight,maxHeight", rpcserver.Cacheable()),
		"genesis":         rpcserver.NewRPCFunc(makeGenesisFunc(c), "", rpcserver.Cacheable()),
		"genesis_chunked": rpcserver.NewRPCFunc(makeGenesisChunkedFunc(c), "", rpcserver.Cacheable()),
		"block":           rpcserver.NewRPCFunc(makeBlockFunc(c), "height", rpcserver.Cacheable("height")),
		"header":          rpcserver.NewRPCFunc(makeHeaderFunc(c), "height", rpcserver.Cacheable("height")),
		"header_by_hash":  rpcserver.NewRPCFunc(makeHeaderByHashFunc(c), "hash", rpcserver.Cacheable()),
		"block_by_hash":   rpcserver.NewRPCFunc(makeBlockByHashFunc(c), "hash", rpcserver.Cacheable()),
		"block_results":   rpcserver.NewRPCFunc(makeBlockResultsFunc(c), "height", rpcserver.Cacheable("height")),
		"commit":          rpcserver.NewRPCFunc(makeCommitFunc(c), "height", rpcserver.Cacheable("height")),
		"tx":              rpcserver.NewRPCFunc(makeTxFunc(c), "hash,prove", rpcserver.Cacheable()),
		"tx_search":       rpcserver.NewRPCFunc(makeTxSearchFunc(c), "query,prove,page,per_page,order_by"),
		"block_search":    rpcserver.NewRPCFunc(makeBlockSearchFunc(c), "query,page,per_page,order_by"),
		"validators":      rpcserver.NewRPCFunc(makeValidatorsFunc(c), "height,page,per_page"),
		"validators":      rpcserver.NewRPCFunc(makeValidatorsFunc(c), "height,page,per_page", rpcserver.Cacheable("height")),
		"dump_consensus_state": rpcserver.NewRPCFunc(makeDumpConsensusStateFunc(c), ""),
		"consensus_state":      rpcserver.NewRPCFunc(makeConsensusStateFunc(c), ""),
		"consensus_params":     rpcserver.NewRPCFunc(makeConsensusParamsFunc(c), "height"),
		"consensus_params":     rpcserver.NewRPCFunc(makeConsensusParamsFunc(c), "height", rpcserver.Cacheable("height")),
		"unconfirmed_txs":      rpcserver.NewRPCFunc(makeUnconfirmedTxsFunc(c), "limit"),
		"num_unconfirmed_txs":  rpcserver.NewRPCFunc(makeNumUnconfirmedTxsFunc(c), ""),

@@ -47,7 +47,7 @@ func RPCRoutes(c *lrpc.Client) map[string]*rpcserver.RPCFunc {

		// abci API
		"abci_query": rpcserver.NewRPCFunc(makeABCIQueryFunc(c), "path,data,height,prove"),
		"abci_info":  rpcserver.NewRPCFunc(makeABCIInfoFunc(c), ""),
		"abci_info":  rpcserver.NewRPCFunc(makeABCIInfoFunc(c), "", rpcserver.Cacheable()),

		// evidence API
		"broadcast_evidence": rpcserver.NewRPCFunc(makeBroadcastEvidenceFunc(c), "evidence"),

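My reading of the new option, hedged since the diff does not show its implementation: rpcserver.Cacheable() marks a route's responses as cacheable unconditionally, while rpcserver.Cacheable("height") makes them cacheable only when the named argument is explicitly supplied, so queries for the implicit latest height are never served from cache. The two shapes as they appear above:

	// Cached only when an explicit height is part of the request:
	"commit": rpcserver.NewRPCFunc(makeCommitFunc(c), "height", rpcserver.Cacheable("height")),

	// Always cacheable, since the genesis document never changes:
	"genesis": rpcserver.NewRPCFunc(makeGenesisFunc(c), "", rpcserver.Cacheable()),
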
@@ -590,7 +590,7 @@ func (c *Client) updateLightClientIfNeededTo(ctx context.Context, height *int64)
		l, err = c.lc.VerifyLightBlockAtHeight(ctx, *height, time.Now())
	}
	if err != nil {
		return nil, fmt.Errorf("failed to update light client to %d: %w", height, err)
		return nil, fmt.Errorf("failed to update light client to %d: %w", *height, err)
	}
	return l, nil
}

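For context, this one-character fix matters because height is a *int64: Go's fmt package formats pointers passed to integer verbs as addresses, so the old message printed a memory address instead of the height. A minimal illustration (my example, not part of the diff):

	// Sketch of the bug: %d on a pointer formats the address, not the value.
	h := int64(590)
	fmt.Printf("%d\n", &h) // prints the address as an integer, e.g. 824634441728
	fmt.Printf("%d\n", h)  // prints 590
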
306
node/node.go
@@ -11,7 +11,7 @@ import (
	"github.com/prometheus/client_golang/prometheus/promhttp"
	"github.com/rs/cors"

	"github.com/tendermint/tendermint/blocksync"
	bc "github.com/tendermint/tendermint/blocksync"
	cfg "github.com/tendermint/tendermint/config"
	cs "github.com/tendermint/tendermint/consensus"
	"github.com/tendermint/tendermint/evidence"
@@ -50,6 +50,7 @@ type Node struct {
	privValidator types.PrivValidator // local node's validator key

	// network
	transport *p2p.MultiplexTransport
	sw        *p2p.Switch  // p2p connections
	addrBook  pex.AddrBook // known peers
	nodeInfo  p2p.NodeInfo
@@ -59,10 +60,11 @@ type Node struct {
	// services
	eventBus         *types.EventBus // pub/sub for services
	stateStore       sm.Store
	blockStore       *store.BlockStore  // store the blockchain to disk
	bcReactor        *blocksync.Reactor // for block-syncing
	mempoolReactor   p2p.Reactor        // for gossipping transactions
	blockStore       *store.BlockStore // store the blockchain to disk
	bcReactor        p2p.Reactor       // for block-syncing
	mempoolReactor   p2p.Reactor       // for gossipping transactions
	mempool          mempl.Mempool
	stateSync        bool                    // whether the node should state sync on startup
	stateSyncReactor *statesync.Reactor      // for hosting and restoring state sync snapshots
	stateSyncProvider statesync.StateProvider // provides state data for bootstrapping a node
	stateSyncGenesis sm.State                 // provides the genesis state for state sync
@@ -77,7 +79,6 @@ type Node struct {
	indexerService *txindex.IndexerService
	prometheusSrv  *http.Server
	pprofSrv       *http.Server
	customReactors map[string]p2p.Reactor
}

// Option sets a parameter for the node.
@@ -97,7 +98,29 @@ type Option func(*Node)
// - STATESYNC
func CustomReactors(reactors map[string]p2p.Reactor) Option {
	return func(n *Node) {
		n.customReactors = reactors
		for name, reactor := range reactors {
			if existingReactor := n.sw.Reactor(name); existingReactor != nil {
				n.sw.Logger.Info("Replacing existing reactor with a custom one",
					"name", name, "existing", existingReactor, "custom", reactor)
				n.sw.RemoveReactor(name, existingReactor)
			}
			n.sw.AddReactor(name, reactor)
			// register the new channels to the nodeInfo
			// NOTE: This is a bit messy now with the type casting but is
			// cleaned up in the following version when NodeInfo is changed from
			// an interface to a concrete type
			if ni, ok := n.nodeInfo.(p2p.DefaultNodeInfo); ok {
				for _, chDesc := range reactor.GetChannels() {
					if !ni.HasChannel(chDesc.ID) {
						ni.Channels = append(ni.Channels, chDesc.ID)
						n.transport.AddChannel(chDesc.ID)
					}
				}
				n.nodeInfo = ni
			} else {
				n.Logger.Error("Node info is not of type DefaultNodeInfo. Custom reactor channels can not be added.")
			}
		}
	}
}

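As exercised further down in TestNodeNewNodeCustomReactors, the option is passed to NewNode like any other. A condensed sketch (the argument values are assumed to be set up as in DefaultNewNode, and myReactor is a hypothetical p2p.Reactor implementation):

	// Replace the built-in BLOCKSYNC reactor at construction time.
	n, err := NewNode(
		config,
		privValidator,
		nodeKey,
		clientCreator,
		DefaultGenesisDocProviderFunc(config),
		cfg.DefaultDBProvider,
		DefaultMetricsProvider(config.Instrumentation),
		logger,
		CustomReactors(map[string]p2p.Reactor{"BLOCKSYNC": myReactor}),
	)
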
@@ -118,7 +141,7 @@ func NewNode(config *cfg.Config,
	nodeKey *p2p.NodeKey,
	clientCreator proxy.ClientCreator,
	genesisDocProvider GenesisDocProvider,
	dbProvider DBProvider,
	dbProvider cfg.DBProvider,
	metricsProvider MetricsProvider,
	logger log.Logger,
	options ...Option,
@@ -140,23 +163,71 @@ func NewNode(config *cfg.Config,
	csMetrics, p2pMetrics, memplMetrics, smMetrics, abciMetrics, bsMetrics, ssMetrics := metricsProvider(genDoc.ChainID)

	// Create the proxyApp and establish connections to the ABCI app (consensus, mempool, query).
	proxyApp, err := createProxyAppConns(clientCreator, logger, abciMetrics)
	proxyApp, err := createAndStartProxyAppConns(clientCreator, logger, abciMetrics)
	if err != nil {
		return nil, fmt.Errorf("error starting proxy app connections: %v", err)
		return nil, err
	}

	// EventBus and IndexerService must be started before the handshake because
	// we might need to index the txs of the replayed block as this might not have happened
	// when the node stopped last time (i.e. the node stopped after it saved the block
	// but before it indexed the txs, or, endblocker panicked)
	eventBus := createEventBus(logger)
	eventBus, err := createAndStartEventBus(logger)
	if err != nil {
		return nil, err
	}

	indexerService, txIndexer, blockIndexer, err := createIndexerService(config,
	indexerService, txIndexer, blockIndexer, err := createAndStartIndexerService(config,
		genDoc.ChainID, dbProvider, eventBus, logger)
	if err != nil {
		return nil, err
	}

	// If an address is provided, listen on the socket for a connection from an
	// external signing process.
	if config.PrivValidatorListenAddr != "" {
		// FIXME: we should start services inside OnStart
		privValidator, err = createAndStartPrivValidatorSocketClient(config.PrivValidatorListenAddr, genDoc.ChainID, logger)
		if err != nil {
			return nil, fmt.Errorf("error with private validator socket client: %w", err)
		}
	}

	pubKey, err := privValidator.GetPubKey()
	if err != nil {
		return nil, fmt.Errorf("can't get pubkey: %w", err)
	}

	// Determine whether we should attempt state sync.
	stateSync := config.StateSync.Enable && !onlyValidatorIsUs(state, pubKey)
	if stateSync && state.LastBlockHeight > 0 {
		logger.Info("Found local state with non-zero height, skipping state sync")
		stateSync = false
	}

	// Create the handshaker, which calls RequestInfo, sets the AppVersion on the state,
	// and replays any blocks as necessary to sync tendermint with the app.
	consensusLogger := logger.With("module", "consensus")
	if !stateSync {
		if err := doHandshake(stateStore, state, blockStore, genDoc, eventBus, proxyApp, consensusLogger); err != nil {
			return nil, err
		}

		// Reload the state. It will have the Version.Consensus.App set by the
		// Handshake, and may have other modifications as well (ie. depending on
		// what happened during block replay).
		state, err = stateStore.Load()
		if err != nil {
			return nil, fmt.Errorf("cannot load state: %w", err)
		}
	}

	// Determine whether we should do block sync. This must happen after the handshake, since the
	// app may modify the validator set, specifying ourself as the only validator.
	blockSync := config.BlockSyncMode && !onlyValidatorIsUs(state, pubKey)

	logNodeStartupInfo(state, pubKey, logger, consensusLogger)

	// Make MempoolReactor
	mempool, mempoolReactor := createMempoolAndMempoolReactor(config, proxyApp, state, memplMetrics, logger)

@@ -178,16 +249,15 @@ func NewNode(config *cfg.Config,
	)

	// Make BlocksyncReactor. Don't start block sync if we're doing a state sync first.
	bcReactor, err := createBlocksyncReactor(config, state, blockExec, blockStore, bsMetrics, logger)
	bcReactor, err := createBlocksyncReactor(config, state, blockExec, blockStore, blockSync && !stateSync, logger, bsMetrics)
	if err != nil {
		return nil, fmt.Errorf("could not create blocksync reactor: %w", err)
	}

	// Make ConsensusReactor. Don't enable fully if doing a state sync and/or block sync first.
	consensusLogger := logger.With("module", "consensus")
	// Make ConsensusReactor
	consensusReactor, consensusState := createConsensusReactor(
		config, blockExec, blockStore, mempool, evidencePool,
		privValidator, csMetrics, eventBus, consensusLogger,
		config, state, blockExec, blockStore, mempool, evidencePool,
		privValidator, csMetrics, stateSync || blockSync, eventBus, consensusLogger,
	)

	// Set up state sync reactor, and schedule a sync if requested.
@@ -203,11 +273,19 @@ func NewNode(config *cfg.Config,
	)
	stateSyncReactor.SetLogger(logger.With("module", "statesync"))

	nodeInfo, err := makeNodeInfo(config, nodeKey, txIndexer, genDoc, state)
	if err != nil {
		return nil, err
	}

	// Setup Transport.
	transport, peerFilters := createTransport(config, nodeInfo, nodeKey, proxyApp)

	// Setup Switch.
	p2pLogger := logger.With("module", "p2p")
	sw := createSwitch(
		config, p2pMetrics, mempoolReactor, bcReactor,
		stateSyncReactor, consensusReactor, evidenceReactor, p2pLogger,
		config, transport, p2pMetrics, peerFilters, mempoolReactor, bcReactor,
		stateSyncReactor, consensusReactor, evidenceReactor, nodeInfo, nodeKey, p2pLogger,
	)

	err = sw.AddPersistentPeers(splitAndTrimEmpty(config.P2P.PersistentPeers, ",", " "))
@@ -220,11 +298,22 @@ func NewNode(config *cfg.Config,
		return nil, fmt.Errorf("could not add peer ids from unconditional_peer_ids field: %w", err)
	}

	addrBook, err := createAddrBook(config, p2pLogger, nodeKey)
	addrBook, err := createAddrBookAndSetOnSwitch(config, sw, p2pLogger, nodeKey)
	if err != nil {
		return nil, fmt.Errorf("could not create addrbook: %w", err)
	}

	for _, addr := range splitAndTrimEmpty(config.P2P.BootstrapPeers, ",", " ") {
		netAddrs, err := p2p.NewNetAddressString(addr)
		if err != nil {
			return nil, fmt.Errorf("invalid bootstrap peer address: %w", err)
		}
		err = addrBook.AddAddress(netAddrs, netAddrs)
		if err != nil {
			return nil, fmt.Errorf("adding bootstrap address to addressbook: %w", err)
		}
	}

	// Optionally, start the pex reactor
	//
	// TODO:
@@ -239,8 +328,7 @@ func NewNode(config *cfg.Config,
	// Note we currently use the addrBook regardless at least for AddOurAddress
	var pexReactor *pex.Reactor
	if config.P2P.PexReactor {
		pexReactor = createPEXReactor(addrBook, config, logger)
		sw.AddReactor("PEX", pexReactor)
		pexReactor = createPEXReactorAndAddToSwitch(addrBook, config, sw, logger)
	}

	// Add private IDs to addrbook to block those peers being added
@@ -251,9 +339,11 @@ func NewNode(config *cfg.Config,
		genesisDoc:    genDoc,
		privValidator: privValidator,

		sw:       sw,
		addrBook: addrBook,
		nodeKey:  nodeKey,
		transport: transport,
		sw:        sw,
		addrBook:  addrBook,
		nodeInfo:  nodeInfo,
		nodeKey:   nodeKey,

		stateStore: stateStore,
		blockStore: blockStore,
@@ -263,6 +353,7 @@ func NewNode(config *cfg.Config,
		consensusState:   consensusState,
		consensusReactor: consensusReactor,
		stateSyncReactor: stateSyncReactor,
		stateSync:        stateSync,
		stateSyncGenesis: state, // Shouldn't be necessary, but need a way to pass the genesis state
		pexReactor:       pexReactor,
		evidencePool:     evidencePool,
@@ -278,15 +369,6 @@ func NewNode(config *cfg.Config,
		option(node)
	}

	for name, reactor := range node.customReactors {
		if existingReactor := node.sw.Reactor(name); existingReactor != nil {
			node.sw.Logger.Info("Replacing existing reactor with a custom one",
				"name", name, "existing", existingReactor, "custom", reactor)
			node.sw.RemoveReactor(name, existingReactor)
		}
		node.sw.AddReactor(name, reactor)
	}

	return node, nil
}

@@ -299,77 +381,6 @@ func (n *Node) OnStart() error {
		time.Sleep(genTime.Sub(now))
	}

	// If an address is provided, listen on the socket for a connection from an
	// external signing process. This will overwrite the privvalidator provided in the constructor
	if n.config.PrivValidatorListenAddr != "" {
		var err error
		n.privValidator, err = createPrivValidatorSocketClient(n.config.PrivValidatorListenAddr, n.genesisDoc.ChainID, n.Logger)
		if err != nil {
			return fmt.Errorf("error with private validator socket client: %w", err)
		}
	}

	pubKey, err := n.privValidator.GetPubKey()
	if err != nil {
		return fmt.Errorf("can't get pubkey: %w", err)
	}

	state, err := n.stateStore.LoadFromDBOrGenesisDoc(n.genesisDoc)
	if err != nil {
		return fmt.Errorf("cannot load state: %w", err)
	}

	// Determine whether we should attempt state sync.
	stateSync := n.config.StateSync.Enable && !onlyValidatorIsUs(state, pubKey)
	if stateSync && state.LastBlockHeight > 0 {
		n.Logger.Info("Found local state with non-zero height, skipping state sync")
		stateSync = false
	}

	// Create the handshaker, which calls RequestInfo, sets the AppVersion on the state,
	// and replays any blocks as necessary to sync tendermint with the app.

	if !stateSync {
		if err := doHandshake(n.stateStore, state, n.blockStore, n.genesisDoc, n.eventBus, n.proxyApp, n.Logger); err != nil {
			return err
		}

		// Reload the state. It will have the Version.Consensus.App set by the
		// Handshake, and may have other modifications as well (ie. depending on
		// what happened during block replay).
		state, err = n.stateStore.Load()
		if err != nil {
			return fmt.Errorf("cannot load state: %w", err)
		}
	}

	nodeInfo, err := makeNodeInfo(n.config, n.nodeKey, n.txIndexer, n.genesisDoc, state)
	if err != nil {
		return err
	}

	for _, reactor := range n.customReactors {
		for _, chDesc := range reactor.GetChannels() {
			if !nodeInfo.HasChannel(chDesc.ID) {
				nodeInfo.Channels = append(nodeInfo.Channels, chDesc.ID)
			}
		}
	}
	n.nodeInfo = nodeInfo

	addr, err := p2p.NewNetAddressString(p2p.IDAddressString(n.nodeKey.ID(), n.config.P2P.ListenAddress))
	if err != nil {
		return err
	}

	// Setup Transport.
	transport, peerFilters := createTransport(n.config, n.nodeInfo, n.nodeKey, addr, n.proxyApp)

	n.sw.SetTransport(transport)
	n.sw.SetPeerFilters(peerFilters...)
	n.sw.SetNodeInfo(n.nodeInfo)
	n.sw.SetNodeKey(n.nodeKey)

	// run pprof server if it is enabled
	if n.config.RPC.IsPprofEnabled() {
		n.pprofSrv = n.startPprofServer()
@@ -390,13 +401,16 @@ func (n *Node) OnStart() error {
		n.rpcListeners = listeners
	}

	if err := n.eventBus.Start(); err != nil {
	// Start the transport.
	addr, err := p2p.NewNetAddressString(p2p.IDAddressString(n.nodeKey.ID(), n.config.P2P.ListenAddress))
	if err != nil {
		return err
	}
	if err := n.transport.Listen(*addr); err != nil {
		return err
	}

	if err := n.indexerService.Start(); err != nil {
		return err
	}
	n.isListening = true

	// Start the switch (the P2P server).
	err = n.sw.Start()
@@ -404,37 +418,23 @@ func (n *Node) OnStart() error {
		return err
	}

	n.isListening = true

	// Always connect to persistent peers
	err = n.sw.DialPeersAsync(splitAndTrimEmpty(n.config.P2P.PersistentPeers, ",", " "))
	if err != nil {
		return fmt.Errorf("could not dial peers from persistent_peers field: %w", err)
	}

	// Determine whether we should do block sync. This must happen after the handshake, since the
	// app may modify the validator set, specifying ourself as the only validator.
	blockSync := n.config.BlockSyncMode && !onlyValidatorIsUs(state, pubKey)

	logNodeStartupInfo(state, pubKey, n.Logger)

	// Run start up phases
	if stateSync {
		err := startStateSync(n.stateSyncReactor, n.bcReactor, n.consensusReactor, n.stateSyncProvider,
	// Run state sync
	if n.stateSync {
		bcR, ok := n.bcReactor.(blockSyncReactor)
		if !ok {
			return fmt.Errorf("this blocksync reactor does not support switching from state sync")
		}
		err := startStateSync(n.stateSyncReactor, bcR, n.consensusReactor, n.stateSyncProvider,
			n.config.StateSync, n.config.BlockSyncMode, n.stateStore, n.blockStore, n.stateSyncGenesis)
		if err != nil {
			return fmt.Errorf("failed to start state sync: %w", err)
		}
	} else if blockSync {
		err := n.bcReactor.SwitchToBlockSync(state)
		if err != nil {
			return fmt.Errorf("failed to start block sync: %w", err)
		}
	} else {
		err := n.consensusReactor.SwitchToConsensus(state, false)
		if err != nil {
			return fmt.Errorf("failed to switch to consensus: %w", err)
		}
	}

	return nil
@@ -459,6 +459,10 @@ func (n *Node) OnStop() {
		n.Logger.Error("Error closing switch", "err", err)
	}

	if err := n.transport.Close(); err != nil {
		n.Logger.Error("Error closing transport", "err", err)
	}

	n.isListening = false

	// finally stop the listeners / external services
@@ -499,12 +503,12 @@ func (n *Node) OnStop() {
}

// ConfigureRPC makes sure RPC has all the objects it needs to operate.
func (n *Node) ConfigureRPC() error {
func (n *Node) ConfigureRPC() (*rpccore.Environment, error) {
	pubKey, err := n.privValidator.GetPubKey()
	if err != nil {
		return fmt.Errorf("can't get pubkey: %w", err)
	if pubKey == nil || err != nil {
		return nil, fmt.Errorf("can't get pubkey: %w", err)
	}
	rpccore.SetEnvironment(&rpccore.Environment{
	rpcCoreEnv := rpccore.Environment{
		ProxyAppQuery:   n.proxyApp.Query(),
		ProxyAppMempool: n.proxyApp.Mempool(),
@@ -514,38 +518,36 @@ func (n *Node) ConfigureRPC() error {
		ConsensusState: n.consensusState,
		P2PPeers:       n.sw,
		P2PTransport:   n,
		PubKey:         pubKey,

		PubKey:           pubKey,
		GenDoc:           n.genesisDoc,
		TxIndexer:        n.txIndexer,
		BlockIndexer:     n.blockIndexer,
		ConsensusReactor: n.consensusReactor,
		BlocksyncReactor: n.bcReactor,
		StatesyncReactor: n.stateSyncReactor,
		EventBus:         n.eventBus,
		Mempool:          n.mempool,

		Logger: n.Logger.With("module", "rpc"),

		Config: *n.config.RPC,
	})
	if err := rpccore.InitGenesisChunks(); err != nil {
		return err
	}

	return nil
	}
	if err := rpcCoreEnv.InitGenesisChunks(); err != nil {
		return nil, err
	}
	return &rpcCoreEnv, nil
}

func (n *Node) startRPC() ([]net.Listener, error) {
	err := n.ConfigureRPC()
	env, err := n.ConfigureRPC()
	if err != nil {
		return nil, err
	}

	listenAddrs := splitAndTrimEmpty(n.config.RPC.ListenAddress, ",", " ")
	routes := env.GetRoutes()

	if n.config.RPC.Unsafe {
		rpccore.AddUnsafeRoutes()
		env.AddUnsafeRoutes(routes)
	}

	config := rpcserver.DefaultConfig()
@@ -565,7 +567,7 @@ func (n *Node) startRPC() ([]net.Listener, error) {
		mux := http.NewServeMux()
		rpcLogger := n.Logger.With("module", "rpc-server")
		wmLogger := rpcLogger.With("protocol", "websocket")
		wm := rpcserver.NewWebsocketManager(rpccore.Routes,
		wm := rpcserver.NewWebsocketManager(routes,
			rpcserver.OnDisconnect(func(remoteAddr string) {
				err := n.eventBus.UnsubscribeAll(context.Background(), remoteAddr)
				if err != nil && err != tmpubsub.ErrSubscriptionNotFound {
@@ -577,10 +579,10 @@ func (n *Node) startRPC() ([]net.Listener, error) {
		)
		wm.SetLogger(wmLogger)
		mux.HandleFunc("/websocket", wm.WebsocketHandler)
		rpcserver.RegisterRPCFuncs(mux, rpccore.Routes, rpcLogger)
		rpcserver.RegisterRPCFuncs(mux, routes, rpcLogger)
		listener, err := rpcserver.Listen(
			listenAddr,
			config,
			config.MaxOpenConnections,
		)
		if err != nil {
			return nil, err
@@ -638,12 +640,12 @@ func (n *Node) startRPC() ([]net.Listener, error) {
		if config.WriteTimeout <= n.config.RPC.TimeoutBroadcastTxCommit {
			config.WriteTimeout = n.config.RPC.TimeoutBroadcastTxCommit + 1*time.Second
		}
		listener, err := rpcserver.Listen(grpcListenAddr, config)
		listener, err := rpcserver.Listen(grpcListenAddr, config.MaxOpenConnections)
		if err != nil {
			return nil, err
		}
		go func() {
			if err := grpccore.StartGRPCServer(listener); err != nil {
			if err := grpccore.StartGRPCServer(env, listener); err != nil {
				n.Logger.Error("Error starting gRPC server", "err", err)
			}
		}()
@@ -655,7 +657,7 @@ func (n *Node) startRPC() ([]net.Listener, error) {
}

// startPrometheusServer starts a Prometheus HTTP server, listening for metrics
// collectors on the provided address.
// collectors on addr.
func (n *Node) startPrometheusServer() *http.Server {
	srv := &http.Server{
		Addr: n.config.Instrumentation.PrometheusListenAddr,
@@ -676,7 +678,7 @@ func (n *Node) startPrometheusServer() *http.Server {
	return srv
}

// starts a pprof server at the specified listen address
// starts a ppro
func (n *Node) startPprofServer() *http.Server {
	srv := &http.Server{
		Addr: n.config.RPC.PprofListenAddress,
@@ -686,7 +688,7 @@ func (n *Node) startPprofServer() *http.Server {
	go func() {
		if err := srv.ListenAndServe(); err != http.ErrServerClosed {
			// Error starting or closing listener:
			n.Logger.Error("Prometheus HTTP server ListenAndServe", "err", err)
			n.Logger.Error("pprof HTTP server ListenAndServe", "err", err)
		}
	}()
	return srv
@@ -797,7 +799,7 @@ func makeNodeInfo(
		Network: genDoc.ChainID,
		Version: version.TMCoreSemVer,
		Channels: []byte{
			blocksync.BlocksyncChannel,
			bc.BlocksyncChannel,
			cs.StateChannel, cs.DataChannel, cs.VoteChannel, cs.VoteSetBitsChannel,
			mempl.MempoolChannel,
			evidence.EvidenceChannel,

@@ -53,7 +53,7 @@ func TestNodeStartStop(t *testing.T) {
	require.NoError(t, err)
	select {
	case <-blocksSub.Out():
	case <-blocksSub.Cancelled():
	case <-blocksSub.Canceled():
		t.Fatal("blocksSub was canceled")
	case <-time.After(10 * time.Second):
		t.Fatal("timed out waiting for the node to produce a block")
@@ -123,8 +123,6 @@ func TestNodeSetAppVersion(t *testing.T) {
	// create & start node
	n, err := DefaultNewNode(config, log.TestingLogger())
	require.NoError(t, err)
	require.NoError(t, n.Start())
	defer n.Stop() //nolint:errcheck

	// default config uses the kvstore app
	var appVersion = kvstore.ProtocolVersion
@@ -191,8 +189,6 @@ func TestNodeSetPrivValTCP(t *testing.T) {

	n, err := DefaultNewNode(config, log.TestingLogger())
	require.NoError(t, err)
	require.NoError(t, n.Start())
	defer n.Stop() //nolint:errcheck
	assert.IsType(t, &privval.RetrySignerClient{}, n.PrivValidator())
}

@@ -205,7 +201,7 @@ func TestPrivValidatorListenAddrNoProtocol(t *testing.T) {
	config.BaseConfig.PrivValidatorListenAddr = addrNoPrefix

	_, err := DefaultNewNode(config, log.TestingLogger())
	require.NoError(t, err)
	assert.Error(t, err)
}

func TestNodeSetPrivValIPC(t *testing.T) {
@@ -455,6 +451,7 @@ func TestNodeNewNodeCustomReactors(t *testing.T) {
			RecvMessageCapacity: 100,
		},
	}
	customBlocksyncReactor := p2pmock.NewReactor()

	nodeKey, err := p2p.LoadOrGenNodeKey(config.NodeKeyFile())
	require.NoError(t, err)
@@ -464,10 +461,10 @@ func TestNodeNewNodeCustomReactors(t *testing.T) {
		nodeKey,
		proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()),
		DefaultGenesisDocProviderFunc(config),
		DefaultDBProvider,
		cfg.DefaultDBProvider,
		DefaultMetricsProvider(config.Instrumentation),
		log.TestingLogger(),
		CustomReactors(map[string]p2p.Reactor{"FOO": cr}),
		CustomReactors(map[string]p2p.Reactor{"FOO": cr, "BLOCKSYNC": customBlocksyncReactor}),
	)
	require.NoError(t, err)

@@ -478,6 +475,9 @@ func TestNodeNewNodeCustomReactors(t *testing.T) {
	assert.True(t, cr.IsRunning())
	assert.Equal(t, cr, n.Switch().Reactor("FOO"))

	assert.True(t, customBlocksyncReactor.IsRunning())
	assert.Equal(t, customBlocksyncReactor, n.Switch().Reactor("BLOCKSYNC"))

	channels := n.NodeInfo().(p2p.DefaultNodeInfo).Channels
	assert.Contains(t, channels, mempl.MempoolChannel)
	assert.Contains(t, channels, cr.Channels[0].ID)

144
node/setup.go
@@ -7,6 +7,8 @@ import (
	"fmt"
	"net"
	"strings"

	_ "net/http/pprof" //nolint: gosec // securely exposed on separate, optional port
	"time"

	dbm "github.com/tendermint/tm-db"
@@ -17,6 +19,7 @@ import (
	cs "github.com/tendermint/tendermint/consensus"
	"github.com/tendermint/tendermint/crypto"
	"github.com/tendermint/tendermint/evidence"
	"github.com/tendermint/tendermint/statesync"

	tmjson "github.com/tendermint/tendermint/libs/json"
	"github.com/tendermint/tendermint/libs/log"
@@ -30,13 +33,8 @@ import (
	"github.com/tendermint/tendermint/proxy"
	sm "github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/state/indexer"
	blockidxkv "github.com/tendermint/tendermint/state/indexer/block/kv"
	blockidxnull "github.com/tendermint/tendermint/state/indexer/block/null"
	"github.com/tendermint/tendermint/state/indexer/sink/psql"
	"github.com/tendermint/tendermint/state/indexer/block"
	"github.com/tendermint/tendermint/state/txindex"
	"github.com/tendermint/tendermint/state/txindex/kv"
	"github.com/tendermint/tendermint/state/txindex/null"
	"github.com/tendermint/tendermint/statesync"
	"github.com/tendermint/tendermint/store"
	"github.com/tendermint/tendermint/types"
	"github.com/tendermint/tendermint/version"
@@ -44,24 +42,8 @@ import (
	_ "github.com/lib/pq" // provide the psql db driver
)

// DBContext specifies config information for loading a new DB.
type DBContext struct {
	ID     string
	Config *cfg.Config
}

// DBProvider takes a DBContext and returns an instantiated DB.
type DBProvider func(*DBContext) (dbm.DB, error)

const readHeaderTimeout = 10 * time.Second

// DefaultDBProvider returns a database using the DBBackend and DBDir
// specified in the ctx.Config.
func DefaultDBProvider(ctx *DBContext) (dbm.DB, error) {
	dbType := dbm.BackendType(ctx.Config.DBBackend)
	return dbm.NewDB(ctx.ID, dbType, ctx.Config.DBDir())
}

// GenesisDocProvider returns a GenesisDoc.
// It allows the GenesisDoc to be pulled from sources other than the
// filesystem, for instance from a distributed key-value store cluster.
@@ -92,7 +74,7 @@ func DefaultNewNode(config *cfg.Config, logger log.Logger) (*Node, error) {
		nodeKey,
		proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()),
		DefaultGenesisDocProviderFunc(config),
		DefaultDBProvider,
		cfg.DefaultDBProvider,
		DefaultMetricsProvider(config.Instrumentation),
		logger,
	)

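With DBContext and DBProvider relocated to the config package, opening a database now goes through cfg everywhere. A minimal sketch (the config variable is assumed to be a loaded *cfg.Config, mirroring the call sites below):

	// Open a named database via the relocated provider.
	evidenceDB, err := cfg.DefaultDBProvider(&cfg.DBContext{ID: "evidence", Config: config})
	if err != nil {
		return nil, err
	}
	_ = evidenceDB // hand off to the evidence pool, etc.
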
@@ -124,15 +106,15 @@ type blockSyncReactor interface {

//------------------------------------------------------------------------------

func initDBs(config *cfg.Config, dbProvider DBProvider) (blockStore *store.BlockStore, stateDB dbm.DB, err error) {
func initDBs(config *cfg.Config, dbProvider cfg.DBProvider) (blockStore *store.BlockStore, stateDB dbm.DB, err error) {
	var blockStoreDB dbm.DB
	blockStoreDB, err = dbProvider(&DBContext{"blockstore", config})
	blockStoreDB, err = dbProvider(&cfg.DBContext{ID: "blockstore", Config: config})
	if err != nil {
		return
	}
	blockStore = store.NewBlockStore(blockStoreDB)

	stateDB, err = dbProvider(&DBContext{"state", config})
	stateDB, err = dbProvider(&cfg.DBContext{ID: "state", Config: config})
	if err != nil {
		return
	}
@@ -140,7 +122,7 @@ func initDBs(config *cfg.Config, dbProvider DBProvider) (blockStore *store.Block
	return
}

func createProxyAppConns(clientCreator proxy.ClientCreator, logger log.Logger, metrics *proxy.Metrics) (proxy.AppConns, error) {
func createAndStartProxyAppConns(clientCreator proxy.ClientCreator, logger log.Logger, metrics *proxy.Metrics) (proxy.AppConns, error) {
	proxyApp := proxy.NewAppConns(clientCreator, metrics)
	proxyApp.SetLogger(logger.With("module", "proxy"))
	if err := proxyApp.Start(); err != nil {
@@ -149,16 +131,19 @@ func createProxyAppConns(clientCreator proxy.ClientCreator, logger log.Logger, m
	return proxyApp, nil
}

func createEventBus(logger log.Logger) *types.EventBus {
func createAndStartEventBus(logger log.Logger) (*types.EventBus, error) {
	eventBus := types.NewEventBus()
	eventBus.SetLogger(logger.With("module", "events"))
	return eventBus
	if err := eventBus.Start(); err != nil {
		return nil, err
	}
	return eventBus, nil
}

func createIndexerService(
func createAndStartIndexerService(
	config *cfg.Config,
	chainID string,
	dbProvider DBProvider,
	dbProvider cfg.DBProvider,
	eventBus *types.EventBus,
	logger log.Logger,
) (*txindex.IndexerService, txindex.TxIndexer, indexer.BlockIndexer, error) {
@@ -166,36 +151,18 @@ func createIndexerService(
		txIndexer    txindex.TxIndexer
		blockIndexer indexer.BlockIndexer
	)

	switch config.TxIndex.Indexer {
	case "kv":
		store, err := dbProvider(&DBContext{"tx_index", config})
		if err != nil {
			return nil, nil, nil, err
		}

		txIndexer = kv.NewTxIndex(store)
		blockIndexer = blockidxkv.New(dbm.NewPrefixDB(store, []byte("block_events")))

	case "psql":
		if config.TxIndex.PsqlConn == "" {
			return nil, nil, nil, errors.New(`no psql-conn is set for the "psql" indexer`)
		}
		es, err := psql.NewEventSink(config.TxIndex.PsqlConn, chainID)
		if err != nil {
			return nil, nil, nil, fmt.Errorf("creating psql indexer: %w", err)
		}
		txIndexer = es.TxIndexer()
		blockIndexer = es.BlockIndexer()

	default:
		txIndexer = &null.TxIndex{}
		blockIndexer = &blockidxnull.BlockerIndexer{}
	txIndexer, blockIndexer, err := block.IndexerFromConfig(config, dbProvider, chainID)
	if err != nil {
		return nil, nil, nil, err
	}

	indexerService := txindex.NewIndexerService(txIndexer, blockIndexer, eventBus, false)
	indexerService.SetLogger(logger.With("module", "txindex"))

	if err := indexerService.Start(); err != nil {
		return nil, nil, nil, err
	}

	return indexerService, txIndexer, blockIndexer, nil
}

@@ -217,7 +184,7 @@ func doHandshake(
	return nil
}

func logNodeStartupInfo(state sm.State, pubKey crypto.PubKey, logger log.Logger) {
func logNodeStartupInfo(state sm.State, pubKey crypto.PubKey, logger, consensusLogger log.Logger) {
	// Log the version info.
	logger.Info("Version info",
		"tendermint_version", version.TMCoreSemVer,
@@ -238,9 +205,9 @@ func logNodeStartupInfo(state sm.State, pubKey crypto.PubKey, logger log.Logger)
	addr := pubKey.Address()
	// Log whether this node is a validator or an observer
	if state.Validators.HasAddress(addr) {
		logger.Info("This node is a validator", "addr", addr, "pubKey", pubKey)
		consensusLogger.Info("This node is a validator", "addr", addr, "pubKey", pubKey)
	} else {
		logger.Info("This node is not a validator", "addr", addr, "pubKey", pubKey)
		consensusLogger.Info("This node is not a validator", "addr", addr, "pubKey", pubKey)
	}
}

@@ -259,6 +226,7 @@ func createMempoolAndMempoolReactor(
	memplMetrics *mempl.Metrics,
	logger log.Logger,
) (mempl.Mempool, p2p.Reactor) {
	logger = logger.With("module", "mempool")
	switch config.Mempool.Version {
	case cfg.MempoolV1:
		mp := mempoolv1.NewTxMempool(
@@ -278,6 +246,7 @@ func createMempoolAndMempoolReactor(
		if config.Consensus.WaitForTxs() {
			mp.EnableTxsAvailable()
		}
		reactor.SetLogger(logger)

		return mp, reactor

@@ -300,6 +269,7 @@ func createMempoolAndMempoolReactor(
		if config.Consensus.WaitForTxs() {
			mp.EnableTxsAvailable()
		}
		reactor.SetLogger(logger)

		return mp, reactor

@@ -308,10 +278,10 @@ func createMempoolAndMempoolReactor(
	}
}

func createEvidenceReactor(config *cfg.Config, dbProvider DBProvider,
func createEvidenceReactor(config *cfg.Config, dbProvider cfg.DBProvider,
	stateStore sm.Store, blockStore *store.BlockStore, logger log.Logger,
) (*evidence.Reactor, *evidence.Pool, error) {
	evidenceDB, err := dbProvider(&DBContext{"evidence", config})
	evidenceDB, err := dbProvider(&cfg.DBContext{ID: "evidence", Config: config})
	if err != nil {
		return nil, nil, err
	}
@@ -329,12 +299,13 @@ func createBlocksyncReactor(config *cfg.Config,
	state sm.State,
	blockExec *sm.BlockExecutor,
	blockStore *store.BlockStore,
	metrics *blocksync.Metrics,
	blockSync bool,
	logger log.Logger,
) (bcReactor *blocksync.Reactor, err error) {
	metrics *blocksync.Metrics,
) (bcReactor p2p.Reactor, err error) {
	switch config.BlockSync.Version {
	case "v0":
		bcReactor = blocksync.NewReactor(state.Copy(), blockExec, blockStore, metrics)
		bcReactor = blocksync.NewReactor(state.Copy(), blockExec, blockStore, blockSync, metrics)
	case "v1", "v2":
		return nil, fmt.Errorf("block sync version %s has been deprecated. Please use v0", config.BlockSync.Version)
	default:
@@ -346,28 +317,31 @@ func createBlocksyncReactor(config *cfg.Config,
}

func createConsensusReactor(config *cfg.Config,
	state sm.State,
	blockExec *sm.BlockExecutor,
	blockStore sm.BlockStore,
	mempool mempl.Mempool,
	evidencePool *evidence.Pool,
	privValidator types.PrivValidator,
	csMetrics *cs.Metrics,
	waitSync bool,
	eventBus *types.EventBus,
	consensusLogger log.Logger,
) (*cs.Reactor, *cs.State) {
	consensusState := cs.NewState(
		config.Consensus,
		state.Copy(),
		blockExec,
		blockStore,
		mempool,
		evidencePool,
		cs.WithMetrics(csMetrics),
		cs.StateMetrics(csMetrics),
	)
	consensusState.SetLogger(consensusLogger)
	if privValidator != nil {
		consensusState.SetPrivValidator(privValidator)
	}
	consensusReactor := cs.NewReactor(consensusState, cs.ReactorMetrics(csMetrics))
	consensusReactor := cs.NewReactor(consensusState, waitSync, cs.ReactorMetrics(csMetrics))
	consensusReactor.SetLogger(consensusLogger)
	// services which will be publishing and/or subscribing for messages (events)
	// consensusReactor will set it on consensusState and blockExecutor
@@ -379,7 +353,6 @@ func createTransport(
	config *cfg.Config,
	nodeInfo p2p.NodeInfo,
	nodeKey *p2p.NodeKey,
	netAddr *p2p.NetAddress,
	proxyApp proxy.AppConns,
) (
	*p2p.MultiplexTransport,
@@ -387,7 +360,7 @@ func createTransport(
) {
	var (
		mConnConfig = p2p.MConnConfig(config.P2P)
		transport   = p2p.NewMultiplexTransport(nodeInfo, *nodeKey, *netAddr, mConnConfig)
		transport   = p2p.NewMultiplexTransport(nodeInfo, *nodeKey, mConnConfig)
		connFilters = []p2p.ConnFilterFunc{}
		peerFilters = []p2p.PeerFilterFunc{}
	)
@@ -446,17 +419,23 @@
}

func createSwitch(config *cfg.Config,
	transport p2p.Transport,
	p2pMetrics *p2p.Metrics,
	peerFilters []p2p.PeerFilterFunc,
	mempoolReactor p2p.Reactor,
	bcReactor p2p.Reactor,
	stateSyncReactor *statesync.Reactor,
	consensusReactor *cs.Reactor,
	evidenceReactor *evidence.Reactor,
	nodeInfo p2p.NodeInfo,
	nodeKey *p2p.NodeKey,
	p2pLogger log.Logger,
) *p2p.Switch {
	sw := p2p.NewSwitch(
		config.P2P,
		transport,
		p2p.WithMetrics(p2pMetrics),
		p2p.SwitchPeerFilters(peerFilters...),
	)
	sw.SetLogger(p2pLogger)
	sw.AddReactor("MEMPOOL", mempoolReactor)
@@ -464,10 +443,15 @@ func createSwitch(config *cfg.Config,
	sw.AddReactor("CONSENSUS", consensusReactor)
	sw.AddReactor("EVIDENCE", evidenceReactor)
	sw.AddReactor("STATESYNC", stateSyncReactor)

	sw.SetNodeInfo(nodeInfo)
	sw.SetNodeKey(nodeKey)

	p2pLogger.Info("P2P Node ID", "ID", nodeKey.ID(), "file", config.NodeKeyFile())
	return sw
}

func createAddrBook(config *cfg.Config,
func createAddrBookAndSetOnSwitch(config *cfg.Config, sw *p2p.Switch,
	p2pLogger log.Logger, nodeKey *p2p.NodeKey,
) (pex.AddrBook, error) {
	addrBook := pex.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict)
@@ -488,10 +472,15 @@ func createAddrBook(config *cfg.Config,
		}
		addrBook.AddOurAddress(addr)
	}

	sw.SetAddrBook(addrBook)

	return addrBook, nil
}

func createPEXReactor(addrBook pex.AddrBook, config *cfg.Config, logger log.Logger) *pex.Reactor {
func createPEXReactorAndAddToSwitch(addrBook pex.AddrBook, config *cfg.Config,
	sw *p2p.Switch, logger log.Logger,
) *pex.Reactor {
	// TODO persistent peers ? so we can have their DNS addrs saved
	pexReactor := pex.NewReactor(addrBook,
		&pex.ReactorConfig{
@@ -506,6 +495,7 @@ func createPEXReactor(addrBook pex.AddrBook, config *cfg.Config, logger log.Logg
		PersistentPeersMaxDialPeriod: config.P2P.PersistentPeersMaxDialPeriod,
	})
	pexReactor.SetLogger(logger.With("module", "pex"))
	sw.AddReactor("PEX", pexReactor)
	return pexReactor
}

@@ -557,11 +547,7 @@ func startStateSync(ssR *statesync.Reactor, bcR blockSyncReactor, conR *cs.React
|
||||
return
|
||||
}
|
||||
} else {
|
||||
err := conR.SwitchToConsensus(state, true)
|
||||
if err != nil {
|
||||
ssR.Logger.Error("Failed to switch to consensus", "err", err)
|
||||
return
|
||||
}
|
||||
conR.SwitchToConsensus(state, true)
|
||||
}
|
||||
}()
|
||||
return nil
|
||||
@@ -636,7 +622,7 @@ func saveGenesisDoc(db dbm.DB, genDoc *types.GenesisDoc) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func createPrivValidatorSocketClient(
|
||||
func createAndStartPrivValidatorSocketClient(
|
||||
listenAddr,
|
||||
chainID string,
|
||||
logger log.Logger,
|
||||
@@ -651,6 +637,12 @@ func createPrivValidatorSocketClient(
|
||||
return nil, fmt.Errorf("failed to start private validator: %w", err)
|
||||
}
|
||||
|
||||
// try to get a pubkey from private validate first time
|
||||
_, err = pvsc.GetPubKey()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("can't get pubkey: %w", err)
|
||||
}
|
||||
|
||||
const (
|
||||
retries = 50 // 50 * 100ms = 5s total
|
||||
timeout = 100 * time.Millisecond
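The hunk cuts off inside the constant block, so the retry logic itself is not shown. A hypothetical continuation, assuming the constants feed tendermint's privval.NewRetrySignerClient wrapper (a guess at the elided code, not part of this diff):

```go
	)

	// Hypothetical: wrap the socket client so transient signer failures are
	// retried, using the retries/timeout constants declared above.
	pvscWithRetries := privval.NewRetrySignerClient(pvsc, retries, timeout)

	return pvscWithRetries, nil
```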

@@ -1,13 +1,13 @@
package p2p

import (
	"errors"
	"fmt"
	"math"
	"sync"
	"time"

	"github.com/cosmos/gogoproto/proto"

	"github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/libs/cmap"
	"github.com/tendermint/tendermint/libs/rand"
@@ -108,6 +108,7 @@ type SwitchOption func(*Switch)
// NewSwitch creates a new Switch with the given config.
func NewSwitch(
	cfg *config.P2PConfig,
	transport Transport,
	options ...SwitchOption,
) *Switch {

@@ -121,6 +122,7 @@ func NewSwitch(
		dialing:              cmap.NewCMap(),
		reconnecting:         cmap.NewCMap(),
		metrics:              NopMetrics(),
		transport:            transport,
		filterTimeout:        defaultFilterTimeout,
		persistentPeersAddrs: make([]*NetAddress, 0),
		unconditionalPeerIDs: make(map[ID]struct{}),
@@ -223,30 +225,11 @@ func (sw *Switch) SetNodeKey(nodeKey *NodeKey) {
	sw.nodeKey = nodeKey
}
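The constructor change removes the inject-after-construction pattern. An illustrative before/after:

```go
// Before: the transport was set after the switch was built.
sw := NewSwitch(cfg)
sw.SetTransport(transport)

// After: the switch is fully wired at construction time.
sw = NewSwitch(cfg, transport)
```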

func (sw *Switch) SetPeerFilters(filters ...PeerFilterFunc) {
	sw.peerFilters = filters
}

func (sw *Switch) SetTransport(transport Transport) {
	if sw.IsRunning() {
		panic("cannot set transport while switch is running")
	}
	sw.transport = transport
}

//---------------------------------------------------------------------
// Service start/stop

// OnStart implements BaseService. It starts all the reactors and peers.
func (sw *Switch) OnStart() error {
	if sw.transport == nil {
		return errors.New("transport not set")
	}

	if err := sw.transport.Start(); err != nil {
		return err
	}

	// Start reactors
	for _, reactor := range sw.reactors {
		err := reactor.Start()
@@ -275,10 +258,6 @@ func (sw *Switch) OnStop() {
			sw.Logger.Error("error while stopped reactor", "reactor", reactor, "error", err)
		}
	}

	if err := sw.transport.Stop(); err != nil {
		sw.Logger.Error("closing transport", "err", err)
	}
}
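With the transport's Start/Stop calls gone from the switch, the caller owns the transport lifecycle. A sketch of the assumed wiring (matching the Listen/Close API introduced later in this diff):

```go
// Bind the transport before handing it to the switch...
if err := transport.Listen(*netAddr); err != nil {
	return err
}
sw := NewSwitch(cfg, transport)
if err := sw.Start(); err != nil {
	return err
}

// ...and close it during shutdown, independently of sw.Stop().
defer transport.Close() //nolint:errcheck
```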

//---------------------------------------------------------------------
@@ -748,7 +727,7 @@ func (sw *Switch) addOutboundPeerWithConfig(
	addr *NetAddress,
	cfg *config.P2PConfig,
) error {
	sw.Logger.Info("Dialing peer", "address", addr)
	sw.Logger.Debug("Dialing peer", "address", addr)

	// XXX(xla): Remove the leakage of test concerns in implementation.
	if cfg.TestDialFail {
@@ -876,7 +855,7 @@ func (sw *Switch) addPeer(p Peer) error {
		reactor.AddPeer(p)
	}

	sw.Logger.Info("Added peer", "peer", p)
	sw.Logger.Debug("Added peer", "peer", p)

	return nil
}

@@ -22,7 +22,6 @@ import (
	"github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/crypto/ed25519"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/libs/service"
	tmsync "github.com/tendermint/tendermint/libs/sync"
	"github.com/tendermint/tendermint/p2p/conn"
	p2pproto "github.com/tendermint/tendermint/proto/tendermint/p2p"
@@ -696,16 +695,9 @@ func TestSwitchAcceptRoutine(t *testing.T) {
}

type errorTransport struct {
	service.BaseService
	acceptErr error
}

func newErrTransport(acceptErr error) *errorTransport {
	t := &errorTransport{acceptErr: acceptErr}
	t.BaseService = *service.NewBaseService(nil, "Error Transport", t)
	return t
}

func (et errorTransport) NetAddress() NetAddress {
	panic("not implemented")
}
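Dropping service.BaseService from the stub means no constructor is needed; tests can build it by value and pass it straight to NewSwitch, as the updated cases below do:

```go
// A value-type stub suffices now that Transport no longer embeds
// service.Service; acceptErr is the only state it carries.
sw := NewSwitch(cfg, errorTransport{ErrFilterTimeout{}})
```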

@@ -721,9 +713,7 @@ func (errorTransport) Cleanup(Peer) {
}

func TestSwitchAcceptRoutineErrorCases(t *testing.T) {
	sw := NewSwitch(cfg)
	sw.SetTransport(newErrTransport(ErrFilterTimeout{}))

	sw := NewSwitch(cfg, errorTransport{ErrFilterTimeout{}})
	assert.NotPanics(t, func() {
		err := sw.Start()
		require.NoError(t, err)
@@ -731,8 +721,7 @@ func TestSwitchAcceptRoutineErrorCases(t *testing.T) {
		require.NoError(t, err)
	})

	sw = NewSwitch(cfg)
	sw.SetTransport(newErrTransport(ErrRejected{conn: nil, err: errors.New("filtered"), isFiltered: true}))
	sw = NewSwitch(cfg, errorTransport{ErrRejected{conn: nil, err: errors.New("filtered"), isFiltered: true}})
	assert.NotPanics(t, func() {
		err := sw.Start()
		require.NoError(t, err)
@@ -741,8 +730,7 @@ func TestSwitchAcceptRoutineErrorCases(t *testing.T) {
	})
	// TODO(melekes) check we remove our address from addrBook

	sw = NewSwitch(cfg)
	sw.SetTransport(newErrTransport(ErrTransportClosed{}))
	sw = NewSwitch(cfg, errorTransport{ErrTransportClosed{}})
	assert.NotPanics(t, func() {
		err := sw.Start()
		require.NoError(t, err)

@@ -194,11 +194,14 @@ func MakeSwitch(
		panic(err)
	}

	t := NewMultiplexTransport(nodeInfo, nodeKey, *addr, MConnConfig(cfg))
	t := NewMultiplexTransport(nodeInfo, nodeKey, MConnConfig(cfg))

	if err := t.Listen(*addr); err != nil {
		panic(err)
	}

	// TODO: let the config be passed in?
	sw := initSwitch(i, NewSwitch(cfg, opts...))
	sw.SetTransport(t)
	sw := initSwitch(i, NewSwitch(cfg, t, opts...))
	sw.SetLogger(log.TestingLogger().With("switch", i))
	sw.SetNodeKey(&nodeKey)

@@ -9,9 +9,9 @@ import (
	"golang.org/x/net/netutil"

	"github.com/cosmos/gogoproto/proto"

	"github.com/tendermint/tendermint/crypto"
	"github.com/tendermint/tendermint/libs/protoio"
	"github.com/tendermint/tendermint/libs/service"
	"github.com/tendermint/tendermint/p2p/conn"
	tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p"
)
@@ -60,8 +60,6 @@ type peerConfig struct {
// the transport. Each transport is also responsible to filter establishing
// peers specific to its domain.
type Transport interface {
	service.Service

	// Listening address.
	NetAddress() NetAddress

@@ -75,6 +73,13 @@ type Transport interface {
	Cleanup(Peer)
}

// transportLifecycle bundles the methods for callers to control start and stop
// behavior.
type transportLifecycle interface {
	Close() error
	Listen(NetAddress) error
}
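A sketch of how a caller drives this lifecycle (names from the interface above; the wiring itself is illustrative):

```go
// Construct, bind, and eventually tear down the transport explicitly.
t := NewMultiplexTransport(nodeInfo, nodeKey, conn.DefaultMConnConfig())
if err := t.Listen(*addr); err != nil {
	panic(err)
}
defer t.Close() //nolint:errcheck
```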

// ConnFilterFunc to be implemented by filter hooks after a new connection has
// been established. The set of existing connections is passed along together
// with all resolved IPs for the new connection.
@@ -132,8 +137,6 @@ func MultiplexTransportMaxIncomingConnections(n int) MultiplexTransportOption {
// MultiplexTransport accepts and dials tcp connections and upgrades them to
// multiplexed peers.
type MultiplexTransport struct {
	service.BaseService

	netAddr                NetAddress
	listener               net.Listener
	maxIncomingConnections int // see MaxIncomingConnections
@@ -160,15 +163,15 @@ type MultiplexTransport struct {

// Test multiplexTransport for interface completeness.
var _ Transport = (*MultiplexTransport)(nil)
var _ transportLifecycle = (*MultiplexTransport)(nil)

// NewMultiplexTransport returns a tcp connected multiplexed peer.
func NewMultiplexTransport(
	nodeInfo NodeInfo,
	nodeKey NodeKey,
	netAddr NetAddress,
	mConfig conn.MConnConfig,
) *MultiplexTransport {
	t := &MultiplexTransport{
	return &MultiplexTransport{
		acceptc:     make(chan accept),
		closec:      make(chan struct{}),
		dialTimeout: defaultDialTimeout,
@@ -177,12 +180,9 @@ func NewMultiplexTransport(
		mConfig:  mConfig,
		nodeInfo: nodeInfo,
		nodeKey:  nodeKey,
		netAddr:  netAddr,
		conns:    NewConnSet(),
		resolver: net.DefaultResolver,
	}
	t.BaseService = *service.NewBaseService(nil, "P2P Transport", t)
	return t
}

// NetAddress implements Transport.
@@ -235,20 +235,20 @@ func (mt *MultiplexTransport) Dial(
	return p, nil
}

// OnStop implements Service.
func (mt *MultiplexTransport) OnStop() {
// Close implements transportLifecycle.
func (mt *MultiplexTransport) Close() error {
	close(mt.closec)

	if mt.listener != nil {
		if err := mt.listener.Close(); err != nil {
			mt.Logger.Error("closing listener", "err", err)
		}
		return mt.listener.Close()
	}

	return nil
}

// OnStart implements Service.
func (mt *MultiplexTransport) OnStart() error {
	ln, err := net.Listen("tcp", mt.netAddr.DialString())
// Listen implements transportLifecycle.
func (mt *MultiplexTransport) Listen(addr NetAddress) error {
	ln, err := net.Listen("tcp", addr.DialString())
	if err != nil {
		return err
	}
@@ -257,6 +257,7 @@ func (mt *MultiplexTransport) OnStart() error {
		ln = netutil.LimitListener(ln, mt.maxIncomingConnections)
	}

	mt.netAddr = addr
	mt.listener = ln

	go mt.acceptPeers()

@@ -10,7 +10,6 @@ import (
	"testing"
	"time"

	"github.com/stretchr/testify/require"
	"github.com/tendermint/tendermint/crypto/ed25519"
	"github.com/tendermint/tendermint/libs/protoio"
	"github.com/tendermint/tendermint/p2p/conn"
@@ -29,26 +28,20 @@ func emptyNodeInfo() NodeInfo {
func newMultiplexTransport(
	nodeInfo NodeInfo,
	nodeKey NodeKey,
	netAddr NetAddress,
) *MultiplexTransport {
	return NewMultiplexTransport(
		nodeInfo, nodeKey, netAddr, conn.DefaultMConnConfig(),
		nodeInfo, nodeKey, conn.DefaultMConnConfig(),
	)
}

func TestTransportMultiplexConnFilter(t *testing.T) {
	nodeKey := NodeKey{
		PrivKey: ed25519.GenPrivKey(),
	}
	id := nodeKey.ID()
	addr, err := NewNetAddressString(IDAddressString(id, "127.0.0.1:0"))
	require.NoError(t, err)

	mt := newMultiplexTransport(
		emptyNodeInfo(),
		nodeKey,
		*addr,
		NodeKey{
			PrivKey: ed25519.GenPrivKey(),
		},
	)
	id := mt.nodeKey.ID()

	MultiplexTransportConnFilters(
		func(_ ConnSet, _ net.Conn, _ []net.IP) error { return nil },
@@ -58,8 +51,14 @@ func TestTransportMultiplexConnFilter(t *testing.T) {
		},
	)(mt)

	err = mt.Start()
	require.NoError(t, err)
	addr, err := NewNetAddressString(IDAddressString(id, "127.0.0.1:0"))
	if err != nil {
		t.Fatal(err)
	}

	if err := mt.Listen(*addr); err != nil {
		t.Fatal(err)
	}

	errc := make(chan error)

@@ -90,18 +89,13 @@ func TestTransportMultiplexConnFilter(t *testing.T) {
}

func TestTransportMultiplexConnFilterTimeout(t *testing.T) {
	nodeKey := NodeKey{
		PrivKey: ed25519.GenPrivKey(),
	}
	id := nodeKey.ID()
	addr, err := NewNetAddressString(IDAddressString(id, "127.0.0.1:0"))
	require.NoError(t, err)

	mt := newMultiplexTransport(
		emptyNodeInfo(),
		nodeKey,
		*addr,
		NodeKey{
			PrivKey: ed25519.GenPrivKey(),
		},
	)
	id := mt.nodeKey.ID()

	MultiplexTransportFilterTimeout(5 * time.Millisecond)(mt)
	MultiplexTransportConnFilters(
@@ -111,9 +105,14 @@ func TestTransportMultiplexConnFilterTimeout(t *testing.T) {
		},
	)(mt)

	err = mt.Start()
	require.NoError(t, err)
	defer mt.Stop() //nolint:errcheck
	addr, err := NewNetAddressString(IDAddressString(id, "127.0.0.1:0"))
	if err != nil {
		t.Fatal(err)
	}

	if err := mt.Listen(*addr); err != nil {
		t.Fatal(err)
	}

	errc := make(chan error)
	go func() {
@@ -141,10 +140,6 @@ func TestTransportMultiplexConnFilterTimeout(t *testing.T) {
func TestTransportMultiplexMaxIncomingConnections(t *testing.T) {
	pv := ed25519.GenPrivKey()
	id := PubKeyToID(pv.PubKey())
	addr, err := NewNetAddressString(IDAddressString(id, "127.0.0.1:0"))
	if err != nil {
		t.Fatal(err)
	}
	mt := newMultiplexTransport(
		testNodeInfo(
			id, "transport",
@@ -152,22 +147,26 @@ func TestTransportMultiplexMaxIncomingConnections(t *testing.T) {
		NodeKey{
			PrivKey: pv,
		},
		*addr,
	)

	MultiplexTransportMaxIncomingConnections(0)(mt)

	addr, err := NewNetAddressString(IDAddressString(id, "127.0.0.1:0"))
	if err != nil {
		t.Fatal(err)
	}
	const maxIncomingConns = 2
	MultiplexTransportMaxIncomingConnections(maxIncomingConns)(mt)
	err = mt.Start()
	require.NoError(t, err)
	if err := mt.Listen(*addr); err != nil {
		t.Fatal(err)
	}

	laddr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr())

	// Connect more peers than max
	for i := 0; i <= maxIncomingConns; i++ {
		errc := make(chan error)
		go testDialer(t, *laddr, errc)
		go testDialer(*laddr, errc)

		err = <-errc
		if i < maxIncomingConns {
@@ -199,7 +198,7 @@ func TestTransportMultiplexAcceptMultiple(t *testing.T) {

	// Setup dialers.
	for i := 0; i < nDialers; i++ {
		go testDialer(t, *laddr, errc)
		go testDialer(*laddr, errc)
	}

	// Catch connection errors.
@@ -236,26 +235,23 @@ func TestTransportMultiplexAcceptMultiple(t *testing.T) {
		}
	}

	if err := mt.Stop(); err != nil {
	if err := mt.Close(); err != nil {
		t.Errorf("close errored: %v", err)
	}
}

func testDialer(t *testing.T, dialAddr NetAddress, errc chan error) {
	pv := ed25519.GenPrivKey()
	id := PubKeyToID(pv.PubKey())
	addr, err := NewNetAddressString(IDAddressString(id, "127.0.0.1:0"))
	require.NoError(t, err)

	dialer := newMultiplexTransport(
		testNodeInfo(PubKeyToID(pv.PubKey()), defaultNodeName),
		NodeKey{
			PrivKey: pv,
		},
		*addr,
func testDialer(dialAddr NetAddress, errc chan error) {
	var (
		pv     = ed25519.GenPrivKey()
		dialer = newMultiplexTransport(
			testNodeInfo(PubKeyToID(pv.PubKey()), defaultNodeName),
			NodeKey{
				PrivKey: pv,
			},
		)
	)

	_, err = dialer.Dial(dialAddr, peerConfig{})
	_, err := dialer.Dial(dialAddr, peerConfig{})
	if err != nil {
		errc <- err
		return
@@ -323,14 +319,15 @@ func TestTransportMultiplexAcceptNonBlocking(t *testing.T) {
	go func() {
		<-slowc

		addr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr())
		dialer := newMultiplexTransport(
			fastNodeInfo,
			NodeKey{
				PrivKey: fastNodePV,
			},
			*addr,
		var (
			dialer = newMultiplexTransport(
				fastNodeInfo,
				NodeKey{
					PrivKey: fastNodePV,
				},
			)
		)
		addr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr())

		_, err := dialer.Dial(*addr, peerConfig{})
		if err != nil {
@@ -363,17 +360,18 @@ func TestTransportMultiplexValidateNodeInfo(t *testing.T) {
	errc := make(chan error)

	go func() {

		pv := ed25519.GenPrivKey()
		addr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr())
		dialer := newMultiplexTransport(
			testNodeInfo(PubKeyToID(pv.PubKey()), ""), // Should not be empty
			NodeKey{
				PrivKey: pv,
			},
			*addr,
		var (
			pv     = ed25519.GenPrivKey()
			dialer = newMultiplexTransport(
				testNodeInfo(PubKeyToID(pv.PubKey()), ""), // Should not be empty
				NodeKey{
					PrivKey: pv,
				},
			)
		)

		addr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr())

		_, err := dialer.Dial(*addr, peerConfig{})
		if err != nil {
			errc <- err
@@ -403,7 +401,6 @@ func TestTransportMultiplexRejectMissmatchID(t *testing.T) {
	errc := make(chan error)

	go func() {
		addr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr())
		dialer := newMultiplexTransport(
			testNodeInfo(
				PubKeyToID(ed25519.GenPrivKey().PubKey()), "dialer",
@@ -411,8 +408,8 @@ func TestTransportMultiplexRejectMissmatchID(t *testing.T) {
			NodeKey{
				PrivKey: ed25519.GenPrivKey(),
			},
			*addr,
		)
		addr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr())

		_, err := dialer.Dial(*addr, peerConfig{})
		if err != nil {
@@ -440,16 +437,18 @@ func TestTransportMultiplexRejectMissmatchID(t *testing.T) {
func TestTransportMultiplexDialRejectWrongID(t *testing.T) {
	mt := testSetupMultiplexTransport(t)

	pv := ed25519.GenPrivKey()
	var (
		pv     = ed25519.GenPrivKey()
		dialer = newMultiplexTransport(
			testNodeInfo(PubKeyToID(pv.PubKey()), ""), // Should not be empty
			NodeKey{
				PrivKey: pv,
			},
		)
	)

	wrongID := PubKeyToID(ed25519.GenPrivKey().PubKey())
	addr := NewNetAddress(wrongID, mt.listener.Addr())
	dialer := newMultiplexTransport(
		testNodeInfo(PubKeyToID(pv.PubKey()), ""), // Should not be empty
		NodeKey{
			PrivKey: pv,
		},
		*addr,
	)

	_, err := dialer.Dial(*addr, peerConfig{})
	if err != nil {
@@ -472,15 +471,14 @@ func TestTransportMultiplexRejectIncompatible(t *testing.T) {
	go func() {
		var (
			pv     = ed25519.GenPrivKey()
			addr   = NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr())
			dialer = newMultiplexTransport(
				testNodeInfoWithNetwork(PubKeyToID(pv.PubKey()), "dialer", "incompatible-network"),
				NodeKey{
					PrivKey: pv,
				},
				*addr,
			)
		)
		addr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr())

		_, err := dialer.Dial(*addr, peerConfig{})
		if err != nil {
@@ -624,16 +622,11 @@ func TestTransportHandshake(t *testing.T) {
}

func TestTransportAddChannel(t *testing.T) {
	pv := ed25519.GenPrivKey()
	id := PubKeyToID(pv.PubKey())
	addr, err := NewNetAddressString(IDAddressString(id, "127.0.0.1:0"))
	require.NoError(t, err)
	mt := newMultiplexTransport(
		emptyNodeInfo(),
		NodeKey{
			PrivKey: pv,
			PrivKey: ed25519.GenPrivKey(),
		},
		*addr,
	)
	testChannel := byte(0x01)

@@ -645,27 +638,27 @@ func TestTransportAddChannel(t *testing.T) {

// create listener
func testSetupMultiplexTransport(t *testing.T) *MultiplexTransport {
	pv := ed25519.GenPrivKey()
	id := PubKeyToID(pv.PubKey())
	var (
		pv = ed25519.GenPrivKey()
		id = PubKeyToID(pv.PubKey())
		mt = newMultiplexTransport(
			testNodeInfo(
				id, "transport",
			),
			NodeKey{
				PrivKey: pv,
			},
		)
	)

	addr, err := NewNetAddressString(IDAddressString(id, "127.0.0.1:0"))
	if err != nil {
		t.Fatal(err)
	}
	mt := newMultiplexTransport(
		testNodeInfo(
			id, "transport",
		),
		NodeKey{
			PrivKey: pv,
		},
		*addr,
	)

	err = mt.Start()
	require.NoError(t, err)
	t.Cleanup(func() {
		mt.Stop() //nolint:errcheck
	})
	if err := mt.Listen(*addr); err != nil {
		t.Fatal(err)
	}

	// give the listener some time to get ready
	time.Sleep(20 * time.Millisecond)

@@ -2,6 +2,7 @@ package p2p

import (
	"github.com/cosmos/gogoproto/proto"

	"github.com/tendermint/tendermint/p2p/conn"
	tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p"
)

@@ -4,6 +4,7 @@ import (
	"fmt"

	"github.com/cosmos/gogoproto/proto"

	"github.com/tendermint/tendermint/p2p"
)


@@ -4,6 +4,7 @@ import (
	"fmt"

	"github.com/cosmos/gogoproto/proto"

	"github.com/tendermint/tendermint/p2p"
)


@@ -4,6 +4,7 @@ import (
	"fmt"

	"github.com/cosmos/gogoproto/proto"

	"github.com/tendermint/tendermint/p2p"
)


@@ -4,6 +4,7 @@ import (
	"fmt"

	"github.com/cosmos/gogoproto/proto"

	"github.com/tendermint/tendermint/p2p"
)


@@ -26,8 +26,12 @@ type localClientCreator struct {
	app types.Application
}

// NewLocalClientCreator returns a ClientCreator for the given app,
// which will be running locally.
// NewLocalClientCreator returns a [ClientCreator] for the given app, which
// will be running locally.
//
// Maintains a single mutex over all new clients created with NewABCIClient.
// For a local client creator that uses a single mutex per new client, rather
// use [NewUnsyncLocalClientCreator].
func NewLocalClientCreator(app types.Application) ClientCreator {
	return &localClientCreator{
		mtx: new(tmsync.Mutex),
@@ -39,6 +43,28 @@ func (l *localClientCreator) NewABCIClient() (abcicli.Client, error) {
	return abcicli.NewLocalClient(l.mtx, l.app), nil
}

//----------------------------------------------------
// local proxy creates a new mutex for each client

type unsyncLocalClientCreator struct {
	app types.Application
}

// NewUnsyncLocalClientCreator returns a [ClientCreator] for the given app.
// Unlike [NewLocalClientCreator], each call to NewABCIClient returns an ABCI
// client that maintains its own mutex over the application.
func NewUnsyncLocalClientCreator(app types.Application) ClientCreator {
	return &unsyncLocalClientCreator{
		app: app,
	}
}

func (c *unsyncLocalClientCreator) NewABCIClient() (abcicli.Client, error) {
	// Specifying nil for the mutex causes each instance to create its own
	// mutex.
	return abcicli.NewLocalClient(nil, c.app), nil
}
}

//---------------------------------------------------------------
// remote proxy opens new connections to an external app process

@@ -68,21 +94,37 @@ func (r *remoteClientCreator) NewABCIClient() (abcicli.Client, error) {
	return remoteApp, nil
}

// DefaultClientCreator returns a default ClientCreator, which will create a
// local client if addr is one of: 'kvstore',
// 'persistent_kvstore' or 'noop', otherwise - a remote client.
// DefaultClientCreator returns a default [ClientCreator], which will create a
// local client if addr is one of "kvstore", "persistent_kvstore", "e2e",
// "noop".
//
// Otherwise a remote client will be created.
//
// Each of "kvstore", "persistent_kvstore" and "e2e" also currently has an
// "_unsync" variant (i.e. "kvstore_unsync", etc.), which attempts to replicate
// the same concurrency model as the remote client.
func DefaultClientCreator(addr, transport, dbDir string) ClientCreator {
	switch addr {
	case "kvstore":
		return NewLocalClientCreator(kvstore.NewApplication())
	case "kvstore_unsync":
		return NewUnsyncLocalClientCreator(kvstore.NewApplication())
	case "persistent_kvstore":
		return NewLocalClientCreator(kvstore.NewPersistentKVStoreApplication(dbDir))
	case "persistent_kvstore_unsync":
		return NewUnsyncLocalClientCreator(kvstore.NewPersistentKVStoreApplication(dbDir))
	case "e2e":
		app, err := e2e.NewApplication(e2e.DefaultConfig(dbDir))
		if err != nil {
			panic(err)
		}
		return NewLocalClientCreator(app)
	case "e2e_unsync":
		app, err := e2e.NewApplication(e2e.DefaultConfig(dbDir))
		if err != nil {
			panic(err)
		}
		return NewUnsyncLocalClientCreator(app)
	case "noop":
		return NewLocalClientCreator(types.NewBaseApplication())
	default:

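For illustration, the new variants are reached through the same entry point; the argument values here are hypothetical:

```go
// e.g. a node configured with proxy_app = "kvstore_unsync"
cc := proxy.DefaultClientCreator("kvstore_unsync", "socket", dbDir)
client, err := cc.NewABCIClient()
if err != nil {
	panic(err)
}
```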
@@ -1,4 +0,0 @@
Tendermint Core v0.34.0 is the Tendermint Core release which supports the Stargate upgrade.

For more information on how to upgrade to Tendermint 0.34, please see [UPGRADING.md](https://github.com/tendermint/tendermint/blob/release/v0.34.0/UPGRADING.md).
For a full list of user-facing changes, please see [CHANGELOG.md](https://github.com/tendermint/tendermint/blob/release/v0.34.0/CHANGELOG.md).
@@ -41,22 +41,20 @@ type Local struct {
	*types.EventBus
	Logger log.Logger
	ctx    *rpctypes.Context
	env    *core.Environment
}

// NewLocal configures a client that calls the Node directly.
//
// Note that given how rpc/core works with package singletons, you can only
// have one node per process. So make sure test cases don't run in parallel,
// or try to simulate an entire network in one process...
func New(node *nm.Node) *Local {
	if err := node.ConfigureRPC(); err != nil {
	env, err := node.ConfigureRPC()
	if err != nil {
		node.Logger.Error("Error configuring RPC", "err", err)
	}
	return &Local{
		EventBus: node.EventBus(),
		Logger:   log.NewNopLogger(),
		ctx:      &rpctypes.Context{},
		env:      env,
	}
}
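Since ConfigureRPC now returns the environment, the local client carries its own handle rather than going through package-level state. A usage sketch (node construction assumed):

```go
c := local.New(node) // wires the node's RPC environment into the client

status, err := c.Status(context.Background())
if err != nil {
	return err
}
fmt.Println(status.SyncInfo.LatestBlockHeight)
```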

@@ -68,11 +66,11 @@ func (c *Local) SetLogger(l log.Logger) {
}

func (c *Local) Status(ctx context.Context) (*ctypes.ResultStatus, error) {
	return core.Status(c.ctx)
	return c.env.Status(c.ctx)
}

func (c *Local) ABCIInfo(ctx context.Context) (*ctypes.ResultABCIInfo, error) {
	return core.ABCIInfo(c.ctx)
	return c.env.ABCIInfo(c.ctx)
}

func (c *Local) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*ctypes.ResultABCIQuery, error) {
@@ -84,55 +82,55 @@ func (c *Local) ABCIQueryWithOptions(
	path string,
	data bytes.HexBytes,
	opts rpcclient.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) {
	return core.ABCIQuery(c.ctx, path, data, opts.Height, opts.Prove)
	return c.env.ABCIQuery(c.ctx, path, data, opts.Height, opts.Prove)
}

func (c *Local) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
	return core.BroadcastTxCommit(c.ctx, tx)
	return c.env.BroadcastTxCommit(c.ctx, tx)
}

func (c *Local) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
	return core.BroadcastTxAsync(c.ctx, tx)
	return c.env.BroadcastTxAsync(c.ctx, tx)
}

func (c *Local) BroadcastTxSync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
	return core.BroadcastTxSync(c.ctx, tx)
	return c.env.BroadcastTxSync(c.ctx, tx)
}

func (c *Local) UnconfirmedTxs(ctx context.Context, limit *int) (*ctypes.ResultUnconfirmedTxs, error) {
	return core.UnconfirmedTxs(c.ctx, limit)
	return c.env.UnconfirmedTxs(c.ctx, limit)
}

func (c *Local) NumUnconfirmedTxs(ctx context.Context) (*ctypes.ResultUnconfirmedTxs, error) {
	return core.NumUnconfirmedTxs(c.ctx)
	return c.env.NumUnconfirmedTxs(c.ctx)
}

func (c *Local) CheckTx(ctx context.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) {
	return core.CheckTx(c.ctx, tx)
	return c.env.CheckTx(c.ctx, tx)
}

func (c *Local) NetInfo(ctx context.Context) (*ctypes.ResultNetInfo, error) {
	return core.NetInfo(c.ctx)
	return c.env.NetInfo(c.ctx)
}

func (c *Local) DumpConsensusState(ctx context.Context) (*ctypes.ResultDumpConsensusState, error) {
	return core.DumpConsensusState(c.ctx)
	return c.env.DumpConsensusState(c.ctx)
}

func (c *Local) ConsensusState(ctx context.Context) (*ctypes.ResultConsensusState, error) {
	return core.ConsensusState(c.ctx)
	return c.env.GetConsensusState(c.ctx)
}

func (c *Local) ConsensusParams(ctx context.Context, height *int64) (*ctypes.ResultConsensusParams, error) {
	return core.ConsensusParams(c.ctx, height)
	return c.env.ConsensusParams(c.ctx, height)
}

func (c *Local) Health(ctx context.Context) (*ctypes.ResultHealth, error) {
	return core.Health(c.ctx)
	return c.env.Health(c.ctx)
}

func (c *Local) DialSeeds(ctx context.Context, seeds []string) (*ctypes.ResultDialSeeds, error) {
	return core.UnsafeDialSeeds(c.ctx, seeds)
	return c.env.UnsafeDialSeeds(c.ctx, seeds)
}

func (c *Local) DialPeers(
@@ -142,51 +140,51 @@ func (c *Local) DialPeers(
	unconditional,
	private bool,
) (*ctypes.ResultDialPeers, error) {
	return core.UnsafeDialPeers(c.ctx, peers, persistent, unconditional, private)
	return c.env.UnsafeDialPeers(c.ctx, peers, persistent, unconditional, private)
}

func (c *Local) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) {
	return core.BlockchainInfo(c.ctx, minHeight, maxHeight)
	return c.env.BlockchainInfo(c.ctx, minHeight, maxHeight)
}

func (c *Local) Genesis(ctx context.Context) (*ctypes.ResultGenesis, error) {
	return core.Genesis(c.ctx)
	return c.env.Genesis(c.ctx)
}

func (c *Local) GenesisChunked(ctx context.Context, id uint) (*ctypes.ResultGenesisChunk, error) {
	return core.GenesisChunked(c.ctx, id)
	return c.env.GenesisChunked(c.ctx, id)
}

func (c *Local) Block(ctx context.Context, height *int64) (*ctypes.ResultBlock, error) {
	return core.Block(c.ctx, height)
	return c.env.Block(c.ctx, height)
}

func (c *Local) BlockByHash(ctx context.Context, hash []byte) (*ctypes.ResultBlock, error) {
	return core.BlockByHash(c.ctx, hash)
	return c.env.BlockByHash(c.ctx, hash)
}

func (c *Local) BlockResults(ctx context.Context, height *int64) (*ctypes.ResultBlockResults, error) {
	return core.BlockResults(c.ctx, height)
	return c.env.BlockResults(c.ctx, height)
}

func (c *Local) Header(ctx context.Context, height *int64) (*ctypes.ResultHeader, error) {
	return core.Header(c.ctx, height)
	return c.env.Header(c.ctx, height)
}

func (c *Local) HeaderByHash(ctx context.Context, hash bytes.HexBytes) (*ctypes.ResultHeader, error) {
	return core.HeaderByHash(c.ctx, hash)
	return c.env.HeaderByHash(c.ctx, hash)
}

func (c *Local) Commit(ctx context.Context, height *int64) (*ctypes.ResultCommit, error) {
	return core.Commit(c.ctx, height)
	return c.env.Commit(c.ctx, height)
}

func (c *Local) Validators(ctx context.Context, height *int64, page, perPage *int) (*ctypes.ResultValidators, error) {
	return core.Validators(c.ctx, height, page, perPage)
	return c.env.Validators(c.ctx, height, page, perPage)
}

func (c *Local) Tx(ctx context.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) {
	return core.Tx(c.ctx, hash, prove)
	return c.env.Tx(c.ctx, hash, prove)
}

func (c *Local) TxSearch(
@@ -197,7 +195,7 @@ func (c *Local) TxSearch(
	perPage *int,
	orderBy string,
) (*ctypes.ResultTxSearch, error) {
	return core.TxSearch(c.ctx, query, prove, page, perPage, orderBy)
	return c.env.TxSearch(c.ctx, query, prove, page, perPage, orderBy)
}

func (c *Local) BlockSearch(
@@ -206,11 +204,11 @@ func (c *Local) BlockSearch(
	page, perPage *int,
	orderBy string,
) (*ctypes.ResultBlockSearch, error) {
	return core.BlockSearch(c.ctx, query, page, perPage, orderBy)
	return c.env.BlockSearch(c.ctx, query, page, perPage, orderBy)
}

func (c *Local) BroadcastEvidence(ctx context.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) {
	return core.BroadcastEvidence(c.ctx, ev)
	return c.env.BroadcastEvidence(c.ctx, ev)
}

func (c *Local) Subscribe(
@@ -262,7 +260,7 @@ func (c *Local) eventsRoutine(
				c.Logger.Error("wanted to publish ResultEvent, but out channel is full", "result", result, "query", result.Query)
			}
		}
		case <-sub.Cancelled():
		case <-sub.Canceled():
			if sub.Err() == tmpubsub.ErrUnsubscribed {
				return
			}
Some files were not shown because too many files have changed in this diff.