Compare commits


10 Commits

Author · SHA1 · Message · Date
Callum Waters · 7018c73baf · fix up a few things · 2022-11-09 15:17:43 +01:00
Callum Waters · 4021df503b · lint · 2022-11-09 13:51:33 +01:00
Callum Waters · 11976a4863 · fix lints · 2022-11-01 14:18:13 +01:00
Callum Waters · 296ec7e113 · merge · 2022-11-01 11:58:53 +01:00
Callum Waters · 933256c862 · node start up phases · 2022-11-01 11:26:46 +01:00
Callum Waters · a2610a9998 · lint · 2022-10-26 17:07:23 +02:00
Callum Waters · da8810f480 · fix gosec compaints · 2022-10-26 17:01:11 +02:00
Callum Waters · 10dca45e8c · Merge branch 'main' of github.com:tendermint/tendermint · 2022-10-26 16:45:32 +02:00
Callum Waters · 67bc51ad72 · split out node into a setup file · 2022-10-26 16:45:27 +02:00
Callum Waters · 3faf580a0d · remove unused id file · 2022-10-26 16:44:45 +02:00
193 changed files with 2259 additions and 7012 deletions

.github/CODEOWNERS vendored

@@ -7,6 +7,6 @@
# global owners are only requested if there isn't a more specific
# codeowner specified below. For this reason, the global codeowners
# are often repeated in package-level definitions.
* @ebuchman @tendermint/tendermint-engineering @adizere @lasarojc
* @ebuchman @tendermint/tendermint-engineering
/spec @ebuchman @tendermint/tendermint-research @tendermint/tendermint-engineering @adizere @lasarojc
/spec @ebuchman @tendermint/tendermint-research @tendermint/tendermint-engineering


@@ -33,7 +33,7 @@ jobs:
if [[ $VERSION =~ ^v[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then
TAGS="$TAGS,${DOCKER_IMAGE}:${VERSION}"
fi
echo "tags=${TAGS}" >> $GITHUB_OUTPUT
echo ::set-output name=tags::${TAGS}
- name: Set up QEMU
uses: docker/setup-qemu-action@master


@@ -35,8 +35,6 @@ jobs:
# versions will be available for the build.
fetch-depth: 0
- name: Build documentation
env:
NODE_OPTIONS: "--openssl-legacy-provider"
run: |
git config --global --add safe.directory "$PWD"
make build-docs


@@ -30,7 +30,8 @@ jobs:
- name: Capture git repo info
id: git-info
run: |
echo "branch=`git branch --show-current`" >> $GITHUB_OUTPUT
echo "::set-output name=branch::`git branch --show-current`"
echo "::set-output name=commit::`git rev-parse HEAD`"
- name: Build
working-directory: test/e2e
@@ -48,6 +49,7 @@ jobs:
outputs:
git-branch: ${{ steps.git-info.outputs.branch }}
git-commit: ${{ steps.git-info.outputs.commit }}
e2e-nightly-fail:
needs: e2e-nightly-test
@@ -61,7 +63,7 @@ jobs:
SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
BRANCH: ${{ needs.e2e-nightly-test.outputs.git-branch }}
RUN_URL: "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
COMMITS_URL: "${{ github.server_url }}/${{ github.repository }}/commits/${{ needs.e2e-nightly-test.outputs.git-branch }}"
COMMIT_URL: "${{ github.server_url }}/${{ github.repository }}/commit/${{ needs.e2e-nightly-test.outputs.git-commit }}"
with:
payload: |
{
@@ -70,7 +72,7 @@ jobs:
"type": "section",
"text": {
"type": "mrkdwn",
"text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMITS_URL }}|latest commits> possibly related to the failure."
"text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMIT_URL }}|commit> related to the failure."
}
}
]


@@ -30,7 +30,8 @@ jobs:
- name: Capture git repo info
id: git-info
run: |
echo "branch=`git branch --show-current`" >> $GITHUB_OUTPUT
echo "::set-output name=branch::`git branch --show-current`"
echo "::set-output name=commit::`git rev-parse HEAD`"
- name: Build
working-directory: test/e2e
@@ -48,6 +49,7 @@ jobs:
outputs:
git-branch: ${{ steps.git-info.outputs.branch }}
git-commit: ${{ steps.git-info.outputs.commit }}
e2e-nightly-fail:
needs: e2e-nightly-test
@@ -61,7 +63,7 @@ jobs:
SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
BRANCH: ${{ needs.e2e-nightly-test.outputs.git-branch }}
RUN_URL: "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
COMMITS_URL: "${{ github.server_url }}/${{ github.repository }}/commits/${{ needs.e2e-nightly-test.outputs.git-branch }}"
COMMIT_URL: "${{ github.server_url }}/${{ github.repository }}/commit/${{ needs.e2e-nightly-test.outputs.git-commit }}"
with:
payload: |
{
@@ -70,7 +72,7 @@ jobs:
"type": "section",
"text": {
"type": "mrkdwn",
"text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMITS_URL }}|latest commits> possibly related to the failure."
"text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMIT_URL }}|commit> related to the failure."
}
}
]


@@ -52,7 +52,7 @@ jobs:
SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
BRANCH: ${{ github.ref_name }}
RUN_URL: "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
COMMITS_URL: "${{ github.server_url }}/${{ github.repository }}/commits/${{ github.ref_name }}"
COMMIT_URL: "${{ github.server_url }}/${{ github.repository }}/commit/${{ github.sha }}"
with:
payload: |
{
@@ -61,7 +61,7 @@ jobs:
"type": "section",
"text": {
"type": "mrkdwn",
"text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMITS_URL }}|latest commits> possibly related to the failure."
"text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMIT_URL }}|commit> related to the failure."
}
}
]


@@ -64,7 +64,7 @@ jobs:
- name: Set crashers count
working-directory: test/fuzz
run: echo "count=$(find . -type d -name 'crashers' | xargs -I % sh -c 'ls % | wc -l' | awk '{total += $1} END {print total}')" >> $GITHUB_OUTPUT
run: echo "::set-output name=count::$(find . -type d -name 'crashers' | xargs -I % sh -c 'ls % | wc -l' | awk '{total += $1} END {print total}')"
id: set-crashers-count
outputs:


@@ -45,7 +45,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Notify Slack upon pre-release
uses: slackapi/slack-github-action@v1.23.0
uses: slackapi/slack-github-action@v1.22.0
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK


@@ -42,7 +42,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Notify Slack upon release
uses: slackapi/slack-github-action@v1.23.0
uses: slackapi/slack-github-action@v1.22.0
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK


@@ -1,60 +0,0 @@
name: Docker E2E Node
# Build & Push rebuilds the e2e Testapp docker image on every push to main and creation of tags
# and pushes the image to https://hub.docker.com/r/tendermint/e2e-node
on:
push:
branches:
- main
tags:
- "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10
- "v[0-9]+.[0-9]+.[0-9]+-alpha.[0-9]+" # e.g. v0.37.0-alpha.1, v0.38.0-alpha.10
- "v[0-9]+.[0-9]+.[0-9]+-beta.[0-9]+" # e.g. v0.37.0-beta.1, v0.38.0-beta.10
- "v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+" # e.g. v0.37.0-rc1, v0.38.0-rc10
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Prepare
id: prep
run: |
DOCKER_IMAGE=tendermint/e2e-node
VERSION=noop
if [[ $GITHUB_REF == refs/tags/* ]]; then
VERSION=${GITHUB_REF#refs/tags/}
elif [[ $GITHUB_REF == refs/heads/* ]]; then
VERSION=$(echo ${GITHUB_REF#refs/heads/} | sed -r 's#/+#-#g')
if [ "${{ github.event.repository.default_branch }}" = "$VERSION" ]; then
VERSION=latest
fi
fi
TAGS="${DOCKER_IMAGE}:${VERSION}"
if [[ $VERSION =~ ^v[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then
TAGS="$TAGS,${DOCKER_IMAGE}:${VERSION}"
fi
echo "tags=${TAGS}" >> $GITHUB_OUTPUT
- name: Set up QEMU
uses: docker/setup-qemu-action@master
with:
platforms: all
- name: Set up Docker Build
uses: docker/setup-buildx-action@v2.2.1
- name: Login to DockerHub
if: ${{ github.event_name != 'pull_request' }}
uses: docker/login-action@v2.1.0
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Publish to Docker Hub
uses: docker/build-push-action@v3.2.0
with:
context: .
file: ./test/e2e/docker/Dockerfile
platforms: linux/amd64,linux/arm64
push: ${{ github.event_name != 'beep_boop' }}
tags: ${{ steps.prep.outputs.tags }}


@@ -2,64 +2,6 @@
Friendly reminder, we have a [bug bounty program](https://hackerone.com/cosmos).
## v0.34.24
*Nov 22, 2022*
Apart from one minor bug fix, this release aims to optimize the output of the
RPC (both HTTP and WebSocket endpoints). See our [upgrading
guidelines](./UPGRADING.md#v03424) for more details.
### IMPROVEMENTS
- `[rpc]` [\#9724](https://github.com/tendermint/tendermint/issues/9724) Remove
useless whitespace in RPC output (@adizere, @thanethomson)
### BUG FIXES
- `[rpc]` [\#9692](https://github.com/tendermint/tendermint/issues/9692) Remove
`Cache-Control` header response from `/check_tx` endpoint (@JayT106)
## v0.34.23
*Nov 9, 2022*
This release introduces some new Prometheus metrics to help in determining what
kinds of messages are consuming the most P2P bandwidth. This builds towards our
broader goal of optimizing Tendermint bandwidth consumption, and will give us
meaningful insights once we can establish these metrics for a number of chains.
We now also return `Cache-Control` headers for select RPC endpoints to help
facilitate caching.
Special thanks to external contributors on this release: @JayT106
### IMPROVEMENTS
- `[p2p]` [\#9641](https://github.com/tendermint/tendermint/issues/9641) Add new
Envelope type and associated methods for sending and receiving Envelopes
instead of raw bytes. This also adds new metrics,
`tendermint_p2p_message_send_bytes_total` and
`tendermint_p2p_message_receive_bytes_total`, that expose how many bytes of
each message type have been sent.
- `[rpc]` [\#9666](https://github.com/tendermint/tendermint/issues/9666) Enable
caching of RPC responses (@JayT106)
The following RPC endpoints will return `Cache-Control` headers with a maximum
age of 1 day:
- `/abci_info`
- `/block`, if `height` is supplied
- `/block_by_hash`
- `/block_results`, if `height` is supplied
- `/blockchain`
- `/check_tx`
- `/commit`, if `height` is supplied
- `/consensus_params`, if `height` is supplied
- `/genesis`
- `/genesis_chunked`
- `/tx`
- `/validators`, if `height` is supplied
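A quick way to observe the new caching behaviour is to hit one of the endpoints above and inspect the response headers. A minimal sketch in Go, assuming a node whose RPC server is listening on the default `localhost:26657`:

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// /block with an explicit height is one of the endpoints that now
	// returns a Cache-Control header (max-age of one day).
	resp, err := http.Get("http://localhost:26657/block?height=1")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	fmt.Println("Cache-Control:", resp.Header.Get("Cache-Control"))
}
```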
## v0.34.22
This release includes several bug fixes, [one of


@@ -12,37 +12,26 @@
- Go API
- [p2p] \#9625 Remove unused p2p/trust package (@cmwaters)
- [rpc] \#9655 Remove global environment and replace with constructor. (@williambanfield,@tychoish)
- [node] \#9655 Move DBContext and DBProvider from the node package to the config package. (@williambanfield,@tychoish)
- Blockchain Protocol
- Data Storage
- [state] \#6541 Move pruneBlocks from consensus/state to state/execution. (@JayT106)
- Tooling
- [tools/tm-signer-harness] \#6498 Use the OS home dir instead of the hardcoded PATH. (@JayT106)
- [metrics] \#9682 move state-syncing and block-syncing metrics to their respective packages (@cmwaters)
labels have moved from block_syncing -> blocksync_syncing and state_syncing -> statesync_syncing
- [inspect] \#9655 Add a new `inspect` command for introspecting the state and block store of a crashed tendermint node. (@williambanfield)
### FEATURES
- [config] \#9680 Introduce `BootstrapPeers` to the config to allow nodes to list peers to be added to
the addressbook upon start up (@cmwaters)
### IMPROVEMENTS
- [pubsub] \#7319 Performance improvements for the event query API (@creachadair)
- [p2p/pex] \#6509 Improve addrBook.hash performance (@cuonglm)
- [crypto/merkle] \#6443 & \#6513 Improve HashAlternatives performance (@cuonglm, @marbar3778)
- [rpc] \#9650 Enable caching of RPC responses (@JayT106)
- [consensus] \#9760 Save peer LastCommit correctly to achieve 50% reduction in gossiped precommits. (@williambanfield)
### BUG FIXES
- [docker] \#9462 ensure Docker image uses consistent version of Go
- [abci-cli] \#9717 fix broken abci-cli help command
## v0.37.0
@@ -108,4 +97,4 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi
- [consensus] \#9229 fix round number of `enterPropose` when handling `RoundStepNewRound` timeout. (@fatcat22)
- [docker] \#9073 enable cross platform build using docker buildx
- [blocksync] \#9518 handle the case when the sending queue is full: retry block request after a timeout
- [blocksync] \#9518 handle the case when the sending queue is full: retry block request after a timeout


@@ -113,15 +113,10 @@ For more information on upgrading, see [UPGRADING.md](./UPGRADING.md).
### Supported Versions
Because we are a small core team, we have limited capacity to ship patch
updates, including security updates. Consequently, we strongly recommend keeping
Tendermint up-to-date. Upgrading instructions can be found in
[UPGRADING.md](./UPGRADING.md).
Currently supported versions include:
- v0.34.x
- v0.37.x (release candidate)
Because we are a small core team, we only ship patch updates, including security
updates, to the most recent minor release and the second-most recent minor
release. Consequently, we strongly recommend keeping Tendermint up-to-date.
Upgrading instructions can be found in [UPGRADING.md](./UPGRADING.md).
## Resources


@@ -98,7 +98,7 @@ Sometimes it's necessary to rename libraries to avoid naming collisions or ambig
* Make use of table driven testing where possible and not-cumbersome
* [Inspiration](https://dave.cheney.net/2013/06/09/writing-table-driven-tests-in-go)
* Make use of [assert](https://godoc.org/github.com/stretchr/testify/assert) and [require](https://godoc.org/github.com/stretchr/testify/require)
* When using mocks, it is recommended to use Testify [mock](<https://pkg.go.dev/github.com/stretchr/testify/mock>
* When using mocks, it is recommended to use Testify [mock] (<https://pkg.go.dev/github.com/stretchr/testify/mock>
) along with [Mockery](https://github.com/vektra/mockery) for autogeneration
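As a small illustration of the table-driven style with `require` (the `Add` function below is a made-up example, not part of the codebase):

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// Add is a trivial helper used only to demonstrate the testing style.
func Add(a, b int) int { return a + b }

func TestAdd(t *testing.T) {
	testCases := []struct {
		name string
		a, b int
		want int
	}{
		{name: "zeros", a: 0, b: 0, want: 0},
		{name: "positives", a: 2, b: 3, want: 5},
		{name: "mixed signs", a: -2, b: 1, want: -1},
	}
	for _, tc := range testCases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			require.Equal(t, tc.want, Add(tc.a, tc.b))
		})
	}
}
```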
## Errors


@@ -5,15 +5,6 @@ Tendermint Core.
## Unreleased
## Config Changes
* A new config field, `BootstrapPeers` has been introduced as a means of
adding a list of addresses to the addressbook upon initializing a node. This is an
alternative to `PersistentPeers`. `PersistentPeers` should only be used for
nodes that you want to keep a constant connection with, e.g. sentry nodes.
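For illustration, bootstrap peers can be set in `config.toml` (`bootstrap_peers = "..."`) or programmatically; a sketch in Go, assuming a build that includes the new `BootstrapPeers` field (the peer IDs and addresses are placeholders):

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/config"
)

func main() {
	cfg := config.DefaultConfig()

	// BootstrapPeers only seeds the address book at startup; PersistentPeers
	// keeps a constant connection open (e.g. to sentry nodes).
	cfg.P2P.BootstrapPeers = "nodeid1@10.0.0.1:26656,nodeid2@10.0.0.2:26656"
	cfg.P2P.PersistentPeers = "sentryid@10.0.0.3:26656"

	fmt.Println("bootstrap peers:", cfg.P2P.BootstrapPeers)
}
```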
----
### ABCI Changes
* The `ABCIVersion` is now `1.0.0`.
@@ -41,14 +32,6 @@ Tendermint Core.
this should have no effect on the wire-level encoding for UTF8-encoded
strings.
## v0.34.24
Note that in [\#9724](https://github.com/tendermint/tendermint/pull/9724) we
un-prettified the JSON output (i.e. removed all indentation) of the HTTP and
WebSocket RPC for performance and subscription stability reasons. We recommend
using a tool such as [jq](https://github.com/stedolan/jq) to obtain prettified
output if you rely on that prettified output in some way.
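If tooling relied on the prettified output, the response can also be re-indented client-side; a minimal Go sketch using only the standard library (assuming a node on the default `localhost:26657`):

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
)

func main() {
	resp, err := http.Get("http://localhost:26657/status")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	raw, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}

	// The RPC now returns compact JSON; re-indent it locally if a
	// human-readable form is needed (jq achieves the same from the shell).
	var pretty bytes.Buffer
	if err := json.Indent(&pretty, raw, "", "  "); err != nil {
		panic(err)
	}
	fmt.Println(pretty.String())
}
```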
## v0.34.20
### Feature: Priority Mempool


@@ -6,6 +6,8 @@ import (
tmsync "github.com/tendermint/tendermint/libs/sync"
)
var _ Client = (*localClient)(nil)
// NOTE: use defer to unlock mutex because Application might panic (e.g., in
// case of malicious tx or query). It only makes sense for publicly exposed
// methods like CheckTx (/broadcast_tx_* RPC endpoint) or Query (/abci_query
@@ -22,6 +24,8 @@ var _ Client = (*localClient)(nil)
// NewLocalClient creates a local client, which will be directly calling the
// methods of the given app.
//
// Both Async and Sync methods ignore the given context.Context parameter.
func NewLocalClient(mtx *tmsync.Mutex, app types.Application) Client {
if mtx == nil {
mtx = new(tmsync.Mutex)
@@ -305,8 +309,7 @@ func (app *localClient) OfferSnapshotSync(req types.RequestOfferSnapshot) (*type
}
func (app *localClient) LoadSnapshotChunkSync(
req types.RequestLoadSnapshotChunk,
) (*types.ResponseLoadSnapshotChunk, error) {
req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
@@ -315,8 +318,7 @@ func (app *localClient) LoadSnapshotChunkSync(
}
func (app *localClient) ApplySnapshotChunkSync(
req types.RequestApplySnapshotChunk,
) (*types.ResponseApplySnapshotChunk, error) {
req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
app.mtx.Lock()
defer app.mtx.Unlock()


@@ -1,263 +0,0 @@
package abcicli
import (
"sync"
types "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/libs/service"
)
type unsyncLocalClient struct {
service.BaseService
types.Application
// This mutex is exclusively used to protect the callback.
mtx sync.RWMutex
Callback
}
var _ Client = (*unsyncLocalClient)(nil)
// NewUnsyncLocalClient creates an unsynchronized local client, which will be
// directly calling the methods of the given app.
//
// Unlike NewLocalClient, it does not hold a mutex around the application, so
// it is up to the application to manage its synchronization properly.
func NewUnsyncLocalClient(app types.Application) Client {
cli := &unsyncLocalClient{
Application: app,
}
cli.BaseService = *service.NewBaseService(nil, "unsyncLocalClient", cli)
return cli
}
func (app *unsyncLocalClient) SetResponseCallback(cb Callback) {
app.mtx.Lock()
defer app.mtx.Unlock()
app.Callback = cb
}
// TODO: change types.Application to include Error()?
func (app *unsyncLocalClient) Error() error {
return nil
}
func (app *unsyncLocalClient) FlushAsync() *ReqRes {
// Do nothing
return newLocalReqRes(types.ToRequestFlush(), nil)
}
func (app *unsyncLocalClient) EchoAsync(msg string) *ReqRes {
return app.callback(
types.ToRequestEcho(msg),
types.ToResponseEcho(msg),
)
}
func (app *unsyncLocalClient) InfoAsync(req types.RequestInfo) *ReqRes {
res := app.Application.Info(req)
return app.callback(
types.ToRequestInfo(req),
types.ToResponseInfo(res),
)
}
func (app *unsyncLocalClient) DeliverTxAsync(params types.RequestDeliverTx) *ReqRes {
res := app.Application.DeliverTx(params)
return app.callback(
types.ToRequestDeliverTx(params),
types.ToResponseDeliverTx(res),
)
}
func (app *unsyncLocalClient) CheckTxAsync(req types.RequestCheckTx) *ReqRes {
res := app.Application.CheckTx(req)
return app.callback(
types.ToRequestCheckTx(req),
types.ToResponseCheckTx(res),
)
}
func (app *unsyncLocalClient) QueryAsync(req types.RequestQuery) *ReqRes {
res := app.Application.Query(req)
return app.callback(
types.ToRequestQuery(req),
types.ToResponseQuery(res),
)
}
func (app *unsyncLocalClient) CommitAsync() *ReqRes {
res := app.Application.Commit()
return app.callback(
types.ToRequestCommit(),
types.ToResponseCommit(res),
)
}
func (app *unsyncLocalClient) InitChainAsync(req types.RequestInitChain) *ReqRes {
res := app.Application.InitChain(req)
return app.callback(
types.ToRequestInitChain(req),
types.ToResponseInitChain(res),
)
}
func (app *unsyncLocalClient) BeginBlockAsync(req types.RequestBeginBlock) *ReqRes {
res := app.Application.BeginBlock(req)
return app.callback(
types.ToRequestBeginBlock(req),
types.ToResponseBeginBlock(res),
)
}
func (app *unsyncLocalClient) EndBlockAsync(req types.RequestEndBlock) *ReqRes {
res := app.Application.EndBlock(req)
return app.callback(
types.ToRequestEndBlock(req),
types.ToResponseEndBlock(res),
)
}
func (app *unsyncLocalClient) ListSnapshotsAsync(req types.RequestListSnapshots) *ReqRes {
res := app.Application.ListSnapshots(req)
return app.callback(
types.ToRequestListSnapshots(req),
types.ToResponseListSnapshots(res),
)
}
func (app *unsyncLocalClient) OfferSnapshotAsync(req types.RequestOfferSnapshot) *ReqRes {
res := app.Application.OfferSnapshot(req)
return app.callback(
types.ToRequestOfferSnapshot(req),
types.ToResponseOfferSnapshot(res),
)
}
func (app *unsyncLocalClient) LoadSnapshotChunkAsync(req types.RequestLoadSnapshotChunk) *ReqRes {
res := app.Application.LoadSnapshotChunk(req)
return app.callback(
types.ToRequestLoadSnapshotChunk(req),
types.ToResponseLoadSnapshotChunk(res),
)
}
func (app *unsyncLocalClient) ApplySnapshotChunkAsync(req types.RequestApplySnapshotChunk) *ReqRes {
res := app.Application.ApplySnapshotChunk(req)
return app.callback(
types.ToRequestApplySnapshotChunk(req),
types.ToResponseApplySnapshotChunk(res),
)
}
func (app *unsyncLocalClient) PrepareProposalAsync(req types.RequestPrepareProposal) *ReqRes {
res := app.Application.PrepareProposal(req)
return app.callback(
types.ToRequestPrepareProposal(req),
types.ToResponsePrepareProposal(res),
)
}
func (app *unsyncLocalClient) ProcessProposalAsync(req types.RequestProcessProposal) *ReqRes {
res := app.Application.ProcessProposal(req)
return app.callback(
types.ToRequestProcessProposal(req),
types.ToResponseProcessProposal(res),
)
}
//-------------------------------------------------------
func (app *unsyncLocalClient) FlushSync() error {
return nil
}
func (app *unsyncLocalClient) EchoSync(msg string) (*types.ResponseEcho, error) {
return &types.ResponseEcho{Message: msg}, nil
}
func (app *unsyncLocalClient) InfoSync(req types.RequestInfo) (*types.ResponseInfo, error) {
res := app.Application.Info(req)
return &res, nil
}
func (app *unsyncLocalClient) DeliverTxSync(req types.RequestDeliverTx) (*types.ResponseDeliverTx, error) {
res := app.Application.DeliverTx(req)
return &res, nil
}
func (app *unsyncLocalClient) CheckTxSync(req types.RequestCheckTx) (*types.ResponseCheckTx, error) {
res := app.Application.CheckTx(req)
return &res, nil
}
func (app *unsyncLocalClient) QuerySync(req types.RequestQuery) (*types.ResponseQuery, error) {
res := app.Application.Query(req)
return &res, nil
}
func (app *unsyncLocalClient) CommitSync() (*types.ResponseCommit, error) {
res := app.Application.Commit()
return &res, nil
}
func (app *unsyncLocalClient) InitChainSync(req types.RequestInitChain) (*types.ResponseInitChain, error) {
res := app.Application.InitChain(req)
return &res, nil
}
func (app *unsyncLocalClient) BeginBlockSync(req types.RequestBeginBlock) (*types.ResponseBeginBlock, error) {
res := app.Application.BeginBlock(req)
return &res, nil
}
func (app *unsyncLocalClient) EndBlockSync(req types.RequestEndBlock) (*types.ResponseEndBlock, error) {
res := app.Application.EndBlock(req)
return &res, nil
}
func (app *unsyncLocalClient) ListSnapshotsSync(req types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
res := app.Application.ListSnapshots(req)
return &res, nil
}
func (app *unsyncLocalClient) OfferSnapshotSync(req types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
res := app.Application.OfferSnapshot(req)
return &res, nil
}
func (app *unsyncLocalClient) LoadSnapshotChunkSync(
req types.RequestLoadSnapshotChunk,
) (*types.ResponseLoadSnapshotChunk, error) {
res := app.Application.LoadSnapshotChunk(req)
return &res, nil
}
func (app *unsyncLocalClient) ApplySnapshotChunkSync(
req types.RequestApplySnapshotChunk,
) (*types.ResponseApplySnapshotChunk, error) {
res := app.Application.ApplySnapshotChunk(req)
return &res, nil
}
func (app *unsyncLocalClient) PrepareProposalSync(req types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
res := app.Application.PrepareProposal(req)
return &res, nil
}
func (app *unsyncLocalClient) ProcessProposalSync(req types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
res := app.Application.ProcessProposal(req)
return &res, nil
}
//-------------------------------------------------------
func (app *unsyncLocalClient) callback(req *types.Request, res *types.Response) *ReqRes {
app.mtx.RLock()
defer app.mtx.RUnlock()
app.Callback(req, res)
rr := newLocalReqRes(req, res)
rr.callbackInvoked = true
return rr
}


@@ -54,7 +54,7 @@ var RootCmd = &cobra.Command{
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
switch cmd.Use {
case "kvstore", "version", "help [command]":
case "kvstore", "version":
return nil
}
@@ -460,14 +460,11 @@ func cmdUnimplemented(cmd *cobra.Command, args []string) error {
fmt.Println("Available commands:")
fmt.Printf("%s: %s\n", echoCmd.Use, echoCmd.Short)
fmt.Printf("%s: %s\n", checkTxCmd.Use, checkTxCmd.Short)
fmt.Printf("%s: %s\n", commitCmd.Use, commitCmd.Short)
fmt.Printf("%s: %s\n", deliverTxCmd.Use, deliverTxCmd.Short)
fmt.Printf("%s: %s\n", infoCmd.Use, infoCmd.Short)
fmt.Printf("%s: %s\n", checkTxCmd.Use, checkTxCmd.Short)
fmt.Printf("%s: %s\n", deliverTxCmd.Use, deliverTxCmd.Short)
fmt.Printf("%s: %s\n", queryCmd.Use, queryCmd.Short)
fmt.Printf("%s: %s\n", prepareProposalCmd.Use, prepareProposalCmd.Short)
fmt.Printf("%s: %s\n", processProposalCmd.Use, processProposalCmd.Short)
fmt.Printf("%s: %s\n", commitCmd.Use, commitCmd.Short)
fmt.Println("Use \"[command] --help\" for more information about a command.")
return nil


@@ -8,5 +8,4 @@ const (
CodeTypeUnauthorized uint32 = 3
CodeTypeUnknownError uint32 = 4
CodeTypeExecuted uint32 = 5
CodeTypeRejected uint32 = 6
)


@@ -122,10 +122,6 @@ func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeli
}
func (app *Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
if len(req.Tx) == 0 {
return types.ResponseCheckTx{Code: code.CodeTypeRejected}
}
if req.Type == types.CheckTxType_Recheck {
if _, ok := app.txToRemove[string(req.Tx)]; ok {
return types.ResponseCheckTx{Code: code.CodeTypeExecuted, GasWanted: 1}


@@ -70,24 +70,6 @@ func TestKVStoreKV(t *testing.T) {
testKVStore(t, kvstore, tx, key, value)
}
func TestPersistentKVStoreEmptyTX(t *testing.T) {
dir, err := os.MkdirTemp("/tmp", "abci-kvstore-test") // TODO
if err != nil {
t.Fatal(err)
}
kvstore := NewPersistentKVStoreApplication(dir)
tx := []byte("")
reqCheck := types.RequestCheckTx{Tx: tx}
resCheck := kvstore.CheckTx(reqCheck)
require.Equal(t, resCheck.Code, code.CodeTypeRejected)
txs := make([][]byte, 0, 4)
txs = append(txs, []byte("key=value"), []byte("key"), []byte(""), []byte("kee=value"))
reqPrepare := types.RequestPrepareProposal{Txs: txs, MaxTxBytes: 10 * 1024}
resPrepare := kvstore.PrepareProposal(reqPrepare)
require.Equal(t, len(reqPrepare.Txs), len(resPrepare.Txs)+1, "Empty transaction not properly removed")
}
func TestPersistentKVStoreKV(t *testing.T) {
dir, err := os.MkdirTemp("/tmp", "abci-kvstore-test") // TODO
if err != nil {

View File

@@ -324,15 +324,11 @@ func (app *PersistentKVStoreApplication) execPrepareTx(tx []byte) types.Response
}
// substPrepareTx substitutes all the transactions prefixed with 'prepare' in the
// proposal for transactions with the prefix stripped, while discarding invalid empty transactions.
// proposal for transactions with the prefix stripped.
func (app *PersistentKVStoreApplication) substPrepareTx(blockData [][]byte, maxTxBytes int64) [][]byte {
txs := make([][]byte, 0, len(blockData))
var totalBytes int64
for _, tx := range blockData {
if len(tx) == 0 {
continue
}
txMod := tx
if isPrepareTx(tx) {
txMod = bytes.Replace(tx, []byte(PreparePrefix), []byte(ReplacePrefix), 1)


@@ -38,34 +38,8 @@ function testExample() {
rm "${INPUT}".out.new
}
function testHelp() {
INPUT=$1
APP="$2 $3"
echo "Test: $APP"
$APP &> "${INPUT}.new" &
sleep 2
pre=$(shasum < "${INPUT}")
post=$(shasum < "${INPUT}.new")
if [[ "$pre" != "$post" ]]; then
echo "You broke the tutorial"
echo "Got:"
cat "${INPUT}.new"
echo "Expected:"
cat "${INPUT}"
echo "Diff:"
diff "${INPUT}" "${INPUT}.new"
exit 1
fi
rm "${INPUT}".new
}
testExample 1 tests/test_cli/ex1.abci abci-cli kvstore
testExample 2 tests/test_cli/ex2.abci abci-cli kvstore
testHelp tests/test_cli/testHelp.out abci-cli help
echo ""
echo "PASS"


@@ -1,30 +0,0 @@
the ABCI CLI tool wraps an ABCI client and is used for testing ABCI servers
Usage:
abci-cli [command]
Available Commands:
batch run a batch of abci commands against an application
check_tx validate a transaction
commit commit the application state and return the Merkle root hash
completion Generate the autocompletion script for the specified shell
console start an interactive ABCI console for multiple commands
deliver_tx deliver a new transaction to the application
echo have the application echo a message
help Help about any command
info get some info about the application
kvstore ABCI demo example
prepare_proposal prepare proposal
process_proposal process proposal
query query the application state
test run integration tests
version print ABCI console version
Flags:
--abci string either socket or grpc (default "socket")
--address string address of application socket (default "tcp://0.0.0.0:26658")
-h, --help help for abci-cli
--log_level string set the logger level (default "debug")
-v, --verbose print the command and results as if it were a console session
Use "abci-cli [command] --help" for more information about a command.


@@ -4,7 +4,6 @@ import (
"io"
"github.com/cosmos/gogoproto/proto"
"github.com/tendermint/tendermint/libs/protoio"
)


@@ -99,6 +99,9 @@ func NewBlockPool(start int64, requestsCh chan<- BlockRequest, errorsCh chan<- p
// OnStart implements service.Service by spawning requesters routine and recording
// pool's start time.
func (pool *BlockPool) OnStart() error {
if pool.height == 0 {
return errors.New("height not set")
}
go pool.makeRequestersRoutine()
pool.startTime = time.Now()
return nil
@@ -111,22 +114,19 @@ func (pool *BlockPool) makeRequestersRoutine() {
break
}
_, numPending, lenRequesters := pool.GetStatus()
switch {
case numPending >= maxPendingRequests:
height, maxPeerHeight, numPending, lenRequesters := pool.GetStatus()
if height >= maxPeerHeight ||
numPending >= maxPendingRequests ||
lenRequesters >= maxTotalRequesters {
// sleep for a bit.
time.Sleep(requestIntervalMS * time.Millisecond)
// check for timed out peers
pool.removeTimedoutPeers()
case lenRequesters >= maxTotalRequesters:
// sleep for a bit.
time.Sleep(requestIntervalMS * time.Millisecond)
// check for timed out peers
pool.removeTimedoutPeers()
default:
// request for more blocks.
pool.makeNextRequester()
continue
}
// request for more blocks.
pool.makeNextRequester()
}
}
@@ -156,11 +156,11 @@ func (pool *BlockPool) removeTimedoutPeers() {
// GetStatus returns pool's height, numPending requests and the number of
// requesters.
func (pool *BlockPool) GetStatus() (height int64, numPending int32, lenRequesters int) {
func (pool *BlockPool) GetStatus() (height, maxPeerHeight int64, numPending int32, lenRequesters int) {
pool.mtx.Lock()
defer pool.mtx.Unlock()
return pool.height, atomic.LoadInt32(&pool.numPending), len(pool.requesters)
return pool.height, pool.maxPeerHeight, atomic.LoadInt32(&pool.numPending), len(pool.requesters)
}
// IsCaughtUp returns true if this node is caught up, false - otherwise.
@@ -302,6 +302,7 @@ func (pool *BlockPool) SetPeerRange(peerID p2p.ID, base int64, height int64) {
}
if height > pool.maxPeerHeight {
pool.Logger.Info("new max peer height", "height", height)
pool.maxPeerHeight = height
}
}
@@ -388,7 +389,7 @@ func (pool *BlockPool) makeNextRequester() {
err := request.Start()
if err != nil {
request.Logger.Error("Error starting request", "err", err)
pool.Logger.Error("Error starting request", "err", err)
}
}


@@ -32,7 +32,7 @@ const (
type consensusReactor interface {
// for when we switch from blocksync reactor and block sync to
// the consensus machine
SwitchToConsensus(state sm.State, skipWAL bool)
SwitchToConsensus(state sm.State, skipWAL bool) error
}
type peerError struct {
@@ -54,7 +54,6 @@ type Reactor struct {
blockExec *sm.BlockExecutor
store *store.BlockStore
pool *BlockPool
blockSync bool
requestsCh <-chan BlockRequest
errorsCh <-chan peerError
@@ -63,9 +62,7 @@ type Reactor struct {
}
// NewReactor returns new reactor instance.
func NewReactor(state sm.State, blockExec *sm.BlockExecutor, store *store.BlockStore,
blockSync bool, metrics *Metrics) *Reactor {
func NewReactor(state sm.State, blockExec *sm.BlockExecutor, store *store.BlockStore, metrics *Metrics) *Reactor {
if state.LastBlockHeight != store.Height() {
panic(fmt.Sprintf("state (%v) and store (%v) height mismatch", state.LastBlockHeight,
store.Height()))
@@ -87,7 +84,6 @@ func NewReactor(state sm.State, blockExec *sm.BlockExecutor, store *store.BlockS
blockExec: blockExec,
store: store,
pool: pool,
blockSync: blockSync,
requestsCh: requestsCh,
errorsCh: errorsCh,
metrics: metrics,
@@ -104,22 +100,22 @@ func (bcR *Reactor) SetLogger(l log.Logger) {
// OnStart implements service.Service.
func (bcR *Reactor) OnStart() error {
if bcR.blockSync {
err := bcR.pool.Start()
if err != nil {
return err
}
go bcR.poolRoutine(false)
}
return nil
}
// IsSyncing returns whether the node is using blocksync to advance heights
func (bcR *Reactor) IsSyncing() bool {
return bcR.pool.IsRunning()
}
// SwitchToBlockSync is called by the state sync reactor when switching to block sync.
func (bcR *Reactor) SwitchToBlockSync(state sm.State) error {
bcR.blockSync = true
bcR.initialState = state
bcR.pool.height = state.LastBlockHeight + 1
if state.LastBlockHeight == 0 {
bcR.pool.height = state.InitialHeight
} else {
bcR.pool.height = state.LastBlockHeight + 1
}
err := bcR.pool.Start()
if err != nil {
return err
@@ -130,7 +126,7 @@ func (bcR *Reactor) SwitchToBlockSync(state sm.State) error {
// OnStop implements service.Service.
func (bcR *Reactor) OnStop() {
if bcR.blockSync {
if bcR.pool.IsRunning() {
if err := bcR.pool.Stop(); err != nil {
bcR.Logger.Error("Error stopping pool", "err", err)
}
@@ -298,24 +294,28 @@ FOR_LOOP:
for {
select {
case <-switchToConsensusTicker.C:
height, numPending, lenRequesters := bcR.pool.GetStatus()
height, peerHeight, numPending, lenRequesters := bcR.pool.GetStatus()
outbound, inbound, _ := bcR.Switch.NumPeers()
bcR.Logger.Debug("Consensus ticker", "numPending", numPending, "total", lenRequesters,
"outbound", outbound, "inbound", inbound)
"outbound", outbound, "inbound", inbound, "peerHeight", peerHeight)
if bcR.pool.IsCaughtUp() {
bcR.Logger.Info("Time to switch to consensus reactor!", "height", height)
if err := bcR.pool.Stop(); err != nil {
bcR.Logger.Error("Error stopping pool", "err", err)
}
// TODO: node struct should be responsible for switching from block sync to
// consensus. It's messy to have to grab the consensus reactor from the switch
conR, ok := bcR.Switch.Reactor("CONSENSUS").(consensusReactor)
if ok {
conR.SwitchToConsensus(state, blocksSynced > 0 || stateSynced)
err := conR.SwitchToConsensus(state, blocksSynced > 0 || stateSynced)
bcR.Logger.Error("failed to switch to consensus", "err", err)
}
// else {
// should only happen during testing
// }
break FOR_LOOP
return
}
case <-trySyncTicker.C: // chan time


@@ -95,14 +95,6 @@ func newReactor(
mock.Anything,
mock.Anything).Return(nil)
// Make the Reactor itself.
// NOTE we have to create and commit the blocks first because
// pool.height is determined from the store.
fastSync := true
db := dbm.NewMemDB()
stateStore = sm.NewStore(db, sm.StoreOptions{
DiscardABCIResponses: false,
})
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(),
mp, sm.EmptyEvidencePool{}, blockStore)
if err = stateStore.Save(state); err != nil {
@@ -145,7 +137,7 @@ func newReactor(
blockStore.SaveBlock(thisBlock, thisParts, lastCommit)
}
bcReactor := NewReactor(state.Copy(), blockExec, blockStore, fastSync, NopMetrics())
bcReactor := NewReactor(state.Copy(), blockExec, blockStore, NopMetrics())
bcReactor.SetLogger(logger.With("module", "blocksync"))
return ReactorPair{bcReactor, proxyApp}
@@ -156,6 +148,9 @@ func TestNoBlockResponse(t *testing.T) {
defer os.RemoveAll(config.RootDir)
genDoc, privVals := randGenesisDoc(1, false, 30)
state, err := sm.MakeGenesisState(genDoc)
require.NoError(t, err)
maxBlockHeight := int64(65)
reactorPairs := make([]ReactorPair, 2)
@@ -169,6 +164,12 @@ func TestNoBlockResponse(t *testing.T) {
}, p2p.Connect2Switches)
for _, reactor := range reactorPairs {
// turn on the syncing algorithm
err := reactor.reactor.SwitchToBlockSync(state)
require.NoError(t, err)
}
defer func() {
for _, r := range reactorPairs {
err := r.reactor.Stop()
@@ -218,6 +219,9 @@ func TestBadBlockStopsPeer(t *testing.T) {
defer os.RemoveAll(config.RootDir)
genDoc, privVals := randGenesisDoc(1, false, 30)
state, err := sm.MakeGenesisState(genDoc)
require.NoError(t, err)
maxBlockHeight := int64(148)
// Other chain needs a different validator set
@@ -244,6 +248,12 @@ func TestBadBlockStopsPeer(t *testing.T) {
}, p2p.Connect2Switches)
for _, reactor := range reactorPairs {
// turn on the syncing algorithm
err := reactor.reactor.SwitchToBlockSync(state)
require.NoError(t, err)
}
defer func() {
for _, r := range reactorPairs {
err := r.reactor.Stop()
@@ -287,6 +297,11 @@ func TestBadBlockStopsPeer(t *testing.T) {
p2p.Connect2Switches(switches, i, len(reactorPairs)-1)
}
otherState, err := sm.MakeGenesisState(otherGenDoc)
require.NoError(t, err)
err = lastReactorPair.reactor.SwitchToBlockSync(otherState)
require.NoError(t, err)
for {
if lastReactorPair.reactor.pool.IsCaughtUp() || lastReactorPair.reactor.Switch.Peers().Size() == 0 {
break


@@ -67,7 +67,7 @@ func copyConfig(home, dir string) error {
func dumpProfile(dir, addr, profile string, debug int) error {
endpoint := fmt.Sprintf("%s/debug/pprof/%s?debug=%d", addr, profile, debug)
//nolint:gosec,nolintlint
//nolint:all
resp, err := http.Get(endpoint)
if err != nil {
return fmt.Errorf("failed to query for %s profile: %w", profile, err)


@@ -1,87 +0,0 @@
package commands
import (
"context"
"os"
"os/signal"
"syscall"
"github.com/spf13/cobra"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/inspect"
"github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/state/indexer/block"
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
)
// InspectCmd is the command for starting an inspect server.
var InspectCmd = &cobra.Command{
Use: "inspect",
Short: "Run an inspect server for investigating Tendermint state",
Long: `
inspect runs a subset of Tendermint's RPC endpoints that are useful for debugging
issues with Tendermint.
When the Tendermint consensus engine detects inconsistent state, it will crash the
Tendermint process. Tendermint will not start up while in this inconsistent state.
The inspect command can be used to query the block and state store using Tendermint
RPC calls to debug issues of inconsistent state.
`,
RunE: runInspect,
}
func init() {
InspectCmd.Flags().
String("rpc.laddr",
config.RPC.ListenAddress, "RPC listenener address. Port required")
InspectCmd.Flags().
String("db-backend",
config.DBBackend, "database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb")
InspectCmd.Flags().
String("db-dir", config.DBPath, "database directory")
}
func runInspect(cmd *cobra.Command, args []string) error {
ctx, cancel := context.WithCancel(cmd.Context())
defer cancel()
c := make(chan os.Signal, 1)
signal.Notify(c, syscall.SIGTERM, syscall.SIGINT)
go func() {
<-c
cancel()
}()
blockStoreDB, err := cfg.DefaultDBProvider(&cfg.DBContext{ID: "blockstore", Config: config})
if err != nil {
return err
}
blockStore := store.NewBlockStore(blockStoreDB)
defer blockStore.Close()
stateDB, err := cfg.DefaultDBProvider(&cfg.DBContext{ID: "state", Config: config})
if err != nil {
return err
}
stateStore := state.NewStore(stateDB, state.StoreOptions{DiscardABCIResponses: false})
defer stateStore.Close()
genDoc, err := types.GenesisDocFromFile(config.GenesisFile())
if err != nil {
return err
}
txIndexer, blockIndexer, err := block.IndexerFromConfig(config, cfg.DefaultDBProvider, genDoc.ChainID)
if err != nil {
return err
}
ins := inspect.New(config.RPC, blockStore, stateStore, txIndexer, blockIndexer, logger)
logger.Info("starting inspect server")
if err := ins.Run(ctx); err != nil {
return err
}
return nil
}


@@ -66,7 +66,6 @@ func AddNodeFlags(cmd *cobra.Command) {
cmd.Flags().String("p2p.external-address", config.P2P.ExternalAddress, "ip:port address to advertise to peers for them to dial")
cmd.Flags().String("p2p.seeds", config.P2P.Seeds, "comma-delimited ID@host:port seed nodes")
cmd.Flags().String("p2p.persistent_peers", config.P2P.PersistentPeers, "comma-delimited ID@host:port persistent peers")
cmd.Flags().String("p2p.bootstrap_peers", config.P2P.BootstrapPeers, "comma-delimited ID@host:port peers to be added to the addressbook on startup")
cmd.Flags().String("p2p.unconditional_peer_ids",
config.P2P.UnconditionalPeerIDs, "comma-delimited IDs of unconditional peers")
cmd.Flags().Bool("p2p.upnp", config.P2P.UPNP, "enable/disable UPNP port forwarding")


@@ -30,7 +30,6 @@ func main() {
cmd.VersionCmd,
cmd.RollbackStateCmd,
cmd.CompactGoLevelDBCmd,
cmd.InspectCmd,
debug.DebugCmd,
cli.NewCompletionCmd(rootCmd, true),
)


@@ -548,11 +548,6 @@ type P2PConfig struct { //nolint: maligned
// We only use these if we cant connect to peers in the addrbook
Seeds string `mapstructure:"seeds"`
// Comma separated list of peers to be added to the peer store
// on startup. Either BootstrapPeers or PersistentPeers are
// needed for peer discovery
BootstrapPeers string `mapstructure:"bootstrap_peers"`
// Comma separated list of nodes to keep persistent connections to
PersistentPeers string `mapstructure:"persistent_peers"`
@@ -713,28 +708,11 @@ type MempoolConfig struct {
// Mempool version to use:
// 1) "v0" - (default) FIFO mempool.
// 2) "v1" - prioritized mempool.
Version string `mapstructure:"version"`
// RootDir is the root directory for all data. This should be configured via
// the $TMHOME env variable or --home cmd flag rather than overriding this
// struct field.
RootDir string `mapstructure:"home"`
// Recheck (default: true) defines whether Tendermint should recheck the
// validity for all remaining transaction in the mempool after a block.
// Since a block affects the application state, some transactions in the
// mempool may become invalid. If this does not apply to your application,
// you can disable rechecking.
Recheck bool `mapstructure:"recheck"`
// Broadcast (default: true) defines whether the mempool should relay
// transactions to other peers. Setting this to false will stop the mempool
// from relaying transactions to other peers until they are included in a
// block. In other words, if Broadcast is disabled, only the peer you send
// the tx to will see it until it is included in a block.
Broadcast bool `mapstructure:"broadcast"`
// WalPath (default: "") configures the location of the Write Ahead Log
// (WAL) for the mempool. The WAL is disabled by default. To enable, set
// WalPath to where you want the WAL to be written (e.g.
// "data/mempool.wal").
WalPath string `mapstructure:"wal_dir"`
Version string `mapstructure:"version"`
RootDir string `mapstructure:"home"`
Recheck bool `mapstructure:"recheck"`
Broadcast bool `mapstructure:"broadcast"`
WalPath string `mapstructure:"wal_dir"`
// Maximum number of transactions in the mempool
Size int `mapstructure:"size"`
// Limit the total size of all txs in the mempool.


@@ -1,30 +0,0 @@
package config
import (
"context"
dbm "github.com/tendermint/tm-db"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/service"
)
// ServiceProvider takes a config and a logger and returns a ready to go Node.
type ServiceProvider func(context.Context, *Config, log.Logger) (service.Service, error)
// DBContext specifies config information for loading a new DB.
type DBContext struct {
ID string
Config *Config
}
// DBProvider takes a DBContext and returns an instantiated DB.
type DBProvider func(*DBContext) (dbm.DB, error)
// DefaultDBProvider returns a database using the DBBackend and DBDir
// specified in the Config.
func DefaultDBProvider(ctx *DBContext) (dbm.DB, error) {
dbType := dbm.BackendType(ctx.Config.DBBackend)
return dbm.NewDB(ctx.ID, dbType, ctx.Config.DBDir())
}


@@ -283,11 +283,6 @@ external_address = "{{ .P2P.ExternalAddress }}"
# Comma separated list of seed nodes to connect to
seeds = "{{ .P2P.Seeds }}"
# Comma separated list of peers to be added to the peer store
# on startup. Either BootstrapPeers or PersistentPeers are
# needed for peer discovery
bootstrap_peers = "{{ .P2P.BootstrapPeers }}"
# Comma separated list of nodes to keep persistent connections to
persistent_peers = "{{ .P2P.PersistentPeers }}"
@@ -354,24 +349,8 @@ dial_timeout = "{{ .P2P.DialTimeout }}"
# 2) "v1" - prioritized mempool.
version = "{{ .Mempool.Version }}"
# Recheck (default: true) defines whether Tendermint should recheck the
# validity for all remaining transaction in the mempool after a block.
# Since a block affects the application state, some transactions in the
# mempool may become invalid. If this does not apply to your application,
# you can disable rechecking.
recheck = {{ .Mempool.Recheck }}
# Broadcast (default: true) defines whether the mempool should relay
# transactions to other peers. Setting this to false will stop the mempool
# from relaying transactions to other peers until they are included in a
# block. In other words, if Broadcast is disabled, only the peer you send
# the tx to will see it until it is included in a block.
broadcast = {{ .Mempool.Broadcast }}
# WalPath (default: "") configures the location of the Write Ahead Log
# (WAL) for the mempool. The WAL is disabled by default. To enable, set
# WalPath to where you want the WAL to be written (e.g.
# "data/mempool.wal").
wal_dir = "{{ js .Mempool.WalPath }}"
# Maximum number of transactions in the mempool
@@ -457,7 +436,7 @@ chunk_fetchers = "{{ .StateSync.ChunkFetchers }}"
[blocksync]
# Block Sync version to use:
#
#
# In v0.37, v1 and v2 of the block sync protocols were deprecated.
# Please use v0 instead.
#


@@ -46,6 +46,8 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
appFunc := newKVStore
genDoc, privVals := randGenesisDoc(nValidators, false, 30)
state, err := sm.MakeGenesisState(genDoc)
require.NoError(t, err)
css := make([]*State, nValidators)
for i := 0; i < nValidators; i++ {
@@ -54,7 +56,6 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc)
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
defer os.RemoveAll(thisConfig.RootDir)
ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
@@ -102,7 +103,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
// Make State
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool, blockStore)
cs := NewState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool)
cs := NewState(thisConfig.Consensus, blockExec, blockStore, mempool, evpool, WithState(state.Copy()))
cs.SetLogger(cs.Logger)
// set private validator
pv := privVals[i]
@@ -125,7 +126,8 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
blocksSubs := make([]types.Subscription, 0)
eventBuses := make([]*types.EventBus, nValidators)
for i := 0; i < nValidators; i++ {
reactors[i] = NewReactor(css[i], true) // so we dont start the consensus states
// Note, we dont start the consensus states
reactors[i] = NewReactor(css[i])
reactors[i].SetLogger(css[i].Logger)
// eventBus is already started with the cs
@@ -254,8 +256,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
// start the consensus reactors
for i := 0; i < nValidators; i++ {
s := reactors[i].conS.GetState()
reactors[i].SwitchToConsensus(s, false)
require.NoError(t, reactors[i].SwitchToConsensus(state.Copy(), false))
}
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
@@ -314,7 +315,7 @@ func TestByzantineConflictingProposalsWithPartition(t *testing.T) {
N := 4
logger := consensusLogger().With("test", "byzantine")
app := newKVStore
css, cleanup := randConsensusNet(N, "consensus_byzantine_test", newMockTickerFunc(false), app)
css, cleanup := randConsensusNet(t, N, "consensus_byzantine_test", newMockTickerFunc(false), app)
defer cleanup()
// give the byzantine validator a normal ticker
@@ -363,7 +364,8 @@ func TestByzantineConflictingProposalsWithPartition(t *testing.T) {
blocksSubs[i], err = eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock)
require.NoError(t, err)
conR := NewReactor(css[i], true) // so we don't start the consensus states
// Note, we don't start the consensus states
conR := NewReactor(css[i])
conR.SetLogger(logger.With("validator", i))
conR.SetEventBus(eventBus)
@@ -407,13 +409,13 @@ func TestByzantineConflictingProposalsWithPartition(t *testing.T) {
// note these must be started before the byz
for i := 1; i < N; i++ {
cr := reactors[i].(*Reactor)
cr.SwitchToConsensus(cr.conS.GetState(), false)
require.NoError(t, cr.SwitchToConsensus(cr.conS.GetState(), false))
}
// start the byzantine state machine
byzR := reactors[0].(*ByzantineReactor)
s := byzR.reactor.conS.GetState()
byzR.reactor.SwitchToConsensus(s, false)
require.NoError(t, byzR.reactor.SwitchToConsensus(s, false))
// byz proposer sends one block to peers[0]
// and the other block to peers[1] and peers[2].
@@ -592,7 +594,7 @@ func (br *ByzantineReactor) AddPeer(peer p2p.Peer) {
// Send our state to peer.
// If we're syncing, broadcast a RoundStepMessage later upon SwitchToConsensus().
if !br.reactor.waitSync {
if br.reactor.conS.IsRunning() {
br.reactor.sendNewRoundStepMessage(peer)
}
}


@@ -440,7 +440,7 @@ func newStateWithConfigAndBlockStore(
}
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool, blockStore)
cs := NewState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool)
cs := NewState(thisConfig.Consensus, blockExec, blockStore, mempool, evpool, WithState(state.Copy()))
cs.SetLogger(log.TestingLogger().With("module", "consensus"))
cs.SetPrivValidator(pv)
@@ -747,18 +747,17 @@ func consensusLogger() log.Logger {
}).With("module", "consensus")
}
func randConsensusNet(nValidators int, testName string, tickerFunc func() TimeoutTicker,
func randConsensusNet(t *testing.T, nValidators int, testName string, tickerFunc func() TimeoutTicker,
appFunc func() abci.Application, configOpts ...func(*cfg.Config)) ([]*State, cleanupFunc) {
t.Helper()
genDoc, privVals := randGenesisDoc(nValidators, false, 30)
state, err := sm.MakeGenesisState(genDoc)
require.NoError(t, err)
css := make([]*State, nValidators)
logger := consensusLogger()
configRootDirs := make([]string, 0, nValidators)
for i := 0; i < nValidators; i++ {
stateDB := dbm.NewMemDB() // each state needs its own db
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc)
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
configRootDirs = append(configRootDirs, thisConfig.RootDir)
for _, opt := range configOpts {
@@ -772,6 +771,7 @@ func randConsensusNet(nValidators int, testName string, tickerFunc func() Timeou
css[i] = newStateWithConfigAndBlockStore(thisConfig, state, privVals[i], app, stateDB)
css[i].SetTimeoutTicker(tickerFunc())
css[i].SetLogger(logger.With("validator", i, "module", "consensus"))
css[i].updateToState(state.Copy())
}
return css, func() {
for _, dir := range configRootDirs {


@@ -19,7 +19,7 @@ import (
// Ensure a testnet makes blocks
func TestReactorInvalidPrecommit(t *testing.T) {
N := 4
css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore)
css, cleanup := randConsensusNet(t, N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore)
defer cleanup()
for i := 0; i < 4; i++ {


@@ -5,7 +5,6 @@ import (
"fmt"
"github.com/cosmos/gogoproto/proto"
cstypes "github.com/tendermint/tendermint/consensus/types"
"github.com/tendermint/tendermint/libs/bits"
tmmath "github.com/tendermint/tendermint/libs/math"


@@ -42,7 +42,6 @@ type Reactor struct {
conS *State
mtx tmsync.RWMutex
waitSync bool
eventBus *types.EventBus
rs *cstypes.RoundState
@@ -53,12 +52,11 @@ type ReactorOption func(*Reactor)
// NewReactor returns a new Reactor with the given
// consensusState.
func NewReactor(consensusState *State, waitSync bool, options ...ReactorOption) *Reactor {
func NewReactor(consensusState *State, options ...ReactorOption) *Reactor {
conR := &Reactor{
conS: consensusState,
waitSync: waitSync,
rs: consensusState.GetRoundState(),
Metrics: NopMetrics(),
conS: consensusState,
rs: consensusState.GetRoundState(),
Metrics: NopMetrics(),
}
conR.BaseReactor = *p2p.NewBaseReactor("Consensus", conR)
@@ -72,21 +70,12 @@ func NewReactor(consensusState *State, waitSync bool, options ...ReactorOption)
// OnStart implements BaseService by subscribing to events, which later will be
// broadcasted to other peers and starting state if we're not in block sync.
func (conR *Reactor) OnStart() error {
conR.Logger.Info("Reactor ", "waitSync", conR.WaitSync())
// start routine that computes peer statistics for evaluating peer quality
go conR.peerStatsRoutine()
conR.subscribeToBroadcastEvents()
go conR.updateRoundStateRoutine()
if !conR.WaitSync() {
err := conR.conS.Start()
if err != nil {
return err
}
}
return nil
}
@@ -94,45 +83,34 @@ func (conR *Reactor) OnStart() error {
// state.
func (conR *Reactor) OnStop() {
conR.unsubscribeFromBroadcastEvents()
if err := conR.conS.Stop(); err != nil {
conR.Logger.Error("Error stopping consensus state", "err", err)
}
if !conR.WaitSync() {
if conR.conS.IsRunning() {
if err := conR.conS.Stop(); err != nil {
conR.Logger.Error("Error stopping consensus state", "err", err)
}
conR.conS.Wait()
}
}
func (conR *Reactor) IsConsensusRunning() bool {
return conR.conS.IsRunning()
}
// SwitchToConsensus switches from block_sync mode to consensus mode.
// It resets the state, turns off block_sync, and starts the consensus state-machine
func (conR *Reactor) SwitchToConsensus(state sm.State, skipWAL bool) {
func (conR *Reactor) SwitchToConsensus(state sm.State, skipWAL bool) error {
conR.Logger.Info("SwitchToConsensus")
// We have no votes, so reconstruct LastCommit from SeenCommit.
if state.LastBlockHeight > 0 {
if state.LastBlockHeight > state.InitialHeight {
conR.conS.reconstructLastCommit(state)
}
// NOTE: The line below causes broadcastNewRoundStepRoutine() to broadcast a
// NewRoundStepMessage.
conR.conS.updateToState(state)
conR.mtx.Lock()
conR.waitSync = false
conR.mtx.Unlock()
if skipWAL {
conR.conS.doWALCatchup = false
}
err := conR.conS.Start()
if err != nil {
panic(fmt.Sprintf(`Failed to start consensus state: %v
conS:
%+v
conR:
%+v`, err, conR.conS, conR))
}
return conR.conS.Start()
}
// GetChannels implements Reactor
@@ -199,7 +177,7 @@ func (conR *Reactor) AddPeer(peer p2p.Peer) {
// Send our state to peer.
// If we're block_syncing, broadcast a RoundStepMessage later upon SwitchToConsensus().
if !conR.WaitSync() {
if conR.conS.IsRunning() {
conR.sendNewRoundStepMessage(peer)
}
}
@@ -309,7 +287,7 @@ func (conR *Reactor) Receive(e p2p.Envelope) {
}
case DataChannel:
if conR.WaitSync() {
if !conR.conS.IsRunning() {
conR.Logger.Info("Ignoring message received during sync", "msg", msg)
return
}
@@ -328,7 +306,7 @@ func (conR *Reactor) Receive(e p2p.Envelope) {
}
case VoteChannel:
if conR.WaitSync() {
if !conR.conS.IsRunning() {
conR.Logger.Info("Ignoring message received during sync", "msg", msg)
return
}
@@ -350,7 +328,7 @@ func (conR *Reactor) Receive(e p2p.Envelope) {
}
case VoteSetBitsChannel:
if conR.WaitSync() {
if !conR.conS.IsRunning() {
conR.Logger.Info("Ignoring message received during sync", "msg", msg)
return
}
@@ -391,13 +369,6 @@ func (conR *Reactor) SetEventBus(b *types.EventBus) {
conR.conS.SetEventBus(b)
}
// WaitSync returns whether the consensus reactor is waiting for state/block sync.
func (conR *Reactor) WaitSync() bool {
conR.mtx.RLock()
defer conR.mtx.RUnlock()
return conR.waitSync
}
//--------------------------------------
// subscribeToBroadcastEvents subscribes for new round steps and votes
@@ -541,6 +512,11 @@ OUTER_LOOP:
if !peer.IsRunning() || !conR.IsRunning() {
return
}
if !conR.IsConsensusRunning() {
time.Sleep(conR.conS.config.PeerGossipSleepDuration)
continue OUTER_LOOP
}
rs := conR.getRoundState()
prs := ps.GetRoundState()
@@ -685,7 +661,7 @@ func (conR *Reactor) gossipDataForCatchup(logger log.Logger, rs *cstypes.RoundSt
}
return
}
// logger.Info("No parts to send in catch-up, sleeping")
time.Sleep(conR.conS.config.PeerGossipSleepDuration)
}
@@ -695,12 +671,16 @@ func (conR *Reactor) gossipVotesRoutine(peer p2p.Peer, ps *PeerState) {
// Simple hack to throttle logs upon sleep.
var sleeping = 0
OUTER_LOOP:
for {
// Manage disconnects from self or peer.
if !peer.IsRunning() || !conR.IsRunning() {
return
}
if !conR.IsConsensusRunning() {
time.Sleep(conR.conS.config.PeerGossipSleepDuration)
continue
}
rs := conR.getRoundState()
prs := ps.GetRoundState()
@@ -711,14 +691,11 @@ OUTER_LOOP:
sleeping = 0
}
// logger.Debug("gossipVotesRoutine", "rsHeight", rs.Height, "rsRound", rs.Round,
// "prsHeight", prs.Height, "prsRound", prs.Round, "prsStep", prs.Step)
// If height matches, then send LastCommit, Prevotes, Precommits.
if rs.Height == prs.Height {
heightLogger := logger.With("height", prs.Height)
if conR.gossipVotesForHeight(heightLogger, rs, prs, ps) {
continue OUTER_LOOP
continue
}
}
@@ -727,7 +704,7 @@ OUTER_LOOP:
if prs.Height != 0 && rs.Height == prs.Height+1 {
if ps.PickSendVote(rs.LastCommit) {
logger.Debug("Picked rs.LastCommit to send", "height", prs.Height)
continue OUTER_LOOP
continue
}
}
@@ -740,7 +717,7 @@ OUTER_LOOP:
if commit := conR.conS.blockStore.LoadBlockCommit(prs.Height); commit != nil {
if ps.PickSendVote(commit) {
logger.Debug("Picked Catchup commit to send", "height", prs.Height)
continue OUTER_LOOP
continue
}
}
}
@@ -757,7 +734,7 @@ OUTER_LOOP:
}
time.Sleep(conR.conS.config.PeerGossipSleepDuration)
continue OUTER_LOOP
continue
}
}
@@ -831,6 +808,11 @@ OUTER_LOOP:
return
}
if !conR.IsConsensusRunning() {
time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
continue OUTER_LOOP
}
// Maybe send Height/Round/Prevotes
{
rs := conR.getRoundState()
@@ -916,8 +898,6 @@ OUTER_LOOP:
}
}
time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
continue OUTER_LOOP
}
}
@@ -1351,7 +1331,6 @@ func (ps *PeerState) ApplyNewRoundStepMessage(msg *NewRoundStepMessage) {
psRound := ps.PRS.Round
psCatchupCommitRound := ps.PRS.CatchupCommitRound
psCatchupCommit := ps.PRS.CatchupCommit
lastPrecommits := ps.PRS.Precommits
startTime := tmtime.Now().Add(-1 * time.Duration(msg.SecondsSinceStartTime) * time.Second)
ps.PRS.Height = msg.Height
@@ -1379,7 +1358,7 @@ func (ps *PeerState) ApplyNewRoundStepMessage(msg *NewRoundStepMessage) {
// Shift Precommits to LastCommit.
if psHeight+1 == msg.Height && psRound == msg.LastCommitRound {
ps.PRS.LastCommitRound = msg.LastCommitRound
ps.PRS.LastCommit = lastPrecommits
ps.PRS.LastCommit = ps.PRS.Precommits
} else {
ps.PRS.LastCommitRound = msg.LastCommitRound
ps.PRS.LastCommit = nil

View File

@@ -55,9 +55,8 @@ func startConsensusNet(t *testing.T, css []*State, n int) (
blocksSubs := make([]types.Subscription, 0)
eventBuses := make([]*types.EventBus, n)
for i := 0; i < n; i++ {
/*logger, err := tmflags.ParseLogLevel("consensus:info,*:error", logger, "info")
if err != nil { t.Fatal(err)}*/
reactors[i] = NewReactor(css[i], true) // so we dont start the consensus states
// Note, we dont start the consensus states
reactors[i] = NewReactor(css[i])
reactors[i].SetLogger(css[i].Logger)
// eventBus is already started with the cs
@@ -88,7 +87,8 @@ func startConsensusNet(t *testing.T, css []*State, n int) (
// TODO: is this still true with new pubsub?
for i := 0; i < n; i++ {
s := reactors[i].conS.GetState()
reactors[i].SwitchToConsensus(s, false)
err := reactors[i].SwitchToConsensus(s, false)
require.NoError(t, err)
}
return reactors, blocksSubs, eventBuses
}
@@ -113,7 +113,7 @@ func stopConsensusNet(logger log.Logger, reactors []*Reactor, eventBuses []*type
// Ensure a testnet makes blocks
func TestReactorBasic(t *testing.T) {
N := 4
css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore)
css, cleanup := randConsensusNet(t, N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore)
defer cleanup()
reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N)
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
@@ -135,6 +135,8 @@ func TestReactorWithEvidence(t *testing.T) {
// css := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore)
genDoc, privVals := randGenesisDoc(nValidators, false, 30)
state, err := sm.MakeGenesisState(genDoc)
require.NoError(t, err)
css := make([]*State, nValidators)
logger := consensusLogger()
for i := 0; i < nValidators; i++ {
@@ -142,7 +144,6 @@ func TestReactorWithEvidence(t *testing.T) {
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
DiscardABCIResponses: false,
})
state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc)
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
defer os.RemoveAll(thisConfig.RootDir)
ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
@@ -203,7 +204,7 @@ func TestReactorWithEvidence(t *testing.T) {
// Make State
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool, blockStore)
cs := NewState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool2)
cs := NewState(thisConfig.Consensus, blockExec, blockStore, mempool, evpool2, WithState(state.Copy()))
cs.SetLogger(log.TestingLogger().With("module", "consensus"))
cs.SetPrivValidator(pv)
@@ -237,7 +238,7 @@ func TestReactorWithEvidence(t *testing.T) {
// Ensure a testnet makes blocks when there are txs
func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {
N := 4
css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore,
css, cleanup := randConsensusNet(t, N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore,
func(c *cfg.Config) {
c.Consensus.CreateEmptyBlocks = false
})
@@ -258,7 +259,7 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {
func TestReactorReceiveDoesNotPanicIfAddPeerHasntBeenCalledYet(t *testing.T) {
N := 1
css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore)
css, cleanup := randConsensusNet(t, N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore)
defer cleanup()
reactors, _, eventBuses := startConsensusNet(t, css, N)
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
@@ -284,7 +285,7 @@ func TestReactorReceiveDoesNotPanicIfAddPeerHasntBeenCalledYet(t *testing.T) {
func TestReactorReceivePanicsIfInitPeerHasntBeenCalledYet(t *testing.T) {
N := 1
css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore)
css, cleanup := randConsensusNet(t, N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore)
defer cleanup()
reactors, _, eventBuses := startConsensusNet(t, css, N)
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
@@ -310,7 +311,7 @@ func TestReactorReceivePanicsIfInitPeerHasntBeenCalledYet(t *testing.T) {
// Test we record stats about votes and block parts from other peers.
func TestReactorRecordsVotesAndBlockParts(t *testing.T) {
N := 4
css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore)
css, cleanup := randConsensusNet(t, N, "consensus_reactor_test", newMockTickerFunc(true), newKVStore)
defer cleanup()
reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N)
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
@@ -336,6 +337,7 @@ func TestReactorVotingPowerChange(t *testing.T) {
nVals := 4
logger := log.TestingLogger()
css, cleanup := randConsensusNet(
t,
nVals,
"consensus_voting_power_changes_test",
newMockTickerFunc(true),
@@ -531,7 +533,7 @@ func TestReactorValidatorSetChanges(t *testing.T) {
// Check we can make blocks with skip_timeout_commit=false
func TestReactorWithTimeoutCommit(t *testing.T) {
N := 4
css, cleanup := randConsensusNet(N, "consensus_reactor_with_timeout_commit_test", newMockTickerFunc(false), newKVStore)
css, cleanup := randConsensusNet(t, N, "consensus_reactor_with_timeout_commit_test", newMockTickerFunc(false), newKVStore)
defer cleanup()
// override default SkipTimeoutCommit == true for tests
for i := 0; i < N; i++ {

View File

@@ -54,7 +54,7 @@ func (cs *State) readReplayMessage(msg *TimedWALMessage, newStepSub types.Subscr
if m.Height != m2.Height || m.Round != m2.Round || m.Step != m2.Step {
return fmt.Errorf("roundState mismatch. Got %v; Expected %v", m2, m)
}
case <-newStepSub.Canceled():
case <-newStepSub.Cancelled():
return fmt.Errorf("failed to read off newStepSub.Out(). newStepSub was canceled")
case <-ticker:
return fmt.Errorf("failed to read off newStepSub.Out()")
@@ -254,7 +254,7 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error {
h.logger.Info("ABCI Handshake App Info",
"height", blockHeight,
"hash", log.NewLazySprintf("%X", appHash),
"hash", appHash,
"software-version", res.Version,
"protocol-version", res.AppVersion,
)
@@ -271,7 +271,7 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error {
}
h.logger.Info("Completed ABCI Handshake - Tendermint and App are synced",
"appHeight", blockHeight, "appHash", log.NewLazySprintf("%X", appHash))
"appHeight", blockHeight, "appHash", appHash)
// TODO: (on restart) replay mempool

View File

@@ -129,8 +129,8 @@ func (pb *playback) replayReset(count int, newStepSub types.Subscription) error
}
pb.cs.Wait()
newCS := NewState(pb.cs.config, pb.genesisState.Copy(), pb.cs.blockExec,
pb.cs.blockStore, pb.cs.txNotifier, pb.cs.evpool)
newCS := NewState(pb.cs.config, pb.cs.blockExec,
pb.cs.blockStore, pb.cs.txNotifier, pb.cs.evpool, WithState(pb.genesisState.Copy()))
newCS.SetEventBus(pb.cs.eventBus)
newCS.startForReplay()
@@ -332,8 +332,8 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo
mempool, evpool := emptyMempool{}, sm.EmptyEvidencePool{}
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool, blockStore)
consensusState := NewState(csConfig, state.Copy(), blockExec,
blockStore, mempool, evpool)
consensusState := NewState(csConfig, blockExec,
blockStore, mempool, evpool, WithState(state.Copy()))
consensusState.SetEventBus(eventBus)
return consensusState

View File

@@ -67,7 +67,8 @@ func TestMain(m *testing.M) {
func startNewStateAndWaitForBlock(t *testing.T, consensusReplayConfig *cfg.Config,
lastBlockHeight int64, blockDB dbm.DB, stateStore sm.Store) {
logger := log.TestingLogger()
state, _ := stateStore.LoadFromDBOrGenesisFile(consensusReplayConfig.GenesisFile())
state, err := stateStore.LoadFromDBOrGenesisFile(consensusReplayConfig.GenesisFile())
require.NoError(t, err)
privValidator := loadPrivValidator(consensusReplayConfig)
cs := newStateWithConfigAndBlockStore(
consensusReplayConfig,
@@ -81,7 +82,7 @@ func startNewStateAndWaitForBlock(t *testing.T, consensusReplayConfig *cfg.Confi
bytes, _ := os.ReadFile(cs.config.WalFile())
t.Logf("====== WAL: \n\r%X\n", bytes)
err := cs.Start()
err = cs.Start()
require.NoError(t, err)
defer func() {
if err := cs.Stop(); err != nil {
@@ -97,7 +98,7 @@ func startNewStateAndWaitForBlock(t *testing.T, consensusReplayConfig *cfg.Confi
require.NoError(t, err)
select {
case <-newBlockSub.Out():
case <-newBlockSub.Canceled():
case <-newBlockSub.Cancelled():
t.Fatal("newBlockSub was canceled")
case <-time.After(120 * time.Second):
t.Fatal("Timed out waiting for new block (see trace above)")
@@ -555,40 +556,40 @@ func TestSimulateValidatorsChange(t *testing.T) {
// Sync from scratch
func TestHandshakeReplayAll(t *testing.T) {
for _, m := range modes {
testHandshakeReplay(t, config, 0, m, false)
testHandshakeReplay(t, 0, m, false)
}
for _, m := range modes {
testHandshakeReplay(t, config, 0, m, true)
testHandshakeReplay(t, 0, m, true)
}
}
// Sync many, not from scratch
func TestHandshakeReplaySome(t *testing.T) {
for _, m := range modes {
testHandshakeReplay(t, config, 2, m, false)
testHandshakeReplay(t, 2, m, false)
}
for _, m := range modes {
testHandshakeReplay(t, config, 2, m, true)
testHandshakeReplay(t, 2, m, true)
}
}
// Sync from lagging by one
func TestHandshakeReplayOne(t *testing.T) {
for _, m := range modes {
testHandshakeReplay(t, config, numBlocks-1, m, false)
testHandshakeReplay(t, numBlocks-1, m, false)
}
for _, m := range modes {
testHandshakeReplay(t, config, numBlocks-1, m, true)
testHandshakeReplay(t, numBlocks-1, m, true)
}
}
// Sync from caught up
func TestHandshakeReplayNone(t *testing.T) {
for _, m := range modes {
testHandshakeReplay(t, config, numBlocks, m, false)
testHandshakeReplay(t, numBlocks, m, false)
}
for _, m := range modes {
testHandshakeReplay(t, config, numBlocks, m, true)
testHandshakeReplay(t, numBlocks, m, true)
}
}
@@ -660,25 +661,27 @@ func tempWALWithData(data []byte) string {
// Make some blocks. Start a fresh app and apply nBlocks blocks.
// Then restart the app and sync it up with the remaining blocks
func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uint, testValidatorsChange bool) {
var chain []*types.Block
var commits []*types.Commit
var store *mockBlockStore
var stateDB dbm.DB
var genesisState sm.State
func testHandshakeReplay(t *testing.T, nBlocks int, mode uint, testValidatorsChange bool) {
var (
chain []*types.Block
commits []*types.Commit
store *mockBlockStore
stateDB dbm.DB
genesisState sm.State
config *cfg.Config
)
if testValidatorsChange {
testConfig := ResetConfig(fmt.Sprintf("%s_%v_m", t.Name(), mode))
defer os.RemoveAll(testConfig.RootDir)
config = ResetConfig(fmt.Sprintf("%s_%v_m", t.Name(), mode))
defer os.RemoveAll(config.RootDir)
stateDB = dbm.NewMemDB()
genesisState = sim.GenesisState
config = sim.Config
chain = append([]*types.Block{}, sim.Chain...) // copy chain
commits = sim.Commits
store = newMockBlockStore(t, config, genesisState.ConsensusParams)
} else { // test single node
testConfig := ResetConfig(fmt.Sprintf("%s_%v_s", t.Name(), mode))
defer os.RemoveAll(testConfig.RootDir)
config = ResetConfig(fmt.Sprintf("%s_%v_s", t.Name(), mode))
defer os.RemoveAll(config.RootDir)
walBody, err := WALWithNBlocks(t, numBlocks)
require.NoError(t, err)
walFile := tempWALWithData(walBody)
@@ -811,14 +814,11 @@ func buildAppStateFromChain(t *testing.T, proxyApp proxy.AppConns, stateStore sm
state.Version.Consensus.App = kvstore.ProtocolVersion // simulate handshake, receive app version
validators := types.TM2PB.ValidatorUpdates(state.Validators)
if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{
_, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{
Validators: validators,
}); err != nil {
panic(err)
}
if err := stateStore.Save(state); err != nil { // save height 1's validatorsInfo
panic(err)
}
})
require.NoError(t, err)
require.NoError(t, stateStore.Save(state))
switch mode {
case 0:
for i := 0; i < nBlocks; i++ {
@@ -1198,7 +1198,6 @@ func (bs *mockBlockStore) PruneBlocks(height int64, state sm.State) (uint64, int
}
func (bs *mockBlockStore) DeleteLatestBlock() error { return nil }
func (bs *mockBlockStore) Close() error { return nil }
//---------------------------------------
// Test handshake/init chain

View File

@@ -148,7 +148,6 @@ type StateOption func(*State)
// NewState returns a new State.
func NewState(
config *cfg.ConsensusConfig,
state sm.State,
blockExec *sm.BlockExecutor,
blockStore sm.BlockStore,
txNotifier txNotifier,
@@ -177,13 +176,6 @@ func NewState(
cs.doPrevote = cs.defaultDoPrevote
cs.setProposal = cs.defaultSetProposal
// We have no votes, so reconstruct LastCommit from SeenCommit.
if state.LastBlockHeight > 0 {
cs.reconstructLastCommit(state)
}
cs.updateToState(state)
// NOTE: we do not call scheduleRound0 yet, we do that upon Start()
cs.BaseService = *service.NewBaseService(nil, "State", cs)
@@ -207,10 +199,19 @@ func (cs *State) SetEventBus(b *types.EventBus) {
}
// StateMetrics sets the metrics.
func StateMetrics(metrics *Metrics) StateOption {
func WithMetrics(metrics *Metrics) StateOption {
return func(cs *State) { cs.metrics = metrics }
}
// WithState is a StateOption that initializes the consensus state machine
// from the given state, reconstructing the last commit when one exists.
func WithState(state sm.State) StateOption {
return func(cs *State) {
if state.LastBlockHeight > 0 {
cs.reconstructLastCommit(state)
}
cs.updateToState(state)
}
}
// String returns a string.
func (cs *State) String() string {
// better not to access shared variables
@@ -297,6 +298,10 @@ func (cs *State) LoadCommit(height int64) *types.Commit {
// OnStart loads the latest state via the WAL, and starts the timeout and
// receive routines.
func (cs *State) OnStart() error {
if cs.state.IsEmpty() {
return errors.New("no state to commence consensus on")
}
// We may set the WAL in testing before calling Start, so only OpenWAL if its
// still the nilWAL.
if _, ok := cs.wal.(nilWAL); ok {
@@ -612,7 +617,7 @@ func (cs *State) updateToState(state sm.State) {
// signal the new round step, because other services (eg. txNotifier)
// depend on having an up-to-date peer state!
if state.LastBlockHeight <= cs.state.LastBlockHeight {
cs.Logger.Debug(
cs.Logger.Info(
"ignoring updateToState()",
"new_height", state.LastBlockHeight+1,
"old_height", cs.state.LastBlockHeight+1,
@@ -2275,11 +2280,10 @@ func (cs *State) signAddVote(msgType tmproto.SignedMsgType, hash []byte, header
return nil
}
// TODO: pass pubKey to signVote
vote, err := cs.signVote(msgType, hash, header)
if err == nil {
cs.sendInternalMessage(msgInfo{&VoteMessage{vote}, ""})
cs.Logger.Debug("signed and pushed vote", "height", cs.Height, "round", cs.Round, "vote", vote)
cs.Logger.Info("signed and pushed vote", "height", cs.Height, "round", cs.Round, "vote", vote)
return vote
}

View File

@@ -121,7 +121,7 @@ func (hvs *HeightVoteSet) AddVote(vote *types.Vote, peerID p2p.ID) (added bool,
return
}
voteSet := hvs.getVoteSet(vote.Round, vote.Type)
if voteSet == nil {
if voteSet.IsEmpty() {
if rndz := hvs.peerCatchupRounds[peerID]; len(rndz) < 2 {
hvs.addRound(vote.Round)
voteSet = hvs.getVoteSet(vote.Round, vote.Type)
@@ -166,7 +166,7 @@ func (hvs *HeightVoteSet) POLInfo() (polRound int32, polBlockID types.BlockID) {
func (hvs *HeightVoteSet) getVoteSet(round int32, voteType tmproto.SignedMsgType) *types.VoteSet {
rvs, ok := hvs.roundVoteSets[round]
if !ok {
return nil
return &types.VoteSet{}
}
switch voteType {
case tmproto.PrevoteType:

View File

@@ -5,6 +5,7 @@ import (
"os"
"testing"
"github.com/stretchr/testify/require"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/crypto/tmhash"
"github.com/tendermint/tendermint/internal/test"
@@ -30,30 +31,23 @@ func TestPeerCatchupRounds(t *testing.T) {
vote999_0 := makeVoteHR(t, 1, 0, 999, privVals)
added, err := hvs.AddVote(vote999_0, "peer1")
if !added || err != nil {
t.Error("Expected to successfully add vote from peer", added, err)
}
require.NoError(t, err)
require.True(t, added)
vote1000_0 := makeVoteHR(t, 1, 0, 1000, privVals)
added, err = hvs.AddVote(vote1000_0, "peer1")
if !added || err != nil {
t.Error("Expected to successfully add vote from peer", added, err)
}
require.NoError(t, err)
require.True(t, added)
vote1001_0 := makeVoteHR(t, 1, 0, 1001, privVals)
added, err = hvs.AddVote(vote1001_0, "peer1")
if err != ErrGotVoteFromUnwantedRound {
t.Errorf("expected GotVoteFromUnwantedRoundError, but got %v", err)
}
if added {
t.Error("Expected to *not* add vote from peer, too many catchup rounds.")
}
require.Error(t, err)
require.Equal(t, ErrGotVoteFromUnwantedRound, err)
require.False(t, added)
added, err = hvs.AddVote(vote1001_0, "peer2")
if !added || err != nil {
t.Error("Expected to successfully add vote from another peer")
}
require.NoError(t, err)
require.True(t, added)
}
func makeVoteHR(t *testing.T, height int64, valIndex, round int32, privVals []types.PrivValidator) *types.Vote {

View File

@@ -86,7 +86,7 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) {
mempool := emptyMempool{}
evpool := sm.EmptyEvidencePool{}
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool, blockStore)
consensusState := NewState(config.Consensus, state.Copy(), blockExec, blockStore, mempool, evpool)
consensusState := NewState(config.Consensus, blockExec, blockStore, mempool, evpool, WithState(state.Copy()))
consensusState.SetLogger(logger)
consensusState.SetEventBus(eventBus)
if privValidator != nil {

View File

@@ -9,8 +9,8 @@ module.exports = {
editLinks: true,
label: 'core',
algolia: {
id: "QQFROLBNZC",
key: "f1b68b96fb31d8aa4a54412c44917a26",
id: "BH4D9OD16A",
key: "59f0e2deb984aa9cdf2b3a5fd24ac501",
index: "tendermint"
},
versions: [

View File

@@ -27,28 +27,22 @@ Usage:
abci-cli [command]
Available Commands:
batch run a batch of abci commands against an application
check_tx validate a transaction
commit commit the application state and return the Merkle root hash
completion Generate the autocompletion script for the specified shell
console start an interactive ABCI console for multiple commands
deliver_tx deliver a new transaction to the application
echo have the application echo a message
help Help about any command
info get some info about the application
kvstore ABCI demo example
prepare_proposal prepare proposal
process_proposal process proposal
query query the application state
test run integration tests
version print ABCI console version
batch Run a batch of abci commands against an application
check_tx Validate a tx
commit Commit the application state and return the Merkle root hash
console Start an interactive abci console for multiple commands
deliver_tx Deliver a new tx to the application
kvstore ABCI demo example
echo Have the application echo a message
help Help about any command
info Get some info about the application
query Query the application state
Flags:
--abci string either socket or grpc (default "socket")
--address string address of application socket (default "tcp://0.0.0.0:26658")
-h, --help help for abci-cli
--log_level string set the logger level (default "debug")
-v, --verbose print the command and results as if it were a console session
--abci string socket or grpc (default "socket")
--address string address of application socket (default "tcp://127.0.0.1:26658")
-h, --help help for abci-cli
-v, --verbose print the command and results as if it were a console session
Use "abci-cli [command] --help" for more information about a command.
```
@@ -64,51 +58,47 @@ purposes.
We'll start a kvstore application, which was installed at the same time
as `abci-cli` above. The kvstore just stores transactions in a merkle
tree. Its code can be found
[here](https://github.com/tendermint/tendermint/blob/main/abci/cmd/abci-cli/abci-cli.go)
and looks like the following:
tree.
Its code can be found
[here](https://github.com/tendermint/tendermint/blob/v0.34.x/abci/cmd/abci-cli/abci-cli.go)
and looks like:
```go
func cmdKVStore(cmd *cobra.Command, args []string) error {
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
// Create the application - in memory or persisted to disk
var app types.Application
if flagPersist == "" {
var err error
flagPersist, err = os.MkdirTemp("", "persistent_kvstore_tmp")
if err != nil {
return err
}
}
app = kvstore.NewPersistentKVStoreApplication(flagPersist)
app.(*kvstore.PersistentKVStoreApplication).SetLogger(logger.With("module", "kvstore"))
// Create the application - in memory or persisted to disk
var app types.Application
if flagPersist == "" {
app = kvstore.NewKVStoreApplication()
} else {
app = kvstore.NewPersistentKVStoreApplication(flagPersist)
app.(*kvstore.PersistentKVStoreApplication).SetLogger(logger.With("module", "kvstore"))
}
// Start the listener
srv, err := server.NewServer(flagAddress, flagAbci, app)
if err != nil {
return err
}
srv.SetLogger(logger.With("module", "abci-server"))
if err := srv.Start(); err != nil {
return err
}
// Start the listener
srv, err := server.NewServer(flagAddrD, flagAbci, app)
if err != nil {
return err
}
srv.SetLogger(logger.With("module", "abci-server"))
if err := srv.Start(); err != nil {
return err
}
// Stop upon receiving SIGTERM or CTRL-C.
tmos.TrapSignal(logger, func() {
// Cleanup
if err := srv.Stop(); err != nil {
logger.Error("Error while stopping server", "err", err)
}
})
// Stop upon receiving SIGTERM or CTRL-C.
tmos.TrapSignal(logger, func() {
// Cleanup
srv.Stop()
})
// Run forever.
select {}
// Run forever.
select {}
}
```
Start the application by running:
Start by running:
```sh
abci-cli kvstore
@@ -173,32 +163,32 @@ Try running these commands:
-> data: hello
-> data.hex: 0x68656C6C6F
> info
> info
-> code: OK
-> data: {"size":0}
-> data.hex: 0x7B2273697A65223A307D
> prepare_proposal "abc"
-> code: OK
-> log: Succeeded. Tx: abc
-> log: Succeeded. Tx: abc action: UNMODIFIED
> process_proposal "abc"
-> code: OK
-> status: ACCEPT
> commit
> commit
-> code: OK
-> data.hex: 0x0000000000000000
> deliver_tx "abc"
-> code: OK
> info
> info
-> code: OK
-> data: {"size":1}
-> data.hex: 0x7B2273697A65223A317D
> commit
> commit
-> code: OK
-> data.hex: 0x0200000000000000
@@ -214,7 +204,7 @@ Try running these commands:
> deliver_tx "def=xyz"
-> code: OK
> commit
> commit
-> code: OK
-> data.hex: 0x0400000000000000
@@ -229,9 +219,11 @@ Try running these commands:
> prepare_proposal "preparedef"
-> code: OK
-> log: Succeeded. Tx: replacedef
-> log: Succeeded. Tx: def action: ADDED
-> code: OK
-> log: Succeeded. Tx: preparedef action: REMOVED
> process_proposal "replacedef"
> process_proposal "def"
-> code: OK
-> status: ACCEPT
@@ -253,21 +245,21 @@ Try running these commands:
Note that if we do `deliver_tx "abc"` it will store `(abc, abc)`, but if
we do `deliver_tx "abc=efg"` it will store `(abc, efg)`.
You could put the commands in a file and run
Similarly, you could put the commands in a file and run
`abci-cli --verbose batch < myfile`.
Note that the `abci-cli` is designed strictly for testing and debugging. In a real
deployment, the role of sending messages is taken by Tendermint, which
connects to the app using three separate connections, each with its own
pattern of messages.
For examples of running an ABCI app with Tendermint, see the
[getting started guide](./getting-started.md).
## Bounties
Want to write an app in your favorite language?! We'd be happy
to add you to our [ecosystem](https://github.com/tendermint/awesome#ecosystem)!
See [funding](https://github.com/interchainio/funding) opportunities from the
[Interchain Foundation](https://interchain.io) for implementations in new languages and more.
The `abci-cli` is designed strictly for testing and debugging. In a real
deployment, the role of sending messages is taken by Tendermint, which
connects to the app using three separate connections, each with its own
pattern of messages.
For examples of running an ABCI app with
Tendermint, see the [getting started guide](./getting-started.md).
Next is the ABCI specification.

View File

@@ -11,10 +11,9 @@ application you want to run. So, to run a complete blockchain that does
something useful, you must start two programs: one is Tendermint Core,
the other is your application, which can be written in any programming
language. Recall from [the intro to
ABCI](../introduction/what-is-tendermint.md#abci-overview) that Tendermint Core
handles all the p2p and consensus stuff, and just forwards transactions to the
ABCI](../introduction/what-is-tendermint.md#abci-overview) that Tendermint Core handles all the p2p and consensus stuff, and just forwards transactions to the
application when they need to be validated, or when they're ready to be
executed and committed.
committed to a block.
In this guide, we show you some examples of how to run an application
using Tendermint.
@@ -23,8 +22,7 @@ using Tendermint.
The first apps we will work with are written in Go. To install them, you
need to [install Go](https://golang.org/doc/install), put
`$GOPATH/bin` in your `$PATH` and enable go modules. If you use `bash`,
follow these instructions:
`$GOPATH/bin` in your `$PATH` and enable go modules with these instructions:
```bash
echo export GOPATH=\"\$HOME/go\" >> ~/.bash_profile
@@ -33,48 +31,17 @@ echo export PATH=\"\$PATH:\$GOPATH/bin\" >> ~/.bash_profile
Then run
```bash
```sh
go get github.com/tendermint/tendermint
cd $GOPATH/src/github.com/tendermint/tendermint
make install_abci
```
Now you should have the `abci-cli` installed; run `abci-cli` to see the list of commands:
Now you should have the `abci-cli` installed; you'll notice the `kvstore`
command, an example application written
in Go. See below for an application written in JavaScript.
```
Usage:
abci-cli [command]
Available Commands:
batch run a batch of abci commands against an application
check_tx validate a transaction
commit commit the application state and return the Merkle root hash
completion Generate the autocompletion script for the specified shell
console start an interactive ABCI console for multiple commands
deliver_tx deliver a new transaction to the application
echo have the application echo a message
help Help about any command
info get some info about the application
kvstore ABCI demo example
prepare_proposal prepare proposal
process_proposal process proposal
query query the application state
test run integration tests
version print ABCI console version
Flags:
--abci string either socket or grpc (default "socket")
--address string address of application socket (default "tcp://0.0.0.0:26658")
-h, --help help for abci-cli
--log_level string set the logger level (default "debug")
-v, --verbose print the command and results as if it were a console session
Use "abci-cli [command] --help" for more information about a command.
```
You'll notice the `kvstore` command, an example application written in Go.
Now, let's run an app!
Now, let's run some apps!
## KVStore - A First Example
@@ -101,7 +68,7 @@ tendermint node
```
If you have used Tendermint, you may want to reset the data for a new
blockchain by running `tendermint unsafe-reset-all`. Then you can run
blockchain by running `tendermint unsafe_reset_all`. Then you can run
`tendermint node` to start Tendermint, and connect to the app. For more
details, see [the guide on using Tendermint](../tendermint-core/using-tendermint.md).
@@ -197,3 +164,47 @@ curl -s 'localhost:26657/abci_query?data="name"'
Try some other transactions and queries to make sure everything is
working!
## CounterJS - Example in Another Language
We also want to run applications in another language - in this case,
we'll run a Javascript version of the `counter`. To run it, you'll need
to [install node](https://nodejs.org/en/download/).
You'll also need to fetch the relevant repository, from
[here](https://github.com/tendermint/js-abci), then install it:
```sh
git clone https://github.com/tendermint/js-abci.git
cd js-abci
npm install abci
```
Kill the previous `counter` and `tendermint` processes. Now run the app:
```sh
node example/counter.js
```
In another window, reset and start `tendermint`:
```sh
tendermint unsafe_reset_all
tendermint node
```
Once again, you should see blocks streaming by - but now, our
application is written in Javascript! Try sending some transactions, and
like before - the results should be the same:
```sh
# ok
curl localhost:26657/broadcast_tx_commit?tx=0x00
# invalid nonce
curl localhost:26657/broadcast_tx_commit?tx=0x05
# ok
curl localhost:26657/broadcast_tx_commit?tx=0x01
```
Neat, eh?

View File

@@ -78,7 +78,6 @@ Note the context/background should be written in the present tense.
- [ADR-039: Peer-Behaviour](./adr-039-peer-behaviour.md)
- [ADR-063: Privval-gRPC](./adr-063-privval-grpc.md)
- [ADR-067: Mempool Refactor](./adr-067-mempool-refactor.md)
- [ADR-071: Proposer-Based Timestamps](./adr-071-proposer-based-timestamps.md)
- [ADR-075: RPC Event Subscription Interface](./adr-075-rpc-subscription.md)
- [ADR-079: Ed25519 Verification](./adr-079-ed25519-verification.md)
- [ADR-081: Protocol Buffers Management](./adr-081-protobuf-mgmt.md)
@@ -115,6 +114,7 @@ None
- [ADR-064: Batch Verification](./adr-064-batch-verification.md)
- [ADR-068: Reverse-Sync](./adr-068-reverse-sync.md)
- [ADR-069: Node Initialization](./adr-069-flexible-node-initialization.md)
- [ADR-071: Proposer-Based Timestamps](./adr-071-proposer-based-timestamps.md)
- [ADR-073: Adopt LibP2P](./adr-073-libp2p.md)
- [ADR-074: Migrate Timeout Parameters to Consensus Parameters](./adr-074-timeout-params.md)
- [ADR-080: Reverse Sync](./adr-080-reverse-sync.md)

View File

@@ -6,7 +6,7 @@ order: 4
Tendermint is software for securely and consistently replicating an
application on many machines. By securely, we mean that Tendermint works
as long as less than 1/3 of machines fail in arbitrary ways. By consistently,
even if up to 1/3 of machines fail in arbitrary ways. By consistently,
we mean that every non-faulty machine sees the same transaction log and
computes the same state. Secure and consistent replication is a
fundamental problem in distributed systems; it plays a critical role in
@@ -22,14 +22,15 @@ reformalization of BFT in a more modern setting, with emphasis on
peer-to-peer networking and cryptographic authentication. The name
derives from the way transactions are batched in blocks, where each
block contains a cryptographic hash of the previous one, forming a
chain.
chain. In practice, the blockchain data structure actually optimizes BFT
design.
Tendermint consists of two chief technical components: a blockchain
consensus engine and a generic application interface. The consensus
engine, called Tendermint Core, ensures that the same transactions are
recorded on every machine in the same order. The application interface,
called the Application BlockChain Interface (ABCI), delivers the transactions
to applications for processing. Unlike other
called the Application BlockChain Interface (ABCI), enables the
transactions to be processed in any programming language. Unlike other
blockchain and consensus solutions, which come pre-packaged with built
in state machines (like a fancy key-value store, or a quirky scripting
language), developers can use Tendermint for BFT state machine
@@ -50,13 +51,13 @@ Hyperledger's Burrow.
### Zookeeper, etcd, consul
Zookeeper, etcd, and consul are all implementations of key-value stores
atop a classical, non-BFT consensus algorithm. Zookeeper uses an
algorithm called Zookeeper Atomic Broadcast, while etcd and consul use
the Raft log replication algorithm. A
Zookeeper, etcd, and consul are all implementations of a key-value store
atop a classical, non-BFT consensus algorithm. Zookeeper uses a version
of Paxos called Zookeeper Atomic Broadcast, while etcd and consul use
the Raft consensus algorithm, which is much younger and simpler. A
typical cluster contains 3-5 machines, and can tolerate crash failures
in less than 1/2 of the machines (e.g., 1 out of 3 or 2 out of 5),
but even a single Byzantine fault can jeopardize the whole system.
in up to 1/2 of the machines, but even a single Byzantine fault can
destroy the system.
Each offering provides a slightly different implementation of a
featureful key-value store, but all are generally focused around
@@ -65,8 +66,8 @@ configuration, service discovery, locking, leader-election, and so on.
Tendermint is in essence similar software, but with two key differences:
- It is Byzantine Fault Tolerant, meaning it can only tolerate less than 1/3
of machines failing, but those failures can include arbitrary behavior -
- It is Byzantine Fault Tolerant, meaning it can only tolerate up to a
1/3 of failures, but those failures can include arbitrary behavior -
including hacking and malicious attacks. - It does not specify a
particular application, like a fancy key-value store. Instead, it
focuses on arbitrary state machine replication, so developers can build
@@ -105,8 +106,8 @@ docker containers, modules it calls "chaincode". It uses an
implementation of [PBFT](http://pmg.csail.mit.edu/papers/osdi99.pdf).
from a team at IBM that is [augmented to handle potentially
non-deterministic
chaincode](https://drops.dagstuhl.de/opus/volltexte/2017/7093/pdf/LIPIcs-OPODIS-2016-24.pdf).
It is possible to implement this docker-based behavior as an ABCI app in
chaincode](https://drops.dagstuhl.de/opus/volltexte/2017/7093/pdf/LIPIcs-OPODIS-2016-24.pdf) It is
possible to implement this docker-based behavior as a ABCI app in
Tendermint, though extending Tendermint to handle non-determinism
remains for future work.
@@ -142,22 +143,24 @@ in design and suffers from "spaghetti code".
Another problem with monolithic design is that it limits you to the
language of the blockchain stack (or vice versa). In the case of
Ethereum which supports a Turing-complete bytecode virtual-machine, it
limits you to languages that compile down to that bytecode; while the
[list](https://github.com/pirapira/awesome-ethereum-virtual-machine#programming-languages-that-compile-into-evm)
is growing, it is still very limited.
limits you to languages that compile down to that bytecode; today, those
are Serpent and Solidity.
In contrast, our approach is to decouple the consensus engine and P2P
layers from the details of the state of the particular
layers from the details of the application state of the particular
blockchain application. We do this by abstracting away the details of
the application to an interface, which is implemented as a socket
protocol.
Thus we have an interface, the Application BlockChain Interface (ABCI),
and its primary implementation, the Tendermint Socket Protocol (TSP, or
Teaspoon).
### Intro to ABCI
[Tendermint Core](https://github.com/tendermint/tendermint), the
"consensus engine", communicates with the application via a socket
protocol that satisfies the ABCI, the Tendermint Socket Protocol
(TSP, or Teaspoon).
[Tendermint Core](https://github.com/tendermint/tendermint) (the
"consensus engine") communicates with the application via a socket
protocol that satisfies the ABCI.
To draw an analogy, lets talk about a well-known cryptocurrency,
Bitcoin. Bitcoin is a cryptocurrency blockchain where each node
@@ -177,7 +180,7 @@ The application will be responsible for
- Allowing clients to query the UTXO database.
Tendermint is able to decompose the blockchain design by offering a very
simple API (i.e. the ABCI) between the application process and consensus
simple API (ie. the ABCI) between the application process and consensus
process.
The ABCI consists of 3 primary message types that get delivered from the
@@ -236,7 +239,8 @@ Solidity on Ethereum is a great language of choice for blockchain
applications because, among other reasons, it is a completely
deterministic programming language. However, it's also possible to
create deterministic applications using existing popular languages like
Java, C++, Python, or Go, by avoiding
Java, C++, Python, or Go. Game programmers and blockchain developers are
already familiar with creating deterministic programs by avoiding
sources of non-determinism such as:
- random number generators (without deterministic seeding)
@@ -267,15 +271,14 @@ committed in a chain, with one block at each **height**. A block may
fail to be committed, in which case the protocol moves to the next
**round**, and a new validator gets to propose a block for that height.
Two stages of voting are required to successfully commit a block; we
call them **pre-vote** and **pre-commit**.
call them **pre-vote** and **pre-commit**. A block is committed when
more than 2/3 of validators pre-commit for the same block in the same
round.
There is a picture of a couple doing the polka because validators are
doing something like a polka dance. When more than two-thirds of the
validators pre-vote for the same block, we call that a **polka**. Every
pre-commit must be justified by a polka in the same round.
A block is committed when
more than 2/3 of validators pre-commit for the same block in the same
round.
Validators may fail to commit a block for a number of reasons; the
current proposer may be offline, or the network may be slow. Tendermint

View File

@@ -1,4 +1,3 @@
#!/bin/bash
mkdir -p .vuepress/public/rpc/
cp -a ../rpc/openapi/* .vuepress/public/rpc/
cp -a ../rpc/openapi/ .vuepress/public/rpc/

View File

@@ -62,6 +62,5 @@ sections.
- [RFC-023: Semi-permanent Testnet](./rfc-023-semi-permanent-testnet.md)
- [RFC-024: Block Structure Consolidation](./rfc-024-block-structure-consolidation.md)
- [RFC-025: Application Defined Transaction Storage](./rfc-025-support-app-side-mempool.md)
- [RFC-027: P2P Message Bandwidth Report](./rfc-027-p2p-message-bandwidth-report.md)
<!-- - [RFC-NNN: Title](./rfc-NNN-title.md) -->

(Four binary image files removed: 204 KiB, 213 KiB, 78 KiB, and 63 KiB.)
View File

@@ -1,287 +0,0 @@
# RFC 27: P2P Message Bandwidth Report
## Changelog
- Nov 7, 2022: initial draft (@williambanfield)
- Nov 15, 2022: draft completed (@williambanfield)
## Abstract
Node operators and application developers complain that Tendermint nodes consume
large amounts of network bandwidth. This RFC catalogues the major sources of bandwidth
consumption within Tendermint and suggests modifications to Tendermint that may reduce
bandwidth consumption for nodes.
## Background
Multiple teams running validators in production report that their validators
consume a lot of bandwidth. They report that operators running on a network
with hundreds of validators consume multiple terabytes of bandwidth per day.
Prometheus data collected from a validator node running on the Osmosis chain
shows that Tendermint exchanges large amounts of data with its peers. In the
nearly three hours of observation, Tendermint sent nearly 42 gigabytes and
received about 26 gigabytes, for an estimated 366 gigabytes sent daily and 208
gigabytes received daily. While this is shy of the reported terabytes number,
operators running multiple nodes for a 'sentry' pattern could easily send and
receive a terabyte of data.
Sending and receiving large amounts of data has a cost for node operators. Most
cloud platforms charge for network traffic egress. Google Cloud charges between
[$.05 to $.12 per gigabyte of egress traffic][gcloud-pricing], and ingress is
free. Hetzner [charges 1€ per TB used over the 10-20TB base bandwidth per
month][hetzner-pricing], which will be easily hit if multiple terabytes are
sent and received per day. Using the values collected from the validator on
Osmosis, a single node running on Google Cloud may cost $18 to $44 a day. On
Hetzner, the estimated 18TB a month of combined sending and receiving may cost
between 0 and 10 Euro a month per node.
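
For reference, a quick check of that arithmetic, using the egress figures quoted above:

```go
// Quick check of the egress-cost arithmetic: roughly 366 GB of daily egress
// at Google Cloud's quoted $0.05 to $0.12 per gigabyte.
package main

import "fmt"

func main() {
	const egressGBPerDay = 366.0
	fmt.Printf("$%.2f - $%.2f per day\n", egressGBPerDay*0.05, egressGBPerDay*0.12)
	// Prints: $18.30 - $43.92 per day, matching the $18 to $44 estimate.
}
```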
## Discussion
### Overview of Major Bandwidth Usage
To determine which components of Tendermint were consuming the most bandwidth,
I gathered prometheus metrics from the [Blockpane][blockpane] validator running
on the Osmosis network for several hours. The data reveal that three message
types account for 98% of the total bandwidth consumed. These message types are
as follows:
1. [consensus.BlockPart][block-part-message]
2. [mempool.Txs][mempool-txs-message]
3. [consensus.Vote][vote-message]
The images below, of p2p data collected from the Blockpane validator, illustrate
the total bandwidth consumption of these three message types.
#### Send:
##### Top 3 Percent:
![](./images/top-3-percent-send.png)
##### Rate For All Messages:
![](./images/send-rate-all.png)
#### Receive:
##### Top 3 Percent:
![](./images/top-3-percent-receive.png)
##### Rate For All Messages:
![](./images/receive-rate-all.png)
### Investigation of Message Usage
This section discusses the usage of each of the three highest consumption messages.
#### BlockPart Transmission
Sending `BlockPart` messages consumes the most bandwidth out of all p2p
message types, as observed on the Blockpane Osmosis validator. In the almost 3
hour observation, the validator sent about 20 gigabytes of `BlockPart`
messages.
A block is proposed each round of Tendermint consensus. The paper does not
define a specific way that the block is to be transmitted, just that all
participants will receive it via a gossip network.
The Go implementation of Tendermint transmits the block in 'parts'. It
serializes the block to wire-format proto and splits this byte representation
into a set of 4 kilobyte arrays and sends these arrays to its peers, each in a
separate message.
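
As an illustration of this splitting step, here is a minimal Go sketch; `makeBlockParts` and `partSize` are illustrative names rather than the actual Tendermint API:

```go
// Hypothetical sketch of cutting a serialized block into 4 KiB parts for
// gossiping, as described above. Not the real Tendermint implementation.
package main

import "fmt"

const partSize = 4096 // 4 KiB per part

func makeBlockParts(blockBytes []byte) [][]byte {
	parts := make([][]byte, 0, (len(blockBytes)+partSize-1)/partSize)
	for start := 0; start < len(blockBytes); start += partSize {
		end := start + partSize
		if end > len(blockBytes) {
			end = len(blockBytes)
		}
		parts = append(parts, blockBytes[start:end])
	}
	return parts
}

func main() {
	block := make([]byte, 10_000)               // a ~10 KB serialized block
	fmt.Println(len(makeBlockParts(block)))     // 3 parts: 4096 + 4096 + 1808 bytes
}
```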
The logic for sending `BlockPart` messages resides in the code for the
[consensus.Reactor][gossip-data-routine]. The consensus reactor starts a new
`gossipDataRoutine` for each peer it connects to. This routine repeatedly picks
a part of the block that Tendermint believes the peer does not know about yet
and gossips it to the peer. The set of `BlockParts` that Tendermint considers
its peer as having is only updated in one of four ways:
1. Our peer tells us they have entered a new round [via a `NewRoundStep`
message][new-round-step-message-send]. This message is only sent when a node
moves to a new round or height and only resets the data we collect about a
peer's blockpart state.
1. [We receive a block part from the peer][block-part-receive].
1. [We send][block-part-send-1] [the peer a block part][block-part-send-2].
1. Our peer tells us about the block parts they have [via `NewValidBlock`
messages][new-valid-block-message-send]. This message is only sent when the
peer has a quorum of prevotes or precommits for a block.
Each node receives block parts from all of its peers. The particular block part
to send at any given time is randomly selected from the set of parts that the
peer node is not yet known to have. Given that these are the only times that
Tendermint learns of its peers' block parts, it's very likely that a node has
an incomplete understanding of its peers' block parts and is transmitting block
parts to a peer that the peer has received from some other node.
Multiple potential mechanisms exist to reduce the number of duplicate block
parts a node receives. One set of mechanisms relies on more frequently
communicating the set of block parts a node needs to its peers. Another
potential mechanism requires a larger overhaul to the way blocks are gossiped
in the network.
#### Mempool Tx Transmission
The Tendermint mempool stages transactions that are yet to be committed to the
blockchain and communicates these transactions to its peers. Each message
contains one transaction. Data collected from the Blockpane node running on
Osmosis indicates that the validator sent about 12 gigabytes of `Txs` messages
during the nearly 3 hour observation period.
The Tendermint mempool starts a new [broadcastTxRoutine][broadcast-tx-routine]
for each peer that it is informed of. The routine sends all transactions that
the mempool is aware of to all peers, with one exception: if the mempool
received a transaction from a peer, it marks it as such and won't resend it to
that peer. Otherwise, it retains no information about which
transactions it already sent to a peer. In some cases it may therefore resend
transactions the peer already has. This can occur if the mempool removes a
transaction from the `CList` data structure used to store the list of
transactions while it is about to be sent and if the transaction was the tail
of the `CList` during removal. This will be more likely to occur if a large
number of transactions from the end of the list are removed during `RecheckTx`,
since multiple transactions will become the tail and then be deleted. It is
unclear at the moment how frequently this occurs on production chains.
Beyond ensuring that transactions are rebroadcast to peers less frequently,
there is not a simple scheme to communicate fewer transactions to peers. Peers
cannot communicate what transactions they need since they do not know which
transactions exist on the network.
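
To illustrate the per-peer broadcast pattern described in this section, here is a minimal sketch; it is not the real mempool reactor, and all names are illustrative:

```go
// Minimal sketch of the per-peer mempool broadcast behaviour described above:
// every transaction is sent to every peer unless that peer is recorded as the
// transaction's sender. No other bookkeeping is kept per peer.
package main

import "fmt"

type tx struct {
	bytes  []byte
	sender string // peer the tx was first received from, "" if submitted locally
}

// broadcastTxsTo forwards every known transaction except those the peer itself gave us.
func broadcastTxsTo(peerID string, txs []tx, send func(peerID string, b []byte)) {
	for _, t := range txs {
		if t.sender == peerID {
			continue // the only exception: never echo a tx back to its sender
		}
		send(peerID, t.bytes) // note: no record is kept that peerID now has t
	}
}

func main() {
	txs := []tx{
		{bytes: []byte("tx-a"), sender: ""},
		{bytes: []byte("tx-b"), sender: "peer1"},
	}
	broadcastTxsTo("peer1", txs, func(p string, b []byte) {
		fmt.Printf("send %s -> %s\n", b, p)
	})
	// Output: send tx-a -> peer1   (tx-b is skipped because peer1 sent it)
}
```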
#### Vote Transmission
Tendermint votes, both prevotes and precommits, are central to Tendermint
consensus and are gossiped by all nodes to all peers during each consensus
round. Data collected from the Blockpane node running on Osmosis indicates that
about 9 gigabytes of `Vote` messages were sent during the nearly 3 hour period
of observation. Examination of the [Vote message][vote-msg] indicates that it
contains 184 bytes of data, with the proto encoding adding a few additional
bytes when transmitting.
The Tendermint consensus reactor starts a new
[gossipVotesRoutine][gossip-votes-routine] for each peer that it connects to.
The reactor sends all votes to all peers unless it knows that the peer already
has the vote or the reactor learns that the peer is in a different round and
thus the vote no longer applies. Tendermint learns that a peer has a vote
in one of 4 ways:
1. Tendermint sent the peer the vote.
1. Tendermint received the vote from the peer.
1. The peer [sent a `HasVote` message][apply-has-vote]. This message is broadcast
to all peers [each time a validator receives a vote it hasn't seen before
corresponding to its current height and round][publish-event-vote].
1. The peer [sent a `VoteSetBits` message][apply-vote-set-bits]. This message is
[sent as a response to a peer that sends a `VoteSetMaj23`][vote-set-bits-send].
Given that Tendermint informs all peers of _each_ vote message it receives, all
nodes should be well informed of which votes their peers have. Given that the
vote messages were the third largest consumer of bandwidth in the observation
on Osmosis, it's possible that this system is not currently working correctly.
Further analysis should examine where votes may be being retransmitted.
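
To make the four mechanisms above concrete, here is a minimal sketch of the per-peer "has vote" bookkeeping; it is not the actual `PeerState` implementation, and all names are illustrative:

```go
// Minimal sketch of per-peer vote bookkeeping: a bit array indexed by
// validator index, consulted before sending and updated whenever we send a
// vote, receive one from the peer, or the peer announces one.
package main

import "fmt"

type peerVoteState struct {
	hasVote []bool // one flag per validator index for the current height/round
}

// setHasVote records that the peer now has the vote from validator valIdx.
func (ps *peerVoteState) setHasVote(valIdx int) { ps.hasVote[valIdx] = true }

// pickVoteToSend returns the index of a vote we hold that the peer is not yet
// known to have, or -1 if there is nothing left to send.
func (ps *peerVoteState) pickVoteToSend(ourVotes []bool) int {
	for i, have := range ourVotes {
		if have && !ps.hasVote[i] {
			return i
		}
	}
	return -1
}

func main() {
	ps := &peerVoteState{hasVote: make([]bool, 4)}
	our := []bool{true, true, false, true} // votes we hold for validators 0, 1, 3
	ps.setHasVote(0)                       // peer announced validator 0's vote
	fmt.Println(ps.pickVoteToSend(our))    // 1: the first vote the peer still lacks
}
```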
### Suggested Improvements to Lower Message Transmission Bandwidth
#### Gossip Known BlockPart Data
The `BlockPart` messages, by far, account for the majority of the data sent to
each peer. At the moment, peers do not inform the node of which block parts
they already have. This means that each block part is _very likely_ to be
transmitted many times to each node. This frivolous consumption is even worse
in networks with large blocks.
The very simple solution to this issue is to copy the technique used in
consensus for informing peers when the node receives a vote. The consensus
reactor can be augmented with a `HasBlockPart` message that is broadcast to
each peer every time the node receives a block part. By informing each peer
every time the node receives a block part, we can drastically reduce the amount
of duplicate data sent to each node. There would be no algorithmic way of
enforcing that a peer accurately reports its block parts, so providing this
message would be a somewhat altruistic action on the part of the node. Such a
system [has been proposed in the past][i627] as well, so this is certainly not
totally new ground.
Measuring the amount of duplicate block parts received before and after this
change would help validate this approach.
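
A hypothetical sketch of what the proposed message and its peer-state handling could look like follows; neither `HasBlockPartMessage` nor `peerPartState` exist in Tendermint today:

```go
// Hypothetical sketch of the proposed HasBlockPart mechanism. The idea mirrors
// HasVote: on every block part received, tell all peers, and mark the sender's
// known part set so the gossip loop never re-sends that part.
package main

import "fmt"

// HasBlockPartMessage is the hypothetical announcement: "I now hold part
// Index of the proposal at Height/Round."
type HasBlockPartMessage struct {
	Height int64
	Round  int32
	Index  int
}

type peerPartState struct {
	parts []bool // which parts of the current proposal the peer is known to have
}

func (ps *peerPartState) applyHasBlockPart(msg HasBlockPartMessage) {
	if msg.Index >= 0 && msg.Index < len(ps.parts) {
		ps.parts[msg.Index] = true // never re-send this part to the peer
	}
}

func main() {
	ps := &peerPartState{parts: make([]bool, 8)}
	ps.applyHasBlockPart(HasBlockPartMessage{Height: 100, Round: 0, Index: 3})
	fmt.Println(ps.parts[3]) // true: part 3 will be skipped by the gossip loop
}
```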
#### Compress Transmitted Data
Tendermint's data is sent uncompressed on the wire. The messages are not
compressed before sending and the transport performs no compression either.
Some of the information communicated by Tendermint is a poor candidate for
compression: Data such as digital signatures and hashes have high entropy and
therefore do not compress well. However, transactions may contain lots of
information that has less entropy. Compression within Tendermint may be added
at several levels. Compression may be performed at the [Tendermint 'packet'
level][must-wrap-packet] or at the [Tendermint message send
level][message-send].
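
As an illustration, here is a minimal sketch of compressing a serialized payload with Go's standard `compress/gzip` before it is handed to the peer connection; Tendermint does not do this today, and the helper name is illustrative:

```go
// Minimal sketch, assuming compression were added at the message-send level:
// gzip the serialized payload before handing it to the peer connection.
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
)

func compress(payload []byte) ([]byte, error) {
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	if _, err := zw.Write(payload); err != nil {
		return nil, err
	}
	if err := zw.Close(); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

func main() {
	// Low-entropy data (like many transactions) compresses well; signatures
	// and hashes would not.
	payload := bytes.Repeat([]byte("transfer:alice->bob;"), 200)
	out, err := compress(payload)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d bytes -> %d bytes\n", len(payload), len(out))
}
```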
#### Transmit Less Data During Block Gossip
Block, vote, and mempool gossiping transmit much of the same data. The mempool
reactor gossips candidate transactions to each peer. The consensus reactor,
when gossiping the votes, sends vote metadata and the digital signature that
signs over that metadata. Finally, when a block is proposed, the proposing node
amalgamates the received votes and a set of transactions, and adds a header to
produce the block. This block is then serialized and gossiped as a list of
bytes. However, the data that the block contains, namely the votes and the
transactions, were most likely _already transmitted to the nodes on the network_
via mempool transaction gossip and consensus vote gossip.
Therefore, block gossip can be updated to transmit a representation of the data
contained in the block that assumes the peers will already have most of this
data. Namely, the block gossip can be updated to only send 1) a list of
transaction hashes and 2) a bit array of votes selected for the block along
with the header and other required block metadata.
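
A hypothetical sketch of the shape such a compact representation could take; none of these types exist in Tendermint and the field names are illustrative:

```go
// Hypothetical sketch of a "compact" block gossip format: instead of full
// transactions and full votes, the proposer gossips the header plus
// transaction hashes and a vote bit array.
package main

import "fmt"

type CompactBlock struct {
	HeaderBytes []byte     // serialized block header and other required metadata
	TxHashes    [][32]byte // hashes of txs peers should already hold in their mempools
	LastCommit  []bool     // bit array: which validators' precommits are included
}

func main() {
	cb := CompactBlock{
		HeaderBytes: []byte("header..."),
		TxHashes:    make([][32]byte, 500),
		LastCommit:  make([]bool, 150),
	}
	// Roughly 500*32 bytes of hashes are gossiped instead of full tx bodies.
	fmt.Println(len(cb.TxHashes)*32, "bytes of tx hashes gossiped")
}
```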
This new proposed method for gossiping block data could accompany a slight
update to the mempool transaction gossip and consensus vote gossip. Since the
full contents of each block will no longer be gossiped together, a node may be
missing a proposed transaction, or the vote of a validator, that the new block
gossip format references. The mempool and consensus reactors may therefore be
updated to provide a `NeedTxs` and `NeedVotes` message. Each of these messages
would allow a node to request a set of data from its peers. When a node
receives one of these, it will then transmit the Txs/Votes indicated in the
associated message regardless of whether
it believes it has transmitted them to the peer before. The gossip layer will
ensure that each peer eventually receives all of the data in the block.
However, if a transaction is needed immediately by a peer so that it can verify
and execute a block during consensus, a mechanism such as the `NeedTxs` and
`NeedVotes` messages should be added to ensure it receives the messages
quickly.
The same logic may be applied to evidence transmission as well, since all nodes
should receive evidence and therefore do not need to re-transmit it in a block
part.
A similar idea has been proposed in the past as [Compact Block
Propagation][compact-block-propagation].
## References
[blockpane]: https://www.mintscan.io/osmosis/validators/osmovaloper1z0sh4s80u99l6y9d3vfy582p8jejeeu6tcucs2
[block-part-message]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/proto/tendermint/consensus/types.proto#L44
[mempool-txs-message]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/proto/tendermint/mempool/types.proto#L6
[vote-message]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/proto/tendermint/consensus/types.proto#L51
[gossip-data-routine]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/consensus/reactor.go#L537
[block-part-receive]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/consensus/reactor.go#L324
[block-part-send-1]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/consensus/reactor.go#L566
[block-part-send-2]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/consensus/reactor.go#L684.
[new-valid-block-message-send]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/consensus/reactor.go#L268
[new-round-step-message-send]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/consensus/reactor.go#L266
[broadcast-tx-routine]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/mempool/v0/reactor.go#L197
[gossip-votes-routine]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/consensus/reactor.go#L694
[apply-has-vote]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/consensus/reactor.go#L1429
[apply-vote-set-bits]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/consensus/reactor.go#L1445
[publish-event-vote]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/consensus/state.go#L2083
[vote-set-bits-send]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/consensus/reactor.go#L306
[must-wrap-packet]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/p2p/conn/connection.go#L889-L918
[message-send]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/p2p/peer.go#L285
[gcloud-pricing]: https://cloud.google.com/vpc/network-pricing#vpc-pricing
[hetzner-pricing]: https://docs.hetzner.com/robot/general/traffic
[vote-msg]: https://github.com/tendermint/tendermint/blob/ff0f98892f24aac11e46aeff2b6d2c0ad816701a/proto/tendermint/types/types.pb.go#L468
[i627]: https://github.com/tendermint/tendermint/issues/627
[compact-block-propagation]: https://github.com/tendermint/tendermint/issues/7932

View File

@@ -18,52 +18,50 @@ Listen address can be changed in the config file (see
The following metrics are available:
| **Name** | **Type** | **Tags** | **Description** |
|--------------------------------------------|-----------|------------------|--------------------------------------------------------------------------------------------------------------------------------------------|
| abci\_connection\_method\_timing\_seconds | Histogram | method, type | Timings for each of the ABCI methods |
| blocksync\_syncing | Gauge | | Either 0 (not block syncing) or 1 (syncing) |
| consensus\_height | Gauge | | Height of the chain |
| consensus\_validators | Gauge | | Number of validators |
| consensus\_validators\_power | Gauge | | Total voting power of all validators |
| consensus\_validator\_power | Gauge | | Voting power of the node if in the validator set |
| consensus\_validator\_last\_signed\_height | Gauge | | Last height the node signed a block, if the node is a validator |
| consensus\_validator\_missed\_blocks | Gauge | | Total amount of blocks missed for the node, if the node is a validator |
| consensus\_missing\_validators | Gauge | | Number of validators who did not sign |
| consensus\_missing\_validators\_power | Gauge | | Total voting power of the missing validators |
| consensus\_byzantine\_validators | Gauge | | Number of validators who tried to double sign |
| consensus\_byzantine\_validators\_power | Gauge | | Total voting power of the byzantine validators |
| consensus\_block\_interval\_seconds | Histogram | | Time between this and last block (Block.Header.Time) in seconds |
| consensus\_rounds | Gauge | | Number of rounds |
| consensus\_num\_txs | Gauge | | Number of transactions |
| consensus\_total\_txs | Gauge | | Total number of transactions committed |
| consensus\_block\_parts | Counter | peer\_id | Number of blockparts transmitted by peer |
| consensus\_latest\_block\_height | Gauge | | /status sync\_info number |
| consensus\_block\_size\_bytes | Gauge | | Block size in bytes |
| consensus\_step\_duration | Histogram | step | Histogram of durations for each step in the consensus protocol |
| consensus\_round\_duration | Histogram | | Histogram of durations for all the rounds that have occurred since the process started |
| consensus\_block\_gossip\_parts\_received | Counter | matches\_current | Number of block parts received by the node |
| consensus\_quorum\_prevote\_delay | Gauge | | Interval in seconds between the proposal timestamp and the timestamp of the earliest prevote that achieved a quorum |
| consensus\_full\_prevote\_delay | Gauge | | Interval in seconds between the proposal timestamp and the timestamp of the latest prevote in a round where all validators voted |
| consensus\_proposal\_receive\_count | Counter | status | Total number of proposals received by the node since process start |
| consensus\_proposal\_create\_count | Counter | | Total number of proposals created by the node since process start |
| consensus\_round\_voting\_power\_percent | Gauge | vote\_type | A value between 0 and 1.0 representing the percentage of the total voting power per vote type received within a round |
| consensus\_late\_votes | Counter | vote\_type | Number of votes received by the node since process start that correspond to earlier heights and rounds than this node is currently in. |
| p2p\_message\_send\_bytes\_total | Counter | message\_type | Number of bytes sent to all peers per message type |
| p2p\_message\_receive\_bytes\_total | Counter | message\_type | Number of bytes received from all peers per message type |
| p2p\_peers | Gauge | | Number of peers node's connected to |
| p2p\_peer\_receive\_bytes\_total | Counter | peer\_id, chID | Number of bytes per channel received from a given peer |
| p2p\_peer\_send\_bytes\_total | Counter | peer\_id, chID | Number of bytes per channel sent to a given peer |
| p2p\_peer\_pending\_send\_bytes | Gauge | peer\_id | Number of pending bytes to be sent to a given peer |
| p2p\_num\_txs | Gauge | peer\_id | Number of transactions submitted by each peer\_id |
| p2p\_pending\_send\_bytes | Gauge | peer\_id | Amount of data pending to be sent to peer |
| mempool\_size | Gauge | | Number of uncommitted transactions |
| mempool\_tx\_size\_bytes | Histogram | | Transaction sizes in bytes |
| mempool\_failed\_txs | Counter | | Number of failed transactions |
| mempool\_recheck\_times | Counter | | Number of transactions rechecked in the mempool |
| state\_block\_processing\_time | Histogram | | Time between BeginBlock and EndBlock in ms |
| state\_consensus\_param\_updates | Counter | | Number of consensus parameter updates returned by the application since process start |
| state\_validator\_set\_updates | Counter | | Number of validator set updates returned by the application since process start |
| statesync\_syncing | Gauge | | Either 0 (not state syncing) or 1 (syncing) |
| **Name** | **Type** | **Tags** | **Description** |
|----------------------------------------|-----------|-----------------|--------------------------------------------------------------------------------------------------------------------------------------------|
| abci_connection_method_timing_seconds | Histogram | method, type | Timings for each of the ABCI methods |
| consensus_height | Gauge | | Height of the chain |
| consensus_validators | Gauge | | Number of validators |
| consensus_validators_power | Gauge | | Total voting power of all validators |
| consensus_validator_power | Gauge | | Voting power of the node if in the validator set |
| consensus_validator_last_signed_height | Gauge | | Last height the node signed a block, if the node is a validator |
| consensus_validator_missed_blocks | Gauge | | Total amount of blocks missed for the node, if the node is a validator |
| consensus_missing_validators | Gauge | | Number of validators who did not sign |
| consensus_missing_validators_power | Gauge | | Total voting power of the missing validators |
| consensus_byzantine_validators | Gauge | | Number of validators who tried to double sign |
| consensus_byzantine_validators_power | Gauge | | Total voting power of the byzantine validators |
| consensus_block_interval_seconds | Histogram | | Time between this and last block (Block.Header.Time) in seconds |
| consensus_rounds | Gauge | | Number of rounds |
| consensus_num_txs | Gauge | | Number of transactions |
| consensus_total_txs | Gauge | | Total number of transactions committed |
| consensus_block_parts | counter | peer_id | number of blockparts transmitted by peer |
| consensus_latest_block_height | gauge | | /status sync_info number |
| consensus_block_syncing | gauge | | either 0 (not block syncing) or 1 (syncing) |
| consensus_state_syncing | gauge | | either 0 (not state syncing) or 1 (syncing) |
| consensus_block_size_bytes | Gauge | | Block size in bytes |
| consensus_step_duration | Histogram | step | Histogram of durations for each step in the consensus protocol |
| consensus_round_duration | Histogram | | Histogram of durations for all the rounds that have occurred since the process started |
| consensus_block_gossip_parts_received | Counter | matches_current | Number of block parts received by the node |
| consensus_quorum_prevote_delay | Gauge | | Interval in seconds between the proposal timestamp and the timestamp of the earliest prevote that achieved a quorum |
| consensus_full_prevote_delay | Gauge | | Interval in seconds between the proposal timestamp and the timestamp of the latest prevote in a round where all validators voted |
| consensus_proposal_receive_count | Counter | status | Total number of proposals received by the node since process start |
| consensus_proposal_create_count | Counter | | Total number of proposals created by the node since process start |
| consensus_round_voting_power_percent | Gauge | vote_type | A value between 0 and 1.0 representing the percentage of the total voting power per vote type received within a round |
| consensus_late_votes | Counter | vote_type | Number of votes received by the node since process start that correspond to earlier heights and rounds than this node is currently in. |
| p2p_peers | Gauge | | Number of peers node's connected to |
| p2p_peer_receive_bytes_total | counter | peer_id, chID | number of bytes per channel received from a given peer |
| p2p_peer_send_bytes_total | counter | peer_id, chID | number of bytes per channel sent to a given peer |
| p2p_peer_pending_send_bytes | gauge | peer_id | number of pending bytes to be sent to a given peer |
| p2p_num_txs | gauge | peer_id | number of transactions submitted by each peer_id |
| p2p_pending_send_bytes | gauge | peer_id | amount of data pending to be sent to peer |
| mempool_size | Gauge | | Number of uncommitted transactions |
| mempool_tx_size_bytes | histogram | | transaction sizes in bytes |
| mempool_failed_txs | counter | | number of failed transactions |
| mempool_recheck_times | counter | | number of transactions rechecked in the mempool |
| state_block_processing_time | histogram | | time between BeginBlock and EndBlock in ms |
| state_consensus_param_updates | Counter | | number of consensus parameter updates returned by the application since process start |
| state_validator_set_updates | Counter | | number of validator set updates returned by the application since process start |
## Useful queries
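For example, assuming metrics are enabled (`prometheus = true`) and exposed on the default `prometheus_listen_addr` of `:26660`, the raw values behind the table above can be inspected directly:
```bash
# Scrape the raw Prometheus metrics and filter for the chain height
# (metric names carry the configured namespace prefix, e.g. tendermint_consensus_height).
curl -s http://localhost:26660/metrics | grep consensus_height
```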

View File

@@ -55,47 +55,3 @@ given destination directory. Each archive will contain:
Note: goroutine.out and heap.out will only be written if a profile address is
provided and is operational. This command is blocking and will log any error.
## Tendermint Inspect
Tendermint includes an `inspect` command for querying Tendermint's state store and block
store over Tendermint RPC.
When the Tendermint consensus engine detects inconsistent state, it will crash the
entire Tendermint process.
While in this inconsistent state, a node running Tendermint's consensus engine will not start up.
The `inspect` command runs only a subset of Tendermint's RPC endpoints for querying the block store
and state store.
`inspect` allows operators to query a read-only view of the state.
`inspect` does not run the consensus engine at all and can therefore be used to debug
processes that have crashed due to inconsistent state.
### Running inspect
Start up the `inspect` tool on the machine where Tendermint crashed using:
```bash
tendermint inspect --home=</path/to/app.d>
```
`inspect` will use the data directory specified in your Tendermint configuration file.
`inspect` will also run the RPC server at the address specified in your Tendermint configuration file.
### Using inspect
With the `inspect` server running, you can access RPC endpoints that are critically important
for debugging.
Calling the `/status`, `/consensus_state` and `/dump_consensus_state` RPC endpoint
will return useful information about the Tendermint consensus state.
To start the `inspect` process, run
```bash
tendermint inspect
```
### RPC endpoints
The list of available RPC endpoints can be found by making a request to the RPC port.
For an `inspect` process running on `127.0.0.1:26657`, navigate your browser to
`http://127.0.0.1:26657/` to retrieve the list of enabled RPC endpoints.
Additional information on the Tendermint RPC endpoints can be found in the [rpc documentation](https://docs.tendermint.com/master/rpc).
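For example, with the `inspect` server running, the state and consensus information mentioned above can be retrieved from the command line (a sketch assuming `curl` and the default `127.0.0.1:26657` RPC address):
```bash
# Basic node and sync information from the state store
curl -s 'http://127.0.0.1:26657/status'

# Detailed consensus state, useful when debugging a crashed node
curl -s 'http://127.0.0.1:26657/dump_consensus_state'
```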

File diff suppressed because it is too large

View File

@@ -10,42 +10,47 @@ This guide is designed for beginners who want to get started with a Tendermint
Core application from scratch. It does not assume that you have any prior
experience with Tendermint Core.
Tendermint Core is a service that provides a Byzantine Fault Tolerant consensus engine
for state-machine replication. The replicated state-machine, or "application", can be written
in any language that can send and receive protocol buffer messages in a client-server model.
Applications written in Go can also use Tendermint as a library and run the service in the same
process as the application.
Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state
transition machine - written in any programming language - and securely
replicates it on many machines.
By following along this tutorial you will create a Tendermint Core application called kvstore,
a (very) simple distributed BFT key-value store.
The application will be written in Go and
some understanding of the Go programming language is expected.
If you have never written Go, you may want to go through [Learn X in Y minutes
Where X=Go](https://learnxinyminutes.com/docs/go/) first, to familiarize
Although Tendermint Core is written in the Golang programming language, prior
knowledge of it is not required for this guide. You can learn it as we go due
to its simplicity. However, you may want to go through [Learn X in Y minutes
Where X=Go](https://learnxinyminutes.com/docs/go/) first to familiarize
yourself with the syntax.
Note: Please use the latest released version of this guide and of Tendermint.
We strongly advise against using unreleased commits for your development.
By following along with this guide, you'll create a Tendermint Core project
called kvstore, a (very) simple distributed BFT key-value store.
### Built-in app vs external app
On the one hand, to get maximum performance you can run your application in
the same process as the Tendermint Core, as long as your application is written in Go.
[Cosmos SDK](https://github.com/cosmos/cosmos-sdk) is written
this way.
If that is the way you wish to proceed, use the [Creating a built-in application in Go](./go-built-in.md) guide instead of this one.
## Built-in app vs external app
On the other hand, having a separate application might give you better security
guarantees as two processes would be communicating via established binary protocol.
Tendermint Core will not have access to application's state.
This is the approach followed in this tutorial.
To get maximum performance it is better to run your application alongside
Tendermint Core. [Cosmos SDK](https://github.com/cosmos/cosmos-sdk) is written
this way. Please refer to [Writing a built-in Tendermint Core application in
Go](./go-built-in.md) guide for details.
Having a separate application might give you better security guarantees as two
processes would be communicating via established binary protocol. Tendermint
Core will not have access to application's state.
## 1.1 Installing Go
Verify that you have the latest version of Go installed (refer to the [official guide for installing Go](https://golang.org/doc/install)):
Please refer to [the official guide for installing
Go](https://golang.org/doc/install).
Verify that you have the latest version of Go installed:
```bash
$ go version
go version go1.19.2 darwin/amd64
go version go1.15.x darwin/amd64
```
Make sure you have `$GOPATH` environment variable set:
```bash
echo $GOPATH
/Users/melekes/go
```
## 1.2 Creating a new Go project
@@ -54,73 +59,39 @@ We'll start by creating a new Go project.
```bash
mkdir kvstore
cd kvstore
```
Inside the example directory, create a `main.go` file with the following content:
Inside the example directory create a `main.go` file with the following content:
```go
package main
import (
"fmt"
"fmt"
)
func main() {
fmt.Println("Hello, Tendermint Core")
fmt.Println("Hello, Tendermint Core")
}
```
When run, this should print "Hello, Tendermint Core" to the standard output.
```bash
cd kvstore
$ go run main.go
go run main.go
Hello, Tendermint Core
```
We are going to use [Go modules](https://github.com/golang/go/wiki/Modules) for
dependency management, so let's start by including a dependency on the latest version of
Tendermint.
```bash
go mod init kvstore
go get github.com/tendermint/tendermint@latest
```
After running the above commands you will see two generated files, `go.mod` and `go.sum`.
The go.mod file should look similar to:
```go
module kvstore
go 1.19
require (
github.com/tendermint/tendermint v0.37.0
)
```
As you write the kvstore application, you can rebuild the binary by
pulling any new dependencies and recompiling it.
```sh
go get
go build
```
## 1.3 Writing a Tendermint Core application
Tendermint Core communicates with the application through the Application
BlockChain Interface (ABCI). The messages exchanged through the interface are
defined in the ABCI [protobuf
BlockChain Interface (ABCI). All message types are defined in the [protobuf
file](https://github.com/tendermint/tendermint/blob/main/proto/tendermint/abci/types.proto).
This allows Tendermint Core to run applications written in any programming
language.
We begin with the basic scaffolding for an ABCI application:
a new type, `KVStoreApplication`, which implements the
methods defined by the `abcitypes.Application` interface.
Create a file called `app.go` with the following contents:
Create a file called `app.go` with the following content:
```go
package main
@@ -129,7 +100,7 @@ import (
abcitypes "github.com/tendermint/tendermint/abci/types"
)
type KVStoreApplication struct{}
type KVStoreApplication struct {}
var _ abcitypes.Application = (*KVStoreApplication)(nil)
@@ -137,359 +108,238 @@ func NewKVStoreApplication() *KVStoreApplication {
return &KVStoreApplication{}
}
func (app *KVStoreApplication) Info(info abcitypes.RequestInfo) abcitypes.ResponseInfo {
func (KVStoreApplication) Info(req abcitypes.RequestInfo) abcitypes.ResponseInfo {
return abcitypes.ResponseInfo{}
}
func (app *KVStoreApplication) Query(query abcitypes.RequestQuery) abcitypes.ResponseQuery {
return abcitypes.ResponseQuery{}
func (KVStoreApplication) DeliverTx(req abcitypes.RequestDeliverTx) abcitypes.ResponseDeliverTx {
return abcitypes.ResponseDeliverTx{Code: 0}
}
func (app *KVStoreApplication) CheckTx(tx abcitypes.RequestCheckTx) abcitypes.ResponseCheckTx {
return abcitypes.ResponseCheckTx{}
func (KVStoreApplication) CheckTx(req abcitypes.RequestCheckTx) abcitypes.ResponseCheckTx {
return abcitypes.ResponseCheckTx{Code: 0}
}
func (app *KVStoreApplication) InitChain(chain abcitypes.RequestInitChain) abcitypes.ResponseInitChain {
return abcitypes.ResponseInitChain{}
}
func (app *KVStoreApplication) PrepareProposal(proposal abcitypes.RequestPrepareProposal) abcitypes.ResponsePrepareProposal {
return abcitypes.ResponsePrepareProposal{}
}
func (app *KVStoreApplication) ProcessProposal(proposal abcitypes.RequestProcessProposal) abcitypes.ResponseProcessProposal {
return abcitypes.ResponseProcessProposal{}
}
func (app *KVStoreApplication) BeginBlock(block abcitypes.RequestBeginBlock) abcitypes.ResponseBeginBlock {
return abcitypes.ResponseBeginBlock{}
}
func (app *KVStoreApplication) DeliverTx(tx abcitypes.RequestDeliverTx) abcitypes.ResponseDeliverTx {
return abcitypes.ResponseDeliverTx{}
}
func (app *KVStoreApplication) EndBlock(block abcitypes.RequestEndBlock) abcitypes.ResponseEndBlock {
return abcitypes.ResponseEndBlock{}
}
func (app *KVStoreApplication) Commit() abcitypes.ResponseCommit {
func (KVStoreApplication) Commit() abcitypes.ResponseCommit {
return abcitypes.ResponseCommit{}
}
func (app *KVStoreApplication) ListSnapshots(snapshots abcitypes.RequestListSnapshots) abcitypes.ResponseListSnapshots {
func (KVStoreApplication) Query(req abcitypes.RequestQuery) abcitypes.ResponseQuery {
return abcitypes.ResponseQuery{Code: 0}
}
func (KVStoreApplication) InitChain(req abcitypes.RequestInitChain) abcitypes.ResponseInitChain {
return abcitypes.ResponseInitChain{}
}
func (KVStoreApplication) BeginBlock(req abcitypes.RequestBeginBlock) abcitypes.ResponseBeginBlock {
return abcitypes.ResponseBeginBlock{}
}
func (KVStoreApplication) EndBlock(req abcitypes.RequestEndBlock) abcitypes.ResponseEndBlock {
return abcitypes.ResponseEndBlock{}
}
func (KVStoreApplication) ListSnapshots(abcitypes.RequestListSnapshots) abcitypes.ResponseListSnapshots {
return abcitypes.ResponseListSnapshots{}
}
func (app *KVStoreApplication) OfferSnapshot(snapshot abcitypes.RequestOfferSnapshot) abcitypes.ResponseOfferSnapshot {
func (KVStoreApplication) OfferSnapshot(abcitypes.RequestOfferSnapshot) abcitypes.ResponseOfferSnapshot {
return abcitypes.ResponseOfferSnapshot{}
}
func (app *KVStoreApplication) LoadSnapshotChunk(chunk abcitypes.RequestLoadSnapshotChunk) abcitypes.ResponseLoadSnapshotChunk {
func (KVStoreApplication) LoadSnapshotChunk(abcitypes.RequestLoadSnapshotChunk) abcitypes.ResponseLoadSnapshotChunk {
return abcitypes.ResponseLoadSnapshotChunk{}
}
func (app *KVStoreApplication) ApplySnapshotChunk(chunk abcitypes.RequestApplySnapshotChunk) abcitypes.ResponseApplySnapshotChunk {
func (KVStoreApplication) ApplySnapshotChunk(abcitypes.RequestApplySnapshotChunk) abcitypes.ResponseApplySnapshotChunk {
return abcitypes.ResponseApplySnapshotChunk{}
}
```
The types used here are defined in the Tendermint library and were added as a dependency
to the project when you ran `go get`. If your IDE is not recognizing the types, go ahead and run the command again.
Now I will go through each method explaining when it's called and adding
required business logic.
```bash
go get github.com/tendermint/tendermint@latest
```
Now go back to the `main.go` and modify the `main` function so it matches the following,
where an instance of the `KVStoreApplication` type is created.
### 1.3.1 CheckTx
When a new transaction is added to the Tendermint Core, it will ask the
application to check it (validate the format, signatures, etc.).
```go
func main() {
fmt.Println("Hello, Tendermint Core")
import "bytes"
_ = NewKVStoreApplication()
}
```
You can recompile and run the application now by running `go get` and `go build`, but it does
not do anything.
So let's revisit the code adding the logic needed to implement our minimal key/value store
and to start it along with the Tendermint Service.
### 1.3.1 Add a persistent data store
Our application will need to write its state out to persistent storage so that it
can stop and start without losing all of its data.
For this tutorial, we will use [BadgerDB](https://github.com/dgraph-io/badger),
a fast embedded key-value store.
First, add Badger as a dependency of your go module using the `go get` command:
`go get github.com/dgraph-io/badger/v3`
Next, let's update the application and its constructor to receive a handle to the database, as follows:
```go
type KVStoreApplication struct {
db *badger.DB
onGoingBlock *badger.Txn
}
var _ abcitypes.Application = (*KVStoreApplication)(nil)
func NewKVStoreApplication(db *badger.DB) *KVStoreApplication {
return &KVStoreApplication{db: db}
}
```
The `onGoingBlock` keeps track of the Badger transaction that will update the application's state when a block
is completed. Don't worry about it for now, we'll get to that later.
Next, update the `import` stanza at the top to include the Badger library:
```go
import(
"github.com/dgraph-io/badger/v3"
abcitypes "github.com/tendermint/tendermint/abci/types"
)
```
Finally, update the `main.go` file to invoke the updated constructor:
```go
_ = NewKVStoreApplication(nil)
```
### 1.3.2 CheckTx
When Tendermint Core receives a new transaction from a client, Tendermint asks the application if
the transaction is acceptable, using the `CheckTx` method.
In our application, a transaction is a string with the form `key=value`, indicating a key and value to write to the store.
The most basic validation check we can perform is to check if the transaction conforms to the `key=value` pattern.
For that, let's add the following helper method to app.go:
```go
func (app *KVStoreApplication) isValid(tx []byte) uint32 {
func (app *KVStoreApplication) isValid(tx []byte) (code uint32) {
// check format
parts := bytes.Split(tx, []byte("="))
if len(parts) != 2 {
return 1
}
return 0
key, value := parts[0], parts[1]
// check if the same key=value already exists
err := app.db.View(func(txn *badger.Txn) error {
item, err := txn.Get(key)
if err != nil && err != badger.ErrKeyNotFound {
return err
}
if err == nil {
return item.Value(func(val []byte) error {
if bytes.Equal(val, value) {
code = 2
}
return nil
})
}
return nil
})
if err != nil {
panic(err)
}
return code
}
```
Now you can rewrite the `CheckTx` method to use the helper function:
```go
func (app *KVStoreApplication) CheckTx(req abcitypes.RequestCheckTx) abcitypes.ResponseCheckTx {
code := app.isValid(req.Tx)
return abcitypes.ResponseCheckTx{Code: code}
return abcitypes.ResponseCheckTx{Code: code, GasWanted: 1}
}
```
While this `CheckTx` is simple and only validates that the transaction is well-formed,
it is very common for `CheckTx` to make more complex use of the state of an application.
For example, you may refuse to overwrite an existing value, or you can associate
versions to the key/value pairs and allow the caller to specify a version to
perform a conditional update.
Don't worry if this does not compile yet.
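As a purely illustrative sketch (not part of the kvstore we are building), a `CheckTx` that refuses to overwrite an existing key could look like this; it assumes the `app.db` Badger handle and the imports introduced earlier:
```go
// Hypothetical variant of CheckTx: reject any transaction that would
// overwrite a key already present in the store.
func (app *KVStoreApplication) CheckTx(req abcitypes.RequestCheckTx) abcitypes.ResponseCheckTx {
	parts := bytes.SplitN(req.Tx, []byte("="), 2)
	if len(parts) != 2 {
		return abcitypes.ResponseCheckTx{Code: 1} // malformed transaction
	}
	err := app.db.View(func(txn *badger.Txn) error {
		_, err := txn.Get(parts[0])
		return err
	})
	switch err {
	case badger.ErrKeyNotFound:
		return abcitypes.ResponseCheckTx{Code: 0} // key is new, accept
	case nil:
		return abcitypes.ResponseCheckTx{Code: 3} // key exists, refuse the overwrite
	default:
		panic(err) // unexpected database error, no safe way to continue
	}
}
```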
Depending on the checks and on the conditions violated, the function may return
different values, but any response with a non-zero code will be considered invalid
by Tendermint. Our `CheckTx` logic returns 0 to Tendermint when a transaction passes
its validation checks. The specific value of the code is meaningless to Tendermint.
Non-zero codes are logged by Tendermint so applications can provide more specific
information on why the transaction was rejected.
If the transaction does not have the form `{bytes}={bytes}`, we return code `1`.
When the same key=value pair already exists (same key and value), we return code
`2`. Otherwise, we return a zero code indicating that the transaction is valid.
Note that `CheckTx` does not execute the transaction; it only verifies that the transaction could be executed. We do not know yet whether the rest of the network has agreed to accept this transaction into a block.
Note that anything with non-zero code will be considered invalid (`-1`, `100`,
etc.) by Tendermint Core.
Valid transactions will eventually be committed given they are not too big and
have enough gas. To learn more about gas, check out ["the
specification"](https://github.com/tendermint/tendermint/blob/main/spec/abci/abci++_app_requirements.md#gas).
Finally, make sure to add the bytes package to the `import` stanza at the top of `app.go`:
For the underlying key-value store we'll use
[badger](https://github.com/dgraph-io/badger), which is an embeddable,
persistent and fast key-value (KV) database.
```go
import(
"bytes"
import "github.com/dgraph-io/badger"
"github.com/dgraph-io/badger/v3"
abcitypes "github.com/tendermint/tendermint/abci/types"
)
type KVStoreApplication struct {
db *badger.DB
currentBatch *badger.Txn
}
func NewKVStoreApplication(db *badger.DB) *KVStoreApplication {
return &KVStoreApplication{
db: db,
}
}
```
### 1.3.2 BeginBlock -> DeliverTx -> EndBlock -> Commit
### 1.3.3 BeginBlock -> DeliverTx -> EndBlock -> Commit
When the Tendermint consensus engine has decided on the block, the block is transferred to the
application over three ABCI method calls: `BeginBlock`, `DeliverTx`, and `EndBlock`.
- `BeginBlock` is called once to indicate to the application that it is about to
receive a block.
- `DeliverTx` is called repeatedly, once for each application transaction that was included in the block.
- `EndBlock` is called once to indicate to the application that no more transactions
will be delivered to the application within this block.
Note that, to implement these calls in our application we're going to make use of Badger's
transaction mechanism. We will always refer to these as Badger transactions, not to
confuse them with the transactions included in the blocks delivered by Tendermint,
the _application transactions_.
First, let's create a new Badger transaction during `BeginBlock`. All application transactions in the
current block will be executed within this Badger transaction.
Then, return informing Tendermint that the application is ready to receive application transactions:
When Tendermint Core has decided on the block, it is transferred to the
application in 3 parts: `BeginBlock`, one `DeliverTx` per transaction, and
`EndBlock` at the end. The `DeliverTx` calls are transmitted asynchronously, but
the responses are expected to come back in order.
```go
func (app *KVStoreApplication) BeginBlock(req abcitypes.RequestBeginBlock) abcitypes.ResponseBeginBlock {
app.onGoingBlock = app.db.NewTransaction(true)
app.currentBatch = app.db.NewTransaction(true)
return abcitypes.ResponseBeginBlock{}
}
```
Next, let's modify `DeliverTx` to add the `key` and `value` to the database transaction every time our application
receives a new application transaction through `RequestDeliverTx`.
Here we create a batch, which will store block's transactions.
```go
func (app *KVStoreApplication) DeliverTx(req abcitypes.RequestDeliverTx) abcitypes.ResponseDeliverTx {
if code := app.isValid(req.Tx); code != 0 {
code := app.isValid(req.Tx)
if code != 0 {
return abcitypes.ResponseDeliverTx{Code: code}
}
parts := bytes.SplitN(req.Tx, []byte("="), 2)
parts := bytes.Split(req.Tx, []byte("="))
key, value := parts[0], parts[1]
if err := app.onGoingBlock.Set(key, value); err != nil {
log.Panicf("Error writing to database, unable to execute tx: %v", err)
err := app.currentBatch.Set(key, value)
if err != nil {
panic(err)
}
return abcitypes.ResponseDeliverTx{Code: 0}
}
```
Note that we check the validity of the transaction _again_ during `DeliverTx`.
Transactions are not guaranteed to be valid when they are delivered to an
application, even if they were valid when they were proposed.
This can happen if the application state is used to determine transaction
validity. Application state may have changed between the initial execution of `CheckTx`
and the transaction delivery in `DeliverTx` in a way that rendered the transaction
no longer valid.
If the transaction is badly formatted or the same key=value already exist, we
again return the non-zero code. Otherwise, we add it to the current batch.
`EndBlock` is called to inform the application that the full block has been delivered
and give the application a chance to perform any other computation needed, before the
effects of the transactions become permanent.
In the current design, a block can include invalid transactions (those that
passed CheckTx but failed DeliverTx, or transactions included by the proposer
directly). This is done for performance reasons.
Note that `EndBlock` **cannot** yet commit the Badger transaction we were building
in during `DeliverTx`.
Since other methods, such as `Query`, rely on a consistent view of the application's
state, the application should only update its state by committing the Badger transactions
when the full block has been delivered and the `Commit` method is invoked.
Note that we can't commit transactions inside `DeliverTx` because, in that case,
`Query`, which may be called in parallel, would return inconsistent data (i.e.
it would report that some value already exists even though the actual block has
not yet been committed).
The `Commit` method tells the application to make permanent the effects of
the application transactions.
Let's update the method to terminate the pending Badger transaction and
persist the resulting state:
`Commit` instructs the application to persist the new state.
```go
func (app *KVStoreApplication) Commit() abcitypes.ResponseCommit {
if err := app.onGoingBlock.Commit(); err != nil {
log.Panicf("Error writing to database, unable to commit block: %v", err)
}
app.currentBatch.Commit()
return abcitypes.ResponseCommit{Data: []byte{}}
}
```
Finally, make sure to add the log library to the `import` stanza as well:
### 1.3.3 Query
Now, when the client wants to know whether a particular key/value pair exists, it
will call the Tendermint Core RPC `/abci_query` endpoint, which in turn will call
the application's `Query` method.
Applications are free to provide their own APIs. But by using Tendermint Core
as a proxy, clients (including [light client
package](https://godoc.org/github.com/tendermint/tendermint/light)) can leverage
the unified API across different applications. Plus they won't have to call the
otherwise separate Tendermint Core API for additional proofs.
Note we don't include a proof here.
```go
import (
"bytes"
"log"
"github.com/dgraph-io/badger/v3"
abcitypes "github.com/tendermint/tendermint/abci/types"
)
```
You may have noticed that the application we are writing will crash if it receives
an unexpected error from the Badger database during the `DeliverTx` or `Commit` methods.
This is not an accident. If the application receives an error from the database, there
is no deterministic way for it to make progress, so the only safe option is to terminate.
### 1.3.4 Query
When a client tries to read some information from the `kvstore`, the request will be
handled in the `Query` method. To do this, let's rewrite the `Query` method in `app.go`:
```go
func (app *KVStoreApplication) Query(req abcitypes.RequestQuery) abcitypes.ResponseQuery {
resp := abcitypes.ResponseQuery{Key: req.Data}
dbErr := app.db.View(func(txn *badger.Txn) error {
item, err := txn.Get(req.Data)
if err != nil {
if err != badger.ErrKeyNotFound {
return err
}
resp.Log = "key does not exist"
return nil
func (app *KVStoreApplication) Query(reqQuery abcitypes.RequestQuery) (resQuery abcitypes.ResponseQuery) {
resQuery.Key = reqQuery.Data
err := app.db.View(func(txn *badger.Txn) error {
item, err := txn.Get(reqQuery.Data)
if err != nil && err != badger.ErrKeyNotFound {
return err
}
return item.Value(func(val []byte) error {
resp.Log = "exists"
resp.Value = val
return nil
})
if err == badger.ErrKeyNotFound {
resQuery.Log = "does not exist"
} else {
return item.Value(func(val []byte) error {
resQuery.Log = "exists"
resQuery.Value = val
return nil
})
}
return nil
})
if dbErr != nil {
log.Panicf("Error reading database, unable to execute query: %v", dbErr)
if err != nil {
panic(err)
}
return resp
return
}
```
Since it reads only committed data from the store, transactions that are part of a block
that is being processed are not reflected in the query result.
The complete specification can be found
[here](https://github.com/tendermint/tendermint/tree/main/spec/abci/).
### 1.3.5 PrepareProposal and ProcessProposal
`PrepareProposal` and `ProcessProposal` are methods introduced in Tendermint v0.37.0
to give the application more control over the construction and processing of transaction blocks.
## 1.4 Starting an application and a Tendermint Core instances
When Tendermint Core sees that valid transactions (validated through `CheckTx`) are available to be
included in blocks, it groups some of these transactions and then gives the application a chance
to modify the group by invoking `PrepareProposal`.
The application is free to modify the group before returning from the call, as long as the resulting set
does not use more bytes than `RequestPrepareProposal.max_tx_bytes`.
For example, the application may reorder, add, or even remove transactions from the group to improve the
execution of the block once accepted.
In the following code, the application simply returns the unmodified group of transactions:
```go
func (app *KVStoreApplication) PrepareProposal(proposal abcitypes.RequestPrepareProposal) abcitypes.ResponsePrepareProposal {
return abcitypes.ResponsePrepareProposal{Txs: proposal.Txs}
}
```
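As a purely illustrative variation (not part of this guide's kvstore), an application could enforce the byte limit itself, for instance by keeping transactions in their original order and dropping the ones that would exceed the limit; the sketch below assumes the v0.37 generated field name `MaxTxBytes`:
```go
// Hypothetical PrepareProposal: keep transactions in order, but stop
// adding them once the requested byte budget would be exceeded.
func (app *KVStoreApplication) PrepareProposal(proposal abcitypes.RequestPrepareProposal) abcitypes.ResponsePrepareProposal {
	totalBytes := int64(0)
	txs := make([][]byte, 0, len(proposal.Txs))
	for _, tx := range proposal.Txs {
		totalBytes += int64(len(tx))
		if totalBytes > proposal.MaxTxBytes {
			break
		}
		txs = append(txs, tx)
	}
	return abcitypes.ResponsePrepareProposal{Txs: txs}
}
```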
Once a proposed block is received by a node, the proposal is passed to the application to give
its blessing before voting to accept the proposal.
This mechanism may be used for different reasons, for example to deal with blocks manipulated
by malicious nodes, in which case the block should not be considered valid.
The following code simply accepts all proposals:
```go
func (app *KVStoreApplication) ProcessProposal(proposal abcitypes.RequestProcessProposal) abcitypes.ResponseProcessProposal {
return abcitypes.ResponseProcessProposal{Status: abcitypes.ResponseProcessProposal_ACCEPT}
}
```
## 1.4 Starting an application and a Tendermint Core instance
Now that we have the basic functionality of our application in place, let's put it all together inside of our `main.go` file.
Change the contents of your `main.go` file to the following.
Put the following code into the "main.go" file:
```go
package main
@@ -497,49 +347,37 @@ package main
import (
"flag"
"fmt"
abciserver "github.com/tendermint/tendermint/abci/server"
"log"
"os"
"os/signal"
"path/filepath"
"syscall"
"github.com/dgraph-io/badger/v3"
tmlog "github.com/tendermint/tendermint/libs/log"
"github.com/dgraph-io/badger"
abciserver "github.com/tendermint/tendermint/abci/server"
"github.com/tendermint/tendermint/libs/log"
)
var homeDir string
var socketAddr string
func init() {
flag.StringVar(&homeDir, "kv-home", "", "Path to the kvstore directory (if empty, uses $HOME/.kvstore)")
flag.StringVar(&socketAddr, "socket-addr", "unix://example.sock", "Unix domain socket address (if empty, uses \"unix://example.sock\")")
flag.StringVar(&socketAddr, "socket-addr", "unix://example.sock", "Unix domain socket address")
}
func main() {
flag.Parse()
if homeDir == "" {
homeDir = os.ExpandEnv("$HOME/.kvstore")
}
dbPath := filepath.Join(homeDir, "badger")
db, err := badger.Open(badger.DefaultOptions(dbPath))
db, err := badger.Open(badger.DefaultOptions("/tmp/badger"))
if err != nil {
log.Fatalf("Opening database: %v", err)
fmt.Fprintf(os.Stderr, "failed to open badger db: %v", err)
os.Exit(1)
}
defer func() {
if err := db.Close(); err != nil {
log.Fatalf("Closing database: %v", err)
}
}()
defer db.Close()
app := NewKVStoreApplication(db)
logger := tmlog.NewTMLogger(tmlog.NewSyncWriter(os.Stdout))
flag.Parse()
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
server := abciserver.NewSocketServer(socketAddr, app)
server.SetLogger(logger)
if err := server.Start(); err != nil {
fmt.Fprintf(os.Stderr, "error starting socket server: %v", err)
os.Exit(1)
@@ -549,6 +387,7 @@ func main() {
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
<-c
os.Exit(0)
}
```
@@ -557,18 +396,20 @@ This is a huge blob of code, so let's break it down into pieces.
First, we initialize the Badger database and create an app instance:
```go
dbPath := filepath.Join(homeDir, "badger")
db, err := badger.Open(badger.DefaultOptions(dbPath))
if err != nil {
log.Fatalf("Opening database: %v", err)
}
defer func() {
if err := db.Close(); err != nil {
log.Fatalf("Closing database: %v", err)
}
}()
db, err := badger.Open(badger.DefaultOptions("/tmp/badger"))
if err != nil {
fmt.Fprintf(os.Stderr, "failed to open badger db: %v", err)
os.Exit(1)
}
defer db.Close()
app := NewKVStoreApplication(db)
```
app := NewKVStoreApplication(db)
For **Windows** users, restarting this app will make badger throw an error as it requires value log to be truncated. For more information on this, visit [here](https://github.com/dgraph-io/badger/issues/744).
This can be avoided by setting the truncate option to true, like this:
```go
db, err := badger.Open(badger.DefaultOptions("/tmp/badger").WithTruncate(true))
```
Then we start the ABCI server and add some signal handling to gracefully stop
@@ -576,132 +417,144 @@ it upon receiving SIGTERM or Ctrl-C. Tendermint Core will act as a client,
which connects to our server and sends us transactions and other messages.
```go
server := abciserver.NewSocketServer(socketAddr, app)
server.SetLogger(logger)
server := abciserver.NewSocketServer(socketAddr, app)
server.SetLogger(logger)
if err := server.Start(); err != nil {
fmt.Fprintf(os.Stderr, "error starting socket server: %v", err)
os.Exit(1)
}
defer server.Stop()
if err := server.Start(); err != nil {
fmt.Fprintf(os.Stderr, "error starting socket server: %v", err)
os.Exit(1)
}
defer server.Stop()
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
<-c
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
<-c
os.Exit(0)
```
## 1.5 Initializing and Running
Our application is almost ready to run, but first we'll need to populate the Tendermint Core configuration files.
The following command will create a `tendermint-home` directory in your project and add a basic set of configuration files in `tendermint-home/config/`.
For more information on what these files contain see [the configuration documentation](https://github.com/tendermint/tendermint/blob/v0.37.0/docs/nodes/configuration.md).
## 1.5 Getting Up and Running
From the root of your project, run:
We are going to use [Go modules](https://github.com/golang/go/wiki/Modules) for
dependency management.
```bash
go run github.com/tendermint/tendermint/cmd/tendermint@v0.37.0 init --home /tmp/tendermint-home
go mod init github.com/me/example
go get github.com/tendermint/tendermint/@v0.34.0
```
You should see an output similar to the following:
After running the above commands you will see two generated files, go.mod and go.sum. The go.mod file should look similar to:
```bash
I[2022-11-09|09:06:34.444] Generated private validator module=main keyFile=/tmp/tendermint-home/config/priv_validator_key.json stateFile=/tmp/tendermint-home/data/priv_validator_state.json
I[2022-11-09|09:06:34.444] Generated node key module=main path=/tmp/tendermint-home/config/node_key.json
I[2022-11-09|09:06:34.444] Generated genesis file module=main path=/tmp/tendermint-home/config/genesis.json
```go
module github.com/me/example
go 1.15
require (
github.com/dgraph-io/badger v1.6.2
github.com/tendermint/tendermint v0.34.0
)
```
Now rebuild the app:
```bash
go build -mod=mod # use -mod=mod to automatically refresh the dependencies
```
Everything is now in place to run your application. Run:
```bash
./kvstore -kv-home /tmp/badger-home
```
The application will start and you should see an output similar to the following:
```bash
badger 2022/11/09 17:01:28 INFO: All 0 tables opened in 0s
badger 2022/11/09 17:01:28 INFO: Discard stats nextEmptySlot: 0
badger 2022/11/09 17:01:28 INFO: Set nextTxnTs to 0
I[2022-11-09|17:01:28.726] service start msg="Starting ABCIServer service" impl=ABCIServer
I[2022-11-09|17:01:28.726] Waiting for new connection...
```
Then we need to start Tendermint Core service and point it to our application.
Open a new terminal window and cd to the same folder where the app is running.
Then execute the following command:
```bash
go run github.com/tendermint/tendermint/cmd/tendermint@v0.37.0 node --home /tmp/tendermint-home --proxy_app=unix://example.sock
```
This should start the full node and connect to our ABCI application, which will be
reflected in the application output.
Finally, we will build our binary:
```sh
I[2022-11-09|17:07:08.124] service start msg="Starting ABCIServer service" impl=ABCIServer
I[2022-11-09|17:07:08.124] Waiting for new connection...
I[2022-11-09|17:08:12.702] Accepted a new connection
I[2022-11-09|17:08:12.703] Waiting for new connection...
I[2022-11-09|17:08:12.703] Accepted a new connection
I[2022-11-09|17:08:12.703] Waiting for new connection...
go build
```
Also, the application, now driven by Tendermint Core, is producing blocks 🎉🎉, and you can see this reflected in the service's log output in lines like these:
To create a default configuration, nodeKey and private validator files, let's
execute `tendermint init`. But before we do that, we will need to install
Tendermint Core. Please refer to [the official
guide](https://docs.tendermint.com/main/introduction/install.html). If you're
installing from source, don't forget to checkout the latest release (`git checkout vX.Y.Z`).
```bash
I[2022-11-09|09:08:52.147] received proposal module=consensus proposal="Proposal{2/0 (F518444C0E348270436A73FD0F0B9DFEA758286BEB29482F1E3BEA75330E825C:1:C73D3D1273F2, -1) AD19AE292A45 @ 2022-11-09T12:08:52.143393Z}"
I[2022-11-09|09:08:52.152] received complete proposal block module=consensus height=2 hash=F518444C0E348270436A73FD0F0B9DFEA758286BEB29482F1E3BEA75330E825C
I[2022-11-09|09:08:52.160] finalizing commit of block module=consensus height=2 hash=F518444C0E348270436A73FD0F0B9DFEA758286BEB29482F1E3BEA75330E825C root= num_txs=0
I[2022-11-09|09:08:52.167] executed block module=state height=2 num_valid_txs=0 num_invalid_txs=0
I[2022-11-09|09:08:52.171] committed state module=state height=2 num_txs=0 app_hash=
rm -rf /tmp/example
TMHOME="/tmp/example" tendermint init
I[2019-07-16|18:20:36.480] Generated private validator module=main keyFile=/tmp/example/config/priv_validator_key.json stateFile=/tmp/example2/data/priv_validator_state.json
I[2019-07-16|18:20:36.481] Generated node key module=main path=/tmp/example/config/node_key.json
I[2019-07-16|18:20:36.482] Generated genesis file module=main path=/tmp/example/config/genesis.json
```
The blocks, as you can see from the `num_valid_txs=0` part, are empty, but let's remedy that next.
## 1.6 Using the application
Let's try submitting a transaction to our new application.
Open another terminal window and run the following curl command:
Feel free to explore the generated files, which can be found at
`/tmp/example/config` directory. Documentation on the config can be found
[here](https://docs.tendermint.com/main/tendermint-core/configuration.html).
We are ready to start our application:
```bash
curl -s 'localhost:26657/broadcast_tx_commit?tx="tendermint=rocks"'
```
If everything went well, you should see a response indicating which height the
transaction was included in the blockchain.
rm example.sock
./example
Finally, let's make sure that transaction really was persisted by the application.
Run the following command:
badger 2019/07/16 18:25:11 INFO: All 0 tables opened in 0s
badger 2019/07/16 18:25:11 INFO: Replaying file id: 0 at offset: 0
badger 2019/07/16 18:25:11 INFO: Replay took: 300.4s
I[2019-07-16|18:25:11.523] Starting ABCIServer impl=ABCIServ
```
Then we need to start Tendermint Core and point it to our application. Staying
within the application directory execute:
```bash
curl -s 'localhost:26657/abci_query?data="tendermint"'
TMHOME="/tmp/example" tendermint node --proxy_app=unix://example.sock
I[2019-07-16|18:26:20.362] Version info module=main software=0.32.1 block=10 p2p=7
I[2019-07-16|18:26:20.383] Starting Node module=main impl=Node
E[2019-07-16|18:26:20.392] Couldn't connect to any seeds module=p2p
I[2019-07-16|18:26:20.394] Started node module=main nodeInfo="{ProtocolVersion:{P2P:7 Block:10 App:0} ID_:8dab80770ae8e295d4ce905d86af78c4ff634b79 ListenAddr:tcp://0.0.0.0:26656 Network:test-chain-nIO96P Version:0.32.1 Channels:4020212223303800 Moniker:app48.fun-box.ru Other:{TxIndex:on RPCAddress:tcp://127.0.0.1:26657}}"
I[2019-07-16|18:26:21.440] Executed block module=state height=1 validTxs=0 invalidTxs=0
I[2019-07-16|18:26:21.446] Committed state module=state height=1 txs=0 appHash=
```
Let's examine the response object that this request returns.
The request returns a `json` object with a `key` and `value` field set.
This should start the full node and connect to our ABCI application.
```sh
I[2019-07-16|18:25:11.525] Waiting for new connection...
I[2019-07-16|18:26:20.329] Accepted a new connection
I[2019-07-16|18:26:20.329] Waiting for new connection...
I[2019-07-16|18:26:20.330] Accepted a new connection
I[2019-07-16|18:26:20.330] Waiting for new connection...
I[2019-07-16|18:26:20.330] Accepted a new connection
```
Now open another tab in your terminal and try sending a transaction:
```json
...
"key": "dGVuZGVybWludA==",
"value": "cm9ja3M=",
...
curl -s 'localhost:26657/broadcast_tx_commit?tx="tendermint=rocks"'
{
"jsonrpc": "2.0",
"id": "",
"result": {
"check_tx": {
"gasWanted": "1"
},
"deliver_tx": {},
"hash": "CDD3C6DFA0A08CAEDF546F9938A2EEC232209C24AA0E4201194E0AFB78A2C2BB",
"height": "33"
}
```
Those values don't look like the `key` and `value` we sent to Tendermint.
What's going on here?
Response should contain the height where this transaction was committed.
The response contains a `base64` encoded representation of the data we submitted.
To get the original value out of this data, we can use the `base64` command line utility:
Now let's check whether the given key exists and what its value is:
```bash
echo "cm9ja3M=" | base64 -d
```json
curl -s 'localhost:26657/abci_query?data="tendermint"'
{
"jsonrpc": "2.0",
"id": "",
"result": {
"response": {
"log": "exists",
"key": "dGVuZGVybWludA==",
"value": "cm9ja3My"
}
}
}
```
"dGVuZGVybWludA==" and "cm9ja3M=" are the base64-encoding of the ASCII of
"tendermint" and "rocks" accordingly.
## Outro
I hope everything went smoothly and your first, but hopefully not the last,

View File

@@ -5,7 +5,6 @@ import (
"time"
"github.com/cosmos/gogoproto/proto"
clist "github.com/tendermint/tendermint/libs/clist"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/p2p"

32
go.mod
View File

@@ -21,7 +21,7 @@ require (
github.com/ory/dockertest v3.3.5+incompatible
github.com/pkg/errors v0.9.1
github.com/pointlander/peg v1.0.1
github.com/prometheus/client_golang v1.14.0
github.com/prometheus/client_golang v1.13.0
github.com/prometheus/client_model v0.3.0
github.com/prometheus/common v0.37.0
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475
@@ -29,12 +29,12 @@ require (
github.com/sasha-s/go-deadlock v0.3.1
github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa
github.com/spf13/cobra v1.6.1
github.com/spf13/viper v1.14.0
github.com/spf13/viper v1.13.0
github.com/stretchr/testify v1.8.1
github.com/tendermint/tm-db v0.6.6
golang.org/x/crypto v0.3.0
golang.org/x/net v0.2.0
google.golang.org/grpc v1.51.0
golang.org/x/crypto v0.1.0
golang.org/x/net v0.1.0
google.golang.org/grpc v1.50.1
)
require (
@@ -44,13 +44,13 @@ require (
)
require (
github.com/btcsuite/btcd/btcec/v2 v2.3.2
github.com/btcsuite/btcd/btcutil v1.1.3
github.com/cosmos/gogoproto v1.4.3
github.com/gofrs/uuid v4.3.1+incompatible
github.com/btcsuite/btcd/btcec/v2 v2.3.0
github.com/btcsuite/btcd/btcutil v1.1.2
github.com/cosmos/gogoproto v1.4.2
github.com/gofrs/uuid v4.3.0+incompatible
github.com/google/uuid v1.3.0
github.com/oasisprotocol/curve25519-voi v0.0.0-20220708102147-0a8a51822cae
github.com/vektra/mockery/v2 v2.15.0
github.com/vektra/mockery/v2 v2.14.1
gonum.org/v1/gonum v0.12.0
google.golang.org/protobuf v1.28.2-0.20220831092852-f930b1dc76e8
)
@@ -107,7 +107,7 @@ require (
github.com/fatih/color v1.13.0 // indirect
github.com/fatih/structtag v1.2.0 // indirect
github.com/firefart/nonamedreturns v1.0.4 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/fsnotify/fsnotify v1.5.4 // indirect
github.com/fzipp/gocyclo v0.6.0 // indirect
github.com/go-chi/chi/v5 v5.0.7 // indirect
github.com/go-critic/go-critic v0.6.5 // indirect
@@ -222,7 +222,7 @@ require (
github.com/sivchari/tenv v1.7.0 // indirect
github.com/sonatard/noctx v0.0.1 // indirect
github.com/sourcegraph/go-diff v0.6.1 // indirect
github.com/spf13/afero v1.9.2 // indirect
github.com/spf13/afero v1.8.2 // indirect
github.com/spf13/cast v1.5.0 // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
@@ -255,12 +255,12 @@ require (
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect
golang.org/x/exp/typeparams v0.0.0-20220827204233-334a2380cb91 // indirect
golang.org/x/mod v0.6.0 // indirect
golang.org/x/sync v0.1.0 // indirect
golang.org/x/sys v0.2.0 // indirect
golang.org/x/term v0.2.0 // indirect
golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0 // indirect
golang.org/x/sys v0.1.0 // indirect
golang.org/x/term v0.1.0 // indirect
golang.org/x/text v0.4.0 // indirect
golang.org/x/tools v0.2.0 // indirect
google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e // indirect
google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect

76
go.sum
View File

@@ -23,15 +23,14 @@ cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPT
cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
cloud.google.com/go v0.104.0 h1:gSmWO7DY1vOm0MVU6DNXM11BWHHsTUmsC5cv1fuW5X8=
cloud.google.com/go v0.100.2 h1:t9Iw5QH5v4XtlEQaCtUY7x6sCABps8sW0acw7e2WQ6Y=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/compute v1.12.1 h1:gKVJMEyqV5c/UnpzjjQbo3Rjvvqpr9B1DFSbJC4OXr0=
cloud.google.com/go/compute/metadata v0.2.1 h1:efOwf5ymceDhK6PKMnnrTHP4pppY5L22mle96M1yP48=
cloud.google.com/go/compute v1.6.1 h1:2sMmt8prCn7DPaG4Pmh0N3Inmc8cT8ae5k1M6VJ9Wqc=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
@@ -152,12 +151,12 @@ github.com/btcsuite/btcd v0.23.0 h1:V2/ZgjfDFIygAX3ZapeigkVBoVUtOJKSwrhZdlpSvaA=
github.com/btcsuite/btcd v0.23.0/go.mod h1:0QJIIN1wwIXF/3G/m87gIwGniDMDQqjVn4SZgnFpsYY=
github.com/btcsuite/btcd/btcec/v2 v2.1.0/go.mod h1:2VzYrv4Gm4apmbVVsSq5bqf1Ec8v56E48Vt0Y/umPgA=
github.com/btcsuite/btcd/btcec/v2 v2.1.3/go.mod h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE=
github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U=
github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04=
github.com/btcsuite/btcd/btcec/v2 v2.3.0 h1:S/6K1GEwlEsFzZP4cOOl5mg6PEd/pr0zz7hvXcaxhJ4=
github.com/btcsuite/btcd/btcec/v2 v2.3.0/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04=
github.com/btcsuite/btcd/btcutil v1.0.0/go.mod h1:Uoxwv0pqYWhD//tfTiipkxNfdhG9UrLwaeswfjfdF0A=
github.com/btcsuite/btcd/btcutil v1.1.0/go.mod h1:5OapHB7A2hBBWLm48mmw4MOHNJCcUBTwmWH/0Jn8VHE=
github.com/btcsuite/btcd/btcutil v1.1.3 h1:xfbtw8lwpp0G6NwSHb+UE67ryTFHJAiNuipusjXSohQ=
github.com/btcsuite/btcd/btcutil v1.1.3/go.mod h1:UR7dsSJzJUfMmFiiLlIrMq1lS9jh9EdCV7FStZSnpi0=
github.com/btcsuite/btcd/btcutil v1.1.2 h1:XLMbX8JQEiwMcYft2EGi8zPUkoa0abKIU6/BJSRsjzQ=
github.com/btcsuite/btcd/btcutil v1.1.2/go.mod h1:UR7dsSJzJUfMmFiiLlIrMq1lS9jh9EdCV7FStZSnpi0=
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U=
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
@@ -241,8 +240,8 @@ github.com/coreos/go-systemd/v22 v22.3.3-0.20220203105225-a9a7ef127534/go.mod h1
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cosmos/go-bip39 v0.0.0-20180819234021-555e2067c45d/go.mod h1:tSxLoYXyBmiFeKpvmq4dzayMdCjCnu8uqmCysIGBT2Y=
github.com/cosmos/gogoproto v1.4.3 h1:RP3yyVREh9snv/lsOvmsAPQt8f44LgL281X0IOIhhcI=
github.com/cosmos/gogoproto v1.4.3/go.mod h1:0hLIG5TR7IvV1fme1HCFKjfzW9X2x0Mo+RooWXCnOWU=
github.com/cosmos/gogoproto v1.4.2 h1:UeGRcmFW41l0G0MiefWhkPEVEwvu78SZsHBvI78dAYw=
github.com/cosmos/gogoproto v1.4.2/go.mod h1:cLxOsn1ljAHSV527CHOtaIP91kK6cCrZETRBrkzItWU=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
@@ -337,8 +336,8 @@ github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM
github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI=
github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo=
github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
@@ -399,8 +398,8 @@ github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5x
github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
github.com/gofrs/uuid v4.3.1+incompatible h1:0/KbAdpx3UXAx1kEOWHJeOkpbgRFGHVgv+CFIY7dBJI=
github.com/gofrs/uuid v4.3.1+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gofrs/uuid v4.3.0+incompatible h1:CaSVZxm5B+7o45rtab4jC2G37WGYX1zQfuU2i6DSvnc=
github.com/gofrs/uuid v4.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
@@ -913,8 +912,8 @@ github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP
github.com/prometheus/client_golang v1.8.0/go.mod h1:O9VU6huf47PktckDQfMTX0Y8tY0/7TSWwj+ITvv0TnM=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU=
github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -1035,8 +1034,8 @@ github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0b
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
github.com/spf13/afero v1.9.2 h1:j49Hj62F0n+DaZ1dDCvhABaPNSGNkt32oRFxI33IEMw=
github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y=
github.com/spf13/afero v1.8.2 h1:xehSyVa0YnHWsJ49JFljMpg1HX19V6NDZ1fkm1Xznbo=
github.com/spf13/afero v1.8.2/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=
@@ -1060,8 +1059,8 @@ github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/y
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
github.com/spf13/viper v1.14.0 h1:Rg7d3Lo706X9tHsJMUjdiwMpHB7W8WnSVOssIY+JElU=
github.com/spf13/viper v1.14.0/go.mod h1:WT//axPky3FdvXHzGw33dNdXXXfFQqmEalje+egj8As=
github.com/spf13/viper v1.13.0 h1:BWSJ/M+f+3nmdz9bxB+bWX28kkALN2ok11D0rSo8EJU=
github.com/spf13/viper v1.13.0/go.mod h1:Icm2xNL3/8uyh/wFuB1jI7TiTNKp8632Nwegu+zgdYw=
github.com/ssgreg/nlreturn/v2 v2.2.1 h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YEwQ0=
github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I=
github.com/stbenjam/no-sprintf-host-port v0.1.1 h1:tYugd/yrm1O0dV+ThCbaKZh195Dfm07ysF0U6JQXczc=
@@ -1129,8 +1128,8 @@ github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijb
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/uudashr/gocognit v1.0.6 h1:2Cgi6MweCsdB6kpcVQp7EW4U23iBFQWfTXiWlyp842Y=
github.com/uudashr/gocognit v1.0.6/go.mod h1:nAIUuVBnYU7pcninia3BHOvQkpQCeO76Uscky5BOwcY=
github.com/vektra/mockery/v2 v2.15.0 h1:5Egbxoancm1hhkJUoAF+cf0FBzC9oxS28LL/ZKbC980=
github.com/vektra/mockery/v2 v2.15.0/go.mod h1:RswGtsqDbCR9j4UcgBQuAZY7OFxI+TgtHevc0gR0kCY=
github.com/vektra/mockery/v2 v2.14.1 h1:Xamr4zUkFBDGdZhJ6iCiJ1AwkGRmUgZd8zkwjRXt+TU=
github.com/vektra/mockery/v2 v2.14.1/go.mod h1:bnD1T8tExSgPD1ripLkDbr60JA9VtQeu12P3wgLZd7M=
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
@@ -1225,8 +1224,8 @@ golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.0.0-20210915214749-c084706c2272/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.3.0 h1:a06MkbcxBrEFc0w0QIZWXrH/9cCX6KJyWbBOIwAn+7A=
golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -1334,8 +1333,8 @@ golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.2.0 h1:sZfSu1wtKLGlWI4ZZayP0ck9Y73K1ynO6gqzTdBVdPU=
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
golang.org/x/net v0.1.0 h1:hZ/3BUoy5aId7sCpA/Tc5lt8DkFgdVS2onTpJsZ/fl0=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -1350,7 +1349,7 @@ golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ
golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 h1:nt+Q6cXKz4MosCSpnbMtqiQ8Oz0pxTef2B4Vca2lvfk=
golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5 h1:OSnWWcOd/CtWQC2cYSBgbTSJv3ciqd8r54ySIW2y3RE=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -1361,11 +1360,10 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0 h1:cu5kTvlzcw1Q5S9f5ip1/cpiB4nXvw1XYzFPGgzLUOY=
golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1460,19 +1458,19 @@ golang.org/x/sys v0.0.0-20211105183446-c75c47738b0c/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220702020025-31831981b65f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0 h1:kunALQeHf1/185U1i0GOB/fy1IPRDDpuoOOqRReG57U=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.2.0 h1:z85xZCsEl7bi/KwbNADeBYoOP0++7W1ipu+aGnpwzRM=
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
golang.org/x/term v0.1.0 h1:g6Z6vPFA9dYBAF7DWcH6sCcOntplXsDKcliusYijMlw=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1489,8 +1487,8 @@ golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxb
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs=
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20220609170525-579cf78fd858 h1:Dpdu/EMxGMFgq0CeYMh4fazTD2vtlZRYE7wyynxJb9U=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -1675,8 +1673,8 @@ google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaE
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20211101144312-62acf1d99145/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e h1:S9GbmC1iCgvbLyAokVCwiO6tVIrU9Y7c5oMx1V/ki/Y=
google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s=
google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a h1:GH6UPn3ixhWcKDhpnEC55S75cerLPdpp3hrhfKYjZgw=
google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
@@ -1706,8 +1704,8 @@ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U=
google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww=
google.golang.org/grpc v1.50.1 h1:DS/BukOZWp8s6p4Dt/tOaJaTQyPyOoCcrjroHuCeLzY=
google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=

View File

@@ -1,36 +0,0 @@
/*
Package inspect provides a tool for investigating the state of a
failed Tendermint node.
This package provides the Inspector type. The Inspector type runs a subset of the Tendermint
RPC endpoints that are useful for debugging issues with Tendermint consensus.
When a node running the Tendermint consensus engine detects an inconsistent consensus state,
the entire node will crash. The Tendermint consensus engine cannot run in this
inconsistent state so the node will not be able to start up again.
The RPC endpoints provided by the Inspector type allow for a node operator to inspect
the block store and state store to better understand what may have caused the inconsistent state.
The Inspector type's lifecycle is controlled by a context.Context
ins := inspect.NewFromConfig(rpcConfig)
ctx, cancelFunc := context.WithCancel(context.Background())
// Run blocks until the Inspector server is shut down.
go ins.Run(ctx)
...
// calling the cancel function will stop the running inspect server
cancelFunc()
Inspector serves its RPC endpoints on the address configured in the RPC configuration
rpcConfig.ListenAddress = "tcp://127.0.0.1:26657"
ins := inspect.NewFromConfig(rpcConfig)
go ins.Run(ctx)
The list of available RPC endpoints can then be viewed by navigating to
http://127.0.0.1:26657/ in the web browser.
*/
package inspect
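For reference, a minimal standalone sketch of driving the package the way the comment above describes, assuming the NewFromConfig and Run signatures shown later in this diff; the root directory is a placeholder:

package main

import (
	"context"
	"log"
	"os"
	"os/signal"

	"github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/inspect"
)

func main() {
	cfg := config.DefaultConfig()
	cfg.SetRoot("/path/to/failed/node/home") // placeholder path to the node's home directory
	ins, err := inspect.NewFromConfig(cfg)
	if err != nil {
		log.Fatal(err)
	}
	// Cancel the context on SIGINT so Run returns and the stores are closed.
	ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt)
	defer cancel()
	if err := ins.Run(ctx); err != nil {
		log.Fatal(err)
	}
}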

View File

@@ -1,138 +0,0 @@
package inspect
import (
"context"
"errors"
"net"
"os"
"github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/inspect/rpc"
"github.com/tendermint/tendermint/libs/log"
tmstrings "github.com/tendermint/tendermint/libs/strings"
rpccore "github.com/tendermint/tendermint/rpc/core"
"github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/state/indexer"
"github.com/tendermint/tendermint/state/indexer/block"
"github.com/tendermint/tendermint/state/txindex"
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
"golang.org/x/sync/errgroup"
)
var (
logger = log.NewTMLogger(log.NewSyncWriter(os.Stdout))
)
// Inspector manages an RPC service that exports methods to debug a failed node.
// After a node shuts down due to a consensus failure, it will no longer start
// up and its state cannot easily be inspected. An Inspector value provides a similar interface
// to the node, using the underlying Tendermint data stores, without bringing up
// any other components. A caller can query the Inspector service to inspect the
// persisted state and debug the failure.
type Inspector struct {
routes rpccore.RoutesMap
config *config.RPCConfig
logger log.Logger
// References to the state store and block store are maintained to enable
// the Inspector to safely close them on shutdown.
ss state.Store
bs state.BlockStore
}
// New returns an Inspector that serves RPC on the specified BlockStore and StateStore.
// The Inspector type does not modify the state or block stores.
// The sinks are used to enable block and transaction querying via the RPC server.
// The caller is responsible for starting and stopping the Inspector service.
//
//nolint:lll
func New(cfg *config.RPCConfig, bs state.BlockStore, ss state.Store, txidx txindex.TxIndexer, blkidx indexer.BlockIndexer, lg log.Logger) *Inspector {
routes := rpc.Routes(*cfg, ss, bs, txidx, blkidx, logger)
eb := types.NewEventBus()
eb.SetLogger(logger.With("module", "events"))
return &Inspector{
routes: routes,
config: cfg,
logger: logger,
ss: ss,
bs: bs,
}
}
// NewFromConfig constructs an Inspector using the values defined in the passed in config.
func NewFromConfig(cfg *config.Config) (*Inspector, error) {
bsDB, err := config.DefaultDBProvider(&config.DBContext{ID: "blockstore", Config: cfg})
if err != nil {
return nil, err
}
bs := store.NewBlockStore(bsDB)
sDB, err := config.DefaultDBProvider(&config.DBContext{ID: "state", Config: cfg})
if err != nil {
return nil, err
}
genDoc, err := types.GenesisDocFromFile(cfg.GenesisFile())
if err != nil {
return nil, err
}
txidx, blkidx, err := block.IndexerFromConfig(cfg, config.DefaultDBProvider, genDoc.ChainID)
if err != nil {
return nil, err
}
lg := logger.With("module", "inspect")
ss := state.NewStore(sDB, state.StoreOptions{})
return New(cfg.RPC, bs, ss, txidx, blkidx, lg), nil
}
// Run starts the Inspector servers and blocks until the servers shut down. The passed
// in context is used to control the lifecycle of the servers.
func (ins *Inspector) Run(ctx context.Context) error {
defer ins.bs.Close()
defer ins.ss.Close()
return startRPCServers(ctx, ins.config, ins.logger, ins.routes)
}
func startRPCServers(ctx context.Context, cfg *config.RPCConfig, logger log.Logger, routes rpccore.RoutesMap) error {
g, tctx := errgroup.WithContext(ctx)
listenAddrs := tmstrings.SplitAndTrimEmpty(cfg.ListenAddress, ",", " ")
rh := rpc.Handler(cfg, routes, logger)
for _, listenerAddr := range listenAddrs {
server := rpc.Server{
Logger: logger,
Config: cfg,
Handler: rh,
Addr: listenerAddr,
}
if cfg.IsTLSEnabled() {
keyFile := cfg.KeyFile()
certFile := cfg.CertFile()
listenerAddr := listenerAddr
g.Go(func() error {
logger.Info("RPC HTTPS server starting", "address", listenerAddr,
"certfile", certFile, "keyfile", keyFile)
err := server.ListenAndServeTLS(tctx, certFile, keyFile)
if !errors.Is(err, net.ErrClosed) {
return err
}
logger.Info("RPC HTTPS server stopped", "address", listenerAddr)
return nil
})
} else {
listenerAddr := listenerAddr
g.Go(func() error {
logger.Info("RPC HTTP server starting", "address", listenerAddr)
err := server.ListenAndServe(tctx)
if !errors.Is(err, net.ErrClosed) {
return err
}
logger.Info("RPC HTTP server stopped", "address", listenerAddr)
return nil
})
}
}
return g.Wait()
}
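The helper above follows the common errgroup-with-context pattern: one goroutine per listen address, all torn down when the shared context ends. A stripped-down, self-contained sketch of that same pattern using plain net/http (hypothetical addresses and handler):

package main

import (
	"context"
	"errors"
	"log"
	"net/http"
	"time"

	"golang.org/x/sync/errgroup"
)

func serveAll(ctx context.Context, addrs []string, h http.Handler) error {
	g, gctx := errgroup.WithContext(ctx)
	for _, addr := range addrs {
		addr := addr // capture the loop variable, as the code above does
		srv := &http.Server{Addr: addr, Handler: h, ReadHeaderTimeout: 5 * time.Second}
		g.Go(func() error {
			go func() {
				<-gctx.Done()
				_ = srv.Close() // unblock ListenAndServe when the shared context ends
			}()
			if err := srv.ListenAndServe(); !errors.Is(err, http.ErrServerClosed) {
				return err
			}
			return nil
		})
	}
	return g.Wait() // first non-nil error, after every server has exited
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	handler := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { _, _ = w.Write([]byte("ok")) })
	if err := serveAll(ctx, []string{"127.0.0.1:8080", "127.0.0.1:8081"}, handler); err != nil {
		log.Fatal(err)
	}
}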

View File

@@ -1,605 +0,0 @@
package inspect_test
import (
"context"
"fmt"
"net"
"os"
"strings"
"sync"
"testing"
"time"
"github.com/fortytw2/leaktest"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
abcitypes "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/inspect"
"github.com/tendermint/tendermint/internal/test"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/pubsub/query"
"github.com/tendermint/tendermint/proto/tendermint/state"
httpclient "github.com/tendermint/tendermint/rpc/client/http"
indexermocks "github.com/tendermint/tendermint/state/indexer/mocks"
statemocks "github.com/tendermint/tendermint/state/mocks"
txindexmocks "github.com/tendermint/tendermint/state/txindex/mocks"
"github.com/tendermint/tendermint/types"
)
func TestInspectConstructor(t *testing.T) {
cfg := test.ResetTestRoot("test")
t.Cleanup(leaktest.Check(t))
defer func() { _ = os.RemoveAll(cfg.RootDir) }()
t.Run("from config", func(t *testing.T) {
d, err := inspect.NewFromConfig(cfg)
require.NoError(t, err)
require.NotNil(t, d)
})
}
func TestInspectRun(t *testing.T) {
cfg := test.ResetTestRoot("test")
t.Cleanup(leaktest.Check(t))
defer func() { _ = os.RemoveAll(cfg.RootDir) }()
t.Run("from config", func(t *testing.T) {
d, err := inspect.NewFromConfig(cfg)
require.NoError(t, err)
ctx, cancel := context.WithCancel(context.Background())
stoppedWG := &sync.WaitGroup{}
stoppedWG.Add(1)
go func() {
require.NoError(t, d.Run(ctx))
stoppedWG.Done()
}()
cancel()
stoppedWG.Wait()
})
}
func TestBlock(t *testing.T) {
testHeight := int64(1)
testBlock := new(types.Block)
testBlock.Header.Height = testHeight
testBlock.Header.LastCommitHash = []byte("test hash")
stateStoreMock := &statemocks.Store{}
stateStoreMock.On("Close").Return(nil)
blockStoreMock := &statemocks.BlockStore{}
blockStoreMock.On("Height").Return(testHeight)
blockStoreMock.On("Base").Return(int64(0))
blockStoreMock.On("LoadBlockMeta", testHeight).Return(&types.BlockMeta{})
blockStoreMock.On("LoadBlock", testHeight).Return(testBlock)
blockStoreMock.On("Close").Return(nil)
txIndexerMock := &txindexmocks.TxIndexer{}
blkIdxMock := &indexermocks.BlockIndexer{}
rpcConfig := config.TestRPCConfig()
l := log.TestingLogger()
d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock, l)
ctx, cancel := context.WithCancel(context.Background())
wg := &sync.WaitGroup{}
wg.Add(1)
startedWG := &sync.WaitGroup{}
startedWG.Add(1)
go func() {
startedWG.Done()
defer wg.Done()
require.NoError(t, d.Run(ctx))
}()
// FIXME: used to induce context switch.
// Determine more deterministic method for prompting a context switch
startedWG.Wait()
requireConnect(t, rpcConfig.ListenAddress, 20)
cli, err := httpclient.New(rpcConfig.ListenAddress, "/websocket")
require.NoError(t, err)
resultBlock, err := cli.Block(context.Background(), &testHeight)
require.NoError(t, err)
require.Equal(t, testBlock.Height, resultBlock.Block.Height)
require.Equal(t, testBlock.LastCommitHash, resultBlock.Block.LastCommitHash)
cancel()
wg.Wait()
blockStoreMock.AssertExpectations(t)
stateStoreMock.AssertExpectations(t)
}
func TestTxSearch(t *testing.T) {
testHash := []byte("test")
testTx := []byte("tx")
testQuery := fmt.Sprintf("tx.hash='%s'", string(testHash))
testTxResult := &abcitypes.TxResult{
Height: 1,
Index: 100,
Tx: testTx,
}
stateStoreMock := &statemocks.Store{}
stateStoreMock.On("Close").Return(nil)
blockStoreMock := &statemocks.BlockStore{}
blockStoreMock.On("Close").Return(nil)
txIndexerMock := &txindexmocks.TxIndexer{}
blkIdxMock := &indexermocks.BlockIndexer{}
txIndexerMock.On("Search", mock.Anything,
mock.MatchedBy(func(q *query.Query) bool {
return testQuery == strings.ReplaceAll(q.String(), " ", "")
})).
Return([]*abcitypes.TxResult{testTxResult}, nil)
rpcConfig := config.TestRPCConfig()
l := log.TestingLogger()
d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock, l)
ctx, cancel := context.WithCancel(context.Background())
wg := &sync.WaitGroup{}
wg.Add(1)
startedWG := &sync.WaitGroup{}
startedWG.Add(1)
go func() {
startedWG.Done()
defer wg.Done()
require.NoError(t, d.Run(ctx))
}()
// FIXME: used to induce context switch.
// Determine more deterministic method for prompting a context switch
startedWG.Wait()
requireConnect(t, rpcConfig.ListenAddress, 20)
cli, err := httpclient.New(rpcConfig.ListenAddress, "/websocket")
require.NoError(t, err)
var page = 1
resultTxSearch, err := cli.TxSearch(context.Background(), testQuery, false, &page, &page, "")
require.NoError(t, err)
require.Len(t, resultTxSearch.Txs, 1)
require.Equal(t, types.Tx(testTx), resultTxSearch.Txs[0].Tx)
cancel()
wg.Wait()
txIndexerMock.AssertExpectations(t)
stateStoreMock.AssertExpectations(t)
blockStoreMock.AssertExpectations(t)
}
func TestTx(t *testing.T) {
testHash := []byte("test")
testTx := []byte("tx")
stateStoreMock := &statemocks.Store{}
stateStoreMock.On("Close").Return(nil)
blockStoreMock := &statemocks.BlockStore{}
blockStoreMock.On("Close").Return(nil)
blkIdxMock := &indexermocks.BlockIndexer{}
txIndexerMock := &txindexmocks.TxIndexer{}
txIndexerMock.On("Get", testHash).Return(&abcitypes.TxResult{
Tx: testTx,
}, nil)
rpcConfig := config.TestRPCConfig()
l := log.TestingLogger()
d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock, l)
ctx, cancel := context.WithCancel(context.Background())
wg := &sync.WaitGroup{}
wg.Add(1)
startedWG := &sync.WaitGroup{}
startedWG.Add(1)
go func() {
startedWG.Done()
defer wg.Done()
require.NoError(t, d.Run(ctx))
}()
// FIXME: used to induce context switch.
// Determine more deterministic method for prompting a context switch
startedWG.Wait()
requireConnect(t, rpcConfig.ListenAddress, 20)
cli, err := httpclient.New(rpcConfig.ListenAddress, "/websocket")
require.NoError(t, err)
res, err := cli.Tx(context.Background(), testHash, false)
require.NoError(t, err)
require.Equal(t, types.Tx(testTx), res.Tx)
cancel()
wg.Wait()
txIndexerMock.AssertExpectations(t)
stateStoreMock.AssertExpectations(t)
blockStoreMock.AssertExpectations(t)
}
func TestConsensusParams(t *testing.T) {
testHeight := int64(1)
testMaxGas := int64(55)
stateStoreMock := &statemocks.Store{}
stateStoreMock.On("Close").Return(nil)
blockStoreMock := &statemocks.BlockStore{}
blockStoreMock.On("Close").Return(nil)
blockStoreMock.On("Height").Return(testHeight)
blockStoreMock.On("Base").Return(int64(0))
stateStoreMock.On("LoadConsensusParams", testHeight).Return(types.ConsensusParams{
Block: types.BlockParams{
MaxGas: testMaxGas,
},
}, nil)
txIndexerMock := &txindexmocks.TxIndexer{}
blkIdxMock := &indexermocks.BlockIndexer{}
rpcConfig := config.TestRPCConfig()
l := log.TestingLogger()
d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock, l)
ctx, cancel := context.WithCancel(context.Background())
wg := &sync.WaitGroup{}
wg.Add(1)
startedWG := &sync.WaitGroup{}
startedWG.Add(1)
go func() {
startedWG.Done()
defer wg.Done()
require.NoError(t, d.Run(ctx))
}()
// FIXME: used to induce context switch.
// Determine more deterministic method for prompting a context switch
startedWG.Wait()
requireConnect(t, rpcConfig.ListenAddress, 20)
cli, err := httpclient.New(rpcConfig.ListenAddress, "/websocket")
require.NoError(t, err)
params, err := cli.ConsensusParams(context.Background(), &testHeight)
require.NoError(t, err)
require.Equal(t, params.ConsensusParams.Block.MaxGas, testMaxGas)
cancel()
wg.Wait()
blockStoreMock.AssertExpectations(t)
stateStoreMock.AssertExpectations(t)
}
func TestBlockResults(t *testing.T) {
testHeight := int64(1)
testGasUsed := int64(100)
stateStoreMock := &statemocks.Store{}
stateStoreMock.On("Close").Return(nil)
// tmstate "github.com/tendermint/tendermint/proto/tendermint/state"
stateStoreMock.On("LoadABCIResponses", testHeight).Return(&state.ABCIResponses{
DeliverTxs: []*abcitypes.ResponseDeliverTx{
{
GasUsed: testGasUsed,
},
},
EndBlock: &abcitypes.ResponseEndBlock{},
BeginBlock: &abcitypes.ResponseBeginBlock{},
}, nil)
blockStoreMock := &statemocks.BlockStore{}
blockStoreMock.On("Close").Return(nil)
blockStoreMock.On("Base").Return(int64(0))
blockStoreMock.On("Height").Return(testHeight)
txIndexerMock := &txindexmocks.TxIndexer{}
blkIdxMock := &indexermocks.BlockIndexer{}
rpcConfig := config.TestRPCConfig()
l := log.TestingLogger()
d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock, l)
ctx, cancel := context.WithCancel(context.Background())
wg := &sync.WaitGroup{}
wg.Add(1)
startedWG := &sync.WaitGroup{}
startedWG.Add(1)
go func() {
startedWG.Done()
defer wg.Done()
require.NoError(t, d.Run(ctx))
}()
// FIXME: used to induce context switch.
// Determine more deterministic method for prompting a context switch
startedWG.Wait()
requireConnect(t, rpcConfig.ListenAddress, 20)
cli, err := httpclient.New(rpcConfig.ListenAddress, "/websocket")
require.NoError(t, err)
res, err := cli.BlockResults(context.Background(), &testHeight)
require.NoError(t, err)
require.Equal(t, res.TxsResults[0].GasUsed, testGasUsed)
cancel()
wg.Wait()
blockStoreMock.AssertExpectations(t)
stateStoreMock.AssertExpectations(t)
}
func TestCommit(t *testing.T) {
testHeight := int64(1)
testRound := int32(101)
stateStoreMock := &statemocks.Store{}
stateStoreMock.On("Close").Return(nil)
blockStoreMock := &statemocks.BlockStore{}
blockStoreMock.On("Close").Return(nil)
blockStoreMock.On("Base").Return(int64(0))
blockStoreMock.On("Height").Return(testHeight)
blockStoreMock.On("LoadBlockMeta", testHeight).Return(&types.BlockMeta{}, nil)
blockStoreMock.On("LoadSeenCommit", testHeight).Return(&types.Commit{
Height: testHeight,
Round: testRound,
}, nil)
txIndexerMock := &txindexmocks.TxIndexer{}
blkIdxMock := &indexermocks.BlockIndexer{}
rpcConfig := config.TestRPCConfig()
l := log.TestingLogger()
d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock, l)
ctx, cancel := context.WithCancel(context.Background())
wg := &sync.WaitGroup{}
wg.Add(1)
startedWG := &sync.WaitGroup{}
startedWG.Add(1)
go func() {
startedWG.Done()
defer wg.Done()
require.NoError(t, d.Run(ctx))
}()
// FIXME: used to induce context switch.
// Determine more deterministic method for prompting a context switch
startedWG.Wait()
requireConnect(t, rpcConfig.ListenAddress, 20)
cli, err := httpclient.New(rpcConfig.ListenAddress, "/websocket")
require.NoError(t, err)
res, err := cli.Commit(context.Background(), &testHeight)
require.NoError(t, err)
require.NotNil(t, res)
require.Equal(t, res.SignedHeader.Commit.Round, testRound)
cancel()
wg.Wait()
blockStoreMock.AssertExpectations(t)
stateStoreMock.AssertExpectations(t)
}
func TestBlockByHash(t *testing.T) {
testHeight := int64(1)
testHash := []byte("test hash")
testBlock := new(types.Block)
testBlock.Header.Height = testHeight
testBlock.Header.LastCommitHash = testHash
stateStoreMock := &statemocks.Store{}
stateStoreMock.On("Close").Return(nil)
blockStoreMock := &statemocks.BlockStore{}
blockStoreMock.On("Close").Return(nil)
blockStoreMock.On("LoadBlockMeta", testHeight).Return(&types.BlockMeta{
BlockID: types.BlockID{
Hash: testHash,
},
Header: types.Header{
Height: testHeight,
},
}, nil)
blockStoreMock.On("LoadBlockByHash", testHash).Return(testBlock, nil)
txIndexerMock := &txindexmocks.TxIndexer{}
blkIdxMock := &indexermocks.BlockIndexer{}
rpcConfig := config.TestRPCConfig()
l := log.TestingLogger()
d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock, l)
ctx, cancel := context.WithCancel(context.Background())
wg := &sync.WaitGroup{}
wg.Add(1)
startedWG := &sync.WaitGroup{}
startedWG.Add(1)
go func() {
startedWG.Done()
defer wg.Done()
require.NoError(t, d.Run(ctx))
}()
// FIXME: used to induce context switch.
// Determine more deterministic method for prompting a context switch
startedWG.Wait()
requireConnect(t, rpcConfig.ListenAddress, 20)
cli, err := httpclient.New(rpcConfig.ListenAddress, "/websocket")
require.NoError(t, err)
res, err := cli.BlockByHash(context.Background(), testHash)
require.NoError(t, err)
require.NotNil(t, res)
require.Equal(t, []byte(res.BlockID.Hash), testHash)
cancel()
wg.Wait()
blockStoreMock.AssertExpectations(t)
stateStoreMock.AssertExpectations(t)
}
func TestBlockchain(t *testing.T) {
testHeight := int64(1)
testBlock := new(types.Block)
testBlockHash := []byte("test hash")
testBlock.Header.Height = testHeight
testBlock.Header.LastCommitHash = testBlockHash
stateStoreMock := &statemocks.Store{}
stateStoreMock.On("Close").Return(nil)
blockStoreMock := &statemocks.BlockStore{}
blockStoreMock.On("Close").Return(nil)
blockStoreMock.On("Height").Return(testHeight)
blockStoreMock.On("Base").Return(int64(0))
blockStoreMock.On("LoadBlockMeta", testHeight).Return(&types.BlockMeta{
BlockID: types.BlockID{
Hash: testBlockHash,
},
})
txIndexerMock := &txindexmocks.TxIndexer{}
blkIdxMock := &indexermocks.BlockIndexer{}
rpcConfig := config.TestRPCConfig()
l := log.TestingLogger()
d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock, l)
ctx, cancel := context.WithCancel(context.Background())
wg := &sync.WaitGroup{}
wg.Add(1)
startedWG := &sync.WaitGroup{}
startedWG.Add(1)
go func() {
startedWG.Done()
defer wg.Done()
require.NoError(t, d.Run(ctx))
}()
// FIXME: used to induce context switch.
// Determine more deterministic method for prompting a context switch
startedWG.Wait()
requireConnect(t, rpcConfig.ListenAddress, 20)
cli, err := httpclient.New(rpcConfig.ListenAddress, "/websocket")
require.NoError(t, err)
res, err := cli.BlockchainInfo(context.Background(), 0, 100)
require.NoError(t, err)
require.NotNil(t, res)
require.Equal(t, testBlockHash, []byte(res.BlockMetas[0].BlockID.Hash))
cancel()
wg.Wait()
blockStoreMock.AssertExpectations(t)
stateStoreMock.AssertExpectations(t)
}
func TestValidators(t *testing.T) {
testHeight := int64(1)
testVotingPower := int64(100)
testValidators := types.ValidatorSet{
Validators: []*types.Validator{
{
VotingPower: testVotingPower,
},
},
}
stateStoreMock := &statemocks.Store{}
stateStoreMock.On("Close").Return(nil)
stateStoreMock.On("LoadValidators", testHeight).Return(&testValidators, nil)
blockStoreMock := &statemocks.BlockStore{}
blockStoreMock.On("Close").Return(nil)
blockStoreMock.On("Height").Return(testHeight)
blockStoreMock.On("Base").Return(int64(0))
txIndexerMock := &txindexmocks.TxIndexer{}
blkIdxMock := &indexermocks.BlockIndexer{}
rpcConfig := config.TestRPCConfig()
l := log.TestingLogger()
d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock, l)
ctx, cancel := context.WithCancel(context.Background())
wg := &sync.WaitGroup{}
wg.Add(1)
startedWG := &sync.WaitGroup{}
startedWG.Add(1)
go func() {
startedWG.Done()
defer wg.Done()
require.NoError(t, d.Run(ctx))
}()
// FIXME: used to induce context switch.
// Determine more deterministic method for prompting a context switch
startedWG.Wait()
requireConnect(t, rpcConfig.ListenAddress, 20)
cli, err := httpclient.New(rpcConfig.ListenAddress, "/websocket")
require.NoError(t, err)
testPage := 1
testPerPage := 100
res, err := cli.Validators(context.Background(), &testHeight, &testPage, &testPerPage)
require.NoError(t, err)
require.NotNil(t, res)
require.Equal(t, testVotingPower, res.Validators[0].VotingPower)
cancel()
wg.Wait()
blockStoreMock.AssertExpectations(t)
stateStoreMock.AssertExpectations(t)
}
func TestBlockSearch(t *testing.T) {
testHeight := int64(1)
testBlockHash := []byte("test hash")
testQuery := "block.height = 1"
stateStoreMock := &statemocks.Store{}
stateStoreMock.On("Close").Return(nil)
blockStoreMock := &statemocks.BlockStore{}
blockStoreMock.On("Close").Return(nil)
txIndexerMock := &txindexmocks.TxIndexer{}
blkIdxMock := &indexermocks.BlockIndexer{}
blockStoreMock.On("LoadBlock", testHeight).Return(&types.Block{
Header: types.Header{
Height: testHeight,
},
}, nil)
blockStoreMock.On("LoadBlockMeta", testHeight).Return(&types.BlockMeta{
BlockID: types.BlockID{
Hash: testBlockHash,
},
})
blkIdxMock.On("Search", mock.Anything,
mock.MatchedBy(func(q *query.Query) bool { return testQuery == q.String() })).
Return([]int64{testHeight}, nil)
rpcConfig := config.TestRPCConfig()
l := log.TestingLogger()
d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock, l)
ctx, cancel := context.WithCancel(context.Background())
wg := &sync.WaitGroup{}
wg.Add(1)
startedWG := &sync.WaitGroup{}
startedWG.Add(1)
go func() {
startedWG.Done()
defer wg.Done()
require.NoError(t, d.Run(ctx))
}()
// FIXME: used to induce context switch.
// Determine more deterministic method for prompting a context switch
startedWG.Wait()
requireConnect(t, rpcConfig.ListenAddress, 20)
cli, err := httpclient.New(rpcConfig.ListenAddress, "/websocket")
require.NoError(t, err)
testPage := 1
testPerPage := 100
testOrderBy := "desc"
res, err := cli.BlockSearch(context.Background(), testQuery, &testPage, &testPerPage, testOrderBy)
require.NoError(t, err)
require.NotNil(t, res)
require.Equal(t, testBlockHash, []byte(res.Blocks[0].BlockID.Hash))
cancel()
wg.Wait()
blockStoreMock.AssertExpectations(t)
stateStoreMock.AssertExpectations(t)
}
func requireConnect(t testing.TB, addr string, retries int) {
parts := strings.SplitN(addr, "://", 2)
if len(parts) != 2 {
t.Fatalf("malformed address to dial: %s", addr)
}
var err error
for i := 0; i < retries; i++ {
var conn net.Conn
conn, err = net.Dial(parts[0], parts[1])
if err == nil {
conn.Close()
return
}
// FIXME attempt to yield and let the other goroutine continue execution.
time.Sleep(time.Microsecond * 100)
}
t.Fatalf("unable to connect to server %s after %d tries: %s", addr, retries, err)
}

View File

@@ -1,128 +0,0 @@
package rpc
import (
"context"
"net/http"
"time"
"github.com/rs/cors"
"github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/rpc/core"
"github.com/tendermint/tendermint/rpc/jsonrpc/server"
"github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/state/indexer"
"github.com/tendermint/tendermint/state/txindex"
)
// Server defines parameters for running an Inspector rpc server.
type Server struct {
Addr string // TCP address to listen on, ":http" if empty
Handler http.Handler
Logger log.Logger
Config *config.RPCConfig
}
// Routes returns the set of routes used by the Inspector server.
func Routes(cfg config.RPCConfig, s state.Store, bs state.BlockStore, txidx txindex.TxIndexer, blkidx indexer.BlockIndexer, logger log.Logger) core.RoutesMap { //nolint: lll
env := &core.Environment{
Config: cfg,
BlockIndexer: blkidx,
TxIndexer: txidx,
StateStore: s,
BlockStore: bs,
ConsensusReactor: waitSyncCheckerImpl{},
Logger: logger,
}
return core.RoutesMap{
"blockchain": server.NewRPCFunc(env.BlockchainInfo, "minHeight,maxHeight"),
"consensus_params": server.NewRPCFunc(env.ConsensusParams, "height"),
"block": server.NewRPCFunc(env.Block, "height"),
"block_by_hash": server.NewRPCFunc(env.BlockByHash, "hash"),
"block_results": server.NewRPCFunc(env.BlockResults, "height"),
"commit": server.NewRPCFunc(env.Commit, "height"),
"header": server.NewRPCFunc(env.Header, "height"),
"header_by_hash": server.NewRPCFunc(env.HeaderByHash, "hash"),
"validators": server.NewRPCFunc(env.Validators, "height,page,per_page"),
"tx": server.NewRPCFunc(env.Tx, "hash,prove"),
"tx_search": server.NewRPCFunc(env.TxSearch, "query,prove,page,per_page,order_by"),
"block_search": server.NewRPCFunc(env.BlockSearch, "query,page,per_page,order_by"),
}
}
// Handler returns the http.Handler configured for use with an Inspector server. Handler
// registers the routes on the http.Handler and also registers the websocket handler
// and the CORS handler if specified by the configuration options.
func Handler(rpcConfig *config.RPCConfig, routes core.RoutesMap, logger log.Logger) http.Handler {
mux := http.NewServeMux()
wmLogger := logger.With("protocol", "websocket")
wm := server.NewWebsocketManager(routes,
server.ReadLimit(rpcConfig.MaxBodyBytes))
wm.SetLogger(wmLogger)
mux.HandleFunc("/websocket", wm.WebsocketHandler)
server.RegisterRPCFuncs(mux, routes, logger)
var rootHandler http.Handler = mux
if rpcConfig.IsCorsEnabled() {
rootHandler = addCORSHandler(rpcConfig, mux)
}
return rootHandler
}
func addCORSHandler(rpcConfig *config.RPCConfig, h http.Handler) http.Handler {
corsMiddleware := cors.New(cors.Options{
AllowedOrigins: rpcConfig.CORSAllowedOrigins,
AllowedMethods: rpcConfig.CORSAllowedMethods,
AllowedHeaders: rpcConfig.CORSAllowedHeaders,
})
h = corsMiddleware.Handler(h)
return h
}
type waitSyncCheckerImpl struct{}
func (waitSyncCheckerImpl) WaitSync() bool {
return false
}
// ListenAndServe listens on the address specified in srv.Addr and handles any
// incoming requests over HTTP using the Inspector rpc handler specified on the server.
func (srv *Server) ListenAndServe(ctx context.Context) error {
listener, err := server.Listen(srv.Addr, srv.Config.MaxOpenConnections)
if err != nil {
return err
}
go func() {
<-ctx.Done()
listener.Close()
}()
return server.Serve(listener, srv.Handler, srv.Logger, serverRPCConfig(srv.Config))
}
// ListenAndServeTLS listens on the address specified in srv.Addr. ListenAndServeTLS handles
// incoming requests over HTTPS using the Inspector rpc handler specified on the server.
func (srv *Server) ListenAndServeTLS(ctx context.Context, certFile, keyFile string) error {
listener, err := server.Listen(srv.Addr, srv.Config.MaxOpenConnections)
if err != nil {
return err
}
go func() {
<-ctx.Done()
listener.Close()
}()
return server.ServeTLS(listener, srv.Handler, certFile, keyFile, srv.Logger, serverRPCConfig(srv.Config))
}
func serverRPCConfig(r *config.RPCConfig) *server.Config {
cfg := server.DefaultConfig()
cfg.MaxBodyBytes = r.MaxBodyBytes
cfg.MaxHeaderBytes = r.MaxHeaderBytes
// If necessary adjust global WriteTimeout to ensure it's greater than
// TimeoutBroadcastTxCommit.
// See https://github.com/tendermint/tendermint/issues/3435
if cfg.WriteTimeout <= r.TimeoutBroadcastTxCommit {
cfg.WriteTimeout = r.TimeoutBroadcastTxCommit + 1*time.Second
}
return cfg
}
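The addCORSHandler helper above is the standard rs/cors wrapping pattern. A self-contained sketch of that pattern with placeholder origins (in the real code these values come from the RPC configuration):

package main

import (
	"log"
	"net/http"

	"github.com/rs/cors"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/status", func(w http.ResponseWriter, _ *http.Request) {
		_, _ = w.Write([]byte(`{"ok":true}`))
	})
	// Same shape as addCORSHandler above: wrap the root handler only when CORS is enabled.
	handler := cors.New(cors.Options{
		AllowedOrigins: []string{"*"}, // placeholder; real values come from RPCConfig
		AllowedMethods: []string{"GET", "POST"},
		AllowedHeaders: []string{"Origin", "Content-Type"},
	}).Handler(mux)
	log.Fatal(http.ListenAndServe("127.0.0.1:26657", handler))
}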

View File

@@ -27,7 +27,7 @@
// select {
// case msg := <-subscription.Out():
// // handle msg.Data() and msg.Events()
// case <-subscription.Canceled():
// case <-subscription.Cancelled():
// return subscription.Err()
// }
// }

View File

@@ -431,7 +431,7 @@ func benchmarkNClients(n int, b *testing.B) {
select {
case <-subscription.Out():
continue
case <-subscription.Canceled():
case <-subscription.Cancelled():
return
}
}
@@ -472,7 +472,7 @@ func benchmarkNClientsOneQuery(n int, b *testing.B) {
select {
case <-subscription.Out():
continue
case <-subscription.Canceled():
case <-subscription.Cancelled():
return
}
}
@@ -501,7 +501,7 @@ func assertReceive(t *testing.T, expected interface{}, ch <-chan pubsub.Message,
}
func assertCancelled(t *testing.T, subscription *pubsub.Subscription, err error) {
_, ok := <-subscription.Canceled()
_, ok := <-subscription.Cancelled()
assert.False(t, ok)
assert.Equal(t, err, subscription.Err())
}

View File

@@ -9,7 +9,6 @@ import (
"time"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/libs/pubsub"
"github.com/tendermint/tendermint/libs/pubsub/query"

View File

@@ -43,9 +43,11 @@ func (s *Subscription) Out() <-chan Message {
return s.out
}
// Canceled returns a channel that's closed when the subscription is
// Cancelled returns a channel that's closed when the subscription is
// terminated and supposed to be used in a select statement.
func (s *Subscription) Canceled() <-chan struct{} {
//
//nolint:misspell
func (s *Subscription) Cancelled() <-chan struct{} {
return s.canceled
}
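A typical consumer loop over this API, sketched against the Out, Cancelled, and Err methods shown in this diff (the handler is a placeholder):

package demo

import (
	"github.com/tendermint/tendermint/libs/pubsub"
)

// consume drains a subscription until it is terminated, then reports why.
func consume(sub *pubsub.Subscription, handle func(pubsub.Message)) error {
	for {
		select {
		case msg := <-sub.Out():
			handle(msg) // msg.Data() and msg.Events() carry the payload
		case <-sub.Cancelled():
			return sub.Err() // the reason the subscription was terminated
		}
	}
}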

View File

@@ -32,27 +32,6 @@ func SplitAndTrim(s, sep, cutset string) []string {
return spl
}
// SplitAndTrimEmpty slices s into all subslices separated by sep and returns a
// slice of the string s with all leading and trailing Unicode code points
// contained in cutset removed. If sep is empty, SplitAndTrimEmpty splits after each
// UTF-8 sequence. The first part is equivalent to strings.SplitN with a count of
// -1. Empty strings are then filtered out, so only non-empty strings are returned.
func SplitAndTrimEmpty(s, sep, cutset string) []string {
if s == "" {
return []string{}
}
spl := strings.Split(s, sep)
nonEmptyStrings := make([]string, 0, len(spl))
for i := 0; i < len(spl); i++ {
element := strings.Trim(spl[i], cutset)
if element != "" {
nonEmptyStrings = append(nonEmptyStrings, element)
}
}
return nonEmptyStrings
}
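For context, the behaviour of the removed helper shown as a tiny self-contained program (a local reimplementation for illustration, not the library call):

package main

import (
	"fmt"
	"strings"
)

// splitAndTrimEmpty mirrors the removed helper: split on sep, trim cutset,
// and drop any elements that end up empty.
func splitAndTrimEmpty(s, sep, cutset string) []string {
	if s == "" {
		return []string{}
	}
	out := make([]string, 0)
	for _, part := range strings.Split(s, sep) {
		if p := strings.Trim(part, cutset); p != "" {
			out = append(out, p)
		}
	}
	return out
}

func main() {
	fmt.Println(splitAndTrimEmpty("tcp://127.0.0.1:26657, ,tcp://0.0.0.0:26658", ",", " "))
	// prints: [tcp://127.0.0.1:26657 tcp://0.0.0.0:26658]
}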
// IsASCIIText returns true if s is non-empty and contains only printable, non-tab ASCII characters.
func IsASCIIText(s string) bool {
if len(s) == 0 {

View File

@@ -23,7 +23,7 @@ import (
// trace that was produced from the primary. If successful, it produces two sets of evidence
// and sends them to the opposite provider before halting.
//
// If there are no conflicting headers, the light client deems the verified target header
// If there are no conflictinge headers, the light client deems the verified target header
// trusted and saves it to the trusted store.
func (c *Client) detectDivergence(ctx context.Context, primaryTrace []*types.LightBlock, now time.Time) error {
if primaryTrace == nil || len(primaryTrace) < 2 {
@@ -74,7 +74,7 @@ func (c *Client) detectDivergence(ctx context.Context, primaryTrace []*types.Lig
witnessesToRemove = append(witnessesToRemove, e.WitnessIndex)
case errBadWitness:
// these are all malevolent errors and should result in removing the
// these are all melevolent errors and should result in removing the
// witness
c.logger.Info("witness returned an error during header comparison, removing...",
"witness", c.witnesses[e.WitnessIndex], "err", err)
@@ -217,7 +217,7 @@ func (c *Client) sendEvidence(ctx context.Context, ev *types.LightClientAttackEv
func (c *Client) handleConflictingHeaders(
ctx context.Context,
primaryTrace []*types.LightBlock,
challengingBlock *types.LightBlock,
challendingBlock *types.LightBlock,
witnessIndex int,
now time.Time,
) error {
@@ -225,7 +225,7 @@ func (c *Client) handleConflictingHeaders(
witnessTrace, primaryBlock, err := c.examineConflictingHeaderAgainstTrace(
ctx,
primaryTrace,
challengingBlock,
challendingBlock,
supportingWitness,
now,
)
@@ -374,7 +374,7 @@ func (c *Client) examineConflictingHeaderAgainstTrace(
}
// getTargetBlockOrLatest gets the latest height, if it is greater than the target height then it queries
// the target height else it returns the latest. returns true if it successfully managed to acquire the target
// the target heght else it returns the latest. returns true if it successfully managed to acquire the target
// height.
func (c *Client) getTargetBlockOrLatest(
ctx context.Context,
@@ -394,7 +394,7 @@ func (c *Client) getTargetBlockOrLatest(
if lightBlock.Height > height {
// the witness has caught up. We recursively call the function again. However in order
// to avoid a wild goose chase where the witness sends us one header below and one header
// to avoud a wild goose chase where the witness sends us one header below and one header
// above the height we set a timeout to the context
lightBlock, err := witness.LightBlock(ctx, height)
return true, lightBlock, err

View File

@@ -113,7 +113,7 @@ func (p *Proxy) listen() (net.Listener, *http.ServeMux, error) {
}
// 4) Start listening for new connections.
listener, err := rpcserver.Listen(p.Addr, p.Config.MaxOpenConnections)
listener, err := rpcserver.Listen(p.Addr, p.Config)
if err != nil {
return nil, mux, err
}

View File

@@ -21,22 +21,22 @@ func RPCRoutes(c *lrpc.Client) map[string]*rpcserver.RPCFunc {
"health": rpcserver.NewRPCFunc(makeHealthFunc(c), ""),
"status": rpcserver.NewRPCFunc(makeStatusFunc(c), ""),
"net_info": rpcserver.NewRPCFunc(makeNetInfoFunc(c), ""),
"blockchain": rpcserver.NewRPCFunc(makeBlockchainInfoFunc(c), "minHeight,maxHeight", rpcserver.Cacheable()),
"genesis": rpcserver.NewRPCFunc(makeGenesisFunc(c), "", rpcserver.Cacheable()),
"genesis_chunked": rpcserver.NewRPCFunc(makeGenesisChunkedFunc(c), "", rpcserver.Cacheable()),
"block": rpcserver.NewRPCFunc(makeBlockFunc(c), "height", rpcserver.Cacheable("height")),
"header": rpcserver.NewRPCFunc(makeHeaderFunc(c), "height", rpcserver.Cacheable("height")),
"header_by_hash": rpcserver.NewRPCFunc(makeHeaderByHashFunc(c), "hash", rpcserver.Cacheable()),
"block_by_hash": rpcserver.NewRPCFunc(makeBlockByHashFunc(c), "hash", rpcserver.Cacheable()),
"block_results": rpcserver.NewRPCFunc(makeBlockResultsFunc(c), "height", rpcserver.Cacheable("height")),
"commit": rpcserver.NewRPCFunc(makeCommitFunc(c), "height", rpcserver.Cacheable("height")),
"tx": rpcserver.NewRPCFunc(makeTxFunc(c), "hash,prove", rpcserver.Cacheable()),
"blockchain": rpcserver.NewRPCFunc(makeBlockchainInfoFunc(c), "minHeight,maxHeight"),
"genesis": rpcserver.NewRPCFunc(makeGenesisFunc(c), ""),
"genesis_chunked": rpcserver.NewRPCFunc(makeGenesisChunkedFunc(c), ""),
"block": rpcserver.NewRPCFunc(makeBlockFunc(c), "height"),
"header": rpcserver.NewRPCFunc(makeHeaderFunc(c), "height"),
"header_by_hash": rpcserver.NewRPCFunc(makeHeaderByHashFunc(c), "hash"),
"block_by_hash": rpcserver.NewRPCFunc(makeBlockByHashFunc(c), "hash"),
"block_results": rpcserver.NewRPCFunc(makeBlockResultsFunc(c), "height"),
"commit": rpcserver.NewRPCFunc(makeCommitFunc(c), "height"),
"tx": rpcserver.NewRPCFunc(makeTxFunc(c), "hash,prove"),
"tx_search": rpcserver.NewRPCFunc(makeTxSearchFunc(c), "query,prove,page,per_page,order_by"),
"block_search": rpcserver.NewRPCFunc(makeBlockSearchFunc(c), "query,page,per_page,order_by"),
"validators": rpcserver.NewRPCFunc(makeValidatorsFunc(c), "height,page,per_page", rpcserver.Cacheable("height")),
"validators": rpcserver.NewRPCFunc(makeValidatorsFunc(c), "height,page,per_page"),
"dump_consensus_state": rpcserver.NewRPCFunc(makeDumpConsensusStateFunc(c), ""),
"consensus_state": rpcserver.NewRPCFunc(makeConsensusStateFunc(c), ""),
"consensus_params": rpcserver.NewRPCFunc(makeConsensusParamsFunc(c), "height", rpcserver.Cacheable("height")),
"consensus_params": rpcserver.NewRPCFunc(makeConsensusParamsFunc(c), "height"),
"unconfirmed_txs": rpcserver.NewRPCFunc(makeUnconfirmedTxsFunc(c), "limit"),
"num_unconfirmed_txs": rpcserver.NewRPCFunc(makeNumUnconfirmedTxsFunc(c), ""),
@@ -47,7 +47,7 @@ func RPCRoutes(c *lrpc.Client) map[string]*rpcserver.RPCFunc {
// abci API
"abci_query": rpcserver.NewRPCFunc(makeABCIQueryFunc(c), "path,data,height,prove"),
"abci_info": rpcserver.NewRPCFunc(makeABCIInfoFunc(c), "", rpcserver.Cacheable()),
"abci_info": rpcserver.NewRPCFunc(makeABCIInfoFunc(c), ""),
// evidence API
"broadcast_evidence": rpcserver.NewRPCFunc(makeBroadcastEvidenceFunc(c), "evidence"),

View File

@@ -590,7 +590,7 @@ func (c *Client) updateLightClientIfNeededTo(ctx context.Context, height *int64)
l, err = c.lc.VerifyLightBlockAtHeight(ctx, *height, time.Now())
}
if err != nil {
return nil, fmt.Errorf("failed to update light client to %d: %w", *height, err)
return nil, fmt.Errorf("failed to update light client to %d: %w", height, err)
}
return l, nil
}

View File

@@ -11,7 +11,7 @@ import (
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/rs/cors"
bc "github.com/tendermint/tendermint/blocksync"
"github.com/tendermint/tendermint/blocksync"
cfg "github.com/tendermint/tendermint/config"
cs "github.com/tendermint/tendermint/consensus"
"github.com/tendermint/tendermint/evidence"
@@ -50,7 +50,6 @@ type Node struct {
privValidator types.PrivValidator // local node's validator key
// network
transport *p2p.MultiplexTransport
sw *p2p.Switch // p2p connections
addrBook pex.AddrBook // known peers
nodeInfo p2p.NodeInfo
@@ -60,11 +59,10 @@ type Node struct {
// services
eventBus *types.EventBus // pub/sub for services
stateStore sm.Store
blockStore *store.BlockStore // store the blockchain to disk
bcReactor p2p.Reactor // for block-syncing
mempoolReactor p2p.Reactor // for gossipping transactions
blockStore *store.BlockStore // store the blockchain to disk
bcReactor *blocksync.Reactor // for block-syncing
mempoolReactor p2p.Reactor // for gossipping transactions
mempool mempl.Mempool
stateSync bool // whether the node should state sync on startup
stateSyncReactor *statesync.Reactor // for hosting and restoring state sync snapshots
stateSyncProvider statesync.StateProvider // provides state data for bootstrapping a node
stateSyncGenesis sm.State // provides the genesis state for state sync
@@ -79,6 +77,7 @@ type Node struct {
indexerService *txindex.IndexerService
prometheusSrv *http.Server
pprofSrv *http.Server
customReactors map[string]p2p.Reactor
}
// Option sets a parameter for the node.
@@ -98,29 +97,7 @@ type Option func(*Node)
// - STATESYNC
func CustomReactors(reactors map[string]p2p.Reactor) Option {
return func(n *Node) {
for name, reactor := range reactors {
if existingReactor := n.sw.Reactor(name); existingReactor != nil {
n.sw.Logger.Info("Replacing existing reactor with a custom one",
"name", name, "existing", existingReactor, "custom", reactor)
n.sw.RemoveReactor(name, existingReactor)
}
n.sw.AddReactor(name, reactor)
// register the new channels to the nodeInfo
// NOTE: This is a bit messy now with the type casting but is
// cleaned up in the following version when NodeInfo is changed from
// an interface to a concrete type
if ni, ok := n.nodeInfo.(p2p.DefaultNodeInfo); ok {
for _, chDesc := range reactor.GetChannels() {
if !ni.HasChannel(chDesc.ID) {
ni.Channels = append(ni.Channels, chDesc.ID)
n.transport.AddChannel(chDesc.ID)
}
}
n.nodeInfo = ni
} else {
n.Logger.Error("Node info is not of type DefaultNodeInfo. Custom reactor channels can not be added.")
}
}
n.customReactors = reactors
}
}
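A sketch of how a caller supplies this option, using only the CustomReactors signature shown above; the reactor value is a placeholder for a concrete p2p.Reactor implementation:

package main

import (
	"fmt"

	"github.com/tendermint/tendermint/node"
	"github.com/tendermint/tendermint/p2p"
)

func main() {
	var custom p2p.Reactor // placeholder; a real caller passes a concrete reactor implementation
	opt := node.CustomReactors(map[string]p2p.Reactor{
		"CUSTOM": custom, // per the doc comment above, replaces any reactor already registered under this name
	})
	// The resulting Option is passed as a trailing argument to node.NewNode.
	fmt.Printf("%T\n", opt)
}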
@@ -141,7 +118,7 @@ func NewNode(config *cfg.Config,
nodeKey *p2p.NodeKey,
clientCreator proxy.ClientCreator,
genesisDocProvider GenesisDocProvider,
dbProvider cfg.DBProvider,
dbProvider DBProvider,
metricsProvider MetricsProvider,
logger log.Logger,
options ...Option,
@@ -163,71 +140,23 @@ func NewNode(config *cfg.Config,
csMetrics, p2pMetrics, memplMetrics, smMetrics, abciMetrics, bsMetrics, ssMetrics := metricsProvider(genDoc.ChainID)
// Create the proxyApp and establish connections to the ABCI app (consensus, mempool, query).
proxyApp, err := createAndStartProxyAppConns(clientCreator, logger, abciMetrics)
proxyApp, err := createProxyAppConns(clientCreator, logger, abciMetrics)
if err != nil {
return nil, err
return nil, fmt.Errorf("error starting proxy app connections: %v", err)
}
// EventBus and IndexerService must be started before the handshake because
// we might need to index the txs of the replayed block as this might not have happened
// when the node stopped last time (i.e. the node stopped after it saved the block
// but before it indexed the txs, or, endblocker panicked)
eventBus, err := createAndStartEventBus(logger)
if err != nil {
return nil, err
}
eventBus := createEventBus(logger)
indexerService, txIndexer, blockIndexer, err := createAndStartIndexerService(config,
indexerService, txIndexer, blockIndexer, err := createIndexerService(config,
genDoc.ChainID, dbProvider, eventBus, logger)
if err != nil {
return nil, err
}
// If an address is provided, listen on the socket for a connection from an
// external signing process.
if config.PrivValidatorListenAddr != "" {
// FIXME: we should start services inside OnStart
privValidator, err = createAndStartPrivValidatorSocketClient(config.PrivValidatorListenAddr, genDoc.ChainID, logger)
if err != nil {
return nil, fmt.Errorf("error with private validator socket client: %w", err)
}
}
pubKey, err := privValidator.GetPubKey()
if err != nil {
return nil, fmt.Errorf("can't get pubkey: %w", err)
}
// Determine whether we should attempt state sync.
stateSync := config.StateSync.Enable && !onlyValidatorIsUs(state, pubKey)
if stateSync && state.LastBlockHeight > 0 {
logger.Info("Found local state with non-zero height, skipping state sync")
stateSync = false
}
// Create the handshaker, which calls RequestInfo, sets the AppVersion on the state,
// and replays any blocks as necessary to sync tendermint with the app.
consensusLogger := logger.With("module", "consensus")
if !stateSync {
if err := doHandshake(stateStore, state, blockStore, genDoc, eventBus, proxyApp, consensusLogger); err != nil {
return nil, err
}
// Reload the state. It will have the Version.Consensus.App set by the
// Handshake, and may have other modifications as well (ie. depending on
// what happened during block replay).
state, err = stateStore.Load()
if err != nil {
return nil, fmt.Errorf("cannot load state: %w", err)
}
}
// Determine whether we should do block sync. This must happen after the handshake, since the
// app may modify the validator set, specifying ourself as the only validator.
blockSync := config.BlockSyncMode && !onlyValidatorIsUs(state, pubKey)
logNodeStartupInfo(state, pubKey, logger, consensusLogger)
// Make MempoolReactor
mempool, mempoolReactor := createMempoolAndMempoolReactor(config, proxyApp, state, memplMetrics, logger)
@@ -249,15 +178,16 @@ func NewNode(config *cfg.Config,
)
// Make BlocksyncReactor. Don't start block sync if we're doing a state sync first.
bcReactor, err := createBlocksyncReactor(config, state, blockExec, blockStore, blockSync && !stateSync, logger, bsMetrics)
bcReactor, err := createBlocksyncReactor(config, state, blockExec, blockStore, bsMetrics, logger)
if err != nil {
return nil, fmt.Errorf("could not create blocksync reactor: %w", err)
}
// Make ConsensusReactor
// Make ConsensusReactor. Don't enable fully if doing a state sync and/or block sync first.
consensusLogger := logger.With("module", "consensus")
consensusReactor, consensusState := createConsensusReactor(
config, state, blockExec, blockStore, mempool, evidencePool,
privValidator, csMetrics, stateSync || blockSync, eventBus, consensusLogger,
config, blockExec, blockStore, mempool, evidencePool,
privValidator, csMetrics, eventBus, consensusLogger,
)
// Set up state sync reactor, and schedule a sync if requested.
@@ -273,19 +203,11 @@ func NewNode(config *cfg.Config,
)
stateSyncReactor.SetLogger(logger.With("module", "statesync"))
nodeInfo, err := makeNodeInfo(config, nodeKey, txIndexer, genDoc, state)
if err != nil {
return nil, err
}
// Setup Transport.
transport, peerFilters := createTransport(config, nodeInfo, nodeKey, proxyApp)
// Setup Switch.
p2pLogger := logger.With("module", "p2p")
sw := createSwitch(
config, transport, p2pMetrics, peerFilters, mempoolReactor, bcReactor,
stateSyncReactor, consensusReactor, evidenceReactor, nodeInfo, nodeKey, p2pLogger,
config, p2pMetrics, mempoolReactor, bcReactor,
stateSyncReactor, consensusReactor, evidenceReactor, p2pLogger,
)
err = sw.AddPersistentPeers(splitAndTrimEmpty(config.P2P.PersistentPeers, ",", " "))
@@ -298,22 +220,11 @@ func NewNode(config *cfg.Config,
return nil, fmt.Errorf("could not add peer ids from unconditional_peer_ids field: %w", err)
}
addrBook, err := createAddrBookAndSetOnSwitch(config, sw, p2pLogger, nodeKey)
addrBook, err := createAddrBook(config, p2pLogger, nodeKey)
if err != nil {
return nil, fmt.Errorf("could not create addrbook: %w", err)
}
for _, addr := range splitAndTrimEmpty(config.P2P.BootstrapPeers, ",", " ") {
netAddrs, err := p2p.NewNetAddressString(addr)
if err != nil {
return nil, fmt.Errorf("invalid bootstrap peer address: %w", err)
}
err = addrBook.AddAddress(netAddrs, netAddrs)
if err != nil {
return nil, fmt.Errorf("adding bootstrap address to addressbook: %w", err)
}
}
// Optionally, start the pex reactor
//
// TODO:
@@ -328,7 +239,8 @@ func NewNode(config *cfg.Config,
// Note we currently use the addrBook regardless at least for AddOurAddress
var pexReactor *pex.Reactor
if config.P2P.PexReactor {
pexReactor = createPEXReactorAndAddToSwitch(addrBook, config, sw, logger)
pexReactor = createPEXReactor(addrBook, config, logger)
sw.AddReactor("PEX", pexReactor)
}
// Add private IDs to addrbook to block those peers being added
@@ -339,11 +251,9 @@ func NewNode(config *cfg.Config,
genesisDoc: genDoc,
privValidator: privValidator,
transport: transport,
sw: sw,
addrBook: addrBook,
nodeInfo: nodeInfo,
nodeKey: nodeKey,
sw: sw,
addrBook: addrBook,
nodeKey: nodeKey,
stateStore: stateStore,
blockStore: blockStore,
@@ -353,7 +263,6 @@ func NewNode(config *cfg.Config,
consensusState: consensusState,
consensusReactor: consensusReactor,
stateSyncReactor: stateSyncReactor,
stateSync: stateSync,
stateSyncGenesis: state, // Shouldn't be necessary, but need a way to pass the genesis state
pexReactor: pexReactor,
evidencePool: evidencePool,
@@ -369,6 +278,15 @@ func NewNode(config *cfg.Config,
option(node)
}
for name, reactor := range node.customReactors {
if existingReactor := node.sw.Reactor(name); existingReactor != nil {
node.sw.Logger.Info("Replacing existing reactor with a custom one",
"name", name, "existing", existingReactor, "custom", reactor)
node.sw.RemoveReactor(name, existingReactor)
}
node.sw.AddReactor(name, reactor)
}
return node, nil
}
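A minimal usage sketch of the CustomReactors option handled above, assuming config, privValidator, nodeKey, clientCreator and logger are already in scope and myReactor is a hypothetical p2p.Reactor; an entry whose name matches a default reactor (e.g. "MEMPOOL") replaces it, while any other name is added alongside the defaults:

node, err := NewNode(
	config,
	privValidator,
	nodeKey,
	clientCreator,
	DefaultGenesisDocProviderFunc(config),
	DefaultDBProvider,
	DefaultMetricsProvider(config.Instrumentation),
	logger,
	// "FOO" and myReactor are illustrative placeholders.
	CustomReactors(map[string]p2p.Reactor{"FOO": myReactor}),
)
if err != nil {
	logger.Error("failed to construct node", "err", err)
}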
@@ -381,6 +299,77 @@ func (n *Node) OnStart() error {
time.Sleep(genTime.Sub(now))
}
// If an address is provided, listen on the socket for a connection from an
// external signing process. This will overwrite the privvalidator provided in the constructor
if n.config.PrivValidatorListenAddr != "" {
var err error
n.privValidator, err = createPrivValidatorSocketClient(n.config.PrivValidatorListenAddr, n.genesisDoc.ChainID, n.Logger)
if err != nil {
return fmt.Errorf("error with private validator socket client: %w", err)
}
}
pubKey, err := n.privValidator.GetPubKey()
if err != nil {
return fmt.Errorf("can't get pubkey: %w", err)
}
state, err := n.stateStore.LoadFromDBOrGenesisDoc(n.genesisDoc)
if err != nil {
return fmt.Errorf("cannot load state: %w", err)
}
// Determine whether we should attempt state sync.
stateSync := n.config.StateSync.Enable && !onlyValidatorIsUs(state, pubKey)
if stateSync && state.LastBlockHeight > 0 {
n.Logger.Info("Found local state with non-zero height, skipping state sync")
stateSync = false
}
// Create the handshaker, which calls RequestInfo, sets the AppVersion on the state,
// and replays any blocks as necessary to sync tendermint with the app.
if !stateSync {
if err := doHandshake(n.stateStore, state, n.blockStore, n.genesisDoc, n.eventBus, n.proxyApp, n.Logger); err != nil {
return err
}
// Reload the state. It will have the Version.Consensus.App set by the
// Handshake, and may have other modifications as well (ie. depending on
// what happened during block replay).
state, err = n.stateStore.Load()
if err != nil {
return fmt.Errorf("cannot load state: %w", err)
}
}
nodeInfo, err := makeNodeInfo(n.config, n.nodeKey, n.txIndexer, n.genesisDoc, state)
if err != nil {
return err
}
for _, reactor := range n.customReactors {
for _, chDesc := range reactor.GetChannels() {
if !nodeInfo.HasChannel(chDesc.ID) {
nodeInfo.Channels = append(nodeInfo.Channels, chDesc.ID)
}
}
}
n.nodeInfo = nodeInfo
addr, err := p2p.NewNetAddressString(p2p.IDAddressString(n.nodeKey.ID(), n.config.P2P.ListenAddress))
if err != nil {
return err
}
// Setup Transport.
transport, peerFilters := createTransport(n.config, n.nodeInfo, n.nodeKey, addr, n.proxyApp)
n.sw.SetTransport(transport)
n.sw.SetPeerFilters(peerFilters...)
n.sw.SetNodeInfo(n.nodeInfo)
n.sw.SetNodeKey(n.nodeKey)
// run pprof server if it is enabled
if n.config.RPC.IsPprofEnabled() {
n.pprofSrv = n.startPprofServer()
@@ -401,16 +390,13 @@ func (n *Node) OnStart() error {
n.rpcListeners = listeners
}
// Start the transport.
addr, err := p2p.NewNetAddressString(p2p.IDAddressString(n.nodeKey.ID(), n.config.P2P.ListenAddress))
if err != nil {
return err
}
if err := n.transport.Listen(*addr); err != nil {
if err := n.eventBus.Start(); err != nil {
return err
}
n.isListening = true
if err := n.indexerService.Start(); err != nil {
return err
}
// Start the switch (the P2P server).
err = n.sw.Start()
@@ -418,23 +404,37 @@ func (n *Node) OnStart() error {
return err
}
n.isListening = true
// Always connect to persistent peers
err = n.sw.DialPeersAsync(splitAndTrimEmpty(n.config.P2P.PersistentPeers, ",", " "))
if err != nil {
return fmt.Errorf("could not dial peers from persistent_peers field: %w", err)
}
// Run state sync
if n.stateSync {
bcR, ok := n.bcReactor.(blockSyncReactor)
if !ok {
return fmt.Errorf("this blocksync reactor does not support switching from state sync")
}
err := startStateSync(n.stateSyncReactor, bcR, n.consensusReactor, n.stateSyncProvider,
// Determine whether we should do block sync. This must happen after the handshake, since the
// app may modify the validator set, specifying ourself as the only validator.
blockSync := n.config.BlockSyncMode && !onlyValidatorIsUs(state, pubKey)
logNodeStartupInfo(state, pubKey, n.Logger)
// Run start up phases
if stateSync {
err := startStateSync(n.stateSyncReactor, n.bcReactor, n.consensusReactor, n.stateSyncProvider,
n.config.StateSync, n.config.BlockSyncMode, n.stateStore, n.blockStore, n.stateSyncGenesis)
if err != nil {
return fmt.Errorf("failed to start state sync: %w", err)
}
} else if blockSync {
err := n.bcReactor.SwitchToBlockSync(state)
if err != nil {
return fmt.Errorf("failed to start block sync: %w", err)
}
} else {
err := n.consensusReactor.SwitchToConsensus(state, false)
if err != nil {
return fmt.Errorf("failed to switch to consensus: %w", err)
}
}
return nil
@@ -459,10 +459,6 @@ func (n *Node) OnStop() {
n.Logger.Error("Error closing switch", "err", err)
}
if err := n.transport.Close(); err != nil {
n.Logger.Error("Error closing transport", "err", err)
}
n.isListening = false
// finally stop the listeners / external services
@@ -503,12 +499,12 @@ func (n *Node) OnStop() {
}
// ConfigureRPC makes sure RPC has all the objects it needs to operate.
func (n *Node) ConfigureRPC() (*rpccore.Environment, error) {
func (n *Node) ConfigureRPC() error {
pubKey, err := n.privValidator.GetPubKey()
if pubKey == nil || err != nil {
return nil, fmt.Errorf("can't get pubkey: %w", err)
if err != nil {
return fmt.Errorf("can't get pubkey: %w", err)
}
rpcCoreEnv := rpccore.Environment{
rpccore.SetEnvironment(&rpccore.Environment{
ProxyAppQuery: n.proxyApp.Query(),
ProxyAppMempool: n.proxyApp.Mempool(),
@@ -518,36 +514,38 @@ func (n *Node) ConfigureRPC() (*rpccore.Environment, error) {
ConsensusState: n.consensusState,
P2PPeers: n.sw,
P2PTransport: n,
PubKey: pubKey,
PubKey: pubKey,
GenDoc: n.genesisDoc,
TxIndexer: n.txIndexer,
BlockIndexer: n.blockIndexer,
ConsensusReactor: n.consensusReactor,
BlocksyncReactor: n.bcReactor,
StatesyncReactor: n.stateSyncReactor,
EventBus: n.eventBus,
Mempool: n.mempool,
Logger: n.Logger.With("module", "rpc"),
Config: *n.config.RPC,
})
if err := rpccore.InitGenesisChunks(); err != nil {
return err
}
if err := rpcCoreEnv.InitGenesisChunks(); err != nil {
return nil, err
}
return &rpcCoreEnv, nil
return nil
}
func (n *Node) startRPC() ([]net.Listener, error) {
env, err := n.ConfigureRPC()
err := n.ConfigureRPC()
if err != nil {
return nil, err
}
listenAddrs := splitAndTrimEmpty(n.config.RPC.ListenAddress, ",", " ")
routes := env.GetRoutes()
if n.config.RPC.Unsafe {
env.AddUnsafeRoutes(routes)
rpccore.AddUnsafeRoutes()
}
config := rpcserver.DefaultConfig()
@@ -567,7 +565,7 @@ func (n *Node) startRPC() ([]net.Listener, error) {
mux := http.NewServeMux()
rpcLogger := n.Logger.With("module", "rpc-server")
wmLogger := rpcLogger.With("protocol", "websocket")
wm := rpcserver.NewWebsocketManager(routes,
wm := rpcserver.NewWebsocketManager(rpccore.Routes,
rpcserver.OnDisconnect(func(remoteAddr string) {
err := n.eventBus.UnsubscribeAll(context.Background(), remoteAddr)
if err != nil && err != tmpubsub.ErrSubscriptionNotFound {
@@ -579,10 +577,10 @@ func (n *Node) startRPC() ([]net.Listener, error) {
)
wm.SetLogger(wmLogger)
mux.HandleFunc("/websocket", wm.WebsocketHandler)
rpcserver.RegisterRPCFuncs(mux, routes, rpcLogger)
rpcserver.RegisterRPCFuncs(mux, rpccore.Routes, rpcLogger)
listener, err := rpcserver.Listen(
listenAddr,
config.MaxOpenConnections,
config,
)
if err != nil {
return nil, err
@@ -640,12 +638,12 @@ func (n *Node) startRPC() ([]net.Listener, error) {
if config.WriteTimeout <= n.config.RPC.TimeoutBroadcastTxCommit {
config.WriteTimeout = n.config.RPC.TimeoutBroadcastTxCommit + 1*time.Second
}
listener, err := rpcserver.Listen(grpcListenAddr, config.MaxOpenConnections)
listener, err := rpcserver.Listen(grpcListenAddr, config)
if err != nil {
return nil, err
}
go func() {
if err := grpccore.StartGRPCServer(env, listener); err != nil {
if err := grpccore.StartGRPCServer(listener); err != nil {
n.Logger.Error("Error starting gRPC server", "err", err)
}
}()
@@ -657,7 +655,7 @@ func (n *Node) startRPC() ([]net.Listener, error) {
}
// startPrometheusServer starts a Prometheus HTTP server, listening for metrics
// collectors on addr.
// collectors on the provided address.
func (n *Node) startPrometheusServer() *http.Server {
srv := &http.Server{
Addr: n.config.Instrumentation.PrometheusListenAddr,
@@ -678,7 +676,7 @@ func (n *Node) startPrometheusServer() *http.Server {
return srv
}
// starts a ppro
// starts a pprof server at the specified listen address
func (n *Node) startPprofServer() *http.Server {
srv := &http.Server{
Addr: n.config.RPC.PprofListenAddress,
@@ -688,7 +686,7 @@ func (n *Node) startPprofServer() *http.Server {
go func() {
if err := srv.ListenAndServe(); err != http.ErrServerClosed {
// Error starting or closing listener:
n.Logger.Error("pprof HTTP server ListenAndServe", "err", err)
n.Logger.Error("Prometheus HTTP server ListenAndServe", "err", err)
}
}()
return srv
@@ -799,7 +797,7 @@ func makeNodeInfo(
Network: genDoc.ChainID,
Version: version.TMCoreSemVer,
Channels: []byte{
bc.BlocksyncChannel,
blocksync.BlocksyncChannel,
cs.StateChannel, cs.DataChannel, cs.VoteChannel, cs.VoteSetBitsChannel,
mempl.MempoolChannel,
evidence.EvidenceChannel,


@@ -53,7 +53,7 @@ func TestNodeStartStop(t *testing.T) {
require.NoError(t, err)
select {
case <-blocksSub.Out():
case <-blocksSub.Canceled():
case <-blocksSub.Cancelled():
t.Fatal("blocksSub was canceled")
case <-time.After(10 * time.Second):
t.Fatal("timed out waiting for the node to produce a block")
@@ -123,6 +123,8 @@ func TestNodeSetAppVersion(t *testing.T) {
// create & start node
n, err := DefaultNewNode(config, log.TestingLogger())
require.NoError(t, err)
require.NoError(t, n.Start())
defer n.Stop() //nolint:errcheck
// default config uses the kvstore app
var appVersion = kvstore.ProtocolVersion
@@ -189,6 +191,8 @@ func TestNodeSetPrivValTCP(t *testing.T) {
n, err := DefaultNewNode(config, log.TestingLogger())
require.NoError(t, err)
require.NoError(t, n.Start())
defer n.Stop() //nolint:errcheck
assert.IsType(t, &privval.RetrySignerClient{}, n.PrivValidator())
}
@@ -201,7 +205,7 @@ func TestPrivValidatorListenAddrNoProtocol(t *testing.T) {
config.BaseConfig.PrivValidatorListenAddr = addrNoPrefix
_, err := DefaultNewNode(config, log.TestingLogger())
assert.Error(t, err)
require.NoError(t, err)
}
func TestNodeSetPrivValIPC(t *testing.T) {
@@ -451,7 +455,6 @@ func TestNodeNewNodeCustomReactors(t *testing.T) {
RecvMessageCapacity: 100,
},
}
customBlocksyncReactor := p2pmock.NewReactor()
nodeKey, err := p2p.LoadOrGenNodeKey(config.NodeKeyFile())
require.NoError(t, err)
@@ -461,10 +464,10 @@ func TestNodeNewNodeCustomReactors(t *testing.T) {
nodeKey,
proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()),
DefaultGenesisDocProviderFunc(config),
cfg.DefaultDBProvider,
DefaultDBProvider,
DefaultMetricsProvider(config.Instrumentation),
log.TestingLogger(),
CustomReactors(map[string]p2p.Reactor{"FOO": cr, "BLOCKSYNC": customBlocksyncReactor}),
CustomReactors(map[string]p2p.Reactor{"FOO": cr}),
)
require.NoError(t, err)
@@ -475,9 +478,6 @@ func TestNodeNewNodeCustomReactors(t *testing.T) {
assert.True(t, cr.IsRunning())
assert.Equal(t, cr, n.Switch().Reactor("FOO"))
assert.True(t, customBlocksyncReactor.IsRunning())
assert.Equal(t, customBlocksyncReactor, n.Switch().Reactor("BLOCKSYNC"))
channels := n.NodeInfo().(p2p.DefaultNodeInfo).Channels
assert.Contains(t, channels, mempl.MempoolChannel)
assert.Contains(t, channels, cr.Channels[0].ID)


@@ -7,8 +7,6 @@ import (
"fmt"
"net"
"strings"
_ "net/http/pprof" //nolint: gosec // securely exposed on separate, optional port
"time"
dbm "github.com/tendermint/tm-db"
@@ -19,7 +17,6 @@ import (
cs "github.com/tendermint/tendermint/consensus"
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/evidence"
"github.com/tendermint/tendermint/statesync"
tmjson "github.com/tendermint/tendermint/libs/json"
"github.com/tendermint/tendermint/libs/log"
@@ -33,8 +30,13 @@ import (
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/state/indexer"
"github.com/tendermint/tendermint/state/indexer/block"
blockidxkv "github.com/tendermint/tendermint/state/indexer/block/kv"
blockidxnull "github.com/tendermint/tendermint/state/indexer/block/null"
"github.com/tendermint/tendermint/state/indexer/sink/psql"
"github.com/tendermint/tendermint/state/txindex"
"github.com/tendermint/tendermint/state/txindex/kv"
"github.com/tendermint/tendermint/state/txindex/null"
"github.com/tendermint/tendermint/statesync"
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tendermint/version"
@@ -42,8 +44,24 @@ import (
_ "github.com/lib/pq" // provide the psql db driver
)
// DBContext specifies config information for loading a new DB.
type DBContext struct {
ID string
Config *cfg.Config
}
// DBProvider takes a DBContext and returns an instantiated DB.
type DBProvider func(*DBContext) (dbm.DB, error)
const readHeaderTimeout = 10 * time.Second
// DefaultDBProvider returns a database using the DBBackend and DBDir
// specified in the ctx.Config.
func DefaultDBProvider(ctx *DBContext) (dbm.DB, error) {
dbType := dbm.BackendType(ctx.Config.DBBackend)
return dbm.NewDB(ctx.ID, dbType, ctx.Config.DBDir())
}
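A minimal sketch of an alternative provider, assuming dbm.NewMemDB from the tm-db package imported above; handy for tests that should not touch disk:

// memDBProvider is a hypothetical DBProvider that ignores the DBContext and
// returns a fresh in-memory database.
func memDBProvider(_ *DBContext) (dbm.DB, error) {
	return dbm.NewMemDB(), nil
}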
// GenesisDocProvider returns a GenesisDoc.
// It allows the GenesisDoc to be pulled from sources other than the
// filesystem, for instance from a distributed key-value store cluster.
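A minimal sketch of a provider built on that idea, assuming GenesisDocProvider is the func() (*types.GenesisDoc, error) signature consumed by NewNode; it serves a document that was already fetched from elsewhere:

// staticGenesisDocProvider is a hypothetical GenesisDocProvider that returns a
// genesis document held in memory, e.g. one retrieved from a remote store.
func staticGenesisDocProvider(genDoc *types.GenesisDoc) GenesisDocProvider {
	return func() (*types.GenesisDoc, error) { return genDoc, nil }
}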
@@ -74,7 +92,7 @@ func DefaultNewNode(config *cfg.Config, logger log.Logger) (*Node, error) {
nodeKey,
proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()),
DefaultGenesisDocProviderFunc(config),
cfg.DefaultDBProvider,
DefaultDBProvider,
DefaultMetricsProvider(config.Instrumentation),
logger,
)
@@ -106,15 +124,15 @@ type blockSyncReactor interface {
//------------------------------------------------------------------------------
func initDBs(config *cfg.Config, dbProvider cfg.DBProvider) (blockStore *store.BlockStore, stateDB dbm.DB, err error) {
func initDBs(config *cfg.Config, dbProvider DBProvider) (blockStore *store.BlockStore, stateDB dbm.DB, err error) {
var blockStoreDB dbm.DB
blockStoreDB, err = dbProvider(&cfg.DBContext{ID: "blockstore", Config: config})
blockStoreDB, err = dbProvider(&DBContext{"blockstore", config})
if err != nil {
return
}
blockStore = store.NewBlockStore(blockStoreDB)
stateDB, err = dbProvider(&cfg.DBContext{ID: "state", Config: config})
stateDB, err = dbProvider(&DBContext{"state", config})
if err != nil {
return
}
@@ -122,7 +140,7 @@ func initDBs(config *cfg.Config, dbProvider cfg.DBProvider) (blockStore *store.B
return
}
func createAndStartProxyAppConns(clientCreator proxy.ClientCreator, logger log.Logger, metrics *proxy.Metrics) (proxy.AppConns, error) {
func createProxyAppConns(clientCreator proxy.ClientCreator, logger log.Logger, metrics *proxy.Metrics) (proxy.AppConns, error) {
proxyApp := proxy.NewAppConns(clientCreator, metrics)
proxyApp.SetLogger(logger.With("module", "proxy"))
if err := proxyApp.Start(); err != nil {
@@ -131,19 +149,16 @@ func createAndStartProxyAppConns(clientCreator proxy.ClientCreator, logger log.L
return proxyApp, nil
}
func createAndStartEventBus(logger log.Logger) (*types.EventBus, error) {
func createEventBus(logger log.Logger) *types.EventBus {
eventBus := types.NewEventBus()
eventBus.SetLogger(logger.With("module", "events"))
if err := eventBus.Start(); err != nil {
return nil, err
}
return eventBus, nil
return eventBus
}
func createAndStartIndexerService(
func createIndexerService(
config *cfg.Config,
chainID string,
dbProvider cfg.DBProvider,
dbProvider DBProvider,
eventBus *types.EventBus,
logger log.Logger,
) (*txindex.IndexerService, txindex.TxIndexer, indexer.BlockIndexer, error) {
@@ -151,18 +166,36 @@ func createAndStartIndexerService(
txIndexer txindex.TxIndexer
blockIndexer indexer.BlockIndexer
)
txIndexer, blockIndexer, err := block.IndexerFromConfig(config, dbProvider, chainID)
if err != nil {
return nil, nil, nil, err
switch config.TxIndex.Indexer {
case "kv":
store, err := dbProvider(&DBContext{"tx_index", config})
if err != nil {
return nil, nil, nil, err
}
txIndexer = kv.NewTxIndex(store)
blockIndexer = blockidxkv.New(dbm.NewPrefixDB(store, []byte("block_events")))
case "psql":
if config.TxIndex.PsqlConn == "" {
return nil, nil, nil, errors.New(`no psql-conn is set for the "psql" indexer`)
}
es, err := psql.NewEventSink(config.TxIndex.PsqlConn, chainID)
if err != nil {
return nil, nil, nil, fmt.Errorf("creating psql indexer: %w", err)
}
txIndexer = es.TxIndexer()
blockIndexer = es.BlockIndexer()
default:
txIndexer = &null.TxIndex{}
blockIndexer = &blockidxnull.BlockerIndexer{}
}
indexerService := txindex.NewIndexerService(txIndexer, blockIndexer, eventBus, false)
indexerService.SetLogger(logger.With("module", "txindex"))
if err := indexerService.Start(); err != nil {
return nil, nil, nil, err
}
return indexerService, txIndexer, blockIndexer, nil
}
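A minimal configuration sketch for the indexer selection performed by the switch above, assuming cfg.DefaultConfig and the TxIndex fields referenced in the code:

config := cfg.DefaultConfig()
config.TxIndex.Indexer = "kv" // key/value indexer backed by the node's own DB
// config.TxIndex.Indexer = "psql" // requires config.TxIndex.PsqlConn to be set
// config.TxIndex.Indexer = "null" // disables tx and block indexing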
@@ -184,7 +217,7 @@ func doHandshake(
return nil
}
func logNodeStartupInfo(state sm.State, pubKey crypto.PubKey, logger, consensusLogger log.Logger) {
func logNodeStartupInfo(state sm.State, pubKey crypto.PubKey, logger log.Logger) {
// Log the version info.
logger.Info("Version info",
"tendermint_version", version.TMCoreSemVer,
@@ -205,9 +238,9 @@ func logNodeStartupInfo(state sm.State, pubKey crypto.PubKey, logger, consensusL
addr := pubKey.Address()
// Log whether this node is a validator or an observer
if state.Validators.HasAddress(addr) {
consensusLogger.Info("This node is a validator", "addr", addr, "pubKey", pubKey)
logger.Info("This node is a validator", "addr", addr, "pubKey", pubKey)
} else {
consensusLogger.Info("This node is not a validator", "addr", addr, "pubKey", pubKey)
logger.Info("This node is not a validator", "addr", addr, "pubKey", pubKey)
}
}
@@ -275,10 +308,10 @@ func createMempoolAndMempoolReactor(
}
}
func createEvidenceReactor(config *cfg.Config, dbProvider cfg.DBProvider,
func createEvidenceReactor(config *cfg.Config, dbProvider DBProvider,
stateStore sm.Store, blockStore *store.BlockStore, logger log.Logger,
) (*evidence.Reactor, *evidence.Pool, error) {
evidenceDB, err := dbProvider(&cfg.DBContext{ID: "evidence", Config: config})
evidenceDB, err := dbProvider(&DBContext{"evidence", config})
if err != nil {
return nil, nil, err
}
@@ -296,13 +329,12 @@ func createBlocksyncReactor(config *cfg.Config,
state sm.State,
blockExec *sm.BlockExecutor,
blockStore *store.BlockStore,
blockSync bool,
logger log.Logger,
metrics *blocksync.Metrics,
) (bcReactor p2p.Reactor, err error) {
logger log.Logger,
) (bcReactor *blocksync.Reactor, err error) {
switch config.BlockSync.Version {
case "v0":
bcReactor = blocksync.NewReactor(state.Copy(), blockExec, blockStore, blockSync, metrics)
bcReactor = blocksync.NewReactor(state.Copy(), blockExec, blockStore, metrics)
case "v1", "v2":
return nil, fmt.Errorf("block sync version %s has been deprecated. Please use v0", config.BlockSync.Version)
default:
@@ -314,31 +346,28 @@ func createBlocksyncReactor(config *cfg.Config,
}
func createConsensusReactor(config *cfg.Config,
state sm.State,
blockExec *sm.BlockExecutor,
blockStore sm.BlockStore,
mempool mempl.Mempool,
evidencePool *evidence.Pool,
privValidator types.PrivValidator,
csMetrics *cs.Metrics,
waitSync bool,
eventBus *types.EventBus,
consensusLogger log.Logger,
) (*cs.Reactor, *cs.State) {
consensusState := cs.NewState(
config.Consensus,
state.Copy(),
blockExec,
blockStore,
mempool,
evidencePool,
cs.StateMetrics(csMetrics),
cs.WithMetrics(csMetrics),
)
consensusState.SetLogger(consensusLogger)
if privValidator != nil {
consensusState.SetPrivValidator(privValidator)
}
consensusReactor := cs.NewReactor(consensusState, waitSync, cs.ReactorMetrics(csMetrics))
consensusReactor := cs.NewReactor(consensusState, cs.ReactorMetrics(csMetrics))
consensusReactor.SetLogger(consensusLogger)
// services which will be publishing and/or subscribing for messages (events)
// consensusReactor will set it on consensusState and blockExecutor
@@ -350,6 +379,7 @@ func createTransport(
config *cfg.Config,
nodeInfo p2p.NodeInfo,
nodeKey *p2p.NodeKey,
netAddr *p2p.NetAddress,
proxyApp proxy.AppConns,
) (
*p2p.MultiplexTransport,
@@ -357,7 +387,7 @@ func createTransport(
) {
var (
mConnConfig = p2p.MConnConfig(config.P2P)
transport = p2p.NewMultiplexTransport(nodeInfo, *nodeKey, mConnConfig)
transport = p2p.NewMultiplexTransport(nodeInfo, *nodeKey, *netAddr, mConnConfig)
connFilters = []p2p.ConnFilterFunc{}
peerFilters = []p2p.PeerFilterFunc{}
)
@@ -416,23 +446,17 @@ func createTransport(
}
func createSwitch(config *cfg.Config,
transport p2p.Transport,
p2pMetrics *p2p.Metrics,
peerFilters []p2p.PeerFilterFunc,
mempoolReactor p2p.Reactor,
bcReactor p2p.Reactor,
stateSyncReactor *statesync.Reactor,
consensusReactor *cs.Reactor,
evidenceReactor *evidence.Reactor,
nodeInfo p2p.NodeInfo,
nodeKey *p2p.NodeKey,
p2pLogger log.Logger,
) *p2p.Switch {
sw := p2p.NewSwitch(
config.P2P,
transport,
p2p.WithMetrics(p2pMetrics),
p2p.SwitchPeerFilters(peerFilters...),
)
sw.SetLogger(p2pLogger)
sw.AddReactor("MEMPOOL", mempoolReactor)
@@ -440,15 +464,10 @@ func createSwitch(config *cfg.Config,
sw.AddReactor("CONSENSUS", consensusReactor)
sw.AddReactor("EVIDENCE", evidenceReactor)
sw.AddReactor("STATESYNC", stateSyncReactor)
sw.SetNodeInfo(nodeInfo)
sw.SetNodeKey(nodeKey)
p2pLogger.Info("P2P Node ID", "ID", nodeKey.ID(), "file", config.NodeKeyFile())
return sw
}
func createAddrBookAndSetOnSwitch(config *cfg.Config, sw *p2p.Switch,
func createAddrBook(config *cfg.Config,
p2pLogger log.Logger, nodeKey *p2p.NodeKey,
) (pex.AddrBook, error) {
addrBook := pex.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict)
@@ -469,15 +488,10 @@ func createAddrBookAndSetOnSwitch(config *cfg.Config, sw *p2p.Switch,
}
addrBook.AddOurAddress(addr)
}
sw.SetAddrBook(addrBook)
return addrBook, nil
}
func createPEXReactorAndAddToSwitch(addrBook pex.AddrBook, config *cfg.Config,
sw *p2p.Switch, logger log.Logger,
) *pex.Reactor {
func createPEXReactor(addrBook pex.AddrBook, config *cfg.Config, logger log.Logger) *pex.Reactor {
// TODO persistent peers ? so we can have their DNS addrs saved
pexReactor := pex.NewReactor(addrBook,
&pex.ReactorConfig{
@@ -492,7 +506,6 @@ func createPEXReactorAndAddToSwitch(addrBook pex.AddrBook, config *cfg.Config,
PersistentPeersMaxDialPeriod: config.P2P.PersistentPeersMaxDialPeriod,
})
pexReactor.SetLogger(logger.With("module", "pex"))
sw.AddReactor("PEX", pexReactor)
return pexReactor
}
@@ -544,7 +557,11 @@ func startStateSync(ssR *statesync.Reactor, bcR blockSyncReactor, conR *cs.React
return
}
} else {
conR.SwitchToConsensus(state, true)
err := conR.SwitchToConsensus(state, true)
if err != nil {
ssR.Logger.Error("Failed to switch to consensus", "err", err)
return
}
}
}()
return nil
@@ -619,7 +636,7 @@ func saveGenesisDoc(db dbm.DB, genDoc *types.GenesisDoc) error {
return nil
}
func createAndStartPrivValidatorSocketClient(
func createPrivValidatorSocketClient(
listenAddr,
chainID string,
logger log.Logger,
@@ -634,12 +651,6 @@ func createAndStartPrivValidatorSocketClient(
return nil, fmt.Errorf("failed to start private validator: %w", err)
}
// try to get a pubkey from private validate first time
_, err = pvsc.GetPubKey()
if err != nil {
return nil, fmt.Errorf("can't get pubkey: %w", err)
}
const (
retries = 50 // 50 * 100ms = 5s total
timeout = 100 * time.Millisecond


@@ -1,13 +1,13 @@
package p2p
import (
"errors"
"fmt"
"math"
"sync"
"time"
"github.com/cosmos/gogoproto/proto"
"github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/libs/cmap"
"github.com/tendermint/tendermint/libs/rand"
@@ -108,7 +108,6 @@ type SwitchOption func(*Switch)
// NewSwitch creates a new Switch with the given config.
func NewSwitch(
cfg *config.P2PConfig,
transport Transport,
options ...SwitchOption,
) *Switch {
@@ -122,7 +121,6 @@ func NewSwitch(
dialing: cmap.NewCMap(),
reconnecting: cmap.NewCMap(),
metrics: NopMetrics(),
transport: transport,
filterTimeout: defaultFilterTimeout,
persistentPeersAddrs: make([]*NetAddress, 0),
unconditionalPeerIDs: make(map[ID]struct{}),
@@ -225,11 +223,30 @@ func (sw *Switch) SetNodeKey(nodeKey *NodeKey) {
sw.nodeKey = nodeKey
}
func (sw *Switch) SetPeerFilters(filters ...PeerFilterFunc) {
sw.peerFilters = filters
}
func (sw *Switch) SetTransport(transport Transport) {
if sw.IsRunning() {
panic("cannot set transport while switch is running")
}
sw.transport = transport
}
//---------------------------------------------------------------------
// Service start/stop
// OnStart implements BaseService. It starts all the reactors and peers.
func (sw *Switch) OnStart() error {
if sw.transport == nil {
return errors.New("transport not set")
}
if err := sw.transport.Start(); err != nil {
return err
}
// Start reactors
for _, reactor := range sw.reactors {
err := reactor.Start()
@@ -258,6 +275,10 @@ func (sw *Switch) OnStop() {
sw.Logger.Error("error while stopped reactor", "reactor", reactor, "error", err)
}
}
if err := sw.transport.Stop(); err != nil {
sw.Logger.Error("closing transport", "err", err)
}
}
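A minimal wiring sketch under the new lifecycle, assuming cfg, metrics and transport are prepared by the caller (as node/setup.go does in createTransport and createSwitch); the transport must be set before Start, which now starts and later stops it together with the switch:

sw := NewSwitch(cfg, WithMetrics(metrics)) // the transport is no longer a constructor argument
sw.SetTransport(transport)                 // must happen before Start; panics on a running switch
if err := sw.Start(); err != nil {
	// OnStart reports "transport not set" if SetTransport was never called.
	sw.Logger.Error("switch failed to start", "err", err)
}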
//---------------------------------------------------------------------
@@ -727,7 +748,7 @@ func (sw *Switch) addOutboundPeerWithConfig(
addr *NetAddress,
cfg *config.P2PConfig,
) error {
sw.Logger.Debug("Dialing peer", "address", addr)
sw.Logger.Info("Dialing peer", "address", addr)
// XXX(xla): Remove the leakage of test concerns in implementation.
if cfg.TestDialFail {
@@ -855,7 +876,7 @@ func (sw *Switch) addPeer(p Peer) error {
reactor.AddPeer(p)
}
sw.Logger.Debug("Added peer", "peer", p)
sw.Logger.Info("Added peer", "peer", p)
return nil
}


@@ -22,6 +22,7 @@ import (
"github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/crypto/ed25519"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/service"
tmsync "github.com/tendermint/tendermint/libs/sync"
"github.com/tendermint/tendermint/p2p/conn"
p2pproto "github.com/tendermint/tendermint/proto/tendermint/p2p"
@@ -695,9 +696,16 @@ func TestSwitchAcceptRoutine(t *testing.T) {
}
type errorTransport struct {
service.BaseService
acceptErr error
}
func newErrTransport(acceptErr error) *errorTransport {
t := &errorTransport{acceptErr: acceptErr}
t.BaseService = *service.NewBaseService(nil, "Error Transport", t)
return t
}
func (et errorTransport) NetAddress() NetAddress {
panic("not implemented")
}
@@ -713,7 +721,9 @@ func (errorTransport) Cleanup(Peer) {
}
func TestSwitchAcceptRoutineErrorCases(t *testing.T) {
sw := NewSwitch(cfg, errorTransport{ErrFilterTimeout{}})
sw := NewSwitch(cfg)
sw.SetTransport(newErrTransport(ErrFilterTimeout{}))
assert.NotPanics(t, func() {
err := sw.Start()
require.NoError(t, err)
@@ -721,7 +731,8 @@ func TestSwitchAcceptRoutineErrorCases(t *testing.T) {
require.NoError(t, err)
})
sw = NewSwitch(cfg, errorTransport{ErrRejected{conn: nil, err: errors.New("filtered"), isFiltered: true}})
sw = NewSwitch(cfg)
sw.SetTransport(newErrTransport(ErrRejected{conn: nil, err: errors.New("filtered"), isFiltered: true}))
assert.NotPanics(t, func() {
err := sw.Start()
require.NoError(t, err)
@@ -730,7 +741,8 @@ func TestSwitchAcceptRoutineErrorCases(t *testing.T) {
})
// TODO(melekes) check we remove our address from addrBook
sw = NewSwitch(cfg, errorTransport{ErrTransportClosed{}})
sw = NewSwitch(cfg)
sw.SetTransport(newErrTransport(ErrTransportClosed{}))
assert.NotPanics(t, func() {
err := sw.Start()
require.NoError(t, err)


@@ -194,14 +194,11 @@ func MakeSwitch(
panic(err)
}
t := NewMultiplexTransport(nodeInfo, nodeKey, MConnConfig(cfg))
if err := t.Listen(*addr); err != nil {
panic(err)
}
t := NewMultiplexTransport(nodeInfo, nodeKey, *addr, MConnConfig(cfg))
// TODO: let the config be passed in?
sw := initSwitch(i, NewSwitch(cfg, t, opts...))
sw := initSwitch(i, NewSwitch(cfg, opts...))
sw.SetTransport(t)
sw.SetLogger(log.TestingLogger().With("switch", i))
sw.SetNodeKey(&nodeKey)


@@ -9,9 +9,9 @@ import (
"golang.org/x/net/netutil"
"github.com/cosmos/gogoproto/proto"
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/libs/protoio"
"github.com/tendermint/tendermint/libs/service"
"github.com/tendermint/tendermint/p2p/conn"
tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p"
)
@@ -60,6 +60,8 @@ type peerConfig struct {
// the transport. Each transport is also responsible to filter establishing
// peers specific to its domain.
type Transport interface {
service.Service
// Listening address.
NetAddress() NetAddress
@@ -73,13 +75,6 @@ type Transport interface {
Cleanup(Peer)
}
// transportLifecycle bundles the methods for callers to control start and stop
// behavior.
type transportLifecycle interface {
Close() error
Listen(NetAddress) error
}
// ConnFilterFunc to be implemented by filter hooks after a new connection has
// been established. The set of existing connections is passed along together
// with all resolved IPs for the new connection.
@@ -137,6 +132,8 @@ func MultiplexTransportMaxIncomingConnections(n int) MultiplexTransportOption {
// MultiplexTransport accepts and dials tcp connections and upgrades them to
// multiplexed peers.
type MultiplexTransport struct {
service.BaseService
netAddr NetAddress
listener net.Listener
maxIncomingConnections int // see MaxIncomingConnections
@@ -163,15 +160,15 @@ type MultiplexTransport struct {
// Test multiplexTransport for interface completeness.
var _ Transport = (*MultiplexTransport)(nil)
var _ transportLifecycle = (*MultiplexTransport)(nil)
// NewMultiplexTransport returns a tcp connected multiplexed peer.
func NewMultiplexTransport(
nodeInfo NodeInfo,
nodeKey NodeKey,
netAddr NetAddress,
mConfig conn.MConnConfig,
) *MultiplexTransport {
return &MultiplexTransport{
t := &MultiplexTransport{
acceptc: make(chan accept),
closec: make(chan struct{}),
dialTimeout: defaultDialTimeout,
@@ -180,9 +177,12 @@ func NewMultiplexTransport(
mConfig: mConfig,
nodeInfo: nodeInfo,
nodeKey: nodeKey,
netAddr: netAddr,
conns: NewConnSet(),
resolver: net.DefaultResolver,
}
t.BaseService = *service.NewBaseService(nil, "P2P Transport", t)
return t
}
// NetAddress implements Transport.
@@ -235,20 +235,20 @@ func (mt *MultiplexTransport) Dial(
return p, nil
}
// Close implements transportLifecycle.
func (mt *MultiplexTransport) Close() error {
// OnStop implements Service.
func (mt *MultiplexTransport) OnStop() {
close(mt.closec)
if mt.listener != nil {
return mt.listener.Close()
if err := mt.listener.Close(); err != nil {
mt.Logger.Error("closing listener", "err", err)
}
}
return nil
}
// Listen implements transportLifecycle.
func (mt *MultiplexTransport) Listen(addr NetAddress) error {
ln, err := net.Listen("tcp", addr.DialString())
// OnStart implements Service.
func (mt *MultiplexTransport) OnStart() error {
ln, err := net.Listen("tcp", mt.netAddr.DialString())
if err != nil {
return err
}
@@ -257,7 +257,6 @@ func (mt *MultiplexTransport) Listen(addr NetAddress) error {
ln = netutil.LimitListener(ln, mt.maxIncomingConnections)
}
mt.netAddr = addr
mt.listener = ln
go mt.acceptPeers()
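A minimal construction sketch under the new lifecycle, assuming nodeInfo, nodeKey and netAddr are prepared by the caller; the listen address now goes into the constructor and the listener is opened by Start rather than by Listen:

mt := NewMultiplexTransport(nodeInfo, nodeKey, *netAddr, conn.DefaultMConnConfig())
if err := mt.Start(); err != nil {
	// the TCP listener could not be opened on netAddr
	mt.Logger.Error("failed to start transport", "addr", netAddr, "err", err)
}
defer mt.Stop() //nolint:errcheck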


@@ -10,6 +10,7 @@ import (
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/crypto/ed25519"
"github.com/tendermint/tendermint/libs/protoio"
"github.com/tendermint/tendermint/p2p/conn"
@@ -28,20 +29,26 @@ func emptyNodeInfo() NodeInfo {
func newMultiplexTransport(
nodeInfo NodeInfo,
nodeKey NodeKey,
netAddr NetAddress,
) *MultiplexTransport {
return NewMultiplexTransport(
nodeInfo, nodeKey, conn.DefaultMConnConfig(),
nodeInfo, nodeKey, netAddr, conn.DefaultMConnConfig(),
)
}
func TestTransportMultiplexConnFilter(t *testing.T) {
nodeKey := NodeKey{
PrivKey: ed25519.GenPrivKey(),
}
id := nodeKey.ID()
addr, err := NewNetAddressString(IDAddressString(id, "127.0.0.1:0"))
require.NoError(t, err)
mt := newMultiplexTransport(
emptyNodeInfo(),
NodeKey{
PrivKey: ed25519.GenPrivKey(),
},
nodeKey,
*addr,
)
id := mt.nodeKey.ID()
MultiplexTransportConnFilters(
func(_ ConnSet, _ net.Conn, _ []net.IP) error { return nil },
@@ -51,14 +58,8 @@ func TestTransportMultiplexConnFilter(t *testing.T) {
},
)(mt)
addr, err := NewNetAddressString(IDAddressString(id, "127.0.0.1:0"))
if err != nil {
t.Fatal(err)
}
if err := mt.Listen(*addr); err != nil {
t.Fatal(err)
}
err = mt.Start()
require.NoError(t, err)
errc := make(chan error)
@@ -89,13 +90,18 @@ func TestTransportMultiplexConnFilter(t *testing.T) {
}
func TestTransportMultiplexConnFilterTimeout(t *testing.T) {
nodeKey := NodeKey{
PrivKey: ed25519.GenPrivKey(),
}
id := nodeKey.ID()
addr, err := NewNetAddressString(IDAddressString(id, "127.0.0.1:0"))
require.NoError(t, err)
mt := newMultiplexTransport(
emptyNodeInfo(),
NodeKey{
PrivKey: ed25519.GenPrivKey(),
},
nodeKey,
*addr,
)
id := mt.nodeKey.ID()
MultiplexTransportFilterTimeout(5 * time.Millisecond)(mt)
MultiplexTransportConnFilters(
@@ -105,14 +111,9 @@ func TestTransportMultiplexConnFilterTimeout(t *testing.T) {
},
)(mt)
addr, err := NewNetAddressString(IDAddressString(id, "127.0.0.1:0"))
if err != nil {
t.Fatal(err)
}
if err := mt.Listen(*addr); err != nil {
t.Fatal(err)
}
err = mt.Start()
require.NoError(t, err)
defer mt.Stop() //nolint:errcheck
errc := make(chan error)
go func() {
@@ -140,6 +141,10 @@ func TestTransportMultiplexConnFilterTimeout(t *testing.T) {
func TestTransportMultiplexMaxIncomingConnections(t *testing.T) {
pv := ed25519.GenPrivKey()
id := PubKeyToID(pv.PubKey())
addr, err := NewNetAddressString(IDAddressString(id, "127.0.0.1:0"))
if err != nil {
t.Fatal(err)
}
mt := newMultiplexTransport(
testNodeInfo(
id, "transport",
@@ -147,26 +152,22 @@ func TestTransportMultiplexMaxIncomingConnections(t *testing.T) {
NodeKey{
PrivKey: pv,
},
*addr,
)
MultiplexTransportMaxIncomingConnections(0)(mt)
addr, err := NewNetAddressString(IDAddressString(id, "127.0.0.1:0"))
if err != nil {
t.Fatal(err)
}
const maxIncomingConns = 2
MultiplexTransportMaxIncomingConnections(maxIncomingConns)(mt)
if err := mt.Listen(*addr); err != nil {
t.Fatal(err)
}
err = mt.Start()
require.NoError(t, err)
laddr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr())
// Connect more peers than max
for i := 0; i <= maxIncomingConns; i++ {
errc := make(chan error)
go testDialer(*laddr, errc)
go testDialer(t, *laddr, errc)
err = <-errc
if i < maxIncomingConns {
@@ -198,7 +199,7 @@ func TestTransportMultiplexAcceptMultiple(t *testing.T) {
// Setup dialers.
for i := 0; i < nDialers; i++ {
go testDialer(*laddr, errc)
go testDialer(t, *laddr, errc)
}
// Catch connection errors.
@@ -235,23 +236,26 @@ func TestTransportMultiplexAcceptMultiple(t *testing.T) {
}
}
if err := mt.Close(); err != nil {
if err := mt.Stop(); err != nil {
t.Errorf("close errored: %v", err)
}
}
func testDialer(dialAddr NetAddress, errc chan error) {
var (
pv = ed25519.GenPrivKey()
dialer = newMultiplexTransport(
testNodeInfo(PubKeyToID(pv.PubKey()), defaultNodeName),
NodeKey{
PrivKey: pv,
},
)
func testDialer(t *testing.T, dialAddr NetAddress, errc chan error) {
pv := ed25519.GenPrivKey()
id := PubKeyToID(pv.PubKey())
addr, err := NewNetAddressString(IDAddressString(id, "127.0.0.1:0"))
require.NoError(t, err)
dialer := newMultiplexTransport(
testNodeInfo(PubKeyToID(pv.PubKey()), defaultNodeName),
NodeKey{
PrivKey: pv,
},
*addr,
)
_, err := dialer.Dial(dialAddr, peerConfig{})
_, err = dialer.Dial(dialAddr, peerConfig{})
if err != nil {
errc <- err
return
@@ -319,15 +323,14 @@ func TestTransportMultiplexAcceptNonBlocking(t *testing.T) {
go func() {
<-slowc
var (
dialer = newMultiplexTransport(
fastNodeInfo,
NodeKey{
PrivKey: fastNodePV,
},
)
)
addr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr())
dialer := newMultiplexTransport(
fastNodeInfo,
NodeKey{
PrivKey: fastNodePV,
},
*addr,
)
_, err := dialer.Dial(*addr, peerConfig{})
if err != nil {
@@ -360,17 +363,16 @@ func TestTransportMultiplexValidateNodeInfo(t *testing.T) {
errc := make(chan error)
go func() {
var (
pv = ed25519.GenPrivKey()
dialer = newMultiplexTransport(
testNodeInfo(PubKeyToID(pv.PubKey()), ""), // Should not be empty
NodeKey{
PrivKey: pv,
},
)
)
pv := ed25519.GenPrivKey()
addr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr())
dialer := newMultiplexTransport(
testNodeInfo(PubKeyToID(pv.PubKey()), ""), // Should not be empty
NodeKey{
PrivKey: pv,
},
*addr,
)
_, err := dialer.Dial(*addr, peerConfig{})
if err != nil {
@@ -401,6 +403,7 @@ func TestTransportMultiplexRejectMissmatchID(t *testing.T) {
errc := make(chan error)
go func() {
addr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr())
dialer := newMultiplexTransport(
testNodeInfo(
PubKeyToID(ed25519.GenPrivKey().PubKey()), "dialer",
@@ -408,8 +411,8 @@ func TestTransportMultiplexRejectMissmatchID(t *testing.T) {
NodeKey{
PrivKey: ed25519.GenPrivKey(),
},
*addr,
)
addr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr())
_, err := dialer.Dial(*addr, peerConfig{})
if err != nil {
@@ -437,18 +440,16 @@ func TestTransportMultiplexRejectMissmatchID(t *testing.T) {
func TestTransportMultiplexDialRejectWrongID(t *testing.T) {
mt := testSetupMultiplexTransport(t)
var (
pv = ed25519.GenPrivKey()
dialer = newMultiplexTransport(
testNodeInfo(PubKeyToID(pv.PubKey()), ""), // Should not be empty
NodeKey{
PrivKey: pv,
},
)
)
pv := ed25519.GenPrivKey()
wrongID := PubKeyToID(ed25519.GenPrivKey().PubKey())
addr := NewNetAddress(wrongID, mt.listener.Addr())
dialer := newMultiplexTransport(
testNodeInfo(PubKeyToID(pv.PubKey()), ""), // Should not be empty
NodeKey{
PrivKey: pv,
},
*addr,
)
_, err := dialer.Dial(*addr, peerConfig{})
if err != nil {
@@ -471,14 +472,15 @@ func TestTransportMultiplexRejectIncompatible(t *testing.T) {
go func() {
var (
pv = ed25519.GenPrivKey()
addr = NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr())
dialer = newMultiplexTransport(
testNodeInfoWithNetwork(PubKeyToID(pv.PubKey()), "dialer", "incompatible-network"),
NodeKey{
PrivKey: pv,
},
*addr,
)
)
addr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr())
_, err := dialer.Dial(*addr, peerConfig{})
if err != nil {
@@ -622,11 +624,16 @@ func TestTransportHandshake(t *testing.T) {
}
func TestTransportAddChannel(t *testing.T) {
pv := ed25519.GenPrivKey()
id := PubKeyToID(pv.PubKey())
addr, err := NewNetAddressString(IDAddressString(id, "127.0.0.1:0"))
require.NoError(t, err)
mt := newMultiplexTransport(
emptyNodeInfo(),
NodeKey{
PrivKey: ed25519.GenPrivKey(),
PrivKey: pv,
},
*addr,
)
testChannel := byte(0x01)
@@ -638,27 +645,27 @@ func TestTransportAddChannel(t *testing.T) {
// create listener
func testSetupMultiplexTransport(t *testing.T) *MultiplexTransport {
var (
pv = ed25519.GenPrivKey()
id = PubKeyToID(pv.PubKey())
mt = newMultiplexTransport(
testNodeInfo(
id, "transport",
),
NodeKey{
PrivKey: pv,
},
)
)
pv := ed25519.GenPrivKey()
id := PubKeyToID(pv.PubKey())
addr, err := NewNetAddressString(IDAddressString(id, "127.0.0.1:0"))
if err != nil {
t.Fatal(err)
}
mt := newMultiplexTransport(
testNodeInfo(
id, "transport",
),
NodeKey{
PrivKey: pv,
},
*addr,
)
if err := mt.Listen(*addr); err != nil {
t.Fatal(err)
}
err = mt.Start()
require.NoError(t, err)
t.Cleanup(func() {
mt.Stop() //nolint:errcheck
})
// give the listener some time to get ready
time.Sleep(20 * time.Millisecond)


@@ -2,7 +2,6 @@ package p2p
import (
"github.com/cosmos/gogoproto/proto"
"github.com/tendermint/tendermint/p2p/conn"
tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p"
)


@@ -4,7 +4,6 @@ import (
"fmt"
"github.com/cosmos/gogoproto/proto"
"github.com/tendermint/tendermint/p2p"
)


@@ -4,7 +4,6 @@ import (
"fmt"
"github.com/cosmos/gogoproto/proto"
"github.com/tendermint/tendermint/p2p"
)


@@ -4,7 +4,6 @@ import (
"fmt"
"github.com/cosmos/gogoproto/proto"
"github.com/tendermint/tendermint/p2p"
)


@@ -4,7 +4,6 @@ import (
"fmt"
"github.com/cosmos/gogoproto/proto"
"github.com/tendermint/tendermint/p2p"
)


@@ -39,26 +39,6 @@ func (l *localClientCreator) NewABCIClient() (abcicli.Client, error) {
return abcicli.NewLocalClient(l.mtx, l.app), nil
}
//---------------------------------------------------------------
// unsynchronized local proxy on an in-proc app (no mutex)
type unsyncLocalClientCreator struct {
app types.Application
}
// NewUnsyncLocalClientCreator returns a ClientCreator for the given app, which
// will be running locally. Unlike NewLocalClientCreator, this leaves
// synchronization up to the application.
func NewUnsyncLocalClientCreator(app types.Application) ClientCreator {
return &unsyncLocalClientCreator{
app: app,
}
}
func (l *unsyncLocalClientCreator) NewABCIClient() (abcicli.Client, error) {
return abcicli.NewUnsyncLocalClient(l.app), nil
}
//---------------------------------------------------------------
// remote proxy opens new connections to an external app process
@@ -103,12 +83,6 @@ func DefaultClientCreator(addr, transport, dbDir string) ClientCreator {
panic(err)
}
return NewLocalClientCreator(app)
case "e2e_sync":
app, err := e2e.NewSyncApplication(e2e.DefaultConfig(dbDir))
if err != nil {
panic(err)
}
return NewUnsyncLocalClientCreator(app)
case "noop":
return NewLocalClientCreator(types.NewBaseApplication())
default:

release_notes.md Normal file

@@ -0,0 +1,4 @@
Tendermint Core v0.34.0 is the release that supports the Stargate upgrade.
For more information on how to upgrade to Tendermint 0.34, please see [UPGRADING.md](https://github.com/tendermint/tendermint/blob/release/v0.34.0/UPGRADING.md).
For a full list of user-facing changes, please see [CHANGELOG.md](https://github.com/tendermint/tendermint/blob/release/v0.34.0/CHANGELOG.md).


@@ -41,20 +41,22 @@ type Local struct {
*types.EventBus
Logger log.Logger
ctx *rpctypes.Context
env *core.Environment
}
// NewLocal configures a client that calls the Node directly.
//
// Note that, given how rpc/core works with package singletons,
// you can only have one node per process. So make sure test cases
// don't run in parallel, or try to simulate an entire network in
// one process...
func New(node *nm.Node) *Local {
env, err := node.ConfigureRPC()
if err != nil {
if err := node.ConfigureRPC(); err != nil {
node.Logger.Error("Error configuring RPC", "err", err)
}
return &Local{
EventBus: node.EventBus(),
Logger: log.NewNopLogger(),
ctx: &rpctypes.Context{},
env: env,
}
}
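A minimal usage sketch of the local client, assuming n is an already-started *nm.Node; calls are dispatched directly to the rpc/core package rather than going over HTTP:

c := New(n)
status, err := c.Status(context.Background())
if err != nil {
	c.Logger.Error("status call failed", "err", err)
}
_ = status // e.g. inspect status.SyncInfo.LatestBlockHeight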
@@ -66,11 +68,11 @@ func (c *Local) SetLogger(l log.Logger) {
}
func (c *Local) Status(ctx context.Context) (*ctypes.ResultStatus, error) {
return c.env.Status(c.ctx)
return core.Status(c.ctx)
}
func (c *Local) ABCIInfo(ctx context.Context) (*ctypes.ResultABCIInfo, error) {
return c.env.ABCIInfo(c.ctx)
return core.ABCIInfo(c.ctx)
}
func (c *Local) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*ctypes.ResultABCIQuery, error) {
@@ -82,55 +84,55 @@ func (c *Local) ABCIQueryWithOptions(
path string,
data bytes.HexBytes,
opts rpcclient.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) {
return c.env.ABCIQuery(c.ctx, path, data, opts.Height, opts.Prove)
return core.ABCIQuery(c.ctx, path, data, opts.Height, opts.Prove)
}
func (c *Local) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
return c.env.BroadcastTxCommit(c.ctx, tx)
return core.BroadcastTxCommit(c.ctx, tx)
}
func (c *Local) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
return c.env.BroadcastTxAsync(c.ctx, tx)
return core.BroadcastTxAsync(c.ctx, tx)
}
func (c *Local) BroadcastTxSync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
return c.env.BroadcastTxSync(c.ctx, tx)
return core.BroadcastTxSync(c.ctx, tx)
}
func (c *Local) UnconfirmedTxs(ctx context.Context, limit *int) (*ctypes.ResultUnconfirmedTxs, error) {
return c.env.UnconfirmedTxs(c.ctx, limit)
return core.UnconfirmedTxs(c.ctx, limit)
}
func (c *Local) NumUnconfirmedTxs(ctx context.Context) (*ctypes.ResultUnconfirmedTxs, error) {
return c.env.NumUnconfirmedTxs(c.ctx)
return core.NumUnconfirmedTxs(c.ctx)
}
func (c *Local) CheckTx(ctx context.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) {
return c.env.CheckTx(c.ctx, tx)
return core.CheckTx(c.ctx, tx)
}
func (c *Local) NetInfo(ctx context.Context) (*ctypes.ResultNetInfo, error) {
return c.env.NetInfo(c.ctx)
return core.NetInfo(c.ctx)
}
func (c *Local) DumpConsensusState(ctx context.Context) (*ctypes.ResultDumpConsensusState, error) {
return c.env.DumpConsensusState(c.ctx)
return core.DumpConsensusState(c.ctx)
}
func (c *Local) ConsensusState(ctx context.Context) (*ctypes.ResultConsensusState, error) {
return c.env.GetConsensusState(c.ctx)
return core.ConsensusState(c.ctx)
}
func (c *Local) ConsensusParams(ctx context.Context, height *int64) (*ctypes.ResultConsensusParams, error) {
return c.env.ConsensusParams(c.ctx, height)
return core.ConsensusParams(c.ctx, height)
}
func (c *Local) Health(ctx context.Context) (*ctypes.ResultHealth, error) {
return c.env.Health(c.ctx)
return core.Health(c.ctx)
}
func (c *Local) DialSeeds(ctx context.Context, seeds []string) (*ctypes.ResultDialSeeds, error) {
return c.env.UnsafeDialSeeds(c.ctx, seeds)
return core.UnsafeDialSeeds(c.ctx, seeds)
}
func (c *Local) DialPeers(
@@ -140,51 +142,51 @@ func (c *Local) DialPeers(
unconditional,
private bool,
) (*ctypes.ResultDialPeers, error) {
return c.env.UnsafeDialPeers(c.ctx, peers, persistent, unconditional, private)
return core.UnsafeDialPeers(c.ctx, peers, persistent, unconditional, private)
}
func (c *Local) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) {
return c.env.BlockchainInfo(c.ctx, minHeight, maxHeight)
return core.BlockchainInfo(c.ctx, minHeight, maxHeight)
}
func (c *Local) Genesis(ctx context.Context) (*ctypes.ResultGenesis, error) {
return c.env.Genesis(c.ctx)
return core.Genesis(c.ctx)
}
func (c *Local) GenesisChunked(ctx context.Context, id uint) (*ctypes.ResultGenesisChunk, error) {
return c.env.GenesisChunked(c.ctx, id)
return core.GenesisChunked(c.ctx, id)
}
func (c *Local) Block(ctx context.Context, height *int64) (*ctypes.ResultBlock, error) {
return c.env.Block(c.ctx, height)
return core.Block(c.ctx, height)
}
func (c *Local) BlockByHash(ctx context.Context, hash []byte) (*ctypes.ResultBlock, error) {
return c.env.BlockByHash(c.ctx, hash)
return core.BlockByHash(c.ctx, hash)
}
func (c *Local) BlockResults(ctx context.Context, height *int64) (*ctypes.ResultBlockResults, error) {
return c.env.BlockResults(c.ctx, height)
return core.BlockResults(c.ctx, height)
}
func (c *Local) Header(ctx context.Context, height *int64) (*ctypes.ResultHeader, error) {
return c.env.Header(c.ctx, height)
return core.Header(c.ctx, height)
}
func (c *Local) HeaderByHash(ctx context.Context, hash bytes.HexBytes) (*ctypes.ResultHeader, error) {
return c.env.HeaderByHash(c.ctx, hash)
return core.HeaderByHash(c.ctx, hash)
}
func (c *Local) Commit(ctx context.Context, height *int64) (*ctypes.ResultCommit, error) {
return c.env.Commit(c.ctx, height)
return core.Commit(c.ctx, height)
}
func (c *Local) Validators(ctx context.Context, height *int64, page, perPage *int) (*ctypes.ResultValidators, error) {
return c.env.Validators(c.ctx, height, page, perPage)
return core.Validators(c.ctx, height, page, perPage)
}
func (c *Local) Tx(ctx context.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) {
return c.env.Tx(c.ctx, hash, prove)
return core.Tx(c.ctx, hash, prove)
}
func (c *Local) TxSearch(
@@ -195,7 +197,7 @@ func (c *Local) TxSearch(
perPage *int,
orderBy string,
) (*ctypes.ResultTxSearch, error) {
return c.env.TxSearch(c.ctx, query, prove, page, perPage, orderBy)
return core.TxSearch(c.ctx, query, prove, page, perPage, orderBy)
}
func (c *Local) BlockSearch(
@@ -204,11 +206,11 @@ func (c *Local) BlockSearch(
page, perPage *int,
orderBy string,
) (*ctypes.ResultBlockSearch, error) {
return c.env.BlockSearch(c.ctx, query, page, perPage, orderBy)
return core.BlockSearch(c.ctx, query, page, perPage, orderBy)
}
func (c *Local) BroadcastEvidence(ctx context.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) {
return c.env.BroadcastEvidence(c.ctx, ev)
return core.BroadcastEvidence(c.ctx, ev)
}
func (c *Local) Subscribe(
@@ -260,7 +262,7 @@ func (c *Local) eventsRoutine(
c.Logger.Error("wanted to publish ResultEvent, but out channel is full", "result", result, "query", result.Query)
}
}
case <-sub.Canceled():
case <-sub.Cancelled():
if sub.Err() == tmpubsub.ErrUnsubscribed {
return
}


@@ -28,6 +28,10 @@ import (
)
// Client wraps arbitrary implementations of the various interfaces.
//
// We provide a few choices to mock out each one in this package.
// Nothing hidden here, so no New function, just construct it from
// some parts, and swap them out during the tests.
type Client struct {
client.ABCIClient
client.SignClient
@@ -37,14 +41,6 @@ type Client struct {
client.EvidenceClient
client.MempoolClient
service.Service
env *core.Environment
}
func New() Client {
return Client{
env: &core.Environment{},
}
}
var _ client.Client = Client{}
@@ -84,11 +80,11 @@ func (c Call) GetResponse(args interface{}) (interface{}, error) {
}
func (c Client) Status(ctx context.Context) (*ctypes.ResultStatus, error) {
return c.env.Status(&rpctypes.Context{})
return core.Status(&rpctypes.Context{})
}
func (c Client) ABCIInfo(ctx context.Context) (*ctypes.ResultABCIInfo, error) {
return c.env.ABCIInfo(&rpctypes.Context{})
return core.ABCIInfo(&rpctypes.Context{})
}
func (c Client) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*ctypes.ResultABCIQuery, error) {
@@ -100,47 +96,47 @@ func (c Client) ABCIQueryWithOptions(
path string,
data bytes.HexBytes,
opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) {
return c.env.ABCIQuery(&rpctypes.Context{}, path, data, opts.Height, opts.Prove)
return core.ABCIQuery(&rpctypes.Context{}, path, data, opts.Height, opts.Prove)
}
func (c Client) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
return c.env.BroadcastTxCommit(&rpctypes.Context{}, tx)
return core.BroadcastTxCommit(&rpctypes.Context{}, tx)
}
func (c Client) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
return c.env.BroadcastTxAsync(&rpctypes.Context{}, tx)
return core.BroadcastTxAsync(&rpctypes.Context{}, tx)
}
func (c Client) BroadcastTxSync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
return c.env.BroadcastTxSync(&rpctypes.Context{}, tx)
return core.BroadcastTxSync(&rpctypes.Context{}, tx)
}
func (c Client) CheckTx(ctx context.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) {
return c.env.CheckTx(&rpctypes.Context{}, tx)
return core.CheckTx(&rpctypes.Context{}, tx)
}
func (c Client) NetInfo(ctx context.Context) (*ctypes.ResultNetInfo, error) {
return c.env.NetInfo(&rpctypes.Context{})
return core.NetInfo(&rpctypes.Context{})
}
func (c Client) ConsensusState(ctx context.Context) (*ctypes.ResultConsensusState, error) {
return c.env.GetConsensusState(&rpctypes.Context{})
return core.ConsensusState(&rpctypes.Context{})
}
func (c Client) DumpConsensusState(ctx context.Context) (*ctypes.ResultDumpConsensusState, error) {
return c.env.DumpConsensusState(&rpctypes.Context{})
return core.DumpConsensusState(&rpctypes.Context{})
}
func (c Client) ConsensusParams(ctx context.Context, height *int64) (*ctypes.ResultConsensusParams, error) {
return c.env.ConsensusParams(&rpctypes.Context{}, height)
return core.ConsensusParams(&rpctypes.Context{}, height)
}
func (c Client) Health(ctx context.Context) (*ctypes.ResultHealth, error) {
return c.env.Health(&rpctypes.Context{})
return core.Health(&rpctypes.Context{})
}
func (c Client) DialSeeds(ctx context.Context, seeds []string) (*ctypes.ResultDialSeeds, error) {
return c.env.UnsafeDialSeeds(&rpctypes.Context{}, seeds)
return core.UnsafeDialSeeds(&rpctypes.Context{}, seeds)
}
func (c Client) DialPeers(
@@ -150,33 +146,33 @@ func (c Client) DialPeers(
unconditional,
private bool,
) (*ctypes.ResultDialPeers, error) {
return c.env.UnsafeDialPeers(&rpctypes.Context{}, peers, persistent, unconditional, private)
return core.UnsafeDialPeers(&rpctypes.Context{}, peers, persistent, unconditional, private)
}
func (c Client) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) {
return c.env.BlockchainInfo(&rpctypes.Context{}, minHeight, maxHeight)
return core.BlockchainInfo(&rpctypes.Context{}, minHeight, maxHeight)
}
func (c Client) Genesis(ctx context.Context) (*ctypes.ResultGenesis, error) {
return c.env.Genesis(&rpctypes.Context{})
return core.Genesis(&rpctypes.Context{})
}
func (c Client) Block(ctx context.Context, height *int64) (*ctypes.ResultBlock, error) {
return c.env.Block(&rpctypes.Context{}, height)
return core.Block(&rpctypes.Context{}, height)
}
func (c Client) BlockByHash(ctx context.Context, hash []byte) (*ctypes.ResultBlock, error) {
return c.env.BlockByHash(&rpctypes.Context{}, hash)
return core.BlockByHash(&rpctypes.Context{}, hash)
}
func (c Client) Commit(ctx context.Context, height *int64) (*ctypes.ResultCommit, error) {
return c.env.Commit(&rpctypes.Context{}, height)
return core.Commit(&rpctypes.Context{}, height)
}
func (c Client) Validators(ctx context.Context, height *int64, page, perPage *int) (*ctypes.ResultValidators, error) {
return c.env.Validators(&rpctypes.Context{}, height, page, perPage)
return core.Validators(&rpctypes.Context{}, height, page, perPage)
}
func (c Client) BroadcastEvidence(ctx context.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) {
return c.env.BroadcastEvidence(&rpctypes.Context{}, ev)
return core.BroadcastEvidence(&rpctypes.Context{}, ev)
}
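The hunks above switch the local client from calling methods on an injected env to calling package-level functions in the core package. Below is a minimal, self-contained sketch of that delegation pattern; all names here (coreStatus, Client, coreHeight) are hypothetical stand-ins for illustration, not the actual Tendermint types or handlers.

package main

import (
	"context"
	"fmt"
)

// coreHeight stands in for state owned by the core package.
var coreHeight int64 = 42

// coreStatus is a package-level handler, analogous in shape to the
// core.* functions the client above now calls.
func coreStatus() (int64, error) {
	return coreHeight, nil
}

// Client is a local client that no longer carries its own env; each
// method just forwards to the package-level handler.
type Client struct{}

// Status accepts a context for interface compatibility (unused here)
// and delegates to the package-level handler.
func (c Client) Status(ctx context.Context) (int64, error) {
	return coreStatus()
}

func main() {
	var c Client
	h, err := c.Status(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println("latest height:", h) // latest height: 42
}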

View File

@@ -9,8 +9,8 @@ import (
)
// ABCIQuery queries the application for some information.
// More: https://docs.tendermint.com/master/rpc/#/ABCI/abci_query
func (env *Environment) ABCIQuery(
// More: https://docs.tendermint.com/main/rpc/#/ABCI/abci_query
func ABCIQuery(
ctx *rpctypes.Context,
path string,
data bytes.HexBytes,
@@ -31,8 +31,8 @@ func (env *Environment) ABCIQuery(
}
// ABCIInfo gets some info about the application.
// More: https://docs.tendermint.com/master/rpc/#/ABCI/abci_info
func (env *Environment) ABCIInfo(ctx *rpctypes.Context) (*ctypes.ResultABCIInfo, error) {
// More: https://docs.tendermint.com/main/rpc/#/ABCI/abci_info
func ABCIInfo(ctx *rpctypes.Context) (*ctypes.ResultABCIInfo, error) {
resInfo, err := env.ProxyAppQuery.InfoSync(proxy.RequestInfo)
if err != nil {
return nil, err
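In the abci.go hunk the receiver is dropped but env.ProxyAppQuery is still referenced, which implies a package-level env variable populated during node setup. A rough sketch of that pattern follows; Environment, SetEnvironment, and Info are illustrative names only, not the real API.

package main

import "fmt"

// Environment bundles the dependencies the RPC handlers need.
type Environment struct {
	AppVersion string
}

// env is the package-global environment the handlers read from;
// it must be set before any handler is called.
var env *Environment

// SetEnvironment wires the environment in during node setup.
func SetEnvironment(e *Environment) { env = e }

// Info is a handler written in the package-level style: no receiver,
// it reaches for the global env instead.
func Info() (string, error) {
	if env == nil {
		return "", fmt.Errorf("rpc environment not initialized")
	}
	return env.AppVersion, nil
}

func main() {
	SetEnvironment(&Environment{AppVersion: "v1.2.3"})
	v, _ := Info()
	fmt.Println(v) // v1.2.3
}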

View File

@@ -15,19 +15,10 @@ import (
)
// BlockchainInfo gets block headers for minHeight <= height <= maxHeight.
//
// If maxHeight does not yet exist, blocks up to the current height will be
// returned. If minHeight does not exist (due to pruning), earliest existing
// height will be used.
//
// At most 20 items will be returned. Block headers are returned in descending
// order (highest first).
//
// More: https://docs.tendermint.com/master/rpc/#/Info/blockchain
func (env *Environment) BlockchainInfo(
ctx *rpctypes.Context,
minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) {
// Block headers are returned in descending order (highest first).
// More: https://docs.tendermint.com/main/rpc/#/Info/blockchain
func BlockchainInfo(ctx *rpctypes.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) {
// maximum 20 block metas
const limit int64 = 20
var err error
minHeight, maxHeight, err = filterMinMax(
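The removed comment describes how the requested range is clamped before block metas are loaded: cap the span at 20 blocks, bound maxHeight by the store height, and lift minHeight to the earliest unpruned block. A hedged sketch of that clamping logic is shown below; this filterMinMax is reconstructed from the comment's description, not copied from the repository.

package main

import (
	"errors"
	"fmt"
)

// filterMinMax clamps a requested [min, max] height range so that it
// fits inside [base, height] and spans at most limit blocks.
// base is the earliest stored (unpruned) height, height the latest.
func filterMinMax(base, height, min, max, limit int64) (int64, int64, error) {
	if min < 0 || max < 0 {
		return 0, 0, errors.New("heights must be non-negative")
	}
	if max == 0 || max > height {
		max = height // zero or too-large max falls back to the current height
	}
	if min == 0 {
		min = 1
	}
	if min < base {
		min = base // respect pruning: never go below the earliest block
	}
	if span := max - limit + 1; min < span {
		min = span // return at most limit blocks, highest first
	}
	if min > max {
		return 0, 0, fmt.Errorf("min height %d cannot be greater than max height %d", min, max)
	}
	return min, max, nil
}

func main() {
	// Ask for everything up to the latest height when only 900..1000 exist.
	min, max, _ := filterMinMax(900, 1000, 1, 0, 20)
	fmt.Println(min, max) // 981 1000: clamped to the newest 20 blocks
}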
@@ -88,8 +79,8 @@ func filterMinMax(base, height, min, max, limit int64) (int64, int64, error) {
// Header gets block header at a given height.
// If no height is provided, it will fetch the latest header.
// More: https://docs.tendermint.com/master/rpc/#/Info/header
func (env *Environment) Header(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultHeader, error) {
height, err := env.getHeight(env.BlockStore.Height(), heightPtr)
func Header(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultHeader, error) {
height, err := getHeight(env.BlockStore.Height(), heightPtr)
if err != nil {
return nil, err
}
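Several handlers above share the getHeight helper: a nil height pointer means "latest", otherwise the value must be positive and no greater than what the block store holds. A small sketch of that convention follows; this getHeight is reconstructed for illustration and omits the pruning checks the real helper may perform.

package main

import "fmt"

// getHeight resolves an optional height argument: nil means the
// latest height, anything else must be positive and not in the future.
func getHeight(latest int64, heightPtr *int64) (int64, error) {
	if heightPtr == nil {
		return latest, nil
	}
	h := *heightPtr
	if h <= 0 {
		return 0, fmt.Errorf("height must be greater than 0, got %d", h)
	}
	if h > latest {
		return 0, fmt.Errorf("height %d must be less than or equal to the current blockchain height %d", h, latest)
	}
	return h, nil
}

func main() {
	latest := int64(500)

	h, _ := getHeight(latest, nil)
	fmt.Println(h) // 500: nil pointer falls back to the latest height

	want := int64(123)
	h, _ = getHeight(latest, &want)
	fmt.Println(h) // 123
}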
@@ -104,7 +95,7 @@ func (env *Environment) Header(ctx *rpctypes.Context, heightPtr *int64) (*ctypes
// HeaderByHash gets header by hash.
// More: https://docs.tendermint.com/master/rpc/#/Info/header_by_hash
func (env *Environment) HeaderByHash(ctx *rpctypes.Context, hash bytes.HexBytes) (*ctypes.ResultHeader, error) {
func HeaderByHash(ctx *rpctypes.Context, hash bytes.HexBytes) (*ctypes.ResultHeader, error) {
// N.B. The hash parameter is HexBytes so that the reflective parameter
// decoding logic in the HTTP service will correctly translate from JSON.
// See https://github.com/tendermint/tendermint/issues/6802 for context.
@@ -119,9 +110,9 @@ func (env *Environment) HeaderByHash(ctx *rpctypes.Context, hash bytes.HexBytes)
// Block gets block at a given height.
// If no height is provided, it will fetch the latest block.
// More: https://docs.tendermint.com/master/rpc/#/Info/block
func (env *Environment) Block(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlock, error) {
height, err := env.getHeight(env.BlockStore.Height(), heightPtr)
// More: https://docs.tendermint.com/main/rpc/#/Info/block
func Block(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlock, error) {
height, err := getHeight(env.BlockStore.Height(), heightPtr)
if err != nil {
return nil, err
}
@@ -135,8 +126,8 @@ func (env *Environment) Block(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.
}
// BlockByHash gets block by hash.
// More: https://docs.tendermint.com/master/rpc/#/Info/block_by_hash
func (env *Environment) BlockByHash(ctx *rpctypes.Context, hash []byte) (*ctypes.ResultBlock, error) {
// More: https://docs.tendermint.com/main/rpc/#/Info/block_by_hash
func BlockByHash(ctx *rpctypes.Context, hash []byte) (*ctypes.ResultBlock, error) {
block := env.BlockStore.LoadBlockByHash(hash)
if block == nil {
return &ctypes.ResultBlock{BlockID: types.BlockID{}, Block: nil}, nil
@@ -148,9 +139,9 @@ func (env *Environment) BlockByHash(ctx *rpctypes.Context, hash []byte) (*ctypes
// Commit gets block commit at a given height.
// If no height is provided, it will fetch the commit for the latest block.
// More: https://docs.tendermint.com/master/rpc/#/Info/commit
func (env *Environment) Commit(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultCommit, error) {
height, err := env.getHeight(env.BlockStore.Height(), heightPtr)
// More: https://docs.tendermint.com/main/rpc/#/Info/commit
func Commit(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultCommit, error) {
height, err := getHeight(env.BlockStore.Height(), heightPtr)
if err != nil {
return nil, err
}
@@ -179,9 +170,9 @@ func (env *Environment) Commit(ctx *rpctypes.Context, heightPtr *int64) (*ctypes
// Results are for the height of the block containing the txs.
// Thus response.results.deliver_tx[5] is the results of executing
// getBlock(h).Txs[5]
// More: https://docs.tendermint.com/master/rpc/#/Info/block_results
func (env *Environment) BlockResults(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlockResults, error) {
height, err := env.getHeight(env.BlockStore.Height(), heightPtr)
// More: https://docs.tendermint.com/main/rpc/#/Info/block_results
func BlockResults(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlockResults, error) {
height, err := getHeight(env.BlockStore.Height(), heightPtr)
if err != nil {
return nil, err
}
@@ -203,7 +194,7 @@ func (env *Environment) BlockResults(ctx *rpctypes.Context, heightPtr *int64) (*
// BlockSearch searches for a paginated set of blocks matching BeginBlock and
// EndBlock event search criteria.
func (env *Environment) BlockSearch(
func BlockSearch(
ctx *rpctypes.Context,
query string,
pagePtr, perPagePtr *int,
@@ -239,7 +230,7 @@ func (env *Environment) BlockSearch(
// paginate results
totalCount := len(results)
perPage := env.validatePerPage(perPagePtr)
perPage := validatePerPage(perPagePtr)
page, err := validatePage(pagePtr, perPage, totalCount)
if err != nil {
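The BlockSearch hunk ends by validating pagination inputs. The usual shape of those helpers, clamping perPage to a default and a maximum and then checking that the requested page exists for the given total, is sketched below; the constants (30 default, 100 max) are assumptions for illustration, since the actual values live elsewhere in the package.

package main

import "fmt"

const (
	defaultPerPage = 30  // assumed default page size
	maxPerPage     = 100 // assumed upper bound on page size
)

// validatePerPage clamps an optional per-page value into [1, maxPerPage],
// falling back to the default when the pointer is nil or non-positive.
func validatePerPage(perPagePtr *int) int {
	if perPagePtr == nil || *perPagePtr <= 0 {
		return defaultPerPage
	}
	if *perPagePtr > maxPerPage {
		return maxPerPage
	}
	return *perPagePtr
}

// validatePage checks that the requested page exists given perPage and
// the total number of results; a nil pointer defaults to the first page.
func validatePage(pagePtr *int, perPage, totalCount int) (int, error) {
	if pagePtr == nil {
		return 1, nil
	}
	pages := ((totalCount - 1) / perPage) + 1
	p := *pagePtr
	if p <= 0 || p > pages {
		return 0, fmt.Errorf("page %d out of range [1, %d]", p, pages)
	}
	return p, nil
}

func main() {
	perPage := validatePerPage(nil) // 30
	page, err := validatePage(nil, perPage, 95)
	fmt.Println(perPage, page, err) // 30 1 <nil>
}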

View File

@@ -80,7 +80,7 @@ func TestBlockResults(t *testing.T) {
BeginBlock: &abci.ResponseBeginBlock{},
}
env := &Environment{}
env = &Environment{}
env.StateStore = sm.NewStore(dbm.NewMemDB(), sm.StoreOptions{
DiscardABCIResponses: false,
})
@@ -110,7 +110,7 @@ func TestBlockResults(t *testing.T) {
}
for _, tc := range testCases {
res, err := env.BlockResults(&rpctypes.Context{}, &tc.height)
res, err := BlockResults(&rpctypes.Context{}, &tc.height)
if tc.wantErr {
assert.Error(t, err)
} else {
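The test hunk above keeps Go's table-driven style: each case carries a height and a wantErr flag, and the loop asserts accordingly now that BlockResults is a package-level function. A condensed, self-contained sketch of that structure is below; the blockResults function under test here is a trivial stand-in, not the real handler.

package main

import (
	"fmt"
	"testing"
)

// blockResults is a trivial stand-in for the handler under test:
// it fails for heights outside the stored range [1, 100].
func blockResults(height int64) (string, error) {
	if height < 1 || height > 100 {
		return "", fmt.Errorf("height %d is not available", height)
	}
	return fmt.Sprintf("results@%d", height), nil
}

// TestBlockResults mirrors the table-driven shape of the diffed test:
// a slice of cases, each with an input height and a wantErr flag.
func TestBlockResults(t *testing.T) {
	testCases := []struct {
		height  int64
		wantErr bool
	}{
		{height: 0, wantErr: true},
		{height: 1, wantErr: false},
		{height: 100, wantErr: false},
		{height: 101, wantErr: true},
	}

	for _, tc := range testCases {
		res, err := blockResults(tc.height)
		if tc.wantErr {
			if err == nil {
				t.Errorf("height %d: expected an error, got result %q", tc.height, res)
			}
			continue
		}
		if err != nil {
			t.Errorf("height %d: unexpected error: %v", tc.height, err)
		}
	}
}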

Some files were not shown because too many files have changed in this diff.