mirror of
https://github.com/tendermint/tendermint.git
synced 2026-01-13 16:22:53 +00:00
Compare commits
50 Commits
cal/node-c
...
wb/issue-9
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
9b2800ffcd | ||
|
|
a35140c4bd | ||
|
|
71293829a9 | ||
|
|
381244144f | ||
|
|
31c88322fb | ||
|
|
e2b96383ab | ||
|
|
609be4e28e | ||
|
|
4ebb6c213b | ||
|
|
53cd926797 | ||
|
|
9d01a6880e | ||
|
|
8fd66a6e8d | ||
|
|
f7bb0659be | ||
|
|
4af7568f99 | ||
|
|
3e766984a0 | ||
|
|
2d036c59fe | ||
|
|
12f0c4a624 | ||
|
|
7769467012 | ||
|
|
d16f17569f | ||
|
|
cc0c478c14 | ||
|
|
b9dcddd07a | ||
|
|
34ca3fb474 | ||
|
|
fc8df9a151 | ||
|
|
b85e13aa0c | ||
|
|
20ffa4fd32 | ||
|
|
f9bfdf4ce2 | ||
|
|
2b4436d1b4 | ||
|
|
627b77693f | ||
|
|
755e1474b1 | ||
|
|
b07e1fae89 | ||
|
|
ae164bf533 | ||
|
|
eb14a9564a | ||
|
|
a7dc8aaf91 | ||
|
|
d324430f82 | ||
|
|
99a7ac84dc | ||
|
|
f12588aab1 | ||
|
|
d534285bfe | ||
|
|
ffae184b62 | ||
|
|
c6a0dc8559 | ||
|
|
3aa6c816e5 | ||
|
|
ff0f98892f | ||
|
|
0beac722b0 | ||
|
|
45071d1f23 | ||
|
|
5a9a84eb02 | ||
|
|
f008a275d1 | ||
|
|
816c6bac00 | ||
|
|
9ec9085678 | ||
|
|
f58ba4d2f9 | ||
|
|
6bde634be2 | ||
|
|
d704c0a0b6 | ||
|
|
629cdc7f3d |
4
.github/CODEOWNERS
vendored
4
.github/CODEOWNERS
vendored
@@ -7,6 +7,6 @@
|
||||
# global owners are only requested if there isn't a more specific
|
||||
# codeowner specified below. For this reason, the global codeowners
|
||||
# are often repeated in package-level definitions.
|
||||
* @ebuchman @tendermint/tendermint-engineering
|
||||
* @ebuchman @tendermint/tendermint-engineering @adizere @lasarojc
|
||||
|
||||
/spec @ebuchman @tendermint/tendermint-research @tendermint/tendermint-engineering
|
||||
/spec @ebuchman @tendermint/tendermint-research @tendermint/tendermint-engineering @adizere @lasarojc
|
||||
|
||||
2
.github/workflows/pre-release.yml
vendored
2
.github/workflows/pre-release.yml
vendored
@@ -45,7 +45,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Notify Slack upon pre-release
|
||||
uses: slackapi/slack-github-action@v1.22.0
|
||||
uses: slackapi/slack-github-action@v1.23.0
|
||||
env:
|
||||
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
|
||||
SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
|
||||
|
||||
2
.github/workflows/release.yml
vendored
2
.github/workflows/release.yml
vendored
@@ -42,7 +42,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Notify Slack upon release
|
||||
uses: slackapi/slack-github-action@v1.22.0
|
||||
uses: slackapi/slack-github-action@v1.23.0
|
||||
env:
|
||||
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
|
||||
SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
|
||||
|
||||
40
CHANGELOG.md
40
CHANGELOG.md
@@ -2,6 +2,46 @@
|
||||
|
||||
Friendly reminder, we have a [bug bounty program](https://hackerone.com/cosmos).
|
||||
|
||||
## v0.34.23
|
||||
|
||||
*Nov 9, 2022*
|
||||
|
||||
This release introduces some new Prometheus metrics to help in determining what
|
||||
kinds of messages are consuming the most P2P bandwidth. This builds towards our
|
||||
broader goal of optimizing Tendermint bandwidth consumption, and will give us
|
||||
meaningful insights once we can establish these metrics for a number of chains.
|
||||
|
||||
We now also return `Cache-Control` headers for select RPC endpoints to help
|
||||
facilitate caching.
|
||||
|
||||
Special thanks to external contributors on this release: @JayT106
|
||||
|
||||
### IMPROVEMENTS
|
||||
- `[p2p]` [\#9641](https://github.com/tendermint/tendermint/issues/9641) Add new
|
||||
Envelope type and associated methods for sending and receiving Envelopes
|
||||
instead of raw bytes. This also adds new metrics,
|
||||
`tendermint_p2p_message_send_bytes_total` and
|
||||
`tendermint_p2p_message_receive_bytes_total`, that expose how many bytes of
|
||||
each message type have been sent.
|
||||
- `[rpc]` [\#9666](https://github.com/tendermint/tendermint/issues/9666) Enable
|
||||
caching of RPC responses (@JayT106)
|
||||
|
||||
The following RPC endpoints will return `Cache-Control` headers with a maximum
|
||||
age of 1 day:
|
||||
|
||||
- `/abci_info`
|
||||
- `/block`, if `height` is supplied
|
||||
- `/block_by_hash`
|
||||
- `/block_results`, if `height` is supplied
|
||||
- `/blockchain`
|
||||
- `/check_tx`
|
||||
- `/commit`, if `height` is supplied
|
||||
- `/consensus_params`, if `height` is supplied
|
||||
- `/genesis`
|
||||
- `/genesis_chunked`
|
||||
- `/tx`
|
||||
- `/validators`, if `height` is supplied
|
||||
|
||||
## v0.34.22
|
||||
|
||||
This release includes several bug fixes, [one of
|
||||
|
||||
@@ -17,21 +17,28 @@
|
||||
|
||||
- Data Storage
|
||||
- [state] \#6541 Move pruneBlocks from consensus/state to state/execution. (@JayT106)
|
||||
|
||||
|
||||
- Tooling
|
||||
- [tools/tm-signer-harness] \#6498 Set OS home dir to instead of the hardcoded PATH. (@JayT106)
|
||||
- [metrics] \#9682 move state-syncing and block-syncing metrics to their respective packages (@cmwaters)
|
||||
labels have moved from block_syncing -> blocksync_syncing and state_syncing -> statesync_syncing
|
||||
|
||||
### FEATURES
|
||||
|
||||
- [config] \#9680 Introduce `BootstrapPeers` to the config to allow nodes to list peers to be added to
|
||||
the addressbook upon start up (@cmwaters)
|
||||
|
||||
### IMPROVEMENTS
|
||||
|
||||
- [pubsub] \#7319 Performance improvements for the event query API (@creachadair)
|
||||
- [p2p/pex] \#6509 Improve addrBook.hash performance (@cuonglm)
|
||||
- [crypto/merkle] \#6443 & \#6513 Improve HashAlternatives performance (@cuonglm, @marbar3778)
|
||||
- [rpc] \#9650 Enable caching of RPC responses (@JayT106)
|
||||
|
||||
### BUG FIXES
|
||||
|
||||
- [docker] \#9462 ensure Docker image uses consistent version of Go
|
||||
- [abci-cli] \#9717 fix broken abci-cli help command
|
||||
|
||||
## v0.37.0
|
||||
|
||||
@@ -97,4 +104,4 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi
|
||||
|
||||
- [consensus] \#9229 fix round number of `enterPropose` when handling `RoundStepNewRound` timeout. (@fatcat22)
|
||||
- [docker] \#9073 enable cross platform build using docker buildx
|
||||
- [blocksync] \#9518 handle the case when the sending queue is full: retry block request after a timeout
|
||||
- [blocksync] \#9518 handle the case when the sending queue is full: retry block request after a timeout
|
||||
|
||||
13
README.md
13
README.md
@@ -113,10 +113,15 @@ For more information on upgrading, see [UPGRADING.md](./UPGRADING.md).
|
||||
|
||||
### Supported Versions
|
||||
|
||||
Because we are a small core team, we only ship patch updates, including security
|
||||
updates, to the most recent minor release and the second-most recent minor
|
||||
release. Consequently, we strongly recommend keeping Tendermint up-to-date.
|
||||
Upgrading instructions can be found in [UPGRADING.md](./UPGRADING.md).
|
||||
Because we are a small core team, we have limited capacity to ship patch
|
||||
updates, including security updates. Consequently, we strongly recommend keeping
|
||||
Tendermint up-to-date. Upgrading instructions can be found in
|
||||
[UPGRADING.md](./UPGRADING.md).
|
||||
|
||||
Currently supported versions include:
|
||||
|
||||
- v0.34.x
|
||||
- v0.37.x (release candidate)
|
||||
|
||||
## Resources
|
||||
|
||||
|
||||
@@ -98,7 +98,7 @@ Sometimes it's necessary to rename libraries to avoid naming collisions or ambig
|
||||
* Make use of table driven testing where possible and not-cumbersome
|
||||
* [Inspiration](https://dave.cheney.net/2013/06/09/writing-table-driven-tests-in-go)
|
||||
* Make use of [assert](https://godoc.org/github.com/stretchr/testify/assert) and [require](https://godoc.org/github.com/stretchr/testify/require)
|
||||
* When using mocks, it is recommended to use Testify [mock] (<https://pkg.go.dev/github.com/stretchr/testify/mock>
|
||||
* When using mocks, it is recommended to use Testify [mock](<https://pkg.go.dev/github.com/stretchr/testify/mock>
|
||||
) along with [Mockery](https://github.com/vektra/mockery) for autogeneration
|
||||
|
||||
## Errors
|
||||
|
||||
@@ -5,6 +5,15 @@ Tendermint Core.
|
||||
|
||||
## Unreleased
|
||||
|
||||
## Config Changes
|
||||
|
||||
* A new config field, `BootstrapPeers` has been introduced as a means of
|
||||
adding a list of addresses to the addressbook upon initializing a node. This is an
|
||||
alternative to `PersistentPeers`. `PersistentPeers` shold be only used for
|
||||
nodes that you want to keep a constant connection with i.e. sentry nodes
|
||||
|
||||
----
|
||||
|
||||
### ABCI Changes
|
||||
|
||||
* The `ABCIVersion` is now `1.0.0`.
|
||||
|
||||
@@ -6,8 +6,6 @@ import (
|
||||
tmsync "github.com/tendermint/tendermint/libs/sync"
|
||||
)
|
||||
|
||||
var _ Client = (*localClient)(nil)
|
||||
|
||||
// NOTE: use defer to unlock mutex because Application might panic (e.g., in
|
||||
// case of malicious tx or query). It only makes sense for publicly exposed
|
||||
// methods like CheckTx (/broadcast_tx_* RPC endpoint) or Query (/abci_query
|
||||
@@ -24,8 +22,6 @@ var _ Client = (*localClient)(nil)
|
||||
|
||||
// NewLocalClient creates a local client, which will be directly calling the
|
||||
// methods of the given app.
|
||||
//
|
||||
// Both Async and Sync methods ignore the given context.Context parameter.
|
||||
func NewLocalClient(mtx *tmsync.Mutex, app types.Application) Client {
|
||||
if mtx == nil {
|
||||
mtx = new(tmsync.Mutex)
|
||||
@@ -309,7 +305,8 @@ func (app *localClient) OfferSnapshotSync(req types.RequestOfferSnapshot) (*type
|
||||
}
|
||||
|
||||
func (app *localClient) LoadSnapshotChunkSync(
|
||||
req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
|
||||
req types.RequestLoadSnapshotChunk,
|
||||
) (*types.ResponseLoadSnapshotChunk, error) {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
@@ -318,7 +315,8 @@ func (app *localClient) LoadSnapshotChunkSync(
|
||||
}
|
||||
|
||||
func (app *localClient) ApplySnapshotChunkSync(
|
||||
req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
|
||||
req types.RequestApplySnapshotChunk,
|
||||
) (*types.ResponseApplySnapshotChunk, error) {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
|
||||
263
abci/client/unsync_local_client.go
Normal file
263
abci/client/unsync_local_client.go
Normal file
@@ -0,0 +1,263 @@
|
||||
package abcicli
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
types "github.com/tendermint/tendermint/abci/types"
|
||||
"github.com/tendermint/tendermint/libs/service"
|
||||
)
|
||||
|
||||
type unsyncLocalClient struct {
|
||||
service.BaseService
|
||||
|
||||
types.Application
|
||||
|
||||
// This mutex is exclusively used to protect the callback.
|
||||
mtx sync.RWMutex
|
||||
Callback
|
||||
}
|
||||
|
||||
var _ Client = (*unsyncLocalClient)(nil)
|
||||
|
||||
// NewUnsyncLocalClient creates an unsynchronized local client, which will be
|
||||
// directly calling the methods of the given app.
|
||||
//
|
||||
// Unlike NewLocalClient, it does not hold a mutex around the application, so
|
||||
// it is up to the application to manage its synchronization properly.
|
||||
func NewUnsyncLocalClient(app types.Application) Client {
|
||||
cli := &unsyncLocalClient{
|
||||
Application: app,
|
||||
}
|
||||
cli.BaseService = *service.NewBaseService(nil, "unsyncLocalClient", cli)
|
||||
return cli
|
||||
}
|
||||
|
||||
func (app *unsyncLocalClient) SetResponseCallback(cb Callback) {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
app.Callback = cb
|
||||
}
|
||||
|
||||
// TODO: change types.Application to include Error()?
|
||||
func (app *unsyncLocalClient) Error() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (app *unsyncLocalClient) FlushAsync() *ReqRes {
|
||||
// Do nothing
|
||||
return newLocalReqRes(types.ToRequestFlush(), nil)
|
||||
}
|
||||
|
||||
func (app *unsyncLocalClient) EchoAsync(msg string) *ReqRes {
|
||||
return app.callback(
|
||||
types.ToRequestEcho(msg),
|
||||
types.ToResponseEcho(msg),
|
||||
)
|
||||
}
|
||||
|
||||
func (app *unsyncLocalClient) InfoAsync(req types.RequestInfo) *ReqRes {
|
||||
res := app.Application.Info(req)
|
||||
return app.callback(
|
||||
types.ToRequestInfo(req),
|
||||
types.ToResponseInfo(res),
|
||||
)
|
||||
}
|
||||
|
||||
func (app *unsyncLocalClient) DeliverTxAsync(params types.RequestDeliverTx) *ReqRes {
|
||||
res := app.Application.DeliverTx(params)
|
||||
return app.callback(
|
||||
types.ToRequestDeliverTx(params),
|
||||
types.ToResponseDeliverTx(res),
|
||||
)
|
||||
}
|
||||
|
||||
func (app *unsyncLocalClient) CheckTxAsync(req types.RequestCheckTx) *ReqRes {
|
||||
res := app.Application.CheckTx(req)
|
||||
return app.callback(
|
||||
types.ToRequestCheckTx(req),
|
||||
types.ToResponseCheckTx(res),
|
||||
)
|
||||
}
|
||||
|
||||
func (app *unsyncLocalClient) QueryAsync(req types.RequestQuery) *ReqRes {
|
||||
res := app.Application.Query(req)
|
||||
return app.callback(
|
||||
types.ToRequestQuery(req),
|
||||
types.ToResponseQuery(res),
|
||||
)
|
||||
}
|
||||
|
||||
func (app *unsyncLocalClient) CommitAsync() *ReqRes {
|
||||
res := app.Application.Commit()
|
||||
return app.callback(
|
||||
types.ToRequestCommit(),
|
||||
types.ToResponseCommit(res),
|
||||
)
|
||||
}
|
||||
|
||||
func (app *unsyncLocalClient) InitChainAsync(req types.RequestInitChain) *ReqRes {
|
||||
res := app.Application.InitChain(req)
|
||||
return app.callback(
|
||||
types.ToRequestInitChain(req),
|
||||
types.ToResponseInitChain(res),
|
||||
)
|
||||
}
|
||||
|
||||
func (app *unsyncLocalClient) BeginBlockAsync(req types.RequestBeginBlock) *ReqRes {
|
||||
res := app.Application.BeginBlock(req)
|
||||
return app.callback(
|
||||
types.ToRequestBeginBlock(req),
|
||||
types.ToResponseBeginBlock(res),
|
||||
)
|
||||
}
|
||||
|
||||
func (app *unsyncLocalClient) EndBlockAsync(req types.RequestEndBlock) *ReqRes {
|
||||
res := app.Application.EndBlock(req)
|
||||
return app.callback(
|
||||
types.ToRequestEndBlock(req),
|
||||
types.ToResponseEndBlock(res),
|
||||
)
|
||||
}
|
||||
|
||||
func (app *unsyncLocalClient) ListSnapshotsAsync(req types.RequestListSnapshots) *ReqRes {
|
||||
res := app.Application.ListSnapshots(req)
|
||||
return app.callback(
|
||||
types.ToRequestListSnapshots(req),
|
||||
types.ToResponseListSnapshots(res),
|
||||
)
|
||||
}
|
||||
|
||||
func (app *unsyncLocalClient) OfferSnapshotAsync(req types.RequestOfferSnapshot) *ReqRes {
|
||||
res := app.Application.OfferSnapshot(req)
|
||||
return app.callback(
|
||||
types.ToRequestOfferSnapshot(req),
|
||||
types.ToResponseOfferSnapshot(res),
|
||||
)
|
||||
}
|
||||
|
||||
func (app *unsyncLocalClient) LoadSnapshotChunkAsync(req types.RequestLoadSnapshotChunk) *ReqRes {
|
||||
res := app.Application.LoadSnapshotChunk(req)
|
||||
return app.callback(
|
||||
types.ToRequestLoadSnapshotChunk(req),
|
||||
types.ToResponseLoadSnapshotChunk(res),
|
||||
)
|
||||
}
|
||||
|
||||
func (app *unsyncLocalClient) ApplySnapshotChunkAsync(req types.RequestApplySnapshotChunk) *ReqRes {
|
||||
res := app.Application.ApplySnapshotChunk(req)
|
||||
return app.callback(
|
||||
types.ToRequestApplySnapshotChunk(req),
|
||||
types.ToResponseApplySnapshotChunk(res),
|
||||
)
|
||||
}
|
||||
|
||||
func (app *unsyncLocalClient) PrepareProposalAsync(req types.RequestPrepareProposal) *ReqRes {
|
||||
res := app.Application.PrepareProposal(req)
|
||||
return app.callback(
|
||||
types.ToRequestPrepareProposal(req),
|
||||
types.ToResponsePrepareProposal(res),
|
||||
)
|
||||
}
|
||||
|
||||
func (app *unsyncLocalClient) ProcessProposalAsync(req types.RequestProcessProposal) *ReqRes {
|
||||
res := app.Application.ProcessProposal(req)
|
||||
return app.callback(
|
||||
types.ToRequestProcessProposal(req),
|
||||
types.ToResponseProcessProposal(res),
|
||||
)
|
||||
}
|
||||
|
||||
//-------------------------------------------------------
|
||||
|
||||
func (app *unsyncLocalClient) FlushSync() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (app *unsyncLocalClient) EchoSync(msg string) (*types.ResponseEcho, error) {
|
||||
return &types.ResponseEcho{Message: msg}, nil
|
||||
}
|
||||
|
||||
func (app *unsyncLocalClient) InfoSync(req types.RequestInfo) (*types.ResponseInfo, error) {
|
||||
res := app.Application.Info(req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *unsyncLocalClient) DeliverTxSync(req types.RequestDeliverTx) (*types.ResponseDeliverTx, error) {
|
||||
res := app.Application.DeliverTx(req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *unsyncLocalClient) CheckTxSync(req types.RequestCheckTx) (*types.ResponseCheckTx, error) {
|
||||
res := app.Application.CheckTx(req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *unsyncLocalClient) QuerySync(req types.RequestQuery) (*types.ResponseQuery, error) {
|
||||
res := app.Application.Query(req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *unsyncLocalClient) CommitSync() (*types.ResponseCommit, error) {
|
||||
res := app.Application.Commit()
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *unsyncLocalClient) InitChainSync(req types.RequestInitChain) (*types.ResponseInitChain, error) {
|
||||
res := app.Application.InitChain(req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *unsyncLocalClient) BeginBlockSync(req types.RequestBeginBlock) (*types.ResponseBeginBlock, error) {
|
||||
res := app.Application.BeginBlock(req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *unsyncLocalClient) EndBlockSync(req types.RequestEndBlock) (*types.ResponseEndBlock, error) {
|
||||
res := app.Application.EndBlock(req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *unsyncLocalClient) ListSnapshotsSync(req types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
|
||||
res := app.Application.ListSnapshots(req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *unsyncLocalClient) OfferSnapshotSync(req types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
|
||||
res := app.Application.OfferSnapshot(req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *unsyncLocalClient) LoadSnapshotChunkSync(
|
||||
req types.RequestLoadSnapshotChunk,
|
||||
) (*types.ResponseLoadSnapshotChunk, error) {
|
||||
res := app.Application.LoadSnapshotChunk(req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *unsyncLocalClient) ApplySnapshotChunkSync(
|
||||
req types.RequestApplySnapshotChunk,
|
||||
) (*types.ResponseApplySnapshotChunk, error) {
|
||||
res := app.Application.ApplySnapshotChunk(req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *unsyncLocalClient) PrepareProposalSync(req types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
|
||||
res := app.Application.PrepareProposal(req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *unsyncLocalClient) ProcessProposalSync(req types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
|
||||
res := app.Application.ProcessProposal(req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
//-------------------------------------------------------
|
||||
|
||||
func (app *unsyncLocalClient) callback(req *types.Request, res *types.Response) *ReqRes {
|
||||
app.mtx.RLock()
|
||||
defer app.mtx.RUnlock()
|
||||
app.Callback(req, res)
|
||||
rr := newLocalReqRes(req, res)
|
||||
rr.callbackInvoked = true
|
||||
return rr
|
||||
}
|
||||
@@ -54,7 +54,7 @@ var RootCmd = &cobra.Command{
|
||||
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
|
||||
|
||||
switch cmd.Use {
|
||||
case "kvstore", "version":
|
||||
case "kvstore", "version", "help [command]":
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -38,8 +38,34 @@ function testExample() {
|
||||
rm "${INPUT}".out.new
|
||||
}
|
||||
|
||||
function testHelp() {
|
||||
INPUT=$1
|
||||
APP="$2 $3"
|
||||
|
||||
echo "Test: $APP"
|
||||
$APP &> "${INPUT}.new" &
|
||||
sleep 2
|
||||
|
||||
pre=$(shasum < "${INPUT}")
|
||||
post=$(shasum < "${INPUT}.new")
|
||||
|
||||
if [[ "$pre" != "$post" ]]; then
|
||||
echo "You broke the tutorial"
|
||||
echo "Got:"
|
||||
cat "${INPUT}.new"
|
||||
echo "Expected:"
|
||||
cat "${INPUT}"
|
||||
echo "Diff:"
|
||||
diff "${INPUT}" "${INPUT}.new"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
rm "${INPUT}".new
|
||||
}
|
||||
|
||||
testExample 1 tests/test_cli/ex1.abci abci-cli kvstore
|
||||
testExample 2 tests/test_cli/ex2.abci abci-cli kvstore
|
||||
testHelp tests/test_cli/testHelp.out abci-cli help
|
||||
|
||||
echo ""
|
||||
echo "PASS"
|
||||
|
||||
30
abci/tests/test_cli/testHelp.out
Normal file
30
abci/tests/test_cli/testHelp.out
Normal file
@@ -0,0 +1,30 @@
|
||||
the ABCI CLI tool wraps an ABCI client and is used for testing ABCI servers
|
||||
|
||||
Usage:
|
||||
abci-cli [command]
|
||||
|
||||
Available Commands:
|
||||
batch run a batch of abci commands against an application
|
||||
check_tx validate a transaction
|
||||
commit commit the application state and return the Merkle root hash
|
||||
completion Generate the autocompletion script for the specified shell
|
||||
console start an interactive ABCI console for multiple commands
|
||||
deliver_tx deliver a new transaction to the application
|
||||
echo have the application echo a message
|
||||
help Help about any command
|
||||
info get some info about the application
|
||||
kvstore ABCI demo example
|
||||
prepare_proposal prepare proposal
|
||||
process_proposal process proposal
|
||||
query query the application state
|
||||
test run integration tests
|
||||
version print ABCI console version
|
||||
|
||||
Flags:
|
||||
--abci string either socket or grpc (default "socket")
|
||||
--address string address of application socket (default "tcp://0.0.0.0:26658")
|
||||
-h, --help help for abci-cli
|
||||
--log_level string set the logger level (default "debug")
|
||||
-v, --verbose print the command and results as if it were a console session
|
||||
|
||||
Use "abci-cli [command] --help" for more information about a command.
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"io"
|
||||
|
||||
"github.com/cosmos/gogoproto/proto"
|
||||
|
||||
"github.com/tendermint/tendermint/libs/protoio"
|
||||
)
|
||||
|
||||
|
||||
30
blocksync/metrics.gen.go
Normal file
30
blocksync/metrics.gen.go
Normal file
@@ -0,0 +1,30 @@
|
||||
// Code generated by metricsgen. DO NOT EDIT.
|
||||
|
||||
package blocksync
|
||||
|
||||
import (
|
||||
"github.com/go-kit/kit/metrics/discard"
|
||||
prometheus "github.com/go-kit/kit/metrics/prometheus"
|
||||
stdprometheus "github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
|
||||
labels := []string{}
|
||||
for i := 0; i < len(labelsAndValues); i += 2 {
|
||||
labels = append(labels, labelsAndValues[i])
|
||||
}
|
||||
return &Metrics{
|
||||
Syncing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "syncing",
|
||||
Help: "Whether or not a node is block syncing. 1 if yes, 0 if no.",
|
||||
}, labels).With(labelsAndValues...),
|
||||
}
|
||||
}
|
||||
|
||||
func NopMetrics() *Metrics {
|
||||
return &Metrics{
|
||||
Syncing: discard.NewGauge(),
|
||||
}
|
||||
}
|
||||
19
blocksync/metrics.go
Normal file
19
blocksync/metrics.go
Normal file
@@ -0,0 +1,19 @@
|
||||
package blocksync
|
||||
|
||||
import (
|
||||
"github.com/go-kit/kit/metrics"
|
||||
)
|
||||
|
||||
const (
|
||||
// MetricsSubsystem is a subsystem shared by all metrics exposed by this
|
||||
// package.
|
||||
MetricsSubsystem = "blocksync"
|
||||
)
|
||||
|
||||
//go:generate go run ../scripts/metricsgen -struct=Metrics
|
||||
|
||||
// Metrics contains metrics exposed by this package.
|
||||
type Metrics struct {
|
||||
// Whether or not a node is block syncing. 1 if yes, 0 if no.
|
||||
Syncing metrics.Gauge
|
||||
}
|
||||
@@ -58,11 +58,13 @@ type Reactor struct {
|
||||
|
||||
requestsCh <-chan BlockRequest
|
||||
errorsCh <-chan peerError
|
||||
|
||||
metrics *Metrics
|
||||
}
|
||||
|
||||
// NewReactor returns new reactor instance.
|
||||
func NewReactor(state sm.State, blockExec *sm.BlockExecutor, store *store.BlockStore,
|
||||
blockSync bool) *Reactor {
|
||||
blockSync bool, metrics *Metrics) *Reactor {
|
||||
|
||||
if state.LastBlockHeight != store.Height() {
|
||||
panic(fmt.Sprintf("state (%v) and store (%v) height mismatch", state.LastBlockHeight,
|
||||
@@ -88,6 +90,7 @@ func NewReactor(state sm.State, blockExec *sm.BlockExecutor, store *store.BlockS
|
||||
blockSync: blockSync,
|
||||
requestsCh: requestsCh,
|
||||
errorsCh: errorsCh,
|
||||
metrics: metrics,
|
||||
}
|
||||
bcR.BaseReactor = *p2p.NewBaseReactor("Reactor", bcR)
|
||||
return bcR
|
||||
@@ -236,6 +239,8 @@ func (bcR *Reactor) Receive(e p2p.Envelope) {
|
||||
// Handle messages from the poolReactor telling the reactor what to do.
|
||||
// NOTE: Don't sleep in the FOR_LOOP or otherwise slow it down!
|
||||
func (bcR *Reactor) poolRoutine(stateSynced bool) {
|
||||
bcR.metrics.Syncing.Set(1)
|
||||
defer bcR.metrics.Syncing.Set(0)
|
||||
|
||||
trySyncTicker := time.NewTicker(trySyncIntervalMS * time.Millisecond)
|
||||
defer trySyncTicker.Stop()
|
||||
|
||||
@@ -76,7 +76,7 @@ func newReactor(
|
||||
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
|
||||
DiscardABCIResponses: false,
|
||||
})
|
||||
blockStore := store.NewBlockStore(blockDB)
|
||||
blockStore := store.NewBlockStore(blockDB, store.BlockStoreOptions{})
|
||||
|
||||
state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
|
||||
if err != nil {
|
||||
@@ -145,7 +145,7 @@ func newReactor(
|
||||
blockStore.SaveBlock(thisBlock, thisParts, lastCommit)
|
||||
}
|
||||
|
||||
bcReactor := NewReactor(state.Copy(), blockExec, blockStore, fastSync)
|
||||
bcReactor := NewReactor(state.Copy(), blockExec, blockStore, fastSync, NopMetrics())
|
||||
bcReactor.SetLogger(logger.With("module", "blocksync"))
|
||||
|
||||
return ReactorPair{bcReactor, proxyApp}
|
||||
|
||||
@@ -67,7 +67,7 @@ func copyConfig(home, dir string) error {
|
||||
func dumpProfile(dir, addr, profile string, debug int) error {
|
||||
endpoint := fmt.Sprintf("%s/debug/pprof/%s?debug=%d", addr, profile, debug)
|
||||
|
||||
//nolint:all
|
||||
//nolint:gosec,nolintlint
|
||||
resp, err := http.Get(endpoint)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to query for %s profile: %w", profile, err)
|
||||
|
||||
@@ -78,7 +78,7 @@ func loadStateAndBlockStore(config *cfg.Config) (*store.BlockStore, state.Store,
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
blockStore := store.NewBlockStore(blockStoreDB)
|
||||
blockStore := store.NewBlockStore(blockStoreDB, store.BlockStoreOptions{})
|
||||
|
||||
if !os.FileExists(filepath.Join(config.DBDir(), "state.db")) {
|
||||
return nil, nil, fmt.Errorf("no statestore found in %v", config.DBDir())
|
||||
|
||||
@@ -66,6 +66,7 @@ func AddNodeFlags(cmd *cobra.Command) {
|
||||
cmd.Flags().String("p2p.external-address", config.P2P.ExternalAddress, "ip:port address to advertise to peers for them to dial")
|
||||
cmd.Flags().String("p2p.seeds", config.P2P.Seeds, "comma-delimited ID@host:port seed nodes")
|
||||
cmd.Flags().String("p2p.persistent_peers", config.P2P.PersistentPeers, "comma-delimited ID@host:port persistent peers")
|
||||
cmd.Flags().String("p2p.bootstrap_peers", config.P2P.BootstrapPeers, "comma-delimited ID@host:port peers to be added to the addressbook on startup")
|
||||
cmd.Flags().String("p2p.unconditional_peer_ids",
|
||||
config.P2P.UnconditionalPeerIDs, "comma-delimited IDs of unconditional peers")
|
||||
cmd.Flags().Bool("p2p.upnp", config.P2P.UPNP, "enable/disable UPNP port forwarding")
|
||||
|
||||
@@ -548,6 +548,11 @@ type P2PConfig struct { //nolint: maligned
|
||||
// We only use these if we can’t connect to peers in the addrbook
|
||||
Seeds string `mapstructure:"seeds"`
|
||||
|
||||
// Comma separated list of peers to be added to the peer store
|
||||
// on startup. Either BootstrapPeers or PersistentPeers are
|
||||
// needed for peer discovery
|
||||
BootstrapPeers string `mapstructure:"bootstrap_peers"`
|
||||
|
||||
// Comma separated list of nodes to keep persistent connections to
|
||||
PersistentPeers string `mapstructure:"persistent_peers"`
|
||||
|
||||
@@ -708,11 +713,28 @@ type MempoolConfig struct {
|
||||
// Mempool version to use:
|
||||
// 1) "v0" - (default) FIFO mempool.
|
||||
// 2) "v1" - prioritized mempool.
|
||||
Version string `mapstructure:"version"`
|
||||
RootDir string `mapstructure:"home"`
|
||||
Recheck bool `mapstructure:"recheck"`
|
||||
Broadcast bool `mapstructure:"broadcast"`
|
||||
WalPath string `mapstructure:"wal_dir"`
|
||||
Version string `mapstructure:"version"`
|
||||
// RootDir is the root directory for all data. This should be configured via
|
||||
// the $TMHOME env variable or --home cmd flag rather than overriding this
|
||||
// struct field.
|
||||
RootDir string `mapstructure:"home"`
|
||||
// Recheck (default: true) defines whether Tendermint should recheck the
|
||||
// validity for all remaining transaction in the mempool after a block.
|
||||
// Since a block affects the application state, some transactions in the
|
||||
// mempool may become invalid. If this does not apply to your application,
|
||||
// you can disable rechecking.
|
||||
Recheck bool `mapstructure:"recheck"`
|
||||
// Broadcast (default: true) defines whether the mempool should relay
|
||||
// transactions to other peers. Setting this to false will stop the mempool
|
||||
// from relaying transactions to other peers until they are included in a
|
||||
// block. In other words, if Broadcast is disabled, only the peer you send
|
||||
// the tx to will see it until it is included in a block.
|
||||
Broadcast bool `mapstructure:"broadcast"`
|
||||
// WalPath (default: "") configures the location of the Write Ahead Log
|
||||
// (WAL) for the mempool. The WAL is disabled by default. To enable, set
|
||||
// WalPath to where you want the WAL to be written (e.g.
|
||||
// "data/mempool.wal").
|
||||
WalPath string `mapstructure:"wal_dir"`
|
||||
// Maximum number of transactions in the mempool
|
||||
Size int `mapstructure:"size"`
|
||||
// Limit the total size of all txs in the mempool.
|
||||
|
||||
@@ -283,6 +283,11 @@ external_address = "{{ .P2P.ExternalAddress }}"
|
||||
# Comma separated list of seed nodes to connect to
|
||||
seeds = "{{ .P2P.Seeds }}"
|
||||
|
||||
# Comma separated list of peers to be added to the peer store
|
||||
# on startup. Either BootstrapPeers or PersistentPeers are
|
||||
# needed for peer discovery
|
||||
bootstrap_peers = "{{ .P2P.BootstrapPeers }}"
|
||||
|
||||
# Comma separated list of nodes to keep persistent connections to
|
||||
persistent_peers = "{{ .P2P.PersistentPeers }}"
|
||||
|
||||
@@ -349,8 +354,24 @@ dial_timeout = "{{ .P2P.DialTimeout }}"
|
||||
# 2) "v1" - prioritized mempool.
|
||||
version = "{{ .Mempool.Version }}"
|
||||
|
||||
# Recheck (default: true) defines whether Tendermint should recheck the
|
||||
# validity for all remaining transaction in the mempool after a block.
|
||||
# Since a block affects the application state, some transactions in the
|
||||
# mempool may become invalid. If this does not apply to your application,
|
||||
# you can disable rechecking.
|
||||
recheck = {{ .Mempool.Recheck }}
|
||||
|
||||
# Broadcast (default: true) defines whether the mempool should relay
|
||||
# transactions to other peers. Setting this to false will stop the mempool
|
||||
# from relaying transactions to other peers until they are included in a
|
||||
# block. In other words, if Broadcast is disabled, only the peer you send
|
||||
# the tx to will see it until it is included in a block.
|
||||
broadcast = {{ .Mempool.Broadcast }}
|
||||
|
||||
# WalPath (default: "") configures the location of the Write Ahead Log
|
||||
# (WAL) for the mempool. The WAL is disabled by default. To enable, set
|
||||
# WalPath to where you want the WAL to be written (e.g.
|
||||
# "data/mempool.wal").
|
||||
wal_dir = "{{ js .Mempool.WalPath }}"
|
||||
|
||||
# Maximum number of transactions in the mempool
|
||||
@@ -436,7 +457,7 @@ chunk_fetchers = "{{ .StateSync.ChunkFetchers }}"
|
||||
[blocksync]
|
||||
|
||||
# Block Sync version to use:
|
||||
#
|
||||
#
|
||||
# In v0.37, v1 and v2 of the block sync protocols were deprecated.
|
||||
# Please use v0 instead.
|
||||
#
|
||||
|
||||
@@ -63,7 +63,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
|
||||
app.InitChain(abci.RequestInitChain{Validators: vals})
|
||||
|
||||
blockDB := dbm.NewMemDB()
|
||||
blockStore := store.NewBlockStore(blockDB)
|
||||
blockStore := store.NewBlockStore(blockDB, store.BlockStoreOptions{})
|
||||
|
||||
mtx := new(tmsync.Mutex)
|
||||
// one for mempool, one for consensus
|
||||
|
||||
@@ -392,7 +392,7 @@ func newStateWithConfigAndBlockStore(
|
||||
blockDB dbm.DB,
|
||||
) *State {
|
||||
// Get BlockStore
|
||||
blockStore := store.NewBlockStore(blockDB)
|
||||
blockStore := store.NewBlockStore(blockDB, store.BlockStoreOptions{})
|
||||
|
||||
// one for mempool, one for consensus
|
||||
mtx := new(tmsync.Mutex)
|
||||
|
||||
@@ -118,18 +118,6 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
|
||||
Name: "latest_block_height",
|
||||
Help: "The latest block height.",
|
||||
}, labels).With(labelsAndValues...),
|
||||
BlockSyncing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "block_syncing",
|
||||
Help: "Whether or not a node is block syncing. 1 if yes, 0 if no.",
|
||||
}, labels).With(labelsAndValues...),
|
||||
StateSyncing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "state_syncing",
|
||||
Help: "Whether or not a node is state syncing. 1 if yes, 0 if no.",
|
||||
}, labels).With(labelsAndValues...),
|
||||
BlockParts: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
@@ -208,8 +196,6 @@ func NopMetrics() *Metrics {
|
||||
BlockSizeBytes: discard.NewGauge(),
|
||||
TotalTxs: discard.NewGauge(),
|
||||
CommittedHeight: discard.NewGauge(),
|
||||
BlockSyncing: discard.NewGauge(),
|
||||
StateSyncing: discard.NewGauge(),
|
||||
BlockParts: discard.NewCounter(),
|
||||
StepDurationSeconds: discard.NewHistogram(),
|
||||
BlockGossipPartsReceived: discard.NewCounter(),
|
||||
|
||||
@@ -61,10 +61,6 @@ type Metrics struct {
|
||||
TotalTxs metrics.Gauge
|
||||
// The latest block height.
|
||||
CommittedHeight metrics.Gauge `metrics_name:"latest_block_height"`
|
||||
// Whether or not a node is block syncing. 1 if yes, 0 if no.
|
||||
BlockSyncing metrics.Gauge
|
||||
// Whether or not a node is state syncing. 1 if yes, 0 if no.
|
||||
StateSyncing metrics.Gauge
|
||||
|
||||
// Number of block parts transmitted by each peer.
|
||||
BlockParts metrics.Counter `metrics_labels:"peer_id"`
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/cosmos/gogoproto/proto"
|
||||
|
||||
cstypes "github.com/tendermint/tendermint/consensus/types"
|
||||
"github.com/tendermint/tendermint/libs/bits"
|
||||
tmmath "github.com/tendermint/tendermint/libs/math"
|
||||
|
||||
@@ -119,8 +119,6 @@ func (conR *Reactor) SwitchToConsensus(state sm.State, skipWAL bool) {
|
||||
conR.mtx.Lock()
|
||||
conR.waitSync = false
|
||||
conR.mtx.Unlock()
|
||||
conR.Metrics.BlockSyncing.Set(0)
|
||||
conR.Metrics.StateSyncing.Set(0)
|
||||
|
||||
if skipWAL {
|
||||
conR.conS.doWALCatchup = false
|
||||
|
||||
@@ -155,7 +155,7 @@ func TestReactorWithEvidence(t *testing.T) {
|
||||
// css[i] = newStateWithConfig(thisConfig, state, privVals[i], app)
|
||||
|
||||
blockDB := dbm.NewMemDB()
|
||||
blockStore := store.NewBlockStore(blockDB)
|
||||
blockStore := store.NewBlockStore(blockDB, store.BlockStoreOptions{})
|
||||
|
||||
mtx := new(tmsync.Mutex)
|
||||
memplMetrics := mempl.NopMetrics()
|
||||
|
||||
@@ -254,7 +254,7 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error {
|
||||
|
||||
h.logger.Info("ABCI Handshake App Info",
|
||||
"height", blockHeight,
|
||||
"hash", appHash,
|
||||
"hash", log.NewLazySprintf("%X", appHash),
|
||||
"software-version", res.Version,
|
||||
"protocol-version", res.AppVersion,
|
||||
)
|
||||
@@ -271,7 +271,7 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error {
|
||||
}
|
||||
|
||||
h.logger.Info("Completed ABCI Handshake - Tendermint and App are synced",
|
||||
"appHeight", blockHeight, "appHash", appHash)
|
||||
"appHeight", blockHeight, "appHash", log.NewLazySprintf("%X", appHash))
|
||||
|
||||
// TODO: (on restart) replay mempool
|
||||
|
||||
|
||||
@@ -290,7 +290,7 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo
|
||||
if err != nil {
|
||||
tmos.Exit(err.Error())
|
||||
}
|
||||
blockStore := store.NewBlockStore(blockStoreDB)
|
||||
blockStore := store.NewBlockStore(blockStoreDB, store.BlockStoreOptions{})
|
||||
|
||||
// Get State
|
||||
stateDB, err := dbm.NewDB("state", dbType, config.DBDir())
|
||||
|
||||
@@ -60,7 +60,7 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
blockStore := store.NewBlockStore(blockStoreDB)
|
||||
blockStore := store.NewBlockStore(blockStoreDB, store.BlockStoreOptions{})
|
||||
|
||||
proxyApp := proxy.NewAppConns(proxy.NewLocalClientCreator(app), proxy.NopMetrics())
|
||||
proxyApp.SetLogger(logger.With("module", "proxy"))
|
||||
|
||||
@@ -27,22 +27,28 @@ Usage:
|
||||
abci-cli [command]
|
||||
|
||||
Available Commands:
|
||||
batch Run a batch of abci commands against an application
|
||||
check_tx Validate a tx
|
||||
commit Commit the application state and return the Merkle root hash
|
||||
console Start an interactive abci console for multiple commands
|
||||
deliver_tx Deliver a new tx to the application
|
||||
kvstore ABCI demo example
|
||||
echo Have the application echo a message
|
||||
help Help about any command
|
||||
info Get some info about the application
|
||||
query Query the application state
|
||||
batch run a batch of abci commands against an application
|
||||
check_tx validate a transaction
|
||||
commit commit the application state and return the Merkle root hash
|
||||
completion Generate the autocompletion script for the specified shell
|
||||
console start an interactive ABCI console for multiple commands
|
||||
deliver_tx deliver a new transaction to the application
|
||||
echo have the application echo a message
|
||||
help Help about any command
|
||||
info get some info about the application
|
||||
kvstore ABCI demo example
|
||||
prepare_proposal prepare proposal
|
||||
process_proposal process proposal
|
||||
query query the application state
|
||||
test run integration tests
|
||||
version print ABCI console version
|
||||
|
||||
Flags:
|
||||
--abci string socket or grpc (default "socket")
|
||||
--address string address of application socket (default "tcp://127.0.0.1:26658")
|
||||
-h, --help help for abci-cli
|
||||
-v, --verbose print the command and results as if it were a console session
|
||||
--abci string either socket or grpc (default "socket")
|
||||
--address string address of application socket (default "tcp://0.0.0.0:26658")
|
||||
-h, --help help for abci-cli
|
||||
--log_level string set the logger level (default "debug")
|
||||
-v, --verbose print the command and results as if it were a console session
|
||||
|
||||
Use "abci-cli [command] --help" for more information about a command.
|
||||
```
|
||||
@@ -58,47 +64,51 @@ purposes.
|
||||
|
||||
We'll start a kvstore application, which was installed at the same time
|
||||
as `abci-cli` above. The kvstore just stores transactions in a merkle
|
||||
tree.
|
||||
|
||||
Its code can be found
|
||||
[here](https://github.com/tendermint/tendermint/blob/v0.34.x/abci/cmd/abci-cli/abci-cli.go)
|
||||
and looks like:
|
||||
tree. Its code can be found
|
||||
[here](https://github.com/tendermint/tendermint/blob/main/abci/cmd/abci-cli/abci-cli.go)
|
||||
and looks like the following:
|
||||
|
||||
```go
|
||||
func cmdKVStore(cmd *cobra.Command, args []string) error {
|
||||
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
|
||||
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
|
||||
|
||||
// Create the application - in memory or persisted to disk
|
||||
var app types.Application
|
||||
if flagPersist == "" {
|
||||
app = kvstore.NewKVStoreApplication()
|
||||
} else {
|
||||
app = kvstore.NewPersistentKVStoreApplication(flagPersist)
|
||||
app.(*kvstore.PersistentKVStoreApplication).SetLogger(logger.With("module", "kvstore"))
|
||||
}
|
||||
// Create the application - in memory or persisted to disk
|
||||
var app types.Application
|
||||
if flagPersist == "" {
|
||||
var err error
|
||||
flagPersist, err = os.MkdirTemp("", "persistent_kvstore_tmp")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
app = kvstore.NewPersistentKVStoreApplication(flagPersist)
|
||||
app.(*kvstore.PersistentKVStoreApplication).SetLogger(logger.With("module", "kvstore"))
|
||||
|
||||
// Start the listener
|
||||
srv, err := server.NewServer(flagAddrD, flagAbci, app)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
srv.SetLogger(logger.With("module", "abci-server"))
|
||||
if err := srv.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
// Start the listener
|
||||
srv, err := server.NewServer(flagAddress, flagAbci, app)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
srv.SetLogger(logger.With("module", "abci-server"))
|
||||
if err := srv.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Stop upon receiving SIGTERM or CTRL-C.
|
||||
tmos.TrapSignal(logger, func() {
|
||||
// Cleanup
|
||||
srv.Stop()
|
||||
})
|
||||
// Stop upon receiving SIGTERM or CTRL-C.
|
||||
tmos.TrapSignal(logger, func() {
|
||||
// Cleanup
|
||||
if err := srv.Stop(); err != nil {
|
||||
logger.Error("Error while stopping server", "err", err)
|
||||
}
|
||||
})
|
||||
|
||||
// Run forever.
|
||||
select {}
|
||||
// Run forever.
|
||||
select {}
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
Start by running:
|
||||
Start the application by running:
|
||||
|
||||
```sh
|
||||
abci-cli kvstore
|
||||
@@ -163,32 +173,32 @@ Try running these commands:
|
||||
-> data: hello
|
||||
-> data.hex: 0x68656C6C6F
|
||||
|
||||
> info
|
||||
> info
|
||||
-> code: OK
|
||||
-> data: {"size":0}
|
||||
-> data.hex: 0x7B2273697A65223A307D
|
||||
|
||||
> prepare_proposal "abc"
|
||||
-> code: OK
|
||||
-> log: Succeeded. Tx: abc action: UNMODIFIED
|
||||
-> log: Succeeded. Tx: abc
|
||||
|
||||
> process_proposal "abc"
|
||||
-> code: OK
|
||||
-> status: ACCEPT
|
||||
|
||||
> commit
|
||||
> commit
|
||||
-> code: OK
|
||||
-> data.hex: 0x0000000000000000
|
||||
|
||||
> deliver_tx "abc"
|
||||
-> code: OK
|
||||
|
||||
> info
|
||||
> info
|
||||
-> code: OK
|
||||
-> data: {"size":1}
|
||||
-> data.hex: 0x7B2273697A65223A317D
|
||||
|
||||
> commit
|
||||
> commit
|
||||
-> code: OK
|
||||
-> data.hex: 0x0200000000000000
|
||||
|
||||
@@ -204,7 +214,7 @@ Try running these commands:
|
||||
> deliver_tx "def=xyz"
|
||||
-> code: OK
|
||||
|
||||
> commit
|
||||
> commit
|
||||
-> code: OK
|
||||
-> data.hex: 0x0400000000000000
|
||||
|
||||
@@ -219,11 +229,9 @@ Try running these commands:
|
||||
|
||||
> prepare_proposal "preparedef"
|
||||
-> code: OK
|
||||
-> log: Succeeded. Tx: def action: ADDED
|
||||
-> code: OK
|
||||
-> log: Succeeded. Tx: preparedef action: REMOVED
|
||||
-> log: Succeeded. Tx: replacedef
|
||||
|
||||
> process_proposal "def"
|
||||
> process_proposal "replacedef"
|
||||
-> code: OK
|
||||
-> status: ACCEPT
|
||||
|
||||
@@ -245,21 +253,21 @@ Try running these commands:
|
||||
Note that if we do `deliver_tx "abc"` it will store `(abc, abc)`, but if
|
||||
we do `deliver_tx "abc=efg"` it will store `(abc, efg)`.
|
||||
|
||||
Similarly, you could put the commands in a file and run
|
||||
You could put the commands in a file and run
|
||||
`abci-cli --verbose batch < myfile`.
|
||||
|
||||
|
||||
Note that the `abci-cli` is designed strictly for testing and debugging. In a real
|
||||
deployment, the role of sending messages is taken by Tendermint, which
|
||||
connects to the app using three separate connections, each with its own
|
||||
pattern of messages.
|
||||
|
||||
For examples of running an ABCI app with Tendermint, see the
|
||||
[getting started guide](./getting-started.md).
|
||||
|
||||
## Bounties
|
||||
|
||||
Want to write an app in your favorite language?! We'd be happy
|
||||
to add you to our [ecosystem](https://github.com/tendermint/awesome#ecosystem)!
|
||||
See [funding](https://github.com/interchainio/funding) opportunities from the
|
||||
[Interchain Foundation](https://interchain.io) for implementations in new languages and more.
|
||||
|
||||
The `abci-cli` is designed strictly for testing and debugging. In a real
|
||||
deployment, the role of sending messages is taken by Tendermint, which
|
||||
connects to the app using three separate connections, each with its own
|
||||
pattern of messages.
|
||||
|
||||
For examples of running an ABCI app with
|
||||
Tendermint, see the [getting started guide](./getting-started.md).
|
||||
Next is the ABCI specification.
|
||||
|
||||
@@ -11,9 +11,10 @@ application you want to run. So, to run a complete blockchain that does
|
||||
something useful, you must start two programs: one is Tendermint Core,
|
||||
the other is your application, which can be written in any programming
|
||||
language. Recall from [the intro to
|
||||
ABCI](../introduction/what-is-tendermint.md#abci-overview) that Tendermint Core handles all the p2p and consensus stuff, and just forwards transactions to the
|
||||
ABCI](../introduction/what-is-tendermint.md#abci-overview) that Tendermint Core
|
||||
handles all the p2p and consensus stuff, and just forwards transactions to the
|
||||
application when they need to be validated, or when they're ready to be
|
||||
committed to a block.
|
||||
executed and committed.
|
||||
|
||||
In this guide, we show you some examples of how to run an application
|
||||
using Tendermint.
|
||||
@@ -22,7 +23,8 @@ using Tendermint.
|
||||
|
||||
The first apps we will work with are written in Go. To install them, you
|
||||
need to [install Go](https://golang.org/doc/install), put
|
||||
`$GOPATH/bin` in your `$PATH` and enable go modules with these instructions:
|
||||
`$GOPATH/bin` in your `$PATH` and enable go modules. If you use `bash`,
|
||||
follow these instructions:
|
||||
|
||||
```bash
|
||||
echo export GOPATH=\"\$HOME/go\" >> ~/.bash_profile
|
||||
@@ -31,17 +33,48 @@ echo export PATH=\"\$PATH:\$GOPATH/bin\" >> ~/.bash_profile
|
||||
|
||||
Then run
|
||||
|
||||
```sh
|
||||
```bash
|
||||
go get github.com/tendermint/tendermint
|
||||
cd $GOPATH/src/github.com/tendermint/tendermint
|
||||
make install_abci
|
||||
```
|
||||
|
||||
Now you should have the `abci-cli` installed; you'll notice the `kvstore`
|
||||
command, an example application written
|
||||
in Go. See below for an application written in JavaScript.
|
||||
Now you should have the `abci-cli` installed; run `abci-cli` to see the list of commands:
|
||||
|
||||
Now, let's run some apps!
|
||||
```
|
||||
Usage:
|
||||
abci-cli [command]
|
||||
|
||||
Available Commands:
|
||||
batch run a batch of abci commands against an application
|
||||
check_tx validate a transaction
|
||||
commit commit the application state and return the Merkle root hash
|
||||
completion Generate the autocompletion script for the specified shell
|
||||
console start an interactive ABCI console for multiple commands
|
||||
deliver_tx deliver a new transaction to the application
|
||||
echo have the application echo a message
|
||||
help Help about any command
|
||||
info get some info about the application
|
||||
kvstore ABCI demo example
|
||||
prepare_proposal prepare proposal
|
||||
process_proposal process proposal
|
||||
query query the application state
|
||||
test run integration tests
|
||||
version print ABCI console version
|
||||
|
||||
Flags:
|
||||
--abci string either socket or grpc (default "socket")
|
||||
--address string address of application socket (default "tcp://0.0.0.0:26658")
|
||||
-h, --help help for abci-cli
|
||||
--log_level string set the logger level (default "debug")
|
||||
-v, --verbose print the command and results as if it were a console session
|
||||
|
||||
Use "abci-cli [command] --help" for more information about a command.
|
||||
```
|
||||
|
||||
You'll notice the `kvstore` command, an example application written in Go.
|
||||
|
||||
Now, let's run an app!
|
||||
|
||||
## KVStore - A First Example
|
||||
|
||||
@@ -68,7 +101,7 @@ tendermint node
|
||||
```
|
||||
|
||||
If you have used Tendermint, you may want to reset the data for a new
|
||||
blockchain by running `tendermint unsafe_reset_all`. Then you can run
|
||||
blockchain by running `tendermint unsafe-reset-all`. Then you can run
|
||||
`tendermint node` to start Tendermint, and connect to the app. For more
|
||||
details, see [the guide on using Tendermint](../tendermint-core/using-tendermint.md).
|
||||
|
||||
@@ -164,47 +197,3 @@ curl -s 'localhost:26657/abci_query?data="name"'
|
||||
|
||||
Try some other transactions and queries to make sure everything is
|
||||
working!
|
||||
|
||||
|
||||
## CounterJS - Example in Another Language
|
||||
|
||||
We also want to run applications in another language - in this case,
|
||||
we'll run a Javascript version of the `counter`. To run it, you'll need
|
||||
to [install node](https://nodejs.org/en/download/).
|
||||
|
||||
You'll also need to fetch the relevant repository, from
|
||||
[here](https://github.com/tendermint/js-abci), then install it:
|
||||
|
||||
```sh
|
||||
git clone https://github.com/tendermint/js-abci.git
|
||||
cd js-abci
|
||||
npm install abci
|
||||
```
|
||||
|
||||
Kill the previous `counter` and `tendermint` processes. Now run the app:
|
||||
|
||||
```sh
|
||||
node example/counter.js
|
||||
```
|
||||
|
||||
In another window, reset and start `tendermint`:
|
||||
|
||||
```sh
|
||||
tendermint unsafe_reset_all
|
||||
tendermint node
|
||||
```
|
||||
|
||||
Once again, you should see blocks streaming by - but now, our
|
||||
application is written in Javascript! Try sending some transactions, and
|
||||
like before - the results should be the same:
|
||||
|
||||
```sh
|
||||
# ok
|
||||
curl localhost:26657/broadcast_tx_commit?tx=0x00
|
||||
# invalid nonce
|
||||
curl localhost:26657/broadcast_tx_commit?tx=0x05
|
||||
# ok
|
||||
curl localhost:26657/broadcast_tx_commit?tx=0x01
|
||||
```
|
||||
|
||||
Neat, eh?
|
||||
|
||||
@@ -78,6 +78,7 @@ Note the context/background should be written in the present tense.
|
||||
- [ADR-039: Peer-Behaviour](./adr-039-peer-behaviour.md)
|
||||
- [ADR-063: Privval-gRPC](./adr-063-privval-grpc.md)
|
||||
- [ADR-067: Mempool Refactor](./adr-067-mempool-refactor.md)
|
||||
- [ADR-071: Proposer-Based Timestamps](./adr-071-proposer-based-timestamps.md)
|
||||
- [ADR-075: RPC Event Subscription Interface](./adr-075-rpc-subscription.md)
|
||||
- [ADR-079: Ed25519 Verification](./adr-079-ed25519-verification.md)
|
||||
- [ADR-081: Protocol Buffers Management](./adr-081-protobuf-mgmt.md)
|
||||
@@ -114,7 +115,6 @@ None
|
||||
- [ADR-064: Batch Verification](./adr-064-batch-verification.md)
|
||||
- [ADR-068: Reverse-Sync](./adr-068-reverse-sync.md)
|
||||
- [ADR-069: Node Initialization](./adr-069-flexible-node-initialization.md)
|
||||
- [ADR-071: Proposer-Based Timestamps](./adr-071-proposer-based-timestamps.md)
|
||||
- [ADR-073: Adopt LibP2P](./adr-073-libp2p.md)
|
||||
- [ADR-074: Migrate Timeout Parameters to Consensus Parameters](./adr-074-timeout-params.md)
|
||||
- [ADR-080: Reverse Sync](./adr-080-reverse-sync.md)
|
||||
|
||||
@@ -6,7 +6,7 @@ order: 4
|
||||
|
||||
Tendermint is software for securely and consistently replicating an
|
||||
application on many machines. By securely, we mean that Tendermint works
|
||||
even if up to 1/3 of machines fail in arbitrary ways. By consistently,
|
||||
as long as less than 1/3 of machines fail in arbitrary ways. By consistently,
|
||||
we mean that every non-faulty machine sees the same transaction log and
|
||||
computes the same state. Secure and consistent replication is a
|
||||
fundamental problem in distributed systems; it plays a critical role in
|
||||
@@ -22,15 +22,14 @@ reformalization of BFT in a more modern setting, with emphasis on
|
||||
peer-to-peer networking and cryptographic authentication. The name
|
||||
derives from the way transactions are batched in blocks, where each
|
||||
block contains a cryptographic hash of the previous one, forming a
|
||||
chain. In practice, the blockchain data structure actually optimizes BFT
|
||||
design.
|
||||
chain.
|
||||
|
||||
Tendermint consists of two chief technical components: a blockchain
|
||||
consensus engine and a generic application interface. The consensus
|
||||
engine, called Tendermint Core, ensures that the same transactions are
|
||||
recorded on every machine in the same order. The application interface,
|
||||
called the Application BlockChain Interface (ABCI), enables the
|
||||
transactions to be processed in any programming language. Unlike other
|
||||
called the Application BlockChain Interface (ABCI), delivers the transactions
|
||||
to applications for processing. Unlike other
|
||||
blockchain and consensus solutions, which come pre-packaged with built
|
||||
in state machines (like a fancy key-value store, or a quirky scripting
|
||||
language), developers can use Tendermint for BFT state machine
|
||||
@@ -51,13 +50,13 @@ Hyperledger's Burrow.
|
||||
|
||||
### Zookeeper, etcd, consul
|
||||
|
||||
Zookeeper, etcd, and consul are all implementations of a key-value store
|
||||
atop a classical, non-BFT consensus algorithm. Zookeeper uses a version
|
||||
of Paxos called Zookeeper Atomic Broadcast, while etcd and consul use
|
||||
the Raft consensus algorithm, which is much younger and simpler. A
|
||||
Zookeeper, etcd, and consul are all implementations of key-value stores
|
||||
atop a classical, non-BFT consensus algorithm. Zookeeper uses an
|
||||
algorithm called Zookeeper Atomic Broadcast, while etcd and consul use
|
||||
the Raft log replication algorithm. A
|
||||
typical cluster contains 3-5 machines, and can tolerate crash failures
|
||||
in up to 1/2 of the machines, but even a single Byzantine fault can
|
||||
destroy the system.
|
||||
in less than 1/2 of the machines (e.g., 1 out of 3 or 2 out of 5),
|
||||
but even a single Byzantine fault can jeopardize the whole system.
|
||||
|
||||
Each offering provides a slightly different implementation of a
|
||||
featureful key-value store, but all are generally focused around
|
||||
@@ -66,8 +65,8 @@ configuration, service discovery, locking, leader-election, and so on.
|
||||
|
||||
Tendermint is in essence similar software, but with two key differences:
|
||||
|
||||
- It is Byzantine Fault Tolerant, meaning it can only tolerate up to a
|
||||
1/3 of failures, but those failures can include arbitrary behavior -
|
||||
- It is Byzantine Fault Tolerant, meaning it can only tolerate less than 1/3
|
||||
of machines failing, but those failures can include arbitrary behavior -
|
||||
including hacking and malicious attacks. - It does not specify a
|
||||
particular application, like a fancy key-value store. Instead, it
|
||||
focuses on arbitrary state machine replication, so developers can build
|
||||
@@ -106,8 +105,8 @@ docker containers, modules it calls "chaincode". It uses an
|
||||
implementation of [PBFT](http://pmg.csail.mit.edu/papers/osdi99.pdf).
|
||||
from a team at IBM that is [augmented to handle potentially
|
||||
non-deterministic
|
||||
chaincode](https://drops.dagstuhl.de/opus/volltexte/2017/7093/pdf/LIPIcs-OPODIS-2016-24.pdf) It is
|
||||
possible to implement this docker-based behavior as a ABCI app in
|
||||
chaincode](https://drops.dagstuhl.de/opus/volltexte/2017/7093/pdf/LIPIcs-OPODIS-2016-24.pdf).
|
||||
It is possible to implement this docker-based behavior as an ABCI app in
|
||||
Tendermint, though extending Tendermint to handle non-determinism
|
||||
remains for future work.
|
||||
|
||||
@@ -143,24 +142,22 @@ in design and suffers from "spaghetti code".
|
||||
Another problem with monolithic design is that it limits you to the
|
||||
language of the blockchain stack (or vice versa). In the case of
|
||||
Ethereum which supports a Turing-complete bytecode virtual-machine, it
|
||||
limits you to languages that compile down to that bytecode; today, those
|
||||
are Serpent and Solidity.
|
||||
limits you to languages that compile down to that bytecode; while the
|
||||
[list](https://github.com/pirapira/awesome-ethereum-virtual-machine#programming-languages-that-compile-into-evm)
|
||||
is growing, it is still very limited.
|
||||
|
||||
In contrast, our approach is to decouple the consensus engine and P2P
|
||||
layers from the details of the application state of the particular
|
||||
layers from the details of the state of the particular
|
||||
blockchain application. We do this by abstracting away the details of
|
||||
the application to an interface, which is implemented as a socket
|
||||
protocol.
|
||||
|
||||
Thus we have an interface, the Application BlockChain Interface (ABCI),
|
||||
and its primary implementation, the Tendermint Socket Protocol (TSP, or
|
||||
Teaspoon).
|
||||
|
||||
### Intro to ABCI
|
||||
|
||||
[Tendermint Core](https://github.com/tendermint/tendermint) (the
|
||||
"consensus engine") communicates with the application via a socket
|
||||
protocol that satisfies the ABCI.
|
||||
[Tendermint Core](https://github.com/tendermint/tendermint), the
|
||||
"consensus engine", communicates with the application via a socket
|
||||
protocol that satisfies the ABCI, the Tendermint Socket Protocol
|
||||
(TSP, or Teaspoon).
|
||||
|
||||
To draw an analogy, lets talk about a well-known cryptocurrency,
|
||||
Bitcoin. Bitcoin is a cryptocurrency blockchain where each node
|
||||
@@ -180,7 +177,7 @@ The application will be responsible for
|
||||
- Allowing clients to query the UTXO database.
|
||||
|
||||
Tendermint is able to decompose the blockchain design by offering a very
|
||||
simple API (ie. the ABCI) between the application process and consensus
|
||||
simple API (i.e. the ABCI) between the application process and consensus
|
||||
process.
|
||||
|
||||
The ABCI consists of 3 primary message types that get delivered from the
|
||||
@@ -239,8 +236,7 @@ Solidity on Ethereum is a great language of choice for blockchain
|
||||
applications because, among other reasons, it is a completely
|
||||
deterministic programming language. However, it's also possible to
|
||||
create deterministic applications using existing popular languages like
|
||||
Java, C++, Python, or Go. Game programmers and blockchain developers are
|
||||
already familiar with creating deterministic programs by avoiding
|
||||
Java, C++, Python, or Go, by avoiding
|
||||
sources of non-determinism such as:
|
||||
|
||||
- random number generators (without deterministic seeding)
|
||||
@@ -271,14 +267,15 @@ committed in a chain, with one block at each **height**. A block may
|
||||
fail to be committed, in which case the protocol moves to the next
|
||||
**round**, and a new validator gets to propose a block for that height.
|
||||
Two stages of voting are required to successfully commit a block; we
|
||||
call them **pre-vote** and **pre-commit**. A block is committed when
|
||||
more than 2/3 of validators pre-commit for the same block in the same
|
||||
round.
|
||||
call them **pre-vote** and **pre-commit**.
|
||||
|
||||
There is a picture of a couple doing the polka because validators are
|
||||
doing something like a polka dance. When more than two-thirds of the
|
||||
validators pre-vote for the same block, we call that a **polka**. Every
|
||||
pre-commit must be justified by a polka in the same round.
|
||||
A block is committed when
|
||||
more than 2/3 of validators pre-commit for the same block in the same
|
||||
round.
|
||||
|
||||
Validators may fail to commit a block for a number of reasons; the
|
||||
current proposer may be offline, or the network may be slow. Tendermint
|
||||
|
||||
@@ -18,50 +18,52 @@ Listen address can be changed in the config file (see
|
||||
|
||||
The following metrics are available:
|
||||
|
||||
| **Name** | **Type** | **Tags** | **Description** |
|
||||
|----------------------------------------|-----------|-----------------|--------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| abci_connection_method_timing_seconds | Histogram | method, type | Timings for each of the ABCI methods |
|
||||
| consensus_height | Gauge | | Height of the chain |
|
||||
| consensus_validators | Gauge | | Number of validators |
|
||||
| consensus_validators_power | Gauge | | Total voting power of all validators |
|
||||
| consensus_validator_power | Gauge | | Voting power of the node if in the validator set |
|
||||
| consensus_validator_last_signed_height | Gauge | | Last height the node signed a block, if the node is a validator |
|
||||
| consensus_validator_missed_blocks | Gauge | | Total amount of blocks missed for the node, if the node is a validator |
|
||||
| consensus_missing_validators | Gauge | | Number of validators who did not sign |
|
||||
| consensus_missing_validators_power | Gauge | | Total voting power of the missing validators |
|
||||
| consensus_byzantine_validators | Gauge | | Number of validators who tried to double sign |
|
||||
| consensus_byzantine_validators_power | Gauge | | Total voting power of the byzantine validators |
|
||||
| consensus_block_interval_seconds | Histogram | | Time between this and last block (Block.Header.Time) in seconds |
|
||||
| consensus_rounds | Gauge | | Number of rounds |
|
||||
| consensus_num_txs | Gauge | | Number of transactions |
|
||||
| consensus_total_txs | Gauge | | Total number of transactions committed |
|
||||
| consensus_block_parts | counter | peer_id | number of blockparts transmitted by peer |
|
||||
| consensus_latest_block_height | gauge | | /status sync_info number |
|
||||
| consensus_block_syncing | gauge | | either 0 (not block syncing) or 1 (syncing) |
|
||||
| consensus_state_syncing | gauge | | either 0 (not state syncing) or 1 (syncing) |
|
||||
| consensus_block_size_bytes | Gauge | | Block size in bytes |
|
||||
| consensus_step_duration | Histogram | step | Histogram of durations for each step in the consensus protocol |
|
||||
| consensus_round_duration | Histogram | | Histogram of durations for all the rounds that have occurred since the process started |
|
||||
| consensus_block_gossip_parts_received | Counter | matches_current | Number of block parts received by the node |
|
||||
| consensus_quorum_prevote_delay | Gauge | | Interval in seconds between the proposal timestamp and the timestamp of the earliest prevote that achieved a quorum |
|
||||
| consensus_full_prevote_delay | Gauge | | Interval in seconds between the proposal timestamp and the timestamp of the latest prevote in a round where all validators voted |
|
||||
| consensus_proposal_receive_count | Counter | status | Total number of proposals received by the node since process start |
|
||||
| consensus_proposal_create_count | Counter | | Total number of proposals created by the node since process start |
|
||||
| consensus_round_voting_power_percent | Gauge | vote_type | A value between 0 and 1.0 representing the percentage of the total voting power per vote type received within a round |
|
||||
| consensus_late_votes | Counter | vote_type | Number of votes received by the node since process start that correspond to earlier heights and rounds than this node is currently in. |
|
||||
| p2p_peers | Gauge | | Number of peers node's connected to |
|
||||
| p2p_peer_receive_bytes_total | counter | peer_id, chID | number of bytes per channel received from a given peer |
|
||||
| p2p_peer_send_bytes_total | counter | peer_id, chID | number of bytes per channel sent to a given peer |
|
||||
| p2p_peer_pending_send_bytes | gauge | peer_id | number of pending bytes to be sent to a given peer |
|
||||
| p2p_num_txs | gauge | peer_id | number of transactions submitted by each peer_id |
|
||||
| p2p_pending_send_bytes | gauge | peer_id | amount of data pending to be sent to peer |
|
||||
| mempool_size | Gauge | | Number of uncommitted transactions |
|
||||
| mempool_tx_size_bytes | histogram | | transaction sizes in bytes |
|
||||
| mempool_failed_txs | counter | | number of failed transactions |
|
||||
| mempool_recheck_times | counter | | number of transactions rechecked in the mempool |
|
||||
| state_block_processing_time | histogram | | time between BeginBlock and EndBlock in ms |
|
||||
| state_consensus_param_updates | Counter | | number of consensus parameter updates returned by the application since process start |
|
||||
| state_validator_set_updates | Counter | | number of validator set updates returned by the application since process start |
|
||||
| **Name** | **Type** | **Tags** | **Description** |
|
||||
|------------------------------------------|-----------|-------------------|--------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| `abci_connection_method_timing_seconds` | Histogram | `method`, `type` | Timings for each of the ABCI methods |
|
||||
| `blocksync_syncing` | Gauge | | Either 0 (not block syncing) or 1 (syncing) |
|
||||
| `consensus_height` | Gauge | | Height of the chain |
|
||||
| `consensus_validators` | Gauge | | Number of validators |
|
||||
| `consensus_validators_power` | Gauge | | Total voting power of all validators |
|
||||
| `consensus_validator_power` | Gauge | | Voting power of the node if in the validator set |
|
||||
| `consensus_validator_last_signed_height` | Gauge | | Last height the node signed a block, if the node is a validator |
|
||||
| `consensus_validator_missed_blocks` | Gauge | | Total amount of blocks missed for the node, if the node is a validator |
|
||||
| `consensus_missing_validators` | Gauge | | Number of validators who did not sign |
|
||||
| `consensus_missing_validators_power` | Gauge | | Total voting power of the missing validators |
|
||||
| `consensus_byzantine_validators` | Gauge | | Number of validators who tried to double sign |
|
||||
| `consensus_byzantine_validators_power` | Gauge | | Total voting power of the byzantine validators |
|
||||
| `consensus_block_interval_seconds` | Histogram | | Time between this and last block (Block.Header.Time) in seconds |
|
||||
| `consensus_rounds` | Gauge | | Number of rounds |
|
||||
| `consensus_num_txs` | Gauge | | Number of transactions |
|
||||
| `consensus_total_txs` | Gauge | | Total number of transactions committed |
|
||||
| `consensus_block_parts` | Counter | `peer_id` | Number of blockparts transmitted by peer |
|
||||
| `consensus_latest_block_height` | Gauge | | /status sync\_info number |
|
||||
| `consensus_block_size_bytes` | Gauge | | Block size in bytes |
|
||||
| `consensus_step_duration` | Histogram | `step` | Histogram of durations for each step in the consensus protocol |
|
||||
| `consensus_round_duration` | Histogram | | Histogram of durations for all the rounds that have occurred since the process started |
|
||||
| `consensus_block_gossip_parts_received` | Counter | `matches_current` | Number of block parts received by the node |
|
||||
| `consensus_quorum_prevote_delay` | Gauge | | Interval in seconds between the proposal timestamp and the timestamp of the earliest prevote that achieved a quorum |
|
||||
| `consensus_full_prevote_delay` | Gauge | | Interval in seconds between the proposal timestamp and the timestamp of the latest prevote in a round where all validators voted |
|
||||
| `consensus_proposal_receive_count` | Counter | `status` | Total number of proposals received by the node since process start |
|
||||
| `consensus_proposal_create_count` | Counter | | Total number of proposals created by the node since process start |
|
||||
| `consensus_round_voting_power_percent` | Gauge | `vote_type` | A value between 0 and 1.0 representing the percentage of the total voting power per vote type received within a round |
|
||||
| `consensus_late_votes` | Counter | `vote_type` | Number of votes received by the node since process start that correspond to earlier heights and rounds than this node is currently in. |
|
||||
| `p2p_message_send_bytes_total` | Counter | `message_type` | Number of bytes sent to all peers per message type |
|
||||
| `p2p_message_receive_bytes_total` | Counter | `message_type` | Number of bytes received from all peers per message type |
|
||||
| `p2p_peers` | Gauge | | Number of peers node's connected to |
|
||||
| `p2p_peer_receive_bytes_total` | Counter | `peer_id`, `chID` | Number of bytes per channel received from a given peer |
|
||||
| `p2p_peer_send_bytes_total` | Counter | `peer_id`, `chID` | Number of bytes per channel sent to a given peer |
|
||||
| `p2p_peer_pending_send_bytes` | Gauge | `peer_id` | Number of pending bytes to be sent to a given peer |
|
||||
| `p2p_num_txs` | Gauge | `peer_id` | Number of transactions submitted by each peer\_id |
|
||||
| `p2p_pending_send_bytes` | Gauge | `peer_id` | Amount of data pending to be sent to peer |
|
||||
| `mempool_size` | Gauge | | Number of uncommitted transactions |
|
||||
| `mempool_tx_size_bytes` | Histogram | | Transaction sizes in bytes |
|
||||
| `mempool_failed_txs` | Counter | | Number of failed transactions |
|
||||
| `mempool_recheck_times` | Counter | | Number of transactions rechecked in the mempool |
|
||||
| `state_block_processing_time` | Histogram | | Time between BeginBlock and EndBlock in ms |
|
||||
| `state_consensus_param_updates` | Counter | | Number of consensus parameter updates returned by the application since process start |
|
||||
| `state_validator_set_updates` | Counter | | Number of validator set updates returned by the application since process start |
|
||||
| `statesync_syncing` | Gauge | | Either 0 (not state syncing) or 1 (syncing) |
|
||||
|
||||
## Useful queries
|
||||
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@@ -411,7 +411,7 @@ func initializeValidatorState(privVal types.PrivValidator, height int64) sm.Stor
|
||||
// initializeBlockStore creates a block storage and populates it w/ a dummy
|
||||
// block at +height+.
|
||||
func initializeBlockStore(db dbm.DB, state sm.State, valAddr []byte) (*store.BlockStore, error) {
|
||||
blockStore := store.NewBlockStore(db)
|
||||
blockStore := store.NewBlockStore(db, store.BlockStoreOptions{})
|
||||
|
||||
for i := int64(1); i <= state.LastBlockHeight; i++ {
|
||||
lastCommit := makeCommit(i-1, valAddr)
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/cosmos/gogoproto/proto"
|
||||
|
||||
clist "github.com/tendermint/tendermint/libs/clist"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
|
||||
32
go.mod
32
go.mod
@@ -21,7 +21,7 @@ require (
|
||||
github.com/ory/dockertest v3.3.5+incompatible
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/pointlander/peg v1.0.1
|
||||
github.com/prometheus/client_golang v1.13.0
|
||||
github.com/prometheus/client_golang v1.14.0
|
||||
github.com/prometheus/client_model v0.3.0
|
||||
github.com/prometheus/common v0.37.0
|
||||
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475
|
||||
@@ -29,12 +29,12 @@ require (
|
||||
github.com/sasha-s/go-deadlock v0.3.1
|
||||
github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa
|
||||
github.com/spf13/cobra v1.6.1
|
||||
github.com/spf13/viper v1.13.0
|
||||
github.com/spf13/viper v1.14.0
|
||||
github.com/stretchr/testify v1.8.1
|
||||
github.com/tendermint/tm-db v0.6.6
|
||||
golang.org/x/crypto v0.1.0
|
||||
golang.org/x/net v0.1.0
|
||||
google.golang.org/grpc v1.50.1
|
||||
golang.org/x/crypto v0.3.0
|
||||
golang.org/x/net v0.2.0
|
||||
google.golang.org/grpc v1.51.0
|
||||
)
|
||||
|
||||
require (
|
||||
@@ -44,13 +44,13 @@ require (
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.3.0
|
||||
github.com/btcsuite/btcd/btcutil v1.1.2
|
||||
github.com/cosmos/gogoproto v1.4.2
|
||||
github.com/gofrs/uuid v4.3.0+incompatible
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.3.2
|
||||
github.com/btcsuite/btcd/btcutil v1.1.3
|
||||
github.com/cosmos/gogoproto v1.4.3
|
||||
github.com/gofrs/uuid v4.3.1+incompatible
|
||||
github.com/google/uuid v1.3.0
|
||||
github.com/oasisprotocol/curve25519-voi v0.0.0-20220708102147-0a8a51822cae
|
||||
github.com/vektra/mockery/v2 v2.14.1
|
||||
github.com/vektra/mockery/v2 v2.15.0
|
||||
gonum.org/v1/gonum v0.12.0
|
||||
google.golang.org/protobuf v1.28.2-0.20220831092852-f930b1dc76e8
|
||||
)
|
||||
@@ -107,7 +107,7 @@ require (
|
||||
github.com/fatih/color v1.13.0 // indirect
|
||||
github.com/fatih/structtag v1.2.0 // indirect
|
||||
github.com/firefart/nonamedreturns v1.0.4 // indirect
|
||||
github.com/fsnotify/fsnotify v1.5.4 // indirect
|
||||
github.com/fsnotify/fsnotify v1.6.0 // indirect
|
||||
github.com/fzipp/gocyclo v0.6.0 // indirect
|
||||
github.com/go-chi/chi/v5 v5.0.7 // indirect
|
||||
github.com/go-critic/go-critic v0.6.5 // indirect
|
||||
@@ -222,7 +222,7 @@ require (
|
||||
github.com/sivchari/tenv v1.7.0 // indirect
|
||||
github.com/sonatard/noctx v0.0.1 // indirect
|
||||
github.com/sourcegraph/go-diff v0.6.1 // indirect
|
||||
github.com/spf13/afero v1.8.2 // indirect
|
||||
github.com/spf13/afero v1.9.2 // indirect
|
||||
github.com/spf13/cast v1.5.0 // indirect
|
||||
github.com/spf13/jwalterweatherman v1.1.0 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
@@ -255,12 +255,12 @@ require (
|
||||
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect
|
||||
golang.org/x/exp/typeparams v0.0.0-20220827204233-334a2380cb91 // indirect
|
||||
golang.org/x/mod v0.6.0 // indirect
|
||||
golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0 // indirect
|
||||
golang.org/x/sys v0.1.0 // indirect
|
||||
golang.org/x/term v0.1.0 // indirect
|
||||
golang.org/x/sync v0.1.0 // indirect
|
||||
golang.org/x/sys v0.2.0 // indirect
|
||||
golang.org/x/term v0.2.0 // indirect
|
||||
golang.org/x/text v0.4.0 // indirect
|
||||
golang.org/x/tools v0.2.0 // indirect
|
||||
google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a // indirect
|
||||
google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e // indirect
|
||||
gopkg.in/ini.v1 v1.67.0 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
|
||||
75
go.sum
75
go.sum
@@ -23,14 +23,15 @@ cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPT
|
||||
cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
|
||||
cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
|
||||
cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
|
||||
cloud.google.com/go v0.100.2 h1:t9Iw5QH5v4XtlEQaCtUY7x6sCABps8sW0acw7e2WQ6Y=
|
||||
cloud.google.com/go v0.104.0 h1:gSmWO7DY1vOm0MVU6DNXM11BWHHsTUmsC5cv1fuW5X8=
|
||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
|
||||
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
|
||||
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
|
||||
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
|
||||
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
|
||||
cloud.google.com/go/compute v1.6.1 h1:2sMmt8prCn7DPaG4Pmh0N3Inmc8cT8ae5k1M6VJ9Wqc=
|
||||
cloud.google.com/go/compute v1.12.1 h1:gKVJMEyqV5c/UnpzjjQbo3Rjvvqpr9B1DFSbJC4OXr0=
|
||||
cloud.google.com/go/compute/metadata v0.2.1 h1:efOwf5ymceDhK6PKMnnrTHP4pppY5L22mle96M1yP48=
|
||||
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
||||
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
|
||||
cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
|
||||
@@ -151,12 +152,12 @@ github.com/btcsuite/btcd v0.23.0 h1:V2/ZgjfDFIygAX3ZapeigkVBoVUtOJKSwrhZdlpSvaA=
|
||||
github.com/btcsuite/btcd v0.23.0/go.mod h1:0QJIIN1wwIXF/3G/m87gIwGniDMDQqjVn4SZgnFpsYY=
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.1.0/go.mod h1:2VzYrv4Gm4apmbVVsSq5bqf1Ec8v56E48Vt0Y/umPgA=
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.1.3/go.mod h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE=
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.3.0 h1:S/6K1GEwlEsFzZP4cOOl5mg6PEd/pr0zz7hvXcaxhJ4=
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.3.0/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04=
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U=
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04=
|
||||
github.com/btcsuite/btcd/btcutil v1.0.0/go.mod h1:Uoxwv0pqYWhD//tfTiipkxNfdhG9UrLwaeswfjfdF0A=
|
||||
github.com/btcsuite/btcd/btcutil v1.1.0/go.mod h1:5OapHB7A2hBBWLm48mmw4MOHNJCcUBTwmWH/0Jn8VHE=
|
||||
github.com/btcsuite/btcd/btcutil v1.1.2 h1:XLMbX8JQEiwMcYft2EGi8zPUkoa0abKIU6/BJSRsjzQ=
|
||||
github.com/btcsuite/btcd/btcutil v1.1.2/go.mod h1:UR7dsSJzJUfMmFiiLlIrMq1lS9jh9EdCV7FStZSnpi0=
|
||||
github.com/btcsuite/btcd/btcutil v1.1.3 h1:xfbtw8lwpp0G6NwSHb+UE67ryTFHJAiNuipusjXSohQ=
|
||||
github.com/btcsuite/btcd/btcutil v1.1.3/go.mod h1:UR7dsSJzJUfMmFiiLlIrMq1lS9jh9EdCV7FStZSnpi0=
|
||||
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
|
||||
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U=
|
||||
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
|
||||
@@ -240,8 +241,8 @@ github.com/coreos/go-systemd/v22 v22.3.3-0.20220203105225-a9a7ef127534/go.mod h1
|
||||
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
||||
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
||||
github.com/cosmos/go-bip39 v0.0.0-20180819234021-555e2067c45d/go.mod h1:tSxLoYXyBmiFeKpvmq4dzayMdCjCnu8uqmCysIGBT2Y=
|
||||
github.com/cosmos/gogoproto v1.4.2 h1:UeGRcmFW41l0G0MiefWhkPEVEwvu78SZsHBvI78dAYw=
|
||||
github.com/cosmos/gogoproto v1.4.2/go.mod h1:cLxOsn1ljAHSV527CHOtaIP91kK6cCrZETRBrkzItWU=
|
||||
github.com/cosmos/gogoproto v1.4.3 h1:RP3yyVREh9snv/lsOvmsAPQt8f44LgL281X0IOIhhcI=
|
||||
github.com/cosmos/gogoproto v1.4.3/go.mod h1:0hLIG5TR7IvV1fme1HCFKjfzW9X2x0Mo+RooWXCnOWU=
|
||||
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
@@ -336,8 +337,8 @@ github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM
|
||||
github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI=
|
||||
github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
|
||||
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
|
||||
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
|
||||
github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo=
|
||||
github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
@@ -398,8 +399,8 @@ github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5x
|
||||
github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
|
||||
github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
|
||||
github.com/gofrs/uuid v4.3.0+incompatible h1:CaSVZxm5B+7o45rtab4jC2G37WGYX1zQfuU2i6DSvnc=
|
||||
github.com/gofrs/uuid v4.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
|
||||
github.com/gofrs/uuid v4.3.1+incompatible h1:0/KbAdpx3UXAx1kEOWHJeOkpbgRFGHVgv+CFIY7dBJI=
|
||||
github.com/gofrs/uuid v4.3.1+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
|
||||
github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
@@ -912,8 +913,8 @@ github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP
|
||||
github.com/prometheus/client_golang v1.8.0/go.mod h1:O9VU6huf47PktckDQfMTX0Y8tY0/7TSWwj+ITvv0TnM=
|
||||
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
|
||||
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
|
||||
github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU=
|
||||
github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ=
|
||||
github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
|
||||
github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
@@ -1034,8 +1035,8 @@ github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0b
|
||||
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
|
||||
github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
|
||||
github.com/spf13/afero v1.8.2 h1:xehSyVa0YnHWsJ49JFljMpg1HX19V6NDZ1fkm1Xznbo=
|
||||
github.com/spf13/afero v1.8.2/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo=
|
||||
github.com/spf13/afero v1.9.2 h1:j49Hj62F0n+DaZ1dDCvhABaPNSGNkt32oRFxI33IEMw=
|
||||
github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y=
|
||||
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=
|
||||
@@ -1059,8 +1060,8 @@ github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/y
|
||||
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
|
||||
github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
|
||||
github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
|
||||
github.com/spf13/viper v1.13.0 h1:BWSJ/M+f+3nmdz9bxB+bWX28kkALN2ok11D0rSo8EJU=
|
||||
github.com/spf13/viper v1.13.0/go.mod h1:Icm2xNL3/8uyh/wFuB1jI7TiTNKp8632Nwegu+zgdYw=
|
||||
github.com/spf13/viper v1.14.0 h1:Rg7d3Lo706X9tHsJMUjdiwMpHB7W8WnSVOssIY+JElU=
|
||||
github.com/spf13/viper v1.14.0/go.mod h1:WT//axPky3FdvXHzGw33dNdXXXfFQqmEalje+egj8As=
|
||||
github.com/ssgreg/nlreturn/v2 v2.2.1 h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YEwQ0=
|
||||
github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I=
|
||||
github.com/stbenjam/no-sprintf-host-port v0.1.1 h1:tYugd/yrm1O0dV+ThCbaKZh195Dfm07ysF0U6JQXczc=
|
||||
@@ -1128,8 +1129,8 @@ github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijb
|
||||
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
||||
github.com/uudashr/gocognit v1.0.6 h1:2Cgi6MweCsdB6kpcVQp7EW4U23iBFQWfTXiWlyp842Y=
|
||||
github.com/uudashr/gocognit v1.0.6/go.mod h1:nAIUuVBnYU7pcninia3BHOvQkpQCeO76Uscky5BOwcY=
|
||||
github.com/vektra/mockery/v2 v2.14.1 h1:Xamr4zUkFBDGdZhJ6iCiJ1AwkGRmUgZd8zkwjRXt+TU=
|
||||
github.com/vektra/mockery/v2 v2.14.1/go.mod h1:bnD1T8tExSgPD1ripLkDbr60JA9VtQeu12P3wgLZd7M=
|
||||
github.com/vektra/mockery/v2 v2.15.0 h1:5Egbxoancm1hhkJUoAF+cf0FBzC9oxS28LL/ZKbC980=
|
||||
github.com/vektra/mockery/v2 v2.15.0/go.mod h1:RswGtsqDbCR9j4UcgBQuAZY7OFxI+TgtHevc0gR0kCY=
|
||||
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
|
||||
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
|
||||
@@ -1224,8 +1225,8 @@ golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5y
|
||||
golang.org/x/crypto v0.0.0-20210915214749-c084706c2272/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU=
|
||||
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
|
||||
golang.org/x/crypto v0.3.0 h1:a06MkbcxBrEFc0w0QIZWXrH/9cCX6KJyWbBOIwAn+7A=
|
||||
golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
|
||||
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
@@ -1333,8 +1334,8 @@ golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qx
|
||||
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.1.0 h1:hZ/3BUoy5aId7sCpA/Tc5lt8DkFgdVS2onTpJsZ/fl0=
|
||||
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
|
||||
golang.org/x/net v0.2.0 h1:sZfSu1wtKLGlWI4ZZayP0ck9Y73K1ynO6gqzTdBVdPU=
|
||||
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
@@ -1349,7 +1350,7 @@ golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ
|
||||
golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
|
||||
golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5 h1:OSnWWcOd/CtWQC2cYSBgbTSJv3ciqd8r54ySIW2y3RE=
|
||||
golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 h1:nt+Q6cXKz4MosCSpnbMtqiQ8Oz0pxTef2B4Vca2lvfk=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
@@ -1362,8 +1363,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0 h1:cu5kTvlzcw1Q5S9f5ip1/cpiB4nXvw1XYzFPGgzLUOY=
|
||||
golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
@@ -1458,19 +1459,19 @@ golang.org/x/sys v0.0.0-20211105183446-c75c47738b0c/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220702020025-31831981b65f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.1.0 h1:kunALQeHf1/185U1i0GOB/fy1IPRDDpuoOOqRReG57U=
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A=
|
||||
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.1.0 h1:g6Z6vPFA9dYBAF7DWcH6sCcOntplXsDKcliusYijMlw=
|
||||
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.2.0 h1:z85xZCsEl7bi/KwbNADeBYoOP0++7W1ipu+aGnpwzRM=
|
||||
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
@@ -1487,8 +1488,8 @@ golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxb
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs=
|
||||
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20220609170525-579cf78fd858 h1:Dpdu/EMxGMFgq0CeYMh4fazTD2vtlZRYE7wyynxJb9U=
|
||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
@@ -1673,8 +1674,8 @@ google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaE
|
||||
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
|
||||
google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
|
||||
google.golang.org/genproto v0.0.0-20211101144312-62acf1d99145/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a h1:GH6UPn3ixhWcKDhpnEC55S75cerLPdpp3hrhfKYjZgw=
|
||||
google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM=
|
||||
google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e h1:S9GbmC1iCgvbLyAokVCwiO6tVIrU9Y7c5oMx1V/ki/Y=
|
||||
google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s=
|
||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
|
||||
@@ -1704,8 +1705,8 @@ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ
|
||||
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
|
||||
google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=
|
||||
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
|
||||
google.golang.org/grpc v1.50.1 h1:DS/BukOZWp8s6p4Dt/tOaJaTQyPyOoCcrjroHuCeLzY=
|
||||
google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
|
||||
google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U=
|
||||
google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/tendermint/tendermint/abci/types"
|
||||
"github.com/tendermint/tendermint/libs/pubsub"
|
||||
"github.com/tendermint/tendermint/libs/pubsub/query"
|
||||
|
||||
@@ -21,22 +21,22 @@ func RPCRoutes(c *lrpc.Client) map[string]*rpcserver.RPCFunc {
|
||||
"health": rpcserver.NewRPCFunc(makeHealthFunc(c), ""),
|
||||
"status": rpcserver.NewRPCFunc(makeStatusFunc(c), ""),
|
||||
"net_info": rpcserver.NewRPCFunc(makeNetInfoFunc(c), ""),
|
||||
"blockchain": rpcserver.NewRPCFunc(makeBlockchainInfoFunc(c), "minHeight,maxHeight"),
|
||||
"genesis": rpcserver.NewRPCFunc(makeGenesisFunc(c), ""),
|
||||
"genesis_chunked": rpcserver.NewRPCFunc(makeGenesisChunkedFunc(c), ""),
|
||||
"block": rpcserver.NewRPCFunc(makeBlockFunc(c), "height"),
|
||||
"header": rpcserver.NewRPCFunc(makeHeaderFunc(c), "height"),
|
||||
"header_by_hash": rpcserver.NewRPCFunc(makeHeaderByHashFunc(c), "hash"),
|
||||
"block_by_hash": rpcserver.NewRPCFunc(makeBlockByHashFunc(c), "hash"),
|
||||
"block_results": rpcserver.NewRPCFunc(makeBlockResultsFunc(c), "height"),
|
||||
"commit": rpcserver.NewRPCFunc(makeCommitFunc(c), "height"),
|
||||
"tx": rpcserver.NewRPCFunc(makeTxFunc(c), "hash,prove"),
|
||||
"blockchain": rpcserver.NewRPCFunc(makeBlockchainInfoFunc(c), "minHeight,maxHeight", rpcserver.Cacheable()),
|
||||
"genesis": rpcserver.NewRPCFunc(makeGenesisFunc(c), "", rpcserver.Cacheable()),
|
||||
"genesis_chunked": rpcserver.NewRPCFunc(makeGenesisChunkedFunc(c), "", rpcserver.Cacheable()),
|
||||
"block": rpcserver.NewRPCFunc(makeBlockFunc(c), "height", rpcserver.Cacheable("height")),
|
||||
"header": rpcserver.NewRPCFunc(makeHeaderFunc(c), "height", rpcserver.Cacheable("height")),
|
||||
"header_by_hash": rpcserver.NewRPCFunc(makeHeaderByHashFunc(c), "hash", rpcserver.Cacheable()),
|
||||
"block_by_hash": rpcserver.NewRPCFunc(makeBlockByHashFunc(c), "hash", rpcserver.Cacheable()),
|
||||
"block_results": rpcserver.NewRPCFunc(makeBlockResultsFunc(c), "height", rpcserver.Cacheable("height")),
|
||||
"commit": rpcserver.NewRPCFunc(makeCommitFunc(c), "height", rpcserver.Cacheable("height")),
|
||||
"tx": rpcserver.NewRPCFunc(makeTxFunc(c), "hash,prove", rpcserver.Cacheable()),
|
||||
"tx_search": rpcserver.NewRPCFunc(makeTxSearchFunc(c), "query,prove,page,per_page,order_by"),
|
||||
"block_search": rpcserver.NewRPCFunc(makeBlockSearchFunc(c), "query,page,per_page,order_by"),
|
||||
"validators": rpcserver.NewRPCFunc(makeValidatorsFunc(c), "height,page,per_page"),
|
||||
"validators": rpcserver.NewRPCFunc(makeValidatorsFunc(c), "height,page,per_page", rpcserver.Cacheable("height")),
|
||||
"dump_consensus_state": rpcserver.NewRPCFunc(makeDumpConsensusStateFunc(c), ""),
|
||||
"consensus_state": rpcserver.NewRPCFunc(makeConsensusStateFunc(c), ""),
|
||||
"consensus_params": rpcserver.NewRPCFunc(makeConsensusParamsFunc(c), "height"),
|
||||
"consensus_params": rpcserver.NewRPCFunc(makeConsensusParamsFunc(c), "height", rpcserver.Cacheable("height")),
|
||||
"unconfirmed_txs": rpcserver.NewRPCFunc(makeUnconfirmedTxsFunc(c), "limit"),
|
||||
"num_unconfirmed_txs": rpcserver.NewRPCFunc(makeNumUnconfirmedTxsFunc(c), ""),
|
||||
|
||||
@@ -47,7 +47,7 @@ func RPCRoutes(c *lrpc.Client) map[string]*rpcserver.RPCFunc {
|
||||
|
||||
// abci API
|
||||
"abci_query": rpcserver.NewRPCFunc(makeABCIQueryFunc(c), "path,data,height,prove"),
|
||||
"abci_info": rpcserver.NewRPCFunc(makeABCIInfoFunc(c), ""),
|
||||
"abci_info": rpcserver.NewRPCFunc(makeABCIInfoFunc(c), "", rpcserver.Cacheable()),
|
||||
|
||||
// evidence API
|
||||
"broadcast_evidence": rpcserver.NewRPCFunc(makeBroadcastEvidenceFunc(c), "evidence"),
|
||||
|
||||
39
node/node.go
39
node/node.go
@@ -146,21 +146,26 @@ func NewNode(config *cfg.Config,
|
||||
logger log.Logger,
|
||||
options ...Option,
|
||||
) (*Node, error) {
|
||||
blockStore, stateDB, err := initDBs(config, dbProvider)
|
||||
blockStoreDB, stateDB, err := initDBs(config, dbProvider)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
|
||||
DiscardABCIResponses: config.Storage.DiscardABCIResponses,
|
||||
})
|
||||
|
||||
state, genDoc, err := LoadStateFromDBOrGenesisDocProvider(stateDB, genesisDocProvider)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
csMetrics, p2pMetrics, memplMetrics, smMetrics, abciMetrics := metricsProvider(genDoc.ChainID)
|
||||
csMetrics, p2pMetrics, memplMetrics, smMetrics, bstMetrics, abciMetrics, bsMetrics, ssMetrics := metricsProvider(genDoc.ChainID)
|
||||
|
||||
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
|
||||
DiscardABCIResponses: config.Storage.DiscardABCIResponses,
|
||||
Metrics: smMetrics,
|
||||
})
|
||||
|
||||
blockStore := store.NewBlockStore(blockStoreDB, store.BlockStoreOptions{
|
||||
Metrics: bstMetrics,
|
||||
})
|
||||
|
||||
// Create the proxyApp and establish connections to the ABCI app (consensus, mempool, query).
|
||||
proxyApp, err := createAndStartProxyAppConns(clientCreator, logger, abciMetrics)
|
||||
@@ -249,18 +254,12 @@ func NewNode(config *cfg.Config,
|
||||
)
|
||||
|
||||
// Make BlocksyncReactor. Don't start block sync if we're doing a state sync first.
|
||||
bcReactor, err := createBlocksyncReactor(config, state, blockExec, blockStore, blockSync && !stateSync, logger)
|
||||
bcReactor, err := createBlocksyncReactor(config, state, blockExec, blockStore, blockSync && !stateSync, logger, bsMetrics)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not create blocksync reactor: %w", err)
|
||||
}
|
||||
|
||||
// Make ConsensusReactor. Don't enable fully if doing a state sync and/or block sync first.
|
||||
// FIXME We need to update metrics here, since other reactors don't have access to them.
|
||||
if stateSync {
|
||||
csMetrics.StateSyncing.Set(1)
|
||||
} else if blockSync {
|
||||
csMetrics.BlockSyncing.Set(1)
|
||||
}
|
||||
// Make ConsensusReactor
|
||||
consensusReactor, consensusState := createConsensusReactor(
|
||||
config, state, blockExec, blockStore, mempool, evidencePool,
|
||||
privValidator, csMetrics, stateSync || blockSync, eventBus, consensusLogger,
|
||||
@@ -275,6 +274,7 @@ func NewNode(config *cfg.Config,
|
||||
proxyApp.Snapshot(),
|
||||
proxyApp.Query(),
|
||||
config.StateSync.TempDir,
|
||||
ssMetrics,
|
||||
)
|
||||
stateSyncReactor.SetLogger(logger.With("module", "statesync"))
|
||||
|
||||
@@ -308,6 +308,17 @@ func NewNode(config *cfg.Config,
|
||||
return nil, fmt.Errorf("could not create addrbook: %w", err)
|
||||
}
|
||||
|
||||
for _, addr := range splitAndTrimEmpty(config.P2P.BootstrapPeers, ",", " ") {
|
||||
netAddrs, err := p2p.NewNetAddressString(addr)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid bootstrap peer address: %w", err)
|
||||
}
|
||||
err = addrBook.AddAddress(netAddrs, netAddrs)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("adding bootstrap address to addressbook: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Optionally, start the pex reactor
|
||||
//
|
||||
// TODO:
|
||||
|
||||
@@ -295,7 +295,7 @@ func TestCreateProposalBlock(t *testing.T) {
|
||||
|
||||
// Make EvidencePool
|
||||
evidenceDB := dbm.NewMemDB()
|
||||
blockStore := store.NewBlockStore(dbm.NewMemDB())
|
||||
blockStore := store.NewBlockStore(dbm.NewMemDB(), store.BlockStoreOptions{})
|
||||
evidencePool, err := evidence.NewPool(evidenceDB, stateStore, blockStore)
|
||||
require.NoError(t, err)
|
||||
evidencePool.SetLogger(logger)
|
||||
@@ -402,7 +402,7 @@ func TestMaxProposalBlockSize(t *testing.T) {
|
||||
)
|
||||
}
|
||||
|
||||
blockStore := store.NewBlockStore(dbm.NewMemDB())
|
||||
blockStore := store.NewBlockStore(dbm.NewMemDB(), store.BlockStoreOptions{})
|
||||
|
||||
// fill the mempool with one txs just below the maximum size
|
||||
txLength := int(types.MaxDataBytesNoEvidence(maxBytes, 1))
|
||||
|
||||
@@ -12,7 +12,7 @@ import (
|
||||
dbm "github.com/tendermint/tm-db"
|
||||
|
||||
abci "github.com/tendermint/tendermint/abci/types"
|
||||
bc "github.com/tendermint/tendermint/blocksync"
|
||||
"github.com/tendermint/tendermint/blocksync"
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
cs "github.com/tendermint/tendermint/consensus"
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
@@ -99,20 +99,23 @@ func DefaultNewNode(config *cfg.Config, logger log.Logger) (*Node, error) {
|
||||
}
|
||||
|
||||
// MetricsProvider returns a consensus, p2p and mempool Metrics.
|
||||
type MetricsProvider func(chainID string) (*cs.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics, *proxy.Metrics)
|
||||
type MetricsProvider func(chainID string) (*cs.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics, *store.Metrics, *proxy.Metrics, *blocksync.Metrics, *statesync.Metrics)
|
||||
|
||||
// DefaultMetricsProvider returns Metrics build using Prometheus client library
|
||||
// if Prometheus is enabled. Otherwise, it returns no-op Metrics.
|
||||
func DefaultMetricsProvider(config *cfg.InstrumentationConfig) MetricsProvider {
|
||||
return func(chainID string) (*cs.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics, *proxy.Metrics) {
|
||||
return func(chainID string) (*cs.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics, *store.Metrics, *proxy.Metrics, *blocksync.Metrics, *statesync.Metrics) {
|
||||
if config.Prometheus {
|
||||
return cs.PrometheusMetrics(config.Namespace, "chain_id", chainID),
|
||||
p2p.PrometheusMetrics(config.Namespace, "chain_id", chainID),
|
||||
mempl.PrometheusMetrics(config.Namespace, "chain_id", chainID),
|
||||
sm.PrometheusMetrics(config.Namespace, "chain_id", chainID),
|
||||
proxy.PrometheusMetrics(config.Namespace, "chain_id", chainID)
|
||||
store.PrometheusMetrics(config.Namespace, "chain_id", chainID),
|
||||
proxy.PrometheusMetrics(config.Namespace, "chain_id", chainID),
|
||||
blocksync.PrometheusMetrics(config.Namespace, "chain_id", chainID),
|
||||
statesync.PrometheusMetrics(config.Namespace, "chain_id", chainID)
|
||||
}
|
||||
return cs.NopMetrics(), p2p.NopMetrics(), mempl.NopMetrics(), sm.NopMetrics(), proxy.NopMetrics()
|
||||
return cs.NopMetrics(), p2p.NopMetrics(), mempl.NopMetrics(), sm.NopMetrics(), store.NopMetrics(), proxy.NopMetrics(), blocksync.NopMetrics(), statesync.NopMetrics()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -122,13 +125,11 @@ type blockSyncReactor interface {
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
func initDBs(config *cfg.Config, dbProvider DBProvider) (blockStore *store.BlockStore, stateDB dbm.DB, err error) {
|
||||
var blockStoreDB dbm.DB
|
||||
blockStoreDB, err = dbProvider(&DBContext{"blockstore", config})
|
||||
func initDBs(config *cfg.Config, dbProvider DBProvider) (bsDB dbm.DB, stateDB dbm.DB, err error) {
|
||||
bsDB, err = dbProvider(&DBContext{"blockstore", config})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
blockStore = store.NewBlockStore(blockStoreDB)
|
||||
|
||||
stateDB, err = dbProvider(&DBContext{"state", config})
|
||||
if err != nil {
|
||||
@@ -336,10 +337,11 @@ func createBlocksyncReactor(config *cfg.Config,
|
||||
blockStore *store.BlockStore,
|
||||
blockSync bool,
|
||||
logger log.Logger,
|
||||
metrics *blocksync.Metrics,
|
||||
) (bcReactor p2p.Reactor, err error) {
|
||||
switch config.BlockSync.Version {
|
||||
case "v0":
|
||||
bcReactor = bc.NewReactor(state.Copy(), blockExec, blockStore, blockSync)
|
||||
bcReactor = blocksync.NewReactor(state.Copy(), blockExec, blockStore, blockSync, metrics)
|
||||
case "v1", "v2":
|
||||
return nil, fmt.Errorf("block sync version %s has been deprecated. Please use v0", config.BlockSync.Version)
|
||||
default:
|
||||
@@ -575,9 +577,6 @@ func startStateSync(ssR *statesync.Reactor, bcR blockSyncReactor, conR *cs.React
|
||||
}
|
||||
|
||||
if blockSync {
|
||||
// FIXME Very ugly to have these metrics bleed through here.
|
||||
conR.Metrics.StateSyncing.Set(0)
|
||||
conR.Metrics.BlockSyncing.Set(1)
|
||||
err = bcR.SwitchToBlockSync(state)
|
||||
if err != nil {
|
||||
ssR.Logger.Error("Failed to switch to block sync", "err", err)
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/cosmos/gogoproto/proto"
|
||||
|
||||
"github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/libs/cmap"
|
||||
"github.com/tendermint/tendermint/libs/rand"
|
||||
@@ -726,7 +727,7 @@ func (sw *Switch) addOutboundPeerWithConfig(
|
||||
addr *NetAddress,
|
||||
cfg *config.P2PConfig,
|
||||
) error {
|
||||
sw.Logger.Info("Dialing peer", "address", addr)
|
||||
sw.Logger.Debug("Dialing peer", "address", addr)
|
||||
|
||||
// XXX(xla): Remove the leakage of test concerns in implementation.
|
||||
if cfg.TestDialFail {
|
||||
@@ -854,7 +855,7 @@ func (sw *Switch) addPeer(p Peer) error {
|
||||
reactor.AddPeer(p)
|
||||
}
|
||||
|
||||
sw.Logger.Info("Added peer", "peer", p)
|
||||
sw.Logger.Debug("Added peer", "peer", p)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"golang.org/x/net/netutil"
|
||||
|
||||
"github.com/cosmos/gogoproto/proto"
|
||||
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
"github.com/tendermint/tendermint/libs/protoio"
|
||||
"github.com/tendermint/tendermint/p2p/conn"
|
||||
|
||||
@@ -2,6 +2,7 @@ package p2p
|
||||
|
||||
import (
|
||||
"github.com/cosmos/gogoproto/proto"
|
||||
|
||||
"github.com/tendermint/tendermint/p2p/conn"
|
||||
tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p"
|
||||
)
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/cosmos/gogoproto/proto"
|
||||
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
)
|
||||
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/cosmos/gogoproto/proto"
|
||||
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
)
|
||||
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/cosmos/gogoproto/proto"
|
||||
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
)
|
||||
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/cosmos/gogoproto/proto"
|
||||
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
)
|
||||
|
||||
|
||||
@@ -39,6 +39,26 @@ func (l *localClientCreator) NewABCIClient() (abcicli.Client, error) {
|
||||
return abcicli.NewLocalClient(l.mtx, l.app), nil
|
||||
}
|
||||
|
||||
//---------------------------------------------------------------
|
||||
// unsynchronized local proxy on an in-proc app (no mutex)
|
||||
|
||||
type unsyncLocalClientCreator struct {
|
||||
app types.Application
|
||||
}
|
||||
|
||||
// NewUnsyncLocalClientCreator returns a ClientCreator for the given app, which
|
||||
// will be running locally. Unlike NewLocalClientCreator, this leaves
|
||||
// synchronization up to the application.
|
||||
func NewUnsyncLocalClientCreator(app types.Application) ClientCreator {
|
||||
return &unsyncLocalClientCreator{
|
||||
app: app,
|
||||
}
|
||||
}
|
||||
|
||||
func (l *unsyncLocalClientCreator) NewABCIClient() (abcicli.Client, error) {
|
||||
return abcicli.NewUnsyncLocalClient(l.app), nil
|
||||
}
|
||||
|
||||
//---------------------------------------------------------------
|
||||
// remote proxy opens new connections to an external app process
|
||||
|
||||
@@ -83,6 +103,12 @@ func DefaultClientCreator(addr, transport, dbDir string) ClientCreator {
|
||||
panic(err)
|
||||
}
|
||||
return NewLocalClientCreator(app)
|
||||
case "e2e_sync":
|
||||
app, err := e2e.NewSyncApplication(e2e.DefaultConfig(dbDir))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return NewUnsyncLocalClientCreator(app)
|
||||
case "noop":
|
||||
return NewLocalClientCreator(types.NewBaseApplication())
|
||||
default:
|
||||
|
||||
@@ -1,4 +0,0 @@
|
||||
Tendermint Core v0.34.0 is the Tendermint Core release which supports the Stargate upgrade.
|
||||
|
||||
For more information on how to upgrade to Tendermint 0.34, please see [UPGRADING.md](https://github.com/tendermint/tendermint/blob/release/v0.34.0/UPGRADING.md).
|
||||
For a full list of user-facing changes, please see [CHANGELOG.md](https://github.com/tendermint/tendermint/blob/release/v0.34.0/CHANGELOG.md).
|
||||
@@ -17,23 +17,23 @@ var Routes = map[string]*rpc.RPCFunc{
|
||||
"health": rpc.NewRPCFunc(Health, ""),
|
||||
"status": rpc.NewRPCFunc(Status, ""),
|
||||
"net_info": rpc.NewRPCFunc(NetInfo, ""),
|
||||
"blockchain": rpc.NewRPCFunc(BlockchainInfo, "minHeight,maxHeight"),
|
||||
"genesis": rpc.NewRPCFunc(Genesis, ""),
|
||||
"genesis_chunked": rpc.NewRPCFunc(GenesisChunked, "chunk"),
|
||||
"block": rpc.NewRPCFunc(Block, "height"),
|
||||
"block_by_hash": rpc.NewRPCFunc(BlockByHash, "hash"),
|
||||
"block_results": rpc.NewRPCFunc(BlockResults, "height"),
|
||||
"commit": rpc.NewRPCFunc(Commit, "height"),
|
||||
"header": rpc.NewRPCFunc(Header, "height"),
|
||||
"header_by_hash": rpc.NewRPCFunc(HeaderByHash, "hash"),
|
||||
"blockchain": rpc.NewRPCFunc(BlockchainInfo, "minHeight,maxHeight", rpc.Cacheable()),
|
||||
"genesis": rpc.NewRPCFunc(Genesis, "", rpc.Cacheable()),
|
||||
"genesis_chunked": rpc.NewRPCFunc(GenesisChunked, "chunk", rpc.Cacheable()),
|
||||
"block": rpc.NewRPCFunc(Block, "height", rpc.Cacheable("height")),
|
||||
"block_by_hash": rpc.NewRPCFunc(BlockByHash, "hash", rpc.Cacheable()),
|
||||
"block_results": rpc.NewRPCFunc(BlockResults, "height", rpc.Cacheable("height")),
|
||||
"commit": rpc.NewRPCFunc(Commit, "height", rpc.Cacheable("height")),
|
||||
"header": rpc.NewRPCFunc(Header, "height", rpc.Cacheable("height")),
|
||||
"header_by_hash": rpc.NewRPCFunc(HeaderByHash, "hash", rpc.Cacheable()),
|
||||
"check_tx": rpc.NewRPCFunc(CheckTx, "tx"),
|
||||
"tx": rpc.NewRPCFunc(Tx, "hash,prove"),
|
||||
"tx": rpc.NewRPCFunc(Tx, "hash,prove", rpc.Cacheable()),
|
||||
"tx_search": rpc.NewRPCFunc(TxSearch, "query,prove,page,per_page,order_by"),
|
||||
"block_search": rpc.NewRPCFunc(BlockSearch, "query,page,per_page,order_by"),
|
||||
"validators": rpc.NewRPCFunc(Validators, "height,page,per_page"),
|
||||
"validators": rpc.NewRPCFunc(Validators, "height,page,per_page", rpc.Cacheable("height")),
|
||||
"dump_consensus_state": rpc.NewRPCFunc(DumpConsensusState, ""),
|
||||
"consensus_state": rpc.NewRPCFunc(ConsensusState, ""),
|
||||
"consensus_params": rpc.NewRPCFunc(ConsensusParams, "height"),
|
||||
"consensus_params": rpc.NewRPCFunc(ConsensusParams, "height", rpc.Cacheable("height")),
|
||||
"unconfirmed_txs": rpc.NewRPCFunc(UnconfirmedTxs, "limit"),
|
||||
"num_unconfirmed_txs": rpc.NewRPCFunc(NumUnconfirmedTxs, ""),
|
||||
|
||||
@@ -44,7 +44,7 @@ var Routes = map[string]*rpc.RPCFunc{
|
||||
|
||||
// abci API
|
||||
"abci_query": rpc.NewRPCFunc(ABCIQuery, "path,data,height,prove"),
|
||||
"abci_info": rpc.NewRPCFunc(ABCIInfo, ""),
|
||||
"abci_info": rpc.NewRPCFunc(ABCIInfo, "", rpc.Cacheable()),
|
||||
|
||||
// evidence API
|
||||
"broadcast_evidence": rpc.NewRPCFunc(BroadcastEvidence, "evidence"),
|
||||
|
||||
@@ -7,8 +7,10 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -37,9 +39,7 @@ const (
|
||||
testVal = "acbd"
|
||||
)
|
||||
|
||||
var (
|
||||
ctx = context.Background()
|
||||
)
|
||||
var ctx = context.Background()
|
||||
|
||||
type ResultEcho struct {
|
||||
Value string `json:"value"`
|
||||
@@ -57,6 +57,10 @@ type ResultEchoDataBytes struct {
|
||||
Value tmbytes.HexBytes `json:"value"`
|
||||
}
|
||||
|
||||
type ResultEchoWithDefault struct {
|
||||
Value int `json:"value"`
|
||||
}
|
||||
|
||||
// Define some routes
|
||||
var Routes = map[string]*server.RPCFunc{
|
||||
"echo": server.NewRPCFunc(EchoResult, "arg"),
|
||||
@@ -64,6 +68,7 @@ var Routes = map[string]*server.RPCFunc{
|
||||
"echo_bytes": server.NewRPCFunc(EchoBytesResult, "arg"),
|
||||
"echo_data_bytes": server.NewRPCFunc(EchoDataBytesResult, "arg"),
|
||||
"echo_int": server.NewRPCFunc(EchoIntResult, "arg"),
|
||||
"echo_default": server.NewRPCFunc(EchoWithDefault, "arg", server.Cacheable("arg")),
|
||||
}
|
||||
|
||||
func EchoResult(ctx *types.Context, v string) (*ResultEcho, error) {
|
||||
@@ -86,6 +91,14 @@ func EchoDataBytesResult(ctx *types.Context, v tmbytes.HexBytes) (*ResultEchoDat
|
||||
return &ResultEchoDataBytes{v}, nil
|
||||
}
|
||||
|
||||
func EchoWithDefault(ctx *types.Context, v *int) (*ResultEchoWithDefault, error) {
|
||||
val := -1
|
||||
if v != nil {
|
||||
val = *v
|
||||
}
|
||||
return &ResultEchoWithDefault{val}, nil
|
||||
}
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
setup()
|
||||
code := m.Run()
|
||||
@@ -199,26 +212,47 @@ func echoDataBytesViaHTTP(cl client.Caller, bytes tmbytes.HexBytes) (tmbytes.Hex
|
||||
return result.Value, nil
|
||||
}
|
||||
|
||||
func echoWithDefaultViaHTTP(cl client.Caller, v *int) (int, error) {
|
||||
params := map[string]interface{}{}
|
||||
if v != nil {
|
||||
params["arg"] = *v
|
||||
}
|
||||
result := new(ResultEchoWithDefault)
|
||||
if _, err := cl.Call(ctx, "echo_default", params, result); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return result.Value, nil
|
||||
}
|
||||
|
||||
func testWithHTTPClient(t *testing.T, cl client.HTTPClient) {
|
||||
val := testVal
|
||||
got, err := echoViaHTTP(cl, val)
|
||||
require.Nil(t, err)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, got, val)
|
||||
|
||||
val2 := randBytes(t)
|
||||
got2, err := echoBytesViaHTTP(cl, val2)
|
||||
require.Nil(t, err)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, got2, val2)
|
||||
|
||||
val3 := tmbytes.HexBytes(randBytes(t))
|
||||
got3, err := echoDataBytesViaHTTP(cl, val3)
|
||||
require.Nil(t, err)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, got3, val3)
|
||||
|
||||
val4 := tmrand.Intn(10000)
|
||||
got4, err := echoIntViaHTTP(cl, val4)
|
||||
require.Nil(t, err)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, got4, val4)
|
||||
|
||||
got5, err := echoWithDefaultViaHTTP(cl, nil)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, got5, -1)
|
||||
|
||||
val6 := tmrand.Intn(10000)
|
||||
got6, err := echoWithDefaultViaHTTP(cl, &val6)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, got6, val6)
|
||||
}
|
||||
|
||||
func echoViaWS(cl *client.WSClient, val string) (string, error) {
|
||||
@@ -233,7 +267,6 @@ func echoViaWS(cl *client.WSClient, val string) (string, error) {
|
||||
msg := <-cl.ResponsesCh
|
||||
if msg.Error != nil {
|
||||
return "", err
|
||||
|
||||
}
|
||||
result := new(ResultEcho)
|
||||
err = json.Unmarshal(msg.Result, result)
|
||||
@@ -255,7 +288,6 @@ func echoBytesViaWS(cl *client.WSClient, bytes []byte) ([]byte, error) {
|
||||
msg := <-cl.ResponsesCh
|
||||
if msg.Error != nil {
|
||||
return []byte{}, msg.Error
|
||||
|
||||
}
|
||||
result := new(ResultEchoBytes)
|
||||
err = json.Unmarshal(msg.Result, result)
|
||||
@@ -399,6 +431,74 @@ func TestWSClientPingPong(t *testing.T) {
|
||||
time.Sleep(6 * time.Second)
|
||||
}
|
||||
|
||||
func TestJSONRPCCaching(t *testing.T) {
|
||||
httpAddr := strings.Replace(tcpAddr, "tcp://", "http://", 1)
|
||||
cl, err := client.DefaultHTTPClient(httpAddr)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Not supplying the arg should result in not caching
|
||||
params := make(map[string]interface{})
|
||||
req, err := types.MapToRequest(types.JSONRPCIntID(1000), "echo_default", params)
|
||||
require.NoError(t, err)
|
||||
|
||||
res1, err := rawJSONRPCRequest(t, cl, httpAddr, req)
|
||||
defer func() { _ = res1.Body.Close() }()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "", res1.Header.Get("Cache-control"))
|
||||
|
||||
// Supplying the arg should result in caching
|
||||
params["arg"] = tmrand.Intn(10000)
|
||||
req, err = types.MapToRequest(types.JSONRPCIntID(1001), "echo_default", params)
|
||||
require.NoError(t, err)
|
||||
|
||||
res2, err := rawJSONRPCRequest(t, cl, httpAddr, req)
|
||||
defer func() { _ = res2.Body.Close() }()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "public, max-age=86400", res2.Header.Get("Cache-control"))
|
||||
}
|
||||
|
||||
func rawJSONRPCRequest(t *testing.T, cl *http.Client, url string, req interface{}) (*http.Response, error) {
|
||||
reqBytes, err := json.Marshal(req)
|
||||
require.NoError(t, err)
|
||||
|
||||
reqBuf := bytes.NewBuffer(reqBytes)
|
||||
httpReq, err := http.NewRequest(http.MethodPost, url, reqBuf)
|
||||
require.NoError(t, err)
|
||||
|
||||
httpReq.Header.Set("Content-type", "application/json")
|
||||
|
||||
return cl.Do(httpReq)
|
||||
}
|
||||
|
||||
func TestURICaching(t *testing.T) {
|
||||
httpAddr := strings.Replace(tcpAddr, "tcp://", "http://", 1)
|
||||
cl, err := client.DefaultHTTPClient(httpAddr)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Not supplying the arg should result in not caching
|
||||
args := url.Values{}
|
||||
res1, err := rawURIRequest(t, cl, httpAddr+"/echo_default", args)
|
||||
defer func() { _ = res1.Body.Close() }()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "", res1.Header.Get("Cache-control"))
|
||||
|
||||
// Supplying the arg should result in caching
|
||||
args.Set("arg", fmt.Sprintf("%d", tmrand.Intn(10000)))
|
||||
res2, err := rawURIRequest(t, cl, httpAddr+"/echo_default", args)
|
||||
defer func() { _ = res2.Body.Close() }()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "public, max-age=86400", res2.Header.Get("Cache-control"))
|
||||
}
|
||||
|
||||
func rawURIRequest(t *testing.T, cl *http.Client, url string, args url.Values) (*http.Response, error) {
|
||||
req, err := http.NewRequest(http.MethodPost, url, strings.NewReader(args.Encode()))
|
||||
require.NoError(t, err)
|
||||
|
||||
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
|
||||
|
||||
return cl.Do(req)
|
||||
}
|
||||
|
||||
func randBytes(t *testing.T) []byte {
|
||||
n := tmrand.Intn(10) + 2
|
||||
buf := make([]byte, n)
|
||||
|
||||
@@ -55,6 +55,11 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.Han
|
||||
requests = []types.RPCRequest{request}
|
||||
}
|
||||
|
||||
// Set the default response cache to true unless
|
||||
// 1. Any RPC request error.
|
||||
// 2. Any RPC request doesn't allow to be cached.
|
||||
// 3. Any RPC request has the height argument and the value is 0 (the default).
|
||||
cache := true
|
||||
for _, request := range requests {
|
||||
request := request
|
||||
|
||||
@@ -72,11 +77,13 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.Han
|
||||
responses,
|
||||
types.RPCInvalidRequestError(request.ID, fmt.Errorf("path %s is invalid", r.URL.Path)),
|
||||
)
|
||||
cache = false
|
||||
continue
|
||||
}
|
||||
rpcFunc, ok := funcMap[request.Method]
|
||||
if !ok || rpcFunc.ws {
|
||||
if !ok || (rpcFunc.ws) {
|
||||
responses = append(responses, types.RPCMethodNotFoundError(request.ID))
|
||||
cache = false
|
||||
continue
|
||||
}
|
||||
ctx := &types.Context{JSONReq: &request, HTTPReq: r}
|
||||
@@ -88,11 +95,16 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.Han
|
||||
responses,
|
||||
types.RPCInvalidParamsError(request.ID, fmt.Errorf("error converting json params to arguments: %w", err)),
|
||||
)
|
||||
cache = false
|
||||
continue
|
||||
}
|
||||
args = append(args, fnArgs...)
|
||||
}
|
||||
|
||||
if cache && !rpcFunc.cacheableWithArgs(args) {
|
||||
cache = false
|
||||
}
|
||||
|
||||
returns := rpcFunc.f.Call(args)
|
||||
result, err := unreflectResult(returns)
|
||||
if err != nil {
|
||||
@@ -103,7 +115,13 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.Han
|
||||
}
|
||||
|
||||
if len(responses) > 0 {
|
||||
if wErr := WriteRPCResponseHTTP(w, responses...); wErr != nil {
|
||||
var wErr error
|
||||
if cache {
|
||||
wErr = WriteCacheableRPCResponseHTTP(w, responses...)
|
||||
} else {
|
||||
wErr = WriteRPCResponseHTTP(w, responses...)
|
||||
}
|
||||
if wErr != nil {
|
||||
logger.Error("failed to write responses", "res", responses, "err", wErr)
|
||||
}
|
||||
}
|
||||
@@ -128,7 +146,6 @@ func mapParamsToArgs(
|
||||
params map[string]json.RawMessage,
|
||||
argsOffset int,
|
||||
) ([]reflect.Value, error) {
|
||||
|
||||
values := make([]reflect.Value, len(rpcFunc.argNames))
|
||||
for i, argName := range rpcFunc.argNames {
|
||||
argType := rpcFunc.args[i+argsOffset]
|
||||
@@ -153,7 +170,6 @@ func arrayParamsToArgs(
|
||||
params []json.RawMessage,
|
||||
argsOffset int,
|
||||
) ([]reflect.Value, error) {
|
||||
|
||||
if len(rpcFunc.argNames) != len(params) {
|
||||
return nil, fmt.Errorf("expected %v parameters (%v), got %v (%v)",
|
||||
len(rpcFunc.argNames), rpcFunc.argNames, len(params), params)
|
||||
|
||||
@@ -18,7 +18,8 @@ import (
|
||||
|
||||
func testMux() *http.ServeMux {
|
||||
funcMap := map[string]*RPCFunc{
|
||||
"c": NewRPCFunc(func(ctx *types.Context, s string, i int) (string, error) { return "foo", nil }, "s,i"),
|
||||
"c": NewRPCFunc(func(ctx *types.Context, s string, i int) (string, error) { return "foo", nil }, "s,i"),
|
||||
"block": NewRPCFunc(func(ctx *types.Context, h int) (string, error) { return "block", nil }, "height", Cacheable("height")),
|
||||
}
|
||||
mux := http.NewServeMux()
|
||||
buf := new(bytes.Buffer)
|
||||
@@ -227,3 +228,52 @@ func TestUnknownRPCPath(t *testing.T) {
|
||||
require.Equal(t, http.StatusNotFound, res.StatusCode, "should always return 404")
|
||||
res.Body.Close()
|
||||
}
|
||||
|
||||
func TestRPCResponseCache(t *testing.T) {
|
||||
mux := testMux()
|
||||
body := strings.NewReader(`{"jsonrpc": "2.0","method":"block","id": 0, "params": ["1"]}`)
|
||||
req, _ := http.NewRequest("Get", "http://localhost/", body)
|
||||
rec := httptest.NewRecorder()
|
||||
mux.ServeHTTP(rec, req)
|
||||
res := rec.Result()
|
||||
|
||||
// Always expecting back a JSONRPCResponse
|
||||
require.True(t, statusOK(res.StatusCode), "should always return 2XX")
|
||||
require.Equal(t, "public, max-age=86400", res.Header.Get("Cache-control"))
|
||||
|
||||
_, err := io.ReadAll(res.Body)
|
||||
res.Body.Close()
|
||||
require.Nil(t, err, "reading from the body should not give back an error")
|
||||
|
||||
// send a request with default height.
|
||||
body = strings.NewReader(`{"jsonrpc": "2.0","method":"block","id": 0, "params": ["0"]}`)
|
||||
req, _ = http.NewRequest("Get", "http://localhost/", body)
|
||||
rec = httptest.NewRecorder()
|
||||
mux.ServeHTTP(rec, req)
|
||||
res = rec.Result()
|
||||
|
||||
// Always expecting back a JSONRPCResponse
|
||||
require.True(t, statusOK(res.StatusCode), "should always return 2XX")
|
||||
require.Equal(t, "", res.Header.Get("Cache-control"))
|
||||
|
||||
_, err = io.ReadAll(res.Body)
|
||||
|
||||
res.Body.Close()
|
||||
require.Nil(t, err, "reading from the body should not give back an error")
|
||||
|
||||
// send a request with default height, but as empty set of parameters.
|
||||
body = strings.NewReader(`{"jsonrpc": "2.0","method":"block","id": 0, "params": []}`)
|
||||
req, _ = http.NewRequest("Get", "http://localhost/", body)
|
||||
rec = httptest.NewRecorder()
|
||||
mux.ServeHTTP(rec, req)
|
||||
res = rec.Result()
|
||||
|
||||
// Always expecting back a JSONRPCResponse
|
||||
require.True(t, statusOK(res.StatusCode), "should always return 2XX")
|
||||
require.Equal(t, "", res.Header.Get("Cache-control"))
|
||||
|
||||
_, err = io.ReadAll(res.Body)
|
||||
|
||||
res.Body.Close()
|
||||
require.Nil(t, err, "reading from the body should not give back an error")
|
||||
}
|
||||
|
||||
@@ -104,7 +104,7 @@ func WriteRPCResponseHTTPError(
|
||||
panic("tried to write http error response without RPC error")
|
||||
}
|
||||
|
||||
jsonBytes, err := json.MarshalIndent(res, "", " ")
|
||||
jsonBytes, err := json.Marshal(res)
|
||||
if err != nil {
|
||||
return fmt.Errorf("json marshal: %w", err)
|
||||
}
|
||||
@@ -117,6 +117,22 @@ func WriteRPCResponseHTTPError(
|
||||
|
||||
// WriteRPCResponseHTTP marshals res as JSON (with indent) and writes it to w.
|
||||
func WriteRPCResponseHTTP(w http.ResponseWriter, res ...types.RPCResponse) error {
|
||||
return writeRPCResponseHTTP(w, []httpHeader{}, res...)
|
||||
}
|
||||
|
||||
// WriteCacheableRPCResponseHTTP marshals res as JSON (with indent) and writes
|
||||
// it to w. Adds cache-control to the response header and sets the expiry to
|
||||
// one day.
|
||||
func WriteCacheableRPCResponseHTTP(w http.ResponseWriter, res ...types.RPCResponse) error {
|
||||
return writeRPCResponseHTTP(w, []httpHeader{{"Cache-Control", "public, max-age=86400"}}, res...)
|
||||
}
|
||||
|
||||
type httpHeader struct {
|
||||
name string
|
||||
value string
|
||||
}
|
||||
|
||||
func writeRPCResponseHTTP(w http.ResponseWriter, headers []httpHeader, res ...types.RPCResponse) error {
|
||||
var v interface{}
|
||||
if len(res) == 1 {
|
||||
v = res[0]
|
||||
@@ -124,11 +140,14 @@ func WriteRPCResponseHTTP(w http.ResponseWriter, res ...types.RPCResponse) error
|
||||
v = res
|
||||
}
|
||||
|
||||
jsonBytes, err := json.MarshalIndent(v, "", " ")
|
||||
jsonBytes, err := json.Marshal(v)
|
||||
if err != nil {
|
||||
return fmt.Errorf("json marshal: %w", err)
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
for _, header := range headers {
|
||||
w.Header().Set(header.name, header.value)
|
||||
}
|
||||
w.WriteHeader(200)
|
||||
_, err = w.Write(jsonBytes)
|
||||
return err
|
||||
@@ -166,7 +185,6 @@ func RecoverAndLogHandler(handler http.Handler, logger log.Logger) http.Handler
|
||||
// Without this, Chrome & Firefox were retrying aborted ajax requests,
|
||||
// at least to my localhost.
|
||||
if e := recover(); e != nil {
|
||||
|
||||
// If RPCResponse
|
||||
if res, ok := e.(types.RPCResponse); ok {
|
||||
if wErr := WriteRPCResponseHTTP(rww, res); wErr != nil {
|
||||
|
||||
@@ -112,7 +112,7 @@ func TestWriteRPCResponseHTTP(t *testing.T) {
|
||||
|
||||
// one argument
|
||||
w := httptest.NewRecorder()
|
||||
err := WriteRPCResponseHTTP(w, types.NewRPCSuccessResponse(id, &sampleResult{"hello"}))
|
||||
err := WriteCacheableRPCResponseHTTP(w, types.NewRPCSuccessResponse(id, &sampleResult{"hello"}))
|
||||
require.NoError(t, err)
|
||||
resp := w.Result()
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
@@ -120,13 +120,8 @@ func TestWriteRPCResponseHTTP(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 200, resp.StatusCode)
|
||||
assert.Equal(t, "application/json", resp.Header.Get("Content-Type"))
|
||||
assert.Equal(t, `{
|
||||
"jsonrpc": "2.0",
|
||||
"id": -1,
|
||||
"result": {
|
||||
"value": "hello"
|
||||
}
|
||||
}`, string(body))
|
||||
assert.Equal(t, "public, max-age=86400", resp.Header.Get("Cache-control"))
|
||||
assert.Equal(t, `{"jsonrpc":"2.0","id":-1,"result":{"value":"hello"}}`, string(body))
|
||||
|
||||
// multiple arguments
|
||||
w = httptest.NewRecorder()
|
||||
@@ -141,22 +136,7 @@ func TestWriteRPCResponseHTTP(t *testing.T) {
|
||||
|
||||
assert.Equal(t, 200, resp.StatusCode)
|
||||
assert.Equal(t, "application/json", resp.Header.Get("Content-Type"))
|
||||
assert.Equal(t, `[
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"id": -1,
|
||||
"result": {
|
||||
"value": "hello"
|
||||
}
|
||||
},
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"id": -1,
|
||||
"result": {
|
||||
"value": "world"
|
||||
}
|
||||
}
|
||||
]`, string(body))
|
||||
assert.Equal(t, `[{"jsonrpc":"2.0","id":-1,"result":{"value":"hello"}},{"jsonrpc":"2.0","id":-1,"result":{"value":"world"}}]`, string(body))
|
||||
}
|
||||
|
||||
func TestWriteRPCResponseHTTPError(t *testing.T) {
|
||||
@@ -172,13 +152,5 @@ func TestWriteRPCResponseHTTPError(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, http.StatusInternalServerError, resp.StatusCode)
|
||||
assert.Equal(t, "application/json", resp.Header.Get("Content-Type"))
|
||||
assert.Equal(t, `{
|
||||
"jsonrpc": "2.0",
|
||||
"id": -1,
|
||||
"error": {
|
||||
"code": -32603,
|
||||
"message": "Internal error",
|
||||
"data": "foo"
|
||||
}
|
||||
}`, string(body))
|
||||
assert.Equal(t, `{"jsonrpc":"2.0","id":-1,"error":{"code":-32603,"message":"Internal error","data":"foo"}}`, string(body))
|
||||
}
|
||||
|
||||
@@ -63,7 +63,14 @@ func makeHTTPHandler(rpcFunc *RPCFunc, logger log.Logger) func(http.ResponseWrit
|
||||
}
|
||||
return
|
||||
}
|
||||
if err := WriteRPCResponseHTTP(w, types.NewRPCSuccessResponse(dummyID, result)); err != nil {
|
||||
|
||||
resp := types.NewRPCSuccessResponse(dummyID, result)
|
||||
if rpcFunc.cacheableWithArgs(args) {
|
||||
err = WriteCacheableRPCResponseHTTP(w, resp)
|
||||
} else {
|
||||
err = WriteRPCResponseHTTP(w, resp)
|
||||
}
|
||||
if err != nil {
|
||||
logger.Error("failed to write response", "res", result, "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -23,40 +23,96 @@ func RegisterRPCFuncs(mux *http.ServeMux, funcMap map[string]*RPCFunc, logger lo
|
||||
mux.HandleFunc("/", handleInvalidJSONRPCPaths(makeJSONRPCHandler(funcMap, logger)))
|
||||
}
|
||||
|
||||
// Function introspection
|
||||
type Option func(*RPCFunc)
|
||||
|
||||
// Cacheable enables returning a cache control header from RPC functions to
|
||||
// which it is applied.
|
||||
//
|
||||
// `noCacheDefArgs` is a list of argument names that, if omitted or set to
|
||||
// their defaults when calling the RPC function, will skip the response
|
||||
// caching.
|
||||
func Cacheable(noCacheDefArgs ...string) Option {
|
||||
return func(r *RPCFunc) {
|
||||
r.cacheable = true
|
||||
r.noCacheDefArgs = make(map[string]interface{})
|
||||
for _, arg := range noCacheDefArgs {
|
||||
r.noCacheDefArgs[arg] = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Ws enables WebSocket communication.
|
||||
func Ws() Option {
|
||||
return func(r *RPCFunc) {
|
||||
r.ws = true
|
||||
}
|
||||
}
|
||||
|
||||
// RPCFunc contains the introspected type information for a function
|
||||
type RPCFunc struct {
|
||||
f reflect.Value // underlying rpc function
|
||||
args []reflect.Type // type of each function arg
|
||||
returns []reflect.Type // type of each return arg
|
||||
argNames []string // name of each argument
|
||||
ws bool // websocket only
|
||||
f reflect.Value // underlying rpc function
|
||||
args []reflect.Type // type of each function arg
|
||||
returns []reflect.Type // type of each return arg
|
||||
argNames []string // name of each argument
|
||||
cacheable bool // enable cache control
|
||||
ws bool // enable websocket communication
|
||||
noCacheDefArgs map[string]interface{} // a lookup table of args that, if not supplied or are set to default values, cause us to not cache
|
||||
}
|
||||
|
||||
// NewRPCFunc wraps a function for introspection.
|
||||
// f is the function, args are comma separated argument names
|
||||
func NewRPCFunc(f interface{}, args string) *RPCFunc {
|
||||
return newRPCFunc(f, args, false)
|
||||
func NewRPCFunc(f interface{}, args string, options ...Option) *RPCFunc {
|
||||
return newRPCFunc(f, args, options...)
|
||||
}
|
||||
|
||||
// NewWSRPCFunc wraps a function for introspection and use in the websockets.
|
||||
func NewWSRPCFunc(f interface{}, args string) *RPCFunc {
|
||||
return newRPCFunc(f, args, true)
|
||||
func NewWSRPCFunc(f interface{}, args string, options ...Option) *RPCFunc {
|
||||
options = append(options, Ws())
|
||||
return newRPCFunc(f, args, options...)
|
||||
}
|
||||
|
||||
func newRPCFunc(f interface{}, args string, ws bool) *RPCFunc {
|
||||
// cacheableWithArgs returns whether or not a call to this function is cacheable,
|
||||
// given the specified arguments.
|
||||
func (f *RPCFunc) cacheableWithArgs(args []reflect.Value) bool {
|
||||
if !f.cacheable {
|
||||
return false
|
||||
}
|
||||
// Skip the context variable common to all RPC functions
|
||||
for i := 1; i < len(f.args); i++ {
|
||||
// f.argNames does not include the context variable
|
||||
argName := f.argNames[i-1]
|
||||
if _, hasDefault := f.noCacheDefArgs[argName]; hasDefault {
|
||||
// Argument with default value was not supplied
|
||||
if i >= len(args) {
|
||||
return false
|
||||
}
|
||||
// Argument with default value is set to its zero value
|
||||
if args[i].IsZero() {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func newRPCFunc(f interface{}, args string, options ...Option) *RPCFunc {
|
||||
var argNames []string
|
||||
if args != "" {
|
||||
argNames = strings.Split(args, ",")
|
||||
}
|
||||
return &RPCFunc{
|
||||
|
||||
r := &RPCFunc{
|
||||
f: reflect.ValueOf(f),
|
||||
args: funcArgTypes(f),
|
||||
returns: funcReturnTypes(f),
|
||||
argNames: argNames,
|
||||
ws: ws,
|
||||
}
|
||||
|
||||
for _, opt := range options {
|
||||
opt(r)
|
||||
}
|
||||
|
||||
return r
|
||||
}
|
||||
|
||||
// return a function's argument types
|
||||
|
||||
@@ -431,7 +431,10 @@ func (wsc *wsConnection) writeRoutine() {
|
||||
return
|
||||
}
|
||||
case msg := <-wsc.writeChan:
|
||||
jsonBytes, err := json.MarshalIndent(msg, "", " ")
|
||||
// Use json.MarshalIndent instead of Marshal for pretty output.
|
||||
// Pretty output not necessary, since most consumers of WS events are
|
||||
// automated processes, not humans.
|
||||
jsonBytes, err := json.Marshal(msg)
|
||||
if err != nil {
|
||||
wsc.Logger.Error("Failed to marshal RPCResponse to JSON", "err", err)
|
||||
continue
|
||||
|
||||
@@ -216,6 +216,9 @@ paths:
|
||||
Please refer to
|
||||
https://docs.tendermint.com/main/tendermint-core/using-tendermint.html#formatting
|
||||
for formatting/encoding rules.
|
||||
|
||||
Upon success, the `Cache-Control` header will be set with the default
|
||||
maximum age.
|
||||
parameters:
|
||||
- in: query
|
||||
name: tx
|
||||
@@ -621,9 +624,12 @@ paths:
|
||||
tags:
|
||||
- Info
|
||||
description: |
|
||||
Get block headers for minHeight <= height maxHeight.
|
||||
Get block headers for minHeight <= height <= maxHeight.
|
||||
|
||||
At most 20 items will be returned.
|
||||
|
||||
Upon success, the `Cache-Control` header will be set with the default
|
||||
maximum age.
|
||||
responses:
|
||||
"200":
|
||||
description: Block headers, returned in descending order (highest first).
|
||||
@@ -653,6 +659,9 @@ paths:
|
||||
- Info
|
||||
description: |
|
||||
Get Header.
|
||||
|
||||
If the `height` field is set to a non-default value, upon success, the
|
||||
`Cache-Control` header will be set with the default maximum age.
|
||||
responses:
|
||||
"200":
|
||||
description: Header informations.
|
||||
@@ -682,6 +691,9 @@ paths:
|
||||
- Info
|
||||
description: |
|
||||
Get Header By Hash.
|
||||
|
||||
Upon success, the `Cache-Control` header will be set with the default
|
||||
maximum age.
|
||||
responses:
|
||||
"200":
|
||||
description: Header informations.
|
||||
@@ -711,6 +723,9 @@ paths:
|
||||
- Info
|
||||
description: |
|
||||
Get Block.
|
||||
|
||||
If the `height` field is set to a non-default value, upon success, the
|
||||
`Cache-Control` header will be set with the default maximum age.
|
||||
responses:
|
||||
"200":
|
||||
description: Block informations.
|
||||
@@ -740,6 +755,9 @@ paths:
|
||||
- Info
|
||||
description: |
|
||||
Get Block By Hash.
|
||||
|
||||
Upon success, the `Cache-Control` header will be set with the default
|
||||
maximum age.
|
||||
responses:
|
||||
"200":
|
||||
description: Block informations.
|
||||
@@ -760,7 +778,7 @@ paths:
|
||||
parameters:
|
||||
- in: query
|
||||
name: height
|
||||
description: height to return. If no height is provided, it will fetch informations regarding the latest block.
|
||||
description: height to return. If no height is provided, it will fetch information regarding the latest block.
|
||||
schema:
|
||||
type: integer
|
||||
default: 0
|
||||
@@ -769,6 +787,9 @@ paths:
|
||||
- Info
|
||||
description: |
|
||||
Get block_results.
|
||||
|
||||
If the `height` field is set to a non-default value, upon success, the
|
||||
`Cache-Control` header will be set with the default maximum age.
|
||||
responses:
|
||||
"200":
|
||||
description: Block results.
|
||||
@@ -798,6 +819,9 @@ paths:
|
||||
- Info
|
||||
description: |
|
||||
Get Commit.
|
||||
|
||||
If the `height` field is set to a non-default value, upon success, the
|
||||
`Cache-Control` header will be set with the default maximum age.
|
||||
responses:
|
||||
"200":
|
||||
description: |
|
||||
@@ -845,7 +869,11 @@ paths:
|
||||
tags:
|
||||
- Info
|
||||
description: |
|
||||
Get Validators. Validators are sorted first by voting power (descending), then by address (ascending).
|
||||
Get Validators. Validators are sorted first by voting power
|
||||
(descending), then by address (ascending).
|
||||
|
||||
If the `height` field is set to a non-default value, upon success, the
|
||||
`Cache-Control` header will be set with the default maximum age.
|
||||
responses:
|
||||
"200":
|
||||
description: Commit results.
|
||||
@@ -867,6 +895,9 @@ paths:
|
||||
- Info
|
||||
description: |
|
||||
Get genesis.
|
||||
|
||||
Upon success, the `Cache-Control` header will be set with the default
|
||||
maximum age.
|
||||
responses:
|
||||
"200":
|
||||
description: Genesis results.
|
||||
@@ -945,6 +976,9 @@ paths:
|
||||
- Info
|
||||
description: |
|
||||
Get consensus parameters.
|
||||
|
||||
If the `height` field is set to a non-default value, upon success, the
|
||||
`Cache-Control` header will be set with the default maximum age.
|
||||
responses:
|
||||
"200":
|
||||
description: consensus parameters results.
|
||||
@@ -1135,14 +1169,14 @@ paths:
|
||||
parameters:
|
||||
- in: query
|
||||
name: hash
|
||||
description: transaction Hash to retrive
|
||||
description: hash of transaction to retrieve
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
example: "0xD70952032620CC4E2737EB8AC379806359D8E0B17B0488F627997A0B043ABDED"
|
||||
- in: query
|
||||
name: prove
|
||||
description: Include proofs of the transactions inclusion in the block
|
||||
description: Include proofs of the transaction's inclusion in the block
|
||||
required: false
|
||||
schema:
|
||||
type: boolean
|
||||
@@ -1151,7 +1185,10 @@ paths:
|
||||
tags:
|
||||
- Info
|
||||
description: |
|
||||
Get a trasasction
|
||||
Get a transaction
|
||||
|
||||
Upon success, the `Cache-Control` header will be set with the default
|
||||
maximum age.
|
||||
responses:
|
||||
"200":
|
||||
description: Get a transaction`
|
||||
@@ -1167,12 +1204,15 @@ paths:
|
||||
$ref: "#/components/schemas/ErrorResponse"
|
||||
/abci_info:
|
||||
get:
|
||||
summary: Get some info about the application.
|
||||
summary: Get info about the application.
|
||||
operationId: abci_info
|
||||
tags:
|
||||
- ABCI
|
||||
description: |
|
||||
Get some info about the application.
|
||||
Get info about the application.
|
||||
|
||||
Upon success, the `Cache-Control` header will be set with the default
|
||||
maximum age.
|
||||
responses:
|
||||
"200":
|
||||
description: Get some info about the application.
|
||||
|
||||
@@ -5,7 +5,7 @@ kiwisolver==1.4.4
|
||||
matplotlib==3.6.1
|
||||
numpy==1.23.4
|
||||
packaging==21.3
|
||||
Pillow==9.2.0
|
||||
Pillow==9.3.0
|
||||
pyparsing==3.0.9
|
||||
python-dateutil==2.8.2
|
||||
six==1.16.0
|
||||
|
||||
@@ -82,7 +82,8 @@ call sequences of these methods.
|
||||
been locked at Tendermint level. Tendermint gathers outstanding transactions from the
|
||||
mempool, generates a block header, and uses them to create a block to propose. Then, it calls
|
||||
`RequestPrepareProposal` with the newly created proposal, called *raw proposal*. The Application
|
||||
can make changes to the raw proposal, such as modifying transactions, and returns the
|
||||
can make changes to the raw proposal, such as modifying the set of transactions or the order
|
||||
in which they appear, and returns the
|
||||
(potentially) modified proposal, called *prepared proposal* in the `ResponsePrepareProposal`
|
||||
call. The logic modifying the raw proposal can be non-deterministic.
|
||||
|
||||
@@ -109,9 +110,9 @@ call sequences of these methods.
|
||||
returned by `DeliverTx` are included in the header of the next block.
|
||||
|
||||
- [**EndBlock**](./abci++_methods.md#endblock) It is executed once all transactions have been processed via
|
||||
`DeliverTx` to inform the application that the block can now be committed and inform it of potential changes such
|
||||
as a new validator set to be proposed in the next round. As with `DeliverTx`, cryptographic commitments of the responses returned
|
||||
are included in the header of the next block.
|
||||
`DeliverTx` to inform the application that no other transactions will be delivered as part of the current
|
||||
block and to ask for changes of the validator set and consensus parameters to be used in the following block.
|
||||
As with `DeliverTx`, cryptographic commitments of the responses returned are included in the header of the next block.
|
||||
<!--
|
||||
|
||||
- [**ExtendVote:**](./abci++_methods.md#extendvote) It allows applications to force their
|
||||
|
||||
@@ -44,7 +44,7 @@ title: Methods
|
||||
| version | string | The application software semantic version | 2 |
|
||||
| app_version | uint64 | The application protocol version | 3 |
|
||||
| last_block_height | int64 | Latest height for which the app persisted its state | 4 |
|
||||
| last_block_app_hash | bytes | Latest AppHash returned by `FinalizeBlock` | 5 |
|
||||
| last_block_app_hash | bytes | Latest AppHash returned by `Commit` | 5 |
|
||||
|
||||
* **Usage**:
|
||||
* Return information about the application state.
|
||||
@@ -52,7 +52,7 @@ title: Methods
|
||||
that happens on startup or on recovery.
|
||||
* The returned `app_version` will be included in the Header of every block.
|
||||
* Tendermint expects `last_block_app_hash` and `last_block_height` to
|
||||
be updated during `FinalizeBlock` and persisted during `Commit`.
|
||||
be updated and persisted during `Commit`.
|
||||
|
||||
> Note: Semantic version is a reference to [semantic versioning](https://semver.org/). Semantic versions in info will be displayed as X.X.x.
|
||||
|
||||
@@ -541,16 +541,16 @@ proposal and will not call `RequestPrepareProposal`.
|
||||
|
||||
#### When does Tendermint call `ProcessProposal`?
|
||||
|
||||
When a validator _p_ enters Tendermint consensus round _r_, height _h_, in which _q_ is the proposer (possibly _p_ = _q_):
|
||||
When a node _p_ enters Tendermint consensus round _r_, height _h_, in which _q_ is the proposer (possibly _p_ = _q_):
|
||||
|
||||
1. _p_ sets up timer `ProposeTimeout`.
|
||||
2. If _p_ is the proposer, _p_ executes steps 1-6 in [PrepareProposal](#prepareproposal).
|
||||
3. Upon reception of Proposal message (which contains the header) for round _r_, height _h_ from
|
||||
_q_, _p_'s Tendermint verifies the block header.
|
||||
4. Upon reception of Proposal message, along with all the block parts, for round _r_, height _h_
|
||||
from _q_, _p_'s Tendermint follows its algorithm to check whether it should prevote for the
|
||||
from _q_, _p_'s Tendermint follows the validators' algorithm to check whether it should prevote for the
|
||||
proposed block, or `nil`.
|
||||
5. If Tendermint should prevote for the proposed block:
|
||||
5. If the validators' algorithm indicates Tendermint should prevote for the proposed block:
|
||||
1. Tendermint calls `RequestProcessProposal` with the block. The call is synchronous.
|
||||
2. The Application checks/processes the proposed block, which is read-only, and returns
|
||||
`ACCEPT` or `REJECT` in the `ResponseProcessProposal.status` field.
|
||||
@@ -559,7 +559,9 @@ When a validator _p_ enters Tendermint consensus round _r_, height _h_, in which
|
||||
* or after doing some basic checks, and process the block asynchronously. In this case the
|
||||
Application will not be able to reject the block, or force prevote/precommit `nil`
|
||||
afterwards.
|
||||
3. If the returned value is
|
||||
* or immediately, returning `ACCEPT`, if _p_ is not a validator
|
||||
and the Application does not want non-validating nodes to handle `ProcessProposal`
|
||||
3. If _p_ is a validator and the returned value is
|
||||
* `ACCEPT`: Tendermint prevotes on this proposal for round _r_, height _h_.
|
||||
* `REJECT`: Tendermint prevotes `nil`.
|
||||
<!--
|
||||
|
||||
@@ -136,13 +136,13 @@ Let's assume that after the last run the proposer priorities were as shown in fi
|
||||
The procedure could continue without modifications. However, after a sufficiently large number of modifications in validator set, the priority values would migrate towards maximum or minimum allowed values causing truncations due to overflow detection.
|
||||
For this reason, the selection procedure adds another __new step__ that centers the current priority values such that the priority sum remains close to 0.
|
||||
|
||||
| Priority Run | -3 | -2 | -1 | 0 | 1 | 2 | 4 | Comment |
|
||||
|----------------|----|----|----|---|----|----|---|-----------------------|
|
||||
| last run | p3 | | | | p1 | p2 | | __remove p2__ |
|
||||
| nextrun | | | | | | | | |
|
||||
| __new step__ | | p3 | | | | p1 | | A(i) -= avg, avg = -1 |
|
||||
| | | | | | p3 | p1 | | A(i)+=VP(i) |
|
||||
| | | | p1 | | p3 | | | A(p1)-= P |
|
||||
| Priority Run | -3 | -2 | -1 | 0 | 1 | 2 | 3 | Comment |
|
||||
|----------------|----|----|----|---|----|----|----|-----------------------|
|
||||
| last run | p3 | | | | p1 | p2 | | __remove p2__ |
|
||||
| nextrun | | | | | | | | |
|
||||
| __new step__ | | p3 | | | | p1 | | A(i) -= avg, avg = -1 |
|
||||
| | | | | | p3 | | p1 | A(i)+=VP(i) |
|
||||
| | | | p1 | | p3 | | | A(p1)-= P |
|
||||
|
||||
The modified selection algorithm is:
|
||||
|
||||
@@ -200,7 +200,7 @@ In the next run, p3 will still be ahead in the queue, elected as proposer and mo
|
||||
| Priority Run | -13 | -9 | -5 | -2 | -1 | 0 | 1 | 2 | 5 | 6 | 7 | Alg step |
|
||||
|----------------|-----|----|----|----|----|---|---|----|----|----|----|-----------------------|
|
||||
| last run | | | | p2 | | | | p1 | | | | __add p3__ |
|
||||
| | p3 | | | p2 | | | | p1 | | | | A(p3) = -4 |
|
||||
| | p3 | | | p2 | | | | p1 | | | | A(p3) = -13 |
|
||||
| next run | | p3 | | | | | | p2 | | p1 | | A(i) -= avg, avg = -4 |
|
||||
| | | | | | p3 | | | | p2 | | p1 | A(i)+=VP(i) |
|
||||
| | | | p1 | | p3 | | | | p2 | | | A(p1)-=P |
|
||||
@@ -215,7 +215,7 @@ Validator | p1 | p2 | Comment
|
||||
----------|------|------|------------------
|
||||
VP | 80k | 10 |
|
||||
A | 0 | -90k | __added p2__
|
||||
A | -45k | 45k | __run selection__
|
||||
A | 45k | -45k | __run selection__
|
||||
|
||||
Then execute the following steps:
|
||||
|
||||
|
||||
@@ -17,6 +17,14 @@ and upon incoming connection shares some peers and disconnects.
|
||||
Dials these seeds when we need more peers. They should return a list of peers and then disconnect.
|
||||
If we already have enough peers in the address book, we may never need to dial them.
|
||||
|
||||
## Bootstrap Peers
|
||||
|
||||
`--p2p.bootstrap_peers “id100000000000000000000000000000000@1.2.3.4:26656,id200000000000000000000000000000000@2.3.4.5:26656”`
|
||||
|
||||
A list of peers to be added to the addressbook upon startup to ensure that the node has some peers to initially dial.
|
||||
Unlike persistent peers, these addresses don't have any extra privileges. The node may not necessarily connect on redial
|
||||
these peers.
|
||||
|
||||
## Persistent Peers
|
||||
|
||||
`--p2p.persistent_peers “id100000000000000000000000000000000@1.2.3.4:26656,id200000000000000000000000000000000@2.3.4.5:26656”`
|
||||
|
||||
@@ -4,3 +4,10 @@ parent:
|
||||
title: P2P
|
||||
order: 6
|
||||
---
|
||||
|
||||
# Peer-to-Peer Communication
|
||||
|
||||
The operation of the p2p adopted in production Tendermint networks is [HERE](./v0.34/).
|
||||
|
||||
> This is part of an ongoing [effort](https://github.com/tendermint/tendermint/issues/9089)
|
||||
> to produce a high-level specification of the operation of the p2p layer.
|
||||
|
||||
70
spec/p2p/v0.34/README.md
Normal file
70
spec/p2p/v0.34/README.md
Normal file
@@ -0,0 +1,70 @@
|
||||
# Peer-to-Peer Communication
|
||||
|
||||
This document describes the implementation of the peer-to-peer (p2p)
|
||||
communication layer in Tendermint.
|
||||
|
||||
It is part of an [effort](https://github.com/tendermint/tendermint/issues/9089)
|
||||
to produce a high-level specification of the operation of the p2p layer adopted
|
||||
in production Tendermint networks.
|
||||
|
||||
This documentation, therefore, considers the releases `0.34.*` of Tendermint, more
|
||||
specifically, the branch [`v0.34.x`](https://github.com/tendermint/tendermint/tree/v0.34.x)
|
||||
of this repository.
|
||||
|
||||
## Overview
|
||||
|
||||
A Tendermint network is composed of multiple Tendermint instances, hereafter
|
||||
called **nodes**, that interact by exchanging messages.
|
||||
|
||||
Tendermint assumes a partially-connected network model.
|
||||
This means that a node is not assumed to be directly connected to every other
|
||||
node in the network.
|
||||
Instead, each node is directly connected to a subset of other nodes in the
|
||||
network, hereafter called its **peers**.
|
||||
|
||||
The peer-to-peer (p2p) communication layer is responsible for establishing
|
||||
connections between nodes in a Tendermint network,
|
||||
for managing the communication between a node and its peers,
|
||||
and for intermediating the exchange of messages between peers in Tendermint protocols.
|
||||
|
||||
## Contents
|
||||
|
||||
The documentation follows the organization of the `p2p` package of Tendermint,
|
||||
which implements the following abstractions:
|
||||
|
||||
- [Transport](./transport.md): establishes secure and authenticated
|
||||
connections with peers;
|
||||
- [Switch](./switch.md): responsible for dialing peers and accepting
|
||||
connections from peers, for managing established connections, and for
|
||||
routing messages between the reactors and peers,
|
||||
that is, between local and remote instances of the Tendermint protocols;
|
||||
- [PEX Reactor](./pex.md): a reactor is the implementation of a protocol which
|
||||
exchanges messages through the p2p layer. The PEX reactor manages the [Address Book](./addressbook.md) and implements both the [PEX protocol](./pex-protocol.md) and the [Peer Manager](./peer_manager.md) role.
|
||||
- [Peer Exchange protocol](./pex-protocol.md): enables nodes to exchange peer addresses, thus implementing a peer discovery service;
|
||||
- [Address Book](./addressbook.md): stores discovered peer addresses and
|
||||
quality metrics associated to peers with which the node has interacted;
|
||||
- [Peer Manager](./peer_manager.md): defines when and to which peers a node
|
||||
should dial, in order to establish outbound connections;
|
||||
- Finally, [Types](./types.md) and [Configuration](./configuration.md) provide
|
||||
a list of existing types and configuration parameters used by the p2p layer implementation.
|
||||
|
||||
## Further References
|
||||
|
||||
Existing documentation referring to the p2p layer:
|
||||
|
||||
- https://github.com/tendermint/tendermint/tree/main/spec/p2p: p2p-related
|
||||
configuration flags; overview of connections, peer instances, and reactors;
|
||||
overview of peer discovery and node types; peer identity, secure connections
|
||||
and peer authentication handshake.
|
||||
- https://github.com/tendermint/tendermint/tree/main/spec/p2p/messages: message
|
||||
types and channel IDs of Block Sync, Mempool, Evidence, State Sync, PEX, and
|
||||
Consensus reactors.
|
||||
- https://docs.tendermint.com/v0.34/tendermint-core: the p2p layer
|
||||
configuration and operation is documented in several pages.
|
||||
This content is not necessarily up-to-date, some settings and concepts may
|
||||
refer to the release `v0.35`, that was [discontinued][v35postmorten].
|
||||
- https://github.com/tendermint/tendermint/tree/master/docs/tendermint-core/pex:
|
||||
peer types, peer discovery, peer management overview, address book and peer
|
||||
ranking. This documentation refers to the release `v0.35`, that was [discontinued][v35postmorten].
|
||||
|
||||
[v35postmorten]: https://interchain-io.medium.com/discontinuing-tendermint-v0-35-a-postmortem-on-the-new-networking-layer-3696c811dabc
|
||||
367
spec/p2p/v0.34/addressbook.md
Normal file
367
spec/p2p/v0.34/addressbook.md
Normal file
@@ -0,0 +1,367 @@
|
||||
# Address Book
|
||||
|
||||
The address book tracks information about peers, i.e., about other nodes in the network.
|
||||
|
||||
The primary information stored in the address book are peer addresses.
|
||||
A peer address is composed by a node ID and a network address; a network
|
||||
address is composed by an IP address or a DNS name plus a port number.
|
||||
The same node ID can be associated to multiple network addresses.
|
||||
|
||||
There are two sources for the addresses stored in the address book.
|
||||
The [Peer Exchange protocol](./pex-protocol.md) stores in the address book
|
||||
the peer addresses it discovers, i.e., it learns from connected peers.
|
||||
And the [Switch](./switch.md) registers the addresses of peers with which it
|
||||
has interacted: to which it has dialed or from which it has accepted a
|
||||
connection.
|
||||
|
||||
The address book also records additional information about peers with which the
|
||||
node has interacted, from which it is possible to rank peers.
|
||||
The Switch reports [connection attempts](#dial-attempts) to a peer address; too
|
||||
many failed attempts indicate that a peer address is invalid.
|
||||
Reactors, in their turn, report a peer as [good](#good-peers) when it behaves as
|
||||
expected, or as a [bad peer](#bad-peers), when it misbehaves.
|
||||
|
||||
There are two entities that retrieve peer addresses from the address book.
|
||||
The [Peer Manager](./peer_manager.md) retrieves peer addresses to dial, so to
|
||||
establish outbound connections.
|
||||
This selection is random, but has a configurable bias towards peers that have
|
||||
been marked as good peers.
|
||||
The [Peer Exchange protocol](./pex-protocol.md) retrieves random samples of
|
||||
addresses to offer (send) to peers.
|
||||
This selection is also random but it includes, in particular for nodes that
|
||||
operate in seed mode, some bias toward peers marked as good ones.
|
||||
|
||||
## Buckets
|
||||
|
||||
Peer addresses are stored in buckets.
|
||||
There are buckets for new addresses and buckets for old addresses.
|
||||
The buckets for new addresses store addresses of peers about which the node
|
||||
does not have much information; the first address registered for a peer ID is
|
||||
always stored in a bucket for new addresses.
|
||||
The buckets for old addresses store addresses of peers with which the node has
|
||||
interacted and that were reported as [good peers](#good-peers) by a reactor.
|
||||
An old address therefore can be seen as an alias for a good address.
|
||||
|
||||
> Note that new addresses do not mean bad addresses.
|
||||
> The addresses of peers marked as [bad peers](#bad-peers) are removed from the
|
||||
> buckets where they are stored, and temporarily kept in a table of banned peers.
|
||||
|
||||
The number of buckets is fixed and there are more buckets for new addresses
|
||||
(`256`) than buckets for old addresses (`64`), a ratio of 4:1.
|
||||
Each bucket can store up to `64` addresses.
|
||||
When a bucket becomes full, the peer address with the lowest ranking is removed
|
||||
from the bucket.
|
||||
The first choice is to remove bad addresses, with multiple failed attempts
|
||||
associated.
|
||||
In the absence of those, the *oldest* address in the bucket is removed, i.e.,
|
||||
the address with the oldest last attempt to dial.
|
||||
|
||||
When a bucket for old addresses becomes full, the lowest-ranked peer address in
|
||||
the bucket is moved to a bucket of new addresses.
|
||||
When a bucket for new addresses becomes full, the lowest-ranked peer address in
|
||||
the bucket is removed from the address book.
|
||||
In other words, exceeding old or good addresses are downgraded to new
|
||||
addresses, while exceeding new addresses are dropped.
|
||||
|
||||
The bucket that stores an `address` is defined by the following two methods,
|
||||
for new and old addresses:
|
||||
|
||||
- `calcNewBucket(address, source) = hash(key + groupKey(source) + hash(key + groupKey(address) + groupKey(source)) % newBucketsPerGroup) % newBucketCount`
|
||||
- `calcOldBucket(address) = hash(key + groupKey(address) + hash(key + address) % oldBucketsPerGroup) % oldBucketCount`
|
||||
|
||||
The `key` is a fixed random 96-bit (12-byte) string.
|
||||
The `groupKey` for an address is a string representing its network group.
|
||||
The `source` of an address is the address of the peer from which we learn the
|
||||
address.
|
||||
The first (internal) hash is reduced to an integer up to `newBucketsPerGroup =
|
||||
32`, for new addresses, and `oldBucketsPerGroup = 4`, for old addresses.
|
||||
The second (external) hash is reduced to bucket indexes, in the interval from 0
|
||||
to the number of new (`newBucketCount = 256`) or old (`oldBucketCount = 64`) buckets.
|
||||
|
||||
Notice that new addresses with sources from the same network group are more
|
||||
likely to end up in the same bucket, and therefore to compete for it.
|
||||
For old address, instead, two addresses are more likely to end up in the same
|
||||
bucket when they belong to the same network group.
|
||||
|
||||
## Adding addresses
|
||||
|
||||
The `AddAddress` method adds the address of a peer to the address book.
|
||||
|
||||
The added address is associated to a *source* address, which identifies the
|
||||
node from which the peer address was learned.
|
||||
|
||||
Addresses are added to the address book in the following situations:
|
||||
|
||||
1. When a peer address is learned via PEX protocol, having the sender
|
||||
of the PEX message as its source
|
||||
2. When an inbound peer is added, in this case the peer itself is set as the
|
||||
source of its own address
|
||||
3. When the switch is instructed to dial addresses via the `DialPeersAsync`
|
||||
method, in this case the node itself is set as the source
|
||||
|
||||
If the added address contains a node ID that is not registered in the address
|
||||
book, the address is added to a [bucket](#buckets) of new addresses.
|
||||
Otherwise, the additional address for an existing node ID is **not added** to
|
||||
the address book when:
|
||||
|
||||
- The last address added with the same node ID is stored in an old bucket, so
|
||||
it is considered a "good" address
|
||||
- There are addresses associated to the same node ID stored in
|
||||
`maxNewBucketsPerAddress = 4` distinct buckets
|
||||
- Randomly, with a probability that increases exponentially with the number of
|
||||
buckets in which there is an address with the same node ID.
|
||||
So, a new address for a node ID which is already present in one bucket is
|
||||
added with a probability of 50%; if the node ID is present in two buckets, the
|
||||
probability decreases to 25%; and if it is present in three buckets, the
|
||||
probability is 12.5%.
|
||||
|
||||
The new address is also added to the `addrLookup` table, which stores
|
||||
`knownAddress` entries indexed by their node IDs.
|
||||
If the new address is from an unknown peer, a new entry is added to the
|
||||
`addrLookup` table; otherwise, the existing entry is updated with the new
|
||||
address.
|
||||
Entries of this table contain, among other fields, the list of buckets where
|
||||
addresses of a peer are stored.
|
||||
The `addrLookup` table is used by most of the address book methods (e.g.,
|
||||
`HasAddress`, `IsGood`, `MarkGood`, `MarkAttempt`), as it provides fast access
|
||||
to addresses.
|
||||
|
||||
### Errors
|
||||
|
||||
- if the added address or the associated source address are nil
|
||||
- if the added address is invalid
|
||||
- if the added address is the local node's address
|
||||
- if the added address ID is of a [banned](#bad-peers) peer
|
||||
- if either the added address or the associated source address IDs are configured as private IDs
|
||||
- if `routabilityStrict` is set and the address is not routable
|
||||
- in case of failures computing the bucket for the new address (`calcNewBucket` method)
|
||||
- if the added address instance, which is a new address, is configured as an
|
||||
old address (sanity check of `addToNewBucket` method)
|
||||
|
||||
## Need for Addresses
|
||||
|
||||
The `NeedMoreAddrs` method verifies whether the address book needs more addresses.
|
||||
|
||||
It is invoked by the PEX reactor to define whether to request peer addresses
|
||||
from a new outbound peer or from a randomly selected connected peer.
|
||||
|
||||
The address book needs more addresses when it has less than `1000` addresses
|
||||
registered, counting all buckets for new and old addresses.
|
||||
|
||||
## Pick address
|
||||
|
||||
The `PickAddress` method returns an address stored in the address book, chosen
|
||||
at random with a configurable bias towards new addresses.
|
||||
|
||||
It is invoked by the Peer Manager to obtain a peer address to dial, as part of
|
||||
its `ensurePeers` routine.
|
||||
The bias starts from 10%, when the peer has no outbound peers, increasing by
|
||||
10% for each outbound peer the node has, up to 90%, when the node has at least
|
||||
8 outbound peers.
|
||||
|
||||
The configured bias is a parameter that influences the probability of choosing
|
||||
an address from a bucket of new addresses or from a bucket of old addresses.
|
||||
A second parameter influencing this choice is the number of new and old
|
||||
addresses stored in the address book.
|
||||
In the absence of bias (i.e., if the configured bias is 50%), the probability
|
||||
of picking a new address is given by the square root of the number of new
|
||||
addresses divided by the sum of the square roots of the numbers of new and old
|
||||
addresses.
|
||||
By adding a bias toward new addresses (i.e., configured bias larger than 50%),
|
||||
the portion on the sample occupied by the square root of the number of new
|
||||
addresses increases, while the corresponding portion for old addresses decreases.
|
||||
As a result, it becomes more likely to pick a new address at random from this sample.
|
||||
|
||||
> The use of the square roots softens the impact of disproportional numbers of
|
||||
> new and old addresses in the address book. This is actually the expected
|
||||
> scenario, as there are 4 times more buckets for new addresses than buckets
|
||||
> for old addresses.
|
||||
|
||||
Once the type of address, new or old, is defined, a non-empty bucket of this
|
||||
type is selected at random.
|
||||
From the selected bucket, an address is chosen at random and returned.
|
||||
If all buckets of the selected type are empty, no address is returned.
|
||||
|
||||
## Random selection
|
||||
|
||||
The `GetSelection` method returns a selection of addresses stored in the
|
||||
address book, with no bias toward new or old addresses.
|
||||
|
||||
It is invoked by the PEX protocol to obtain a list of peer addresses with two
|
||||
purposes:
|
||||
|
||||
- To send to a peer in a PEX response, in the case of outbound peers or of
|
||||
nodes not operating in seed mode
|
||||
- To crawl, in the case of nodes operating in seed mode, as part of every
|
||||
interaction of the `crawlPeersRoutine`
|
||||
|
||||
The selection is a random subset of the peer addresses stored in the
|
||||
`addrLookup` table, which stores the last address added for each peer ID.
|
||||
The target size of the selection is `23%` (`getSelectionPercent`) of the
|
||||
number of addresses stored in the address book, but it should not be lower than
|
||||
`32` (`minGetSelection`) --- if it is, all addresses in the book are returned
|
||||
--- nor greater than `250` (`maxGetSelection`).
|
||||
|
||||
> The random selection is produced by:
|
||||
> - Retrieving all entries of the `addrLookup` map, which by definition are
|
||||
> returned in random order.
|
||||
> - Randomly shuffling the retrieved list, using the Fisher-Yates algorithm
|
||||
|
||||
## Random selection with bias
|
||||
|
||||
The `GetSelectionWithBias` method returns a selection of addresses stored in
|
||||
the address book, with bias toward new addresses.
|
||||
|
||||
It is invoked by the PEX protocol to obtain a list of peer addresses to be sent
|
||||
to a peer in a PEX response.
|
||||
This method is only invoked by seed nodes, when replying to a PEX request
|
||||
received from an inbound peer (i.e., a peer that dialed the seed node).
|
||||
The bias used in this scenario is hard-coded to 30%, meaning that 70% of
|
||||
the returned addresses are expected to be old addresses.
|
||||
|
||||
The number of addresses that compose the selection is computed in the same way
|
||||
as for the non-biased random selection.
|
||||
The bias toward new addresses is implemented by requiring that the configured
|
||||
bias, interpreted as a percentage, of the selected addresses come from buckets of
|
||||
new addresses, while the remaining come from buckets of old addresses.
|
||||
Since the number of old addresses is typically lower than the number of new
|
||||
addresses, it is possible that the address book does not have enough old
|
||||
addresses to include in the selection.
|
||||
In this case, additional new addresses are included in the selection.
|
||||
Thus, the configured bias, in practice, is towards old addresses, not towards
|
||||
new addresses.
|
||||
|
||||
To randomly select addresses of a type, the address book considers all
|
||||
addresses present in every bucket of that type.
|
||||
This list of all addresses of a type is randomly shuffled, and the requested
|
||||
number of addresses are retrieved from the tail of this list.
|
||||
The returned selection contains, at its beginning, a random selection of new
|
||||
addresses in random order, followed by a random selection of old addresses, in
|
||||
random order.
|
||||
|
||||
## Dial Attempts
|
||||
|
||||
The `MarkAttempt` method records a failed attempt to connect to an address.
|
||||
|
||||
It is invoked by the Peer Manager when it fails dialing a peer, but the failure
|
||||
is not in the authentication step (`ErrSwitchAuthenticationFailure` error).
|
||||
In case of authentication errors, the peer is instead marked as a [bad peer](#bad-peers).
|
||||
|
||||
The failed connection attempt is recorded in the address registered for the
|
||||
peer's ID in the `addrLookup` table, which is the last address added with that ID.
|
||||
The known address' counter of failed `Attempts` is increased and the failure
|
||||
time is registered in `LastAttempt`.
|
||||
|
||||
The possible effect of recording multiple failed connect attempts to a peer is
|
||||
to turn its address into a *bad* address (not to be confused with banned addresses).
|
||||
A known address becomes bad if it is stored in buckets of new addresses, and
|
||||
when connection attempts:
|
||||
|
||||
- Have not been made over a week, i.e., `LastAttempt` is older than a week
|
||||
- Have failed 3 times and never succeeded, i.e., the `LastSuccess` field is unset
|
||||
- Have failed 10 times in the last week, i.e., `LastSuccess` is older than a week
|
||||
|
||||
Addresses marked as *bad* are the first candidates to be removed from a bucket of
|
||||
new addresses when the bucket becomes full.
|
||||
|
||||
> Note that failed connection attempts are reported for a peer address, but in
|
||||
> fact the address book records them for a peer.
|
||||
>
|
||||
> More precisely, failed connection attempts are recorded in the entry of the
|
||||
> `addrLookup` table with reported peer ID, which contains the last address
|
||||
> added for that node ID, which is not necessarily the reported peer address.
|
||||
|
||||
## Good peers
|
||||
|
||||
The `MarkGood` method marks a peer ID as good.
|
||||
|
||||
It is invoked by the consensus reactor, via switch, when the number of useful
|
||||
messages received from a peer is a multiple of `10000`.
|
||||
Vote and block part messages are considered for this number, they must be valid
|
||||
and not be duplicated messages to be considered useful.
|
||||
|
||||
> The `SwitchReporter` type of `behaviour` package also invokes the `MarkGood`
|
||||
> method when a "reason" associated with consensus votes and block parts is
|
||||
> reported.
|
||||
> No reactor, however, currently provides these "reasons" to the `SwitchReporter`.
|
||||
|
||||
The effect of this action is that the address registered for the peer's ID in the
|
||||
`addrLookup` table, which is the last address added with that ID, is marked as
|
||||
good and moved to a bucket of old addresses.
|
||||
An address marked as good has its failed to connect counter and timestamp reset.
|
||||
If the destination bucket of old addresses is full, the oldest address in the
|
||||
bucket is moved (downgraded) to a bucket of new addresses.
|
||||
|
||||
Moving the peer address to a bucket of old addresses has the effect of
|
||||
upgrading, or increasing the ranking of a peer in the address book.
|
||||
|
||||
## Bad peers
|
||||
|
||||
The `MarkBad` method marks a peer as bad and bans it for a period of time.
|
||||
|
||||
This method is only invoked within the PEX reactor, with a banning time of 24
|
||||
hours, for the following reasons:
|
||||
|
||||
- A peer misbehaves in the [PEX protocol](pex-protocol.md#misbehavior)
|
||||
- When the `maxAttemptsToDial` limit (`16`) is reached for a peer
|
||||
- If an `ErrSwitchAuthenticationFailure` error is returned when dialing a peer
|
||||
|
||||
The effect of this action is that the address registered for the peer's ID in the
|
||||
`addrLookup` table, which is the last address added with that ID, is banned for
|
||||
a period of time.
|
||||
The banned peer is removed from the `addrLookup` table and from all buckets
|
||||
where its addresses are stored.
|
||||
|
||||
The information about banned peers, however, is not discarded.
|
||||
It is maintained in the `badPeers` map, indexed by peer ID.
|
||||
This allows, in particular, addresses of banned peers to be
|
||||
[reinstated](#reinstating-addresses), i.e., to be added
|
||||
back to the address book, when their ban period expires.
|
||||
|
||||
## Reinstating addresses
|
||||
|
||||
The `ReinstateBadPeers` method attempts to re-add banned addresses to the address book.
|
||||
|
||||
It is invoked by the PEX reactor when dialing new peers.
|
||||
This action is taken before requesting additional addresses to peers,
|
||||
in the case that the node needs more peer addresses.
|
||||
|
||||
The set of banned peer addresses is retrieved from the `badPeers` map.
|
||||
Addresses that are not any longer banned, i.e., whose banned period has expired,
|
||||
are added back to the address book as new addresses, while the corresponding
|
||||
node IDs are removed from the `badPeers` map.
|
||||
|
||||
## Removing addresses
|
||||
|
||||
The `RemoveAddress` method removes an address from the address book.
|
||||
|
||||
It is invoked by the switch when it dials a peer or accepts a connection from a
|
||||
peer that ends up being the node itself (`IsSelf` error).
|
||||
In both cases, the address dialed or accepted is also added to the address book
|
||||
as a local address, via the `AddOurAddress` method.
|
||||
|
||||
The same logic is also internally used by the address book for removing
|
||||
addresses of a peer that is [marked as a bad peer](#bad-peers).
|
||||
|
||||
The entry registered with the peer ID of the address in the `addrLookup` table,
|
||||
which is the last address added with that ID, is removed from all buckets where
|
||||
it is stored and from the `addrLookup` table.
|
||||
|
||||
> FIXME: is it possible that addresses with the same ID as the removed address,
|
||||
> but with distinct network addresses, are kept in buckets of the address book?
|
||||
> While they will not be accessible anymore, as there is no reference to them
|
||||
> in the `addrLookup`, they will still be there.
|
||||
|
||||
## Persistence
|
||||
|
||||
The `loadFromFile` method, called when the address book is started, reads
|
||||
address book entries from a file, passed to the address book constructor.
|
||||
The file, at this point, does not need to exist.
|
||||
|
||||
The `saveRoutine` is started when the address book is started.
|
||||
It saves the address book to the configured file every `dumpAddressInterval`,
|
||||
hard-coded to 2 minutes.
|
||||
It is also possible to save the content of the address book using the `Save`
|
||||
method.
|
||||
Saving the address book content to a file acquires the address book lock, also
|
||||
employed by all other public methods.
|
||||
51
spec/p2p/v0.34/configuration.md
Normal file
51
spec/p2p/v0.34/configuration.md
Normal file
@@ -0,0 +1,51 @@
|
||||
# Tendermint p2p configuration
|
||||
|
||||
This document contains configurable parameters a node operator can use to tune the p2p behaviour.
|
||||
|
||||
| Parameter| Default| Description |
|
||||
| --- | --- | ---|
|
||||
| ListenAddress | "tcp://0.0.0.0:26656" | Address to listen for incoming connections (0.0.0.0:0 means any interface, any port) |
|
||||
| ExternalAddress | "" | Address to advertise to peers for them to dial |
|
||||
| [Seeds](pex-protocol.md#seed-nodes) | empty | Comma separated list of seed nodes to connect to (ID@host:port )|
|
||||
| [Persistent peers](peer_manager.md#persistent-peers) | empty | Comma separated list of nodes to keep persistent connections to (ID@host:port ) |
|
||||
| UPNP | false | UPNP port forwarding enabled |
|
||||
| [AddrBook](addressbook.md) | defaultAddrBookPath | Path to address book |
|
||||
| AddrBookStrict | true | Set true for strict address routability rules and false for private or local networks |
|
||||
| [MaxNumInboundPeers](switch.md#accepting-peers) | 40 | Maximum number of inbound peers |
|
||||
| [MaxNumOutboundPeers](peer_manager.md#ensure-peers) | 10 | Maximum number of outbound peers to connect to, excluding persistent peers |
|
||||
| [UnconditionalPeers](switch.md#accepting-peers) | empty | These are IDs of the peers which are allowed to be (re)connected as both inbound or outbound regardless of whether the node reached `max_num_inbound_peers` or `max_num_outbound_peers` or not. |
|
||||
| PersistentPeersMaxDialPeriod| 0 * time.Second | Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) |
|
||||
| FlushThrottleTimeout |100 * time.Millisecond| Time to wait before flushing messages out on the connection |
|
||||
| MaxPacketMsgPayloadSize | 1024 | Maximum size of a message packet payload, in bytes |
|
||||
| SendRate | 5120000 (5 mB/s) | Rate at which packets can be sent, in bytes/second |
|
||||
| RecvRate | 5120000 (5 mB/s) | Rate at which packets can be received, in bytes/second|
|
||||
| [PexReactor](pex.md) | true | Set true to enable the peer-exchange reactor |
|
||||
| SeedMode | false | Seed mode, in which the node constantly crawls the network and looks for peers. Does not work if the peer-exchange reactor is disabled. |
|
||||
| PrivatePeerIDs | empty | Comma separated list of peer IDs that we do not add to the address book or gossip to other peers. They stay private to us. |
|
||||
| AllowDuplicateIP | false | Toggle to disable guard against peers connecting from the same ip.|
|
||||
| [HandshakeTimeout](transport.md#connection-upgrade) | 20 * time.Second | Timeout for handshake completion between peers |
|
||||
| [DialTimeout](switch.md#dialing-peers) | 3 * time.Second | Timeout for dialing a peer |
|
||||
|
||||
|
||||
These parameters can be set using the `$TMHOME/config/config.toml` file. A subset of them can also be changed via command line using the following command line flags:
|
||||
|
||||
| Parameter | Flag| Example|
|
||||
| --- | --- | ---|
|
||||
| Listen address| `p2p.laddr` | "tcp://0.0.0.0:26656" |
|
||||
| Seed nodes | `p2p.seeds` | `--p2p.seeds “id100000000000000000000000000000000@1.2.3.4:26656,id200000000000000000000000000000000@2.3.4.5:4444”` |
|
||||
| Persistent peers | `p2p.persistent_peers` | `--p2p.persistent_peers “id100000000000000000000000000000000@1.2.3.4:26656,id200000000000000000000000000000000@2.3.4.5:26656”` |
|
||||
| Unconditional peers | `p2p.unconditional_peer_ids` | `--p2p.unconditional_peer_ids “id100000000000000000000000000000000,id200000000000000000000000000000000”` |
|
||||
| UPNP | `p2p.upnp` | `--p2p.upnp` |
|
||||
| PexReactor | `p2p.pex` | `--p2p.pex` |
|
||||
| Seed mode | `p2p.seed_mode` | `--p2p.seed_mode` |
|
||||
| Private peer ids | `p2p.private_peer_ids` | `--p2p.private_peer_ids “id100000000000000000000000000000000,id200000000000000000000000000000000”` |
|
||||
|
||||
**Note on persistent peers**
|
||||
|
||||
If `persistent_peers_max_dial_period` is set greater than zero, the
|
||||
pause between each dial to each persistent peer will not exceed `persistent_peers_max_dial_period`
|
||||
during exponential backoff and we keep trying again without giving up.
|
||||
|
||||
If `seeds` and `persistent_peers` intersect,
|
||||
the user will be warned that seeds may auto-close connections
|
||||
and that the node may not be able to keep the connection persistent.
|
||||
BIN
spec/p2p/v0.34/img/p2p_state.png
Normal file
BIN
spec/p2p/v0.34/img/p2p_state.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 129 KiB |
147
spec/p2p/v0.34/peer_manager.md
Normal file
147
spec/p2p/v0.34/peer_manager.md
Normal file
@@ -0,0 +1,147 @@
|
||||
# Peer Manager
|
||||
|
||||
The peer manager is responsible for establishing connections with peers.
|
||||
It defines when a node should dial peers and which peers it should dial.
|
||||
The peer manager is not an implementation abstraction of the p2p layer,
|
||||
but a role that is played by the [PEX reactor](./pex.md).
|
||||
|
||||
## Outbound peers
|
||||
|
||||
The `ensurePeersRoutine` is a persistent routine intended to ensure that a node
|
||||
is connected to `MaxNumOutboundPeers` outbound peers.
|
||||
This routine is continuously executed by regular nodes, i.e. nodes not
|
||||
operating in seed mode, as part of the PEX reactor implementation.
|
||||
|
||||
The logic defining when the node should dial peers, for selecting peers to dial
|
||||
and for actually dialing them is implemented in the `ensurePeers` method.
|
||||
This method is periodically invoked -- every `ensurePeersPeriod`, with default
|
||||
value to 30 seconds -- by the `ensurePeersRoutine`.
|
||||
|
||||
A node is expected to dial peers whenever the number of outbound peers is lower
|
||||
than the configured `MaxNumOutboundPeers` parameter.
|
||||
The current number of outbound peers is retrieved from the switch, using the
|
||||
`NumPeers` method, which also reports the number of nodes to which the switch
|
||||
is currently dialing.
|
||||
If the number of outbound peers plus the number of dialing routines equals to
|
||||
`MaxNumOutboundPeers`, nothing is done.
|
||||
Otherwise, the `ensurePeers` method will attempt to dial node addresses in
|
||||
order to reach the target number of outbound peers.
|
||||
|
||||
Once defined that the node needs additional outbound peers, the node queries
|
||||
the address book for candidate addresses.
|
||||
This is done using the [`PickAddress`](./addressbook.md#pick-address) method,
|
||||
which returns an address selected at random on the address book, with some bias
|
||||
towards new or old addresses.
|
||||
When the node has up to 3 outbound peers, the adopted bias is towards old
|
||||
addresses, i.e., addresses of peers that are believed to be "good".
|
||||
When the node has 5 or more outbound peers, the adopted bias is towards new
|
||||
addresses, i.e., addresses of peers about which the node has not yet collected
|
||||
much information.
|
||||
So, the more outbound peers a node has, the less conservative it will be when
|
||||
selecting new peers.
|
||||
|
||||
The selected peer addresses are then dialed in parallel, by starting a dialing
|
||||
routine per peer address.
|
||||
Dialing a peer address can fail for multiple reasons.
|
||||
The node might have attempted to dial the peer too many times.
|
||||
In this case, the peer address is marked as bad and removed from the address book.
|
||||
The node might have attempted and failed to dial the peer recently
|
||||
and the exponential `backoffDuration` has not yet passed.
|
||||
Or the current connection attempt might fail, which is registered in the address book.
|
||||
None of these errors are explicitly handled by the `ensurePeers` method, which
|
||||
also does not wait until the connections are established.
|
||||
|
||||
The third step of the `ensurePeers` method is to ensure that the address book
|
||||
has enough addresses.
|
||||
This is done, first, by [reinstating banned peers](./addressbook.md#reinstating-addresses)
|
||||
whose ban period has expired.
|
||||
Then, the node randomly selects a connected peer, which can be either an
|
||||
inbound or outbound peer, to [request addresses](./pex-protocol.md#requesting-addresses)
|
||||
using the PEX protocol.
|
||||
Last, and this action is only performed if the node could not retrieve any new
|
||||
address to dial from the address book, the node dials the configured seed nodes
|
||||
in order to establish a connection to at least one of them.
|
||||
|
||||
### Fast dialing
|
||||
|
||||
As above described, seed nodes are actually the last source of peer addresses
|
||||
for regular nodes.
|
||||
They are contacted by a node when, after an invocation of the `ensurePeers`
|
||||
method, no suitable peer address to dial is retrieved from the address book
|
||||
(e.g., because it is empty).
|
||||
|
||||
Once a connection with a seed node is established, the node immediately
|
||||
[sends a PEX request](./pex-protocol.md#requesting-addresses) to it, as it is
|
||||
added as an outbound peer.
|
||||
When the corresponding PEX response is received, the addresses provided by the
|
||||
seed node are added to the address book.
|
||||
As a result, in the next invocation of the `ensurePeers` method, the node
|
||||
should be able to dial some of the peer addresses provided by the seed node.
|
||||
|
||||
However, as observed in this [issue](https://github.com/tendermint/tendermint/issues/2093),
|
||||
it can take some time, up to `ensurePeersPeriod` or 30 seconds, from when the
|
||||
node receives new peer addresses to when it dials the received addresses.
|
||||
To avoid this delay, which can be particularly relevant when the node has no
|
||||
peers, a node immediately attempts to dial peer addresses when they are
|
||||
received from a peer that is locally configured as a seed node.
|
||||
|
||||
> FIXME: The current logic was introduced in [#3762](https://github.com/tendermint/tendermint/pull/3762).
|
||||
> Although it fixes the issue, namely the delay between receiving an address and dialing
|
||||
> the peer, it does not impose any limit on how many addresses are dialed in this
|
||||
> scenario.
|
||||
> So, all addresses received from a seed node are dialed, regardless of the
|
||||
> current number of outbound peers, the number of dialing routines, or the
|
||||
> `MaxNumOutboundPeers` parameter.
|
||||
>
|
||||
> Issue [#9548](https://github.com/tendermint/tendermint/issues/9548) was
|
||||
> created to handle this situation.
|
||||
|
||||
### First round
|
||||
|
||||
When the PEX reactor is started, the `ensurePeersRoutine` is created and it
|
||||
runs throughout the operation of a node, periodically invoking the `ensurePeers`
|
||||
method.
|
||||
However, if when the persistent routine is started the node already has some
|
||||
peers, either inbound or outbound peers, or is dialing some addresses, the
|
||||
first invocation of `ensurePeers` is delayed by a random amount of time from 0
|
||||
to `ensurePeersPeriod`.
|
||||
|
||||
### Persistent peers
|
||||
|
||||
The node configuration can contain a list of *persistent peers*.
|
||||
Those peers have preferential treatment compared to regular peers and the node
|
||||
is always trying to connect to them.
|
||||
Moreover, these peers are not removed from the address book in the case of
|
||||
multiple failed dial attempts.
|
||||
|
||||
On startup, the node immediately tries to dial the configured persistent peers
|
||||
by calling the switch's [`DialPeersAsync`](./switch.md#manual-operation) method.
|
||||
This is not done in the p2p package, but it is part of the procedure to set up a node.
|
||||
|
||||
> TODO: the handling of persistent peers should be described in more detail.
|
||||
|
||||
### Life cycle
|
||||
|
||||
The picture below is a first attempt of illustrating the life cycle of an outbound peer:
|
||||
|
||||
<img src="img/p2p_state.png" width="50%" title="Outgoing peers lifecycle">
|
||||
|
||||
A peer can be in the following states:
|
||||
|
||||
- Candidate peers: peer addresses stored in the address book, which can be
|
||||
retrieved via the [`PickAddress`](./addressbook.md#pick-address) method
|
||||
- [Dialing](switch.md#dialing-peers): peer addresses that are currently being
|
||||
dialed. This state exists to ensure that a single dialing routine exists per peer.
|
||||
- [Reconnecting](switch.md#reconnect-to-peer): persistent peers to which a node
|
||||
is currently reconnecting, as a previous connection attempt has failed.
|
||||
- Connected peers: peers that a node has successfully dialed, added as outbound peers.
|
||||
- [Bad peers](addressbook.md#bad-peers): peers marked as bad in the address
|
||||
book due to exhibited [misbehavior](pex-protocol.md#misbehavior).
|
||||
Peers can be reinstated after being marked as bad.
|
||||
|
||||
## Pending of documentation
|
||||
|
||||
The `dialSeeds` method of the PEX reactor.
|
||||
|
||||
The `dialPeer` method of the PEX reactor.
|
||||
This includes `dialAttemptsInfo`, `maxBackoffDurationForPeer` methods.
|
||||
240
spec/p2p/v0.34/pex-protocol.md
Normal file
240
spec/p2p/v0.34/pex-protocol.md
Normal file
@@ -0,0 +1,240 @@
|
||||
# Peer Exchange Protocol
|
||||
|
||||
The Peer Exchange (PEX) protocol enables nodes to exchange peer addresses, thus
|
||||
implementing a peer discovery mechanism.
|
||||
|
||||
The PEX protocol uses two messages:
|
||||
|
||||
- `PexRequest`: sent by a node to [request](#requesting-addresses) peer
|
||||
addresses to a peer
|
||||
- `PexAddrs`: a list of peer addresses [provided](#providing-addresses) to a
|
||||
peer as response to a `PexRequest` message
|
||||
|
||||
While all nodes, with few exceptions, participate in the PEX protocol,
|
||||
a subset of nodes, configured as [seed nodes](#seed-nodes) have a particular
|
||||
role in the protocol.
|
||||
They crawl the network, connecting to random peers, in order to learn as many
|
||||
peer addresses as possible to provide to other nodes.
|
||||
|
||||
## Requesting Addresses
|
||||
|
||||
A node requests peer addresses by sending a `PexRequest` message to a peer.
|
||||
|
||||
For regular nodes, not operating in seed mode, a PEX request is sent when
|
||||
the node *needs* peer addresses, a condition checked:
|
||||
|
||||
1. When an *outbound* peer is added, causing the node to request addresses from
|
||||
the new peer
|
||||
2. Periodically, by the `ensurePeersRoutine`, causing the node to request peer
|
||||
addresses to a randomly selected peer
|
||||
|
||||
A node needs more peer addresses when its address book has
|
||||
[less than 1000 records](./addressbook.md#need-for-addresses).
|
||||
It is thus reasonable to assume that the common case is that a peer needs more
|
||||
peer addresses, so that PEX requests are sent whenever the above two situations happen.
|
||||
|
||||
A PEX request is sent when a new *outbound* peer is added.
|
||||
The same does not happen with new inbound peers because the implementation
|
||||
considers outbound peers, that the node has chosen for dialing, more
|
||||
trustworthy than inbound peers, that the node has accepted.
|
||||
Moreover, when a node is short of peer addresses, it dials the configured seed nodes;
|
||||
since they are added as outbound peers, the node can immediately request peer addresses.
|
||||
|
||||
The `ensurePeersRoutine` periodically checks, by default every 30 seconds (`ensurePeersPeriod`),
|
||||
whether the node has enough outbound peers.
|
||||
If it does not have, the node tries dialing some peer addresses stored in the address book.
|
||||
As part of this procedure, the node selects a peer at random,
|
||||
from the set of connected peers retrieved from the switch,
|
||||
and sends a PEX request to the selected peer.
|
||||
|
||||
Sending a PEX request to a peer is implemented by the `RequestAddrs` method of
|
||||
the PEX reactor.
|
||||
|
||||
### Responses
|
||||
|
||||
After a PEX request is sent to a peer, the node expects to receive,
|
||||
as a response, a `PexAddrs` message from the peer.
|
||||
This message encodes a list of peer addresses that are
|
||||
[added to address book](./addressbook.md#adding-addresses),
|
||||
having the peer from which the PEX response was received as their source.
|
||||
|
||||
Received PEX responses are handled by the `ReceiveAddrs` method of the PEX reactor.
|
||||
In the case of a PEX response received from a peer which is configured as
|
||||
a seed node, the PEX reactor attempts immediately to dial the provided peer
|
||||
addresses, as detailed [here](./peer_manager.md#fast-dialing).
|
||||
|
||||
### Misbehavior
|
||||
|
||||
Sending multiple PEX requests to a peer, before receiving a reply from it,
|
||||
is considered a misbehavior.
|
||||
To prevent it, the node maintains a `requestsSent` set of outstanding
|
||||
requests, indexed by destination peers.
|
||||
While a peer ID is present in the `requestsSent` set, the node does not send
|
||||
further PEX requests to that peer.
|
||||
A peer ID is removed from the `requestsSent` set when a PEX response is
|
||||
received from it.
|
||||
|
||||
Sending a PEX response to a peer that has not requested peer addresses
|
||||
is also considered a misbehavior.
|
||||
So, if a PEX response is received from a peer that is not registered in
|
||||
the `requestsSent` set, a `ErrUnsolicitedList` error is produced.
|
||||
This leads the peer to be disconnected and [marked as a bad peer](addressbook.md#bad-peers).
|
||||
|
||||
## Providing Addresses
|
||||
|
||||
When a node receives a `PexRequest` message from a peer,
|
||||
it replies with a `PexAddrs` message.
|
||||
|
||||
This message encodes a [random selection of peer addresses](./addressbook.md#random-selection)
|
||||
retrieved from the address book.
|
||||
|
||||
Sending a PEX response to a peer is implemented by the `SendAddrs` method of
|
||||
the PEX reactor.
|
||||
|
||||
### Misbehavior
|
||||
|
||||
Requesting peer addresses too often is considered a misbehavior.
|
||||
Since nodes are expected to send PEX requests every `ensurePeersPeriod`,
|
||||
the minimum accepted interval between requests from the same peer is set
|
||||
to `ensurePeersPeriod / 3`, 10 seconds by default.
|
||||
|
||||
The `receiveRequest` method is responsible for verifying this condition.
|
||||
The node keeps a `lastReceivedRequests` map with the time of the last PEX
|
||||
request received from every peer.
|
||||
If the interval between successive requests is less than the minimum accepted
|
||||
one, the peer is disconnected and [marked as a bad peer](addressbook.md#bad-peers).
|
||||
An exception is made for the first two PEX requests received from a peer.
|
||||
|
||||
> The probable reason is that, when a new peer is added, the two conditions for
|
||||
> a node to request peer addresses can be triggered with an interval lower than
|
||||
> the minimum accepted interval.
|
||||
> Since this is a legit behavior, it should not be punished.
|
||||
|
||||
## Seed nodes
|
||||
|
||||
A seed node is a node configured to operate in `SeedMode`.
|
||||
|
||||
### Crawling peers
|
||||
|
||||
Seed nodes crawl the network, connecting to random peers and sending PEX
|
||||
requests to them, in order to learn as many peer addresses as possible.
|
||||
More specifically, a node operating in seed mode sends PEX requests in two cases:
|
||||
|
||||
1. When an outbound peer is added, and the seed node needs more peer addresses,
|
||||
it requests peer addresses to the new peer
|
||||
2. Periodically, the `crawlPeersRoutine` sends PEX requests to a random set of
|
||||
peers, whose addresses are registered in the Address Book
|
||||
|
||||
The first case also applies for nodes not operating in seed mode.
|
||||
The second case replaces the second case of regular nodes, as seed nodes do not
|
||||
run the `ensurePeersRoutine`, as regular nodes,
|
||||
but run the `crawlPeersRoutine`, which is not run by regular nodes.
|
||||
|
||||
The `crawlPeersRoutine` periodically, every 30 seconds (`crawlPeerPeriod`),
|
||||
starts a new peer discovery round.
|
||||
First, the seed node retrieves a random selection of peer addresses from its
|
||||
Address Book.
|
||||
This selection is produced in the same way as in the random selection of peer
|
||||
addresses that are [provided](#providing-addresses) to a requesting peer.
|
||||
Peers that the seed node has crawled recently,
|
||||
less than 2 minutes ago (`minTimeBetweenCrawls`), are removed from this selection.
|
||||
The remaining peer addresses are registered in the `crawlPeerInfos` table.
|
||||
|
||||
The seed node is not necessarily connected to the peer whose address is
|
||||
selected for each round of crawling.
|
||||
So, the seed node dials the selected peer addresses.
|
||||
This is performed in foreground, one peer at a time.
|
||||
As a result, a round of crawling can take a substantial amount of time.
|
||||
For each selected peer it succeeds dialing to, including already connected
|
||||
peers, the seed node sends a PEX request.
|
||||
|
||||
Dialing a selected peer address can fail for multiple reasons.
|
||||
The seed node might have attempted to dial the peer too many times.
|
||||
In this case, the peer address is marked as [bad in the address book](addressbook.md#bad-peers).
|
||||
The seed node might have attempted to dial the peer recently, without success,
|
||||
and the exponential `backoffDuration` has not yet passed.
|
||||
Or the current connection attempt might fail, which is registered in the address book.
|
||||
|
||||
Failures to dial to a peer address produce an information that is important for
|
||||
a seed node.
|
||||
They indicate that a peer is unreachable, or is not operating correctly, and
|
||||
therefore its address should not be provided to other nodes.
|
||||
This occurs when, due to multiple failed connection attempts or authentication
|
||||
failures, the peer address ends up being removed from the address book.
|
||||
As a result, the periodically crawling of selected peers not only enables the
|
||||
discovery of new peers, but also allows the seed node to stop providing
|
||||
addresses of bad peers.
|
||||
|
||||
### Offering addresses
|
||||
|
||||
Nodes operating in seed mode handle PEX requests differently than regular
|
||||
nodes, whose operation is described [here](#providing-addresses).
|
||||
|
||||
This distinction exists because nodes dial a seed node with the main, if not
|
||||
exclusive goal of retrieving peer addresses.
|
||||
In other words, nodes do not dial a seed node because they intend to have it as
|
||||
a peer in the multiple Tendermint protocols, but because they believe that a
|
||||
seed node is a good source of addresses of nodes to which they can establish
|
||||
connections and interact in the multiple Tendermint protocols.
|
||||
|
||||
So, when a seed node receives a `PexRequest` message from an inbound peer,
|
||||
it sends a `PexAddrs` message, containing a selection of peer
|
||||
addresses, back to the peer and *disconnects* from it.
|
||||
Seed nodes therefore treat inbound connections from peers as short-term
|
||||
connections, exclusively intended to retrieve peer addresses.
|
||||
Once the requested peer addresses are sent, the connection with the peer is closed.
|
||||
|
||||
Moreover, the selection of peer addresses provided to inbound peers by a seed
|
||||
node, although still essentially random, has a [bias toward old
|
||||
addresses](./addressbook.md#random-selection-with-bias).
|
||||
The selection bias is defined by `biasToSelectNewPeers`, hard-coded to `30%`,
|
||||
meaning that `70%` of the peer addresses provided by a seed node are expected
|
||||
to be old addresses.
|
||||
Although this nomenclature is not clear, *old* addresses are the addresses that
|
||||
survived the most in the address book, that is, are addresses that the seed
|
||||
node believes being from *good* peers (more details [here](./addressbook.md#good-peers)).
|
||||
|
||||
Another distinction is on the handling of potential [misbehavior](#misbehavior-1)
|
||||
of peers requesting addresses.
|
||||
A seed node does not enforce, a priori, a minimal interval between PEX requests
|
||||
from inbound peers.
|
||||
Instead, it does not reply to more than one PEX request per peer inbound
|
||||
connection, and, as above mentioned, it disconnects from incoming peers after
|
||||
responding to them.
|
||||
If the same peer dials again to the seed node and requests peer addresses, the
|
||||
seed node will reply to this peer like it was the first time it has requested
|
||||
peer addresses.
|
||||
|
||||
> This is more an implementation restriction than a desired behavior.
|
||||
> The `lastReceivedRequests` map stores the last time a PEX request was
|
||||
> received from a peer, and the entry relative to a peer is removed from this
|
||||
> map when the peer is disconnected.
|
||||
>
|
||||
> It is debatable whether this approach indeed prevents abuse against seed nodes.
|
||||
|
||||
### Disconnecting from peers
|
||||
|
||||
Seed nodes treat connections with peers as short-term connections, which are
|
||||
mainly, if not exclusively, intended to exchange peer addresses.
|
||||
|
||||
In the case of inbound peers, that have dialed the seed node, the intent of the
|
||||
connection is achieved once a PEX response is sent to the peer.
|
||||
The seed node thus disconnects from an inbound peer after sending a `PexAddrs`
|
||||
message to it.
|
||||
|
||||
In the case of outbound peers, which the seed node has dialed for crawling peer
|
||||
addresses, the intent of the connection is essentially achieved when a PEX
|
||||
response is received from the peer.
|
||||
The seed node, however, does not disconnect from a peer after receiving a
|
||||
selection of peer addresses from it.
|
||||
As a result, after some rounds of crawling, a seed node will have established
|
||||
connections to a substantial amount of peers.
|
||||
|
||||
To cope with the existence of multiple connections with peers that have no
|
||||
longer purpose for the seed node, the `crawlPeersRoutine` also invokes, after
|
||||
each round of crawling, the `attemptDisconnects` method.
|
||||
This method retrieves the list of connected peers from the switch, and
|
||||
disconnects from peers that are not persistent peers, and with which a
|
||||
connection is established for more than `SeedDisconnectWaitPeriod`.
|
||||
This period is a configuration parameter, set to 28 hours when the PEX reactor
|
||||
is created by the default node constructor.
|
||||
111
spec/p2p/v0.34/pex.md
Normal file
111
spec/p2p/v0.34/pex.md
Normal file
@@ -0,0 +1,111 @@
|
||||
# PEX Reactor
|
||||
|
||||
The PEX reactor is one of the reactors running in a Tendermint node.
|
||||
|
||||
Its implementation is located in the `p2p/pex` package, and it is considered
|
||||
part of the implementation of the p2p layer.
|
||||
|
||||
This document overviews the implementation of the PEX reactor, describing how
|
||||
the methods from the `Reactor` interface are implemented.
|
||||
|
||||
The actual operation of the PEX reactor is presented in documents describing
|
||||
the roles played by the PEX reactor in the p2p layer:
|
||||
|
||||
- [Address Book](./addressbook.md): stores known peer addresses and information
|
||||
about peers to which the node is connected or has attempted to connect
|
||||
- [Peer Manager](./peer_manager.md): manages connections established with peers,
|
||||
defining when a node should dial peers and which peers it should dial
|
||||
- [Peer Exchange protocol](./pex-protocol.md): enables nodes to exchange peer
|
||||
addresses, thus implementing a peer discovery service
|
||||
|
||||
## OnStart
|
||||
|
||||
The `OnStart` method implements `BaseService` and starts the PEX reactor.
|
||||
|
||||
The [address book](./addressbook.md), which is a `Service` is started.
|
||||
This loads the address book content from disk,
|
||||
and starts a routine that periodically persists the address book content to disk.
|
||||
|
||||
The PEX reactor is configured with the addresses of a number of seed nodes,
|
||||
the `Seeds` parameter of the `ReactorConfig`.
|
||||
The addresses of seed nodes are parsed into `NetAddress` instances and resolved
|
||||
into IP addresses, which is implemented by the `checkSeeds` method.
|
||||
Valid seed node addresses are stored in the `seedAddrs` field,
|
||||
and are used by the `dialSeeds` method to contact the configured seed nodes.
|
||||
|
||||
The last action is to start one of the following persistent routines, based on
|
||||
the `SeedMode` configuration parameter:
|
||||
|
||||
- Regular nodes run the `ensurePeersRoutine` to check whether the node has
|
||||
enough outbound peers, dialing peers when necessary
|
||||
- Seed nodes run the `crawlPeersRoutine` to periodically start a new round
|
||||
of [crawling](./pex-protocol.md#Crawling-peers) to discover as many peer
|
||||
addresses as possible
|
||||
|
||||
### Errors
|
||||
|
||||
Errors encountered when loading the address book from disk are returned,
|
||||
and prevent the reactor from being started.
|
||||
An exception is made for the `service.ErrAlreadyStarted` error, which is ignored.
|
||||
|
||||
Errors encountered when parsing the configured addresses of seed nodes
|
||||
are returned and cause the reactor startup to fail.
|
||||
An exception is made for DNS resolution `ErrNetAddressLookup` errors,
|
||||
which are not deemed fatal and are only logged as invalid addresses.
|
||||
|
||||
If none of the configured seed node addresses is valid, and the loaded address
|
||||
book is empty, the reactor is not started and an error is returned.
|
||||
|
||||
## OnStop
|
||||
|
||||
The `OnStop` method implements `BaseService` and stops the PEX reactor.
|
||||
|
||||
The address book routine that periodically saves its content to disk is stopped.
|
||||
|
||||
## GetChannels
|
||||
|
||||
The `GetChannels` method, from the `Reactor` interface, returns the descriptor
|
||||
of the channel used by the PEX protocol.
|
||||
|
||||
The channel ID is `PexChannel` (0), with priority `1`, send queue capacity of
|
||||
`10`, and maximum message size of `64000` bytes.
|
||||
|
||||
## AddPeer
|
||||
|
||||
The `AddPeer` method, from the `Reactor` interface,
|
||||
adds a new peer to the PEX protocol.
|
||||
|
||||
If the new peer is an **inbound peer**, i.e., if the peer has dialed the node,
|
||||
the peer's address is [added to the address book](./addressbook.md#adding-addresses).
|
||||
Since the peer was authenticated when establishing a secret connection with it,
|
||||
the source of the peer address is trusted, and its source is set by the peer itself.
|
||||
In the case of an outbound peer, the node should already have its address in
|
||||
the address book, as the switch has dialed the peer.
|
||||
|
||||
If the peer is an **outbound peer**, i.e., if the node has dialed the peer,
|
||||
and the PEX protocol needs more addresses,
|
||||
the node [sends a PEX request](./pex-protocol.md#Requesting-Addresses) to the peer.
|
||||
The same is not done when inbound peers are added because they are deemed less
|
||||
trustworthy than outbound peers.
|
||||
|
||||
## RemovePeer
|
||||
|
||||
The `RemovePeer` method, from the `Reactor` interface,
|
||||
removes a peer from the PEX protocol.
|
||||
|
||||
The peer's ID is removed from the tables tracking PEX requests
|
||||
[sent](./pex-protocol.md#misbehavior) but not yet replied
|
||||
and PEX requests [received](./pex-protocol.md#misbehavior-1).
|
||||
|
||||
## Receive
|
||||
|
||||
The `Receive` method, from the `Reactor` interface,
|
||||
handles a message received by the PEX protocol.
|
||||
|
||||
A node receives two types of messages as part of the PEX protocol:
|
||||
|
||||
- `PexRequest`: a request for addresses received from a peer, handled as
|
||||
described [here](./pex-protocol.md#providing-addresses)
|
||||
- `PexAddrs`: a list of addresses received from a peer, as a response to a PEX
|
||||
request sent by the node, as described [here](./pex-protocol.md#responses)
|
||||
|
||||
237
spec/p2p/v0.34/switch.md
Normal file
237
spec/p2p/v0.34/switch.md
Normal file
@@ -0,0 +1,237 @@
|
||||
# Switch
|
||||
|
||||
The switch is a core component of the p2p layer.
|
||||
It manages the procedures for [dialing peers](#dialing-peers) and
|
||||
[accepting](#accepting-peers) connections from peers, which are actually
|
||||
implemented by the [transport](./transport.md).
|
||||
It also manages the reactors, i.e., protocols implemented by the node that
|
||||
interact with its peers.
|
||||
Once a connection with a peer is established, the peer is [added](#add-peer) to
|
||||
the switch and all registered reactors.
|
||||
Reactors may also instruct the switch to [stop a peer](#stop-peer), namely
|
||||
disconnect from it.
|
||||
The switch, in this case, makes sure that the peer is removed from all
|
||||
registered reactors.
|
||||
|
||||
## Dialing peers
|
||||
|
||||
Dialing a peer is implemented by the `DialPeerWithAddress` method.
|
||||
|
||||
This method is invoked by the [peer manager](./peer_manager.md#ensure-peers)
|
||||
to dial a peer address and establish a connection with an outbound peer.
|
||||
|
||||
The switch keeps a single dialing routine per peer ID.
|
||||
This is ensured by keeping a synchronized map `dialing` with the IDs of peers
|
||||
to which the node is dialing.
|
||||
A peer ID is added to `dialing` when the `DialPeerWithAddress` method is called
|
||||
for that peer, and it is removed when the method returns for whatever reason.
|
||||
The method returns immediately when invoked for a peer whose ID is already in
|
||||
the `dialing` structure.
|
||||
|
||||
The actual dialing is implemented by the [`Dial`](./transport.md#dial) method
|
||||
of the transport configured for the switch, in the `addOutboundPeerWithConfig`
|
||||
method.
|
||||
If the transport succeeds establishing a connection, the returned `Peer` is
|
||||
added to the switch using the [`addPeer`](#add-peer) method.
|
||||
This operation can fail, returning an error. In this case, the switch invokes
|
||||
the transport's [`Cleanup`](./transport.md#cleanup) method to clean any resources
|
||||
associated with the peer.
|
||||
|
||||
If the transport fails to establish a connection with the peer that is configured
|
||||
as a persistent peer, the switch spawns a routine to [reconnect to the peer](#reconnect-to-peer).
|
||||
If the peer is already in the `reconnecting` state, the spawned routine has no
|
||||
effect and returns immediately.
|
||||
This is in fact a likely scenario, as the `reconnectToPeer` routine relies on
|
||||
this same `DialPeerWithAddress` method for dialing peers.
|
||||
|
||||
### Manual operation
|
||||
|
||||
The `DialPeersAsync` method receives a list of peer addresses (strings)
|
||||
and dials all of them in parallel.
|
||||
It is invoked in two situations:
|
||||
|
||||
- In the [setup](https://github.com/tendermint/tendermint/blob/29c5a062d23aaef653f11195db55c45cd9e02715/node/node.go#L985) of a node, to establish connections with every configured
|
||||
persistent peer
|
||||
- In the RPC package, to implement two unsafe RPC commands, not used in production:
|
||||
[`DialSeeds`](https://github.com/tendermint/tendermint/blob/29c5a062d23aaef653f11195db55c45cd9e02715/rpc/core/net.go#L47) and
|
||||
[`DialPeers`](https://github.com/tendermint/tendermint/blob/29c5a062d23aaef653f11195db55c45cd9e02715/rpc/core/net.go#L87)
|
||||
|
||||
The received list of peer addresses to dial is parsed into `NetAddress` instances.
|
||||
In case of parsing errors, the method returns. An exception is made for
|
||||
DNS resolution `ErrNetAddressLookup` errors, which do not interrupt the procedure.
|
||||
|
||||
As the peer addresses provided to this method are typically not known by the node,
|
||||
contrary to the addresses dialed using the `DialPeerWithAddress` method,
|
||||
they are added to the node's address book, which is persisted to disk.
|
||||
|
||||
The switch dials the provided peers in parallel.
|
||||
The list of peer addresses is randomly shuffled, and for each peer a routine is
|
||||
spawned.
|
||||
Each routine sleeps for a random interval, up to 3 seconds, then invokes the
|
||||
`DialPeerWithAddress` method that actually dials the peer.
|
||||
|
||||
### Reconnect to peer
|
||||
|
||||
The `reconnectToPeer` method is invoked when a connection attempt to a peer fails,
|
||||
and the peer is configured as a persistent peer.
|
||||
|
||||
The `reconnecting` synchronized map keeps the peers in this state, identified
|
||||
by their IDs (string).
|
||||
This should ensure that a single instance of this method is running at any time.
|
||||
The peer is kept in this map while this method is running for it: it is set on
|
||||
the beginning, and removed when the method returns for whatever reason.
|
||||
If the peer is already in the `reconnecting` state, nothing is done.
|
||||
|
||||
The remainder of the method performs multiple connection attempts to the peer,
|
||||
via `DialPeerWithAddress` method.
|
||||
If a connection attempt succeeds, the method returns and the routine finishes.
|
||||
The same applies when an `ErrCurrentlyDialingOrExistingAddress` error is
|
||||
returned by the dialing method, as it indicates that the peer is already connected
|
||||
or that another routine is attempting to (re)connect to it.
|
||||
|
||||
A first set of connection attempts is done at (about) regular intervals.
|
||||
More precisely, between two attempts, the switch waits for an interval of
|
||||
`reconnectInterval`, hard-coded to 5 seconds, plus a random jitter up to
|
||||
`dialRandomizerIntervalMilliseconds`, hard-coded to 3 seconds.
|
||||
At most `reconnectAttempts`, hard-coded to 20, are made using this
|
||||
regular-interval approach.
|
||||
|
||||
A second set of connection attempts is done with exponentially increasing
|
||||
intervals.
|
||||
The base interval `reconnectBackOffBaseSeconds` is hard-coded to 3 seconds,
|
||||
which is also the increasing factor.
|
||||
The exponentially increasing dialing interval is adjusted as well by a random
|
||||
jitter up to `dialRandomizerIntervalMilliseconds`.
|
||||
At most `reconnectBackOffAttempts`, hard-coded to 10, are made using this approach.
|
||||
|
||||
> Note: the first sleep interval, to which a random jitter is applied, is 1,
|
||||
> not `reconnectBackOffBaseSeconds`, as the first exponent is `0`...
|
||||
|
||||
## Accepting peers
|
||||
|
||||
The `acceptRoutine` method is a persistent routine that handles connections
|
||||
accepted by the transport configured for the switch.
|
||||
|
||||
The [`Accept`](./transport.md#accept) method of the configured transport
|
||||
returns a `Peer` with which an inbound connection was established.
|
||||
The switch accepts a new peer if the maximum number of inbound peers was not
|
||||
reached, or if the peer was configured as an _unconditional peer_.
|
||||
The maximum number of inbound peers is determined by the `MaxNumInboundPeers`
|
||||
configuration parameter, whose default value is `40`.
|
||||
|
||||
If accepted, the peer is added to the switch using the [`addPeer`](#add-peer) method.
|
||||
If the switch does not accept the established incoming connection, or if the
|
||||
`addPeer` method returns an error, the switch invokes the transport's
|
||||
[`Cleanup`](./transport.md#cleanup) method to clean any resources associated
|
||||
with the peer.
|
||||
|
||||
The transport's `Accept` method can also return a number of errors.
|
||||
Errors of `ErrRejected` or `ErrFilterTimeout` types are ignored,
|
||||
an `ErrTransportClosed` causes the accept routine to be interrupted,
|
||||
while other errors cause the routine to panic.
|
||||
|
||||
> TODO: which errors can cause the routine to panic?
|
||||
|
||||
## Add peer
|
||||
|
||||
The `addPeer` method adds a peer to the switch,
|
||||
either after dialing (by `addOutboundPeerWithConfig`, called by `DialPeerWithAddress`)
|
||||
a peer and establishing an outbound connection,
|
||||
or after accepting (`acceptRoutine`) a peer and establishing an inbound connection.
|
||||
|
||||
The first step is to invoke the `filterPeer` method.
|
||||
It checks whether the peer is already in the set of connected peers,
|
||||
and whether any of the configured `peerFilter` methods reject the peer.
|
||||
If the peer is already present or it is rejected by any filter, the `addPeer`
|
||||
method fails and returns an error.
|
||||
|
||||
Then, the new peer is started, added to the set of connected peers, and added
|
||||
to all reactors.
|
||||
More precisely, the new peer's information is first provided to every
|
||||
reactor (`InitPeer` method).
|
||||
Next, the peer's sending and receiving routines are started, and the peer is
|
||||
added to the set of connected peers.
|
||||
These two operations can fail, causing `addPeer` to return an error.
|
||||
Then, in the absence of previous errors, the peer is added to every reactor (`AddPeer` method).
|
||||
|
||||
> Adding the peer to the peer set returns a `ErrSwitchDuplicatePeerID` error
|
||||
> when a peer with the same ID is already present.
|
||||
>
|
||||
> TODO: Starting a peer could be reduced as starting the MConn with that peer?
|
||||
|
||||
## Stop peer
|
||||
|
||||
There are two methods for stopping a peer, namely disconnecting from it, and
|
||||
removing it from the table of connected peers.
|
||||
|
||||
The `StopPeerForError` method is invoked to stop a peer due to an external
|
||||
error, which is provided to the method as a generic "reason".
|
||||
|
||||
The `StopPeerGracefully` method stops a peer in the absence of errors or, more
|
||||
precisely, not providing to the switch any "reason" for that.
|
||||
|
||||
In both cases the `Peer` instance is stopped, the peer is removed from all
|
||||
registered reactors, and finally from the list of connected peers.
|
||||
|
||||
> Issue https://github.com/tendermint/tendermint/issues/3338 is mentioned in
|
||||
> the internal `stopAndRemovePeer` method explaining why removing the peer from
|
||||
> the list of connected peers is the last action taken.
|
||||
|
||||
When there is a "reason" for stopping the peer (`StopPeerForError` method)
|
||||
and the peer is a persistent peer, the method creates a routine to attempt
|
||||
reconnecting to the peer address, using the `reconnectToPeer` method.
|
||||
If the peer is an outbound peer, the peer's address is known, since the switch
|
||||
has dialed the peer.
|
||||
Otherwise, the peer address is retrieved from the `NodeInfo` instance from the
|
||||
connection handshake.
|
||||
|
||||
## Add reactor
|
||||
|
||||
The `AddReactor` method registers a `Reactor` to the switch.
|
||||
|
||||
The reactor is associated to the set of channel ids it employs.
|
||||
Two reactors (in the same node) cannot share the same channel id.
|
||||
|
||||
There is a call back to the reactor, in which the switch passes itself to the
|
||||
reactor.
|
||||
|
||||
## Remove reactor
|
||||
|
||||
The `RemoveReactor` method unregisters a `Reactor` from the switch.
|
||||
|
||||
The reactor is disassociated from the set of channel ids it employs.
|
||||
|
||||
There is a call back to the reactor, in which the switch passes `nil` to the
|
||||
reactor.
|
||||
|
||||
## OnStart
|
||||
|
||||
This is a `BaseService` method.
|
||||
|
||||
All registered reactors are started.
|
||||
|
||||
The switch's `acceptRoutine` is started.
|
||||
|
||||
## OnStop
|
||||
|
||||
This is a `BaseService` method.
|
||||
|
||||
All (connected) peers are stopped and removed from the peer's list using the
|
||||
`stopAndRemovePeer` method.
|
||||
|
||||
All registered reactors are stopped.
|
||||
|
||||
## Broadcast
|
||||
|
||||
This method broadcasts a message on a channel, by sending the message in
|
||||
parallel to all connected peers.
|
||||
|
||||
The method spawns a thread for each connected peer, invoking the `Send` method
|
||||
provided by each `Peer` instance with the provided message and channel ID.
|
||||
The return values (booleans) of these calls are redirected to a channel that is
|
||||
returned by the method.
|
||||
|
||||
> TODO: detail where this method is invoked:
|
||||
> - By the consensus protocol, in `broadcastNewRoundStepMessage`,
|
||||
> `broadcastNewValidBlockMessage`, and `broadcastHasVoteMessage`
|
||||
> - By the state sync protocol
|
||||
222
spec/p2p/v0.34/transport.md
Normal file
222
spec/p2p/v0.34/transport.md
Normal file
@@ -0,0 +1,222 @@
|
||||
# Transport
|
||||
|
||||
The transport establishes secure and authenticated connections with peers.
|
||||
|
||||
The transport [`Dial`](#dial)s peer addresses to establish outbound connections,
|
||||
and [`Listen`](#listen)s in a configured network address
|
||||
to [`Accept`](#accept) inbound connections from peers.
|
||||
|
||||
The transport establishes raw TCP connections with peers
|
||||
and [upgrades](#connection-upgrade) them into authenticated secret connections.
|
||||
The established secret connection is then wrapped into `Peer` instance, which
|
||||
is returned to the caller, typically the [switch](./switch.md).
|
||||
|
||||
## Dial
|
||||
|
||||
The `Dial` method is used by the switch to establish an outbound connection with a peer.
|
||||
It is a synchronous method, which blocks until a connection is established or an error occurs.
|
||||
The method returns an outbound `Peer` instance wrapping the established connection.
|
||||
|
||||
The transport first dials the provided peer's address to establish a raw TCP connection.
|
||||
The dialing maximum duration is determined by `dialTimeout`, hard-coded to 1 second.
|
||||
The established raw connection is then submitted to a set of [filters](#connection-filtering),
|
||||
which can reject it.
|
||||
If the connection is not rejected, it is recorded in the table of established connections.
|
||||
|
||||
The established raw TCP connection is then [upgraded](#connection-upgrade) into
|
||||
an authenticated secret connection.
|
||||
This procedure should ensure, in particular, that the public key of the remote peer
|
||||
matches the ID of the dialed peer, which is part of the peer address provided to this method.
|
||||
In the absence of errors,
|
||||
the established secret connection (`conn.SecretConnection` type)
|
||||
and the information about the peer (`NodeInfo` record) retrieved and verified
|
||||
during the version handshake,
|
||||
are wrapped into an outbound `Peer` instance and returned to the switch.
|
||||
|
||||
## Listen
|
||||
|
||||
The `Listen` method produces a TCP listener instance for the provided network
|
||||
address, and spawns an `acceptPeers` routine to handle the raw connections
|
||||
accepted by the listener.
|
||||
The `NetAddress` method exports the listen address configured for the transport.
|
||||
|
||||
The maximum number of simultaneous incoming connections accepted by the listener
|
||||
is bound to `MaxNumInboundPeer` plus the configured number of unconditional peers,
|
||||
using the `MultiplexTransportMaxIncomingConnections` option,
|
||||
in the node [initialization](https://github.com/tendermint/tendermint/blob/29c5a062d23aaef653f11195db55c45cd9e02715/node/node.go#L563).
|
||||
|
||||
This method is called when a node is [started](https://github.com/tendermint/tendermint/blob/29c5a062d23aaef653f11195db55c45cd9e02715/node/node.go#L972).
|
||||
In case of errors, the `acceptPeers` routine is not started and the error is returned.
|
||||
|
||||
## Accept
|
||||
|
||||
The `Accept` method returns to the switch inbound connections established with a peer.
|
||||
It is a synchronous method, which blocks until a connection is accepted or an error occurs.
|
||||
The method returns an inbound `Peer` instance wrapping the established connection.
|
||||
|
||||
The transport handles incoming connections in the `acceptPeers` persistent routine.
|
||||
This routine is started by the [`Listen`](#listen) method
|
||||
and accepts raw connections from a TCP listener.
|
||||
A new routine is spawned for each accepted connection.
|
||||
The raw connection is submitted to a set of [filters](#connection-filtering),
|
||||
which can reject it.
|
||||
If the connection is not rejected, it is recorded in the table of established connections.
|
||||
|
||||
The established raw TCP connection is then [upgraded](#connection-upgrade) into
|
||||
an authenticated secret connection.
|
||||
The established secret connection (`conn.SecretConnection` type),
|
||||
the information about the peer (`NodeInfo` record) retrieved and verified
|
||||
during the version handshake,
|
||||
as well as any error returned in this process, are added to a queue of accepted connections.
|
||||
This queue is consumed by the `Accept` method.
|
||||
|
||||
> Handling accepted connections asynchronously was introduced due to this issue:
|
||||
> https://github.com/tendermint/tendermint/issues/2047
|
||||
|
||||
## Connection Filtering
|
||||
|
||||
The `filterConn` method is invoked for every new raw connection established by the transport.
|
||||
Its main goal is to prevent the transport from maintaining duplicated connections with the same peer.
|
||||
It also runs a set of configured connection filters.
|
||||
|
||||
The transport keeps a table `conns` of established connections.
|
||||
The table maps the remote address returned by a generic connection to a list of
|
||||
IP addresses, to which the connection remote address is resolved.
|
||||
If the remote address of the new connection is already present in the table,
|
||||
the connection is rejected.
|
||||
Otherwise, the connection's remote address is resolved into a list of IPs,
|
||||
which are recorded in the established connections table.
|
||||
|
||||
The connection and the resolved IPs are then passed through a set of connection filters,
|
||||
configured via the `MultiplexTransportConnFilters` transport option.
|
||||
The maximum duration for the filters execution, which is performed in parallel,
|
||||
is determined by `filterTimeout`.
|
||||
Its default value is 5 seconds,
|
||||
which can be changed using the `MultiplexTransportFilterTimeout` transport option.
|
||||
|
||||
If the connection and the resolved remote addresses are not filtered out,
|
||||
the transport registers them into the `conns` table and returns.
|
||||
|
||||
In case of errors, the connection is removed from the table of established
|
||||
connections and closed.
|
||||
|
||||
### Errors
|
||||
|
||||
If the address of the new connection is already present in the `conns` table,
|
||||
an `ErrRejected` error with the `isDuplicate` reason is returned.
|
||||
|
||||
If the IP resolution of the connection's remote address fails,
|
||||
an `AddrError` or `DNSError` error is returned.
|
||||
|
||||
If any of the filters reject the connection,
|
||||
an `ErrRejected` error with the `isRejected` reason is returned.
|
||||
|
||||
If the filters execution times out,
|
||||
an `ErrFilterTimeout` error is returned.
|
||||
|
||||
## Connection Upgrade
|
||||
|
||||
The `upgrade` method is invoked for every new raw connection established by the
|
||||
transport that was not [filtered out](#connection-filtering).
|
||||
It upgrades an established raw TCP connection into a secret authenticated
|
||||
connection, and validates the information provided by the peer.
|
||||
|
||||
This is a complex procedure, that can be summarized by the following three
|
||||
message exchanges between the node and the new peer:
|
||||
|
||||
1. Encryption: the nodes produce ephemeral key pairs and exchange ephemeral
|
||||
public keys, from which are derived: (i) a pair of secret keys used to
|
||||
encrypt the data exchanged between the nodes, and (ii) a challenge message.
|
||||
1. Authentication: the nodes exchange their persistent public keys and a
|
||||
signature of the challenge message produced with their persistent
|
||||
private keys. This allows validating the peer's persistent public key,
|
||||
which plays the role of node ID.
|
||||
1. Version handshake: nodes exchange and validate each other `NodeInfo` records.
|
||||
These records contain, among other fields, their node IDs, the network/chain
|
||||
ID they are part of, and the list of supported channel IDs.
|
||||
|
||||
Steps (1) and (2) are implemented in the `conn` package.
|
||||
In case of success, they produce the secret connection that is actually used by
|
||||
the node to communicate with the peer.
|
||||
An overview of this procedure, which implements the station-to-station (STS)
|
||||
[protocol][sts-paper] ([PDF][sts-paper-pdf]), can be found [here][peer-sts].
|
||||
The maximum duration for establishing a secret connection with the peer is
|
||||
defined by `handshakeTimeout`, hard-coded to 3 seconds.
|
||||
|
||||
The established secret connection stores the persistent public key of the peer,
|
||||
which has been validated via the challenge authentication of step (2).
|
||||
If the connection being upgraded is an outbound connection, i.e., if the node has
|
||||
dialed the peer, the dialed peer's ID is compared to the peer's persistent public key:
|
||||
if they do not match, the connection is rejected.
|
||||
This verification is not performed in the case of inbound (accepted) connections,
|
||||
as the node does not know a priori the remote node's ID.
|
||||
|
||||
Step (3), the version handshake, is performed by the transport.
|
||||
Its maximum duration is also defined by `handshakeTimeout`, hard-coded to 3 seconds.
|
||||
The version handshake retrieves the `NodeInfo` record of the new peer,
|
||||
which can be rejected for multiple reasons, listed [here][peer-handshake].
|
||||
|
||||
If the connection upgrade succeeds, the method returns the established secret
|
||||
connection, an instance of `conn.SecretConnection` type,
|
||||
and the `NodeInfo` record of the peer.
|
||||
|
||||
In case of errors, the connection is removed from the table of established
|
||||
connections and closed.
|
||||
|
||||
### Errors
|
||||
|
||||
The timeouts for steps (1) and (2), and for step (3), are configured as the
|
||||
deadline for operations on the TCP connection that is being upgraded.
|
||||
If this deadline is reached, the connection produces an
|
||||
`os.ErrDeadlineExceeded` error, returned by the corresponding step.
|
||||
|
||||
Any error produced when establishing a secret connection with the peer (steps 1 and 2) or
|
||||
during the version handshake (step 3), including timeouts,
|
||||
is encapsulated into an `ErrRejected` error with reason `isAuthFailure` and returned.
|
||||
|
||||
If the upgraded connection is an outbound connection, and the peer ID learned in step (2)
|
||||
does not match the dialed peer's ID,
|
||||
an `ErrRejected` error with reason `isAuthFailure` is returned.
|
||||
|
||||
If the peer's `NodeInfo` record, retrieved in step (3), is invalid,
|
||||
or if it reports a node ID that does not match the peer ID learned in step (2),
|
||||
an `ErrRejected` error with reason `isAuthFailure` is returned.
|
||||
If it reports a node ID equal to the local node ID,
|
||||
an `ErrRejected` error with reason `isSelf` is returned.
|
||||
If it is not compatible with the local `NodeInfo`,
|
||||
an `ErrRejected` error with reason `isIncompatible` is returned.
|
||||
|
||||
## Close
|
||||
|
||||
The `Close` method closes the TCP listener created by the `Listen` method,
|
||||
and sends a signal for interrupting the `acceptPeers` routine.
|
||||
|
||||
This method is called when a node is [stopped](https://github.com/tendermint/tendermint/blob/46badfabd9d5491c78283a0ecdeb695e21785508/node/node.go#L1019).
|
||||
|
||||
## Cleanup
|
||||
|
||||
The `Cleanup` method receives a `Peer` instance,
|
||||
and removes the connection established with a peer from the table of established connections.
|
||||
It also invokes the `Peer` interface method to close the connection associated with a peer.
|
||||
|
||||
It is invoked when the connection with a peer is closed.
|
||||
|
||||
## Supported channels
|
||||
|
||||
The `AddChannel` method registers a channel in the transport.
|
||||
|
||||
The channel ID is added to the list of supported channel IDs,
|
||||
stored in the local `NodeInfo` record.
|
||||
|
||||
The `NodeInfo` record is exchanged with peers in the version handshake.
|
||||
For this reason, this method is not invoked with a started transport.
|
||||
|
||||
> The only call to this method is performed in the `CustomReactors` constructor
|
||||
> option of a node, i.e., before the node is started.
|
||||
> Note that the default list of supported channel IDs, including the default reactors,
|
||||
> is provided to the transport as its original `NodeInfo` record.
|
||||
|
||||
[peer-sts]: https://github.com/tendermint/tendermint/blob/main/spec/p2p/peer.md#authenticated-encryption-handshake
|
||||
[peer-handshake]:https://github.com/tendermint/tendermint/blob/main/spec/p2p/peer.md#tendermint-version-handshake
|
||||
[sts-paper]: https://link.springer.com/article/10.1007/BF00124891
|
||||
[sts-paper-pdf]: https://github.com/tendermint/tendermint/blob/0.1/docs/sts-final.pdf
|
||||
239
spec/p2p/v0.34/types.md
Normal file
239
spec/p2p/v0.34/types.md
Normal file
@@ -0,0 +1,239 @@
|
||||
# Types adopted in the p2p implementation
|
||||
|
||||
This document lists the packages and source files, excluding test units, that
|
||||
implement the p2p layer, and summarizes the main types they implement.
|
||||
Types play the role of classes in Go.
|
||||
|
||||
The reference version for this documentation is the branch
|
||||
[`v0.34.x`](https://github.com/tendermint/tendermint/tree/v0.34.x/p2p).
|
||||
|
||||
State of August 2022.
|
||||
|
||||
## Package `p2p`
|
||||
|
||||
Implementation of the p2p layer of Tendermint.
|
||||
|
||||
### `base_reactor.go`
|
||||
|
||||
`Reactor` interface.
|
||||
|
||||
`BaseReactor` implements `Reactor`.
|
||||
|
||||
**Not documented yet**.
|
||||
|
||||
### `conn_set.go`
|
||||
|
||||
`ConnSet` interface, a "lookup table for connections and their ips".
|
||||
|
||||
Internal type `connSet` implements the `ConnSet` interface.
|
||||
|
||||
Used by the [transport](#transportgo) to store connected peers.
|
||||
|
||||
### `errors.go`
|
||||
|
||||
Defines several error types.
|
||||
|
||||
`ErrRejected` enumerates a number of reasons for which a peer was rejected.
|
||||
Mainly produced by the [transport](#transportgo),
|
||||
but also by the [switch](#switchgo).
|
||||
|
||||
`ErrSwitchDuplicatePeerID` is produced by the `PeerSet` used by the [switch](#switchgo).
|
||||
|
||||
`ErrSwitchConnectToSelf` is handled by the [switch](#switchgo),
|
||||
but currently is not produced outside tests.
|
||||
|
||||
`ErrSwitchAuthenticationFailure` is handled by the [PEX reactor](#pex_reactorgo),
|
||||
but currently is not produced outside tests.
|
||||
|
||||
`ErrTransportClosed` is produced by the [transport](#transportgo)
|
||||
and handled by the [switch](#switchgo).
|
||||
|
||||
`ErrNetAddressNoID`, `ErrNetAddressInvalid`, and `ErrNetAddressLookup`
|
||||
are produced when parsing a string to create an instance of `NetAddress`.
|
||||
They can be returned in the setup of the [switch](#switchgo)
|
||||
and of the [PEX reactor](#pex_reactorgo),
|
||||
as well when the [transport](#transportgo) validates a `NodeInfo`, as part of
|
||||
the connection handshake.
|
||||
|
||||
`ErrCurrentlyDialingOrExistingAddress` is produced by the [switch](#switchgo),
|
||||
and handled by the switch and the [PEX reactor](#pex_reactorgo).
|
||||
|
||||
### `fuzz.go`
|
||||
|
||||
For testing purposes.
|
||||
|
||||
`FuzzedConnection` wraps a `net.Conn` and injects random delays.
|
||||
|
||||
### `key.go`
|
||||
|
||||
`NodeKey` is the persistent key of a node, namely its private key.
|
||||
|
||||
The `ID` of a node is a string representing the node's public key.
|
||||
|
||||
### `metrics.go`
|
||||
|
||||
Prometheus `Metrics` exposed by the p2p layer.
|
||||
|
||||
### `netaddress.go`
|
||||
|
||||
Type `NetAddress` contains the `ID` and the network address (IP and port) of a node.
|
||||
|
||||
The API of the [address book](#addrbookgo) receives and returns `NetAddress` instances.
|
||||
|
||||
This source file was adapted from [`btcd`](https://github.com/btcsuite/btcd),
|
||||
a Go implementation of Bitcoin.
|
||||
|
||||
### `node_info.go`
|
||||
|
||||
Interface `NodeInfo` stores the basic information about a node exchanged with a
|
||||
peer during the handshake.
|
||||
|
||||
It is implemented by `DefaultNodeInfo` type.
|
||||
|
||||
The [switch](#switchgo) stores the local `NodeInfo`.
|
||||
|
||||
The `NodeInfo` of connected peers is produced by the
|
||||
[transport](#transportgo) during the handshake, and stored in [`Peer`](#peergo) instances.
|
||||
|
||||
### `peer.go`
|
||||
|
||||
Interface `Peer` represents a connected peer.
|
||||
|
||||
It is implemented by the internal `peer` type.
|
||||
|
||||
The [transport](#transportgo) API methods return `Peer` instances,
|
||||
wrapping established secure connection with peers.
|
||||
|
||||
The [switch](#switchgo) API methods receive `Peer` instances.
|
||||
The switch stores connected peers in a `PeerSet`.
|
||||
|
||||
The [`Reactor`](#base_reactorgo) methods, invoked by the switch, receive `Peer` instances.
|
||||
|
||||
### `peer_set.go`
|
||||
|
||||
Interface `IPeerSet` offers methods to access a table of [`Peer`](#peergo) instances.
|
||||
|
||||
Type `PeerSet` implements a thread-safe table of [`Peer`](#peergo) instances,
|
||||
used by the [switch](#switchgo).
|
||||
|
||||
The switch provides limited access to this table by returning an `IPeerSet`
|
||||
instance, used by the [PEX reactor](#pex_reactorgo).
|
||||
|
||||
### `switch.go`
|
||||
|
||||
Documented in [switch](./switch.md).
|
||||
|
||||
The `Switch` implements the [peer manager](./peer_manager.md) role for inbound peers.
|
||||
|
||||
[`Reactor`](#base_reactorgo)s have access to the `Switch` and may invoke its methods.
|
||||
This includes the [PEX reactor](#pex_reactorgo).
|
||||
|
||||
### `transport.go`
|
||||
|
||||
Documented in [transport](./transport.md).
|
||||
|
||||
The `Transport` interface is implemented by `MultiplexTransport`.
|
||||
|
||||
The [switch](#switchgo) contains a `Transport` and uses it to establish
|
||||
connections with peers.
|
||||
|
||||
### `types.go`
|
||||
|
||||
Aliases for p2p's `conn` package types.
|
||||
|
||||
## Package `p2p.conn`
|
||||
|
||||
Implements the connection between Tendermint nodes,
|
||||
which is encrypted, authenticated, and multiplexed.
|
||||
|
||||
### `connection.go`
|
||||
|
||||
Implements the `MConnection` type and the `Channel` abstraction.
|
||||
|
||||
A `MConnection` multiplexes a generic network connection (`net.Conn`) into
|
||||
multiple independent `Channel`s, used by different [`Reactor`](#base_reactorgo)s.
|
||||
|
||||
A [`Peer`](#peergo) stores the `MConnection` instance used to interact with a
|
||||
peer, which multiplexes a [`SecretConnection`](#secret_connectiongo).
|
||||
|
||||
### `conn_go110.go`
|
||||
|
||||
Support for go 1.10.
|
||||
|
||||
### `secret_connection.go`
|
||||
|
||||
Implements the `SecretConnection` type, which is an encrypted authenticated
|
||||
connection built atop a raw network (TCP) connection.
|
||||
|
||||
A [`Peer`](#peergo) stores the `SecretConnection` established by the transport,
|
||||
which is the underlying connection multiplexed by [`MConnection`](#connectiongo).
|
||||
|
||||
As briefly documented in the [transport](./transport.md#Connection-Upgrade),
|
||||
a `SecretConnection` implements the Station-To-Station (STS) protocol.
|
||||
|
||||
The `SecretConnection` type implements the `net.Conn` interface,
|
||||
which is a generic network connection.
|
||||
|
||||
## Package `p2p.mock`
|
||||
|
||||
Mock implementations of [`Peer`](#peergo) and [`Reactor`](#base_reactorgo) interfaces.
|
||||
|
||||
## Package `p2p.mocks`
|
||||
|
||||
Code generated by `mockery`.
|
||||
|
||||
## Package `p2p.pex`
|
||||
|
||||
Implementation of the [PEX reactor](./pex.md).
|
||||
|
||||
### `addrbook.go`
|
||||
|
||||
Documented in [address book](./addressbook.md).
|
||||
|
||||
This source file was adapted from [`btcd`](https://github.com/btcsuite/btcd),
|
||||
a Go implementation of Bitcoin.
|
||||
|
||||
### `errors.go`
|
||||
|
||||
A number of errors produced and handled by the [address book](#addrbookgo).
|
||||
|
||||
`ErrAddrBookNilAddr` is produced by the address book, but handled (logged) by
|
||||
the [PEX reactor](#pex_reactorgo).
|
||||
|
||||
`ErrUnsolicitedList` is produced and handled by the [PEX protocol](#pex_reactorgo).
|
||||
|
||||
### `file.go`
|
||||
|
||||
Implements the [address book](#addrbookgo) persistence.
|
||||
|
||||
### `known_address.go`
|
||||
|
||||
Type `knownAddress` represents an address stored in the [address book](#addrbookgo).
|
||||
|
||||
### `params.go`
|
||||
|
||||
Constants used by the [address book](#addrbookgo).
|
||||
|
||||
### `pex_reactor.go`
|
||||
|
||||
Implementation of the [PEX reactor](./pex.md), which is a [`Reactor`](#base_reactorgo).
|
||||
|
||||
This includes the implementation of the [PEX protocol](./pex-protocol.md)
|
||||
and of the [peer manager](./peer_manager.md) role for outbound peers.
|
||||
|
||||
The PEX reactor also manages an [address book](#addrbookgo) instance.
|
||||
|
||||
## Package `p2p.trust`
|
||||
|
||||
Go documentation of `Metric` type:
|
||||
|
||||
> // Metric - keeps track of peer reliability
|
||||
> // See tendermint/docs/architecture/adr-006-trust-metric.md for details
|
||||
|
||||
Not imported by any other Tendermint source file.
|
||||
|
||||
## Package `p2p.upnp`
|
||||
|
||||
This package implementation was taken from "taipei-torrent".
|
||||
|
||||
It is used by the `probe-upnp` command of the Tendermint binary.
|
||||
@@ -10,6 +10,8 @@ import (
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
dbm "github.com/tendermint/tm-db"
|
||||
|
||||
abciclientmocks "github.com/tendermint/tendermint/abci/client/mocks"
|
||||
abci "github.com/tendermint/tendermint/abci/types"
|
||||
abcimocks "github.com/tendermint/tendermint/abci/types/mocks"
|
||||
@@ -29,7 +31,6 @@ import (
|
||||
"github.com/tendermint/tendermint/types"
|
||||
tmtime "github.com/tendermint/tendermint/types/time"
|
||||
"github.com/tendermint/tendermint/version"
|
||||
dbm "github.com/tendermint/tm-db"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -49,7 +50,7 @@ func TestApplyBlock(t *testing.T) {
|
||||
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
|
||||
DiscardABCIResponses: false,
|
||||
})
|
||||
blockStore := store.NewBlockStore(dbm.NewMemDB())
|
||||
blockStore := store.NewBlockStore(dbm.NewMemDB(), store.BlockStoreOptions{})
|
||||
|
||||
mp := &mpmocks.Mempool{}
|
||||
mp.On("Lock").Return()
|
||||
@@ -234,7 +235,7 @@ func TestBeginBlockByzantineValidators(t *testing.T) {
|
||||
mock.Anything,
|
||||
mock.Anything).Return(nil)
|
||||
|
||||
blockStore := store.NewBlockStore(dbm.NewMemDB())
|
||||
blockStore := store.NewBlockStore(dbm.NewMemDB(), store.BlockStoreOptions{})
|
||||
|
||||
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(),
|
||||
mp, evpool, blockStore)
|
||||
@@ -272,7 +273,7 @@ func TestProcessProposal(t *testing.T) {
|
||||
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
|
||||
DiscardABCIResponses: false,
|
||||
})
|
||||
blockStore := store.NewBlockStore(dbm.NewMemDB())
|
||||
blockStore := store.NewBlockStore(dbm.NewMemDB(), store.BlockStoreOptions{})
|
||||
eventBus := types.NewEventBus()
|
||||
err = eventBus.Start()
|
||||
require.NoError(t, err)
|
||||
@@ -493,7 +494,7 @@ func TestEndBlockValidatorUpdates(t *testing.T) {
|
||||
mock.Anything).Return(nil)
|
||||
mp.On("ReapMaxBytesMaxGas", mock.Anything, mock.Anything).Return(types.Txs{})
|
||||
|
||||
blockStore := store.NewBlockStore(dbm.NewMemDB())
|
||||
blockStore := store.NewBlockStore(dbm.NewMemDB(), store.BlockStoreOptions{})
|
||||
blockExec := sm.NewBlockExecutor(
|
||||
stateStore,
|
||||
log.TestingLogger(),
|
||||
@@ -569,7 +570,7 @@ func TestEndBlockValidatorUpdatesResultingInEmptySet(t *testing.T) {
|
||||
stateStore := sm.NewStore(stateDB, sm.StoreOptions{
|
||||
DiscardABCIResponses: false,
|
||||
})
|
||||
blockStore := store.NewBlockStore(dbm.NewMemDB())
|
||||
blockStore := store.NewBlockStore(dbm.NewMemDB(), store.BlockStoreOptions{})
|
||||
blockExec := sm.NewBlockExecutor(
|
||||
stateStore,
|
||||
log.TestingLogger(),
|
||||
@@ -623,7 +624,7 @@ func TestEmptyPrepareProposal(t *testing.T) {
|
||||
mock.Anything).Return(nil)
|
||||
mp.On("ReapMaxBytesMaxGas", mock.Anything, mock.Anything).Return(types.Txs{})
|
||||
|
||||
blockStore := store.NewBlockStore(dbm.NewMemDB())
|
||||
blockStore := store.NewBlockStore(dbm.NewMemDB(), store.BlockStoreOptions{})
|
||||
blockExec := sm.NewBlockExecutor(
|
||||
stateStore,
|
||||
log.TestingLogger(),
|
||||
@@ -666,7 +667,7 @@ func TestPrepareProposalTxsAllIncluded(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
defer proxyApp.Stop() //nolint:errcheck // ignore for tests
|
||||
|
||||
blockStore := store.NewBlockStore(dbm.NewMemDB())
|
||||
blockStore := store.NewBlockStore(dbm.NewMemDB(), store.BlockStoreOptions{})
|
||||
blockExec := sm.NewBlockExecutor(
|
||||
stateStore,
|
||||
log.TestingLogger(),
|
||||
@@ -719,7 +720,7 @@ func TestPrepareProposalReorderTxs(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
defer proxyApp.Stop() //nolint:errcheck // ignore for tests
|
||||
|
||||
blockStore := store.NewBlockStore(dbm.NewMemDB())
|
||||
blockStore := store.NewBlockStore(dbm.NewMemDB(), store.BlockStoreOptions{})
|
||||
blockExec := sm.NewBlockExecutor(
|
||||
stateStore,
|
||||
log.TestingLogger(),
|
||||
@@ -774,7 +775,7 @@ func TestPrepareProposalErrorOnTooManyTxs(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
defer proxyApp.Stop() //nolint:errcheck // ignore for tests
|
||||
|
||||
blockStore := store.NewBlockStore(dbm.NewMemDB())
|
||||
blockStore := store.NewBlockStore(dbm.NewMemDB(), store.BlockStoreOptions{})
|
||||
blockExec := sm.NewBlockExecutor(
|
||||
stateStore,
|
||||
log.NewNopLogger(),
|
||||
@@ -824,7 +825,7 @@ func TestPrepareProposalErrorOnPrepareProposalError(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
defer proxyApp.Stop() //nolint:errcheck // ignore for tests
|
||||
|
||||
blockStore := store.NewBlockStore(dbm.NewMemDB())
|
||||
blockStore := store.NewBlockStore(dbm.NewMemDB(), store.BlockStoreOptions{})
|
||||
blockExec := sm.NewBlockExecutor(
|
||||
stateStore,
|
||||
log.NewNopLogger(),
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"strconv"
|
||||
|
||||
"github.com/google/orderedcode"
|
||||
|
||||
"github.com/tendermint/tendermint/libs/pubsub/query/syntax"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
@@ -34,13 +34,22 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
|
||||
Name: "validator_set_updates",
|
||||
Help: "ValidatorSetUpdates is the total number of times the application has udated the validator set since process start.",
|
||||
}, labels).With(labelsAndValues...),
|
||||
StoreAccessDurationSeconds: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "store_access_duration_seconds",
|
||||
Help: "The duration of accesses to the state store labeled by which method was called on the store.",
|
||||
|
||||
Buckets: stdprometheus.ExponentialBuckets(0.00002, 5, 5),
|
||||
}, append(labels, "method")).With(labelsAndValues...),
|
||||
}
|
||||
}
|
||||
|
||||
func NopMetrics() *Metrics {
|
||||
return &Metrics{
|
||||
BlockProcessingTime: discard.NewHistogram(),
|
||||
ConsensusParamUpdates: discard.NewCounter(),
|
||||
ValidatorSetUpdates: discard.NewCounter(),
|
||||
BlockProcessingTime: discard.NewHistogram(),
|
||||
ConsensusParamUpdates: discard.NewCounter(),
|
||||
ValidatorSetUpdates: discard.NewCounter(),
|
||||
StoreAccessDurationSeconds: discard.NewHistogram(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -24,4 +24,8 @@ type Metrics struct {
|
||||
// ValidatorSetUpdates is the total number of times the application has
|
||||
// udated the validator set since process start.
|
||||
ValidatorSetUpdates metrics.Counter
|
||||
|
||||
// The duration of accesses to the state store labeled by which method
|
||||
// was called on the store.
|
||||
StoreAccessDurationSeconds metrics.Histogram `metrics_buckettype:"exp" metrics_bucketsizes:"0.00002, 5, 5" metrics_labels:"method"`
|
||||
}
|
||||
|
||||
@@ -87,7 +87,7 @@ func TestRollback(t *testing.T) {
|
||||
|
||||
func TestRollbackHard(t *testing.T) {
|
||||
const height int64 = 100
|
||||
blockStore := store.NewBlockStore(dbm.NewMemDB())
|
||||
blockStore := store.NewBlockStore(dbm.NewMemDB(), store.BlockStoreOptions{})
|
||||
stateStore := state.NewStore(dbm.NewMemDB(), state.StoreOptions{DiscardABCIResponses: false})
|
||||
|
||||
valSet, _ := types.RandValidatorSet(5, 10)
|
||||
|
||||
@@ -3,8 +3,10 @@ package state
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/cosmos/gogoproto/proto"
|
||||
"github.com/go-kit/kit/metrics"
|
||||
dbm "github.com/tendermint/tm-db"
|
||||
|
||||
abci "github.com/tendermint/tendermint/abci/types"
|
||||
@@ -80,6 +82,8 @@ type Store interface {
|
||||
type dbStore struct {
|
||||
db dbm.DB
|
||||
|
||||
metrics *Metrics
|
||||
|
||||
StoreOptions
|
||||
}
|
||||
|
||||
@@ -89,18 +93,31 @@ type StoreOptions struct {
|
||||
// the store will maintain only the response object from the latest
|
||||
// height.
|
||||
DiscardABCIResponses bool
|
||||
|
||||
// Metrics defines the metrics collector to use for the state store.
|
||||
// if none is specified then a NopMetrics collector is used.
|
||||
Metrics *Metrics
|
||||
}
|
||||
|
||||
var _ Store = (*dbStore)(nil)
|
||||
|
||||
// NewStore creates the dbStore of the state pkg.
|
||||
func NewStore(db dbm.DB, options StoreOptions) Store {
|
||||
return dbStore{db, options}
|
||||
m := NopMetrics()
|
||||
if options.Metrics != nil {
|
||||
m = options.Metrics
|
||||
}
|
||||
return dbStore{
|
||||
db: db,
|
||||
metrics: m,
|
||||
StoreOptions: options,
|
||||
}
|
||||
}
|
||||
|
||||
// LoadStateFromDBOrGenesisFile loads the most recent state from the database,
|
||||
// or creates a new one from the given genesisFilePath.
|
||||
func (store dbStore) LoadFromDBOrGenesisFile(genesisFilePath string) (State, error) {
|
||||
defer addTimeSample(store.metrics.StoreAccessDurationSeconds.With("method", "load_from_db_or_genesis_file"))()
|
||||
state, err := store.Load()
|
||||
if err != nil {
|
||||
return State{}, err
|
||||
@@ -119,6 +136,7 @@ func (store dbStore) LoadFromDBOrGenesisFile(genesisFilePath string) (State, err
|
||||
// LoadStateFromDBOrGenesisDoc loads the most recent state from the database,
|
||||
// or creates a new one from the given genesisDoc.
|
||||
func (store dbStore) LoadFromDBOrGenesisDoc(genesisDoc *types.GenesisDoc) (State, error) {
|
||||
defer addTimeSample(store.metrics.StoreAccessDurationSeconds.With("method", "load_from_db_or_genesis_doc"))()
|
||||
state, err := store.Load()
|
||||
if err != nil {
|
||||
return State{}, err
|
||||
@@ -137,6 +155,7 @@ func (store dbStore) LoadFromDBOrGenesisDoc(genesisDoc *types.GenesisDoc) (State
|
||||
|
||||
// LoadState loads the State from the database.
|
||||
func (store dbStore) Load() (State, error) {
|
||||
defer addTimeSample(store.metrics.StoreAccessDurationSeconds.With("method", "load"))()
|
||||
return store.loadState(stateKey)
|
||||
}
|
||||
|
||||
@@ -169,6 +188,7 @@ func (store dbStore) loadState(key []byte) (state State, err error) {
|
||||
// Save persists the State, the ValidatorsInfo, and the ConsensusParamsInfo to the database.
|
||||
// This flushes the writes (e.g. calls SetSync).
|
||||
func (store dbStore) Save(state State) error {
|
||||
defer addTimeSample(store.metrics.StoreAccessDurationSeconds.With("method", "save"))()
|
||||
return store.save(state, stateKey)
|
||||
}
|
||||
|
||||
@@ -203,6 +223,7 @@ func (store dbStore) save(state State, key []byte) error {
|
||||
// BootstrapState saves a new state, used e.g. by state sync when starting from non-zero height.
|
||||
func (store dbStore) Bootstrap(state State) error {
|
||||
height := state.LastBlockHeight + 1
|
||||
defer addTimeSample(store.metrics.StoreAccessDurationSeconds.With("method", "bootstrap"))()
|
||||
if height == 1 {
|
||||
height = state.InitialHeight
|
||||
}
|
||||
@@ -238,6 +259,7 @@ func (store dbStore) Bootstrap(state State) error {
|
||||
// This will cause some old states to be left behind when doing incremental partial prunes,
|
||||
// specifically older checkpoints and LastHeightChanged targets.
|
||||
func (store dbStore) PruneStates(from int64, to int64, evidenceThresholdHeight int64) error {
|
||||
defer addTimeSample(store.metrics.StoreAccessDurationSeconds.With("method", "prune_states"))()
|
||||
if from <= 0 || to <= 0 {
|
||||
return fmt.Errorf("from height %v and to height %v must be greater than 0", from, to)
|
||||
}
|
||||
@@ -379,6 +401,7 @@ func ABCIResponsesResultsHash(ar *tmstate.ABCIResponses) []byte {
|
||||
// database. If the node has DiscardABCIResponses set to true, ErrABCIResponsesNotPersisted
|
||||
// is persisted. If not found, ErrNoABCIResponsesForHeight is returned.
|
||||
func (store dbStore) LoadABCIResponses(height int64) (*tmstate.ABCIResponses, error) {
|
||||
defer addTimeSample(store.metrics.StoreAccessDurationSeconds.With("method", "load_abci_responses"))()
|
||||
if store.DiscardABCIResponses {
|
||||
return nil, ErrABCIResponsesNotPersisted
|
||||
}
|
||||
@@ -411,6 +434,7 @@ func (store dbStore) LoadABCIResponses(height int64) (*tmstate.ABCIResponses, er
|
||||
// This method is used for recovering in the case that we called the Commit ABCI
|
||||
// method on the application but crashed before persisting the results.
|
||||
func (store dbStore) LoadLastABCIResponse(height int64) (*tmstate.ABCIResponses, error) {
|
||||
defer addTimeSample(store.metrics.StoreAccessDurationSeconds.With("method", "load_last_abci_response"))()
|
||||
bz, err := store.db.Get(lastABCIResponseKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -442,6 +466,7 @@ func (store dbStore) LoadLastABCIResponse(height int64) (*tmstate.ABCIResponses,
|
||||
//
|
||||
// CONTRACT: height must be monotonically increasing every time this is called.
|
||||
func (store dbStore) SaveABCIResponses(height int64, abciResponses *tmstate.ABCIResponses) error {
|
||||
defer addTimeSample(store.metrics.StoreAccessDurationSeconds.With("method", "save_abci_responses"))()
|
||||
var dtxs []*abci.ResponseDeliverTx
|
||||
// strip nil values,
|
||||
for _, tx := range abciResponses.DeliverTxs {
|
||||
@@ -482,6 +507,7 @@ func (store dbStore) SaveABCIResponses(height int64, abciResponses *tmstate.ABCI
|
||||
// LoadValidators loads the ValidatorSet for a given height.
|
||||
// Returns ErrNoValSetForHeight if the validator set can't be found for this height.
|
||||
func (store dbStore) LoadValidators(height int64) (*types.ValidatorSet, error) {
|
||||
defer addTimeSample(store.metrics.StoreAccessDurationSeconds.With("method", "load_validators"))()
|
||||
valInfo, err := loadValidatorsInfo(store.db, height)
|
||||
if err != nil {
|
||||
return nil, ErrNoValSetForHeight{height}
|
||||
@@ -590,6 +616,7 @@ func (store dbStore) saveValidatorsInfo(height, lastHeightChanged int64, valSet
|
||||
|
||||
// LoadConsensusParams loads the ConsensusParams for a given height.
|
||||
func (store dbStore) LoadConsensusParams(height int64) (types.ConsensusParams, error) {
|
||||
defer addTimeSample(store.metrics.StoreAccessDurationSeconds.With("method", "load_consensus_params"))()
|
||||
var (
|
||||
empty = types.ConsensusParams{}
|
||||
emptypb = tmproto.ConsensusParams{}
|
||||
@@ -671,3 +698,12 @@ func min(a int64, b int64) int64 {
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// addTimeSample returns a function that, when called, adds an observation to m.
|
||||
// The observation added to m is the number of seconds ellapsed since addTimeSample
|
||||
// was initially called. addTimeSample is meant to be called in a defer to calculate
|
||||
// the amount of time a function takes to complete.
|
||||
func addTimeSample(m metrics.Histogram) func() {
|
||||
start := time.Now()
|
||||
return func() { m.Observe(time.Since(start).Seconds()) }
|
||||
}
|
||||
|
||||
@@ -8,6 +8,8 @@ import (
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
dbm "github.com/tendermint/tm-db"
|
||||
|
||||
abci "github.com/tendermint/tendermint/abci/types"
|
||||
"github.com/tendermint/tendermint/crypto/ed25519"
|
||||
"github.com/tendermint/tendermint/crypto/tmhash"
|
||||
@@ -20,7 +22,6 @@ import (
|
||||
"github.com/tendermint/tendermint/store"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
tmtime "github.com/tendermint/tendermint/types/time"
|
||||
dbm "github.com/tendermint/tm-db"
|
||||
)
|
||||
|
||||
const validationTestsStopHeight int64 = 10
|
||||
@@ -46,7 +47,7 @@ func TestValidateBlockHeader(t *testing.T) {
|
||||
mock.Anything,
|
||||
mock.Anything).Return(nil)
|
||||
|
||||
blockStore := store.NewBlockStore(dbm.NewMemDB())
|
||||
blockStore := store.NewBlockStore(dbm.NewMemDB(), store.BlockStoreOptions{})
|
||||
|
||||
blockExec := sm.NewBlockExecutor(
|
||||
stateStore,
|
||||
@@ -134,7 +135,7 @@ func TestValidateBlockCommit(t *testing.T) {
|
||||
mock.Anything,
|
||||
mock.Anything).Return(nil)
|
||||
|
||||
blockStore := store.NewBlockStore(dbm.NewMemDB())
|
||||
blockStore := store.NewBlockStore(dbm.NewMemDB(), store.BlockStoreOptions{})
|
||||
|
||||
blockExec := sm.NewBlockExecutor(
|
||||
stateStore,
|
||||
@@ -276,7 +277,7 @@ func TestValidateBlockEvidence(t *testing.T) {
|
||||
mock.Anything,
|
||||
mock.Anything).Return(nil)
|
||||
state.ConsensusParams.Evidence.MaxBytes = 1000
|
||||
blockStore := store.NewBlockStore(dbm.NewMemDB())
|
||||
blockStore := store.NewBlockStore(dbm.NewMemDB(), store.BlockStoreOptions{})
|
||||
|
||||
blockExec := sm.NewBlockExecutor(
|
||||
stateStore,
|
||||
|
||||
30
statesync/metrics.gen.go
Normal file
30
statesync/metrics.gen.go
Normal file
@@ -0,0 +1,30 @@
|
||||
// Code generated by metricsgen. DO NOT EDIT.
|
||||
|
||||
package statesync
|
||||
|
||||
import (
|
||||
"github.com/go-kit/kit/metrics/discard"
|
||||
prometheus "github.com/go-kit/kit/metrics/prometheus"
|
||||
stdprometheus "github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
|
||||
labels := []string{}
|
||||
for i := 0; i < len(labelsAndValues); i += 2 {
|
||||
labels = append(labels, labelsAndValues[i])
|
||||
}
|
||||
return &Metrics{
|
||||
Syncing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "syncing",
|
||||
Help: "Whether or not a node is state syncing. 1 if yes, 0 if no.",
|
||||
}, labels).With(labelsAndValues...),
|
||||
}
|
||||
}
|
||||
|
||||
func NopMetrics() *Metrics {
|
||||
return &Metrics{
|
||||
Syncing: discard.NewGauge(),
|
||||
}
|
||||
}
|
||||
19
statesync/metrics.go
Normal file
19
statesync/metrics.go
Normal file
@@ -0,0 +1,19 @@
|
||||
package statesync
|
||||
|
||||
import (
|
||||
"github.com/go-kit/kit/metrics"
|
||||
)
|
||||
|
||||
const (
|
||||
// MetricsSubsystem is a subsystem shared by all metrics exposed by this
|
||||
// package.
|
||||
MetricsSubsystem = "statesync"
|
||||
)
|
||||
|
||||
//go:generate go run ../scripts/metricsgen -struct=Metrics
|
||||
|
||||
// Metrics contains metrics exposed by this package.
|
||||
type Metrics struct {
|
||||
// Whether or not a node is state syncing. 1 if yes, 0 if no.
|
||||
Syncing metrics.Gauge
|
||||
}
|
||||
@@ -33,6 +33,7 @@ type Reactor struct {
|
||||
conn proxy.AppConnSnapshot
|
||||
connQuery proxy.AppConnQuery
|
||||
tempDir string
|
||||
metrics *Metrics
|
||||
|
||||
// This will only be set when a state sync is in progress. It is used to feed received
|
||||
// snapshots and chunks into the sync.
|
||||
@@ -46,12 +47,14 @@ func NewReactor(
|
||||
conn proxy.AppConnSnapshot,
|
||||
connQuery proxy.AppConnQuery,
|
||||
tempDir string,
|
||||
metrics *Metrics,
|
||||
) *Reactor {
|
||||
|
||||
r := &Reactor{
|
||||
cfg: cfg,
|
||||
conn: conn,
|
||||
connQuery: connQuery,
|
||||
metrics: metrics,
|
||||
}
|
||||
r.BaseReactor = *p2p.NewBaseReactor("StateSync", r)
|
||||
|
||||
@@ -265,6 +268,7 @@ func (r *Reactor) Sync(stateProvider StateProvider, discoveryTime time.Duration)
|
||||
r.mtx.Unlock()
|
||||
return sm.State{}, nil, errors.New("a state sync is already in progress")
|
||||
}
|
||||
r.metrics.Syncing.Set(1)
|
||||
r.syncer = newSyncer(r.cfg, r.Logger, r.conn, r.connQuery, stateProvider, r.tempDir)
|
||||
r.mtx.Unlock()
|
||||
|
||||
@@ -284,6 +288,7 @@ func (r *Reactor) Sync(stateProvider StateProvider, discoveryTime time.Duration)
|
||||
|
||||
r.mtx.Lock()
|
||||
r.syncer = nil
|
||||
r.metrics.Syncing.Set(0)
|
||||
r.mtx.Unlock()
|
||||
return state, commit, err
|
||||
}
|
||||
|
||||
@@ -71,7 +71,7 @@ func TestReactor_Receive_ChunkRequest(t *testing.T) {
|
||||
|
||||
// Start a reactor and send a ssproto.ChunkRequest, then wait for and check response
|
||||
cfg := config.DefaultStateSyncConfig()
|
||||
r := NewReactor(*cfg, conn, nil, "")
|
||||
r := NewReactor(*cfg, conn, nil, "", NopMetrics())
|
||||
err := r.Start()
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() {
|
||||
@@ -161,7 +161,7 @@ func TestReactor_Receive_SnapshotsRequest(t *testing.T) {
|
||||
|
||||
// Start a reactor and send a SnapshotsRequestMessage, then wait for and check responses
|
||||
cfg := config.DefaultStateSyncConfig()
|
||||
r := NewReactor(*cfg, conn, nil, "")
|
||||
r := NewReactor(*cfg, conn, nil, "", NopMetrics())
|
||||
err := r.Start()
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() {
|
||||
|
||||
@@ -498,8 +498,8 @@ func (s *syncer) verifyApp(snapshot *snapshot, appVersion uint64) error {
|
||||
}
|
||||
if !bytes.Equal(snapshot.trustedAppHash, resp.LastBlockAppHash) {
|
||||
s.logger.Error("appHash verification failed",
|
||||
"expected", snapshot.trustedAppHash,
|
||||
"actual", resp.LastBlockAppHash)
|
||||
"expected", fmt.Sprintf("%X", snapshot.trustedAppHash),
|
||||
"actual", fmt.Sprintf("%X", resp.LastBlockAppHash))
|
||||
return errVerifyFailed
|
||||
}
|
||||
if uint64(resp.LastBlockHeight) != snapshot.Height {
|
||||
@@ -511,6 +511,6 @@ func (s *syncer) verifyApp(snapshot *snapshot, appVersion uint64) error {
|
||||
return errVerifyFailed
|
||||
}
|
||||
|
||||
s.logger.Info("Verified ABCI app", "height", snapshot.Height, "appHash", snapshot.trustedAppHash)
|
||||
s.logger.Info("Verified ABCI app", "height", snapshot.Height, "appHash", log.NewLazySprintf("%X", snapshot.trustedAppHash))
|
||||
return nil
|
||||
}
|
||||
|
||||
32
store/metrics.gen.go
Normal file
32
store/metrics.gen.go
Normal file
@@ -0,0 +1,32 @@
|
||||
// Code generated by metricsgen. DO NOT EDIT.
|
||||
|
||||
package store
|
||||
|
||||
import (
|
||||
"github.com/go-kit/kit/metrics/discard"
|
||||
prometheus "github.com/go-kit/kit/metrics/prometheus"
|
||||
stdprometheus "github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
|
||||
labels := []string{}
|
||||
for i := 0; i < len(labelsAndValues); i += 2 {
|
||||
labels = append(labels, labelsAndValues[i])
|
||||
}
|
||||
return &Metrics{
|
||||
BlockStoreAccessDurationSeconds: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "block_store_access_duration_seconds",
|
||||
Help: "The duration of accesses to the state store labeled by which method was called on the store.",
|
||||
|
||||
Buckets: stdprometheus.ExponentialBuckets(0.00002, 5, 5),
|
||||
}, append(labels, "method")).With(labelsAndValues...),
|
||||
}
|
||||
}
|
||||
|
||||
func NopMetrics() *Metrics {
|
||||
return &Metrics{
|
||||
BlockStoreAccessDurationSeconds: discard.NewHistogram(),
|
||||
}
|
||||
}
|
||||
20
store/metrics.go
Normal file
20
store/metrics.go
Normal file
@@ -0,0 +1,20 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"github.com/go-kit/kit/metrics"
|
||||
)
|
||||
|
||||
const (
|
||||
// MetricsSubsystem is a subsystem shared by all metrics exposed by this
|
||||
// package.
|
||||
MetricsSubsystem = "store"
|
||||
)
|
||||
|
||||
//go:generate go run ../scripts/metricsgen -struct=Metrics
|
||||
|
||||
// Metrics contains metrics exposed by this package.
|
||||
type Metrics struct {
|
||||
// The duration of accesses to the state store labeled by which method
|
||||
// was called on the store.
|
||||
BlockStoreAccessDurationSeconds metrics.Histogram `metrics_buckettype:"exp" metrics_bucketsizes:"0.00002, 5, 5" metrics_labels:"method"`
|
||||
}
|
||||
@@ -3,8 +3,10 @@ package store
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/cosmos/gogoproto/proto"
|
||||
"github.com/go-kit/kit/metrics"
|
||||
dbm "github.com/tendermint/tm-db"
|
||||
|
||||
"github.com/tendermint/tendermint/evidence"
|
||||
@@ -35,6 +37,8 @@ The store can be assumed to contain all contiguous blocks between base and heigh
|
||||
type BlockStore struct {
|
||||
db dbm.DB
|
||||
|
||||
metrics *Metrics
|
||||
|
||||
// mtx guards access to the struct fields listed below it. We rely on the database to enforce
|
||||
// fine-grained concurrency control for its data, and thus this mutex does not apply to
|
||||
// database contents. The only reason for keeping these fields in the struct is that the data
|
||||
@@ -45,14 +49,23 @@ type BlockStore struct {
|
||||
height int64
|
||||
}
|
||||
|
||||
type BlockStoreOptions struct {
|
||||
Metrics *Metrics
|
||||
}
|
||||
|
||||
// NewBlockStore returns a new BlockStore with the given DB,
|
||||
// initialized to the last height that was committed to the DB.
|
||||
func NewBlockStore(db dbm.DB) *BlockStore {
|
||||
func NewBlockStore(db dbm.DB, o BlockStoreOptions) *BlockStore {
|
||||
bs := LoadBlockStoreState(db)
|
||||
m := NopMetrics()
|
||||
if o.Metrics != nil {
|
||||
m = o.Metrics
|
||||
}
|
||||
return &BlockStore{
|
||||
base: bs.Base,
|
||||
height: bs.Height,
|
||||
db: db,
|
||||
metrics: m,
|
||||
base: bs.Base,
|
||||
height: bs.Height,
|
||||
db: db,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -93,6 +106,7 @@ func (bs *BlockStore) LoadBaseMeta() *types.BlockMeta {
|
||||
// LoadBlock returns the block with the given height.
|
||||
// If no block is found for that height, it returns nil.
|
||||
func (bs *BlockStore) LoadBlock(height int64) *types.Block {
|
||||
defer addTimeSample(bs.metrics.BlockStoreAccessDurationSeconds.With("method", "load_block"))()
|
||||
var blockMeta = bs.LoadBlockMeta(height)
|
||||
if blockMeta == nil {
|
||||
return nil
|
||||
@@ -128,6 +142,7 @@ func (bs *BlockStore) LoadBlock(height int64) *types.Block {
|
||||
// If no block is found for that hash, it returns nil.
|
||||
// Panics if it fails to parse height associated with the given hash.
|
||||
func (bs *BlockStore) LoadBlockByHash(hash []byte) *types.Block {
|
||||
defer addTimeSample(bs.metrics.BlockStoreAccessDurationSeconds.With("method", "load_block_by_hash"))()
|
||||
bz, err := bs.db.Get(calcBlockHashKey(hash))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
@@ -149,6 +164,7 @@ func (bs *BlockStore) LoadBlockByHash(hash []byte) *types.Block {
|
||||
// from the block at the given height.
|
||||
// If no part is found for the given height and index, it returns nil.
|
||||
func (bs *BlockStore) LoadBlockPart(height int64, index int) *types.Part {
|
||||
defer addTimeSample(bs.metrics.BlockStoreAccessDurationSeconds.With("method", "load_block_part"))()
|
||||
var pbpart = new(tmproto.Part)
|
||||
|
||||
bz, err := bs.db.Get(calcBlockPartKey(height, index))
|
||||
@@ -174,6 +190,7 @@ func (bs *BlockStore) LoadBlockPart(height int64, index int) *types.Part {
|
||||
// LoadBlockMeta returns the BlockMeta for the given height.
|
||||
// If no block is found for the given height, it returns nil.
|
||||
func (bs *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta {
|
||||
defer addTimeSample(bs.metrics.BlockStoreAccessDurationSeconds.With("method", "load_block_meta"))()
|
||||
var pbbm = new(tmproto.BlockMeta)
|
||||
bz, err := bs.db.Get(calcBlockMetaKey(height))
|
||||
|
||||
@@ -201,6 +218,7 @@ func (bs *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta {
|
||||
// LoadBlockMetaByHash returns the blockmeta who's header corresponds to the given
|
||||
// hash. If none is found, returns nil.
|
||||
func (bs *BlockStore) LoadBlockMetaByHash(hash []byte) *types.BlockMeta {
|
||||
defer addTimeSample(bs.metrics.BlockStoreAccessDurationSeconds.With("method", "load_block_meta_by_hash"))()
|
||||
bz, err := bs.db.Get(calcBlockHashKey(hash))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
@@ -223,6 +241,7 @@ func (bs *BlockStore) LoadBlockMetaByHash(hash []byte) *types.BlockMeta {
|
||||
// and it comes from the block.LastCommit for `height+1`.
|
||||
// If no commit is found for the given height, it returns nil.
|
||||
func (bs *BlockStore) LoadBlockCommit(height int64) *types.Commit {
|
||||
defer addTimeSample(bs.metrics.BlockStoreAccessDurationSeconds.With("method", "load_block_commit"))()
|
||||
var pbc = new(tmproto.Commit)
|
||||
bz, err := bs.db.Get(calcBlockCommitKey(height))
|
||||
if err != nil {
|
||||
@@ -246,6 +265,7 @@ func (bs *BlockStore) LoadBlockCommit(height int64) *types.Commit {
|
||||
// This is useful when we've seen a commit, but there has not yet been
|
||||
// a new block at `height + 1` that includes this commit in its block.LastCommit.
|
||||
func (bs *BlockStore) LoadSeenCommit(height int64) *types.Commit {
|
||||
defer addTimeSample(bs.metrics.BlockStoreAccessDurationSeconds.With("method", "load_seen_commit"))()
|
||||
var pbc = new(tmproto.Commit)
|
||||
bz, err := bs.db.Get(calcSeenCommitKey(height))
|
||||
if err != nil {
|
||||
@@ -268,6 +288,7 @@ func (bs *BlockStore) LoadSeenCommit(height int64) *types.Commit {
|
||||
|
||||
// PruneBlocks removes block up to (but not including) a height. It returns number of blocks pruned and the evidence retain height - the height at which data needed to prove evidence must not be removed.
|
||||
func (bs *BlockStore) PruneBlocks(height int64, state sm.State) (uint64, int64, error) {
|
||||
defer addTimeSample(bs.metrics.BlockStoreAccessDurationSeconds.With("method", "prune_blocks"))()
|
||||
if height <= 0 {
|
||||
return 0, -1, fmt.Errorf("height must be greater than 0")
|
||||
}
|
||||
@@ -368,6 +389,7 @@ func (bs *BlockStore) PruneBlocks(height int64, state sm.State) (uint64, int64,
|
||||
// we need this to reload the precommits to catch-up nodes to the
|
||||
// most recent height. Otherwise they'd stall at H-1.
|
||||
func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) {
|
||||
defer addTimeSample(bs.metrics.BlockStoreAccessDurationSeconds.With("method", "save_block"))()
|
||||
if block == nil {
|
||||
panic("BlockStore can only save a non-nil block")
|
||||
}
|
||||
@@ -455,6 +477,7 @@ func (bs *BlockStore) saveState() {
|
||||
|
||||
// SaveSeenCommit saves a seen commit, used by e.g. the state sync reactor when bootstrapping node.
|
||||
func (bs *BlockStore) SaveSeenCommit(height int64, seenCommit *types.Commit) error {
|
||||
defer addTimeSample(bs.metrics.BlockStoreAccessDurationSeconds.With("method", "save_seen_commit"))()
|
||||
pbc := seenCommit.ToProto()
|
||||
seenCommitBytes, err := proto.Marshal(pbc)
|
||||
if err != nil {
|
||||
@@ -545,6 +568,7 @@ func mustEncode(pb proto.Message) []byte {
|
||||
// DeleteLatestBlock removes the block pointed to by height,
|
||||
// lowering height by one.
|
||||
func (bs *BlockStore) DeleteLatestBlock() error {
|
||||
defer addTimeSample(bs.metrics.BlockStoreAccessDurationSeconds.With("method", "delete_latest_block"))()
|
||||
bs.mtx.RLock()
|
||||
targetHeight := bs.height
|
||||
bs.mtx.RUnlock()
|
||||
@@ -586,3 +610,10 @@ func (bs *BlockStore) DeleteLatestBlock() error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func addTimeSample(h metrics.Histogram) func() {
|
||||
start := time.Now()
|
||||
return func() {
|
||||
h.Observe(time.Since(start).Seconds())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -56,7 +56,7 @@ func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore, cleanupFu
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("error constructing state from genesis file: %w", err))
|
||||
}
|
||||
return state, NewBlockStore(blockDB), func() { os.RemoveAll(config.RootDir) }
|
||||
return state, NewBlockStore(blockDB, store.BlockStoreOptions{}) , func() { os.RemoveAll(config.RootDir) }
|
||||
}
|
||||
|
||||
func TestLoadBlockStoreState(t *testing.T) {
|
||||
@@ -82,13 +82,13 @@ func TestLoadBlockStoreState(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewBlockStore(t *testing.T) {
|
||||
func TestNewBlockStore(t *testing.T, store.BlockStoreOptions{}) {
|
||||
db := dbm.NewMemDB()
|
||||
bss := tmstore.BlockStoreState{Base: 100, Height: 10000}
|
||||
bz, _ := proto.Marshal(&bss)
|
||||
err := db.Set(blockStoreKey, bz)
|
||||
require.NoError(t, err)
|
||||
bs := NewBlockStore(db)
|
||||
bs := NewBlockStore(db, store.BlockStoreOptions{})
|
||||
require.Equal(t, int64(100), bs.Base(), "failed to properly parse blockstore")
|
||||
require.Equal(t, int64(10000), bs.Height(), "failed to properly parse blockstore")
|
||||
|
||||
@@ -106,7 +106,7 @@ func TestNewBlockStore(t *testing.T) {
|
||||
_, _, panicErr := doFn(func() (interface{}, error) {
|
||||
err := db.Set(blockStoreKey, tt.data)
|
||||
require.NoError(t, err)
|
||||
_ = NewBlockStore(db)
|
||||
_ = NewBlockStore(db, store.BlockStoreOptions{})
|
||||
return nil, nil
|
||||
})
|
||||
require.NotNil(t, panicErr, "#%d panicCauser: %q expected a panic", i, tt.data)
|
||||
@@ -115,13 +115,13 @@ func TestNewBlockStore(t *testing.T) {
|
||||
|
||||
err = db.Set(blockStoreKey, []byte{})
|
||||
require.NoError(t, err)
|
||||
bs = NewBlockStore(db)
|
||||
bs = NewBlockStore(db, store.BlockStoreOptions{})
|
||||
assert.Equal(t, bs.Height(), int64(0), "expecting empty bytes to be unmarshaled alright")
|
||||
}
|
||||
|
||||
func freshBlockStore() (*BlockStore, dbm.DB) {
|
||||
db := dbm.NewMemDB()
|
||||
return NewBlockStore(db), db
|
||||
return NewBlockStore(db, store.BlockStoreOptions{}) , db
|
||||
}
|
||||
|
||||
var (
|
||||
@@ -371,7 +371,7 @@ func TestLoadBaseMeta(t *testing.T) {
|
||||
})
|
||||
state, err := stateStore.LoadFromDBOrGenesisFile(config.GenesisFile())
|
||||
require.NoError(t, err)
|
||||
bs := NewBlockStore(dbm.NewMemDB())
|
||||
bs := NewBlockStore(dbm.NewMemDB(), store.BlockStoreOptions{})
|
||||
|
||||
for h := int64(1); h <= 10; h++ {
|
||||
block := state.MakeBlock(h, test.MakeNTxs(h, 10), new(types.Commit), nil, state.Validators.GetProposer().Address)
|
||||
@@ -434,7 +434,7 @@ func TestPruneBlocks(t *testing.T) {
|
||||
state, err := stateStore.LoadFromDBOrGenesisFile(config.GenesisFile())
|
||||
require.NoError(t, err)
|
||||
db := dbm.NewMemDB()
|
||||
bs := NewBlockStore(db)
|
||||
bs := NewBlockStore(db, store.BlockStoreOptions{})
|
||||
assert.EqualValues(t, 0, bs.Base())
|
||||
assert.EqualValues(t, 0, bs.Height())
|
||||
assert.EqualValues(t, 0, bs.Size())
|
||||
@@ -573,7 +573,7 @@ func TestLoadBlockMetaByHash(t *testing.T) {
|
||||
})
|
||||
state, err := stateStore.LoadFromDBOrGenesisFile(config.GenesisFile())
|
||||
require.NoError(t, err)
|
||||
bs := NewBlockStore(dbm.NewMemDB())
|
||||
bs := NewBlockStore(dbm.NewMemDB(), store.BlockStoreOptions{})
|
||||
|
||||
b1 := state.MakeBlock(state.LastBlockHeight+1, test.MakeNTxs(state.LastBlockHeight+1, 10), new(types.Commit), nil, state.Validators.GetProposer().Address)
|
||||
partSet, err := b1.MakePartSet(2)
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user