Mirror of https://github.com/tendermint/tendermint.git, synced 2026-01-12 15:52:50 +00:00

Compare commits: 68 commits, comparing `finalizeBl...` against `wb/consens...`
| Author | SHA1 | Date |
|---|---|---|
|  | f5df349cee |  |
|  | c3ae6f5b58 |  |
|  | a393cf8bab |  |
|  | 0e2752ae42 |  |
|  | 97a8f125e0 |  |
|  | 84c15857e4 |  |
|  | e70445f942 |  |
|  | 478f5321ad |  |
|  | 08e4e2ed3d |  |
|  | 7d63e991c5 |  |
|  | 7638235d33 |  |
|  | 2abfe20114 |  |
|  | 0bf7813c4e |  |
|  | ff9038e2ce |  |
|  | 00a40835a2 |  |
|  | c4f77ab6d1 |  |
|  | 2030875056 |  |
|  | 639e145729 |  |
|  | 68ffe8bc64 |  |
|  | 21309ccb7b |  |
|  | f70396c6fd |  |
|  | fdc246e4a8 |  |
|  | 78a0a5fe73 |  |
|  | 4f885209aa |  |
|  | 6dd0cf92c8 |  |
|  | 626d9b4fbe |  |
|  | 8addf99f90 |  |
|  | 76c6c67734 |  |
|  | a46724e4f6 |  |
|  | 40fba3960d |  |
|  | 36a859ae54 |  |
|  | ab5c63eff3 |  |
|  | 8228936155 |  |
|  | a12e2bbb60 |  |
|  | 11bebfb6a0 |  |
|  | 4009102e2b |  |
|  | cabd916517 |  |
|  | 363ea56680 |  |
|  | aa4854ff8f |  |
|  | 581dd01d47 |  |
|  | 50b00dff71 |  |
|  | 051e127d38 |  |
|  | 5530726df8 |  |
|  | decac693ab |  |
|  | 7ca0f24040 |  |
|  | 69848bef26 |  |
|  | 2c14d491f6 |  |
|  | cd248576ea |  |
|  | c256edc622 |  |
|  | 9d9360774f |  |
|  | c7c11fc7d5 |  |
|  | 37bc1d74df |  |
|  | d882f31569 |  |
|  | ba3f7106b1 |  |
|  | 3ccfb26137 |  |
|  | 96863decca |  |
|  | d4cda544ae |  |
|  | 800cce80b7 |  |
|  | e850863296 |  |
|  | 1dec3e139a |  |
|  | 11b920480f |  |
|  | 4f8bcb1cce |  |
|  | 2d95e38986 |  |
|  | 6bb4b688e0 |  |
|  | a1e1e6c290 |  |
|  | 736364178a |  |
|  | a99c7188d7 |  |
|  | a56b10fbef |  |
.github/CODEOWNERS (vendored) — 2 changes
@@ -7,5 +7,5 @@
 # global owners are only requested if there isn't a more specific
 # codeowner specified below. For this reason, the global codeowners
 # are often repeated in package-level definitions.
-* @alexanderbez @ebuchman @cmwaters @tessr @tychoish
+* @alexanderbez @ebuchman @cmwaters @tessr @tychoish @williambanfield
.github/workflows/coverage.yml (vendored) — 2 changes
@@ -121,7 +121,7 @@ jobs:
       - run: |
           cat ./*profile.out | grep -v "mode: atomic" >> coverage.txt
         if: env.GIT_DIFF
-      - uses: codecov/codecov-action@v1.5.2
+      - uses: codecov/codecov-action@v2.0.1
         with:
           file: ./coverage.txt
         if: env.GIT_DIFF
.github/workflows/e2e-nightly-master.yml (vendored) — 4 changes
@@ -17,7 +17,7 @@ jobs:
       fail-fast: false
       matrix:
         p2p: ['legacy', 'new', 'hybrid']
-        group: ['00', '01', '02', '03']
+        group: ['00', '01']
     runs-on: ubuntu-latest
     timeout-minutes: 60
     steps:
@@ -35,7 +35,7 @@ jobs:
       - name: Generate testnets
         working-directory: test/e2e
         # When changing -g, also change the matrix groups above
-        run: ./build/generator -g 4 -d networks/nightly/${{ matrix.p2p }} -p ${{ matrix.p2p }}
+        run: ./build/generator -g 2 -d networks/nightly/${{ matrix.p2p }} -p ${{ matrix.p2p }}
 
       - name: Run ${{ matrix.p2p }} p2p testnets in group ${{ matrix.group }}
         working-directory: test/e2e
.github/workflows/linkchecker.yml (vendored) — 2 changes
@@ -7,6 +7,6 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v2.3.4
-      - uses: gaurav-nelson/github-action-markdown-link-check@1.0.12
+      - uses: gaurav-nelson/github-action-markdown-link-check@1.0.13
         with:
           folder-path: "docs"
.github/workflows/stale.yml (vendored) — 7 changes
@@ -7,12 +7,13 @@ jobs:
   stale:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/stale@v3.0.19
+      - uses: actions/stale@v4
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
          stale-pr-message: "This pull request has been automatically marked as stale because it has not had
            recent activity. It will be closed if no further activity occurs. Thank you
            for your contributions."
-          days-before-stale: 10
-          days-before-close: 4
+          days-before-stale: -1
+          days-before-pr-stale: 10
+          days-before-pr-close: 4
          exempt-pr-labels: "S:wip"
.github/workflows/tests.yml (vendored) — 32 changes
@@ -42,38 +42,6 @@ jobs:
           key: ${{ runner.os }}-${{ github.sha }}-tm-binary
         if: env.GIT_DIFF
 
-  test_abci_apps:
-    runs-on: ubuntu-latest
-    needs: build
-    timeout-minutes: 5
-    steps:
-      - uses: actions/setup-go@v2
-        with:
-          go-version: "1.16"
-      - uses: actions/checkout@v2.3.4
-      - uses: technote-space/get-diff-action@v4
-        with:
-          PATTERNS: |
-            **/**.go
-            go.mod
-            go.sum
-      - uses: actions/cache@v2.1.6
-        with:
-          path: ~/go/pkg/mod
-          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
-          restore-keys: |
-            ${{ runner.os }}-go-
-        if: env.GIT_DIFF
-      - uses: actions/cache@v2.1.6
-        with:
-          path: ~/go/bin
-          key: ${{ runner.os }}-${{ github.sha }}-tm-binary
-        if: env.GIT_DIFF
-      - name: test_abci_apps
-        run: abci/tests/test_app/test.sh
-        shell: bash
-        if: env.GIT_DIFF
-
   test_abci_cli:
     runs-on: ubuntu-latest
     needs: build
.gitignore (vendored) — 2 changes
@@ -15,7 +15,7 @@
 .vagrant
 .vendor-new/
 .vscode/
-abci-cli
+abci/abci-cli
 addrbook.json
 artifacts/*
 build/*
@@ -21,7 +21,9 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi
 - [cli] \#6372 Introduce `BootstrapPeers` as part of the new p2p stack. Peers to be connected on startup (@cmwaters)
 - [config] \#6462 Move `PrivValidator` configuration out of `BaseConfig` into its own section. (@tychoish)
 - [rpc] \#6610 Add MaxPeerBlockHeight into /status rpc call (@JayT106)
 - [libs/CList] \#6626 Automatically detach the prev/next elements in Remove function (@JayT106)
 - [fastsync/rpc] \#6620 Add TotalSyncedTime & RemainingTime to SyncInfo in /status RPC (@JayT106)
+- [rpc/grpc] \#6725 Mark gRPC in the RPC layer as deprecated.
+- [blockchain/v2] \#6730 Fast Sync v2 is deprecated, please use v0
 
 - Apps
   - [ABCI] \#6408 Change the `key` and `value` fields from `[]byte` to `string` in the `EventAttribute` type. (@alexanderbez)
@@ -30,6 +32,7 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi
   - [ABCI] \#5818 Use protoio for msg length delimitation. Migrates from int64 to uint64 length delimiters.
   - [Version] \#6494 `TMCoreSemVer` has been renamed to `TMVersion`.
     - It is not required any longer to set ldflags to set version strings
+  - [abci/counter] \#6684 Delete counter example app
 
 - P2P Protocol
 
@@ -95,6 +98,8 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi
   - Applications that do not specify a priority, i.e. zero, will have transactions reaped by the order in which they are received by the node.
   - Transactions are gossiped in FIFO order as they are in `v0`.
 - [config/indexer] \#6411 Introduce support for custom event indexing data sources, specifically PostgreSQL. (@JayT106)
+- [fastsync/event] \#6619 Emit fastsync status event when switching consensus/fastsync (@JayT106)
+- [statesync/event] \#6700 Emit statesync status start/end event (@JayT106)
 
 ### IMPROVEMENTS
 - [libs/log] Console log formatting changes as a result of \#6534 and \#6589. (@tychoish)
@@ -147,3 +152,5 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi
 - [rpc] \#6507 fix RPC client doesn't handle url's without ports (@JayT106)
 - [statesync] \#6463 Adds Reverse Sync feature to fetch historical light blocks after state sync in order to verify any evidence (@cmwaters)
 - [fastsync] \#6590 Update the metrics during fast-sync (@JayT106)
+- [gitignore] \#6668 Fix gitignore of abci-cli (@tanyabouman)
+- [light] \#6687 Fix bug with incorrecly handled contexts in the light client (@cmwaters)
Makefile — 11 changes
@@ -202,7 +202,7 @@ format:
 
 lint:
 	@echo "--> Running linter"
-	@golangci-lint run
+	go run github.com/golangci/golangci-lint/cmd/golangci-lint run
 .PHONY: lint
 
 DESTINATION = ./index.html.md
@@ -231,6 +231,15 @@ build-docker: build-linux
 	rm -rf DOCKER/tendermint
 .PHONY: build-docker
 
+
+###############################################################################
+###                                Mocks                                    ###
+###############################################################################
+
+mockery:
+	go generate -run="mockery" ./...
+.PHONY: mockery
+
 ###############################################################################
 ###                       Local testnet using docker                        ###
 ###############################################################################
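For context on what the new target runs: `go generate -run="mockery" ./...` executes every `go:generate` line whose command matches "mockery", in any package. A hypothetical directive of that shape is sketched below; the flags are illustrative assumptions, since the diff only shows the Makefile target and the regenerated file header.

```go
// Package abcicli would carry a directive like the following so that
// `make mockery` (i.e. `go generate -run="mockery" ./...`) regenerates
// the Client mock under ./mocks. The flags are illustrative, not taken
// from this diff.
package abcicli

//go:generate mockery --name Client --output ./mocks
```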
@@ -32,6 +32,8 @@ This guide provides instructions for upgrading to specific versions of Tendermin
 - configuration values starting with `priv-validator-` have moved to the new
   `priv-validator` section, without the `priv-validator-` prefix.
 
+* Fast Sync v2 has been deprecated, please use v0 to sync a node.
+
 ### CLI Changes
 
 * You must now specify the node mode (validator|full|seed) in `tendermint init [mode]`
@@ -74,6 +76,10 @@ will need to change to accommodate these changes. Most notably:
   longer exported and have been replaced with `node.New` and
   `node.NewDefault` which provide more functional interfaces.
 
+### RPC changes
+
+Mark gRPC in the RPC layer as deprecated and to be removed in 0.36.
+
 ## v0.34.0
 
 **Upgrading to Tendermint 0.34 requires a blockchain restart.**
@@ -35,29 +35,33 @@ type Client interface {
 	FlushAsync(context.Context) (*ReqRes, error)
 	EchoAsync(ctx context.Context, msg string) (*ReqRes, error)
 	InfoAsync(context.Context, types.RequestInfo) (*ReqRes, error)
+	DeliverTxAsync(context.Context, types.RequestDeliverTx) (*ReqRes, error)
 	CheckTxAsync(context.Context, types.RequestCheckTx) (*ReqRes, error)
 	QueryAsync(context.Context, types.RequestQuery) (*ReqRes, error)
 	CommitAsync(context.Context) (*ReqRes, error)
 	InitChainAsync(context.Context, types.RequestInitChain) (*ReqRes, error)
+	BeginBlockAsync(context.Context, types.RequestBeginBlock) (*ReqRes, error)
+	EndBlockAsync(context.Context, types.RequestEndBlock) (*ReqRes, error)
 	ListSnapshotsAsync(context.Context, types.RequestListSnapshots) (*ReqRes, error)
 	OfferSnapshotAsync(context.Context, types.RequestOfferSnapshot) (*ReqRes, error)
 	LoadSnapshotChunkAsync(context.Context, types.RequestLoadSnapshotChunk) (*ReqRes, error)
 	ApplySnapshotChunkAsync(context.Context, types.RequestApplySnapshotChunk) (*ReqRes, error)
-	FinalizeBlockAsync(context.Context, types.RequestFinalizeBlock) (*ReqRes, error)
 
 	// Synchronous requests
 	FlushSync(context.Context) error
 	EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error)
 	InfoSync(context.Context, types.RequestInfo) (*types.ResponseInfo, error)
+	DeliverTxSync(context.Context, types.RequestDeliverTx) (*types.ResponseDeliverTx, error)
 	CheckTxSync(context.Context, types.RequestCheckTx) (*types.ResponseCheckTx, error)
 	QuerySync(context.Context, types.RequestQuery) (*types.ResponseQuery, error)
 	CommitSync(context.Context) (*types.ResponseCommit, error)
 	InitChainSync(context.Context, types.RequestInitChain) (*types.ResponseInitChain, error)
+	BeginBlockSync(context.Context, types.RequestBeginBlock) (*types.ResponseBeginBlock, error)
+	EndBlockSync(context.Context, types.RequestEndBlock) (*types.ResponseEndBlock, error)
 	ListSnapshotsSync(context.Context, types.RequestListSnapshots) (*types.ResponseListSnapshots, error)
 	OfferSnapshotSync(context.Context, types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error)
 	LoadSnapshotChunkSync(context.Context, types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error)
 	ApplySnapshotChunkSync(context.Context, types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error)
-	FinalizeBlockSync(context.Context, types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error)
 }
 
 //----------------------------------------
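For orientation on the restored surface, here is a minimal sketch of driving the synchronous API in-process. The `NewLocalClient` constructor and the kvstore wiring are assumptions made for illustration; only the `DeliverTxSync` signature comes from the interface above.

```go
package main

import (
	"context"
	"fmt"

	abcicli "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/example/kvstore"
	"github.com/tendermint/tendermint/abci/types"
)

func main() {
	// Assumed wiring: an in-process client over the kvstore example app.
	client := abcicli.NewLocalClient(nil, kvstore.NewApplication())

	// With this change, a transaction is delivered per call again,
	// instead of being batched into one RequestFinalizeBlock.
	res, err := client.DeliverTxSync(context.Background(),
		types.RequestDeliverTx{Tx: []byte("key=value")})
	if err != nil {
		panic(err)
	}
	fmt.Println(res.Code) // 0 (CodeTypeOK) on success
}
```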
@@ -194,6 +194,16 @@ func (cli *grpcClient) InfoAsync(ctx context.Context, params types.RequestInfo)
 	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Info{Info: res}})
 }
 
+// NOTE: call is synchronous, use ctx to break early if needed
+func (cli *grpcClient) DeliverTxAsync(ctx context.Context, params types.RequestDeliverTx) (*ReqRes, error) {
+	req := types.ToRequestDeliverTx(params)
+	res, err := cli.client.DeliverTx(ctx, req.GetDeliverTx(), grpc.WaitForReady(true))
+	if err != nil {
+		return nil, err
+	}
+	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_DeliverTx{DeliverTx: res}})
+}
+
 // NOTE: call is synchronous, use ctx to break early if needed
 func (cli *grpcClient) CheckTxAsync(ctx context.Context, params types.RequestCheckTx) (*ReqRes, error) {
 	req := types.ToRequestCheckTx(params)
@@ -234,6 +244,26 @@ func (cli *grpcClient) InitChainAsync(ctx context.Context, params types.RequestI
 	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_InitChain{InitChain: res}})
 }
 
+// NOTE: call is synchronous, use ctx to break early if needed
+func (cli *grpcClient) BeginBlockAsync(ctx context.Context, params types.RequestBeginBlock) (*ReqRes, error) {
+	req := types.ToRequestBeginBlock(params)
+	res, err := cli.client.BeginBlock(ctx, req.GetBeginBlock(), grpc.WaitForReady(true))
+	if err != nil {
+		return nil, err
+	}
+	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_BeginBlock{BeginBlock: res}})
+}
+
+// NOTE: call is synchronous, use ctx to break early if needed
+func (cli *grpcClient) EndBlockAsync(ctx context.Context, params types.RequestEndBlock) (*ReqRes, error) {
+	req := types.ToRequestEndBlock(params)
+	res, err := cli.client.EndBlock(ctx, req.GetEndBlock(), grpc.WaitForReady(true))
+	if err != nil {
+		return nil, err
+	}
+	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_EndBlock{EndBlock: res}})
+}
+
 // NOTE: call is synchronous, use ctx to break early if needed
 func (cli *grpcClient) ListSnapshotsAsync(ctx context.Context, params types.RequestListSnapshots) (*ReqRes, error) {
 	req := types.ToRequestListSnapshots(params)
@@ -284,22 +314,6 @@ func (cli *grpcClient) ApplySnapshotChunkAsync(
 	)
 }
 
-func (cli *grpcClient) FinalizeBlockAsync(
-	ctx context.Context,
-	params types.RequestFinalizeBlock,
-) (*ReqRes, error) {
-	req := types.ToRequestFinalizeBlock(params)
-	res, err := cli.client.FinalizeBlock(ctx, req.GetFinalizeBlock(), grpc.WaitForReady(true))
-	if err != nil {
-		return nil, err
-	}
-	return cli.finishAsyncCall(
-		ctx,
-		req,
-		&types.Response{Value: &types.Response_FinalizeBlock{FinalizeBlock: res}},
-	)
-}
-
 // finishAsyncCall creates a ReqRes for an async call, and immediately populates it
 // with the response. We don't complete it until it's been ordered via the channel.
 func (cli *grpcClient) finishAsyncCall(ctx context.Context, req *types.Request, res *types.Response) (*ReqRes, error) {
@@ -366,6 +380,18 @@ func (cli *grpcClient) InfoSync(
 	return cli.finishSyncCall(reqres).GetInfo(), cli.Error()
 }
 
+func (cli *grpcClient) DeliverTxSync(
+	ctx context.Context,
+	params types.RequestDeliverTx,
+) (*types.ResponseDeliverTx, error) {
+
+	reqres, err := cli.DeliverTxAsync(ctx, params)
+	if err != nil {
+		return nil, err
+	}
+	return cli.finishSyncCall(reqres).GetDeliverTx(), cli.Error()
+}
+
 func (cli *grpcClient) CheckTxSync(
 	ctx context.Context,
 	params types.RequestCheckTx,
@@ -409,6 +435,30 @@ func (cli *grpcClient) InitChainSync(
 	return cli.finishSyncCall(reqres).GetInitChain(), cli.Error()
 }
 
+func (cli *grpcClient) BeginBlockSync(
+	ctx context.Context,
+	params types.RequestBeginBlock,
+) (*types.ResponseBeginBlock, error) {
+
+	reqres, err := cli.BeginBlockAsync(ctx, params)
+	if err != nil {
+		return nil, err
+	}
+	return cli.finishSyncCall(reqres).GetBeginBlock(), cli.Error()
+}
+
+func (cli *grpcClient) EndBlockSync(
+	ctx context.Context,
+	params types.RequestEndBlock,
+) (*types.ResponseEndBlock, error) {
+
+	reqres, err := cli.EndBlockAsync(ctx, params)
+	if err != nil {
+		return nil, err
+	}
+	return cli.finishSyncCall(reqres).GetEndBlock(), cli.Error()
+}
+
 func (cli *grpcClient) ListSnapshotsSync(
 	ctx context.Context,
 	params types.RequestListSnapshots,
@@ -454,14 +504,3 @@ func (cli *grpcClient) ApplySnapshotChunkSync(
 	}
 	return cli.finishSyncCall(reqres).GetApplySnapshotChunk(), cli.Error()
 }
-
-func (cli *grpcClient) FinalizeBlockSync(
-	ctx context.Context,
-	params types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
-
-	reqres, err := cli.FinalizeBlockAsync(ctx, params)
-	if err != nil {
-		return nil, err
-	}
-	return cli.finishSyncCall(reqres).GetFinalizeBlock(), cli.Error()
-}
@@ -77,6 +77,17 @@ func (app *localClient) InfoAsync(ctx context.Context, req types.RequestInfo) (*
 	), nil
 }
 
+func (app *localClient) DeliverTxAsync(ctx context.Context, params types.RequestDeliverTx) (*ReqRes, error) {
+	app.mtx.Lock()
+	defer app.mtx.Unlock()
+
+	res := app.Application.DeliverTx(params)
+	return app.callback(
+		types.ToRequestDeliverTx(params),
+		types.ToResponseDeliverTx(res),
+	), nil
+}
+
 func (app *localClient) CheckTxAsync(ctx context.Context, req types.RequestCheckTx) (*ReqRes, error) {
 	app.mtx.Lock()
 	defer app.mtx.Unlock()
@@ -121,6 +132,28 @@ func (app *localClient) InitChainAsync(ctx context.Context, req types.RequestIni
 	), nil
 }
 
+func (app *localClient) BeginBlockAsync(ctx context.Context, req types.RequestBeginBlock) (*ReqRes, error) {
+	app.mtx.Lock()
+	defer app.mtx.Unlock()
+
+	res := app.Application.BeginBlock(req)
+	return app.callback(
+		types.ToRequestBeginBlock(req),
+		types.ToResponseBeginBlock(res),
+	), nil
+}
+
+func (app *localClient) EndBlockAsync(ctx context.Context, req types.RequestEndBlock) (*ReqRes, error) {
+	app.mtx.Lock()
+	defer app.mtx.Unlock()
+
+	res := app.Application.EndBlock(req)
+	return app.callback(
+		types.ToRequestEndBlock(req),
+		types.ToResponseEndBlock(res),
+	), nil
+}
+
 func (app *localClient) ListSnapshotsAsync(ctx context.Context, req types.RequestListSnapshots) (*ReqRes, error) {
 	app.mtx.Lock()
 	defer app.mtx.Unlock()
@@ -171,20 +204,6 @@ func (app *localClient) ApplySnapshotChunkAsync(
 	), nil
 }
 
-func (app *localClient) FinalizeBlockAsync(
-	ctx context.Context,
-	req types.RequestFinalizeBlock,
-) (*ReqRes, error) {
-	app.mtx.Lock()
-	defer app.mtx.Unlock()
-
-	res := app.Application.FinalizeBlock(req)
-	return app.callback(
-		types.ToRequestFinalizeBlock(req),
-		types.ToResponseFinalizeBlock(res),
-	), nil
-}
-
 //-------------------------------------------------------
 
 func (app *localClient) FlushSync(ctx context.Context) error {
@@ -203,6 +222,18 @@ func (app *localClient) InfoSync(ctx context.Context, req types.RequestInfo) (*t
 	return &res, nil
 }
 
+func (app *localClient) DeliverTxSync(
+	ctx context.Context,
+	req types.RequestDeliverTx,
+) (*types.ResponseDeliverTx, error) {
+
+	app.mtx.Lock()
+	defer app.mtx.Unlock()
+
+	res := app.Application.DeliverTx(req)
+	return &res, nil
+}
+
 func (app *localClient) CheckTxSync(
 	ctx context.Context,
 	req types.RequestCheckTx,
@@ -245,6 +276,30 @@ func (app *localClient) InitChainSync(
 	return &res, nil
 }
 
+func (app *localClient) BeginBlockSync(
+	ctx context.Context,
+	req types.RequestBeginBlock,
+) (*types.ResponseBeginBlock, error) {
+
+	app.mtx.Lock()
+	defer app.mtx.Unlock()
+
+	res := app.Application.BeginBlock(req)
+	return &res, nil
+}
+
+func (app *localClient) EndBlockSync(
+	ctx context.Context,
+	req types.RequestEndBlock,
+) (*types.ResponseEndBlock, error) {
+
+	app.mtx.Lock()
+	defer app.mtx.Unlock()
+
+	res := app.Application.EndBlock(req)
+	return &res, nil
+}
+
 func (app *localClient) ListSnapshotsSync(
 	ctx context.Context,
 	req types.RequestListSnapshots,
@@ -291,17 +346,6 @@ func (app *localClient) ApplySnapshotChunkSync(
 	return &res, nil
 }
 
-func (app *localClient) FinalizeBlockSync(
-	ctx context.Context,
-	req types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
-
-	app.mtx.Lock()
-	defer app.mtx.Unlock()
-
-	res := app.Application.FinalizeBlock(req)
-	return &res, nil
-}
-
 //-------------------------------------------------------
 
 func (app *localClient) callback(req *types.Request, res *types.Response) *ReqRes {
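Taken together, the restored methods reinstate the classic per-block call order, which the `makeApplyBlock` test further down exercises directly. A minimal sketch against the kvstore example app; the empty request values and the height are placeholders, not taken from this diff.

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/abci/example/kvstore"
	"github.com/tendermint/tendermint/abci/types"
)

func main() {
	app := kvstore.NewApplication()

	app.BeginBlock(types.RequestBeginBlock{})                     // open the block
	r := app.DeliverTx(types.RequestDeliverTx{Tx: []byte("k=v")}) // one call per tx
	fmt.Println(r.IsErr())                                        // false on success
	app.EndBlock(types.RequestEndBlock{Height: 1})                // collect validator updates
	app.Commit()                                                  // persist the app state
}
```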
@@ -1,4 +1,4 @@
-// Code generated by mockery 2.9.0. DO NOT EDIT.
+// Code generated by mockery v0.0.0-dev. DO NOT EDIT.
 
 package mocks
 
@@ -65,6 +65,52 @@ func (_m *Client) ApplySnapshotChunkSync(_a0 context.Context, _a1 types.RequestA
 	return r0, r1
 }
 
+// BeginBlockAsync provides a mock function with given fields: _a0, _a1
+func (_m *Client) BeginBlockAsync(_a0 context.Context, _a1 types.RequestBeginBlock) (*abcicli.ReqRes, error) {
+	ret := _m.Called(_a0, _a1)
+
+	var r0 *abcicli.ReqRes
+	if rf, ok := ret.Get(0).(func(context.Context, types.RequestBeginBlock) *abcicli.ReqRes); ok {
+		r0 = rf(_a0, _a1)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*abcicli.ReqRes)
+		}
+	}
+
+	var r1 error
+	if rf, ok := ret.Get(1).(func(context.Context, types.RequestBeginBlock) error); ok {
+		r1 = rf(_a0, _a1)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
+// BeginBlockSync provides a mock function with given fields: _a0, _a1
+func (_m *Client) BeginBlockSync(_a0 context.Context, _a1 types.RequestBeginBlock) (*types.ResponseBeginBlock, error) {
+	ret := _m.Called(_a0, _a1)
+
+	var r0 *types.ResponseBeginBlock
+	if rf, ok := ret.Get(0).(func(context.Context, types.RequestBeginBlock) *types.ResponseBeginBlock); ok {
+		r0 = rf(_a0, _a1)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*types.ResponseBeginBlock)
+		}
+	}
+
+	var r1 error
+	if rf, ok := ret.Get(1).(func(context.Context, types.RequestBeginBlock) error); ok {
+		r1 = rf(_a0, _a1)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
 // CheckTxAsync provides a mock function with given fields: _a0, _a1
 func (_m *Client) CheckTxAsync(_a0 context.Context, _a1 types.RequestCheckTx) (*abcicli.ReqRes, error) {
 	ret := _m.Called(_a0, _a1)
@@ -157,6 +203,52 @@ func (_m *Client) CommitSync(_a0 context.Context) (*types.ResponseCommit, error)
 	return r0, r1
 }
 
+// DeliverTxAsync provides a mock function with given fields: _a0, _a1
+func (_m *Client) DeliverTxAsync(_a0 context.Context, _a1 types.RequestDeliverTx) (*abcicli.ReqRes, error) {
+	ret := _m.Called(_a0, _a1)
+
+	var r0 *abcicli.ReqRes
+	if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *abcicli.ReqRes); ok {
+		r0 = rf(_a0, _a1)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*abcicli.ReqRes)
+		}
+	}
+
+	var r1 error
+	if rf, ok := ret.Get(1).(func(context.Context, types.RequestDeliverTx) error); ok {
+		r1 = rf(_a0, _a1)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
+// DeliverTxSync provides a mock function with given fields: _a0, _a1
+func (_m *Client) DeliverTxSync(_a0 context.Context, _a1 types.RequestDeliverTx) (*types.ResponseDeliverTx, error) {
+	ret := _m.Called(_a0, _a1)
+
+	var r0 *types.ResponseDeliverTx
+	if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *types.ResponseDeliverTx); ok {
+		r0 = rf(_a0, _a1)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*types.ResponseDeliverTx)
+		}
+	}
+
+	var r1 error
+	if rf, ok := ret.Get(1).(func(context.Context, types.RequestDeliverTx) error); ok {
+		r1 = rf(_a0, _a1)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
 // EchoAsync provides a mock function with given fields: ctx, msg
 func (_m *Client) EchoAsync(ctx context.Context, msg string) (*abcicli.ReqRes, error) {
 	ret := _m.Called(ctx, msg)
@@ -203,6 +295,52 @@ func (_m *Client) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho
 	return r0, r1
 }
 
+// EndBlockAsync provides a mock function with given fields: _a0, _a1
+func (_m *Client) EndBlockAsync(_a0 context.Context, _a1 types.RequestEndBlock) (*abcicli.ReqRes, error) {
+	ret := _m.Called(_a0, _a1)
+
+	var r0 *abcicli.ReqRes
+	if rf, ok := ret.Get(0).(func(context.Context, types.RequestEndBlock) *abcicli.ReqRes); ok {
+		r0 = rf(_a0, _a1)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*abcicli.ReqRes)
+		}
+	}
+
+	var r1 error
+	if rf, ok := ret.Get(1).(func(context.Context, types.RequestEndBlock) error); ok {
+		r1 = rf(_a0, _a1)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
+// EndBlockSync provides a mock function with given fields: _a0, _a1
+func (_m *Client) EndBlockSync(_a0 context.Context, _a1 types.RequestEndBlock) (*types.ResponseEndBlock, error) {
+	ret := _m.Called(_a0, _a1)
+
+	var r0 *types.ResponseEndBlock
+	if rf, ok := ret.Get(0).(func(context.Context, types.RequestEndBlock) *types.ResponseEndBlock); ok {
+		r0 = rf(_a0, _a1)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*types.ResponseEndBlock)
+		}
+	}
+
+	var r1 error
+	if rf, ok := ret.Get(1).(func(context.Context, types.RequestEndBlock) error); ok {
+		r1 = rf(_a0, _a1)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
 // Error provides a mock function with given fields:
 func (_m *Client) Error() error {
 	ret := _m.Called()
@@ -217,52 +355,6 @@ func (_m *Client) Error() error {
 	return r0
 }
 
-// FinalizeBlockAsync provides a mock function with given fields: _a0, _a1
-func (_m *Client) FinalizeBlockAsync(_a0 context.Context, _a1 types.RequestFinalizeBlock) (*abcicli.ReqRes, error) {
-	ret := _m.Called(_a0, _a1)
-
-	var r0 *abcicli.ReqRes
-	if rf, ok := ret.Get(0).(func(context.Context, types.RequestFinalizeBlock) *abcicli.ReqRes); ok {
-		r0 = rf(_a0, _a1)
-	} else {
-		if ret.Get(0) != nil {
-			r0 = ret.Get(0).(*abcicli.ReqRes)
-		}
-	}
-
-	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context, types.RequestFinalizeBlock) error); ok {
-		r1 = rf(_a0, _a1)
-	} else {
-		r1 = ret.Error(1)
-	}
-
-	return r0, r1
-}
-
-// FinalizeBlockSync provides a mock function with given fields: _a0, _a1
-func (_m *Client) FinalizeBlockSync(_a0 context.Context, _a1 types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
-	ret := _m.Called(_a0, _a1)
-
-	var r0 *types.ResponseFinalizeBlock
-	if rf, ok := ret.Get(0).(func(context.Context, types.RequestFinalizeBlock) *types.ResponseFinalizeBlock); ok {
-		r0 = rf(_a0, _a1)
-	} else {
-		if ret.Get(0) != nil {
-			r0 = ret.Get(0).(*types.ResponseFinalizeBlock)
-		}
-	}
-
-	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context, types.RequestFinalizeBlock) error); ok {
-		r1 = rf(_a0, _a1)
-	} else {
-		r1 = ret.Error(1)
-	}
-
-	return r0, r1
-}
-
 // FlushAsync provides a mock function with given fields: _a0
 func (_m *Client) FlushAsync(_a0 context.Context) (*abcicli.ReqRes, error) {
 	ret := _m.Called(_a0)
@@ -245,6 +245,10 @@ func (cli *socketClient) InfoAsync(ctx context.Context, req types.RequestInfo) (
 	return cli.queueRequestAsync(ctx, types.ToRequestInfo(req))
 }
 
+func (cli *socketClient) DeliverTxAsync(ctx context.Context, req types.RequestDeliverTx) (*ReqRes, error) {
+	return cli.queueRequestAsync(ctx, types.ToRequestDeliverTx(req))
+}
+
 func (cli *socketClient) CheckTxAsync(ctx context.Context, req types.RequestCheckTx) (*ReqRes, error) {
 	return cli.queueRequestAsync(ctx, types.ToRequestCheckTx(req))
 }
@@ -261,6 +265,14 @@ func (cli *socketClient) InitChainAsync(ctx context.Context, req types.RequestIn
 	return cli.queueRequestAsync(ctx, types.ToRequestInitChain(req))
 }
 
+func (cli *socketClient) BeginBlockAsync(ctx context.Context, req types.RequestBeginBlock) (*ReqRes, error) {
+	return cli.queueRequestAsync(ctx, types.ToRequestBeginBlock(req))
+}
+
+func (cli *socketClient) EndBlockAsync(ctx context.Context, req types.RequestEndBlock) (*ReqRes, error) {
+	return cli.queueRequestAsync(ctx, types.ToRequestEndBlock(req))
+}
+
 func (cli *socketClient) ListSnapshotsAsync(ctx context.Context, req types.RequestListSnapshots) (*ReqRes, error) {
 	return cli.queueRequestAsync(ctx, types.ToRequestListSnapshots(req))
 }
@@ -283,13 +295,6 @@ func (cli *socketClient) ApplySnapshotChunkAsync(
 	return cli.queueRequestAsync(ctx, types.ToRequestApplySnapshotChunk(req))
 }
 
-func (cli *socketClient) FinalizeBlockAsync(
-	ctx context.Context,
-	req types.RequestFinalizeBlock,
-) (*ReqRes, error) {
-	return cli.queueRequestAsync(ctx, types.ToRequestFinalizeBlock(req))
-}
-
 //----------------------------------------
 
 func (cli *socketClient) FlushSync(ctx context.Context) error {
@@ -336,6 +341,18 @@ func (cli *socketClient) InfoSync(
 	return reqres.Response.GetInfo(), nil
 }
 
+func (cli *socketClient) DeliverTxSync(
+	ctx context.Context,
+	req types.RequestDeliverTx,
+) (*types.ResponseDeliverTx, error) {
+
+	reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestDeliverTx(req))
+	if err != nil {
+		return nil, err
+	}
+	return reqres.Response.GetDeliverTx(), nil
+}
+
 func (cli *socketClient) CheckTxSync(
 	ctx context.Context,
 	req types.RequestCheckTx,
@@ -378,6 +395,30 @@ func (cli *socketClient) InitChainSync(
 	return reqres.Response.GetInitChain(), nil
 }
 
+func (cli *socketClient) BeginBlockSync(
+	ctx context.Context,
+	req types.RequestBeginBlock,
+) (*types.ResponseBeginBlock, error) {
+
+	reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestBeginBlock(req))
+	if err != nil {
+		return nil, err
+	}
+	return reqres.Response.GetBeginBlock(), nil
+}
+
+func (cli *socketClient) EndBlockSync(
+	ctx context.Context,
+	req types.RequestEndBlock,
+) (*types.ResponseEndBlock, error) {
+
+	reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestEndBlock(req))
+	if err != nil {
+		return nil, err
+	}
+	return reqres.Response.GetEndBlock(), nil
+}
+
 func (cli *socketClient) ListSnapshotsSync(
 	ctx context.Context,
 	req types.RequestListSnapshots,
@@ -424,17 +465,6 @@ func (cli *socketClient) ApplySnapshotChunkSync(
 	return reqres.Response.GetApplySnapshotChunk(), nil
 }
 
-func (cli *socketClient) FinalizeBlockSync(
-	ctx context.Context,
-	req types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
-
-	reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestFinalizeBlock(req))
-	if err != nil {
-		return nil, err
-	}
-	return reqres.Response.GetFinalizeBlock(), nil
-}
-
 //----------------------------------------
 
 // queueRequest enqueues req onto the queue. If the queue is full, it ether
@@ -539,6 +569,8 @@ func resMatchesReq(req *types.Request, res *types.Response) (ok bool) {
 		_, ok = res.Value.(*types.Response_Flush)
 	case *types.Request_Info:
 		_, ok = res.Value.(*types.Response_Info)
+	case *types.Request_DeliverTx:
+		_, ok = res.Value.(*types.Response_DeliverTx)
 	case *types.Request_CheckTx:
 		_, ok = res.Value.(*types.Response_CheckTx)
 	case *types.Request_Commit:
@@ -547,6 +579,10 @@ func resMatchesReq(req *types.Request, res *types.Response) (ok bool) {
 		_, ok = res.Value.(*types.Response_Query)
 	case *types.Request_InitChain:
 		_, ok = res.Value.(*types.Response_InitChain)
+	case *types.Request_BeginBlock:
+		_, ok = res.Value.(*types.Response_BeginBlock)
+	case *types.Request_EndBlock:
+		_, ok = res.Value.(*types.Response_EndBlock)
 	case *types.Request_ApplySnapshotChunk:
 		_, ok = res.Value.(*types.Response_ApplySnapshotChunk)
 	case *types.Request_LoadSnapshotChunk:
@@ -555,8 +591,6 @@ func resMatchesReq(req *types.Request, res *types.Response) (ok bool) {
 		_, ok = res.Value.(*types.Response_ListSnapshots)
 	case *types.Request_OfferSnapshot:
 		_, ok = res.Value.(*types.Response_OfferSnapshot)
-	case *types.Request_FinalizeBlock:
-		_, ok = res.Value.(*types.Response_FinalizeBlock)
 	}
 	return ok
 }
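The new `resMatchesReq` cases guard the socket client's request/response pairing: a queued `DeliverTx` request may only be completed by a `DeliverTx` response. A small standalone illustration of that type-switch check, using only the `types` helpers visible in this diff:

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/abci/types"
)

func main() {
	req := types.ToRequestDeliverTx(types.RequestDeliverTx{Tx: []byte("test")})
	res := types.ToResponseDeliverTx(types.ResponseDeliverTx{})

	// The same shape of check resMatchesReq performs for each request kind:
	// both oneof values must carry the DeliverTx variant.
	_, reqIsDeliverTx := req.Value.(*types.Request_DeliverTx)
	_, resIsDeliverTx := res.Value.(*types.Response_DeliverTx)
	fmt.Println(reqIsDeliverTx && resIsDeliverTx) // true: response completes the request
}
```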
@@ -37,11 +37,11 @@ func TestProperSyncCalls(t *testing.T) {
 	resp := make(chan error, 1)
 	go func() {
 		// This is BeginBlockSync unrolled....
-		reqres, err := c.FinalizeBlockAsync(ctx, types.RequestFinalizeBlock{})
+		reqres, err := c.BeginBlockAsync(ctx, types.RequestBeginBlock{})
 		assert.NoError(t, err)
 		err = c.FlushSync(context.Background())
 		assert.NoError(t, err)
-		res := reqres.Response.GetFinalizeBlock()
+		res := reqres.Response.GetBeginBlock()
 		assert.NotNil(t, res)
 		resp <- c.Error()
 	}()
@@ -73,7 +73,7 @@ func TestHangingSyncCalls(t *testing.T) {
 	resp := make(chan error, 1)
 	go func() {
 		// Start BeginBlock and flush it
-		reqres, err := c.FinalizeBlockAsync(ctx, types.RequestFinalizeBlock{})
+		reqres, err := c.BeginBlockAsync(ctx, types.RequestBeginBlock{})
 		assert.NoError(t, err)
 		flush, err := c.FlushAsync(ctx)
 		assert.NoError(t, err)
@@ -84,7 +84,7 @@ func TestHangingSyncCalls(t *testing.T) {
 	err = s.Stop()
 	assert.NoError(t, err)
 
-	// wait for the response from FinalizeBlock
+	// wait for the response from BeginBlock
 	reqres.Wait()
 	flush.Wait()
 	resp <- c.Error()
@@ -121,7 +121,7 @@ type slowApp struct {
 	types.BaseApplication
 }
 
-func (slowApp) FinalizeBlock(req types.RequestFinalizeBlock) types.ResponseFinalizeBlock {
+func (slowApp) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock {
 	time.Sleep(200 * time.Millisecond)
-	return types.ResponseFinalizeBlock{}
+	return types.ResponseBeginBlock{}
 }
@@ -17,7 +17,6 @@ import (
 
 	abcicli "github.com/tendermint/tendermint/abci/client"
 	"github.com/tendermint/tendermint/abci/example/code"
-	"github.com/tendermint/tendermint/abci/example/counter"
 	"github.com/tendermint/tendermint/abci/example/kvstore"
 	"github.com/tendermint/tendermint/abci/server"
 	servertest "github.com/tendermint/tendermint/abci/tests/server"
@@ -47,9 +46,6 @@ var (
 	flagHeight int
 	flagProve  bool
 
-	// counter
-	flagSerial bool
-
 	// kvstore
 	flagPersist string
 )
@@ -61,9 +57,7 @@ var RootCmd = &cobra.Command{
 	PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
 
 		switch cmd.Use {
-		case "counter", "kvstore": // for the examples apps, don't pre-run
-			return nil
-		case "version": // skip running for version command
+		case "kvstore", "version":
 			return nil
 		}
 
@@ -135,10 +129,6 @@ func addQueryFlags() {
 		"whether or not to return a merkle proof of the query result")
 }
 
-func addCounterFlags() {
-	counterCmd.PersistentFlags().BoolVarP(&flagSerial, "serial", "", false, "enforce incrementing (serial) transactions")
-}
-
 func addKVStoreFlags() {
 	kvstoreCmd.PersistentFlags().StringVarP(&flagPersist, "persist", "", "", "directory to use for a database")
 }
@@ -157,8 +147,6 @@ func addCommands() {
 	RootCmd.AddCommand(queryCmd)
 
 	// examples
-	addCounterFlags()
-	RootCmd.AddCommand(counterCmd)
 	addKVStoreFlags()
 	RootCmd.AddCommand(kvstoreCmd)
 }
@@ -258,14 +246,6 @@ var queryCmd = &cobra.Command{
 	RunE: cmdQuery,
 }
 
-var counterCmd = &cobra.Command{
-	Use:   "counter",
-	Short: "ABCI demo example",
-	Long:  "ABCI demo example",
-	Args:  cobra.ExactArgs(0),
-	RunE:  cmdCounter,
-}
-
 var kvstoreCmd = &cobra.Command{
 	Use:   "kvstore",
 	Short: "ABCI demo example",
@@ -504,18 +484,16 @@ func cmdDeliverTx(cmd *cobra.Command, args []string) error {
 	if err != nil {
 		return err
 	}
-	res, err := client.FinalizeBlockSync(ctx, types.RequestFinalizeBlock{Txs: [][]byte{txBytes}})
+	res, err := client.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: txBytes})
 	if err != nil {
 		return err
 	}
-	for _, tx := range res.Txs {
-		printResponse(cmd, args, response{
-			Code: tx.Code,
-			Data: tx.Data,
-			Info: tx.Info,
-			Log:  tx.Log,
-		})
-	}
+	printResponse(cmd, args, response{
+		Code: res.Code,
+		Data: res.Data,
+		Info: res.Info,
+		Log:  res.Log,
+	})
 	return nil
 }
@@ -595,32 +573,6 @@ func cmdQuery(cmd *cobra.Command, args []string) error {
 	return nil
 }
 
-func cmdCounter(cmd *cobra.Command, args []string) error {
-	app := counter.NewApplication(flagSerial)
-	logger := log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
-
-	// Start the listener
-	srv, err := server.NewServer(flagAddress, flagAbci, app)
-	if err != nil {
-		return err
-	}
-	srv.SetLogger(logger.With("module", "abci-server"))
-	if err := srv.Start(); err != nil {
-		return err
-	}
-
-	// Stop upon receiving SIGTERM or CTRL-C.
-	tmos.TrapSignal(logger, func() {
-		// Cleanup
-		if err := srv.Stop(); err != nil {
-			logger.Error("Error while stopping server", "err", err)
-		}
-	})
-
-	// Run forever.
-	select {}
-}
-
 func cmdKVStore(cmd *cobra.Command, args []string) error {
 	logger := log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
 
@@ -1,92 +0,0 @@
-package counter
-
-import (
-	"encoding/binary"
-	"fmt"
-
-	"github.com/tendermint/tendermint/abci/example/code"
-	"github.com/tendermint/tendermint/abci/types"
-)
-
-type Application struct {
-	types.BaseApplication
-
-	hashCount int
-	txCount   int
-	serial    bool
-}
-
-func NewApplication(serial bool) *Application {
-	return &Application{serial: serial}
-}
-
-func (app *Application) Info(req types.RequestInfo) types.ResponseInfo {
-	return types.ResponseInfo{Data: fmt.Sprintf("{\"hashes\":%v,\"txs\":%v}", app.hashCount, app.txCount)}
-}
-
-func (app *Application) FinalizeBlock(req types.RequestFinalizeBlock) types.ResponseFinalizeBlock {
-	if app.serial {
-		for _, tx := range req.Txs {
-			if len(tx) > 8 {
-				return types.ResponseFinalizeBlock{Txs: []*types.ResponseDeliverTx{{
-					Code: code.CodeTypeEncodingError,
-					Log:  fmt.Sprintf("Max tx size is 8 bytes, got %d", len(tx))}},
-				}
-			}
-			tx8 := make([]byte, 8)
-			copy(tx8[len(tx8)-len(tx):], tx)
-			txValue := binary.BigEndian.Uint64(tx8)
-			if txValue != uint64(app.txCount) {
-				return types.ResponseFinalizeBlock{
-					Txs: []*types.ResponseDeliverTx{{
-						Code: code.CodeTypeBadNonce,
-						Log:  fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.txCount, txValue)}},
-				}
-			}
-		}
-	}
-	app.txCount++
-	return types.ResponseFinalizeBlock{Txs: []*types.ResponseDeliverTx{{Code: code.CodeTypeOK}}}
-}
-
-func (app *Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
-	if app.serial {
-		if len(req.Tx) > 8 {
-			return types.ResponseCheckTx{
-				Code: code.CodeTypeEncodingError,
-				Log:  fmt.Sprintf("Max tx size is 8 bytes, got %d", len(req.Tx))}
-		}
-		tx8 := make([]byte, 8)
-		copy(tx8[len(tx8)-len(req.Tx):], req.Tx)
-		txValue := binary.BigEndian.Uint64(tx8)
-		if txValue < uint64(app.txCount) {
-			return types.ResponseCheckTx{
-				Code: code.CodeTypeBadNonce,
-				Log:  fmt.Sprintf("Invalid nonce. Expected >= %v, got %v", app.txCount, txValue)}
-		}
-	}
-	return types.ResponseCheckTx{Code: code.CodeTypeOK}
-}
-
-func (app *Application) Commit() (resp types.ResponseCommit) {
-	app.hashCount++
-	if app.txCount == 0 {
-		return types.ResponseCommit{}
-	}
-	hash := make([]byte, 8)
-	binary.BigEndian.PutUint64(hash, uint64(app.txCount))
-	return types.ResponseCommit{Data: hash}
-}
-
-func (app *Application) Query(reqQuery types.RequestQuery) types.ResponseQuery {
-	switch reqQuery.Path {
-	case "hash":
-		return types.ResponseQuery{Value: []byte(fmt.Sprintf("%v", app.hashCount))}
-	case "tx":
-		return types.ResponseQuery{Value: []byte(fmt.Sprintf("%v", app.txCount))}
-	default:
-		return types.ResponseQuery{Log: fmt.Sprintf("Invalid query path. Expected hash or tx, got %v", reqQuery.Path)}
-	}
-}
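The deleted counter app's serial mode carried the expected nonce directly in the transaction bytes. Its decoding step, lifted from the removed file and made runnable standalone, left-pads a tx of up to 8 bytes into a big-endian uint64:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	tx := []byte{0x2a} // a 1-byte transaction carrying the nonce 42

	// Same decoding the deleted counter app used: left-pad to 8 bytes,
	// then read the result as a big-endian uint64.
	tx8 := make([]byte, 8)
	copy(tx8[len(tx8)-len(tx):], tx)
	txValue := binary.BigEndian.Uint64(tx8)
	fmt.Println(txValue) // 42
}
```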
@@ -76,22 +76,20 @@ func testStream(t *testing.T, app types.Application) {
 	client.SetResponseCallback(func(req *types.Request, res *types.Response) {
 		// Process response
 		switch r := res.Value.(type) {
-		case *types.Response_FinalizeBlock:
-			for _, tx := range r.FinalizeBlock.Txs {
-				counter++
-				if tx.Code != code.CodeTypeOK {
-					t.Error("DeliverTx failed with ret_code", tx.Code)
-				}
-				if counter > numDeliverTxs {
-					t.Fatalf("Too many DeliverTx responses. Got %d, expected %d", counter, numDeliverTxs)
-				}
-				if counter == numDeliverTxs {
-					go func() {
-						time.Sleep(time.Second * 1) // Wait for a bit to allow counter overflow
-						close(done)
-					}()
-					return
-				}
+		case *types.Response_DeliverTx:
+			counter++
+			if r.DeliverTx.Code != code.CodeTypeOK {
+				t.Error("DeliverTx failed with ret_code", r.DeliverTx.Code)
+			}
+			if counter > numDeliverTxs {
+				t.Fatalf("Too many DeliverTx responses. Got %d, expected %d", counter, numDeliverTxs)
+			}
+			if counter == numDeliverTxs {
+				go func() {
+					time.Sleep(time.Second * 1) // Wait for a bit to allow counter overflow
+					close(done)
+				}()
+				return
 			}
 		case *types.Response_Flush:
 			// ignore
@@ -105,8 +103,7 @@ func testStream(t *testing.T, app types.Application) {
 	// Write requests
 	for counter := 0; counter < numDeliverTxs; counter++ {
 		// Send request
-		tx := []byte("test")
-		_, err = client.FinalizeBlockAsync(ctx, types.RequestFinalizeBlock{Txs: [][]byte{tx}})
+		_, err = client.DeliverTxAsync(ctx, types.RequestDeliverTx{Tx: []byte("test")})
 		require.NoError(t, err)
 
 		// Sometimes send flush messages
@@ -166,25 +163,22 @@ func testGRPCSync(t *testing.T, app types.ABCIApplicationServer) {
 	// Write requests
 	for counter := 0; counter < numDeliverTxs; counter++ {
 		// Send request
-		txt := []byte("test")
-		response, err := client.FinalizeBlock(context.Background(), &types.RequestFinalizeBlock{Txs: [][]byte{txt}})
+		response, err := client.DeliverTx(context.Background(), &types.RequestDeliverTx{Tx: []byte("test")})
 		if err != nil {
 			t.Fatalf("Error in GRPC DeliverTx: %v", err.Error())
 		}
 		counter++
-		for _, tx := range response.Txs {
-			if tx.Code != code.CodeTypeOK {
-				t.Error("DeliverTx failed with ret_code", tx.Code)
-			}
-			if counter > numDeliverTxs {
-				t.Fatal("Too many DeliverTx responses")
-			}
-			t.Log("response", counter)
-			if counter == numDeliverTxs {
-				go func() {
-					time.Sleep(time.Second * 1) // Wait for a bit to allow counter overflow
-				}()
-			}
+		if response.Code != code.CodeTypeOK {
+			t.Error("DeliverTx failed with ret_code", response.Code)
+		}
+		if counter > numDeliverTxs {
+			t.Fatal("Too many DeliverTx responses")
+		}
+		t.Log("response", counter)
+		if counter == numDeliverTxs {
+			go func() {
+				time.Sleep(time.Second * 1) // Wait for a bit to allow counter overflow
+			}()
+		}
 
 	}
@@ -86,40 +86,35 @@ func (app *Application) Info(req types.RequestInfo) (resInfo types.ResponseInfo)
 }
 
 // tx is either "key=value" or just arbitrary bytes
-func (app *Application) FinalizeBlock(req types.RequestFinalizeBlock) types.ResponseFinalizeBlock {
+func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
 	var key, value string
-	var txs = make([]*types.ResponseDeliverTx, len(req.Txs))
 
-	for i, tx := range req.Txs {
-		parts := bytes.Split(tx, []byte("="))
-		if len(parts) == 2 {
-			key, value = string(parts[0]), string(parts[1])
-		} else {
-			key, value = string(tx), string(tx)
-		}
-
-		err := app.state.db.Set(prefixKey([]byte(key)), []byte(value))
-		if err != nil {
-			panic(err)
-		}
-		app.state.Size++
-
-		events := []types.Event{
-			{
-				Type: "app",
-				Attributes: []types.EventAttribute{
-					{Key: "creator", Value: "Cosmoshi Netowoko", Index: true},
-					{Key: "key", Value: key, Index: true},
-					{Key: "index_key", Value: "index is working", Index: true},
-					{Key: "noindex_key", Value: "index is working", Index: false},
-				},
-			},
-		}
-
-		txs[i] = &types.ResponseDeliverTx{Code: code.CodeTypeOK, Events: events}
-	}
-
-	return types.ResponseFinalizeBlock{Txs: txs}
+	parts := bytes.Split(req.Tx, []byte("="))
+	if len(parts) == 2 {
+		key, value = string(parts[0]), string(parts[1])
+	} else {
+		key, value = string(req.Tx), string(req.Tx)
+	}
+
+	err := app.state.db.Set(prefixKey([]byte(key)), []byte(value))
+	if err != nil {
+		panic(err)
+	}
+	app.state.Size++
+
+	events := []types.Event{
+		{
+			Type: "app",
+			Attributes: []types.EventAttribute{
+				{Key: "creator", Value: "Cosmoshi Netowoko", Index: true},
+				{Key: "key", Value: key, Index: true},
+				{Key: "index_key", Value: "index is working", Index: true},
+				{Key: "noindex_key", Value: "index is working", Index: false},
+			},
+		},
+	}
+
+	return types.ResponseDeliverTx{Code: code.CodeTypeOK, Events: events}
 }
 
 func (app *Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
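The kvstore transaction format survives the interface change: a tx is either `key=value` or raw bytes stored under themselves. A standalone sketch of that parse rule as the diff shows it; `parseTx` is a hypothetical helper name introduced here for illustration:

```go
package main

import (
	"bytes"
	"fmt"
)

// parseTx applies the kvstore convention from this diff: "key=value"
// splits into a pair; anything else is stored under its own bytes.
func parseTx(tx []byte) (key, value string) {
	parts := bytes.Split(tx, []byte("="))
	if len(parts) == 2 {
		return string(parts[0]), string(parts[1])
	}
	return string(tx), string(tx)
}

func main() {
	k, v := parseTx([]byte("name=satoshi"))
	fmt.Println(k, v) // name satoshi
	k, v = parseTx([]byte("ping"))
	fmt.Println(k, v) // ping ping
}
```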
@@ -27,16 +27,12 @@ const (
 var ctx = context.Background()
 
 func testKVStore(t *testing.T, app types.Application, tx []byte, key, value string) {
-	req := types.RequestFinalizeBlock{Txs: [][]byte{tx}}
-	ar := app.FinalizeBlock(req)
-	for _, tx := range ar.Txs {
-		require.False(t, tx.IsErr(), ar)
-	}
+	req := types.RequestDeliverTx{Tx: tx}
+	ar := app.DeliverTx(req)
+	require.False(t, ar.IsErr(), ar)
 	// repeating tx doesn't raise error
-	ar = app.FinalizeBlock(req)
-	for _, tx := range ar.Txs {
-		require.False(t, tx.IsErr(), ar)
-	}
+	ar = app.DeliverTx(req)
+	require.False(t, ar.IsErr(), ar)
 	// commit
 	app.Commit()
 
@@ -113,7 +109,8 @@ func TestPersistentKVStoreInfo(t *testing.T) {
 	header := tmproto.Header{
 		Height: height,
 	}
-	kvstore.FinalizeBlock(types.RequestFinalizeBlock{Hash: hash, Header: header})
+	kvstore.BeginBlock(types.RequestBeginBlock{Hash: hash, Header: header})
+	kvstore.EndBlock(types.RequestEndBlock{Height: header.Height})
 	kvstore.Commit()
 
 	resInfo = kvstore.Info(types.RequestInfo{})
@@ -203,15 +200,16 @@ func makeApplyBlock(
 		Height: height,
 	}
 
-	resFinalizeBlock := kvstore.FinalizeBlock(types.RequestFinalizeBlock{
-		Hash:   hash,
-		Header: header,
-		Txs:    txs,
-	})
-
+	kvstore.BeginBlock(types.RequestBeginBlock{Hash: hash, Header: header})
+	for _, tx := range txs {
+		if r := kvstore.DeliverTx(types.RequestDeliverTx{Tx: tx}); r.IsErr() {
+			t.Fatal(r)
+		}
+	}
+	resEndBlock := kvstore.EndBlock(types.RequestEndBlock{Height: header.Height})
 	kvstore.Commit()
 
-	valsEqual(t, diff, resFinalizeBlock.ValidatorUpdates)
+	valsEqual(t, diff, resEndBlock.ValidatorUpdates)
 
 }
@@ -328,16 +326,13 @@ func runClientTests(t *testing.T, client abcicli.Client) {
 }
 
 func testClient(t *testing.T, app abcicli.Client, tx []byte, key, value string) {
-	ar, err := app.FinalizeBlockSync(ctx, types.RequestFinalizeBlock{Txs: [][]byte{tx}})
-	for _, tx := range ar.Txs {
-		require.False(t, tx.IsErr(), ar)
-	}
-
-	ar, err = app.FinalizeBlockSync(ctx, types.RequestFinalizeBlock{Txs: [][]byte{tx}}) // repeating tx doesn't raise error
+	ar, err := app.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: tx})
 	require.NoError(t, err)
-	for _, tx := range ar.Txs {
-		require.False(t, tx.IsErr(), ar)
-	}
+	require.False(t, ar.IsErr(), ar)
+	// repeating tx doesn't raise error
+	ar, err = app.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: tx})
+	require.NoError(t, err)
+	require.False(t, ar.IsErr(), ar)
 	// commit
 	_, err = app.CommitSync(ctx)
 	require.NoError(t, err)
@@ -66,19 +66,19 @@ func (app *PersistentKVStoreApplication) Info(req types.RequestInfo) types.Respo
 	return res
 }
 
-// // tx is either "val:pubkey!power" or "key=value" or just arbitrary bytes
-// func (app *PersistentKVStoreApplication) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
-// 	// if it starts with "val:", update the validator set
-// 	// format is "val:pubkey!power"
-// 	if isValidatorTx(req.Tx) {
-// 		// update validators in the merkle tree
-// 		// and in app.ValUpdates
-// 		return app.execValidatorTx(req.Tx)
-// 	}
+// tx is either "val:pubkey!power" or "key=value" or just arbitrary bytes
+func (app *PersistentKVStoreApplication) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
+	// if it starts with "val:", update the validator set
+	// format is "val:pubkey!power"
+	if isValidatorTx(req.Tx) {
+		// update validators in the merkle tree
+		// and in app.ValUpdates
+		return app.execValidatorTx(req.Tx)
+	}
 
-// 	// otherwise, update the key-value store
-// 	return app.app.DeliverTx(req)
-// }
+	// otherwise, update the key-value store
+	return app.app.DeliverTx(req)
+}
 
 func (app *PersistentKVStoreApplication) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
 	return app.app.CheckTx(req)
@@ -119,40 +119,8 @@ func (app *PersistentKVStoreApplication) InitChain(req types.RequestInitChain) t
 	return types.ResponseInitChain{}
 }
 
-func (app *PersistentKVStoreApplication) ListSnapshots(
-	req types.RequestListSnapshots) types.ResponseListSnapshots {
-	return types.ResponseListSnapshots{}
-}
-
-func (app *PersistentKVStoreApplication) LoadSnapshotChunk(
-	req types.RequestLoadSnapshotChunk) types.ResponseLoadSnapshotChunk {
-	return types.ResponseLoadSnapshotChunk{}
-}
-
-func (app *PersistentKVStoreApplication) OfferSnapshot(
-	req types.RequestOfferSnapshot) types.ResponseOfferSnapshot {
-	return types.ResponseOfferSnapshot{Result: types.ResponseOfferSnapshot_ABORT}
-}
-
-func (app *PersistentKVStoreApplication) ApplySnapshotChunk(
-	req types.RequestApplySnapshotChunk) types.ResponseApplySnapshotChunk {
-	return types.ResponseApplySnapshotChunk{Result: types.ResponseApplySnapshotChunk_ABORT}
-}
-
-func (app *PersistentKVStoreApplication) FinalizeBlock(
-	req types.RequestFinalizeBlock) types.ResponseFinalizeBlock {
-	// for i, tx := range req.Txs {
-	// 	// if it starts with "val:", update the validator set
-	// 	// format is "val:pubkey!power"
-	// 	if isValidatorTx(tx) {
-	// 		// update validators in the merkle tree
-	// 		// and in app.ValUpdates
-	// 		return app.execValidatorTx(req.Tx)
-	// 	}
-
-	// 	// otherwise, update the key-value store
-	// 	return app.app.DeliverTx(tx)
-	// }
-	// Track the block hash and header information
+// Track the block hash and header information
+func (app *PersistentKVStoreApplication) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock {
 	// reset valset changes
 	app.ValUpdates = make([]types.ValidatorUpdate, 0)
 
@@ -174,7 +142,32 @@ func (app *PersistentKVStoreApplication) FinalizeBlock(
 		}
 	}
 
-	return types.ResponseFinalizeBlock{ValidatorUpdates: app.ValUpdates}
+	return types.ResponseBeginBlock{}
 }
 
+// Update the validator set
+func (app *PersistentKVStoreApplication) EndBlock(req types.RequestEndBlock) types.ResponseEndBlock {
+	return types.ResponseEndBlock{ValidatorUpdates: app.ValUpdates}
+}
+
+func (app *PersistentKVStoreApplication) ListSnapshots(
+	req types.RequestListSnapshots) types.ResponseListSnapshots {
+	return types.ResponseListSnapshots{}
+}
+
+func (app *PersistentKVStoreApplication) LoadSnapshotChunk(
+	req types.RequestLoadSnapshotChunk) types.ResponseLoadSnapshotChunk {
+	return types.ResponseLoadSnapshotChunk{}
+}
+
+func (app *PersistentKVStoreApplication) OfferSnapshot(
+	req types.RequestOfferSnapshot) types.ResponseOfferSnapshot {
+	return types.ResponseOfferSnapshot{Result: types.ResponseOfferSnapshot_ABORT}
+}
+
+func (app *PersistentKVStoreApplication) ApplySnapshotChunk(
+	req types.RequestApplySnapshotChunk) types.ResponseApplySnapshotChunk {
+	return types.ResponseApplySnapshotChunk{Result: types.ResponseApplySnapshotChunk_ABORT}
+}
+
 //---------------------------------------------
@@ -200,6 +200,9 @@ func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types
	case *types.Request_Info:
		res := s.app.Info(*r.Info)
		responses <- types.ToResponseInfo(res)
	case *types.Request_DeliverTx:
		res := s.app.DeliverTx(*r.DeliverTx)
		responses <- types.ToResponseDeliverTx(res)
	case *types.Request_CheckTx:
		res := s.app.CheckTx(*r.CheckTx)
		responses <- types.ToResponseCheckTx(res)
@@ -212,6 +215,12 @@ func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types
	case *types.Request_InitChain:
		res := s.app.InitChain(*r.InitChain)
		responses <- types.ToResponseInitChain(res)
	case *types.Request_BeginBlock:
		res := s.app.BeginBlock(*r.BeginBlock)
		responses <- types.ToResponseBeginBlock(res)
	case *types.Request_EndBlock:
		res := s.app.EndBlock(*r.EndBlock)
		responses <- types.ToResponseEndBlock(res)
	case *types.Request_ListSnapshots:
		res := s.app.ListSnapshots(*r.ListSnapshots)
		responses <- types.ToResponseListSnapshots(res)
@@ -224,9 +233,6 @@ func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types
	case *types.Request_ApplySnapshotChunk:
		res := s.app.ApplySnapshotChunk(*r.ApplySnapshotChunk)
		responses <- types.ToResponseApplySnapshotChunk(res)
	case *types.Request_FinalizeBlock:
		res := s.app.FinalizeBlock(*r.FinalizeBlock)
		responses <- types.ToResponseFinalizeBlock(res)
	default:
		responses <- types.ToResponseException("Unknown request")
	}
@@ -51,22 +51,20 @@ func Commit(client abcicli.Client, hashExp []byte) error {
	return nil
}

func FinalizeBlock(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
	res, _ := client.FinalizeBlockSync(ctx, types.RequestFinalizeBlock{Txs: [][]byte{txBytes}})
	for _, tx := range res.Txs {
		code, data, log := tx.Code, tx.Data, tx.Log
		if code != codeExp {
			fmt.Println("Failed test: DeliverTx")
			fmt.Printf("DeliverTx response code was unexpected. Got %v expected %v. Log: %v\n",
				code, codeExp, log)
			return errors.New("deliverTx error")
		}
		if !bytes.Equal(data, dataExp) {
			fmt.Println("Failed test: DeliverTx")
			fmt.Printf("DeliverTx response data was unexpected. Got %X expected %X\n",
				data, dataExp)
			return errors.New("deliverTx error")
		}
func DeliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
	res, _ := client.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: txBytes})
	code, data, log := res.Code, res.Data, res.Log
	if code != codeExp {
		fmt.Println("Failed test: DeliverTx")
		fmt.Printf("DeliverTx response code was unexpected. Got %v expected %v. Log: %v\n",
			code, codeExp, log)
		return errors.New("deliverTx error")
	}
	if !bytes.Equal(data, dataExp) {
		fmt.Println("Failed test: DeliverTx")
		fmt.Printf("DeliverTx response data was unexpected. Got %X expected %X\n",
			data, dataExp)
		return errors.New("deliverTx error")
	}
	fmt.Println("Passed test: DeliverTx")
	return nil
@@ -1,68 +0,0 @@
package main

import (
	"bytes"
	"context"
	"fmt"

	abcicli "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/libs/log"
)

var ctx = context.Background()

func startClient(abciType string) abcicli.Client {
	// Start client
	client, err := abcicli.NewClient("tcp://127.0.0.1:26658", abciType, true)
	if err != nil {
		panic(err.Error())
	}
	logger := log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
	client.SetLogger(logger.With("module", "abcicli"))
	if err := client.Start(); err != nil {
		panicf("connecting to abci_app: %v", err.Error())
	}

	return client
}

func commit(client abcicli.Client, hashExp []byte) {
	res, err := client.CommitSync(ctx)
	if err != nil {
		panicf("client error: %v", err)
	}
	if !bytes.Equal(res.Data, hashExp) {
		panicf("Commit hash was unexpected. Got %X expected %X", res.Data, hashExp)
	}
}

type tx struct {
	Data    []byte
	CodeExp uint32
	DataExp []byte
}

func finalizeBlock(client abcicli.Client, txs []tx) {
	var txsData = make([][]byte, len(txs))
	for i, tx := range txs {
		txsData[i] = tx.Data
	}

	res, err := client.FinalizeBlockSync(ctx, types.RequestFinalizeBlock{Txs: txsData})
	if err != nil {
		panicf("client error: %v", err)
	}
	for i, tx := range res.Txs {
		if tx.Code != txs[i].CodeExp {
			panicf("DeliverTx response code was unexpected. Got %v expected %v. Log: %v", tx.Code, txs[i].CodeExp, tx.Log)
		}
		if !bytes.Equal(tx.Data, txs[i].DataExp) {
			panicf("DeliverTx response data was unexpected. Got %X expected %X", tx.Data, txs[i].DataExp)
		}
	}
}

func panicf(format string, a ...interface{}) {
	panic(fmt.Sprintf(format, a...))
}
@@ -1,95 +0,0 @@
package main

import (
	"fmt"
	"log"
	"os"
	"os/exec"
	"time"

	"github.com/tendermint/tendermint/abci/types"
)

var abciType string

func init() {
	abciType = os.Getenv("ABCI")
	if abciType == "" {
		abciType = "socket"
	}
}

func main() {
	testCounter()
}

const (
	maxABCIConnectTries = 10
)

func ensureABCIIsUp(typ string, n int) error {
	var err error
	cmdString := "abci-cli echo hello"
	if typ == "grpc" {
		cmdString = "abci-cli --abci grpc echo hello"
	}

	for i := 0; i < n; i++ {
		cmd := exec.Command("bash", "-c", cmdString)
		_, err = cmd.CombinedOutput()
		if err == nil {
			break
		}
		time.Sleep(500 * time.Millisecond)
	}
	return err
}

func testCounter() {
	abciApp := os.Getenv("ABCI_APP")
	if abciApp == "" {
		panic("No ABCI_APP specified")
	}

	fmt.Printf("Running %s test with abci=%s\n", abciApp, abciType)
	subCommand := fmt.Sprintf("abci-cli %s", abciApp)
	cmd := exec.Command("bash", "-c", subCommand)
	cmd.Stdout = os.Stdout
	if err := cmd.Start(); err != nil {
		log.Fatalf("starting %q err: %v", abciApp, err)
	}
	defer func() {
		if err := cmd.Process.Kill(); err != nil {
			log.Printf("error on process kill: %v", err)
		}
		if err := cmd.Wait(); err != nil {
			log.Printf("error while waiting for cmd to exit: %v", err)
		}
	}()

	if err := ensureABCIIsUp(abciType, maxABCIConnectTries); err != nil {
		log.Fatalf("echo failed: %v", err) //nolint:gocritic
	}

	client := startClient(abciType)
	defer func() {
		if err := client.Stop(); err != nil {
			log.Printf("error trying client stop: %v", err)
		}
	}()

	// commit(client, nil)
	// deliverTx(client, []byte("abc"), code.CodeTypeBadNonce, nil)
	commit(client, nil)
	finalizeBlock(client, []tx{{Data: []byte{0x00}, CodeExp: types.CodeTypeOK, DataExp: nil}})
	commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 1})
	// deliverTx(client, []byte{0x00}, code.CodeTypeBadNonce, nil)
	txs := []tx{
		{Data: []byte{0x01}, DataExp: nil, CodeExp: types.CodeTypeOK},
		{Data: []byte{0x00, 0x02}, DataExp: nil, CodeExp: types.CodeTypeOK},
		{Data: []byte{0x00, 0x03}, DataExp: nil, CodeExp: types.CodeTypeOK},
		{Data: []byte{0x00, 0x00, 0x04}, DataExp: nil, CodeExp: types.CodeTypeOK}}
	finalizeBlock(client, txs)
	// deliverTx(client, []byte{0x00, 0x00, 0x06}, code.CodeTypeBadNonce, nil)
	commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 5})
}
@@ -1,28 +0,0 @@
#! /bin/bash
set -e

# These tests spawn the counter app and server by execing the ABCI_APP command and run some simple client tests against it

# Get the directory of where this script is.
export PATH="$GOBIN:$PATH"
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"

# Change into that dir because we expect that.
cd "$DIR"

echo "RUN COUNTER OVER SOCKET"
# test golang counter
ABCI_APP="counter" go run -mod=readonly ./*.go
echo "----------------------"

echo "RUN COUNTER OVER GRPC"
# test golang counter via grpc
ABCI_APP="counter --abci=grpc" ABCI="grpc" go run -mod=readonly ./*.go
echo "----------------------"

# test nodejs counter
# TODO: fix node app
#ABCI_APP="node $GOPATH/src/github.com/tendermint/js-abci/example/app.js" go test -test.run TestCounter
@@ -37,7 +37,6 @@ function testExample() {
}

testExample 1 tests/test_cli/ex1.abci abci-cli kvstore
testExample 2 tests/test_cli/ex2.abci abci-cli counter

echo ""
echo "PASS"
@@ -17,9 +17,11 @@ type Application interface {
	CheckTx(RequestCheckTx) ResponseCheckTx // Validate a tx for the mempool

	// Consensus Connection
	InitChain(RequestInitChain) ResponseInitChain // Initialize blockchain w validators/other info from TendermintCore
	FinalizeBlock(RequestFinalizeBlock) ResponseFinalizeBlock
	Commit() ResponseCommit // Commit the state and return the application Merkle root hash
	InitChain(RequestInitChain) ResponseInitChain     // Initialize blockchain w validators/other info from TendermintCore
	BeginBlock(RequestBeginBlock) ResponseBeginBlock  // Signals the beginning of a block
	DeliverTx(RequestDeliverTx) ResponseDeliverTx     // Deliver a tx for full processing
	EndBlock(RequestEndBlock) ResponseEndBlock        // Signals the end of a block, returns changes to the validator set
	Commit() ResponseCommit                           // Commit the state and return the application Merkle root hash

	// State Sync Connection
	ListSnapshots(RequestListSnapshots) ResponseListSnapshots // List available snapshots
@@ -44,6 +46,10 @@ func (BaseApplication) Info(req RequestInfo) ResponseInfo {
	return ResponseInfo{}
}

func (BaseApplication) DeliverTx(req RequestDeliverTx) ResponseDeliverTx {
	return ResponseDeliverTx{Code: CodeTypeOK}
}

func (BaseApplication) CheckTx(req RequestCheckTx) ResponseCheckTx {
	return ResponseCheckTx{Code: CodeTypeOK}
}
@@ -60,6 +66,14 @@ func (BaseApplication) InitChain(req RequestInitChain) ResponseInitChain {
	return ResponseInitChain{}
}

func (BaseApplication) BeginBlock(req RequestBeginBlock) ResponseBeginBlock {
	return ResponseBeginBlock{}
}

func (BaseApplication) EndBlock(req RequestEndBlock) ResponseEndBlock {
	return ResponseEndBlock{}
}

func (BaseApplication) ListSnapshots(req RequestListSnapshots) ResponseListSnapshots {
	return ResponseListSnapshots{}
}
@@ -76,10 +90,6 @@ func (BaseApplication) ApplySnapshotChunk(req RequestApplySnapshotChunk) Respons
	return ResponseApplySnapshotChunk{}
}

func (BaseApplication) FinalizeBlock(req RequestFinalizeBlock) ResponseFinalizeBlock {
	return ResponseFinalizeBlock{}
}
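To make the role of `BaseApplication` concrete: it exists so that applications can embed it and override only the callbacks they care about. The following is a hedged sketch, not code from this changeset; the names `MyApp` and `txCount` are hypothetical.

```go
// MyApp overrides DeliverTx and inherits the no-op defaults shown above
// for every other ABCI method.
type MyApp struct {
	types.BaseApplication

	txCount int64 // hypothetical application state
}

func (app *MyApp) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
	app.txCount++ // count every delivered transaction
	return types.ResponseDeliverTx{Code: types.CodeTypeOK}
}
```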
//-------------------------------------------------------

// GRPCApplication is a GRPC wrapper for Application
@@ -104,6 +114,11 @@ func (app *GRPCApplication) Info(ctx context.Context, req *RequestInfo) (*Respon
	return &res, nil
}

func (app *GRPCApplication) DeliverTx(ctx context.Context, req *RequestDeliverTx) (*ResponseDeliverTx, error) {
	res := app.app.DeliverTx(*req)
	return &res, nil
}

func (app *GRPCApplication) CheckTx(ctx context.Context, req *RequestCheckTx) (*ResponseCheckTx, error) {
	res := app.app.CheckTx(*req)
	return &res, nil
@@ -124,6 +139,16 @@ func (app *GRPCApplication) InitChain(ctx context.Context, req *RequestInitChain
	return &res, nil
}

func (app *GRPCApplication) BeginBlock(ctx context.Context, req *RequestBeginBlock) (*ResponseBeginBlock, error) {
	res := app.app.BeginBlock(*req)
	return &res, nil
}

func (app *GRPCApplication) EndBlock(ctx context.Context, req *RequestEndBlock) (*ResponseEndBlock, error) {
	res := app.app.EndBlock(*req)
	return &res, nil
}

func (app *GRPCApplication) ListSnapshots(
	ctx context.Context, req *RequestListSnapshots) (*ResponseListSnapshots, error) {
	res := app.app.ListSnapshots(*req)
@@ -147,9 +172,3 @@ func (app *GRPCApplication) ApplySnapshotChunk(
	res := app.app.ApplySnapshotChunk(*req)
	return &res, nil
}

func (app *GRPCApplication) FinalizeBlock(
	ctx context.Context, req *RequestFinalizeBlock) (*ResponseFinalizeBlock, error) {
	res := app.app.FinalizeBlock(*req)
	return &res, nil
}
@@ -48,6 +48,12 @@ func ToRequestInfo(req RequestInfo) *Request {
	}
}

func ToRequestDeliverTx(req RequestDeliverTx) *Request {
	return &Request{
		Value: &Request_DeliverTx{&req},
	}
}

func ToRequestCheckTx(req RequestCheckTx) *Request {
	return &Request{
		Value: &Request_CheckTx{&req},
@@ -72,6 +78,18 @@ func ToRequestInitChain(req RequestInitChain) *Request {
	}
}

func ToRequestBeginBlock(req RequestBeginBlock) *Request {
	return &Request{
		Value: &Request_BeginBlock{&req},
	}
}

func ToRequestEndBlock(req RequestEndBlock) *Request {
	return &Request{
		Value: &Request_EndBlock{&req},
	}
}

func ToRequestListSnapshots(req RequestListSnapshots) *Request {
	return &Request{
		Value: &Request_ListSnapshots{&req},
@@ -96,12 +114,6 @@ func ToRequestApplySnapshotChunk(req RequestApplySnapshotChunk) *Request {
	}
}

func ToRequestFinalizeBlock(req RequestFinalizeBlock) *Request {
	return &Request{
		Value: &Request_FinalizeBlock{&req},
	}
}

//----------------------------------------

func ToResponseException(errStr string) *Response {
@@ -127,6 +139,11 @@ func ToResponseInfo(res ResponseInfo) *Response {
		Value: &Response_Info{&res},
	}
}

func ToResponseDeliverTx(res ResponseDeliverTx) *Response {
	return &Response{
		Value: &Response_DeliverTx{&res},
	}
}

func ToResponseCheckTx(res ResponseCheckTx) *Response {
	return &Response{
@@ -152,6 +169,18 @@ func ToResponseInitChain(res ResponseInitChain) *Response {
	}
}

func ToResponseBeginBlock(res ResponseBeginBlock) *Response {
	return &Response{
		Value: &Response_BeginBlock{&res},
	}
}

func ToResponseEndBlock(res ResponseEndBlock) *Response {
	return &Response{
		Value: &Response_EndBlock{&res},
	}
}

func ToResponseListSnapshots(res ResponseListSnapshots) *Response {
	return &Response{
		Value: &Response_ListSnapshots{&res},
@@ -175,9 +204,3 @@ func ToResponseApplySnapshotChunk(res ResponseApplySnapshotChunk) *Response {
		Value: &Response_ApplySnapshotChunk{&res},
	}
}

func ToResponseFinalizeBlock(res ResponseFinalizeBlock) *Response {
	return &Response{
		Value: &Response_FinalizeBlock{&res},
	}
}
[File diff suppressed because it is too large]
@@ -48,9 +48,7 @@ func AddNodeFlags(cmd *cobra.Command) {
		"proxy-app",
		config.ProxyApp,
		"proxy app address, or one of: 'kvstore',"+
			" 'persistent_kvstore',"+
			" 'counter',"+
			" 'counter_serial' or 'noop' for local testing.")
			" 'persistent_kvstore' or 'noop' for local testing.")
	cmd.Flags().String("abci", config.ABCI, "specify abci transport (socket | grpc)")

	// rpc flags
@@ -446,6 +446,7 @@ type RPCConfig struct {

	// TCP or UNIX socket address for the gRPC server to listen on
	// NOTE: This server only supports /broadcast_tx_commit
	// Deprecated: gRPC in the RPC layer of Tendermint will be removed in 0.36.
	GRPCListenAddress string `mapstructure:"grpc-laddr"`

	// Maximum number of simultaneous connections.
@@ -453,6 +454,7 @@ type RPCConfig struct {
	// If you want to accept a larger number than the default, make sure
	// you increase your OS limits.
	// 0 - unlimited.
	// Deprecated: gRPC in the RPC layer of Tendermint will be removed in 0.36.
	GRPCMaxOpenConnections int `mapstructure:"grpc-max-open-connections"`

	// Activate unsafe RPC commands like /dial-persistent-peers and /unsafe-flush-mempool
@@ -782,25 +784,47 @@ type MempoolConfig struct {
	RootDir   string `mapstructure:"home"`
	Recheck   bool   `mapstructure:"recheck"`
	Broadcast bool   `mapstructure:"broadcast"`

	// Maximum number of transactions in the mempool
	Size int `mapstructure:"size"`

	// Limit the total size of all txs in the mempool.
	// This only accounts for raw transactions (e.g. given 1MB transactions and
	// max-txs-bytes=5MB, mempool will only accept 5 transactions).
	MaxTxsBytes int64 `mapstructure:"max-txs-bytes"`

	// Size of the cache (used to filter transactions we saw earlier) in transactions
	CacheSize int `mapstructure:"cache-size"`

	// Do not remove invalid transactions from the cache (default: false)
	// Set to true if it's not possible for any invalid transaction to become
	// valid again in the future.
	KeepInvalidTxsInCache bool `mapstructure:"keep-invalid-txs-in-cache"`

	// Maximum size of a single transaction
	// NOTE: the max size of a tx transmitted over the network is {max-tx-bytes}.
	MaxTxBytes int `mapstructure:"max-tx-bytes"`

	// Maximum size of a batch of transactions to send to a peer
	// Including space needed by encoding (one varint per transaction).
	// XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796
	MaxBatchBytes int `mapstructure:"max-batch-bytes"`

	// TTLDuration, if non-zero, defines the maximum amount of time a transaction
	// can exist for in the mempool.
	//
	// Note, if TTLNumBlocks is also defined, a transaction will be removed if it
	// has existed in the mempool at least TTLNumBlocks number of blocks or if its
	// insertion time into the mempool is beyond TTLDuration.
	TTLDuration time.Duration `mapstructure:"ttl-duration"`

	// TTLNumBlocks, if non-zero, defines the maximum number of blocks a transaction
	// can exist for in the mempool.
	//
	// Note, if TTLDuration is also defined, a transaction will be removed if it
	// has existed in the mempool at least TTLNumBlocks number of blocks or if
	// its insertion time into the mempool is beyond TTLDuration.
	TTLNumBlocks int64 `mapstructure:"ttl-num-blocks"`
}

// DefaultMempoolConfig returns a default configuration for the Tendermint mempool.
@@ -811,10 +835,12 @@ func DefaultMempoolConfig() *MempoolConfig {
		Broadcast: true,
		// Each signature verification takes .5ms, Size reduced until we implement
		// ABCI Recheck
		Size:        5000,
		MaxTxsBytes: 1024 * 1024 * 1024, // 1GB
		CacheSize:   10000,
		MaxTxBytes:  1024 * 1024, // 1MB
		Size:         5000,
		MaxTxsBytes:  1024 * 1024 * 1024, // 1GB
		CacheSize:    10000,
		MaxTxBytes:   1024 * 1024, // 1MB
		TTLDuration:  0 * time.Second,
		TTLNumBlocks: 0,
	}
}
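To make the two TTL knobs concrete, here is a small hedged sketch of tightening them programmatically on top of the defaults above; the `main` scaffolding is illustrative only.

```go
package main

import (
	"log"
	"time"

	"github.com/tendermint/tendermint/config"
)

func main() {
	// Start from the defaults shown above, then bound a transaction's
	// lifetime: it is evicted once either limit is exceeded.
	cfg := config.DefaultMempoolConfig()
	cfg.TTLDuration = 10 * time.Minute
	cfg.TTLNumBlocks = 100

	// ValidateBasic (see the next hunk) rejects negative TTL values.
	if err := cfg.ValidateBasic(); err != nil {
		log.Fatal(err)
	}
}
```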
@@ -840,6 +866,13 @@ func (cfg *MempoolConfig) ValidateBasic() error {
	if cfg.MaxTxBytes < 0 {
		return errors.New("max-tx-bytes can't be negative")
	}
	if cfg.TTLDuration < 0 {
		return errors.New("ttl-duration can't be negative")
	}
	if cfg.TTLNumBlocks < 0 {
		return errors.New("ttl-num-blocks can't be negative")
	}

	return nil
}

@@ -959,7 +992,7 @@ func (cfg *FastSyncConfig) ValidateBasic() error {
	case BlockchainV0:
		return nil
	case BlockchainV2:
		return nil
		return errors.New("fastsync version v2 is no longer supported. Please use v0")
	default:
		return fmt.Errorf("unknown fastsync version %s", cfg.Version)
	}
@@ -131,7 +131,7 @@ func TestFastSyncConfigValidateBasic(t *testing.T) {

	// tamper with version
	cfg.Version = "v2"
	assert.NoError(t, cfg.ValidateBasic())
	assert.Error(t, cfg.ValidateBasic())

	cfg.Version = "invalid"
	assert.Error(t, cfg.ValidateBasic())
@@ -200,6 +200,7 @@ cors-allowed-headers = [{{ range .RPC.CORSAllowedHeaders }}{{ printf "%q, " . }}

# TCP or UNIX socket address for the gRPC server to listen on
# NOTE: This server only supports /broadcast_tx_commit
# Deprecated: gRPC in the RPC layer of Tendermint will be removed in 0.36.
grpc-laddr = "{{ .RPC.GRPCListenAddress }}"

# Maximum number of simultaneous connections.
@@ -209,6 +210,7 @@ grpc-laddr = "{{ .RPC.GRPCListenAddress }}"
# 0 - unlimited.
# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
# 1024 - 40 - 10 - 50 = 924 = ~900
# Deprecated: gRPC in the RPC layer of Tendermint will be removed in 0.36.
grpc-max-open-connections = {{ .RPC.GRPCMaxOpenConnections }}

# Activate unsafe RPC commands like /dial-seeds and /unsafe-flush-mempool
@@ -397,6 +399,22 @@ max-tx-bytes = {{ .Mempool.MaxTxBytes }}
# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796
max-batch-bytes = {{ .Mempool.MaxBatchBytes }}

# ttl-duration, if non-zero, defines the maximum amount of time a transaction
# can exist for in the mempool.
#
# Note, if ttl-num-blocks is also defined, a transaction will be removed if it
# has existed in the mempool at least ttl-num-blocks number of blocks or if its
# insertion time into the mempool is beyond ttl-duration.
ttl-duration = "{{ .Mempool.TTLDuration }}"

# ttl-num-blocks, if non-zero, defines the maximum number of blocks a transaction
# can exist for in the mempool.
#
# Note, if ttl-duration is also defined, a transaction will be removed if it
# has existed in the mempool at least ttl-num-blocks number of blocks or if
# its insertion time into the mempool is beyond ttl-duration.
ttl-num-blocks = {{ .Mempool.TTLNumBlocks }}

#######################################################
### State Sync Configuration Options ###
#######################################################
@@ -440,7 +458,7 @@ fetchers = "{{ .StateSync.Fetchers }}"

# Fast Sync version to use:
#   1) "v0" (default) - the legacy fast sync implementation
#   2) "v2" - complete redesign of v0, optimized for testability & readability
#   2) "v2" - DEPRECATED, please use v0
version = "{{ .FastSync.Version }}"

#######################################################
@@ -31,7 +31,6 @@ Available Commands:
  check_tx    Validate a tx
  commit      Commit the application state and return the Merkle root hash
  console     Start an interactive abci console for multiple commands
  counter     ABCI demo example
  deliver_tx  Deliver a new tx to the application
  kvstore     ABCI demo example
  echo        Have the application echo a message
@@ -214,137 +213,9 @@ we do `deliver_tx "abc=efg"` it will store `(abc, efg)`.
Similarly, you could put the commands in a file and run
`abci-cli --verbose batch < myfile`.

## Counter - Another Example

Now that we've got the hang of it, let's try another application, the
"counter" app.

Like the kvstore app, its code can be found
[here](https://github.com/tendermint/tendermint/blob/master/abci/cmd/abci-cli/abci-cli.go)
and looks like:

```go
func cmdCounter(cmd *cobra.Command, args []string) error {
	app := counter.NewCounterApplication(flagSerial)
	logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))

	// Start the listener
	srv, err := server.NewServer(flagAddrC, flagAbci, app)
	if err != nil {
		return err
	}
	srv.SetLogger(logger.With("module", "abci-server"))
	if err := srv.Start(); err != nil {
		return err
	}

	// Stop upon receiving SIGTERM or CTRL-C.
	tmos.TrapSignal(logger, func() {
		// Cleanup
		srv.Stop()
	})

	// Run forever.
	select {}
}
```

The counter app doesn't use a Merkle tree; it just counts how many times
we've sent a transaction, asked for a hash, or committed the state. The
result of `commit` is just the number of transactions sent.

This application has two modes: `serial=off` and `serial=on`.

When `serial=on`, transactions must be a big-endian encoded incrementing
integer, starting at 0.

If `serial=off`, there are no restrictions on transactions.

We can toggle the value of `serial` using the `set_option` ABCI message.

When `serial=on`, some transactions are invalid. In a live blockchain,
transactions collect in memory before they are committed into blocks. To
avoid wasting resources on invalid transactions, ABCI provides the
`check_tx` message, which application developers can use to accept or
reject transactions, before they are stored in memory or gossiped to
other peers.

In this instance of the counter app, `check_tx` only allows transactions
whose integer is greater than the last committed one.
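The nonce check itself is only a few lines. The following is a hedged sketch of the idea, not a verbatim copy of the counter app; the `serial` and `txCount` fields, and the `code` constants (which do appear elsewhere in this diff), are assumptions here.

```go
// CheckTx sketch: in serial mode a tx must decode (big-endian) to a value
// no smaller than the number of txs committed so far.
func (app *CounterApplication) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
	if app.serial {
		if len(req.Tx) > 8 {
			return types.ResponseCheckTx{Code: code.CodeTypeEncodingError}
		}
		// Left-pad to 8 bytes, then decode as a big-endian integer.
		tx8 := make([]byte, 8)
		copy(tx8[len(tx8)-len(req.Tx):], req.Tx)
		txValue := binary.BigEndian.Uint64(tx8)
		if txValue < uint64(app.txCount) {
			return types.ResponseCheckTx{Code: code.CodeTypeBadNonce}
		}
	}
	return types.ResponseCheckTx{Code: code.CodeTypeOK}
}
```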
Let's kill the console and the kvstore application, and start the
counter app:

```sh
abci-cli counter
```

In another window, start the `abci-cli console`:

```sh
> check_tx 0x00
-> code: OK

> check_tx 0xff
-> code: OK

> deliver_tx 0x00
-> code: OK

> check_tx 0x00
-> code: BadNonce
-> log: Invalid nonce. Expected >= 1, got 0

> deliver_tx 0x01
-> code: OK

> deliver_tx 0x04
-> code: BadNonce
-> log: Invalid nonce. Expected 2, got 4

> info
-> code: OK
-> data: {"hashes":0,"txs":2}
-> data.hex: 0x7B22686173686573223A302C22747873223A327D
```

This is a very simple application, but between `counter` and `kvstore`,
it's easy to see how you can build out arbitrary application states on
top of the ABCI. [Hyperledger's
Burrow](https://github.com/hyperledger/burrow) also runs atop ABCI,
bringing with it Ethereum-like accounts, the Ethereum virtual-machine,
Monax's permissioning scheme, and native contracts extensions.

But the ultimate flexibility comes from being able to write the
application easily in any language.

We have implemented the counter in a number of languages [see the
example directory](https://github.com/tendermint/tendermint/tree/master/abci/example).

To run the Node.js version, first download & install [the JavaScript ABCI server](https://github.com/tendermint/js-abci):

```sh
git clone https://github.com/tendermint/js-abci.git
cd js-abci
npm install abci
```

Now you can start the app:

```sh
node example/counter.js
```

(You'll have to kill the other counter application process.) In another
window, run the console and those previous ABCI commands. You should get
the same results as for the Go version.

## Bounties

Want to write the counter app in your favorite language?! We'd be happy
Want to write an app in your favorite language?! We'd be happy
to add you to our [ecosystem](https://github.com/tendermint/awesome#ecosystem)!
See [funding](https://github.com/interchainio/funding) opportunities from the
[Interchain Foundation](https://interchain.io/) for implementations in new languages and more.
@@ -37,8 +37,8 @@ cd $GOPATH/src/github.com/tendermint/tendermint
make install_abci
```

Now you should have the `abci-cli` installed; you'll see a couple of
commands (`counter` and `kvstore`) that are example applications written
Now you should have the `abci-cli` installed; you'll notice the `kvstore`
command, an example application written
in Go. See below for an application written in JavaScript.

Now, let's run some apps!
@@ -165,92 +165,6 @@ curl -s 'localhost:26657/abci_query?data="name"'
Try some other transactions and queries to make sure everything is
working!

## Counter - Another Example

Now that we've got the hang of it, let's try another application, the
`counter` app.

The counter app doesn't use a Merkle tree; it just counts how many times
we've sent a transaction, or committed the state.

This application has two modes: `serial=off` and `serial=on`.

When `serial=on`, transactions must be a big-endian encoded incrementing
integer, starting at 0.

If `serial=off`, there are no restrictions on transactions.

In a live blockchain, transactions collect in memory before they are
committed into blocks. To avoid wasting resources on invalid
transactions, ABCI provides the `CheckTx` message, which application
developers can use to accept or reject transactions, before they are
stored in memory or gossiped to other peers.

In this instance of the counter app, with `serial=on`, `CheckTx` only
allows transactions whose integer is greater than the last committed
one.

Let's kill the previous instance of `tendermint` and the `kvstore`
application, and start the counter app. We can enable `serial=on` with a
flag:

```sh
abci-cli counter --serial
```

In another window, reset then start Tendermint:

```sh
tendermint unsafe_reset_all
tendermint start
```

Once again, you can see the blocks streaming by. Let's send some
transactions. Since we have set `serial=on`, the first transaction must
be the number `0`:

```sh
curl localhost:26657/broadcast_tx_commit?tx=0x00
```

Note the empty (hence successful) response. The next transaction must be
the number `1`. If instead, we try to send a `5`, we get an error:

```json
> curl localhost:26657/broadcast_tx_commit?tx=0x05
{
  "jsonrpc": "2.0",
  "id": "",
  "result": {
    "check_tx": {},
    "deliver_tx": {
      "code": 2,
      "log": "Invalid nonce. Expected 1, got 5"
    },
    "hash": "33B93DFF98749B0D6996A70F64071347060DC19C",
    "height": 34
  }
}
```

But if we send a `1`, it works again:

```json
> curl localhost:26657/broadcast_tx_commit?tx=0x01
{
  "jsonrpc": "2.0",
  "id": "",
  "result": {
    "check_tx": {},
    "deliver_tx": {},
    "hash": "F17854A977F6FA7EEA1BD758E296710B86F72F3D",
    "height": 60
  }
}
```

For more details on the `broadcast_tx` API, see [the guide on using
Tendermint](../tendermint-core/using-tendermint.md).

## CounterJS - Example in Another Language
@@ -31,24 +31,61 @@ For example:

would be equal to the composite key of `jack.account.number`.

Let's take a look at the `[tx_index]` config section:
By default, Tendermint will index all transactions by their respective hashes
and height and blocks by their height.

## Configuration

Operators can configure indexing via the `[tx_index]` section. The `indexer`
field takes a series of supported indexers. If `null` is included, indexing will
be turned off regardless of other values provided.

```toml
##### transactions indexer configuration options #####
[tx_index]
[tx-index]

# What indexer to use for transactions
# The backend database list to back the indexer.
# If the list contains "null", no indexer service will be used.
#
# The application will set which txs to index. In some cases a node operator will be able
# to decide which txs to index based on configuration set in the application.
#
# Options:
#   1) "null"
#   2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
indexer = "kv"
#      - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed.
#   3) "psql" - the indexer service backed by PostgreSQL.
# indexer = []
```

By default, Tendermint will index all transactions by their respective hashes
and height and blocks by their height.
### Supported Indexers

You can turn off indexing completely by setting `tx_index` to `null`.
#### KV

The `kv` indexer type is an embedded key-value store supported by the main
underlying Tendermint database. Using the `kv` indexer type allows you to query
for block and transaction events directly against Tendermint's RPC. However, the
query syntax is limited and so this indexer type might be deprecated or removed
entirely in the future.
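For instance, with the `kv` indexer enabled, a client can hit the RPC search endpoints directly. A hedged sketch using the Go RPC client (the `rpchttp.New` constructor signature varies between Tendermint releases, so treat the exact call as an assumption):

```go
// Query kv-indexed transactions over RPC; error handling kept minimal.
c, err := rpchttp.New("tcp://127.0.0.1:26657")
if err != nil {
	panic(err)
}
page, perPage := 1, 30
res, err := c.TxSearch(context.Background(), "tx.height=5", false, &page, &perPage, "asc")
if err != nil {
	panic(err)
}
fmt.Printf("found %d txs\n", res.TotalCount)
```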
#### PostgreSQL

The `psql` indexer type allows an operator to enable block and transaction event
indexing by proxying it to an external PostgreSQL instance, allowing the events
to be stored in relational models. Since the events are stored in an RDBMS, operators
can leverage SQL to perform a series of rich and complex queries that are not
supported by the `kv` indexer type. Since operators can leverage SQL directly,
searching is not enabled for the `psql` indexer type via Tendermint's RPC -- any
such query will fail.

Note, the SQL schema is stored in `state/indexer/sink/psql/schema.sql` and operators
must explicitly create the relations prior to starting Tendermint and enabling
the `psql` indexer type.

Example:

```shell
$ psql ... -f state/indexer/sink/psql/schema.sql
```

## Default Indexes
@@ -61,7 +61,7 @@ Note the context/background should be written in the present tense.
- [ADR-053: State-Sync-Prototype](./adr-053-state-sync-prototype.md)
- [ADR-054: Crypto-Encoding-2](./adr-054-crypto-encoding-2.md)
- [ADR-055: Protobuf-Design](./adr-055-protobuf-design.md)
- [ADR-056: Light-Client-Amnesia-Attacks](./adr-056-light-client-amnesia-attacks)
- [ADR-056: Light-Client-Amnesia-Attacks](./adr-056-light-client-amnesia-attacks.md)
- [ADR-059: Evidence-Composition-and-Lifecycle](./adr-059-evidence-composition-and-lifecycle.md)
- [ADR-062: P2P-Architecture](./adr-062-p2p-architecture.md)
- [ADR-063: Privval-gRPC](./adr-063-privval-grpc.md)
|
||||
[#828](https://github.com/tendermint/tendermint/issues/828).
|
||||
|
||||
#### Eager StateSync
|
||||
Warp Sync as implemented in Parity
|
||||
["Warp Sync"](https://wiki.parity.io/Warp-Sync-Snapshot-Format.html) to rapidly
|
||||
Warp Sync as implemented in OpenEthereum to rapidly
|
||||
download both blocks and state snapshots from peers. Data is carved into ~4MB
|
||||
chunks and snappy compressed. Hashes of snappy compressed chunks are stored in a
|
||||
manifest file which co-ordinates the state-sync. Obtaining a correct manifest
|
||||
@@ -234,5 +233,3 @@ Proposed
|
||||
[WIP General/Lazy State-Sync pseudo-spec](https://github.com/tendermint/tendermint/issues/3639) - Jae Proposal
|
||||
[Warp Sync Implementation](https://github.com/tendermint/tendermint/pull/3594) - ackratos
|
||||
[Chunk Proposal](https://github.com/tendermint/tendermint/pull/3799) - Bucky proposed
|
||||
|
||||
|
||||
|
||||
@@ -119,7 +119,7 @@ network usage.
---

Check out the formal specification
[here](https://docs.tendermint.com/master/spec/consensus/light-client.html).
[here](https://github.com/tendermint/spec/tree/master/spec/light-client).

## Status
@@ -18,7 +18,7 @@ graceful here, but that's for another day.

It's possible to fool lite clients without there being a fork on the
main chain - so called Fork-Lite. See the
[fork accountability](https://docs.tendermint.com/master/spec/consensus/fork-accountability.html)
[fork accountability](https://docs.tendermint.com/master/spec/light-client/accountability/)
document for more details. For a sequential lite client, this can happen via
equivocation or amnesia attacks. For a skipping lite client this can also happen
via lunatic validator attacks. There must be some way for applications to punish
@@ -179,7 +179,7 @@ This then ends the process and the verify function that was called at the start
the user.

For a detailed overview of how each of these three attacks can be conducted please refer to the
[fork accountability spec]((https://github.com/tendermint/spec/blob/master/spec/consensus/light-client/accountability.md)).
[fork accountability spec](https://github.com/tendermint/spec/blob/master/spec/consensus/light-client/accountability.md).

## Full Node Verification
@@ -275,9 +275,13 @@ dial-timeout = "3s"
#######################################################
[mempool]

# Mempool version to use:
#   1) "v0" - The legacy non-prioritized mempool reactor.
#   2) "v1" (default) - The prioritized mempool reactor.
version = "v1"

recheck = true
broadcast = true
wal-dir = ""

# Maximum number of transactions in the mempool
size = 5000
@@ -304,6 +308,22 @@ max-tx-bytes = 1048576
# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796
max-batch-bytes = 0

# ttl-duration, if non-zero, defines the maximum amount of time a transaction
# can exist for in the mempool.
#
# Note, if ttl-num-blocks is also defined, a transaction will be removed if it
# has existed in the mempool at least ttl-num-blocks number of blocks or if its
# insertion time into the mempool is beyond ttl-duration.
ttl-duration = "0s"

# ttl-num-blocks, if non-zero, defines the maximum number of blocks a transaction
# can exist for in the mempool.
#
# Note, if ttl-duration is also defined, a transaction will be removed if it
# has existed in the mempool at least ttl-num-blocks number of blocks or if
# its insertion time into the mempool is beyond ttl-duration.
ttl-num-blocks = 0

#######################################################
### State Sync Configuration Options ###
#######################################################
@@ -421,7 +441,6 @@ max-open-connections = 3

# Instrumentation namespace
namespace = "tendermint"

```

## Empty blocks VS no empty blocks
@@ -32,7 +32,7 @@ tendermint start --log-level "info"
Here is the list of modules you may encounter in Tendermint's log and a
little overview of what they do.

- `abci-client` As mentioned in [Application Development Guide](../app-dev/app-development.md), Tendermint acts as an ABCI
- `abci-client` As mentioned in [Application Architecture Guide](../app-dev/app-architecture.md), Tendermint acts as an ABCI
  client with respect to the application and maintains 3 connections:
  mempool, consensus and query. The code used by Tendermint Core can
  be found [here](https://github.com/tendermint/tendermint/tree/master/abci/client).
@@ -45,12 +45,12 @@ little overview of what they do.
  from a crash.
  [here](https://github.com/tendermint/tendermint/blob/master/types/events.go).
  You can subscribe to them by calling the `subscribe` RPC method. Refer
  to [RPC docs](./rpc.md) for additional information.
  to [RPC docs](../tendermint-core/rpc.md) for additional information.
- `mempool` Mempool module handles all incoming transactions, whether
  they are coming from peers or the application.
- `p2p` Provides an abstraction around peer-to-peer communication. For
  more details, please check out the
  [README](https://github.com/tendermint/tendermint/blob/master/p2p/README.md).
  [README](https://github.com/tendermint/spec/tree/master/spec/p2p).
- `rpc-server` RPC server. For implementation details, please read the
  [doc.go](https://github.com/tendermint/tendermint/blob/master/rpc/jsonrpc/doc.go).
- `state` Represents the latest state and execution submodule, which
@@ -40,7 +40,7 @@ Default logging level (`log-level = "info"`) should suffice for
normal operation mode. Read [this
post](https://blog.cosmos.network/one-of-the-exciting-new-features-in-0-10-0-release-is-smart-log-level-flag-e2506b4ab756)
for details on how to configure the `log-level` config variable. Some of the
modules can be found [here](../nodes/logging#list-of-modules). If
modules can be found [here](logging.md#list-of-modules). If
you're trying to debug Tendermint or asked to provide logs with debug
logging level, you can do so by running Tendermint with
`--log-level="debug"`.
@@ -114,7 +114,7 @@ just the votes seen at the current height.

If, after consulting the logs and the above endpoints, you still have no idea
what's happening, consider using the `tendermint debug kill` sub-command. This
command will scrape all the available info and kill the process. See
[Debugging](../tools/debugging.md) for the exact format.
[Debugging](../tools/debugging/README.md) for the exact format.

You can inspect the resulting archive yourself or create an issue on
[Github](https://github.com/tendermint/tendermint). Before opening an issue
@@ -134,7 +134,7 @@ Tendermint also can report and serve Prometheus metrics. See
[Metrics](./metrics.md).

The `tendermint debug dump` sub-command can be used to periodically dump useful
information into an archive. See [Debugging](../tools/debugging.md) for more
information into an archive. See [Debugging](../tools/debugging/README.md) for more
information.

## What happens when my app dies
@@ -268,6 +268,8 @@ While we do not favor any operating system, more secure and stable Linux server
distributions (like CentOS) should be preferred over desktop operating systems
(like macOS).

Native Windows support is not provided. If you are using a Windows machine, you can try using the [bash shell](https://docs.microsoft.com/en-us/windows/wsl/install-win10).

### Miscellaneous

NOTE: if you are going to use Tendermint in a public domain, make sure
@@ -313,7 +315,7 @@ We want `skip-timeout-commit=false` when there is economics on the line
because proposers should wait to hear more votes. But if you don't
care about that and want the fastest consensus, you can skip it. It will
be kept false by default for public deployments (e.g. [Cosmos
Hub](https://cosmos.network/intro/hub)) while for enterprise
Hub](https://hub.cosmos.network/main/hub-overview/overview.html)) while for enterprise
applications, setting it to true is not a problem.

- `consensus.peer-gossip-sleep-duration`
@@ -45,3 +45,13 @@ version = "v0"

If we're lagging sufficiently, we should go back to fast syncing, but
this is an [open issue](https://github.com/tendermint/tendermint/issues/129).

## The Fast Sync event

When the Tendermint blockchain core launches, it might switch to `fast-sync`
mode to catch up to the network's current best height. The core will emit a
fast-sync event exposing the current status and the sync height. Once it has
caught up to the network's best height, it will switch out of fast-sync and
emit another event exposing the fast-sync `complete` status and the state `height`.

The user can query these events by subscribing to `EventQueryFastSyncStatus`.
Please check [types](https://pkg.go.dev/github.com/tendermint/tendermint/types?utm_source=godoc#pkg-constants) for the details.
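A hedged sketch of such a subscription using the Go RPC client; the `New` constructor signature differs between Tendermint releases, and the exact event query constant should be checked against the `types` constants linked above.

```go
package main

import (
	"context"
	"fmt"

	rpchttp "github.com/tendermint/tendermint/rpc/client/http"
	"github.com/tendermint/tendermint/types"
)

func main() {
	c, err := rpchttp.New("tcp://127.0.0.1:26657") // constructor signature varies by release
	if err != nil {
		panic(err)
	}
	if err := c.Start(); err != nil { // starts the websocket client used for subscriptions
		panic(err)
	}
	defer c.Stop() //nolint:errcheck

	// EventQueryFastSyncStatus is assumed from the pkg.go.dev constants above.
	events, err := c.Subscribe(context.Background(), "fast-sync-watcher",
		types.EventQueryFastSyncStatus.String())
	if err != nil {
		panic(err)
	}
	for ev := range events {
		fmt.Printf("fast-sync status: %+v\n", ev.Data)
	}
}
```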
@@ -9,3 +9,10 @@ With state sync your node will download data related to the head or near the head of the chain.
This leads to drastically shorter times for joining a network.

Information on how to configure state sync is located in the [nodes section](../nodes/state-sync.md)

## Events

When a node starts with the statesync flag enabled in the config file, it will emit two events: one upon starting statesync and the other upon completion.

The user can query these events by subscribing to `EventQueryStateSyncStatus`.
Please check [types](https://pkg.go.dev/github.com/tendermint/tendermint/types?utm_source=godoc#pkg-constants) for the details.
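The same subscription pattern sketched in the fast-sync section above applies here; swap in `EventQueryStateSyncStatus` as the query.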
@@ -36,7 +36,7 @@ more information on query syntax and other options.

You can also use tags, provided you included them in the DeliverTx
response, to query transaction results. See [Indexing
transactions](./indexing-transactions.md) for details.
transactions](../app-dev/indexing-transactions.md) for details.

## ValidatorSetUpdates
@@ -552,8 +552,7 @@ To make a Tendermint network that can tolerate one of the validators
failing, you need at least four validator nodes (tolerating f faults
requires 3f + 1 validators, so that more than 2/3 of the voting power remains honest).

Updating validators in a live network is supported but must be
explicitly programmed by the application developer. See the [application
developers guide](../app-dev/app-development.md) for more details.
explicitly programmed by the application developer.

### Local Network
go.mod
@@ -10,32 +10,31 @@ require (
	github.com/btcsuite/btcd v0.22.0-beta
	github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce
	github.com/fortytw2/leaktest v1.3.0
	github.com/go-kit/kit v0.10.0
	github.com/go-kit/kit v0.11.0
	github.com/gogo/protobuf v1.3.2
	github.com/golang/protobuf v1.5.2
	github.com/golangci/golangci-lint v1.41.1
	github.com/google/orderedcode v0.0.1
	github.com/google/uuid v1.2.0
	github.com/google/uuid v1.3.0
	github.com/gorilla/websocket v1.4.2
	github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
	github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
	github.com/lib/pq v1.10.2
	github.com/libp2p/go-buffer-pool v0.0.2
	github.com/minio/highwayhash v1.0.2
	github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
	github.com/oasisprotocol/curve25519-voi v0.0.0-20210609091139-0a56a4bca00b
	github.com/ory/dockertest v3.3.5+incompatible
	github.com/pkg/errors v0.9.1
	github.com/prometheus/client_golang v1.11.0
	github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0
	github.com/rs/cors v1.8.0
	github.com/rs/zerolog v1.23.0
	github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa
	github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa
	github.com/spf13/cobra v1.2.0
	github.com/spf13/cobra v1.2.1
	github.com/spf13/viper v1.8.1
	github.com/stretchr/testify v1.7.0
	github.com/tendermint/tm-db v0.6.4
	golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad
	golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b
	golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4
	google.golang.org/grpc v1.39.0
	gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b // indirect
@@ -1,17 +1,36 @@
/*
Package blockchain provides two implementations of the fast-sync protocol.
Package blockchain implements two versions of a reactor Service that are
responsible for block propagation and gossip between peers. This mechanism is
more formally known as fast-sync.

- v0 was the very first implementation. it's battle tested, but does not have a
lot of test coverage.
- v2 is the newest implementation, with a focus on testability and readability.
In order for a full node to successfully participate in consensus, it must have
the latest view of state. The fast-sync protocol is a mechanism in which peers
may exchange and gossip entire blocks with one another, in a request/response
type model, until they've successfully synced to the latest head block. Once
successfully synced, the full node can switch to an active role in consensus
and no longer needs to run the fast-sync process.

Check out ADR-40 for the formal model and requirements.
Note, the blockchain reactor Service gossips entire blocks and relevant data such
that each receiving peer may construct the entire view of the blockchain state.

# Termination criteria
There are two versions of the blockchain reactor Service, i.e. fast-sync:

1. the maximum peer height is reached
2. termination timeout is triggered, which is set if the peer set is empty or
there are no pending requests.
- v0: The initial implementation that is battle-tested, but whose test coverage
is lacking and is not formally verifiable.
- v2: The latest implementation that has much higher test coverage and is formally
verified. However, the current implementation of v2 is not as battle-tested and
is known to have various bugs that could make it unreliable in production
environments.

The v0 blockchain reactor Service has one p2p channel, BlockchainChannel. This
channel is responsible for handling messages that both request blocks and respond
to block requests from peers. For every block request from a peer, the reactor
will execute respondToPeer which will fetch the block from the node's state store
and respond to the peer. For every block response, the node will add the block
to its pool via AddBlock.

Internally, v0 runs a poolRoutine that constantly checks for what blocks it needs
and requests them. The poolRoutine is also responsible for taking blocks from the
pool, saving and executing each block.
*/
package blockchain
@@ -83,6 +83,10 @@ type BlockPool struct {

	requestsCh chan<- BlockRequest
	errorsCh   chan<- peerError

	startHeight               int64
	lastHundredBlockTimeStamp time.Time
	lastSyncRate              float64
}

// NewBlockPool returns a new BlockPool with the height equal to start. Block
@@ -91,12 +95,14 @@ func NewBlockPool(start int64, requestsCh chan<- BlockRequest, errorsCh chan<- p
	bp := &BlockPool{
		peers: make(map[types.NodeID]*bpPeer),

		requesters: make(map[int64]*bpRequester),
		height:     start,
		numPending: 0,
		requesters:  make(map[int64]*bpRequester),
		height:      start,
		startHeight: start,
		numPending:  0,

		requestsCh: requestsCh,
		errorsCh:   errorsCh,
		requestsCh:   requestsCh,
		errorsCh:     errorsCh,
		lastSyncRate: 0,
	}
	bp.BaseService = *service.NewBaseService(nil, "BlockPool", bp)
	return bp
@@ -106,6 +112,7 @@ func NewBlockPool(start int64, requestsCh chan<- BlockRequest, errorsCh chan<- p
// pool's start time.
func (pool *BlockPool) OnStart() error {
	pool.lastAdvance = time.Now()
	pool.lastHundredBlockTimeStamp = pool.lastAdvance
	go pool.makeRequestersRoutine()
	return nil
}
@@ -216,6 +223,19 @@ func (pool *BlockPool) PopRequest() {
		delete(pool.requesters, pool.height)
		pool.height++
		pool.lastAdvance = time.Now()

		// lastSyncRate is updated every 100 blocks; an adaptive filter smooths
		// the block sync rate, expressed in blocks per second.
		if (pool.height-pool.startHeight)%100 == 0 {
			newSyncRate := 100 / time.Since(pool.lastHundredBlockTimeStamp).Seconds()
			if pool.lastSyncRate == 0 {
				pool.lastSyncRate = newSyncRate
			} else {
				pool.lastSyncRate = 0.9*pool.lastSyncRate + 0.1*newSyncRate
			}
			pool.lastHundredBlockTimeStamp = time.Now()
		}

	} else {
		panic(fmt.Sprintf("Expected requester to pop, got nothing at height %v", pool.height))
	}
@@ -428,6 +448,20 @@ func (pool *BlockPool) debug() string {
	return str
}

func (pool *BlockPool) targetSyncBlocks() int64 {
	pool.mtx.RLock()
	defer pool.mtx.RUnlock()

	return pool.maxPeerHeight - pool.startHeight + 1
}

func (pool *BlockPool) getLastSyncRate() float64 {
	pool.mtx.RLock()
	defer pool.mtx.RUnlock()

	return pool.lastSyncRate
}
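A hypothetical use of the two accessors above, e.g. for a periodic progress log; this helper is not part of this changeset, and locking around the raw height fields is elided for brevity.

```go
// progressString builds a human-readable sync progress line from the
// accessors above, estimating time remaining from the smoothed rate.
func (pool *BlockPool) progressString() string {
	synced := pool.height - pool.startHeight
	target := pool.targetSyncBlocks()
	rate := pool.getLastSyncRate()
	if rate <= 0 {
		return fmt.Sprintf("fast sync: %d/%d blocks", synced, target)
	}
	remaining := time.Duration(float64(target-synced)/rate) * time.Second
	return fmt.Sprintf("fast sync: %d/%d blocks, ~%s remaining at %.1f blocks/s",
		synced, target, remaining, rate)
}
```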
//-------------------------------------

type bpPeer struct {
@@ -2,6 +2,7 @@ package v0
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"runtime/debug"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
@@ -10,6 +11,7 @@ import (
|
||||
"github.com/tendermint/tendermint/internal/p2p"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/libs/service"
|
||||
tmSync "github.com/tendermint/tendermint/libs/sync"
|
||||
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/store"
|
||||
@@ -72,7 +74,7 @@ func (e peerError) Error() string {
|
||||
return fmt.Sprintf("error with peer %v: %s", e.peerID, e.err.Error())
|
||||
}
|
||||
|
||||
// BlockchainReactor handles long-term catchup syncing.
|
||||
// Reactor handles long-term catchup syncing.
|
||||
type Reactor struct {
|
||||
service.BaseService
|
||||
|
||||
@@ -83,12 +85,19 @@ type Reactor struct {
|
||||
store *store.BlockStore
|
||||
pool *BlockPool
|
||||
consReactor consensusReactor
|
||||
fastSync bool
|
||||
fastSync *tmSync.AtomicBool
|
||||
|
||||
blockchainCh *p2p.Channel
|
||||
peerUpdates *p2p.PeerUpdates
|
||||
peerUpdatesCh chan p2p.Envelope
|
||||
closeCh chan struct{}
|
||||
blockchainCh *p2p.Channel
|
||||
// blockchainOutBridgeCh defines a channel that acts as a bridge between sending Envelope
|
||||
// messages that the reactor will consume in processBlockchainCh and receiving messages
|
||||
// from the peer updates channel and other goroutines. We do this instead of directly
|
||||
// sending on blockchainCh.Out to avoid race conditions in the case where other goroutines
|
||||
// send Envelopes directly to the to blockchainCh.Out channel, since processBlockchainCh
|
||||
// may close the blockchainCh.Out channel at the same time that other goroutines send to
|
||||
// blockchainCh.Out.
|
||||
blockchainOutBridgeCh chan p2p.Envelope
|
||||
peerUpdates *p2p.PeerUpdates
|
||||
closeCh chan struct{}
|
||||
|
||||
requestsCh <-chan BlockRequest
|
||||
errorsCh <-chan peerError
|
||||
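The bridge-channel pattern described in that comment can be shown in
isolation: producers write to a bridge channel, and a single goroutine is the
only writer (and eventual closer) of the real output channel, so a send can
never race a close. A minimal, self-contained sketch:

package main

import "fmt"

func main() {
	out := make(chan string)    // the "real" output channel (blockchainCh.Out)
	bridge := make(chan string) // producers send here instead
	done := make(chan struct{})

	// The single forwarding goroutine is the only one that touches out,
	// mirroring what processBlockchainCh does with blockchainOutBridgeCh.
	go func() {
		for {
			select {
			case msg := <-bridge:
				out <- msg
			case <-done:
				close(out)
				return
			}
		}
	}()

	// Any number of producers may send on bridge without racing the close.
	go func() { bridge <- "status request" }()

	fmt.Println(<-out) // status request
	close(done)
}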
@@ -99,6 +108,8 @@ type Reactor struct {
	poolWG sync.WaitGroup

	metrics *cons.Metrics

	syncStartTime time.Time
}

// NewReactor returns new reactor instance.
@@ -126,19 +137,20 @@ func NewReactor(
	errorsCh := make(chan peerError, maxPeerErrBuffer) // NOTE: The capacity should be larger than the peer count.

	r := &Reactor{
		initialState:  state,
		blockExec:     blockExec,
		store:         store,
		pool:          NewBlockPool(startHeight, requestsCh, errorsCh),
		consReactor:   consReactor,
		fastSync:      fastSync,
		requestsCh:    requestsCh,
		errorsCh:      errorsCh,
		blockchainCh:  blockchainCh,
		peerUpdates:   peerUpdates,
		peerUpdatesCh: make(chan p2p.Envelope),
		closeCh:       make(chan struct{}),
		metrics:       metrics,
		initialState:          state,
		blockExec:             blockExec,
		store:                 store,
		pool:                  NewBlockPool(startHeight, requestsCh, errorsCh),
		consReactor:           consReactor,
		fastSync:              tmSync.NewBool(fastSync),
		requestsCh:            requestsCh,
		errorsCh:              errorsCh,
		blockchainCh:          blockchainCh,
		blockchainOutBridgeCh: make(chan p2p.Envelope),
		peerUpdates:           peerUpdates,
		closeCh:               make(chan struct{}),
		metrics:               metrics,
		syncStartTime:         time.Time{},
	}

	r.BaseService = *service.NewBaseService(logger, "Blockchain", r)
@@ -153,7 +165,7 @@ func NewReactor(
// If fastSync is enabled, we also start the pool and the pool processing
// goroutine. If the pool fails to start, an error is returned.
func (r *Reactor) OnStart() error {
	if r.fastSync {
	if r.fastSync.IsSet() {
		if err := r.pool.Start(); err != nil {
			return err
		}
@@ -171,7 +183,7 @@ func (r *Reactor) OnStart() error {
// OnStop stops the reactor by signaling to all spawned goroutines to exit and
// blocking until they all exit.
func (r *Reactor) OnStop() {
	if r.fastSync {
	if r.fastSync.IsSet() {
		if err := r.pool.Stop(); err != nil {
			r.Logger.Error("failed to stop pool", "err", err)
		}
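The change above swaps a plain bool for an atomic flag so concurrent
goroutines can toggle and read fast-sync state safely. A small sketch of the
AtomicBool API as it appears in this diff (NewBool, IsSet, Set, UnSet):

package main

import (
	"fmt"

	tmSync "github.com/tendermint/tendermint/libs/sync"
)

func main() {
	fastSync := tmSync.NewBool(true) // enabled on start
	fmt.Println(fastSync.IsSet())    // true

	fastSync.UnSet() // sync finished; hand off to consensus
	fmt.Println(fastSync.IsSet()) // false

	fastSync.Set() // SwitchToFastSync re-enables it
	fmt.Println(fastSync.IsSet()) // true
}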
@@ -265,7 +277,11 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err
	defer func() {
		if e := recover(); e != nil {
			err = fmt.Errorf("panic in processing message: %v", e)
			r.Logger.Error("recovering from processing message panic", "err", err)
			r.Logger.Error(
				"recovering from processing message panic",
				"err", err,
				"stack", string(debug.Stack()),
			)
		}
	}()

@@ -283,7 +299,7 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err
}
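This recover pattern, repeated across the reactors in this changeset, turns a
panic inside a message handler into an error return while logging the stack.
A runnable sketch of just that mechanism:

package main

import (
	"fmt"
	"runtime/debug"
)

func handle() (err error) {
	defer func() {
		if e := recover(); e != nil {
			// Convert the panic into an error and keep the stack for the log.
			err = fmt.Errorf("panic in processing message: %v", e)
			_ = string(debug.Stack()) // would be attached to the log entry
		}
	}()

	panic("boom") // simulate a bug in a handler
}

func main() {
	fmt.Println(handle()) // panic in processing message: boom
}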

// processBlockchainCh initiates a blocking process where we listen for and handle
// envelopes on the BlockchainChannel and peerUpdatesCh. Any error encountered during
// envelopes on the BlockchainChannel and blockchainOutBridgeCh. Any error encountered during
// message execution will result in a PeerError being sent on the BlockchainChannel.
// When the reactor is stopped, we will catch the signal and close the p2p Channel
// gracefully.
@@ -301,8 +317,8 @@ func (r *Reactor) processBlockchainCh() {
			}
		}

	case envelop := <-r.peerUpdatesCh:
		r.blockchainCh.Out <- envelop
	case envelope := <-r.blockchainOutBridgeCh:
		r.blockchainCh.Out <- envelope

	case <-r.closeCh:
		r.Logger.Debug("stopped listening on blockchain channel; closing...")
@@ -324,7 +340,7 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) {
	switch peerUpdate.Status {
	case p2p.PeerStatusUp:
		// send a status update to the newly added peer
		r.peerUpdatesCh <- p2p.Envelope{
		r.blockchainOutBridgeCh <- p2p.Envelope{
			To: peerUpdate.NodeID,
			Message: &bcproto.StatusResponse{
				Base: r.store.Base(),
@@ -358,7 +374,7 @@ func (r *Reactor) processPeerUpdates() {
// SwitchToFastSync is called by the state sync reactor when switching to fast
// sync.
func (r *Reactor) SwitchToFastSync(state sm.State) error {
	r.fastSync = true
	r.fastSync.Set()
	r.initialState = state
	r.pool.height = state.LastBlockHeight + 1

@@ -366,6 +382,8 @@ func (r *Reactor) SwitchToFastSync(state sm.State) error {
		return err
	}

	r.syncStartTime = time.Now()

	r.poolWG.Add(1)
	go r.poolRoutine(true)

@@ -388,7 +406,7 @@ func (r *Reactor) requestRoutine() {
			return

		case request := <-r.requestsCh:
			r.blockchainCh.Out <- p2p.Envelope{
			r.blockchainOutBridgeCh <- p2p.Envelope{
				To:      request.PeerID,
				Message: &bcproto.BlockRequest{Height: request.Height},
			}
@@ -405,7 +423,7 @@ func (r *Reactor) requestRoutine() {
			go func() {
				defer r.poolWG.Done()

				r.blockchainCh.Out <- p2p.Envelope{
				r.blockchainOutBridgeCh <- p2p.Envelope{
					Broadcast: true,
					Message:   &bcproto.StatusRequest{},
				}
@@ -478,6 +496,8 @@ FOR_LOOP:
				r.Logger.Error("failed to stop pool", "err", err)
			}

			r.fastSync.UnSet()

			if r.consReactor != nil {
				r.consReactor.SwitchToConsensus(state, blocksSynced > 0 || stateSynced)
			}
@@ -592,3 +612,27 @@ FOR_LOOP:
func (r *Reactor) GetMaxPeerBlockHeight() int64 {
	return r.pool.MaxPeerHeight()
}

func (r *Reactor) GetTotalSyncedTime() time.Duration {
	if !r.fastSync.IsSet() || r.syncStartTime.IsZero() {
		return time.Duration(0)
	}
	return time.Since(r.syncStartTime)
}

func (r *Reactor) GetRemainingSyncTime() time.Duration {
	if !r.fastSync.IsSet() {
		return time.Duration(0)
	}

	targetSyncs := r.pool.targetSyncBlocks()
	currentSyncs := r.store.Height() - r.pool.startHeight + 1
	lastSyncRate := r.pool.getLastSyncRate()
	if currentSyncs < 0 || lastSyncRate < 0.001 {
		return time.Duration(0)
	}

	remain := float64(targetSyncs-currentSyncs) / lastSyncRate

	return time.Duration(int64(remain * float64(time.Second)))
}
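The estimate above is simply blocks-remaining divided by the smoothed rate. A
worked sketch with illustrative numbers:

package main

import (
	"fmt"
	"time"
)

func main() {
	targetSyncs := int64(10000) // maxPeerHeight - startHeight + 1
	currentSyncs := int64(4000) // blocks synced so far
	lastSyncRate := 50.0        // blocks/s from the adaptive filter

	remain := float64(targetSyncs-currentSyncs) / lastSyncRate
	fmt.Println(time.Duration(int64(remain * float64(time.Second)))) // 2m0s
}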

@@ -215,6 +215,29 @@ func TestReactor_AbruptDisconnect(t *testing.T) {
	rts.network.Nodes[rts.nodes[1]].PeerManager.Disconnected(rts.nodes[0])
}

func TestReactor_SyncTime(t *testing.T) {
	config := cfg.ResetTestRoot("blockchain_reactor_test")
	defer os.RemoveAll(config.RootDir)

	genDoc, privVals := factory.RandGenesisDoc(config, 1, false, 30)
	maxBlockHeight := int64(101)

	rts := setup(t, genDoc, privVals[0], []int64{maxBlockHeight, 0}, 0)
	require.Equal(t, maxBlockHeight, rts.reactors[rts.nodes[0]].store.Height())
	rts.start(t)

	require.Eventually(
		t,
		func() bool {
			return rts.reactors[rts.nodes[1]].GetRemainingSyncTime() > time.Nanosecond &&
				rts.reactors[rts.nodes[1]].pool.getLastSyncRate() > 0.001
		},
		10*time.Second,
		10*time.Millisecond,
		"expected node to be partially synced",
	)
}

func TestReactor_NoBlockResponse(t *testing.T) {
	config := cfg.ResetTestRoot("blockchain_reactor_test")
	defer os.RemoveAll(config.RootDir)

@@ -13,6 +13,7 @@ import (
	tmsync "github.com/tendermint/tendermint/internal/libs/sync"
	"github.com/tendermint/tendermint/internal/p2p"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/libs/sync"
	bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
	"github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/types"
@@ -34,8 +35,8 @@ type blockStore interface {
type BlockchainReactor struct {
	p2p.BaseReactor

	fastSync    bool // if true, enable fast sync on start
	stateSynced bool // set to true when SwitchToFastSync is called by state sync
	fastSync    *sync.AtomicBool // enable fast sync on start when it's been Set
	stateSynced bool             // set to true when SwitchToFastSync is called by state sync
	scheduler   *Routine
	processor   *Routine
	logger      log.Logger
@@ -48,6 +49,10 @@ type BlockchainReactor struct {
	reporter behavior.Reporter
	io       iIO
	store    blockStore

	syncStartTime   time.Time
	syncStartHeight int64
	lastSyncRate    float64 // blocks synced per second, based on the last 100 blocks
}

type blockApplier interface {
@@ -68,12 +73,15 @@ func newReactor(state state.State, store blockStore, reporter behavior.Reporter,
	processor := newPcState(pContext)

	return &BlockchainReactor{
		scheduler: newRoutine("scheduler", scheduler.handle, chBufferSize),
		processor: newRoutine("processor", processor.handle, chBufferSize),
		store:     store,
		reporter:  reporter,
		logger:    log.NewNopLogger(),
		fastSync:  fastSync,
		scheduler:       newRoutine("scheduler", scheduler.handle, chBufferSize),
		processor:       newRoutine("processor", processor.handle, chBufferSize),
		store:           store,
		reporter:        reporter,
		logger:          log.NewNopLogger(),
		fastSync:        sync.NewBool(fastSync),
		syncStartHeight: initHeight,
		syncStartTime:   time.Time{},
		lastSyncRate:    0,
	}
}

@@ -129,7 +137,7 @@ func (r *BlockchainReactor) SetLogger(logger log.Logger) {
// Start implements cmn.Service interface
func (r *BlockchainReactor) Start() error {
	r.reporter = behavior.NewSwitchReporter(r.BaseReactor.Switch)
	if r.fastSync {
	if r.fastSync.IsSet() {
		err := r.startSync(nil)
		if err != nil {
			return fmt.Errorf("failed to start fast sync: %w", err)
@@ -175,7 +183,13 @@ func (r *BlockchainReactor) endSync() {
func (r *BlockchainReactor) SwitchToFastSync(state state.State) error {
	r.stateSynced = true
	state = state.Copy()
	return r.startSync(&state)

	err := r.startSync(&state)
	if err == nil {
		r.syncStartTime = time.Now()
	}

	return err
}

// reactor generated ticker events:
@@ -283,7 +297,6 @@ func (e bcResetState) String() string {

// Takes the channel as a parameter to avoid race conditions on r.events.
func (r *BlockchainReactor) demux(events <-chan Event) {
	var lastRate = 0.0
	var lastHundred = time.Now()

	var (
@@ -414,10 +427,15 @@ func (r *BlockchainReactor) demux(events <-chan Event) {
		switch event := event.(type) {
		case pcBlockProcessed:
			r.setSyncHeight(event.height)
			if r.syncHeight%100 == 0 {
				lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
			if (r.syncHeight-r.syncStartHeight)%100 == 0 {
				newSyncRate := 100 / time.Since(lastHundred).Seconds()
				if r.lastSyncRate == 0 {
					r.lastSyncRate = newSyncRate
				} else {
					r.lastSyncRate = 0.9*r.lastSyncRate + 0.1*newSyncRate
				}
				r.logger.Info("Fast Sync Rate", "height", r.syncHeight,
					"max_peer_height", r.maxPeerHeight, "blocks/s", lastRate)
					"max_peer_height", r.maxPeerHeight, "blocks/s", r.lastSyncRate)
				lastHundred = time.Now()
			}
			r.scheduler.send(event)
@@ -429,6 +447,7 @@ func (r *BlockchainReactor) demux(events <-chan Event) {
				r.logger.Error("Failed to switch to consensus reactor")
			}
			r.endSync()
			r.fastSync.UnSet()
			return
		case noOpEvent:
		default:
@@ -596,3 +615,29 @@ func (r *BlockchainReactor) GetMaxPeerBlockHeight() int64 {
	defer r.mtx.RUnlock()
	return r.maxPeerHeight
}

func (r *BlockchainReactor) GetTotalSyncedTime() time.Duration {
	if !r.fastSync.IsSet() || r.syncStartTime.IsZero() {
		return time.Duration(0)
	}
	return time.Since(r.syncStartTime)
}

func (r *BlockchainReactor) GetRemainingSyncTime() time.Duration {
	if !r.fastSync.IsSet() {
		return time.Duration(0)
	}

	r.mtx.RLock()
	defer r.mtx.RUnlock()

	targetSyncs := r.maxPeerHeight - r.syncStartHeight
	currentSyncs := r.syncHeight - r.syncStartHeight + 1
	if currentSyncs < 0 || r.lastSyncRate < 0.001 {
		return time.Duration(0)
	}

	remain := float64(targetSyncs-currentSyncs) / r.lastSyncRate

	return time.Duration(int64(remain * float64(time.Second)))
}
@@ -35,7 +35,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
	prevoteHeight := int64(2)
	testName := "consensus_byzantine_test"
	tickerFunc := newMockTickerFunc(true)
	appFunc := newCounter
	appFunc := newKVStore

	genDoc, privVals := factory.RandGenesisDoc(config, nValidators, false, 30)
	states := make([]*State, nValidators)

@@ -19,7 +19,6 @@ import (
	dbm "github.com/tendermint/tm-db"

	abcicli "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/example/counter"
	"github.com/tendermint/tendermint/abci/example/kvstore"
	abci "github.com/tendermint/tendermint/abci/types"
	cfg "github.com/tendermint/tendermint/config"
@@ -449,7 +448,7 @@ func randState(config *cfg.Config, nValidators int) (*State, []*validatorStub) {

	vss := make([]*validatorStub, nValidators)

	cs := newState(state, privVals[0], counter.NewApplication(true))
	cs := newState(state, privVals[0], kvstore.NewApplication())

	for i := 0; i < nValidators; i++ {
		vss[i] = newValidatorStub(privVals[i], int32(i))
@@ -862,10 +861,6 @@ func (m *mockTicker) Chan() <-chan timeoutInfo {

func (*mockTicker) SetLogger(log.Logger) {}

func newCounter() abci.Application {
	return counter.NewApplication(true)
}

func newPersistentKVStore() abci.Application {
	dir, err := ioutil.TempDir("", "persistent-kvstore")
	if err != nil {
@@ -874,6 +869,10 @@ func newPersistentKVStore() abci.Application {
	return kvstore.NewPersistentKVStoreApplication(dir)
}

func newKVStore() abci.Application {
	return kvstore.NewApplication()
}

func newPersistentKVStoreWithPath(dbDir string) abci.Application {
	return kvstore.NewPersistentKVStoreApplication(dbDir)
}

@@ -18,7 +18,9 @@ func TestReactorInvalidPrecommit(t *testing.T) {
	config := configSetup(t)

	n := 4
	states, cleanup := randConsensusState(t, config, n, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
	states, cleanup := randConsensusState(t,
		config, n, "consensus_reactor_test",
		newMockTickerFunc(true), newKVStore)
	t.Cleanup(cleanup)

	for i := 0; i < 4; i++ {

@@ -161,8 +161,8 @@ func TestMempoolRmBadTx(t *testing.T) {
	txBytes := make([]byte, 8)
	binary.BigEndian.PutUint64(txBytes, uint64(0))

	resDeliver := app.FinalizeBlock(abci.RequestFinalizeBlock{Txs: [][]byte{txBytes}})
	assert.False(t, resDeliver.Txs[0].IsErr(), fmt.Sprintf("expected no error. got %v", resDeliver))
	resDeliver := app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes})
	assert.False(t, resDeliver.IsErr(), fmt.Sprintf("expected no error. got %v", resDeliver))

	resCommit := app.Commit()
	assert.True(t, len(resCommit.Data) > 0)
@@ -233,16 +233,15 @@ func (app *CounterApplication) Info(req abci.RequestInfo) abci.ResponseInfo {
	return abci.ResponseInfo{Data: fmt.Sprintf("txs:%v", app.txCount)}
}

func (app *CounterApplication) FinalizeBlock(req abci.RequestFinalizeBlock) abci.ResponseFinalizeBlock {
	txValue := txAsUint64(req.Txs[0])
func (app *CounterApplication) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx {
	txValue := txAsUint64(req.Tx)
	if txValue != uint64(app.txCount) {
		return abci.ResponseFinalizeBlock{Txs: []*abci.ResponseDeliverTx{{
		return abci.ResponseDeliverTx{
			Code: code.CodeTypeBadNonce,
			Log:  fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.txCount, txValue)}},
		}
			Log:  fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.txCount, txValue)}
	}
	app.txCount++
	return abci.ResponseFinalizeBlock{Txs: []*abci.ResponseDeliverTx{{Code: code.CodeTypeOK}}}
	return abci.ResponseDeliverTx{Code: code.CodeTypeOK}
}

func (app *CounterApplication) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx {
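The hunk above converts the test app from the batched FinalizeBlock shape back
to per-transaction DeliverTx. The difference is only in call granularity, as
this simplified, hypothetical sketch shows (the types are stand-ins, not the
real abci package):

package main

import "fmt"

type result struct{ code uint32 }

// deliverTx is the per-transaction API: called once per tx.
func deliverTx(tx []byte) result { return result{code: 0} }

// finalizeBlock is the per-block API: one call covering every tx in the
// block, returning one result per tx.
func finalizeBlock(txs [][]byte) []result {
	out := make([]result, len(txs))
	for i := range txs {
		out[i] = deliverTx(txs[i])
	}
	return out
}

func main() {
	fmt.Println(finalizeBlock([][]byte{{0x01}, {0x02}})) // [{0} {0}]
}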
internal/consensus/mocks/cons_sync_reactor.go (new file, 28 lines)
@@ -0,0 +1,28 @@
// Code generated by mockery 2.7.5. DO NOT EDIT.

package mocks

import (
	mock "github.com/stretchr/testify/mock"
	state "github.com/tendermint/tendermint/state"
)

// ConsSyncReactor is an autogenerated mock type for the ConsSyncReactor type
type ConsSyncReactor struct {
	mock.Mock
}

// SetFastSyncingMetrics provides a mock function with given fields: _a0
func (_m *ConsSyncReactor) SetFastSyncingMetrics(_a0 float64) {
	_m.Called(_a0)
}

// SetStateSyncingMetrics provides a mock function with given fields: _a0
func (_m *ConsSyncReactor) SetStateSyncingMetrics(_a0 float64) {
	_m.Called(_a0)
}

// SwitchToConsensus provides a mock function with given fields: _a0, _a1
func (_m *ConsSyncReactor) SwitchToConsensus(_a0 state.State, _a1 bool) {
	_m.Called(_a0, _a1)
}
internal/consensus/mocks/fast_sync_reactor.go (new file, 71 lines)
@@ -0,0 +1,71 @@
// Code generated by mockery 2.7.5. DO NOT EDIT.

package mocks

import (
	mock "github.com/stretchr/testify/mock"
	state "github.com/tendermint/tendermint/state"

	time "time"
)

// FastSyncReactor is an autogenerated mock type for the FastSyncReactor type
type FastSyncReactor struct {
	mock.Mock
}

// GetMaxPeerBlockHeight provides a mock function with given fields:
func (_m *FastSyncReactor) GetMaxPeerBlockHeight() int64 {
	ret := _m.Called()

	var r0 int64
	if rf, ok := ret.Get(0).(func() int64); ok {
		r0 = rf()
	} else {
		r0 = ret.Get(0).(int64)
	}

	return r0
}

// GetRemainingSyncTime provides a mock function with given fields:
func (_m *FastSyncReactor) GetRemainingSyncTime() time.Duration {
	ret := _m.Called()

	var r0 time.Duration
	if rf, ok := ret.Get(0).(func() time.Duration); ok {
		r0 = rf()
	} else {
		r0 = ret.Get(0).(time.Duration)
	}

	return r0
}

// GetTotalSyncedTime provides a mock function with given fields:
func (_m *FastSyncReactor) GetTotalSyncedTime() time.Duration {
	ret := _m.Called()

	var r0 time.Duration
	if rf, ok := ret.Get(0).(func() time.Duration); ok {
		r0 = rf()
	} else {
		r0 = ret.Get(0).(time.Duration)
	}

	return r0
}

// SwitchToFastSync provides a mock function with given fields: _a0
func (_m *FastSyncReactor) SwitchToFastSync(_a0 state.State) error {
	ret := _m.Called(_a0)

	var r0 error
	if rf, ok := ret.Get(0).(func(state.State) error); ok {
		r0 = rf(_a0)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}
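A test could exercise this generated mock through the standard testify mock
API (On, Return, AssertExpectations); a hypothetical usage sketch, assuming
the package path shown above:

package mocks_test

import (
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"github.com/tendermint/tendermint/internal/consensus/mocks"
)

func TestFastSyncReactorMock(t *testing.T) {
	m := new(mocks.FastSyncReactor)
	m.On("GetMaxPeerBlockHeight").Return(int64(100))
	m.On("GetTotalSyncedTime").Return(42 * time.Second)

	require.EqualValues(t, 100, m.GetMaxPeerBlockHeight())
	require.Equal(t, 42*time.Second, m.GetTotalSyncedTime())
	m.AssertExpectations(t)
}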

@@ -2,6 +2,7 @@ package consensus

import (
	"fmt"
	"runtime/debug"
	"time"

	cstypes "github.com/tendermint/tendermint/internal/consensus/types"
@@ -101,6 +102,22 @@ type FastSyncReactor interface {
	SwitchToFastSync(sm.State) error

	GetMaxPeerBlockHeight() int64

	// GetTotalSyncedTime returns the duration since fast sync started.
	GetTotalSyncedTime() time.Duration

	// GetRemainingSyncTime returns the estimated time until the node is fully
	// synced; it returns 0 if fast sync is not running or if the number of
	// blocks synced so far is too small (fewer than 100).
	GetRemainingSyncTime() time.Duration
}

//go:generate mockery --case underscore --name ConsSyncReactor
// ConsSyncReactor defines an interface used for testing the behavior of node.startStateSync.
type ConsSyncReactor interface {
	SwitchToConsensus(sm.State, bool)
	SetStateSyncingMetrics(float64)
	SetFastSyncingMetrics(float64)
}

// Reactor defines a reactor for the consensus service.
@@ -295,6 +312,11 @@ conS:
conR:
%+v`, err, r.state, r))
	}

	d := types.EventDataFastSyncStatus{Complete: true, Height: state.LastBlockHeight}
	if err := r.eventBus.PublishEventFastSyncStatus(d); err != nil {
		r.Logger.Error("failed to emit the fastsync complete event", "err", err)
	}
}

// String returns a string representation of the Reactor.
@@ -1197,21 +1219,24 @@ func (r *Reactor) handleVoteSetBitsMessage(envelope p2p.Envelope, msgI Message)
// It will handle errors and any possible panics gracefully. A caller can handle
// any error returned by sending a PeerError on the respective channel.
//
// NOTE: We process these messages even when we're fast_syncing. Messages affect
// either a peer state or the consensus state. Peer state updates can happen in
// parallel, but processing of proposals, block parts, and votes are ordered by
// the p2p channel.
//
// NOTE: We block on consensus state for proposals, block parts, and votes.
func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err error) {
	defer func() {
		if e := recover(); e != nil {
			err = fmt.Errorf("panic in processing message: %v", e)
			r.Logger.Error("recovering from processing message panic", "err", err)
			r.Logger.Error(
				"recovering from processing message panic",
				"err", err,
				"stack", string(debug.Stack()),
			)
		}
	}()

	// Just skip the entire message during syncing so that we can
	// process fewer messages.
	if r.WaitSync() {
		return
	}

	// We wrap the envelope's message in a Proto wire type so we can convert back
	// the domain type that individual channel message handlers can work with. We
	// do this here once to avoid having to do it for each individual message type.
@@ -1412,3 +1437,11 @@ func (r *Reactor) peerStatsRoutine() {
func (r *Reactor) GetConsensusState() *State {
	return r.state
}

func (r *Reactor) SetStateSyncingMetrics(v float64) {
	r.Metrics.StateSyncing.Set(v)
}

func (r *Reactor) SetFastSyncingMetrics(v float64) {
	r.Metrics.FastSyncing.Set(v)
}
@@ -25,6 +25,7 @@ import (
	"github.com/tendermint/tendermint/internal/p2p/p2ptest"
	"github.com/tendermint/tendermint/internal/test/factory"
	"github.com/tendermint/tendermint/libs/log"
	tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
	tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus"
	sm "github.com/tendermint/tendermint/state"
	statemocks "github.com/tendermint/tendermint/state/mocks"
@@ -42,6 +43,7 @@ type reactorTestSuite struct {
	states        map[types.NodeID]*State
	reactors      map[types.NodeID]*Reactor
	subs          map[types.NodeID]types.Subscription
	fastsyncSubs  map[types.NodeID]types.Subscription
	stateChannels map[types.NodeID]*p2p.Channel
	dataChannels  map[types.NodeID]*p2p.Channel
	voteChannels  map[types.NodeID]*p2p.Channel
@@ -58,10 +60,11 @@ func setup(t *testing.T, numNodes int, states []*State, size int) *reactorTestSu
	t.Helper()

	rts := &reactorTestSuite{
		network:  p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numNodes}),
		states:   make(map[types.NodeID]*State),
		reactors: make(map[types.NodeID]*Reactor, numNodes),
		subs:     make(map[types.NodeID]types.Subscription, numNodes),
		network:      p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numNodes}),
		states:       make(map[types.NodeID]*State),
		reactors:     make(map[types.NodeID]*Reactor, numNodes),
		subs:         make(map[types.NodeID]types.Subscription, numNodes),
		fastsyncSubs: make(map[types.NodeID]types.Subscription, numNodes),
	}

	rts.stateChannels = rts.network.MakeChannelsNoCleanup(t, chDesc(StateChannel), new(tmcons.Message), size)
@@ -69,6 +72,8 @@ func setup(t *testing.T, numNodes int, states []*State, size int) *reactorTestSu
	rts.voteChannels = rts.network.MakeChannelsNoCleanup(t, chDesc(VoteChannel), new(tmcons.Message), size)
	rts.voteSetBitsChannels = rts.network.MakeChannelsNoCleanup(t, chDesc(VoteSetBitsChannel), new(tmcons.Message), size)

	_, cancel := context.WithCancel(context.Background())

	i := 0
	for nodeID, node := range rts.network.Nodes {
		state := states[i]
@@ -89,9 +94,13 @@ func setup(t *testing.T, numNodes int, states []*State, size int) *reactorTestSu
		blocksSub, err := state.eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock, size)
		require.NoError(t, err)

		fsSub, err := state.eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryFastSyncStatus, size)
		require.NoError(t, err)

		rts.states[nodeID] = state
		rts.subs[nodeID] = blocksSub
		rts.reactors[nodeID] = reactor
		rts.fastsyncSubs[nodeID] = fsSub

		// simulate handle initChain in handshake
		if state.state.LastBlockHeight == 0 {
@@ -117,6 +126,7 @@ func setup(t *testing.T, numNodes int, states []*State, size int) *reactorTestSu
		}

		leaktest.Check(t)
		cancel()
	})

	return rts
@@ -253,11 +263,22 @@ func waitForBlockWithUpdatedValsAndValidateIt(
	wg.Wait()
}

func ensureFastSyncStatus(t *testing.T, msg tmpubsub.Message, complete bool, height int64) {
	t.Helper()
	status, ok := msg.Data().(types.EventDataFastSyncStatus)

	require.True(t, ok)
	require.Equal(t, complete, status.Complete)
	require.Equal(t, height, status.Height)
}

func TestReactorBasic(t *testing.T) {
	config := configSetup(t)

	n := 4
	states, cleanup := randConsensusState(t, config, n, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
	states, cleanup := randConsensusState(t,
		config, n, "consensus_reactor_test",
		newMockTickerFunc(true), newKVStore)
	t.Cleanup(cleanup)

	rts := setup(t, n, states, 100) // buffer must be large enough to not deadlock
@@ -273,8 +294,21 @@ func TestReactorBasic(t *testing.T) {

		// wait till everyone makes the first new block
		go func(s types.Subscription) {
			defer wg.Done()
			<-s.Out()
			wg.Done()
		}(sub)
	}

	wg.Wait()

	for _, sub := range rts.fastsyncSubs {
		wg.Add(1)

		// wait till everyone makes the consensus switch
		go func(s types.Subscription) {
			defer wg.Done()
			msg := <-s.Out()
			ensureFastSyncStatus(t, msg, true, 0)
		}(sub)
	}

@@ -287,7 +321,7 @@ func TestReactorWithEvidence(t *testing.T) {
	n := 4
	testName := "consensus_reactor_test"
	tickerFunc := newMockTickerFunc(true)
	appFunc := newCounter
	appFunc := newKVStore

	genDoc, privVals := factory.RandGenesisDoc(config, n, false, 30)
	states := make([]*State, n)
@@ -387,7 +421,7 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {
		n,
		"consensus_reactor_test",
		newMockTickerFunc(true),
		newCounter,
		newKVStore,
		func(c *cfg.Config) {
			c.Consensus.CreateEmptyBlocks = false
		},
@@ -431,7 +465,9 @@ func TestReactorRecordsVotesAndBlockParts(t *testing.T) {
	config := configSetup(t)

	n := 4
	states, cleanup := randConsensusState(t, config, n, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
	states, cleanup := randConsensusState(t,
		config, n, "consensus_reactor_test",
		newMockTickerFunc(true), newKVStore)
	t.Cleanup(cleanup)

	rts := setup(t, n, states, 100) // buffer must be large enough to not deadlock
@@ -73,15 +73,20 @@ type mockProxyApp struct {
	abciResponses *tmstate.ABCIResponses
}

func (mock *mockProxyApp) FinalizeBlock(req abci.RequestFinalizeBlock) abci.ResponseFinalizeBlock {
	r := mock.abciResponses.FinalizeBlock
func (mock *mockProxyApp) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx {
	r := mock.abciResponses.DeliverTxs[mock.txCount]
	mock.txCount++
	if r == nil {
		return abci.ResponseFinalizeBlock{}
		return abci.ResponseDeliverTx{}
	}
	return *r
}

func (mock *mockProxyApp) EndBlock(req abci.RequestEndBlock) abci.ResponseEndBlock {
	mock.txCount = 0
	return *mock.abciResponses.EndBlock
}

func (mock *mockProxyApp) Commit() abci.ResponseCommit {
	return abci.ResponseCommit{Data: mock.appHash}
}

@@ -619,8 +619,8 @@ func TestMockProxyApp(t *testing.T) {

	assert.NotPanics(t, func() {
		abciResWithEmptyDeliverTx := new(tmstate.ABCIResponses)
		abciResWithEmptyDeliverTx.FinalizeBlock.Txs = make([]*abci.ResponseDeliverTx, 0)
		abciResWithEmptyDeliverTx.FinalizeBlock.Txs = append(abciResWithEmptyDeliverTx.FinalizeBlock.Txs, &abci.ResponseDeliverTx{})
		abciResWithEmptyDeliverTx.DeliverTxs = make([]*abci.ResponseDeliverTx, 0)
		abciResWithEmptyDeliverTx.DeliverTxs = append(abciResWithEmptyDeliverTx.DeliverTxs, &abci.ResponseDeliverTx{})

		// called when saveABCIResponses:
		bytes, err := proto.Marshal(abciResWithEmptyDeliverTx)
@@ -634,30 +634,28 @@ func TestMockProxyApp(t *testing.T) {
		mock := newMockProxyApp([]byte("mock_hash"), loadedAbciRes)

		abciRes := new(tmstate.ABCIResponses)
		abciRes.FinalizeBlock.Txs = make([]*abci.ResponseDeliverTx, len(loadedAbciRes.FinalizeBlock.Txs))
		abciRes.DeliverTxs = make([]*abci.ResponseDeliverTx, len(loadedAbciRes.DeliverTxs))
		// Execute transactions and get hash.
		proxyCb := func(req *abci.Request, res *abci.Response) {
			if r, ok := res.Value.(*abci.Response_FinalizeBlock); ok {
				for i, tx := range r.FinalizeBlock.Txs {
					// TODO: make use of res.Log
					// TODO: make use of this info
					// Blocks may include invalid txs.
					txRes := tx
					if txRes.Code == abci.CodeTypeOK {
						validTxs++
					} else {
						logger.Debug("Invalid tx", "code", txRes.Code, "log", txRes.Log)
						invalidTxs++
					}
					abciRes.FinalizeBlock.Txs[i] = txRes
					txIndex++
			if r, ok := res.Value.(*abci.Response_DeliverTx); ok {
				// TODO: make use of res.Log
				// TODO: make use of this info
				// Blocks may include invalid txs.
				txRes := r.DeliverTx
				if txRes.Code == abci.CodeTypeOK {
					validTxs++
				} else {
					logger.Debug("Invalid tx", "code", txRes.Code, "log", txRes.Log)
					invalidTxs++
				}
				abciRes.DeliverTxs[txIndex] = txRes
				txIndex++
			}
		}
		mock.SetResponseCallback(proxyCb)

		someTx := []byte("tx")
		_, err = mock.FinalizeBlockSync(context.Background(), abci.RequestFinalizeBlock{Txs: [][]byte{someTx}})
		_, err = mock.DeliverTxAsync(context.Background(), abci.RequestDeliverTx{Tx: someTx})
		assert.NoError(t, err)
	})
	assert.True(t, validTxs == 1)
@@ -1849,7 +1849,7 @@ func (cs *State) defaultSetProposal(proposal *types.Proposal) error {
// NOTE: block is not necessarily valid.
// Asynchronously triggers either enterPrevote (before we timeout of propose) or tryFinalizeCommit,
// once we have the full block.
func (cs *State) addProposalBlockPart(msg *BlockPartMessage, peerID types.NodeID) (added bool, err error) {
func (cs *State) addProposalBlockPart(msg *BlockPartMessage, peerID types.NodeID) (bool, error) {
	height, round, part := msg.Height, msg.Round, msg.Part

	// Blocks might be reused, so round mismatch is OK
@@ -1872,78 +1872,79 @@ func (cs *State) addProposalBlockPart(msg *BlockPartMessage, peerID types.NodeID
		return false, nil
	}

	added, err = cs.ProposalBlockParts.AddPart(part)
	if err != nil {
	added, err := cs.ProposalBlockParts.AddPart(part)
	if err != nil || !added {
		return added, err
	}

	if cs.ProposalBlockParts.ByteSize() > cs.state.ConsensusParams.Block.MaxBytes {
		return added, fmt.Errorf("total size of proposal block parts exceeds maximum block bytes (%d > %d)",
		return true, fmt.Errorf("total size of proposal block parts exceeds maximum block bytes (%d > %d)",
			cs.ProposalBlockParts.ByteSize(), cs.state.ConsensusParams.Block.MaxBytes,
		)
	}
	if added && cs.ProposalBlockParts.IsComplete() {
		bz, err := ioutil.ReadAll(cs.ProposalBlockParts.GetReader())
		if err != nil {
			return added, err
		}

		var pbb = new(tmproto.Block)
		err = proto.Unmarshal(bz, pbb)
		if err != nil {
			return added, err
		}

		block, err := types.BlockFromProto(pbb)
		if err != nil {
			return added, err
		}

		cs.ProposalBlock = block

		// NOTE: it's possible to receive complete proposal blocks for future rounds without having the proposal
		cs.Logger.Info("received complete proposal block", "height", cs.ProposalBlock.Height, "hash", cs.ProposalBlock.Hash())

		if err := cs.eventBus.PublishEventCompleteProposal(cs.CompleteProposalEvent()); err != nil {
			cs.Logger.Error("failed publishing event complete proposal", "err", err)
		}

		// Update Valid* if we can.
		prevotes := cs.Votes.Prevotes(cs.Round)
		blockID, hasTwoThirds := prevotes.TwoThirdsMajority()
		if hasTwoThirds && !blockID.IsZero() && (cs.ValidRound < cs.Round) {
			if cs.ProposalBlock.HashesTo(blockID.Hash) {
				cs.Logger.Debug(
					"updating valid block to new proposal block",
					"valid_round", cs.Round,
					"valid_block_hash", cs.ProposalBlock.Hash(),
				)

				cs.ValidRound = cs.Round
				cs.ValidBlock = cs.ProposalBlock
				cs.ValidBlockParts = cs.ProposalBlockParts
			}
			// TODO: In case there is +2/3 majority in Prevotes set for some
			// block and cs.ProposalBlock contains different block, either
			// proposer is faulty or voting power of faulty processes is more
			// than 1/3. We should trigger in the future accountability
			// procedure at this point.
		}

		if cs.Step <= cstypes.RoundStepPropose && cs.isProposalComplete() {
			// Move onto the next step
			cs.enterPrevote(height, cs.Round)
			if hasTwoThirds { // this is an optimisation, as this will be triggered when a prevote is added
				cs.enterPrecommit(height, cs.Round)
			}
		} else if cs.Step == cstypes.RoundStepCommit {
			// If we're waiting on the proposal block...
			cs.tryFinalizeCommit(height)
		}

		return added, nil
	if !cs.ProposalBlockParts.IsComplete() {
		return true, nil
	}

	return added, nil
	bz, err := ioutil.ReadAll(cs.ProposalBlockParts.GetReader())
	if err != nil {
		return true, err
	}

	var pbb = new(tmproto.Block)
	err = proto.Unmarshal(bz, pbb)
	if err != nil {
		return true, err
	}

	block, err := types.BlockFromProto(pbb)
	if err != nil {
		return true, err
	}

	cs.ProposalBlock = block

	// NOTE: it's possible to receive complete proposal blocks for future rounds without having the proposal
	cs.Logger.Info("received complete proposal block", "height", cs.ProposalBlock.Height, "hash", cs.ProposalBlock.Hash())

	if err := cs.eventBus.PublishEventCompleteProposal(cs.CompleteProposalEvent()); err != nil {
		cs.Logger.Error("failed publishing event complete proposal", "err", err)
	}

	// Update Valid* if we can.
	prevotes := cs.Votes.Prevotes(cs.Round)
	blockID, hasTwoThirds := prevotes.TwoThirdsMajority()
	if hasTwoThirds && !blockID.IsZero() && (cs.ValidRound < cs.Round) {
		if cs.ProposalBlock.HashesTo(blockID.Hash) {
			cs.Logger.Debug(
				"updating valid block to new proposal block",
				"valid_round", cs.Round,
				"valid_block_hash", cs.ProposalBlock.Hash(),
			)

			cs.ValidRound = cs.Round
			cs.ValidBlock = cs.ProposalBlock
			cs.ValidBlockParts = cs.ProposalBlockParts
		}
		// TODO: In case there is +2/3 majority in Prevotes set for some
		// block and cs.ProposalBlock contains different block, either
		// proposer is faulty or voting power of faulty processes is more
		// than 1/3. We should trigger in the future accountability
		// procedure at this point.
	}

	if cs.Step <= cstypes.RoundStepPropose && cs.isProposalComplete() {
		// Move onto the next step
		cs.enterPrevote(height, cs.Round)
		if hasTwoThirds { // this is an optimisation, as this will be triggered when a prevote is added
			cs.enterPrecommit(height, cs.Round)
		}
	} else if cs.Step == cstypes.RoundStepCommit {
		// If we're waiting on the proposal block...
		cs.tryFinalizeCommit(height)
	}

	return true, nil
}

// Attempt to add the vote. If it's a duplicate signature, dupeout the validator
@@ -10,7 +10,7 @@ import (
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/tendermint/tendermint/abci/example/counter"
	"github.com/tendermint/tendermint/abci/example/kvstore"
	"github.com/tendermint/tendermint/crypto/tmhash"
	cstypes "github.com/tendermint/tendermint/internal/consensus/types"
	p2pmock "github.com/tendermint/tendermint/internal/p2p/mock"
@@ -654,7 +654,7 @@ func TestStateLockPOLRelock(t *testing.T) {
	signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4)

	// before we timeout to the new round set the new proposal
	cs2 := newState(cs1.state, vs2, counter.NewApplication(true))
	cs2 := newState(cs1.state, vs2, kvstore.NewApplication())
	prop, propBlock := decideProposal(cs2, vs2, vs2.Height, vs2.Round+1)
	if prop == nil || propBlock == nil {
		t.Fatal("Failed to create proposal block with vs2")
@@ -843,7 +843,7 @@ func TestStateLockPOLUnlockOnUnknownBlock(t *testing.T) {
	signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4)

	// before we timeout to the new round set the new proposal
	cs2 := newState(cs1.state, vs2, counter.NewApplication(true))
	cs2 := newState(cs1.state, vs2, kvstore.NewApplication())
	prop, propBlock := decideProposal(cs2, vs2, vs2.Height, vs2.Round+1)
	if prop == nil || propBlock == nil {
		t.Fatal("Failed to create proposal block with vs2")
@@ -887,7 +887,7 @@ func TestStateLockPOLUnlockOnUnknownBlock(t *testing.T) {
	signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4)

	// before we timeout to the new round set the new proposal
	cs3 := newState(cs1.state, vs3, counter.NewApplication(true))
	cs3 := newState(cs1.state, vs3, kvstore.NewApplication())
	prop, propBlock = decideProposal(cs3, vs3, vs3.Height, vs3.Round+1)
	if prop == nil || propBlock == nil {
		t.Fatal("Failed to create proposal block with vs2")
@@ -1,4 +1,4 @@
// Code generated by mockery 2.9.0. DO NOT EDIT.
// Code generated by mockery v0.0.0-dev. DO NOT EDIT.

package mocks
@@ -489,14 +489,12 @@ func (evpool *Pool) batchExpiredPendingEvidence(batch dbm.Batch) (int64, time.Ti
func (evpool *Pool) removeEvidenceFromList(
	blockEvidenceMap map[string]struct{}) {

	el := evpool.evidenceList
	var nextE *clist.CElement
	for e := el.Front(); e != nil; e = nextE {
		nextE = e.Next()
	for e := evpool.evidenceList.Front(); e != nil; e = e.Next() {
		// Remove from clist
		ev := e.Value.(types.Evidence)
		if _, ok := blockEvidenceMap[evMapKey(ev)]; ok {
			evpool.evidenceList.Remove(e)
			e.DetachPrev()
		}
	}
}

@@ -2,6 +2,7 @@ package evidence

import (
	"fmt"
	"runtime/debug"
	"sync"
	"time"
@@ -165,6 +166,11 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err
	defer func() {
		if e := recover(); e != nil {
			err = fmt.Errorf("panic in processing message: %v", e)
			r.Logger.Error(
				"recovering from processing message panic",
				"err", err,
				"stack", string(debug.Stack()),
			)
		}
	}()

@@ -296,7 +302,11 @@ func (r *Reactor) broadcastEvidenceLoop(peerID types.NodeID, closer *tmsync.Clos
		r.peerWG.Done()

		if e := recover(); e != nil {
			r.Logger.Error("recovering from broadcasting evidence loop", "err", e)
			r.Logger.Error(
				"recovering from broadcasting evidence loop",
				"err", e,
				"stack", string(debug.Stack()),
			)
		}
	}()
@@ -146,7 +146,7 @@ func (g *Group) OnStart() error {
func (g *Group) OnStop() {
	g.ticker.Stop()
	if err := g.FlushAndSync(); err != nil {
		g.Logger.Error("Error flushin to disk", "err", err)
		g.Logger.Error("Error flushing to disk", "err", err)
	}
}

@@ -160,7 +160,7 @@ func (g *Group) Wait() {
// Close closes the head file. The group must be stopped by this moment.
func (g *Group) Close() {
	if err := g.FlushAndSync(); err != nil {
		g.Logger.Error("Error flushin to disk", "err", err)
		g.Logger.Error("Error flushing to disk", "err", err)
	}

	g.mtx.Lock()
@@ -196,26 +196,21 @@ func (e *CElement) SetPrev(newPrev *CElement) {
	e.mtx.Unlock()
}

// Detach is a shortcut to mark the given element removed and detach the prev/next elements.
func (e *CElement) Detach() {
func (e *CElement) SetRemoved() {
	e.mtx.Lock()
	defer e.mtx.Unlock()

	e.removed = true

	// This wakes up anyone waiting in either direction.
	if e.prev == nil {
		e.prevWg.Done()
		close(e.prevWaitCh)
	} else {
		e.prev = nil
	}

	if e.next == nil {
		e.nextWg.Done()
		close(e.nextWaitCh)
	} else {
		e.next = nil
	}
	e.mtx.Unlock()
}

//--------------------------------------------------------------------------------

@@ -353,26 +348,24 @@ func (l *CList) PushBack(v interface{}) *CElement {
	return e
}

// Remove removes the given element in the CList.
// CONTRACT: Caller must call e.DetachPrev() and/or e.DetachNext() to avoid memory leaks.
// NOTE: As per the contract of CList, removed elements cannot be added back.
// Because CList detaches the prev/next elements when it removes the given element,
// do not use CElement.Next() in the for-loop post statement; declare a variable
// before the loop and assign the Next() element to it inside the loop body
// instead.
func (l *CList) Remove(e *CElement) interface{} {
	l.mtx.Lock()
	defer l.mtx.Unlock()

	prev := e.Prev()
	next := e.Next()

	if l.head == nil || l.tail == nil {
		l.mtx.Unlock()
		panic("Remove(e) on empty CList")
	}
	if prev == nil && l.head != e {
		l.mtx.Unlock()
		panic("Remove(e) with false head")
	}
	if next == nil && l.tail != e {
		l.mtx.Unlock()
		panic("Remove(e) with false tail")
	}
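The iteration contract in that comment can be shown concretely: because Remove
detaches prev/next, advancing with el.Next() after removal would stop the loop
early. A sketch of the safe drain pattern, assuming the clist package lives at
the internal/libs path used at this revision:

package main

import (
	"fmt"

	clist "github.com/tendermint/tendermint/internal/libs/clist"
)

func main() {
	l := clist.New()
	for i := 0; i < 3; i++ {
		l.PushBack(i)
	}

	// Safe: always re-read the current front instead of calling el.Next()
	// on an element that has just been removed and detached.
	for el := l.Front(); el != nil; el = l.Front() {
		l.Remove(el)
	}
	fmt.Println(l.Len()) // 0
}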
@@ -397,53 +390,13 @@ func (l *CList) Remove(e *CElement) interface{} {
		next.SetPrev(prev)
	}

	e.Detach()
	// Set .Done() on e, otherwise waiters will wait forever.
	e.SetRemoved()

	l.mtx.Unlock()
	return e.Value
}

// Clear removes all the elements in the CList
func (l *CList) Clear() {
	l.mtx.Lock()
	defer l.mtx.Unlock()

	if l.head == nil || l.tail == nil {
		return
	}

	for e := l.head; e != nil; e = l.head {
		prevE := e.Prev()
		nextE := e.Next()

		if prevE == nil && e != l.head {
			panic("CList.Clear failed due to nil prev element")
		}

		if nextE == nil && e != l.tail {
			panic("CList.Clear failed due to nil next element")
		}

		if l.len == 1 {
			l.wg = waitGroup1()
			l.waitCh = make(chan struct{})
		}
		l.len--

		if prevE == nil {
			l.head = nextE
		} else {
			prevE.SetNext(nextE)
		}
		if nextE == nil {
			l.tail = prevE
		} else {
			nextE.SetPrev(prevE)
		}

		e.Detach()
	}
}

func waitGroup1() (wg *sync.WaitGroup) {
	wg = &sync.WaitGroup{}
	wg.Add(1)
@@ -8,6 +8,7 @@ import (
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestPanicOnMaxLength(t *testing.T) {
@@ -252,16 +253,9 @@ func TestScanRightDeleteRandom(t *testing.T) {
	// time.Sleep(time.Second * 1)

	// And remove all the elements.
	// we detach the prev/next element in CList.Remove, so just assign l.Front() directly.
	halfElements := numElements / 2
	for el := l.Front(); el != nil && halfElements > 0; el = l.Front() {
	for el := l.Front(); el != nil; el = el.Next() {
		l.Remove(el)
		halfElements--
	}

	// remove the remaining half of the elements in the CList
	l.Clear()

	if l.Len() != 0 {
		t.Fatal("Failed to remove all elements from CList")
	}
@@ -341,3 +335,50 @@ FOR_LOOP2:
		t.Fatalf("number of pushed items (%d) not equal to number of seen items (%d)", pushed, seen)
	}
}

func TestRemoved(t *testing.T) {
	l := New()
	el1 := l.PushBack(1)
	el2 := l.PushBack(2)
	l.Remove(el1)
	require.True(t, el1.Removed())
	require.False(t, el2.Removed())
}

func TestNextWaitChan(t *testing.T) {
	l := New()
	el1 := l.PushBack(1)
	t.Run("tail element should not have a closed nextWaitChan", func(t *testing.T) {
		select {
		case <-el1.NextWaitChan():
			t.Fatal("nextWaitChan should not have been closed")
		default:
		}
	})

	el2 := l.PushBack(2)
	t.Run("adding element should close tail nextWaitChan", func(t *testing.T) {
		select {
		case <-el1.NextWaitChan():
			require.NotNil(t, el1.Next())
		default:
			t.Fatal("nextWaitChan should have been closed")
		}

		select {
		case <-el2.NextWaitChan():
			t.Fatal("nextWaitChan should not have been closed")
		default:
		}
	})

	t.Run("removing element should close its nextWaitChan", func(t *testing.T) {
		l.Remove(el2)
		select {
		case <-el2.NextWaitChan():
			require.Nil(t, el2.Next())
		default:
			t.Fatal("nextWaitChan should have been closed")
		}
	})
}
@@ -164,7 +164,10 @@ func (mem *CListMempool) Flush() {
	_ = atomic.SwapInt64(&mem.txsBytes, 0)
	mem.cache.Reset()

	mem.txs.Clear()
	for e := mem.txs.Front(); e != nil; e = e.Next() {
		mem.txs.Remove(e)
		e.DetachPrev()
	}

	mem.txsMap.Range(func(key, _ interface{}) bool {
		mem.txsMap.Delete(key)
@@ -335,6 +338,7 @@ func (mem *CListMempool) addTx(memTx *mempoolTx) {
// - resCbRecheck (lock not held) if tx was invalidated
func (mem *CListMempool) removeTx(tx types.Tx, elem *clist.CElement, removeFromCache bool) {
	mem.txs.Remove(elem)
	elem.DetachPrev()
	mem.txsMap.Delete(mempool.TxKey(tx))
	atomic.AddInt64(&mem.txsBytes, int64(-len(tx)))
@@ -15,7 +15,6 @@ import (
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/tendermint/tendermint/abci/example/counter"
	"github.com/tendermint/tendermint/abci/example/kvstore"
	abciserver "github.com/tendermint/tendermint/abci/server"
	abci "github.com/tendermint/tendermint/abci/types"
@@ -217,7 +216,7 @@ func TestMempoolUpdate(t *testing.T) {
}

func TestMempool_KeepInvalidTxsInCache(t *testing.T) {
	app := counter.NewApplication(true)
	app := kvstore.NewApplication()
	cc := proxy.NewLocalClientCreator(app)
	wcfg := cfg.DefaultConfig()
	wcfg.Mempool.KeepInvalidTxsInCache = true
@@ -236,7 +235,8 @@ func TestMempool_KeepInvalidTxsInCache(t *testing.T) {
	require.NoError(t, err)

	// simulate new block
	_ = app.FinalizeBlock(abci.RequestFinalizeBlock{Txs: [][]byte{a, b}})
	_ = app.DeliverTx(abci.RequestDeliverTx{Tx: a})
	_ = app.DeliverTx(abci.RequestDeliverTx{Tx: b})
	err = mp.Update(1, []types.Tx{a, b},
		[]*abci.ResponseDeliverTx{{Code: abci.CodeTypeOK}, {Code: 2}}, nil, nil)
	require.NoError(t, err)
@@ -308,7 +308,7 @@ func TestTxsAvailable(t *testing.T) {
}

func TestSerialReap(t *testing.T) {
	app := counter.NewApplication(true)
	app := kvstore.NewApplication()
	cc := proxy.NewLocalClientCreator(app)

	mp, cleanup := newMempoolWithApp(cc)
@@ -362,29 +362,24 @@ func TestSerialReap(t *testing.T) {
	commitRange := func(start, end int) {
		ctx := context.Background()
		// Deliver some txs.
		var txs = make([][]byte, end)
		for i := start; i < end; i++ {
			txBytes := make([]byte, 8)
			binary.BigEndian.PutUint64(txBytes, uint64(i))
			txs[i] = txBytes
		}
		res, err := appConnCon.FinalizeBlockSync(ctx, abci.RequestFinalizeBlock{Txs: txs})
		if err != nil {
			t.Errorf("client error committing tx: %v", err)
		}
		for _, tx := range res.Txs {
			if tx.IsErr() {
			res, err := appConnCon.DeliverTxSync(ctx, abci.RequestDeliverTx{Tx: txBytes})
			if err != nil {
				t.Errorf("client error committing tx: %v", err)
			}
			if res.IsErr() {
				t.Errorf("error committing tx. Code:%v result:%X log:%v",
					tx.Code, tx.Data, tx.Log)
					res.Code, res.Data, res.Log)
			}
		}

		resCommit, err := appConnCon.CommitSync(ctx)
		res, err := appConnCon.CommitSync(ctx)
		if err != nil {
			t.Errorf("client error committing: %v", err)
		}
		if len(resCommit.Data) != 8 {
			t.Errorf("error committing. Hash:%X", resCommit.Data)
		if len(res.Data) != 8 {
			t.Errorf("error committing. Hash:%X", res.Data)
		}
	}
@@ -409,7 +404,7 @@ func TestSerialReap(t *testing.T) {
	// Reap again. We should get the same amount
	reapCheck(1000)

	// Commit from the consensus AppConn
	// Commit from the conensus AppConn
	commitRange(0, 500)
	updateRange(0, 500)

@@ -512,7 +507,7 @@ func TestMempoolTxsBytes(t *testing.T) {
	}

	// 6. zero after tx is rechecked and removed due to not being valid anymore
	app2 := counter.NewApplication(true)
	app2 := kvstore.NewApplication()
	cc = proxy.NewLocalClientCreator(app2)
	mp, cleanup = newMempoolWithApp(cc)
	defer cleanup()
@@ -534,11 +529,9 @@ func TestMempoolTxsBytes(t *testing.T) {
		}
	})
	ctx := context.Background()
	res, err := appConnCon.FinalizeBlockSync(ctx, abci.RequestFinalizeBlock{Txs: [][]byte{txBytes}})
	res, err := appConnCon.DeliverTxSync(ctx, abci.RequestDeliverTx{Tx: txBytes})
	require.NoError(t, err)
	for _, tx := range res.Txs {
		require.EqualValues(t, 0, tx.Code)
	}
	require.EqualValues(t, 0, res.Code)
	res2, err := appConnCon.CommitSync(ctx)
	require.NoError(t, err)
	require.NotEmpty(t, res2.Data)
@@ -546,16 +539,16 @@ func TestMempoolTxsBytes(t *testing.T) {
	// Pretend like we committed nothing so txBytes gets rechecked and removed.
	err = mp.Update(1, []types.Tx{}, abciResponses(0, abci.CodeTypeOK), nil, nil)
	require.NoError(t, err)
	assert.EqualValues(t, 0, mp.SizeBytes())
	assert.EqualValues(t, 8, mp.SizeBytes())

	// 7. Test RemoveTxByKey function
	err = mp.CheckTx(context.Background(), []byte{0x06}, nil, mempool.TxInfo{})
	require.NoError(t, err)
	assert.EqualValues(t, 1, mp.SizeBytes())
	assert.EqualValues(t, 9, mp.SizeBytes())
	mp.RemoveTxByKey(mempool.TxKey([]byte{0x07}), true)
	assert.EqualValues(t, 1, mp.SizeBytes())
	assert.EqualValues(t, 9, mp.SizeBytes())
	mp.RemoveTxByKey(mempool.TxKey([]byte{0x06}), true)
	assert.EqualValues(t, 0, mp.SizeBytes())
	assert.EqualValues(t, 8, mp.SizeBytes())

}
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"runtime/debug"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
@@ -188,6 +189,11 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err
|
||||
defer func() {
|
||||
if e := recover(); e != nil {
|
||||
err = fmt.Errorf("panic in processing message: %v", e)
|
||||
r.Logger.Error(
|
||||
"recovering from processing message panic",
|
||||
"err", err,
|
||||
"stack", string(debug.Stack()),
|
||||
)
|
||||
}
|
||||
}()
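
The deferred recover above converts a panic raised while processing a peer message into an ordinary error on the named return value, so a single malformed message cannot crash the node. A minimal standalone sketch of the same pattern; the handler name and message type here are illustrative, not the reactor's API:

package main

import (
	"fmt"
	"runtime/debug"
)

// handle is a hypothetical message handler that converts a downstream panic
// into an ordinary error via a deferred recover over a named return value.
func handle(msg string) (err error) {
	defer func() {
		if e := recover(); e != nil {
			// The named return value is overwritten, so callers see the
			// panic as a regular error along with a captured stack trace.
			err = fmt.Errorf("panic in processing message: %v", e)
			fmt.Printf("recovered: %v\nstack: %s\n", err, debug.Stack())
		}
	}()

	if msg == "" {
		panic("empty message")
	}
	return nil
}

func main() {
	fmt.Println(handle(""))   // panic in processing message: empty message
	fmt.Println(handle("ok")) // <nil>
}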

@@ -312,7 +318,11 @@ func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer)
r.peerWG.Done()

if e := recover(); e != nil {
r.Logger.Error("recovering from broadcasting mempool loop", "err", e)
r.Logger.Error(
"recovering from broadcasting mempool loop",
"err", e,
"stack", string(debug.Stack()),
)
}
}()

@@ -74,6 +74,14 @@ type TxMempool struct {
// thread-safe priority queue.
priorityIndex *TxPriorityQueue

// heightIndex defines a height-based, in ascending order, transaction index.
// i.e. older transactions are first.
heightIndex *WrappedTxList

// timestampIndex defines a timestamp-based, in ascending order, transaction
// index. i.e. older transactions are first.
timestampIndex *WrappedTxList

// A read/write lock is used to safe guard updates, insertions and deletions
// from the mempool. A read-lock is implicitly acquired when executing CheckTx,
// however, a caller must explicitly grab a write-lock via Lock when updating
@@ -101,6 +109,12 @@ func NewTxMempool(
txStore: NewTxStore(),
gossipIndex: clist.New(),
priorityIndex: NewTxPriorityQueue(),
heightIndex: NewWrappedTxList(func(wtx1, wtx2 *WrappedTx) bool {
return wtx1.height >= wtx2.height
}),
timestampIndex: NewWrappedTxList(func(wtx1, wtx2 *WrappedTx) bool {
return wtx1.timestamp.After(wtx2.timestamp) || wtx1.timestamp.Equal(wtx2.timestamp)
}),
}

if cfg.CacheSize > 0 {
@@ -279,6 +293,7 @@ func (txmp *TxMempool) CheckTx(
tx: tx,
hash: txHash,
timestamp: time.Now().UTC(),
height: txmp.height,
}
txmp.initTxCallback(wtx, res, txInfo)

@@ -300,12 +315,11 @@ func (txmp *TxMempool) Flush() {
txmp.mtx.RLock()
defer txmp.mtx.RUnlock()

txmp.heightIndex.Reset()
txmp.timestampIndex.Reset()

for _, wtx := range txmp.txStore.GetAllTxs() {
if !txmp.txStore.IsTxRemoved(wtx.hash) {
txmp.txStore.RemoveTx(wtx)
txmp.priorityIndex.RemoveTx(wtx)
txmp.gossipIndex.Remove(wtx.gossipEl)
}
txmp.removeTx(wtx, false)
}

atomic.SwapInt64(&txmp.sizeBytes, 0)
@@ -443,6 +457,8 @@ func (txmp *TxMempool) Update(
}
}

txmp.purgeExpiredTxs(blockHeight)

// If there are any uncommitted transactions left in the mempool, we either
// initiate re-CheckTx per remaining transaction or notify that remaining
// transactions are left.
@@ -488,105 +504,110 @@ func (txmp *TxMempool) Update(
// - An explicit lock is NOT required.
func (txmp *TxMempool) initTxCallback(wtx *WrappedTx, res *abci.Response, txInfo mempool.TxInfo) {
checkTxRes, ok := res.Value.(*abci.Response_CheckTx)
if ok {
var err error
if txmp.postCheck != nil {
err = txmp.postCheck(wtx.tx, checkTxRes.CheckTx)
if !ok {
return
}

var err error
if txmp.postCheck != nil {
err = txmp.postCheck(wtx.tx, checkTxRes.CheckTx)
}

if err != nil || checkTxRes.CheckTx.Code != abci.CodeTypeOK {
// ignore bad transactions
txmp.logger.Info(
"rejected bad transaction",
"priority", wtx.priority,
"tx", fmt.Sprintf("%X", wtx.tx.Hash()),
"peer_id", txInfo.SenderNodeID,
"code", checkTxRes.CheckTx.Code,
"post_check_err", err,
)

txmp.metrics.FailedTxs.Add(1)

if !txmp.config.KeepInvalidTxsInCache {
txmp.cache.Remove(wtx.tx)
}
if err != nil {
checkTxRes.CheckTx.MempoolError = err.Error()
}
return
}

if checkTxRes.CheckTx.Code == abci.CodeTypeOK && err == nil {
sender := checkTxRes.CheckTx.Sender
priority := checkTxRes.CheckTx.Priority
sender := checkTxRes.CheckTx.Sender
priority := checkTxRes.CheckTx.Priority

if len(sender) > 0 {
if wtx := txmp.txStore.GetTxBySender(sender); wtx != nil {
txmp.logger.Error(
"rejected incoming good transaction; tx already exists for sender",
"tx", fmt.Sprintf("%X", wtx.tx.Hash()),
"sender", sender,
)
txmp.metrics.RejectedTxs.Add(1)
return
}
}

if err := txmp.canAddTx(wtx); err != nil {
evictTxs := txmp.priorityIndex.GetEvictableTxs(
priority,
int64(wtx.Size()),
txmp.SizeBytes(),
txmp.config.MaxTxsBytes,
)
if len(evictTxs) == 0 {
// No room for the new incoming transaction so we just remove it from
// the cache.
txmp.cache.Remove(wtx.tx)
txmp.logger.Error(
"rejected incoming good transaction; mempool full",
"tx", fmt.Sprintf("%X", wtx.tx.Hash()),
"err", err.Error(),
)
txmp.metrics.RejectedTxs.Add(1)
return
}

// evict an existing transaction(s)
//
// NOTE:
// - The transaction, toEvict, can be removed while a concurrent
// reCheckTx callback is being executed for the same transaction.
for _, toEvict := range evictTxs {
txmp.removeTx(toEvict, true)
txmp.logger.Debug(
"evicted existing good transaction; mempool full",
"old_tx", fmt.Sprintf("%X", toEvict.tx.Hash()),
"old_priority", toEvict.priority,
"new_tx", fmt.Sprintf("%X", wtx.tx.Hash()),
"new_priority", wtx.priority,
)
txmp.metrics.EvictedTxs.Add(1)
}
}

wtx.gasWanted = checkTxRes.CheckTx.GasWanted
wtx.height = txmp.height
wtx.priority = priority
wtx.sender = sender
wtx.peers = map[uint16]struct{}{
txInfo.SenderID: {},
}

txmp.metrics.TxSizeBytes.Observe(float64(wtx.Size()))
txmp.metrics.Size.Set(float64(txmp.Size()))

txmp.insertTx(wtx)
txmp.logger.Debug(
"inserted good transaction",
"priority", wtx.priority,
if len(sender) > 0 {
if wtx := txmp.txStore.GetTxBySender(sender); wtx != nil {
txmp.logger.Error(
"rejected incoming good transaction; tx already exists for sender",
"tx", fmt.Sprintf("%X", wtx.tx.Hash()),
"height", txmp.height,
"num_txs", txmp.Size(),
"sender", sender,
)
txmp.notifyTxsAvailable()

} else {
// ignore bad transactions
txmp.logger.Info(
"rejected bad transaction",
"priority", wtx.priority,
"tx", fmt.Sprintf("%X", wtx.tx.Hash()),
"peer_id", txInfo.SenderNodeID,
"code", checkTxRes.CheckTx.Code,
"post_check_err", err,
)

txmp.metrics.FailedTxs.Add(1)

if !txmp.config.KeepInvalidTxsInCache {
txmp.cache.Remove(wtx.tx)
}
txmp.metrics.RejectedTxs.Add(1)
return
}
}

if err := txmp.canAddTx(wtx); err != nil {
evictTxs := txmp.priorityIndex.GetEvictableTxs(
priority,
int64(wtx.Size()),
txmp.SizeBytes(),
txmp.config.MaxTxsBytes,
)
if len(evictTxs) == 0 {
// No room for the new incoming transaction so we just remove it from
// the cache.
txmp.cache.Remove(wtx.tx)
txmp.logger.Error(
"rejected incoming good transaction; mempool full",
"tx", fmt.Sprintf("%X", wtx.tx.Hash()),
"err", err.Error(),
)
txmp.metrics.RejectedTxs.Add(1)
return
}

// evict an existing transaction(s)
//
// NOTE:
// - The transaction, toEvict, can be removed while a concurrent
// reCheckTx callback is being executed for the same transaction.
for _, toEvict := range evictTxs {
txmp.removeTx(toEvict, true)
txmp.logger.Debug(
"evicted existing good transaction; mempool full",
"old_tx", fmt.Sprintf("%X", toEvict.tx.Hash()),
"old_priority", toEvict.priority,
"new_tx", fmt.Sprintf("%X", wtx.tx.Hash()),
"new_priority", wtx.priority,
)
txmp.metrics.EvictedTxs.Add(1)
}
}

wtx.gasWanted = checkTxRes.CheckTx.GasWanted
wtx.priority = priority
wtx.sender = sender
wtx.peers = map[uint16]struct{}{
txInfo.SenderID: {},
}

txmp.metrics.TxSizeBytes.Observe(float64(wtx.Size()))
txmp.metrics.Size.Set(float64(txmp.Size()))

txmp.insertTx(wtx)
txmp.logger.Debug(
"inserted good transaction",
"priority", wtx.priority,
"tx", fmt.Sprintf("%X", wtx.tx.Hash()),
"height", txmp.height,
"num_txs", txmp.Size(),
)
txmp.notifyTxsAvailable()

}

// defaultTxCallback performs the default CheckTx application callback. This is
@@ -721,6 +742,8 @@ func (txmp *TxMempool) canAddTx(wtx *WrappedTx) error {
func (txmp *TxMempool) insertTx(wtx *WrappedTx) {
txmp.txStore.SetTx(wtx)
txmp.priorityIndex.PushTx(wtx)
txmp.heightIndex.Insert(wtx)
txmp.timestampIndex.Insert(wtx)

// Insert the transaction into the gossip index and mark the reference to the
// linked-list element, which will be needed at a later point when the
@@ -738,10 +761,13 @@ func (txmp *TxMempool) removeTx(wtx *WrappedTx, removeFromCache bool) {

txmp.txStore.RemoveTx(wtx)
txmp.priorityIndex.RemoveTx(wtx)
txmp.heightIndex.Remove(wtx)
txmp.timestampIndex.Remove(wtx)

// Remove the transaction from the gossip index and cleanup the linked-list
// element so it can be garbage collected.
txmp.gossipIndex.Remove(wtx.gossipEl)
wtx.gossipEl.DetachPrev()

atomic.AddInt64(&txmp.sizeBytes, int64(-wtx.Size()))

@@ -750,6 +776,56 @@ func (txmp *TxMempool) removeTx(wtx *WrappedTx, removeFromCache bool) {
}
}

// purgeExpiredTxs removes all transactions that have exceeded their respective
// height and/or time based TTLs from their respective indexes. Every expired
// transaction will be removed from the mempool entirely, except for the cache.
//
// NOTE: purgeExpiredTxs must only be called during TxMempool#Update in which
// the caller has a write-lock on the mempool and so we can safely iterate over
// the height and time based indexes.
func (txmp *TxMempool) purgeExpiredTxs(blockHeight int64) {
now := time.Now()
expiredTxs := make(map[[mempool.TxKeySize]byte]*WrappedTx)

if txmp.config.TTLNumBlocks > 0 {
purgeIdx := -1
for i, wtx := range txmp.heightIndex.txs {
if (blockHeight - wtx.height) > txmp.config.TTLNumBlocks {
expiredTxs[mempool.TxKey(wtx.tx)] = wtx
purgeIdx = i
} else {
// since the index is sorted, we know no other txs can be purged
break
}
}

if purgeIdx >= 0 {
txmp.heightIndex.txs = txmp.heightIndex.txs[purgeIdx+1:]
}
}

if txmp.config.TTLDuration > 0 {
purgeIdx := -1
for i, wtx := range txmp.timestampIndex.txs {
if now.Sub(wtx.timestamp) > txmp.config.TTLDuration {
expiredTxs[mempool.TxKey(wtx.tx)] = wtx
purgeIdx = i
} else {
// since the index is sorted, we know no other txs can be purged
break
}
}

if purgeIdx >= 0 {
txmp.timestampIndex.txs = txmp.timestampIndex.txs[purgeIdx+1:]
}
}

for _, wtx := range expiredTxs {
txmp.removeTx(wtx, false)
}
}
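
Because both indexes are kept in ascending order (oldest entries first), the purge loop above can record the last expired position and stop at the first still-live entry, then drop the expired prefix with a single re-slice. A small self-contained sketch of that pattern, using a simplified type rather than the mempool's own:

package main

import "fmt"

type wtx struct{ height int64 }

// purgeExpired mirrors the purgeIdx pattern above on a height-ascending
// slice: the scan can stop at the first still-live entry and drop the
// expired prefix in one re-slice.
func purgeExpired(txs []*wtx, blockHeight, ttlBlocks int64) []*wtx {
	purgeIdx := -1
	for i, tx := range txs {
		if blockHeight-tx.height > ttlBlocks {
			purgeIdx = i
		} else {
			// sorted index: nothing later can be expired
			break
		}
	}
	return txs[purgeIdx+1:]
}

func main() {
	txs := []*wtx{{height: 1}, {height: 5}, {height: 9}}
	live := purgeExpired(txs, 12, 5) // heights 1 and 5 expire at height 12
	fmt.Println(len(live))           // prints 1
}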

func (txmp *TxMempool) notifyTxsAvailable() {
if txmp.Size() == 0 {
panic("attempt to notify txs available but mempool is empty!")

@@ -3,11 +3,13 @@ package v1
import (
"bytes"
"context"
"errors"
"fmt"
"math/rand"
"os"
"sort"
"strconv"
"strings"
"sync"
"testing"
"time"
@@ -70,13 +72,13 @@ func (app *application) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx {
}
}

func setup(t testing.TB, cacheSize int) *TxMempool {
func setup(t testing.TB, cacheSize int, options ...TxMempoolOption) *TxMempool {
t.Helper()

app := &application{kvstore.NewApplication()}
cc := proxy.NewLocalClientCreator(app)

cfg := config.ResetTestRoot(t.Name())
cfg := config.ResetTestRoot(strings.ReplaceAll(t.Name(), "/", "|"))
cfg.Mempool.CacheSize = cacheSize

appConnMem, err := cc.NewABCIClient()
@@ -88,7 +90,7 @@ func setup(t testing.TB, cacheSize int) *TxMempool {
require.NoError(t, appConnMem.Stop())
})

return NewTxMempool(log.TestingLogger().With("test", t.Name()), cfg.Mempool, appConnMem, 0)
return NewTxMempool(log.TestingLogger().With("test", t.Name()), cfg.Mempool, appConnMem, 0, options...)
}

func checkTxs(t *testing.T, txmp *TxMempool, numTxs int, peerID uint16) []testTx {
@@ -102,14 +104,10 @@ func checkTxs(t *testing.T, txmp *TxMempool, numTxs int, peerID uint16) []testTx
_, err := rng.Read(prefix)
require.NoError(t, err)

// sender := make([]byte, 10)
// _, err = rng.Read(sender)
// require.NoError(t, err)

priority := int64(rng.Intn(9999-1000) + 1000)

txs[i] = testTx{
tx: []byte(fmt.Sprintf("sender-%d=%X=%d", i, prefix, priority)),
tx: []byte(fmt.Sprintf("sender-%d-%d=%X=%d", i, peerID, prefix, priority)),
priority: priority,
}
require.NoError(t, txmp.CheckTx(context.Background(), txs[i].tx, nil, txInfo))
@@ -176,7 +174,7 @@ func TestTxMempool_Size(t *testing.T) {
txmp := setup(t, 0)
txs := checkTxs(t, txmp, 100, 0)
require.Equal(t, len(txs), txmp.Size())
require.Equal(t, int64(5490), txmp.SizeBytes())
require.Equal(t, int64(5690), txmp.SizeBytes())

rawTxs := make([]types.Tx, len(txs))
for i, tx := range txs {
@@ -193,14 +191,14 @@ func TestTxMempool_Size(t *testing.T) {
txmp.Unlock()

require.Equal(t, len(rawTxs)/2, txmp.Size())
require.Equal(t, int64(2750), txmp.SizeBytes())
require.Equal(t, int64(2850), txmp.SizeBytes())
}

func TestTxMempool_Flush(t *testing.T) {
txmp := setup(t, 0)
txs := checkTxs(t, txmp, 100, 0)
require.Equal(t, len(txs), txmp.Size())
require.Equal(t, int64(5490), txmp.SizeBytes())
require.Equal(t, int64(5690), txmp.SizeBytes())

rawTxs := make([]types.Tx, len(txs))
for i, tx := range txs {
@@ -225,7 +223,7 @@ func TestTxMempool_ReapMaxBytesMaxGas(t *testing.T) {
txmp := setup(t, 0)
tTxs := checkTxs(t, txmp, 100, 0) // all txs request 1 gas unit
require.Equal(t, len(tTxs), txmp.Size())
require.Equal(t, int64(5490), txmp.SizeBytes())
require.Equal(t, int64(5690), txmp.SizeBytes())

txMap := make(map[[mempool.TxKeySize]byte]testTx)
priorities := make([]int64, len(tTxs))
@@ -252,30 +250,30 @@ func TestTxMempool_ReapMaxBytesMaxGas(t *testing.T) {
reapedTxs := txmp.ReapMaxBytesMaxGas(-1, 50)
ensurePrioritized(reapedTxs)
require.Equal(t, len(tTxs), txmp.Size())
require.Equal(t, int64(5490), txmp.SizeBytes())
require.Equal(t, int64(5690), txmp.SizeBytes())
require.Len(t, reapedTxs, 50)

// reap by transaction bytes only
reapedTxs = txmp.ReapMaxBytesMaxGas(1000, -1)
ensurePrioritized(reapedTxs)
require.Equal(t, len(tTxs), txmp.Size())
require.Equal(t, int64(5490), txmp.SizeBytes())
require.Len(t, reapedTxs, 17)
require.Equal(t, int64(5690), txmp.SizeBytes())
require.GreaterOrEqual(t, len(reapedTxs), 16)

// Reap by both transaction bytes and gas, where the size yields 31 reaped
// transactions and the gas limit reaps 26 transactions.
// transactions and the gas limit reaps 25 transactions.
reapedTxs = txmp.ReapMaxBytesMaxGas(1500, 30)
ensurePrioritized(reapedTxs)
require.Equal(t, len(tTxs), txmp.Size())
require.Equal(t, int64(5490), txmp.SizeBytes())
require.Len(t, reapedTxs, 26)
require.Equal(t, int64(5690), txmp.SizeBytes())
require.Len(t, reapedTxs, 25)
}

func TestTxMempool_ReapMaxTxs(t *testing.T) {
txmp := setup(t, 0)
tTxs := checkTxs(t, txmp, 100, 0)
require.Equal(t, len(tTxs), txmp.Size())
require.Equal(t, int64(5490), txmp.SizeBytes())
require.Equal(t, int64(5690), txmp.SizeBytes())

txMap := make(map[[mempool.TxKeySize]byte]testTx)
priorities := make([]int64, len(tTxs))
@@ -302,21 +300,21 @@ func TestTxMempool_ReapMaxTxs(t *testing.T) {
reapedTxs := txmp.ReapMaxTxs(-1)
ensurePrioritized(reapedTxs)
require.Equal(t, len(tTxs), txmp.Size())
require.Equal(t, int64(5490), txmp.SizeBytes())
require.Equal(t, int64(5690), txmp.SizeBytes())
require.Len(t, reapedTxs, len(tTxs))

// reap a single transaction
reapedTxs = txmp.ReapMaxTxs(1)
ensurePrioritized(reapedTxs)
require.Equal(t, len(tTxs), txmp.Size())
require.Equal(t, int64(5490), txmp.SizeBytes())
require.Equal(t, int64(5690), txmp.SizeBytes())
require.Len(t, reapedTxs, 1)

// reap half of the transactions
reapedTxs = txmp.ReapMaxTxs(len(tTxs) / 2)
ensurePrioritized(reapedTxs)
require.Equal(t, len(tTxs), txmp.Size())
require.Equal(t, int64(5490), txmp.SizeBytes())
require.Equal(t, int64(5690), txmp.SizeBytes())
require.Len(t, reapedTxs, len(tTxs)/2)
}

@@ -324,11 +322,17 @@ func TestTxMempool_CheckTxExceedsMaxSize(t *testing.T) {
txmp := setup(t, 0)

rng := rand.New(rand.NewSource(time.Now().UnixNano()))
tx := make([]byte, txmp.config.MaxTxsBytes+1)
tx := make([]byte, txmp.config.MaxTxBytes+1)
_, err := rng.Read(tx)
require.NoError(t, err)

require.Error(t, txmp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{SenderID: 0}))

tx = make([]byte, txmp.config.MaxTxBytes-1)
_, err = rng.Read(tx)
require.NoError(t, err)

require.NoError(t, txmp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{SenderID: 0}))
}

func TestTxMempool_CheckTxSamePeer(t *testing.T) {
@@ -431,3 +435,93 @@ func TestTxMempool_ConcurrentTxs(t *testing.T) {
require.Zero(t, txmp.Size())
require.Zero(t, txmp.SizeBytes())
}

func TestTxMempool_ExpiredTxs_NumBlocks(t *testing.T) {
txmp := setup(t, 500)
txmp.height = 100
txmp.config.TTLNumBlocks = 10

tTxs := checkTxs(t, txmp, 100, 0)
require.Equal(t, len(tTxs), txmp.Size())
require.Equal(t, 100, txmp.heightIndex.Size())

// reap 5 txs at the next height -- no txs should expire
reapedTxs := txmp.ReapMaxTxs(5)
responses := make([]*abci.ResponseDeliverTx, len(reapedTxs))
for i := 0; i < len(responses); i++ {
responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK}
}

txmp.Lock()
require.NoError(t, txmp.Update(txmp.height+1, reapedTxs, responses, nil, nil))
txmp.Unlock()

require.Equal(t, 95, txmp.Size())
require.Equal(t, 95, txmp.heightIndex.Size())

// check more txs at height 101
_ = checkTxs(t, txmp, 50, 1)
require.Equal(t, 145, txmp.Size())
require.Equal(t, 145, txmp.heightIndex.Size())

// Reap 5 txs at a height that would expire all the transactions from before
// the previous Update (height 100).
//
// NOTE: When we reap txs below, we do not know if we're picking txs from the
// initial CheckTx calls or from the second round of CheckTx calls. Thus, we
// cannot guarantee that all 95 txs are remaining that should be expired and
// removed. However, we do know that at most 95 txs can be expired and
// removed.
reapedTxs = txmp.ReapMaxTxs(5)
responses = make([]*abci.ResponseDeliverTx, len(reapedTxs))
for i := 0; i < len(responses); i++ {
responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK}
}

txmp.Lock()
require.NoError(t, txmp.Update(txmp.height+10, reapedTxs, responses, nil, nil))
txmp.Unlock()

require.GreaterOrEqual(t, txmp.Size(), 45)
require.GreaterOrEqual(t, txmp.heightIndex.Size(), 45)
}

func TestTxMempool_CheckTxPostCheckError(t *testing.T) {
cases := []struct {
name string
err error
}{
{
name: "error",
err: errors.New("test error"),
},
{
name: "no error",
err: nil,
},
}
for _, tc := range cases {
testCase := tc
t.Run(testCase.name, func(t *testing.T) {
postCheckFn := func(_ types.Tx, _ *abci.ResponseCheckTx) error {
return testCase.err
}
txmp := setup(t, 0, WithPostCheck(postCheckFn))
rng := rand.New(rand.NewSource(time.Now().UnixNano()))
tx := make([]byte, txmp.config.MaxTxBytes-1)
_, err := rng.Read(tx)
require.NoError(t, err)

callback := func(res *abci.Response) {
checkTxRes, ok := res.Value.(*abci.Response_CheckTx)
require.True(t, ok)
expectedErrString := ""
if testCase.err != nil {
expectedErrString = testCase.err.Error()
}
require.Equal(t, expectedErrString, checkTxRes.CheckTx.MempoolError)
}
require.NoError(t, txmp.CheckTx(context.Background(), tx, callback, mempool.TxInfo{SenderID: 0}))
})
}
}

@@ -4,6 +4,7 @@ import (
"context"
"errors"
"fmt"
"runtime/debug"
"sync"
"time"

@@ -187,6 +188,11 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err
defer func() {
if e := recover(); e != nil {
err = fmt.Errorf("panic in processing message: %v", e)
r.Logger.Error(
"recovering from processing message panic",
"err", err,
"stack", string(debug.Stack()),
)
}
}()

@@ -311,7 +317,11 @@ func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer)
r.peerWG.Done()

if e := recover(); e != nil {
r.Logger.Error("recovering from broadcasting mempool loop", "err", e)
r.Logger.Error(
"recovering from broadcasting mempool loop",
"err", e,
"stack", string(debug.Stack()),
)
}
}()

@@ -1,6 +1,7 @@
package v1

import (
"sort"
"time"

"github.com/tendermint/tendermint/internal/libs/clist"
@@ -198,3 +199,84 @@ func (txs *TxStore) GetOrSetPeerByTxHash(hash [mempool.TxKeySize]byte, peerID ui
wtx.peers[peerID] = struct{}{}
return wtx, false
}

// WrappedTxList implements a thread-safe list of *WrappedTx objects that can be
// used to build generic transaction indexes in the mempool. It accepts a
// comparator function, less(a, b *WrappedTx) bool, that compares two WrappedTx
// references which is used during Insert in order to determine sorted order. If
// less returns true, a <= b.
type WrappedTxList struct {
mtx tmsync.RWMutex
txs []*WrappedTx
less func(*WrappedTx, *WrappedTx) bool
}

func NewWrappedTxList(less func(*WrappedTx, *WrappedTx) bool) *WrappedTxList {
return &WrappedTxList{
txs: make([]*WrappedTx, 0),
less: less,
}
}

// Size returns the number of WrappedTx objects in the list.
func (wtl *WrappedTxList) Size() int {
wtl.mtx.RLock()
defer wtl.mtx.RUnlock()

return len(wtl.txs)
}

// Reset resets the list of transactions to an empty list.
func (wtl *WrappedTxList) Reset() {
wtl.mtx.Lock()
defer wtl.mtx.Unlock()

wtl.txs = make([]*WrappedTx, 0)
}

// Insert inserts a WrappedTx reference into the sorted list based on the list's
// comparator function.
func (wtl *WrappedTxList) Insert(wtx *WrappedTx) {
wtl.mtx.Lock()
defer wtl.mtx.Unlock()

i := sort.Search(len(wtl.txs), func(i int) bool {
return wtl.less(wtl.txs[i], wtx)
})

if i == len(wtl.txs) {
// insert at the end
wtl.txs = append(wtl.txs, wtx)
return
}

// Make space for the inserted element by shifting values at the insertion
// index up one index.
//
// NOTE: The call to append does not allocate memory when cap(wtl.txs) > len(wtl.txs).
wtl.txs = append(wtl.txs[:i+1], wtl.txs[i:]...)
wtl.txs[i] = wtx
}

// Remove attempts to remove a WrappedTx from the sorted list.
func (wtl *WrappedTxList) Remove(wtx *WrappedTx) {
wtl.mtx.Lock()
defer wtl.mtx.Unlock()

i := sort.Search(len(wtl.txs), func(i int) bool {
return wtl.less(wtl.txs[i], wtx)
})

// Since the list is sorted, we evaluate all elements starting at i. Note, if
// the element does not exist, we may potentially evaluate the entire remainder
// of the list. However, a caller should not be expected to call Remove with a
// non-existing element.
for i < len(wtl.txs) {
if wtl.txs[i] == wtx {
wtl.txs = append(wtl.txs[:i], wtl.txs[i+1:]...)
return
}

i++
}
}
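
The comparator contract above ("if less returns true, a <= b") combined with sort.Search gives ordered insertion: Search returns the first index whose element satisfies less relative to the new value, so equal keys cluster together and the slice stays sorted. A reduced sketch specialized to int64 heights, with hypothetical names, to make this concrete:

package main

import (
	"fmt"
	"sort"
)

// sortedHeights keeps an int64 slice in ascending order using the same
// sort.Search-based insertion as WrappedTxList.Insert.
type sortedHeights struct {
	xs   []int64
	less func(a, b int64) bool
}

func (s *sortedHeights) insert(x int64) {
	// With less(a, b) == a >= b, this finds the first element >= x.
	i := sort.Search(len(s.xs), func(i int) bool { return s.less(s.xs[i], x) })
	if i == len(s.xs) {
		s.xs = append(s.xs, x)
		return
	}
	// Shift values at the insertion index up one slot, then place x.
	s.xs = append(s.xs[:i+1], s.xs[i:]...)
	s.xs[i] = x
}

func main() {
	s := &sortedHeights{less: func(a, b int64) bool { return a >= b }}
	for _, h := range []int64{7, 3, 9, 3} {
		s.insert(h)
	}
	fmt.Println(s.xs) // [3 3 7 9]
}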

@@ -2,6 +2,8 @@ package v1

import (
"fmt"
"math/rand"
"sort"
"testing"
"time"

@@ -132,3 +134,97 @@ func TestTxStore_Size(t *testing.T) {

require.Equal(t, numTxs, txStore.Size())
}

func TestWrappedTxList_Reset(t *testing.T) {
list := NewWrappedTxList(func(wtx1, wtx2 *WrappedTx) bool {
return wtx1.height >= wtx2.height
})

require.Zero(t, list.Size())

for i := 0; i < 100; i++ {
list.Insert(&WrappedTx{height: int64(i)})
}

require.Equal(t, 100, list.Size())

list.Reset()
require.Zero(t, list.Size())
}

func TestWrappedTxList_Insert(t *testing.T) {
list := NewWrappedTxList(func(wtx1, wtx2 *WrappedTx) bool {
return wtx1.height >= wtx2.height
})

rng := rand.New(rand.NewSource(time.Now().UnixNano()))

var expected []int
for i := 0; i < 100; i++ {
height := rng.Int63n(10000)
expected = append(expected, int(height))
list.Insert(&WrappedTx{height: height})

if i%10 == 0 {
list.Insert(&WrappedTx{height: height})
expected = append(expected, int(height))
}
}

got := make([]int, list.Size())
for i, wtx := range list.txs {
got[i] = int(wtx.height)
}

sort.Ints(expected)
require.Equal(t, expected, got)
}

func TestWrappedTxList_Remove(t *testing.T) {
list := NewWrappedTxList(func(wtx1, wtx2 *WrappedTx) bool {
return wtx1.height >= wtx2.height
})

rng := rand.New(rand.NewSource(time.Now().UnixNano()))

var txs []*WrappedTx
for i := 0; i < 100; i++ {
height := rng.Int63n(10000)
tx := &WrappedTx{height: height}

txs = append(txs, tx)
list.Insert(tx)

if i%10 == 0 {
tx = &WrappedTx{height: height}
list.Insert(tx)
txs = append(txs, tx)
}
}

// remove a tx that does not exist
list.Remove(&WrappedTx{height: 20000})

// remove a tx that exists (by height) but not referenced
list.Remove(&WrappedTx{height: txs[0].height})

// remove a few existing txs
for i := 0; i < 25; i++ {
j := rng.Intn(len(txs))
list.Remove(txs[j])
txs = append(txs[:j], txs[j+1:]...)
}

expected := make([]int, len(txs))
for i, tx := range txs {
expected[i] = int(tx.height)
}

got := make([]int, list.Size())
for i, wtx := range list.txs {
got[i] = int(wtx.height)
}

sort.Ints(expected)
require.Equal(t, expected, got)
}

@@ -421,7 +421,6 @@ func (c *MConnection) CanSend(chID byte) bool {
// sendRoutine polls for packets to send from channels.
func (c *MConnection) sendRoutine() {
defer c._recover()

protoWriter := protoio.NewDelimitedWriter(c.bufConnWriter)

FOR_LOOP:

@@ -1,4 +1,4 @@
// Code generated by mockery 2.9.0. DO NOT EDIT.
// Code generated by mockery v0.0.0-dev. DO NOT EDIT.

package mocks

@@ -1,4 +1,4 @@
// Code generated by mockery 2.9.0. DO NOT EDIT.
// Code generated by mockery v0.0.0-dev. DO NOT EDIT.

package mocks

@@ -1,4 +1,4 @@
// Code generated by mockery 2.9.0. DO NOT EDIT.
// Code generated by mockery v0.0.0-dev. DO NOT EDIT.

package mocks
34
internal/p2p/pex/doc.go
Normal file
@@ -0,0 +1,34 @@
/*
Package PEX (Peer exchange) handles all the logic necessary for nodes to share
information about their peers to other nodes. Specifically, this is the exchange
of addresses that a peer can use to discover more peers within the network.

The PEX reactor is a continuous service which periodically requests addresses
and serves addresses to other peers. There are two versions of this service
aligning with the two p2p frameworks that Tendermint currently supports.

V1 is coupled with the Switch (which handles peer connections and routing of
messages) and, alongside exchanging peer information in the form of port/IP
pairs, also has the responsibility of dialing peers and ensuring that a
node has a sufficient amount of peers connected.

V2 is embedded with the new p2p stack and uses the peer manager to advertise
peers as well as add new peers to the peer store. The V2 reactor passes a
different set of proto messages which include a list of
[urls](https://golang.org/pkg/net/url/#URL). These can be used to save a set of
endpoints that each peer uses. The V2 reactor has backwards compatibility with
V1. It can also handle V1 messages.

The V2 reactor is able to tweak the intensity of its search by decreasing or
increasing the interval between each request. It tracks connected peers via a
linked list, sending a request to the node at the front of the list and adding
it to the back of the list once a response is received. Using this method, a
node is able to spread out the load of requesting peers across all the peers it
is currently connected with.

With each inbound set of addresses, the reactor monitors the ratio of new
addresses to already-seen addresses and uses the information to dynamically
build a picture of the size of the network in order to ascertain how often the
node needs to search for new peers.
*/
package pex
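
The linked-list scheduling described above can be modeled in a few lines: ask the peer at the front of the list, then rotate it to the back once it responds. A toy sketch of that round-robin behavior; the names are illustrative, not the reactor's actual types:

package main

import (
	"container/list"
	"fmt"
)

// requester models the reactor's address-request scheduling: the peer at
// the front is asked next and then moved to the back of the list.
type requester struct {
	peers *list.List
}

func (r *requester) nextPeer() string {
	front := r.peers.Front()
	r.peers.MoveToBack(front)
	return front.Value.(string)
}

func main() {
	r := &requester{peers: list.New()}
	for _, p := range []string{"a", "b", "c"} {
		r.peers.PushBack(p)
	}
	for i := 0; i < 5; i++ {
		fmt.Print(r.nextPeer(), " ") // a b c a b
	}
	fmt.Println()
}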

@@ -3,6 +3,7 @@ package pex
import (
"context"
"fmt"
"runtime/debug"
"sync"
"time"

@@ -367,6 +368,11 @@ func (r *ReactorV2) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (er
defer func() {
if e := recover(); e != nil {
err = fmt.Errorf("panic in processing message: %v", e)
r.Logger.Error(
"recovering from processing message panic",
"err", err,
"stack", string(debug.Stack()),
)
}
}()

@@ -476,8 +476,10 @@ func (r *Router) routeChannel(
}

if !contains {
r.logger.Error("tried to send message across a channel that the peer doesn't have available",
"peer", envelope.To, "channel", chID)
// reactor tried to send a message across a channel that the
// peer doesn't have available. This is a known issue due to
// how peer subscriptions work:
// https://github.com/tendermint/tendermint/issues/6598
continue
}

@@ -1021,6 +1023,15 @@ func (r *Router) NodeInfo() types.NodeInfo {

// OnStart implements service.Service.
func (r *Router) OnStart() error {
netAddr, _ := r.nodeInfo.NetAddress()
r.Logger.Info(
"starting router",
"node_id", r.nodeInfo.NodeID,
"channels", r.nodeInfo.Channels,
"listen_addr", r.nodeInfo.ListenAddr,
"net_addr", netAddr,
)

go r.dialPeers()
go r.evictPeers()

@@ -151,9 +151,6 @@ func TestMConnTransport_Listen(t *testing.T) {
[]*p2p.ChannelDescriptor{{ID: byte(chID), Priority: 1}},
p2p.MConnTransportOptions{},
)
t.Cleanup(func() {
_ = transport.Close()
})

// Transport should not listen on any endpoints yet.
require.Empty(t, transport.Endpoints())
@@ -166,19 +163,6 @@ func TestMConnTransport_Listen(t *testing.T) {
}
require.NoError(t, err)

// Start a goroutine to just accept any connections.
go func() {
for {
conn, err := transport.Accept()
if err != nil {
return
}
defer func() {
_ = conn.Close()
}()
}
}()

// Check the endpoint.
endpoints := transport.Endpoints()
require.Len(t, endpoints, 1)
@@ -195,14 +179,40 @@ func TestMConnTransport_Listen(t *testing.T) {
require.NotZero(t, endpoint.Port)
require.Empty(t, endpoint.Path)

// Dialing the endpoint should work.
conn, err := transport.Dial(ctx, endpoint)
dialedChan := make(chan struct{})

var peerConn p2p.Connection
go func() {
// Dialing the endpoint should work.
var err error
peerConn, err = transport.Dial(ctx, endpoint)
require.NoError(t, err)
close(dialedChan)
}()

conn, err := transport.Accept()
require.NoError(t, err)
require.NoError(t, conn.Close())
_ = conn.Close()
<-dialedChan

time.Sleep(time.Minute)
// closing the connection should not error
require.NoError(t, peerConn.Close())

// trying to read from the connection should error
_, _, err = peerConn.ReceiveMessage()
require.Error(t, err)

// Trying to listen again should error.
err = transport.Listen(tc.endpoint)
require.Error(t, err)

// close the transport
_ = transport.Close()

// Dialing the closed endpoint should error
_, err = transport.Dial(ctx, endpoint)
require.Error(t, err)
})
}
}

@@ -403,6 +403,9 @@ func TestConnection_SendReceive(t *testing.T) {
_, _, err = ab.ReceiveMessage()
require.Error(t, err)
require.Equal(t, io.EOF, err)
_, err = ab.TrySendMessage(chID, []byte("closed try"))
require.Error(t, err)
require.Equal(t, io.EOF, err)
_, err = ab.SendMessage(chID, []byte("closed"))
require.Error(t, err)
require.Equal(t, io.EOF, err)
@@ -410,6 +413,9 @@ func TestConnection_SendReceive(t *testing.T) {
_, _, err = ba.ReceiveMessage()
require.Error(t, err)
require.Equal(t, io.EOF, err)
_, err = ba.TrySendMessage(chID, []byte("closed try"))
require.Error(t, err)
require.Equal(t, io.EOF, err)
_, err = ba.SendMessage(chID, []byte("closed"))
require.Error(t, err)
require.Equal(t, io.EOF, err)

@@ -23,9 +23,10 @@ type blockQueue struct {
verifyHeight int64

// termination conditions
stopHeight int64
stopTime time.Time
terminal *types.LightBlock
initialHeight int64
stopHeight int64
stopTime time.Time
terminal *types.LightBlock

// track failed heights so we know what blocks to try to fetch again
failed *maxIntHeap
@@ -45,21 +46,22 @@ type blockQueue struct {
}

func newBlockQueue(
startHeight, stopHeight int64,
startHeight, stopHeight, initialHeight int64,
stopTime time.Time,
maxRetries int,
) *blockQueue {
return &blockQueue{
stopHeight: stopHeight,
stopTime: stopTime,
fetchHeight: startHeight,
verifyHeight: startHeight,
pending: make(map[int64]lightBlockResponse),
failed: &maxIntHeap{},
retries: 0,
maxRetries: maxRetries,
waiters: make([]chan int64, 0),
doneCh: make(chan struct{}),
stopHeight: stopHeight,
initialHeight: initialHeight,
stopTime: stopTime,
fetchHeight: startHeight,
verifyHeight: startHeight,
pending: make(map[int64]lightBlockResponse),
failed: &maxIntHeap{},
retries: 0,
maxRetries: maxRetries,
waiters: make([]chan int64, 0),
doneCh: make(chan struct{}),
}
}

@@ -93,9 +95,10 @@ func (q *blockQueue) add(l lightBlockResponse) {
q.pending[l.block.Height] = l
}

// Lastly, if the incoming block is past the stop time and stop height then
// we mark it as the terminal block
if l.block.Height <= q.stopHeight && l.block.Time.Before(q.stopTime) {
// Lastly, if the incoming block is past the stop time and stop height or
// is equal to the initial height then we mark it as the terminal block.
if l.block.Height <= q.stopHeight && l.block.Time.Before(q.stopTime) ||
l.block.Height == q.initialHeight {
q.terminal = l.block
}
}
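
Restated in isolation, the new terminal condition says backfill may stop at a block that is both old enough and low enough, or at the chain's initial height, below which no blocks exist to fetch. A minimal sketch; note that Go's && binds tighter than ||, so the unparenthesized condition above groups the same way:

package main

import (
	"fmt"
	"time"
)

// isTerminal restates the condition from blockQueue.add: stop at a block
// past both the stop height and stop time, or at the chain's initial height.
func isTerminal(height, stopHeight, initialHeight int64, blockTime, stopTime time.Time) bool {
	return (height <= stopHeight && blockTime.Before(stopTime)) || height == initialHeight
}

func main() {
	stop := time.Now()
	old := stop.Add(-time.Hour)
	fmt.Println(isTerminal(90, 100, 1, old, stop))  // true: past both stop conditions
	fmt.Println(isTerminal(1, 100, 1, stop, stop))  // true: initial height reached
	fmt.Println(isTerminal(150, 100, 1, old, stop)) // false: still above stop height
}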

@@ -115,7 +118,7 @@ func (q *blockQueue) nextHeight() <-chan int64 {
return ch
}

if q.terminal == nil {
if q.terminal == nil && q.fetchHeight >= q.initialHeight {
// return and decrement the fetch height
ch <- q.fetchHeight
q.fetchHeight--

@@ -25,7 +25,7 @@ func TestBlockQueueBasic(t *testing.T) {
peerID, err := types.NewNodeID("0011223344556677889900112233445566778899")
require.NoError(t, err)

queue := newBlockQueue(startHeight, stopHeight, stopTime, 1)
queue := newBlockQueue(startHeight, stopHeight, 1, stopTime, 1)
wg := &sync.WaitGroup{}

// asynchronously fetch blocks and add it to the queue
@@ -72,7 +72,7 @@ func TestBlockQueueWithFailures(t *testing.T) {
peerID, err := types.NewNodeID("0011223344556677889900112233445566778899")
require.NoError(t, err)

queue := newBlockQueue(startHeight, stopHeight, stopTime, 200)
queue := newBlockQueue(startHeight, stopHeight, 1, stopTime, 200)
wg := &sync.WaitGroup{}

failureRate := 4
@@ -121,7 +121,7 @@ func TestBlockQueueWithFailures(t *testing.T) {
func TestBlockQueueBlocks(t *testing.T) {
peerID, err := types.NewNodeID("0011223344556677889900112233445566778899")
require.NoError(t, err)
queue := newBlockQueue(startHeight, stopHeight, stopTime, 2)
queue := newBlockQueue(startHeight, stopHeight, 1, stopTime, 2)
expectedHeight := startHeight
retryHeight := stopHeight + 2

@@ -168,7 +168,7 @@ loop:
func TestBlockQueueAcceptsNoMoreBlocks(t *testing.T) {
peerID, err := types.NewNodeID("0011223344556677889900112233445566778899")
require.NoError(t, err)
queue := newBlockQueue(startHeight, stopHeight, stopTime, 1)
queue := newBlockQueue(startHeight, stopHeight, 1, stopTime, 1)
defer queue.close()

loop:
@@ -194,7 +194,7 @@ func TestBlockQueueStopTime(t *testing.T) {
peerID, err := types.NewNodeID("0011223344556677889900112233445566778899")
require.NoError(t, err)

queue := newBlockQueue(startHeight, stopHeight, stopTime, 1)
queue := newBlockQueue(startHeight, stopHeight, 1, stopTime, 1)
wg := &sync.WaitGroup{}

baseTime := stopTime.Add(-50 * time.Second)
@@ -233,6 +233,46 @@ func TestBlockQueueStopTime(t *testing.T) {
}
}

func TestBlockQueueInitialHeight(t *testing.T) {
peerID, err := types.NewNodeID("0011223344556677889900112233445566778899")
require.NoError(t, err)
const initialHeight int64 = 120

queue := newBlockQueue(startHeight, stopHeight, initialHeight, stopTime, 1)
wg := &sync.WaitGroup{}

// asynchronously fetch blocks and add it to the queue
for i := 0; i <= numWorkers; i++ {
wg.Add(1)
go func() {
for {
select {
case height := <-queue.nextHeight():
require.GreaterOrEqual(t, height, initialHeight)
queue.add(mockLBResp(t, peerID, height, endTime))
case <-queue.done():
wg.Done()
return
}
}
}()
}

loop:
for {
select {
case <-queue.done():
wg.Wait()
require.NoError(t, queue.error())
break loop

case resp := <-queue.verifyNext():
require.GreaterOrEqual(t, resp.block.Height, initialHeight)
queue.success(resp.block.Height)
}
}
}

func mockLBResp(t *testing.T, peer types.NodeID, height int64, time time.Time) lightBlockResponse {
return lightBlockResponse{
block: mockLB(t, height, time, factory.MakeBlockID()),

@@ -45,23 +45,26 @@ func newDispatcher(requestCh chan<- p2p.Envelope, timeout time.Duration) *dispat
}
}

// LightBlock uses the request channel to fetch a light block from the next peer
// in a list, tracks the call and waits for the reactor to pass along the response
func (d *dispatcher) LightBlock(ctx context.Context, height int64) (*types.LightBlock, types.NodeID, error) {
d.mtx.Lock()
outgoingCalls := len(d.calls)
d.mtx.Unlock()

// check to see that the dispatcher is connected to at least one peer
if d.availablePeers.Len() == 0 && outgoingCalls == 0 {
if d.availablePeers.Len() == 0 && len(d.calls) == 0 {
d.mtx.Unlock()
return nil, "", errNoConnectedPeers
}
d.mtx.Unlock()

// fetch the next peer id in the list and request a light block from that
// peer
peer := d.availablePeers.Pop()
peer := d.availablePeers.Pop(ctx)
lb, err := d.lightBlock(ctx, height, peer)
return lb, peer, err
}
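
The change above reads len(d.calls) while the mutex is still held, instead of caching it before the check, and releases the lock on both exit paths. A minimal illustration of that locking discipline, with hypothetical fields:

package main

import (
	"fmt"
	"sync"
)

type dispatcher struct {
	mtx   sync.Mutex
	calls map[int64]chan struct{}
	peers int
}

// ready checks shared state under the lock, unlocking on both paths, so the
// check cannot race with a concurrent writer the way a cached length could.
func (d *dispatcher) ready() error {
	d.mtx.Lock()
	if d.peers == 0 && len(d.calls) == 0 { // checked under the lock
		d.mtx.Unlock()
		return fmt.Errorf("no connected peers")
	}
	d.mtx.Unlock()
	return nil
}

func main() {
	d := &dispatcher{calls: map[int64]chan struct{}{}}
	fmt.Println(d.ready()) // no connected peers
	d.peers = 1
	fmt.Println(d.ready()) // <nil>
}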

// Providers turns the dispatcher into a set of providers (per peer) which can
// be used by a light client
func (d *dispatcher) Providers(chainID string, timeout time.Duration) []provider.Provider {
d.mtx.Lock()
defer d.mtx.Unlock()
@@ -272,7 +275,7 @@ func (l *peerlist) Len() int {
return len(l.peers)
}

func (l *peerlist) Pop() types.NodeID {
func (l *peerlist) Pop(ctx context.Context) types.NodeID {
l.mtx.Lock()
if len(l.peers) == 0 {
// if we don't have any peers in the list we block until a peer is
@@ -281,8 +284,13 @@ func (l *peerlist) Pop() types.NodeID {
l.waiting = append(l.waiting, wait)
// unlock whilst waiting so that the list can be appended to
l.mtx.Unlock()
peer := <-wait
return peer
select {
case peer := <-wait:
return peer

case <-ctx.Done():
return ""
}
}

peer := l.peers[0]
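
With the context threaded into Pop, a caller blocked on an empty list now selects on both the hand-off channel and ctx.Done(), so cancellation releases it instead of leaking the goroutine. A reduced runnable model of that behavior:

package main

import (
	"context"
	"fmt"
	"time"
)

// pop models the context-aware wait in peerlist.Pop: it returns either a
// handed-off peer or the zero value once the context is canceled.
func pop(ctx context.Context, wait <-chan string) string {
	select {
	case peer := <-wait:
		return peer
	case <-ctx.Done():
		return "" // canceled: report no peer
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	wait := make(chan string) // never written to: simulates an empty peer list
	fmt.Printf("got %q\n", pop(ctx, wait))
}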

@@ -8,6 +8,7 @@ import (
"testing"
"time"

"github.com/fortytw2/leaktest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"

@@ -17,6 +18,7 @@ import (
)

func TestDispatcherBasic(t *testing.T) {
t.Cleanup(leaktest.Check(t))

ch := make(chan p2p.Envelope, 100)
closeCh := make(chan struct{})
@@ -49,7 +51,83 @@ func TestDispatcherBasic(t *testing.T) {
wg.Wait()
}

func TestDispatcherReturnsNoBlock(t *testing.T) {
t.Cleanup(leaktest.Check(t))
ch := make(chan p2p.Envelope, 100)
d := newDispatcher(ch, 1*time.Second)
peerFromSet := createPeerSet(1)[0]
d.addPeer(peerFromSet)
doneCh := make(chan struct{})

go func() {
<-ch
require.NoError(t, d.respond(nil, peerFromSet))
close(doneCh)
}()

lb, peerResult, err := d.LightBlock(context.Background(), 1)
<-doneCh

require.Nil(t, lb)
require.Nil(t, err)
require.Equal(t, peerFromSet, peerResult)
}

func TestDispatcherErrorsWhenNoPeers(t *testing.T) {
t.Cleanup(leaktest.Check(t))
ch := make(chan p2p.Envelope, 100)
d := newDispatcher(ch, 1*time.Second)

lb, peerResult, err := d.LightBlock(context.Background(), 1)

require.Nil(t, lb)
require.Empty(t, peerResult)
require.Equal(t, errNoConnectedPeers, err)
}

func TestDispatcherReturnsBlockOncePeerAvailable(t *testing.T) {
t.Cleanup(leaktest.Check(t))
dispatcherRequestCh := make(chan p2p.Envelope, 100)
d := newDispatcher(dispatcherRequestCh, 1*time.Second)
peerFromSet := createPeerSet(1)[0]
d.addPeer(peerFromSet)
ctx := context.Background()
wrapped, cancelFunc := context.WithCancel(ctx)

doneCh := make(chan struct{})
go func() {
lb, peerResult, err := d.LightBlock(wrapped, 1)
require.Nil(t, lb)
require.Equal(t, peerFromSet, peerResult)
require.Nil(t, err)

// calls to dispatcher.LightBlock write into the dispatcher's requestCh.
// we read from the requestCh here to unblock the requestCh for future
// calls.
<-dispatcherRequestCh
close(doneCh)
}()
cancelFunc()
<-doneCh

go func() {
<-dispatcherRequestCh
lb := &types.LightBlock{}
asProto, err := lb.ToProto()
require.Nil(t, err)
err = d.respond(asProto, peerFromSet)
require.Nil(t, err)
}()

lb, peerResult, err := d.LightBlock(context.Background(), 1)

require.NotNil(t, lb)
require.Equal(t, peerFromSet, peerResult)
require.Nil(t, err)
}

func TestDispatcherProviders(t *testing.T) {
t.Cleanup(leaktest.Check(t))

ch := make(chan p2p.Envelope, 100)
chainID := "state-sync-test"
@@ -78,6 +156,7 @@ func TestDispatcherProviders(t *testing.T) {
}

func TestPeerListBasic(t *testing.T) {
t.Cleanup(leaktest.Check(t))
peerList := newPeerList()
assert.Zero(t, peerList.Len())
numPeers := 10
@@ -95,7 +174,7 @@ func TestPeerListBasic(t *testing.T) {

half := numPeers / 2
for i := 0; i < half; i++ {
assert.Equal(t, peerSet[i], peerList.Pop())
assert.Equal(t, peerSet[i], peerList.Pop(ctx))
}
assert.Equal(t, half, peerList.Len())

@@ -104,11 +183,56 @@ func TestPeerListBasic(t *testing.T) {

peerList.Remove(peerSet[half])
half++
assert.Equal(t, peerSet[half], peerList.Pop())
assert.Equal(t, peerSet[half], peerList.Pop(ctx))

}

func TestPeerListBlocksWhenEmpty(t *testing.T) {
t.Cleanup(leaktest.Check(t))
peerList := newPeerList()
require.Zero(t, peerList.Len())
doneCh := make(chan struct{})
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
go func() {
peerList.Pop(ctx)
close(doneCh)
}()
select {
case <-doneCh:
t.Error("empty peer list should not have returned result")
case <-time.After(100 * time.Millisecond):
}
}

func TestEmptyPeerListReturnsWhenContextCanceled(t *testing.T) {
t.Cleanup(leaktest.Check(t))
peerList := newPeerList()
require.Zero(t, peerList.Len())
doneCh := make(chan struct{})
ctx := context.Background()
wrapped, cancel := context.WithCancel(ctx)
go func() {
peerList.Pop(wrapped)
close(doneCh)
}()
select {
case <-doneCh:
t.Error("empty peer list should not have returned result")
case <-time.After(100 * time.Millisecond):
}

cancel()

select {
case <-doneCh:
case <-time.After(100 * time.Millisecond):
t.Error("peer list should have returned after context canceled")
}
}

func TestPeerListConcurrent(t *testing.T) {
t.Cleanup(leaktest.Check(t))
peerList := newPeerList()
numPeers := 10

@@ -117,7 +241,7 @@ func TestPeerListConcurrent(t *testing.T) {
// peer list hasn't been populated; each of these goroutines should block
for i := 0; i < numPeers/2; i++ {
go func() {
_ = peerList.Pop()
_ = peerList.Pop(ctx)
wg.Done()
}()
}
@@ -132,7 +256,7 @@ func TestPeerListConcurrent(t *testing.T) {
// we request the second half of the peer set
for i := 0; i < numPeers/2; i++ {
go func() {
_ = peerList.Pop()
_ = peerList.Pop(ctx)
wg.Done()
}()
}
50
internal/statesync/mock_sync_reactor.go
Normal file
@@ -0,0 +1,50 @@
package statesync

import (
"context"
"time"

mock "github.com/stretchr/testify/mock"
state "github.com/tendermint/tendermint/state"
)

// MockSyncReactor is an autogenerated mock type for the SyncReactor type.
// Because Sync() takes a StateProvider from this package, the mock lives in
// package statesync instead of mocks.
type MockSyncReactor struct {
mock.Mock
}

// Backfill provides a mock function with given fields: _a0
func (_m *MockSyncReactor) Backfill(_a0 state.State) error {
ret := _m.Called(_a0)

var r0 error
if rf, ok := ret.Get(0).(func(state.State) error); ok {
r0 = rf(_a0)
} else {
r0 = ret.Error(0)
}

return r0
}

// Sync provides a mock function with given fields: _a0, _a1, _a2
func (_m *MockSyncReactor) Sync(_a0 context.Context, _a1 StateProvider, _a2 time.Duration) (state.State, error) {
ret := _m.Called(_a0, _a1, _a2)

var r0 state.State
if rf, ok := ret.Get(0).(func(context.Context, StateProvider, time.Duration) state.State); ok {
r0 = rf(_a0, _a1, _a2)
} else {
r0 = ret.Get(0).(state.State)
}

var r1 error
if rf, ok := ret.Get(1).(func(context.Context, StateProvider, time.Duration) error); ok {
r1 = rf(_a0, _a1, _a2)
} else {
r1 = ret.Error(1)
}

return r0, r1
}
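
A sketch of how this mock is typically wired up in a test, assuming only the testify mock API already imported above; the canned return values are illustrative:

package statesync

import (
	"context"
	"testing"
	"time"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	state "github.com/tendermint/tendermint/state"
)

// TestMockSyncReactorUsage registers expectations with On(...).Return(...),
// exercises the mock, and verifies the calls with AssertExpectations.
func TestMockSyncReactorUsage(t *testing.T) {
	r := new(MockSyncReactor)
	r.On("Sync", mock.Anything, mock.Anything, mock.Anything).Return(state.State{}, nil)
	r.On("Backfill", mock.Anything).Return(nil)

	_, err := r.Sync(context.Background(), nil, time.Second)
	require.NoError(t, err)
	require.NoError(t, r.Backfill(state.State{}))

	r.AssertExpectations(t)
}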
|
||||
@@ -1,4 +1,4 @@
|
||||
// Code generated by mockery 2.9.0. DO NOT EDIT.
|
||||
// Code generated by mockery v0.0.0-dev. DO NOT EDIT.
|
||||
|
||||
package mocks
|
||||
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"runtime/debug"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
@@ -94,13 +95,19 @@ const (
|
||||
|
||||
// lightBlockResponseTimeout is how long the dispatcher waits for a peer to
|
||||
// return a light block
|
||||
lightBlockResponseTimeout = 10 * time.Second
|
||||
lightBlockResponseTimeout = 30 * time.Second
|
||||
|
||||
// maxLightBlockRequestRetries is the amount of retries acceptable before
|
||||
// the backfill process aborts
|
||||
maxLightBlockRequestRetries = 20
|
||||
)
|
||||
|
||||
// SyncReactor defines an interface used for testing abilities of node.startStateSync.
|
||||
type SyncReactor interface {
|
||||
Sync(context.Context, StateProvider, time.Duration) (sm.State, error)
|
||||
Backfill(sm.State) error
|
||||
}
|
||||
|
||||
// Reactor handles state sync, both restoring snapshots for the local node and
|
||||
// serving snapshots for other nodes.
|
||||
type Reactor struct {
|
||||
@@ -221,6 +228,11 @@ func (r *Reactor) Sync(
|
||||
return sm.State{}, errors.New("a state sync is already in progress")
|
||||
}
|
||||
|
||||
if stateProvider == nil {
|
||||
r.mtx.Unlock()
|
||||
return sm.State{}, errors.New("the stateProvider should not be nil when doing the state sync")
|
||||
}
|
||||
|
||||
r.syncer = newSyncer(
|
||||
r.cfg,
|
||||
r.Logger,
|
||||
@@ -280,7 +292,9 @@ func (r *Reactor) Backfill(state sm.State) error {
|
||||
return r.backfill(
|
||||
context.Background(),
|
||||
state.ChainID,
|
||||
state.LastBlockHeight, stopHeight,
|
||||
state.LastBlockHeight,
|
||||
stopHeight,
|
||||
state.InitialHeight,
|
||||
state.LastBlockID,
|
||||
stopTime,
|
||||
)
|
||||
@@ -289,7 +303,7 @@ func (r *Reactor) Backfill(state sm.State) error {
|
||||
func (r *Reactor) backfill(
|
||||
ctx context.Context,
|
||||
chainID string,
|
||||
startHeight, stopHeight int64,
|
||||
startHeight, stopHeight, initialHeight int64,
|
||||
trustedBlockID types.BlockID,
|
||||
stopTime time.Time,
|
||||
) error {
|
||||
@@ -302,7 +316,7 @@ func (r *Reactor) backfill(
|
||||
lastChangeHeight int64 = startHeight
|
||||
)
|
||||
|
||||
queue := newBlockQueue(startHeight, stopHeight, stopTime, maxLightBlockRequestRetries)
|
||||
queue := newBlockQueue(startHeight, stopHeight, initialHeight, stopTime, maxLightBlockRequestRetries)
|
||||
|
||||
// fetch light blocks across four workers. The aim with deploying concurrent
|
||||
// workers is to equate the network messaging time with the verification
|
||||
@@ -310,12 +324,17 @@ func (r *Reactor) backfill(
 	// waiting on blocks. If it takes 4s to retrieve a block and 1s to verify
 	// it, then steady state involves four workers.
 	for i := 0; i < int(r.cfg.Fetchers); i++ {
+		ctxWithCancel, cancel := context.WithCancel(ctx)
+		defer cancel()
 		go func() {
 			for {
 				select {
 				case height := <-queue.nextHeight():
 					r.Logger.Debug("fetching next block", "height", height)
-					lb, peer, err := r.dispatcher.LightBlock(ctx, height)
+					lb, peer, err := r.dispatcher.LightBlock(ctxWithCancel, height)
+					if errors.Is(err, context.Canceled) {
+						return
+					}
 					if err != nil {
 						queue.retry(height)
 						if errors.Is(err, errNoConnectedPeers) {
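The comment above sizes the worker pool by the ratio of fetch latency to verification latency: at roughly 4s to fetch and 1s to verify, four concurrent fetchers keep the verifier continuously busy. A self-contained sketch of that shape, including the cancellation behaviour this hunk introduces (workers exit when the derived context is cancelled); all names and the scaled-down timings are illustrative:

    package main

    import (
        "context"
        "fmt"
        "time"
    )

    func main() {
        ctx, cancel := context.WithCancel(context.Background())
        defer cancel()

        heights := make(chan int64, 8)
        fetched := make(chan int64)

        // Four fetchers: fetching (~4 units) dominates verification
        // (~1 unit), so a 4:1 pool keeps the single verifier busy.
        for i := 0; i < 4; i++ {
            go func() {
                for {
                    select {
                    case h := <-heights:
                        time.Sleep(40 * time.Millisecond) // stand-in for the LightBlock fetch
                        select {
                        case fetched <- h:
                        case <-ctx.Done():
                            return // mirrors the errors.Is(err, context.Canceled) check
                        }
                    case <-ctx.Done():
                        return
                    }
                }
            }()
        }

        for h := int64(8); h >= 1; h-- {
            heights <- h
        }
        for i := 0; i < 8; i++ {
            h := <-fetched
            time.Sleep(10 * time.Millisecond) // stand-in for verification
            fmt.Println("verified", h)
        }
    }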
@@ -332,8 +351,8 @@ func (r *Reactor) backfill(
 					if lb == nil {
 						r.Logger.Info("backfill: peer didn't have block, fetching from another peer", "height", height)
 						queue.retry(height)
-						// as we are fetching blocks backwards, if this node doesn't have the block it likely doesn't
-						// have any prior ones, thus we remove it from the peer list
+						// As we are fetching blocks backwards, if this node doesn't have the block it likely doesn't
+						// have any prior ones, thus we remove it from the peer list.
 						r.dispatcher.removePeer(peer)
 						continue
 					}
@@ -452,10 +471,11 @@ func (r *Reactor) handleSnapshotMessage(envelope p2p.Envelope) error {
 	}

 	for _, snapshot := range snapshots {
-		logger.Debug(
+		logger.Info(
 			"advertising snapshot",
 			"height", snapshot.Height,
 			"format", snapshot.Format,
+			"peer", envelope.From,
 		)
 		r.snapshotCh.Out <- p2p.Envelope{
 			To: envelope.From,
@@ -622,7 +642,6 @@ func (r *Reactor) handleLightBlockMessage(envelope p2p.Envelope) error {
 	case *ssproto.LightBlockResponse:
 		if err := r.dispatcher.respond(msg.LightBlock, envelope.From); err != nil {
 			r.Logger.Error("error processing light block response", "err", err)
-			return err
 		}

 	default:
@@ -639,6 +658,11 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err
 	defer func() {
 		if e := recover(); e != nil {
 			err = fmt.Errorf("panic in processing message: %v", e)
+			r.Logger.Error(
+				"recovering from processing message panic",
+				"err", err,
+				"stack", string(debug.Stack()),
+			)
 		}
 	}()
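The added logging captures the stack at recovery time (hence the new runtime/debug import earlier in this diff). The underlying pattern is a deferred closure that rewrites the function's named error return; a minimal standalone sketch:

    package main

    import (
        "fmt"
        "log"
        "runtime/debug"
    )

    func handle(process func()) (err error) {
        defer func() {
            if e := recover(); e != nil {
                // Convert the panic into the named return value and keep
                // the stack, so one bad message cannot crash the reactor.
                err = fmt.Errorf("panic in processing message: %v", e)
                log.Printf("recovered: %v\n%s", err, debug.Stack())
            }
        }()
        process()
        return nil
    }

    func main() {
        err := handle(func() { panic("malformed envelope") })
        fmt.Println("handler returned:", err)
    }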
@@ -3,12 +3,11 @@ package statesync
 import (
 	"context"
 	"fmt"
-	"math/rand"
 	"sync"
 	"testing"
 	"time"

-	// "github.com/fortytw2/leaktest"
+	"github.com/fortytw2/leaktest"
 	"github.com/stretchr/testify/mock"
 	"github.com/stretchr/testify/require"
 	dbm "github.com/tendermint/tm-db"
@@ -421,11 +420,11 @@ func TestReactor_Dispatcher(t *testing.T) {

 func TestReactor_Backfill(t *testing.T) {
 	// test backfill algorithm with varying failure rates [0, 10]
-	failureRates := []int{0, 3, 9}
+	failureRates := []int{0, 2, 9}
 	for _, failureRate := range failureRates {
 		failureRate := failureRate
 		t.Run(fmt.Sprintf("failure rate: %d", failureRate), func(t *testing.T) {
-			// t.Cleanup(leaktest.Check(t))
+			t.Cleanup(leaktest.CheckTimeout(t, 1*time.Minute))
 			rts := setup(t, nil, nil, nil, 21)

 			var (
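The previously commented-out leak check is now enabled, but via leaktest.CheckTimeout rather than leaktest.Check: CheckTimeout takes an explicit deadline for goroutines to wind down (Check uses the library's short default), which suits a test that spawns slow backfill workers. Usage shape:

    func TestReactorShutsDownCleanly(t *testing.T) {
        // Fails the test if goroutines spawned during the test are
        // still running one minute after it finishes.
        t.Cleanup(leaktest.CheckTimeout(t, 1*time.Minute))
        // ... exercise the reactor ...
    }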
@@ -464,10 +463,11 @@ func TestReactor_Backfill(t *testing.T) {
 				factory.DefaultTestChainID,
 				startHeight,
 				stopHeight,
+				1,
 				factory.MakeBlockIDWithHash(chain[startHeight].Header.Hash()),
 				stopTime,
 			)
-			if failureRate > 5 {
+			if failureRate > 3 {
 				require.Error(t, err)
 			} else {
 				require.NoError(t, err)
@@ -506,6 +506,7 @@ func handleLightBlockRequests(t *testing.T,
 	close chan struct{},
 	failureRate int) {
 	requests := 0
+	errorCount := 0
 	for {
 		select {
 		case envelope := <-receiving:
@@ -520,7 +521,7 @@ func handleLightBlockRequests(t *testing.T,
 					},
 				}
 			} else {
-				switch rand.Intn(3) {
+				switch errorCount % 3 {
 				case 0: // send a different block
 					differntLB, err := mockLB(t, int64(msg.Height), factory.DefaultTestTime, factory.MakeBlockID()).ToProto()
 					require.NoError(t, err)
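Replacing rand.Intn(3) with errorCount % 3 makes the injected failures deterministic: the handler cycles through the three failure modes in a fixed order, so a given failure rate produces the same sequence on every run and the pass/fail thresholds above can be exact. A standalone illustration:

    package main

    import "fmt"

    func main() {
        modes := []string{
            "send a different block",
            "second failure mode (elided in the hunk above)",
            "don't do anything",
        }
        errorCount := 0
        for i := 0; i < 6; i++ {
            // Deterministic: 0, 1, 2, 0, 1, 2, ... on every run.
            fmt.Printf("failure %d -> %s\n", i, modes[errorCount%3])
            errorCount++
        }
    }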
@@ -539,6 +540,7 @@ func handleLightBlockRequests(t *testing.T,
 					}
 				case 2: // don't do anything
 				}
+				errorCount++
 			}
 		}
 	case <-close:
Some files were not shown because too many files have changed in this diff.